From 370e1652ad97a2dbfd33400fc0e23a6c2fc4a5d5 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Wed, 10 Nov 2021 16:49:31 -0800 Subject: [PATCH 0001/1588] PYTHON-3003 Add kms_tls_options to configure options for KMS provider connections (#784) --- pymongo/common.py | 13 +++++++++- pymongo/encryption.py | 42 ++++++++++++++++++++++---------- pymongo/encryption_options.py | 27 +++++++++++---------- pymongo/uri_parser.py | 34 ++++++++++++++++++++++++++ test/test_encryption.py | 45 +++++++++++++++++++++++++++++++++-- 5 files changed, 133 insertions(+), 28 deletions(-) diff --git a/pymongo/common.py b/pymongo/common.py index 3d68ba1c76..5dd7b180c0 100644 --- a/pymongo/common.py +++ b/pymongo/common.py @@ -30,7 +30,6 @@ validate_zlib_compression_level) from pymongo.driver_info import DriverInfo from pymongo.server_api import ServerApi -from pymongo.encryption_options import validate_auto_encryption_opts_or_none from pymongo.errors import ConfigurationError from pymongo.monitoring import _validate_event_listeners from pymongo.read_concern import ReadConcern @@ -582,6 +581,18 @@ def validate_tzinfo(dummy, value): return value +def validate_auto_encryption_opts_or_none(option, value): + """Validate the driver keyword arg.""" + if value is None: + return value + from pymongo.encryption_options import AutoEncryptionOpts + if not isinstance(value, AutoEncryptionOpts): + raise TypeError("%s must be an instance of AutoEncryptionOpts" % ( + option,)) + + return value + + # Dictionary where keys are the names of public URI options, and values # are lists of aliases for that option. URI_OPTIONS_ALIAS_MAP = { diff --git a/pymongo/encryption.py b/pymongo/encryption.py index ad19b26426..1fe2877bbc 100644 --- a/pymongo/encryption.py +++ b/pymongo/encryption.py @@ -46,6 +46,7 @@ EncryptionError, InvalidOperation, ServerSelectionTimeoutError) +from pymongo.encryption_options import AutoEncryptionOpts from pymongo.mongo_client import MongoClient from pymongo.pool import _configured_socket, PoolOptions from pymongo.read_concern import ReadConcern @@ -106,20 +107,23 @@ def kms_request(self, kms_context): """ endpoint = kms_context.endpoint message = kms_context.message - host, port = parse_host(endpoint, _HTTPS_PORT) - # Enable strict certificate verification, OCSP, match hostname, and - # SNI using the system default CA certificates. - ctx = get_ssl_context( - None, # certfile - None, # passphrase - None, # ca_certs - None, # crlfile - False, # allow_invalid_certificates - False, # allow_invalid_hostnames - False) # disable_ocsp_endpoint_check + provider = kms_context.kms_provider + ctx = self.opts._kms_ssl_contexts.get(provider) + if not ctx: + # Enable strict certificate verification, OCSP, match hostname, and + # SNI using the system default CA certificates. + ctx = get_ssl_context( + None, # certfile + None, # passphrase + None, # ca_certs + None, # crlfile + False, # allow_invalid_certificates + False, # allow_invalid_hostnames + False) # disable_ocsp_endpoint_check opts = PoolOptions(connect_timeout=_KMS_CONNECT_TIMEOUT, socket_timeout=_KMS_CONNECT_TIMEOUT, ssl_context=ctx) + host, port = parse_host(endpoint, _HTTPS_PORT) conn = _configured_socket((host, port), opts) try: conn.sendall(message) @@ -359,7 +363,7 @@ class ClientEncryption(object): """Explicit client-side field level encryption.""" def __init__(self, kms_providers, key_vault_namespace, key_vault_client, - codec_options): + codec_options, kms_tls_options=None): """Explicit client-side field level encryption. 
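+        (TLS connections to KMS providers can be customized through the
+        new ``kms_tls_options`` parameter documented below.)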
The ClientEncryption class encapsulates explicit operations on a key @@ -411,6 +415,16 @@ def __init__(self, kms_providers, key_vault_namespace, key_vault_client, should be the same CodecOptions instance configured on the MongoClient, Database, or Collection used to access application data. + - `kms_tls_options` (optional): A map of KMS provider names to TLS + options to use when creating secure connections to KMS providers. + Accepts the same TLS options as + :class:`pymongo.mongo_client.MongoClient`. For example, to + override the system default CA file:: + + kms_tls_options={'kmip': {'tlsCAFile': certifi.where()}} + + .. versionchanged:: 4.0 + Added the `kms_tls_options` parameter. .. versionadded:: 3.9 """ @@ -432,7 +446,9 @@ def __init__(self, kms_providers, key_vault_namespace, key_vault_client, db, coll = key_vault_namespace.split('.', 1) key_vault_coll = key_vault_client[db][coll] - self._io_callbacks = _EncryptionIO(None, key_vault_coll, None, None) + opts = AutoEncryptionOpts(kms_providers, key_vault_namespace, + kms_tls_options=kms_tls_options) + self._io_callbacks = _EncryptionIO(None, key_vault_coll, None, opts) self._encryption = ExplicitEncrypter( self._io_callbacks, MongoCryptOptions(kms_providers, None)) diff --git a/pymongo/encryption_options.py b/pymongo/encryption_options.py index fd1226c7c6..1d4aa0c7b0 100644 --- a/pymongo/encryption_options.py +++ b/pymongo/encryption_options.py @@ -23,6 +23,7 @@ _HAVE_PYMONGOCRYPT = False from pymongo.errors import ConfigurationError +from pymongo.uri_parser import _parse_kms_tls_options class AutoEncryptionOpts(object): @@ -35,7 +36,8 @@ def __init__(self, kms_providers, key_vault_namespace, mongocryptd_uri='mongodb://localhost:27020', mongocryptd_bypass_spawn=False, mongocryptd_spawn_path='mongocryptd', - mongocryptd_spawn_args=None): + mongocryptd_spawn_args=None, + kms_tls_options=None): """Options to configure automatic client-side field level encryption. Automatic client-side field level encryption requires MongoDB 4.2 @@ -118,6 +120,16 @@ def __init__(self, kms_providers, key_vault_namespace, ``['--idleShutdownTimeoutSecs=60']``. If the list does not include the ``idleShutdownTimeoutSecs`` option then ``'--idleShutdownTimeoutSecs=60'`` will be added. + - `kms_tls_options` (optional): A map of KMS provider names to TLS + options to use when creating secure connections to KMS providers. + Accepts the same TLS options as + :class:`pymongo.mongo_client.MongoClient`. For example, to + override the system default CA file:: + + kms_tls_options={'kmip': {'tlsCAFile': certifi.where()}} + + .. versionchanged:: 4.0 + Added the `kms_tls_options` parameter. .. versionadded:: 3.9 """ @@ -142,14 +154,5 @@ def __init__(self, kms_providers, key_vault_namespace, if not any('idleShutdownTimeoutSecs' in s for s in self._mongocryptd_spawn_args): self._mongocryptd_spawn_args.append('--idleShutdownTimeoutSecs=60') - - -def validate_auto_encryption_opts_or_none(option, value): - """Validate the driver keyword arg.""" - if value is None: - return value - if not isinstance(value, AutoEncryptionOpts): - raise TypeError("%s must be an instance of AutoEncryptionOpts" % ( - option,)) - - return value + # Maps KMS provider name to a SSLContext. 
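+        # (e.g. ``{'kmip': <ssl.SSLContext>}``), as parsed by
+        # _parse_kms_tls_options. This is empty when kms_tls_options is
+        # None or {}; in that case _EncryptionIO.kms_request falls back
+        # to a strict default TLS context for each provider.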
+ self._kms_ssl_contexts = _parse_kms_tls_options(kms_tls_options) diff --git a/pymongo/uri_parser.py b/pymongo/uri_parser.py index 23db48bf4a..8c43d51770 100644 --- a/pymongo/uri_parser.py +++ b/pymongo/uri_parser.py @@ -20,6 +20,7 @@ from urllib.parse import unquote_plus +from pymongo.client_options import _parse_ssl_options from pymongo.common import ( SRV_SERVICE_NAME, get_validated_options, INTERNAL_URI_OPTION_NAME_MAP, @@ -569,6 +570,39 @@ def parse_uri(uri, default_port=DEFAULT_PORT, validate=True, warn=False, } +def _parse_kms_tls_options(kms_tls_options): + """Parse KMS TLS connection options.""" + if not kms_tls_options: + return {} + if not isinstance(kms_tls_options, dict): + raise TypeError('kms_tls_options must be a dict') + contexts = {} + for provider, opts in kms_tls_options.items(): + if not isinstance(opts, dict): + raise TypeError(f'kms_tls_options["{provider}"] must be a dict') + opts.setdefault('tls', True) + opts = _CaseInsensitiveDictionary(opts) + opts = _handle_security_options(opts) + opts = _normalize_options(opts) + opts = validate_options(opts) + ssl_context, allow_invalid_hostnames = _parse_ssl_options(opts) + if ssl_context is None: + raise ConfigurationError('TLS is required for KMS providers') + if allow_invalid_hostnames: + raise ConfigurationError('Insecure TLS options prohibited') + + for n in ['tlsInsecure', + 'tlsAllowInvalidCertificates', + 'tlsAllowInvalidHostnames', + 'tlsDisableOCSPEndpointCheck', + 'tlsDisableCertificateRevocationCheck']: + if n in opts: + raise ConfigurationError( + f'Insecure TLS options prohibited: {n}') + contexts[provider] = ssl_context + return contexts + + if __name__ == '__main__': import pprint import sys diff --git a/test/test_encryption.py b/test/test_encryption.py index 67681daba8..d94fcf3469 100644 --- a/test/test_encryption.py +++ b/test/test_encryption.py @@ -17,6 +17,7 @@ import base64 import copy import os +import ssl import traceback import socket import sys @@ -50,9 +51,8 @@ from pymongo.mongo_client import MongoClient from pymongo.operations import InsertOne from pymongo.write_concern import WriteConcern -from test.test_ssl import CA_PEM -from test import (unittest, +from test import (unittest, CA_PEM, CLIENT_PEM, client_context, IntegrationTest, PyMongoTestCase) @@ -92,6 +92,7 @@ def test_init(self): self.assertEqual(opts._mongocryptd_spawn_path, 'mongocryptd') self.assertEqual( opts._mongocryptd_spawn_args, ['--idleShutdownTimeoutSecs=60']) + self.assertEqual(opts._kms_ssl_contexts, {}) @unittest.skipUnless(_HAVE_PYMONGOCRYPT, 'pymongocrypt is not installed') def test_init_spawn_args(self): @@ -116,6 +117,46 @@ def test_init_spawn_args(self): opts._mongocryptd_spawn_args, ['--quiet', '--port=27020', '--idleShutdownTimeoutSecs=60']) + @unittest.skipUnless(_HAVE_PYMONGOCRYPT, 'pymongocrypt is not installed') + def test_init_kms_tls_options(self): + # Error cases: + with self.assertRaisesRegex( + TypeError, r'kms_tls_options\["kmip"\] must be a dict'): + AutoEncryptionOpts({}, 'k.d', kms_tls_options={'kmip': 1}) + for tls_opts in [ + {'kmip': {'tls': True, 'tlsInsecure': True}}, + {'kmip': {'tls': True, 'tlsAllowInvalidCertificates': True}}, + {'kmip': {'tls': True, 'tlsAllowInvalidHostnames': True}}, + {'kmip': {'tls': True, 'tlsDisableOCSPEndpointCheck': True}}]: + with self.assertRaisesRegex( + ConfigurationError, 'Insecure TLS options prohibited'): + opts = AutoEncryptionOpts({}, 'k.d', kms_tls_options=tls_opts) + with self.assertRaises(FileNotFoundError): + AutoEncryptionOpts({}, 'k.d', kms_tls_options={ + 
'kmip': {'tlsCAFile': 'does-not-exist'}}) + # Success cases: + for tls_opts in [None, {}]: + opts = AutoEncryptionOpts({}, 'k.d', kms_tls_options=tls_opts) + self.assertEqual(opts._kms_ssl_contexts, {}) + opts = AutoEncryptionOpts( + {}, 'k.d', kms_tls_options={'kmip': {'tls': True}, 'aws': {}}) + ctx = opts._kms_ssl_contexts['kmip'] + # On < 3.7 we check hostnames manually. + if sys.version_info[:2] >= (3, 7): + self.assertEqual(ctx.check_hostname, True) + self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED) + ctx = opts._kms_ssl_contexts['aws'] + if sys.version_info[:2] >= (3, 7): + self.assertEqual(ctx.check_hostname, True) + self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED) + opts = AutoEncryptionOpts( + {}, 'k.d', kms_tls_options={'kmip': { + 'tlsCAFile': CA_PEM, 'tlsCertificateKeyFile': CLIENT_PEM}}) + ctx = opts._kms_ssl_contexts['kmip'] + if sys.version_info[:2] >= (3, 7): + self.assertEqual(ctx.check_hostname, True) + self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED) + class TestClientOptions(PyMongoTestCase): def test_default(self): From 99a413f81b620ca389330e24a9731386573aff79 Mon Sep 17 00:00:00 2001 From: Bernie Hackett Date: Thu, 11 Nov 2021 13:56:26 -0800 Subject: [PATCH 0002/1588] Update author and maintainer --- README.rst | 3 +-- setup.py | 4 +--- 2 files changed, 2 insertions(+), 5 deletions(-) diff --git a/README.rst b/README.rst index ba505dbb3a..f83ad70b10 100644 --- a/README.rst +++ b/README.rst @@ -3,8 +3,7 @@ PyMongo ======= :Info: See `the mongo site `_ for more information. See `GitHub `_ for the latest source. :Documentation: Available at `pymongo.readthedocs.io `_ -:Author: Mike Dirolf -:Maintainer: Bernie Hackett +:Author: The MongoDB Python Team About ===== diff --git a/setup.py b/setup.py index cdf9113911..8fcad6cc60 100755 --- a/setup.py +++ b/setup.py @@ -314,10 +314,8 @@ def build_extension(self, ext): version=version, description="Python driver for MongoDB ", long_description=readme_content, - author="Mike Dirolf", + author="The MongoDB Python Team", author_email="mongodb-user@googlegroups.com", - maintainer="Bernie Hackett", - maintainer_email="bernie@mongodb.com", url="http://github.com/mongodb/mongo-python-driver", keywords=["mongo", "mongodb", "pymongo", "gridfs", "bson"], install_requires=[], From e1884b44dfc5c70359fdc056104786cabe494108 Mon Sep 17 00:00:00 2001 From: Julius Park Date: Thu, 11 Nov 2021 15:00:17 -0800 Subject: [PATCH 0003/1588] PYTHON-2512 Update Astrolabe's Workload Executor to use the unified test runner (#783) --- test/test_create_entities.py | 4 +-- test/unified_format.py | 56 +++++++++++++++++------------------- 2 files changed, 28 insertions(+), 32 deletions(-) diff --git a/test/test_create_entities.py b/test/test_create_entities.py index 9b5c30d64e..3f60eb9b76 100644 --- a/test/test_create_entities.py +++ b/test/test_create_entities.py @@ -15,8 +15,6 @@ from test.unified_format import UnifiedSpecTestMixinV1 -from pymongo.monitoring import PoolCreatedEvent - class TestCreateEntities(unittest.TestCase): def test_store_events_as_entities(self): @@ -53,7 +51,7 @@ def test_store_events_as_entities(self): self.assertIn("events1", final_entity_map) self.assertGreater(len(final_entity_map["events1"]), 0) for event in final_entity_map["events1"]: - self.assertEqual(type(event), PoolCreatedEvent) + self.assertIn("PoolCreatedEvent", event) def test_store_all_others_as_entities(self): self.scenario_runner = UnifiedSpecTestMixinV1() diff --git a/test/unified_format.py b/test/unified_format.py index 
4c705299e9..0d60a05467 100644 --- a/test/unified_format.py +++ b/test/unified_format.py @@ -226,7 +226,7 @@ def add_event(self, event): if event_name in self._event_types: super(EventListenerUtil, self).add_event(event) for id in self._event_mapping[event_name]: - self.entity_map[id].append(event) + self.entity_map[id].append(str(event)) def _command_event(self, event): if event.command_name.lower() not in self._ignore_commands: @@ -284,7 +284,7 @@ def __setitem__(self, key, value): self._entities[key] = value - def _create_entity(self, entity_spec): + def _create_entity(self, entity_spec, uri=None): if len(entity_spec) != 1: self.test.fail( "Entity spec %s did not contain exactly one top-level key" % ( @@ -315,6 +315,8 @@ def _create_entity(self, entity_spec): kwargs['server_api'] = ServerApi( server_api['version'], strict=server_api.get('strict'), deprecation_errors=server_api.get('deprecationErrors')) + if uri: + kwargs['h'] = uri client = rs_or_single_client(**kwargs) self[spec['id']] = client self.test.addCleanup(client.close) @@ -366,9 +368,9 @@ def _create_entity(self, entity_spec): self.test.fail( 'Unable to create entity of unknown type %s' % (entity_type,)) - def create_entities_from_spec(self, entity_spec): + def create_entities_from_spec(self, entity_spec, uri=None): for spec in entity_spec: - self._create_entity(spec) + self._create_entity(spec, uri=uri) def get_listener_for_client(self, client_name): client = self[client_name] @@ -718,7 +720,6 @@ def insert_initial_data(self, initial_data): def setUpClass(cls): # super call creates internal client cls.client super(UnifiedSpecTestMixinV1, cls).setUpClass() - # process file-level runOnRequirements run_on_spec = cls.TEST_SPEC.get('runOnRequirements', []) if not cls.should_run_on(run_on_spec): @@ -733,7 +734,6 @@ def setUpClass(cls): def setUp(self): super(UnifiedSpecTestMixinV1, self).setUp() - # process schemaVersion # note: we check major schema version during class generation # note: we do this here because we cannot run assertions in setUpClass @@ -1080,15 +1080,14 @@ def _testOperation_loop(self, spec): successes_key = spec.get('storeSuccessesAsEntity') iteration_key = spec.get('storeIterationsAsEntity') iteration_limiter_key = spec.get('numIterations') - if failure_key: - self.entity_map[failure_key] = [] - if error_key: - self.entity_map[error_key] = [] - if successes_key: - self.entity_map[successes_key] = 0 - if iteration_key: - self.entity_map[iteration_key] = 0 + for i in [failure_key, error_key]: + if i: + self.entity_map[i] = [] + for i in [successes_key, iteration_key]: + if i: + self.entity_map[i] = 0 i = 0 + global IS_INTERRUPTED while True: if iteration_limiter_key and i >= iteration_limiter_key: break @@ -1096,24 +1095,24 @@ def _testOperation_loop(self, spec): if IS_INTERRUPTED: break try: + if iteration_key: + self.entity_map._entities[iteration_key] += 1 for op in spec["operations"]: self.run_entity_operation(op) if successes_key: self.entity_map._entities[successes_key] += 1 - if iteration_key: - self.entity_map._entities[iteration_key] += 1 - except AssertionError as exc: - if failure_key or error_key: - self.entity_map[failure_key or error_key].append({ - "error": exc, "time": time.time()}) - else: - raise exc except Exception as exc: - if error_key or failure_key: - self.entity_map[error_key or failure_key].append( - {"error": exc, "time": time.time()}) + if isinstance(exc, AssertionError): + key = failure_key or error_key else: - raise exc + key = error_key or failure_key + if not key: + raise + 
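+                # Store the failure/error as a plain dict; the exception
+                # is stringified (along with its type name and a timestamp)
+                # so the stored entity can be reported by Astrolabe's
+                # workload executor (PYTHON-2512).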
self.entity_map[key].append({ + "error": str(exc), + "time": time.time(), + "type": type(exc).__name__ + }) def run_special_operation(self, spec): opname = spec['name'] @@ -1174,7 +1173,7 @@ def verify_outcome(self, spec): self.assertListEqual(sorted_expected_documents, actual_documents) - def run_scenario(self, spec): + def run_scenario(self, spec, uri=None): # maybe skip test manually self.maybe_skip_test(spec) @@ -1191,8 +1190,7 @@ def run_scenario(self, spec): # process createEntities self.entity_map = EntityMapUtil(self) self.entity_map.create_entities_from_spec( - self.TEST_SPEC.get('createEntities', [])) - + self.TEST_SPEC.get('createEntities', []), uri=uri) # process initialData self.insert_initial_data(self.TEST_SPEC.get('initialData', [])) From 6d1dd6d63a43823204a33776a3da6c77f42068e2 Mon Sep 17 00:00:00 2001 From: Julius Park Date: Fri, 12 Nov 2021 16:23:57 -0800 Subject: [PATCH 0004/1588] PYTHON-3014 Update how events are added to entity map to match specification (#785) --- test/test_create_entities.py | 4 +++- test/unified_format.py | 6 +++++- 2 files changed, 8 insertions(+), 2 deletions(-) diff --git a/test/test_create_entities.py b/test/test_create_entities.py index 3f60eb9b76..b82b730aef 100644 --- a/test/test_create_entities.py +++ b/test/test_create_entities.py @@ -47,11 +47,12 @@ def test_store_events_as_entities(self): self.scenario_runner.TEST_SPEC = spec self.scenario_runner.setUp() self.scenario_runner.run_scenario(spec["tests"][0]) + self.scenario_runner.entity_map["client0"].close() final_entity_map = self.scenario_runner.entity_map self.assertIn("events1", final_entity_map) self.assertGreater(len(final_entity_map["events1"]), 0) for event in final_entity_map["events1"]: - self.assertIn("PoolCreatedEvent", event) + self.assertIn("PoolCreatedEvent", event["name"]) def test_store_all_others_as_entities(self): self.scenario_runner = UnifiedSpecTestMixinV1() @@ -130,6 +131,7 @@ def test_store_all_others_as_entities(self): self.scenario_runner.TEST_SPEC = spec self.scenario_runner.setUp() self.scenario_runner.run_scenario(spec["tests"][0]) + self.scenario_runner.entity_map["client0"].close() final_entity_map = self.scenario_runner.entity_map for entity in ["errors", "failures"]: self.assertIn(entity, final_entity_map) diff --git a/test/unified_format.py b/test/unified_format.py index 0d60a05467..25a980425f 100644 --- a/test/unified_format.py +++ b/test/unified_format.py @@ -226,7 +226,11 @@ def add_event(self, event): if event_name in self._event_types: super(EventListenerUtil, self).add_event(event) for id in self._event_mapping[event_name]: - self.entity_map[id].append(str(event)) + self.entity_map[id].append({ + "name": type(event).__name__, + "observedAt": time.time(), + "description": repr(event) + }) def _command_event(self, event): if event.command_name.lower() not in self._ignore_commands: From 754e52890f92969c56f274714041850d71c6f664 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Mon, 15 Nov 2021 13:01:45 -0800 Subject: [PATCH 0005/1588] PYTHON-2915 Skip large txn test on slow Windows hosts (#788) --- test/test_transactions.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/test/test_transactions.py b/test/test_transactions.py index 6c41f28cf4..32f02f8437 100644 --- a/test/test_transactions.py +++ b/test/test_transactions.py @@ -289,6 +289,8 @@ def gridfs_open_upload_stream(*args, **kwargs): # Require 4.2+ for large (16MB+) transactions. 
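+    # Slow platforms are excluded below: PyPy with TLS (PYTHON-2937,
+    # skipped in the test body) and Windows (PYTHON-2915).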
@client_context.require_version_min(4, 2) @client_context.require_transactions + @unittest.skipIf(sys.platform == 'win32', + 'Our Windows machines are too slow to pass this test') def test_transaction_starts_with_batched_write(self): if 'PyPy' in sys.version and client_context.tls: self.skipTest('PYTHON-2937 PyPy is so slow sending large ' From a7fb3281ea093103d0b19fe48f21593e7a7d1b8d Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Mon, 15 Nov 2021 16:23:59 -0800 Subject: [PATCH 0006/1588] PYTHON-3004 Support kmip FLE KMS provider (#786) Resync CSFLE spec tests. --- .evergreen/config.yml | 43 + .evergreen/run-tests.sh | 7 - pymongo/encryption.py | 23 +- pymongo/encryption_options.py | 9 +- .../corpus/corpus-encrypted.json | 1830 +++++++++++++++++ .../corpus/corpus-key-kmip.json | 32 + .../corpus/corpus-schema.json | 1266 ++++++++++++ .../client-side-encryption/corpus/corpus.json | 1662 +++++++++++++++ .../client-side-encryption/spec/azureKMS.json | 14 + test/client-side-encryption/spec/gcpKMS.json | 14 + test/client-side-encryption/spec/kmipKMS.json | 223 ++ test/test_encryption.py | 244 ++- 12 files changed, 5328 insertions(+), 39 deletions(-) create mode 100644 test/client-side-encryption/corpus/corpus-key-kmip.json create mode 100644 test/client-side-encryption/spec/kmipKMS.json diff --git a/.evergreen/config.yml b/.evergreen/config.yml index 6f5a19e478..16be7f882a 100644 --- a/.evergreen/config.yml +++ b/.evergreen/config.yml @@ -359,6 +359,49 @@ functions: PYTHON_BINARY=${PYTHON_BINARY} bash ${PROJECT_DIRECTORY}/.evergreen/run-doctests.sh "run tests": + # If testing FLE, start the KMS mock servers, first create the virtualenv. + - command: shell.exec + params: + script: | + if [ -n "${test_encryption}" ]; then + ${PREPARE_SHELL} + cd ${DRIVERS_TOOLS}/.evergreen/csfle + . ./activate_venv.sh + fi + # Run in the background so the mock servers don't block the EVG task. + - command: shell.exec + params: + background: true + script: | + if [ -n "${test_encryption}" ]; then + ${PREPARE_SHELL} + cd ${DRIVERS_TOOLS}/.evergreen/csfle + . ./activate_venv.sh + # The -u options forces the stdout and stderr streams to be unbuffered. + # TMPDIR is required to avoid "AF_UNIX path too long" errors. + TMPDIR="$(dirname $DRIVERS_TOOLS)" python -u kms_kmip_server.py --ca_file ../x509gen/ca.pem --cert_file ../x509gen/server.pem --port 5698 & + python -u kms_http_server.py --ca_file ../x509gen/ca.pem --cert_file ../x509gen/expired.pem --port 8000 & + python -u kms_http_server.py --ca_file ../x509gen/ca.pem --cert_file ../x509gen/wrong-host.pem --port 8001 & + python -u kms_http_server.py --ca_file ../x509gen/ca.pem --cert_file ../x509gen/server.pem --port 8002 --require_client_cert & + fi + # Wait up to 10 seconds for the KMIP server to start. + - command: shell.exec + params: + script: | + if [ -n "${test_encryption}" ]; then + ${PREPARE_SHELL} + cd ${DRIVERS_TOOLS}/.evergreen/csfle + . ./activate_venv.sh + for i in $(seq 1 1 10); do + sleep 1 + if python -u kms_kmip_client.py; then + echo 'KMS KMIP server started!' + exit 0 + fi + done + echo 'Failed to start KMIP server!' + exit 1 + fi - command: shell.exec type: test params: diff --git a/.evergreen/run-tests.sh b/.evergreen/run-tests.sh index 1e08e3ce17..3f4d6d9459 100755 --- a/.evergreen/run-tests.sh +++ b/.evergreen/run-tests.sh @@ -146,13 +146,6 @@ if [ -n "$TEST_ENCRYPTION" ]; then # Get access to the AWS temporary credentials: # CSFLE_AWS_TEMP_ACCESS_KEY_ID, CSFLE_AWS_TEMP_SECRET_ACCESS_KEY, CSFLE_AWS_TEMP_SESSION_TOKEN . 
$DRIVERS_TOOLS/.evergreen/csfle/set-temp-creds.sh - - # Start the mock KMS servers. - pushd ${DRIVERS_TOOLS}/.evergreen/csfle - python -u kms_http_server.py --ca_file ../x509gen/ca.pem --cert_file ../x509gen/expired.pem --port 8000 & - python -u kms_http_server.py --ca_file ../x509gen/ca.pem --cert_file ../x509gen/wrong-host.pem --port 8001 & - trap 'kill $(jobs -p)' EXIT HUP - popd fi if [ -z "$DATA_LAKE" ]; then diff --git a/pymongo/encryption.py b/pymongo/encryption.py index 1fe2877bbc..117666ac82 100644 --- a/pymongo/encryption.py +++ b/pymongo/encryption.py @@ -109,7 +109,7 @@ def kms_request(self, kms_context): message = kms_context.message provider = kms_context.kms_provider ctx = self.opts._kms_ssl_contexts.get(provider) - if not ctx: + if ctx is None: # Enable strict certificate verification, OCSP, match hostname, and # SNI using the system default CA certificates. ctx = get_ssl_context( @@ -378,9 +378,8 @@ def __init__(self, kms_providers, key_vault_namespace, key_vault_client, See :ref:`explicit-client-side-encryption` for an example. :Parameters: - - `kms_providers`: Map of KMS provider options. Two KMS providers - are supported: "aws" and "local". The kmsProviders map values - differ by provider: + - `kms_providers`: Map of KMS provider options. The `kms_providers` + map values differ by provider: - `aws`: Map with "accessKeyId" and "secretAccessKey" as strings. These are the AWS access key ID and AWS secret access key used @@ -396,6 +395,8 @@ def __init__(self, kms_providers, key_vault_namespace, key_vault_client, Additionally, "endpoint" may also be specified as a string (defaults to 'oauth2.googleapis.com'). These are the credentials used to generate Google Cloud KMS messages. + - `kmip`: Map with "endpoint" as a host with required port. + For example: ``{"endpoint": "example.com:443"}``. - `local`: Map with "key" as `bytes` (96 bytes in length) or a base64 encoded string which decodes to 96 bytes. "key" is the master key used to encrypt/decrypt @@ -424,7 +425,7 @@ def __init__(self, kms_providers, key_vault_namespace, key_vault_client, kms_tls_options={'kmip': {'tlsCAFile': certifi.where()}} .. versionchanged:: 4.0 - Added the `kms_tls_options` parameter. + Added the `kms_tls_options` parameter and the "kmip" KMS provider. .. versionadded:: 3.9 """ @@ -458,7 +459,7 @@ def create_data_key(self, kms_provider, master_key=None, :Parameters: - `kms_provider`: The KMS provider to use. Supported values are - "aws" and "local". + "aws", "azure", "gcp", "kmip", and "local". - `master_key`: Identifies a KMS-specific key used to encrypt the new data key. If the kmsProvider is "local" the `master_key` is not applicable and may be omitted. @@ -493,6 +494,16 @@ def create_data_key(self, kms_provider, master_key=None, - `endpoint` (string): Optional. Host with optional port. Defaults to "cloudkms.googleapis.com". + If the `kms_provider` is "kmip" it is optional and has the + following fields:: + + - `keyId` (string): Optional. `keyId` is the KMIP Unique + Identifier to a 96 byte KMIP Secret Data managed object. If + keyId is omitted, the driver creates a random 96 byte KMIP + Secret Data managed object. + - `endpoint` (string): Optional. Host with optional + port, e.g. "example.vault.azure.net:". + - `key_alt_names` (optional): An optional list of string alternate names used to reference a key. 
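+          For example, ``key_alt_names=['my_key_alias']`` (the name here
+          is illustrative; any strings may be used).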
If a key is created with alternate names, then encryption may refer to the key by the unique alternate diff --git a/pymongo/encryption_options.py b/pymongo/encryption_options.py index 1d4aa0c7b0..c96f4a6d67 100644 --- a/pymongo/encryption_options.py +++ b/pymongo/encryption_options.py @@ -55,9 +55,8 @@ def __init__(self, kms_providers, key_vault_namespace, See :ref:`automatic-client-side-encryption` for an example. :Parameters: - - `kms_providers`: Map of KMS provider options. Two KMS providers - are supported: "aws" and "local". The kmsProviders map values - differ by provider: + - `kms_providers`: Map of KMS provider options. The `kms_providers` + map values differ by provider: - `aws`: Map with "accessKeyId" and "secretAccessKey" as strings. These are the AWS access key ID and AWS secret access key used @@ -73,6 +72,8 @@ def __init__(self, kms_providers, key_vault_namespace, Additionally, "endpoint" may also be specified as a string (defaults to 'oauth2.googleapis.com'). These are the credentials used to generate Google Cloud KMS messages. + - `kmip`: Map with "endpoint" as a host with required port. + For example: ``{"endpoint": "example.com:443"}``. - `local`: Map with "key" as `bytes` (96 bytes in length) or a base64 encoded string which decodes to 96 bytes. "key" is the master key used to encrypt/decrypt @@ -129,7 +130,7 @@ def __init__(self, kms_providers, key_vault_namespace, kms_tls_options={'kmip': {'tlsCAFile': certifi.where()}} .. versionchanged:: 4.0 - Added the `kms_tls_options` parameter. + Added the `kms_tls_options` parameter and the "kmip" KMS provider. .. versionadded:: 3.9 """ diff --git a/test/client-side-encryption/corpus/corpus-encrypted.json b/test/client-side-encryption/corpus/corpus-encrypted.json index a11682688a..1b72aa8a39 100644 --- a/test/client-side-encryption/corpus/corpus-encrypted.json +++ b/test/client-side-encryption/corpus/corpus-encrypted.json @@ -7681,5 +7681,1835 @@ "value": { "$maxKey": 1 } + }, + "kmip_double_rand_auto_id": { + "kms": "kmip", + "type": "double", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AijCDwAAAAAAAAAAAAAAAAAB1hL/nPkpQtqxQUANbIJr30PQ98vPvaoy4JWUoElOL+cCnrSra3o7W+12dydy0rCS2EKrVm7Fw0C8L9nf1hpWjw==", + "subType": "06" + } + } + }, + "kmip_double_rand_auto_altname": { + "kms": "kmip", + "type": "double", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AijCDwAAAAAAAAAAAAAAAAABxlcphy2SxXlkRBvO1Z3nNUqchmeOhIhkdYBbbW7CwYeLVRDciXFsZN73Nb9Bm+W4IpUNpo6mqFEtfjevIjtFyg==", + "subType": "06" + } + } + }, + "kmip_double_rand_explicit_id": { + "kms": "kmip", + "type": "double", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AijCDwAAAAAAAAAAAAAAAAABx5AfRSiblFc1DGwxRIaUSP2kaM76ryzPUKL9KnEgnX1kjIlFz5B15uMht2cxdrntHFe1qZZk8V9PxTBpWZhJ8Q==", + "subType": "06" + } + } + }, + "kmip_double_rand_explicit_altname": { + "kms": "kmip", + "type": "double", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AijCDwAAAAAAAAAAAAAAAAABXUC9v9HPrmU9tINzFmr2sQM9f7GHDus+y5T4pWX28PRtfnTysN/ANCfB9RosoR/wuKsbznwwD2JfSzOvlKo3PQ==", + "subType": "06" + } + } + }, + "kmip_double_det_explicit_id": { + "kms": "kmip", + "type": "double", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": { + "$numberDouble": 
"1.2339999999999999858" + } + }, + "kmip_double_det_explicit_altname": { + "kms": "kmip", + "type": "double", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": { + "$numberDouble": "1.2339999999999999858" + } + }, + "kmip_string_rand_auto_id": { + "kms": "kmip", + "type": "string", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AijCDwAAAAAAAAAAAAAAAAACGHmqW1qbfqVlfB0x0CkXCk9smhs3yXsxJ/8eypSgbDQqVLSW2nf5bbHpnoCHHNtQ7I7ZBXzPzDLH2GgMJpopeQ==", + "subType": "06" + } + } + }, + "kmip_string_rand_auto_altname": { + "kms": "kmip", + "type": "string", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AijCDwAAAAAAAAAAAAAAAAAC9BJTD1pEMbslAjbJYt7yx/jzKkcZF3axu96+NYwp8afUCjXG5TOUZzODOwkbJuWgr7DBxa2GkZTvaAEk86h+Ow==", + "subType": "06" + } + } + }, + "kmip_string_rand_explicit_id": { + "kms": "kmip", + "type": "string", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AijCDwAAAAAAAAAAAAAAAAACQlG28ECy8KHXC7GEPdC8+raBo2RMJwl5pofcPaTGkPUEbkreguMd1mYctNb90vXxby1nNeJY4o5zJJCMiNhNXg==", + "subType": "06" + } + } + }, + "kmip_string_rand_explicit_altname": { + "kms": "kmip", + "type": "string", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AijCDwAAAAAAAAAAAAAAAAACbWuK+3nzeKSNVjmgHb0Ii7rA+CsAd+gYubPiMiHXZwE/o6i9FYWN+t/VK3p4K0CwIi6q3cycrMb2IgcvM27Q7Q==", + "subType": "06" + } + } + }, + "kmip_string_det_auto_id": { + "kms": "kmip", + "type": "string", + "algo": "det", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "ASjCDwAAAAAAAAAAAAAAAAAC5OZgr9keCXOIj5Fi06i4win1xt7gpsyPA4Os+HdFn1MIP9tnktvWNRb8Rqhuj2O9KO83brx74Hu3EQ4nT6uCMw==", + "subType": "06" + } + } + }, + "kmip_string_det_explicit_id": { + "kms": "kmip", + "type": "string", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "ASjCDwAAAAAAAAAAAAAAAAAC5OZgr9keCXOIj5Fi06i4win1xt7gpsyPA4Os+HdFn1MIP9tnktvWNRb8Rqhuj2O9KO83brx74Hu3EQ4nT6uCMw==", + "subType": "06" + } + } + }, + "kmip_string_det_explicit_altname": { + "kms": "kmip", + "type": "string", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "ASjCDwAAAAAAAAAAAAAAAAAC5OZgr9keCXOIj5Fi06i4win1xt7gpsyPA4Os+HdFn1MIP9tnktvWNRb8Rqhuj2O9KO83brx74Hu3EQ4nT6uCMw==", + "subType": "06" + } + } + }, + "kmip_object_rand_auto_id": { + "kms": "kmip", + "type": "object", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AijCDwAAAAAAAAAAAAAAAAADh2nGqaAUwHDRVjqYpj8JAPH7scmiHp1Z9SGBZQ6Fapxm+zWDdTBHyitM9U69BctJ5DaaafyqFOj5yr6sJ+ebJQ==", + "subType": "06" + } + } + }, + "kmip_object_rand_auto_altname": { + "kms": "kmip", + "type": "object", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AijCDwAAAAAAAAAAAAAAAAAD1YhOKyNle4y0Qbeio1HlCULLeTCALCLgKSITd50bilD+oDyqQawixJAwphcdjhLdFzbFwst5RWqpsiWMPHx4hQ==", + "subType": "06" + } + } + }, + "kmip_object_rand_explicit_id": { + "kms": "kmip", + "type": "object", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": 
"AijCDwAAAAAAAAAAAAAAAAADveILoWFgX7AhUWCv8UL52TUa75qHuoNadnTQydJlqd6PVmtRKj+8vS7VwxNWPaH4wB1Tk7emMyFEbZpvvzjxqQ==", + "subType": "06" + } + } + }, + "kmip_object_rand_explicit_altname": { + "kms": "kmip", + "type": "object", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AijCDwAAAAAAAAAAAAAAAAADB/LN9V/4SROJn+ESHRLM7wwcUltQUx3+LbbYXjPDXiiV14HK76Iyy6ZxJ+M5qC9bRj3afhTKuWLBblB8WwksOg==", + "subType": "06" + } + } + }, + "kmip_object_det_explicit_id": { + "kms": "kmip", + "type": "object", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": { + "x": { + "$numberInt": "1" + } + } + }, + "kmip_object_det_explicit_altname": { + "kms": "kmip", + "type": "object", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": { + "x": { + "$numberInt": "1" + } + } + }, + "kmip_array_rand_auto_id": { + "kms": "kmip", + "type": "array", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AijCDwAAAAAAAAAAAAAAAAAEasWXQam8XtOkSO0nEttMCQ0iZ4V8DDmhMKyQDFDsiNHyF2h98Ya/xFv4ZSlbpGWXPBvBATEGgov/PDg2vhVi53y4Pk33RHfY60hABuksp3o=", + "subType": "06" + } + } + }, + "kmip_array_rand_auto_altname": { + "kms": "kmip", + "type": "array", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AijCDwAAAAAAAAAAAAAAAAAEj3A1DYSEHm/3SlEmusA+pewxRPUoZ2NAjs60ioEBlCw9n6yiiB+X8d/w40TKsjZcOSfh05NC0z3gnpqQvrNolkxkvi9dmFiZeiiv5vBZUPI=", + "subType": "06" + } + } + }, + "kmip_array_rand_explicit_id": { + "kms": "kmip", + "type": "array", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AijCDwAAAAAAAAAAAAAAAAAEqeJW+L6lP0bn5QcD0FMI0C8vv2n5kV7SKgqKi1o5mxaxmp3Cjlspf7yumfSiQ5js6G9yJVAvHuxlqv14UFyR9RgXS0PIA8WzsAqkL0sJSw0=", + "subType": "06" + } + } + }, + "kmip_array_rand_explicit_altname": { + "kms": "kmip", + "type": "array", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AijCDwAAAAAAAAAAAAAAAAAEnPlPwy0B1VKuNum1GzkZwQjZia5jNYL5bf/k+PbfhnToTRWGxx8+E3R7XXp6YT/rFkjPlzU8ww9+iZNo2oqNpYuHdrIC8ybhO6HZAlvcERo=", + "subType": "06" + } + } + }, + "kmip_array_det_explicit_id": { + "kms": "kmip", + "type": "array", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": [ + { + "$numberInt": "1" + }, + { + "$numberInt": "2" + }, + { + "$numberInt": "3" + } + ] + }, + "kmip_array_det_explicit_altname": { + "kms": "kmip", + "type": "array", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": [ + { + "$numberInt": "1" + }, + { + "$numberInt": "2" + }, + { + "$numberInt": "3" + } + ] + }, + "kmip_binData=00_rand_auto_id": { + "kms": "kmip", + "type": "binData=00", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AijCDwAAAAAAAAAAAAAAAAAFliNDZ6DmjoVcYQBCKDI9njpBsDELg+TD6XLF7xbZnMaJCCHLHr7w3x2/xFfrFSN44CtGAKOniYPCMAspaxHqOA==", + "subType": "06" + } + } + }, + "kmip_binData=00_rand_auto_altname": { + "kms": "kmip", + "type": "binData=00", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": 
"AijCDwAAAAAAAAAAAAAAAAAF/P8LPmHKGgG0l5/Xi7jdkwfxpGPxoY0417suCvN6zjM3JNdufytzkektrm9CbBb1SnZCGYF9c0FCMzFG+tN/dg==", + "subType": "06" + } + } + }, + "kmip_binData=00_rand_explicit_id": { + "kms": "kmip", + "type": "binData=00", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AijCDwAAAAAAAAAAAAAAAAAFWI0N4RbnYdEiFrzNpbRN9p+bSLm8Lthiu4K3/CvBg6GQpLMVQFhjW01Bud0lxpT2ohRnOK+ASUhiFcUU/t/lWQ==", + "subType": "06" + } + } + }, + "kmip_binData=00_rand_explicit_altname": { + "kms": "kmip", + "type": "binData=00", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AijCDwAAAAAAAAAAAAAAAAAFQZvAtpY4cjEr1rJWVoUGaZKmzocSJ0muHose7Tk5kRDczjFa4Jcu4hN7JLM9qz2z4g+WJC3KQTdW4ZBXStke/Q==", + "subType": "06" + } + } + }, + "kmip_binData=00_det_auto_id": { + "kms": "kmip", + "type": "binData=00", + "algo": "det", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "ASjCDwAAAAAAAAAAAAAAAAAFohIHrvzu8xLxVHsnYEDhZmv8BpEoEtFSjMUQzvBLUInvvTuU/rOzlVL88CkAEII7M3hcvrz8FKY7b7lC1veoYg==", + "subType": "06" + } + } + }, + "kmip_binData=00_det_explicit_id": { + "kms": "kmip", + "type": "binData=00", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "ASjCDwAAAAAAAAAAAAAAAAAFohIHrvzu8xLxVHsnYEDhZmv8BpEoEtFSjMUQzvBLUInvvTuU/rOzlVL88CkAEII7M3hcvrz8FKY7b7lC1veoYg==", + "subType": "06" + } + } + }, + "kmip_binData=00_det_explicit_altname": { + "kms": "kmip", + "type": "binData=00", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "ASjCDwAAAAAAAAAAAAAAAAAFohIHrvzu8xLxVHsnYEDhZmv8BpEoEtFSjMUQzvBLUInvvTuU/rOzlVL88CkAEII7M3hcvrz8FKY7b7lC1veoYg==", + "subType": "06" + } + } + }, + "kmip_binData=04_rand_auto_id": { + "kms": "kmip", + "type": "binData=04", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AijCDwAAAAAAAAAAAAAAAAAFn7rhdO8tYq77uVxcqd9Qjz84Yg7JnJMYf0ULTMTh1vJHacckkhXw+8fIMMiAKwuOVwGkMAtu5RBvrFqdfxryCg8RLTxu1YYVthufiClEIS0=", + "subType": "06" + } + } + }, + "kmip_binData=04_rand_auto_altname": { + "kms": "kmip", + "type": "binData=04", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AijCDwAAAAAAAAAAAAAAAAAFwwXQx9dKyoyHq7GBMmHzYe9ysoJK/f/ZWzA6nErau9MtX1gqi7VRsYqkamb47/zVbsLZwPMmdgNyPxEh3kqbV2D61t5RG2A3VeqhO1pTF8c=", + "subType": "06" + } + } + }, + "kmip_binData=04_rand_explicit_id": { + "kms": "kmip", + "type": "binData=04", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AijCDwAAAAAAAAAAAAAAAAAFALeGeinJ8DE+WZniLdCIW2gfJUj445Ukp9PvRLgBXLGedl8mIXlLF2eu3BA9vP6s5y9w6peQjhn+oEofrsUVYD2duyzeIRMKgNiNchjf6TU=", + "subType": "06" + } + } + }, + "kmip_binData=04_rand_explicit_altname": { + "kms": "kmip", + "type": "binData=04", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AijCDwAAAAAAAAAAAAAAAAAF06Fx8CO3OSKE3fGri0VwK0e22YiG9LH2QkDTsRdFbT2lBm+bDD9FrEY8vKWS5RljMuysaxjBOzZ98d2LEs6k8LMOm83Nz/RESe4ZbbcfdQ0=", + "subType": "06" + } + } + }, + "kmip_binData=04_det_auto_id": { + "kms": "kmip", + "type": "binData=04", + "algo": "det", + "method": "auto", + "identifier": "id", + "allowed": true, + 
"value": { + "$binary": { + "base64": "ASjCDwAAAAAAAAAAAAAAAAAFzmZI909fJgxOykJtvOlv5LsX8z6BxUX2Xg5TsIwOxJMPSC8usm/zR7sZawoVBOuJxtNVLY/8oNP/4pFtAmQo02bUOtTo1yxNz/IZa9x+Q5E=", + "subType": "06" + } + } + }, + "kmip_binData=04_det_explicit_id": { + "kms": "kmip", + "type": "binData=04", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "ASjCDwAAAAAAAAAAAAAAAAAFzmZI909fJgxOykJtvOlv5LsX8z6BxUX2Xg5TsIwOxJMPSC8usm/zR7sZawoVBOuJxtNVLY/8oNP/4pFtAmQo02bUOtTo1yxNz/IZa9x+Q5E=", + "subType": "06" + } + } + }, + "kmip_binData=04_det_explicit_altname": { + "kms": "kmip", + "type": "binData=04", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "ASjCDwAAAAAAAAAAAAAAAAAFzmZI909fJgxOykJtvOlv5LsX8z6BxUX2Xg5TsIwOxJMPSC8usm/zR7sZawoVBOuJxtNVLY/8oNP/4pFtAmQo02bUOtTo1yxNz/IZa9x+Q5E=", + "subType": "06" + } + } + }, + "kmip_undefined_rand_explicit_id": { + "kms": "kmip", + "type": "undefined", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": { + "$undefined": true + } + }, + "kmip_undefined_rand_explicit_altname": { + "kms": "kmip", + "type": "undefined", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": { + "$undefined": true + } + }, + "kmip_undefined_det_explicit_id": { + "kms": "kmip", + "type": "undefined", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": { + "$undefined": true + } + }, + "kmip_undefined_det_explicit_altname": { + "kms": "kmip", + "type": "undefined", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": { + "$undefined": true + } + }, + "kmip_objectId_rand_auto_id": { + "kms": "kmip", + "type": "objectId", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AijCDwAAAAAAAAAAAAAAAAAHZFzE908RuO5deEt3t2QQdT12ybwqbm8D+sMJrdKt2Wp4kVPsw4ocAGGsRYN6VXe46P5fmyG5HqVWn0hkflZnQg==", + "subType": "06" + } + } + }, + "kmip_objectId_rand_auto_altname": { + "kms": "kmip", + "type": "objectId", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AijCDwAAAAAAAAAAAAAAAAAH3dPKyCCStvOtVGzlgIS33fsl8OAwQblt9i21pOVuLiliY1Tup9EtkSic88+nNEtXnq9gRknRzLthXv/k1ql+7Q==", + "subType": "06" + } + } + }, + "kmip_objectId_rand_explicit_id": { + "kms": "kmip", + "type": "objectId", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AijCDwAAAAAAAAAAAAAAAAAHcEjxVfHDSfLzFxAuK/rs/Pn/XV7jLkgKXZYeY0PNlRi1MHojN2AvQqI3J2rOvAjuYfikGcpvGPp/goqUbV9HYw==", + "subType": "06" + } + } + }, + "kmip_objectId_rand_explicit_altname": { + "kms": "kmip", + "type": "objectId", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AijCDwAAAAAAAAAAAAAAAAAHX65sNHnRYpx3VbWPCdQyFe7u0Y5ItabLEduqDeVsPk/iK4X3GjCSHQfw1yPi+CA+/veVpgdonwws6RiYV4ZZ5Q==", + "subType": "06" + } + } + }, + "kmip_objectId_det_auto_id": { + "kms": "kmip", + "type": "objectId", + "algo": "det", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "ASjCDwAAAAAAAAAAAAAAAAAHKU7mcdGEq2WGrDB6TicipLQstAk6G3PkiNt5F3bMavpKLjz04UBrd8aWGVG2gJTTON1UKRztiYFgRvb8f+LK/Q==", + "subType": "06" + } + } + }, + 
"kmip_objectId_det_explicit_id": { + "kms": "kmip", + "type": "objectId", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "ASjCDwAAAAAAAAAAAAAAAAAHKU7mcdGEq2WGrDB6TicipLQstAk6G3PkiNt5F3bMavpKLjz04UBrd8aWGVG2gJTTON1UKRztiYFgRvb8f+LK/Q==", + "subType": "06" + } + } + }, + "kmip_objectId_det_explicit_altname": { + "kms": "kmip", + "type": "objectId", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "ASjCDwAAAAAAAAAAAAAAAAAHKU7mcdGEq2WGrDB6TicipLQstAk6G3PkiNt5F3bMavpKLjz04UBrd8aWGVG2gJTTON1UKRztiYFgRvb8f+LK/Q==", + "subType": "06" + } + } + }, + "kmip_bool_rand_auto_id": { + "kms": "kmip", + "type": "bool", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AijCDwAAAAAAAAAAAAAAAAAIw/xgJlKEvErmVtue3X3RFsOI2sttAbxnzh1INc9GUQ2vok1VwYt9k88RxMPiOwMAZG7P1MlAdx7zt865onPKOw==", + "subType": "06" + } + } + }, + "kmip_bool_rand_auto_altname": { + "kms": "kmip", + "type": "bool", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AijCDwAAAAAAAAAAAAAAAAAIn8IuzlNHbpTgXOd1wEp364zJOBxj2Zf7a9B5osUV1sDY0G1OVpEnuDvZeUsdiUSyRjTTxzyuD/KZlKZ3+qrnrA==", + "subType": "06" + } + } + }, + "kmip_bool_rand_explicit_id": { + "kms": "kmip", + "type": "bool", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AijCDwAAAAAAAAAAAAAAAAAI3Nz9PdjUYQRGfTtvYSR8EQuUKFL0wdlEdfSCTBmMBhBPuuF9KxqCgy+ldVu1DRRgg3346DOKEEtE9BJPPInJ6Q==", + "subType": "06" + } + } + }, + "kmip_bool_rand_explicit_altname": { + "kms": "kmip", + "type": "bool", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AijCDwAAAAAAAAAAAAAAAAAIEGjqoerIZBk8Rw+YTO7jFKWzagDS8mEpD+9Wm1Q0r0ZHUmV0dQZcIqRV4oUk8U8uHUn0N3t2qGLr+rhUs4GH/g==", + "subType": "06" + } + } + }, + "kmip_bool_det_explicit_id": { + "kms": "kmip", + "type": "bool", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": true + }, + "kmip_bool_det_explicit_altname": { + "kms": "kmip", + "type": "bool", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": true + }, + "kmip_date_rand_auto_id": { + "kms": "kmip", + "type": "date", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AijCDwAAAAAAAAAAAAAAAAAJgr0v4xetUXjlLcPcyKv/rzjtWOKp9CZJcm23Noglu5RR/rXJS0qKI+W9MmJ64TMf27KvaJ0UXwfTRrvOC1plCg==", + "subType": "06" + } + } + }, + "kmip_date_rand_auto_altname": { + "kms": "kmip", + "type": "date", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AijCDwAAAAAAAAAAAAAAAAAJoeysAaiPsVK+JL1P1vD/9xF92m5kKidUdn6yklPlSKN4VVEBTymDetTLujULs1u1TlrS71jVLxo3xEwpG/KQvg==", + "subType": "06" + } + } + }, + "kmip_date_rand_explicit_id": { + "kms": "kmip", + "type": "date", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AijCDwAAAAAAAAAAAAAAAAAJVwu4+Su0DktpnZvzTBHYpWbWTq5gho/SLijrcIrFJcvq4YrjjPCXv+odCl95tkH+J1RlJdQ5Cr0umEIazLa6GA==", + "subType": "06" + } + } + }, + "kmip_date_rand_explicit_altname": { + "kms": "kmip", + "type": "date", + "algo": "rand", + "method": "explicit", + "identifier": 
"altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AijCDwAAAAAAAAAAAAAAAAAJWTYpjbDkIf82QXHMGrvd0SqhP8cBIakfYJf5aNcNrs86vxRhiG3KwETWPeOOlPZ6n1WjE2bOLB+DJTAxmJvahA==", + "subType": "06" + } + } + }, + "kmip_date_det_auto_id": { + "kms": "kmip", + "type": "date", + "algo": "det", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "ASjCDwAAAAAAAAAAAAAAAAAJ/+sQrUqQh+JADSVIKM0d68gDUhDy37M1z1uvROzQw6hUAbQeD0DWdztADKg560UTPM4uOgH4NAyhLyBLMrWWHg==", + "subType": "06" + } + } + }, + "kmip_date_det_explicit_id": { + "kms": "kmip", + "type": "date", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "ASjCDwAAAAAAAAAAAAAAAAAJ/+sQrUqQh+JADSVIKM0d68gDUhDy37M1z1uvROzQw6hUAbQeD0DWdztADKg560UTPM4uOgH4NAyhLyBLMrWWHg==", + "subType": "06" + } + } + }, + "kmip_date_det_explicit_altname": { + "kms": "kmip", + "type": "date", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "ASjCDwAAAAAAAAAAAAAAAAAJ/+sQrUqQh+JADSVIKM0d68gDUhDy37M1z1uvROzQw6hUAbQeD0DWdztADKg560UTPM4uOgH4NAyhLyBLMrWWHg==", + "subType": "06" + } + } + }, + "kmip_null_rand_explicit_id": { + "kms": "kmip", + "type": "null", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": null + }, + "kmip_null_rand_explicit_altname": { + "kms": "kmip", + "type": "null", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": null + }, + "kmip_null_det_explicit_id": { + "kms": "kmip", + "type": "null", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": null + }, + "kmip_null_det_explicit_altname": { + "kms": "kmip", + "type": "null", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": null + }, + "kmip_regex_rand_auto_id": { + "kms": "kmip", + "type": "regex", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AijCDwAAAAAAAAAAAAAAAAALi8avMfpxSlDsSTqdxO8O2B1M79gOElyUIdXySQo7mvgHlf4oHQ7r94lL9dnsA2t/jmUmBKoGypaUQUSQE+9x+A==", + "subType": "06" + } + } + }, + "kmip_regex_rand_auto_altname": { + "kms": "kmip", + "type": "regex", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AijCDwAAAAAAAAAAAAAAAAALfHerZ/KolaBrb5qi3SpeNVW+i/nh5mkcdtQg5f1pHePr68KryHucM/XDAzbMqrPlag2/41STGYdJqzYO7Mbppg==", + "subType": "06" + } + } + }, + "kmip_regex_rand_explicit_id": { + "kms": "kmip", + "type": "regex", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AijCDwAAAAAAAAAAAAAAAAALOhKDVAN5cuDyB1EuRFWgKKt0wGJ63E5pPY8Tq2TXMNgCxUUc5O+TE+Ux4ls/uMyOBA3gPzND0CZKiru0i7ACUQ==", + "subType": "06" + } + } + }, + "kmip_regex_rand_explicit_altname": { + "kms": "kmip", + "type": "regex", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AijCDwAAAAAAAAAAAAAAAAALK3Hg8xX9gX+d3vKh7aosRP9CS2CIFeG9sapZv3OAPv1eWjY62Cp/G16kJ0BQt33RYD+DzD3gWupfUSyNZR0gng==", + "subType": "06" + } + } + }, + "kmip_regex_det_auto_id": { + "kms": "kmip", + "type": "regex", + "algo": "det", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": 
"ASjCDwAAAAAAAAAAAAAAAAALaQXA8rItT7ELVxO8XtAWdHuiXFFPmnMhS5PMrUy/6mRtbq4fvU9dascW7ozonKOh8ad6+MIT7B/STv9dVBF4Kw==", + "subType": "06" + } + } + }, + "kmip_regex_det_explicit_id": { + "kms": "kmip", + "type": "regex", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "ASjCDwAAAAAAAAAAAAAAAAALaQXA8rItT7ELVxO8XtAWdHuiXFFPmnMhS5PMrUy/6mRtbq4fvU9dascW7ozonKOh8ad6+MIT7B/STv9dVBF4Kw==", + "subType": "06" + } + } + }, + "kmip_regex_det_explicit_altname": { + "kms": "kmip", + "type": "regex", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "ASjCDwAAAAAAAAAAAAAAAAALaQXA8rItT7ELVxO8XtAWdHuiXFFPmnMhS5PMrUy/6mRtbq4fvU9dascW7ozonKOh8ad6+MIT7B/STv9dVBF4Kw==", + "subType": "06" + } + } + }, + "kmip_dbPointer_rand_auto_id": { + "kms": "kmip", + "type": "dbPointer", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AijCDwAAAAAAAAAAAAAAAAAMoGkfmmUWTI+0aW7jVyCJ5Dgru1SCXBUmJSRzDL0D57pNruQ+79tVVcI6Uz5j87DhZFxShHbPjj583vLOOBNM3WGzZCpqH3serhHTWvXK+NM=", + "subType": "06" + } + } + }, + "kmip_dbPointer_rand_auto_altname": { + "kms": "kmip", + "type": "dbPointer", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AijCDwAAAAAAAAAAAAAAAAAMwu1WaRhhv43xgxLNxuenbND9M6mxGtCs9o4J5+yfL95XNB9Daie3RcLlyngz0pncBie6IqjhTycXsxTLQ94Jdg6m5GD5cU541LYKvhbv5f4=", + "subType": "06" + } + } + }, + "kmip_dbPointer_rand_explicit_id": { + "kms": "kmip", + "type": "dbPointer", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AijCDwAAAAAAAAAAAAAAAAAM+CIoCAisUwhhJtWQLolxQGQWafniwYyvaJQHmJC94Uwbf1gPfhMR42v2VtrmIVP0J0BaP/xf0cco2/qWRdKGZpgkK2CK6M972NtnZ/2x03A=", + "subType": "06" + } + } + }, + "kmip_dbPointer_rand_explicit_altname": { + "kms": "kmip", + "type": "dbPointer", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AijCDwAAAAAAAAAAAAAAAAAMjbeE9+EaJYjGfeAuxsV8teOdsW8bfnlkvji/tE11Zq89UMGx+oUsZzeLjUgVZ5nxsZKCZjEAq+DPnwFVC+MgqNeqWL7fRChODFlPGH2ZC+8=", + "subType": "06" + } + } + }, + "kmip_dbPointer_det_auto_id": { + "kms": "kmip", + "type": "dbPointer", + "algo": "det", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "ASjCDwAAAAAAAAAAAAAAAAAM5B+fjbjYCZzCYUu4N/pJI3srCCXN+OCCHweeweqmpIEmB7yw87bQRIMGtCm6HuekcZ5J5q+nY5AQb0du/wh1YIoOrC3u4w7ZcLHkDmuAJPg=", + "subType": "06" + } + } + }, + "kmip_dbPointer_det_explicit_id": { + "kms": "kmip", + "type": "dbPointer", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "ASjCDwAAAAAAAAAAAAAAAAAM5B+fjbjYCZzCYUu4N/pJI3srCCXN+OCCHweeweqmpIEmB7yw87bQRIMGtCm6HuekcZ5J5q+nY5AQb0du/wh1YIoOrC3u4w7ZcLHkDmuAJPg=", + "subType": "06" + } + } + }, + "kmip_dbPointer_det_explicit_altname": { + "kms": "kmip", + "type": "dbPointer", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "ASjCDwAAAAAAAAAAAAAAAAAM5B+fjbjYCZzCYUu4N/pJI3srCCXN+OCCHweeweqmpIEmB7yw87bQRIMGtCm6HuekcZ5J5q+nY5AQb0du/wh1YIoOrC3u4w7ZcLHkDmuAJPg=", + "subType": "06" + } + } + }, + "kmip_javascript_rand_auto_id": { + "kms": "kmip", + "type": "javascript", + "algo": "rand", + "method": "auto", + "identifier": "id", 
+ "allowed": true, + "value": { + "$binary": { + "base64": "AijCDwAAAAAAAAAAAAAAAAANuzlkWs/c8xArrAxPgYuCeShjj1zCfIMHOTPohspcyNofo9iY3P5MlhEOprZDiS8dBFg6EB7fZDzDdczx6VCN2A==", + "subType": "06" + } + } + }, + "kmip_javascript_rand_auto_altname": { + "kms": "kmip", + "type": "javascript", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AijCDwAAAAAAAAAAAAAAAAANwJ72y7UqCBJh1NwVRiE3vU1ex7FMv/X5YWCMuO9MHPMo4g1V5eaO4KfOr+K8+9NtkflgMpeDkvwP92rfR5ud5Q==", + "subType": "06" + } + } + }, + "kmip_javascript_rand_explicit_id": { + "kms": "kmip", + "type": "javascript", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AijCDwAAAAAAAAAAAAAAAAANj5q+888itRnLsw9PNGsBLhgqpvem5IJBOE2292r6zwjVueoEK/2I2PesRnn0esnkwdia1ADoMkcLUegwcFRkWQ==", + "subType": "06" + } + } + }, + "kmip_javascript_rand_explicit_altname": { + "kms": "kmip", + "type": "javascript", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AijCDwAAAAAAAAAAAAAAAAANnvbnmApys7OIe8LGTsZKDG1F1G1SI/rfZVmF6q1fq5U7feYPp1ejb2t2S2+v7LfcOHytsQWGcYuWCDcl+vosvQ==", + "subType": "06" + } + } + }, + "kmip_javascript_det_auto_id": { + "kms": "kmip", + "type": "javascript", + "algo": "det", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "ASjCDwAAAAAAAAAAAAAAAAANOR9R/Da8j5iVxllLiGFlv4U/bVn/PyN9/5WeGJkGJeE/j/osKrKx6IL1igI0YVI+pKKzsINqJGIv+bJX0s7MNw==", + "subType": "06" + } + } + }, + "kmip_javascript_det_explicit_id": { + "kms": "kmip", + "type": "javascript", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "ASjCDwAAAAAAAAAAAAAAAAANOR9R/Da8j5iVxllLiGFlv4U/bVn/PyN9/5WeGJkGJeE/j/osKrKx6IL1igI0YVI+pKKzsINqJGIv+bJX0s7MNw==", + "subType": "06" + } + } + }, + "kmip_javascript_det_explicit_altname": { + "kms": "kmip", + "type": "javascript", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "ASjCDwAAAAAAAAAAAAAAAAANOR9R/Da8j5iVxllLiGFlv4U/bVn/PyN9/5WeGJkGJeE/j/osKrKx6IL1igI0YVI+pKKzsINqJGIv+bJX0s7MNw==", + "subType": "06" + } + } + }, + "kmip_symbol_rand_auto_id": { + "kms": "kmip", + "type": "symbol", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AijCDwAAAAAAAAAAAAAAAAAOe+vXpJSkmBM3WkxZrn4ea9/C6iNyMXWUzkQIzIYlnbkyu8od8nfOdhobUhoFxcKnvdaxN1s5NhJ1FA97RN/upGYN+AI/7cTCElmFSpdSvkI=", + "subType": "06" + } + } + }, + "kmip_symbol_rand_auto_altname": { + "kms": "kmip", + "type": "symbol", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AijCDwAAAAAAAAAAAAAAAAAOPpCgK6Hc/M2elOJkwIU9J7PZa+h1chody2yvfDu/UlB6T5sxnEZ6aEY/ISNLhJlhsRzuApSgFOmnrcG6Eg9VnSKin2yK0ll+VFxQEDHAcSA=", + "subType": "06" + } + } + }, + "kmip_symbol_rand_explicit_id": { + "kms": "kmip", + "type": "symbol", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AijCDwAAAAAAAAAAAAAAAAAOVoHX9GaOn71L5D9TpZmmxkx/asr0FHCLG5ZgLLA04yIhZHsDjt2DiVGGO/Mf4KwvoBn7Cf08qMhW7rQh2LgvvSLBO3zbw5l+MZ/bSn+Jylo=", + "subType": "06" + } + } + }, + "kmip_symbol_rand_explicit_altname": { + "kms": "kmip", + "type": "symbol", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + 
"allowed": true, + "value": { + "$binary": { + "base64": "AijCDwAAAAAAAAAAAAAAAAAOPobmcO/I4QObtCUEmGWpSCJ6tlYyhbO59q78LZBucSNl7DSkf/13tOJ9t+WKXACcMKVMmfPoFsgHbVj1nKWULBT07n1OWWDTZkuMD6C2+Fc=", + "subType": "06" + } + } + }, + "kmip_symbol_det_auto_id": { + "kms": "kmip", + "type": "symbol", + "algo": "det", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "ASjCDwAAAAAAAAAAAAAAAAAOPpwX4mafoQJYHuzYfbKW1JunpjpB7Nd2slTC3n8Hsas9wQYf9VkModQhe5M4wZHOIXpehaODRcjKKfKRmpnNBOURSLm/ORJvy+UxtSLsnqo=", + "subType": "06" + } + } + }, + "kmip_symbol_det_explicit_id": { + "kms": "kmip", + "type": "symbol", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "ASjCDwAAAAAAAAAAAAAAAAAOPpwX4mafoQJYHuzYfbKW1JunpjpB7Nd2slTC3n8Hsas9wQYf9VkModQhe5M4wZHOIXpehaODRcjKKfKRmpnNBOURSLm/ORJvy+UxtSLsnqo=", + "subType": "06" + } + } + }, + "kmip_symbol_det_explicit_altname": { + "kms": "kmip", + "type": "symbol", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "ASjCDwAAAAAAAAAAAAAAAAAOPpwX4mafoQJYHuzYfbKW1JunpjpB7Nd2slTC3n8Hsas9wQYf9VkModQhe5M4wZHOIXpehaODRcjKKfKRmpnNBOURSLm/ORJvy+UxtSLsnqo=", + "subType": "06" + } + } + }, + "kmip_javascriptWithScope_rand_auto_id": { + "kms": "kmip", + "type": "javascriptWithScope", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AijCDwAAAAAAAAAAAAAAAAAPW2VMMm+EvsYpVtJQhsxgxgvV35kr9nxqKxP2qqIOAOQ58R/1oyYScFkNwB/tw0A1/zdvhoo+ERa7c0tjLIojFrosXhX2N/8Z4VnbZruz0Nk=", + "subType": "06" + } + } + }, + "kmip_javascriptWithScope_rand_auto_altname": { + "kms": "kmip", + "type": "javascriptWithScope", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AijCDwAAAAAAAAAAAAAAAAAPjPq9BQR4EwG/CD+RthOJY04m99LCl/shY6HnaU/QL627kN1dbBAG5vs+MXfa+glg8waVTNgB94vm3j72FMV1ZOKvbl4faWF1Rl2EOpOlR9U=", + "subType": "06" + } + } + }, + "kmip_javascriptWithScope_rand_explicit_id": { + "kms": "kmip", + "type": "javascriptWithScope", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AijCDwAAAAAAAAAAAAAAAAAPtqebrCAidKzBMvp3B5/vBeetqeCoMKS+vo+hLAYooXrnBunWxwRHpr45XYUvroG3aqOMkLtVZSgw8sO6Y/3z1viO2G0sGQW1ZMoW0/PX5Uw=", + "subType": "06" + } + } + }, + "kmip_javascriptWithScope_rand_explicit_altname": { + "kms": "kmip", + "type": "javascriptWithScope", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AijCDwAAAAAAAAAAAAAAAAAPtkJwXKlq8Fx1f1+9HFofM4uKi6lHQRFRyiOyUFJYxxZY1LR/2WXXTqWz3MWtrcJFCB+QSVOb1N/ieC7AZUboPgIuPJISM3Hu5VU2x/Isbdc=", + "subType": "06" + } + } + }, + "kmip_javascriptWithScope_det_explicit_id": { + "kms": "kmip", + "type": "javascriptWithScope", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": { + "$code": "x=1", + "$scope": {} + } + }, + "kmip_javascriptWithScope_det_explicit_altname": { + "kms": "kmip", + "type": "javascriptWithScope", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": { + "$code": "x=1", + "$scope": {} + } + }, + "kmip_int_rand_auto_id": { + "kms": "kmip", + "type": "int", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": 
"AijCDwAAAAAAAAAAAAAAAAAQ50kE7Tby9od2OsmIGZhp9k/mj4vy/YdnmF6YsSPxihbjV1vXGMraI/nGCr+0H1riwzq3m4sCT7aPw2VgiuwKMA==", + "subType": "06" + } + } + }, + "kmip_int_rand_auto_altname": { + "kms": "kmip", + "type": "int", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AijCDwAAAAAAAAAAAAAAAAAQkNL14OSMX/bJbsLtB/UumRoat6QOY7fvwZxRrkXTS3VJVHigthI1cUX7Is/uUsY8oHOfk/ZuHklQkifmfdcklQ==", + "subType": "06" + } + } + }, + "kmip_int_rand_explicit_id": { + "kms": "kmip", + "type": "int", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AijCDwAAAAAAAAAAAAAAAAAQtN2gNVU9Itoj+vgcK/4jEB5baSUH+Qz2WqTY7m0XaA3bPWGFCiWY4Sdw+qovednrSSSbC+azWi1QYclFRraldQ==", + "subType": "06" + } + } + }, + "kmip_int_rand_explicit_altname": { + "kms": "kmip", + "type": "int", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AijCDwAAAAAAAAAAAAAAAAAQk6uBqwXXFF9zEM4bc124goI3pBy2Jdi8Cd0ycKkjXrPG7GVCUm2UMbO+zEzYODeVo35N11g2yMXcv9RVgjWtNA==", + "subType": "06" + } + } + }, + "kmip_int_det_auto_id": { + "kms": "kmip", + "type": "int", + "algo": "det", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "ASjCDwAAAAAAAAAAAAAAAAAQgrkPEf+RBZMn/J7HZObqEfus8icYls6ecaUrlabI6v1ALgxLuv23WSIfTr6mqpQCounqdA14DWS/Wl3kSkVC0w==", + "subType": "06" + } + } + }, + "kmip_int_det_explicit_id": { + "kms": "kmip", + "type": "int", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "ASjCDwAAAAAAAAAAAAAAAAAQgrkPEf+RBZMn/J7HZObqEfus8icYls6ecaUrlabI6v1ALgxLuv23WSIfTr6mqpQCounqdA14DWS/Wl3kSkVC0w==", + "subType": "06" + } + } + }, + "kmip_int_det_explicit_altname": { + "kms": "kmip", + "type": "int", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "ASjCDwAAAAAAAAAAAAAAAAAQgrkPEf+RBZMn/J7HZObqEfus8icYls6ecaUrlabI6v1ALgxLuv23WSIfTr6mqpQCounqdA14DWS/Wl3kSkVC0w==", + "subType": "06" + } + } + }, + "kmip_timestamp_rand_auto_id": { + "kms": "kmip", + "type": "timestamp", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AijCDwAAAAAAAAAAAAAAAAAR2Cu3o2e/u5o69MndeZPJU5ngVA1G2MNYn00t+up/GlmaUC1ni1CVl0ZR0EVZ0gCDUrfxwPISPib8y23tNjbsog==", + "subType": "06" + } + } + }, + "kmip_timestamp_rand_auto_altname": { + "kms": "kmip", + "type": "timestamp", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AijCDwAAAAAAAAAAAAAAAAARgi8stgSQwqnN4Ws2ZBILOREsjreZcS1MBerL7dbGLVfzW99tqECglhGokkrE0aY69L0xMgcAUIaFRN4GanQAPg==", + "subType": "06" + } + } + }, + "kmip_timestamp_rand_explicit_id": { + "kms": "kmip", + "type": "timestamp", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AijCDwAAAAAAAAAAAAAAAAARPxEEI8L5Q3Jybu88BLdf31T3uYEUbijgSlKlkTt141RYrlE8nxtiYU5/5H9GXBis0Qq1s2C+MauD2h/cNijTCA==", + "subType": "06" + } + } + }, + "kmip_timestamp_rand_explicit_altname": { + "kms": "kmip", + "type": "timestamp", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AijCDwAAAAAAAAAAAAAAAAARh/QaU1dnGbii4LtXCpT5o6vencc8E2fzarjJFbSEd0ixW/UV1ppZdvD729d0umkaIwIEVA4q+XVvHfl/ckKPFg==", + 
"subType": "06" + } + } + }, + "kmip_timestamp_det_auto_id": { + "kms": "kmip", + "type": "timestamp", + "algo": "det", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "ASjCDwAAAAAAAAAAAAAAAAARqdpLb72mmzb75QBrE+ATMfS5LLqzAD/1g5ScT8zfgh0IHsZZBWCJlSVRNC12Sgr3zdXHMtYp8C3OZT6/tPkQGg==", + "subType": "06" + } + } + }, + "kmip_timestamp_det_explicit_id": { + "kms": "kmip", + "type": "timestamp", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "ASjCDwAAAAAAAAAAAAAAAAARqdpLb72mmzb75QBrE+ATMfS5LLqzAD/1g5ScT8zfgh0IHsZZBWCJlSVRNC12Sgr3zdXHMtYp8C3OZT6/tPkQGg==", + "subType": "06" + } + } + }, + "kmip_timestamp_det_explicit_altname": { + "kms": "kmip", + "type": "timestamp", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "ASjCDwAAAAAAAAAAAAAAAAARqdpLb72mmzb75QBrE+ATMfS5LLqzAD/1g5ScT8zfgh0IHsZZBWCJlSVRNC12Sgr3zdXHMtYp8C3OZT6/tPkQGg==", + "subType": "06" + } + } + }, + "kmip_long_rand_auto_id": { + "kms": "kmip", + "type": "long", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AijCDwAAAAAAAAAAAAAAAAASVv+ClXkh9spIaXWJYRV/o8UZjG+WWWrNpIjZ9LQn2bXakrKJ3REvdkrzGuxASmBhBYTplEyvxVCJwXuWRAGGYw==", + "subType": "06" + } + } + }, + "kmip_long_rand_auto_altname": { + "kms": "kmip", + "type": "long", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AijCDwAAAAAAAAAAAAAAAAASeAz/dK+Gc4/jx3W07B2rNFvQ0LoyCllFRvRVGu1Xf1NByc4cRZLOMzlr99syz/fifF6WY30bOi5Pani9QtFuGg==", + "subType": "06" + } + } + }, + "kmip_long_rand_explicit_id": { + "kms": "kmip", + "type": "long", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AijCDwAAAAAAAAAAAAAAAAASP1HD9uoDlwTldaznKxW71JUQcLsa4/cUWzeTnelQwdpohCbZsM8fBZBqgwwTWnjpYY/LBUipC6yhwLKfUXBoBQ==", + "subType": "06" + } + } + }, + "kmip_long_rand_explicit_altname": { + "kms": "kmip", + "type": "long", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AijCDwAAAAAAAAAAAAAAAAASnGPH77bS/ETB1hn+VTvsBrxEvIHA6EAb8Z2SEz6BHt7SVeI+I7DLERvRVpV5kNJFcKgXDrvRmD+Et0rhSmk9sw==", + "subType": "06" + } + } + }, + "kmip_long_det_auto_id": { + "kms": "kmip", + "type": "long", + "algo": "det", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "ASjCDwAAAAAAAAAAAAAAAAAS+zKmtijSTPOEVlpwmaeMIOuzVNuZpV4Jw9zP8Yqa1xYtlItXDozqdibacRaA74KU49KNySdR1T7fxwxa2OOTrQ==", + "subType": "06" + } + } + }, + "kmip_long_det_explicit_id": { + "kms": "kmip", + "type": "long", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "ASjCDwAAAAAAAAAAAAAAAAAS+zKmtijSTPOEVlpwmaeMIOuzVNuZpV4Jw9zP8Yqa1xYtlItXDozqdibacRaA74KU49KNySdR1T7fxwxa2OOTrQ==", + "subType": "06" + } + } + }, + "kmip_long_det_explicit_altname": { + "kms": "kmip", + "type": "long", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "ASjCDwAAAAAAAAAAAAAAAAAS+zKmtijSTPOEVlpwmaeMIOuzVNuZpV4Jw9zP8Yqa1xYtlItXDozqdibacRaA74KU49KNySdR1T7fxwxa2OOTrQ==", + "subType": "06" + } + } + }, + "kmip_decimal_rand_auto_id": { + "kms": "kmip", + "type": "decimal", + "algo": "rand", + "method": "auto", 
+ "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AijCDwAAAAAAAAAAAAAAAAATu/BbCc5Ti9SBlMR2B8zj3Q1yQ16Uob+10LWaT5QKS192IcnBGy4wmmNkIsTys060xUby9KKQF80dVPnjYfqJwEXCe/pVaPQZftE0DolKv78=", + "subType": "06" + } + } + }, + "kmip_decimal_rand_auto_altname": { + "kms": "kmip", + "type": "decimal", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AijCDwAAAAAAAAAAAAAAAAATpq6/dtxq2ZUZHrK10aB0YjjPalEaXYcyAyRZjfXWAYCLZdT9sIybjX3Axjxisim+VSHx0QU7oXkKUfcbLgHyjUXj8g9059FHxKFkUsNv4Z8=", + "subType": "06" + } + } + }, + "kmip_decimal_rand_explicit_id": { + "kms": "kmip", + "type": "decimal", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AijCDwAAAAAAAAAAAAAAAAATS++9KcfM7uiShZYxRpFPrBJquKv7dyvFRTjnxs6aaaPo0fiqpv6bco/cMLsldEVpWDEA/Tc2HtSXYPp4UJsMfASyBjoxCloL5SaRWyD9Ye8=", + "subType": "06" + } + } + }, + "kmip_decimal_rand_explicit_altname": { + "kms": "kmip", + "type": "decimal", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AijCDwAAAAAAAAAAAAAAAAATREcETS5KoAGyj/P45owPrdFfy5ng8Z1ND+F+780lLddOyPeDnIsa7yg6uvhTZ65mHfGLvKcFocclYenq/AX1dY4xdjLRg/AfT088A27ORUA=", + "subType": "06" + } + } + }, + "kmip_decimal_det_explicit_id": { + "kms": "kmip", + "type": "decimal", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": { + "$numberDecimal": "1.234" + } + }, + "kmip_decimal_det_explicit_altname": { + "kms": "kmip", + "type": "decimal", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": { + "$numberDecimal": "1.234" + } + }, + "kmip_minKey_rand_explicit_id": { + "kms": "kmip", + "type": "minKey", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": { + "$minKey": 1 + } + }, + "kmip_minKey_rand_explicit_altname": { + "kms": "kmip", + "type": "minKey", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": { + "$minKey": 1 + } + }, + "kmip_minKey_det_explicit_id": { + "kms": "kmip", + "type": "minKey", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": { + "$minKey": 1 + } + }, + "kmip_minKey_det_explicit_altname": { + "kms": "kmip", + "type": "minKey", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": { + "$minKey": 1 + } + }, + "kmip_maxKey_rand_explicit_id": { + "kms": "kmip", + "type": "maxKey", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": { + "$maxKey": 1 + } + }, + "kmip_maxKey_rand_explicit_altname": { + "kms": "kmip", + "type": "maxKey", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": { + "$maxKey": 1 + } + }, + "kmip_maxKey_det_explicit_id": { + "kms": "kmip", + "type": "maxKey", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": { + "$maxKey": 1 + } + }, + "kmip_maxKey_det_explicit_altname": { + "kms": "kmip", + "type": "maxKey", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": { + "$maxKey": 1 + } } } \ No newline at end of file diff --git a/test/client-side-encryption/corpus/corpus-key-kmip.json b/test/client-side-encryption/corpus/corpus-key-kmip.json new file mode 100644 index 
0000000000..7c7069700e --- /dev/null +++ b/test/client-side-encryption/corpus/corpus-key-kmip.json @@ -0,0 +1,32 @@ +{ + "_id": { + "$binary": { + "base64": "KMIPAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "eUYDyB0HuWb+lQgUwO+6qJQyTTDTY2gp9FbemL7ZFo0pvr0x6rm6Ff9OVUTGH6HyMKipaeHdiIJU1dzsLwvqKvi7Beh+U4iaIWX/K0oEg1GOsJc0+Z/in8gNHbGUYLmycHViM3LES3kdt7FdFSUl5rEBHrM71yoNEXImz17QJWMGOuT4x6yoi2pvnaRJwfrI4DjpmnnTrDMac92jgZehbg==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1634220190041" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1634220190041" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "kmip", + "keyId": "1" + }, + "keyAltNames": ["kmip"] +} \ No newline at end of file diff --git a/test/client-side-encryption/corpus/corpus-schema.json b/test/client-side-encryption/corpus/corpus-schema.json index f145f712a4..e74bc914f5 100644 --- a/test/client-side-encryption/corpus/corpus-schema.json +++ b/test/client-side-encryption/corpus/corpus-schema.json @@ -5064,6 +5064,1272 @@ "bsonType": "binData" } } + }, + "kmip_double_rand_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "KMIPAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "double" + } + } + } + }, + "kmip_double_rand_auto_altname": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": "/altname_kmip", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "double" + } + } + } + }, + "kmip_double_rand_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "kmip_double_rand_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "kmip_string_rand_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "KMIPAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "string" + } + } + } + }, + "kmip_string_rand_auto_altname": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": "/altname_kmip", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "string" + } + } + } + }, + "kmip_string_rand_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "kmip_string_rand_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "kmip_string_det_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "KMIPAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic", + "bsonType": "string" + } + } + } + }, + "kmip_string_det_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "kmip_string_det_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "kmip_object_rand_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "KMIPAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + 
"bsonType": "object" + } + } + } + }, + "kmip_object_rand_auto_altname": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": "/altname_kmip", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "object" + } + } + } + }, + "kmip_object_rand_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "kmip_object_rand_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "kmip_array_rand_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "KMIPAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "array" + } + } + } + }, + "kmip_array_rand_auto_altname": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": "/altname_kmip", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "array" + } + } + } + }, + "kmip_array_rand_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "kmip_array_rand_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "kmip_binData=00_rand_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "KMIPAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "binData" + } + } + } + }, + "kmip_binData=00_rand_auto_altname": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": "/altname_kmip", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "binData" + } + } + } + }, + "kmip_binData=00_rand_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "kmip_binData=00_rand_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "kmip_binData=00_det_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "KMIPAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic", + "bsonType": "binData" + } + } + } + }, + "kmip_binData=00_det_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "kmip_binData=00_det_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "kmip_binData=04_rand_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "KMIPAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "binData" + } + } + } + }, + "kmip_binData=04_rand_auto_altname": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": "/altname_kmip", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "binData" + } + } + } + }, + "kmip_binData=04_rand_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "kmip_binData=04_rand_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "kmip_binData=04_det_auto_id": { + 
"bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "KMIPAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic", + "bsonType": "binData" + } + } + } + }, + "kmip_binData=04_det_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "kmip_binData=04_det_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "kmip_objectId_rand_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "KMIPAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "objectId" + } + } + } + }, + "kmip_objectId_rand_auto_altname": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": "/altname_kmip", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "objectId" + } + } + } + }, + "kmip_objectId_rand_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "kmip_objectId_rand_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "kmip_objectId_det_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "KMIPAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic", + "bsonType": "objectId" + } + } + } + }, + "kmip_objectId_det_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "kmip_objectId_det_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "kmip_bool_rand_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "KMIPAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "bool" + } + } + } + }, + "kmip_bool_rand_auto_altname": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": "/altname_kmip", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "bool" + } + } + } + }, + "kmip_bool_rand_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "kmip_bool_rand_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "kmip_date_rand_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "KMIPAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "date" + } + } + } + }, + "kmip_date_rand_auto_altname": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": "/altname_kmip", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "date" + } + } + } + }, + "kmip_date_rand_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "kmip_date_rand_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "kmip_date_det_auto_id": { + "bsonType": "object", + "properties": { + 
"value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "KMIPAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic", + "bsonType": "date" + } + } + } + }, + "kmip_date_det_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "kmip_date_det_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "kmip_regex_rand_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "KMIPAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "regex" + } + } + } + }, + "kmip_regex_rand_auto_altname": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": "/altname_kmip", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "regex" + } + } + } + }, + "kmip_regex_rand_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "kmip_regex_rand_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "kmip_regex_det_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "KMIPAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic", + "bsonType": "regex" + } + } + } + }, + "kmip_regex_det_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "kmip_regex_det_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "kmip_dbPointer_rand_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "KMIPAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "dbPointer" + } + } + } + }, + "kmip_dbPointer_rand_auto_altname": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": "/altname_kmip", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "dbPointer" + } + } + } + }, + "kmip_dbPointer_rand_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "kmip_dbPointer_rand_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "kmip_dbPointer_det_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "KMIPAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic", + "bsonType": "dbPointer" + } + } + } + }, + "kmip_dbPointer_det_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "kmip_dbPointer_det_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "kmip_javascript_rand_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "KMIPAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "javascript" + } + } + } + }, + 
"kmip_javascript_rand_auto_altname": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": "/altname_kmip", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "javascript" + } + } + } + }, + "kmip_javascript_rand_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "kmip_javascript_rand_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "kmip_javascript_det_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "KMIPAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic", + "bsonType": "javascript" + } + } + } + }, + "kmip_javascript_det_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "kmip_javascript_det_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "kmip_symbol_rand_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "KMIPAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "symbol" + } + } + } + }, + "kmip_symbol_rand_auto_altname": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": "/altname_kmip", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "symbol" + } + } + } + }, + "kmip_symbol_rand_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "kmip_symbol_rand_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "kmip_symbol_det_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "KMIPAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic", + "bsonType": "symbol" + } + } + } + }, + "kmip_symbol_det_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "kmip_symbol_det_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "kmip_javascriptWithScope_rand_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "KMIPAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "javascriptWithScope" + } + } + } + }, + "kmip_javascriptWithScope_rand_auto_altname": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": "/altname_kmip", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "javascriptWithScope" + } + } + } + }, + "kmip_javascriptWithScope_rand_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "kmip_javascriptWithScope_rand_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "kmip_int_rand_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "KMIPAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": 
"AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "int" + } + } + } + }, + "kmip_int_rand_auto_altname": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": "/altname_kmip", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "int" + } + } + } + }, + "kmip_int_rand_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "kmip_int_rand_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "kmip_int_det_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "KMIPAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic", + "bsonType": "int" + } + } + } + }, + "kmip_int_det_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "kmip_int_det_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "kmip_timestamp_rand_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "KMIPAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "timestamp" + } + } + } + }, + "kmip_timestamp_rand_auto_altname": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": "/altname_kmip", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "timestamp" + } + } + } + }, + "kmip_timestamp_rand_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "kmip_timestamp_rand_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "kmip_timestamp_det_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "KMIPAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic", + "bsonType": "timestamp" + } + } + } + }, + "kmip_timestamp_det_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "kmip_timestamp_det_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "kmip_long_rand_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "KMIPAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "long" + } + } + } + }, + "kmip_long_rand_auto_altname": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": "/altname_kmip", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "long" + } + } + } + }, + "kmip_long_rand_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "kmip_long_rand_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "kmip_long_det_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "KMIPAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic", + "bsonType": 
"long" + } + } + } + }, + "kmip_long_det_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "kmip_long_det_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "kmip_decimal_rand_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "KMIPAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "decimal" + } + } + } + }, + "kmip_decimal_rand_auto_altname": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": "/altname_kmip", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "decimal" + } + } + } + }, + "kmip_decimal_rand_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "kmip_decimal_rand_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } } } } \ No newline at end of file diff --git a/test/client-side-encryption/corpus/corpus.json b/test/client-side-encryption/corpus/corpus.json index 55bbaf99c2..559711b347 100644 --- a/test/client-side-encryption/corpus/corpus.json +++ b/test/client-side-encryption/corpus/corpus.json @@ -4,6 +4,7 @@ "altname_local": "local", "altname_azure": "azure", "altname_gcp": "gcp", + "altname_kmip": "kmip", "aws_double_rand_auto_id": { "kms": "aws", "type": "double", @@ -6648,6 +6649,1667 @@ "$maxKey": 1 } }, + "kmip_double_rand_auto_id": { + "kms": "kmip", + "type": "double", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$numberDouble": "1.234" + } + }, + "kmip_double_rand_auto_altname": { + "kms": "kmip", + "type": "double", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$numberDouble": "1.234" + } + }, + "kmip_double_rand_explicit_id": { + "kms": "kmip", + "type": "double", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$numberDouble": "1.234" + } + }, + "kmip_double_rand_explicit_altname": { + "kms": "kmip", + "type": "double", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$numberDouble": "1.234" + } + }, + "kmip_double_det_explicit_id": { + "kms": "kmip", + "type": "double", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": { + "$numberDouble": "1.234" + } + }, + "kmip_double_det_explicit_altname": { + "kms": "kmip", + "type": "double", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": { + "$numberDouble": "1.234" + } + }, + "kmip_string_rand_auto_id": { + "kms": "kmip", + "type": "string", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": "mongodb" + }, + "kmip_string_rand_auto_altname": { + "kms": "kmip", + "type": "string", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": "mongodb" + }, + "kmip_string_rand_explicit_id": { + "kms": "kmip", + "type": "string", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": "mongodb" + }, + "kmip_string_rand_explicit_altname": { + "kms": "kmip", + "type": "string", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": "mongodb" + }, + 
"kmip_string_det_auto_id": { + "kms": "kmip", + "type": "string", + "algo": "det", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": "mongodb" + }, + "kmip_string_det_explicit_id": { + "kms": "kmip", + "type": "string", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": "mongodb" + }, + "kmip_string_det_explicit_altname": { + "kms": "kmip", + "type": "string", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": "mongodb" + }, + "kmip_object_rand_auto_id": { + "kms": "kmip", + "type": "object", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "x": { + "$numberInt": "1" + } + } + }, + "kmip_object_rand_auto_altname": { + "kms": "kmip", + "type": "object", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "x": { + "$numberInt": "1" + } + } + }, + "kmip_object_rand_explicit_id": { + "kms": "kmip", + "type": "object", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "x": { + "$numberInt": "1" + } + } + }, + "kmip_object_rand_explicit_altname": { + "kms": "kmip", + "type": "object", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "x": { + "$numberInt": "1" + } + } + }, + "kmip_object_det_explicit_id": { + "kms": "kmip", + "type": "object", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": { + "x": { + "$numberInt": "1" + } + } + }, + "kmip_object_det_explicit_altname": { + "kms": "kmip", + "type": "object", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": { + "x": { + "$numberInt": "1" + } + } + }, + "kmip_array_rand_auto_id": { + "kms": "kmip", + "type": "array", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": [ + { + "$numberInt": "1" + }, + { + "$numberInt": "2" + }, + { + "$numberInt": "3" + } + ] + }, + "kmip_array_rand_auto_altname": { + "kms": "kmip", + "type": "array", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": [ + { + "$numberInt": "1" + }, + { + "$numberInt": "2" + }, + { + "$numberInt": "3" + } + ] + }, + "kmip_array_rand_explicit_id": { + "kms": "kmip", + "type": "array", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": [ + { + "$numberInt": "1" + }, + { + "$numberInt": "2" + }, + { + "$numberInt": "3" + } + ] + }, + "kmip_array_rand_explicit_altname": { + "kms": "kmip", + "type": "array", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": [ + { + "$numberInt": "1" + }, + { + "$numberInt": "2" + }, + { + "$numberInt": "3" + } + ] + }, + "kmip_array_det_explicit_id": { + "kms": "kmip", + "type": "array", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": [ + { + "$numberInt": "1" + }, + { + "$numberInt": "2" + }, + { + "$numberInt": "3" + } + ] + }, + "kmip_array_det_explicit_altname": { + "kms": "kmip", + "type": "array", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": [ + { + "$numberInt": "1" + }, + { + "$numberInt": "2" + }, + { + "$numberInt": "3" + } + ] + }, + "kmip_binData=00_rand_auto_id": { + "kms": "kmip", + "type": "binData=00", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": 
true, + "value": { + "$binary": { + "base64": "AQIDBA==", + "subType": "00" + } + } + }, + "kmip_binData=00_rand_auto_altname": { + "kms": "kmip", + "type": "binData=00", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AQIDBA==", + "subType": "00" + } + } + }, + "kmip_binData=00_rand_explicit_id": { + "kms": "kmip", + "type": "binData=00", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AQIDBA==", + "subType": "00" + } + } + }, + "kmip_binData=00_rand_explicit_altname": { + "kms": "kmip", + "type": "binData=00", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AQIDBA==", + "subType": "00" + } + } + }, + "kmip_binData=00_det_auto_id": { + "kms": "kmip", + "type": "binData=00", + "algo": "det", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AQIDBA==", + "subType": "00" + } + } + }, + "kmip_binData=00_det_explicit_id": { + "kms": "kmip", + "type": "binData=00", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AQIDBA==", + "subType": "00" + } + } + }, + "kmip_binData=00_det_explicit_altname": { + "kms": "kmip", + "type": "binData=00", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AQIDBA==", + "subType": "00" + } + } + }, + "kmip_binData=04_rand_auto_id": { + "kms": "kmip", + "type": "binData=04", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AAECAwQFBgcICQoLDA0ODw==", + "subType": "04" + } + } + }, + "kmip_binData=04_rand_auto_altname": { + "kms": "kmip", + "type": "binData=04", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AAECAwQFBgcICQoLDA0ODw==", + "subType": "04" + } + } + }, + "kmip_binData=04_rand_explicit_id": { + "kms": "kmip", + "type": "binData=04", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AAECAwQFBgcICQoLDA0ODw==", + "subType": "04" + } + } + }, + "kmip_binData=04_rand_explicit_altname": { + "kms": "kmip", + "type": "binData=04", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AAECAwQFBgcICQoLDA0ODw==", + "subType": "04" + } + } + }, + "kmip_binData=04_det_auto_id": { + "kms": "kmip", + "type": "binData=04", + "algo": "det", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AAECAwQFBgcICQoLDA0ODw==", + "subType": "04" + } + } + }, + "kmip_binData=04_det_explicit_id": { + "kms": "kmip", + "type": "binData=04", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AAECAwQFBgcICQoLDA0ODw==", + "subType": "04" + } + } + }, + "kmip_binData=04_det_explicit_altname": { + "kms": "kmip", + "type": "binData=04", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AAECAwQFBgcICQoLDA0ODw==", + "subType": "04" + } + } + }, + "kmip_undefined_rand_explicit_id": { + "kms": "kmip", + "type": "undefined", + "algo": "rand", + "method": "explicit", + 
"identifier": "id", + "allowed": false, + "value": { + "$undefined": true + } + }, + "kmip_undefined_rand_explicit_altname": { + "kms": "kmip", + "type": "undefined", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": { + "$undefined": true + } + }, + "kmip_undefined_det_explicit_id": { + "kms": "kmip", + "type": "undefined", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": { + "$undefined": true + } + }, + "kmip_undefined_det_explicit_altname": { + "kms": "kmip", + "type": "undefined", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": { + "$undefined": true + } + }, + "kmip_objectId_rand_auto_id": { + "kms": "kmip", + "type": "objectId", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$oid": "01234567890abcdef0123456" + } + }, + "kmip_objectId_rand_auto_altname": { + "kms": "kmip", + "type": "objectId", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$oid": "01234567890abcdef0123456" + } + }, + "kmip_objectId_rand_explicit_id": { + "kms": "kmip", + "type": "objectId", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$oid": "01234567890abcdef0123456" + } + }, + "kmip_objectId_rand_explicit_altname": { + "kms": "kmip", + "type": "objectId", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$oid": "01234567890abcdef0123456" + } + }, + "kmip_objectId_det_auto_id": { + "kms": "kmip", + "type": "objectId", + "algo": "det", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$oid": "01234567890abcdef0123456" + } + }, + "kmip_objectId_det_explicit_id": { + "kms": "kmip", + "type": "objectId", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$oid": "01234567890abcdef0123456" + } + }, + "kmip_objectId_det_explicit_altname": { + "kms": "kmip", + "type": "objectId", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$oid": "01234567890abcdef0123456" + } + }, + "kmip_bool_rand_auto_id": { + "kms": "kmip", + "type": "bool", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": true + }, + "kmip_bool_rand_auto_altname": { + "kms": "kmip", + "type": "bool", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": true + }, + "kmip_bool_rand_explicit_id": { + "kms": "kmip", + "type": "bool", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": true + }, + "kmip_bool_rand_explicit_altname": { + "kms": "kmip", + "type": "bool", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": true + }, + "kmip_bool_det_explicit_id": { + "kms": "kmip", + "type": "bool", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": true + }, + "kmip_bool_det_explicit_altname": { + "kms": "kmip", + "type": "bool", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": true + }, + "kmip_date_rand_auto_id": { + "kms": "kmip", + "type": "date", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$date": { + "$numberLong": "12345" + } + } + }, + "kmip_date_rand_auto_altname": { + "kms": 
"kmip", + "type": "date", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$date": { + "$numberLong": "12345" + } + } + }, + "kmip_date_rand_explicit_id": { + "kms": "kmip", + "type": "date", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$date": { + "$numberLong": "12345" + } + } + }, + "kmip_date_rand_explicit_altname": { + "kms": "kmip", + "type": "date", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$date": { + "$numberLong": "12345" + } + } + }, + "kmip_date_det_auto_id": { + "kms": "kmip", + "type": "date", + "algo": "det", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$date": { + "$numberLong": "12345" + } + } + }, + "kmip_date_det_explicit_id": { + "kms": "kmip", + "type": "date", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$date": { + "$numberLong": "12345" + } + } + }, + "kmip_date_det_explicit_altname": { + "kms": "kmip", + "type": "date", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$date": { + "$numberLong": "12345" + } + } + }, + "kmip_null_rand_explicit_id": { + "kms": "kmip", + "type": "null", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": null + }, + "kmip_null_rand_explicit_altname": { + "kms": "kmip", + "type": "null", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": null + }, + "kmip_null_det_explicit_id": { + "kms": "kmip", + "type": "null", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": null + }, + "kmip_null_det_explicit_altname": { + "kms": "kmip", + "type": "null", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": null + }, + "kmip_regex_rand_auto_id": { + "kms": "kmip", + "type": "regex", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$regularExpression": { + "pattern": ".*", + "options": "" + } + } + }, + "kmip_regex_rand_auto_altname": { + "kms": "kmip", + "type": "regex", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$regularExpression": { + "pattern": ".*", + "options": "" + } + } + }, + "kmip_regex_rand_explicit_id": { + "kms": "kmip", + "type": "regex", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$regularExpression": { + "pattern": ".*", + "options": "" + } + } + }, + "kmip_regex_rand_explicit_altname": { + "kms": "kmip", + "type": "regex", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$regularExpression": { + "pattern": ".*", + "options": "" + } + } + }, + "kmip_regex_det_auto_id": { + "kms": "kmip", + "type": "regex", + "algo": "det", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$regularExpression": { + "pattern": ".*", + "options": "" + } + } + }, + "kmip_regex_det_explicit_id": { + "kms": "kmip", + "type": "regex", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$regularExpression": { + "pattern": ".*", + "options": "" + } + } + }, + "kmip_regex_det_explicit_altname": { + "kms": "kmip", + "type": "regex", + "algo": "det", + "method": "explicit", + "identifier": "altname", + 
"allowed": true, + "value": { + "$regularExpression": { + "pattern": ".*", + "options": "" + } + } + }, + "kmip_dbPointer_rand_auto_id": { + "kms": "kmip", + "type": "dbPointer", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$dbPointer": { + "$ref": "db.example", + "$id": { + "$oid": "01234567890abcdef0123456" + } + } + } + }, + "kmip_dbPointer_rand_auto_altname": { + "kms": "kmip", + "type": "dbPointer", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$dbPointer": { + "$ref": "db.example", + "$id": { + "$oid": "01234567890abcdef0123456" + } + } + } + }, + "kmip_dbPointer_rand_explicit_id": { + "kms": "kmip", + "type": "dbPointer", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$dbPointer": { + "$ref": "db.example", + "$id": { + "$oid": "01234567890abcdef0123456" + } + } + } + }, + "kmip_dbPointer_rand_explicit_altname": { + "kms": "kmip", + "type": "dbPointer", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$dbPointer": { + "$ref": "db.example", + "$id": { + "$oid": "01234567890abcdef0123456" + } + } + } + }, + "kmip_dbPointer_det_auto_id": { + "kms": "kmip", + "type": "dbPointer", + "algo": "det", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$dbPointer": { + "$ref": "db.example", + "$id": { + "$oid": "01234567890abcdef0123456" + } + } + } + }, + "kmip_dbPointer_det_explicit_id": { + "kms": "kmip", + "type": "dbPointer", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$dbPointer": { + "$ref": "db.example", + "$id": { + "$oid": "01234567890abcdef0123456" + } + } + } + }, + "kmip_dbPointer_det_explicit_altname": { + "kms": "kmip", + "type": "dbPointer", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$dbPointer": { + "$ref": "db.example", + "$id": { + "$oid": "01234567890abcdef0123456" + } + } + } + }, + "kmip_javascript_rand_auto_id": { + "kms": "kmip", + "type": "javascript", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$code": "x=1" + } + }, + "kmip_javascript_rand_auto_altname": { + "kms": "kmip", + "type": "javascript", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$code": "x=1" + } + }, + "kmip_javascript_rand_explicit_id": { + "kms": "kmip", + "type": "javascript", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$code": "x=1" + } + }, + "kmip_javascript_rand_explicit_altname": { + "kms": "kmip", + "type": "javascript", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$code": "x=1" + } + }, + "kmip_javascript_det_auto_id": { + "kms": "kmip", + "type": "javascript", + "algo": "det", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$code": "x=1" + } + }, + "kmip_javascript_det_explicit_id": { + "kms": "kmip", + "type": "javascript", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$code": "x=1" + } + }, + "kmip_javascript_det_explicit_altname": { + "kms": "kmip", + "type": "javascript", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$code": "x=1" + } + }, + "kmip_symbol_rand_auto_id": { + "kms": "kmip", + 
"type": "symbol", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$symbol": "mongodb-symbol" + } + }, + "kmip_symbol_rand_auto_altname": { + "kms": "kmip", + "type": "symbol", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$symbol": "mongodb-symbol" + } + }, + "kmip_symbol_rand_explicit_id": { + "kms": "kmip", + "type": "symbol", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$symbol": "mongodb-symbol" + } + }, + "kmip_symbol_rand_explicit_altname": { + "kms": "kmip", + "type": "symbol", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$symbol": "mongodb-symbol" + } + }, + "kmip_symbol_det_auto_id": { + "kms": "kmip", + "type": "symbol", + "algo": "det", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$symbol": "mongodb-symbol" + } + }, + "kmip_symbol_det_explicit_id": { + "kms": "kmip", + "type": "symbol", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$symbol": "mongodb-symbol" + } + }, + "kmip_symbol_det_explicit_altname": { + "kms": "kmip", + "type": "symbol", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$symbol": "mongodb-symbol" + } + }, + "kmip_javascriptWithScope_rand_auto_id": { + "kms": "kmip", + "type": "javascriptWithScope", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$code": "x=1", + "$scope": {} + } + }, + "kmip_javascriptWithScope_rand_auto_altname": { + "kms": "kmip", + "type": "javascriptWithScope", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$code": "x=1", + "$scope": {} + } + }, + "kmip_javascriptWithScope_rand_explicit_id": { + "kms": "kmip", + "type": "javascriptWithScope", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$code": "x=1", + "$scope": {} + } + }, + "kmip_javascriptWithScope_rand_explicit_altname": { + "kms": "kmip", + "type": "javascriptWithScope", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$code": "x=1", + "$scope": {} + } + }, + "kmip_javascriptWithScope_det_explicit_id": { + "kms": "kmip", + "type": "javascriptWithScope", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": { + "$code": "x=1", + "$scope": {} + } + }, + "kmip_javascriptWithScope_det_explicit_altname": { + "kms": "kmip", + "type": "javascriptWithScope", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": { + "$code": "x=1", + "$scope": {} + } + }, + "kmip_int_rand_auto_id": { + "kms": "kmip", + "type": "int", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$numberInt": "123" + } + }, + "kmip_int_rand_auto_altname": { + "kms": "kmip", + "type": "int", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$numberInt": "123" + } + }, + "kmip_int_rand_explicit_id": { + "kms": "kmip", + "type": "int", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$numberInt": "123" + } + }, + "kmip_int_rand_explicit_altname": { + "kms": "kmip", + "type": "int", + "algo": "rand", + "method": "explicit", + "identifier": 
"altname", + "allowed": true, + "value": { + "$numberInt": "123" + } + }, + "kmip_int_det_auto_id": { + "kms": "kmip", + "type": "int", + "algo": "det", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$numberInt": "123" + } + }, + "kmip_int_det_explicit_id": { + "kms": "kmip", + "type": "int", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$numberInt": "123" + } + }, + "kmip_int_det_explicit_altname": { + "kms": "kmip", + "type": "int", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$numberInt": "123" + } + }, + "kmip_timestamp_rand_auto_id": { + "kms": "kmip", + "type": "timestamp", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$timestamp": { + "t": 0, + "i": 12345 + } + } + }, + "kmip_timestamp_rand_auto_altname": { + "kms": "kmip", + "type": "timestamp", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$timestamp": { + "t": 0, + "i": 12345 + } + } + }, + "kmip_timestamp_rand_explicit_id": { + "kms": "kmip", + "type": "timestamp", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$timestamp": { + "t": 0, + "i": 12345 + } + } + }, + "kmip_timestamp_rand_explicit_altname": { + "kms": "kmip", + "type": "timestamp", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$timestamp": { + "t": 0, + "i": 12345 + } + } + }, + "kmip_timestamp_det_auto_id": { + "kms": "kmip", + "type": "timestamp", + "algo": "det", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$timestamp": { + "t": 0, + "i": 12345 + } + } + }, + "kmip_timestamp_det_explicit_id": { + "kms": "kmip", + "type": "timestamp", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$timestamp": { + "t": 0, + "i": 12345 + } + } + }, + "kmip_timestamp_det_explicit_altname": { + "kms": "kmip", + "type": "timestamp", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$timestamp": { + "t": 0, + "i": 12345 + } + } + }, + "kmip_long_rand_auto_id": { + "kms": "kmip", + "type": "long", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$numberLong": "456" + } + }, + "kmip_long_rand_auto_altname": { + "kms": "kmip", + "type": "long", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$numberLong": "456" + } + }, + "kmip_long_rand_explicit_id": { + "kms": "kmip", + "type": "long", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$numberLong": "456" + } + }, + "kmip_long_rand_explicit_altname": { + "kms": "kmip", + "type": "long", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$numberLong": "456" + } + }, + "kmip_long_det_auto_id": { + "kms": "kmip", + "type": "long", + "algo": "det", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$numberLong": "456" + } + }, + "kmip_long_det_explicit_id": { + "kms": "kmip", + "type": "long", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$numberLong": "456" + } + }, + "kmip_long_det_explicit_altname": { + "kms": "kmip", + "type": "long", + "algo": "det", + "method": "explicit", + 
"identifier": "altname", + "allowed": true, + "value": { + "$numberLong": "456" + } + }, + "kmip_decimal_rand_auto_id": { + "kms": "kmip", + "type": "decimal", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$numberDecimal": "1.234" + } + }, + "kmip_decimal_rand_auto_altname": { + "kms": "kmip", + "type": "decimal", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$numberDecimal": "1.234" + } + }, + "kmip_decimal_rand_explicit_id": { + "kms": "kmip", + "type": "decimal", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$numberDecimal": "1.234" + } + }, + "kmip_decimal_rand_explicit_altname": { + "kms": "kmip", + "type": "decimal", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$numberDecimal": "1.234" + } + }, + "kmip_decimal_det_explicit_id": { + "kms": "kmip", + "type": "decimal", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": { + "$numberDecimal": "1.234" + } + }, + "kmip_decimal_det_explicit_altname": { + "kms": "kmip", + "type": "decimal", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": { + "$numberDecimal": "1.234" + } + }, + "kmip_minKey_rand_explicit_id": { + "kms": "kmip", + "type": "minKey", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": { + "$minKey": 1 + } + }, + "kmip_minKey_rand_explicit_altname": { + "kms": "kmip", + "type": "minKey", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": { + "$minKey": 1 + } + }, + "kmip_minKey_det_explicit_id": { + "kms": "kmip", + "type": "minKey", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": { + "$minKey": 1 + } + }, + "kmip_minKey_det_explicit_altname": { + "kms": "kmip", + "type": "minKey", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": { + "$minKey": 1 + } + }, + "kmip_maxKey_rand_explicit_id": { + "kms": "kmip", + "type": "maxKey", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": { + "$maxKey": 1 + } + }, + "kmip_maxKey_rand_explicit_altname": { + "kms": "kmip", + "type": "maxKey", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": { + "$maxKey": 1 + } + }, + "kmip_maxKey_det_explicit_id": { + "kms": "kmip", + "type": "maxKey", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": { + "$maxKey": 1 + } + }, + "kmip_maxKey_det_explicit_altname": { + "kms": "kmip", + "type": "maxKey", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": { + "$maxKey": 1 + } + }, "payload=0,algo=rand": { "kms": "local", "type": "string", diff --git a/test/client-side-encryption/spec/azureKMS.json b/test/client-side-encryption/spec/azureKMS.json index f0f5329d70..afecf40b0a 100644 --- a/test/client-side-encryption/spec/azureKMS.json +++ b/test/client-side-encryption/spec/azureKMS.json @@ -64,6 +64,20 @@ "bsonType": "string", "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" } + }, + "encrypted_string_kmip": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "dBHpr8aITfeBQ15grpbLpQ==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": 
"AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } } }, "bsonType": "object" diff --git a/test/client-side-encryption/spec/gcpKMS.json b/test/client-side-encryption/spec/gcpKMS.json index 297d5d0dc8..c2c08b8a23 100644 --- a/test/client-side-encryption/spec/gcpKMS.json +++ b/test/client-side-encryption/spec/gcpKMS.json @@ -64,6 +64,20 @@ "bsonType": "string", "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" } + }, + "encrypted_string_kmip": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "dBHpr8aITfeBQ15grpbLpQ==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } } }, "bsonType": "object" diff --git a/test/client-side-encryption/spec/kmipKMS.json b/test/client-side-encryption/spec/kmipKMS.json new file mode 100644 index 0000000000..5749d21ab8 --- /dev/null +++ b/test/client-side-encryption/spec/kmipKMS.json @@ -0,0 +1,223 @@ +{ + "runOn": [ + { + "minServerVersion": "4.1.10" + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "json_schema": { + "properties": { + "encrypted_string_aws": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "encrypted_string_azure": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AZURE+AAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "encrypted_string_gcp": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "GCP+AAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "encrypted_string_local": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "encrypted_string_kmip": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "dBHpr8aITfeBQ15grpbLpQ==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + } + }, + "bsonType": "object" + }, + "key_vault_data": [ + { + "_id": { + "$binary": { + "base64": "dBHpr8aITfeBQ15grpbLpQ==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "eUYDyB0HuWb+lQgUwO+6qJQyTTDTY2gp9FbemL7ZFo0pvr0x6rm6Ff9OVUTGH6HyMKipaeHdiIJU1dzsLwvqKvi7Beh+U4iaIWX/K0oEg1GOsJc0+Z/in8gNHbGUYLmycHViM3LES3kdt7FdFSUl5rEBHrM71yoNEXImz17QJWMGOuT4x6yoi2pvnaRJwfrI4DjpmnnTrDMac92jgZehbg==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1634220190041" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1634220190041" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "kmip", + "keyId": "1" + }, + "keyAltNames": [ + "altname", + "kmip_altname" + ] + } + ], + "tests": [ + { + "description": "Insert a document with auto encryption using KMIP KMS provider", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "kmip": {} + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encrypted_string_kmip": "string0" + } + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": 
"listCollections" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "dBHpr8aITfeBQ15grpbLpQ==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault" + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encrypted_string_kmip": { + "$binary": { + "base64": "AXQR6a/GiE33gUNeYK6Wy6UCKCwtKFIsL8eKObDVxvqGupJNUk7kXswHhB7G5j/C1D+6no+Asra0KgSU43bTL3ooIBLVyIzbV5CDJYqzAsa4WQ==", + "subType": "06" + } + } + } + ], + "ordered": true + }, + "command_name": "insert" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 1, + "encrypted_string_kmip": { + "$binary": { + "base64": "AXQR6a/GiE33gUNeYK6Wy6UCKCwtKFIsL8eKObDVxvqGupJNUk7kXswHhB7G5j/C1D+6no+Asra0KgSU43bTL3ooIBLVyIzbV5CDJYqzAsa4WQ==", + "subType": "06" + } + } + } + ] + } + } + } + ] +} diff --git a/test/test_encryption.py b/test/test_encryption.py index d94fcf3469..72e7dbbf1c 100644 --- a/test/test_encryption.py +++ b/test/test_encryption.py @@ -17,11 +17,12 @@ import base64 import copy import os +import re import ssl -import traceback import socket import sys import textwrap +import traceback import uuid sys.path[0:0] = [""] @@ -516,6 +517,10 @@ def test_with_statement(self): 'email': os.environ.get('FLE_GCP_EMAIL', ''), 'privateKey': os.environ.get('FLE_GCP_PRIVATEKEY', '')} +KMIP = {'endpoint': os.environ.get('FLE_KMIP_ENDPOINT', 'localhost:5698')} +KMS_TLS_OPTS = {'kmip': {'tlsCAFile': CA_PEM, + 'tlsCertificateKeyFile': CLIENT_PEM}} + class TestSpec(SpecRunner): @@ -550,6 +555,9 @@ def parse_auto_encrypt_opts(self, opts): kms_providers['gcp'] = GCP_CREDS if not any(AZURE_CREDS.values()): self.skipTest('GCP environment credentials are not set') + if 'kmip' in kms_providers: + kms_providers['kmip'] = KMIP + opts['kms_tls_options'] = KMS_TLS_OPTS if 'key_vault_namespace' not in opts: opts['key_vault_namespace'] = 'keyvault.datakeys' opts = dict(opts) @@ -631,6 +639,13 @@ def run_scenario(self): b'Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ' b'5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk') +ALL_KMS_PROVIDERS = { + 'aws': AWS_CREDS, + 'azure': AZURE_CREDS, + 'gcp': GCP_CREDS, + 'kmip': KMIP, + 'local': {'key': LOCAL_MASTER_KEY}} + LOCAL_KEY_ID = Binary( base64.b64decode(b'LOCALAAAAAAAAAAAAAAAAA=='), UUID_SUBTYPE) AWS_KEY_ID = Binary( @@ -639,6 +654,8 @@ def run_scenario(self): base64.b64decode(b'AZUREAAAAAAAAAAAAAAAAA=='), UUID_SUBTYPE) GCP_KEY_ID = Binary( base64.b64decode(b'GCPAAAAAAAAAAAAAAAAAAA=='), UUID_SUBTYPE) +KMIP_KEY_ID = Binary( + base64.b64decode(b'KMIPAAAAAAAAAAAAAAAAAA=='), UUID_SUBTYPE) def create_with_schema(coll, json_schema): @@ -661,10 +678,7 @@ def create_key_vault(vault, *data_keys): class TestDataKeyDoubleEncryption(EncryptionIntegrationTest): - KMS_PROVIDERS = {'aws': AWS_CREDS, - 'azure': AZURE_CREDS, - 'gcp': GCP_CREDS, - 'local': {'key': LOCAL_MASTER_KEY}} + KMS_PROVIDERS = ALL_KMS_PROVIDERS MASTER_KEYS = { 'aws': { @@ -679,6 +693,7 @@ class TestDataKeyDoubleEncryption(EncryptionIntegrationTest): 'location': 'global', 'keyRing': 'key-ring-csfle', 'keyName': 'key-name-csfle'}, + 'kmip': {}, 'local': None } @@ -710,11 +725,13 @@ def setUpClass(cls): } } opts = AutoEncryptionOpts( - cls.KMS_PROVIDERS, 'keyvault.datakeys', schema_map=schemas) + cls.KMS_PROVIDERS, 'keyvault.datakeys', schema_map=schemas, + 
kms_tls_options=KMS_TLS_OPTS) cls.client_encrypted = rs_or_single_client( auto_encryption_opts=opts, uuidRepresentation='standard') cls.client_encryption = ClientEncryption( - cls.KMS_PROVIDERS, 'keyvault.datakeys', cls.client, OPTS) + cls.KMS_PROVIDERS, 'keyvault.datakeys', cls.client, OPTS, + kms_tls_options=KMS_TLS_OPTS) @classmethod def tearDownClass(cls): @@ -784,6 +801,9 @@ def test_data_key_azure(self): def test_data_key_gcp(self): self.run_test('gcp') + def test_data_key_kmip(self): + self.run_test('kmip') + class TestExternalKeyVault(EncryptionIntegrationTest): @@ -882,10 +902,7 @@ def setUpClass(cls): @staticmethod def kms_providers(): - return {'aws': AWS_CREDS, - 'azure': AZURE_CREDS, - 'gcp': GCP_CREDS, - 'local': {'key': LOCAL_MASTER_KEY}} + return ALL_KMS_PROVIDERS @staticmethod def fix_up_schema(json_schema): @@ -923,7 +940,8 @@ def _test_corpus(self, opts): json_data('corpus', 'corpus-key-local.json'), json_data('corpus', 'corpus-key-aws.json'), json_data('corpus', 'corpus-key-azure.json'), - json_data('corpus', 'corpus-key-gcp.json')) + json_data('corpus', 'corpus-key-gcp.json'), + json_data('corpus', 'corpus-key-kmip.json')) self.addCleanup(vault.drop) client_encrypted = rs_or_single_client( @@ -932,7 +950,7 @@ def _test_corpus(self, opts): client_encryption = ClientEncryption( self.kms_providers(), 'keyvault.datakeys', client_context.client, - OPTS) + OPTS, kms_tls_options=KMS_TLS_OPTS) self.addCleanup(client_encryption.close) corpus = self.fix_up_curpus(json_data('corpus', 'corpus.json')) @@ -940,7 +958,7 @@ def _test_corpus(self, opts): for key, value in corpus.items(): corpus_copied[key] = copy.deepcopy(value) if key in ('_id', 'altname_aws', 'altname_azure', 'altname_gcp', - 'altname_local'): + 'altname_local', 'altname_kmip'): continue if value['method'] == 'auto': continue @@ -948,7 +966,7 @@ def _test_corpus(self, opts): identifier = value['identifier'] self.assertIn(identifier, ('id', 'altname')) kms = value['kms'] - self.assertIn(kms, ('local', 'aws', 'azure', 'gcp')) + self.assertIn(kms, ('local', 'aws', 'azure', 'gcp', 'kmip')) if identifier == 'id': if kms == 'local': kwargs = dict(key_id=LOCAL_KEY_ID) @@ -956,8 +974,10 @@ def _test_corpus(self, opts): kwargs = dict(key_id=AWS_KEY_ID) elif kms == 'azure': kwargs = dict(key_id=AZURE_KEY_ID) - else: + elif kms == 'gcp': kwargs = dict(key_id=GCP_KEY_ID) + else: + kwargs = dict(key_id=KMIP_KEY_ID) else: kwargs = dict(key_alt_name=kms) @@ -990,7 +1010,7 @@ def _test_corpus(self, opts): corpus_encrypted_actual = coll.find_one() for key, value in corpus_encrypted_actual.items(): if key in ('_id', 'altname_aws', 'altname_azure', - 'altname_gcp', 'altname_local'): + 'altname_gcp', 'altname_local', 'altname_kmip'): continue if value['algo'] == 'det': @@ -1011,7 +1031,8 @@ def _test_corpus(self, opts): self.assertEqual(value['value'], corpus[key]['value'], key) def test_corpus(self): - opts = AutoEncryptionOpts(self.kms_providers(), 'keyvault.datakeys') + opts = AutoEncryptionOpts(self.kms_providers(), 'keyvault.datakeys', + kms_tls_options=KMS_TLS_OPTS) self._test_corpus(opts) def test_corpus_local_schema(self): @@ -1019,7 +1040,8 @@ def test_corpus_local_schema(self): schemas = {'db.coll': self.fix_up_schema( json_data('corpus', 'corpus-schema.json'))} opts = AutoEncryptionOpts( - self.kms_providers(), 'keyvault.datakeys', schema_map=schemas) + self.kms_providers(), 'keyvault.datakeys', schema_map=schemas, + kms_tls_options=KMS_TLS_OPTS) self._test_corpus(opts) @@ -1142,21 +1164,26 @@ def setUpClass(cls): def 
setUp(self): kms_providers = {'aws': AWS_CREDS, 'azure': AZURE_CREDS, - 'gcp': GCP_CREDS} + 'gcp': GCP_CREDS, + 'kmip': KMIP} self.client_encryption = ClientEncryption( kms_providers=kms_providers, key_vault_namespace='keyvault.datakeys', key_vault_client=client_context.client, - codec_options=OPTS) + codec_options=OPTS, + kms_tls_options=KMS_TLS_OPTS) kms_providers_invalid = copy.deepcopy(kms_providers) kms_providers_invalid['azure']['identityPlatformEndpoint'] = 'example.com:443' kms_providers_invalid['gcp']['endpoint'] = 'example.com:443' + kms_providers_invalid['kmip']['endpoint'] = 'doesnotexist.local:5698' self.client_encryption_invalid = ClientEncryption( kms_providers=kms_providers_invalid, key_vault_namespace='keyvault.datakeys', key_vault_client=client_context.client, - codec_options=OPTS) + codec_options=OPTS, + kms_tls_options=KMS_TLS_OPTS) + self._kmip_host_error = '' def tearDown(self): self.client_encryption.close() @@ -1289,6 +1316,41 @@ def test_09_gcp_invalid_endpoint(self): self.client_encryption.create_data_key( 'gcp', master_key=master_key) + def kmip_host_error(self): + if self._kmip_host_error: + return self._kmip_host_error + # The full error should be something like: + # "[Errno 8] nodename nor servname provided, or not known" + try: + socket.getaddrinfo('doesnotexist.local', 5698, socket.AF_INET, + socket.SOCK_STREAM) + except Exception as exc: + self._kmip_host_error = re.escape(str(exc)) + return self._kmip_host_error + + def test_10_kmip_invalid_endpoint(self): + key = {'keyId': '1'} + self.run_test_expected_success('kmip', key) + with self.assertRaisesRegex(EncryptionError, self.kmip_host_error()): + self.client_encryption_invalid.create_data_key('kmip', key) + + def test_11_kmip_master_key_endpoint(self): + key = {'keyId': '1', 'endpoint': KMIP['endpoint']} + self.run_test_expected_success('kmip', key) + # Override invalid endpoint: + data_key_id = self.client_encryption_invalid.create_data_key( + 'kmip', master_key=key) + encrypted = self.client_encryption_invalid.encrypt( + 'test', Algorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic, + key_id=data_key_id) + self.assertEqual( + 'test', self.client_encryption_invalid.decrypt(encrypted)) + + def test_12_kmip_master_key_invalid_endpoint(self): + key = {'keyId': '1', 'endpoint': 'doesnotexist.local:5698'} + with self.assertRaisesRegex(EncryptionError, self.kmip_host_error()): + self.client_encryption.create_data_key('kmip', key) + class AzureGCPEncryptionTestMixin(object): DEK = None @@ -1709,5 +1771,143 @@ def test_invalid_hostname_in_kms_certificate(self): self.client_encrypted.create_data_key('aws', master_key=key) +# https://github.com/mongodb/specifications/blob/master/source/client-side-encryption/tests/README.rst#kms-tls-options-tests +class TestKmsTLSOptions(EncryptionIntegrationTest): + @unittest.skipUnless(any(AWS_CREDS.values()), + 'AWS environment credentials are not set') + @unittest.skipIf(sys.version_info[:2] >= (3, 10) and + sys.platform == 'win32', + 'These tests hang with Python 3.10 on Windows') + def setUp(self): + super(TestKmsTLSOptions, self).setUp() + # 1, create client with only tlsCAFile. 
+ providers = copy.deepcopy(ALL_KMS_PROVIDERS) + providers['azure']['identityPlatformEndpoint'] = '127.0.0.1:8002' + providers['gcp']['endpoint'] = '127.0.0.1:8002' + kms_tls_opts_ca_only = { + 'aws': {'tlsCAFile': CA_PEM}, + 'azure': {'tlsCAFile': CA_PEM}, + 'gcp': {'tlsCAFile': CA_PEM}, + 'kmip': {'tlsCAFile': CA_PEM}, + } + self.client_encryption_no_client_cert = ClientEncryption( + providers, 'keyvault.datakeys', self.client, OPTS, + kms_tls_options=kms_tls_opts_ca_only) + self.addCleanup(self.client_encryption_no_client_cert.close) + # 2, same providers as above but with tlsCertificateKeyFile. + kms_tls_opts = copy.deepcopy(kms_tls_opts_ca_only) + for p in kms_tls_opts: + kms_tls_opts[p]['tlsCertificateKeyFile'] = CLIENT_PEM + self.client_encryption_with_tls = ClientEncryption( + providers, 'keyvault.datakeys', self.client, OPTS, + kms_tls_options=kms_tls_opts) + self.addCleanup(self.client_encryption_with_tls.close) + # 3, update endpoints to expired host. + providers = copy.deepcopy(providers) + providers['azure']['identityPlatformEndpoint'] = '127.0.0.1:8000' + providers['gcp']['endpoint'] = '127.0.0.1:8000' + providers['kmip']['endpoint'] = '127.0.0.1:8000' + self.client_encryption_expired = ClientEncryption( + providers, 'keyvault.datakeys', self.client, OPTS, + kms_tls_options=kms_tls_opts_ca_only) + self.addCleanup(self.client_encryption_expired.close) + # 3, update endpoints to invalid host. + providers = copy.deepcopy(providers) + providers['azure']['identityPlatformEndpoint'] = '127.0.0.1:8001' + providers['gcp']['endpoint'] = '127.0.0.1:8001' + providers['kmip']['endpoint'] = '127.0.0.1:8001' + self.client_encryption_invalid_hostname = ClientEncryption( + providers, 'keyvault.datakeys', self.client, OPTS, + kms_tls_options=kms_tls_opts_ca_only) + self.addCleanup(self.client_encryption_invalid_hostname.close) + # Errors when client has no cert, some examples: + # [SSL: TLSV13_ALERT_CERTIFICATE_REQUIRED] tlsv13 alert certificate required (_ssl.c:2623) + self.cert_error = 'certificate required|SSL handshake failed' + # On Windows this error might be: + # [WinError 10054] An existing connection was forcibly closed by the remote host + if sys.platform == 'win32': + self.cert_error += '|forcibly closed' + # On Windows Python 3.10+ this error might be: + # EOF occurred in violation of protocol (_ssl.c:2384) + if sys.version_info[:2] >= (3, 10): + self.cert_error += '|forcibly closed' + + def test_01_aws(self): + key = { + 'region': 'us-east-1', + 'key': 'arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0', + 'endpoint': '127.0.0.1:8002', + } + with self.assertRaisesRegex(EncryptionError, self.cert_error): + self.client_encryption_no_client_cert.create_data_key('aws', key) + # "parse error" here means that the TLS handshake succeeded. + with self.assertRaisesRegex(EncryptionError, 'parse error'): + self.client_encryption_with_tls.create_data_key('aws', key) + # Some examples: + # certificate verify failed: certificate has expired (_ssl.c:1129) + # amazon1-2018 Python 3.6: certificate verify failed (_ssl.c:852) + key['endpoint'] = '127.0.0.1:8000' + with self.assertRaisesRegex( + EncryptionError, 'expired|certificate verify failed'): + self.client_encryption_expired.create_data_key('aws', key) + # Some examples: + # certificate verify failed: IP address mismatch, certificate is not valid for '127.0.0.1'. 
(_ssl.c:1129)" + # hostname '127.0.0.1' doesn't match 'wronghost.com' + key['endpoint'] = '127.0.0.1:8001' + with self.assertRaisesRegex( + EncryptionError, 'IP address mismatch|wronghost'): + self.client_encryption_invalid_hostname.create_data_key('aws', key) + + def test_02_azure(self): + key = {'keyVaultEndpoint': 'doesnotexist.local', 'keyName': 'foo'} + # Missing client cert error. + with self.assertRaisesRegex(EncryptionError, self.cert_error): + self.client_encryption_no_client_cert.create_data_key('azure', key) + # "HTTP status=404" here means that the TLS handshake succeeded. + with self.assertRaisesRegex(EncryptionError, 'HTTP status=404'): + self.client_encryption_with_tls.create_data_key('azure', key) + # Expired cert error. + with self.assertRaisesRegex( + EncryptionError, 'expired|certificate verify failed'): + self.client_encryption_expired.create_data_key('azure', key) + # Invalid cert hostname error. + with self.assertRaisesRegex( + EncryptionError, 'IP address mismatch|wronghost'): + self.client_encryption_invalid_hostname.create_data_key( + 'azure', key) + + def test_03_gcp(self): + key = {'projectId': 'foo', 'location': 'bar', 'keyRing': 'baz', + 'keyName': 'foo'} + # Missing client cert error. + with self.assertRaisesRegex(EncryptionError, self.cert_error): + self.client_encryption_no_client_cert.create_data_key('gcp', key) + # "HTTP status=404" here means that the TLS handshake succeeded. + with self.assertRaisesRegex(EncryptionError, 'HTTP status=404'): + self.client_encryption_with_tls.create_data_key('gcp', key) + # Expired cert error. + with self.assertRaisesRegex( + EncryptionError, 'expired|certificate verify failed'): + self.client_encryption_expired.create_data_key('gcp', key) + # Invalid cert hostname error. + with self.assertRaisesRegex( + EncryptionError, 'IP address mismatch|wronghost'): + self.client_encryption_invalid_hostname.create_data_key('gcp', key) + + def test_04_kmip(self): + # Missing client cert error. + with self.assertRaisesRegex(EncryptionError, self.cert_error): + self.client_encryption_no_client_cert.create_data_key('kmip') + self.client_encryption_with_tls.create_data_key('kmip') + # Expired cert error. + with self.assertRaisesRegex( + EncryptionError, 'expired|certificate verify failed'): + self.client_encryption_expired.create_data_key('kmip') + # Invalid cert hostname error. + with self.assertRaisesRegex( + EncryptionError, 'IP address mismatch|wronghost'): + self.client_encryption_invalid_hostname.create_data_key('kmip') + + if __name__ == "__main__": unittest.main() From 278a50d4b0365206412044be450e5cdda46a1b65 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Tue, 16 Nov 2021 12:51:36 -0800 Subject: [PATCH 0007/1588] PYTHON-3005 Make maxConnecting configurable (#789) --- doc/changelog.rst | 2 + pymongo/client_options.py | 2 + pymongo/common.py | 1 + pymongo/mongo_client.py | 5 +- ...kout-custom-maxConnecting-is-enforced.json | 81 +++++++++++++++++++ test/uri_options/connection-pool-options.json | 23 +++++- 6 files changed, 111 insertions(+), 3 deletions(-) create mode 100644 test/cmap/pool-checkout-custom-maxConnecting-is-enforced.json diff --git a/doc/changelog.rst b/doc/changelog.rst index 3b3a700b3b..3b46667cee 100644 --- a/doc/changelog.rst +++ b/doc/changelog.rst @@ -185,6 +185,8 @@ Notable improvements - Enhanced connection pooling to create connections more efficiently and avoid connection storms. +- Added the ``maxConnecting`` URI and + :class:`~pymongo.mongo_client.MongoClient` keyword argument. 
- :class:`~pymongo.mongo_client.MongoClient` now accepts a URI and keyword
  argument `srvMaxHosts` that limits the number of mongos-like hosts a client
  will connect to. More specifically, when a mongodb+srv:// connection string
diff --git a/pymongo/client_options.py b/pymongo/client_options.py
index 845d4ef9a1..f7dbf255bc 100644
--- a/pymongo/client_options.py
+++ b/pymongo/client_options.py
@@ -138,6 +138,7 @@ def _parse_pool_options(options):
         options.get('zlibcompressionlevel', -1))
     ssl_context, tls_allow_invalid_hostnames = _parse_ssl_options(options)
     load_balanced = options.get('loadbalanced')
+    max_connecting = options.get('maxconnecting', common.MAX_CONNECTING)
     return PoolOptions(max_pool_size,
                        min_pool_size,
                        max_idle_time_seconds,
@@ -148,6 +149,7 @@ def _parse_pool_options(options):
                        appname,
                        driver,
                        compression_settings,
+                       max_connecting=max_connecting,
                        server_api=server_api,
                        load_balanced=load_balanced)
diff --git a/pymongo/common.py b/pymongo/common.py
index 5dd7b180c0..772f2f299b 100644
--- a/pymongo/common.py
+++ b/pymongo/common.py
@@ -615,6 +615,7 @@ def validate_auto_encryption_opts_or_none(option, value):
     'journal': validate_boolean_or_string,
     'localthresholdms': validate_positive_float_or_zero,
     'maxidletimems': validate_timeout_or_none,
+    'maxconnecting': validate_positive_integer,
     'maxpoolsize': validate_non_negative_integer_or_none,
     'maxstalenessseconds': validate_max_staleness,
     'readconcernlevel': validate_string_or_none,
diff --git a/pymongo/mongo_client.py b/pymongo/mongo_client.py
index a308219cfb..1f8b781487 100644
--- a/pymongo/mongo_client.py
+++ b/pymongo/mongo_client.py
@@ -216,6 +216,8 @@ def __init__(
           - `maxIdleTimeMS` (optional): The maximum number of milliseconds that
             a connection can remain idle in the pool before being removed and
             replaced. Defaults to `None` (no limit).
+          - `maxConnecting` (optional): The maximum number of connections that
+            each pool can establish concurrently. Defaults to `2`.
           - `socketTimeoutMS`: (integer or None) Controls how long (in
             milliseconds) the driver will wait for a response after sending an
             ordinary (non-monitoring) database operation before concluding that
@@ -506,7 +508,8 @@ def __init__(
            arguments. The default for `uuidRepresentation` was changed from
            ``pythonLegacy`` to ``unspecified``.

-           Added the ``srvServiceName`` URI and keyword argument.
+           Added the ``srvServiceName`` and ``maxConnecting`` URI and keyword
+           arguments.

         .. versionchanged:: 3.12
            Added the ``server_api`` keyword argument.
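For illustration, here is a minimal sketch of the new option in use; the host,
port, and pool sizes below are placeholders, not part of this patch.
``maxConnecting`` is accepted both as a URI option and as a
:class:`~pymongo.mongo_client.MongoClient` keyword argument::

    from pymongo import MongoClient

    # URI form: establish at most one new connection at a time per pool,
    # with at most two connections in the pool overall.
    client = MongoClient(
        'mongodb://localhost:27017/?maxConnecting=1&maxPoolSize=2')

    # Equivalent keyword-argument form.
    client = MongoClient('localhost', 27017, maxConnecting=1, maxPoolSize=2)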
diff --git a/test/cmap/pool-checkout-custom-maxConnecting-is-enforced.json b/test/cmap/pool-checkout-custom-maxConnecting-is-enforced.json new file mode 100644 index 0000000000..6620f82fd9 --- /dev/null +++ b/test/cmap/pool-checkout-custom-maxConnecting-is-enforced.json @@ -0,0 +1,81 @@ +{ + "version": 1, + "style": "integration", + "description": "custom maxConnecting is enforced", + "runOn": [ + { + "minServerVersion": "4.4.0" + } + ], + "failPoint": { + "configureFailPoint": "failCommand", + "mode": "alwaysOn", + "data": { + "failCommands": [ + "isMaster", + "hello" + ], + "closeConnection": false, + "blockConnection": true, + "blockTimeMS": 500 + } + }, + "poolOptions": { + "maxConnecting": 1, + "maxPoolSize": 2, + "waitQueueTimeoutMS": 5000 + }, + "operations": [ + { + "name": "ready" + }, + { + "name": "start", + "target": "thread1" + }, + { + "name": "start", + "target": "thread2" + }, + { + "name": "checkOut", + "thread": "thread1" + }, + { + "name": "waitForEvent", + "event": "ConnectionCreated", + "count": 1 + }, + { + "name": "checkOut", + "thread": "thread2" + }, + { + "name": "waitForEvent", + "event": "ConnectionReady", + "count": 2 + } + ], + "events": [ + { + "type": "ConnectionCreated" + }, + { + "type": "ConnectionReady" + }, + { + "type": "ConnectionCreated" + }, + { + "type": "ConnectionReady" + } + ], + "ignore": [ + "ConnectionCheckOutStarted", + "ConnectionCheckedIn", + "ConnectionCheckedOut", + "ConnectionClosed", + "ConnectionPoolCreated", + "ConnectionPoolReady" + ] +} diff --git a/test/uri_options/connection-pool-options.json b/test/uri_options/connection-pool-options.json index aae16190ba..118b2f6783 100644 --- a/test/uri_options/connection-pool-options.json +++ b/test/uri_options/connection-pool-options.json @@ -2,7 +2,7 @@ "tests": [ { "description": "Valid connection pool options are parsed correctly", - "uri": "mongodb://example.com/?maxIdleTimeMS=50000&maxPoolSize=5&minPoolSize=3", + "uri": "mongodb://example.com/?maxIdleTimeMS=50000&maxPoolSize=5&minPoolSize=3&maxConnecting=1", "valid": true, "warning": false, "hosts": null, @@ -10,7 +10,8 @@ "options": { "maxIdleTimeMS": 50000, "maxPoolSize": 5, - "minPoolSize": 3 + "minPoolSize": 3, + "maxConnecting": 1 } }, { @@ -52,6 +53,24 @@ "options": { "minPoolSize": 0 } + }, + { + "description": "maxConnecting=0 causes a warning", + "uri": "mongodb://example.com/?maxConnecting=0", + "valid": true, + "warning": true, + "hosts": null, + "auth": null, + "options": {} + }, + { + "description": "maxConnecting<0 causes a warning", + "uri": "mongodb://example.com/?maxConnecting=-1", + "valid": true, + "warning": true, + "hosts": null, + "auth": null, + "options": {} } ] } From a655c576c9902ddd928f1c425037d38d50fddd92 Mon Sep 17 00:00:00 2001 From: Bernie Hackett Date: Tue, 16 Nov 2021 14:23:55 -0800 Subject: [PATCH 0008/1588] PYTHON-3015 Document cipher mismatch issues (#791) --- doc/examples/tls.rst | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/doc/examples/tls.rst b/doc/examples/tls.rst index 2f72555d7a..03ac63a633 100644 --- a/doc/examples/tls.rst +++ b/doc/examples/tls.rst @@ -213,3 +213,21 @@ revocation checking failed:: [('SSL routines', 'tls_process_initial_server_flight', 'invalid status response')] See :ref:`OCSP` for more details. + +Python 3.10+ incompatibilities with TLS/SSL on MongoDB <= 4.0 +............................................................. + +Note that `changes made to the ssl module in Python 3.10+ +`_ may cause incompatibilities +with MongoDB <= 4.0. 
The following are some example errors that may occur with this
+combination::
+
+    SSL handshake failed: localhost:27017: [SSL: SSLV3_ALERT_HANDSHAKE_FAILURE] sslv3 alert handshake failure (_ssl.c:997)
+    SSL handshake failed: localhost:27017: EOF occurred in violation of protocol (_ssl.c:997)
+
+The MongoDB server logs may show the following error::
+
+    2021-06-30T21:22:44.917+0100 E NETWORK  [conn16] SSL: error:1408A0C1:SSL routines:ssl3_get_client_hello:no shared cipher
+
+To resolve this issue, use Python <3.10, upgrade to MongoDB 4.2+, or install
+pymongo with the :ref:`OCSP` extra which relies on PyOpenSSL.

From 99aab1b0ba71afaa278882b860af9cb0e3163087 Mon Sep 17 00:00:00 2001
From: Shane Harvey
Date: Tue, 16 Nov 2021 14:34:36 -0800
Subject: [PATCH 0009/1588] PYTHON-3017 Properly check for closed KMS
 connections (#790)

---
 pymongo/encryption.py   | 2 ++
 test/test_encryption.py | 8 +++-----
 2 files changed, 5 insertions(+), 5 deletions(-)

diff --git a/pymongo/encryption.py b/pymongo/encryption.py
index 117666ac82..cb4080397f 100644
--- a/pymongo/encryption.py
+++ b/pymongo/encryption.py
@@ -129,6 +129,8 @@ def kms_request(self, kms_context):
             conn.sendall(message)
             while kms_context.bytes_needed > 0:
                 data = conn.recv(kms_context.bytes_needed)
+                if not data:
+                    raise OSError('KMS connection closed')
                 kms_context.feed(data)
         finally:
             conn.close()
diff --git a/test/test_encryption.py b/test/test_encryption.py
index 72e7dbbf1c..f77d3fffc7 100644
--- a/test/test_encryption.py
+++ b/test/test_encryption.py
@@ -1775,9 +1775,6 @@ def test_invalid_hostname_in_kms_certificate(self):
 class TestKmsTLSOptions(EncryptionIntegrationTest):
     @unittest.skipUnless(any(AWS_CREDS.values()),
                          'AWS environment credentials are not set')
-    @unittest.skipIf(sys.version_info[:2] >= (3, 10) and
-                     sys.platform == 'win32',
-                     'These tests hang with Python 3.10 on Windows')
    def setUp(self):
         super(TestKmsTLSOptions, self).setUp()
         # 1, create client with only tlsCAFile.
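The ``pymongo/encryption.py`` hunk above relies on the fact that
``socket.recv`` returns an empty byte string once the peer has closed the
connection; without the new check, ``kms_context.feed(b'')`` would make no
progress and the read loop could spin indefinitely. A generic sketch of the
same pattern (``read_exactly`` is an illustrative helper, not part of this
patch)::

    def read_exactly(conn, n):
        """Read exactly n bytes from a connected socket, failing fast on EOF."""
        buf = bytearray()
        while len(buf) < n:
            chunk = conn.recv(n - len(buf))
            if not chunk:
                # recv() returns b'' only when the peer has closed the
                # connection, so a short read here is an error, not a retry.
                raise OSError('connection closed with %d bytes unread'
                              % (n - len(buf)))
            buf.extend(chunk)
        return bytes(buf)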
@@ -1822,7 +1819,8 @@ def setUp(self): self.addCleanup(self.client_encryption_invalid_hostname.close) # Errors when client has no cert, some examples: # [SSL: TLSV13_ALERT_CERTIFICATE_REQUIRED] tlsv13 alert certificate required (_ssl.c:2623) - self.cert_error = 'certificate required|SSL handshake failed' + self.cert_error = ('certificate required|SSL handshake failed|' + 'KMS connection closed') # On Windows this error might be: # [WinError 10054] An existing connection was forcibly closed by the remote host if sys.platform == 'win32': @@ -1830,7 +1828,7 @@ def setUp(self): # On Windows Python 3.10+ this error might be: # EOF occurred in violation of protocol (_ssl.c:2384) if sys.version_info[:2] >= (3, 10): - self.cert_error += '|forcibly closed' + self.cert_error += '|EOF' def test_01_aws(self): key = { From 8fc6dc3c4ffd358005bd3cae8a6c13f0d85a31cc Mon Sep 17 00:00:00 2001 From: Julius Park Date: Tue, 16 Nov 2021 16:26:18 -0800 Subject: [PATCH 0010/1588] PYTHON-1489 Merge ajdavis/pymongo-mockup-tests into pymongo (#787) --- .evergreen/config.yml | 2 +- .evergreen/run-mockupdb-tests.sh | 14 +- test/mockupdb/operations.py | 148 ++++++++ test/mockupdb/test_auth_recovering_member.py | 50 +++ test/mockupdb/test_cluster_time.py | 193 ++++++++++ test/mockupdb/test_cursor_namespace.py | 122 +++++++ test/mockupdb/test_getmore_sharded.py | 62 ++++ test/mockupdb/test_handshake.py | 172 +++++++++ test/mockupdb/test_initial_ismaster.py | 46 +++ test/mockupdb/test_legacy_crud.py | 126 +++++++ test/mockupdb/test_list_indexes.py | 78 ++++ test/mockupdb/test_max_staleness.py | 67 ++++ test/mockupdb/test_mixed_version_sharded.py | 89 +++++ .../mockupdb/test_mongos_command_read_mode.py | 125 +++++++ .../test_network_disconnect_primary.py | 87 +++++ test/mockupdb/test_op_msg.py | 340 ++++++++++++++++++ test/mockupdb/test_op_msg_read_preference.py | 197 ++++++++++ test/mockupdb/test_projection.py | 56 +++ test/mockupdb/test_query_read_pref_sharded.py | 107 ++++++ test/mockupdb/test_reset_and_request_check.py | 145 ++++++++ test/mockupdb/test_slave_okay_rs.py | 79 ++++ test/mockupdb/test_slave_okay_sharded.py | 101 ++++++ test/mockupdb/test_slave_okay_single.py | 104 ++++++ test/mockupdb/test_starting_from_overflow.py | 76 ++++ 24 files changed, 2577 insertions(+), 9 deletions(-) mode change 100644 => 100755 .evergreen/run-mockupdb-tests.sh create mode 100644 test/mockupdb/operations.py create mode 100755 test/mockupdb/test_auth_recovering_member.py create mode 100644 test/mockupdb/test_cluster_time.py create mode 100644 test/mockupdb/test_cursor_namespace.py create mode 100644 test/mockupdb/test_getmore_sharded.py create mode 100644 test/mockupdb/test_handshake.py create mode 100644 test/mockupdb/test_initial_ismaster.py create mode 100755 test/mockupdb/test_legacy_crud.py create mode 100644 test/mockupdb/test_list_indexes.py create mode 100644 test/mockupdb/test_max_staleness.py create mode 100644 test/mockupdb/test_mixed_version_sharded.py create mode 100644 test/mockupdb/test_mongos_command_read_mode.py create mode 100755 test/mockupdb/test_network_disconnect_primary.py create mode 100755 test/mockupdb/test_op_msg.py create mode 100644 test/mockupdb/test_op_msg_read_preference.py create mode 100644 test/mockupdb/test_projection.py create mode 100644 test/mockupdb/test_query_read_pref_sharded.py create mode 100755 test/mockupdb/test_reset_and_request_check.py create mode 100644 test/mockupdb/test_slave_okay_rs.py create mode 100644 test/mockupdb/test_slave_okay_sharded.py create mode 100644 
test/mockupdb/test_slave_okay_single.py
 create mode 100644 test/mockupdb/test_starting_from_overflow.py

diff --git a/.evergreen/config.yml b/.evergreen/config.yml
index 16be7f882a..d4c95105bf 100644
--- a/.evergreen/config.yml
+++ b/.evergreen/config.yml
@@ -346,7 +346,7 @@ functions:
         script: |
           set -o xtrace
           ${PREPARE_SHELL}
-          PYTHON_BINARY=${PYTHON_BINARY} PROJECT_DIRECTORY=${PROJECT_DIRECTORY} bash ${PROJECT_DIRECTORY}/.evergreen/run-mockupdb-tests.sh
+          PYTHON_BINARY=${PYTHON_BINARY} bash ${PROJECT_DIRECTORY}/.evergreen/run-mockupdb-tests.sh

   "run doctests":
     - command: shell.exec
diff --git a/.evergreen/run-mockupdb-tests.sh b/.evergreen/run-mockupdb-tests.sh
old mode 100644
new mode 100755
index d833fdea82..a0b67302a4
--- a/.evergreen/run-mockupdb-tests.sh
+++ b/.evergreen/run-mockupdb-tests.sh
@@ -1,20 +1,18 @@
 #!/bin/bash
-
+# Must be run from pymongo repo root
 set -o xtrace
 set -o errexit

 . .evergreen/utils.sh

 ${PYTHON_BINARY} setup.py clean
-cd ..

 createvirtualenv ${PYTHON_BINARY} mockuptests
-trap "deactivatei, rm -rf mockuptests" EXIT HUP
+trap "deactivate; rm -rf mockuptests" EXIT HUP

 # Install PyMongo from git clone so mockup-tests don't
 # download it from pypi.
-python -m pip install ${PROJECT_DIRECTORY}
-
-git clone https://github.com/ajdavis/pymongo-mockup-tests.git
-cd pymongo-mockup-tests
-python setup.py test
+python -m pip install .
+python -m pip install --upgrade 'https://github.com/ajdavis/mongo-mockup-db/archive/master.zip'
+cd ./test/mockupdb
+python -m unittest discover -v
diff --git a/test/mockupdb/operations.py b/test/mockupdb/operations.py
new file mode 100644
index 0000000000..2c8701ae83
--- /dev/null
+++ b/test/mockupdb/operations.py
@@ -0,0 +1,148 @@
+# Copyright 2015 MongoDB, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from collections import namedtuple
+
+from mockupdb import *
+from pymongo import ReadPreference
+
+__all__ = ['operations', 'upgrades']
+
+
+Operation = namedtuple(
+    'operation',
+    ['name', 'function', 'reply', 'op_type', 'not_master'])
+"""Client operations on MongoDB.
+
+Each has a human-readable name, a function that actually executes a test, and
+a type that maps to one of the types in the Server Selection Spec:
+'may-use-secondary', 'must-use-primary', etc.
+
+The special type 'always-use-secondary' applies to an operation with an explicit
+read mode, like the operation "command('c', read_preference=SECONDARY)".
+
+The not-master response is how a secondary responds to a must-use-primary op,
+or how a recovering member responds to a may-use-secondary op.
+
+Example uses:
+
+We can use "find_one" to validate that the SlaveOk bit is set when querying a
+standalone, even with mode PRIMARY, but that it isn't set when sent to a mongos
+with mode PRIMARY. Or it can validate that "$readPreference" is included in
+mongos queries except with mode PRIMARY or SECONDARY_PREFERRED (PYTHON-865).
+ +We can use "options_old" and "options_new" to test that the driver queries an +old server's system.namespaces collection, but uses the listCollections command +on a new server (PYTHON-857). + +"secondary command" is good to test that the client can direct reads to +secondaries in a replica set, or select a mongos for secondary reads in a +sharded cluster (PYTHON-868). +""" + +not_master_reply_to_query = OpReply( + {'$err': 'not master'}, + flags=REPLY_FLAGS['QueryFailure']) + +not_master_reply_to_command = OpReply(ok=0, errmsg='not master') + +operations = [ + Operation( + 'find_one', + lambda client: client.db.collection.find_one(), + reply={'cursor': {'id': 0, 'firstBatch': []}}, + op_type='may-use-secondary', + not_master=not_master_reply_to_query), + Operation( + 'count', + lambda client: client.db.collection.count_documents({}), + reply={'n': 1}, + op_type='may-use-secondary', + not_master=not_master_reply_to_command), + Operation( + 'aggregate', + lambda client: client.db.collection.aggregate([]), + reply={'cursor': {'id': 0, 'firstBatch': []}}, + op_type='may-use-secondary', + not_master=not_master_reply_to_command), + Operation( + 'mapreduce', + lambda client: client.db.collection.map_reduce( + 'function() {}', 'function() {}'), + reply={'result': {'db': 'db', 'collection': 'out_collection'}}, + op_type='must-use-primary', + not_master=not_master_reply_to_command), + Operation( + 'inline_mapreduce', + lambda client: client.db.collection.inline_map_reduce( + 'function() {}', 'function() {}', {'out': {'inline': 1}}), + reply={'results': []}, + op_type='may-use-secondary', + not_master=not_master_reply_to_command), + Operation( + 'options', + lambda client: client.db.collection.options(), + reply={'cursor': {'id': 0, 'firstBatch': []}}, + op_type='must-use-primary', + not_master=not_master_reply_to_command), + Operation( + 'command', + lambda client: client.db.command('foo'), + reply={'ok': 1}, + op_type='must-use-primary', # Ignores client's read preference. 
+ not_master=not_master_reply_to_command), + Operation( + 'secondary command', + lambda client: + client.db.command('foo', read_preference=ReadPreference.SECONDARY), + reply={'ok': 1}, + op_type='always-use-secondary', + not_master=OpReply(ok=0, errmsg='node is recovering')), + Operation( + 'listCollections', + lambda client: client.db.collection_names(), + reply={'cursor': {'id': 0, 'firstBatch': []}}, + op_type='must-use-primary', + not_master=not_master_reply_to_command), + Operation( + 'listIndexes', + lambda client: client.db.collection.index_information(), + reply={'cursor': {'id': 0, 'firstBatch': []}}, + op_type='must-use-primary', + not_master=not_master_reply_to_command), +] + + +_ops_by_name = dict([(op.name, op) for op in operations]) + +Upgrade = namedtuple('Upgrade', + ['name', 'function', 'old', 'new', 'wire_version']) + +upgrades = [ + Upgrade('index_information', + lambda client: client.db.collection.index_information(), + old=OpQuery(namespace='db.system.indexes'), + new=Command('listIndexes', 'collection', namespace='db'), + wire_version=3), + Upgrade('collection_names', + lambda client: client.db.collection_names(), + old=Command('aggregate', 'system.namespaces', namespace='db'), + new=Command('listCollections', namespace='db'), + wire_version=3), + Upgrade('options', + lambda client: client.db.collection.options(), + old=Command('aggregate', 'system.namespaces', namespace='db'), + new=Command('listCollections', namespace='db'), + wire_version=3), +] diff --git a/test/mockupdb/test_auth_recovering_member.py b/test/mockupdb/test_auth_recovering_member.py new file mode 100755 index 0000000000..360c593a00 --- /dev/null +++ b/test/mockupdb/test_auth_recovering_member.py @@ -0,0 +1,50 @@ +# Copyright 2015 MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from mockupdb import MockupDB +from pymongo import MongoClient +from pymongo.errors import ServerSelectionTimeoutError + +import unittest + + +class TestAuthRecoveringMember(unittest.TestCase): + def test_auth_recovering_member(self): + # Test that we don't attempt auth against a recovering RS member. + server = MockupDB() + server.autoresponds('ismaster', { + 'minWireVersion': 2, + 'maxWireVersion': 6, + 'ismaster': False, + 'secondary': False, + 'setName': 'rs'}) + + server.run() + self.addCleanup(server.stop) + + client = MongoClient(server.uri, + replicaSet='rs', + serverSelectionTimeoutMS=100, + socketTimeoutMS=100) + + self.addCleanup(client.close) + + # Should see there's no primary or secondary and raise selection timeout + # error. If it raises AutoReconnect we know it actually tried the + # server, and that's wrong. + with self.assertRaises(ServerSelectionTimeoutError): + client.db.authenticate('user', 'password') + +if __name__ == '__main__': + unittest.main() diff --git a/test/mockupdb/test_cluster_time.py b/test/mockupdb/test_cluster_time.py new file mode 100644 index 0000000000..fae6c3faae --- /dev/null +++ b/test/mockupdb/test_cluster_time.py @@ -0,0 +1,193 @@ +# Copyright 2017-present MongoDB, Inc. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Test $clusterTime handling.""" + +from bson import Timestamp +from mockupdb import going, MockupDB +from pymongo import (MongoClient, + InsertOne, + UpdateOne, + DeleteMany, + version_tuple) + +import unittest + + +class TestClusterTime(unittest.TestCase): + def cluster_time_conversation(self, callback, replies): + cluster_time = Timestamp(0, 0) + server = MockupDB() + + # First test all commands include $clusterTime with wire version 6. + responder = server.autoresponds( + 'ismaster', + {'minWireVersion': 0, + 'maxWireVersion': 6, + '$clusterTime': {'clusterTime': cluster_time}}) + + server.run() + self.addCleanup(server.stop) + + client = MongoClient(server.uri) + self.addCleanup(client.close) + + with going(callback, client): + for reply in replies: + request = server.receives() + self.assertIn('$clusterTime', request) + self.assertEqual(request['$clusterTime']['clusterTime'], + cluster_time) + cluster_time = Timestamp(cluster_time.time, + cluster_time.inc + 1) + reply['$clusterTime'] = {'clusterTime': cluster_time} + request.reply(reply) + + # Now test that no commands include $clusterTime with wire version 5, + # even though the isMaster reply still has $clusterTime. + server.cancel_responder(responder) + server.autoresponds('ismaster', + {'minWireVersion': 0, + 'maxWireVersion': 5, + '$clusterTime': {'clusterTime': cluster_time}}) + + client = MongoClient(server.uri) + self.addCleanup(client.close) + + with going(callback, client): + for reply in replies: + request = server.receives() + self.assertNotIn('$clusterTime', request) + request.reply(reply) + + def test_command(self): + def callback(client): + client.db.command('ping') + client.db.command('ping') + + self.cluster_time_conversation(callback, [{'ok': 1}] * 2) + + def test_bulk(self): + def callback(client): + client.db.collection.bulk_write([ + InsertOne({}), + InsertOne({}), + UpdateOne({}, {'$inc': {'x': 1}}), + DeleteMany({})]) + + self.cluster_time_conversation( + callback, + [{'ok': 1, 'nInserted': 2}, + {'ok': 1, 'nModified': 1}, + {'ok': 1, 'nDeleted': 2}]) + + batches = [ + {'cursor': {'id': 123, 'firstBatch': [{'a': 1}]}}, + {'cursor': {'id': 123, 'nextBatch': [{'a': 2}]}}, + {'cursor': {'id': 0, 'nextBatch': [{'a': 3}]}}] + + def test_cursor(self): + def callback(client): + list(client.db.collection.find()) + + self.cluster_time_conversation(callback, self.batches) + + def test_aggregate(self): + def callback(client): + list(client.db.collection.aggregate([])) + + self.cluster_time_conversation(callback, self.batches) + + def test_explain(self): + def callback(client): + client.db.collection.find().explain() + + self.cluster_time_conversation(callback, [{'ok': 1}]) + + def test_monitor(self): + cluster_time = Timestamp(0, 0) + reply = {'minWireVersion': 0, + 'maxWireVersion': 6, + '$clusterTime': {'clusterTime': cluster_time}} + + server = MockupDB() + server.run() + self.addCleanup(server.stop) + + client = MongoClient(server.uri, heartbeatFrequencyMS=500) + 
self.addCleanup(client.close) + + request = server.receives('ismaster') + # No $clusterTime in first ismaster, only in subsequent ones + self.assertNotIn('$clusterTime', request) + request.ok(reply) + + # Next exchange: client returns first clusterTime, we send the second. + request = server.receives('ismaster') + self.assertIn('$clusterTime', request) + self.assertEqual(request['$clusterTime']['clusterTime'], + cluster_time) + cluster_time = Timestamp(cluster_time.time, + cluster_time.inc + 1) + reply['$clusterTime'] = {'clusterTime': cluster_time} + request.reply(reply) + + # Third exchange: client returns second clusterTime. + request = server.receives('ismaster') + self.assertEqual(request['$clusterTime']['clusterTime'], + cluster_time) + + # Return command error with a new clusterTime. + cluster_time = Timestamp(cluster_time.time, + cluster_time.inc + 1) + error = {'ok': 0, + 'code': 211, + 'errmsg': 'Cache Reader No keys found for HMAC ...', + '$clusterTime': {'clusterTime': cluster_time}} + request.reply(error) + + # PyMongo 3.11+ closes the monitoring connection on command errors. + if version_tuple >= (3, 11, -1): + # Fourth exchange: the Monitor closes the connection and runs the + # handshake on a new connection. + request = server.receives('ismaster') + # No $clusterTime in first ismaster, only in subsequent ones + self.assertNotIn('$clusterTime', request) + + # Reply without $clusterTime. + reply.pop('$clusterTime') + request.reply(reply) + else: + # Fourth exchange: the Monitor retry attempt uses the clusterTime + # from the previous isMaster error. + request = server.receives('ismaster') + self.assertEqual(request['$clusterTime']['clusterTime'], + cluster_time) + + cluster_time = Timestamp(cluster_time.time, + cluster_time.inc + 1) + error['$clusterTime'] = {'clusterTime': cluster_time} + request.reply(error) + + # Fifth exchange: the Monitor attempt uses the clusterTime from + # the previous isMaster error. + request = server.receives('ismaster') + self.assertEqual(request['$clusterTime']['clusterTime'], + cluster_time) + request.reply(reply) + client.close() + + +if __name__ == '__main__': + unittest.main() diff --git a/test/mockupdb/test_cursor_namespace.py b/test/mockupdb/test_cursor_namespace.py new file mode 100644 index 0000000000..600f7bca6d --- /dev/null +++ b/test/mockupdb/test_cursor_namespace.py @@ -0,0 +1,122 @@ +# Copyright 2015 MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""Test list_indexes with more than one batch.""" + +from mockupdb import going, MockupDB +from pymongo import MongoClient, version_tuple + +import unittest + + +class TestCursorNamespace(unittest.TestCase): + @classmethod + def setUpClass(cls): + cls.server = MockupDB(auto_ismaster={'maxWireVersion': 6}) + cls.server.run() + cls.client = MongoClient(cls.server.uri) + + @classmethod + def tearDownClass(cls): + cls.client.close() + cls.server.stop() + + def _test_cursor_namespace(self, cursor_op, command): + with going(cursor_op) as docs: + request = self.server.receives( + **{command: 'collection', 'namespace': 'test'}) + # Respond with a different namespace. + request.reply({'cursor': { + 'firstBatch': [{'doc': 1}], + 'id': 123, + 'ns': 'different_db.different.coll'}}) + # Client uses the namespace we returned. + request = self.server.receives( + getMore=123, namespace='different_db', + collection='different.coll') + + request.reply({'cursor': { + 'nextBatch': [{'doc': 2}], + 'id': 0}}) + + self.assertEqual([{'doc': 1}, {'doc': 2}], docs()) + + def test_aggregate_cursor(self): + def op(): + return list(self.client.test.collection.aggregate([])) + self._test_cursor_namespace(op, 'aggregate') + + @unittest.skipUnless(version_tuple >= (3, 11, -1), 'Fixed in pymongo 3.11') + def test_find_cursor(self): + def op(): + return list(self.client.test.collection.find()) + self._test_cursor_namespace(op, 'find') + + def test_list_indexes(self): + def op(): + return list(self.client.test.collection.list_indexes()) + self._test_cursor_namespace(op, 'listIndexes') + + +class TestKillCursorsNamespace(unittest.TestCase): + @classmethod + @unittest.skipUnless(version_tuple >= (3, 12, -1), 'Fixed in pymongo 3.12') + def setUpClass(cls): + cls.server = MockupDB(auto_ismaster={'maxWireVersion': 6}) + cls.server.run() + cls.client = MongoClient(cls.server.uri) + + @classmethod + def tearDownClass(cls): + cls.client.close() + cls.server.stop() + + def _test_killCursors_namespace(self, cursor_op, command): + with going(cursor_op): + request = self.server.receives( + **{command: 'collection', 'namespace': 'test'}) + # Respond with a different namespace. + request.reply({'cursor': { + 'firstBatch': [{'doc': 1}], + 'id': 123, + 'ns': 'different_db.different.coll'}}) + # Client uses the namespace we returned for killCursors. + request = self.server.receives(**{ + 'killCursors': 'different.coll', + 'cursors': [123], + '$db': 'different_db'}) + request.reply({ + 'ok': 1, + 'cursorsKilled': [123], + 'cursorsNotFound': [], + 'cursorsAlive': [], + 'cursorsUnknown': []}) + + def test_aggregate_killCursor(self): + def op(): + cursor = self.client.test.collection.aggregate([], batchSize=1) + next(cursor) + cursor.close() + self._test_killCursors_namespace(op, 'aggregate') + + def test_find_killCursor(self): + def op(): + cursor = self.client.test.collection.find(batch_size=1) + next(cursor) + cursor.close() + self._test_killCursors_namespace(op, 'find') + + +if __name__ == '__main__': + unittest.main() diff --git a/test/mockupdb/test_getmore_sharded.py b/test/mockupdb/test_getmore_sharded.py new file mode 100644 index 0000000000..2b3a1fd6ce --- /dev/null +++ b/test/mockupdb/test_getmore_sharded.py @@ -0,0 +1,62 @@ +# Copyright 2015 MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Test PyMongo cursor with a sharded cluster.""" +from pymongo import MongoClient + +try: + from queue import Queue +except ImportError: + from Queue import Queue + +from mockupdb import MockupDB, going + +import unittest + + +class TestGetmoreSharded(unittest.TestCase): + def test_getmore_sharded(self): + servers = [MockupDB(), MockupDB()] + + # Collect queries to either server in one queue. + q = Queue() + for server in servers: + server.subscribe(q.put) + server.autoresponds('ismaster', ismaster=True, msg='isdbgrid', + minWireVersion=2, maxWireVersion=6) + server.run() + self.addCleanup(server.stop) + + client = MongoClient('mongodb://%s:%d,%s:%d' % ( + servers[0].host, servers[0].port, + servers[1].host, servers[1].port)) + self.addCleanup(client.close) + collection = client.db.collection + cursor = collection.find() + with going(next, cursor): + query = q.get(timeout=1) + query.replies({'cursor': {'id': 123, 'firstBatch': [{}]}}) + + # 10 batches, all getMores go to same server. + for i in range(1, 10): + with going(next, cursor): + getmore = q.get(timeout=1) + self.assertEqual(query.server, getmore.server) + cursor_id = 123 if i < 9 else 0 + getmore.replies({'cursor': {'id': cursor_id, + 'nextBatch': [{}]}}) + + +if __name__ == '__main__': + unittest.main() diff --git a/test/mockupdb/test_handshake.py b/test/mockupdb/test_handshake.py new file mode 100644 index 0000000000..9dbdec3057 --- /dev/null +++ b/test/mockupdb/test_handshake.py @@ -0,0 +1,172 @@ +# Copyright 2016 MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +from mockupdb import MockupDB, OpReply, OpMsg, absent, Command, go +from pymongo import MongoClient, version as pymongo_version, version_tuple +from pymongo.errors import OperationFailure + +import unittest + + +def _check_handshake_data(request): + assert 'client' in request + data = request['client'] + + assert data['application'] == {'name': 'my app'} + assert data['driver'] == {'name': 'PyMongo', 'version': pymongo_version} + + # Keep it simple, just check these fields exist. 
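+    # Per the MongoDB handshake spec, 'os' is a subdocument (type, name,
+    # architecture, version) and 'platform' is a free-form string such as
+    # the CPython version, so their exact contents vary by host.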
+ assert 'os' in data + assert 'platform' in data + + +class TestHandshake(unittest.TestCase): + @unittest.skipUnless(version_tuple >= (3, 4), "requires PyMongo 3.4") + def test_client_handshake_data(self): + primary, secondary = MockupDB(), MockupDB() + for server in primary, secondary: + server.run() + self.addCleanup(server.stop) + + hosts = [server.address_string for server in (primary, secondary)] + primary_response = OpReply('ismaster', True, + setName='rs', hosts=hosts, + minWireVersion=2, maxWireVersion=6) + error_response = OpReply( + 0, errmsg='Cache Reader No keys found for HMAC ...', code=211) + + secondary_response = OpReply('ismaster', False, + setName='rs', hosts=hosts, + secondary=True, + minWireVersion=2, maxWireVersion=6) + + client = MongoClient(primary.uri, + replicaSet='rs', + appname='my app', + heartbeatFrequencyMS=500) # Speed up the test. + + self.addCleanup(client.close) + + # New monitoring sockets send data during handshake. + heartbeat = primary.receives('ismaster') + _check_handshake_data(heartbeat) + heartbeat.ok(primary_response) + + heartbeat = secondary.receives('ismaster') + _check_handshake_data(heartbeat) + heartbeat.ok(secondary_response) + + # Subsequent heartbeats have no client data. + primary.receives('ismaster', 1, client=absent).ok(error_response) + secondary.receives('ismaster', 1, client=absent).ok(error_response) + + # PyMongo 3.11+ closes the monitoring connection on command errors. + if version_tuple >= (3, 11, -1): + # The heartbeat retry (on a new connection) does have client data. + heartbeat = primary.receives('ismaster') + _check_handshake_data(heartbeat) + heartbeat.ok(primary_response) + + heartbeat = secondary.receives('ismaster') + _check_handshake_data(heartbeat) + heartbeat.ok(secondary_response) + else: + # The heartbeat retry has no client data after a command failure. + primary.receives('ismaster', 1, client=absent).ok(error_response) + secondary.receives('ismaster', 1, client=absent).ok(error_response) + + # Still no client data. + primary.receives('ismaster', 1, client=absent).ok(primary_response) + secondary.receives('ismaster', 1, client=absent).ok(secondary_response) + + # After a disconnect, next ismaster has client data again. + primary.receives('ismaster', 1, client=absent).hangup() + heartbeat = primary.receives('ismaster') + _check_handshake_data(heartbeat) + heartbeat.ok(primary_response) + + secondary.autoresponds('ismaster', secondary_response) + + # Start a command, so the client opens an application socket. + future = go(client.db.command, 'whatever') + + for request in primary: + if request.matches(Command('ismaster')): + if request.client_port == heartbeat.client_port: + # This is the monitor again, keep going. + request.ok(primary_response) + else: + # Handshaking a new application socket. + _check_handshake_data(request) + request.ok(primary_response) + else: + # Command succeeds. + if version_tuple >= (3, 7): + request.assert_matches(OpMsg('whatever')) + else: + request.assert_matches(Command('whatever')) + request.ok() + assert future() + return + + @unittest.skipUnless(version_tuple >= (3, 11, -1), "requires PyMongo 3.11") + def test_client_handshake_saslSupportedMechs(self): + server = MockupDB() + server.run() + self.addCleanup(server.stop) + + primary_response = OpReply('ismaster', True, + minWireVersion=2, maxWireVersion=6) + client = MongoClient(server.uri, + username='username', + password='password') + + self.addCleanup(client.close) + + # New monitoring sockets send data during handshake. 
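+        # Monitor connections never authenticate, so this first ismaster
+        # carries no saslSupportedMechs; only the application connection
+        # handshake below includes it.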
+ heartbeat = server.receives('ismaster') + heartbeat.ok(primary_response) + + future = go(client.db.command, 'whatever') + for request in server: + if request.matches('ismaster'): + if request.client_port == heartbeat.client_port: + # This is the monitor again, keep going. + request.ok(primary_response) + else: + # Handshaking a new application socket should send + # saslSupportedMechs and speculativeAuthenticate. + self.assertEqual(request['saslSupportedMechs'], + 'admin.username') + self.assertIn( + 'saslStart', request['speculativeAuthenticate']) + auth = {'conversationId': 1, 'done': False, + 'payload': b'r=wPleNM8S5p8gMaffMDF7Py4ru9bnmmoqb0' + b'1WNPsil6o=pAvr6B1garhlwc6MKNQ93ZfFky' + b'tXdF9r,s=4dcxugMJq2P4hQaDbGXZR8uR3ei' + b'PHrSmh4uhkg==,i=15000'} + request.ok('ismaster', True, + saslSupportedMechs=['SCRAM-SHA-256'], + speculativeAuthenticate=auth, + minWireVersion=2, maxWireVersion=6) + # Authentication should immediately fail with: + # OperationFailure: Server returned an invalid nonce. + with self.assertRaises(OperationFailure): + future() + return + + +if __name__ == '__main__': + unittest.main() diff --git a/test/mockupdb/test_initial_ismaster.py b/test/mockupdb/test_initial_ismaster.py new file mode 100644 index 0000000000..c67fcbf9e1 --- /dev/null +++ b/test/mockupdb/test_initial_ismaster.py @@ -0,0 +1,46 @@ +# Copyright 2015 MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import time + +from mockupdb import MockupDB, wait_until +from pymongo import MongoClient + +import unittest + + +class TestInitialIsMaster(unittest.TestCase): + def test_initial_ismaster(self): + server = MockupDB() + server.run() + self.addCleanup(server.stop) + + start = time.time() + client = MongoClient(server.uri) + self.addCleanup(client.close) + + # A single ismaster is enough for the client to be connected. + self.assertFalse(client.nodes) + server.receives('ismaster').ok(ismaster=True, + minWireVersion=2, maxWireVersion=6) + wait_until(lambda: client.nodes, + 'update nodes', timeout=1) + + # At least 10 seconds before next heartbeat. + server.receives('ismaster').ok(ismaster=True, + minWireVersion=2, maxWireVersion=6) + self.assertGreaterEqual(time.time() - start, 10) + +if __name__ == '__main__': + unittest.main() diff --git a/test/mockupdb/test_legacy_crud.py b/test/mockupdb/test_legacy_crud.py new file mode 100755 index 0000000000..508313dbbd --- /dev/null +++ b/test/mockupdb/test_legacy_crud.py @@ -0,0 +1,126 @@ +# Copyright 2017 MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +from bson.son import SON +from mockupdb import (MockupDB, going, OpInsert, OpMsg, absent, Command, + OP_MSG_FLAGS) +from pymongo import MongoClient, WriteConcern, version_tuple + +import unittest + + +class TestLegacyCRUD(unittest.TestCase): + def test_op_insert_manipulate_false(self): + # Test three aspects of legacy insert with manipulate=False: + # 1. The return value is None, [None], or [None, None] as appropriate. + # 2. _id is not set on the passed-in document object. + # 3. _id is not sent to server. + server = MockupDB(auto_ismaster=True) + server.run() + self.addCleanup(server.stop) + + client = MongoClient(server.uri) + self.addCleanup(client.close) + + coll = client.db.get_collection('coll', write_concern=WriteConcern(w=0)) + doc = {} + with going(coll.insert, doc, manipulate=False) as future: + if version_tuple >= (3, 7): + server.receives(OpMsg(SON([ + ("insert", coll.name), + ("ordered", True), + ("writeConcern", {"w": 0}), + ("documents", [{}])]), flags=OP_MSG_FLAGS['moreToCome'])) + else: + server.receives(OpInsert({'_id': absent})) + + self.assertFalse('_id' in doc) + self.assertIsNone(future()) + + docs = [{}] # One doc in a list. + with going(coll.insert, docs, manipulate=False) as future: + if version_tuple >= (3, 7): + # PyMongo 3.7 ordered bulk w:0 writes use implicit w:1. + request = server.receives() + request.assert_matches(OpMsg(SON([ + ("insert", coll.name), + ("ordered", True), + ("documents", [{}])]), flags=0)) + request.reply({"n": 1}) + else: + server.receives(OpInsert({'_id': absent})) + + self.assertFalse('_id' in docs[0]) + self.assertEqual(future(), [None]) + + docs = [{}, {}] # Two docs. + with going(coll.insert, docs, manipulate=False) as future: + if version_tuple >= (3, 7): + # PyMongo 3.7 ordered bulk w:0 writes use implicit w:1. + request = server.receives() + request.assert_matches(OpMsg(SON([ + ("insert", coll.name), + ("ordered", True), + ("documents", [{}, {}])]), flags=0)) + request.reply({"n": 2}) + else: + server.receives(OpInsert({'_id': absent}, {'_id': absent})) + + self.assertFalse('_id' in docs[0]) + self.assertFalse('_id' in docs[1]) + self.assertEqual(future(), [None, None]) + + def test_insert_command_manipulate_false(self): + # Test same three aspects as test_op_insert_manipulate_false does, + # with the "insert" command. + server = MockupDB(auto_ismaster={'maxWireVersion': 2}) + server.run() + self.addCleanup(server.stop) + + client = MongoClient(server.uri) + self.addCleanup(client.close) + + doc = {} + with going(client.db.coll.insert, doc, manipulate=False) as future: + r = server.receives(Command("insert", "coll", documents=[{}])) + # MockupDB doesn't understand "absent" in subdocuments yet. + self.assertFalse('_id' in r.doc['documents'][0]) + r.ok() + + self.assertFalse('_id' in doc) + self.assertIsNone(future()) + + docs = [{}] # One doc in a list. + with going(client.db.coll.insert, docs, manipulate=False) as future: + r = server.receives(Command("insert", "coll", documents=[{}])) + self.assertFalse('_id' in r.doc['documents'][0]) + r.ok() + + self.assertFalse('_id' in docs[0]) + self.assertEqual(future(), [None]) + + docs = [{}, {}] # Two docs. 
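+        # Same contract for a batch: neither document may gain an _id,
+        # locally or on the wire, and the result is [None, None].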
+ with going(client.db.coll.insert, docs, manipulate=False) as future: + r = server.receives(Command("insert", "coll", documents=[{}, {}])) + self.assertFalse('_id' in r.doc['documents'][0]) + self.assertFalse('_id' in r.doc['documents'][1]) + r.ok() + + self.assertFalse('_id' in docs[0]) + self.assertFalse('_id' in docs[1]) + self.assertEqual(future(), [None, None]) + + +if __name__ == '__main__': + unittest.main() diff --git a/test/mockupdb/test_list_indexes.py b/test/mockupdb/test_list_indexes.py new file mode 100644 index 0000000000..7483e80df2 --- /dev/null +++ b/test/mockupdb/test_list_indexes.py @@ -0,0 +1,78 @@ +# Copyright 2015 MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Test list_indexes with more than one batch.""" + +from bson import SON + +from mockupdb import going, MockupDB, OpGetMore +from pymongo import MongoClient + +import unittest + + +class TestListIndexes(unittest.TestCase): + def test_list_indexes_opquery(self): + server = MockupDB(auto_ismaster={'maxWireVersion': 3}) + server.run() + self.addCleanup(server.stop) + client = MongoClient(server.uri) + self.addCleanup(client.close) + with going(client.test.collection.list_indexes) as cursor: + request = server.receives( + listIndexes='collection', namespace='test') + request.reply({'cursor': { + 'firstBatch': [{'name': 'index_0'}], + 'id': 123}}) + + with going(list, cursor()) as indexes: + request = server.receives(OpGetMore, + namespace='test.collection', + cursor_id=123) + + request.reply([{'name': 'index_1'}], cursor_id=0) + + self.assertEqual([{'name': 'index_0'}, {'name': 'index_1'}], indexes()) + for index_info in indexes(): + self.assertIsInstance(index_info, SON) + + def test_list_indexes_command(self): + server = MockupDB(auto_ismaster={'maxWireVersion': 6}) + server.run() + self.addCleanup(server.stop) + client = MongoClient(server.uri) + self.addCleanup(client.close) + with going(client.test.collection.list_indexes) as cursor: + request = server.receives( + listIndexes='collection', namespace='test') + request.reply({'cursor': { + 'firstBatch': [{'name': 'index_0'}], + 'id': 123}}) + + with going(list, cursor()) as indexes: + request = server.receives(getMore=123, + namespace='test', + collection='collection') + + request.reply({'cursor': { + 'nextBatch': [{'name': 'index_1'}], + 'id': 0}}) + + self.assertEqual([{'name': 'index_0'}, {'name': 'index_1'}], indexes()) + for index_info in indexes(): + self.assertIsInstance(index_info, SON) + + +if __name__ == '__main__': + unittest.main() diff --git a/test/mockupdb/test_max_staleness.py b/test/mockupdb/test_max_staleness.py new file mode 100644 index 0000000000..89d17a133f --- /dev/null +++ b/test/mockupdb/test_max_staleness.py @@ -0,0 +1,67 @@ +# Copyright 2016 MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from mockupdb import MockupDB, going +from pymongo import MongoClient + +import unittest + + +class TestMaxStalenessMongos(unittest.TestCase): + def test_mongos(self): + mongos = MockupDB() + mongos.autoresponds('ismaster', maxWireVersion=5, + ismaster=True, msg='isdbgrid') + mongos.run() + self.addCleanup(mongos.stop) + + # No maxStalenessSeconds. + uri = 'mongodb://localhost:%d/?readPreference=secondary' % mongos.port + + client = MongoClient(uri) + self.addCleanup(client.close) + with going(client.db.coll.find_one) as future: + request = mongos.receives() + self.assertNotIn( + 'maxStalenessSeconds', + request.doc['$readPreference']) + + self.assertTrue(request.slave_okay) + request.ok(cursor={'firstBatch': [], 'id': 0}) + + # find_one succeeds with no result. + self.assertIsNone(future()) + + # Set maxStalenessSeconds to 1. Client has no minimum with mongos, + # we let mongos enforce the 90-second minimum and return an error: + # SERVER-27146. + uri = 'mongodb://localhost:%d/?readPreference=secondary' \ + '&maxStalenessSeconds=1' % mongos.port + + client = MongoClient(uri) + self.addCleanup(client.close) + with going(client.db.coll.find_one) as future: + request = mongos.receives() + self.assertEqual( + 1, + request.doc['$readPreference']['maxStalenessSeconds']) + + self.assertTrue(request.slave_okay) + request.ok(cursor={'firstBatch': [], 'id': 0}) + + self.assertIsNone(future()) + + +if __name__ == '__main__': + unittest.main() diff --git a/test/mockupdb/test_mixed_version_sharded.py b/test/mockupdb/test_mixed_version_sharded.py new file mode 100644 index 0000000000..9f57dd0f43 --- /dev/null +++ b/test/mockupdb/test_mixed_version_sharded.py @@ -0,0 +1,89 @@ +# Copyright 2015 MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Test PyMongo with a mixed-version cluster.""" + +import time + +try: + from queue import Queue +except ImportError: + from Queue import Queue + +from mockupdb import MockupDB, go +from pymongo import MongoClient + +import unittest +from operations import upgrades + + +class TestMixedVersionSharded(unittest.TestCase): + def setup_server(self, upgrade): + self.mongos_old, self.mongos_new = MockupDB(), MockupDB() + + # Collect queries to either server in one queue. + self.q = Queue() + for server in self.mongos_old, self.mongos_new: + server.subscribe(self.q.put) + server.autoresponds('getlasterror') + server.run() + self.addCleanup(server.stop) + + # Max wire version is too old for the upgraded operation. 
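+        # (upgrade.wire_version is the first wire version that supports the
+        # operation's new form, so advertising one less forces the client
+        # to send the old form to this mongos.)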
+ self.mongos_old.autoresponds('ismaster', ismaster=True, msg='isdbgrid', + maxWireVersion=upgrade.wire_version - 1) + + # Up-to-date max wire version. + self.mongos_new.autoresponds('ismaster', ismaster=True, msg='isdbgrid', + maxWireVersion=upgrade.wire_version) + + self.mongoses_uri = 'mongodb://%s,%s' % (self.mongos_old.address_string, + self.mongos_new.address_string) + + self.client = MongoClient(self.mongoses_uri) + + def tearDown(self): + if hasattr(self, 'client') and self.client: + self.client.close() + + +def create_mixed_version_sharded_test(upgrade): + def test(self): + self.setup_server(upgrade) + start = time.time() + servers_used = set() + while len(servers_used) < 2: + go(upgrade.function, self.client) + request = self.q.get(timeout=1) + servers_used.add(request.server) + request.assert_matches(upgrade.old + if request.server is self.mongos_old + else upgrade.new) + if time.time() > start + 10: + self.fail('never used both mongoses') + return test + + +def generate_mixed_version_sharded_tests(): + for upgrade in upgrades: + test = create_mixed_version_sharded_test(upgrade) + test_name = 'test_%s' % upgrade.name.replace(' ', '_') + test.__name__ = test_name + setattr(TestMixedVersionSharded, test_name, test) + + +generate_mixed_version_sharded_tests() + +if __name__ == '__main__': + unittest.main() diff --git a/test/mockupdb/test_mongos_command_read_mode.py b/test/mockupdb/test_mongos_command_read_mode.py new file mode 100644 index 0000000000..1fe2ea5869 --- /dev/null +++ b/test/mockupdb/test_mongos_command_read_mode.py @@ -0,0 +1,125 @@ +# Copyright 2015 MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import itertools + +from bson import SON +from mockupdb import MockupDB, going +from pymongo import MongoClient, ReadPreference +from pymongo.read_preferences import (make_read_preference, + read_pref_mode_from_name, + _MONGOS_MODES) + +import unittest +from operations import operations + + +class TestMongosCommandReadMode(unittest.TestCase): + def test_aggregate(self): + server = MockupDB() + server.autoresponds('ismaster', ismaster=True, msg='isdbgrid', + minWireVersion=2, maxWireVersion=5) + self.addCleanup(server.stop) + server.run() + + client = MongoClient(server.uri) + self.addCleanup(client.close) + collection = client.test.collection + with going(collection.aggregate, []): + command = server.receives(aggregate='collection', pipeline=[]) + self.assertFalse(command.slave_ok, 'SlaveOkay set') + self.assertNotIn('$readPreference', command) + command.ok(result=[{}]) + + secondary_collection = collection.with_options( + read_preference=ReadPreference.SECONDARY) + + with going(secondary_collection.aggregate, []): + command = server.receives( + {'$query': SON([('aggregate', 'collection'), + ('pipeline', []), + ('cursor', {})]), + '$readPreference': {'mode': 'secondary'}}) + command.ok(result=[{}]) + self.assertTrue(command.slave_ok, 'SlaveOkay not set') + + +def create_mongos_read_mode_test(mode, operation): + def test(self): + server = MockupDB() + self.addCleanup(server.stop) + server.run() + server.autoresponds('ismaster', ismaster=True, msg='isdbgrid', + minWireVersion=2, maxWireVersion=5) + + pref = make_read_preference(read_pref_mode_from_name(mode), + tag_sets=None) + + client = MongoClient(server.uri, read_preference=pref) + self.addCleanup(client.close) + with going(operation.function, client) as future: + request = server.receive() + request.reply(operation.reply) + + future() # No error. + + if operation.op_type == 'always-use-secondary': + self.assertEqual(ReadPreference.SECONDARY.document, + request.doc.get('$readPreference')) + slave_ok = mode != 'primary' + self.assertIn('$query', request.doc) + elif operation.op_type == 'must-use-primary': + self.assertNotIn('$readPreference', request) + self.assertNotIn('$query', request.doc) + slave_ok = False + elif operation.op_type == 'may-use-secondary': + slave_ok = mode != 'primary' + if mode in ('primary', 'secondaryPreferred'): + self.assertNotIn('$readPreference', request) + self.assertNotIn('$query', request.doc) + else: + self.assertEqual(pref.document, + request.doc.get('$readPreference')) + self.assertIn('$query', request.doc) + else: + self.fail('unrecognized op_type %r' % operation.op_type) + + if slave_ok: + self.assertTrue(request.slave_ok, 'SlaveOkay not set') + else: + self.assertFalse(request.slave_ok, 'SlaveOkay set') + + return test + + +def generate_mongos_read_mode_tests(): + matrix = itertools.product(_MONGOS_MODES, operations) + + for entry in matrix: + mode, operation = entry + if mode == 'primary' and operation.op_type == 'always-use-secondary': + # Skip something like command('foo', read_preference=SECONDARY). 
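+            # (Such an operation hard-codes a secondary read preference,
+            # which contradicts a client-level mode of 'primary'.)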
+ continue + test = create_mongos_read_mode_test(mode, operation) + test_name = 'test_%s_with_mode_%s' % ( + operation.name.replace(' ', '_'), mode) + test.__name__ = test_name + setattr(TestMongosCommandReadMode, test_name, test) + + +generate_mongos_read_mode_tests() + + +if __name__ == '__main__': + unittest.main() diff --git a/test/mockupdb/test_network_disconnect_primary.py b/test/mockupdb/test_network_disconnect_primary.py new file mode 100755 index 0000000000..1df5febb78 --- /dev/null +++ b/test/mockupdb/test_network_disconnect_primary.py @@ -0,0 +1,87 @@ +# Copyright 2015 MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +try: + from queue import Queue +except ImportError: + from Queue import Queue + +from mockupdb import MockupDB, wait_until, OpReply, going, Future +from pymongo.errors import ConnectionFailure +from pymongo.topology_description import TOPOLOGY_TYPE +from pymongo import MongoClient + +import unittest + + +class TestNetworkDisconnectPrimary(unittest.TestCase): + def test_network_disconnect_primary(self): + # Application operation fails against primary. Test that topology + # type changes from ReplicaSetWithPrimary to ReplicaSetNoPrimary. + # http://bit.ly/1B5ttuL + primary, secondary = servers = [MockupDB() for _ in range(2)] + for server in servers: + server.run() + self.addCleanup(server.stop) + + hosts = [server.address_string for server in servers] + primary_response = OpReply(ismaster=True, setName='rs', hosts=hosts, + minWireVersion=2, maxWireVersion=6) + primary.autoresponds('ismaster', primary_response) + secondary.autoresponds( + 'ismaster', + ismaster=False, secondary=True, setName='rs', hosts=hosts, + minWireVersion=2, maxWireVersion=6) + + client = MongoClient(primary.uri, replicaSet='rs') + self.addCleanup(client.close) + wait_until(lambda: client.primary == primary.address, + 'discover primary') + + topology = client._topology + self.assertEqual(TOPOLOGY_TYPE.ReplicaSetWithPrimary, + topology.description.topology_type) + + # Open a socket in the application pool (calls ismaster). + with going(client.db.command, 'buildinfo'): + primary.receives('buildinfo').ok() + + # The primary hangs replying to ismaster. + ismaster_future = Future() + primary.autoresponds('ismaster', + lambda r: r.ok(ismaster_future.result())) + + # Network error on application operation. + with self.assertRaises(ConnectionFailure): + with going(client.db.command, 'buildinfo'): + primary.receives('buildinfo').hangup() + + # Topology type is updated. + self.assertEqual(TOPOLOGY_TYPE.ReplicaSetNoPrimary, + topology.description.topology_type) + + # Let ismasters through again. + ismaster_future.set_result(primary_response) + + # Demand a primary. 
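+        # (The buildinfo command blocks in server selection until the
+        # released ismaster responses restore a primary to the topology.)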
+        with going(client.db.command, 'buildinfo'):
+            wait_until(lambda: client.primary == primary.address,
+                       'rediscover primary')
+            primary.receives('buildinfo').ok()
+
+        self.assertEqual(TOPOLOGY_TYPE.ReplicaSetWithPrimary,
+                         topology.description.topology_type)
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/test/mockupdb/test_op_msg.py b/test/mockupdb/test_op_msg.py
new file mode 100755
index 0000000000..dc574226bc
--- /dev/null
+++ b/test/mockupdb/test_op_msg.py
@@ -0,0 +1,340 @@
+# Copyright 2018-present MongoDB, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from collections import namedtuple
+
+from mockupdb import MockupDB, going, OpMsg, OpMsgReply, OP_MSG_FLAGS
+from pymongo import MongoClient, WriteConcern, version_tuple
+from pymongo.operations import InsertOne, UpdateOne, DeleteOne
+from pymongo.cursor import CursorType
+
+import unittest
+
+
+Operation = namedtuple(
+    'Operation',
+    ['name', 'function', 'request', 'reply'])
+
+operations = [
+    Operation(
+        'find_one',
+        lambda coll: coll.find_one({}),
+        request=OpMsg({"find": "coll"}, flags=0),
+        reply={'ok': 1, 'cursor': {'firstBatch': [], 'id': 0}}),
+    Operation(
+        'aggregate',
+        lambda coll: coll.aggregate([]),
+        request=OpMsg({"aggregate": "coll"}, flags=0),
+        reply={'ok': 1, 'cursor': {'firstBatch': [], 'id': 0}}),
+    Operation(
+        'insert_one',
+        lambda coll: coll.insert_one({}),
+        request=OpMsg({"insert": "coll"}, flags=0),
+        reply={'ok': 1, 'n': 1}),
+    Operation(
+        'insert_one-w0',
+        lambda coll: coll.with_options(
+            write_concern=WriteConcern(w=0)).insert_one({}),
+        request=OpMsg({"insert": "coll"}, flags=OP_MSG_FLAGS['moreToCome']),
+        reply=None),
+    Operation(
+        'insert_many',
+        lambda coll: coll.insert_many([{}, {}, {}]),
+        request=OpMsg({"insert": "coll"}, flags=0),
+        reply={'ok': 1, 'n': 3}),
+    Operation(
+        'insert_many-w0',
+        lambda coll: coll.with_options(
+            write_concern=WriteConcern(w=0)).insert_many([{}, {}, {}]),
+        request=OpMsg({"insert": "coll"}, flags=0),
+        reply={'ok': 1, 'n': 3}),
+    Operation(
+        'insert_many-w0-unordered',
+        lambda coll: coll.with_options(
+            write_concern=WriteConcern(w=0)).insert_many(
+                [{}, {}, {}], ordered=False),
+        request=OpMsg({"insert": "coll"}, flags=OP_MSG_FLAGS['moreToCome']),
+        reply=None),
+    Operation(
+        'replace_one',
+        lambda coll: coll.replace_one({"_id": 1}, {"new": 1}),
+        request=OpMsg({"update": "coll"}, flags=0),
+        reply={'ok': 1, 'n': 1, 'nModified': 1}),
+    Operation(
+        'replace_one-w0',
+        lambda coll: coll.with_options(
+            write_concern=WriteConcern(w=0)).replace_one({"_id": 1},
+                                                         {"new": 1}),
+        request=OpMsg({"update": "coll"}, flags=OP_MSG_FLAGS['moreToCome']),
+        reply=None),
+    Operation(
+        'update_one',
+        lambda coll: coll.update_one({"_id": 1}, {"$set": {"new": 1}}),
+        request=OpMsg({"update": "coll"}, flags=0),
+        reply={'ok': 1, 'n': 1, 'nModified': 1}),
+    Operation(
+        'update_one-w0',
+        lambda coll: coll.with_options(
+            write_concern=WriteConcern(w=0)).update_one({"_id": 1},
+                                                        {"$set": {"new": 1}}),
+        request=OpMsg({"update": "coll"},
flags=OP_MSG_FLAGS['moreToCome']), + reply=None), + Operation( + 'update_many', + lambda coll: coll.update_many({"_id": 1}, {"$set": {"new": 1}}), + request=OpMsg({"update": "coll"}, flags=0), + reply={'ok': 1, 'n': 1, 'nModified': 1}), + Operation( + 'update_many-w0', + lambda coll: coll.with_options( + write_concern=WriteConcern(w=0)).update_many({"_id": 1}, + {"$set": {"new": 1}}), + request=OpMsg({"update": "coll"}, flags=OP_MSG_FLAGS['moreToCome']), + reply=None), + Operation( + 'delete_one', + lambda coll: coll.delete_one({"a": 1}), + request=OpMsg({"delete": "coll"}, flags=0), + reply={'ok': 1, 'n': 1}), + Operation( + 'delete_one-w0', + lambda coll: coll.with_options( + write_concern=WriteConcern(w=0)).delete_one({"a": 1}), + request=OpMsg({"delete": "coll"}, flags=OP_MSG_FLAGS['moreToCome']), + reply=None), + Operation( + 'delete_many', + lambda coll: coll.delete_many({"a": 1}), + request=OpMsg({"delete": "coll"}, flags=0), + reply={'ok': 1, 'n': 1}), + Operation( + 'delete_many-w0', + lambda coll: coll.with_options( + write_concern=WriteConcern(w=0)).delete_many({"a": 1}), + request=OpMsg({"delete": "coll"}, flags=OP_MSG_FLAGS['moreToCome']), + reply=None), + # Legacy methods + Operation( + 'insert', + lambda coll: coll.insert({}), + request=OpMsg({"insert": "coll"}, flags=0), + reply={'ok': 1, 'n': 1}), + Operation( + 'insert-w0', + lambda coll: coll.with_options( + write_concern=WriteConcern(w=0)).insert({}), + request=OpMsg({"insert": "coll"}, flags=OP_MSG_FLAGS['moreToCome']), + reply=None), + Operation( + 'insert-w0-argument', + lambda coll: coll.insert({}, w=0), + request=OpMsg({"insert": "coll"}, flags=OP_MSG_FLAGS['moreToCome']), + reply=None), + Operation( + 'update', + lambda coll: coll.update({"_id": 1}, {"new": 1}), + request=OpMsg({"update": "coll"}, flags=0), + reply={'ok': 1, 'n': 1, 'nModified': 1}), + Operation( + 'update-w0', + lambda coll: coll.with_options( + write_concern=WriteConcern(w=0)).update({"_id": 1}, {"new": 1}), + request=OpMsg({"update": "coll"}, flags=OP_MSG_FLAGS['moreToCome']), + reply=None), + Operation( + 'update-w0-argument', + lambda coll: coll.update({"_id": 1}, {"new": 1}, w=0), + request=OpMsg({"update": "coll"}, flags=OP_MSG_FLAGS['moreToCome']), + reply=None), + Operation( + 'remove', + lambda coll: coll.remove({"_id": 1}), + request=OpMsg({"delete": "coll"}, flags=0), + reply={'ok': 1, 'n': 1}), + Operation( + 'remove-w0', + lambda coll: coll.with_options( + write_concern=WriteConcern(w=0)).remove({"_id": 1}), + request=OpMsg({"delete": "coll"}, flags=OP_MSG_FLAGS['moreToCome']), + reply=None), + Operation( + 'remove-w0-argument', + lambda coll: coll.remove({"_id": 1}, w=0), + request=OpMsg({"delete": "coll"}, flags=OP_MSG_FLAGS['moreToCome']), + reply=None), + Operation( + 'bulk_write_insert', + lambda coll: coll.bulk_write([InsertOne({}), InsertOne({})]), + request=OpMsg({"insert": "coll"}, flags=0), + reply={'ok': 1, 'n': 2}), + Operation( + 'bulk_write_insert-w0', + lambda coll: coll.with_options( + write_concern=WriteConcern(w=0)).bulk_write([InsertOne({}), + InsertOne({})]), + request=OpMsg({"insert": "coll"}, flags=0), + reply={'ok': 1, 'n': 2}), + Operation( + 'bulk_write_insert-w0-unordered', + lambda coll: coll.with_options( + write_concern=WriteConcern(w=0)).bulk_write( + [InsertOne({}), InsertOne({})], ordered=False), + request=OpMsg({"insert": "coll"}, flags=OP_MSG_FLAGS['moreToCome']), + reply=None), + Operation( + 'bulk_write_update', + lambda coll: coll.bulk_write([ + UpdateOne({"_id": 1}, {"$set": {"new": 1}}), + 
UpdateOne({"_id": 2}, {"$set": {"new": 1}})]), + request=OpMsg({"update": "coll"}, flags=0), + reply={'ok': 1, 'n': 2, 'nModified': 2}), + Operation( + 'bulk_write_update-w0', + lambda coll: coll.with_options( + write_concern=WriteConcern(w=0)).bulk_write([ + UpdateOne({"_id": 1}, {"$set": {"new": 1}}), + UpdateOne({"_id": 2}, {"$set": {"new": 1}})]), + request=OpMsg({"update": "coll"}, flags=0), + reply={'ok': 1, 'n': 2, 'nModified': 2}), + Operation( + 'bulk_write_update-w0-unordered', + lambda coll: coll.with_options( + write_concern=WriteConcern(w=0)).bulk_write([ + UpdateOne({"_id": 1}, {"$set": {"new": 1}}), + UpdateOne({"_id": 2}, {"$set": {"new": 1}})], ordered=False), + request=OpMsg({"update": "coll"}, flags=OP_MSG_FLAGS['moreToCome']), + reply=None), + Operation( + 'bulk_write_delete', + lambda coll: coll.bulk_write([ + DeleteOne({"_id": 1}), DeleteOne({"_id": 2})]), + request=OpMsg({"delete": "coll"}, flags=0), + reply={'ok': 1, 'n': 2}), + Operation( + 'bulk_write_delete-w0', + lambda coll: coll.with_options( + write_concern=WriteConcern(w=0)).bulk_write([ + DeleteOne({"_id": 1}), DeleteOne({"_id": 2})]), + request=OpMsg({"delete": "coll"}, flags=0), + reply={'ok': 1, 'n': 2}), + Operation( + 'bulk_write_delete-w0-unordered', + lambda coll: coll.with_options( + write_concern=WriteConcern(w=0)).bulk_write([ + DeleteOne({"_id": 1}), DeleteOne({"_id": 2})], ordered=False), + request=OpMsg({"delete": "coll"}, flags=OP_MSG_FLAGS['moreToCome']), + reply=None), +] + +operations_312 = [ + Operation( + 'find_raw_batches', + lambda coll: list(coll.find_raw_batches({})), + request=[ + OpMsg({"find": "coll"}, flags=0), + OpMsg({"getMore": 7}, flags=0), + ], + reply=[ + {'ok': 1, 'cursor': {'firstBatch': [{}], 'id': 7}}, + {'ok': 1, 'cursor': {'nextBatch': [{}], 'id': 0}}, + ]), + Operation( + 'aggregate_raw_batches', + lambda coll: list(coll.aggregate_raw_batches([])), + request=[ + OpMsg({"aggregate": "coll"}, flags=0), + OpMsg({"getMore": 7}, flags=0), + ], + reply=[ + {'ok': 1, 'cursor': {'firstBatch': [], 'id': 7}}, + {'ok': 1, 'cursor': {'nextBatch': [{}], 'id': 0}}, + ]), + Operation( + 'find_exhaust_cursor', + lambda coll: list(coll.find({}, cursor_type=CursorType.EXHAUST)), + request=[ + OpMsg({"find": "coll"}, flags=0), + OpMsg({"getMore": 7}, flags=1 << 16), + ], + reply=[ + OpMsgReply( + {'ok': 1, 'cursor': {'firstBatch': [{}], 'id': 7}}, flags=0), + OpMsgReply( + {'ok': 1, 'cursor': {'nextBatch': [{}], 'id': 7}}, flags=2), + OpMsgReply( + {'ok': 1, 'cursor': {'nextBatch': [{}], 'id': 7}}, flags=2), + OpMsgReply( + {'ok': 1, 'cursor': {'nextBatch': [{}], 'id': 0}}, flags=0), + ]), +] + + +class TestOpMsg(unittest.TestCase): + + @classmethod + def setUpClass(cls): + cls.server = MockupDB(auto_ismaster=True, max_wire_version=8) + cls.server.run() + cls.client = MongoClient(cls.server.uri) + + @classmethod + def tearDownClass(cls): + cls.server.stop() + cls.client.close() + + def _test_operation(self, op): + coll = self.client.db.coll + with going(op.function, coll) as future: + expected_requests = op.request + replies = op.reply + if not isinstance(op.request, list): + expected_requests = [op.request] + replies = [op.reply] + + for expected_request in expected_requests: + request = self.server.receives() + request.assert_matches(expected_request) + reply = None + if replies: + reply = replies.pop(0) + if reply is not None: + request.reply(reply) + for reply in replies: + if reply is not None: + request.reply(reply) + + future() # No error. 
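The 'find_exhaust_cursor' entry above leans on two OP_MSG flag bits from the wire protocol: the getMore request sets exhaustAllowed, and the server streams follow-on replies flagged moreToCome until a final reply without the flag (and cursor id 0) ends the stream. For reference, the flag values the entry hard-codes as 1 << 16 and 2 (constant names here are illustrative, not mockupdb's):

# OP_MSG flagBits, per the MongoDB wire protocol.
CHECKSUM_PRESENT = 1 << 0   # A CRC-32C checksum trails the message.
MORE_TO_COME = 1 << 1       # Another message follows with no request.
EXHAUST_ALLOWED = 1 << 16   # Request flag: moreToCome replies accepted.

assert MORE_TO_COME == 2            # The flags=2 on the streamed replies.
assert EXHAUST_ALLOWED == 65536     # The flags=1 << 16 on the getMore.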
+ + +def operation_test(op, decorator): + @decorator() + def test(self): + self._test_operation(op) + return test + + +def create_tests(ops, decorator): + for op in ops: + test_name = "test_op_msg_%s" % (op.name,) + setattr(TestOpMsg, test_name, operation_test(op, decorator)) + + +create_tests(operations, lambda: unittest.skipUnless( + version_tuple >= (3, 7), "requires PyMongo 3.7")) + +create_tests(operations_312, lambda: unittest.skipUnless( + version_tuple >= (3, 12), "requires PyMongo 3.12")) + +if __name__ == '__main__': + unittest.main() diff --git a/test/mockupdb/test_op_msg_read_preference.py b/test/mockupdb/test_op_msg_read_preference.py new file mode 100644 index 0000000000..925a00f6a5 --- /dev/null +++ b/test/mockupdb/test_op_msg_read_preference.py @@ -0,0 +1,197 @@ +# Copyright 2018-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import copy +import itertools + +from mockupdb import MockupDB, going, CommandBase +from pymongo import MongoClient, ReadPreference, version_tuple +from pymongo.read_preferences import (make_read_preference, + read_pref_mode_from_name, + _MONGOS_MODES) + +import unittest +from operations import operations + + +class OpMsgReadPrefBase(unittest.TestCase): + single_mongod = False + + @classmethod + def setUpClass(cls): + super(OpMsgReadPrefBase, cls).setUpClass() + if version_tuple < (3, 7): + raise unittest.SkipTest("requires PyMongo 3.7") + + @classmethod + def add_test(cls, mode, test_name, test): + setattr(cls, test_name, test) + + def setup_client(self, read_preference): + client = MongoClient(self.primary.uri, + read_preference=read_preference) + self.addCleanup(client.close) + return client + + +class TestOpMsgMongos(OpMsgReadPrefBase): + + @classmethod + def setUpClass(cls): + super(TestOpMsgMongos, cls).setUpClass() + auto_ismaster = { + 'ismaster': True, + 'msg': 'isdbgrid', # Mongos. 
+ 'minWireVersion': 2, + 'maxWireVersion': 6, + } + cls.primary = MockupDB(auto_ismaster=auto_ismaster) + cls.primary.run() + cls.secondary = cls.primary + + @classmethod + def tearDownClass(cls): + cls.primary.stop() + super(TestOpMsgMongos, cls).tearDownClass() + + +class TestOpMsgReplicaSet(OpMsgReadPrefBase): + + @classmethod + def setUpClass(cls): + super(TestOpMsgReplicaSet, cls).setUpClass() + cls.primary, cls.secondary = MockupDB(), MockupDB() + for server in cls.primary, cls.secondary: + server.run() + + hosts = [server.address_string + for server in (cls.primary, cls.secondary)] + + primary_ismaster = { + 'ismaster': True, + 'setName': 'rs', + 'hosts': hosts, + 'minWireVersion': 2, + 'maxWireVersion': 6, + } + cls.primary.autoresponds(CommandBase('ismaster'), primary_ismaster) + secondary_ismaster = copy.copy(primary_ismaster) + secondary_ismaster['ismaster'] = False + secondary_ismaster['secondary'] = True + cls.secondary.autoresponds(CommandBase('ismaster'), secondary_ismaster) + + @classmethod + def tearDownClass(cls): + for server in cls.primary, cls.secondary: + server.stop() + super(TestOpMsgReplicaSet, cls).tearDownClass() + + @classmethod + def add_test(cls, mode, test_name, test): + # Skip nearest tests since we don't know if we will select the primary + # or secondary. + if mode != 'nearest': + setattr(cls, test_name, test) + + def setup_client(self, read_preference): + client = MongoClient(self.primary.uri, + replicaSet='rs', + read_preference=read_preference) + + # Run a command on a secondary to discover the topology. This ensures + # that secondaryPreferred commands will select the secondary. + client.admin.command('ismaster', + read_preference=ReadPreference.SECONDARY) + self.addCleanup(client.close) + return client + + +class TestOpMsgSingle(OpMsgReadPrefBase): + single_mongod = True + + @classmethod + def setUpClass(cls): + super(TestOpMsgSingle, cls).setUpClass() + auto_ismaster = { + 'ismaster': True, + 'minWireVersion': 2, + 'maxWireVersion': 6, + } + cls.primary = MockupDB(auto_ismaster=auto_ismaster) + cls.primary.run() + cls.secondary = cls.primary + + @classmethod + def tearDownClass(cls): + cls.primary.stop() + super(TestOpMsgSingle, cls).tearDownClass() + + +def create_op_msg_read_mode_test(mode, operation): + def test(self): + pref = make_read_preference(read_pref_mode_from_name(mode), + tag_sets=None) + + client = self.setup_client(read_preference=pref) + + if operation.op_type == 'always-use-secondary': + expected_server = self.secondary + expected_pref = ReadPreference.SECONDARY + elif operation.op_type == 'must-use-primary': + expected_server = self.primary + expected_pref = ReadPreference.PRIMARY + elif operation.op_type == 'may-use-secondary': + if mode in ('primary', 'primaryPreferred'): + expected_server = self.primary + else: + expected_server = self.secondary + expected_pref = pref + else: + self.fail('unrecognized op_type %r' % operation.op_type) + + # For single mongod we send primaryPreferred instead of primary. + if expected_pref == ReadPreference.PRIMARY and self.single_mongod: + expected_pref = ReadPreference.PRIMARY_PREFERRED + + with going(operation.function, client) as future: + request = expected_server.receive() + request.reply(operation.reply) + + future() # No error. 
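+        # With OP_MSG the read preference rides in a top-level
+        # $readPreference field; the legacy $query wrapper must never
+        # appear (both asserted below).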
+ + self.assertEqual(expected_pref.document, + request.doc.get('$readPreference')) + self.assertNotIn('$query', request.doc) + + return test + + +def generate_op_msg_read_mode_tests(): + matrix = itertools.product(_MONGOS_MODES, operations) + + for entry in matrix: + mode, operation = entry + test = create_op_msg_read_mode_test(mode, operation) + test_name = 'test_%s_with_mode_%s' % ( + operation.name.replace(' ', '_'), mode) + test.__name__ = test_name + for cls in TestOpMsgMongos, TestOpMsgReplicaSet, TestOpMsgSingle: + cls.add_test(mode, test_name, test) + + +generate_op_msg_read_mode_tests() + + +if __name__ == '__main__': + unittest.main() diff --git a/test/mockupdb/test_projection.py b/test/mockupdb/test_projection.py new file mode 100644 index 0000000000..0b74c22cbd --- /dev/null +++ b/test/mockupdb/test_projection.py @@ -0,0 +1,56 @@ +# Copyright 2018-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""PyMongo shouldn't append projection fields to "find" command, PYTHON-1479.""" + +from bson import SON +from mockupdb import Command, MockupDB, OpQuery, going +from pymongo import MongoClient + +import unittest + + +class TestProjection(unittest.TestCase): + def test_projection(self): + q = {} + fields = {'foo': True} + + # OP_QUERY, + server = MockupDB(auto_ismaster=True, + min_wire_version=0, max_wire_version=3) + server.run() + self.addCleanup(server.stop) + client = MongoClient(server.uri) + cursor = client.test.collection.find(q, fields) + with going(next, cursor): + request = server.receives(OpQuery(q, fields=fields)) + request.reply([], cursor_id=0) + + # "find" command. + server = MockupDB(auto_ismaster=True, + min_wire_version=0, max_wire_version=4) + server.run() + self.addCleanup(server.stop) + client = MongoClient(server.uri) + cursor = client.test.collection.find(q, fields) + cmd = Command(SON([('find', 'collection'), ('filter', q), + ('projection', fields)])) + + with going(next, cursor): + request = server.receives(cmd) + request.ok(cursor={'id': 0, 'firstBatch': []}) + + +if __name__ == '__main__': + unittest.main() diff --git a/test/mockupdb/test_query_read_pref_sharded.py b/test/mockupdb/test_query_read_pref_sharded.py new file mode 100644 index 0000000000..033cdeff19 --- /dev/null +++ b/test/mockupdb/test_query_read_pref_sharded.py @@ -0,0 +1,107 @@ +# Copyright 2015 MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""Test PyMongo query and read preference with a sharded cluster.""" + +from bson import SON +from pymongo import MongoClient, version_tuple +from pymongo.read_preferences import (Primary, + PrimaryPreferred, + Secondary, + SecondaryPreferred, + Nearest) +from mockupdb import MockupDB, going, Command, OpMsg + +import unittest + + +class TestQueryAndReadModeSharded(unittest.TestCase): + def test_query_and_read_mode_sharded_op_query(self): + server = MockupDB() + server.autoresponds('ismaster', ismaster=True, msg='isdbgrid', + minWireVersion=2, maxWireVersion=5) + server.run() + self.addCleanup(server.stop) + + client = MongoClient(server.uri) + self.addCleanup(client.close) + + modes_without_query = ( + Primary(), + SecondaryPreferred(),) + + modes_with_query = ( + PrimaryPreferred(), + Secondary(), + Nearest(), + SecondaryPreferred([{'tag': 'value'}]),) + + find_command = SON([('find', 'test'), ('filter', {'a': 1})]) + for query in ({'a': 1}, {'$query': {'a': 1}},): + for mode in modes_with_query + modes_without_query: + collection = client.db.get_collection('test', + read_preference=mode) + cursor = collection.find(query.copy()) + with going(next, cursor): + request = server.receives() + if mode in modes_without_query: + # Filter is hoisted out of $query. + request.assert_matches(Command(find_command)) + self.assertFalse('$readPreference' in request) + else: + # Command is nested in $query. + request.assert_matches(Command( + SON([('$query', find_command), + ('$readPreference', mode.document)]))) + + request.replies({'cursor': {'id': 0, 'firstBatch': [{}]}}) + + @unittest.skipUnless(version_tuple >= (3, 7), "requires PyMongo 3.7") + def test_query_and_read_mode_sharded_op_msg(self): + """Test OP_MSG sends non-primary $readPreference and never $query.""" + server = MockupDB() + server.autoresponds('ismaster', ismaster=True, msg='isdbgrid', + minWireVersion=2, maxWireVersion=6) + server.run() + self.addCleanup(server.stop) + + client = MongoClient(server.uri) + self.addCleanup(client.close) + + read_prefs = ( + Primary(), + SecondaryPreferred(), + PrimaryPreferred(), + Secondary(), + Nearest(), + SecondaryPreferred([{'tag': 'value'}]),) + + for query in ({'a': 1}, {'$query': {'a': 1}},): + for mode in read_prefs: + collection = client.db.get_collection('test', + read_preference=mode) + cursor = collection.find(query.copy()) + with going(next, cursor): + request = server.receives() + # Command is not nested in $query. + request.assert_matches(OpMsg( + SON([('find', 'test'), + ('filter', {'a': 1}), + ('$readPreference', mode.document)]))) + + request.replies({'cursor': {'id': 0, 'firstBatch': [{}]}}) + + +if __name__ == '__main__': + unittest.main() diff --git a/test/mockupdb/test_reset_and_request_check.py b/test/mockupdb/test_reset_and_request_check.py new file mode 100755 index 0000000000..27b55f3180 --- /dev/null +++ b/test/mockupdb/test_reset_and_request_check.py @@ -0,0 +1,145 @@ +# Copyright 2015 MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import time +import itertools + +from mockupdb import MockupDB, going, wait_until +from pymongo.server_type import SERVER_TYPE +from pymongo.errors import ConnectionFailure +from pymongo import MongoClient, version_tuple + +import unittest +from operations import operations + + +class TestResetAndRequestCheck(unittest.TestCase): + def __init__(self, *args, **kwargs): + super(TestResetAndRequestCheck, self).__init__(*args, **kwargs) + self.ismaster_time = 0 + self.client = None + self.server = None + + def setup_server(self): + self.server = MockupDB() + + def responder(request): + self.ismaster_time = time.time() + return request.ok(ismaster=True, minWireVersion=2, maxWireVersion=6) + + self.server.autoresponds('ismaster', responder) + self.server.run() + self.addCleanup(self.server.stop) + + kwargs = {'socketTimeoutMS': 100} + # Disable retryable reads when pymongo supports it. + if version_tuple[:3] >= (3, 9): + kwargs['retryReads'] = False + self.client = MongoClient(self.server.uri, **kwargs) + wait_until(lambda: self.client.nodes, 'connect to standalone') + + def tearDown(self): + if hasattr(self, 'client') and self.client: + self.client.close() + + def _test_disconnect(self, operation): + # Application operation fails. Test that client resets server + # description and does *not* schedule immediate check. + self.setup_server() + + # Network error on application operation. + with self.assertRaises(ConnectionFailure): + with going(operation.function, self.client): + self.server.receives().hangup() + + # Server is Unknown. + topology = self.client._topology + with self.assertRaises(ConnectionFailure): + topology.select_server_by_address(self.server.address, 0) + + time.sleep(0.5) + after = time.time() + + # Demand a reconnect. + with going(self.client.db.command, 'buildinfo'): + self.server.receives('buildinfo').ok() + + last = self.ismaster_time + self.assertGreaterEqual(last, after, 'called ismaster before needed') + + def _test_timeout(self, operation): + # Application operation times out. Test that client does *not* reset + # server description and does *not* schedule immediate check. + self.setup_server() + + with self.assertRaises(ConnectionFailure): + with going(operation.function, self.client): + self.server.receives() + before = self.ismaster_time + time.sleep(0.5) + + # Server is *not* Unknown. + topology = self.client._topology + server = topology.select_server_by_address(self.server.address, 0) + self.assertEqual(SERVER_TYPE.Standalone, server.description.server_type) + + after = self.ismaster_time + self.assertEqual(after, before, 'unneeded ismaster call') + + def _test_not_master(self, operation): + # Application operation gets a "not master" error. + self.setup_server() + + with self.assertRaises(ConnectionFailure): + with going(operation.function, self.client): + self.server.receives().replies(operation.not_master) + before = self.ismaster_time + time.sleep(1) + + # Server is rediscovered. 
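+        # (The "not master" error reset the server description and
+        # requested an immediate re-check, so by now the monitor has run
+        # ismaster again and ismaster_time has advanced.)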
+ topology = self.client._topology + server = topology.select_server_by_address(self.server.address, 0) + self.assertEqual(SERVER_TYPE.Standalone, server.description.server_type) + + after = self.ismaster_time + self.assertGreater(after, before, 'ismaster not called') + + +def create_reset_test(operation, test_method): + def test(self): + test_method(self, operation) + + return test + + +def generate_reset_tests(): + test_methods = [ + (TestResetAndRequestCheck._test_disconnect, 'test_disconnect'), + (TestResetAndRequestCheck._test_timeout, 'test_timeout'), + (TestResetAndRequestCheck._test_not_master, 'test_not_master'), + ] + + matrix = itertools.product(operations, test_methods) + + for entry in matrix: + operation, (test_method, name) = entry + test = create_reset_test(operation, test_method) + test_name = '%s_%s' % (name, operation.name.replace(' ', '_')) + test.__name__ = test_name + setattr(TestResetAndRequestCheck, test_name, test) + +generate_reset_tests() + +if __name__ == '__main__': + unittest.main() diff --git a/test/mockupdb/test_slave_okay_rs.py b/test/mockupdb/test_slave_okay_rs.py new file mode 100644 index 0000000000..5ff6fced4e --- /dev/null +++ b/test/mockupdb/test_slave_okay_rs.py @@ -0,0 +1,79 @@ +# Copyright 2015 MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Test PyMongo's SlaveOkay with a replica set connection. + +Just make sure SlaveOkay is *not* set on primary reads. +""" + +from mockupdb import MockupDB, going +from pymongo import MongoClient + +import unittest +from operations import operations + + +class TestSlaveOkayRS(unittest.TestCase): + def setup_server(self): + self.primary, self.secondary = MockupDB(), MockupDB() + for server in self.primary, self.secondary: + server.run() + self.addCleanup(server.stop) + + hosts = [server.address_string + for server in (self.primary, self.secondary)] + self.primary.autoresponds( + 'ismaster', + ismaster=True, setName='rs', hosts=hosts, + minWireVersion=2, maxWireVersion=6) + self.secondary.autoresponds( + 'ismaster', + ismaster=False, secondary=True, setName='rs', hosts=hosts, + minWireVersion=2, maxWireVersion=6) + + +def create_slave_ok_rs_test(operation): + def test(self): + self.setup_server() + assert not operation.op_type == 'always-use-secondary' + + client = MongoClient(self.primary.uri, replicaSet='rs') + self.addCleanup(client.close) + with going(operation.function, client): + request = self.primary.receive() + request.reply(operation.reply) + + self.assertFalse(request.slave_ok, 'SlaveOkay set read mode "primary"') + + return test + + +def generate_slave_ok_rs_tests(): + for operation in operations: + # Don't test secondary operations with MockupDB, the server enforces the + # SlaveOkay bit so integration tests prove we set it. 
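+        # Primary reads are enough here: this file only promises that
+        # SlaveOkay is *not* set when reading from the primary.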
+        if operation.op_type == 'always-use-secondary':
+            continue
+        test = create_slave_ok_rs_test(operation)
+
+        test_name = 'test_%s' % operation.name.replace(' ', '_')
+        test.__name__ = test_name
+        setattr(TestSlaveOkayRS, test_name, test)
+
+
+generate_slave_ok_rs_tests()
+
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/test/mockupdb/test_slave_okay_sharded.py b/test/mockupdb/test_slave_okay_sharded.py
new file mode 100644
index 0000000000..719de57553
--- /dev/null
+++ b/test/mockupdb/test_slave_okay_sharded.py
@@ -0,0 +1,101 @@
+# Copyright 2015 MongoDB, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Test PyMongo's SlaveOkay with a sharded cluster of two mongoses.
+
+Whether SlaveOkay is set depends on each operation's op_type and on the
+read preference mode.
+"""
+import itertools
+
+from pymongo.read_preferences import make_read_preference
+from pymongo.read_preferences import read_pref_mode_from_name
+
+try:
+    from queue import Queue
+except ImportError:
+    from Queue import Queue
+
+from mockupdb import MockupDB, going
+from pymongo import MongoClient
+
+import unittest
+from operations import operations
+
+
+class TestSlaveOkaySharded(unittest.TestCase):
+    def setup_server(self):
+        self.mongos1, self.mongos2 = MockupDB(), MockupDB()
+
+        # Collect queries to either server in one queue.
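+        # (Either mongos may be selected for any given operation, so each
+        # request is read from the shared queue rather than from one
+        # specific server.)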
+ self.q = Queue() + for server in self.mongos1, self.mongos2: + server.subscribe(self.q.put) + server.run() + self.addCleanup(server.stop) + server.autoresponds('ismaster', minWireVersion=2, maxWireVersion=6, + ismaster=True, msg='isdbgrid') + + self.mongoses_uri = 'mongodb://%s,%s' % (self.mongos1.address_string, + self.mongos2.address_string) + + +def create_slave_ok_sharded_test(mode, operation): + def test(self): + self.setup_server() + if operation.op_type == 'always-use-secondary': + slave_ok = True + elif operation.op_type == 'may-use-secondary': + slave_ok = mode != 'primary' + elif operation.op_type == 'must-use-primary': + slave_ok = False + else: + assert False, 'unrecognized op_type %r' % operation.op_type + + pref = make_read_preference(read_pref_mode_from_name(mode), + tag_sets=None) + + client = MongoClient(self.mongoses_uri, read_preference=pref) + self.addCleanup(client.close) + with going(operation.function, client): + request = self.q.get(timeout=1) + request.reply(operation.reply) + + if slave_ok: + self.assertTrue(request.slave_ok, 'SlaveOkay not set') + else: + self.assertFalse(request.slave_ok, 'SlaveOkay set') + + return test + + +def generate_slave_ok_sharded_tests(): + modes = 'primary', 'secondary', 'nearest' + matrix = itertools.product(modes, operations) + + for entry in matrix: + mode, operation = entry + test = create_slave_ok_sharded_test(mode, operation) + test_name = 'test_%s_with_mode_%s' % ( + operation.name.replace(' ', '_'), mode) + + test.__name__ = test_name + setattr(TestSlaveOkaySharded, test_name, test) + + +generate_slave_ok_sharded_tests() + +if __name__ == '__main__': + unittest.main() diff --git a/test/mockupdb/test_slave_okay_single.py b/test/mockupdb/test_slave_okay_single.py new file mode 100644 index 0000000000..4a0725a869 --- /dev/null +++ b/test/mockupdb/test_slave_okay_single.py @@ -0,0 +1,104 @@ +# Copyright 2015 MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Test PyMongo's SlaveOkay with: + +- A direct connection to a standalone. +- A direct connection to a slave. +- A direct connection to a mongos. 
+""" + +import itertools + +from mockupdb import MockupDB, going +from pymongo import MongoClient +from pymongo.read_preferences import (make_read_preference, + read_pref_mode_from_name) +from pymongo.topology_description import TOPOLOGY_TYPE + +import unittest +from operations import operations + + +def topology_type_name(client): + topology_type = client._topology._description.topology_type + return TOPOLOGY_TYPE._fields[topology_type] + + +class TestSlaveOkaySingle(unittest.TestCase): + def setUp(self): + self.server = MockupDB() + self.server.run() + self.addCleanup(self.server.stop) + + +def create_slave_ok_single_test(mode, server_type, ismaster, operation): + def test(self): + ismaster_with_version = ismaster.copy() + ismaster_with_version['minWireVersion'] = 2 + ismaster_with_version['maxWireVersion'] = 6 + self.server.autoresponds('ismaster', **ismaster_with_version) + if operation.op_type == 'always-use-secondary': + slave_ok = True + elif operation.op_type == 'may-use-secondary': + slave_ok = mode != 'primary' or server_type != 'mongos' + elif operation.op_type == 'must-use-primary': + slave_ok = server_type != 'mongos' + else: + assert False, 'unrecognized op_type %r' % operation.op_type + + pref = make_read_preference(read_pref_mode_from_name(mode), + tag_sets=None) + + client = MongoClient(self.server.uri, read_preference=pref) + self.addCleanup(client.close) + with going(operation.function, client): + request = self.server.receive() + request.reply(operation.reply) + + self.assertEqual(topology_type_name(client), 'Single') + if slave_ok: + self.assertTrue(request.slave_ok, 'SlaveOkay not set') + else: + self.assertFalse(request.slave_ok, 'SlaveOkay set') + + return test + + +def generate_slave_ok_single_tests(): + modes = 'primary', 'secondary', 'nearest' + server_types = [ + ('standalone', {'ismaster': True}), + ('slave', {'ismaster': False}), + ('mongos', {'ismaster': True, 'msg': 'isdbgrid'})] + + matrix = itertools.product(modes, server_types, operations) + + for entry in matrix: + mode, (server_type, ismaster), operation = entry + test = create_slave_ok_single_test(mode, server_type, ismaster, + operation) + + test_name = 'test_%s_%s_with_mode_%s' % ( + operation.name.replace(' ', '_'), server_type, mode) + + test.__name__ = test_name + setattr(TestSlaveOkaySingle, test_name, test) + + +generate_slave_ok_single_tests() + + +if __name__ == '__main__': + unittest.main() diff --git a/test/mockupdb/test_starting_from_overflow.py b/test/mockupdb/test_starting_from_overflow.py new file mode 100644 index 0000000000..d94cab0ff3 --- /dev/null +++ b/test/mockupdb/test_starting_from_overflow.py @@ -0,0 +1,76 @@ +# Copyright 2015 MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""Test that PyMongo ignores the startingFrom field, PYTHON-945.""" + +from mockupdb import going, MockupDB, OpGetMore, OpQuery, Command +from pymongo import MongoClient + +import unittest + + +class TestStartingFromOverflow(unittest.TestCase): + def test_query(self): + server = MockupDB(auto_ismaster=True, + min_wire_version=0, max_wire_version=3) + server.run() + self.addCleanup(server.stop) + client = MongoClient(server.uri) + cursor = client.test.collection.find() + with going(list, cursor) as docs: + request = server.receives(OpQuery) + request.reply({'a': 1}, cursor_id=123, starting_from=-7) + request = server.receives(OpGetMore, cursor_id=123) + request.reply({'a': 2}, starting_from=-3, cursor_id=0) + + self.assertEqual([{'a': 1}, {'a': 2}], docs()) + + def test_aggregate(self): + server = MockupDB(auto_ismaster={'maxWireVersion': 3}) + server.run() + self.addCleanup(server.stop) + client = MongoClient(server.uri) + with going(client.test.collection.aggregate, []) as cursor: + request = server.receives(Command) + request.reply({'cursor': { + 'id': 123, + 'firstBatch': [{'a': 1}]}}) + + with going(list, cursor()) as docs: + request = server.receives(OpGetMore, cursor_id=123) + request.reply({'a': 2}, starting_from=-3, cursor_id=0) + + self.assertEqual([{'a': 1}, {'a': 2}], docs()) + + def test_find_command(self): + server = MockupDB(auto_ismaster={'maxWireVersion': 4}) + server.run() + self.addCleanup(server.stop) + client = MongoClient(server.uri) + with going(list, client.test.collection.find()) as docs: + server.receives(Command).reply({'cursor': { + 'id': 123, + 'firstBatch': [{'a': 1}]}}) + + request = server.receives(Command("getMore", 123)) + request.reply({'cursor': { + 'id': 0, + 'nextBatch': [{'a': 2}]}}, + starting_from=-3) + + self.assertEqual([{'a': 1}, {'a': 2}], docs()) + + +if __name__ == '__main__': + unittest.main() From e655b0bb9940904dcbc4e6f7fc18a79262dd6ffc Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Tue, 16 Nov 2021 16:35:31 -0800 Subject: [PATCH 0011/1588] PYTHON-3001 Bump minimum pymongocrypt version to 1.2.0 (#793) --- doc/changelog.rst | 5 +++++ pymongo/encryption.py | 4 ++++ pymongo/encryption_options.py | 4 ++++ setup.py | 2 +- 4 files changed, 14 insertions(+), 1 deletion(-) diff --git a/doc/changelog.rst b/doc/changelog.rst index 3b46667cee..5f0ff23300 100644 --- a/doc/changelog.rst +++ b/doc/changelog.rst @@ -179,6 +179,8 @@ Breaking Changes in 4.0 parsing MongoDB URIs. - Removed the `disable_md5` parameter for :class:`~gridfs.GridFSBucket` and :class:`~gridfs.GridFS`. See :ref:`removed-gridfs-checksum` for details. +- PyMongoCrypt 1.2.0 or later is now required for client side field level + encryption support. Notable improvements .................... @@ -194,6 +196,9 @@ Notable improvements choose a `srvMaxHosts` sized subset of hosts. - Added :attr:`pymongo.mongo_client.MongoClient.options` for read-only access to a client's configuration options. +- Support for the "kmip" KMS provider for client side field level encryption. + See the docstring for :class:`~pymongo.encryption_options.AutoEncryptionOpts` + and :mod:`~pymongo.encryption`. Issues Resolved ............... 
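As a quick illustration of the feature this changelog entry describes, here
is a minimal sketch of constructing a ClientEncryption with the new "kmip"
provider and per-provider TLS options. The endpoint and certificate paths
are placeholders, not values taken from this patch::

    from bson.codec_options import CodecOptions
    from pymongo import MongoClient
    from pymongo.encryption import ClientEncryption

    kms_providers = {'kmip': {'endpoint': 'localhost:5698'}}
    client = MongoClient()
    client_encryption = ClientEncryption(
        kms_providers,
        'keyvault.datakeys',
        client,
        CodecOptions(),
        # Trust a custom CA and present a client certificate when PyMongo
        # connects to the KMIP server.
        kms_tls_options={'kmip': {'tlsCAFile': 'ca.pem',
                                  'tlsCertificateKeyFile': 'client.pem'}})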
diff --git a/pymongo/encryption.py b/pymongo/encryption.py index cb4080397f..064ba48d51 100644 --- a/pymongo/encryption.py +++ b/pymongo/encryption.py @@ -426,6 +426,10 @@ def __init__(self, kms_providers, key_vault_namespace, key_vault_client, kms_tls_options={'kmip': {'tlsCAFile': certifi.where()}} + Or to supply a client certificate:: + + kms_tls_options={'kmip': {'tlsCertificateKeyFile': 'client.pem'}} + .. versionchanged:: 4.0 Added the `kms_tls_options` parameter and the "kmip" KMS provider. diff --git a/pymongo/encryption_options.py b/pymongo/encryption_options.py index c96f4a6d67..d0c2d5ce72 100644 --- a/pymongo/encryption_options.py +++ b/pymongo/encryption_options.py @@ -129,6 +129,10 @@ def __init__(self, kms_providers, key_vault_namespace, kms_tls_options={'kmip': {'tlsCAFile': certifi.where()}} + Or to supply a client certificate:: + + kms_tls_options={'kmip': {'tlsCertificateKeyFile': 'client.pem'}} + .. versionchanged:: 4.0 Added the `kms_tls_options` parameter and the "kmip" KMS provider. diff --git a/setup.py b/setup.py index 8fcad6cc60..63a1df4955 100755 --- a/setup.py +++ b/setup.py @@ -277,7 +277,7 @@ def build_extension(self, ext): pyopenssl_reqs.append('certifi') extras_require = { - 'encryption': ['pymongocrypt>=1.1.0,<2.0.0'], + 'encryption': ['pymongocrypt>=1.2.0,<2.0.0'], 'ocsp': pyopenssl_reqs, 'snappy': ['python-snappy'], 'zstd': ['zstandard'], From 24cc4c42bf6476c25f90cfcc7e10e63e359465b2 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Tue, 16 Nov 2021 17:14:33 -0800 Subject: [PATCH 0012/1588] PYTHON-3019 Fix doc test failures (#794) Remove pymongo 2 to 3 migration guide. Make raw_bson doc tests less flakey. --- bson/raw_bson.py | 16 +- doc/changelog.rst | 4 +- doc/index.rst | 4 - doc/migrate-to-pymongo3.rst | 544 ------------------------------------ doc/migrate-to-pymongo4.rst | 3 +- 5 files changed, 13 insertions(+), 558 deletions(-) delete mode 100644 doc/migrate-to-pymongo3.rst diff --git a/bson/raw_bson.py b/bson/raw_bson.py index 339354d7dd..bfe888b6b7 100644 --- a/bson/raw_bson.py +++ b/bson/raw_bson.py @@ -28,23 +28,23 @@ >>> client.drop_database('db') >>> client.drop_database('replica_db') >>> db = client.db - >>> result = db.test.insert_many([{'a': 1}, - ... {'b': 1}, - ... {'c': 1}, - ... {'d': 1}]) + >>> result = db.test.insert_many([{'_id': 1, 'a': 1}, + ... {'_id': 2, 'b': 1}, + ... {'_id': 3, 'c': 1}, + ... {'_id': 4, 'd': 1}]) >>> replica_db = client.replica_db >>> for doc in db.test.find(): ... print(f"raw document: {doc.raw}") ... print(f"decoded document: {bson.decode(doc.raw)}") ... result = replica_db.test.insert_one(doc) raw document: b'...' - decoded document: {'_id': ObjectId('...'), 'a': 1} + decoded document: {'_id': 1, 'a': 1} raw document: b'...' - decoded document: {'_id': ObjectId('...'), 'b': 1} + decoded document: {'_id': 2, 'b': 1} raw document: b'...' - decoded document: {'_id': ObjectId('...'), 'c': 1} + decoded document: {'_id': 3, 'c': 1} raw document: b'...' - decoded document: {'_id': ObjectId('...'), 'd': 1} + decoded document: {'_id': 4, 'd': 1} For use cases like moving documents across different databases or writing binary blobs to disk, using raw BSON documents provides better speed and avoids the diff --git a/doc/changelog.rst b/doc/changelog.rst index 5f0ff23300..222750f550 100644 --- a/doc/changelog.rst +++ b/doc/changelog.rst @@ -1881,7 +1881,9 @@ Changes in Version 2.9 Version 2.9 provides an upgrade path to PyMongo 3.x. 
Most of the API changes from PyMongo 3.0 have been backported in a backward compatible way, allowing applications to be written against PyMongo >= 2.9, rather then PyMongo 2.x or -PyMongo 3.x. See the :doc:`/migrate-to-pymongo3` for detailed examples. +PyMongo 3.x. See the `PyMongo 3 Migration Guide +`_ for +detailed examples. .. note:: There are a number of new deprecations in this release for features that were removed in PyMongo 3.0. diff --git a/doc/index.rst b/doc/index.rst index 9ef6907181..da05bf80ae 100644 --- a/doc/index.rst +++ b/doc/index.rst @@ -34,9 +34,6 @@ everything you need to know to use **PyMongo**. :doc:`migrate-to-pymongo4` A PyMongo 3.x to 4.x migration guide. -:doc:`migrate-to-pymongo3` - A PyMongo 2.x to 3.x migration guide. - :doc:`python3` Frequently asked questions about python 3 support. @@ -123,5 +120,4 @@ Indices and tables changelog python3 migrate-to-pymongo4 - migrate-to-pymongo3 developer/index diff --git a/doc/migrate-to-pymongo3.rst b/doc/migrate-to-pymongo3.rst deleted file mode 100644 index 633d0a7abb..0000000000 --- a/doc/migrate-to-pymongo3.rst +++ /dev/null @@ -1,544 +0,0 @@ -PyMongo 3 Migration Guide -========================= - -.. contents:: - -.. testsetup:: - - from pymongo import MongoClient, ReadPreference - client = MongoClient() - collection = client.my_database.my_collection - -PyMongo 3 is a partial rewrite bringing a large number of improvements. It -also brings a number of backward breaking changes. This guide provides a -roadmap for migrating an existing application from PyMongo 2.x to 3.x or -writing libraries that will work with both PyMongo 2.x and 3.x. - -PyMongo 2.9 ------------ - -The first step in any successful migration involves upgrading to, or -requiring, at least PyMongo 2.9. If your project has a -requirements.txt file, add the line "pymongo >= 2.9, < 3.0" until you have -completely migrated to PyMongo 3. Most of the key new -methods and options from PyMongo 3.0 are backported in PyMongo 2.9 making -migration much easier. - -Enable Deprecation Warnings ---------------------------- - -Starting with PyMongo 2.9, :exc:`DeprecationWarning` is raised by most methods -removed in PyMongo 3.0. Make sure you enable runtime warnings to see -where deprecated functions and methods are being used in your application:: - - python -Wd - -Warnings can also be changed to errors:: - - python -Wd -Werror - -.. note:: Not all deprecated features raise :exc:`DeprecationWarning` when - used. For example, the :meth:`~pymongo.collection.Collection.find` options - renamed in PyMongo 3.0 do not raise :exc:`DeprecationWarning` when used in - PyMongo 2.x. See also `Removed features with no migration path`_. - -CRUD API --------- - -Changes to find() and find_one() -................................ - -"spec" renamed "filter" -~~~~~~~~~~~~~~~~~~~~~~~ - -The `spec` option has been renamed to `filter`. Code like this:: - - >>> cursor = collection.find(spec={"a": 1}) - -can be changed to this with PyMongo 2.9 or later: - -.. doctest:: - - >>> cursor = collection.find(filter={"a": 1}) - -or this with any version of PyMongo: - -.. doctest:: - - >>> cursor = collection.find({"a": 1}) - -"fields" renamed "projection" -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -The `fields` option has been renamed to `projection`. Code like this:: - - >>> cursor = collection.find({"a": 1}, fields={"_id": False}) - -can be changed to this with PyMongo 2.9 or later: - -.. 
doctest:: - - >>> cursor = collection.find({"a": 1}, projection={"_id": False}) - -or this with any version of PyMongo: - -.. doctest:: - - >>> cursor = collection.find({"a": 1}, {"_id": False}) - -"partial" renamed "allow_partial_results" -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -The `partial` option has been renamed to `allow_partial_results`. Code like -this:: - - >>> cursor = collection.find({"a": 1}, partial=True) - -can be changed to this with PyMongo 2.9 or later: - -.. doctest:: - - >>> cursor = collection.find({"a": 1}, allow_partial_results=True) - -"timeout" replaced by "no_cursor_timeout" -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -The `timeout` option has been replaced by `no_cursor_timeout`. Code like this:: - - >>> cursor = collection.find({"a": 1}, timeout=False) - -can be changed to this with PyMongo 2.9 or later: - -.. doctest:: - - >>> cursor = collection.find({"a": 1}, no_cursor_timeout=True) - -"network_timeout" is removed -~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -The `network_timeout` option has been removed. This option was always the -wrong solution for timing out long running queries and should never be used -in production. Starting with **MongoDB 2.6** you can use the $maxTimeMS query -modifier. Code like this:: - - # Set a 5 second select() timeout. - >>> cursor = collection.find({"a": 1}, network_timeout=5) - -can be changed to this with PyMongo 2.9 or later:: - - # Set a 5 second (5000 millisecond) server side query timeout. - >>> cursor = collection.find({"a": 1}, modifiers={"$maxTimeMS": 5000}) - -or with PyMongo 3.5 or later: - - >>> cursor = collection.find({"a": 1}, max_time_ms=5000) - -or with any version of PyMongo: - -.. doctest:: - - >>> cursor = collection.find({"$query": {"a": 1}, "$maxTimeMS": 5000}) - -.. seealso:: `$maxTimeMS - `_ - -Tailable cursors -~~~~~~~~~~~~~~~~ - -The `tailable` and `await_data` options have been replaced by `cursor_type`. -Code like this:: - - >>> cursor = collection.find({"a": 1}, tailable=True) - >>> cursor = collection.find({"a": 1}, tailable=True, await_data=True) - -can be changed to this with PyMongo 2.9 or later: - -.. doctest:: - - >>> from pymongo import CursorType - >>> cursor = collection.find({"a": 1}, cursor_type=CursorType.TAILABLE) - >>> cursor = collection.find({"a": 1}, cursor_type=CursorType.TAILABLE_AWAIT) - -Other removed options -~~~~~~~~~~~~~~~~~~~~~ - -The `slave_okay`, `read_preference`, `tag_sets`, -and `secondary_acceptable_latency_ms` options have been removed. See the `Read -Preferences`_ section for solutions. - -The aggregate method always returns a cursor -............................................ - -PyMongo 2.6 added an option to return an iterable cursor from -:meth:`~pymongo.collection.Collection.aggregate`. In PyMongo 3 -:meth:`~pymongo.collection.Collection.aggregate` always returns a cursor. Use -the `cursor` option for consistent behavior with PyMongo 2.9 and later: - -.. doctest:: - - >>> for result in collection.aggregate([], cursor={}): - ... pass - -Read Preferences ----------------- - -The "slave_okay" option is removed -.................................. - -The `slave_okay` option is removed from PyMongo's API. The -secondaryPreferred read preference provides the same behavior. -Code like this:: - - >>> client = MongoClient(slave_okay=True) - -can be changed to this with PyMongo 2.9 or newer: - -.. doctest:: - - >>> client = MongoClient(readPreference="secondaryPreferred") - -The "read_preference" attribute is immutable -............................................ 
- -Code like this:: - - >>> from pymongo import ReadPreference - >>> db = client.my_database - >>> db.read_preference = ReadPreference.SECONDARY - -can be changed to this with PyMongo 2.9 or later: - -.. doctest:: - - >>> db = client.get_database("my_database", - ... read_preference=ReadPreference.SECONDARY) - -Code like this:: - - >>> cursor = collection.find({"a": 1}, - ... read_preference=ReadPreference.SECONDARY) - -can be changed to this with PyMongo 2.9 or later: - -.. doctest:: - - >>> coll2 = collection.with_options(read_preference=ReadPreference.SECONDARY) - >>> cursor = coll2.find({"a": 1}) - -.. seealso:: :meth:`~pymongo.database.Database.get_collection` - -The "tag_sets" option and attribute are removed -............................................... - -The `tag_sets` MongoClient option is removed. The `read_preference` -option can be used instead. Code like this:: - - >>> client = MongoClient( - ... read_preference=ReadPreference.SECONDARY, - ... tag_sets=[{"dc": "ny"}, {"dc": "sf"}]) - -can be changed to this with PyMongo 2.9 or later: - -.. doctest:: - - >>> from pymongo.read_preferences import Secondary - >>> client = MongoClient(read_preference=Secondary([{"dc": "ny"}])) - -To change the tags sets for a Database or Collection, code like this:: - - >>> db = client.my_database - >>> db.read_preference = ReadPreference.SECONDARY - >>> db.tag_sets = [{"dc": "ny"}] - -can be changed to this with PyMongo 2.9 or later: - -.. doctest:: - - >>> db = client.get_database("my_database", - ... read_preference=Secondary([{"dc": "ny"}])) - -Code like this:: - - >>> cursor = collection.find( - ... {"a": 1}, - ... read_preference=ReadPreference.SECONDARY, - ... tag_sets=[{"dc": "ny"}]) - -can be changed to this with PyMongo 2.9 or later: - -.. doctest:: - - >>> from pymongo.read_preferences import Secondary - >>> coll2 = collection.with_options( - ... read_preference=Secondary([{"dc": "ny"}])) - >>> cursor = coll2.find({"a": 1}) - -.. seealso:: :meth:`~pymongo.database.Database.get_collection` - -The "secondary_acceptable_latency_ms" option and attribute are removed -...................................................................... - -PyMongo 2.x supports `secondary_acceptable_latency_ms` as an option to methods -throughout the driver, but mongos only supports a global latency option. -PyMongo 3.x has changed to match the behavior of mongos, allowing migration -from a single server, to a replica set, to a sharded cluster without a -surprising change in server selection behavior. A new option, -`localThresholdMS`, is available through MongoClient and should be used in -place of `secondaryAcceptableLatencyMS`. Code like this:: - - >>> client = MongoClient(readPreference="nearest", - ... secondaryAcceptableLatencyMS=100) - -can be changed to this with PyMongo 2.9 or later: - -.. doctest:: - - >>> client = MongoClient(readPreference="nearest", - ... localThresholdMS=100) - -Write Concern -------------- - -The "safe" option is removed -............................ - -In PyMongo 3 the `safe` option is removed from the entire API. -:class:`~pymongo.mongo_client.MongoClient` has always defaulted to acknowledged -write operations and continues to do so in PyMongo 3. - -The "write_concern" attribute is immutable -.......................................... - -The `write_concern` attribute is immutable in PyMongo 3. Code like this:: - - >>> client = MongoClient() - >>> client.write_concern = {"w": "majority"} - -can be changed to this with any version of PyMongo: - -.. 
doctest:: - - >>> client = MongoClient(w="majority") - -Code like this:: - - >>> db = client.my_database - >>> db.write_concern = {"w": "majority"} - -can be changed to this with PyMongo 2.9 or later: - -.. doctest:: - - >>> from pymongo import WriteConcern - >>> db = client.get_database("my_database", - ... write_concern=WriteConcern(w="majority")) - -The new CRUD API write methods do not accept write concern options. Code like -this:: - - >>> oid = collection.insert({"a": 2}, w="majority") - -can be changed to this with PyMongo 3 or later: - -.. doctest:: - - >>> from pymongo import WriteConcern - >>> coll2 = collection.with_options( - ... write_concern=WriteConcern(w="majority")) - >>> oid = coll2.insert_one({"a": 2}) - -.. seealso:: :meth:`~pymongo.database.Database.get_collection` - -Codec Options -------------- - -The "document_class" attribute is removed -......................................... - -Code like this:: - - >>> from bson.son import SON - >>> client = MongoClient() - >>> client.document_class = SON - -can be replaced by this in any version of PyMongo: - -.. doctest:: - - >>> from bson.son import SON - >>> client = MongoClient(document_class=SON) - -or to change the `document_class` for a :class:`~pymongo.database.Database` -with PyMongo 2.9 or later: - -.. doctest:: - - >>> from bson.codec_options import CodecOptions - >>> from bson.son import SON - >>> db = client.get_database("my_database", CodecOptions(SON)) - -.. seealso:: :meth:`~pymongo.database.Database.get_collection` and - :meth:`~pymongo.collection.Collection.with_options` - -The "uuid_subtype" option and attribute are removed -................................................... - -Code like this:: - - >>> from bson.binary import JAVA_LEGACY - >>> db = client.my_database - >>> db.uuid_subtype = JAVA_LEGACY - -can be replaced by this with PyMongo 2.9 or later: - -.. doctest:: - - >>> from bson.binary import JAVA_LEGACY - >>> from bson.codec_options import CodecOptions - >>> db = client.get_database("my_database", - ... CodecOptions(uuid_representation=JAVA_LEGACY)) - -.. seealso:: :meth:`~pymongo.database.Database.get_collection` and - :meth:`~pymongo.collection.Collection.with_options` - -MongoClient ------------ - -MongoClient connects asynchronously -................................... - -In PyMongo 3, the :class:`~pymongo.mongo_client.MongoClient` constructor no -longer blocks while connecting to the server or servers, and it no longer -raises :exc:`~pymongo.errors.ConnectionFailure` if they are unavailable, nor -:exc:`~pymongo.errors.ConfigurationError` if the user’s credentials are wrong. -Instead, the constructor returns immediately and launches the connection -process on background threads. The `connect` option is added to control whether -these threads are started immediately, or when the client is first used. - -For consistent behavior in PyMongo 2.x and PyMongo 3.x, code like this:: - - >>> from pymongo.errors import ConnectionFailure - >>> try: - ... client = MongoClient() - ... except ConnectionFailure: - ... print("Server not available") - >>> - -can be changed to this with PyMongo 2.9 or later: - -.. doctest:: - - >>> from pymongo.errors import ConnectionFailure - >>> client = MongoClient(connect=False) - >>> try: - ... client.admin.command("ping") - ... except ConnectionFailure: - ... print("Server not available") - >>> - -Any operation can be used to determine if the server is available. 
We choose -the "ping" command here because it is cheap and does not require auth, so -it is a simple way to check whether the server is available. - -The max_pool_size parameter is removed -...................................... - -PyMongo 3 replaced the max_pool_size parameter with support for the MongoDB URI -`maxPoolSize` option. Code like this:: - - >>> client = MongoClient(max_pool_size=10) - -can be replaced by this with PyMongo 2.9 or later: - -.. doctest:: - - >>> client = MongoClient(maxPoolSize=10) - >>> client = MongoClient("mongodb://localhost:27017/?maxPoolSize=10") - -The "disconnect" method is removed -.................................. - -Code like this:: - - >>> client.disconnect() - -can be replaced by this with PyMongo 2.9 or later: - -.. doctest:: - - >>> client.close() - -The host and port attributes are removed -........................................ - -Code like this:: - - >>> host = client.host - >>> port = client.port - -can be replaced by this with PyMongo 2.9 or later: - -.. doctest:: - - >>> address = client.address - >>> host, port = address or (None, None) - -BSON ----- - -"as_class", "tz_aware", and "uuid_subtype" are removed -...................................................... - -The `as_class`, `tz_aware`, and `uuid_subtype` parameters have been -removed from the functions provided in :mod:`bson`. Furthermore, the -:func:`~bson.encode` and :func:`~bson.decode` functions have been added -as more performant alternatives to the :meth:`bson.BSON.encode` and -:meth:`bson.BSON.decode` methods. Code like this:: - - >>> from bson import BSON - >>> from bson.son import SON - >>> encoded = BSON.encode({"a": 1}, as_class=SON) - -can be replaced by this in PyMongo 2.9 or later: - -.. doctest:: - - >>> from bson import encode - >>> from bson.codec_options import CodecOptions - >>> from bson.son import SON - >>> encoded = encode({"a": 1}, codec_options=CodecOptions(SON)) - -Removed features with no migration path ---------------------------------------- - -MasterSlaveConnection is removed -................................ - -Master slave deployments are deprecated in MongoDB. Starting with MongoDB 3.0 -a replica set can have up to 50 members and that limit is likely to be -removed in later releases. We recommend migrating to replica sets instead. - -Requests are removed -.................... - -The client methods `start_request`, `in_request`, and `end_request` are -removed. Requests were designed to make read-your-writes consistency more -likely with the w=0 write concern. Additionally, a thread in a request used the -same member for all secondary reads in a replica set. To ensure -read-your-writes consistency in PyMongo 3.0, do not override the default write -concern with w=0, and do not override the default read preference of PRIMARY. - -The "compile_re" option is removed -.................................. - -In PyMongo 3 regular expressions are never compiled to Python match objects. - -The "use_greenlets" option is removed -..................................... - -The `use_greenlets` option was meant to allow use of PyMongo with Gevent -without the use of gevent.monkey.patch_threads(). This option caused a lot -of confusion and made it difficult to support alternative asyncio libraries -like Eventlet. Users of Gevent should use gevent.monkey.patch_all() instead. - -.. 
seealso:: :doc:`examples/gevent` diff --git a/doc/migrate-to-pymongo4.rst b/doc/migrate-to-pymongo4.rst index c8ac401a18..5acd3a5d12 100644 --- a/doc/migrate-to-pymongo4.rst +++ b/doc/migrate-to-pymongo4.rst @@ -25,7 +25,8 @@ completely migrated to PyMongo 4. Most of the key new methods and options from PyMongo 4.0 are backported in PyMongo 3.12 making migration much easier. .. note:: Users of PyMongo 2.X who wish to upgrade to 4.x must first upgrade - to PyMongo 3.x by following the :doc:`migrate-to-pymongo3`. + to PyMongo 3.x by following the `PyMongo 3 Migration Guide + `_. Python 3.6+ ----------- From 12a6af7ab6cfeb8004570cea6812327c7fd4b226 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Wed, 17 Nov 2021 12:31:59 -0800 Subject: [PATCH 0013/1588] PYTHON-2981 Stop using MongoClient.address for hashing and equality (#795) --- doc/changelog.rst | 3 +++ pymongo/mongo_client.py | 6 +++--- pymongo/monitor.py | 3 +-- pymongo/settings.py | 16 +++++++++++++--- pymongo/topology.py | 14 ++++++++++++++ test/test_client.py | 29 +++++++++++++++++++++++------ 6 files changed, 57 insertions(+), 14 deletions(-) diff --git a/doc/changelog.rst b/doc/changelog.rst index 222750f550..88c1b7cd20 100644 --- a/doc/changelog.rst +++ b/doc/changelog.rst @@ -177,6 +177,9 @@ Breaking Changes in 4.0 :exc:`~pymongo.errors.InvalidURI` exception when it encounters unescaped percent signs in username and password when parsing MongoDB URIs. +- Comparing two :class:`~pymongo.mongo_client.MongoClient` instances now + uses a set of immutable properties rather than + :attr:`~pymongo.mongo_client.MongoClient.address` which can change. - Removed the `disable_md5` parameter for :class:`~gridfs.GridFSBucket` and :class:`~gridfs.GridFS`. See :ref:`removed-gridfs-checksum` for details. - PyMongoCrypt 1.2.0 or later is now required for client side field level diff --git a/pymongo/mongo_client.py b/pymongo/mongo_client.py index 1f8b781487..90e6a7706f 100644 --- a/pymongo/mongo_client.py +++ b/pymongo/mongo_client.py @@ -748,9 +748,9 @@ def __init__( server_selector=options.server_selector, heartbeat_frequency=options.heartbeat_frequency, fqdn=fqdn, - srv_service_name=srv_service_name, direct_connection=options.direct_connection, load_balanced=options.load_balanced, + srv_service_name=srv_service_name, srv_max_hosts=srv_max_hosts ) @@ -1337,14 +1337,14 @@ def _retryable_write(self, retryable, func, session): def __eq__(self, other): if isinstance(other, self.__class__): - return self.address == other.address + return self._topology == other._topology return NotImplemented def __ne__(self, other): return not self == other def __hash__(self): - return hash(self.address) + return hash(self._topology) def _repr_helper(self): def option_repr(option, value): diff --git a/pymongo/monitor.py b/pymongo/monitor.py index 37b894bd53..a383e272cd 100644 --- a/pymongo/monitor.py +++ b/pymongo/monitor.py @@ -299,7 +299,6 @@ def __init__(self, topology, topology_settings): self._settings = topology_settings self._seedlist = self._settings._seeds self._fqdn = self._settings.fqdn - self._srv_service_name = self._settings._srv_service_name def _run(self): seedlist = self._get_seedlist() @@ -319,7 +318,7 @@ def _get_seedlist(self): try: resolver = _SrvResolver(self._fqdn, self._settings.pool_options.connect_timeout, - self._srv_service_name) + self._settings.srv_service_name) seedlist, ttl = resolver.get_hosts_and_min_ttl() if len(seedlist) == 0: # As per the spec: this should be treated as a failure. 
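A sketch of the new equality semantics this patch introduces (mirrored by
the test_client.py changes below): clients now compare by their immutable
topology settings, so seed order does not matter and no connection is
required::

    from pymongo import MongoClient

    # Same seeds, different order: equal, and usable as dict/set keys.
    a = MongoClient(['a', 'b', 'c'], connect=False)
    b = MongoClient(['c', 'a', 'b'], connect=False)
    assert a == b and hash(a) == hash(b)

    # Different seeds: not equal, even before any connection is attempted.
    assert MongoClient('a', connect=False) != MongoClient('b', connect=False)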
diff --git a/pymongo/settings.py b/pymongo/settings.py index e9e28c13ac..d17b5e8b86 100644 --- a/pymongo/settings.py +++ b/pymongo/settings.py @@ -39,9 +39,9 @@ def __init__(self, heartbeat_frequency=common.HEARTBEAT_FREQUENCY, server_selector=None, fqdn=None, - srv_service_name=common.SRV_SERVICE_NAME, direct_connection=False, load_balanced=None, + srv_service_name=common.SRV_SERVICE_NAME, srv_max_hosts=0): """Represent MongoClient's configuration. @@ -62,11 +62,11 @@ def __init__(self, self._server_selection_timeout = server_selection_timeout self._server_selector = server_selector self._fqdn = fqdn - self._srv_service_name = srv_service_name self._heartbeat_frequency = heartbeat_frequency - self._srv_max_hosts = srv_max_hosts or 0 self._direct = direct_connection self._load_balanced = load_balanced + self._srv_service_name = srv_service_name + self._srv_max_hosts = srv_max_hosts or 0 self._topology_id = ObjectId() # Store the allocation traceback to catch unclosed clients in the @@ -131,6 +131,16 @@ def load_balanced(self): """True if the client was configured to connect to a load balancer.""" return self._load_balanced + @property + def srv_service_name(self): + """The srvServiceName.""" + return self._srv_service_name + + @property + def srv_max_hosts(self): + """The srvMaxHosts.""" + return self._srv_max_hosts + def get_topology_type(self): if self.load_balanced: return TOPOLOGY_TYPE.LoadBalanced diff --git a/pymongo/topology.py b/pymongo/topology.py index c6a702fde5..9139c1492e 100644 --- a/pymongo/topology.py +++ b/pymongo/topology.py @@ -803,6 +803,20 @@ def __repr__(self): msg = 'CLOSED ' return '<%s %s%r>' % (self.__class__.__name__, msg, self._description) + def eq_props(self): + """The properties to use for MongoClient/Topology equality checks.""" + ts = self._settings + return (tuple(sorted(ts.seeds)), ts.replica_set_name, ts.fqdn, + ts.srv_service_name) + + def __eq__(self, other): + if isinstance(other, self.__class__): + return self.eq_props() == other.eq_props() + return NotImplemented + + def __hash__(self): + return hash(self.eq_props()) + class _ErrorContext(object): """An error with context for SDAM error handling.""" diff --git a/test/test_client.py b/test/test_client.py index 34922d1097..8c89a45481 100644 --- a/test/test_client.py +++ b/test/test_client.py @@ -676,15 +676,32 @@ def test_init_disconnected_with_auth(self): self.assertRaises(ConnectionFailure, c.pymongo_test.test.find_one) def test_equality(self): - c = connected(rs_or_single_client()) + seed = '%s:%s' % list(self.client._topology_settings.seeds)[0] + c = rs_or_single_client(seed, connect=False) + self.addCleanup(c.close) self.assertEqual(client_context.client, c) - # Explicitly test inequality self.assertFalse(client_context.client != c) + c = rs_or_single_client('invalid.com', connect=False) + self.addCleanup(c.close) + self.assertNotEqual(client_context.client, c) + self.assertTrue(client_context.client != c) + # Seeds differ: + self.assertNotEqual(MongoClient('a', connect=False), + MongoClient('b', connect=False)) + # Same seeds but out of order still compares equal: + self.assertEqual(MongoClient(['a', 'b', 'c'], connect=False), + MongoClient(['c', 'a', 'b'], connect=False)) + def test_hashable(self): - c = connected(rs_or_single_client()) + seed = '%s:%s' % list(self.client._topology_settings.seeds)[0] + c = rs_or_single_client(seed, connect=False) + self.addCleanup(c.close) self.assertIn(c, {client_context.client}) + c = rs_or_single_client('invalid.com', connect=False) + 
self.addCleanup(c.close) + self.assertNotIn(c, {client_context.client}) def test_host_w_port(self): with self.assertRaises(ValueError): @@ -1635,19 +1652,19 @@ def test_service_name_from_kwargs(self): client = MongoClient( 'mongodb+srv://user:password@test22.test.build.10gen.cc', srvServiceName='customname', connect=False) - self.assertEqual(client._topology_settings._srv_service_name, + self.assertEqual(client._topology_settings.srv_service_name, 'customname') client = MongoClient( 'mongodb+srv://user:password@test22.test.build.10gen.cc' '/?srvServiceName=shouldbeoverriden', srvServiceName='customname', connect=False) - self.assertEqual(client._topology_settings._srv_service_name, + self.assertEqual(client._topology_settings.srv_service_name, 'customname') client = MongoClient( 'mongodb+srv://user:password@test22.test.build.10gen.cc' '/?srvServiceName=customname', connect=False) - self.assertEqual(client._topology_settings._srv_service_name, + self.assertEqual(client._topology_settings.srv_service_name, 'customname') @unittest.skipUnless( From 9cf88cfdc102a54019eae0f93f473cbebc5b1c3c Mon Sep 17 00:00:00 2001 From: Julius Park Date: Wed, 17 Nov 2021 17:37:05 -0800 Subject: [PATCH 0014/1588] PYTHON-2773 Mockupdb test failures (#796) --- test/mockupdb/operations.py | 45 ++----- test/mockupdb/test_auth_recovering_member.py | 2 +- test/mockupdb/test_cluster_time.py | 51 ++----- test/mockupdb/test_cursor_namespace.py | 4 +- test/mockupdb/test_handshake.py | 31 ++--- test/mockupdb/test_legacy_crud.py | 126 ------------------ test/mockupdb/test_list_indexes.py | 23 ---- test/mockupdb/test_max_staleness.py | 2 +- test/mockupdb/test_mixed_version_sharded.py | 2 +- .../mockupdb/test_mongos_command_read_mode.py | 33 ++--- test/mockupdb/test_op_msg.py | 66 +-------- test/mockupdb/test_op_msg_read_preference.py | 9 +- test/mockupdb/test_projection.py | 56 -------- test/mockupdb/test_query_read_pref_sharded.py | 45 +------ test/mockupdb/test_reset_and_request_check.py | 9 +- test/mockupdb/test_slave_okay_single.py | 6 +- test/mockupdb/test_starting_from_overflow.py | 76 ----------- 17 files changed, 62 insertions(+), 524 deletions(-) delete mode 100755 test/mockupdb/test_legacy_crud.py delete mode 100644 test/mockupdb/test_projection.py delete mode 100644 test/mockupdb/test_starting_from_overflow.py diff --git a/test/mockupdb/operations.py b/test/mockupdb/operations.py index 2c8701ae83..9fb0ca16b6 100644 --- a/test/mockupdb/operations.py +++ b/test/mockupdb/operations.py @@ -15,6 +15,7 @@ from collections import namedtuple from mockupdb import * +from mockupdb import OpMsgReply from pymongo import ReadPreference __all__ = ['operations', 'upgrades'] @@ -51,11 +52,11 @@ sharded cluster (PYTHON-868). 
""" -not_master_reply_to_query = OpReply( +not_master_reply_to_query = OpMsgReply( {'$err': 'not master'}, flags=REPLY_FLAGS['QueryFailure']) -not_master_reply_to_command = OpReply(ok=0, errmsg='not master') +not_master_reply_to_command = OpMsgReply(ok=0, errmsg='not master') operations = [ Operation( @@ -76,20 +77,6 @@ reply={'cursor': {'id': 0, 'firstBatch': []}}, op_type='may-use-secondary', not_master=not_master_reply_to_command), - Operation( - 'mapreduce', - lambda client: client.db.collection.map_reduce( - 'function() {}', 'function() {}'), - reply={'result': {'db': 'db', 'collection': 'out_collection'}}, - op_type='must-use-primary', - not_master=not_master_reply_to_command), - Operation( - 'inline_mapreduce', - lambda client: client.db.collection.inline_map_reduce( - 'function() {}', 'function() {}', {'out': {'inline': 1}}), - reply={'results': []}, - op_type='may-use-secondary', - not_master=not_master_reply_to_command), Operation( 'options', lambda client: client.db.collection.options(), @@ -109,12 +96,6 @@ reply={'ok': 1}, op_type='always-use-secondary', not_master=OpReply(ok=0, errmsg='node is recovering')), - Operation( - 'listCollections', - lambda client: client.db.collection_names(), - reply={'cursor': {'id': 0, 'firstBatch': []}}, - op_type='must-use-primary', - not_master=not_master_reply_to_command), Operation( 'listIndexes', lambda client: client.db.collection.index_information(), @@ -130,19 +111,9 @@ ['name', 'function', 'old', 'new', 'wire_version']) upgrades = [ - Upgrade('index_information', - lambda client: client.db.collection.index_information(), - old=OpQuery(namespace='db.system.indexes'), - new=Command('listIndexes', 'collection', namespace='db'), - wire_version=3), - Upgrade('collection_names', - lambda client: client.db.collection_names(), - old=Command('aggregate', 'system.namespaces', namespace='db'), - new=Command('listCollections', namespace='db'), - wire_version=3), - Upgrade('options', - lambda client: client.db.collection.options(), - old=Command('aggregate', 'system.namespaces', namespace='db'), - new=Command('listCollections', namespace='db'), - wire_version=3), + Upgrade('estimated_document_count', + lambda client: client.db.collection.estimated_document_count(), + old=OpMsg('count', 'collection', namespace='db'), + new=OpMsg('aggregate', 'collection', namespace='db'), + wire_version=12), ] diff --git a/test/mockupdb/test_auth_recovering_member.py b/test/mockupdb/test_auth_recovering_member.py index 360c593a00..6fb983b37f 100755 --- a/test/mockupdb/test_auth_recovering_member.py +++ b/test/mockupdb/test_auth_recovering_member.py @@ -44,7 +44,7 @@ def test_auth_recovering_member(self): # error. If it raises AutoReconnect we know it actually tried the # server, and that's wrong. 
with self.assertRaises(ServerSelectionTimeoutError): - client.db.authenticate('user', 'password') + client.db.command("ping") if __name__ == '__main__': unittest.main() diff --git a/test/mockupdb/test_cluster_time.py b/test/mockupdb/test_cluster_time.py index fae6c3faae..858e32a0fa 100644 --- a/test/mockupdb/test_cluster_time.py +++ b/test/mockupdb/test_cluster_time.py @@ -19,8 +19,7 @@ from pymongo import (MongoClient, InsertOne, UpdateOne, - DeleteMany, - version_tuple) + DeleteMany) import unittest @@ -54,23 +53,6 @@ def cluster_time_conversation(self, callback, replies): reply['$clusterTime'] = {'clusterTime': cluster_time} request.reply(reply) - # Now test that no commands include $clusterTime with wire version 5, - # even though the isMaster reply still has $clusterTime. - server.cancel_responder(responder) - server.autoresponds('ismaster', - {'minWireVersion': 0, - 'maxWireVersion': 5, - '$clusterTime': {'clusterTime': cluster_time}}) - - client = MongoClient(server.uri) - self.addCleanup(client.close) - - with going(callback, client): - for reply in replies: - request = server.receives() - self.assertNotIn('$clusterTime', request) - request.reply(reply) - def test_command(self): def callback(client): client.db.command('ping') @@ -158,27 +140,16 @@ def test_monitor(self): request.reply(error) # PyMongo 3.11+ closes the monitoring connection on command errors. - if version_tuple >= (3, 11, -1): - # Fourth exchange: the Monitor closes the connection and runs the - # handshake on a new connection. - request = server.receives('ismaster') - # No $clusterTime in first ismaster, only in subsequent ones - self.assertNotIn('$clusterTime', request) - - # Reply without $clusterTime. - reply.pop('$clusterTime') - request.reply(reply) - else: - # Fourth exchange: the Monitor retry attempt uses the clusterTime - # from the previous isMaster error. - request = server.receives('ismaster') - self.assertEqual(request['$clusterTime']['clusterTime'], - cluster_time) - - cluster_time = Timestamp(cluster_time.time, - cluster_time.inc + 1) - error['$clusterTime'] = {'clusterTime': cluster_time} - request.reply(error) + + # Fourth exchange: the Monitor closes the connection and runs the + # handshake on a new connection. + request = server.receives('ismaster') + # No $clusterTime in first ismaster, only in subsequent ones + self.assertNotIn('$clusterTime', request) + + # Reply without $clusterTime. + reply.pop('$clusterTime') + request.reply(reply) # Fifth exchange: the Monitor attempt uses the clusterTime from # the previous isMaster error. 
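A note on the cluster-time arithmetic the tests above rely on, as a minimal
sketch: bson.Timestamp orders by (time, inc), so bumping inc is enough to
advance the $clusterTime that the driver gossips back on later commands::

    from bson import Timestamp

    older = Timestamp(0, 1)
    newer = Timestamp(older.time, older.inc + 1)
    assert newer > older  # the driver resends the greatest value seen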
diff --git a/test/mockupdb/test_cursor_namespace.py b/test/mockupdb/test_cursor_namespace.py index 600f7bca6d..10605601cf 100644 --- a/test/mockupdb/test_cursor_namespace.py +++ b/test/mockupdb/test_cursor_namespace.py @@ -15,7 +15,7 @@ """Test list_indexes with more than one batch.""" from mockupdb import going, MockupDB -from pymongo import MongoClient, version_tuple +from pymongo import MongoClient import unittest @@ -57,7 +57,6 @@ def op(): return list(self.client.test.collection.aggregate([])) self._test_cursor_namespace(op, 'aggregate') - @unittest.skipUnless(version_tuple >= (3, 11, -1), 'Fixed in pymongo 3.11') def test_find_cursor(self): def op(): return list(self.client.test.collection.find()) @@ -71,7 +70,6 @@ def op(): class TestKillCursorsNamespace(unittest.TestCase): @classmethod - @unittest.skipUnless(version_tuple >= (3, 12, -1), 'Fixed in pymongo 3.12') def setUpClass(cls): cls.server = MockupDB(auto_ismaster={'maxWireVersion': 6}) cls.server.run() diff --git a/test/mockupdb/test_handshake.py b/test/mockupdb/test_handshake.py index 9dbdec3057..621f01728f 100644 --- a/test/mockupdb/test_handshake.py +++ b/test/mockupdb/test_handshake.py @@ -14,7 +14,7 @@ from mockupdb import MockupDB, OpReply, OpMsg, absent, Command, go -from pymongo import MongoClient, version as pymongo_version, version_tuple +from pymongo import MongoClient, version as pymongo_version from pymongo.errors import OperationFailure import unittest @@ -33,7 +33,6 @@ def _check_handshake_data(request): class TestHandshake(unittest.TestCase): - @unittest.skipUnless(version_tuple >= (3, 4), "requires PyMongo 3.4") def test_client_handshake_data(self): primary, secondary = MockupDB(), MockupDB() for server in primary, secondary: @@ -72,20 +71,14 @@ def test_client_handshake_data(self): primary.receives('ismaster', 1, client=absent).ok(error_response) secondary.receives('ismaster', 1, client=absent).ok(error_response) - # PyMongo 3.11+ closes the monitoring connection on command errors. - if version_tuple >= (3, 11, -1): - # The heartbeat retry (on a new connection) does have client data. - heartbeat = primary.receives('ismaster') - _check_handshake_data(heartbeat) - heartbeat.ok(primary_response) - - heartbeat = secondary.receives('ismaster') - _check_handshake_data(heartbeat) - heartbeat.ok(secondary_response) - else: - # The heartbeat retry has no client data after a command failure. - primary.receives('ismaster', 1, client=absent).ok(error_response) - secondary.receives('ismaster', 1, client=absent).ok(error_response) + # The heartbeat retry (on a new connection) does have client data. + heartbeat = primary.receives('ismaster') + _check_handshake_data(heartbeat) + heartbeat.ok(primary_response) + + heartbeat = secondary.receives('ismaster') + _check_handshake_data(heartbeat) + heartbeat.ok(secondary_response) # Still no client data. primary.receives('ismaster', 1, client=absent).ok(primary_response) @@ -113,15 +106,11 @@ def test_client_handshake_data(self): request.ok(primary_response) else: # Command succeeds. 
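+                # (Every server supported by this driver version speaks
+                # OP_MSG, so the request always matches OpMsg and the
+                # version_tuple branch below is removed.)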
- if version_tuple >= (3, 7): - request.assert_matches(OpMsg('whatever')) - else: - request.assert_matches(Command('whatever')) + request.assert_matches(OpMsg('whatever')) request.ok() assert future() return - @unittest.skipUnless(version_tuple >= (3, 11, -1), "requires PyMongo 3.11") def test_client_handshake_saslSupportedMechs(self): server = MockupDB() server.run() diff --git a/test/mockupdb/test_legacy_crud.py b/test/mockupdb/test_legacy_crud.py deleted file mode 100755 index 508313dbbd..0000000000 --- a/test/mockupdb/test_legacy_crud.py +++ /dev/null @@ -1,126 +0,0 @@ -# Copyright 2017 MongoDB, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from bson.son import SON -from mockupdb import (MockupDB, going, OpInsert, OpMsg, absent, Command, - OP_MSG_FLAGS) -from pymongo import MongoClient, WriteConcern, version_tuple - -import unittest - - -class TestLegacyCRUD(unittest.TestCase): - def test_op_insert_manipulate_false(self): - # Test three aspects of legacy insert with manipulate=False: - # 1. The return value is None, [None], or [None, None] as appropriate. - # 2. _id is not set on the passed-in document object. - # 3. _id is not sent to server. - server = MockupDB(auto_ismaster=True) - server.run() - self.addCleanup(server.stop) - - client = MongoClient(server.uri) - self.addCleanup(client.close) - - coll = client.db.get_collection('coll', write_concern=WriteConcern(w=0)) - doc = {} - with going(coll.insert, doc, manipulate=False) as future: - if version_tuple >= (3, 7): - server.receives(OpMsg(SON([ - ("insert", coll.name), - ("ordered", True), - ("writeConcern", {"w": 0}), - ("documents", [{}])]), flags=OP_MSG_FLAGS['moreToCome'])) - else: - server.receives(OpInsert({'_id': absent})) - - self.assertFalse('_id' in doc) - self.assertIsNone(future()) - - docs = [{}] # One doc in a list. - with going(coll.insert, docs, manipulate=False) as future: - if version_tuple >= (3, 7): - # PyMongo 3.7 ordered bulk w:0 writes use implicit w:1. - request = server.receives() - request.assert_matches(OpMsg(SON([ - ("insert", coll.name), - ("ordered", True), - ("documents", [{}])]), flags=0)) - request.reply({"n": 1}) - else: - server.receives(OpInsert({'_id': absent})) - - self.assertFalse('_id' in docs[0]) - self.assertEqual(future(), [None]) - - docs = [{}, {}] # Two docs. - with going(coll.insert, docs, manipulate=False) as future: - if version_tuple >= (3, 7): - # PyMongo 3.7 ordered bulk w:0 writes use implicit w:1. - request = server.receives() - request.assert_matches(OpMsg(SON([ - ("insert", coll.name), - ("ordered", True), - ("documents", [{}, {}])]), flags=0)) - request.reply({"n": 2}) - else: - server.receives(OpInsert({'_id': absent}, {'_id': absent})) - - self.assertFalse('_id' in docs[0]) - self.assertFalse('_id' in docs[1]) - self.assertEqual(future(), [None, None]) - - def test_insert_command_manipulate_false(self): - # Test same three aspects as test_op_insert_manipulate_false does, - # with the "insert" command. 
- server = MockupDB(auto_ismaster={'maxWireVersion': 2}) - server.run() - self.addCleanup(server.stop) - - client = MongoClient(server.uri) - self.addCleanup(client.close) - - doc = {} - with going(client.db.coll.insert, doc, manipulate=False) as future: - r = server.receives(Command("insert", "coll", documents=[{}])) - # MockupDB doesn't understand "absent" in subdocuments yet. - self.assertFalse('_id' in r.doc['documents'][0]) - r.ok() - - self.assertFalse('_id' in doc) - self.assertIsNone(future()) - - docs = [{}] # One doc in a list. - with going(client.db.coll.insert, docs, manipulate=False) as future: - r = server.receives(Command("insert", "coll", documents=[{}])) - self.assertFalse('_id' in r.doc['documents'][0]) - r.ok() - - self.assertFalse('_id' in docs[0]) - self.assertEqual(future(), [None]) - - docs = [{}, {}] # Two docs. - with going(client.db.coll.insert, docs, manipulate=False) as future: - r = server.receives(Command("insert", "coll", documents=[{}, {}])) - self.assertFalse('_id' in r.doc['documents'][0]) - self.assertFalse('_id' in r.doc['documents'][1]) - r.ok() - - self.assertFalse('_id' in docs[0]) - self.assertFalse('_id' in docs[1]) - self.assertEqual(future(), [None, None]) - - -if __name__ == '__main__': - unittest.main() diff --git a/test/mockupdb/test_list_indexes.py b/test/mockupdb/test_list_indexes.py index 7483e80df2..b4787ff624 100644 --- a/test/mockupdb/test_list_indexes.py +++ b/test/mockupdb/test_list_indexes.py @@ -23,29 +23,6 @@ class TestListIndexes(unittest.TestCase): - def test_list_indexes_opquery(self): - server = MockupDB(auto_ismaster={'maxWireVersion': 3}) - server.run() - self.addCleanup(server.stop) - client = MongoClient(server.uri) - self.addCleanup(client.close) - with going(client.test.collection.list_indexes) as cursor: - request = server.receives( - listIndexes='collection', namespace='test') - request.reply({'cursor': { - 'firstBatch': [{'name': 'index_0'}], - 'id': 123}}) - - with going(list, cursor()) as indexes: - request = server.receives(OpGetMore, - namespace='test.collection', - cursor_id=123) - - request.reply([{'name': 'index_1'}], cursor_id=0) - - self.assertEqual([{'name': 'index_0'}, {'name': 'index_1'}], indexes()) - for index_info in indexes(): - self.assertIsInstance(index_info, SON) def test_list_indexes_command(self): server = MockupDB(auto_ismaster={'maxWireVersion': 6}) diff --git a/test/mockupdb/test_max_staleness.py b/test/mockupdb/test_max_staleness.py index 89d17a133f..9bd65a1764 100644 --- a/test/mockupdb/test_max_staleness.py +++ b/test/mockupdb/test_max_staleness.py @@ -21,7 +21,7 @@ class TestMaxStalenessMongos(unittest.TestCase): def test_mongos(self): mongos = MockupDB() - mongos.autoresponds('ismaster', maxWireVersion=5, + mongos.autoresponds('ismaster', maxWireVersion=6, ismaster=True, msg='isdbgrid') mongos.run() self.addCleanup(mongos.stop) diff --git a/test/mockupdb/test_mixed_version_sharded.py b/test/mockupdb/test_mixed_version_sharded.py index 9f57dd0f43..d13af3562b 100644 --- a/test/mockupdb/test_mixed_version_sharded.py +++ b/test/mockupdb/test_mixed_version_sharded.py @@ -21,7 +21,7 @@ except ImportError: from Queue import Queue -from mockupdb import MockupDB, go +from mockupdb import MockupDB, go, OpMsg from pymongo import MongoClient import unittest diff --git a/test/mockupdb/test_mongos_command_read_mode.py b/test/mockupdb/test_mongos_command_read_mode.py index 1fe2ea5869..ccd40c2cd7 100644 --- a/test/mockupdb/test_mongos_command_read_mode.py +++ b/test/mockupdb/test_mongos_command_read_mode.py 
@@ -15,7 +15,7 @@ import itertools from bson import SON -from mockupdb import MockupDB, going +from mockupdb import MockupDB, going, OpMsg, go from pymongo import MongoClient, ReadPreference from pymongo.read_preferences import (make_read_preference, read_pref_mode_from_name, @@ -29,7 +29,7 @@ class TestMongosCommandReadMode(unittest.TestCase): def test_aggregate(self): server = MockupDB() server.autoresponds('ismaster', ismaster=True, msg='isdbgrid', - minWireVersion=2, maxWireVersion=5) + minWireVersion=2, maxWireVersion=6) self.addCleanup(server.stop) server.run() @@ -39,18 +39,16 @@ def test_aggregate(self): with going(collection.aggregate, []): command = server.receives(aggregate='collection', pipeline=[]) self.assertFalse(command.slave_ok, 'SlaveOkay set') - self.assertNotIn('$readPreference', command) command.ok(result=[{}]) secondary_collection = collection.with_options( read_preference=ReadPreference.SECONDARY) with going(secondary_collection.aggregate, []): - command = server.receives( - {'$query': SON([('aggregate', 'collection'), - ('pipeline', []), - ('cursor', {})]), - '$readPreference': {'mode': 'secondary'}}) + + command = server.receives(OpMsg({"aggregate": "collection", + "pipeline": [], + '$readPreference': {'mode': 'secondary'}})) command.ok(result=[{}]) self.assertTrue(command.slave_ok, 'SlaveOkay not set') @@ -61,37 +59,28 @@ def test(self): self.addCleanup(server.stop) server.run() server.autoresponds('ismaster', ismaster=True, msg='isdbgrid', - minWireVersion=2, maxWireVersion=5) + minWireVersion=2, maxWireVersion=6) pref = make_read_preference(read_pref_mode_from_name(mode), tag_sets=None) client = MongoClient(server.uri, read_preference=pref) self.addCleanup(client.close) - with going(operation.function, client) as future: + + with going(operation.function, client): request = server.receive() request.reply(operation.reply) - future() # No error. 
- if operation.op_type == 'always-use-secondary': self.assertEqual(ReadPreference.SECONDARY.document, request.doc.get('$readPreference')) slave_ok = mode != 'primary' - self.assertIn('$query', request.doc) elif operation.op_type == 'must-use-primary': - self.assertNotIn('$readPreference', request) - self.assertNotIn('$query', request.doc) slave_ok = False elif operation.op_type == 'may-use-secondary': slave_ok = mode != 'primary' - if mode in ('primary', 'secondaryPreferred'): - self.assertNotIn('$readPreference', request) - self.assertNotIn('$query', request.doc) - else: - self.assertEqual(pref.document, - request.doc.get('$readPreference')) - self.assertIn('$query', request.doc) + self.assertEqual(pref.document, + request.doc.get('$readPreference')) else: self.fail('unrecognized op_type %r' % operation.op_type) diff --git a/test/mockupdb/test_op_msg.py b/test/mockupdb/test_op_msg.py index dc574226bc..35e70cebfc 100755 --- a/test/mockupdb/test_op_msg.py +++ b/test/mockupdb/test_op_msg.py @@ -15,7 +15,7 @@ from collections import namedtuple from mockupdb import MockupDB, going, OpMsg, OpMsgReply, OP_MSG_FLAGS -from pymongo import MongoClient, WriteConcern, version_tuple +from pymongo import MongoClient, WriteConcern from pymongo.operations import InsertOne, UpdateOne, DeleteOne from pymongo.cursor import CursorType @@ -125,54 +125,6 @@ request=OpMsg({"delete": "coll"}, flags=OP_MSG_FLAGS['moreToCome']), reply=None), # Legacy methods - Operation( - 'insert', - lambda coll: coll.insert({}), - request=OpMsg({"insert": "coll"}, flags=0), - reply={'ok': 1, 'n': 1}), - Operation( - 'insert-w0', - lambda coll: coll.with_options( - write_concern=WriteConcern(w=0)).insert({}), - request=OpMsg({"insert": "coll"}, flags=OP_MSG_FLAGS['moreToCome']), - reply=None), - Operation( - 'insert-w0-argument', - lambda coll: coll.insert({}, w=0), - request=OpMsg({"insert": "coll"}, flags=OP_MSG_FLAGS['moreToCome']), - reply=None), - Operation( - 'update', - lambda coll: coll.update({"_id": 1}, {"new": 1}), - request=OpMsg({"update": "coll"}, flags=0), - reply={'ok': 1, 'n': 1, 'nModified': 1}), - Operation( - 'update-w0', - lambda coll: coll.with_options( - write_concern=WriteConcern(w=0)).update({"_id": 1}, {"new": 1}), - request=OpMsg({"update": "coll"}, flags=OP_MSG_FLAGS['moreToCome']), - reply=None), - Operation( - 'update-w0-argument', - lambda coll: coll.update({"_id": 1}, {"new": 1}, w=0), - request=OpMsg({"update": "coll"}, flags=OP_MSG_FLAGS['moreToCome']), - reply=None), - Operation( - 'remove', - lambda coll: coll.remove({"_id": 1}), - request=OpMsg({"delete": "coll"}, flags=0), - reply={'ok': 1, 'n': 1}), - Operation( - 'remove-w0', - lambda coll: coll.with_options( - write_concern=WriteConcern(w=0)).remove({"_id": 1}), - request=OpMsg({"delete": "coll"}, flags=OP_MSG_FLAGS['moreToCome']), - reply=None), - Operation( - 'remove-w0-argument', - lambda coll: coll.remove({"_id": 1}, w=0), - request=OpMsg({"delete": "coll"}, flags=OP_MSG_FLAGS['moreToCome']), - reply=None), Operation( 'bulk_write_insert', lambda coll: coll.bulk_write([InsertOne({}), InsertOne({})]), @@ -303,8 +255,7 @@ def _test_operation(self, op): replies = [op.reply] for expected_request in expected_requests: - request = self.server.receives() - request.assert_matches(expected_request) + request = self.server.receives(expected_request) reply = None if replies: reply = replies.pop(0) @@ -317,24 +268,21 @@ def _test_operation(self, op): future() # No error. 
-def operation_test(op, decorator): - @decorator() +def operation_test(op): def test(self): self._test_operation(op) return test -def create_tests(ops, decorator): +def create_tests(ops): for op in ops: test_name = "test_op_msg_%s" % (op.name,) - setattr(TestOpMsg, test_name, operation_test(op, decorator)) + setattr(TestOpMsg, test_name, operation_test(op)) -create_tests(operations, lambda: unittest.skipUnless( - version_tuple >= (3, 7), "requires PyMongo 3.7")) +create_tests(operations) -create_tests(operations_312, lambda: unittest.skipUnless( - version_tuple >= (3, 12), "requires PyMongo 3.12")) +create_tests(operations_312) if __name__ == '__main__': unittest.main() diff --git a/test/mockupdb/test_op_msg_read_preference.py b/test/mockupdb/test_op_msg_read_preference.py index 925a00f6a5..ba359a5e05 100644 --- a/test/mockupdb/test_op_msg_read_preference.py +++ b/test/mockupdb/test_op_msg_read_preference.py @@ -16,7 +16,7 @@ import itertools from mockupdb import MockupDB, going, CommandBase -from pymongo import MongoClient, ReadPreference, version_tuple +from pymongo import MongoClient, ReadPreference from pymongo.read_preferences import (make_read_preference, read_pref_mode_from_name, _MONGOS_MODES) @@ -31,8 +31,6 @@ class OpMsgReadPrefBase(unittest.TestCase): @classmethod def setUpClass(cls): super(OpMsgReadPrefBase, cls).setUpClass() - if version_tuple < (3, 7): - raise unittest.SkipTest("requires PyMongo 3.7") @classmethod def add_test(cls, mode, test_name, test): @@ -159,11 +157,10 @@ def test(self): expected_pref = pref else: self.fail('unrecognized op_type %r' % operation.op_type) - # For single mongod we send primaryPreferred instead of primary. - if expected_pref == ReadPreference.PRIMARY and self.single_mongod: + if (expected_pref == ReadPreference.PRIMARY and self.single_mongod + and operation.name != "command"): expected_pref = ReadPreference.PRIMARY_PREFERRED - with going(operation.function, client) as future: request = expected_server.receive() request.reply(operation.reply) diff --git a/test/mockupdb/test_projection.py b/test/mockupdb/test_projection.py deleted file mode 100644 index 0b74c22cbd..0000000000 --- a/test/mockupdb/test_projection.py +++ /dev/null @@ -1,56 +0,0 @@ -# Copyright 2018-present MongoDB, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""PyMongo shouldn't append projection fields to "find" command, PYTHON-1479.""" - -from bson import SON -from mockupdb import Command, MockupDB, OpQuery, going -from pymongo import MongoClient - -import unittest - - -class TestProjection(unittest.TestCase): - def test_projection(self): - q = {} - fields = {'foo': True} - - # OP_QUERY, - server = MockupDB(auto_ismaster=True, - min_wire_version=0, max_wire_version=3) - server.run() - self.addCleanup(server.stop) - client = MongoClient(server.uri) - cursor = client.test.collection.find(q, fields) - with going(next, cursor): - request = server.receives(OpQuery(q, fields=fields)) - request.reply([], cursor_id=0) - - # "find" command. 
- server = MockupDB(auto_ismaster=True, - min_wire_version=0, max_wire_version=4) - server.run() - self.addCleanup(server.stop) - client = MongoClient(server.uri) - cursor = client.test.collection.find(q, fields) - cmd = Command(SON([('find', 'collection'), ('filter', q), - ('projection', fields)])) - - with going(next, cursor): - request = server.receives(cmd) - request.ok(cursor={'id': 0, 'firstBatch': []}) - - -if __name__ == '__main__': - unittest.main() diff --git a/test/mockupdb/test_query_read_pref_sharded.py b/test/mockupdb/test_query_read_pref_sharded.py index 033cdeff19..21813f7b8e 100644 --- a/test/mockupdb/test_query_read_pref_sharded.py +++ b/test/mockupdb/test_query_read_pref_sharded.py @@ -15,59 +15,18 @@ """Test PyMongo query and read preference with a sharded cluster.""" from bson import SON -from pymongo import MongoClient, version_tuple +from pymongo import MongoClient from pymongo.read_preferences import (Primary, PrimaryPreferred, Secondary, SecondaryPreferred, Nearest) -from mockupdb import MockupDB, going, Command, OpMsg +from mockupdb import MockupDB, going, OpMsg import unittest class TestQueryAndReadModeSharded(unittest.TestCase): - def test_query_and_read_mode_sharded_op_query(self): - server = MockupDB() - server.autoresponds('ismaster', ismaster=True, msg='isdbgrid', - minWireVersion=2, maxWireVersion=5) - server.run() - self.addCleanup(server.stop) - - client = MongoClient(server.uri) - self.addCleanup(client.close) - - modes_without_query = ( - Primary(), - SecondaryPreferred(),) - - modes_with_query = ( - PrimaryPreferred(), - Secondary(), - Nearest(), - SecondaryPreferred([{'tag': 'value'}]),) - - find_command = SON([('find', 'test'), ('filter', {'a': 1})]) - for query in ({'a': 1}, {'$query': {'a': 1}},): - for mode in modes_with_query + modes_without_query: - collection = client.db.get_collection('test', - read_preference=mode) - cursor = collection.find(query.copy()) - with going(next, cursor): - request = server.receives() - if mode in modes_without_query: - # Filter is hoisted out of $query. - request.assert_matches(Command(find_command)) - self.assertFalse('$readPreference' in request) - else: - # Command is nested in $query. - request.assert_matches(Command( - SON([('$query', find_command), - ('$readPreference', mode.document)]))) - - request.replies({'cursor': {'id': 0, 'firstBatch': [{}]}}) - - @unittest.skipUnless(version_tuple >= (3, 7), "requires PyMongo 3.7") def test_query_and_read_mode_sharded_op_msg(self): """Test OP_MSG sends non-primary $readPreference and never $query.""" server = MockupDB() diff --git a/test/mockupdb/test_reset_and_request_check.py b/test/mockupdb/test_reset_and_request_check.py index 27b55f3180..86c2085e39 100755 --- a/test/mockupdb/test_reset_and_request_check.py +++ b/test/mockupdb/test_reset_and_request_check.py @@ -18,7 +18,7 @@ from mockupdb import MockupDB, going, wait_until from pymongo.server_type import SERVER_TYPE from pymongo.errors import ConnectionFailure -from pymongo import MongoClient, version_tuple +from pymongo import MongoClient import unittest from operations import operations @@ -44,8 +44,7 @@ def responder(request): kwargs = {'socketTimeoutMS': 100} # Disable retryable reads when pymongo supports it. 
- if version_tuple[:3] >= (3, 9): - kwargs['retryReads'] = False + kwargs['retryReads'] = False self.client = MongoClient(self.server.uri, **kwargs) wait_until(lambda: self.client.nodes, 'connect to standalone') @@ -103,8 +102,9 @@ def _test_not_master(self, operation): with self.assertRaises(ConnectionFailure): with going(operation.function, self.client): - self.server.receives().replies(operation.not_master) + request = self.server.receives() before = self.ismaster_time + request.replies(operation.not_master) time.sleep(1) # Server is rediscovered. @@ -139,6 +139,7 @@ def generate_reset_tests(): test.__name__ = test_name setattr(TestResetAndRequestCheck, test_name, test) + generate_reset_tests() if __name__ == '__main__': diff --git a/test/mockupdb/test_slave_okay_single.py b/test/mockupdb/test_slave_okay_single.py index 4a0725a869..83c0f925a4 100644 --- a/test/mockupdb/test_slave_okay_single.py +++ b/test/mockupdb/test_slave_okay_single.py @@ -67,11 +67,7 @@ def test(self): request = self.server.receive() request.reply(operation.reply) - self.assertEqual(topology_type_name(client), 'Single') - if slave_ok: - self.assertTrue(request.slave_ok, 'SlaveOkay not set') - else: - self.assertFalse(request.slave_ok, 'SlaveOkay set') + self.assertIn(topology_type_name(client), ['Sharded', 'Single']) return test diff --git a/test/mockupdb/test_starting_from_overflow.py b/test/mockupdb/test_starting_from_overflow.py deleted file mode 100644 index d94cab0ff3..0000000000 --- a/test/mockupdb/test_starting_from_overflow.py +++ /dev/null @@ -1,76 +0,0 @@ -# Copyright 2015 MongoDB, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -"""Test that PyMongo ignores the startingFrom field, PYTHON-945.""" - -from mockupdb import going, MockupDB, OpGetMore, OpQuery, Command -from pymongo import MongoClient - -import unittest - - -class TestStartingFromOverflow(unittest.TestCase): - def test_query(self): - server = MockupDB(auto_ismaster=True, - min_wire_version=0, max_wire_version=3) - server.run() - self.addCleanup(server.stop) - client = MongoClient(server.uri) - cursor = client.test.collection.find() - with going(list, cursor) as docs: - request = server.receives(OpQuery) - request.reply({'a': 1}, cursor_id=123, starting_from=-7) - request = server.receives(OpGetMore, cursor_id=123) - request.reply({'a': 2}, starting_from=-3, cursor_id=0) - - self.assertEqual([{'a': 1}, {'a': 2}], docs()) - - def test_aggregate(self): - server = MockupDB(auto_ismaster={'maxWireVersion': 3}) - server.run() - self.addCleanup(server.stop) - client = MongoClient(server.uri) - with going(client.test.collection.aggregate, []) as cursor: - request = server.receives(Command) - request.reply({'cursor': { - 'id': 123, - 'firstBatch': [{'a': 1}]}}) - - with going(list, cursor()) as docs: - request = server.receives(OpGetMore, cursor_id=123) - request.reply({'a': 2}, starting_from=-3, cursor_id=0) - - self.assertEqual([{'a': 1}, {'a': 2}], docs()) - - def test_find_command(self): - server = MockupDB(auto_ismaster={'maxWireVersion': 4}) - server.run() - self.addCleanup(server.stop) - client = MongoClient(server.uri) - with going(list, client.test.collection.find()) as docs: - server.receives(Command).reply({'cursor': { - 'id': 123, - 'firstBatch': [{'a': 1}]}}) - - request = server.receives(Command("getMore", 123)) - request.reply({'cursor': { - 'id': 0, - 'nextBatch': [{'a': 2}]}}, - starting_from=-3) - - self.assertEqual([{'a': 1}, {'a': 2}], docs()) - - -if __name__ == '__main__': - unittest.main() From 5b8b09ac4f925d0cbaa9532aba8b66a572ccbd03 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Wed, 17 Nov 2021 18:18:41 -0800 Subject: [PATCH 0015/1588] PYTHON-3020 Properly mark server unknown after "not master" errors without a code (#797) Fix prefer-error-code SDAM test. --- .evergreen/run-mockupdb-tests.sh | 2 +- pymongo/topology.py | 4 +++- .../errors/prefer-error-code.json | 4 ++-- test/mockupdb/operations.py | 18 +++++++----------- 4 files changed, 13 insertions(+), 15 deletions(-) diff --git a/.evergreen/run-mockupdb-tests.sh b/.evergreen/run-mockupdb-tests.sh index a0b67302a4..a76ed6316f 100755 --- a/.evergreen/run-mockupdb-tests.sh +++ b/.evergreen/run-mockupdb-tests.sh @@ -8,7 +8,7 @@ set -o errexit ${PYTHON_BINARY} setup.py clean createvirtualenv ${PYTHON_BINARY} mockuptests -trap "deactivate, rm -rf mockuptests" EXIT HUP +trap "deactivate; rm -rf mockuptests" EXIT HUP # Install PyMongo from git clone so mockup-tests don't # download it from pypi. diff --git a/pymongo/topology.py b/pymongo/topology.py index 9139c1492e..6f26cff617 100644 --- a/pymongo/topology.py +++ b/pymongo/topology.py @@ -633,7 +633,9 @@ def _handle_error(self, address, err_ctx): if hasattr(error, 'code'): err_code = error.code else: - err_code = error.details.get('code', -1) + # Default error code if one does not exist. + default = 10107 if isinstance(error, NotPrimaryError) else None + err_code = error.details.get('code', default) if err_code in helpers._NOT_PRIMARY_CODES: is_shutting_down = err_code in helpers._SHUTDOWN_CODES # Mark server Unknown, clear the pool, and request check. 
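To make the new behavior concrete, here is a minimal, self-contained sketch of
the classification logic the topology.py hunk above introduces. It is
illustrative only: ``NotPrimaryError`` below is a stand-in for
``pymongo.errors.NotPrimaryError``, and the two code sets are assumptions
standing in for ``helpers._NOT_PRIMARY_CODES`` and ``helpers._SHUTDOWN_CODES``::

    # Hypothetical stand-ins for helpers._NOT_PRIMARY_CODES and
    # helpers._SHUTDOWN_CODES; the real sets live in pymongo/helpers.py.
    NOT_PRIMARY_CODES = frozenset({10107, 13435, 11600, 11602, 189, 91})
    SHUTDOWN_CODES = frozenset({11600, 91})

    class NotPrimaryError(Exception):
        """Stand-in for pymongo.errors.NotPrimaryError."""
        def __init__(self, details):
            super().__init__(details.get('errmsg'))
            self.details = details

    def classify(error):
        """Return (mark_server_unknown, is_shutting_down) for an error."""
        if hasattr(error, 'code'):
            err_code = error.code
        else:
            # Default to 10107 (NotWritablePrimary) so that "not master"
            # replies without a code still mark the server Unknown.
            default = 10107 if isinstance(error, NotPrimaryError) else None
            err_code = error.details.get('code', default)
        not_primary = err_code in NOT_PRIMARY_CODES
        return not_primary, not_primary and err_code in SHUTDOWN_CODES

    # Before this change the fallback code was -1, so an errmsg-only
    # "not master" reply never matched and the server stayed selectable.
    print(classify(NotPrimaryError({'ok': 0, 'errmsg': 'not master'})))
    # -> (True, False)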
diff --git a/test/discovery_and_monitoring/errors/prefer-error-code.json b/test/discovery_and_monitoring/errors/prefer-error-code.json index 21d123f429..eb00b69613 100644 --- a/test/discovery_and_monitoring/errors/prefer-error-code.json +++ b/test/discovery_and_monitoring/errors/prefer-error-code.json @@ -52,7 +52,7 @@ } }, { - "description": "errmsg \"not writable primary\" gets ignored when error code exists", + "description": "errmsg \"not master\" gets ignored when error code exists", "applicationErrors": [ { "address": "a:27017", @@ -61,7 +61,7 @@ "type": "command", "response": { "ok": 0, - "errmsg": "not writable primary", + "errmsg": "not master", "code": 1 } } diff --git a/test/mockupdb/operations.py b/test/mockupdb/operations.py index 9fb0ca16b6..47890f80ee 100644 --- a/test/mockupdb/operations.py +++ b/test/mockupdb/operations.py @@ -52,11 +52,7 @@ sharded cluster (PYTHON-868). """ -not_master_reply_to_query = OpMsgReply( - {'$err': 'not master'}, - flags=REPLY_FLAGS['QueryFailure']) - -not_master_reply_to_command = OpMsgReply(ok=0, errmsg='not master') +not_master_reply = OpMsgReply(ok=0, errmsg='not master') operations = [ Operation( @@ -64,31 +60,31 @@ lambda client: client.db.collection.find_one(), reply={'cursor': {'id': 0, 'firstBatch': []}}, op_type='may-use-secondary', - not_master=not_master_reply_to_query), + not_master=not_master_reply), Operation( 'count', lambda client: client.db.collection.count_documents({}), reply={'n': 1}, op_type='may-use-secondary', - not_master=not_master_reply_to_command), + not_master=not_master_reply), Operation( 'aggregate', lambda client: client.db.collection.aggregate([]), reply={'cursor': {'id': 0, 'firstBatch': []}}, op_type='may-use-secondary', - not_master=not_master_reply_to_command), + not_master=not_master_reply), Operation( 'options', lambda client: client.db.collection.options(), reply={'cursor': {'id': 0, 'firstBatch': []}}, op_type='must-use-primary', - not_master=not_master_reply_to_command), + not_master=not_master_reply), Operation( 'command', lambda client: client.db.command('foo'), reply={'ok': 1}, op_type='must-use-primary', # Ignores client's read preference. - not_master=not_master_reply_to_command), + not_master=not_master_reply), Operation( 'secondary command', lambda client: @@ -101,7 +97,7 @@ lambda client: client.db.collection.index_information(), reply={'cursor': {'id': 0, 'firstBatch': []}}, op_type='must-use-primary', - not_master=not_master_reply_to_command), + not_master=not_master_reply), ] From 79659063c592726a36c7a1770b5c9b8ae78d7b04 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Thu, 18 Nov 2021 16:28:02 -0800 Subject: [PATCH 0016/1588] PYTHON-3021 Send primaryPreferred when connected to standalone servers (#799) --- pymongo/mongo_client.py | 2 +- test/mockupdb/test_op_msg_read_preference.py | 7 ++----- 2 files changed, 3 insertions(+), 6 deletions(-) diff --git a/pymongo/mongo_client.py b/pymongo/mongo_client.py index 90e6a7706f..dd9dd6d33a 100644 --- a/pymongo/mongo_client.py +++ b/pymongo/mongo_client.py @@ -1170,8 +1170,8 @@ def _socket_for_reads(self, read_preference, session): # for topology type Single." # Thread safe: if the type is single it cannot change. 
topology = self._get_topology() - single = topology.description.topology_type == TOPOLOGY_TYPE.Single server = self._select_server(read_preference, session) + single = topology.description.topology_type == TOPOLOGY_TYPE.Single with self._get_socket(server, session) as sock_info: secondary_ok = (single and not sock_info.is_mongos) or ( diff --git a/test/mockupdb/test_op_msg_read_preference.py b/test/mockupdb/test_op_msg_read_preference.py index ba359a5e05..6ecc229ea1 100644 --- a/test/mockupdb/test_op_msg_read_preference.py +++ b/test/mockupdb/test_op_msg_read_preference.py @@ -158,15 +158,12 @@ def test(self): else: self.fail('unrecognized op_type %r' % operation.op_type) # For single mongod we send primaryPreferred instead of primary. - if (expected_pref == ReadPreference.PRIMARY and self.single_mongod - and operation.name != "command"): + if expected_pref == ReadPreference.PRIMARY and self.single_mongod: expected_pref = ReadPreference.PRIMARY_PREFERRED - with going(operation.function, client) as future: + with going(operation.function, client): request = expected_server.receive() request.reply(operation.reply) - future() # No error. - self.assertEqual(expected_pref.document, request.doc.get('$readPreference')) self.assertNotIn('$query', request.doc) From cddae7ae938b3d6493435778223fb03240a6ccaf Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Thu, 18 Nov 2021 16:28:42 -0800 Subject: [PATCH 0017/1588] PYTHON-2919 Remove MongoDB 2.6-3.4 from performance testing (#798) --- .evergreen/perf.yml | 24 ------------------------ 1 file changed, 24 deletions(-) diff --git a/.evergreen/perf.yml b/.evergreen/perf.yml index 3079eb9b0e..70e83ff582 100644 --- a/.evergreen/perf.yml +++ b/.evergreen/perf.yml @@ -200,28 +200,6 @@ post: - func: "cleanup" tasks: - - name: "perf-3.0-standalone" - tags: ["perf"] - commands: - - func: "bootstrap mongo-orchestration" - vars: - VERSION: "3.0" - TOPOLOGY: "server" - - func: "run perf tests" - - func: "attach benchmark test results" - - func: "send dashboard data" - - - name: "perf-3.4-standalone" - tags: ["perf"] - commands: - - func: "bootstrap mongo-orchestration" - vars: - VERSION: "3.4" - TOPOLOGY: "server" - - func: "run perf tests" - - func: "attach benchmark test results" - - func: "send dashboard data" - - name: "perf-3.6-standalone" tags: ["perf"] commands: @@ -273,8 +251,6 @@ buildvariants: batchtime: 10080 # 7 days run_on: centos6-perf tasks: - - name: "perf-3.0-standalone" - - name: "perf-3.4-standalone" - name: "perf-3.6-standalone" - name: "perf-4.0-standalone" - name: "perf-4.2-standalone" From 2af521ec03eab88ad5c9ebef0f73025c00489604 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Fri, 19 Nov 2021 12:15:23 -0800 Subject: [PATCH 0018/1588] PYTHON-2984 Fix retry behavior for bulk write writeConcernError (#800) --- pymongo/bulk.py | 8 +- pymongo/helpers.py | 21 +- .../{ => legacy}/bulkWrite-errorLabels.json | 0 .../{ => legacy}/bulkWrite-serverErrors.json | 8 +- .../{ => legacy}/bulkWrite.json | 0 .../{ => legacy}/deleteMany.json | 0 .../{ => legacy}/deleteOne-errorLabels.json | 0 .../{ => legacy}/deleteOne-serverErrors.json | 8 +- .../{ => legacy}/deleteOne.json | 0 .../findOneAndDelete-errorLabels.json | 0 .../findOneAndDelete-serverErrors.json | 8 +- .../{ => legacy}/findOneAndDelete.json | 0 .../findOneAndReplace-errorLabels.json | 0 .../findOneAndReplace-serverErrors.json | 8 +- .../{ => legacy}/findOneAndReplace.json | 0 .../findOneAndUpdate-errorLabels.json | 0 .../findOneAndUpdate-serverErrors.json | 8 +- .../{ => 
legacy}/findOneAndUpdate.json | 0 .../{ => legacy}/insertMany-errorLabels.json | 0 .../{ => legacy}/insertMany-serverErrors.json | 8 +- .../{ => legacy}/insertMany.json | 0 .../{ => legacy}/insertOne-errorLabels.json | 0 .../{ => legacy}/insertOne-serverErrors.json | 40 ++-- .../{ => legacy}/insertOne.json | 0 .../{ => legacy}/replaceOne-errorLabels.json | 0 .../{ => legacy}/replaceOne-serverErrors.json | 8 +- .../{ => legacy}/replaceOne.json | 0 .../{ => legacy}/updateMany.json | 0 .../{ => legacy}/updateOne-errorLabels.json | 0 .../{ => legacy}/updateOne-serverErrors.json | 8 +- .../{ => legacy}/updateOne.json | 0 .../unified/bulkWrite-serverErrors.json | 205 ++++++++++++++++++ .../unified/insertOne-serverErrors.json | 173 +++++++++++++++ test/test_retryable_writes.py | 2 +- test/test_retryable_writes_unified.py | 33 +++ test/transactions/legacy/error-labels.json | 8 +- .../legacy/mongos-recovery-token.json | 8 +- test/transactions/legacy/retryable-abort.json | 24 +- .../transactions/legacy/retryable-commit.json | 24 +- 39 files changed, 515 insertions(+), 95 deletions(-) rename test/retryable_writes/{ => legacy}/bulkWrite-errorLabels.json (100%) rename test/retryable_writes/{ => legacy}/bulkWrite-serverErrors.json (97%) rename test/retryable_writes/{ => legacy}/bulkWrite.json (100%) rename test/retryable_writes/{ => legacy}/deleteMany.json (100%) rename test/retryable_writes/{ => legacy}/deleteOne-errorLabels.json (100%) rename test/retryable_writes/{ => legacy}/deleteOne-serverErrors.json (95%) rename test/retryable_writes/{ => legacy}/deleteOne.json (100%) rename test/retryable_writes/{ => legacy}/findOneAndDelete-errorLabels.json (100%) rename test/retryable_writes/{ => legacy}/findOneAndDelete-serverErrors.json (95%) rename test/retryable_writes/{ => legacy}/findOneAndDelete.json (100%) rename test/retryable_writes/{ => legacy}/findOneAndReplace-errorLabels.json (100%) rename test/retryable_writes/{ => legacy}/findOneAndReplace-serverErrors.json (96%) rename test/retryable_writes/{ => legacy}/findOneAndReplace.json (100%) rename test/retryable_writes/{ => legacy}/findOneAndUpdate-errorLabels.json (100%) rename test/retryable_writes/{ => legacy}/findOneAndUpdate-serverErrors.json (96%) rename test/retryable_writes/{ => legacy}/findOneAndUpdate.json (100%) rename test/retryable_writes/{ => legacy}/insertMany-errorLabels.json (100%) rename test/retryable_writes/{ => legacy}/insertMany-serverErrors.json (96%) rename test/retryable_writes/{ => legacy}/insertMany.json (100%) rename test/retryable_writes/{ => legacy}/insertOne-errorLabels.json (100%) rename test/retryable_writes/{ => legacy}/insertOne-serverErrors.json (97%) rename test/retryable_writes/{ => legacy}/insertOne.json (100%) rename test/retryable_writes/{ => legacy}/replaceOne-errorLabels.json (100%) rename test/retryable_writes/{ => legacy}/replaceOne-serverErrors.json (96%) rename test/retryable_writes/{ => legacy}/replaceOne.json (100%) rename test/retryable_writes/{ => legacy}/updateMany.json (100%) rename test/retryable_writes/{ => legacy}/updateOne-errorLabels.json (100%) rename test/retryable_writes/{ => legacy}/updateOne-serverErrors.json (96%) rename test/retryable_writes/{ => legacy}/updateOne.json (100%) create mode 100644 test/retryable_writes/unified/bulkWrite-serverErrors.json create mode 100644 test/retryable_writes/unified/insertOne-serverErrors.json create mode 100644 test/test_retryable_writes_unified.py diff --git a/pymongo/bulk.py b/pymongo/bulk.py index 829b482c95..1bb8edf943 100644 --- a/pymongo/bulk.py 
+++ b/pymongo/bulk.py @@ -28,7 +28,7 @@ validate_is_document_type, validate_ok_for_replace, validate_ok_for_update) -from pymongo.helpers import _RETRYABLE_ERROR_CODES +from pymongo.helpers import _RETRYABLE_ERROR_CODES, _get_wce_doc from pymongo.collation import validate_collation_or_none from pymongo.errors import (BulkWriteError, ConfigurationError, @@ -119,9 +119,9 @@ def _merge_command(run, full_result, offset, result): replacement["op"] = run.ops[idx] full_result["writeErrors"].append(replacement) - wc_error = result.get("writeConcernError") - if wc_error: - full_result["writeConcernErrors"].append(wc_error) + wce = _get_wce_doc(result) + if wce: + full_result["writeConcernErrors"].append(wce) def _raise_bulk_write_error(full_result): diff --git a/pymongo/helpers.py b/pymongo/helpers.py index 55d53d836e..a9d40d8103 100644 --- a/pymongo/helpers.py +++ b/pymongo/helpers.py @@ -188,6 +188,18 @@ def _raise_write_concern_error(error): error.get("errmsg"), error.get("code"), error) +def _get_wce_doc(result): + """Return the writeConcernError or None.""" + wce = result.get("writeConcernError") + if wce: + # The server reports errorLabels at the top level but it's more + # convenient to attach it to the writeConcernError doc itself. + error_labels = result.get("errorLabels") + if error_labels: + wce["errorLabels"] = error_labels + return wce + + def _check_write_command_response(result): """Backward compatibility helper for write command error handling. """ @@ -196,12 +208,9 @@ def _check_write_command_response(result): if write_errors: _raise_last_write_error(write_errors) - error = result.get("writeConcernError") - if error: - error_labels = result.get("errorLabels") - if error_labels: - error.update({'errorLabels': error_labels}) - _raise_write_concern_error(error) + wce = _get_wce_doc(result) + if wce: + _raise_write_concern_error(wce) def _fields_list_to_dict(fields, option_name): diff --git a/test/retryable_writes/bulkWrite-errorLabels.json b/test/retryable_writes/legacy/bulkWrite-errorLabels.json similarity index 100% rename from test/retryable_writes/bulkWrite-errorLabels.json rename to test/retryable_writes/legacy/bulkWrite-errorLabels.json diff --git a/test/retryable_writes/bulkWrite-serverErrors.json b/test/retryable_writes/legacy/bulkWrite-serverErrors.json similarity index 97% rename from test/retryable_writes/bulkWrite-serverErrors.json rename to test/retryable_writes/legacy/bulkWrite-serverErrors.json index 9d792ceafb..1e6cc74c05 100644 --- a/test/retryable_writes/bulkWrite-serverErrors.json +++ b/test/retryable_writes/legacy/bulkWrite-serverErrors.json @@ -119,12 +119,12 @@ "failCommands": [ "insert" ], + "errorLabels": [ + "RetryableWriteError" + ], "writeConcernError": { "code": 91, - "errmsg": "Replication is being shut down", - "errorLabels": [ - "RetryableWriteError" - ] + "errmsg": "Replication is being shut down" } } }, diff --git a/test/retryable_writes/bulkWrite.json b/test/retryable_writes/legacy/bulkWrite.json similarity index 100% rename from test/retryable_writes/bulkWrite.json rename to test/retryable_writes/legacy/bulkWrite.json diff --git a/test/retryable_writes/deleteMany.json b/test/retryable_writes/legacy/deleteMany.json similarity index 100% rename from test/retryable_writes/deleteMany.json rename to test/retryable_writes/legacy/deleteMany.json diff --git a/test/retryable_writes/deleteOne-errorLabels.json b/test/retryable_writes/legacy/deleteOne-errorLabels.json similarity index 100% rename from test/retryable_writes/deleteOne-errorLabels.json rename to 
test/retryable_writes/legacy/deleteOne-errorLabels.json diff --git a/test/retryable_writes/deleteOne-serverErrors.json b/test/retryable_writes/legacy/deleteOne-serverErrors.json similarity index 95% rename from test/retryable_writes/deleteOne-serverErrors.json rename to test/retryable_writes/legacy/deleteOne-serverErrors.json index 4eab2fa296..a1a27838de 100644 --- a/test/retryable_writes/deleteOne-serverErrors.json +++ b/test/retryable_writes/legacy/deleteOne-serverErrors.json @@ -75,12 +75,12 @@ "failCommands": [ "delete" ], + "errorLabels": [ + "RetryableWriteError" + ], "writeConcernError": { "code": 91, - "errmsg": "Replication is being shut down", - "errorLabels": [ - "RetryableWriteError" - ] + "errmsg": "Replication is being shut down" } } }, diff --git a/test/retryable_writes/deleteOne.json b/test/retryable_writes/legacy/deleteOne.json similarity index 100% rename from test/retryable_writes/deleteOne.json rename to test/retryable_writes/legacy/deleteOne.json diff --git a/test/retryable_writes/findOneAndDelete-errorLabels.json b/test/retryable_writes/legacy/findOneAndDelete-errorLabels.json similarity index 100% rename from test/retryable_writes/findOneAndDelete-errorLabels.json rename to test/retryable_writes/legacy/findOneAndDelete-errorLabels.json diff --git a/test/retryable_writes/findOneAndDelete-serverErrors.json b/test/retryable_writes/legacy/findOneAndDelete-serverErrors.json similarity index 95% rename from test/retryable_writes/findOneAndDelete-serverErrors.json rename to test/retryable_writes/legacy/findOneAndDelete-serverErrors.json index 4c10861614..c18b63f456 100644 --- a/test/retryable_writes/findOneAndDelete-serverErrors.json +++ b/test/retryable_writes/legacy/findOneAndDelete-serverErrors.json @@ -81,12 +81,12 @@ "failCommands": [ "findAndModify" ], + "errorLabels": [ + "RetryableWriteError" + ], "writeConcernError": { "code": 91, - "errmsg": "Replication is being shut down", - "errorLabels": [ - "RetryableWriteError" - ] + "errmsg": "Replication is being shut down" } } }, diff --git a/test/retryable_writes/findOneAndDelete.json b/test/retryable_writes/legacy/findOneAndDelete.json similarity index 100% rename from test/retryable_writes/findOneAndDelete.json rename to test/retryable_writes/legacy/findOneAndDelete.json diff --git a/test/retryable_writes/findOneAndReplace-errorLabels.json b/test/retryable_writes/legacy/findOneAndReplace-errorLabels.json similarity index 100% rename from test/retryable_writes/findOneAndReplace-errorLabels.json rename to test/retryable_writes/legacy/findOneAndReplace-errorLabels.json diff --git a/test/retryable_writes/findOneAndReplace-serverErrors.json b/test/retryable_writes/legacy/findOneAndReplace-serverErrors.json similarity index 96% rename from test/retryable_writes/findOneAndReplace-serverErrors.json rename to test/retryable_writes/legacy/findOneAndReplace-serverErrors.json index 64c69e2f6d..944a3af848 100644 --- a/test/retryable_writes/findOneAndReplace-serverErrors.json +++ b/test/retryable_writes/legacy/findOneAndReplace-serverErrors.json @@ -85,12 +85,12 @@ "failCommands": [ "findAndModify" ], + "errorLabels": [ + "RetryableWriteError" + ], "writeConcernError": { "code": 91, - "errmsg": "Replication is being shut down", - "errorLabels": [ - "RetryableWriteError" - ] + "errmsg": "Replication is being shut down" } } }, diff --git a/test/retryable_writes/findOneAndReplace.json b/test/retryable_writes/legacy/findOneAndReplace.json similarity index 100% rename from test/retryable_writes/findOneAndReplace.json rename to 
test/retryable_writes/legacy/findOneAndReplace.json diff --git a/test/retryable_writes/findOneAndUpdate-errorLabels.json b/test/retryable_writes/legacy/findOneAndUpdate-errorLabels.json similarity index 100% rename from test/retryable_writes/findOneAndUpdate-errorLabels.json rename to test/retryable_writes/legacy/findOneAndUpdate-errorLabels.json diff --git a/test/retryable_writes/findOneAndUpdate-serverErrors.json b/test/retryable_writes/legacy/findOneAndUpdate-serverErrors.json similarity index 96% rename from test/retryable_writes/findOneAndUpdate-serverErrors.json rename to test/retryable_writes/legacy/findOneAndUpdate-serverErrors.json index 9f54604992..e83a610615 100644 --- a/test/retryable_writes/findOneAndUpdate-serverErrors.json +++ b/test/retryable_writes/legacy/findOneAndUpdate-serverErrors.json @@ -86,12 +86,12 @@ "failCommands": [ "findAndModify" ], + "errorLabels": [ + "RetryableWriteError" + ], "writeConcernError": { "code": 91, - "errmsg": "Replication is being shut down", - "errorLabels": [ - "RetryableWriteError" - ] + "errmsg": "Replication is being shut down" } } }, diff --git a/test/retryable_writes/findOneAndUpdate.json b/test/retryable_writes/legacy/findOneAndUpdate.json similarity index 100% rename from test/retryable_writes/findOneAndUpdate.json rename to test/retryable_writes/legacy/findOneAndUpdate.json diff --git a/test/retryable_writes/insertMany-errorLabels.json b/test/retryable_writes/legacy/insertMany-errorLabels.json similarity index 100% rename from test/retryable_writes/insertMany-errorLabels.json rename to test/retryable_writes/legacy/insertMany-errorLabels.json diff --git a/test/retryable_writes/insertMany-serverErrors.json b/test/retryable_writes/legacy/insertMany-serverErrors.json similarity index 96% rename from test/retryable_writes/insertMany-serverErrors.json rename to test/retryable_writes/legacy/insertMany-serverErrors.json index 7b45b506c9..fe8dbf4a62 100644 --- a/test/retryable_writes/insertMany-serverErrors.json +++ b/test/retryable_writes/legacy/insertMany-serverErrors.json @@ -92,12 +92,12 @@ "failCommands": [ "insert" ], + "errorLabels": [ + "RetryableWriteError" + ], "writeConcernError": { "code": 91, - "errmsg": "Replication is being shut down", - "errorLabels": [ - "RetryableWriteError" - ] + "errmsg": "Replication is being shut down" } } }, diff --git a/test/retryable_writes/insertMany.json b/test/retryable_writes/legacy/insertMany.json similarity index 100% rename from test/retryable_writes/insertMany.json rename to test/retryable_writes/legacy/insertMany.json diff --git a/test/retryable_writes/insertOne-errorLabels.json b/test/retryable_writes/legacy/insertOne-errorLabels.json similarity index 100% rename from test/retryable_writes/insertOne-errorLabels.json rename to test/retryable_writes/legacy/insertOne-errorLabels.json diff --git a/test/retryable_writes/insertOne-serverErrors.json b/test/retryable_writes/legacy/insertOne-serverErrors.json similarity index 97% rename from test/retryable_writes/insertOne-serverErrors.json rename to test/retryable_writes/legacy/insertOne-serverErrors.json index e8571f8cf9..5179a6ab75 100644 --- a/test/retryable_writes/insertOne-serverErrors.json +++ b/test/retryable_writes/legacy/insertOne-serverErrors.json @@ -761,12 +761,12 @@ "failCommands": [ "insert" ], + "errorLabels": [ + "RetryableWriteError" + ], "writeConcernError": { "code": 11600, - "errmsg": "Replication is being shut down", - "errorLabels": [ - "RetryableWriteError" - ] + "errmsg": "Replication is being shut down" } } }, @@ -812,12 
+812,12 @@ "failCommands": [ "insert" ], + "errorLabels": [ + "RetryableWriteError" + ], "writeConcernError": { "code": 11602, - "errmsg": "Replication is being shut down", - "errorLabels": [ - "RetryableWriteError" - ] + "errmsg": "Replication is being shut down" } } }, @@ -863,12 +863,12 @@ "failCommands": [ "insert" ], + "errorLabels": [ + "RetryableWriteError" + ], "writeConcernError": { "code": 189, - "errmsg": "Replication is being shut down", - "errorLabels": [ - "RetryableWriteError" - ] + "errmsg": "Replication is being shut down" } } }, @@ -914,12 +914,12 @@ "failCommands": [ "insert" ], + "errorLabels": [ + "RetryableWriteError" + ], "writeConcernError": { "code": 91, - "errmsg": "Replication is being shut down", - "errorLabels": [ - "RetryableWriteError" - ] + "errmsg": "Replication is being shut down" } } }, @@ -965,12 +965,12 @@ "failCommands": [ "insert" ], + "errorLabels": [ + "RetryableWriteError" + ], "writeConcernError": { "code": 91, - "errmsg": "Replication is being shut down", - "errorLabels": [ - "RetryableWriteError" - ] + "errmsg": "Replication is being shut down" } } }, diff --git a/test/retryable_writes/insertOne.json b/test/retryable_writes/legacy/insertOne.json similarity index 100% rename from test/retryable_writes/insertOne.json rename to test/retryable_writes/legacy/insertOne.json diff --git a/test/retryable_writes/replaceOne-errorLabels.json b/test/retryable_writes/legacy/replaceOne-errorLabels.json similarity index 100% rename from test/retryable_writes/replaceOne-errorLabels.json rename to test/retryable_writes/legacy/replaceOne-errorLabels.json diff --git a/test/retryable_writes/replaceOne-serverErrors.json b/test/retryable_writes/legacy/replaceOne-serverErrors.json similarity index 96% rename from test/retryable_writes/replaceOne-serverErrors.json rename to test/retryable_writes/legacy/replaceOne-serverErrors.json index 7457228cd7..6b35722e12 100644 --- a/test/retryable_writes/replaceOne-serverErrors.json +++ b/test/retryable_writes/legacy/replaceOne-serverErrors.json @@ -85,12 +85,12 @@ "failCommands": [ "update" ], + "errorLabels": [ + "RetryableWriteError" + ], "writeConcernError": { "code": 91, - "errmsg": "Replication is being shut down", - "errorLabels": [ - "RetryableWriteError" - ] + "errmsg": "Replication is being shut down" } } }, diff --git a/test/retryable_writes/replaceOne.json b/test/retryable_writes/legacy/replaceOne.json similarity index 100% rename from test/retryable_writes/replaceOne.json rename to test/retryable_writes/legacy/replaceOne.json diff --git a/test/retryable_writes/updateMany.json b/test/retryable_writes/legacy/updateMany.json similarity index 100% rename from test/retryable_writes/updateMany.json rename to test/retryable_writes/legacy/updateMany.json diff --git a/test/retryable_writes/updateOne-errorLabels.json b/test/retryable_writes/legacy/updateOne-errorLabels.json similarity index 100% rename from test/retryable_writes/updateOne-errorLabels.json rename to test/retryable_writes/legacy/updateOne-errorLabels.json diff --git a/test/retryable_writes/updateOne-serverErrors.json b/test/retryable_writes/legacy/updateOne-serverErrors.json similarity index 96% rename from test/retryable_writes/updateOne-serverErrors.json rename to test/retryable_writes/legacy/updateOne-serverErrors.json index 1160198019..cf274f57e0 100644 --- a/test/retryable_writes/updateOne-serverErrors.json +++ b/test/retryable_writes/legacy/updateOne-serverErrors.json @@ -86,12 +86,12 @@ "failCommands": [ "update" ], + "errorLabels": [ + 
"RetryableWriteError" + ], "writeConcernError": { "code": 91, - "errmsg": "Replication is being shut down", - "errorLabels": [ - "RetryableWriteError" - ] + "errmsg": "Replication is being shut down" } } }, diff --git a/test/retryable_writes/updateOne.json b/test/retryable_writes/legacy/updateOne.json similarity index 100% rename from test/retryable_writes/updateOne.json rename to test/retryable_writes/legacy/updateOne.json diff --git a/test/retryable_writes/unified/bulkWrite-serverErrors.json b/test/retryable_writes/unified/bulkWrite-serverErrors.json new file mode 100644 index 0000000000..23cf2869a6 --- /dev/null +++ b/test/retryable_writes/unified/bulkWrite-serverErrors.json @@ -0,0 +1,205 @@ +{ + "description": "retryable-writes bulkWrite serverErrors", + "schemaVersion": "1.0", + "runOnRequirements": [ + { + "minServerVersion": "3.6", + "topologies": [ + "replicaset" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "retryable-writes-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ], + "tests": [ + { + "description": "BulkWrite succeeds after retryable writeConcernError in first batch", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "replicaset" + ] + }, + { + "minServerVersion": "4.1.7", + "topologies": [ + "sharded-replicaset" + ] + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "errorLabels": [ + "RetryableWriteError" + ], + "writeConcernError": { + "code": 91, + "errmsg": "Replication is being shut down" + } + } + } + } + }, + { + "name": "bulkWrite", + "object": "collection0", + "arguments": { + "requests": [ + { + "insertOne": { + "document": { + "_id": 3, + "x": 33 + } + } + }, + { + "deleteOne": { + "filter": { + "_id": 2 + } + } + } + ] + }, + "expectResult": { + "deletedCount": 1, + "insertedCount": 1, + "matchedCount": 0, + "modifiedCount": 0, + "upsertedCount": 0, + "insertedIds": { + "$$unsetOrMatches": { + "0": 3 + } + }, + "upsertedIds": {} + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "coll", + "documents": [ + { + "_id": 3, + "x": 33 + } + ] + }, + "commandName": "insert", + "databaseName": "retryable-writes-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "coll", + "documents": [ + { + "_id": 3, + "x": 33 + } + ] + }, + "commandName": "insert", + "databaseName": "retryable-writes-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "delete": "coll", + "deletes": [ + { + "q": { + "_id": 2 + }, + "limit": 1 + } + ] + }, + "commandName": "delete", + "databaseName": "retryable-writes-tests" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ] + } + ] +} diff --git a/test/retryable_writes/unified/insertOne-serverErrors.json 
b/test/retryable_writes/unified/insertOne-serverErrors.json new file mode 100644 index 0000000000..77245a8197 --- /dev/null +++ b/test/retryable_writes/unified/insertOne-serverErrors.json @@ -0,0 +1,173 @@ +{ + "description": "retryable-writes insertOne serverErrors", + "schemaVersion": "1.0", + "runOnRequirements": [ + { + "minServerVersion": "3.6", + "topologies": [ + "replicaset" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "retryable-writes-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ], + "tests": [ + { + "description": "InsertOne succeeds after retryable writeConcernError", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "replicaset" + ] + }, + { + "minServerVersion": "4.1.7", + "topologies": [ + "sharded-replicaset" + ] + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "errorLabels": [ + "RetryableWriteError" + ], + "writeConcernError": { + "code": 91, + "errmsg": "Replication is being shut down" + } + } + } + } + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "_id": 3, + "x": 33 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 3 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "coll", + "documents": [ + { + "_id": 3, + "x": 33 + } + ] + }, + "commandName": "insert", + "databaseName": "retryable-writes-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "coll", + "documents": [ + { + "_id": 3, + "x": 33 + } + ] + }, + "commandName": "insert", + "databaseName": "retryable-writes-tests" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ] + } + ] +} diff --git a/test/test_retryable_writes.py b/test/test_retryable_writes.py index ffc93eb2fa..f3f09095d7 100644 --- a/test/test_retryable_writes.py +++ b/test/test_retryable_writes.py @@ -60,7 +60,7 @@ # Location of JSON test specifications. _TEST_PATH = os.path.join( - os.path.dirname(os.path.realpath(__file__)), 'retryable_writes') + os.path.dirname(os.path.realpath(__file__)), 'retryable_writes', 'legacy') class TestAllScenarios(SpecRunner): diff --git a/test/test_retryable_writes_unified.py b/test/test_retryable_writes_unified.py new file mode 100644 index 0000000000..4e851de273 --- /dev/null +++ b/test/test_retryable_writes_unified.py @@ -0,0 +1,33 @@ +# Copyright 2021-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Test the Retryable Writes unified spec tests.""" + +import os +import sys + +sys.path[0:0] = [""] + +from test import unittest +from test.unified_format import generate_test_classes + +# Location of JSON test specifications. +TEST_PATH = os.path.join( + os.path.dirname(os.path.realpath(__file__)), 'retryable_writes', 'unified') + +# Generate unified tests. +globals().update(generate_test_classes(TEST_PATH, module=__name__)) + +if __name__ == "__main__": + unittest.main() diff --git a/test/transactions/legacy/error-labels.json b/test/transactions/legacy/error-labels.json index a57f216b9b..0be19c731c 100644 --- a/test/transactions/legacy/error-labels.json +++ b/test/transactions/legacy/error-labels.json @@ -963,12 +963,12 @@ "failCommands": [ "commitTransaction" ], + "errorLabels": [ + "RetryableWriteError" + ], "writeConcernError": { "code": 91, - "errmsg": "Replication is being shut down", - "errorLabels": [ - "RetryableWriteError" - ] + "errmsg": "Replication is being shut down" } } }, diff --git a/test/transactions/legacy/mongos-recovery-token.json b/test/transactions/legacy/mongos-recovery-token.json index 02c2002f75..da4e9861d1 100644 --- a/test/transactions/legacy/mongos-recovery-token.json +++ b/test/transactions/legacy/mongos-recovery-token.json @@ -180,12 +180,12 @@ "failCommands": [ "commitTransaction" ], + "errorLabels": [ + "RetryableWriteError" + ], "writeConcernError": { "code": 91, - "errmsg": "Replication is being shut down", - "errorLabels": [ - "RetryableWriteError" - ] + "errmsg": "Replication is being shut down" } } } diff --git a/test/transactions/legacy/retryable-abort.json b/test/transactions/legacy/retryable-abort.json index b712e80862..13cc7c88fb 100644 --- a/test/transactions/legacy/retryable-abort.json +++ b/test/transactions/legacy/retryable-abort.json @@ -1556,11 +1556,11 @@ "failCommands": [ "abortTransaction" ], + "errorLabels": [ + "RetryableWriteError" + ], "writeConcernError": { "code": 11600, - "errorLabels": [ - "RetryableWriteError" - ], "errmsg": "Replication is being shut down" } } @@ -1673,11 +1673,11 @@ "failCommands": [ "abortTransaction" ], + "errorLabels": [ + "RetryableWriteError" + ], "writeConcernError": { "code": 11602, - "errorLabels": [ - "RetryableWriteError" - ], "errmsg": "Replication is being shut down" } } @@ -1790,11 +1790,11 @@ "failCommands": [ "abortTransaction" ], + "errorLabels": [ + "RetryableWriteError" + ], "writeConcernError": { "code": 189, - "errorLabels": [ - "RetryableWriteError" - ], "errmsg": "Replication is being shut down" } } @@ -1907,11 +1907,11 @@ "failCommands": [ "abortTransaction" ], + "errorLabels": [ + "RetryableWriteError" + ], "writeConcernError": { "code": 91, - "errorLabels": [ - "RetryableWriteError" - ], "errmsg": "Replication is being shut down" } } diff --git a/test/transactions/legacy/retryable-commit.json b/test/transactions/legacy/retryable-commit.json index d83a1d9f52..49148c62d2 100644 --- a/test/transactions/legacy/retryable-commit.json +++ b/test/transactions/legacy/retryable-commit.json @@ -1855,11 +1855,11 @@ "failCommands": [ "commitTransaction" ], + "errorLabels": [ + 
"RetryableWriteError" + ], "writeConcernError": { "code": 11600, - "errorLabels": [ - "RetryableWriteError" - ], "errmsg": "Replication is being shut down" } } @@ -1977,11 +1977,11 @@ "failCommands": [ "commitTransaction" ], + "errorLabels": [ + "RetryableWriteError" + ], "writeConcernError": { "code": 11602, - "errorLabels": [ - "RetryableWriteError" - ], "errmsg": "Replication is being shut down" } } @@ -2099,11 +2099,11 @@ "failCommands": [ "commitTransaction" ], + "errorLabels": [ + "RetryableWriteError" + ], "writeConcernError": { "code": 189, - "errorLabels": [ - "RetryableWriteError" - ], "errmsg": "Replication is being shut down" } } @@ -2221,11 +2221,11 @@ "failCommands": [ "commitTransaction" ], + "errorLabels": [ + "RetryableWriteError" + ], "writeConcernError": { "code": 91, - "errorLabels": [ - "RetryableWriteError" - ], "errmsg": "Replication is being shut down" } } From 64a4f6e14167e35f4d8305a6c541395062a3cf6a Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Tue, 23 Nov 2021 15:45:49 -0800 Subject: [PATCH 0019/1588] PYTHON-3024 Update estimatedDocumentCount test for Atlas Data Lake (#802) Migrate data lake testing to ubuntu 18. Ensure mongohouse downloads the right build via VARIANT. --- .evergreen/config.yml | 6 ++++-- test/data_lake/estimatedDocumentCount.json | 21 +++++++++++++++++++-- test/data_lake/getMore.json | 2 +- test/data_lake/listCollections.json | 2 +- test/data_lake/listDatabases.json | 2 +- test/data_lake/runCommand.json | 2 +- 6 files changed, 27 insertions(+), 8 deletions(-) diff --git a/.evergreen/config.yml b/.evergreen/config.yml index d4c95105bf..c785d8f14b 100644 --- a/.evergreen/config.yml +++ b/.evergreen/config.yml @@ -310,7 +310,9 @@ functions: script: | set -o xtrace ${PREPARE_SHELL} - bash ${DRIVERS_TOOLS}/.evergreen/atlas_data_lake/build-mongohouse-local.sh + # The mongohouse build script needs to be passed the VARIANT variable, see + # https://github.com/10gen/mongohouse/blob/973cc11/evergreen.yaml#L65 + VARIANT=ubuntu1804 bash ${DRIVERS_TOOLS}/.evergreen/atlas_data_lake/build-mongohouse-local.sh - command: shell.exec type: setup params: @@ -2439,7 +2441,7 @@ buildvariants: - matrix_name: "data-lake-spec-tests" matrix_spec: - platform: ubuntu-16.04 + platform: ubuntu-18.04 python-version: ["3.6", "3.9"] auth: "auth" c-extensions: "*" diff --git a/test/data_lake/estimatedDocumentCount.json b/test/data_lake/estimatedDocumentCount.json index d039a51f06..87b385208d 100644 --- a/test/data_lake/estimatedDocumentCount.json +++ b/test/data_lake/estimatedDocumentCount.json @@ -15,8 +15,25 @@ { "command_started_event": { "command": { - "count": "driverdata" - } + "aggregate": "driverdata", + "pipeline": [ + { + "$collStats": { + "count": {} + } + }, + { + "$group": { + "_id": 1, + "n": { + "$sum": "$count" + } + } + } + ] + }, + "command_name": "aggregate", + "database_name": "test" } } ] diff --git a/test/data_lake/getMore.json b/test/data_lake/getMore.json index fa1deab4f3..e2e1d4788a 100644 --- a/test/data_lake/getMore.json +++ b/test/data_lake/getMore.json @@ -54,4 +54,4 @@ ] } ] -} \ No newline at end of file +} diff --git a/test/data_lake/listCollections.json b/test/data_lake/listCollections.json index 8d8a8f6c1b..e419f7b3e9 100644 --- a/test/data_lake/listCollections.json +++ b/test/data_lake/listCollections.json @@ -22,4 +22,4 @@ ] } ] -} \ No newline at end of file +} diff --git a/test/data_lake/listDatabases.json b/test/data_lake/listDatabases.json index f8ec9a0bf4..6458148e49 100644 --- a/test/data_lake/listDatabases.json +++ 
b/test/data_lake/listDatabases.json
@@ -21,4 +21,4 @@
       ]
     }
   ]
-}
\ No newline at end of file
+}
diff --git a/test/data_lake/runCommand.json b/test/data_lake/runCommand.json
index f72e863ba5..d81ff1a64b 100644
--- a/test/data_lake/runCommand.json
+++ b/test/data_lake/runCommand.json
@@ -28,4 +28,4 @@
       ]
     }
   ]
-}
\ No newline at end of file
+}

From 37b5195eef2614215b865f20bfdf5dbd4133290d Mon Sep 17 00:00:00 2001
From: Shane Harvey
Date: Tue, 23 Nov 2021 16:15:52 -0800
Subject: [PATCH 0020/1588] PYTHON-2434 Automatically combine release wheels +
 sdist into one archive (#803)

---
 .evergreen/config.yml | 119 ++++++++++++++++++++++++++++++++++--------
 RELEASE.rst           |  37 +++++--------
 2 files changed, 109 insertions(+), 47 deletions(-)

diff --git a/.evergreen/config.yml b/.evergreen/config.yml
index c785d8f14b..93b37d504d 100644
--- a/.evergreen/config.yml
+++ b/.evergreen/config.yml
@@ -870,6 +870,16 @@ functions:
       # Remove all Docker images
       docker rmi -f $(docker images -a -q) &> /dev/null || true
 
+  "build release":
+    - command: shell.exec
+      type: test
+      params:
+        working_dir: "src"
+        script: |
+          set -o xtrace
+          ${PREPARE_SHELL}
+          .evergreen/release.sh
+
   "upload release":
     - command: archive.targz_pack
       params:
@@ -882,12 +892,63 @@
         aws_key: ${aws_key}
         aws_secret: ${aws_secret}
         local_file: release-files.tgz
-        remote_file: ${UPLOAD_BUCKET}/${build_variant}/${revision}/${version_id}/${build_id}/release/${task_id}-${execution}-release-files.tar.gz
+        remote_file: ${UPLOAD_BUCKET}/release/${revision}/${task_id}-${execution}-release-files.tar.gz
        bucket: mciuploads
         permissions: public-read
         content_type: ${content_type|application/gzip}
         display_name: Release files
 
+  "download and merge releases":
+    - command: shell.exec
+      params:
+        silent: true
+        script: |
+          export AWS_ACCESS_KEY_ID=${aws_key}
+          export AWS_SECRET_ACCESS_KEY=${aws_secret}
+
+          # Download all the task release files.
+          aws s3 cp --recursive s3://mciuploads/${UPLOAD_BUCKET}/release/${revision}/ release/
+    - command: shell.exec
+      params:
+        shell: "bash"
+        script: |
+          set -o xtrace
+          ${PREPARE_SHELL}
+          # Combine releases into one directory.
+          ls -la release/
+          mkdir releases
+          # Copy old manylinux release first since we want the newer manylinux
+          # wheels to override them.
+          mkdir old_manylinux
+          if mv release/*old_manylinux* old_manylinux; then
+            for REL in old_manylinux/*; do
+              tar zxvf $REL -C releases/
+            done
+          fi
+          for REL in release/*; do
+            tar zxvf $REL -C releases/
+          done
+          # Build source distribution.
+ cd src/ + /opt/python/3.6/bin/python3 setup.py sdist + cp dist/* ../releases + - command: archive.targz_pack + params: + target: "release-files-all.tgz" + source_dir: "releases/" + include: + - "*" + - command: s3.put + params: + aws_key: ${aws_key} + aws_secret: ${aws_secret} + local_file: release-files-all.tgz + remote_file: ${UPLOAD_BUCKET}/release-all/${revision}/${task_id}-${execution}-release-files-all.tar.gz + bucket: mciuploads + permissions: public-read + content_type: ${content_type|application/gzip} + display_name: Release files all + pre: - func: "fetch source" - func: "prepare resources" @@ -972,22 +1033,31 @@ tasks: genhtml --version || true valgrind --version || true - - name: "release" - tags: ["release"] + - name: "release-mac" + tags: ["release_tag"] + run_on: macos-1014 + commands: + - func: "build release" + - func: "upload release" + + - name: "release-windows" + tags: ["release_tag"] + run_on: windows-64-vsMulti-small + commands: + - func: "build release" + - func: "upload release" + + - name: "release-manylinux" + tags: ["release_tag"] + run_on: ubuntu2004-large exec_timeout_secs: 216000 # 60 minutes (manylinux task is slow). commands: - - command: shell.exec - type: test - params: - working_dir: "src" - script: | - set -o xtrace - ${PREPARE_SHELL} - .evergreen/release.sh + - func: "build release" - func: "upload release" - name: "release-old-manylinux" - tags: ["release"] + tags: ["release_tag"] + run_on: ubuntu2004-large exec_timeout_secs: 216000 # 60 minutes (manylinux task is slow). commands: - command: shell.exec @@ -1000,6 +1070,16 @@ tasks: .evergreen/build-manylinux.sh BUILD_WITH_TAG - func: "upload release" + - name: "release-combine" + tags: ["release_tag"] + run_on: ubuntu2004-small + depends_on: + - name: "*" + variant: ".release_tag" + patch_optional: true + commands: + - func: "download and merge releases" + # Standard test tasks {{{ - name: "mockupdb" @@ -2530,19 +2610,12 @@ buildvariants: tasks: - name: "load-balancer-test" -- matrix_name: "Release" - matrix_spec: - platform: [ubuntu-20.04, windows-64-vsMulti-small, macos-1014] - display_name: "Release ${platform}" +- name: Release + display_name: Release batchtime: 20160 # 14 days + tags: ["release_tag"] tasks: - - name: "release" - rules: - - if: - platform: ubuntu-20.04 - then: - add_tasks: - - name: "release-old-manylinux" + - ".release_tag" # Platform notes # i386 builds of OpenSSL or Cyrus SASL are not available diff --git a/RELEASE.rst b/RELEASE.rst index 220908084b..84b60d9b6a 100644 --- a/RELEASE.rst +++ b/RELEASE.rst @@ -55,48 +55,37 @@ Doing a Release 8. Push commit / tag, eg ``git push && git push --tags``. 9. Pushing a tag will trigger a release process in Evergreen which builds - wheels and eggs for manylinux, macOS, and Windows. Wait for these jobs to - complete and then download the "Release files" archive from each task. See: + wheels for manylinux, macOS, and Windows. Wait for the "release-combine" + task to complete and then download the "Release files all" archive. See: https://evergreen.mongodb.com/waterfall/mongo-python-driver?bv_filter=release - Unpack each downloaded archive so that we can upload the included files. For - the next steps let's assume we unpacked these files into the following paths:: + The contents should look like this:: - $ ls path/to/manylinux + $ ls path/to/archive + pymongo--cp310-cp310-macosx_10_9_universal2.whl + ... pymongo--cp38-cp38-manylinux2014_x86_64.whl ... - $ ls path/to/windows/ pymongo--cp38-cp38-win_amd64.whl ... - -10. 
Build the source distribution:: - - $ git clone git@github.com:mongodb/mongo-python-driver.git - $ cd mongo-python-driver - $ git checkout "" - $ python3 setup.py sdist - - This will create the following distribution:: - - $ ls dist pymongo-.tar.gz -11. Upload all the release packages to PyPI with twine:: +10. Upload all the release packages to PyPI with twine:: - $ python3 -m twine upload dist/*.tar.gz path/to/manylinux/* path/to/mac/* path/to/windows/* + $ python3 -m twine upload path/to/archive/* -12. Make sure the new version appears on https://pymongo.readthedocs.io/. If the +11. Make sure the new version appears on https://pymongo.readthedocs.io/. If the new version does not show up automatically, trigger a rebuild of "latest": https://readthedocs.org/projects/pymongo/builds/ -13. Bump the version number to .dev0 in setup.py/__init__.py, +12. Bump the version number to .dev0 in setup.py/__init__.py, commit, push. -14. Publish the release version in Jira. +13. Publish the release version in Jira. -15. Announce the release on: +14. Announce the release on: https://developer.mongodb.com/community/forums/c/community/release-notes/ -16. File a ticket for DOCSP highlighting changes in server version and Python +15. File a ticket for DOCSP highlighting changes in server version and Python version compatibility or the lack thereof, for example: https://jira.mongodb.org/browse/DOCSP-13536 From 7de879a9fe8ba09e4b4762b19af220ceaf635434 Mon Sep 17 00:00:00 2001 From: Julius Park Date: Tue, 23 Nov 2021 16:56:41 -0800 Subject: [PATCH 0021/1588] PYTHON-3018 Add docs for removed methods from Collection (#801) --- doc/migrate-to-pymongo4.rst | 2 ++ pymongo/collection.py | 7 +++++++ pymongo/database.py | 7 +++++++ pymongo/mongo_client.py | 16 ++++++++++------ 4 files changed, 26 insertions(+), 6 deletions(-) diff --git a/doc/migrate-to-pymongo4.rst b/doc/migrate-to-pymongo4.rst index 5acd3a5d12..22071bd3bb 100644 --- a/doc/migrate-to-pymongo4.rst +++ b/doc/migrate-to-pymongo4.rst @@ -1,3 +1,5 @@ +.. _pymongo4-migration-guide: + PyMongo 4 Migration Guide ========================= diff --git a/pymongo/collection.py b/pymongo/collection.py index 8632204b81..774c290235 100644 --- a/pymongo/collection.py +++ b/pymongo/collection.py @@ -115,6 +115,13 @@ def __init__(self, database, name, create=False, codec_options=None, - `**kwargs` (optional): additional keyword arguments will be passed as options for the create collection command + .. versionchanged:: 4.0 + Removed the reindex, map_reduce, inline_map_reduce, + parallel_scan, initialize_unordered_bulk_op, + initialize_ordered_bulk_op, group, count, insert, save, + update, remove, find_and_modify, and ensure_index methods. See the + :ref:`pymongo4-migration-guide`. + .. versionchanged:: 3.6 Added ``session`` parameter. diff --git a/pymongo/database.py b/pymongo/database.py index c30d29bde4..dc8c13cbb0 100644 --- a/pymongo/database.py +++ b/pymongo/database.py @@ -69,6 +69,13 @@ def __init__(self, client, name, codec_options=None, read_preference=None, .. seealso:: The MongoDB documentation on `databases `_. + .. versionchanged:: 4.0 + Removed the eval, system_js, error, last_status, previous_error, + reset_error_history, authenticate, logout, collection_names, + current_op, add_user, remove_user, profiling_level, + set_profiling_level, and profiling_info methods. + See the :ref:`pymongo4-migration-guide`. + .. versionchanged:: 3.2 Added the read_concern option. 
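A short sketch may make the removals documented above more concrete. This is a
minimal, illustrative example only (it assumes a locally running mongod and a
scratch ``test`` database, both hypothetical), showing the 4.0 replacements
for a few of the removed helpers::

    from pymongo import MongoClient

    client = MongoClient()
    db = client.test

    # Database.collection_names() was removed; use:
    names = db.list_collection_names()

    # Collection.count({...}) was removed; use:
    n = db.coll.count_documents({"x": 1})

    # Collection.insert(doc) was removed; use:
    db.coll.insert_one({"x": 1})

This is not an exhaustive mapping; see the :ref:`pymongo4-migration-guide`
for the complete list of replacements.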
diff --git a/pymongo/mongo_client.py b/pymongo/mongo_client.py index dd9dd6d33a..af159da521 100644 --- a/pymongo/mongo_client.py +++ b/pymongo/mongo_client.py @@ -504,12 +504,16 @@ def __init__( .. seealso:: The MongoDB documentation on `connections `_. .. versionchanged:: 4.0 - Removed the ``waitQueueMultiple`` and ``socketKeepAlive`` keyword - arguments. - The default for `uuidRepresentation` was changed from - ``pythonLegacy`` to ``unspecified``. - Added the ``srvServiceName`` and ``maxConnecting`` URI and keyword - argument. + + - Removed the fsync, unlock, is_locked, database_names, and + close_cursor methods. + See the :ref:`pymongo4-migration-guide`. + - Removed the ``waitQueueMultiple`` and ``socketKeepAlive`` + keyword arguments. + - The default for `uuidRepresentation` was changed from + ``pythonLegacy`` to ``unspecified``. + - Added the ``srvServiceName`` and ``maxConnecting`` URI and + keyword argument. .. versionchanged:: 3.12 Added the ``server_api`` keyword argument. From 2c28149a301bb33b3347bf38f8dbfe085215ca38 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Mon, 29 Nov 2021 10:31:12 -0800 Subject: [PATCH 0022/1588] BUMP 4.0 --- pymongo/__init__.py | 2 +- setup.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/pymongo/__init__.py b/pymongo/__init__.py index 0ec31e9e7e..608ddb818e 100644 --- a/pymongo/__init__.py +++ b/pymongo/__init__.py @@ -53,7 +53,7 @@ .. _text index: http://docs.mongodb.org/manual/core/index-text/ """ -version_tuple = (4, 0, '.dev0') +version_tuple = (4, 0) def get_version_string(): if isinstance(version_tuple[-1], str): diff --git a/setup.py b/setup.py index 63a1df4955..9c5fcae8a1 100755 --- a/setup.py +++ b/setup.py @@ -36,7 +36,7 @@ except ImportError: _HAVE_SPHINX = False -version = "4.0.dev0" +version = "4.0" f = open("README.rst") try: From e3d1d6f5b48101654a05493fd6eec7fe3fa014bd Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Mon, 29 Nov 2021 10:32:30 -0800 Subject: [PATCH 0023/1588] BUMP 4.0.1.dev0 --- pymongo/__init__.py | 2 +- setup.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/pymongo/__init__.py b/pymongo/__init__.py index 608ddb818e..4ac46672e7 100644 --- a/pymongo/__init__.py +++ b/pymongo/__init__.py @@ -53,7 +53,7 @@ .. 
_text index: http://docs.mongodb.org/manual/core/index-text/ """ -version_tuple = (4, 0) +version_tuple = (4, 0, 1, '.dev0') def get_version_string(): if isinstance(version_tuple[-1], str): diff --git a/setup.py b/setup.py index 9c5fcae8a1..158eb9a42b 100755 --- a/setup.py +++ b/setup.py @@ -36,7 +36,7 @@ except ImportError: _HAVE_SPHINX = False -version = "4.0" +version = "4.0.1.dev0" f = open("README.rst") try: From 046d789d9f25f1d9984e443837a634fe166b38ca Mon Sep 17 00:00:00 2001 From: Julius Park Date: Tue, 30 Nov 2021 15:02:40 -0800 Subject: [PATCH 0024/1588] PYTHON-2957 Support 'let' option for multiple CRUD commands (#804) --- doc/changelog.rst | 16 ++ pymongo/aggregation.py | 5 +- pymongo/collection.py | 107 ++++++-- pymongo/cursor.py | 11 +- test/crud/unified/aggregate-let.json | 103 -------- test/crud/unified/deleteMany-let.json | 201 +++++++++++++++ test/crud/unified/deleteOne-let.json | 191 +++++++++++++++ test/crud/unified/find-let.json | 148 +++++++++++ test/crud/unified/findOneAndDelete-let.json | 180 ++++++++++++++ test/crud/unified/findOneAndReplace-let.json | 197 +++++++++++++++ test/crud/unified/findOneAndUpdate-let.json | 217 +++++++++++++++++ test/crud/unified/updateMany-let.json | 243 +++++++++++++++++++ test/crud/unified/updateOne-let.json | 215 ++++++++++++++++ test/test_collection.py | 17 ++ 14 files changed, 1721 insertions(+), 130 deletions(-) create mode 100644 test/crud/unified/deleteMany-let.json create mode 100644 test/crud/unified/deleteOne-let.json create mode 100644 test/crud/unified/find-let.json create mode 100644 test/crud/unified/findOneAndDelete-let.json create mode 100644 test/crud/unified/findOneAndReplace-let.json create mode 100644 test/crud/unified/findOneAndUpdate-let.json create mode 100644 test/crud/unified/updateMany-let.json create mode 100644 test/crud/unified/updateOne-let.json diff --git a/doc/changelog.rst b/doc/changelog.rst index 88c1b7cd20..192b456619 100644 --- a/doc/changelog.rst +++ b/doc/changelog.rst @@ -1,6 +1,22 @@ Changelog ========= +Changes in Version 4.1 +---------------------- + +- :meth:`pymongo.collection.Collection.update_one`, + :meth:`pymongo.collection.Collection.update_many`, + :meth:`pymongo.collection.Collection.delete_one`, + :meth:`pymongo.collection.Collection.delete_many`, + :meth:`pymongo.collection.Collection.aggregate`, + :meth:`pymongo.collection.Collection.find_one_and_delete`, + :meth:`pymongo.collection.Collection.find_one_and_replace`, + :meth:`pymongo.collection.Collection.find_one_and_update`, + and :meth:`pymongo.collection.Collection.find` all support a new keyword + argument ``let`` which is a map of parameter names and values. Parameters + can then be accessed as variables in an aggregate expression context. + + Changes in Version 4.0 ---------------------- diff --git a/pymongo/aggregation.py b/pymongo/aggregation.py index 2a34a05d3a..4a565ee134 100644 --- a/pymongo/aggregation.py +++ b/pymongo/aggregation.py @@ -30,7 +30,7 @@ class _AggregationCommand(object): :meth:`pymongo.database.Database.aggregate` instead. """ def __init__(self, target, cursor_class, pipeline, options, - explicit_session, user_fields=None, result_processor=None): + explicit_session, let=None, user_fields=None, result_processor=None): if "explain" in options: raise ConfigurationError("The explain option is not supported. 
" "Use Database.command instead.") @@ -44,6 +44,9 @@ def __init__(self, target, cursor_class, pipeline, options, self._performs_write = True common.validate_is_mapping('options', options) + if let: + common.validate_is_mapping("let", let) + options["let"] = let self._options = options # This is the batchSize that will be used for setting the initial diff --git a/pymongo/collection.py b/pymongo/collection.py index 774c290235..393c26aa5c 100644 --- a/pymongo/collection.py +++ b/pymongo/collection.py @@ -593,7 +593,7 @@ def _update(self, sock_info, criteria, document, upsert=False, check_keys=False, multi=False, write_concern=None, op_id=None, ordered=True, bypass_doc_val=False, collation=None, array_filters=None, - hint=None, session=None, retryable_write=False): + hint=None, session=None, retryable_write=False, let=None): """Internal update / replace helper.""" common.validate_boolean("upsert", upsert) collation = validate_collation_or_none(collation) @@ -626,6 +626,9 @@ def _update(self, sock_info, criteria, document, upsert=False, command = SON([('update', self.name), ('ordered', ordered), ('updates', [update_doc])]) + if let: + common.validate_is_mapping("let", let) + command["let"] = let if not write_concern.is_server_default: command['writeConcern'] = write_concern.document @@ -663,7 +666,7 @@ def _update_retryable( check_keys=False, multi=False, write_concern=None, op_id=None, ordered=True, bypass_doc_val=False, collation=None, array_filters=None, - hint=None, session=None): + hint=None, session=None, let=None): """Internal update / replace helper.""" def _update(session, sock_info, retryable_write): return self._update( @@ -672,7 +675,7 @@ def _update(session, sock_info, retryable_write): write_concern=write_concern, op_id=op_id, ordered=ordered, bypass_doc_val=bypass_doc_val, collation=collation, array_filters=array_filters, hint=hint, session=session, - retryable_write=retryable_write) + retryable_write=retryable_write, let=let) return self.__database.client._retryable_write( (write_concern or self.write_concern).acknowledged and not multi, @@ -759,7 +762,7 @@ def replace_one(self, filter, replacement, upsert=False, def update_one(self, filter, update, upsert=False, bypass_document_validation=False, collation=None, array_filters=None, hint=None, - session=None): + session=None, let=None): """Update a single document matching the filter. >>> for doc in db.test.find(): @@ -802,10 +805,16 @@ def update_one(self, filter, update, upsert=False, MongoDB 4.2 and above. - `session` (optional): a :class:`~pymongo.client_session.ClientSession`. + - `let` (optional): Map of parameter names and values. Values must be + constant or closed expressions that do not reference document + fields. Parameters can then be accessed as variables in an + aggregate expression context (e.g. "$$var"). :Returns: - An instance of :class:`~pymongo.results.UpdateResult`. + .. versionchanged:: 4.1 + Added ``let`` parameter. .. versionchanged:: 3.11 Added ``hint`` parameter. .. 
versionchanged:: 3.9 @@ -830,12 +839,12 @@ def update_one(self, filter, update, upsert=False, write_concern=write_concern, bypass_doc_val=bypass_document_validation, collation=collation, array_filters=array_filters, - hint=hint, session=session), + hint=hint, session=session, let=let), write_concern.acknowledged) def update_many(self, filter, update, upsert=False, array_filters=None, bypass_document_validation=False, collation=None, - hint=None, session=None): + hint=None, session=None, let=None): """Update one or more documents that match the filter. >>> for doc in db.test.find(): @@ -878,10 +887,16 @@ def update_many(self, filter, update, upsert=False, array_filters=None, MongoDB 4.2 and above. - `session` (optional): a :class:`~pymongo.client_session.ClientSession`. + - `let` (optional): Map of parameter names and values. Values must be + constant or closed expressions that do not reference document + fields. Parameters can then be accessed as variables in an + aggregate expression context (e.g. "$$var"). :Returns: - An instance of :class:`~pymongo.results.UpdateResult`. + .. versionchanged:: 4.1 + Added ``let`` parameter. .. versionchanged:: 3.11 Added ``hint`` parameter. .. versionchanged:: 3.9 @@ -906,7 +921,7 @@ def update_many(self, filter, update, upsert=False, array_filters=None, write_concern=write_concern, bypass_doc_val=bypass_document_validation, collation=collation, array_filters=array_filters, - hint=hint, session=session), + hint=hint, session=session, let=let), write_concern.acknowledged) def drop(self, session=None): @@ -938,7 +953,8 @@ def drop(self, session=None): def _delete( self, sock_info, criteria, multi, write_concern=None, op_id=None, ordered=True, - collation=None, hint=None, session=None, retryable_write=False): + collation=None, hint=None, session=None, retryable_write=False, + let=None): """Internal delete helper.""" common.validate_is_mapping("filter", criteria) write_concern = write_concern or self.write_concern @@ -965,6 +981,10 @@ def _delete( if not write_concern.is_server_default: command['writeConcern'] = write_concern.document + if let: + common.validate_is_document_type("let", let) + command["let"] = let + # Delete command. result = sock_info.command( self.__database.name, @@ -980,20 +1000,21 @@ def _delete( def _delete_retryable( self, criteria, multi, write_concern=None, op_id=None, ordered=True, - collation=None, hint=None, session=None): + collation=None, hint=None, session=None, let=None): """Internal delete helper.""" def _delete(session, sock_info, retryable_write): return self._delete( sock_info, criteria, multi, write_concern=write_concern, op_id=op_id, ordered=ordered, collation=collation, hint=hint, session=session, - retryable_write=retryable_write) + retryable_write=retryable_write, let=let) return self.__database.client._retryable_write( (write_concern or self.write_concern).acknowledged and not multi, _delete, session) - def delete_one(self, filter, collation=None, hint=None, session=None): + def delete_one(self, filter, collation=None, hint=None, session=None, + let=None): """Delete a single document matching the filter. >>> db.test.count_documents({'x': 1}) @@ -1017,10 +1038,16 @@ def delete_one(self, filter, collation=None, hint=None, session=None): MongoDB 4.4 and above. - `session` (optional): a :class:`~pymongo.client_session.ClientSession`. + - `let` (optional): Map of parameter names and values. Values must be + constant or closed expressions that do not reference document + fields. 
Parameters can then be accessed as variables in an + aggregate expression context (e.g. "$$var"). :Returns: - An instance of :class:`~pymongo.results.DeleteResult`. + .. versionchanged:: 4.1 + Added ``let`` parameter. .. versionchanged:: 3.11 Added ``hint`` parameter. .. versionchanged:: 3.6 @@ -1034,10 +1061,11 @@ def delete_one(self, filter, collation=None, hint=None, session=None): self._delete_retryable( filter, False, write_concern=write_concern, - collation=collation, hint=hint, session=session), + collation=collation, hint=hint, session=session, let=let), write_concern.acknowledged) - def delete_many(self, filter, collation=None, hint=None, session=None): + def delete_many(self, filter, collation=None, hint=None, session=None, + let=None): """Delete one or more documents matching the filter. >>> db.test.count_documents({'x': 1}) @@ -1061,10 +1089,16 @@ def delete_many(self, filter, collation=None, hint=None, session=None): MongoDB 4.4 and above. - `session` (optional): a :class:`~pymongo.client_session.ClientSession`. + - `let` (optional): Map of parameter names and values. Values must be + constant or closed expressions that do not reference document + fields. Parameters can then be accessed as variables in an + aggregate expression context (e.g. "$$var"). :Returns: - An instance of :class:`~pymongo.results.DeleteResult`. + .. versionchanged:: 4.1 + Added ``let`` parameter. .. versionchanged:: 3.11 Added ``hint`` parameter. .. versionchanged:: 3.6 @@ -1078,7 +1112,7 @@ def delete_many(self, filter, collation=None, hint=None, session=None): self._delete_retryable( filter, True, write_concern=write_concern, - collation=collation, hint=hint, session=session), + collation=collation, hint=hint, session=session, let=let), write_concern.acknowledged) def find_one(self, filter=None, *args, **kwargs): @@ -1889,15 +1923,16 @@ def options(self, session=None): return options def _aggregate(self, aggregation_command, pipeline, cursor_class, session, - explicit_session, **kwargs): + explicit_session, let=None, **kwargs): cmd = aggregation_command( - self, cursor_class, pipeline, kwargs, explicit_session, + self, cursor_class, pipeline, kwargs, explicit_session, let, user_fields={'cursor': {'firstBatch': 1}}) + return self.__database.client._retryable_read( cmd.get_cursor, cmd.get_read_preference(session), session, retryable=not cmd._performs_write) - def aggregate(self, pipeline, session=None, **kwargs): + def aggregate(self, pipeline, session=None, let=None, **kwargs): """Perform an aggregation using the aggregation framework on this collection. @@ -1944,6 +1979,8 @@ def aggregate(self, pipeline, session=None, **kwargs): A :class:`~pymongo.command_cursor.CommandCursor` over the result set. + .. versionchanged:: 4.1 + Added ``let`` parameter. .. versionchanged:: 4.0 Removed the ``useCursor`` option. .. 
versionchanged:: 3.9 @@ -1973,6 +2010,7 @@ def aggregate(self, pipeline, session=None, **kwargs): CommandCursor, session=s, explicit_session=session is not None, + let=let, **kwargs) def aggregate_raw_batches(self, pipeline, session=None, **kwargs): @@ -2232,7 +2270,7 @@ def _write_concern_for_cmd(self, cmd, session): def __find_and_modify(self, filter, projection, sort, upsert=None, return_document=ReturnDocument.BEFORE, array_filters=None, hint=None, session=None, - **kwargs): + let=None, **kwargs): """Internal findAndModify helper.""" common.validate_is_mapping("filter", filter) @@ -2243,6 +2281,9 @@ def __find_and_modify(self, filter, projection, sort, upsert=None, cmd = SON([("findAndModify", self.__name), ("query", filter), ("new", return_document)]) + if let: + common.validate_is_mapping("let", let) + cmd["let"] = let cmd.update(kwargs) if projection is not None: cmd["fields"] = helpers._fields_list_to_dict(projection, @@ -2290,7 +2331,7 @@ def _find_and_modify(session, sock_info, retryable_write): def find_one_and_delete(self, filter, projection=None, sort=None, hint=None, - session=None, **kwargs): + session=None, let=None, **kwargs): """Finds a single document and deletes it, returning the document. >>> db.test.count_documents({'x': 1}) @@ -2337,7 +2378,13 @@ def find_one_and_delete(self, filter, - `**kwargs` (optional): additional command arguments can be passed as keyword arguments (for example maxTimeMS can be used with recent server versions). + - `let` (optional): Map of parameter names and values. Values must be + constant or closed expressions that do not reference document + fields. Parameters can then be accessed as variables in an + aggregate expression context (e.g. "$$var"). + .. versionchanged:: 4.1 + Added ``let`` parameter. .. versionchanged:: 3.11 Added ``hint`` parameter. .. versionchanged:: 3.6 @@ -2356,13 +2403,13 @@ def find_one_and_delete(self, filter, .. versionadded:: 3.0 """ kwargs['remove'] = True - return self.__find_and_modify(filter, projection, sort, + return self.__find_and_modify(filter, projection, sort, let=let, hint=hint, session=session, **kwargs) def find_one_and_replace(self, filter, replacement, projection=None, sort=None, upsert=False, return_document=ReturnDocument.BEFORE, - hint=None, session=None, **kwargs): + hint=None, session=None, let=None, **kwargs): """Finds a single document and replaces it, returning either the original or the replaced document. @@ -2412,10 +2459,16 @@ def find_one_and_replace(self, filter, replacement, MongoDB 4.4 and above. - `session` (optional): a :class:`~pymongo.client_session.ClientSession`. + - `let` (optional): Map of parameter names and values. Values must be + constant or closed expressions that do not reference document + fields. Parameters can then be accessed as variables in an + aggregate expression context (e.g. "$$var"). - `**kwargs` (optional): additional command arguments can be passed as keyword arguments (for example maxTimeMS can be used with recent server versions). + .. versionchanged:: 4.1 + Added ``let`` parameter. .. versionchanged:: 3.11 Added the ``hint`` option. .. 
versionchanged:: 3.6 @@ -2436,14 +2489,14 @@ def find_one_and_replace(self, filter, replacement, common.validate_ok_for_replace(replacement) kwargs['update'] = replacement return self.__find_and_modify(filter, projection, - sort, upsert, return_document, + sort, upsert, return_document, let=let, hint=hint, session=session, **kwargs) def find_one_and_update(self, filter, update, projection=None, sort=None, upsert=False, return_document=ReturnDocument.BEFORE, array_filters=None, hint=None, session=None, - **kwargs): + let=None, **kwargs): """Finds a single document and updates it, returning either the original or the updated document. @@ -2533,10 +2586,16 @@ def find_one_and_update(self, filter, update, MongoDB 4.4 and above. - `session` (optional): a :class:`~pymongo.client_session.ClientSession`. + - `let` (optional): Map of parameter names and values. Values must be + constant or closed expressions that do not reference document + fields. Parameters can then be accessed as variables in an + aggregate expression context (e.g. "$$var"). - `**kwargs` (optional): additional command arguments can be passed as keyword arguments (for example maxTimeMS can be used with recent server versions). + .. versionchanged:: 4.1 + Added ``let`` parameter. .. versionchanged:: 3.11 Added the ``hint`` option. .. versionchanged:: 3.9 @@ -2561,7 +2620,7 @@ def find_one_and_update(self, filter, update, kwargs['update'] = update return self.__find_and_modify(filter, projection, sort, upsert, return_document, - array_filters, hint=hint, + array_filters, hint=hint, let=let, session=session, **kwargs) def __iter__(self): diff --git a/pymongo/cursor.py b/pymongo/cursor.py index c38adaf377..e825edf8fd 100644 --- a/pymongo/cursor.py +++ b/pymongo/cursor.py @@ -24,7 +24,8 @@ from bson.code import Code from bson.son import SON from pymongo import helpers -from pymongo.common import validate_boolean, validate_is_mapping +from pymongo.common import (validate_boolean, validate_is_mapping, + validate_is_document_type) from pymongo.collation import validate_collation_or_none from pymongo.errors import (ConnectionFailure, InvalidOperation, @@ -140,7 +141,7 @@ def __init__(self, collection, filter=None, projection=None, skip=0, collation=None, hint=None, max_scan=None, max_time_ms=None, max=None, min=None, return_key=None, show_record_id=None, snapshot=None, comment=None, session=None, - allow_disk_use=None): + allow_disk_use=None, let=None): """Create a new cursor. 
Should not be called directly by application developers - see @@ -197,6 +198,10 @@ def __init__(self, collection, filter=None, projection=None, skip=0, if projection is not None: projection = helpers._fields_list_to_dict(projection, "projection") + if let: + validate_is_document_type("let", let) + + self.__let = let self.__spec = spec self.__projection = projection self.__skip = skip @@ -370,6 +375,8 @@ def __query_spec(self): operators["$explain"] = True if self.__hint: operators["$hint"] = self.__hint + if self.__let: + operators["let"] = self.__let if self.__comment: operators["$comment"] = self.__comment if self.__max_scan: diff --git a/test/crud/unified/aggregate-let.json b/test/crud/unified/aggregate-let.json index d3b76bd65a..039900920f 100644 --- a/test/crud/unified/aggregate-let.json +++ b/test/crud/unified/aggregate-let.json @@ -56,109 +56,6 @@ "minServerVersion": "5.0" } ], - "operations": [ - { - "name": "aggregate", - "object": "collection0", - "arguments": { - "pipeline": [ - { - "$match": { - "$expr": { - "$eq": [ - "$_id", - "$$id" - ] - } - } - }, - { - "$project": { - "_id": 0, - "x": "$$x", - "y": "$$y", - "rand": "$$rand" - } - } - ], - "let": { - "id": 1, - "x": "foo", - "y": { - "$literal": "bar" - }, - "rand": { - "$rand": {} - } - } - }, - "expectResult": [ - { - "x": "foo", - "y": "bar", - "rand": { - "$$type": "double" - } - } - ] - } - ], - "expectEvents": [ - { - "client": "client0", - "events": [ - { - "commandStartedEvent": { - "command": { - "aggregate": "coll0", - "pipeline": [ - { - "$match": { - "$expr": { - "$eq": [ - "$_id", - "$$id" - ] - } - } - }, - { - "$project": { - "_id": 0, - "x": "$$x", - "y": "$$y", - "rand": "$$rand" - } - } - ], - "let": { - "id": 1, - "x": "foo", - "y": { - "$literal": "bar" - }, - "rand": { - "$rand": {} - } - } - } - } - } - ] - } - ] - }, - { - "description": "Aggregate with let option and dollar-prefixed $literal value", - "runOnRequirements": [ - { - "minServerVersion": "5.0", - "topologies": [ - "single", - "replicaset" - ] - } - ], "operations": [ { "name": "aggregate", diff --git a/test/crud/unified/deleteMany-let.json b/test/crud/unified/deleteMany-let.json new file mode 100644 index 0000000000..71bf26a013 --- /dev/null +++ b/test/crud/unified/deleteMany-let.json @@ -0,0 +1,201 @@ +{ + "description": "deleteMany-let", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2, + "name": "name" + }, + { + "_id": 3, + "name": "name" + } + ] + } + ], + "tests": [ + { + "description": "deleteMany with let option", + "runOnRequirements": [ + { + "minServerVersion": "5.0" + } + ], + "operations": [ + { + "name": "deleteMany", + "object": "collection0", + "arguments": { + "filter": { + "$expr": { + "$eq": [ + "$name", + "$$name" + ] + } + }, + "let": { + "name": "name" + } + }, + "expectResult": { + "deletedCount": 2 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "delete": "coll0", + "deletes": [ + { + "q": { + "$expr": { + "$eq": [ + "$name", + "$$name" + ] + } + }, + "limit": 0 + } + ], + "let": { + "name": "name" + } 
+ } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ] + }, + { + "description": "deleteMany with let option unsupported (server-side error)", + "runOnRequirements": [ + { + "minServerVersion": "3.6.0", + "maxServerVersion": "4.4.99" + } + ], + "operations": [ + { + "name": "deleteMany", + "object": "collection0", + "arguments": { + "filter": { + "$expr": { + "$eq": [ + "$name", + "$$name" + ] + } + }, + "let": { + "name": "name" + } + }, + "expectError": { + "errorContains": "'delete.let' is an unknown field", + "isClientError": false + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "delete": "coll0", + "deletes": [ + { + "q": { + "$expr": { + "$eq": [ + "$name", + "$$name" + ] + } + }, + "limit": 0 + } + ], + "let": { + "name": "name" + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2, + "name": "name" + }, + { + "_id": 3, + "name": "name" + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/deleteOne-let.json b/test/crud/unified/deleteOne-let.json new file mode 100644 index 0000000000..9718682235 --- /dev/null +++ b/test/crud/unified/deleteOne-let.json @@ -0,0 +1,191 @@ +{ + "description": "deleteOne-let", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ], + "tests": [ + { + "description": "deleteOne with let option", + "runOnRequirements": [ + { + "minServerVersion": "5.0" + } + ], + "operations": [ + { + "name": "deleteOne", + "object": "collection0", + "arguments": { + "filter": { + "$expr": { + "$eq": [ + "$_id", + "$$id" + ] + } + }, + "let": { + "id": 1 + } + }, + "expectResult": { + "deletedCount": 1 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "delete": "coll0", + "deletes": [ + { + "q": { + "$expr": { + "$eq": [ + "$_id", + "$$id" + ] + } + }, + "limit": 1 + } + ], + "let": { + "id": 1 + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 2 + } + ] + } + ] + }, + { + "description": "deleteOne with let option unsupported (server-side error)", + "runOnRequirements": [ + { + "minServerVersion": "3.6.0", + "maxServerVersion": "4.4.99" + } + ], + "operations": [ + { + "name": "deleteOne", + "object": "collection0", + "arguments": { + "filter": { + "$expr": { + "$eq": [ + "$_id", + "$$id" + ] + } + }, + "let": { + "id": 1 + } + }, + "expectError": { + "errorContains": "'delete.let' is an unknown field", + "isClientError": false + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "delete": "coll0", + "deletes": [ + { + "q": { + "$expr": { + "$eq": [ + "$_id", + "$$id" + ] + } + }, + "limit": 1 + } + ], + "let": { + "id": 1 + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": 
"crud-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/find-let.json b/test/crud/unified/find-let.json new file mode 100644 index 0000000000..4e9c9c99f4 --- /dev/null +++ b/test/crud/unified/find-let.json @@ -0,0 +1,148 @@ +{ + "description": "find-let", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ], + "tests": [ + { + "description": "Find with let option", + "runOnRequirements": [ + { + "minServerVersion": "5.0" + } + ], + "operations": [ + { + "name": "find", + "object": "collection0", + "arguments": { + "filter": { + "$expr": { + "$eq": [ + "$_id", + "$$id" + ] + } + }, + "let": { + "id": 1 + } + }, + "expectResult": [ + { + "_id": 1 + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "coll0", + "filter": { + "$expr": { + "$eq": [ + "$_id", + "$$id" + ] + } + }, + "let": { + "id": 1 + } + } + } + } + ] + } + ] + }, + { + "description": "Find with let option unsupported (server-side error)", + "runOnRequirements": [ + { + "minServerVersion": "3.6.0", + "maxServerVersion": "4.4.99" + } + ], + "operations": [ + { + "name": "find", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "let": { + "x": 1 + } + }, + "expectError": { + "errorContains": "Unrecognized field 'let'", + "isClientError": false + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "coll0", + "filter": { + "_id": 1 + }, + "let": { + "x": 1 + } + } + } + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/findOneAndDelete-let.json b/test/crud/unified/findOneAndDelete-let.json new file mode 100644 index 0000000000..ba8e681c0e --- /dev/null +++ b/test/crud/unified/findOneAndDelete-let.json @@ -0,0 +1,180 @@ +{ + "description": "findOneAndDelete-let", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ], + "tests": [ + { + "description": "findOneAndDelete with let option", + "runOnRequirements": [ + { + "minServerVersion": "5.0" + } + ], + "operations": [ + { + "name": "findOneAndDelete", + "object": "collection0", + "arguments": { + "filter": { + "$expr": { + "$eq": [ + "$_id", + "$$id" + ] + } + }, + "let": { + "id": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "findAndModify": "coll0", + "query": { + "$expr": { + "$eq": [ + "$_id", + "$$id" + ] + } + }, + "remove": true, + "let": { + "id": 1 + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + 
"databaseName": "crud-tests", + "documents": [ + { + "_id": 2 + } + ] + } + ] + }, + { + "description": "findOneAndDelete with let option unsupported (server-side error)", + "runOnRequirements": [ + { + "minServerVersion": "4.2.0", + "maxServerVersion": "4.4.99" + } + ], + "operations": [ + { + "name": "findOneAndDelete", + "object": "collection0", + "arguments": { + "filter": { + "$expr": { + "$eq": [ + "$_id", + "$$id" + ] + } + }, + "let": { + "id": 1 + } + }, + "expectError": { + "errorContains": "field 'let' is an unknown field", + "isClientError": false + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "findAndModify": "coll0", + "query": { + "$expr": { + "$eq": [ + "$_id", + "$$id" + ] + } + }, + "remove": true, + "let": { + "id": 1 + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/findOneAndReplace-let.json b/test/crud/unified/findOneAndReplace-let.json new file mode 100644 index 0000000000..5e5de44b31 --- /dev/null +++ b/test/crud/unified/findOneAndReplace-let.json @@ -0,0 +1,197 @@ +{ + "description": "findOneAndReplace-let", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ], + "tests": [ + { + "description": "findOneAndReplace with let option", + "runOnRequirements": [ + { + "minServerVersion": "5.0" + } + ], + "operations": [ + { + "name": "findOneAndReplace", + "object": "collection0", + "arguments": { + "filter": { + "$expr": { + "$eq": [ + "$_id", + "$$id" + ] + } + }, + "replacement": { + "x": "x" + }, + "let": { + "id": 1 + } + }, + "expectResult": { + "_id": 1 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "findAndModify": "coll0", + "query": { + "$expr": { + "$eq": [ + "$_id", + "$$id" + ] + } + }, + "update": { + "x": "x" + }, + "let": { + "id": 1 + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": "x" + }, + { + "_id": 2 + } + ] + } + ] + }, + { + "description": "findOneAndReplace with let option unsupported (server-side error)", + "runOnRequirements": [ + { + "minServerVersion": "4.2.0", + "maxServerVersion": "4.4.99" + } + ], + "operations": [ + { + "name": "findOneAndReplace", + "object": "collection0", + "arguments": { + "filter": { + "$expr": { + "$eq": [ + "$_id", + "$$id" + ] + } + }, + "replacement": { + "x": "x" + }, + "let": { + "id": 1 + } + }, + "expectError": { + "errorContains": "field 'let' is an unknown field", + "isClientError": false + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "findAndModify": "coll0", + "query": { + "$expr": { + "$eq": [ + "$_id", + "$$id" + ] + } + }, + "update": { + "x": "x" + }, + "let": { + "id": 1 + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": 
"crud-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/findOneAndUpdate-let.json b/test/crud/unified/findOneAndUpdate-let.json new file mode 100644 index 0000000000..74d7d0e58b --- /dev/null +++ b/test/crud/unified/findOneAndUpdate-let.json @@ -0,0 +1,217 @@ +{ + "description": "findOneAndUpdate-let", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ], + "tests": [ + { + "description": "findOneAndUpdate with let option", + "runOnRequirements": [ + { + "minServerVersion": "5.0" + } + ], + "operations": [ + { + "name": "findOneAndUpdate", + "object": "collection0", + "arguments": { + "filter": { + "$expr": { + "$eq": [ + "$_id", + "$$id" + ] + } + }, + "update": [ + { + "$set": { + "x": "$$x" + } + } + ], + "let": { + "id": 1, + "x": "foo" + } + }, + "expectResult": { + "_id": 1 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "findAndModify": "coll0", + "query": { + "$expr": { + "$eq": [ + "$_id", + "$$id" + ] + } + }, + "update": [ + { + "$set": { + "x": "$$x" + } + } + ], + "let": { + "id": 1, + "x": "foo" + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": "foo" + }, + { + "_id": 2 + } + ] + } + ] + }, + { + "description": "findOneAndUpdate with let option unsupported (server-side error)", + "runOnRequirements": [ + { + "minServerVersion": "4.2.0", + "maxServerVersion": "4.4.99" + } + ], + "operations": [ + { + "name": "findOneAndUpdate", + "object": "collection0", + "arguments": { + "filter": { + "$expr": { + "$eq": [ + "$_id", + "$$id" + ] + } + }, + "update": [ + { + "$set": { + "x": "$$x" + } + } + ], + "let": { + "id": 1, + "x": "foo" + } + }, + "expectError": { + "errorContains": "field 'let' is an unknown field", + "isClientError": false + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "findAndModify": "coll0", + "query": { + "$expr": { + "$eq": [ + "$_id", + "$$id" + ] + } + }, + "update": [ + { + "$set": { + "x": "$$x" + } + } + ], + "let": { + "id": 1, + "x": "foo" + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/updateMany-let.json b/test/crud/unified/updateMany-let.json new file mode 100644 index 0000000000..b4a4ddd800 --- /dev/null +++ b/test/crud/unified/updateMany-let.json @@ -0,0 +1,243 @@ +{ + "description": "updateMany-let", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + 
"databaseName": "crud-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2, + "name": "name" + }, + { + "_id": 3, + "name": "name" + } + ] + } + ], + "tests": [ + { + "description": "updateMany with let option", + "runOnRequirements": [ + { + "minServerVersion": "5.0" + } + ], + "operations": [ + { + "name": "updateMany", + "object": "collection0", + "arguments": { + "filter": { + "$expr": { + "$eq": [ + "$name", + "$$name" + ] + } + }, + "update": [ + { + "$set": { + "x": "$$x", + "y": "$$y" + } + } + ], + "let": { + "name": "name", + "x": "foo", + "y": { + "$literal": "bar" + } + } + }, + "expectResult": { + "matchedCount": 2, + "modifiedCount": 2, + "upsertedCount": 0 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "coll0", + "updates": [ + { + "q": { + "$expr": { + "$eq": [ + "$name", + "$$name" + ] + } + }, + "u": [ + { + "$set": { + "x": "$$x", + "y": "$$y" + } + } + ], + "multi": true + } + ], + "let": { + "name": "name", + "x": "foo", + "y": { + "$literal": "bar" + } + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2, + "name": "name", + "x": "foo", + "y": "bar" + }, + { + "_id": 3, + "name": "name", + "x": "foo", + "y": "bar" + } + ] + } + ] + }, + { + "description": "updateMany with let option unsupported (server-side error)", + "runOnRequirements": [ + { + "minServerVersion": "3.6.0", + "maxServerVersion": "4.4.99" + } + ], + "operations": [ + { + "name": "updateMany", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "update": [ + { + "$set": { + "x": "$$x" + } + } + ], + "let": { + "x": "foo" + } + }, + "expectError": { + "errorContains": "'update.let' is an unknown field", + "isClientError": false + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "coll0", + "updates": [ + { + "q": { + "_id": 1 + }, + "u": [ + { + "$set": { + "x": "$$x" + } + } + ], + "multi": true + } + ], + "let": { + "x": "foo" + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2, + "name": "name" + }, + { + "_id": 3, + "name": "name" + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/updateOne-let.json b/test/crud/unified/updateOne-let.json new file mode 100644 index 0000000000..7b1cc4cf00 --- /dev/null +++ b/test/crud/unified/updateOne-let.json @@ -0,0 +1,215 @@ +{ + "description": "updateOne-let", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ], + "tests": [ + { + "description": "UpdateOne with let option", + "runOnRequirements": [ + { + "minServerVersion": "5.0" + } + ], + "operations": [ + { + "name": "updateOne", + "object": "collection0", + "arguments": { + "filter": { + "$expr": { + "$eq": [ + "$_id", + "$$id" + ] + } + }, + "update": [ + { + "$set": { + "x": "$$x" + } + } + ], + "let": { + "id": 1, + "x": "foo" + } + }, + 
"expectResult": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "coll0", + "updates": [ + { + "q": { + "$expr": { + "$eq": [ + "$_id", + "$$id" + ] + } + }, + "u": [ + { + "$set": { + "x": "$$x" + } + } + ] + } + ], + "let": { + "id": 1, + "x": "foo" + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": "foo" + }, + { + "_id": 2 + } + ] + } + ] + }, + { + "description": "UpdateOne with let option unsupported (server-side error)", + "runOnRequirements": [ + { + "minServerVersion": "3.6.0", + "maxServerVersion": "4.4.99" + } + ], + "operations": [ + { + "name": "updateOne", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "update": [ + { + "$set": { + "x": "$$x" + } + } + ], + "let": { + "x": "foo" + } + }, + "expectError": { + "errorContains": "'update.let' is an unknown field", + "isClientError": false + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "coll0", + "updates": [ + { + "q": { + "_id": 1 + }, + "u": [ + { + "$set": { + "x": "$$x" + } + } + ] + } + ], + "let": { + "x": "foo" + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ] + } + ] +} diff --git a/test/test_collection.py b/test/test_collection.py index 79a2a907a6..4af2298ceb 100644 --- a/test/test_collection.py +++ b/test/test_collection.py @@ -2178,6 +2178,23 @@ def test_bool(self): with self.assertRaises(NotImplementedError): bool(Collection(self.db, 'test')) + @client_context.require_version_min(5, 0, 0) + def test_helpers_with_let(self): + c = self.db.test + helpers = [(c.delete_many, ({}, {})), (c.delete_one, ({}, {})), + (c.find, ({})), (c.update_many, ({}, {'$inc': {'x': 3}})), + (c.update_one, ({}, {'$inc': {'x': 3}})), + (c.find_one_and_delete, ({}, {})), + (c.find_one_and_replace, ({}, {})), + (c.aggregate, ([], {}))] + for let in [10, "str"]: + for helper, args in helpers: + with self.assertRaisesRegex(TypeError, + "let must be an instance of dict"): + helper(*args, let=let) + for helper, args in helpers: + helper(*args, let={}) + if __name__ == "__main__": unittest.main() From 5ec4e6cc4cba641c98cb48eefb76b82c99d7ae82 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Thu, 2 Dec 2021 13:45:50 -0800 Subject: [PATCH 0025/1588] PYTHON-3027 Fix server selection when topology type is Unknown (#806) --- pymongo/topology_description.py | 7 +-- test/mockupdb/test_rsghost.py | 52 +++++++++++++++++++ .../server_selection/Unknown/read/ghost.json | 18 +++++++ .../server_selection/Unknown/write/ghost.json | 18 +++++++ test/utils_selection_tests.py | 8 ++- 5 files changed, 98 insertions(+), 5 deletions(-) create mode 100644 test/mockupdb/test_rsghost.py create mode 100644 test/server_selection/server_selection/Unknown/read/ghost.json create mode 100644 test/server_selection/server_selection/Unknown/write/ghost.json diff --git a/pymongo/topology_description.py b/pymongo/topology_description.py index d0100ff8b9..4fe897dcef 100644 --- a/pymongo/topology_description.py +++ b/pymongo/topology_description.py @@ -263,9 +263,10 @@ def apply_selector(self, selector, address=None, custom_selector=None): selector.min_wire_version, common_wv)) - if self.topology_type in 
(TOPOLOGY_TYPE.Single, - TOPOLOGY_TYPE.LoadBalanced, - TOPOLOGY_TYPE.Unknown): + if self.topology_type == TOPOLOGY_TYPE.Unknown: + return [] + elif self.topology_type in (TOPOLOGY_TYPE.Single, + TOPOLOGY_TYPE.LoadBalanced): # Ignore selectors for standalone and load balancer mode. return self.known_servers elif address: diff --git a/test/mockupdb/test_rsghost.py b/test/mockupdb/test_rsghost.py new file mode 100644 index 0000000000..2f02503f54 --- /dev/null +++ b/test/mockupdb/test_rsghost.py @@ -0,0 +1,52 @@ +# Copyright 2021-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Test connections to RSGhost nodes.""" + +import datetime + +from mockupdb import going, MockupDB +from pymongo import MongoClient +from pymongo.errors import ServerSelectionTimeoutError + +import unittest + + +class TestRSGhost(unittest.TestCase): + + def test_rsghost(self): + rsother_response = { + 'ok': 1.0, 'ismaster': False, 'secondary': False, + 'info': 'Does not have a valid replica set config', + 'isreplicaset': True, 'maxBsonObjectSize': 16777216, + 'maxMessageSizeBytes': 48000000, 'maxWriteBatchSize': 100000, + 'localTime': datetime.datetime(2021, 11, 30, 0, 53, 4, 99000), + 'logicalSessionTimeoutMinutes': 30, 'connectionId': 3, + 'minWireVersion': 0, 'maxWireVersion': 15, 'readOnly': False} + server = MockupDB(auto_ismaster=rsother_response) + server.run() + self.addCleanup(server.stop) + # Default auto discovery yields a server selection timeout. + with MongoClient(server.uri, serverSelectionTimeoutMS=250) as client: + with self.assertRaises(ServerSelectionTimeoutError): + client.test.command('ping') + # Direct connection succeeds. 
+ with MongoClient(server.uri, directConnection=True) as client: + with going(client.test.command, 'ping'): + request = server.receives(ping=1) + request.reply() + + +if __name__ == '__main__': + unittest.main() diff --git a/test/server_selection/server_selection/Unknown/read/ghost.json b/test/server_selection/server_selection/Unknown/read/ghost.json new file mode 100644 index 0000000000..76d3d774e8 --- /dev/null +++ b/test/server_selection/server_selection/Unknown/read/ghost.json @@ -0,0 +1,18 @@ +{ + "topology_description": { + "type": "Unknown", + "servers": [ + { + "address": "a:27017", + "avg_rtt_ms": 5, + "type": "RSGhost" + } + ] + }, + "operation": "read", + "read_preference": { + "mode": "Nearest" + }, + "suitable_servers": [], + "in_latency_window": [] +} diff --git a/test/server_selection/server_selection/Unknown/write/ghost.json b/test/server_selection/server_selection/Unknown/write/ghost.json new file mode 100644 index 0000000000..65caa4cd0a --- /dev/null +++ b/test/server_selection/server_selection/Unknown/write/ghost.json @@ -0,0 +1,18 @@ +{ + "topology_description": { + "type": "Unknown", + "servers": [ + { + "address": "a:27017", + "avg_rtt_ms": 5, + "type": "RSGhost" + } + ] + }, + "operation": "write", + "read_preference": { + "mode": "Nearest" + }, + "suitable_servers": [], + "in_latency_window": [] +} diff --git a/test/utils_selection_tests.py b/test/utils_selection_tests.py index 0006f6f673..76125b6f15 100644 --- a/test/utils_selection_tests.py +++ b/test/utils_selection_tests.py @@ -63,7 +63,7 @@ def make_server_description(server, hosts): return ServerDescription(clean_node(server['address']), Hello({})) hello_response = {'ok': True, 'hosts': hosts} - if server_type != "Standalone" and server_type != "Mongos": + if server_type not in ("Standalone", "Mongos", "RSGhost"): hello_response['setName'] = "rs" if server_type == "RSPrimary": @@ -72,6 +72,10 @@ def make_server_description(server, hosts): hello_response['secondary'] = True elif server_type == "Mongos": hello_response['msg'] = 'isdbgrid' + elif server_type == "RSGhost": + hello_response['isreplicaset'] = True + elif server_type == "RSArbiter": + hello_response['arbiterOnly'] = True hello_response['lastWrite'] = { 'lastWriteDate': make_last_write_date(server) @@ -149,7 +153,7 @@ def create_topology(scenario_def, **kwargs): # Assert that descriptions match assert (scenario_def['topology_description']['type'] == - topology.description.topology_type_name) + topology.description.topology_type_name), topology.description.topology_type_name return topology From 44853ea9c3ffe9dba5e356687f0870a1a41f3d7c Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Mon, 6 Dec 2021 11:26:36 -0800 Subject: [PATCH 0026/1588] PYTHON-3033 Fix typo in uuid docs (#808) --- doc/examples/uuid.rst | 3 ++- pymongo/mongo_client.py | 6 +++--- 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/doc/examples/uuid.rst b/doc/examples/uuid.rst index d4a77d4038..90ec71ebe2 100644 --- a/doc/examples/uuid.rst +++ b/doc/examples/uuid.rst @@ -1,3 +1,4 @@ + .. _handling-uuid-data-example: Handling UUID Data @@ -12,7 +13,7 @@ to MongoDB and retrieve them as native :class:`uuid.UUID` objects:: from uuid import uuid4 # use the 'standard' representation for cross-language compatibility. 
- client = MongoClient(uuid_representation=UuidRepresentation.STANDARD) + client = MongoClient(uuidRepresentation='standard') collection = client.get_database('uuid_db').get_collection('uuid_coll') # remove all documents from collection diff --git a/pymongo/mongo_client.py b/pymongo/mongo_client.py index af159da521..dae62e7605 100644 --- a/pymongo/mongo_client.py +++ b/pymongo/mongo_client.py @@ -325,9 +325,9 @@ def __init__( speed. 9 is best compression. Defaults to -1. - `uuidRepresentation`: The BSON representation to use when encoding from and decoding to instances of :class:`~uuid.UUID`. Valid - values are `pythonLegacy`, `javaLegacy`, `csharpLegacy`, `standard` - and `unspecified` (the default). New applications - should consider setting this to `standard` for cross language + values are the strings: "standard", "pythonLegacy", "javaLegacy", + "csharpLegacy", and "unspecified" (the default). New applications + should consider setting this to "standard" for cross language compatibility. See :ref:`handling-uuid-data-example` for details. - `unicode_decode_error_handler`: The error handler to apply when a Unicode-related error occurs during BSON decoding that would From 70f7fe75426b76debfce787a0ee2eb398c27a1ce Mon Sep 17 00:00:00 2001 From: Julius Park Date: Mon, 6 Dec 2021 13:13:15 -0800 Subject: [PATCH 0027/1588] PYTHON-3028 $regex as a field name does not allow for non-string values (#807) --- bson/json_util.py | 2 +- test/test_json_util.py | 9 +++++++++ 2 files changed, 10 insertions(+), 1 deletion(-) diff --git a/bson/json_util.py b/bson/json_util.py index 0644874b44..ed67d9a36c 100644 --- a/bson/json_util.py +++ b/bson/json_util.py @@ -508,7 +508,7 @@ def object_hook(dct, json_options=DEFAULT_JSON_OPTIONS): def _parse_legacy_regex(doc): pattern = doc["$regex"] # Check if this is the $regex query operator. - if isinstance(pattern, Regex): + if not isinstance(pattern, (str, bytes)): return doc flags = 0 # PyMongo always adds $options but some other tools may not. diff --git a/test/test_json_util.py b/test/test_json_util.py index f28b75c9be..dbf4f1c26a 100644 --- a/test/test_json_util.py +++ b/test/test_json_util.py @@ -270,6 +270,15 @@ def test_regex(self): json_util.dumps(Regex('.*', re.M | re.X), json_options=LEGACY_JSON_OPTIONS)) + def test_regex_validation(self): + non_str_types = [10, {}, []] + docs = [{"$regex": i} for i in non_str_types] + for doc in docs: + self.assertEqual(doc, json_util.loads(json.dumps(doc))) + + doc = {"$regex": ""} + self.assertIsInstance(json_util.loads(json.dumps(doc)), Regex) + def test_minkey(self): self.round_trip({"m": MinKey()}) From f3a76a703bbd3ec02ce3e8ea4b1a4bdf28162b0b Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Tue, 7 Dec 2021 11:14:09 -0800 Subject: [PATCH 0028/1588] BUMP 4.1.0.dev0 --- pymongo/__init__.py | 2 +- setup.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/pymongo/__init__.py b/pymongo/__init__.py index 4ac46672e7..5db9363f90 100644 --- a/pymongo/__init__.py +++ b/pymongo/__init__.py @@ -53,7 +53,7 @@ .. 
_text index: http://docs.mongodb.org/manual/core/index-text/
 """

-version_tuple = (4, 0, 1, '.dev0')
+version_tuple = (4, 1, 0, '.dev0')

 def get_version_string():
     if isinstance(version_tuple[-1], str):
diff --git a/setup.py b/setup.py
index 158eb9a42b..464e33e082 100755
--- a/setup.py
+++ b/setup.py
@@ -36,7 +36,7 @@
 except ImportError:
     _HAVE_SPHINX = False

-version = "4.0.1.dev0"
+version = "4.1.0.dev0"

 f = open("README.rst")
 try:

From e15464296876f5724649972b1f005af2b8ea03a9 Mon Sep 17 00:00:00 2001
From: Shane Harvey
Date: Tue, 7 Dec 2021 15:47:28 -0800
Subject: [PATCH 0029/1588] Removed references to outdated server versions (#812)

---
 bson/decimal128.py              |  2 -
 doc/examples/authentication.rst | 19 +++------
 doc/examples/bulk.rst           |  5 ---
 pymongo/aggregation.py          |  2 +-
 pymongo/client_session.py       |  2 -
 pymongo/collection.py           | 76 +++++++++++----------------------
 pymongo/common.py               |  2 +-
 pymongo/cursor.py               |  9 ++--
 pymongo/database.py             |  5 +--
 pymongo/mongo_client.py         | 39 +++++++----------
 pymongo/operations.py           | 23 ++++------
 pymongo/pool.py                 |  2 +-
 pymongo/results.py              | 16 +------
 pymongo/write_concern.py        |  8 ++--
 test/test_collection.py         |  2 -
 test/test_cursor.py             |  3 +-
 test/utils.py                   |  4 +-
 17 files changed, 69 insertions(+), 150 deletions(-)

diff --git a/bson/decimal128.py b/bson/decimal128.py
index 528e0f9a35..ede728bbab 100644
--- a/bson/decimal128.py
+++ b/bson/decimal128.py
@@ -15,8 +15,6 @@
 """Tools for working with the BSON decimal128 type.

 .. versionadded:: 3.4
-
-.. note:: The Decimal128 BSON type requires MongoDB 3.4+.
 """

 import decimal
diff --git a/doc/examples/authentication.rst b/doc/examples/authentication.rst
index 1e0f133a5a..db2dbd3d1f 100644
--- a/doc/examples/authentication.rst
+++ b/doc/examples/authentication.rst
@@ -97,9 +97,8 @@ the "MongoDB Challenge-Response" protocol::
 Default Authentication Mechanism
 --------------------------------

-If no mechanism is specified, PyMongo automatically uses MONGODB-CR when
-connected to a pre-3.0 version of MongoDB, SCRAM-SHA-1 when connected to
-MongoDB 3.0 through 3.6, and negotiates the mechanism to use (SCRAM-SHA-1
+If no mechanism is specified, PyMongo automatically uses SCRAM-SHA-1 when
+connected to MongoDB 3.6, and negotiates the mechanism to use (SCRAM-SHA-1
 or SCRAM-SHA-256) when connected to MongoDB 4.0+.

 Default Database and "authSource"
@@ -125,15 +124,12 @@ MONGODB-X509
 ------------
 .. versionadded:: 2.6

-The MONGODB-X509 mechanism authenticates a username derived from the
-distinguished subject name of the X.509 certificate presented by the driver
-during TLS/SSL negotiation. This authentication method requires the use of
-TLS/SSL connections with certificate validation and is available in
-MongoDB 2.6 and newer::
+The MONGODB-X509 mechanism authenticates via the X.509 certificate presented
+by the driver during TLS/SSL negotiation. This authentication method requires
+the use of TLS/SSL connections with certificate validation::

   >>> from pymongo import MongoClient
   >>> client = MongoClient('example.com',
-  ...                      username="<X.509 derived username>"
   ...                      authMechanism="MONGODB-X509",
   ...                      tls=True,
   ...                      tlsCertificateKeyFile='/path/to/client.pem',
  ...                      tlsCAFile='/path/to/ca.pem')
   >>>

 MONGODB-X509 authenticates against the $external virtual database, so you
 do not have to specify a database in the URI::

-  >>> uri = "mongodb://<username>@example.com/?authMechanism=MONGODB-X509"
+  >>> uri = "mongodb://example.com/?authMechanism=MONGODB-X509"
   >>> client = MongoClient(uri,
   ...                      tls=True,
   ...                      tlsCertificateKeyFile='/path/to/client.pem',
   ...                      tlsCAFile='/path/to/ca.pem')
   >>>

-.. versionchanged:: 3.4
-  When connected to MongoDB >= 3.4 the username is no longer required.
-
 .. _gssapi:

 GSSAPI (Kerberos)
diff --git a/doc/examples/bulk.rst b/doc/examples/bulk.rst
index 9e8a57a803..23505268f0 100644
--- a/doc/examples/bulk.rst
+++ b/doc/examples/bulk.rst
@@ -74,11 +74,6 @@ of operations performed.
     'writeConcernErrors': [],
     'writeErrors': []}

-.. warning:: ``nModified`` is only reported by MongoDB 2.6 and later. When
-   connected to an earlier server version, or in certain mixed version sharding
-   configurations, PyMongo omits this field from the results of a bulk
-   write operation.
-
 The first write failure that occurs (e.g. duplicate key error) aborts the
 remaining operations, and PyMongo raises
 :class:`~pymongo.errors.BulkWriteError`. The :attr:`details` attribute of
diff --git a/pymongo/aggregation.py b/pymongo/aggregation.py
index 4a565ee134..f0be39e671 100644
--- a/pymongo/aggregation.py
+++ b/pymongo/aggregation.py
@@ -147,7 +147,7 @@ def get_cursor(self, session, server, sock_info, secondary_ok):
         if 'cursor' in result:
             cursor = result['cursor']
         else:
-            # Pre-MongoDB 2.6 or unacknowledged write. Fake a cursor.
+            # Unacknowledged $out/$merge write. Fake a cursor.
             cursor = {
                 "id": 0,
                 "firstBatch": result.get("result", []),
diff --git a/pymongo/client_session.py b/pymongo/client_session.py
index f8071e5f2b..8c61623ae4 100644
--- a/pymongo/client_session.py
+++ b/pymongo/client_session.py
@@ -14,8 +14,6 @@

 """Logical sessions for ordering sequential operations.

-Requires MongoDB 3.6.
-
 .. versionadded:: 3.6

 Causally Consistent Reads
diff --git a/pymongo/collection.py b/pymongo/collection.py
index 393c26aa5c..092163c403 100644
--- a/pymongo/collection.py
+++ b/pymongo/collection.py
@@ -107,8 +107,7 @@ def __init__(self, database, name, create=False, codec_options=None,
             default) database.read_concern is used.
           - `collation` (optional): An instance of
             :class:`~pymongo.collation.Collation`. If a collation is provided,
-            it will be passed to the create collection command. This option is
-            only supported on MongoDB 3.4 and above.
+            it will be passed to the create collection command.
           - `session` (optional): a
             :class:`~pymongo.client_session.ClientSession` that is used with
             the create collection command
@@ -209,8 +208,7 @@ def _command(self, sock_info, command, secondary_ok=False,
           - `read_concern` (optional) - An instance of
             :class:`~pymongo.read_concern.ReadConcern`.
           - `write_concern`: An instance of
-            :class:`~pymongo.write_concern.WriteConcern`. This option is only
-            valid for MongoDB 3.4 and above.
+            :class:`~pymongo.write_concern.WriteConcern`.
           - `collation` (optional) - An instance of
             :class:`~pymongo.collation.Collation`.
           - `session` (optional): a
@@ -720,10 +718,9 @@ def replace_one(self, filter, replacement, upsert=False,
             match the filter.
- `bypass_document_validation`: (optional) If ``True``, allows the write to opt-out of document level validation. Default is - ``False``. This option is only supported on MongoDB 3.2 and above. + ``False``. - `collation` (optional): An instance of - :class:`~pymongo.collation.Collation`. This option is only supported - on MongoDB 3.4 and above. + :class:`~pymongo.collation.Collation`. - `array_filters` (optional): A list of filters specifying which - array elements an update should apply. This option is only - supported on MongoDB 3.6 and above. + array elements an update should apply. - `hint` (optional): An index to use to support the query predicate specified either by its string name, or in the same format as passed to @@ -872,13 +867,11 @@ def update_many(self, filter, update, upsert=False, array_filters=None, match the filter. - `bypass_document_validation` (optional): If ``True``, allows the write to opt-out of document level validation. Default is - ``False``. This option is only supported on MongoDB 3.2 and above. + ``False``. - `collation` (optional): An instance of - :class:`~pymongo.collation.Collation`. This option is only supported - on MongoDB 3.4 and above. + :class:`~pymongo.collation.Collation`. - `array_filters` (optional): A list of filters specifying which - array elements an update should apply. This option is only - supported on MongoDB 3.6 and above. + array elements an update should apply. - `hint` (optional): An index to use to support the query predicate specified either by its string name, or in the same format as passed to @@ -1028,8 +1021,7 @@ def delete_one(self, filter, collation=None, hint=None, session=None, :Parameters: - `filter`: A query that matches the document to delete. - `collation` (optional): An instance of - :class:`~pymongo.collation.Collation`. This option is only supported - on MongoDB 3.4 and above. + :class:`~pymongo.collation.Collation`. - `hint` (optional): An index to use to support the query predicate specified either by its string name, or in the same format as passed to @@ -1079,8 +1071,7 @@ def delete_many(self, filter, collation=None, hint=None, session=None, :Parameters: - `filter`: A query that matches the documents to delete. - `collation` (optional): An instance of - :class:`~pymongo.collation.Collation`. This option is only supported - on MongoDB 3.4 and above. + :class:`~pymongo.collation.Collation`. - `hint` (optional): An index to use to support the query predicate specified either by its string name, or in the same format as passed to @@ -1226,8 +1217,7 @@ def find(self, *args, **kwargs): - `batch_size` (optional): Limits the number of documents returned in a single batch. - `collation` (optional): An instance of - :class:`~pymongo.collation.Collation`. This option is only supported - on MongoDB 3.4 and above. + :class:`~pymongo.collation.Collation`. - `return_key` (optional): If True, return only the index keys in each document. - `show_record_id` (optional): If True, adds a field ``$recordId`` in @@ -1472,12 +1462,10 @@ def count_documents(self, filter, session=None, **kwargs): - `maxTimeMS` (int): The maximum amount of time to allow this operation to run, in milliseconds. - `collation` (optional): An instance of - :class:`~pymongo.collation.Collation`. This option is only supported - on MongoDB 3.4 and above. + :class:`~pymongo.collation.Collation`. - `hint` (string or list of tuples): The index to use. Specify either the index name as a string or the index specification as a list of tuples (e.g. 
[('a', pymongo.ASCENDING), ('b', pymongo.ASCENDING)]). - This option is only supported on MongoDB 3.6 and above. The :meth:`count_documents` method obeys the :attr:`read_preference` of this :class:`Collection`. @@ -1495,8 +1483,6 @@ def count_documents(self, filter, session=None, **kwargs): | $nearSphere | `$geoWithin`_ with `$centerSphere`_ | +-------------+-------------------------------------+ - $expr requires MongoDB 3.6+ - :Parameters: - `filter` (required): A query document that selects which documents to count in the collection. Can be an empty document to count all @@ -1554,13 +1540,8 @@ def create_indexes(self, indexes, session=None, **kwargs): - `**kwargs` (optional): optional arguments to the createIndexes command (like maxTimeMS) can be passed as keyword arguments. - .. note:: `create_indexes` uses the `createIndexes`_ command - introduced in MongoDB **2.6** and cannot be used with earlier - versions. - .. note:: The :attr:`~pymongo.collection.Collection.write_concern` of - this collection is automatically applied to this operation when using - MongoDB >= 3.4. + this collection is automatically applied to this operation. .. versionchanged:: 3.6 Added ``session`` parameter. Added support for arbitrary keyword @@ -1665,9 +1646,9 @@ def create_index(self, keys, session=None, **kwargs): this collection after seconds. The indexed field must be a UTC datetime or the data will not expire. - `partialFilterExpression`: A document that specifies a filter for - a partial index. Requires MongoDB >=3.2. + a partial index. - `collation` (optional): An instance of - :class:`~pymongo.collation.Collation`. Requires MongoDB >= 3.4. + :class:`~pymongo.collation.Collation`. - `wildcardProjection`: Allows users to include or exclude specific field paths from a `wildcard index`_ using the {"$**" : 1} key pattern. Requires MongoDB >= 4.2. @@ -1683,8 +1664,7 @@ def create_index(self, keys, session=None, **kwargs): using the option will fail if a duplicate value is detected. .. note:: The :attr:`~pymongo.collection.Collection.write_concern` of - this collection is automatically applied to this operation when using - MongoDB >= 3.4. + this collection is automatically applied to this operation. :Parameters: - `keys`: a single key or a list of (key, direction) @@ -1733,8 +1713,7 @@ def drop_indexes(self, session=None, **kwargs): command (like maxTimeMS) can be passed as keyword arguments. .. note:: The :attr:`~pymongo.collection.Collection.write_concern` of - this collection is automatically applied to this operation when using - MongoDB >= 3.4. + this collection is automatically applied to this operation. .. versionchanged:: 3.6 Added ``session`` parameter. Added support for arbitrary keyword @@ -1772,8 +1751,7 @@ def drop_index(self, index_or_name, session=None, **kwargs): command (like maxTimeMS) can be passed as keyword arguments. .. note:: The :attr:`~pymongo.collection.Collection.write_concern` of - this collection is automatically applied to this operation when using - MongoDB >= 3.4. + this collection is automatically applied to this operation. .. versionchanged:: 3.6 Added ``session`` parameter. Added support for arbitrary keyword @@ -1946,8 +1924,7 @@ def aggregate(self, pipeline, session=None, let=None, **kwargs): example is included in the :ref:`aggregate-examples` documentation. .. note:: The :attr:`~pymongo.collection.Collection.write_concern` of - this collection is automatically applied to this operation when using - MongoDB >= 3.4. 
+ this collection is automatically applied to this operation. :Parameters: - `pipeline`: a list of aggregation pipeline stages @@ -2060,8 +2037,6 @@ def watch(self, pipeline=None, full_document=None, resume_after=None, :class:`~pymongo.change_stream.CollectionChangeStream` cursor which iterates over changes on this collection. - Introduced in MongoDB 3.6. - .. code-block:: python with db.collection.watch() as stream: @@ -2172,8 +2147,7 @@ def rename(self, new_name, session=None, **kwargs): (i.e. ``dropTarget=True``) .. note:: The :attr:`~pymongo.collection.Collection.write_concern` of - this collection is automatically applied to this operation when using - MongoDB >= 3.4. + this collection is automatically applied to this operation. .. versionchanged:: 3.6 Added ``session`` parameter. @@ -2219,8 +2193,7 @@ def distinct(self, key, filter=None, session=None, **kwargs): - `maxTimeMS` (int): The maximum amount of time to allow the count command to run, in milliseconds. - `collation` (optional): An instance of - :class:`~pymongo.collation.Collation`. This option is only supported - on MongoDB 3.4 and above. + :class:`~pymongo.collation.Collation`. The :meth:`distinct` method obeys the :attr:`read_preference` of this :class:`Collection`. @@ -2576,8 +2549,7 @@ def find_one_and_update(self, filter, update, :attr:`ReturnDocument.AFTER`, returns the updated or inserted document. - `array_filters` (optional): A list of filters specifying which - array elements an update should apply. This option is only - supported on MongoDB 3.6 and above. + array elements an update should apply. - `hint` (optional): An index to use to support the query predicate specified either by its string name, or in the same format as passed to diff --git a/pymongo/common.py b/pymongo/common.py index 772f2f299b..14789c8109 100644 --- a/pymongo/common.py +++ b/pymongo/common.py @@ -102,7 +102,7 @@ # Default value for retryReads. RETRY_READS = True -# mongod/s 2.6 and above return code 59 when a command doesn't exist. +# The error code returned when a command doesn't exist. COMMAND_NOT_FOUND_CODES = (59,) # Error codes to ignore if GridFS calls createIndex on a secondary diff --git a/pymongo/cursor.py b/pymongo/cursor.py index e825edf8fd..3e78c2d97c 100644 --- a/pymongo/cursor.py +++ b/pymongo/cursor.py @@ -773,8 +773,7 @@ def sort(self, key_or_list, direction=None): ('field2', pymongo.DESCENDING)]): print(doc) - Beginning with MongoDB version 2.6, text search results can be - sorted by relevance:: + Text search results can be sorted by relevance:: cursor = db.test.find( {'$text': {'$search': 'some words'}}, @@ -837,8 +836,8 @@ def distinct(self, key): def explain(self): """Returns an explain plan record for this cursor. - .. note:: Starting with MongoDB 3.2 :meth:`explain` uses - the default verbosity mode of the `explain command + .. note:: This method uses the default verbosity mode of the + `explain command `_, ``allPlansExecution``. To use a different verbosity use :meth:`~pymongo.database.Database.command` to run the explain @@ -944,8 +943,6 @@ def where(self, code): def collation(self, collation): """Adds a :class:`~pymongo.collation.Collation` to this query. - This option is only supported on MongoDB 3.4 and above. - Raises :exc:`TypeError` if `collation` is not an instance of :class:`~pymongo.collation.Collation` or a ``dict``. 
Raises :exc:`~pymongo.errors.InvalidOperation` if this :class:`Cursor` has diff --git a/pymongo/database.py b/pymongo/database.py index dc8c13cbb0..c7ed38b73f 100644 --- a/pymongo/database.py +++ b/pymongo/database.py @@ -321,8 +321,6 @@ def aggregate(self, pipeline, session=None, **kwargs): See the `aggregation pipeline`_ documentation for a list of stages that are supported. - Introduced in MongoDB 3.6. - .. code-block:: python # Lists all operations currently running on the server. @@ -716,8 +714,7 @@ def drop_collection(self, name_or_collection, session=None): :class:`~pymongo.client_session.ClientSession`. .. note:: The :attr:`~pymongo.database.Database.write_concern` of - this database is automatically applied to this operation when using - MongoDB >= 3.4. + this database is automatically applied to this operation. .. versionchanged:: 3.6 Added ``session`` parameter. diff --git a/pymongo/mongo_client.py b/pymongo/mongo_client.py index dae62e7605..41e701706d 100644 --- a/pymongo/mongo_client.py +++ b/pymongo/mongo_client.py @@ -247,10 +247,9 @@ def __init__( between periodic server checks, or None to accept the default frequency of 10 seconds. - `appname`: (string or None) The name of the application that - created this MongoClient instance. MongoDB 3.4 and newer will - print this value in the server log upon establishing each - connection. It is also recorded in the slow query log and - profile collections. + created this MongoClient instance. The server will log this value + upon establishing each connection. It is also recorded in the slow + query log and profile collections. - `driver`: (pair or None) A driver implemented on top of PyMongo can pass a :class:`~pymongo.driver_info.DriverInfo` to add its name, version, and platform to the message printed in the server log when @@ -259,7 +258,7 @@ def __init__( :mod:`~pymongo.monitoring` for details. - `retryWrites`: (boolean) Whether supported write operations executed within this MongoClient will be retried once after a - network error on MongoDB 3.6+. Defaults to ``True``. + network error. Defaults to ``True``. The supported write operations are: - :meth:`~pymongo.collection.Collection.bulk_write`, as long as @@ -281,7 +280,7 @@ def __init__( https://github.com/mongodb/specifications/blob/master/source/retryable-writes/retryable-writes.rst - `retryReads`: (boolean) Whether supported read operations executed within this MongoClient will be retried once after a - network error on MongoDB 3.6+. Defaults to ``True``. + network error. Defaults to ``True``. The supported read operations are: :meth:`~pymongo.collection.Collection.find`, :meth:`~pymongo.collection.Collection.find_one`, @@ -315,9 +314,8 @@ def __init__( zlib support requires the Python standard library zlib module. zstd requires the `zstandard `_ package. By default no compression is used. Compression support - must also be enabled on the server. MongoDB 3.4+ supports snappy - compression. MongoDB 3.6 adds support for zlib. MongoDB 4.2 adds - support for zstd. + must also be enabled on the server. MongoDB 3.6+ supports snappy + and zlib compression. MongoDB 4.2+ adds support for zstd. - `zlibCompressionLevel`: (int) The zlib compression level to use when zlib is used as the wire protocol compressor. Supported values are -1 through 9. -1 tells the zlib library to use its default @@ -355,10 +353,8 @@ def __init__( will cause **write operations to wait indefinitely**. - `journal`: If ``True`` block until write operations have been committed to the journal. 
Cannot be used in combination with
-          `fsync`. Prior to MongoDB 2.6 this option was ignored if the server
-          was running without journaling. Starting with MongoDB 2.6 write
-          operations will fail with an exception if this option is used when
-          the server is running without journaling.
+          `fsync`. Write operations will fail with an exception if this
+          option is used when the server is running without journaling.
         - `fsync`: If ``True`` and the server is running without journaling,
           blocks until the server has synced all data files to disk. If the
           server is running with journaling, this acts the same as the `j`
@@ -406,11 +402,9 @@ def __init__(
         - `authSource`: The database to authenticate on. Defaults to the
           database specified in the URI, if provided, or to "admin".
        - `authMechanism`: See :data:`~pymongo.auth.MECHANISMS` for options.
-          If no mechanism is specified, PyMongo automatically uses MONGODB-CR
-          when connected to a pre-3.0 version of MongoDB, SCRAM-SHA-1 when
-          connected to MongoDB 3.0 through 3.6, and negotiates the mechanism
-          to use (SCRAM-SHA-1 or SCRAM-SHA-256) when connected to MongoDB
-          4.0+.
+          If no mechanism is specified, PyMongo automatically uses SCRAM-SHA-1
+          when connected to MongoDB 3.6, and negotiates the mechanism to use
+          (SCRAM-SHA-1 or SCRAM-SHA-256) when connected to MongoDB 4.0+.
        - `authMechanismProperties`: Used to specify authentication mechanism
          specific options. To specify the service name for GSSAPI
          authentication pass authMechanismProperties='SERVICE_NAME:<service name>'

-        If connected to MongoDB >= 3.6, end all server sessions created by this client by
-        sending one or more endSessions commands.
+        End all server sessions created by this client by sending one or more
+        endSessions commands.

         Close all sockets in the connection pools and stop the monitor
         threads.
@@ -1565,8 +1559,6 @@ def start_session(self,
             :class:`~pymongo.client_session.SessionOptions`. See the
             :mod:`~pymongo.client_session` module for details and examples.

-        Requires MongoDB 3.6.
-
         A :class:`~pymongo.client_session.ClientSession` may only be used with
         the MongoClient that started it. :class:`ClientSession` instances are
         **not thread-safe or fork-safe**. They can only be used by one thread
@@ -1722,8 +1714,7 @@ def drop_database(self, name_or_database, session=None):
           Added ``session`` parameter.

        .. note:: The :attr:`~pymongo.mongo_client.MongoClient.write_concern` of
-          this client is automatically applied to this operation when using
-          MongoDB >= 3.4.
+          this client is automatically applied to this operation.

        .. versionchanged:: 3.4
           Apply this client's write concern automatically to this operation
diff --git a/pymongo/operations.py b/pymongo/operations.py
index b5d670e0ff..be6a959f5c 100644
--- a/pymongo/operations.py
+++ b/pymongo/operations.py
@@ -65,8 +65,7 @@ def __init__(self, filter, collation=None, hint=None):
         :Parameters:
           - `filter`: A query that matches the document to delete.
          - `collation` (optional): An instance of
-            :class:`~pymongo.collation.Collation`. This option is only
-            supported on MongoDB 3.4 and above.
+            :class:`~pymongo.collation.Collation`.
          - `hint` (optional): An index to use to support the query
            predicate specified either by its string name, or in the same
            format as passed to
@@ -119,8 +118,7 @@ def __init__(self, filter, collation=None, hint=None):
         :Parameters:
           - `filter`: A query that matches the documents to delete.
          - `collation` (optional): An instance of
-            :class:`~pymongo.collation.Collation`. This option is only
-            supported on MongoDB 3.4 and above.
+            :class:`~pymongo.collation.Collation`.
- `hint` (optional): An index to use to support the query predicate specified either by its string name, or in the same format as passed to @@ -177,8 +175,7 @@ def __init__(self, filter, replacement, upsert=False, collation=None, - `upsert` (optional): If ``True``, perform an insert if no documents match the filter. - `collation` (optional): An instance of - :class:`~pymongo.collation.Collation`. This option is only - supported on MongoDB 3.4 and above. + :class:`~pymongo.collation.Collation`. - `hint` (optional): An index to use to support the query predicate specified either by its string name, or in the same format as passed to @@ -287,10 +284,9 @@ def __init__(self, filter, update, upsert=False, collation=None, - `upsert` (optional): If ``True``, perform an insert if no documents match the filter. - `collation` (optional): An instance of - :class:`~pymongo.collation.Collation`. This option is only - supported on MongoDB 3.4 and above. + :class:`~pymongo.collation.Collation`. - `array_filters` (optional): A list of filters specifying which - array elements an update should apply. Requires MongoDB 3.6+. + array elements an update should apply. - `hint` (optional): An index to use to support the query predicate specified either by its string name, or in the same format as passed to @@ -335,10 +331,9 @@ def __init__(self, filter, update, upsert=False, collation=None, - `upsert` (optional): If ``True``, perform an insert if no documents match the filter. - `collation` (optional): An instance of - :class:`~pymongo.collation.Collation`. This option is only - supported on MongoDB 3.4 and above. + :class:`~pymongo.collation.Collation`. - `array_filters` (optional): A list of filters specifying which - array elements an update should apply. Requires MongoDB 3.6+. + array elements an update should apply. - `hint` (optional): An index to use to support the query predicate specified either by its string name, or in the same format as passed to @@ -404,9 +399,9 @@ def __init__(self, keys, **kwargs): this collection after seconds. The indexed field must be a UTC datetime or the data will not expire. - `partialFilterExpression`: A document that specifies a filter for - a partial index. Requires MongoDB >= 3.2. + a partial index. - `collation`: An instance of :class:`~pymongo.collation.Collation` - that specifies the collation to use in MongoDB >= 3.4. + that specifies the collation to use. - `wildcardProjection`: Allows users to include or exclude specific field paths from a `wildcard index`_ using the { "$**" : 1} key pattern. Requires MongoDB >= 4.2. diff --git a/pymongo/pool.py b/pymongo/pool.py index f9b370c66e..84661c4879 100644 --- a/pymongo/pool.py +++ b/pymongo/pool.py @@ -873,7 +873,7 @@ def socket_closed(self): return self.socket_checker.socket_closed(self.sock) def send_cluster_time(self, command, session, client): - """Add cluster time for MongoDB >= 3.6.""" + """Add $clusterTime.""" if client: client._send_cluster_time(command, session) diff --git a/pymongo/results.py b/pymongo/results.py index a5025e9f48..0374803249 100644 --- a/pymongo/results.py +++ b/pymongo/results.py @@ -118,13 +118,7 @@ def matched_count(self): @property def modified_count(self): - """The number of documents modified. - - .. note:: modified_count is only reported by MongoDB 2.6 and later. - When connected to an earlier server version, or in certain mixed - version sharding configurations, this attribute will be set to - ``None``. - """ + """The number of documents modified. 
""" self._raise_if_unacknowledged("modified_count") return self.__raw_result.get("nModified") @@ -195,13 +189,7 @@ def matched_count(self): @property def modified_count(self): - """The number of documents modified. - - .. note:: modified_count is only reported by MongoDB 2.6 and later. - When connected to an earlier server version, or in certain mixed - version sharding configurations, this attribute will be set to - ``None``. - """ + """The number of documents modified.""" self._raise_if_unacknowledged("modified_count") return self.__bulk_api_result.get("nModified") diff --git a/pymongo/write_concern.py b/pymongo/write_concern.py index ebc997c0db..2075240f0a 100644 --- a/pymongo/write_concern.py +++ b/pymongo/write_concern.py @@ -33,11 +33,9 @@ class WriteConcern(object): to complete. If replication does not complete in the given timeframe, a timeout exception is raised. - `j`: If ``True`` block until write operations have been committed - to the journal. Cannot be used in combination with `fsync`. Prior - to MongoDB 2.6 this option was ignored if the server was running - without journaling. Starting with MongoDB 2.6 write operations will - fail with an exception if this option is used when the server is - running without journaling. + to the journal. Cannot be used in combination with `fsync`. Write + operations will fail with an exception if this option is used when + the server is running without journaling. - `fsync`: If ``True`` and the server is running without journaling, blocks until the server has synced all data files to disk. If the server is running with journaling, this acts the same as the `j` diff --git a/test/test_collection.py b/test/test_collection.py index 4af2298ceb..4a167bacb3 100644 --- a/test/test_collection.py +++ b/test/test_collection.py @@ -680,8 +680,6 @@ def test_options(self): db.drop_collection("test") db.create_collection("test", capped=True, size=4096) result = db.test.options() - # mongos 2.2.x adds an $auth field when auth is enabled. - result.pop('$auth', None) self.assertEqual(result, {"capped": True, 'size': 4096}) db.drop_collection("test") diff --git a/test/test_cursor.py b/test/test_cursor.py index d56f9fc27d..8c27544b80 100644 --- a/test/test_cursor.py +++ b/test/test_cursor.py @@ -343,8 +343,7 @@ def test_explain(self): for _ in a: break b = a.explain() - # "cursor" pre MongoDB 2.7.6, "executionStats" post - self.assertTrue("cursor" in b or "executionStats" in b) + self.assertIn("executionStats", b) def test_explain_with_read_concern(self): # Do not add readConcern level to explain. 
diff --git a/test/utils.py b/test/utils.py
index 5b6f9fd264..bdea5c69c2 100644
--- a/test/utils.py
+++ b/test/utils.py
@@ -928,7 +928,7 @@ def is_greenthread_patched():


 def disable_replication(client):
-    """Disable replication on all secondaries, requires MongoDB 3.2."""
+    """Disable replication on all secondaries."""
     for host, port in client.secondaries:
         secondary = single_client(host, port)
         secondary.admin.command('configureFailPoint', 'stopReplProducer',
@@ -936,7 +936,7 @@ def disable_replication(client):


 def enable_replication(client):
-    """Enable replication on all secondaries, requires MongoDB 3.2."""
+    """Enable replication on all secondaries."""
     for host, port in client.secondaries:
         secondary = single_client(host, port)
         secondary.admin.command('configureFailPoint', 'stopReplProducer',

From bf992c20a63e8d235ff7b4021a0ba4246d1ddec5 Mon Sep 17 00:00:00 2001
From: Shane Harvey
Date: Tue, 7 Dec 2021 16:26:01 -0800
Subject: [PATCH 0030/1588] PYTHON-2554 Support aggregate $merge and $out
 executing on secondaries (#774)

---
 doc/changelog.rst                              |   6 +
 pymongo/aggregation.py                         |  12 +-
 pymongo/collection.py                          |   8 +-
 pymongo/mongo_client.py                        |   2 +-
 pymongo/read_preferences.py                    |  39 ++
 pymongo/topology_description.py                |  17 +-
 .../aggregate-write-readPreference.json        | 460 ++++++++++++++++++
 .../db-aggregate-write-readPreference.json     | 446 +++++++++++++++++
 test/test_read_preferences.py                  |   4 +-
 9 files changed, 978 insertions(+), 16 deletions(-)
 create mode 100644 test/crud/unified/aggregate-write-readPreference.json
 create mode 100644 test/crud/unified/db-aggregate-write-readPreference.json

diff --git a/doc/changelog.rst b/doc/changelog.rst
index 192b456619..062104bc8f 100644
--- a/doc/changelog.rst
+++ b/doc/changelog.rst
@@ -4,6 +4,8 @@ Changelog
 Changes in Version 4.1
 ----------------------

+PyMongo 4.1 brings a number of improvements including:
+
 - :meth:`pymongo.collection.Collection.update_one`,
   :meth:`pymongo.collection.Collection.update_many`,
   :meth:`pymongo.collection.Collection.delete_one`,
@@ -15,6 +17,10 @@ Changes in Version 4.1
   and :meth:`pymongo.collection.Collection.find` all support a new keyword
   argument ``let`` which is a map of parameter names and values. Parameters
   can then be accessed as variables in an aggregate expression context.
+- :meth:`~pymongo.collection.Collection.aggregate` now supports
+  $merge and $out executing on secondaries on MongoDB >= 5.0.
+  aggregate() now always obeys the collection's :attr:`read_preference` on
+  MongoDB >= 5.0.
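
To illustrate, a minimal sketch of the new behavior (the connection string,
database, and collection names here are illustrative, and a MongoDB 5.0+
replica set is assumed)::

    from pymongo import MongoClient
    from pymongo.read_preferences import ReadPreference

    client = MongoClient("mongodb://localhost:27017/?replicaSet=rs0")
    coll = client.db.get_collection(
        "src", read_preference=ReadPreference.SECONDARY_PREFERRED)
    # On MongoDB 5.0+ this $out pipeline may be routed to a secondary;
    # on older servers PyMongo falls back to the primary automatically.
    coll.aggregate([{"$match": {"qty": {"$gt": 10}}}, {"$out": "dst"}])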
Changes in Version 4.0 diff --git a/pymongo/aggregation.py b/pymongo/aggregation.py index f0be39e671..a5a7abaed7 100644 --- a/pymongo/aggregation.py +++ b/pymongo/aggregation.py @@ -19,7 +19,7 @@ from pymongo import common from pymongo.collation import validate_collation_or_none from pymongo.errors import ConfigurationError -from pymongo.read_preferences import ReadPreference +from pymongo.read_preferences import _AggWritePref, ReadPreference class _AggregationCommand(object): @@ -70,6 +70,7 @@ def __init__(self, target, cursor_class, pipeline, options, options.pop('collation', None)) self._max_await_time_ms = options.pop('maxAwaitTimeMS', None) + self._write_preference = None @property def _aggregation_target(self): @@ -97,9 +98,12 @@ def _process_result(self, result, session, server, sock_info, secondary_ok): result, session, server, sock_info, secondary_ok) def get_read_preference(self, session): - if self._performs_write: - return ReadPreference.PRIMARY - return self._target._read_preference_for(session) + if self._write_preference: + return self._write_preference + pref = self._target._read_preference_for(session) + if self._performs_write and pref != ReadPreference.PRIMARY: + self._write_preference = pref = _AggWritePref(pref) + return pref def get_cursor(self, session, server, sock_info, secondary_ok): # Serialize command. diff --git a/pymongo/collection.py b/pymongo/collection.py index 092163c403..ea11875ce2 100644 --- a/pymongo/collection.py +++ b/pymongo/collection.py @@ -1915,9 +1915,9 @@ def aggregate(self, pipeline, session=None, let=None, **kwargs): collection. The :meth:`aggregate` method obeys the :attr:`read_preference` of this - :class:`Collection`, except when ``$out`` or ``$merge`` are used, in - which case :attr:`~pymongo.read_preferences.ReadPreference.PRIMARY` - is used. + :class:`Collection`, except when ``$out`` or ``$merge`` are used on + MongoDB <5.0, in which case + :attr:`~pymongo.read_preferences.ReadPreference.PRIMARY` is used. .. note:: This method does not support the 'explain' option. Please use :meth:`~pymongo.database.Database.command` instead. An @@ -1958,6 +1958,8 @@ def aggregate(self, pipeline, session=None, let=None, **kwargs): .. versionchanged:: 4.1 Added ``let`` parameter. + Support $merge and $out executing on secondaries according to the + collection's :attr:`read_preference`. .. versionchanged:: 4.0 Removed the ``useCursor`` option. .. versionchanged:: 3.9 diff --git a/pymongo/mongo_client.py b/pymongo/mongo_client.py index 41e701706d..9c98e5d211 100644 --- a/pymongo/mongo_client.py +++ b/pymongo/mongo_client.py @@ -1155,7 +1155,7 @@ def _secondaryok_for_server(self, read_preference, server, session): with self._get_socket(server, session) as sock_info: secondary_ok = (single and not sock_info.is_mongos) or ( - read_preference != ReadPreference.PRIMARY) + read_preference.mode != ReadPreference.PRIMARY.mode) yield sock_info, secondary_ok @contextlib.contextmanager diff --git a/pymongo/read_preferences.py b/pymongo/read_preferences.py index c60240822d..2471d5834c 100644 --- a/pymongo/read_preferences.py +++ b/pymongo/read_preferences.py @@ -424,6 +424,45 @@ def __call__(self, selection): self.max_staleness, selection)) +class _AggWritePref: + """Agg $out/$merge write preference. + + * If there are readable servers and there is any pre-5.0 server, use + primary read preference. + * Otherwise use `pref` read preference. + + :Parameters: + - `pref`: The read preference to use on MongoDB 5.0+. 
+ """ + + __slots__ = ('pref', 'effective_pref') + + def __init__(self, pref): + self.pref = pref + self.effective_pref = ReadPreference.PRIMARY + + def selection_hook(self, topology_description): + common_wv = topology_description.common_wire_version + if (topology_description.has_readable_server( + ReadPreference.PRIMARY_PREFERRED) and + common_wv and common_wv < 13): + self.effective_pref = ReadPreference.PRIMARY + else: + self.effective_pref = self.pref + + def __call__(self, selection): + """Apply this read preference to a Selection.""" + return self.effective_pref(selection) + + def __repr__(self): + return "_AggWritePref(pref=%r)" % (self.pref,) + + # Proxy other calls to the effective_pref so that _AggWritePref can be + # used in place of an actual read preference. + def __getattr__(self, name): + return getattr(self.effective_pref, name) + + _ALL_READ_PREFERENCES = (Primary, PrimaryPreferred, Secondary, SecondaryPreferred, Nearest) diff --git a/pymongo/topology_description.py b/pymongo/topology_description.py index 4fe897dcef..c13d00a64c 100644 --- a/pymongo/topology_description.py +++ b/pymongo/topology_description.py @@ -19,7 +19,7 @@ from pymongo import common from pymongo.errors import ConfigurationError -from pymongo.read_preferences import ReadPreference +from pymongo.read_preferences import ReadPreference, _AggWritePref from pymongo.server_description import ServerDescription from pymongo.server_selectors import Selection from pymongo.server_type import SERVER_TYPE @@ -263,21 +263,24 @@ def apply_selector(self, selector, address=None, custom_selector=None): selector.min_wire_version, common_wv)) + if isinstance(selector, _AggWritePref): + selector.selection_hook(self) + if self.topology_type == TOPOLOGY_TYPE.Unknown: return [] elif self.topology_type in (TOPOLOGY_TYPE.Single, TOPOLOGY_TYPE.LoadBalanced): # Ignore selectors for standalone and load balancer mode. return self.known_servers - elif address: + if address: # Ignore selectors when explicit address is requested. description = self.server_descriptions().get(address) return [description] if description else [] - elif self.topology_type == TOPOLOGY_TYPE.Sharded: - # Ignore read preference. - selection = Selection.from_topology_description(self) - else: - selection = selector(Selection.from_topology_description(self)) + + selection = Selection.from_topology_description(self) + # Ignore read preference for sharded clusters. + if self.topology_type != TOPOLOGY_TYPE.Sharded: + selection = selector(selection) # Apply custom selector followed by localThresholdMS. 
if custom_selector is not None and selection: diff --git a/test/crud/unified/aggregate-write-readPreference.json b/test/crud/unified/aggregate-write-readPreference.json new file mode 100644 index 0000000000..28327e8d83 --- /dev/null +++ b/test/crud/unified/aggregate-write-readPreference.json @@ -0,0 +1,460 @@ +{ + "description": "aggregate-write-readPreference", + "schemaVersion": "1.4", + "runOnRequirements": [ + { + "minServerVersion": "3.6", + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "_yamlAnchors": { + "readConcern": { + "level": "local" + }, + "writeConcern": { + "w": 1 + } + }, + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ], + "uriOptions": { + "readConcernLevel": "local", + "w": 1 + } + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "db0" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0", + "collectionOptions": { + "readPreference": { + "mode": "secondaryPreferred", + "maxStalenessSeconds": 600 + } + } + } + }, + { + "collection": { + "id": "collection1", + "database": "database0", + "collectionName": "coll1" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "db0", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + }, + { + "collectionName": "coll1", + "databaseName": "db0", + "documents": [] + } + ], + "tests": [ + { + "description": "Aggregate with $out includes read preference for 5.0+ server", + "runOnRequirements": [ + { + "minServerVersion": "5.0", + "serverless": "forbid" + } + ], + "operations": [ + { + "object": "collection0", + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$sort": { + "x": 1 + } + }, + { + "$out": "coll1" + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "coll0", + "pipeline": [ + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$sort": { + "x": 1 + } + }, + { + "$out": "coll1" + } + ], + "$readPreference": { + "mode": "secondaryPreferred", + "maxStalenessSeconds": 600 + }, + "readConcern": { + "level": "local" + }, + "writeConcern": { + "w": 1 + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll1", + "databaseName": "db0", + "documents": [ + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ] + }, + { + "description": "Aggregate with $out omits read preference for pre-5.0 server", + "runOnRequirements": [ + { + "minServerVersion": "4.2", + "maxServerVersion": "4.4.99", + "serverless": "forbid" + } + ], + "operations": [ + { + "object": "collection0", + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$sort": { + "x": 1 + } + }, + { + "$out": "coll1" + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "coll0", + "pipeline": [ + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$sort": { + "x": 1 + } + }, + { + "$out": "coll1" + } + ], + "$readPreference": { + "mode": "primary" + }, + "readConcern": { + "level": "local" + }, + "writeConcern": { + "w": 1 + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll1", + "databaseName": "db0", + "documents": [ + { + "_id": 2, + "x": 22 + }, 
+ { + "_id": 3, + "x": 33 + } + ] + } + ] + }, + { + "description": "Aggregate with $merge includes read preference for 5.0+ server", + "runOnRequirements": [ + { + "minServerVersion": "5.0" + } + ], + "operations": [ + { + "object": "collection0", + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$sort": { + "x": 1 + } + }, + { + "$merge": { + "into": "coll1" + } + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "coll0", + "pipeline": [ + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$sort": { + "x": 1 + } + }, + { + "$merge": { + "into": "coll1" + } + } + ], + "$readPreference": { + "mode": "secondaryPreferred", + "maxStalenessSeconds": 600 + }, + "readConcern": { + "level": "local" + }, + "writeConcern": { + "w": 1 + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll1", + "databaseName": "db0", + "documents": [ + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ] + }, + { + "description": "Aggregate with $merge omits read preference for pre-5.0 server", + "runOnRequirements": [ + { + "minServerVersion": "4.2", + "maxServerVersion": "4.4.99" + } + ], + "operations": [ + { + "object": "collection0", + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$sort": { + "x": 1 + } + }, + { + "$merge": { + "into": "coll1" + } + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "coll0", + "pipeline": [ + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$sort": { + "x": 1 + } + }, + { + "$merge": { + "into": "coll1" + } + } + ], + "$readPreference": { + "mode": "primary" + }, + "readConcern": { + "level": "local" + }, + "writeConcern": { + "w": 1 + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll1", + "databaseName": "db0", + "documents": [ + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/db-aggregate-write-readPreference.json b/test/crud/unified/db-aggregate-write-readPreference.json new file mode 100644 index 0000000000..269299e3c7 --- /dev/null +++ b/test/crud/unified/db-aggregate-write-readPreference.json @@ -0,0 +1,446 @@ +{ + "description": "db-aggregate-write-readPreference", + "schemaVersion": "1.4", + "runOnRequirements": [ + { + "minServerVersion": "3.6", + "topologies": [ + "replicaset" + ], + "serverless": "forbid" + } + ], + "_yamlAnchors": { + "readConcern": { + "level": "local" + }, + "writeConcern": { + "w": 1 + } + }, + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ], + "uriOptions": { + "readConcernLevel": "local", + "w": 1 + } + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "db0", + "databaseOptions": { + "readPreference": { + "mode": "secondaryPreferred", + "maxStalenessSeconds": 600 + } + } + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "db0", + "documents": [] + } + ], + "tests": [ + { + "description": "Database-level aggregate with $out includes read preference for 5.0+ server", + "runOnRequirements": [ + { + "minServerVersion": "5.0", + "serverless": "forbid" + } + ], + 
"operations": [ + { + "object": "database0", + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$listLocalSessions": {} + }, + { + "$limit": 1 + }, + { + "$addFields": { + "_id": 1 + } + }, + { + "$project": { + "_id": 1 + } + }, + { + "$out": "coll0" + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": 1, + "pipeline": [ + { + "$listLocalSessions": {} + }, + { + "$limit": 1 + }, + { + "$addFields": { + "_id": 1 + } + }, + { + "$project": { + "_id": 1 + } + }, + { + "$out": "coll0" + } + ], + "$readPreference": { + "mode": "secondaryPreferred", + "maxStalenessSeconds": 600 + }, + "readConcern": { + "level": "local" + }, + "writeConcern": { + "w": 1 + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "db0", + "documents": [ + { + "_id": 1 + } + ] + } + ] + }, + { + "description": "Database-level aggregate with $out omits read preference for pre-5.0 server", + "runOnRequirements": [ + { + "minServerVersion": "4.2", + "maxServerVersion": "4.4.99", + "serverless": "forbid" + } + ], + "operations": [ + { + "object": "database0", + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$listLocalSessions": {} + }, + { + "$limit": 1 + }, + { + "$addFields": { + "_id": 1 + } + }, + { + "$project": { + "_id": 1 + } + }, + { + "$out": "coll0" + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": 1, + "pipeline": [ + { + "$listLocalSessions": {} + }, + { + "$limit": 1 + }, + { + "$addFields": { + "_id": 1 + } + }, + { + "$project": { + "_id": 1 + } + }, + { + "$out": "coll0" + } + ], + "$readPreference": { + "mode": "primary" + }, + "readConcern": { + "level": "local" + }, + "writeConcern": { + "w": 1 + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "db0", + "documents": [ + { + "_id": 1 + } + ] + } + ] + }, + { + "description": "Database-level aggregate with $merge includes read preference for 5.0+ server", + "runOnRequirements": [ + { + "minServerVersion": "5.0" + } + ], + "operations": [ + { + "object": "database0", + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$listLocalSessions": {} + }, + { + "$limit": 1 + }, + { + "$addFields": { + "_id": 1 + } + }, + { + "$project": { + "_id": 1 + } + }, + { + "$merge": { + "into": "coll0" + } + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": 1, + "pipeline": [ + { + "$listLocalSessions": {} + }, + { + "$limit": 1 + }, + { + "$addFields": { + "_id": 1 + } + }, + { + "$project": { + "_id": 1 + } + }, + { + "$merge": { + "into": "coll0" + } + } + ], + "$readPreference": { + "mode": "secondaryPreferred", + "maxStalenessSeconds": 600 + }, + "readConcern": { + "level": "local" + }, + "writeConcern": { + "w": 1 + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "db0", + "documents": [ + { + "_id": 1 + } + ] + } + ] + }, + { + "description": "Database-level aggregate with $merge omits read preference for pre-5.0 server", + "runOnRequirements": [ + { + "minServerVersion": "4.2", + "maxServerVersion": "4.4.99" + } + ], + "operations": [ + { + "object": "database0", + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$listLocalSessions": {} + }, + { + "$limit": 1 + }, + { + "$addFields": { + "_id": 1 + } + }, + { + "$project": 
{ + "_id": 1 + } + }, + { + "$merge": { + "into": "coll0" + } + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": 1, + "pipeline": [ + { + "$listLocalSessions": {} + }, + { + "$limit": 1 + }, + { + "$addFields": { + "_id": 1 + } + }, + { + "$project": { + "_id": 1 + } + }, + { + "$merge": { + "into": "coll0" + } + } + ], + "$readPreference": { + "mode": "primary" + }, + "readConcern": { + "level": "local" + }, + "writeConcern": { + "w": 1 + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "db0", + "documents": [ + { + "_id": 1 + } + ] + } + ] + } + ] +} diff --git a/test/test_read_preferences.py b/test/test_read_preferences.py index 18dbd0bee4..bbc89b9d14 100644 --- a/test/test_read_preferences.py +++ b/test/test_read_preferences.py @@ -433,7 +433,9 @@ def test_aggregate(self): [{'$project': {'_id': 1}}]) def test_aggregate_write(self): - self._test_coll_helper(False, self.c.pymongo_test.test, + # 5.0 servers support $out on secondaries. + secondary_ok = client_context.version.at_least(5, 0) + self._test_coll_helper(secondary_ok, self.c.pymongo_test.test, 'aggregate', [{'$project': {'_id': 1}}, {'$out': "agg_write_test"}]) From 1d7b9a80b91315f1af4b88577e20ab4ac9e55e8f Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Tue, 7 Dec 2021 16:41:10 -0800 Subject: [PATCH 0031/1588] PYTHON-3026 Fix Windows Python 3.6 tests (#813) --- .evergreen/utils.sh | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/.evergreen/utils.sh b/.evergreen/utils.sh index 8fc42506a5..55c549d3aa 100755 --- a/.evergreen/utils.sh +++ b/.evergreen/utils.sh @@ -28,12 +28,8 @@ createvirtualenv () { . $VENVPATH/bin/activate fi - PYVER=$(${PYTHON} -c "import sys; sys.stdout.write('.'.join(str(val) for val in sys.version_info[:2]))") - # pip fails to upgrade in a Python 3.6 venv on Windows. 
- if [ $PYVER != "3.6" -o "Windows_NT" != "$OS" ] ; then - python -m pip install --upgrade pip - python -m pip install --upgrade setuptools wheel - fi + python -m pip install --upgrade pip + python -m pip install --upgrade setuptools wheel } # Usage: From 9f29e7313744278484d3731a7a4b563440a12978 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Thu, 9 Dec 2021 13:02:32 -0800 Subject: [PATCH 0032/1588] PYTHON-2473 Add basic Github Actions testing (#815) --- .github/workflows/test-python.yml | 28 ++++++++++++++++++++++++++++ 1 file changed, 28 insertions(+) create mode 100644 .github/workflows/test-python.yml diff --git a/.github/workflows/test-python.yml b/.github/workflows/test-python.yml new file mode 100644 index 0000000000..28ee689966 --- /dev/null +++ b/.github/workflows/test-python.yml @@ -0,0 +1,28 @@ +name: Python Tests + +on: + push: + pull_request: + +jobs: + build: + # supercharge/mongodb-github-action requires containers so we don't test other platforms + runs-on: ${{ matrix.os }} + strategy: + matrix: + os: [ubuntu-20.04] + python-version: ["3.6", "3.10", "pypy-3.8"] + name: CPython ${{ matrix.python-version }}-${{ matrix.os }} + steps: + - uses: actions/checkout@v2 + - name: Setup Python + uses: actions/setup-python@v2 + with: + python-version: ${{ matrix.python-version }} + - name: Start MongoDB + uses: supercharge/mongodb-github-action@1.7.0 + with: + mongodb-version: 4.4 + - name: Run tests + run: | + python setup.py test From 9deb1069f3e4289e2e59f2951faf3d987e3cb04c Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Thu, 9 Dec 2021 13:50:53 -0800 Subject: [PATCH 0033/1588] PYTHON-1643 Resync read write concern spec tests --- .../connection-string/read-concern.json | 18 ++++++++++ .../document/read-concern.json | 33 +++++++++++++++++++ 2 files changed, 51 insertions(+) diff --git a/test/read_write_concern/connection-string/read-concern.json b/test/read_write_concern/connection-string/read-concern.json index dd2b792b29..1ecad8c268 100644 --- a/test/read_write_concern/connection-string/read-concern.json +++ b/test/read_write_concern/connection-string/read-concern.json @@ -24,6 +24,24 @@ "readConcern": { "level": "majority" } + }, + { + "description": "linearizable specified", + "uri": "mongodb://localhost/?readConcernLevel=linearizable", + "valid": true, + "warning": false, + "readConcern": { + "level": "linearizable" + } + }, + { + "description": "available specified", + "uri": "mongodb://localhost/?readConcernLevel=available", + "valid": true, + "warning": false, + "readConcern": { + "level": "available" + } } ] } diff --git a/test/read_write_concern/document/read-concern.json b/test/read_write_concern/document/read-concern.json index ef2bafdf55..187397dae5 100644 --- a/test/read_write_concern/document/read-concern.json +++ b/test/read_write_concern/document/read-concern.json @@ -28,6 +28,39 @@ "level": "local" }, "isServerDefault": false + }, + { + "description": "Linearizable", + "valid": true, + "readConcern": { + "level": "linearizable" + }, + "readConcernDocument": { + "level": "linearizable" + }, + "isServerDefault": false + }, + { + "description": "Snapshot", + "valid": true, + "readConcern": { + "level": "snapshot" + }, + "readConcernDocument": { + "level": "snapshot" + }, + "isServerDefault": false + }, + { + "description": "Available", + "valid": true, + "readConcern": { + "level": "available" + }, + "readConcernDocument": { + "level": "available" + }, + "isServerDefault": false } ] } From d504c1f399de2d2368e2b355d157e83f955b7c5c Mon Sep 17 00:00:00 2001 From: 
Shane Harvey Date: Thu, 9 Dec 2021 14:03:01 -0800 Subject: [PATCH 0034/1588] PYTHON-2086 Verify max set version and max election id on topologies in SDAM spec tests --- test/test_discovery_and_monitoring.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/test/test_discovery_and_monitoring.py b/test/test_discovery_and_monitoring.py index c26c0df309..107168f294 100644 --- a/test/test_discovery_and_monitoring.py +++ b/test/test_discovery_and_monitoring.py @@ -196,6 +196,11 @@ def check_outcome(self, topology, outcome): self.assertEqual(topology_type_name(expected_topology_type), topology_type_name(topology.description.topology_type)) + self.assertEqual(outcome.get('maxSetVersion'), + topology.description.max_set_version) + self.assertEqual(outcome.get('maxElectionId'), + topology.description.max_election_id) + def create_test(scenario_def): def run_scenario(self): From 57ad29e4bdf5133dade02031386be26b70572690 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Thu, 9 Dec 2021 16:29:20 -0800 Subject: [PATCH 0035/1588] PYTHON-2203 Resync auth spec tests --- test/auth/connection-string.json | 26 +++++++++++++++++++++++++- 1 file changed, 25 insertions(+), 1 deletion(-) diff --git a/test/auth/connection-string.json b/test/auth/connection-string.json index 5452912e87..2a37ae8df4 100644 --- a/test/auth/connection-string.json +++ b/test/auth/connection-string.json @@ -216,6 +216,18 @@ "mechanism_properties": null } }, + { + "description": "should recognize the mechanism with no username when auth source is explicitly specified (MONGODB-X509)", + "uri": "mongodb://localhost/?authMechanism=MONGODB-X509&authSource=$external", + "valid": true, + "credential": { + "username": null, + "password": null, + "source": "$external", + "mechanism": "MONGODB-X509", + "mechanism_properties": null + } + }, { "description": "should throw an exception if supplied a password (MONGODB-X509)", "uri": "mongodb://user:password@localhost/?authMechanism=MONGODB-X509", @@ -362,7 +374,7 @@ "credential": null }, { - "description": "authSource without username doesn't create credential", + "description": "authSource without username doesn't create credential (default mechanism)", "uri": "mongodb://localhost/?authSource=foo", "valid": true, "credential": null @@ -389,6 +401,18 @@ "mechanism_properties": null } }, + { + "description": "should recognise the mechanism when auth source is explicitly specified (MONGODB-AWS)", + "uri": "mongodb://localhost/?authMechanism=MONGODB-AWS&authSource=$external", + "valid": true, + "credential": { + "username": null, + "password": null, + "source": "$external", + "mechanism": "MONGODB-AWS", + "mechanism_properties": null + } + }, { "description": "should throw an exception if username and no password (MONGODB-AWS)", "uri": "mongodb://user@localhost/?authMechanism=MONGODB-AWS", From 7bd9bd7b471be89f0955ebb80d275c2cca8c024a Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Thu, 9 Dec 2021 17:55:26 -0800 Subject: [PATCH 0036/1588] PYTHON-2160 Stop using Google Groups email address (#818) --- THIRD-PARTY-NOTICES | 2 +- setup.py | 1 - 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/THIRD-PARTY-NOTICES b/THIRD-PARTY-NOTICES index 4f2edb8660..28a340b3fb 100644 --- a/THIRD-PARTY-NOTICES +++ b/THIRD-PARTY-NOTICES @@ -4,7 +4,7 @@ be distributed under licenses different than the PyMongo software. 
In the event that we accidentally failed to list a required notice, please bring it to our attention through any of the ways detailed here: - mongodb-dev@googlegroups.com + https://jira.mongodb.org/projects/PYTHON The attached notices are provided for information only. diff --git a/setup.py b/setup.py index 464e33e082..7d1ad52dc7 100755 --- a/setup.py +++ b/setup.py @@ -315,7 +315,6 @@ def build_extension(self, ext): description="Python driver for MongoDB ", long_description=readme_content, author="The MongoDB Python Team", - author_email="mongodb-user@googlegroups.com", url="http://github.com/mongodb/mongo-python-driver", keywords=["mongo", "mongodb", "pymongo", "gridfs", "bson"], install_requires=[], From c94a3ad1dff4716f70989a46b126604e46e2e419 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Thu, 9 Dec 2021 18:00:41 -0800 Subject: [PATCH 0037/1588] PYTHON-2585 Remove legacy multi-auth code (#816) --- pymongo/auth.py | 8 +-- pymongo/client_options.py | 17 ++---- pymongo/mongo_client.py | 10 +--- pymongo/monitor.py | 9 ++- pymongo/pool.py | 122 +++++++++++++------------------------- pymongo/server.py | 6 +- pymongo/topology.py | 4 +- test/pymongo_mocks.py | 4 +- test/test_auth.py | 20 ++----- test/test_auth_spec.py | 2 +- test/test_client.py | 29 +++++---- test/test_cmap.py | 4 +- test/test_pooling.py | 36 +++++------ test/utils.py | 2 +- 14 files changed, 106 insertions(+), 167 deletions(-) diff --git a/pymongo/auth.py b/pymongo/auth.py index 17f3a32fe8..a2e206357c 100644 --- a/pymongo/auth.py +++ b/pymongo/auth.py @@ -195,7 +195,7 @@ def _authenticate_scram(credentials, sock_info, mechanism): # Make local _hmac = hmac.HMAC - ctx = sock_info.auth_ctx.get(credentials) + ctx = sock_info.auth_ctx if ctx and ctx.speculate_succeeded(): nonce, first_bare = ctx.scram_data res = ctx.speculative_authenticate @@ -424,7 +424,7 @@ def _authenticate_plain(credentials, sock_info): def _authenticate_x509(credentials, sock_info): """Authenticate using MONGODB-X509. """ - ctx = sock_info.auth_ctx.get(credentials) + ctx = sock_info.auth_ctx if ctx and ctx.speculate_succeeded(): # MONGODB-X509 is done after the speculative auth step. 
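        # (Speculative auth: the hello handshake already carried the
        # first authentication exchange, so success here means the
        # connection is authenticated with no extra round trip.)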
return @@ -454,8 +454,8 @@ def _authenticate_mongo_cr(credentials, sock_info): def _authenticate_default(credentials, sock_info): if sock_info.max_wire_version >= 7: - if credentials in sock_info.negotiated_mechanisms: - mechs = sock_info.negotiated_mechanisms[credentials] + if sock_info.negotiated_mechs: + mechs = sock_info.negotiated_mechs else: source = credentials.source cmd = sock_info.hello_cmd() diff --git a/pymongo/client_options.py b/pymongo/client_options.py index f7dbf255bc..c2f5ae01cf 100644 --- a/pymongo/client_options.py +++ b/pymongo/client_options.py @@ -117,8 +117,9 @@ def _parse_ssl_options(options): return None, allow_invalid_hostnames -def _parse_pool_options(options): +def _parse_pool_options(username, password, database, options): """Parse connection pool options.""" + credentials = _parse_credentials(username, password, database, options) max_pool_size = options.get('maxpoolsize', common.MAX_POOL_SIZE) min_pool_size = options.get('minpoolsize', common.MIN_POOL_SIZE) max_idle_time_seconds = options.get( @@ -151,7 +152,8 @@ def _parse_pool_options(options): compression_settings, max_connecting=max_connecting, server_api=server_api, - load_balanced=load_balanced) + load_balanced=load_balanced, + credentials=credentials) class ClientOptions(object): @@ -164,10 +166,7 @@ class ClientOptions(object): def __init__(self, username, password, database, options): self.__options = options - self.__codec_options = _parse_codec_options(options) - self.__credentials = _parse_credentials( - username, password, database, options) self.__direct_connection = options.get('directconnection') self.__local_threshold_ms = options.get( 'localthresholdms', common.LOCAL_THRESHOLD_MS) @@ -175,7 +174,8 @@ def __init__(self, username, password, database, options): # common.SERVER_SELECTION_TIMEOUT because it is set directly by tests. self.__server_selection_timeout = options.get( 'serverselectiontimeoutms', common.SERVER_SELECTION_TIMEOUT) - self.__pool_options = _parse_pool_options(options) + self.__pool_options = _parse_pool_options( + username, password, database, options) self.__read_preference = _parse_read_preference(options) self.__replica_set_name = options.get('replicaset') self.__write_concern = _parse_write_concern(options) @@ -205,11 +205,6 @@ def codec_options(self): """A :class:`~bson.codec_options.CodecOptions` instance.""" return self.__codec_options - @property - def _credentials(self): - """A :class:`~pymongo.auth.MongoCredentials` instance or None.""" - return self.__credentials - @property def direct_connection(self): """Whether to connect to the deployment in 'Single' topology.""" diff --git a/pymongo/mongo_client.py b/pymongo/mongo_client.py index 9c98e5d211..87c87c0241 100644 --- a/pymongo/mongo_client.py +++ b/pymongo/mongo_client.py @@ -729,11 +729,6 @@ def __init__( options.write_concern, options.read_concern) - self.__all_credentials = {} - creds = options._credentials - if creds: - self.__all_credentials[creds.source] = creds - self._topology_settings = TopologySettings( seeds=seeds, replica_set_name=options.replica_set_name, @@ -1090,8 +1085,7 @@ def _get_socket(self, server, session): if in_txn and session._pinned_connection: yield session._pinned_connection return - with server.get_socket( - self.__all_credentials, handler=err_handler) as sock_info: + with server.get_socket(handler=err_handler) as sock_info: # Pin this session to the selected server or connection. 
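            # (A transaction must run every operation on the same mongos
            # or load-balanced connection it started on.)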
if (in_txn and server.description.server_type in ( SERVER_TYPE.Mongos, SERVER_TYPE.LoadBalancer)): @@ -1535,7 +1529,7 @@ def _process_periodic_tasks(self): maintain connection pool parameters.""" try: self._process_kill_cursors() - self._topology.update_pool(self.__all_credentials) + self._topology.update_pool() except Exception as exc: if isinstance(exc, InvalidOperation) and self._topology._closed: return diff --git a/pymongo/monitor.py b/pymongo/monitor.py index a383e272cd..039ec51942 100644 --- a/pymongo/monitor.py +++ b/pymongo/monitor.py @@ -246,7 +246,7 @@ def _check_once(self): if self._cancel_context and self._cancel_context.cancelled: self._reset_connection() - with self._pool.get_socket({}) as sock_info: + with self._pool.get_socket() as sock_info: self._cancel_context = sock_info.cancel_context response, round_trip_time = self._check_with_socket(sock_info) if not response.awaitable: @@ -275,11 +275,10 @@ def _check_with_socket(self, conn): response = conn._hello( cluster_time, self._server_description.topology_version, - self._settings.heartbeat_frequency, - None) + self._settings.heartbeat_frequency) else: # New connection handshake or polling hello (MongoDB <4.4). - response = conn._hello(cluster_time, None, None, None) + response = conn._hello(cluster_time, None, None) return response, time.monotonic() - start @@ -388,7 +387,7 @@ def _run(self): def _ping(self): """Run a "hello" command and return the RTT.""" - with self._pool.get_socket({}) as sock_info: + with self._pool.get_socket() as sock_info: if self._executor._stopped: raise Exception('_RttMonitor closed') start = time.monotonic() diff --git a/pymongo/pool.py b/pymongo/pool.py index 84661c4879..6fe9d024d6 100644 --- a/pymongo/pool.py +++ b/pymongo/pool.py @@ -275,7 +275,8 @@ class PoolOptions(object): '__ssl_context', '__tls_allow_invalid_hostnames', '__event_listeners', '__appname', '__driver', '__metadata', '__compression_settings', '__max_connecting', - '__pause_enabled', '__server_api', '__load_balanced') + '__pause_enabled', '__server_api', '__load_balanced', + '__credentials') def __init__(self, max_pool_size=MAX_POOL_SIZE, min_pool_size=MIN_POOL_SIZE, @@ -285,7 +286,8 @@ def __init__(self, max_pool_size=MAX_POOL_SIZE, tls_allow_invalid_hostnames=False, event_listeners=None, appname=None, driver=None, compression_settings=None, max_connecting=MAX_CONNECTING, - pause_enabled=True, server_api=None, load_balanced=None): + pause_enabled=True, server_api=None, load_balanced=None, + credentials=None): self.__max_pool_size = max_pool_size self.__min_pool_size = min_pool_size self.__max_idle_time_seconds = max_idle_time_seconds @@ -302,6 +304,7 @@ def __init__(self, max_pool_size=MAX_POOL_SIZE, self.__pause_enabled = pause_enabled self.__server_api = server_api self.__load_balanced = load_balanced + self.__credentials = credentials self.__metadata = copy.deepcopy(_METADATA) if appname: self.__metadata['application'] = {'name': appname} @@ -325,6 +328,11 @@ def __init__(self, max_pool_size=MAX_POOL_SIZE, self.__metadata['platform'] = "%s|%s" % ( _METADATA['platform'], driver.platform) + @property + def _credentials(self): + """A :class:`~pymongo.auth.MongoCredentials` instance or None.""" + return self.__credentials + @property def non_default_options(self): """The non-default options this pool was created with. @@ -457,25 +465,6 @@ def load_balanced(self): return self.__load_balanced -def _negotiate_creds(all_credentials): - """Return one credential that needs mechanism negotiation, if any. 
- """ - if all_credentials: - for creds in all_credentials.values(): - if creds.mechanism == 'DEFAULT' and creds.username: - return creds - return None - - -def _speculative_context(all_credentials): - """Return the _AuthContext to use for speculative auth, if any. - """ - if all_credentials and len(all_credentials) == 1: - creds = next(iter(all_credentials.values())) - return auth._AuthContext.from_credentials(creds) - return None - - class _CancellationContext(object): def __init__(self): self._cancelled = False @@ -504,7 +493,7 @@ def __init__(self, sock, pool, address, id): self.sock = sock self.address = address self.id = id - self.authset = set() + self.authed = set() self.closed = False self.last_checkin_time = time.monotonic() self.performed_handshake = False @@ -523,9 +512,8 @@ def __init__(self, sock, pool, address, id): self.compression_context = None self.socket_checker = SocketChecker() # Support for mechanism negotiation on the initial handshake. - # Maps credential to saslSupportedMechs. - self.negotiated_mechanisms = {} - self.auth_ctx = {} + self.negotiated_mechs = None + self.auth_ctx = None # The pool's generation changes with each reset() so we can close # sockets created before the last reset. @@ -567,11 +555,10 @@ def hello_cmd(self): else: return SON([(HelloCompat.LEGACY_CMD, 1), ('helloOk', True)]) - def hello(self, all_credentials=None): - return self._hello(None, None, None, all_credentials) + def hello(self): + return self._hello(None, None, None) - def _hello(self, cluster_time, topology_version, - heartbeat_frequency, all_credentials): + def _hello(self, cluster_time, topology_version, heartbeat_frequency): cmd = self.hello_cmd() performing_handshake = not self.performed_handshake awaitable = False @@ -594,14 +581,15 @@ def _hello(self, cluster_time, topology_version, if not performing_handshake and cluster_time is not None: cmd['$clusterTime'] = cluster_time - # XXX: Simplify in PyMongo 4.0 when all_credentials is always a single - # unchangeable value per MongoClient. - creds = _negotiate_creds(all_credentials) + creds = self.opts._credentials if creds: - cmd['saslSupportedMechs'] = creds.source + '.' + creds.username - auth_ctx = _speculative_context(all_credentials) - if auth_ctx: - cmd['speculativeAuthenticate'] = auth_ctx.speculate_command() + if creds.mechanism == 'DEFAULT' and creds.username: + cmd['saslSupportedMechs'] = creds.source + '.' + creds.username + auth_ctx = auth._AuthContext.from_credentials(creds) + if auth_ctx: + cmd['speculativeAuthenticate'] = auth_ctx.speculate_command() + else: + auth_ctx = None doc = self.command('admin', cmd, publish_events=False, exhaust_allowed=awaitable) @@ -628,11 +616,11 @@ def _hello(self, cluster_time, topology_version, self.op_msg_enabled = True if creds: - self.negotiated_mechanisms[creds] = hello.sasl_supported_mechs + self.negotiated_mechs = hello.sasl_supported_mechs if auth_ctx: auth_ctx.parse_response(hello) if auth_ctx.speculate_succeeded(): - self.auth_ctx[auth_ctx.credentials] = auth_ctx + self.auth_ctx = auth_ctx if self.opts.load_balanced: if not hello.service_id: raise ConfigurationError( @@ -799,41 +787,21 @@ def write_command(self, request_id, msg): helpers._check_command_response(result, self.max_wire_version) return result - def check_auth(self, all_credentials): - """Update this socket's authentication. + def authenticate(self): + """Authenticate to the server if needed. - Log in or out to bring this socket's credentials up to date with - those provided. 
Can raise ConnectionFailure or OperationFailure. - - :Parameters: - - `all_credentials`: dict, maps auth source to MongoCredential. + Can raise ConnectionFailure or OperationFailure. """ - if all_credentials: - for credentials in all_credentials.values(): - if credentials not in self.authset: - self.authenticate(credentials) - # CMAP spec says to publish the ready event only after authenticating # the connection. if not self.ready: + creds = self.opts._credentials + if creds: + auth.authenticate(creds, self) self.ready = True if self.enabled_for_cmap: self.listeners.publish_connection_ready(self.address, self.id) - def authenticate(self, credentials): - """Log in to the server and store these credentials in `authset`. - - Can raise ConnectionFailure or OperationFailure. - - :Parameters: - - `credentials`: A MongoCredential. - """ - auth.authenticate(credentials, self) - self.authset.add(credentials) - # negotiated_mechanisms are no longer needed. - self.negotiated_mechanisms.pop(credentials, None) - self.auth_ctx.pop(credentials, None) - def validate_session(self, client, session): """Validate this session before use with client. @@ -1245,7 +1213,7 @@ def close(self): def stale_generation(self, gen, service_id): return self.gen.stale(gen, service_id) - def remove_stale_sockets(self, reference_generation, all_credentials): + def remove_stale_sockets(self, reference_generation): """Removes stale sockets then adds new ones if pool is too small and has not been reset. The `reference_generation` argument specifies the `generation` at the point in time this operation was requested on the @@ -1281,7 +1249,7 @@ def remove_stale_sockets(self, reference_generation, all_credentials): return self._pending += 1 incremented = True - sock_info = self.connect(all_credentials) + sock_info = self.connect() with self.lock: # Close connection and return if the pool was reset during # socket creation or while acquiring the pool lock. @@ -1300,7 +1268,7 @@ def remove_stale_sockets(self, reference_generation, all_credentials): self.requests -= 1 self.size_cond.notify() - def connect(self, all_credentials=None): + def connect(self): """Connect to Mongo and return a new SocketInfo. Can raise ConnectionFailure. @@ -1331,10 +1299,10 @@ def connect(self, all_credentials=None): sock_info = SocketInfo(sock, self, self.address, conn_id) try: if self.handshake: - sock_info.hello(all_credentials) + sock_info.hello() self.is_writable = sock_info.is_writable - sock_info.check_auth(all_credentials) + sock_info.authenticate() except BaseException: sock_info.close_socket(ConnectionClosedReason.ERROR) raise @@ -1342,7 +1310,7 @@ def connect(self, all_credentials=None): return sock_info @contextlib.contextmanager - def get_socket(self, all_credentials, handler=None): + def get_socket(self, handler=None): """Get a socket from the pool. Use with a "with" statement. Returns a :class:`SocketInfo` object wrapping a connected @@ -1350,25 +1318,20 @@ def get_socket(self, all_credentials, handler=None): This method should always be used in a with-statement:: - with pool.get_socket(credentials) as socket_info: + with pool.get_socket() as socket_info: socket_info.send_message(msg) data = socket_info.receive_message(op_code, request_id) - The socket is logged in or out as needed to match ``all_credentials`` - using the correct authentication mechanism for the server's wire - protocol version. - Can raise ConnectionFailure or OperationFailure. :Parameters: - - `all_credentials`: dict, maps auth source to MongoCredential. 
- `handler` (optional): A _MongoClientErrorHandler. """ listeners = self.opts._event_listeners if self.enabled_for_cmap: listeners.publish_connection_check_out_started(self.address) - sock_info = self._get_socket(all_credentials) + sock_info = self._get_socket() if self.enabled_for_cmap: listeners.publish_connection_checked_out( self.address, sock_info.id) @@ -1407,7 +1370,7 @@ def _raise_if_not_ready(self, emit_event): _raise_connection_failure( self.address, AutoReconnect('connection pool paused')) - def _get_socket(self, all_credentials): + def _get_socket(self): """Get or create a SocketInfo. Can raise ConnectionFailure.""" # We use the pid here to avoid issues with fork / multiprocessing. # See test.test_client:TestClient.test_fork for an example of @@ -1480,12 +1443,11 @@ def _get_socket(self, all_credentials): continue else: # We need to create a new connection try: - sock_info = self.connect(all_credentials) + sock_info = self.connect() finally: with self._max_connecting_cond: self._pending -= 1 self._max_connecting_cond.notify() - sock_info.check_auth(all_credentials) except BaseException: if sock_info: # We checked out a socket but authentication failed. diff --git a/pymongo/server.py b/pymongo/server.py index 0a487e8c41..2a0a7267b7 100644 --- a/pymongo/server.py +++ b/pymongo/server.py @@ -77,9 +77,9 @@ def run_operation(self, sock_info, operation, set_secondary_okay, listeners, Can raise ConnectionFailure, OperationFailure, etc. :Parameters: + - `sock_info` - A SocketInfo instance. - `operation`: A _Query or _GetMore object. - `set_secondary_okay`: Pass to operation.get_message. - - `all_credentials`: dict, maps auth source to MongoCredential. - `listeners`: Instance of _EventListeners or None. - `unpack_res`: A callable that decodes the wire protocol response. """ @@ -200,8 +200,8 @@ def run_operation(self, sock_info, operation, set_secondary_okay, listeners, return response - def get_socket(self, all_credentials, handler=None): - return self.pool.get_socket(all_credentials, handler) + def get_socket(self, handler=None): + return self.pool.get_socket(handler) @property def description(self): diff --git a/pymongo/topology.py b/pymongo/topology.py index 6f26cff617..021a1dee60 100644 --- a/pymongo/topology.py +++ b/pymongo/topology.py @@ -444,7 +444,7 @@ def data_bearing_servers(self): return self._description.known_servers return self._description.readable_servers - def update_pool(self, all_credentials): + def update_pool(self): # Remove any stale sockets and add new sockets if pool is too small. 
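        # (Invoked from the client's periodic background task; each
        # server's pool applies minPoolSize and maxIdleTimeMS itself in
        # remove_stale_sockets below.)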
servers = [] with self._lock: @@ -456,7 +456,7 @@ def update_pool(self, all_credentials): for server, generation in servers: try: - server.pool.remove_stale_sockets(generation, all_credentials) + server.pool.remove_stale_sockets(generation) except PyMongoError as exc: ctx = _ErrorContext(exc, 0, generation, False, None) self.handle_error(server.description.address, ctx) diff --git a/test/pymongo_mocks.py b/test/pymongo_mocks.py index 8b1ece8ad6..1494fbedcc 100644 --- a/test/pymongo_mocks.py +++ b/test/pymongo_mocks.py @@ -40,7 +40,7 @@ def __init__(self, client, pair, *args, **kwargs): Pool.__init__(self, (client_context.host, client_context.port), *args, **kwargs) @contextlib.contextmanager - def get_socket(self, all_credentials, handler=None): + def get_socket(self, handler=None): client = self.client host_and_port = '%s:%s' % (self.mock_host, self.mock_port) if host_and_port in client.mock_down_hosts: @@ -51,7 +51,7 @@ def get_socket(self, all_credentials, handler=None): + client.mock_members + client.mock_mongoses), "bad host: %s" % host_and_port - with Pool.get_socket(self, all_credentials, handler) as sock_info: + with Pool.get_socket(self, handler) as sock_info: sock_info.mock_host = self.mock_host sock_info.mock_port = self.mock_port yield sock_info diff --git a/test/test_auth.py b/test/test_auth.py index d0724dce72..35f198574b 100644 --- a/test/test_auth.py +++ b/test/test_auth.py @@ -30,6 +30,7 @@ from pymongo.saslprep import HAVE_STRINGPREP from test import client_context, IntegrationTest, SkipTest, unittest, Version from test.utils import (delay, + get_pool, ignore_deprecations, single_client, rs_or_single_client, @@ -521,10 +522,12 @@ def test_scram_saslprep(self): def test_cache(self): client = single_client() + credentials = client.options.pool_options._credentials + cache = credentials.cache + self.assertIsNotNone(cache) + self.assertIsNone(cache.data) # Force authentication. 
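        # (The client connects lazily, so running any command triggers
        # the connection handshake and the SCRAM conversation.)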
client.admin.command('ping') - all_credentials = client._MongoClient__all_credentials - credentials = all_credentials.get('admin') cache = credentials.cache self.assertIsNotNone(cache) data = cache.data @@ -536,19 +539,6 @@ def test_cache(self): self.assertIsInstance(salt, bytes) self.assertIsInstance(iterations, int) - pool = next(iter(client._topology._servers.values()))._pool - with pool.get_socket(all_credentials) as sock_info: - authset = sock_info.authset - cached = set(all_credentials.values()) - self.assertEqual(len(cached), 1) - self.assertFalse(authset - cached) - self.assertFalse(cached - authset) - - sock_credentials = next(iter(authset)) - sock_cache = sock_credentials.cache - self.assertIsNotNone(sock_cache) - self.assertEqual(sock_cache.data, data) - def test_scram_threaded(self): coll = client_context.client.db.test diff --git a/test/test_auth_spec.py b/test/test_auth_spec.py index 8bf0dcb21c..e78b4b209a 100644 --- a/test/test_auth_spec.py +++ b/test/test_auth_spec.py @@ -44,7 +44,7 @@ def run_test(self): self.assertRaises(Exception, MongoClient, uri, connect=False) else: client = MongoClient(uri, connect=False) - credentials = client._MongoClient__options._credentials + credentials = client.options.pool_options._credentials if credential is None: self.assertIsNone(credentials) else: diff --git a/test/test_client.py b/test/test_client.py index 8c89a45481..8db1cb5621 100644 --- a/test/test_client.py +++ b/test/test_client.py @@ -498,7 +498,7 @@ def test_max_idle_time_reaper_default(self): client = rs_or_single_client() server = client._get_topology().select_server( readable_server_selector) - with server._pool.get_socket({}) as sock_info: + with server._pool.get_socket() as sock_info: pass self.assertEqual(1, len(server._pool.sockets)) self.assertTrue(sock_info in server._pool.sockets) @@ -511,7 +511,7 @@ def test_max_idle_time_reaper_removes_stale_minPoolSize(self): minPoolSize=1) server = client._get_topology().select_server( readable_server_selector) - with server._pool.get_socket({}) as sock_info: + with server._pool.get_socket() as sock_info: pass # When the reaper runs at the same time as the get_socket, two # sockets could be created and checked into the pool. @@ -530,7 +530,7 @@ def test_max_idle_time_reaper_does_not_exceed_maxPoolSize(self): maxPoolSize=1) server = client._get_topology().select_server( readable_server_selector) - with server._pool.get_socket({}) as sock_info: + with server._pool.get_socket() as sock_info: pass # When the reaper runs at the same time as the get_socket, # maxPoolSize=1 should prevent two sockets from being created. @@ -547,11 +547,11 @@ def test_max_idle_time_reaper_removes_stale(self): client = rs_or_single_client(maxIdleTimeMS=500) server = client._get_topology().select_server( readable_server_selector) - with server._pool.get_socket({}) as sock_info_one: + with server._pool.get_socket() as sock_info_one: pass # Assert that the pool does not close sockets prematurely. 
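        # (300ms is well below the 500ms maxIdleTimeMS configured above.)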
time.sleep(.300) - with server._pool.get_socket({}) as sock_info_two: + with server._pool.get_socket() as sock_info_two: pass self.assertIs(sock_info_one, sock_info_two) wait_until( @@ -574,7 +574,7 @@ def test_min_pool_size(self): "pool initialized with 10 sockets") # Assert that if a socket is closed, a new one takes its place - with server._pool.get_socket({}) as sock_info: + with server._pool.get_socket() as sock_info: sock_info.close_socket(None) wait_until(lambda: 10 == len(server._pool.sockets), "a closed socket gets replaced from the pool") @@ -586,12 +586,12 @@ def test_max_idle_time_checkout(self): client = rs_or_single_client(maxIdleTimeMS=500) server = client._get_topology().select_server( readable_server_selector) - with server._pool.get_socket({}) as sock_info: + with server._pool.get_socket() as sock_info: pass self.assertEqual(1, len(server._pool.sockets)) time.sleep(1) # Sleep so that the socket becomes stale. - with server._pool.get_socket({}) as new_sock_info: + with server._pool.get_socket() as new_sock_info: self.assertNotEqual(sock_info, new_sock_info) self.assertEqual(1, len(server._pool.sockets)) self.assertFalse(sock_info in server._pool.sockets) @@ -601,11 +601,11 @@ def test_max_idle_time_checkout(self): client = rs_or_single_client() server = client._get_topology().select_server( readable_server_selector) - with server._pool.get_socket({}) as sock_info: + with server._pool.get_socket() as sock_info: pass self.assertEqual(1, len(server._pool.sockets)) time.sleep(1) - with server._pool.get_socket({}) as new_sock_info: + with server._pool.get_socket() as new_sock_info: self.assertEqual(sock_info, new_sock_info) self.assertEqual(1, len(server._pool.sockets)) @@ -1106,7 +1106,7 @@ def test_waitQueueTimeoutMS(self): def test_socketKeepAlive(self): pool = get_pool(self.client) - with pool.get_socket({}) as sock_info: + with pool.get_socket() as sock_info: keepalive = sock_info.sock.getsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE) self.assertTrue(keepalive) @@ -1325,8 +1325,8 @@ def test_auth_network_error(self): socket_info = one(pool.sockets) socket_info.sock.close() - # SocketInfo.check_auth logs in with the new credential, but gets a - # socket.error. Should be reraised as AutoReconnect. + # SocketInfo.authenticate logs, but gets a socket.error. Should be + # reraised as AutoReconnect. self.assertRaises(AutoReconnect, c.test.collection.find_one) # No semaphore leak, the pool is allowed to make a new socket. @@ -1521,8 +1521,7 @@ def run(self): try: while True: for _ in range(10): - client._topology.update_pool( - client._MongoClient__all_credentials) + client._topology.update_pool() if generation != pool.gen.get_overall(): break finally: diff --git a/test/test_cmap.py b/test/test_cmap.py index d08cc24a59..20ed7f31ec 100644 --- a/test/test_cmap.py +++ b/test/test_cmap.py @@ -120,7 +120,7 @@ def wait_for_event(self, op): def check_out(self, op): """Run the 'checkOut' operation.""" label = op['label'] - with self.pool.get_socket({}) as sock_info: + with self.pool.get_socket() as sock_info: # Call 'pin_cursor' so we can hold the socket. 
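            # (Pinning keeps the connection checked out instead of being
            # returned to the pool when this with-block exits.)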
sock_info.pin_cursor() if label: @@ -452,7 +452,7 @@ def test_close_leaves_pool_unpaused(self): self.assertEqual(1, listener.event_count(PoolClearedEvent)) self.assertEqual(PoolState.READY, pool.state) # Checking out a connection should succeed - with pool.get_socket({}): + with pool.get_socket(): pass diff --git a/test/test_pooling.py b/test/test_pooling.py index b8f3cf1908..4f0ac3584f 100644 --- a/test/test_pooling.py +++ b/test/test_pooling.py @@ -118,7 +118,7 @@ def run_mongo_thread(self): self.state = 'get_socket' # Call 'pin_cursor' so we can hold the socket. - with self.pool.get_socket({}) as sock: + with self.pool.get_socket() as sock: sock.pin_cursor() self.sock = sock @@ -196,10 +196,10 @@ def test_pool_reuses_open_socket(self): # Test Pool's _check_closed() method doesn't close a healthy socket. cx_pool = self.create_pool(max_pool_size=10) cx_pool._check_interval_seconds = 0 # Always check. - with cx_pool.get_socket({}) as sock_info: + with cx_pool.get_socket() as sock_info: pass - with cx_pool.get_socket({}) as new_sock_info: + with cx_pool.get_socket() as new_sock_info: self.assertEqual(sock_info, new_sock_info) self.assertEqual(1, len(cx_pool.sockets)) @@ -208,11 +208,11 @@ def test_get_socket_and_exception(self): # get_socket() returns socket after a non-network error. cx_pool = self.create_pool(max_pool_size=1, wait_queue_timeout=1) with self.assertRaises(ZeroDivisionError): - with cx_pool.get_socket({}) as sock_info: + with cx_pool.get_socket() as sock_info: 1 / 0 # Socket was returned, not closed. - with cx_pool.get_socket({}) as new_sock_info: + with cx_pool.get_socket() as new_sock_info: self.assertEqual(sock_info, new_sock_info) self.assertEqual(1, len(cx_pool.sockets)) @@ -221,7 +221,7 @@ def test_pool_removes_closed_socket(self): # Test that Pool removes explicitly closed socket. cx_pool = self.create_pool() - with cx_pool.get_socket({}) as sock_info: + with cx_pool.get_socket() as sock_info: # Use SocketInfo's API to close the socket. sock_info.close_socket(None) @@ -233,20 +233,20 @@ def test_pool_removes_dead_socket(self): cx_pool = self.create_pool(max_pool_size=1, wait_queue_timeout=1) cx_pool._check_interval_seconds = 0 # Always check. - with cx_pool.get_socket({}) as sock_info: + with cx_pool.get_socket() as sock_info: # Simulate a closed socket without telling the SocketInfo it's # closed. sock_info.sock.close() self.assertTrue(sock_info.socket_closed()) - with cx_pool.get_socket({}) as new_sock_info: + with cx_pool.get_socket() as new_sock_info: self.assertEqual(0, len(cx_pool.sockets)) self.assertNotEqual(sock_info, new_sock_info) self.assertEqual(1, len(cx_pool.sockets)) # Semaphore was released. - with cx_pool.get_socket({}): + with cx_pool.get_socket(): pass def test_socket_closed(self): @@ -290,7 +290,7 @@ def test_socket_checker(self): def test_return_socket_after_reset(self): pool = self.create_pool() - with pool.get_socket({}) as sock: + with pool.get_socket() as sock: self.assertEqual(pool.active_sockets, 1) self.assertEqual(pool.operation_count, 1) pool.reset() @@ -309,7 +309,7 @@ def test_pool_check(self): cx_pool._check_interval_seconds = 0 # Always check. self.addCleanup(cx_pool.close) - with cx_pool.get_socket({}) as sock_info: + with cx_pool.get_socket() as sock_info: # Simulate a closed socket without telling the SocketInfo it's # closed. sock_info.sock.close() @@ -317,12 +317,12 @@ def test_pool_check(self): # Swap pool's address with a bad one. 
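        # (The next checkout should fail to connect and surface as
        # AutoReconnect while still releasing its slot in the pool.)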
address, cx_pool.address = cx_pool.address, ('foo.com', 1234) with self.assertRaises(AutoReconnect): - with cx_pool.get_socket({}): + with cx_pool.get_socket(): pass # Back to normal, semaphore was correctly released. cx_pool.address = address - with cx_pool.get_socket({}): + with cx_pool.get_socket(): pass def test_wait_queue_timeout(self): @@ -331,10 +331,10 @@ def test_wait_queue_timeout(self): max_pool_size=1, wait_queue_timeout=wait_queue_timeout) self.addCleanup(pool.close) - with pool.get_socket({}) as sock_info: + with pool.get_socket() as sock_info: start = time.time() with self.assertRaises(ConnectionFailure): - with pool.get_socket({}): + with pool.get_socket(): pass duration = time.time() - start @@ -349,7 +349,7 @@ def test_no_wait_queue_timeout(self): self.addCleanup(pool.close) # Reach max_size. - with pool.get_socket({}) as s1: + with pool.get_socket() as s1: t = SocketGetter(self.c, pool) t.start() while t.state != 'get_socket': @@ -370,7 +370,7 @@ def test_checkout_more_than_max_pool_size(self): socks = [] for _ in range(2): # Call 'pin_cursor' so we can hold the socket. - with pool.get_socket({}) as sock: + with pool.get_socket() as sock: sock.pin_cursor() socks.append(sock) @@ -515,7 +515,7 @@ def test_max_pool_size_with_connection_failure(self): # socket from pool" instead of AutoReconnect. for i in range(2): with self.assertRaises(AutoReconnect) as context: - with test_pool.get_socket({}): + with test_pool.get_socket(): pass # Testing for AutoReconnect instead of ConnectionFailure, above, diff --git a/test/utils.py b/test/utils.py index bdea5c69c2..efc6e24879 100644 --- a/test/utils.py +++ b/test/utils.py @@ -269,7 +269,7 @@ def __init__(self, address, options, handshake=True): def stale_generation(self, gen, service_id): return self.gen.stale(gen, service_id) - def get_socket(self, all_credentials, handler=None): + def get_socket(self, handler=None): return MockSocketInfo() def return_socket(self, *args, **kwargs): From 797197e73bd18fc7c4076408e68aa745f8070c49 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Fri, 10 Dec 2021 10:22:49 -0800 Subject: [PATCH 0038/1588] PYTHON-2763 Remove outdated check_keys and $clusterTime logic (#817) --- pymongo/_cmessagemodule.c | 105 ++++++-------------------------------- pymongo/collection.py | 20 +++----- pymongo/encryption.py | 10 +--- pymongo/message.py | 90 ++++++++++++++------------------ pymongo/network.py | 12 ++--- pymongo/pool.py | 5 +- 6 files changed, 68 insertions(+), 174 deletions(-) diff --git a/pymongo/_cmessagemodule.c b/pymongo/_cmessagemodule.c index 845c14bd54..517c0fb798 100644 --- a/pymongo/_cmessagemodule.c +++ b/pymongo/_cmessagemodule.c @@ -67,7 +67,6 @@ static PyObject* _cbson_query_message(PyObject* self, PyObject* args) { struct module_state *state = GETSTATE(self); int request_id = rand(); - PyObject* cluster_time = NULL; unsigned int flags; char* collection_name = NULL; Py_ssize_t collection_name_length; @@ -79,18 +78,16 @@ static PyObject* _cbson_query_message(PyObject* self, PyObject* args) { codec_options_t options; buffer_t buffer = NULL; int length_location, message_length; - unsigned char check_keys = 0; PyObject* result = NULL; - if (!PyArg_ParseTuple(args, "Iet#iiOOO&|b", + if (!PyArg_ParseTuple(args, "Iet#iiOOO&", &flags, "utf-8", &collection_name, &collection_name_length, &num_to_skip, &num_to_return, &query, &field_selector, - convert_codec_options, &options, - &check_keys)) { + convert_codec_options, &options)) { return NULL; } buffer = buffer_new(); @@ -104,29 +101,6 @@ static PyObject* 
_cbson_query_message(PyObject* self, PyObject* args) { goto fail; } - /* Pop $clusterTime from dict and write it at the end, avoiding an error - * from the $-prefix and check_keys. - * - * If "dict" is a defaultdict we don't want to call PyMapping_GetItemString - * on it. That would **create** an _id where one didn't previously exist - * (PYTHON-871). - */ - if (PyDict_Check(query)) { - cluster_time = PyDict_GetItemString(query, "$clusterTime"); - if (cluster_time) { - /* PyDict_GetItemString returns a borrowed reference. */ - Py_INCREF(cluster_time); - if (-1 == PyMapping_DelItemString(query, "$clusterTime")) { - goto fail; - } - } - } else if (PyMapping_HasKeyString(query, "$clusterTime")) { - cluster_time = PyMapping_GetItemString(query, "$clusterTime"); - if (!cluster_time - || -1 == PyMapping_DelItemString(query, "$clusterTime")) { - goto fail; - } - } if (!buffer_write_int32(buffer, (int32_t)request_id) || !buffer_write_bytes(buffer, "\x00\x00\x00\x00\xd4\x07\x00\x00", 8) || !buffer_write_int32(buffer, (int32_t)flags) || @@ -138,37 +112,10 @@ static PyObject* _cbson_query_message(PyObject* self, PyObject* args) { } begin = buffer_get_position(buffer); - if (!write_dict(state->_cbson, buffer, query, check_keys, &options, 1)) { + if (!write_dict(state->_cbson, buffer, query, 0, &options, 1)) { goto fail; } - /* back up a byte and write $clusterTime */ - if (cluster_time) { - int length; - char zero = 0; - - buffer_update_position(buffer, buffer_get_position(buffer) - 1); - if (!write_pair(state->_cbson, buffer, "$clusterTime", 12, cluster_time, - 0, &options, 1)) { - goto fail; - } - - if (!buffer_write_bytes(buffer, &zero, 1)) { - goto fail; - } - - length = buffer_get_position(buffer) - begin; - buffer_write_int32_at_position(buffer, begin, (int32_t)length); - - /* undo popping $clusterTime */ - if (-1 == PyMapping_SetItemString( - query, "$clusterTime", cluster_time)) { - goto fail; - } - - Py_CLEAR(cluster_time); - } - max_size = buffer_get_position(buffer) - begin; if (field_selector != Py_None) { @@ -196,7 +143,6 @@ static PyObject* _cbson_query_message(PyObject* self, PyObject* args) { if (buffer) { buffer_free(buffer); } - Py_XDECREF(cluster_time); return result; } @@ -274,7 +220,6 @@ static PyObject* _cbson_op_msg(PyObject* self, PyObject* args) { Py_ssize_t identifier_length = 0; PyObject* docs; PyObject* doc; - unsigned char check_keys = 0; codec_options_t options; buffer_t buffer = NULL; int length_location, message_length; @@ -283,15 +228,14 @@ static PyObject* _cbson_op_msg(PyObject* self, PyObject* args) { PyObject* result = NULL; PyObject* iterator = NULL; - /*flags, command, identifier, docs, check_keys, opts*/ - if (!PyArg_ParseTuple(args, "IOet#ObO&", + /*flags, command, identifier, docs, opts*/ + if (!PyArg_ParseTuple(args, "IOet#OO&", &flags, &command, "utf-8", &identifier, &identifier_length, &docs, - &check_keys, convert_codec_options, &options)) { return NULL; } @@ -340,8 +284,7 @@ static PyObject* _cbson_op_msg(PyObject* self, PyObject* args) { } while ((doc = PyIter_Next(iterator)) != NULL) { int encoded_doc_size = write_dict( - state->_cbson, buffer, doc, check_keys, - &options, 1); + state->_cbson, buffer, doc, 0, &options, 1); if (!encoded_doc_size) { Py_CLEAR(doc); goto fail; @@ -400,7 +343,7 @@ _set_document_too_large(int size, long max) { static int _batched_op_msg( - unsigned char op, unsigned char check_keys, unsigned char ack, + unsigned char op, unsigned char ack, PyObject* command, PyObject* docs, PyObject* ctx, PyObject* to_publish, codec_options_t 
options, buffer_t buffer, struct module_state *state) { @@ -471,16 +414,12 @@ _batched_op_msg( } case _UPDATE: { - /* MongoDB does key validation for update. */ - check_keys = 0; if (!buffer_write_bytes(buffer, "updates\x00", 8)) goto fail; break; } case _DELETE: { - /* Never check keys in a delete command. */ - check_keys = 0; if (!buffer_write_bytes(buffer, "deletes\x00", 8)) goto fail; break; @@ -510,8 +449,7 @@ _batched_op_msg( int cur_size; int doc_too_large = 0; int unacked_doc_too_large = 0; - if (!write_dict(state->_cbson, buffer, doc, check_keys, - &options, 1)) { + if (!write_dict(state->_cbson, buffer, doc, 0, &options, 1)) { goto fail; } cur_size = buffer_get_position(buffer) - cur_doc_begin; @@ -584,7 +522,6 @@ _batched_op_msg( static PyObject* _cbson_encode_batched_op_msg(PyObject* self, PyObject* args) { unsigned char op; - unsigned char check_keys; unsigned char ack; PyObject* command; PyObject* docs; @@ -595,8 +532,8 @@ _cbson_encode_batched_op_msg(PyObject* self, PyObject* args) { buffer_t buffer; struct module_state *state = GETSTATE(self); - if (!PyArg_ParseTuple(args, "bOObbO&O", - &op, &command, &docs, &check_keys, &ack, + if (!PyArg_ParseTuple(args, "bOObO&O", + &op, &command, &docs, &ack, convert_codec_options, &options, &ctx)) { return NULL; @@ -611,7 +548,6 @@ _cbson_encode_batched_op_msg(PyObject* self, PyObject* args) { if (!_batched_op_msg( op, - check_keys, ack, command, docs, @@ -637,7 +573,6 @@ _cbson_encode_batched_op_msg(PyObject* self, PyObject* args) { static PyObject* _cbson_batched_op_msg(PyObject* self, PyObject* args) { unsigned char op; - unsigned char check_keys; unsigned char ack; int request_id; int position; @@ -650,8 +585,8 @@ _cbson_batched_op_msg(PyObject* self, PyObject* args) { buffer_t buffer; struct module_state *state = GETSTATE(self); - if (!PyArg_ParseTuple(args, "bOObbO&O", - &op, &command, &docs, &check_keys, &ack, + if (!PyArg_ParseTuple(args, "bOObO&O", + &op, &command, &docs, &ack, convert_codec_options, &options, &ctx)) { return NULL; @@ -676,7 +611,6 @@ _cbson_batched_op_msg(PyObject* self, PyObject* args) { if (!_batched_op_msg( op, - check_keys, ack, command, docs, @@ -707,7 +641,7 @@ _cbson_batched_op_msg(PyObject* self, PyObject* args) { static int _batched_write_command( - char* ns, Py_ssize_t ns_len, unsigned char op, int check_keys, + char* ns, Py_ssize_t ns_len, unsigned char op, PyObject* command, PyObject* docs, PyObject* ctx, PyObject* to_publish, codec_options_t options, buffer_t buffer, struct module_state *state) { @@ -786,16 +720,12 @@ _batched_write_command( } case _UPDATE: { - /* MongoDB does key validation for update. */ - check_keys = 0; if (!buffer_write_bytes(buffer, "updates\x00", 8)) goto fail; break; } case _DELETE: { - /* Never check keys in a delete command. 
*/ - check_keys = 0; if (!buffer_write_bytes(buffer, "deletes\x00", 8)) goto fail; break; @@ -838,8 +768,7 @@ _batched_write_command( goto fail; } cur_doc_begin = buffer_get_position(buffer); - if (!write_dict(state->_cbson, buffer, doc, - check_keys, &options, 1)) { + if (!write_dict(state->_cbson, buffer, doc, 0, &options, 1)) { goto fail; } @@ -915,7 +844,6 @@ static PyObject* _cbson_encode_batched_write_command(PyObject* self, PyObject* args) { char *ns = NULL; unsigned char op; - unsigned char check_keys; Py_ssize_t ns_len; PyObject* command; PyObject* docs; @@ -926,8 +854,8 @@ _cbson_encode_batched_write_command(PyObject* self, PyObject* args) { buffer_t buffer; struct module_state *state = GETSTATE(self); - if (!PyArg_ParseTuple(args, "et#bOObO&O", "utf-8", - &ns, &ns_len, &op, &command, &docs, &check_keys, + if (!PyArg_ParseTuple(args, "et#bOOO&O", "utf-8", + &ns, &ns_len, &op, &command, &docs, convert_codec_options, &options, &ctx)) { return NULL; @@ -945,7 +873,6 @@ _cbson_encode_batched_write_command(PyObject* self, PyObject* args) { ns, ns_len, op, - check_keys, command, docs, ctx, diff --git a/pymongo/collection.py b/pymongo/collection.py index ea11875ce2..70c13c34f4 100644 --- a/pymongo/collection.py +++ b/pymongo/collection.py @@ -441,9 +441,7 @@ def bulk_write(self, requests, ordered=True, return BulkWriteResult({}, False) def _insert_one( - self, doc, ordered, - check_keys, write_concern, op_id, bypass_doc_val, - session): + self, doc, ordered, write_concern, op_id, bypass_doc_val, session): """Internal helper for inserting a single document.""" write_concern = write_concern or self.write_concern acknowledged = write_concern.acknowledged @@ -462,7 +460,6 @@ def _insert_command(session, sock_info, retryable_write): command, write_concern=write_concern, codec_options=self.__write_response_codec_options, - check_keys=check_keys, session=session, client=self.__database.client, retryable_write=retryable_write) @@ -520,7 +517,7 @@ def insert_one(self, document, bypass_document_validation=False, write_concern = self._write_concern_for(session) return InsertOneResult( self._insert_one( - document, ordered=True, check_keys=False, + document, ordered=True, write_concern=write_concern, op_id=None, bypass_doc_val=bypass_document_validation, session=session), write_concern.acknowledged) @@ -588,8 +585,7 @@ def gen(): return InsertManyResult(inserted_ids, write_concern.acknowledged) def _update(self, sock_info, criteria, document, upsert=False, - check_keys=False, multi=False, - write_concern=None, op_id=None, ordered=True, + multi=False, write_concern=None, op_id=None, ordered=True, bypass_doc_val=False, collation=None, array_filters=None, hint=None, session=None, retryable_write=False, let=None): """Internal update / replace helper.""" @@ -660,16 +656,14 @@ def _update(self, sock_info, criteria, document, upsert=False, return result def _update_retryable( - self, criteria, document, upsert=False, - check_keys=False, multi=False, + self, criteria, document, upsert=False, multi=False, write_concern=None, op_id=None, ordered=True, bypass_doc_val=False, collation=None, array_filters=None, hint=None, session=None, let=None): """Internal update / replace helper.""" def _update(session, sock_info, retryable_write): return self._update( - sock_info, criteria, document, upsert=upsert, - check_keys=check_keys, multi=multi, + sock_info, criteria, document, upsert=upsert, multi=multi, write_concern=write_concern, op_id=op_id, ordered=ordered, bypass_doc_val=bypass_doc_val, collation=collation, 
array_filters=array_filters, hint=hint, session=session, @@ -830,7 +824,7 @@ def update_one(self, filter, update, upsert=False, write_concern = self._write_concern_for(session) return UpdateResult( self._update_retryable( - filter, update, upsert, check_keys=False, + filter, update, upsert, write_concern=write_concern, bypass_doc_val=bypass_document_validation, collation=collation, array_filters=array_filters, @@ -910,7 +904,7 @@ def update_many(self, filter, update, upsert=False, array_filters=None, write_concern = self._write_concern_for(session) return UpdateResult( self._update_retryable( - filter, update, upsert, check_keys=False, multi=True, + filter, update, upsert, multi=True, write_concern=write_concern, bypass_doc_val=bypass_document_validation, collation=collation, array_filters=array_filters, diff --git a/pymongo/encryption.py b/pymongo/encryption.py index 064ba48d51..4b08492ee9 100644 --- a/pymongo/encryption.py +++ b/pymongo/encryption.py @@ -301,30 +301,24 @@ def _get_internal_client(encrypter, mongo_client): opts._kms_providers, schema_map)) self._closed = False - def encrypt(self, database, cmd, check_keys, codec_options): + def encrypt(self, database, cmd, codec_options): """Encrypt a MongoDB command. :Parameters: - `database`: The database for this command. - `cmd`: A command document. - - `check_keys`: If True, check `cmd` for invalid keys. - `codec_options`: The CodecOptions to use while encoding `cmd`. :Returns: The encrypted command to execute. """ self._check_closed() - # Workaround for $clusterTime which is incompatible with - # check_keys. - cluster_time = check_keys and cmd.pop('$clusterTime', None) - encoded_cmd = _dict_to_bson(cmd, check_keys, codec_options) + encoded_cmd = _dict_to_bson(cmd, False, codec_options) with _wrap_encryption_errors(): encrypted_cmd = self._auto_encrypter.encrypt(database, encoded_cmd) # TODO: PYTHON-1922 avoid decoding the encrypted_cmd. 
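        # (libmongocrypt returns raw BSON bytes; they are inflated back
        # into a document here because the command layer expects a
        # mapping type.)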
encrypt_cmd = _inflate_bson( encrypted_cmd, DEFAULT_RAW_BSON_OPTIONS) - if cluster_time: - encrypt_cmd['$clusterTime'] = cluster_time return encrypt_cmd def decrypt(self, response): diff --git a/pymongo/message.py b/pymongo/message.py index 86a83f152e..bccf0a9f51 100644 --- a/pymongo/message.py +++ b/pymongo/message.py @@ -331,7 +331,7 @@ def get_message(self, set_secondary_ok, sock_info, use_cmd=False): spec = self.as_command(sock_info)[0] request_id, msg, size, _ = _op_msg( 0, spec, self.db, self.read_preference, - set_secondary_ok, False, self.codec_options, + set_secondary_ok, self.codec_options, ctx=sock_info.compression_context) return request_id, msg, size @@ -430,7 +430,7 @@ def get_message(self, dummy0, sock_info, use_cmd=False): flags = 0 request_id, msg, size, _ = _op_msg( flags, spec, self.db, None, - False, False, self.codec_options, + False, self.codec_options, ctx=sock_info.compression_context) return request_id, msg, size @@ -526,7 +526,7 @@ def __pack_message(operation, data): _pack_byte = struct.Struct(" max_cmd_size diff --git a/pymongo/network.py b/pymongo/network.py index 7ec6540dd4..10d71308f6 100644 --- a/pymongo/network.py +++ b/pymongo/network.py @@ -41,7 +41,7 @@ def command(sock_info, dbname, spec, secondary_ok, is_mongos, read_preference, codec_options, session, client, check=True, allowable_errors=None, address=None, - check_keys=False, listeners=None, max_bson_size=None, + listeners=None, max_bson_size=None, read_concern=None, parse_write_concern_error=False, collation=None, @@ -65,7 +65,6 @@ def command(sock_info, dbname, spec, secondary_ok, is_mongos, - `check`: raise OperationFailure if there are errors - `allowable_errors`: errors to ignore if `check` is True - `address`: the (host, port) of `sock` - - `check_keys`: if True, check `spec` for invalid keys - `listeners`: An instance of :class:`~pymongo.monitoring.EventListeners` - `max_bson_size`: The maximum encoded bson size for this server - `read_concern`: The read concern for this command. @@ -107,16 +106,13 @@ def command(sock_info, dbname, spec, secondary_ok, is_mongos, if (client and client._encrypter and not client._encrypter._bypass_auto_encryption): - spec = orig = client._encrypter.encrypt( - dbname, spec, check_keys, codec_options) - # We already checked the keys, no need to do it again. - check_keys = False + spec = orig = client._encrypter.encrypt(dbname, spec, codec_options) if use_op_msg: flags = _OpMsg.MORE_TO_COME if unacknowledged else 0 flags |= _OpMsg.EXHAUST_ALLOWED if exhaust_allowed else 0 request_id, msg, size, max_doc_size = message._op_msg( - flags, spec, dbname, read_preference, secondary_ok, check_keys, + flags, spec, dbname, read_preference, secondary_ok, codec_options, ctx=compression_ctx) # If this is an unacknowledged write then make sure the encoded doc(s) # are small enough, otherwise rely on the server to return an error. 
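        # (With MORE_TO_COME set the server sends no reply at all, so an
        # oversized document would otherwise fail silently.)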
@@ -125,7 +121,7 @@ def command(sock_info, dbname, spec, secondary_ok, is_mongos, message._raise_document_too_large(name, size, max_bson_size) else: request_id, msg, size = message._query( - flags, ns, 0, -1, spec, None, codec_options, check_keys, + flags, ns, 0, -1, spec, None, codec_options, compression_ctx) if (max_bson_size is not None diff --git a/pymongo/pool.py b/pymongo/pool.py index 6fe9d024d6..99e64d8b2b 100644 --- a/pymongo/pool.py +++ b/pymongo/pool.py @@ -644,7 +644,7 @@ def _next_reply(self): def command(self, dbname, spec, secondary_ok=False, read_preference=ReadPreference.PRIMARY, codec_options=DEFAULT_CODEC_OPTIONS, check=True, - allowable_errors=None, check_keys=False, + allowable_errors=None, read_concern=None, write_concern=None, parse_write_concern_error=False, @@ -665,7 +665,6 @@ def command(self, dbname, spec, secondary_ok=False, - `codec_options`: a CodecOptions instance - `check`: raise OperationFailure if there are errors - `allowable_errors`: errors to ignore if `check` is True - - `check_keys`: if True, check `spec` for invalid keys - `read_concern`: The read concern for this command. - `write_concern`: The write concern for this command. - `parse_write_concern_error`: Whether to parse the @@ -707,7 +706,7 @@ def command(self, dbname, spec, secondary_ok=False, return command(self, dbname, spec, secondary_ok, self.is_mongos, read_preference, codec_options, session, client, check, allowable_errors, - self.address, check_keys, listeners, + self.address, listeners, self.max_bson_size, read_concern, parse_write_concern_error=parse_write_concern_error, collation=collation, From a7891480d1799233451861550a5eab4265f251c1 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Fri, 10 Dec 2021 13:34:18 -0600 Subject: [PATCH 0039/1588] PYTHON-2353 Update create_collection docs with more options (#820) --- pymongo/database.py | 25 ++++++++++++++++++++----- 1 file changed, 20 insertions(+), 5 deletions(-) diff --git a/pymongo/database.py b/pymongo/database.py index c7ed38b73f..33ae4038c8 100644 --- a/pymongo/database.py +++ b/pymongo/database.py @@ -277,15 +277,30 @@ def create_collection(self, name, codec_options=None, as keyword arguments to this method. Valid options include, but are not limited to: - - ``size``: desired initial size for the collection (in + - ``size`` (int): desired initial size for the collection (in bytes). For capped collections this size is the max size of the collection. - - ``capped``: if True, this is a capped collection - - ``max``: maximum number of objects if capped (optional) - - ``timeseries``: a document specifying configuration options for + - ``capped`` (bool): if True, this is a capped collection + - ``max`` (int): maximum number of objects if capped (optional) + - ``timeseries`` (dict): a document specifying configuration options for timeseries collections - - ``expireAfterSeconds``: the number of seconds after which a + - ``expireAfterSeconds`` (int): the number of seconds after which a document in a timeseries collection expires + - ``validator`` (dict): a document specifying validation rules or expressions + for the collection + - ``validationLevel`` (str): how strictly to apply the + validation rules to existing documents during an update. 
The default level + is "strict" + - ``validationAction`` (str): whether to "error" on invalid documents + (the default) or just "warn" about the violations but allow invalid + documents to be inserted + - ``indexOptionDefaults`` (dict): a document specifying a default configuration + for indexes when creating a collection + - ``viewOn`` (str): the name of the source collection or view from which + to create the view + - ``pipeline`` (list): a list of aggregation pipeline stages + - ``comment`` (str): a user-provided comment to attach to this command. + This option is only supported on MongoDB >= 4.4. .. versionchanged:: 3.11 This method is now supported inside multi-document transactions From b2f3c66575efdfbb9d6c8aee14eb69ed40fbf649 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Mon, 13 Dec 2021 14:41:25 -0800 Subject: [PATCH 0040/1588] PYTHON-2888 Migrate from json.send to perf.send (#819) Rename ops_per_sec to bytes_per_sec to better reflect the perf measurement. --- .evergreen/perf.yml | 3 +-- test/performance/perf_test.py | 24 ++++++++++++++---------- 2 files changed, 15 insertions(+), 12 deletions(-) diff --git a/.evergreen/perf.yml b/.evergreen/perf.yml index 70e83ff582..8b3638d535 100644 --- a/.evergreen/perf.yml +++ b/.evergreen/perf.yml @@ -133,9 +133,8 @@ functions: file_location: src/report.json "send dashboard data": - - command: json.send + - command: perf.send params: - name: perf file: src/results.json "cleanup": diff --git a/test/performance/perf_test.py b/test/performance/perf_test.py index d84e67aca4..dab7138add 100644 --- a/test/performance/perf_test.py +++ b/test/performance/perf_test.py @@ -47,9 +47,7 @@ result_data = [] def tearDownModule(): - output = json.dumps({ - 'results': result_data - }, indent=4) + output = json.dumps(result_data, indent=4) if OUTPUT_FILE: with open(OUTPUT_FILE, 'w') as opf: opf.write(output) @@ -79,16 +77,22 @@ def setUp(self): def tearDown(self): name = self.__class__.__name__ median = self.percentile(50) - result = self.data_size / median + bytes_per_sec = self.data_size / median print('Running %s. MEDIAN=%s' % (self.__class__.__name__, self.percentile(50))) result_data.append({ - 'name': name, - 'results': { - '1': { - 'ops_per_sec': result - } - } + 'info': { + 'test_name': name, + 'args': { + 'threads': 1, + }, + }, + 'metrics': [ + { + 'name': 'bytes_per_sec', + 'value': bytes_per_sec + }, + ] }) def before(self): From ff3a8b44dcc9b5fc972d859a0a1ba249cb579aaa Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Mon, 13 Dec 2021 15:47:34 -0800 Subject: [PATCH 0041/1588] PYTHON-1864 PYTHON-2931 Spec complaint $readPreference (#809) Stop sending $readPreference to standalone servers. Stop sending $readPreference primary because it's the server default. Remove outdated secondary_ok flag. 
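A minimal sketch of the new behavior, assuming a replica-set deployment
and a hypothetical CommandLogger listener (pymongo's public monitoring
API is used as-is):

    from pymongo import MongoClient, monitoring

    class CommandLogger(monitoring.CommandListener):
        def started(self, event):
            # '$readPreference' now appears only for non-primary modes,
            # and never when connected to a standalone server.
            print(event.command_name,
                  event.command.get('$readPreference'))

        def succeeded(self, event):
            pass

        def failed(self, event):
            pass

    client = MongoClient(readPreference='secondaryPreferred',
                         event_listeners=[CommandLogger()])
    client.db.coll.find_one()
    # -> find {'mode': 'secondaryPreferred'}

    primary_client = MongoClient(event_listeners=[CommandLogger()])
    primary_client.db.coll.find_one()
    # -> find None  (primary is the server default, so nothing is sent)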
--- pymongo/aggregation.py | 13 ++--- pymongo/change_stream.py | 2 +- pymongo/collection.py | 35 ++++++------- pymongo/database.py | 20 ++++---- pymongo/message.py | 22 +++----- pymongo/mongo_client.py | 50 ++++++++----------- pymongo/network.py | 9 ++-- pymongo/pool.py | 9 ++-- pymongo/server.py | 4 +- .../aggregate-write-readPreference.json | 4 +- .../db-aggregate-write-readPreference.json | 4 +- .../mockupdb/test_mongos_command_read_mode.py | 7 ++- test/mockupdb/test_op_msg_read_preference.py | 23 ++++++--- test/mockupdb/test_query_read_pref_sharded.py | 13 ++--- test/test_cursor.py | 10 +++- test/test_read_preferences.py | 19 ++++--- 16 files changed, 123 insertions(+), 121 deletions(-) diff --git a/pymongo/aggregation.py b/pymongo/aggregation.py index a5a7abaed7..8fb0225eb3 100644 --- a/pymongo/aggregation.py +++ b/pymongo/aggregation.py @@ -92,11 +92,6 @@ def _database(self): """The database against which the aggregation command is run.""" raise NotImplementedError - def _process_result(self, result, session, server, sock_info, secondary_ok): - if self._result_processor: - self._result_processor( - result, session, server, sock_info, secondary_ok) - def get_read_preference(self, session): if self._write_preference: return self._write_preference @@ -105,7 +100,7 @@ def get_read_preference(self, session): self._write_preference = pref = _AggWritePref(pref) return pref - def get_cursor(self, session, server, sock_info, secondary_ok): + def get_cursor(self, session, server, sock_info, read_preference): # Serialize command. cmd = SON([("aggregate", self._aggregation_target), ("pipeline", self._pipeline)]) @@ -134,8 +129,7 @@ def get_cursor(self, session, server, sock_info, secondary_ok): result = sock_info.command( self._database.name, cmd, - secondary_ok, - self.get_read_preference(session), + read_preference, self._target.codec_options, parse_write_concern_error=True, read_concern=read_concern, @@ -145,7 +139,8 @@ def get_cursor(self, session, server, sock_info, secondary_ok): client=self._database.client, user_fields=self._user_fields) - self._process_result(result, session, server, sock_info, secondary_ok) + if self._result_processor: + self._result_processor(result, sock_info) # Extract cursor from result or mock/fake one if necessary. if 'cursor' in result: diff --git a/pymongo/change_stream.py b/pymongo/change_stream.py index 00d049a838..54bf98d83e 100644 --- a/pymongo/change_stream.py +++ b/pymongo/change_stream.py @@ -148,7 +148,7 @@ def _aggregation_pipeline(self): full_pipeline.extend(self._pipeline) return full_pipeline - def _process_result(self, result, session, server, sock_info, secondary_ok): + def _process_result(self, result, sock_info): """Callback that caches the postBatchResumeToken or startAtOperationTime from a changeStream aggregate command response containing an empty batch of change documents. diff --git a/pymongo/collection.py b/pymongo/collection.py index 70c13c34f4..82e29f4061 100644 --- a/pymongo/collection.py +++ b/pymongo/collection.py @@ -186,7 +186,7 @@ def _socket_for_reads(self, session): def _socket_for_writes(self, session): return self.__database.client._socket_for_writes(session) - def _command(self, sock_info, command, secondary_ok=False, + def _command(self, sock_info, command, read_preference=None, codec_options=None, check=True, allowable_errors=None, read_concern=None, @@ -200,7 +200,6 @@ def _command(self, sock_info, command, secondary_ok=False, :Parameters: - `sock_info` - A SocketInfo instance. 
- `command` - The command itself, as a SON instance. - - `secondary_ok`: whether to set the secondaryOkay wire protocol bit. - `codec_options` (optional) - An instance of :class:`~bson.codec_options.CodecOptions`. - `check`: raise OperationFailure if there are errors @@ -226,7 +225,6 @@ def _command(self, sock_info, command, secondary_ok=False, return sock_info.command( self.__database.name, command, - secondary_ok, read_preference or self._read_preference_for(session), codec_options or self.codec_options, check, @@ -1356,14 +1354,14 @@ def find_raw_batches(self, *args, **kwargs): return RawBatchCursor(self, *args, **kwargs) - def _count_cmd(self, session, sock_info, secondary_ok, cmd, collation): + def _count_cmd(self, session, sock_info, read_preference, cmd, collation): """Internal count command helper.""" # XXX: "ns missing" checks can be removed when we drop support for # MongoDB 3.0, see SERVER-17051. res = self._command( sock_info, cmd, - secondary_ok, + read_preference=read_preference, allowable_errors=["ns missing"], codec_options=self.__write_response_codec_options, read_concern=self.read_concern, @@ -1374,12 +1372,12 @@ def _count_cmd(self, session, sock_info, secondary_ok, cmd, collation): return int(res["n"]) def _aggregate_one_result( - self, sock_info, secondary_ok, cmd, collation, session): + self, sock_info, read_preference, cmd, collation, session): """Internal helper to run an aggregate that returns a single result.""" result = self._command( sock_info, cmd, - secondary_ok, + read_preference, allowable_errors=[26], # Ignore NamespaceNotFound. codec_options=self.__write_response_codec_options, read_concern=self.read_concern, @@ -1413,7 +1411,7 @@ def estimated_document_count(self, **kwargs): raise ConfigurationError( 'estimated_document_count does not support sessions') - def _cmd(session, server, sock_info, secondary_ok): + def _cmd(session, server, sock_info, read_preference): if sock_info.max_wire_version >= 12: # MongoDB 4.9+ pipeline = [ @@ -1425,7 +1423,8 @@ def _cmd(session, server, sock_info, secondary_ok): ('cursor', {})]) cmd.update(kwargs) result = self._aggregate_one_result( - sock_info, secondary_ok, cmd, collation=None, session=session) + sock_info, read_preference, cmd, collation=None, + session=session) if not result: return 0 return int(result['n']) @@ -1433,7 +1432,8 @@ def _cmd(session, server, sock_info, secondary_ok): # MongoDB < 4.9 cmd = SON([('count', self.__name)]) cmd.update(kwargs) - return self._count_cmd(None, sock_info, secondary_ok, cmd, None) + return self._count_cmd( + None, sock_info, read_preference, cmd, collation=None) return self.__database.client._retryable_read( _cmd, self.read_preference, None) @@ -1506,9 +1506,9 @@ def count_documents(self, filter, session=None, **kwargs): collation = validate_collation_or_none(kwargs.pop('collation', None)) cmd.update(kwargs) - def _cmd(session, server, sock_info, secondary_ok): + def _cmd(session, server, sock_info, read_preference): result = self._aggregate_one_result( - sock_info, secondary_ok, cmd, collation, session) + sock_info, read_preference, cmd, collation, session) if not result: return 0 return result['n'] @@ -1799,12 +1799,12 @@ def list_indexes(self, session=None): read_pref = ((session and session._txn_read_preference()) or ReadPreference.PRIMARY) - def _cmd(session, server, sock_info, secondary_ok): + def _cmd(session, server, sock_info, read_preference): cmd = SON([("listIndexes", self.__name), ("cursor", {})]) with self.__database.client._tmp_session(session, False) as s: try: - 
cursor = self._command(sock_info, cmd, secondary_ok, - read_pref, + cursor = self._command(sock_info, cmd, + read_preference, codec_options, session=s)["cursor"] except OperationFailure as exc: @@ -2220,9 +2220,10 @@ def distinct(self, key, filter=None, session=None, **kwargs): kwargs["query"] = filter collation = validate_collation_or_none(kwargs.pop('collation', None)) cmd.update(kwargs) - def _cmd(session, server, sock_info, secondary_ok): + def _cmd(session, server, sock_info, read_preference): return self._command( - sock_info, cmd, secondary_ok, read_concern=self.read_concern, + sock_info, cmd, read_preference=read_preference, + read_concern=self.read_concern, collation=collation, session=session, user_fields={"values": 1})["values"] diff --git a/pymongo/database.py b/pymongo/database.py index 33ae4038c8..a6c1275126 100644 --- a/pymongo/database.py +++ b/pymongo/database.py @@ -492,7 +492,7 @@ def watch(self, pipeline=None, full_document=None, resume_after=None, batch_size, collation, start_at_operation_time, session, start_after) - def _command(self, sock_info, command, secondary_ok=False, value=1, check=True, + def _command(self, sock_info, command, value=1, check=True, allowable_errors=None, read_preference=ReadPreference.PRIMARY, codec_options=DEFAULT_CODEC_OPTIONS, write_concern=None, @@ -506,7 +506,6 @@ def _command(self, sock_info, command, secondary_ok=False, value=1, check=True, return sock_info.command( self.__name, command, - secondary_ok, read_preference, codec_options, check, @@ -605,8 +604,8 @@ def command(self, command, value=1, check=True, read_preference = ((session and session._txn_read_preference()) or ReadPreference.PRIMARY) with self.__client._socket_for_reads( - read_preference, session) as (sock_info, secondary_ok): - return self._command(sock_info, command, secondary_ok, value, + read_preference, session) as (sock_info, read_preference): + return self._command(sock_info, command, value, check, allowable_errors, read_preference, codec_options, session=session, **kwargs) @@ -618,16 +617,15 @@ def _retryable_read_command(self, command, value=1, check=True, read_preference = ((session and session._txn_read_preference()) or ReadPreference.PRIMARY) - def _cmd(session, server, sock_info, secondary_ok): - return self._command(sock_info, command, secondary_ok, value, + def _cmd(session, server, sock_info, read_preference): + return self._command(sock_info, command, value, check, allowable_errors, read_preference, codec_options, session=session, **kwargs) return self.__client._retryable_read( _cmd, read_preference, session) - def _list_collections(self, sock_info, secondary_okay, session, - read_preference, **kwargs): + def _list_collections(self, sock_info, session, read_preference, **kwargs): """Internal listCollections helper.""" coll = self.get_collection( @@ -638,7 +636,7 @@ def _list_collections(self, sock_info, secondary_okay, session, with self.__client._tmp_session( session, close=False) as tmp_session: cursor = self._command( - sock_info, cmd, secondary_okay, + sock_info, cmd, read_preference=read_preference, session=tmp_session)["cursor"] cmd_cursor = CommandCursor( @@ -674,9 +672,9 @@ def list_collections(self, session=None, filter=None, **kwargs): read_pref = ((session and session._txn_read_preference()) or ReadPreference.PRIMARY) - def _cmd(session, server, sock_info, secondary_okay): + def _cmd(session, server, sock_info, read_preference): return self._list_collections( - sock_info, secondary_okay, session, read_preference=read_pref, + sock_info, session, 
read_preference=read_preference, **kwargs) return self.__client._retryable_read( diff --git a/pymongo/message.py b/pymongo/message.py index bccf0a9f51..2e09df457e 100644 --- a/pymongo/message.py +++ b/pymongo/message.py @@ -316,9 +316,9 @@ def as_command(self, sock_info): self._as_command = cmd, self.db return self._as_command - def get_message(self, set_secondary_ok, sock_info, use_cmd=False): + def get_message(self, read_preference, sock_info, use_cmd=False): """Get a query message, possibly setting the secondaryOk bit.""" - if set_secondary_ok: + if read_preference.mode: # Set the secondaryOk bit. flags = self.flags | 4 else: @@ -330,8 +330,7 @@ def get_message(self, set_secondary_ok, sock_info, use_cmd=False): if use_cmd: spec = self.as_command(sock_info)[0] request_id, msg, size, _ = _op_msg( - 0, spec, self.db, self.read_preference, - set_secondary_ok, self.codec_options, + 0, spec, self.db, read_preference, self.codec_options, ctx=sock_info.compression_context) return request_id, msg, size @@ -346,8 +345,7 @@ def get_message(self, set_secondary_ok, sock_info, use_cmd=False): ntoreturn = self.limit if sock_info.is_mongos: - spec = _maybe_add_read_preference(spec, - self.read_preference) + spec = _maybe_add_read_preference(spec, read_preference) return _query(flags, ns, self.ntoskip, ntoreturn, spec, None if use_cmd else self.fields, @@ -429,8 +427,7 @@ def get_message(self, dummy0, sock_info, use_cmd=False): else: flags = 0 request_id, msg, size, _ = _op_msg( - flags, spec, self.db, None, - False, self.codec_options, + flags, spec, self.db, None, self.codec_options, ctx=sock_info.compression_context) return request_id, msg, size @@ -572,16 +569,13 @@ def _op_msg_uncompressed(flags, command, identifier, docs, opts): _op_msg_uncompressed = _cmessage._op_msg -def _op_msg(flags, command, dbname, read_preference, secondary_ok, - opts, ctx=None): +def _op_msg(flags, command, dbname, read_preference, opts, ctx=None): """Get a OP_MSG message.""" command['$db'] = dbname # getMore commands do not send $readPreference. if read_preference is not None and "$readPreference" not in command: - if secondary_ok and not read_preference.mode: - command["$readPreference"] = ( - ReadPreference.PRIMARY_PREFERRED.document) - else: + # Only send $readPreference if it's not primary (the default). + if read_preference.mode: command["$readPreference"] = read_preference.document name = next(iter(command)) try: diff --git a/pymongo/mongo_client.py b/pymongo/mongo_client.py index 87c87c0241..a133c96a7f 100644 --- a/pymongo/mongo_client.py +++ b/pymongo/mongo_client.py @@ -1025,7 +1025,7 @@ def _end_sessions(self, session_ids): # another session. with self._socket_for_reads( ReadPreference.PRIMARY_PREFERRED, - None) as (sock_info, secondary_ok): + None) as (sock_info, read_pref): if not sock_info.supports_sessions: return @@ -1033,7 +1033,7 @@ def _end_sessions(self, session_ids): spec = SON([('endSessions', session_ids[i:i + common._MAX_END_SESSIONS])]) sock_info.command( - 'admin', spec, secondary_ok=secondary_ok, client=self) + 'admin', spec, read_preference=read_pref, client=self) except PyMongoError: # Drivers MUST ignore any errors returned by the endSessions # command. 
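Concretely, the _op_msg change above means the OP_MSG body of a query
carries $readPreference only for non-primary modes. Roughly, ignoring
field order (the documents below are illustrative, not captured output):

    # A secondaryPreferred read on test.coll now encodes as:
    {'find': 'coll', 'filter': {}, '$db': 'test',
     '$readPreference': {'mode': 'secondaryPreferred'}}

    # The same read with the default primary mode omits the field:
    {'find': 'coll', 'filter': {}, '$db': 'test'}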
@@ -1136,39 +1136,33 @@ def _socket_for_writes(self, session): return self._get_socket(server, session) @contextlib.contextmanager - def _secondaryok_for_server(self, read_preference, server, session): + def _socket_from_server(self, read_preference, server, session): assert read_preference is not None, "read_preference must not be None" # Get a socket for a server matching the read preference, and yield - # sock_info, secondary_ok. Server Selection Spec: "SecondaryOK must - # be sent to mongods with topology type Single. If the server type is - # Mongos, follow the rules for passing read preference to mongos, even - # for topology type Single." + # sock_info with the effective read preference. The Server Selection + # Spec says not to send any $readPreference to standalones and to + # always send primaryPreferred when directly connected to a repl set + # member. # Thread safe: if the type is single it cannot change. topology = self._get_topology() single = topology.description.topology_type == TOPOLOGY_TYPE.Single with self._get_socket(server, session) as sock_info: - secondary_ok = (single and not sock_info.is_mongos) or ( - read_preference.mode != ReadPreference.PRIMARY.mode) - yield sock_info, secondary_ok + if single: + if sock_info.is_repl: + # Use primary preferred to ensure any repl set member + # can handle the request. + read_preference = ReadPreference.PRIMARY_PREFERRED + elif sock_info.is_standalone: + # Don't send read preference to standalones. + read_preference = ReadPreference.PRIMARY + yield sock_info, read_preference - @contextlib.contextmanager def _socket_for_reads(self, read_preference, session): assert read_preference is not None, "read_preference must not be None" - # Get a socket for a server matching the read preference, and yield - # sock_info, secondary_ok. Server Selection Spec: "SecondaryOK must be - # sent to mongods with topology type Single. If the server type is - # Mongos, follow the rules for passing read preference to mongos, even - # for topology type Single." - # Thread safe: if the type is single it cannot change. topology = self._get_topology() server = self._select_server(read_preference, session) - single = topology.description.topology_type == TOPOLOGY_TYPE.Single - - with self._get_socket(server, session) as sock_info: - secondary_ok = (single and not sock_info.is_mongos) or ( - read_preference != ReadPreference.PRIMARY) - yield sock_info, secondary_ok + return self._socket_from_server(read_preference, server, session) def _should_pin_cursor(self, session): return (self.__options.load_balanced and @@ -1195,9 +1189,9 @@ def _run_operation(self, operation, unpack_res, address=None): operation.sock_mgr.sock, operation, True, self._event_listeners, unpack_res) - def _cmd(session, server, sock_info, secondary_ok): + def _cmd(session, server, sock_info, read_preference): return server.run_operation( - sock_info, operation, secondary_ok, self._event_listeners, + sock_info, operation, read_preference, self._event_listeners, unpack_res) return self._retryable_read( @@ -1292,13 +1286,13 @@ def _retryable_read(self, func, read_pref, session, address=None, try: server = self._select_server( read_pref, session, address=address) - with self._secondaryok_for_server(read_pref, server, session) as ( - sock_info, secondary_ok): + with self._socket_from_server(read_pref, server, session) as ( + sock_info, read_pref): if retrying and not retryable: # A retry is not possible because this server does # not support retryable reads, raise the last error. 
raise last_error - return func(session, server, sock_info, secondary_ok) + return func(session, server, sock_info, read_pref) except ServerSelectionTimeoutError: if retrying: # The application may think the write was never attempted diff --git a/pymongo/network.py b/pymongo/network.py index 10d71308f6..a14e9924a4 100644 --- a/pymongo/network.py +++ b/pymongo/network.py @@ -38,7 +38,7 @@ _UNPACK_HEADER = struct.Struct(" max_bson_size + message._COMMAND_OVERHEAD): diff --git a/pymongo/pool.py b/pymongo/pool.py index 99e64d8b2b..88b0e09737 100644 --- a/pymongo/pool.py +++ b/pymongo/pool.py @@ -608,6 +608,10 @@ def _hello(self, cluster_time, topology_version, heartbeat_frequency): self.supports_sessions = ( hello.logical_session_timeout_minutes is not None) self.hello_ok = hello.hello_ok + self.is_repl = hello.server_type in ( + SERVER_TYPE.RSPrimary, SERVER_TYPE.RSSecondary, + SERVER_TYPE.RSArbiter, SERVER_TYPE.RSOther, SERVER_TYPE.RSGhost) + self.is_standalone = hello.server_type == SERVER_TYPE.Standalone self.is_mongos = hello.server_type == SERVER_TYPE.Mongos if performing_handshake and self.compression_settings: ctx = self.compression_settings.get_compression_context( @@ -641,7 +645,7 @@ def _next_reply(self): response_doc.pop('serviceId', None) return response_doc - def command(self, dbname, spec, secondary_ok=False, + def command(self, dbname, spec, read_preference=ReadPreference.PRIMARY, codec_options=DEFAULT_CODEC_OPTIONS, check=True, allowable_errors=None, @@ -660,7 +664,6 @@ def command(self, dbname, spec, secondary_ok=False, :Parameters: - `dbname`: name of the database on which to run the command - `spec`: a command document as a dict, SON, or mapping object - - `secondary_ok`: whether to set the secondaryOkay wire protocol bit - `read_preference`: a read preference - `codec_options`: a CodecOptions instance - `check`: raise OperationFailure if there are errors @@ -703,7 +706,7 @@ def command(self, dbname, spec, secondary_ok=False, if self.op_msg_enabled: self._raise_if_not_writable(unacknowledged) try: - return command(self, dbname, spec, secondary_ok, + return command(self, dbname, spec, self.is_mongos, read_preference, codec_options, session, client, check, allowable_errors, self.address, listeners, diff --git a/pymongo/server.py b/pymongo/server.py index 2a0a7267b7..8464cbbc6e 100644 --- a/pymongo/server.py +++ b/pymongo/server.py @@ -68,7 +68,7 @@ def request_check(self): """Check the server's state soon.""" self._monitor.request_check() - def run_operation(self, sock_info, operation, set_secondary_okay, listeners, + def run_operation(self, sock_info, operation, read_preference, listeners, unpack_res): """Run a _Query or _GetMore operation and return a Response object. 
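Taken together, the mongo_client.py and pool.py changes above reduce to a
small rule. A standalone sketch of that rule, under a hypothetical helper
name (PyMongo's actual logic lives in _socket_from_server above):

    from pymongo.read_preferences import ReadPreference

    def effective_read_preference(pref, single, is_repl, is_standalone):
        """Hypothetical distillation of _socket_from_server's override."""
        if single and is_repl:
            # Direct connection to a replica set member: send
            # primaryPreferred so any member can serve the request.
            return ReadPreference.PRIMARY_PREFERRED
        if single and is_standalone:
            # Standalones never receive $readPreference.
            return ReadPreference.PRIMARY
        return pref

Primary mode itself is then dropped from the wire message by _op_msg,
since it is the server default.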
@@ -95,7 +95,7 @@ def run_operation(self, sock_info, operation, set_secondary_okay, listeners, request_id = 0 else: message = operation.get_message( - set_secondary_okay, sock_info, use_cmd) + read_preference, sock_info, use_cmd) request_id, data, max_doc_size = self._split_message(message) if publish: diff --git a/test/crud/unified/aggregate-write-readPreference.json b/test/crud/unified/aggregate-write-readPreference.json index 28327e8d83..bc887e83cb 100644 --- a/test/crud/unified/aggregate-write-readPreference.json +++ b/test/crud/unified/aggregate-write-readPreference.json @@ -237,7 +237,7 @@ } ], "$readPreference": { - "mode": "primary" + "$$exists": false }, "readConcern": { "level": "local" @@ -425,7 +425,7 @@ } ], "$readPreference": { - "mode": "primary" + "$$exists": false }, "readConcern": { "level": "local" diff --git a/test/crud/unified/db-aggregate-write-readPreference.json b/test/crud/unified/db-aggregate-write-readPreference.json index 269299e3c7..2a81282de8 100644 --- a/test/crud/unified/db-aggregate-write-readPreference.json +++ b/test/crud/unified/db-aggregate-write-readPreference.json @@ -222,7 +222,7 @@ } ], "$readPreference": { - "mode": "primary" + "$$exists": false }, "readConcern": { "level": "local" @@ -416,7 +416,7 @@ } ], "$readPreference": { - "mode": "primary" + "$$exists": false }, "readConcern": { "level": "local" diff --git a/test/mockupdb/test_mongos_command_read_mode.py b/test/mockupdb/test_mongos_command_read_mode.py index ccd40c2cd7..49aee27047 100644 --- a/test/mockupdb/test_mongos_command_read_mode.py +++ b/test/mockupdb/test_mongos_command_read_mode.py @@ -79,8 +79,11 @@ def test(self): slave_ok = False elif operation.op_type == 'may-use-secondary': slave_ok = mode != 'primary' - self.assertEqual(pref.document, - request.doc.get('$readPreference')) + actual_pref = request.doc.get('$readPreference') + if mode == 'primary': + self.assertIsNone(actual_pref) + else: + self.assertEqual(pref.document, actual_pref) else: self.fail('unrecognized op_type %r' % operation.op_type) diff --git a/test/mockupdb/test_op_msg_read_preference.py b/test/mockupdb/test_op_msg_read_preference.py index 6ecc229ea1..d9adfe17eb 100644 --- a/test/mockupdb/test_op_msg_read_preference.py +++ b/test/mockupdb/test_op_msg_read_preference.py @@ -148,24 +148,31 @@ def test(self): expected_pref = ReadPreference.SECONDARY elif operation.op_type == 'must-use-primary': expected_server = self.primary - expected_pref = ReadPreference.PRIMARY + expected_pref = None elif operation.op_type == 'may-use-secondary': - if mode in ('primary', 'primaryPreferred'): + if mode == 'primary': expected_server = self.primary + expected_pref = None + elif mode == 'primaryPreferred': + expected_server = self.primary + expected_pref = pref else: expected_server = self.secondary - expected_pref = pref + expected_pref = pref else: self.fail('unrecognized op_type %r' % operation.op_type) - # For single mongod we send primaryPreferred instead of primary. - if expected_pref == ReadPreference.PRIMARY and self.single_mongod: - expected_pref = ReadPreference.PRIMARY_PREFERRED + # For single mongod we omit the read preference. 
+ if self.single_mongod: + expected_pref = None with going(operation.function, client): request = expected_server.receive() request.reply(operation.reply) - self.assertEqual(expected_pref.document, - request.doc.get('$readPreference')) + actual_pref = request.doc.get('$readPreference') + if expected_pref: + self.assertEqual(expected_pref.document, actual_pref) + else: + self.assertIsNone(actual_pref) self.assertNotIn('$query', request.doc) return test diff --git a/test/mockupdb/test_query_read_pref_sharded.py b/test/mockupdb/test_query_read_pref_sharded.py index 21813f7b8e..88dcdd8351 100644 --- a/test/mockupdb/test_query_read_pref_sharded.py +++ b/test/mockupdb/test_query_read_pref_sharded.py @@ -47,17 +47,18 @@ def test_query_and_read_mode_sharded_op_msg(self): SecondaryPreferred([{'tag': 'value'}]),) for query in ({'a': 1}, {'$query': {'a': 1}},): - for mode in read_prefs: + for pref in read_prefs: collection = client.db.get_collection('test', - read_preference=mode) + read_preference=pref) cursor = collection.find(query.copy()) with going(next, cursor): request = server.receives() # Command is not nested in $query. - request.assert_matches(OpMsg( - SON([('find', 'test'), - ('filter', {'a': 1}), - ('$readPreference', mode.document)]))) + expected_cmd = SON([('find', 'test'), + ('filter', {'a': 1})]) + if pref.mode: + expected_cmd['$readPreference'] = pref.document + request.assert_matches(OpMsg(expected_cmd)) request.replies({'cursor': {'id': 0, 'firstBatch': [{}]}}) diff --git a/test/test_cursor.py b/test/test_cursor.py index 8c27544b80..8bea12228d 100644 --- a/test/test_cursor.py +++ b/test/test_cursor.py @@ -36,6 +36,7 @@ InvalidOperation, OperationFailure) from pymongo.read_concern import ReadConcern +from pymongo.read_preferences import ReadPreference from pymongo.write_concern import WriteConcern from test import (client_context, unittest, @@ -1257,7 +1258,9 @@ def test_getMore_does_not_send_readPreference(self): client = rs_or_single_client( event_listeners=[listener]) self.addCleanup(client.close) - coll = client[self.db.name].test + # We never send primary read preference so override the default. 
+ coll = client[self.db.name].get_collection( + 'test', read_preference=ReadPreference.PRIMARY_PREFERRED) coll.delete_many({}) coll.insert_many([{} for _ in range(5)]) @@ -1267,7 +1270,10 @@ def test_getMore_does_not_send_readPreference(self): started = listener.results['started'] self.assertEqual(2, len(started)) self.assertEqual('find', started[0].command_name) - self.assertIn('$readPreference', started[0].command) + if client_context.is_rs or client_context.is_mongos: + self.assertIn('$readPreference', started[0].command) + else: + self.assertNotIn('$readPreference', started[0].command) self.assertEqual('getMore', started[1].command_name) self.assertNotIn('$readPreference', started[1].command) diff --git a/test/test_read_preferences.py b/test/test_read_preferences.py index bbc89b9d14..a63df72545 100644 --- a/test/test_read_preferences.py +++ b/test/test_read_preferences.py @@ -309,17 +309,17 @@ def __init__(self, *args, **kwargs): def _socket_for_reads(self, read_preference, session): context = super(ReadPrefTester, self)._socket_for_reads( read_preference, session) - with context as (sock_info, secondary_ok): + with context as (sock_info, read_preference): self.record_a_read(sock_info.address) - yield sock_info, secondary_ok + yield sock_info, read_preference @contextlib.contextmanager - def _secondaryok_for_server(self, read_preference, server, session): - context = super(ReadPrefTester, self)._secondaryok_for_server( + def _socket_from_server(self, read_preference, server, session): + context = super(ReadPrefTester, self)._socket_from_server( read_preference, server, session) - with context as (sock_info, secondary_ok): + with context as (sock_info, read_preference): self.record_a_read(sock_info.address) - yield sock_info, secondary_ok + yield sock_info, read_preference def record_a_read(self, address): server = self._get_topology().select_server_by_address(address, 0) @@ -597,8 +597,11 @@ def test_send_hedge(self): started = listener.results['started'] self.assertEqual(len(started), 1, started) cmd = started[0].command - self.assertIn('$readPreference', cmd) - self.assertEqual(cmd['$readPreference'], pref.document) + if client_context.is_rs or client_context.is_mongos: + self.assertIn('$readPreference', cmd) + self.assertEqual(cmd['$readPreference'], pref.document) + else: + self.assertNotIn('$readPreference', cmd) def test_maybe_add_read_preference(self): From 68b818141a8b745e473ab95b4001ca739b05099d Mon Sep 17 00:00:00 2001 From: Bernie Hackett Date: Mon, 13 Dec 2021 16:30:36 -0800 Subject: [PATCH 0042/1588] PYTHON-2903 Migrate testing from Amazon1 to Ubuntu 18.04 (#822) --- .evergreen/config.yml | 138 ++++++++++++------------------- .evergreen/run-mod-wsgi-tests.sh | 2 +- .evergreen/run-tests.sh | 3 +- .evergreen/utils.sh | 6 +- test/test_encryption.py | 10 +-- 5 files changed, 63 insertions(+), 96 deletions(-) diff --git a/.evergreen/config.yml b/.evergreen/config.yml index 93b37d504d..002774df42 100644 --- a/.evergreen/config.yml +++ b/.evergreen/config.yml @@ -1770,6 +1770,7 @@ axes: run_on: ubuntu1804-small batchtime: 10080 # 7 days variables: + libmongocrypt_url: https://s3.amazonaws.com/mciuploads/libmongocrypt/ubuntu1804-64/master/latest/libmongocrypt.tar.gz python3_binary: "/opt/python/3.8/bin/python3" - id: ubuntu-20.04 display_name: "Ubuntu 20.04" @@ -2196,37 +2197,26 @@ buildvariants: tasks: - ".4.2" -- matrix_name: "tests-python-version-amazon1-test-ssl" +- matrix_name: "tests-python-version-ubuntu18-test-ssl" matrix_spec: - platform: awslinux - python-version: 
&amazon1-pythons ["3.6", "3.7", "3.8", "3.9", "pypy3.6", "pypy3.7"] + platform: ubuntu-18.04 + python-version: "*" auth-ssl: "*" coverage: "*" display_name: "${python-version} ${platform} ${auth-ssl} ${coverage}" tasks: *all-server-versions -- matrix_name: "tests-python-version-ubuntu20-ssl" - matrix_spec: - platform: ubuntu-20.04 - python-version: ["3.10"] - auth-ssl: "*" - display_name: "${python-version} ${platform} ${auth-ssl} ${coverage}" - tasks: - - ".latest" - - ".5.0" - - ".4.4" - - matrix_name: "tests-pyopenssl" matrix_spec: - platform: awslinux - python-version: ["3.6", "3.7", "3.8", "3.9"] + platform: ubuntu-18.04 + python-version: "*" auth: "*" ssl: "ssl" pyopenssl: "*" # Only test "noauth" with Python 3.7. exclude_spec: - platform: awslinux - python-version: ["3.6", "3.8", "3.9"] + platform: ubuntu-18.04 + python-version: ["3.6", "3.8", "3.9", "3.10", "pypy3.6", "pypy3.7"] auth: "noauth" ssl: "ssl" pyopenssl: "*" @@ -2236,19 +2226,6 @@ buildvariants: # Test standalone and sharded only on 5.0 and later. - '.5.0' -- matrix_name: "tests-pyopenssl-pypy" - matrix_spec: - platform: debian92 - python-version: ["pypy3.6", "pypy3.7"] - auth: "auth" - ssl: "ssl" - pyopenssl: "*" - display_name: "PyOpenSSL ${platform} ${python-version} ${auth}" - tasks: - - '.replica_set' - # Test standalone and sharded only on 5.0 and later. - - '.5.0' - - matrix_name: "tests-pyopenssl-macOS" matrix_spec: platform: macos-1014 @@ -2270,10 +2247,10 @@ buildvariants: tasks: - '.replica_set' -- matrix_name: "tests-python-version-amazon1-test-encryption" +- matrix_name: "tests-python-version-ubuntu18-test-encryption" matrix_spec: - platform: awslinux - python-version: ["3.6", "3.7", "3.8", "3.9"] + platform: ubuntu-18.04 + python-version: "*" auth-ssl: noauth-nossl # TODO: dependency error for 'coverage-report' task: # dependency tests-python-version-rhel62-test-encryption_.../test-2.6-standalone is not present in the project config @@ -2282,25 +2259,16 @@ buildvariants: display_name: "Encryption ${python-version} ${platform} ${auth-ssl}" tasks: *encryption-server-versions -- matrix_name: "tests-pypy-debian-test-encryption" - matrix_spec: - platform: debian92 - python-version: ["pypy3.6", "pypy3.7"] - auth-ssl: noauth-nossl - encryption: "*" - display_name: "Encryption ${python-version} ${platform} ${auth-ssl}" - tasks: *encryption-server-versions - -- matrix_name: "tests-python-version-amazon1-without-c-extensions" +- matrix_name: "tests-python-version-ubuntu18-without-c-extensions" matrix_spec: - platform: awslinux - python-version: *amazon1-pythons + platform: ubuntu-18.04 + python-version: "*" c-extensions: without-c-extensions auth-ssl: noauth-nossl coverage: "*" exclude_spec: # These interpreters are always tested without extensions. 
- - platform: awslinux + - platform: ubuntu-18.04 python-version: ["pypy3.6", "pypy3.7"] c-extensions: "*" auth-ssl: "*" @@ -2310,9 +2278,8 @@ buildvariants: - matrix_name: "tests-python-version-ubuntu18-compression" matrix_spec: - # Ubuntu 16.04 images have libsnappy-dev installed, and provides OpenSSL 1.0.2 for testing Python 3.7 platform: ubuntu-18.04 - python-version: ["3.6", "3.7", "3.8", "3.9", "pypy3.6", "pypy3.7"] + python-version: "*" c-extensions: "*" compression: "*" exclude_spec: @@ -2321,11 +2288,6 @@ buildvariants: python-version: ["pypy3.6", "pypy3.7"] c-extensions: "with-c-extensions" compression: "*" - # PYTHON-2365 Some tests fail with CPython 3.8+ and python-snappy - - platform: ubuntu-18.04 - python-version: ["3.8", "3.9"] - c-extensions: "*" - compression: ["snappy"] display_name: "${compression} ${c-extensions} ${python-version} ${platform}" tasks: - "test-latest-standalone" @@ -2343,16 +2305,16 @@ buildvariants: - "test-4.0-standalone" - "test-3.6-standalone" -- matrix_name: "tests-python-version-green-framework-amazon1" +- matrix_name: "tests-python-version-green-framework-ubuntu18" matrix_spec: - platform: awslinux - python-version: *amazon1-pythons + platform: ubuntu-18.04 + python-version: "*" green-framework: "*" auth-ssl: "*" exclude_spec: # Don't test green frameworks on these Python versions. - - platform: awslinux - python-version: ["pypy3.6", "pypy3.7"] + - platform: ubuntu-18.04 + python-version: ["pypy3.6", "pypy3.7", "system-python3"] green-framework: "*" auth-ssl: "*" display_name: "${green-framework} ${python-version} ${platform} ${auth-ssl}" @@ -2374,12 +2336,13 @@ buildvariants: display_name: "${platform} ${python-version-windows-32} ${auth-ssl}" tasks: *all-server-versions -- matrix_name: "tests-python-version-supports-openssl-110-test-ssl" +- matrix_name: "tests-python-version-supports-openssl-102-test-ssl" matrix_spec: - platform: debian92 - python-version: *amazon1-pythons + platform: awslinux + # Python 3.10+ requires OpenSSL 1.1.1+ + python-version: ["3.6", "3.7", "3.8", "3.9", "pypy3.6", "pypy3.7"] auth-ssl: "*" - display_name: "${python-version} OpenSSL 1.1.0 ${platform} ${auth-ssl}" + display_name: "OpenSSL 1.0.2 ${python-version} ${platform} ${auth-ssl}" tasks: - ".latest" @@ -2392,16 +2355,16 @@ buildvariants: display_name: "Encryption ${platform} ${python-version-windows} ${auth-ssl}" tasks: *encryption-server-versions -# Storage engine tests on Amazon1 (x86_64) with Python 3.6. +# Storage engine tests on Ubuntu 18.04 (x86_64) with Python 3.6. - matrix_name: "tests-storage-engines" matrix_spec: - platform: awslinux + platform: ubuntu-18.04 storage-engine: "*" python-version: 3.6 display_name: "Storage ${storage-engine} ${python-version} ${platform}" rules: - if: - platform: awslinux + platform: ubuntu-18.04 storage-engine: ["inmemory"] python-version: "*" then: @@ -2414,7 +2377,7 @@ buildvariants: - "test-3.6-standalone" - if: # MongoDB 4.2 drops support for MMAPv1 - platform: awslinux + platform: ubuntu-18.04 storage-engine: ["mmapv1"] python-version: "*" then: @@ -2424,10 +2387,10 @@ buildvariants: - "test-3.6-standalone" - "test-3.6-replica_set" -# enableTestCommands=0 tests on Amazon1 (x86_64) with Python 3.6. +# enableTestCommands=0 tests on Ubuntu18 (x86_64) with Python 3.6. 
- matrix_name: "test-disableTestCommands" matrix_spec: - platform: awslinux + platform: ubuntu-18.04 disableTestCommands: "*" python-version: "3.6" display_name: "Disable test commands ${python-version} ${platform}" @@ -2436,8 +2399,8 @@ buildvariants: - matrix_name: "test-linux-enterprise-auth" matrix_spec: - platform: awslinux - python-version: *amazon1-pythons + platform: ubuntu-18.04 + python-version: "*" auth: "auth" display_name: "Enterprise ${auth} ${platform} ${python-version}" tasks: @@ -2454,12 +2417,12 @@ buildvariants: - matrix_name: "tests-mod-wsgi" matrix_spec: - platform: awslinux + platform: ubuntu-18.04 python-version: ["3.6", "3.7", "3.8", "3.9"] mod-wsgi-version: "*" exclude_spec: # mod-wsgi 3.5 won't build against CPython 3.8+ - - platform: awslinux + - platform: ubuntu-18.04 python-version: ["3.8", "3.9"] mod-wsgi-version: "3" display_name: "${mod-wsgi-version} ${python-version} ${platform}" @@ -2469,7 +2432,7 @@ buildvariants: - matrix_name: "mockupdb-tests" matrix_spec: - platform: awslinux + platform: ubuntu-18.04 python-version: 3.6 display_name: "MockupDB Tests" tasks: @@ -2477,7 +2440,7 @@ buildvariants: - matrix_name: "tests-doctests" matrix_spec: - platform: awslinux + platform: ubuntu-18.04 python-version: ["3.6"] display_name: "Doctests ${python-version} ${platform}" tasks: @@ -2486,7 +2449,7 @@ buildvariants: - name: "no-server" display_name: "No server test" run_on: - - amazon1-2018-test + - ubuntu1804-test tasks: - name: "no-server" expansions: @@ -2495,7 +2458,7 @@ buildvariants: - name: "Coverage Report" display_name: "Coverage Report" run_on: - - ubuntu1604-test + - ubuntu1804-test tasks: - name: "coverage-report" expansions: @@ -2503,18 +2466,23 @@ buildvariants: - matrix_name: "atlas-connect" matrix_spec: - platform: awslinux - python-version: *amazon1-pythons + platform: ubuntu-18.04 + python-version: "*" display_name: "Atlas connect ${python-version} ${platform}" tasks: - name: "atlas-connect" - matrix_name: "serverless" matrix_spec: - platform: awslinux - python-version: *amazon1-pythons + platform: ubuntu-18.04 + python-version: "*" auth-ssl: auth-ssl serverless: "*" + exclude_spec: + - platform: ubuntu-18.04 + python-version: ["system-python3"] + auth-ssl: auth-ssl + serverless: "*" display_name: "Serverless ${python-version} ${platform}" tasks: - "serverless_task_group" @@ -2522,7 +2490,7 @@ buildvariants: - matrix_name: "data-lake-spec-tests" matrix_spec: platform: ubuntu-18.04 - python-version: ["3.6", "3.9"] + python-version: ["3.6", "3.10"] auth: "auth" c-extensions: "*" display_name: "Atlas Data Lake ${python-version} ${c-extensions}" @@ -2532,7 +2500,7 @@ buildvariants: - matrix_name: "versioned-api-tests" matrix_spec: platform: ubuntu-18.04 - python-version: ["3.6", "3.9"] + python-version: ["3.6", "3.10"] auth: "auth" versionedApi: "*" display_name: "Versioned API ${versionedApi} ${python-version}" @@ -2557,7 +2525,7 @@ buildvariants: - matrix_name: "ocsp-test-windows" matrix_spec: platform: windows-64-vsMulti-small - python-version-windows: ["3.6", "3.9"] + python-version-windows: ["3.6", "3.10"] mongodb-version: ["4.4", "5.0", "latest"] auth: "noauth" ssl: "ssl" @@ -2604,7 +2572,7 @@ buildvariants: platform: ubuntu-18.04 mongodb-version: ["5.0", "latest"] auth-ssl: "*" - python-version: ["3.6", "3.7", "3.8", "3.9", "pypy3.6", "pypy3.7"] + python-version: "*" loadbalancer: "*" display_name: "Load Balancer ${platform} ${python-version} ${mongodb-version} ${auth-ssl}" tasks: diff --git a/.evergreen/run-mod-wsgi-tests.sh 
b/.evergreen/run-mod-wsgi-tests.sh index 725023cc3a..03d72e9701 100644 --- a/.evergreen/run-mod-wsgi-tests.sh +++ b/.evergreen/run-mod-wsgi-tests.sh @@ -4,7 +4,7 @@ set -o errexit APACHE=$(command -v apache2 || command -v /usr/lib/apache2/mpm-prefork/apache2) || true if [ -n "$APACHE" ]; then - APACHE_CONFIG=apache22ubuntu1204.conf + APACHE_CONFIG=apache24ubuntu161404.conf else APACHE=$(command -v httpd) || true if [ -z "$APACHE" ]; then diff --git a/.evergreen/run-tests.sh b/.evergreen/run-tests.sh index 3f4d6d9459..69550ec932 100755 --- a/.evergreen/run-tests.sh +++ b/.evergreen/run-tests.sh @@ -77,8 +77,7 @@ if [ -z "$PYTHON_BINARY" ]; then elif [ "$COMPRESSORS" = "snappy" ]; then createvirtualenv $PYTHON_BINARY snappytest trap "deactivate; rm -rf snappytest" EXIT HUP - # 0.5.2 has issues in pypy3(.5) - python -m pip install python-snappy==0.5.1 + python -m pip install python-snappy PYTHON=python elif [ "$COMPRESSORS" = "zstd" ]; then createvirtualenv $PYTHON_BINARY zstdtest diff --git a/.evergreen/utils.sh b/.evergreen/utils.sh index 55c549d3aa..b7f65104e8 100755 --- a/.evergreen/utils.sh +++ b/.evergreen/utils.sh @@ -9,11 +9,11 @@ set -o xtrace createvirtualenv () { PYTHON=$1 VENVPATH=$2 - if $PYTHON -m venv -h>/dev/null; then + if $PYTHON -m virtualenv --version; then + VIRTUALENV="$PYTHON -m virtualenv" + elif $PYTHON -m venv -h>/dev/null; then # System virtualenv might not be compatible with the python3 on our path VIRTUALENV="$PYTHON -m venv" - elif $PYTHON -m virtualenv --version; then - VIRTUALENV="$PYTHON -m virtualenv" else echo "Cannot test without virtualenv" exit 1 diff --git a/test/test_encryption.py b/test/test_encryption.py index f77d3fffc7..88acadfbaf 100644 --- a/test/test_encryption.py +++ b/test/test_encryption.py @@ -1820,15 +1820,15 @@ def setUp(self): # Errors when client has no cert, some examples: # [SSL: TLSV13_ALERT_CERTIFICATE_REQUIRED] tlsv13 alert certificate required (_ssl.c:2623) self.cert_error = ('certificate required|SSL handshake failed|' - 'KMS connection closed') + 'KMS connection closed|Connection reset by peer') + # On Python 3.10+ this error might be: + # EOF occurred in violation of protocol (_ssl.c:2384) + if sys.version_info[:2] >= (3, 10): + self.cert_error += '|EOF' # On Windows this error might be: # [WinError 10054] An existing connection was forcibly closed by the remote host if sys.platform == 'win32': self.cert_error += '|forcibly closed' - # On Windows Python 3.10+ this error might be: - # EOF occurred in violation of protocol (_ssl.c:2384) - if sys.version_info[:2] >= (3, 10): - self.cert_error += '|EOF' def test_01_aws(self): key = { From 0fc82d9c7bb19805df1483c5a283d37a3dab473d Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Tue, 14 Dec 2021 10:32:13 -0800 Subject: [PATCH 0043/1588] PYTHON-2763 Fix check_keys removal in encryption (#823) --- pymongo/message.py | 12 ++++-------- 1 file changed, 4 insertions(+), 8 deletions(-) diff --git a/pymongo/message.py b/pymongo/message.py index 2e09df457e..584528c2f2 100644 --- a/pymongo/message.py +++ b/pymongo/message.py @@ -309,10 +309,8 @@ def as_command(self, sock_info): sock_info.send_cluster_time(cmd, session, self.client) # Support auto encryption client = self.client - if (client._encrypter and - not client._encrypter._bypass_auto_encryption): - cmd = client._encrypter.encrypt( - self.db, cmd, False, self.codec_options) + if client._encrypter and not client._encrypter._bypass_auto_encryption: + cmd = client._encrypter.encrypt(self.db, cmd, self.codec_options) self._as_command = cmd, 
self.db return self._as_command @@ -407,10 +405,8 @@ def as_command(self, sock_info): sock_info.send_cluster_time(cmd, self.session, self.client) # Support auto encryption client = self.client - if (client._encrypter and - not client._encrypter._bypass_auto_encryption): - cmd = client._encrypter.encrypt( - self.db, cmd, False, self.codec_options) + if client._encrypter and not client._encrypter._bypass_auto_encryption: + cmd = client._encrypter.encrypt(self.db, cmd, self.codec_options) self._as_command = cmd, self.db return self._as_command From b502c44c06a351ed69e1c007377290cb58544429 Mon Sep 17 00:00:00 2001 From: Roberto Martinez <63836051+Pochetes@users.noreply.github.com> Date: Tue, 14 Dec 2021 14:24:05 -0500 Subject: [PATCH 0044/1588] Use quotes for pip install with extras (#824) --- doc/installation.rst | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/doc/installation.rst b/doc/installation.rst index a3d29c7f4f..9c9d80c7a1 100644 --- a/doc/installation.rst +++ b/doc/installation.rst @@ -37,44 +37,44 @@ GSSAPI authentication requires `pykerberos `_ on Windows. The correct dependency can be installed automatically along with PyMongo:: - $ python3 -m pip install pymongo[gssapi] + $ python3 -m pip install "pymongo[gssapi]" :ref:`MONGODB-AWS` authentication requires `pymongo-auth-aws `_:: - $ python3 -m pip install pymongo[aws] + $ python3 -m pip install "pymongo[aws]" Support for mongodb+srv:// URIs requires `dnspython `_:: - $ python3 -m pip install pymongo[srv] + $ python3 -m pip install "pymongo[srv]" :ref:`OCSP` requires `PyOpenSSL `_, `requests `_ and `service_identity `_:: - $ python3 -m pip install pymongo[ocsp] + $ python3 -m pip install "pymongo[ocsp]" Wire protocol compression with snappy requires `python-snappy `_:: - $ python3 -m pip install pymongo[snappy] + $ python3 -m pip install "pymongo[snappy]" Wire protocol compression with zstandard requires `zstandard `_:: - $ python3 -m pip install pymongo[zstd] + $ python3 -m pip install "pymongo[zstd]" :ref:`Client-Side Field Level Encryption` requires `pymongocrypt `_:: - $ python3 -m pip install pymongo[encryption] + $ python3 -m pip install "pymongo[encryption]" You can install all dependencies automatically with the following command:: - $ python3 -m pip install pymongo[gssapi,aws,ocsp,snappy,srv,zstd,encryption] + $ python3 -m pip install "pymongo[gssapi,aws,ocsp,snappy,srv,zstd,encryption]" Installing from source ---------------------- From 3843cef3f2a67da32e6bfdbeecc0b888256a6d5d Mon Sep 17 00:00:00 2001 From: Roberto Martinez <63836051+Pochetes@users.noreply.github.com> Date: Tue, 14 Dec 2021 14:24:33 -0500 Subject: [PATCH 0045/1588] Use quotes for pip install with extras in README (#825) --- README.rst | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/README.rst b/README.rst index f83ad70b10..390599a6cf 100644 --- a/README.rst +++ b/README.rst @@ -97,17 +97,17 @@ GSSAPI authentication requires `pykerberos `_ on Windows. 
The correct dependency can be installed automatically along with PyMongo:: - $ python -m pip install pymongo[gssapi] + $ python -m pip install "pymongo[gssapi]" MONGODB-AWS authentication requires `pymongo-auth-aws `_:: - $ python -m pip install pymongo[aws] + $ python -m pip install "pymongo[aws]" Support for mongodb+srv:// URIs requires `dnspython `_:: - $ python -m pip install pymongo[srv] + $ python -m pip install "pymongo[srv]" OCSP (Online Certificate Status Protocol) requires `PyOpenSSL `_, `requests @@ -116,27 +116,27 @@ OCSP (Online Certificate Status Protocol) requires `PyOpenSSL require `certifi `_:: - $ python -m pip install pymongo[ocsp] + $ python -m pip install "pymongo[ocsp]" Wire protocol compression with snappy requires `python-snappy `_:: - $ python -m pip install pymongo[snappy] + $ python -m pip install "pymongo[snappy]" Wire protocol compression with zstandard requires `zstandard `_:: - $ python -m pip install pymongo[zstd] + $ python -m pip install "pymongo[zstd]" Client-Side Field Level Encryption requires `pymongocrypt `_:: - $ python -m pip install pymongo[encryption] + $ python -m pip install "pymongo[encryption]" You can install all dependencies automatically with the following command:: - $ python -m pip install pymongo[gssapi,aws,ocsp,snappy,srv,tls,zstd,encryption] + $ python -m pip install "pymongo[gssapi,aws,ocsp,snappy,srv,tls,zstd,encryption]" Additional dependencies are: From 2b53bf3b8590bedecd55036d9ccc9d0e7bcc1c99 Mon Sep 17 00:00:00 2001 From: Bernie Hackett Date: Tue, 14 Dec 2021 15:12:01 -0800 Subject: [PATCH 0046/1588] PYTHON-3042 Migrate OCSP testing to Ubuntu 20.04 (#826) --- .evergreen/config.yml | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/.evergreen/config.yml b/.evergreen/config.yml index 002774df42..a265ba83bc 100644 --- a/.evergreen/config.yml +++ b/.evergreen/config.yml @@ -2512,8 +2512,10 @@ buildvariants: - matrix_name: "ocsp-test" matrix_spec: - platform: awslinux - python-version: ["3.6", "3.9", "pypy3.6", "pypy3.7"] + # OCSP stapling is not supported on Ubuntu 18.04. + # See https://jira.mongodb.org/browse/SERVER-51364. 
+ platform: ubuntu-20.04 + python-version: ["3.6", "3.10", "pypy3.6", "pypy3.7"] mongodb-version: ["4.4", "5.0", "latest"] auth: "noauth" ssl: "ssl" From 3886d0660e677e5a82d464a6ad0c72b869d41bbc Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Tue, 14 Dec 2021 15:12:49 -0800 Subject: [PATCH 0047/1588] PYTHON-3040 Remove duplicate srvMaxHosts tests --- .../replica-set/srvMaxHosts-invalid_integer.json | 7 ------- .../srv_seedlist/replica-set/srvMaxHosts-invalid_type.json | 7 ------- test/srv_seedlist/sharded/srvMaxHosts-invalid_integer.json | 7 ------- test/srv_seedlist/sharded/srvMaxHosts-invalid_type.json | 7 ------- 4 files changed, 28 deletions(-) delete mode 100644 test/srv_seedlist/replica-set/srvMaxHosts-invalid_integer.json delete mode 100644 test/srv_seedlist/replica-set/srvMaxHosts-invalid_type.json delete mode 100644 test/srv_seedlist/sharded/srvMaxHosts-invalid_integer.json delete mode 100644 test/srv_seedlist/sharded/srvMaxHosts-invalid_type.json diff --git a/test/srv_seedlist/replica-set/srvMaxHosts-invalid_integer.json b/test/srv_seedlist/replica-set/srvMaxHosts-invalid_integer.json deleted file mode 100644 index 5ba1a3b540..0000000000 --- a/test/srv_seedlist/replica-set/srvMaxHosts-invalid_integer.json +++ /dev/null @@ -1,7 +0,0 @@ -{ - "uri": "mongodb+srv://test1.test.build.10gen.cc/?replicaSet=repl0&srvMaxHosts=-1", - "seeds": [], - "hosts": [], - "error": true, - "comment": "Should fail because srvMaxHosts is not greater than or equal to zero" -} diff --git a/test/srv_seedlist/replica-set/srvMaxHosts-invalid_type.json b/test/srv_seedlist/replica-set/srvMaxHosts-invalid_type.json deleted file mode 100644 index 79e75b9b15..0000000000 --- a/test/srv_seedlist/replica-set/srvMaxHosts-invalid_type.json +++ /dev/null @@ -1,7 +0,0 @@ -{ - "uri": "mongodb+srv://test1.test.build.10gen.cc/?replicaSet=repl0&srvMaxHosts=foo", - "seeds": [], - "hosts": [], - "error": true, - "comment": "Should fail because srvMaxHosts is not an integer" -} diff --git a/test/srv_seedlist/sharded/srvMaxHosts-invalid_integer.json b/test/srv_seedlist/sharded/srvMaxHosts-invalid_integer.json deleted file mode 100644 index 0939624fc3..0000000000 --- a/test/srv_seedlist/sharded/srvMaxHosts-invalid_integer.json +++ /dev/null @@ -1,7 +0,0 @@ -{ - "uri": "mongodb+srv://test1.test.build.10gen.cc/?srvMaxHosts=-1", - "seeds": [], - "hosts": [], - "error": true, - "comment": "Should fail because srvMaxHosts is not greater than or equal to zero" -} diff --git a/test/srv_seedlist/sharded/srvMaxHosts-invalid_type.json b/test/srv_seedlist/sharded/srvMaxHosts-invalid_type.json deleted file mode 100644 index c228d26612..0000000000 --- a/test/srv_seedlist/sharded/srvMaxHosts-invalid_type.json +++ /dev/null @@ -1,7 +0,0 @@ -{ - "uri": "mongodb+srv://test1.test.build.10gen.cc/?srvMaxHosts=foo", - "seeds": [], - "hosts": [], - "error": true, - "comment": "Should fail because srvMaxHosts is not an integer" -} From ee80ebab544c8093f326f3e20f6473e80a826f29 Mon Sep 17 00:00:00 2001 From: Bernie Hackett Date: Fri, 17 Dec 2021 12:10:35 -0800 Subject: [PATCH 0048/1588] PYTHON-3049 Test with PyPy 3.8 (#827) --- .evergreen/config.yml | 16 ++++++++++------ 1 file changed, 10 insertions(+), 6 deletions(-) diff --git a/.evergreen/config.yml b/.evergreen/config.yml index a265ba83bc..44a075a727 100644 --- a/.evergreen/config.yml +++ b/.evergreen/config.yml @@ -1923,6 +1923,10 @@ axes: display_name: "PyPy 3.7" variables: PYTHON_BINARY: "/opt/python/pypy3.7/bin/pypy3" + - id: "pypy3.8" + display_name: "PyPy 3.8" + variables: + PYTHON_BINARY: 
"/opt/python/pypy3.8/bin/pypy3" - id: "system-python3" display_name: "Python3" variables: @@ -2216,7 +2220,7 @@ buildvariants: # Only test "noauth" with Python 3.7. exclude_spec: platform: ubuntu-18.04 - python-version: ["3.6", "3.8", "3.9", "3.10", "pypy3.6", "pypy3.7"] + python-version: ["3.6", "3.8", "3.9", "3.10", "pypy3.6", "pypy3.7", "pypy3.8"] auth: "noauth" ssl: "ssl" pyopenssl: "*" @@ -2269,7 +2273,7 @@ buildvariants: exclude_spec: # These interpreters are always tested without extensions. - platform: ubuntu-18.04 - python-version: ["pypy3.6", "pypy3.7"] + python-version: ["pypy3.6", "pypy3.7", "pypy3.8"] c-extensions: "*" auth-ssl: "*" coverage: "*" @@ -2285,7 +2289,7 @@ buildvariants: exclude_spec: # These interpreters are always tested without extensions. - platform: ubuntu-18.04 - python-version: ["pypy3.6", "pypy3.7"] + python-version: ["pypy3.6", "pypy3.7", "pypy3.8"] c-extensions: "with-c-extensions" compression: "*" display_name: "${compression} ${c-extensions} ${python-version} ${platform}" @@ -2314,7 +2318,7 @@ buildvariants: exclude_spec: # Don't test green frameworks on these Python versions. - platform: ubuntu-18.04 - python-version: ["pypy3.6", "pypy3.7", "system-python3"] + python-version: ["pypy3.6", "pypy3.7", "pypy3.8", "system-python3"] green-framework: "*" auth-ssl: "*" display_name: "${green-framework} ${python-version} ${platform} ${auth-ssl}" @@ -2340,7 +2344,7 @@ buildvariants: matrix_spec: platform: awslinux # Python 3.10+ requires OpenSSL 1.1.1+ - python-version: ["3.6", "3.7", "3.8", "3.9", "pypy3.6", "pypy3.7"] + python-version: ["3.6", "3.7", "3.8", "3.9", "pypy3.6", "pypy3.7", "pypy3.8"] auth-ssl: "*" display_name: "OpenSSL 1.0.2 ${python-version} ${platform} ${auth-ssl}" tasks: @@ -2515,7 +2519,7 @@ buildvariants: # OCSP stapling is not supported on Ubuntu 18.04. # See https://jira.mongodb.org/browse/SERVER-51364. platform: ubuntu-20.04 - python-version: ["3.6", "3.10", "pypy3.6", "pypy3.7"] + python-version: ["3.6", "3.10", "pypy3.6", "pypy3.8"] mongodb-version: ["4.4", "5.0", "latest"] auth: "noauth" ssl: "ssl" From c760f900f2e4109a247c2ffc8ad3549362007772 Mon Sep 17 00:00:00 2001 From: David Kim <50807669+DavidKimDY@users.noreply.github.com> Date: Mon, 20 Dec 2021 22:32:53 +0900 Subject: [PATCH 0049/1588] Edit simple typo in docs (#828) --- pymongo/server.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pymongo/server.py b/pymongo/server.py index 8464cbbc6e..cb9442d000 100644 --- a/pymongo/server.py +++ b/pymongo/server.py @@ -77,7 +77,7 @@ def run_operation(self, sock_info, operation, read_preference, listeners, Can raise ConnectionFailure, OperationFailure, etc. :Parameters: - - `sock_info` - A SocketInfo instance. + - `sock_info`: A SocketInfo instance. - `operation`: A _Query or _GetMore object. - `set_secondary_okay`: Pass to operation.get_message. - `listeners`: Instance of _EventListeners or None. 
From 52ed5a4135a76480e03e96eb0369c2c4eae0c3f7 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Thu, 13 Jan 2022 16:09:48 -0600 Subject: [PATCH 0050/1588] PYTHON-3052 Add Typings to PyMongo Itself (#829) --- .github/workflows/test-python.yml | 21 ++ bson/__init__.py | 201 ++++++++-------- bson/_helpers.py | 7 +- bson/binary.py | 40 +-- bson/code.py | 18 +- bson/codec_options.py | 63 ++--- bson/dbref.py | 24 +- bson/decimal128.py | 40 +-- bson/int64.py | 7 +- bson/json_util.py | 97 ++++---- bson/max_key.py | 19 +- bson/min_key.py | 19 +- bson/objectid.py | 48 ++-- bson/raw_bson.py | 32 +-- bson/regex.py | 17 +- bson/son.py | 62 ++--- bson/timestamp.py | 30 +-- bson/tz_util.py | 18 +- doc/changelog.rst | 4 +- gridfs/__init__.py | 98 ++++---- gridfs/grid_file.py | 227 +++++++++--------- mypy.ini | 11 + setup.py | 2 +- test/mockupdb/operations.py | 5 +- test/mockupdb/test_getmore_sharded.py | 5 +- test/mockupdb/test_mixed_version_sharded.py | 5 +- .../test_network_disconnect_primary.py | 5 +- test/mockupdb/test_slave_okay_sharded.py | 5 +- test/mod_wsgi_test/test_client.py | 14 +- test/performance/perf_test.py | 5 +- tools/clean.py | 2 +- 31 files changed, 617 insertions(+), 534 deletions(-) create mode 100644 mypy.ini diff --git a/.github/workflows/test-python.yml b/.github/workflows/test-python.yml index 28ee689966..3ad5aa79fe 100644 --- a/.github/workflows/test-python.yml +++ b/.github/workflows/test-python.yml @@ -26,3 +26,24 @@ jobs: - name: Run tests run: | python setup.py test + + mypytest: + name: Run mypy + runs-on: ubuntu-latest + strategy: + matrix: + python-version: ['3.7', '3.10'] + fail-fast: false + steps: + - uses: actions/checkout@v2 + - name: Set up Python ${{ matrix.python-version }} + uses: actions/setup-python@v2 + with: + python-version: ${{ matrix.python-version }} + - name: Install dependencies + run: | + python -m pip install -U pip mypy + pip install -e ".[zstd, srv]" + - name: Run mypy + run: | + mypy --install-types --non-interactive bson gridfs tools diff --git a/bson/__init__.py b/bson/__init__.py index 1efb1f7ff5..5be673cfc3 100644 --- a/bson/__init__.py +++ b/bson/__init__.py @@ -57,40 +57,42 @@ import calendar import datetime import itertools -import platform import re import struct import sys import uuid - -from codecs import (utf_8_decode as _utf_8_decode, - utf_8_encode as _utf_8_encode) +from codecs import utf_8_decode as _utf_8_decode # type: ignore +from codecs import utf_8_encode as _utf_8_encode # type: ignore from collections import abc as _abc +from typing import (TYPE_CHECKING, Any, BinaryIO, Callable, Dict, Generator, + Iterator, List, Mapping, MutableMapping, NoReturn, + Sequence, Tuple, Type, TypeVar, Union, cast) -from bson.binary import (Binary, UuidRepresentation, ALL_UUID_SUBTYPES, - OLD_UUID_SUBTYPE, - JAVA_LEGACY, CSHARP_LEGACY, STANDARD, - UUID_SUBTYPE) +from bson.binary import (ALL_UUID_SUBTYPES, CSHARP_LEGACY, JAVA_LEGACY, + OLD_UUID_SUBTYPE, STANDARD, UUID_SUBTYPE, Binary, + UuidRepresentation) from bson.code import Code -from bson.codec_options import ( - CodecOptions, DEFAULT_CODEC_OPTIONS, _raw_document_class) +from bson.codec_options import (DEFAULT_CODEC_OPTIONS, CodecOptions, + _raw_document_class) from bson.dbref import DBRef from bson.decimal128 import Decimal128 -from bson.errors import (InvalidBSON, - InvalidDocument, - InvalidStringData) +from bson.errors import InvalidBSON, InvalidDocument, InvalidStringData from bson.int64 import Int64 from bson.max_key import MaxKey from bson.min_key import MinKey from bson.objectid 
import ObjectId from bson.regex import Regex -from bson.son import SON, RE_TYPE +from bson.son import RE_TYPE, SON from bson.timestamp import Timestamp from bson.tz_util import utc +# Import RawBSONDocument for type-checking only to avoid circular dependency. +if TYPE_CHECKING: + from bson.raw_bson import RawBSONDocument + try: - from bson import _cbson + from bson import _cbson # type: ignore _USE_C = True except ImportError: _USE_C = False @@ -131,38 +133,38 @@ _UNPACK_TIMESTAMP_FROM = struct.Struct(" Tuple[Any, memoryview]: if isinstance(data, (bytes, bytearray)): return data, memoryview(data) view = memoryview(data) return view.tobytes(), view -def _raise_unknown_type(element_type, element_name): +def _raise_unknown_type(element_type: int, element_name: str) -> NoReturn: """Unknown type helper.""" raise InvalidBSON("Detected unknown BSON type %r for fieldname '%s'. Are " "you using the latest driver version?" % ( chr(element_type).encode(), element_name)) -def _get_int(data, view, position, dummy0, dummy1, dummy2): +def _get_int(data: Any, view: Any, position: int, dummy0: Any, dummy1: Any, dummy2: Any) -> Tuple[int, int]: """Decode a BSON int32 to python int.""" return _UNPACK_INT_FROM(data, position)[0], position + 4 -def _get_c_string(data, view, position, opts): +def _get_c_string(data: Any, view: Any, position: int, opts: Any) -> Tuple[str, int]: """Decode a BSON 'C' string to python str.""" end = data.index(b"\x00", position) return _utf_8_decode(view[position:end], opts.unicode_decode_error_handler, True)[0], end + 1 -def _get_float(data, view, position, dummy0, dummy1, dummy2): +def _get_float(data: Any, view: Any, position: int, dummy0: Any, dummy1: Any, dummy2: Any) -> Tuple[float, int]: """Decode a BSON double to python float.""" return _UNPACK_FLOAT_FROM(data, position)[0], position + 8 -def _get_string(data, view, position, obj_end, opts, dummy): +def _get_string(data: Any, view: Any, position: int, obj_end: int, opts: Any, dummy: Any) -> Tuple[str, int]: """Decode a BSON string to python str.""" length = _UNPACK_INT_FROM(data, position)[0] position += 4 @@ -175,7 +177,7 @@ def _get_string(data, view, position, obj_end, opts, dummy): opts.unicode_decode_error_handler, True)[0], end + 1 -def _get_object_size(data, position, obj_end): +def _get_object_size(data: Any, position: int, obj_end: int) -> Tuple[int, int]: """Validate and return a BSON document's size.""" try: obj_size = _UNPACK_INT_FROM(data, position)[0] @@ -192,7 +194,7 @@ def _get_object_size(data, position, obj_end): return obj_size, end -def _get_object(data, view, position, obj_end, opts, dummy): +def _get_object(data: Any, view: Any, position: int, obj_end: int, opts: Any, dummy: Any) -> Tuple[Any, int]: """Decode a BSON subdocument to opts.document_class or bson.dbref.DBRef.""" obj_size, end = _get_object_size(data, position, obj_end) if _raw_document_class(opts.document_class): @@ -211,7 +213,7 @@ def _get_object(data, view, position, obj_end, opts, dummy): return obj, position -def _get_array(data, view, position, obj_end, opts, element_name): +def _get_array(data: Any, view: Any, position: int, obj_end: int, opts: Any, element_name: str) -> Tuple[Any, int]: """Decode a BSON array to python list.""" size = _UNPACK_INT_FROM(data, position)[0] end = position + size - 1 @@ -220,7 +222,7 @@ def _get_array(data, view, position, obj_end, opts, element_name): position += 4 end -= 1 - result = [] + result: List[Any] = [] # Avoid doing global and attribute lookups in the loop. 
append = result.append @@ -250,7 +252,7 @@ def _get_array(data, view, position, obj_end, opts, element_name): return result, position + 1 -def _get_binary(data, view, position, obj_end, opts, dummy1): +def _get_binary(data: Any, view: Any, position: int, obj_end: int, opts: Any, dummy1: Any) -> Tuple[Union[Binary, uuid.UUID], int]: """Decode a BSON binary to bson.binary.Binary or python UUID.""" length, subtype = _UNPACK_LENGTH_SUBTYPE_FROM(data, position) position += 5 @@ -283,13 +285,13 @@ def _get_binary(data, view, position, obj_end, opts, dummy1): return value, end -def _get_oid(data, view, position, dummy0, dummy1, dummy2): +def _get_oid(data: Any, view: Any, position: int, dummy0: Any, dummy1: Any, dummy2: Any) -> Tuple[ObjectId, int]: """Decode a BSON ObjectId to bson.objectid.ObjectId.""" end = position + 12 return ObjectId(data[position:end]), end -def _get_boolean(data, view, position, dummy0, dummy1, dummy2): +def _get_boolean(data: Any, view: Any, position: int, dummy0: Any, dummy1: Any, dummy2: Any) -> Tuple[bool, int]: """Decode a BSON true/false to python True/False.""" end = position + 1 boolean_byte = data[position:end] @@ -300,19 +302,19 @@ def _get_boolean(data, view, position, dummy0, dummy1, dummy2): raise InvalidBSON('invalid boolean value: %r' % boolean_byte) -def _get_date(data, view, position, dummy0, opts, dummy1): +def _get_date(data: Any, view: Any, position: int, dummy0: int, opts: Any, dummy1: Any) -> Tuple[datetime.datetime, int]: """Decode a BSON datetime to python datetime.datetime.""" return _millis_to_datetime( _UNPACK_LONG_FROM(data, position)[0], opts), position + 8 -def _get_code(data, view, position, obj_end, opts, element_name): +def _get_code(data: Any, view: Any, position: int, obj_end: int, opts: Any, element_name: str) -> Tuple[Code, int]: """Decode a BSON code to bson.code.Code.""" code, position = _get_string(data, view, position, obj_end, opts, element_name) return Code(code), position -def _get_code_w_scope(data, view, position, obj_end, opts, element_name): +def _get_code_w_scope(data: Any, view: Any, position: int, obj_end: int, opts: Any, element_name: str) -> Tuple[Code, int]: """Decode a BSON code_w_scope to bson.code.Code.""" code_end = position + _UNPACK_INT_FROM(data, position)[0] code, position = _get_string( @@ -323,7 +325,7 @@ def _get_code_w_scope(data, view, position, obj_end, opts, element_name): return Code(code, scope), position -def _get_regex(data, view, position, dummy0, opts, dummy1): +def _get_regex(data: Any, view: Any, position: int, dummy0: Any, opts: Any, dummy1: Any) -> Tuple[Regex, int]: """Decode a BSON regex to bson.regex.Regex or a python pattern object.""" pattern, position = _get_c_string(data, view, position, opts) bson_flags, position = _get_c_string(data, view, position, opts) @@ -331,7 +333,7 @@ def _get_regex(data, view, position, dummy0, opts, dummy1): return bson_re, position -def _get_ref(data, view, position, obj_end, opts, element_name): +def _get_ref(data: Any, view: Any, position: int, obj_end: int, opts: Any, element_name: str) -> Tuple[DBRef, int]: """Decode (deprecated) BSON DBPointer to bson.dbref.DBRef.""" collection, position = _get_string( data, view, position, obj_end, opts, element_name) @@ -339,18 +341,18 @@ def _get_ref(data, view, position, obj_end, opts, element_name): return DBRef(collection, oid), position -def _get_timestamp(data, view, position, dummy0, dummy1, dummy2): +def _get_timestamp(data: Any, view: Any, position: int, dummy0: Any, dummy1: Any, dummy2: Any) -> 
Tuple[Timestamp, int]: """Decode a BSON timestamp to bson.timestamp.Timestamp.""" inc, timestamp = _UNPACK_TIMESTAMP_FROM(data, position) return Timestamp(timestamp, inc), position + 8 -def _get_int64(data, view, position, dummy0, dummy1, dummy2): +def _get_int64(data: Any, view: Any, position: int, dummy0: Any, dummy1: Any, dummy2: Any) -> Tuple[Int64, int]: """Decode a BSON int64 to bson.int64.Int64.""" return Int64(_UNPACK_LONG_FROM(data, position)[0]), position + 8 -def _get_decimal128(data, view, position, dummy0, dummy1, dummy2): +def _get_decimal128(data: Any, view: Any, position: int, dummy0: Any, dummy1: Any, dummy2: Any) -> Tuple[Decimal128, int]: """Decode a BSON decimal128 to bson.decimal128.Decimal128.""" end = position + 16 return Decimal128.from_bid(data[position:end]), end @@ -362,7 +364,7 @@ def _get_decimal128(data, view, position, dummy0, dummy1, dummy2): # - position: int, beginning of object in 'data' to decode # - obj_end: int, end of object to decode in 'data' if variable-length type # - opts: a CodecOptions -_ELEMENT_GETTER = { +_ELEMENT_GETTER: Dict[int, Callable[..., Tuple[Any, int]]]= { ord(BSONNUM): _get_float, ord(BSONSTR): _get_string, ord(BSONOBJ): _get_object, @@ -387,10 +389,10 @@ def _get_decimal128(data, view, position, dummy0, dummy1, dummy2): if _USE_C: - def _element_to_dict(data, view, position, obj_end, opts): + def _element_to_dict(data: Any, view: Any, position: int, obj_end: int, opts: Any) -> Any: return _cbson._element_to_dict(data, position, obj_end, opts) else: - def _element_to_dict(data, view, position, obj_end, opts): + def _element_to_dict(data: Any, view: Any, position: int, obj_end: int, opts: Any) -> Any: """Decode a single key, value pair.""" element_type = data[position] position += 1 @@ -410,12 +412,15 @@ def _element_to_dict(data, view, position, obj_end, opts): return element_name, value, position -def _raw_to_dict(data, position, obj_end, opts, result): +_T = TypeVar("_T", bound=MutableMapping[Any, Any]) + + +def _raw_to_dict(data: Any, position: int, obj_end: int, opts: Any, result: _T) -> _T: data, view = get_data_and_view(data) return _elements_to_dict(data, view, position, obj_end, opts, result) -def _elements_to_dict(data, view, position, obj_end, opts, result=None): +def _elements_to_dict(data: Any, view: Any, position: int, obj_end: int, opts: Any, result: Any = None) -> Any: """Decode a BSON document into result.""" if result is None: result = opts.document_class() @@ -428,7 +433,7 @@ def _elements_to_dict(data, view, position, obj_end, opts, result=None): return result -def _bson_to_dict(data, opts): +def _bson_to_dict(data: Any, opts: Any) -> Any: """Decode a BSON string to document_class.""" data, view = get_data_and_view(data) try: @@ -454,7 +459,7 @@ def _bson_to_dict(data, opts): _LIST_NAMES = tuple((str(i) + "\x00").encode('utf8') for i in range(1000)) -def gen_list_name(): +def gen_list_name() -> Generator[bytes, None, None]: """Generate "keys" for encoded lists in the sequence b"0\x00", b"1\x00", b"2\x00", ... 
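`gen_list_name`, retyped above as `Generator[bytes, None, None]`, is a small but hot helper: BSON encodes arrays as documents whose keys are the stringified indices, so the generator serves the first 1000 keys from the precomputed `_LIST_NAMES` tuple and only falls back to string formatting for very large arrays. A self-contained sketch of the same idea, simplified from the module code shown in the surrounding hunks::

    from itertools import count, islice

    _LIST_NAMES = tuple((str(i) + "\x00").encode("utf8") for i in range(1000))

    def gen_list_name():
        """Yield BSON array keys b'0\\x00', b'1\\x00', b'2\\x00', ..."""
        yield from _LIST_NAMES            # fast path: precomputed names
        for i in count(1000):             # rare path: very large arrays
            yield (str(i) + "\x00").encode("utf8")

    print(list(islice(gen_list_name(), 3)))  # [b'0\x00', b'1\x00', b'2\x00']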
@@ -469,7 +474,7 @@ def gen_list_name(): yield (str(next(counter)) + "\x00").encode('utf8') -def _make_c_string_check(string): +def _make_c_string_check(string: Union[str, bytes]) -> bytes: """Make a 'C' string, checking for embedded NUL characters.""" if isinstance(string, bytes): if b"\x00" in string: @@ -485,10 +490,10 @@ def _make_c_string_check(string): if "\x00" in string: raise InvalidDocument("BSON keys / regex patterns must not " "contain a NUL character") - return _utf_8_encode(string)[0] + b"\x00" + return cast(bytes, _utf_8_encode(string)[0]) + b"\x00" -def _make_c_string(string): +def _make_c_string(string: Union[str, bytes]) -> bytes: """Make a 'C' string.""" if isinstance(string, bytes): try: @@ -498,30 +503,30 @@ def _make_c_string(string): raise InvalidStringData("strings in documents must be valid " "UTF-8: %r" % string) else: - return _utf_8_encode(string)[0] + b"\x00" + return cast(bytes, _utf_8_encode(string)[0]) + b"\x00" -def _make_name(string): +def _make_name(string: str) -> bytes: """Make a 'C' string suitable for a BSON key.""" # Keys can only be text in python 3. if "\x00" in string: raise InvalidDocument("BSON keys / regex patterns must not " "contain a NUL character") - return _utf_8_encode(string)[0] + b"\x00" + return cast(bytes, _utf_8_encode(string)[0]) + b"\x00" -def _encode_float(name, value, dummy0, dummy1): +def _encode_float(name: bytes, value: float, dummy0: Any, dummy1: Any) -> bytes: """Encode a float.""" return b"\x01" + name + _PACK_FLOAT(value) -def _encode_bytes(name, value, dummy0, dummy1): +def _encode_bytes(name: bytes, value: bytes, dummy0: Any, dummy1: Any) -> bytes: """Encode a python bytes.""" # Python3 special case. Store 'bytes' as BSON binary subtype 0. return b"\x05" + name + _PACK_INT(len(value)) + b"\x00" + value -def _encode_mapping(name, value, check_keys, opts): +def _encode_mapping(name: bytes, value: Any, check_keys: bool, opts: Any) -> bytes: """Encode a mapping type.""" if _raw_document_class(value): return b'\x03' + name + value.raw @@ -530,7 +535,7 @@ def _encode_mapping(name, value, check_keys, opts): return b"\x03" + name + _PACK_INT(len(data) + 5) + data + b"\x00" -def _encode_dbref(name, value, check_keys, opts): +def _encode_dbref(name: bytes, value: DBRef, check_keys: bool, opts: Any) -> bytes: """Encode bson.dbref.DBRef.""" buf = bytearray(b"\x03" + name + b"\x00\x00\x00\x00") begin = len(buf) - 4 @@ -550,7 +555,7 @@ def _encode_dbref(name, value, check_keys, opts): return bytes(buf) -def _encode_list(name, value, check_keys, opts): +def _encode_list(name: bytes, value: Sequence[Any], check_keys: bool, opts: Any) -> bytes: """Encode a list/tuple.""" lname = gen_list_name() data = b"".join([_name_value_to_bson(next(lname), item, @@ -559,48 +564,48 @@ def _encode_list(name, value, check_keys, opts): return b"\x04" + name + _PACK_INT(len(data) + 5) + data + b"\x00" -def _encode_text(name, value, dummy0, dummy1): +def _encode_text(name: bytes, value: str, dummy0: Any, dummy1: Any) -> bytes: """Encode a python str.""" value = _utf_8_encode(value)[0] - return b"\x02" + name + _PACK_INT(len(value) + 1) + value + b"\x00" + return b"\x02" + name + _PACK_INT(len(value) + 1) + value + b"\x00" # type: ignore -def _encode_binary(name, value, dummy0, dummy1): +def _encode_binary(name: bytes, value: Binary, dummy0: Any, dummy1: Any) -> bytes: """Encode bson.binary.Binary.""" subtype = value.subtype if subtype == 2: - value = _PACK_INT(len(value)) + value + value = _PACK_INT(len(value)) + value # type: ignore return b"\x05" + name 
+ _PACK_LENGTH_SUBTYPE(len(value), subtype) + value -def _encode_uuid(name, value, dummy, opts): +def _encode_uuid(name: bytes, value: uuid.UUID, dummy: Any, opts: Any) -> bytes: """Encode uuid.UUID.""" uuid_representation = opts.uuid_representation binval = Binary.from_uuid(value, uuid_representation=uuid_representation) return _encode_binary(name, binval, dummy, opts) -def _encode_objectid(name, value, dummy0, dummy1): +def _encode_objectid(name: bytes, value: ObjectId, dummy: Any, dummy1: Any) -> bytes: """Encode bson.objectid.ObjectId.""" return b"\x07" + name + value.binary -def _encode_bool(name, value, dummy0, dummy1): +def _encode_bool(name: bytes, value: bool, dummy0: Any, dummy1: Any) -> bytes: """Encode a python boolean (True/False).""" return b"\x08" + name + (value and b"\x01" or b"\x00") -def _encode_datetime(name, value, dummy0, dummy1): +def _encode_datetime(name: bytes, value: datetime.datetime, dummy0: Any, dummy1: Any) -> bytes: """Encode datetime.datetime.""" millis = _datetime_to_millis(value) return b"\x09" + name + _PACK_LONG(millis) -def _encode_none(name, dummy0, dummy1, dummy2): +def _encode_none(name: bytes, dummy0: Any, dummy1: Any, dummy2: Any) -> bytes: """Encode python None.""" return b"\x0A" + name -def _encode_regex(name, value, dummy0, dummy1): +def _encode_regex(name: bytes, value: Regex, dummy0: Any, dummy1: Any) -> bytes: """Encode a python regex or bson.regex.Regex.""" flags = value.flags # Python 3 common case @@ -626,7 +631,7 @@ def _encode_regex(name, value, dummy0, dummy1): return b"\x0B" + name + _make_c_string_check(value.pattern) + sflags -def _encode_code(name, value, dummy, opts): +def _encode_code(name: bytes, value: Code, dummy: Any, opts: Any) -> bytes: """Encode bson.code.Code.""" cstring = _make_c_string(value) cstrlen = len(cstring) @@ -637,7 +642,7 @@ def _encode_code(name, value, dummy, opts): return b"\x0F" + name + full_length + _PACK_INT(cstrlen) + cstring + scope -def _encode_int(name, value, dummy0, dummy1): +def _encode_int(name: bytes, value: int, dummy0: Any, dummy1: Any) -> bytes: """Encode a python int.""" if -2147483648 <= value <= 2147483647: return b"\x10" + name + _PACK_INT(value) @@ -648,12 +653,12 @@ def _encode_int(name, value, dummy0, dummy1): raise OverflowError("BSON can only handle up to 8-byte ints") -def _encode_timestamp(name, value, dummy0, dummy1): +def _encode_timestamp(name: bytes, value: Any, dummy0: Any, dummy1: Any) -> bytes: """Encode bson.timestamp.Timestamp.""" return b"\x11" + name + _PACK_TIMESTAMP(value.inc, value.time) -def _encode_long(name, value, dummy0, dummy1): +def _encode_long(name: bytes, value: Any, dummy0: Any, dummy1: Any) -> bytes: """Encode a python long (python 2.x)""" try: return b"\x12" + name + _PACK_LONG(value) @@ -661,17 +666,17 @@ def _encode_long(name, value, dummy0, dummy1): raise OverflowError("BSON can only handle up to 8-byte ints") -def _encode_decimal128(name, value, dummy0, dummy1): +def _encode_decimal128(name: bytes, value: Decimal128, dummy0: Any, dummy1: Any) -> bytes: """Encode bson.decimal128.Decimal128.""" return b"\x13" + name + value.bid -def _encode_minkey(name, dummy0, dummy1, dummy2): +def _encode_minkey(name: bytes, dummy0: Any, dummy1: Any, dummy2: Any) -> bytes: """Encode bson.min_key.MinKey.""" return b"\xFF" + name -def _encode_maxkey(name, dummy0, dummy1, dummy2): +def _encode_maxkey(name: bytes, dummy0: Any, dummy1: Any, dummy2: Any) -> bytes: """Encode bson.max_key.MaxKey.""" return b"\x7F" + name @@ -726,14 +731,14 @@ def _encode_maxkey(name, dummy0, 
dummy1, dummy2): _BUILT_IN_TYPES = tuple(t for t in _ENCODERS) -def _name_value_to_bson(name, value, check_keys, opts, - in_custom_call=False, - in_fallback_call=False): +def _name_value_to_bson(name: bytes, value: Any, check_keys: bool, opts: Any, + in_custom_call: bool = False, + in_fallback_call: bool = False) -> bytes: """Encode a single name, value pair.""" # First see if the type is already cached. KeyError will only ever # happen once per subtype. try: - return _ENCODERS[type(value)](name, value, check_keys, opts) + return _ENCODERS[type(value)](name, value, check_keys, opts) # type: ignore except KeyError: pass @@ -745,7 +750,7 @@ def _name_value_to_bson(name, value, check_keys, opts, func = _MARKERS[marker] # Cache this type for faster subsequent lookup. _ENCODERS[type(value)] = func - return func(name, value, check_keys, opts) + return func(name, value, check_keys, opts) # type: ignore # Third, check if a type encoder is registered for this type. # Note that subtypes of registered custom types are not auto-encoded. @@ -765,7 +770,7 @@ def _name_value_to_bson(name, value, check_keys, opts, func = _ENCODERS[base] # Cache this type for faster subsequent lookup. _ENCODERS[type(value)] = func - return func(name, value, check_keys, opts) + return func(name, value, check_keys, opts) # type: ignore # As a last resort, try using the fallback encoder, if the user has # provided one. @@ -779,7 +784,7 @@ def _name_value_to_bson(name, value, check_keys, opts, "cannot encode object: %r, of type: %r" % (value, type(value))) -def _element_to_bson(key, value, check_keys, opts): +def _element_to_bson(key: Any, value: Any, check_keys: bool, opts: Any) -> bytes: """Encode a single key, value pair.""" if not isinstance(key, str): raise InvalidDocument("documents must have only string keys, " @@ -794,10 +799,10 @@ def _element_to_bson(key, value, check_keys, opts): return _name_value_to_bson(name, value, check_keys, opts) -def _dict_to_bson(doc, check_keys, opts, top_level=True): +def _dict_to_bson(doc: Any, check_keys: bool, opts: Any, top_level: bool = True) -> bytes: """Encode a document to BSON.""" if _raw_document_class(doc): - return doc.raw + return cast(bytes, doc.raw) try: elements = [] if top_level and "_id" in doc: @@ -816,7 +821,7 @@ def _dict_to_bson(doc, check_keys, opts, top_level=True): _dict_to_bson = _cbson._dict_to_bson -def _millis_to_datetime(millis, opts): +def _millis_to_datetime(millis: int, opts: Any) -> datetime.datetime: """Convert milliseconds since epoch UTC to datetime.""" diff = ((millis % 1000) + 1000) % 1000 seconds = (millis - diff) // 1000 @@ -832,10 +837,10 @@ def _millis_to_datetime(millis, opts): microseconds=micros) -def _datetime_to_millis(dtm): +def _datetime_to_millis(dtm: datetime.datetime) -> int: """Convert datetime to milliseconds since epoch UTC.""" if dtm.utcoffset() is not None: - dtm = dtm - dtm.utcoffset() + dtm = dtm - dtm.utcoffset() # type: ignore return int(calendar.timegm(dtm.timetuple()) * 1000 + dtm.microsecond // 1000) @@ -844,7 +849,11 @@ def _datetime_to_millis(dtm): "codec_options must be an instance of CodecOptions") -def encode(document, check_keys=False, codec_options=DEFAULT_CODEC_OPTIONS): +_DocumentIn = Mapping[str, Any] +_DocumentOut = Union[MutableMapping[str, Any], "RawBSONDocument"] + + +def encode(document: _DocumentIn, check_keys: bool = False, codec_options: CodecOptions = DEFAULT_CODEC_OPTIONS) -> bytes: """Encode a document to BSON. A document can be any mapping type (like :class:`dict`). 
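With the annotations above, `encode` is typed as taking any `Mapping[str, Any]` and returning `bytes`, so mypy can flag a badly typed document at the call site rather than at runtime. A short usage sketch against the public API (nothing here is specific to this patch)::

    import bson
    from bson.codec_options import CodecOptions
    from bson.son import SON

    raw = bson.encode(SON([("_id", 1), ("name", "a")]), check_keys=True)
    assert isinstance(raw, bytes)
    doc = bson.decode(raw, CodecOptions(document_class=SON))
    assert list(doc) == ["_id", "name"]   # SON preserves key order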
@@ -871,7 +880,7 @@ def encode(document, check_keys=False, codec_options=DEFAULT_CODEC_OPTIONS): return _dict_to_bson(document, check_keys, codec_options) -def decode(data, codec_options=DEFAULT_CODEC_OPTIONS): +def decode(data: bytes, codec_options: CodecOptions = DEFAULT_CODEC_OPTIONS) -> _DocumentOut: """Decode BSON to a document. By default, returns a BSON document represented as a Python @@ -903,7 +912,7 @@ def decode(data, codec_options=DEFAULT_CODEC_OPTIONS): return _bson_to_dict(data, codec_options) -def decode_all(data, codec_options=DEFAULT_CODEC_OPTIONS): +def decode_all(data: bytes, codec_options: CodecOptions = DEFAULT_CODEC_OPTIONS) -> List[_DocumentOut]: """Decode BSON data to multiple documents. `data` must be a bytes-like object implementing the buffer protocol that @@ -967,7 +976,7 @@ def decode_all(data, codec_options=DEFAULT_CODEC_OPTIONS): decode_all = _cbson.decode_all -def _decode_selective(rawdoc, fields, codec_options): +def _decode_selective(rawdoc: Any, fields: Any, codec_options: Any) -> Mapping[Any, Any]: if _raw_document_class(codec_options.document_class): # If document_class is RawBSONDocument, use vanilla dictionary for # decoding command response. @@ -986,7 +995,7 @@ def _decode_selective(rawdoc, fields, codec_options): return doc -def _convert_raw_document_lists_to_streams(document): +def _convert_raw_document_lists_to_streams(document: Any) -> None: cursor = document.get('cursor') if cursor: for key in ('firstBatch', 'nextBatch'): @@ -996,7 +1005,7 @@ def _convert_raw_document_lists_to_streams(document): cursor[key] = [stream] -def _decode_all_selective(data, codec_options, fields): +def _decode_all_selective(data: Any, codec_options: CodecOptions, fields: Any) -> List[Any]: """Decode BSON data to a single document while using user-provided custom decoding logic. @@ -1033,7 +1042,7 @@ def _decode_all_selective(data, codec_options, fields): return [_decode_selective(_doc, fields, codec_options,)] -def decode_iter(data, codec_options=DEFAULT_CODEC_OPTIONS): +def decode_iter(data: bytes, codec_options: CodecOptions = DEFAULT_CODEC_OPTIONS) -> Iterator[_DocumentOut]: """Decode BSON data to multiple documents as a generator. Works similarly to the decode_all function, but yields one document at a @@ -1066,7 +1075,7 @@ def decode_iter(data, codec_options=DEFAULT_CODEC_OPTIONS): yield _bson_to_dict(elements, codec_options) -def decode_file_iter(file_obj, codec_options=DEFAULT_CODEC_OPTIONS): +def decode_file_iter(file_obj: BinaryIO, codec_options: CodecOptions = DEFAULT_CODEC_OPTIONS) -> Iterator[_DocumentOut]: """Decode bson data from a file to multiple documents as a generator. Works similarly to the decode_all function, but reads from the file object @@ -1095,7 +1104,7 @@ def decode_file_iter(file_obj, codec_options=DEFAULT_CODEC_OPTIONS): yield _bson_to_dict(elements, codec_options) -def is_valid(bson): +def is_valid(bson: bytes) -> bool: """Check that the given string represents valid :class:`BSON` data. Raises :class:`TypeError` if `bson` is not an instance of @@ -1124,8 +1133,8 @@ class BSON(bytes): """ @classmethod - def encode(cls, document, check_keys=False, - codec_options=DEFAULT_CODEC_OPTIONS): + def encode(cls: Type["BSON"], document: _DocumentIn, check_keys: bool = False, + codec_options: CodecOptions = DEFAULT_CODEC_OPTIONS) -> "BSON": """Encode a document to a new :class:`BSON` instance. A document can be any mapping type (like :class:`dict`). 
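The `decode_iter` and `decode_file_iter` hunks above keep their lazy contracts; the annotations simply surface them as `Iterator[_DocumentOut]`, so consumers type-check as plain iterators. For example, streaming a dump file one document at a time (the file name is illustrative)::

    import bson

    with open("collection.bson", "rb") as dump:      # e.g. a mongodump output file
        for doc in bson.decode_file_iter(dump):      # lazily yields one document per iteration
            print(doc.get("_id"))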
@@ -1149,7 +1158,7 @@ def encode(cls, document, check_keys=False, """ return cls(encode(document, check_keys, codec_options)) - def decode(self, codec_options=DEFAULT_CODEC_OPTIONS): + def decode(self, codec_options: CodecOptions = DEFAULT_CODEC_OPTIONS) -> _DocumentOut: # type: ignore[override] """Decode this BSON data. By default, returns a BSON document represented as a Python @@ -1183,7 +1192,7 @@ def decode(self, codec_options=DEFAULT_CODEC_OPTIONS): return decode(self, codec_options) -def has_c(): +def has_c() -> bool: """Is the C extension installed? """ return _USE_C diff --git a/bson/_helpers.py b/bson/_helpers.py index 6449705eb2..2d89789586 100644 --- a/bson/_helpers.py +++ b/bson/_helpers.py @@ -15,14 +15,15 @@ """Setstate and getstate functions for objects with __slots__, allowing compatibility with default pickling protocol """ +from typing import Any, Mapping -def _setstate_slots(self, state): +def _setstate_slots(self: Any, state: Any) -> None: for slot, value in state.items(): setattr(self, slot, value) -def _mangle_name(name, prefix): +def _mangle_name(name: str, prefix: str) -> str: if name.startswith("__"): prefix = "_"+prefix else: @@ -30,7 +31,7 @@ def _mangle_name(name, prefix): return prefix + name -def _getstate_slots(self): +def _getstate_slots(self: Any) -> Mapping[Any, Any]: prefix = self.__class__.__name__ ret = dict() for name in self.__slots__: diff --git a/bson/binary.py b/bson/binary.py index 50cfbd8439..53d5419b49 100644 --- a/bson/binary.py +++ b/bson/binary.py @@ -12,8 +12,8 @@ # See the License for the specific language governing permissions and # limitations under the License. +from typing import Any, Tuple, Type from uuid import UUID -from warnings import warn """Tools for representing BSON binary data. """ @@ -69,7 +69,7 @@ class UuidRepresentation: code. When decoding a BSON binary field with a UUID subtype, a :class:`~bson.binary.Binary` instance will be returned instead of a :class:`uuid.UUID` instance. - + See :ref:`unspecified-representation-details` for details. .. versionadded:: 3.11 @@ -81,7 +81,7 @@ class UuidRepresentation: :class:`uuid.UUID` instances will automatically be encoded to and decoded from BSON binary, using RFC-4122 byte order with binary subtype :data:`UUID_SUBTYPE`. - + See :ref:`standard-representation-details` for details. .. versionadded:: 3.11 @@ -93,7 +93,7 @@ class UuidRepresentation: :class:`uuid.UUID` instances will automatically be encoded to and decoded from BSON binary, using RFC-4122 byte order with binary subtype :data:`OLD_UUID_SUBTYPE`. - + See :ref:`python-legacy-representation-details` for details. .. versionadded:: 3.11 @@ -105,7 +105,7 @@ class UuidRepresentation: :class:`uuid.UUID` instances will automatically be encoded to and decoded from BSON binary subtype :data:`OLD_UUID_SUBTYPE`, using the Java driver's legacy byte order. - + See :ref:`java-legacy-representation-details` for details. .. versionadded:: 3.11 @@ -117,7 +117,7 @@ class UuidRepresentation: :class:`uuid.UUID` instances will automatically be encoded to and decoded from BSON binary subtype :data:`OLD_UUID_SUBTYPE`, using the C# driver's legacy byte order. - + See :ref:`csharp-legacy-representation-details` for details. .. 
versionadded:: 3.11 @@ -153,11 +153,12 @@ class UuidRepresentation: """ ALL_UUID_SUBTYPES = (OLD_UUID_SUBTYPE, UUID_SUBTYPE) -ALL_UUID_REPRESENTATIONS = (UuidRepresentation.UNSPECIFIED, - UuidRepresentation.STANDARD, - UuidRepresentation.PYTHON_LEGACY, - UuidRepresentation.JAVA_LEGACY, - UuidRepresentation.CSHARP_LEGACY) +ALL_UUID_REPRESENTATIONS = ( + UuidRepresentation.UNSPECIFIED, + UuidRepresentation.STANDARD, + UuidRepresentation.PYTHON_LEGACY, + UuidRepresentation.JAVA_LEGACY, + UuidRepresentation.CSHARP_LEGACY) UUID_REPRESENTATION_NAMES = { UuidRepresentation.UNSPECIFIED: 'UuidRepresentation.UNSPECIFIED', UuidRepresentation.STANDARD: 'UuidRepresentation.STANDARD', @@ -208,8 +209,9 @@ class Binary(bytes): """ _type_marker = 5 + __subtype: int - def __new__(cls, data, subtype=BINARY_SUBTYPE): + def __new__(cls: Type["Binary"], data: bytes, subtype: int = BINARY_SUBTYPE) -> "Binary": if not isinstance(subtype, int): raise TypeError("subtype must be an instance of int") if subtype >= 256 or subtype < 0: @@ -220,7 +222,7 @@ def __new__(cls, data, subtype=BINARY_SUBTYPE): return self @classmethod - def from_uuid(cls, uuid, uuid_representation=UuidRepresentation.STANDARD): + def from_uuid(cls: Type["Binary"], uuid: UUID, uuid_representation: int = UuidRepresentation.STANDARD) -> "Binary": """Create a BSON Binary object from a Python UUID. Creates a :class:`~bson.binary.Binary` object from a @@ -271,7 +273,7 @@ def from_uuid(cls, uuid, uuid_representation=UuidRepresentation.STANDARD): return cls(payload, subtype) - def as_uuid(self, uuid_representation=UuidRepresentation.STANDARD): + def as_uuid(self, uuid_representation: int = UuidRepresentation.STANDARD) -> UUID: """Create a Python UUID from this BSON Binary object. Decodes this binary object as a native :class:`uuid.UUID` instance @@ -316,19 +318,19 @@ def as_uuid(self, uuid_representation=UuidRepresentation.STANDARD): self.subtype, UUID_REPRESENTATION_NAMES[uuid_representation])) @property - def subtype(self): + def subtype(self) -> int: """Subtype of this binary data. """ return self.__subtype - def __getnewargs__(self): + def __getnewargs__(self) -> Tuple[bytes, int]: # type: ignore[override] # Work around http://bugs.python.org/issue7382 data = super(Binary, self).__getnewargs__()[0] if not isinstance(data, bytes): data = data.encode('latin-1') return data, self.__subtype - def __eq__(self, other): + def __eq__(self, other : Any) -> bool: if isinstance(other, Binary): return ((self.__subtype, bytes(self)) == (other.subtype, bytes(other))) @@ -337,10 +339,10 @@ def __eq__(self, other): # subclass of str... 
return False - def __hash__(self): + def __hash__(self) -> int: return super(Binary, self).__hash__() ^ hash(self.__subtype) - def __ne__(self, other): + def __ne__(self, other: Any) -> bool: return not self == other def __repr__(self): diff --git a/bson/code.py b/bson/code.py index 3d8b4904da..6f4b1838d8 100644 --- a/bson/code.py +++ b/bson/code.py @@ -16,6 +16,7 @@ """ from collections.abc import Mapping as _Mapping +from typing import Any, Mapping, Optional, Type, Union class Code(str): @@ -47,15 +48,16 @@ class Code(str): """ _type_marker = 13 + __scope: Union[Mapping[str, Any], None] - def __new__(cls, code, scope=None, **kwargs): + def __new__(cls: Type["Code"], code: Union[str, "Code"], scope: Optional[Mapping[str, Any]] = None, **kwargs: Any) -> "Code": if not isinstance(code, str): raise TypeError("code must be an instance of str") self = str.__new__(cls, code) try: - self.__scope = code.scope + self.__scope = code.scope # type: ignore except AttributeError: self.__scope = None @@ -63,20 +65,20 @@ def __new__(cls, code, scope=None, **kwargs): if not isinstance(scope, _Mapping): raise TypeError("scope must be an instance of dict") if self.__scope is not None: - self.__scope.update(scope) + self.__scope.update(scope) # type: ignore else: self.__scope = scope if kwargs: if self.__scope is not None: - self.__scope.update(kwargs) + self.__scope.update(kwargs) # type: ignore else: self.__scope = kwargs return self @property - def scope(self): + def scope(self) -> Optional[Mapping[str, Any]]: """Scope dictionary for this instance or ``None``. """ return self.__scope @@ -84,12 +86,12 @@ def scope(self): def __repr__(self): return "Code(%s, %r)" % (str.__repr__(self), self.__scope) - def __eq__(self, other): + def __eq__(self, other: Any) -> bool: if isinstance(other, Code): return (self.__scope, str(self)) == (other.__scope, str(other)) return False - __hash__ = None + __hash__: Any = None - def __ne__(self, other): + def __ne__(self, other: Any) -> bool: return not self == other diff --git a/bson/codec_options.py b/bson/codec_options.py index 6fcffcc17a..81e79158b4 100644 --- a/bson/codec_options.py +++ b/bson/codec_options.py @@ -16,22 +16,26 @@ import abc import datetime -import warnings - from collections import namedtuple from collections.abc import MutableMapping as _MutableMapping +from typing import (TYPE_CHECKING, Any, Callable, Dict, Generic, Iterable, + MutableMapping, Optional, Type, TypeVar, Union, cast) + +from bson.binary import (ALL_UUID_REPRESENTATIONS, UUID_REPRESENTATION_NAMES, + UuidRepresentation) + +# Import RawBSONDocument for type-checking only to avoid circular dependency. +if TYPE_CHECKING: + from bson.raw_bson import RawBSONDocument -from bson.binary import (UuidRepresentation, - ALL_UUID_REPRESENTATIONS, - UUID_REPRESENTATION_NAMES) -def _abstractproperty(func): +def _abstractproperty(func: Callable[..., Any]) -> property: return property(abc.abstractmethod(func)) _RAW_BSON_DOCUMENT_MARKER = 101 -def _raw_document_class(document_class): +def _raw_document_class(document_class: Any) -> bool: """Determine if a document_class is a RawBSONDocument class.""" marker = getattr(document_class, '_type_marker', None) return marker == _RAW_BSON_DOCUMENT_MARKER @@ -47,12 +51,12 @@ class TypeEncoder(abc.ABC): See :ref:`custom-type-type-codec` documentation for an example. 
""" @_abstractproperty - def python_type(self): + def python_type(self) -> Any: """The Python type to be converted into something serializable.""" pass @abc.abstractmethod - def transform_python(self, value): + def transform_python(self, value: Any) -> Any: """Convert the given Python object into something serializable.""" pass @@ -67,12 +71,12 @@ class TypeDecoder(abc.ABC): See :ref:`custom-type-type-codec` documentation for an example. """ @_abstractproperty - def bson_type(self): + def bson_type(self) -> Any: """The BSON type to be converted into our own type.""" pass @abc.abstractmethod - def transform_bson(self, value): + def transform_bson(self, value: Any) -> Any: """Convert the given BSON value into our own type.""" pass @@ -92,6 +96,9 @@ class TypeCodec(TypeEncoder, TypeDecoder): pass +_Codec = Union[TypeEncoder, TypeDecoder, TypeCodec] +_Fallback = Callable[[Any], Any] + class TypeRegistry(object): """Encapsulates type codecs used in encoding and / or decoding BSON, as well as the fallback encoder. Type registries cannot be modified after @@ -118,7 +125,7 @@ class TypeRegistry(object): :mod:`bson` can encode. See :ref:`fallback-encoder-callable` documentation for an example. """ - def __init__(self, type_codecs=None, fallback_encoder=None): + def __init__(self, type_codecs: Optional[Iterable[_Codec]] = None, fallback_encoder: Optional[_Fallback] = None) -> None: self.__type_codecs = list(type_codecs or []) self._fallback_encoder = fallback_encoder self._encoder_map = {} @@ -144,10 +151,10 @@ def __init__(self, type_codecs=None, fallback_encoder=None): TypeEncoder.__name__, TypeDecoder.__name__, TypeCodec.__name__, codec)) - def _validate_type_encoder(self, codec): + def _validate_type_encoder(self, codec: _Codec) -> None: from bson import _BUILT_IN_TYPES for pytype in _BUILT_IN_TYPES: - if issubclass(codec.python_type, pytype): + if issubclass(cast(TypeCodec, codec).python_type, pytype): err_msg = ("TypeEncoders cannot change how built-in types are " "encoded (encoder %s transforms type %s)" % (codec, pytype)) @@ -158,7 +165,7 @@ def __repr__(self): self.__class__.__name__, self.__type_codecs, self._fallback_encoder)) - def __eq__(self, other): + def __eq__(self, other: Any) -> Any: if not isinstance(other, type(self)): return NotImplemented return ((self._decoder_map == other._decoder_map) and @@ -166,7 +173,7 @@ def __eq__(self, other): (self._fallback_encoder == other._fallback_encoder)) -_options_base = namedtuple( +_options_base = namedtuple( # type: ignore 'CodecOptions', ('document_class', 'tz_aware', 'uuid_representation', 'unicode_decode_error_handler', 'tzinfo', 'type_registry')) @@ -247,12 +254,12 @@ class CodecOptions(_options_base): retrieved from the server will be modified in the client application and stored back to the server. 
""" - - def __new__(cls, document_class=dict, - tz_aware=False, - uuid_representation=UuidRepresentation.UNSPECIFIED, - unicode_decode_error_handler="strict", - tzinfo=None, type_registry=None): + def __new__(cls: Type["CodecOptions"], document_class: Union[Type[MutableMapping], Type["RawBSONDocument"]] = dict, + tz_aware: bool = False, + uuid_representation: Optional[int] = UuidRepresentation.UNSPECIFIED, + unicode_decode_error_handler: Optional[str] = "strict", + tzinfo: Optional[datetime.tzinfo] = None, + type_registry: Optional[TypeRegistry] = None) -> "CodecOptions": if not (issubclass(document_class, _MutableMapping) or _raw_document_class(document_class)): raise TypeError("document_class must be dict, bson.son.SON, " @@ -263,7 +270,7 @@ def __new__(cls, document_class=dict, if uuid_representation not in ALL_UUID_REPRESENTATIONS: raise ValueError("uuid_representation must be a value " "from bson.binary.UuidRepresentation") - if not isinstance(unicode_decode_error_handler, (str, None)): + if not isinstance(unicode_decode_error_handler, (str, None)): # type: ignore raise ValueError("unicode_decode_error_handler must be a string " "or None") if tzinfo is not None: @@ -283,7 +290,7 @@ def __new__(cls, document_class=dict, cls, (document_class, tz_aware, uuid_representation, unicode_decode_error_handler, tzinfo, type_registry)) - def _arguments_repr(self): + def _arguments_repr(self) -> str: """Representation of the arguments used to create this object.""" document_class_repr = ( 'dict' if self.document_class is dict @@ -299,7 +306,7 @@ def _arguments_repr(self): self.unicode_decode_error_handler, self.tzinfo, self.type_registry)) - def _options_dict(self): + def _options_dict(self) -> Dict[str, Any]: """Dictionary of the arguments used to create this object.""" # TODO: PYTHON-2442 use _asdict() instead return { @@ -313,7 +320,7 @@ def _options_dict(self): def __repr__(self): return '%s(%s)' % (self.__class__.__name__, self._arguments_repr()) - def with_options(self, **kwargs): + def with_options(self, **kwargs: Any) -> "CodecOptions": """Make a copy of this CodecOptions, overriding some options:: >>> from bson.codec_options import DEFAULT_CODEC_OPTIONS @@ -330,10 +337,10 @@ def with_options(self, **kwargs): return CodecOptions(**opts) -DEFAULT_CODEC_OPTIONS = CodecOptions() +DEFAULT_CODEC_OPTIONS: CodecOptions = CodecOptions() -def _parse_codec_options(options): +def _parse_codec_options(options: Any) -> CodecOptions: """Parse BSON codec options.""" kwargs = {} for k in set(options) & {'document_class', 'tz_aware', diff --git a/bson/dbref.py b/bson/dbref.py index 24e97a6698..92a3a68367 100644 --- a/bson/dbref.py +++ b/bson/dbref.py @@ -15,9 +15,11 @@ """Tools for manipulating DBRefs (references to MongoDB documents).""" from copy import deepcopy +from typing import Any, Mapping, Optional -from bson.son import SON from bson._helpers import _getstate_slots, _setstate_slots +from bson.son import SON + class DBRef(object): """A reference to a document stored in MongoDB. @@ -28,7 +30,7 @@ class DBRef(object): # DBRef isn't actually a BSON "type" so this number was arbitrarily chosen. _type_marker = 100 - def __init__(self, collection, id, database=None, _extra={}, **kwargs): + def __init__(self, collection: str, id: Any, database: Optional[str] = None, _extra: Mapping[str, Any] = {}, **kwargs: Any) -> None: """Initialize a new :class:`DBRef`. 
Raises :class:`TypeError` if `collection` or `database` is not @@ -58,32 +60,32 @@ def __init__(self, collection, id, database=None, _extra={}, **kwargs): self.__kwargs = kwargs @property - def collection(self): + def collection(self) -> str: """Get the name of this DBRef's collection. """ return self.__collection @property - def id(self): + def id(self) -> Any: """Get this DBRef's _id. """ return self.__id @property - def database(self): + def database(self) -> Optional[str]: """Get the name of this DBRef's database. Returns None if this DBRef doesn't specify a database. """ return self.__database - def __getattr__(self, key): + def __getattr__(self, key: Any) -> Any: try: return self.__kwargs[key] except KeyError: raise AttributeError(key) - def as_doc(self): + def as_doc(self) -> SON[str, Any]: """Get the SON document representation of this DBRef. Generally not needed by application developers @@ -103,7 +105,7 @@ def __repr__(self): return "DBRef(%r, %r, %r%s)" % (self.collection, self.id, self.database, extra) - def __eq__(self, other): + def __eq__(self, other: Any) -> bool: if isinstance(other, DBRef): us = (self.__database, self.__collection, self.__id, self.__kwargs) @@ -112,15 +114,15 @@ def __eq__(self, other): return us == them return NotImplemented - def __ne__(self, other): + def __ne__(self, other: Any) -> bool: return not self == other - def __hash__(self): + def __hash__(self) -> int: """Get a hash value for this :class:`DBRef`.""" return hash((self.__collection, self.__id, self.__database, tuple(sorted(self.__kwargs.items())))) - def __deepcopy__(self, memo): + def __deepcopy__(self, memo: Any) -> "DBRef": """Support function for `copy.deepcopy()`.""" return DBRef(deepcopy(self.__collection, memo), deepcopy(self.__id, memo), diff --git a/bson/decimal128.py b/bson/decimal128.py index ede728bbab..bbf5d326e4 100644 --- a/bson/decimal128.py +++ b/bson/decimal128.py @@ -19,8 +19,7 @@ import decimal import struct -import sys - +from typing import Any, Sequence, Tuple, Type, Union _PACK_64 = struct.Struct(" decimal.Context: """Returns an instance of :class:`decimal.Context` appropriate for working with IEEE-754 128-bit decimal floating point values. """ opts = _CTX_OPTIONS.copy() opts['traps'] = [] - return decimal.Context(**opts) + return decimal.Context(**opts) # type: ignore -def _decimal_to_128(value): +def _decimal_to_128(value: _VALUE_OPTIONS) -> Tuple[int, int]: """Converts a decimal.Decimal to BID (high bits, low bits). :Parameters: @@ -215,7 +215,7 @@ class Decimal128(object): _type_marker = 19 - def __init__(self, value): + def __init__(self, value: _VALUE_OPTIONS) -> None: if isinstance(value, (str, decimal.Decimal)): self.__high, self.__low = _decimal_to_128(value) elif isinstance(value, (list, tuple)): @@ -223,11 +223,11 @@ def __init__(self, value): raise ValueError('Invalid size for creation of Decimal128 ' 'from list or tuple. Must have exactly 2 ' 'elements.') - self.__high, self.__low = value + self.__high, self.__low = value # type: ignore else: raise TypeError("Cannot convert %r to Decimal128" % (value,)) - def to_decimal(self): + def to_decimal(self) -> decimal.Decimal: """Returns an instance of :class:`decimal.Decimal` for this :class:`Decimal128`. 
""" @@ -236,11 +236,11 @@ def to_decimal(self): sign = 1 if (high & _SIGN) else 0 if (high & _SNAN) == _SNAN: - return decimal.Decimal((sign, (), 'N')) + return decimal.Decimal((sign, (), 'N')) # type: ignore elif (high & _NAN) == _NAN: - return decimal.Decimal((sign, (), 'n')) + return decimal.Decimal((sign, (), 'n')) # type: ignore elif (high & _INF) == _INF: - return decimal.Decimal((sign, (), 'F')) + return decimal.Decimal((sign, (), 'F')) # type: ignore if (high & _EXPONENT_MASK) == _EXPONENT_MASK: exponent = ((high & 0x1fffe00000000000) >> 47) - _EXPONENT_BIAS @@ -270,7 +270,7 @@ def to_decimal(self): return ctx.create_decimal((sign, digits, exponent)) @classmethod - def from_bid(cls, value): + def from_bid(cls: Type["Decimal128"], value: bytes) -> "Decimal128": """Create an instance of :class:`Decimal128` from Binary Integer Decimal string. @@ -282,14 +282,14 @@ def from_bid(cls, value): raise TypeError("value must be an instance of bytes") if len(value) != 16: raise ValueError("value must be exactly 16 bytes") - return cls((_UNPACK_64(value[8:])[0], _UNPACK_64(value[:8])[0])) + return cls((_UNPACK_64(value[8:])[0], _UNPACK_64(value[:8])[0])) # type: ignore @property - def bid(self): + def bid(self) -> bytes: """The Binary Integer Decimal (BID) encoding of this instance.""" return _PACK_64(self.__low) + _PACK_64(self.__high) - def __str__(self): + def __str__(self) -> str: dec = self.to_decimal() if dec.is_nan(): # Required by the drivers spec to match MongoDB behavior. @@ -299,16 +299,16 @@ def __str__(self): def __repr__(self): return "Decimal128('%s')" % (str(self),) - def __setstate__(self, value): + def __setstate__(self, value: Tuple[int, int]) -> None: self.__high, self.__low = value - def __getstate__(self): + def __getstate__(self) -> Tuple[int, int]: return self.__high, self.__low - def __eq__(self, other): + def __eq__(self, other: Any) -> bool: if isinstance(other, Decimal128): return self.bid == other.bid return NotImplemented - def __ne__(self, other): + def __ne__(self, other: Any) -> bool: return not self == other diff --git a/bson/int64.py b/bson/int64.py index fb9bfe9143..f1424c8812 100644 --- a/bson/int64.py +++ b/bson/int64.py @@ -14,6 +14,9 @@ """A BSON wrapper for long (int in python3)""" +from typing import Any + + class Int64(int): """Representation of the BSON int64 type. @@ -28,8 +31,8 @@ class Int64(int): _type_marker = 18 - def __getstate__(self): + def __getstate__(self) -> Any: return {} - def __setstate__(self, state): + def __setstate__(self, state: Any) -> None: pass diff --git a/bson/json_util.py b/bson/json_util.py index ed67d9a36c..d7f501f120 100644 --- a/bson/json_util.py +++ b/bson/json_util.py @@ -92,11 +92,13 @@ import math import re import uuid +from typing import (Any, Dict, Mapping, Optional, Sequence, Tuple, Type, Union, + cast) import bson -from bson import EPOCH_AWARE, RE_TYPE, SON -from bson.binary import (Binary, UuidRepresentation, ALL_UUID_SUBTYPES, - UUID_SUBTYPE) +from bson import EPOCH_AWARE +from bson.binary import (ALL_UUID_SUBTYPES, UUID_SUBTYPE, Binary, + UuidRepresentation) from bson.code import Code from bson.codec_options import CodecOptions from bson.dbref import DBRef @@ -106,10 +108,10 @@ from bson.min_key import MinKey from bson.objectid import ObjectId from bson.regex import Regex +from bson.son import RE_TYPE, SON from bson.timestamp import Timestamp from bson.tz_util import utc - _RE_OPT_TABLE = { "i": re.I, "l": re.L, @@ -246,11 +248,17 @@ class JSONOptions(CodecOptions): .. 
versionchanged:: 4.0 Changed default value of `tz_aware` to False. """ - - def __new__(cls, strict_number_long=None, - datetime_representation=None, - strict_uuid=None, json_mode=JSONMode.RELAXED, - *args, **kwargs): + json_mode: int + strict_number_long: bool + datetime_representation: int + strict_uuid: bool + + def __new__(cls: Type["JSONOptions"], + strict_number_long: Optional[bool] = None, + datetime_representation: Optional[int] = None, + strict_uuid: Optional[bool] = None, + json_mode: int = JSONMode.RELAXED, + *args: Any, **kwargs: Any) -> "JSONOptions": kwargs["tz_aware"] = kwargs.get("tz_aware", False) if kwargs["tz_aware"]: kwargs["tzinfo"] = kwargs.get("tzinfo", utc) @@ -261,7 +269,7 @@ def __new__(cls, strict_number_long=None, raise ValueError( "JSONOptions.datetime_representation must be one of LEGACY, " "NUMBERLONG, or ISO8601 from DatetimeRepresentation.") - self = super(JSONOptions, cls).__new__(cls, *args, **kwargs) + self = cast(JSONOptions, super(JSONOptions, cls).__new__(cls, *args, **kwargs)) if json_mode not in (JSONMode.LEGACY, JSONMode.RELAXED, JSONMode.CANONICAL): @@ -313,7 +321,7 @@ def __new__(cls, strict_number_long=None, self.strict_uuid = strict_uuid return self - def _arguments_repr(self): + def _arguments_repr(self) -> str: return ('strict_number_long=%r, ' 'datetime_representation=%r, ' 'strict_uuid=%r, json_mode=%r, %s' % ( @@ -323,7 +331,7 @@ def _arguments_repr(self): self.json_mode, super(JSONOptions, self)._arguments_repr())) - def _options_dict(self): + def _options_dict(self) -> Dict[Any, Any]: # TODO: PYTHON-2442 use _asdict() instead options_dict = super(JSONOptions, self)._options_dict() options_dict.update({ @@ -333,7 +341,7 @@ def _options_dict(self): 'json_mode': self.json_mode}) return options_dict - def with_options(self, **kwargs): + def with_options(self, **kwargs: Any) -> "JSONOptions": """ Make a copy of this JSONOptions, overriding some options:: @@ -354,7 +362,7 @@ def with_options(self, **kwargs): return JSONOptions(**opts) -LEGACY_JSON_OPTIONS = JSONOptions(json_mode=JSONMode.LEGACY) +LEGACY_JSON_OPTIONS: JSONOptions = JSONOptions(json_mode=JSONMode.LEGACY) """:class:`JSONOptions` for encoding to PyMongo's legacy JSON format. .. seealso:: The documentation for :const:`bson.json_util.JSONMode.LEGACY`. @@ -362,7 +370,7 @@ def with_options(self, **kwargs): .. versionadded:: 3.5 """ -CANONICAL_JSON_OPTIONS = JSONOptions(json_mode=JSONMode.CANONICAL) +CANONICAL_JSON_OPTIONS: JSONOptions = JSONOptions(json_mode=JSONMode.CANONICAL) """:class:`JSONOptions` for Canonical Extended JSON. .. seealso:: The documentation for :const:`bson.json_util.JSONMode.CANONICAL`. @@ -370,7 +378,7 @@ def with_options(self, **kwargs): .. versionadded:: 3.5 """ -RELAXED_JSON_OPTIONS = JSONOptions(json_mode=JSONMode.RELAXED) +RELAXED_JSON_OPTIONS: JSONOptions = JSONOptions(json_mode=JSONMode.RELAXED) """:class:`JSONOptions` for Relaxed Extended JSON. .. seealso:: The documentation for :const:`bson.json_util.JSONMode.RELAXED`. @@ -378,7 +386,7 @@ def with_options(self, **kwargs): .. versionadded:: 3.5 """ -DEFAULT_JSON_OPTIONS = RELAXED_JSON_OPTIONS +DEFAULT_JSON_OPTIONS: JSONOptions = RELAXED_JSON_OPTIONS """The default :class:`JSONOptions` for JSON encoding/decoding. The same as :const:`RELAXED_JSON_OPTIONS`. @@ -391,7 +399,7 @@ def with_options(self, **kwargs): """ -def dumps(obj, *args, **kwargs): +def dumps(obj: Any, *args: Any, **kwargs: Any) -> str: """Helper function that wraps :func:`json.dumps`. 
Recursive function that handles all BSON types including @@ -413,7 +421,7 @@ def dumps(obj, *args, **kwargs): return json.dumps(_json_convert(obj, json_options), *args, **kwargs) -def loads(s, *args, **kwargs): +def loads(s: str, *args: Any, **kwargs: Any) -> Any: """Helper function that wraps :func:`json.loads`. Automatically passes the object_hook for BSON type conversion. @@ -440,7 +448,7 @@ def loads(s, *args, **kwargs): return json.loads(s, *args, **kwargs) -def _json_convert(obj, json_options=DEFAULT_JSON_OPTIONS): +def _json_convert(obj: Any, json_options: JSONOptions = DEFAULT_JSON_OPTIONS) -> Any: """Recursive helper method that converts BSON types so they can be converted into json. """ @@ -455,11 +463,11 @@ def _json_convert(obj, json_options=DEFAULT_JSON_OPTIONS): return obj -def object_pairs_hook(pairs, json_options=DEFAULT_JSON_OPTIONS): +def object_pairs_hook(pairs: Sequence[Tuple[str, Any]], json_options: JSONOptions = DEFAULT_JSON_OPTIONS) -> Any: return object_hook(json_options.document_class(pairs), json_options) -def object_hook(dct, json_options=DEFAULT_JSON_OPTIONS): +def object_hook(dct: Mapping[str, Any], json_options: JSONOptions = DEFAULT_JSON_OPTIONS) -> Any: if "$oid" in dct: return _parse_canonical_oid(dct) if (isinstance(dct.get('$ref'), str) and @@ -505,7 +513,7 @@ def object_hook(dct, json_options=DEFAULT_JSON_OPTIONS): return dct -def _parse_legacy_regex(doc): +def _parse_legacy_regex(doc: Any) -> Any: pattern = doc["$regex"] # Check if this is the $regex query operator. if not isinstance(pattern, (str, bytes)): @@ -517,7 +525,7 @@ def _parse_legacy_regex(doc): return Regex(pattern, flags) -def _parse_legacy_uuid(doc, json_options): +def _parse_legacy_uuid(doc: Any, json_options: JSONOptions) -> Union[Binary, uuid.UUID]: """Decode a JSON legacy $uuid to Python UUID.""" if len(doc) != 1: raise TypeError('Bad $uuid, extra field(s): %s' % (doc,)) @@ -529,7 +537,7 @@ def _parse_legacy_uuid(doc, json_options): return uuid.UUID(doc["$uuid"]) -def _binary_or_uuid(data, subtype, json_options): +def _binary_or_uuid(data: Any, subtype: int, json_options: JSONOptions) -> Union[Binary, uuid.UUID]: # special handling for UUID if subtype in ALL_UUID_SUBTYPES: uuid_representation = json_options.uuid_representation @@ -546,11 +554,11 @@ def _binary_or_uuid(data, subtype, json_options): return binary_value.as_uuid(uuid_representation) if subtype == 0: - return data + return cast(uuid.UUID, data) return Binary(data, subtype) -def _parse_legacy_binary(doc, json_options): +def _parse_legacy_binary(doc: Any, json_options: JSONOptions) -> Union[Binary, uuid.UUID]: if isinstance(doc["$type"], int): doc["$type"] = "%02x" % doc["$type"] subtype = int(doc["$type"], 16) @@ -560,7 +568,7 @@ def _parse_legacy_binary(doc, json_options): return _binary_or_uuid(data, subtype, json_options) -def _parse_canonical_binary(doc, json_options): +def _parse_canonical_binary(doc: Any, json_options: JSONOptions) -> Union[Binary, uuid.UUID]: binary = doc["$binary"] b64 = binary["base64"] subtype = binary["subType"] @@ -577,7 +585,7 @@ def _parse_canonical_binary(doc, json_options): return _binary_or_uuid(data, int(subtype, 16), json_options) -def _parse_canonical_datetime(doc, json_options): +def _parse_canonical_datetime(doc: Any, json_options: JSONOptions) -> datetime.datetime: """Decode a JSON datetime to python datetime.datetime.""" dtm = doc["$date"] if len(doc) != 1: @@ -636,14 +644,14 @@ def _parse_canonical_datetime(doc, json_options): return bson._millis_to_datetime(int(dtm), json_options) 
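The `json_util` annotations above cover the same three Extended JSON modes (LEGACY, RELAXED, CANONICAL). A brief sketch of how the mode changes datetime round-tripping; note that because the 4.0 `tz_aware` default changed to ``False``, an explicit `JSONOptions(tz_aware=True)` is needed to get an aware datetime back::

    import datetime

    from bson import json_util
    from bson.json_util import CANONICAL_JSON_OPTIONS, JSONOptions

    doc = {"when": datetime.datetime(2022, 1, 13, tzinfo=datetime.timezone.utc)}

    json_util.dumps(doc)   # relaxed default: '{"when": {"$date": "2022-01-13T00:00:00Z"}}'
    json_util.dumps(doc, json_options=CANONICAL_JSON_OPTIONS)  # '{"when": {"$date": {"$numberLong": "..."}}}'

    opts = JSONOptions(tz_aware=True)
    assert json_util.loads(json_util.dumps(doc), json_options=opts)["when"] == doc["when"]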
-def _parse_canonical_oid(doc): +def _parse_canonical_oid(doc: Any) -> ObjectId: """Decode a JSON ObjectId to bson.objectid.ObjectId.""" if len(doc) != 1: raise TypeError('Bad $oid, extra field(s): %s' % (doc,)) return ObjectId(doc['$oid']) -def _parse_canonical_symbol(doc): +def _parse_canonical_symbol(doc: Any) -> str: """Decode a JSON symbol to Python string.""" symbol = doc['$symbol'] if len(doc) != 1: @@ -651,7 +659,7 @@ def _parse_canonical_symbol(doc): return str(symbol) -def _parse_canonical_code(doc): +def _parse_canonical_code(doc: Any) -> Code: """Decode a JSON code to bson.code.Code.""" for key in doc: if key not in ('$code', '$scope'): @@ -659,7 +667,7 @@ def _parse_canonical_code(doc): return Code(doc['$code'], scope=doc.get('$scope')) -def _parse_canonical_regex(doc): +def _parse_canonical_regex(doc: Any) -> Regex: """Decode a JSON regex to bson.regex.Regex.""" regex = doc['$regularExpression'] if len(doc) != 1: @@ -674,13 +682,13 @@ def _parse_canonical_regex(doc): return Regex(regex['pattern'], opts) -def _parse_canonical_dbref(doc): +def _parse_canonical_dbref(doc: Any) -> DBRef: """Decode a JSON DBRef to bson.dbref.DBRef.""" return DBRef(doc.pop('$ref'), doc.pop('$id'), database=doc.pop('$db', None), **doc) -def _parse_canonical_dbpointer(doc): +def _parse_canonical_dbpointer(doc: Any) -> Any: """Decode a JSON (deprecated) DBPointer to bson.dbref.DBRef.""" dbref = doc['$dbPointer'] if len(doc) != 1: @@ -702,7 +710,7 @@ def _parse_canonical_dbpointer(doc): raise TypeError('Bad $dbPointer, expected a DBRef: %s' % (doc,)) -def _parse_canonical_int32(doc): +def _parse_canonical_int32(doc: Any) -> int: """Decode a JSON int32 to python int.""" i_str = doc['$numberInt'] if len(doc) != 1: @@ -712,7 +720,7 @@ def _parse_canonical_int32(doc): return int(i_str) -def _parse_canonical_int64(doc): +def _parse_canonical_int64(doc: Any) -> Int64: """Decode a JSON int64 to bson.int64.Int64.""" l_str = doc['$numberLong'] if len(doc) != 1: @@ -720,7 +728,7 @@ def _parse_canonical_int64(doc): return Int64(l_str) -def _parse_canonical_double(doc): +def _parse_canonical_double(doc: Any) -> float: """Decode a JSON double to python float.""" d_str = doc['$numberDouble'] if len(doc) != 1: @@ -730,7 +738,7 @@ def _parse_canonical_double(doc): return float(d_str) -def _parse_canonical_decimal128(doc): +def _parse_canonical_decimal128(doc: Any) -> Decimal128: """Decode a JSON decimal128 to bson.decimal128.Decimal128.""" d_str = doc['$numberDecimal'] if len(doc) != 1: @@ -740,7 +748,7 @@ def _parse_canonical_decimal128(doc): return Decimal128(d_str) -def _parse_canonical_minkey(doc): +def _parse_canonical_minkey(doc: Any) -> MinKey: """Decode a JSON MinKey to bson.min_key.MinKey.""" if type(doc['$minKey']) is not int or doc['$minKey'] != 1: raise TypeError('$minKey value must be 1: %s' % (doc,)) @@ -749,7 +757,7 @@ def _parse_canonical_minkey(doc): return MinKey() -def _parse_canonical_maxkey(doc): +def _parse_canonical_maxkey(doc: Any) -> MaxKey: """Decode a JSON MaxKey to bson.max_key.MaxKey.""" if type(doc['$maxKey']) is not int or doc['$maxKey'] != 1: raise TypeError('$maxKey value must be 1: %s', (doc,)) @@ -758,7 +766,7 @@ def _parse_canonical_maxkey(doc): return MaxKey() -def _encode_binary(data, subtype, json_options): +def _encode_binary(data: bytes, subtype: int, json_options: JSONOptions) -> Any: if json_options.json_mode == JSONMode.LEGACY: return SON([ ('$binary', base64.b64encode(data).decode()), @@ -768,7 +776,7 @@ def _encode_binary(data, subtype, json_options): ('subType', "%02x" % 
subtype)])} -def default(obj, json_options=DEFAULT_JSON_OPTIONS): +def default(obj: Any, json_options: JSONOptions = DEFAULT_JSON_OPTIONS) -> Any: # We preserve key order when rendering SON, DBRef, etc. as JSON by # returning a SON for those types instead of a dict. if isinstance(obj, ObjectId): @@ -780,9 +788,10 @@ def default(obj, json_options=DEFAULT_JSON_OPTIONS): DatetimeRepresentation.ISO8601): if not obj.tzinfo: obj = obj.replace(tzinfo=utc) + assert obj.tzinfo is not None if obj >= EPOCH_AWARE: off = obj.tzinfo.utcoffset(obj) - if (off.days, off.seconds, off.microseconds) == (0, 0, 0): + if (off.days, off.seconds, off.microseconds) == (0, 0, 0): # type: ignore tz_string = 'Z' else: tz_string = obj.strftime('%z') diff --git a/bson/max_key.py b/bson/max_key.py index afd7fcb1b3..107dc9dec6 100644 --- a/bson/max_key.py +++ b/bson/max_key.py @@ -14,6 +14,7 @@ """Representation for the MongoDB internal MaxKey type. """ +from typing import Any class MaxKey(object): @@ -22,31 +23,31 @@ class MaxKey(object): _type_marker = 127 - def __getstate__(self): + def __getstate__(self) -> Any: return {} - def __setstate__(self, state): + def __setstate__(self, state: Any) -> None: pass - def __eq__(self, other): + def __eq__(self, other: Any) -> bool: return isinstance(other, MaxKey) - def __hash__(self): + def __hash__(self) -> int: return hash(self._type_marker) - def __ne__(self, other): + def __ne__(self, other: Any) -> bool: return not self == other - def __le__(self, other): + def __le__(self, other: Any) -> bool: return isinstance(other, MaxKey) - def __lt__(self, dummy): + def __lt__(self, dummy: Any) -> bool: return False - def __ge__(self, dummy): + def __ge__(self, dummy: Any) -> bool: return True - def __gt__(self, other): + def __gt__(self, other: Any) -> bool: return not isinstance(other, MaxKey) def __repr__(self): diff --git a/bson/min_key.py b/bson/min_key.py index bcb7f9e60f..5483eb6cf8 100644 --- a/bson/min_key.py +++ b/bson/min_key.py @@ -14,6 +14,7 @@ """Representation for the MongoDB internal MinKey type. 
""" +from typing import Any class MinKey(object): @@ -22,31 +23,31 @@ class MinKey(object): _type_marker = 255 - def __getstate__(self): + def __getstate__(self) -> Any: return {} - def __setstate__(self, state): + def __setstate__(self, state: Any) -> None: pass - def __eq__(self, other): + def __eq__(self, other: Any) -> bool: return isinstance(other, MinKey) - def __hash__(self): + def __hash__(self) -> int: return hash(self._type_marker) - def __ne__(self, other): + def __ne__(self, other: Any) -> bool: return not self == other - def __le__(self, dummy): + def __le__(self, dummy: Any) -> bool: return True - def __lt__(self, other): + def __lt__(self, other: Any) -> bool: return not isinstance(other, MinKey) - def __ge__(self, other): + def __ge__(self, other: Any) -> bool: return isinstance(other, MinKey) - def __gt__(self, dummy): + def __gt__(self, dummy: Any) -> bool: return False def __repr__(self): diff --git a/bson/objectid.py b/bson/objectid.py index faf8910edc..baf1966bce 100644 --- a/bson/objectid.py +++ b/bson/objectid.py @@ -23,23 +23,22 @@ import struct import threading import time - from random import SystemRandom +from typing import Any, NoReturn, Optional, Type, Union from bson.errors import InvalidId from bson.tz_util import utc - _MAX_COUNTER_VALUE = 0xFFFFFF -def _raise_invalid_id(oid): +def _raise_invalid_id(oid: str) -> NoReturn: raise InvalidId( "%r is not a valid ObjectId, it must be a 12-byte input" " or a 24-character hex string" % oid) -def _random_bytes(): +def _random_bytes() -> bytes: """Get the 5-byte random field of an ObjectId.""" return os.urandom(5) @@ -59,7 +58,7 @@ class ObjectId(object): _type_marker = 7 - def __init__(self, oid=None): + def __init__(self, oid: Optional[Union[str, "ObjectId", bytes]] = None) -> None: """Initialize a new ObjectId. An ObjectId is a 12-byte unique identifier consisting of: @@ -105,7 +104,7 @@ def __init__(self, oid=None): self.__validate(oid) @classmethod - def from_datetime(cls, generation_time): + def from_datetime(cls: Type["ObjectId"], generation_time: datetime.datetime) -> "ObjectId": """Create a dummy ObjectId instance with a specific generation time. This method is useful for doing range queries on a field @@ -132,15 +131,16 @@ def from_datetime(cls, generation_time): - `generation_time`: :class:`~datetime.datetime` to be used as the generation time for the resulting ObjectId. """ - if generation_time.utcoffset() is not None: - generation_time = generation_time - generation_time.utcoffset() + offset = generation_time.utcoffset() + if offset is not None: + generation_time = generation_time - offset timestamp = calendar.timegm(generation_time.timetuple()) oid = struct.pack( ">I", int(timestamp)) + b"\x00\x00\x00\x00\x00\x00\x00\x00" return cls(oid) @classmethod - def is_valid(cls, oid): + def is_valid(cls: Type["ObjectId"], oid: Any) -> bool: """Checks if a `oid` string is valid or not. :Parameters: @@ -158,7 +158,7 @@ def is_valid(cls, oid): return False @classmethod - def _random(cls): + def _random(cls) -> bytes: """Generate a 5-byte random number once per process. """ pid = os.getpid() @@ -167,7 +167,7 @@ def _random(cls): cls.__random = _random_bytes() return cls.__random - def __generate(self): + def __generate(self) -> None: """Generate a new value for this ObjectId. """ @@ -184,7 +184,7 @@ def __generate(self): self.__id = oid - def __validate(self, oid): + def __validate(self, oid: Any) -> None: """Validate and use the given id for this ObjectId. 
Raises TypeError if id is not an instance of @@ -210,13 +210,13 @@ def __validate(self, oid): "not %s" % (type(oid),)) @property - def binary(self): + def binary(self) -> bytes: """12-byte binary representation of this ObjectId. """ return self.__id @property - def generation_time(self): + def generation_time(self) -> datetime.datetime: """A :class:`datetime.datetime` instance representing the time of generation for this :class:`ObjectId`. @@ -227,13 +227,13 @@ def generation_time(self): timestamp = struct.unpack(">I", self.__id[0:4])[0] return datetime.datetime.fromtimestamp(timestamp, utc) - def __getstate__(self): + def __getstate__(self) -> bytes: """return value of object for pickling. needed explicitly because __slots__() defined. """ return self.__id - def __setstate__(self, value): + def __setstate__(self, value: Any) -> None: """explicit state set from pickling """ # Provide backwards compatability with OIDs @@ -250,42 +250,42 @@ def __setstate__(self, value): else: self.__id = oid - def __str__(self): + def __str__(self) -> str: return binascii.hexlify(self.__id).decode() def __repr__(self): return "ObjectId('%s')" % (str(self),) - def __eq__(self, other): + def __eq__(self, other: Any) -> bool: if isinstance(other, ObjectId): return self.__id == other.binary return NotImplemented - def __ne__(self, other): + def __ne__(self, other: Any) -> bool: if isinstance(other, ObjectId): return self.__id != other.binary return NotImplemented - def __lt__(self, other): + def __lt__(self, other: Any) -> bool: if isinstance(other, ObjectId): return self.__id < other.binary return NotImplemented - def __le__(self, other): + def __le__(self, other: Any) -> bool: if isinstance(other, ObjectId): return self.__id <= other.binary return NotImplemented - def __gt__(self, other): + def __gt__(self, other: Any) -> bool: if isinstance(other, ObjectId): return self.__id > other.binary return NotImplemented - def __ge__(self, other): + def __ge__(self, other: Any) -> bool: if isinstance(other, ObjectId): return self.__id >= other.binary return NotImplemented - def __hash__(self): + def __hash__(self) -> int: """Get a hash value for this :class:`ObjectId`.""" return hash(self.__id) diff --git a/bson/raw_bson.py b/bson/raw_bson.py index bfe888b6b7..8a3b0cb4fb 100644 --- a/bson/raw_bson.py +++ b/bson/raw_bson.py @@ -52,14 +52,16 @@ """ from collections.abc import Mapping as _Mapping +from typing import Any, ItemsView, Iterator, Mapping, Optional, cast -from bson import _raw_to_dict, _get_object_size -from bson.codec_options import ( - DEFAULT_CODEC_OPTIONS as DEFAULT, _RAW_BSON_DOCUMENT_MARKER) +from bson import _get_object_size, _raw_to_dict +from bson.codec_options import _RAW_BSON_DOCUMENT_MARKER +from bson.codec_options import DEFAULT_CODEC_OPTIONS as DEFAULT +from bson.codec_options import CodecOptions from bson.son import SON -class RawBSONDocument(_Mapping): +class RawBSONDocument(Mapping[str, Any]): """Representation for a MongoDB document that provides access to the raw BSON bytes that compose it. 
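As a quick usage sketch of ``RawBSONDocument`` (illustrative only, not part of
the patch; only the ``bson`` package itself is assumed)::

    from bson import encode
    from bson.raw_bson import RawBSONDocument

    raw_doc = RawBSONDocument(encode({"_id": 1, "name": "grape"}))
    assert isinstance(raw_doc.raw, bytes)  # the raw BSON bytes, undecoded
    assert raw_doc["name"] == "grape"      # fields are decoded lazily on access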
@@ -70,7 +72,7 @@ class RawBSONDocument(_Mapping): __slots__ = ('__raw', '__inflated_doc', '__codec_options') _type_marker = _RAW_BSON_DOCUMENT_MARKER - def __init__(self, bson_bytes, codec_options=None): + def __init__(self, bson_bytes: bytes, codec_options: Optional[CodecOptions] = None) -> None: """Create a new :class:`RawBSONDocument` :class:`RawBSONDocument` is a representation of a BSON document that @@ -105,7 +107,7 @@ class from the standard library so it can be used like a read-only `document_class` must be :class:`RawBSONDocument`. """ self.__raw = bson_bytes - self.__inflated_doc = None + self.__inflated_doc: Optional[Mapping[str, Any]] = None # Can't default codec_options to DEFAULT_RAW_BSON_OPTIONS in signature, # it refers to this class RawBSONDocument. if codec_options is None: @@ -119,16 +121,16 @@ class from the standard library so it can be used like a read-only _get_object_size(bson_bytes, 0, len(bson_bytes)) @property - def raw(self): + def raw(self) -> bytes: """The raw BSON bytes composing this document.""" return self.__raw - def items(self): + def items(self) -> ItemsView[str, Any]: """Lazily decode and iterate elements in this document.""" return self.__inflated.items() @property - def __inflated(self): + def __inflated(self) -> Mapping[str, Any]: if self.__inflated_doc is None: # We already validated the object's size when this document was # created, so no need to do that again. @@ -137,16 +139,16 @@ def __inflated(self): self.__raw, self.__codec_options) return self.__inflated_doc - def __getitem__(self, item): + def __getitem__(self, item: str) -> Any: return self.__inflated[item] - def __iter__(self): + def __iter__(self) -> Iterator[str]: return iter(self.__inflated) - def __len__(self): + def __len__(self) -> int: return len(self.__inflated) - def __eq__(self, other): + def __eq__(self, other: Any) -> bool: if isinstance(other, RawBSONDocument): return self.__raw == other.raw return NotImplemented @@ -156,7 +158,7 @@ def __repr__(self): % (self.raw, self.__codec_options)) -def _inflate_bson(bson_bytes, codec_options): +def _inflate_bson(bson_bytes: bytes, codec_options: CodecOptions) -> Mapping[Any, Any]: """Inflates the top level fields of a BSON document. :Parameters: @@ -170,7 +172,7 @@ def _inflate_bson(bson_bytes, codec_options): bson_bytes, 4, len(bson_bytes)-1, codec_options, SON()) -DEFAULT_RAW_BSON_OPTIONS = DEFAULT.with_options(document_class=RawBSONDocument) +DEFAULT_RAW_BSON_OPTIONS: CodecOptions = DEFAULT.with_options(document_class=RawBSONDocument) """The default :class:`~bson.codec_options.CodecOptions` for :class:`RawBSONDocument`. """ diff --git a/bson/regex.py b/bson/regex.py index 5cf097f08c..454aca3cec 100644 --- a/bson/regex.py +++ b/bson/regex.py @@ -16,12 +16,13 @@ """ import re +from typing import Any, Pattern, Type, Union -from bson.son import RE_TYPE from bson._helpers import _getstate_slots, _setstate_slots +from bson.son import RE_TYPE -def str_flags_to_int(str_flags): +def str_flags_to_int(str_flags: str) -> int: flags = 0 if "i" in str_flags: flags |= re.IGNORECASE @@ -49,7 +50,7 @@ class Regex(object): _type_marker = 11 @classmethod - def from_native(cls, regex): + def from_native(cls: Type["Regex"], regex: Pattern[Any]) -> "Regex": """Convert a Python regular expression into a ``Regex`` instance. 
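A short round-trip sketch for ``Regex`` (illustrative only; it relies on the
``from_native`` and ``try_compile`` methods annotated in this diff)::

    import re
    from bson.regex import Regex

    regex = Regex.from_native(re.compile(r"^mongo", re.IGNORECASE))
    pattern = regex.try_compile()  # back to a native re.Pattern
    assert pattern.match("MongoDB")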
Note that in Python 3, a regular expression compiled from a @@ -80,7 +81,7 @@ def from_native(cls, regex): return Regex(regex.pattern, regex.flags) - def __init__(self, pattern, flags=0): + def __init__(self, pattern: Union[str, bytes], flags: Union[str, int] = 0) -> None: """BSON regular expression data. This class is useful to store and retrieve regular expressions that are @@ -103,21 +104,21 @@ def __init__(self, pattern, flags=0): raise TypeError( "flags must be a string or int, not %s" % type(flags)) - def __eq__(self, other): + def __eq__(self, other: Any) -> bool: if isinstance(other, Regex): return self.pattern == other.pattern and self.flags == other.flags else: return NotImplemented - __hash__ = None + __hash__ = None # type: ignore - def __ne__(self, other): + def __ne__(self, other: Any) -> bool: return not self == other def __repr__(self): return "Regex(%r, %r)" % (self.pattern, self.flags) - def try_compile(self): + def try_compile(self) -> Pattern[Any]: """Compile this :class:`Regex` as a Python regular expression. .. warning:: diff --git a/bson/son.py b/bson/son.py index 5a3210fcdb..7207367f3d 100644 --- a/bson/son.py +++ b/bson/son.py @@ -20,29 +20,35 @@ import copy import re - from collections.abc import Mapping as _Mapping +from typing import (Any, Dict, Iterable, Iterator, List, Mapping, + Optional, Pattern, Tuple, Type, TypeVar, Union) # This sort of sucks, but seems to be as good as it gets... # This is essentially the same as re._pattern_type -RE_TYPE = type(re.compile("")) +RE_TYPE: Type[Pattern[Any]] = type(re.compile("")) + +_Key = TypeVar("_Key", bound=str) +_Value = TypeVar("_Value") +_T = TypeVar("_T") -class SON(dict): +class SON(Dict[_Key, _Value]): """SON data. A subclass of dict that maintains ordering of keys and provides a few extra niceties for dealing with SON. SON provides an API similar to collections.OrderedDict. """ + __keys: List[Any] - def __init__(self, data=None, **kwargs): + def __init__(self, data: Optional[Union[Mapping[_Key, _Value], Iterable[Tuple[_Key, _Value]]]] = None, **kwargs: Any) -> None: self.__keys = [] dict.__init__(self) self.update(data) self.update(kwargs) - def __new__(cls, *args, **kwargs): + def __new__(cls: Type["SON[_Key, _Value]"], *args: Any, **kwargs: Any) -> "SON[_Key, _Value]": instance = super(SON, cls).__new__(cls, *args, **kwargs) instance.__keys = [] return instance @@ -53,53 +59,53 @@ def __repr__(self): result.append("(%r, %r)" % (key, self[key])) return "SON([%s])" % ", ".join(result) - def __setitem__(self, key, value): + def __setitem__(self, key: _Key, value: _Value) -> None: if key not in self.__keys: self.__keys.append(key) dict.__setitem__(self, key, value) - def __delitem__(self, key): + def __delitem__(self, key: _Key) -> None: self.__keys.remove(key) dict.__delitem__(self, key) - def copy(self): - other = SON() + def copy(self) -> "SON[_Key, _Value]": + other: SON[_Key, _Value] = SON() other.update(self) return other # TODO this is all from UserDict.DictMixin. it could probably be made more # efficient. 
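    # A doctest-style sketch of the ordered behavior implemented here
    # (illustrative only, not part of the patch):
    #
    #     >>> from bson.son import SON
    #     >>> s = SON([("b", 2), ("a", 1)])
    #     >>> list(s)                           # insertion order is preserved
    #     ['b', 'a']
    #     >>> s == SON([("a", 1), ("b", 2)])    # SON-to-SON comparison is ordered
    #     False
    #     >>> s.to_dict() == {"a": 1, "b": 2}   # plain-dict comparison is not
    #     True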
# second level definitions support higher levels - def __iter__(self): + def __iter__(self) -> Iterator[_Key]: for k in self.__keys: yield k - def has_key(self, key): + def has_key(self, key: _Key) -> bool: return key in self.__keys - def iterkeys(self): + def iterkeys(self) -> Iterator[_Key]: return self.__iter__() # fourth level uses definitions from lower levels - def itervalues(self): + def itervalues(self) -> Iterator[_Value]: for _, v in self.items(): yield v - def values(self): + def values(self) -> List[_Value]: # type: ignore[override] return [v for _, v in self.items()] - def clear(self): + def clear(self) -> None: self.__keys = [] super(SON, self).clear() - def setdefault(self, key, default=None): + def setdefault(self, key: _Key, default: _Value) -> _Value: # type: ignore[override] try: return self[key] except KeyError: self[key] = default return default - def pop(self, key, *args): + def pop(self, key: _Key, *args: Union[_Value, _T]) -> Union[_Value, _T]: if len(args) > 1: raise TypeError("pop expected at most 2 arguments, got "\ + repr(1 + len(args))) @@ -112,7 +118,7 @@ def pop(self, key, *args): del self[key] return value - def popitem(self): + def popitem(self) -> Tuple[_Key, _Value]: try: k, v = next(iter(self.items())) except StopIteration: @@ -120,7 +126,7 @@ def popitem(self): del self[k] return (k, v) - def update(self, other=None, **kwargs): + def update(self, other: Optional[Any] = None, **kwargs: _Value) -> None: # type: ignore[override] # Make progressively weaker assumptions about "other" if other is None: pass @@ -136,13 +142,13 @@ def update(self, other=None, **kwargs): if kwargs: self.update(kwargs) - def get(self, key, default=None): + def get(self, key: _Key, default: Optional[Union[_Value, _T]] = None) -> Union[_Value, _T, None]: # type: ignore[override] try: return self[key] except KeyError: return default - def __eq__(self, other): + def __eq__(self, other: Any) -> bool: """Comparison to another SON is order-sensitive while comparison to a regular dictionary is order-insensitive. """ @@ -151,20 +157,20 @@ def __eq__(self, other): list(other.items()) return self.to_dict() == other - def __ne__(self, other): + def __ne__(self, other: Any) -> bool: return not self == other - def __len__(self): + def __len__(self) -> int: return len(self.__keys) - def to_dict(self): + def to_dict(self) -> Dict[_Key, _Value]: """Convert a SON document to a normal Python dictionary instance. This is trickier than just *dict(...)* because it needs to be recursive. 
""" - def transform_value(value): + def transform_value(value: Any) -> Any: if isinstance(value, list): return [transform_value(v) for v in value] elif isinstance(value, _Mapping): @@ -176,11 +182,11 @@ def transform_value(value): return transform_value(dict(self)) - def __deepcopy__(self, memo): - out = SON() + def __deepcopy__(self, memo: Dict[int, "SON[_Key, _Value]"]) -> "SON[_Key, _Value]": + out: SON[_Key, _Value] = SON() val_id = id(self) if val_id in memo: - return memo.get(val_id) + return memo[val_id] memo[val_id] = out for k, v in self.items(): if not isinstance(v, RE_TYPE): diff --git a/bson/timestamp.py b/bson/timestamp.py index 69c061d2a5..93c7540fd0 100644 --- a/bson/timestamp.py +++ b/bson/timestamp.py @@ -17,9 +17,10 @@ import calendar import datetime +from typing import Any, Union -from bson.tz_util import utc from bson._helpers import _getstate_slots, _setstate_slots +from bson.tz_util import utc UPPERBOUND = 4294967296 @@ -34,7 +35,7 @@ class Timestamp(object): _type_marker = 17 - def __init__(self, time, inc): + def __init__(self, time: Union[datetime.datetime, int], inc: int) -> None: """Create a new :class:`Timestamp`. This class is only for use with the MongoDB opLog. If you need @@ -53,8 +54,9 @@ def __init__(self, time, inc): - `inc`: the incrementing counter """ if isinstance(time, datetime.datetime): - if time.utcoffset() is not None: - time = time - time.utcoffset() + offset = time.utcoffset() + if offset is not None: + time = time - offset time = int(calendar.timegm(time.timetuple())) if not isinstance(time, int): raise TypeError("time must be an instance of int") @@ -69,45 +71,45 @@ def __init__(self, time, inc): self.__inc = inc @property - def time(self): + def time(self) -> int: """Get the time portion of this :class:`Timestamp`. """ return self.__time @property - def inc(self): + def inc(self) -> int: """Get the inc portion of this :class:`Timestamp`. """ return self.__inc - def __eq__(self, other): + def __eq__(self, other: Any) -> bool: if isinstance(other, Timestamp): return (self.__time == other.time and self.__inc == other.inc) else: return NotImplemented - def __hash__(self): + def __hash__(self) -> int: return hash(self.time) ^ hash(self.inc) - def __ne__(self, other): + def __ne__(self, other: Any) -> bool: return not self == other - def __lt__(self, other): + def __lt__(self, other: Any) -> bool: if isinstance(other, Timestamp): return (self.time, self.inc) < (other.time, other.inc) return NotImplemented - def __le__(self, other): + def __le__(self, other: Any) -> bool: if isinstance(other, Timestamp): return (self.time, self.inc) <= (other.time, other.inc) return NotImplemented - def __gt__(self, other): + def __gt__(self, other: Any) -> bool: if isinstance(other, Timestamp): return (self.time, self.inc) > (other.time, other.inc) return NotImplemented - def __ge__(self, other): + def __ge__(self, other: Any) -> bool: if isinstance(other, Timestamp): return (self.time, self.inc) >= (other.time, other.inc) return NotImplemented @@ -115,7 +117,7 @@ def __ge__(self, other): def __repr__(self): return "Timestamp(%s, %s)" % (self.__time, self.__inc) - def as_datetime(self): + def as_datetime(self) -> datetime.datetime: """Return a :class:`~datetime.datetime` instance corresponding to the time portion of this :class:`Timestamp`. 
diff --git a/bson/tz_util.py b/bson/tz_util.py index 6ec918fb2b..43ae52ccff 100644 --- a/bson/tz_util.py +++ b/bson/tz_util.py @@ -14,10 +14,10 @@ """Timezone related utilities for BSON.""" -from datetime import (timedelta, - tzinfo) +from datetime import datetime, timedelta, tzinfo +from typing import Any, Optional, Tuple, Union -ZERO = timedelta(0) +ZERO: timedelta = timedelta(0) class FixedOffset(tzinfo): @@ -28,25 +28,25 @@ class FixedOffset(tzinfo): Defining __getinitargs__ enables pickling / copying. """ - def __init__(self, offset, name): + def __init__(self, offset: Union[float, timedelta], name: str) -> None: if isinstance(offset, timedelta): self.__offset = offset else: self.__offset = timedelta(minutes=offset) self.__name = name - def __getinitargs__(self): + def __getinitargs__(self) -> Tuple[timedelta, str]: return self.__offset, self.__name - def utcoffset(self, dt): + def utcoffset(self, dt: Optional[datetime]) -> timedelta: return self.__offset - def tzname(self, dt): + def tzname(self, dt: Optional[datetime]) -> str: return self.__name - def dst(self, dt): + def dst(self, dt: Optional[datetime]) -> timedelta: return ZERO -utc = FixedOffset(0, "UTC") +utc: FixedOffset = FixedOffset(0, "UTC") """Fixed offset timezone representing UTC.""" diff --git a/doc/changelog.rst b/doc/changelog.rst index 062104bc8f..4ff9cd781d 100644 --- a/doc/changelog.rst +++ b/doc/changelog.rst @@ -4,7 +4,7 @@ Changelog Changes in Version 4.1 ---------------------- -PyMongo 4.0 brings a number of improvements including: +PyMongo 4.1 brings a number of improvements including: - :meth:`pymongo.collection.Collection.update_one`, :meth:`pymongo.collection.Collection.update_many`, @@ -21,6 +21,8 @@ PyMongo 4.0 brings a number of improvements including: $merge and $out executing on secondaries on MongoDB >=5.0. aggregate() now always obeys the collection's :attr:`read_preference` on MongoDB >= 5.0. +- :meth:`gridfs.GridOut.seek` now returns the new position in the file, to + conform to the behavior of :meth:`io.IOBase.seek`. Changes in Version 4.0 diff --git a/gridfs/__init__.py b/gridfs/__init__.py index c36d921e8c..02c42d6eb6 100644 --- a/gridfs/__init__.py +++ b/gridfs/__init__.py @@ -21,25 +21,28 @@ """ from collections import abc +from typing import Any, List, Mapping, Optional, cast -from pymongo import (ASCENDING, - DESCENDING) +from bson.objectid import ObjectId +from gridfs.errors import NoFile +from gridfs.grid_file import (DEFAULT_CHUNK_SIZE, GridIn, GridOut, + GridOutCursor, _clear_entity_type_registry, + _disallow_transactions) +from pymongo import ASCENDING, DESCENDING +from pymongo.client_session import ClientSession +from pymongo.collation import Collation +from pymongo.collection import Collection from pymongo.common import UNAUTHORIZED_CODES, validate_string from pymongo.database import Database from pymongo.errors import ConfigurationError, OperationFailure +from pymongo.read_preferences import _ServerMode +from pymongo.write_concern import WriteConcern -from gridfs.errors import NoFile -from gridfs.grid_file import (GridIn, - GridOut, - GridOutCursor, - DEFAULT_CHUNK_SIZE, - _clear_entity_type_registry, - _disallow_transactions) class GridFS(object): """An instance of GridFS on top of a single Database. """ - def __init__(self, database, collection="fs"): + def __init__(self, database: Database, collection: str = "fs"): """Create a new instance of :class:`GridFS`. 
Raises :class:`TypeError` if `database` is not an instance of @@ -82,7 +85,7 @@ def __init__(self, database, collection="fs"): self.__files = self.__collection.files self.__chunks = self.__collection.chunks - def new_file(self, **kwargs): + def new_file(self, **kwargs: Any) -> GridIn: """Create a new file in GridFS. Returns a new :class:`~gridfs.grid_file.GridIn` instance to @@ -98,7 +101,7 @@ def new_file(self, **kwargs): """ return GridIn(self.__collection, **kwargs) - def put(self, data, **kwargs): + def put(self, data: Any, **kwargs: Any) -> Any: """Put data in GridFS as a new file. Equivalent to doing:: @@ -136,7 +139,7 @@ def put(self, data, **kwargs): return grid_file._id - def get(self, file_id, session=None): + def get(self, file_id: Any, session: Optional[ClientSession] = None) -> GridOut: """Get a file from GridFS by ``"_id"``. Returns an instance of :class:`~gridfs.grid_file.GridOut`, @@ -156,7 +159,7 @@ def get(self, file_id, session=None): gout._ensure_file() return gout - def get_version(self, filename=None, version=-1, session=None, **kwargs): + def get_version(self, filename: Optional[str] = None, version: Optional[int] = -1, session: Optional[ClientSession] = None, **kwargs: Any) -> GridOut: """Get a file from GridFS by ``"filename"`` or metadata fields. Returns a version of the file in GridFS whose filename matches @@ -197,6 +200,8 @@ def get_version(self, filename=None, version=-1, session=None, **kwargs): _disallow_transactions(session) cursor = self.__files.find(query, session=session) + if version is None: + version = -1 if version < 0: skip = abs(version) - 1 cursor.limit(-1).skip(skip).sort("uploadDate", DESCENDING) @@ -209,7 +214,7 @@ def get_version(self, filename=None, version=-1, session=None, **kwargs): except StopIteration: raise NoFile("no version %d for filename %r" % (version, filename)) - def get_last_version(self, filename=None, session=None, **kwargs): + def get_last_version(self, filename: Optional[str] = None, session: Optional[ClientSession] = None, **kwargs: Any) -> GridOut: """Get the most recent version of a file in GridFS by ``"filename"`` or metadata fields. @@ -228,7 +233,7 @@ def get_last_version(self, filename=None, session=None, **kwargs): return self.get_version(filename=filename, session=session, **kwargs) # TODO add optional safe mode for chunk removal? - def delete(self, file_id, session=None): + def delete(self, file_id: Any, session: Optional[ClientSession] = None) -> None: """Delete a file from GridFS by ``"_id"``. Deletes all data belonging to the file with ``"_id"``: @@ -257,7 +262,7 @@ def delete(self, file_id, session=None): self.__files.delete_one({"_id": file_id}, session=session) self.__chunks.delete_many({"files_id": file_id}, session=session) - def list(self, session=None): + def list(self, session: Optional[ClientSession] = None) -> List[str]: """List the names of all files stored in this instance of :class:`GridFS`. @@ -278,7 +283,7 @@ def list(self, session=None): name for name in self.__files.distinct("filename", session=session) if name is not None] - def find_one(self, filter=None, session=None, *args, **kwargs): + def find_one(self, filter: Optional[Any] = None, session: Optional[ClientSession] = None, *args: Any, **kwargs: Any) -> Optional[GridOut]: """Get a single file from gridfs. 
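        For instance, a lookup sketch (assumptions: a reachable ``mongod`` via
        the default ``MongoClient`` settings; ``lisa.txt`` is a hypothetical
        filename)::

            from gridfs import GridFS
            from pymongo import MongoClient

            fs = GridFS(MongoClient().test)
            grid_out = fs.find_one({"filename": "lisa.txt"})
            if grid_out is not None:
                contents = grid_out.read()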
All arguments to :meth:`find` are also valid arguments for @@ -311,7 +316,7 @@ def find_one(self, filter=None, session=None, *args, **kwargs): return None - def find(self, *args, **kwargs): + def find(self, *args: Any, **kwargs: Any) -> GridOutCursor: """Query GridFS for files. Returns a cursor that iterates across files matching @@ -372,7 +377,7 @@ def find(self, *args, **kwargs): """ return GridOutCursor(self.__collection, *args, **kwargs) - def exists(self, document_or_id=None, session=None, **kwargs): + def exists(self, document_or_id: Optional[Any] = None, session: Optional[ClientSession] = None, **kwargs: Any) -> bool: """Check if a file exists in this instance of :class:`GridFS`. The file to check for can be specified by the value of its @@ -422,9 +427,10 @@ def exists(self, document_or_id=None, session=None, **kwargs): class GridFSBucket(object): """An instance of GridFS on top of a single Database.""" - def __init__(self, db, bucket_name="fs", - chunk_size_bytes=DEFAULT_CHUNK_SIZE, write_concern=None, - read_preference=None): + def __init__(self, db: Database, bucket_name: str = "fs", + chunk_size_bytes: int = DEFAULT_CHUNK_SIZE, + write_concern: Optional[WriteConcern] = None, + read_preference: Optional[_ServerMode] = None) -> None: """Create a new instance of :class:`GridFSBucket`. Raises :exc:`TypeError` if `database` is not an instance of @@ -470,18 +476,19 @@ def __init__(self, db, bucket_name="fs", self._bucket_name = bucket_name self._collection = db[bucket_name] - self._chunks = self._collection.chunks.with_options( + self._chunks: Collection = self._collection.chunks.with_options( write_concern=write_concern, read_preference=read_preference) - self._files = self._collection.files.with_options( + self._files: Collection = self._collection.files.with_options( write_concern=write_concern, read_preference=read_preference) self._chunk_size_bytes = chunk_size_bytes - def open_upload_stream(self, filename, chunk_size_bytes=None, - metadata=None, session=None): + def open_upload_stream(self, filename: str, chunk_size_bytes: Optional[int] = None, + metadata: Optional[Mapping[str, Any]] = None, + session: Optional[ClientSession] = None) -> GridIn: """Opens a Stream that the application can write the contents of the file to. @@ -528,8 +535,9 @@ def open_upload_stream(self, filename, chunk_size_bytes=None, return GridIn(self._collection, session=session, **opts) def open_upload_stream_with_id( - self, file_id, filename, chunk_size_bytes=None, metadata=None, - session=None): + self, file_id: Any, filename: str, chunk_size_bytes: Optional[int] = None, + metadata: Optional[Mapping[str, Any]] = None, + session: Optional[ClientSession] = None) -> GridIn: """Opens a Stream that the application can write the contents of the file to. @@ -580,8 +588,10 @@ def open_upload_stream_with_id( return GridIn(self._collection, session=session, **opts) - def upload_from_stream(self, filename, source, chunk_size_bytes=None, - metadata=None, session=None): + def upload_from_stream(self, filename: str, source: Any, + chunk_size_bytes: Optional[int] = None, + metadata: Optional[Mapping[str, Any]] = None, + session: Optional[ClientSession] = None) -> ObjectId: """Uploads a user file to a GridFS bucket. 
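        For example, a minimal upload sketch (assumes a ``mongod`` reachable
        via the default ``MongoClient`` settings)::

            from gridfs import GridFSBucket
            from pymongo import MongoClient

            bucket = GridFSBucket(MongoClient().test)
            file_id = bucket.upload_from_stream(
                "test_file", b"data I want to store!",
                metadata={"contentType": "text/plain"})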
Reads the contents of the user file from `source` and uploads @@ -621,11 +631,12 @@ def upload_from_stream(self, filename, source, chunk_size_bytes=None, filename, chunk_size_bytes, metadata, session=session) as gin: gin.write(source) - return gin._id + return cast(ObjectId, gin._id) - def upload_from_stream_with_id(self, file_id, filename, source, - chunk_size_bytes=None, metadata=None, - session=None): + def upload_from_stream_with_id(self, file_id: Any, filename: str, source: Any, + chunk_size_bytes: Optional[int] = None, + metadata: Optional[Mapping[str, Any]] = None, + session: Optional[ClientSession] = None) -> None: """Uploads a user file to a GridFS bucket with a custom file id. Reads the contents of the user file from `source` and uploads @@ -667,7 +678,7 @@ def upload_from_stream_with_id(self, file_id, filename, source, session=session) as gin: gin.write(source) - def open_download_stream(self, file_id, session=None): + def open_download_stream(self, file_id: Any, session: Optional[ClientSession] = None) -> GridOut: """Opens a Stream from which the application can read the contents of the stored file specified by file_id. @@ -698,7 +709,7 @@ def open_download_stream(self, file_id, session=None): gout._ensure_file() return gout - def download_to_stream(self, file_id, destination, session=None): + def download_to_stream(self, file_id: Any, destination: Any, session: Optional[ClientSession] = None) -> None: """Downloads the contents of the stored file specified by file_id and writes the contents to `destination`. @@ -729,7 +740,7 @@ def download_to_stream(self, file_id, destination, session=None): for chunk in gout: destination.write(chunk) - def delete(self, file_id, session=None): + def delete(self, file_id: Any, session: Optional[ClientSession] = None) -> None: """Given an file_id, delete this stored file's files collection document and associated chunks from a GridFS bucket. @@ -758,7 +769,7 @@ def delete(self, file_id, session=None): raise NoFile( "no file could be deleted because none matched %s" % file_id) - def find(self, *args, **kwargs): + def find(self, *args: Any, **kwargs: Any) -> GridOutCursor: """Find and return the files collection documents that match ``filter`` Returns a cursor that iterates across files matching @@ -806,7 +817,7 @@ def find(self, *args, **kwargs): """ return GridOutCursor(self._collection, *args, **kwargs) - def open_download_stream_by_name(self, filename, revision=-1, session=None): + def open_download_stream_by_name(self, filename: str, revision: int = -1, session: Optional[ClientSession] = None) -> GridOut: """Opens a Stream from which the application can read the contents of `filename` and optional `revision`. @@ -861,8 +872,9 @@ def open_download_stream_by_name(self, filename, revision=-1, session=None): raise NoFile( "no version %d for filename %r" % (revision, filename)) - def download_to_stream_by_name(self, filename, destination, revision=-1, - session=None): + def download_to_stream_by_name(self, filename: str, destination: Any, + revision: int = -1, + session: Optional[ClientSession] = None) -> None: """Write the contents of `filename` (with optional `revision`) to `destination`. @@ -905,7 +917,7 @@ def download_to_stream_by_name(self, filename, destination, revision=-1, for chunk in gout: destination.write(chunk) - def rename(self, file_id, new_filename, session=None): + def rename(self, file_id: Any, new_filename: str, session: Optional[ClientSession] = None) -> None: """Renames the stored file with the specified file_id. 
For example:: diff --git a/gridfs/grid_file.py b/gridfs/grid_file.py index fc01d88d24..9353a97a1c 100644 --- a/gridfs/grid_file.py +++ b/gridfs/grid_file.py @@ -17,32 +17,25 @@ import io import math import os +from typing import Any, Iterable, List, Mapping, Optional, cast -from bson.int64 import Int64 -from bson.son import SON from bson.binary import Binary +from bson.int64 import Int64 from bson.objectid import ObjectId +from bson.son import SON +from gridfs.errors import CorruptGridFile, FileExists, NoFile from pymongo import ASCENDING +from pymongo.client_session import ClientSession from pymongo.collection import Collection from pymongo.cursor import Cursor -from pymongo.errors import (ConfigurationError, - CursorNotFound, - DuplicateKeyError, - InvalidOperation, +from pymongo.errors import (ConfigurationError, CursorNotFound, + DuplicateKeyError, InvalidOperation, OperationFailure) from pymongo.read_preferences import ReadPreference -from gridfs.errors import CorruptGridFile, FileExists, NoFile - -try: - _SEEK_SET = os.SEEK_SET - _SEEK_CUR = os.SEEK_CUR - _SEEK_END = os.SEEK_END -# before 2.5 -except AttributeError: - _SEEK_SET = 0 - _SEEK_CUR = 1 - _SEEK_END = 2 +_SEEK_SET = os.SEEK_SET +_SEEK_CUR = os.SEEK_CUR +_SEEK_END = os.SEEK_END EMPTY = b"" NEWLN = b"\n" @@ -51,14 +44,14 @@ # Slightly under a power of 2, to work well with server's record allocations. DEFAULT_CHUNK_SIZE = 255 * 1024 -_C_INDEX = SON([("files_id", ASCENDING), ("n", ASCENDING)]) -_F_INDEX = SON([("filename", ASCENDING), ("uploadDate", ASCENDING)]) +_C_INDEX: SON[str, Any] = SON([("files_id", ASCENDING), ("n", ASCENDING)]) +_F_INDEX: SON[str, Any] = SON([("filename", ASCENDING), ("uploadDate", ASCENDING)]) -def _grid_in_property(field_name, docstring, read_only=False, - closed_only=False): +def _grid_in_property(field_name: str, docstring: str, read_only: Optional[bool] = False, + closed_only: Optional[bool] = False) -> Any: """Create a GridIn property.""" - def getter(self): + def getter(self: Any) -> Any: if closed_only and not self._closed: raise AttributeError("can only get %r on a closed file" % field_name) @@ -67,7 +60,7 @@ def getter(self): return self._file.get(field_name, 0) return self._file.get(field_name, None) - def setter(self, value): + def setter(self: Any, value: Any) -> Any: if self._closed: self._coll.files.update_one({"_id": self._file["_id"]}, {"$set": {field_name: value}}) @@ -85,9 +78,9 @@ def setter(self, value): return property(getter, doc=docstring) -def _grid_out_property(field_name, docstring): +def _grid_out_property(field_name: str, docstring: str) -> Any: """Create a GridOut property.""" - def getter(self): + def getter(self: Any) -> Any: self._ensure_file() # Protect against PHP-237 @@ -99,13 +92,13 @@ def getter(self): return property(getter, doc=docstring) -def _clear_entity_type_registry(entity, **kwargs): +def _clear_entity_type_registry(entity: Any, **kwargs: Any) -> Any: """Clear the given database/collection object's type registry.""" codecopts = entity.codec_options.with_options(type_registry=None) return entity.with_options(codec_options=codecopts, **kwargs) -def _disallow_transactions(session): +def _disallow_transactions(session: Optional[ClientSession]) -> None: if session and session.in_transaction: raise InvalidOperation( 'GridFS does not support multi-document transactions') @@ -114,7 +107,7 @@ def _disallow_transactions(session): class GridIn(object): """Class to write data to GridFS. 
""" - def __init__(self, root_collection, session=None, **kwargs): + def __init__(self, root_collection: Collection, session: Optional[ClientSession] = None, **kwargs: Any) -> None: """Write a file to GridFS Application developers should generally not need to @@ -150,7 +143,7 @@ def __init__(self, root_collection, session=None, **kwargs): - `session` (optional): a :class:`~pymongo.client_session.ClientSession` to use for all commands - - `**kwargs` (optional): file level options (see above) + - `**kwargs: Any` (optional): file level options (see above) .. versionchanged:: 4.0 Removed the `disable_md5` parameter. See @@ -197,7 +190,7 @@ def __init__(self, root_collection, session=None, **kwargs): object.__setattr__(self, "_closed", False) object.__setattr__(self, "_ensured_index", False) - def __create_index(self, collection, index_key, unique): + def __create_index(self, collection: Collection, index_key: Any, unique: bool) -> None: doc = collection.find_one(projection={"_id": 1}, session=self._session) if doc is None: try: @@ -209,14 +202,14 @@ def __create_index(self, collection, index_key, unique): collection.create_index( index_key.items(), unique=unique, session=self._session) - def __ensure_indexes(self): + def __ensure_indexes(self) -> None: if not object.__getattribute__(self, "_ensured_index"): _disallow_transactions(self._session) self.__create_index(self._coll.files, _F_INDEX, False) self.__create_index(self._coll.chunks, _C_INDEX, True) object.__setattr__(self, "_ensured_index", True) - def abort(self): + def abort(self) -> None: """Remove all chunks/files that may have been uploaded and close. """ self._coll.chunks.delete_many( @@ -226,33 +219,36 @@ def abort(self): object.__setattr__(self, "_closed", True) @property - def closed(self): + def closed(self) -> bool: """Is this file closed? 
""" return self._closed - _id = _grid_in_property("_id", "The ``'_id'`` value for this file.", + _id: Any = _grid_in_property("_id", "The ``'_id'`` value for this file.", read_only=True) - filename = _grid_in_property("filename", "Name of this file.") - name = _grid_in_property("filename", "Alias for `filename`.") - content_type = _grid_in_property("contentType", "Mime-type for this file.") - length = _grid_in_property("length", "Length (in bytes) of this file.", + filename: Optional[str] = _grid_in_property("filename", "Name of this file.") + name: Optional[str] = _grid_in_property("filename", "Alias for `filename`.") + content_type: Optional[str] = _grid_in_property("contentType", "Mime-type for this file.") + length: int = _grid_in_property("length", "Length (in bytes) of this file.", closed_only=True) - chunk_size = _grid_in_property("chunkSize", "Chunk size for this file.", + chunk_size: int = _grid_in_property("chunkSize", "Chunk size for this file.", read_only=True) - upload_date = _grid_in_property("uploadDate", + upload_date: datetime.datetime = _grid_in_property("uploadDate", "Date that this file was uploaded.", closed_only=True) - md5 = _grid_in_property("md5", "MD5 of the contents of this file " + md5: Optional[str] = _grid_in_property("md5", "MD5 of the contents of this file " "if an md5 sum was created.", closed_only=True) - def __getattr__(self, name): + _buffer: io.BytesIO + _closed: bool + + def __getattr__(self, name: str) -> Any: if name in self._file: return self._file[name] raise AttributeError("GridIn object has no attribute '%s'" % name) - def __setattr__(self, name, value): + def __setattr__(self, name: str, value: Any) -> None: # For properties of this instance like _buffer, or descriptors set on # the class like filename, use regular __setattr__ if name in self.__dict__ or name in self.__class__.__dict__: @@ -266,7 +262,7 @@ def __setattr__(self, name, value): self._coll.files.update_one({"_id": self._file["_id"]}, {"$set": {name: value}}) - def __flush_data(self, data): + def __flush_data(self, data: Any) -> None: """Flush `data` to a chunk. """ self.__ensure_indexes() @@ -285,14 +281,14 @@ def __flush_data(self, data): self._chunk_number += 1 self._position += len(data) - def __flush_buffer(self): + def __flush_buffer(self) -> None: """Flush the buffer contents out to a chunk. """ self.__flush_data(self._buffer.getvalue()) self._buffer.close() self._buffer = io.BytesIO() - def __flush(self): + def __flush(self) -> Any: """Flush the file to the database. """ try: @@ -306,11 +302,11 @@ def __flush(self): except DuplicateKeyError: self._raise_file_exists(self._id) - def _raise_file_exists(self, file_id): + def _raise_file_exists(self, file_id: Any) -> None: """Raise a FileExists exception for the given file_id.""" raise FileExists("file with _id %r already exists" % file_id) - def close(self): + def close(self) -> None: """Flush the file and close it. A closed file cannot be written any more. Calling @@ -320,16 +316,16 @@ def close(self): self.__flush() object.__setattr__(self, "_closed", True) - def read(self, size=-1): + def read(self, size: Optional[int] = -1) -> None: raise io.UnsupportedOperation('read') - def readable(self): + def readable(self) -> bool: return False - def seekable(self): + def seekable(self)-> bool: return False - def write(self, data): + def write(self, data: Any) -> None: """Write data to the file. There is no return value. 
`data` can be either a string of bytes or a file-like object @@ -387,7 +383,7 @@ def write(self, data): to_write = read(self.chunk_size) self._buffer.write(to_write) - def writelines(self, sequence): + def writelines(self, sequence: Iterable[Any]) -> None: """Write a sequence of strings to the file. Does not add seperators. @@ -395,15 +391,15 @@ def writelines(self, sequence): for line in sequence: self.write(line) - def writeable(self): + def writeable(self) -> bool: return True - def __enter__(self): + def __enter__(self) -> "GridIn": """Support for the context manager protocol. """ return self - def __exit__(self, exc_type, exc_val, exc_tb): + def __exit__(self, exc_type: Any, exc_val: Any, exc_tb: Any) -> Any: """Support for the context manager protocol. Close the file and allow exceptions to propagate. @@ -417,8 +413,9 @@ def __exit__(self, exc_type, exc_val, exc_tb): class GridOut(io.IOBase): """Class to read data out of GridFS. """ - def __init__(self, root_collection, file_id=None, file_document=None, - session=None): + def __init__(self, root_collection: Collection, file_id: Optional[int] = None, + file_document: Optional[Any] = None, + session: Optional[ClientSession] = None) -> None: """Read a file from GridFS Application developers should generally not need to @@ -469,20 +466,23 @@ def __init__(self, root_collection, file_id=None, file_document=None, self._file = file_document self._session = session - _id = _grid_out_property("_id", "The ``'_id'`` value for this file.") - filename = _grid_out_property("filename", "Name of this file.") - name = _grid_out_property("filename", "Alias for `filename`.") - content_type = _grid_out_property("contentType", "Mime-type for this file.") - length = _grid_out_property("length", "Length (in bytes) of this file.") - chunk_size = _grid_out_property("chunkSize", "Chunk size for this file.") - upload_date = _grid_out_property("uploadDate", + _id: Any = _grid_out_property("_id", "The ``'_id'`` value for this file.") + filename: str = _grid_out_property("filename", "Name of this file.") + name: str = _grid_out_property("filename", "Alias for `filename`.") + content_type: Optional[str] = _grid_out_property("contentType", "Mime-type for this file.") + length: int = _grid_out_property("length", "Length (in bytes) of this file.") + chunk_size: int = _grid_out_property("chunkSize", "Chunk size for this file.") + upload_date: datetime.datetime = _grid_out_property("uploadDate", "Date that this file was first uploaded.") - aliases = _grid_out_property("aliases", "List of aliases for this file.") - metadata = _grid_out_property("metadata", "Metadata attached to this file.") - md5 = _grid_out_property("md5", "MD5 of the contents of this file " + aliases: Optional[List[str]] = _grid_out_property("aliases", "List of aliases for this file.") + metadata: Optional[Mapping[str, Any]] = _grid_out_property("metadata", "Metadata attached to this file.") + md5: Optional[str] = _grid_out_property("md5", "MD5 of the contents of this file " "if an md5 sum was created.") - def _ensure_file(self): + _file: Any + __chunk_iter: Any + + def _ensure_file(self) -> None: if not self._file: _disallow_transactions(self._session) self._file = self.__files.find_one({"_id": self.__file_id}, @@ -491,16 +491,16 @@ def _ensure_file(self): raise NoFile("no file in gridfs collection %r with _id %r" % (self.__files, self.__file_id)) - def __getattr__(self, name): + def __getattr__(self, name: str) -> Any: self._ensure_file() if name in self._file: return self._file[name] raise 
AttributeError("GridOut object has no attribute '%s'" % name) - def readable(self): + def readable(self) -> bool: return True - def readchunk(self): + def readchunk(self) -> bytes: """Reads a chunk at a time. If the current position is within a chunk the remainder of the chunk is returned. """ @@ -526,7 +526,7 @@ def readchunk(self): self.__buffer = EMPTY return chunk_data - def read(self, size=-1): + def read(self, size: int = -1) -> bytes: """Read at most `size` bytes from the file (less if there isn't enough data). @@ -572,7 +572,7 @@ def read(self, size=-1): data.seek(0) return data.read(size) - def readline(self, size=-1): + def readline(self, size: int = -1) -> bytes: # type: ignore[override] """Read one line or up to `size` bytes from the file. :Parameters: @@ -606,12 +606,12 @@ def readline(self, size=-1): data.seek(0) return data.read(size) - def tell(self): + def tell(self) -> int: """Return the current position of this file. """ return self.__position - def seek(self, pos, whence=_SEEK_SET): + def seek(self, pos: int, whence: int = _SEEK_SET) -> int: """Set the current position of this file. :Parameters: @@ -622,6 +622,10 @@ def seek(self, pos, whence=_SEEK_SET): positioning, :attr:`os.SEEK_CUR` (``1``) to seek relative to the current position, :attr:`os.SEEK_END` (``2``) to seek relative to the file's end. + + .. versionchanged:: 4.1 + The method now returns the new position in the file, to + conform to the behavior of :meth:`io.IOBase.seek`. """ if whence == _SEEK_SET: new_pos = pos @@ -637,18 +641,19 @@ def seek(self, pos, whence=_SEEK_SET): # Optimization, continue using the same buffer and chunk iterator. if new_pos == self.__position: - return + return new_pos self.__position = new_pos self.__buffer = EMPTY if self.__chunk_iter: self.__chunk_iter.close() self.__chunk_iter = None + return new_pos - def seekable(self): + def seekable(self) -> bool: return True - def __iter__(self): + def __iter__(self) -> "GridOut": """Return an iterator over all of this file's data. The iterator will return lines (delimited by ``b'\\n'``) of @@ -669,46 +674,46 @@ def __iter__(self): """ return self - def close(self): + def close(self) -> None: """Make GridOut more generically file-like.""" if self.__chunk_iter: self.__chunk_iter.close() self.__chunk_iter = None super().close() - def write(self, value): + def write(self, value: Any) -> None: raise io.UnsupportedOperation('write') - def writelines(self, lines): + def writelines(self, lines: Any) -> None: raise io.UnsupportedOperation('writelines') - def writable(self): + def writable(self) -> bool: return False - def __enter__(self): + def __enter__(self) -> "GridOut": """Makes it possible to use :class:`GridOut` files with the context manager protocol. """ return self - def __exit__(self, exc_type, exc_val, exc_tb): + def __exit__(self, exc_type: Any, exc_val: Any, exc_tb: Any) -> Any: """Makes it possible to use :class:`GridOut` files with the context manager protocol. """ self.close() return False - def fileno(self): + def fileno(self) -> int: raise io.UnsupportedOperation('fileno') - def flush(self): + def flush(self) -> None: # GridOut is read-only, so flush does nothing. pass - def isatty(self): + def isatty(self) -> bool: return False - def truncate(self, size=None): + def truncate(self, size: Optional[int] = None) -> int: # See https://docs.python.org/3/library/io.html#io.IOBase.writable # for why truncate has to raise. 
raise io.UnsupportedOperation('truncate') @@ -716,7 +721,7 @@ def truncate(self, size=None): # Override IOBase.__del__ otherwise it will lead to __getattr__ on # __IOBase_closed which calls _ensure_file and potentially performs I/O. # We cannot do I/O in __del__ since it can lead to a deadlock. - def __del__(self): + def __del__(self) -> None: pass @@ -726,7 +731,7 @@ class _GridOutChunkIterator(object): Raises CorruptGridFile when encountering any truncated, missing, or extra chunk in a file. """ - def __init__(self, grid_out, chunks, session, next_chunk): + def __init__(self, grid_out: GridOut, chunks: Collection, session: Optional[ClientSession], next_chunk: Any) -> None: self._id = grid_out._id self._chunk_size = int(grid_out.chunk_size) self._length = int(grid_out.length) @@ -736,15 +741,17 @@ def __init__(self, grid_out, chunks, session, next_chunk): self._num_chunks = math.ceil(float(self._length) / self._chunk_size) self._cursor = None - def expected_chunk_length(self, chunk_n): + _cursor: Optional[Cursor] + + def expected_chunk_length(self, chunk_n: int) -> int: if chunk_n < self._num_chunks - 1: return self._chunk_size return self._length - (self._chunk_size * (self._num_chunks - 1)) - def __iter__(self): + def __iter__(self) -> "_GridOutChunkIterator": return self - def _create_cursor(self): + def _create_cursor(self) -> None: filter = {"files_id": self._id} if self._next_chunk > 0: filter["n"] = {"$gte": self._next_chunk} @@ -752,7 +759,7 @@ def _create_cursor(self): self._cursor = self._chunks.find(filter, sort=[("n", 1)], session=self._session) - def _next_with_retry(self): + def _next_with_retry(self) -> Mapping[str, Any]: """Return the next chunk and retry once on CursorNotFound. We retry on CursorNotFound to maintain backwards compatibility in @@ -761,7 +768,7 @@ def _next_with_retry(self): """ if self._cursor is None: self._create_cursor() - + assert self._cursor is not None try: return self._cursor.next() except CursorNotFound: @@ -769,7 +776,7 @@ def _next_with_retry(self): self._create_cursor() return self._cursor.next() - def next(self): + def next(self) -> Mapping[str, Any]: try: chunk = self._next_with_retry() except StopIteration: @@ -804,20 +811,20 @@ def next(self): __next__ = next - def close(self): + def close(self) -> None: if self._cursor: self._cursor.close() self._cursor = None class GridOutIterator(object): - def __init__(self, grid_out, chunks, session): + def __init__(self, grid_out: GridOut, chunks: Collection, session: ClientSession): self.__chunk_iter = _GridOutChunkIterator(grid_out, chunks, session, 0) - def __iter__(self): + def __iter__(self) -> "GridOutIterator": return self - def next(self): + def next(self) -> bytes: chunk = self.__chunk_iter.next() return bytes(chunk["data"]) @@ -828,9 +835,13 @@ class GridOutCursor(Cursor): """A cursor / iterator for returning GridOut objects as the result of an arbitrary query against the GridFS files collection. """ - def __init__(self, collection, filter=None, skip=0, limit=0, - no_cursor_timeout=False, sort=None, batch_size=0, - session=None): + def __init__(self, collection: Collection, filter: Optional[Mapping[str, Any]] = None, + skip: int = 0, + limit: int = 0, + no_cursor_timeout: bool = False, + sort: Optional[Any] = None, + batch_size: int = 0, + session: Optional[ClientSession] = None) -> None: """Create a new cursor, similar to the normal :class:`~pymongo.cursor.Cursor`. 
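For illustration, a sketch of the typical way these cursors are produced and
consumed (illustrative only; ``GridFS.find`` returns a ``GridOutCursor`` and
each result is a ``GridOut``)::

    from gridfs import GridFS
    from pymongo import MongoClient

    fs = GridFS(MongoClient().test)
    for grid_out in fs.find({"filename": "lisa.txt"}):
        data = grid_out.read()  # one stored revision per iteration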
@@ -852,7 +863,7 @@ def __init__(self, collection, filter=None, skip=0, limit=0, no_cursor_timeout=no_cursor_timeout, sort=sort, batch_size=batch_size, session=session) - def next(self): + def next(self) -> GridOut: """Get next GridOut object from cursor. """ _disallow_transactions(self.session) @@ -863,13 +874,13 @@ def next(self): __next__ = next - def add_option(self, *args, **kwargs): + def add_option(self, *args: Any, **kwargs: Any) -> None: raise NotImplementedError("Method does not exist for GridOutCursor") - def remove_option(self, *args, **kwargs): + def remove_option(self, *args: Any, **kwargs: Any) -> None: raise NotImplementedError("Method does not exist for GridOutCursor") - def _clone_base(self, session): + def _clone_base(self, session: ClientSession) -> "GridOutCursor": """Creates an empty GridOutCursor for information to be copied into. """ return GridOutCursor(self.__root_collection, session=session) diff --git a/mypy.ini b/mypy.ini new file mode 100644 index 0000000000..2646febb6f --- /dev/null +++ b/mypy.ini @@ -0,0 +1,11 @@ +[mypy] +disallow_subclassing_any = true +disallow_incomplete_defs = true +no_implicit_optional = true +strict_equality = true +warn_unused_configs = true +warn_unused_ignores = true +warn_redundant_casts = true + +[mypy-mockupdb] +ignore_missing_imports = True diff --git a/setup.py b/setup.py index 7d1ad52dc7..fde9ae1b3f 100755 --- a/setup.py +++ b/setup.py @@ -282,7 +282,7 @@ def build_extension(self, ext): 'snappy': ['python-snappy'], 'zstd': ['zstandard'], 'aws': ['pymongo-auth-aws<2.0.0'], - 'srv': ["dnspython>=1.16.0,<3.0.0"], + 'srv': ["dnspython>=1.16.0,<3.0.0"] } # GSSAPI extras diff --git a/test/mockupdb/operations.py b/test/mockupdb/operations.py index 47890f80ee..138c059ac6 100644 --- a/test/mockupdb/operations.py +++ b/test/mockupdb/operations.py @@ -14,15 +14,14 @@ from collections import namedtuple -from mockupdb import * -from mockupdb import OpMsgReply +from mockupdb import OpMsgReply, OpMsg, OpReply from pymongo import ReadPreference __all__ = ['operations', 'upgrades'] Operation = namedtuple( - 'operation', + 'Operation', ['name', 'function', 'reply', 'op_type', 'not_master']) """Client operations on MongoDB. diff --git a/test/mockupdb/test_getmore_sharded.py b/test/mockupdb/test_getmore_sharded.py index 2b3a1fd6ce..5461a13e35 100644 --- a/test/mockupdb/test_getmore_sharded.py +++ b/test/mockupdb/test_getmore_sharded.py @@ -15,10 +15,7 @@ """Test PyMongo cursor with a sharded cluster.""" from pymongo import MongoClient -try: - from queue import Queue -except ImportError: - from Queue import Queue +from queue import Queue from mockupdb import MockupDB, going diff --git a/test/mockupdb/test_mixed_version_sharded.py b/test/mockupdb/test_mixed_version_sharded.py index d13af3562b..2b6ea6a513 100644 --- a/test/mockupdb/test_mixed_version_sharded.py +++ b/test/mockupdb/test_mixed_version_sharded.py @@ -16,10 +16,7 @@ import time -try: - from queue import Queue -except ImportError: - from Queue import Queue +from queue import Queue from mockupdb import MockupDB, go, OpMsg from pymongo import MongoClient diff --git a/test/mockupdb/test_network_disconnect_primary.py b/test/mockupdb/test_network_disconnect_primary.py index 1df5febb78..bc29ce5f0f 100755 --- a/test/mockupdb/test_network_disconnect_primary.py +++ b/test/mockupdb/test_network_disconnect_primary.py @@ -12,10 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-try:
-    from queue import Queue
-except ImportError:
-    from Queue import Queue
+from queue import Queue

 from mockupdb import MockupDB, wait_until, OpReply, going, Future
 from pymongo.errors import ConnectionFailure
diff --git a/test/mockupdb/test_slave_okay_sharded.py b/test/mockupdb/test_slave_okay_sharded.py
index 719de57553..63bb0fe303 100644
--- a/test/mockupdb/test_slave_okay_sharded.py
+++ b/test/mockupdb/test_slave_okay_sharded.py
@@ -23,10 +23,7 @@
 from pymongo.read_preferences import make_read_preference
 from pymongo.read_preferences import read_pref_mode_from_name

-try:
-    from queue import Queue
-except ImportError:
-    from Queue import Queue
+from queue import Queue

 from mockupdb import MockupDB, going
 from pymongo import MongoClient
diff --git a/test/mod_wsgi_test/test_client.py b/test/mod_wsgi_test/test_client.py
index 61cf8df674..f99ac0054e 100644
--- a/test/mod_wsgi_test/test_client.py
+++ b/test/mod_wsgi_test/test_client.py
@@ -21,18 +21,8 @@

 from optparse import OptionParser

-try:
-    from urllib2 import urlopen
-except ImportError:
-    # Python 3.
-    from urllib.request import urlopen
-
-
-try:
-    import thread
-except ImportError:
-    # Python 3.
-    import _thread as thread
+from urllib.request import urlopen
+import _thread as thread


 def parse_args():
diff --git a/test/performance/perf_test.py b/test/performance/perf_test.py
index dab7138add..b752453f13 100644
--- a/test/performance/perf_test.py
+++ b/test/performance/perf_test.py
@@ -19,12 +19,13 @@
 import sys
 import tempfile
 import time
+from typing import Any, List
 import warnings

 try:
     import simplejson as json
 except ImportError:
-    import json
+    import json  # type: ignore

 sys.path[0:0] = [""]

@@ -44,7 +45,7 @@

 OUTPUT_FILE = os.environ.get('OUTPUT_FILE')

-result_data = []
+result_data: List = []

 def tearDownModule():
     output = json.dumps(result_data, indent=4)
diff --git a/tools/clean.py b/tools/clean.py
index a5d383af4e..55896781a4 100644
--- a/tools/clean.py
+++ b/tools/clean.py
@@ -33,7 +33,7 @@
     pass

 try:
-    from pymongo import _cmessage
+    from pymongo import _cmessage  # type: ignore
     sys.exit("could still import _cmessage")
 except ImportError:
     pass

From c9229ace268379da61d18fac192f96f440a65fe4 Mon Sep 17 00:00:00 2001
From: Julius Park
Date: Tue, 18 Jan 2022 16:40:28 -0800
Subject: [PATCH 0051/1588] PYTHON-3061 Add 'let' option to ReplaceOptions (#832)

---
 doc/changelog.rst                     |   8 +-
 pymongo/collection.py                 |  13 +-
 test/crud/unified/replaceOne-let.json | 207 ++++++++++++++++++++++++++
 3 files changed, 222 insertions(+), 6 deletions(-)
 create mode 100644 test/crud/unified/replaceOne-let.json

diff --git a/doc/changelog.rst b/doc/changelog.rst
index 4ff9cd781d..de38f188e4 100644
--- a/doc/changelog.rst
+++ b/doc/changelog.rst
@@ -14,9 +14,11 @@ PyMongo 4.1 brings a number of improvements including:
   :meth:`pymongo.collection.Collection.find_one_and_delete`,
   :meth:`pymongo.collection.Collection.find_one_and_replace`,
   :meth:`pymongo.collection.Collection.find_one_and_update`,
-  and :meth:`pymongo.collection.Collection.find` all support a new keyword
-  argument ``let`` which is a map of parameter names and values. Parameters
-  can then be accessed as variables in an aggregate expression context.
+  :meth:`pymongo.collection.Collection.find`,
+  and :meth:`pymongo.collection.Collection.replace_one` all support a new
+  keyword argument ``let`` which is a map of parameter names and values.
+  Parameters can then be accessed as variables in an aggregate expression
+  context.
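A minimal sketch of how the new option reads in application code (illustrative
names only; it assumes a reachable MongoDB 5.0+ server and a collection
``coll`` containing ``{'_id': 1}``)::

    from pymongo import MongoClient

    coll = MongoClient().db.coll
    # Bind ``target`` server-side via ``let`` and reference it in the
    # filter as "$$target"; the matched document is replaced wholesale.
    result = coll.replace_one(
        {"$expr": {"$eq": ["$_id", "$$target"]}},
        {"x": "foo"},
        let={"target": 1},
    )
    assert result.matched_count == 1

Because ``let`` values must be constant or closed expressions, the server
binds them once per command rather than evaluating them against each
document's fields.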
- :meth:`~pymongo.collection.Collection.aggregate` now supports $merge and $out executing on secondaries on MongoDB >=5.0. aggregate() now always obeys the collection's :attr:`read_preference` on diff --git a/pymongo/collection.py b/pymongo/collection.py index 82e29f4061..0a8d011217 100644 --- a/pymongo/collection.py +++ b/pymongo/collection.py @@ -673,7 +673,7 @@ def _update(session, sock_info, retryable_write): def replace_one(self, filter, replacement, upsert=False, bypass_document_validation=False, collation=None, - hint=None, session=None): + hint=None, session=None, let=None): """Replace a single document matching the filter. >>> for doc in db.test.find({}): @@ -721,10 +721,16 @@ def replace_one(self, filter, replacement, upsert=False, MongoDB 4.2 and above. - `session` (optional): a :class:`~pymongo.client_session.ClientSession`. + - `let` (optional): Map of parameter names and values. Values must be + constant or closed expressions that do not reference document + fields. Parameters can then be accessed as variables in an + aggregate expression context (e.g. "$$var"). :Returns: - An instance of :class:`~pymongo.results.UpdateResult`. + .. versionchanged:: 4.1 + Added ``let`` parameter. .. versionchanged:: 3.11 Added ``hint`` parameter. .. versionchanged:: 3.6 @@ -738,14 +744,15 @@ def replace_one(self, filter, replacement, upsert=False, """ common.validate_is_mapping("filter", filter) common.validate_ok_for_replace(replacement) - + if let: + common.validate_is_mapping("let", let) write_concern = self._write_concern_for(session) return UpdateResult( self._update_retryable( filter, replacement, upsert, write_concern=write_concern, bypass_doc_val=bypass_document_validation, - collation=collation, hint=hint, session=session), + collation=collation, hint=hint, session=session, let=let), write_concern.acknowledged) def update_one(self, filter, update, upsert=False, diff --git a/test/crud/unified/replaceOne-let.json b/test/crud/unified/replaceOne-let.json new file mode 100644 index 0000000000..6cf8e15675 --- /dev/null +++ b/test/crud/unified/replaceOne-let.json @@ -0,0 +1,207 @@ +{ + "description": "replaceOne-let", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ], + "tests": [ + { + "description": "ReplaceOne with let option", + "runOnRequirements": [ + { + "minServerVersion": "5.0" + } + ], + "operations": [ + { + "name": "replaceOne", + "object": "collection0", + "arguments": { + "filter": { + "$expr": { + "$eq": [ + "$_id", + "$$id" + ] + } + }, + "replacement": { + "x": "foo" + }, + "let": { + "id": 1 + } + }, + "expectResult": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "coll0", + "updates": [ + { + "q": { + "$expr": { + "$eq": [ + "$_id", + "$$id" + ] + } + }, + "u": { + "x": "foo" + } + } + ], + "let": { + "id": 1 + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": "foo" + }, + { 
+ "_id": 2 + } + ] + } + ] + }, + { + "description": "ReplaceOne with let option unsupported (server-side error)", + "runOnRequirements": [ + { + "minServerVersion": "3.6.0", + "maxServerVersion": "4.4.99" + } + ], + "operations": [ + { + "name": "replaceOne", + "object": "collection0", + "arguments": { + "filter": { + "$expr": { + "$eq": [ + "$_id", + "$$id" + ] + } + }, + "replacement": { + "x": "foo" + }, + "let": { + "id": 1 + } + }, + "expectError": { + "errorContains": "'update.let' is an unknown field", + "isClientError": false + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "coll0", + "updates": [ + { + "q": { + "$expr": { + "$eq": [ + "$_id", + "$$id" + ] + } + }, + "u": { + "x": "foo" + } + } + ], + "let": { + "id": 1 + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ] + } + ] +} From f074cfb696f7e44d4fb6fdd3c465a303cf4cca9c Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Thu, 20 Jan 2022 08:18:13 -0600 Subject: [PATCH 0052/1588] PYTHON-2863 Allow hint for unacknowledged writes using OP_MSG when supported by the server (#830) --- pymongo/bulk.py | 93 ++++-- pymongo/collection.py | 17 +- ...kWrite-deleteMany-hint-unacknowledged.json | 269 +++++++++++++++ ...lkWrite-deleteOne-hint-unacknowledged.json | 265 +++++++++++++++ ...kWrite-replaceOne-hint-unacknowledged.json | 293 +++++++++++++++++ ...kWrite-updateMany-hint-unacknowledged.json | 305 ++++++++++++++++++ ...lkWrite-updateOne-hint-unacknowledged.json | 305 ++++++++++++++++++ .../deleteMany-hint-unacknowledged.json | 245 ++++++++++++++ .../deleteOne-hint-unacknowledged.json | 241 ++++++++++++++ .../findOneAndDelete-hint-unacknowledged.json | 225 +++++++++++++ ...findOneAndReplace-hint-unacknowledged.json | 248 ++++++++++++++ .../findOneAndUpdate-hint-unacknowledged.json | 253 +++++++++++++++ .../replaceOne-hint-unacknowledged.json | 269 +++++++++++++++ ...ged-bulkWrite-delete-hint-clientError.json | 193 ----------- ...ged-bulkWrite-update-hint-clientError.json | 284 ---------------- ...nowledged-deleteMany-hint-clientError.json | 149 --------- ...knowledged-deleteOne-hint-clientError.json | 133 -------- ...ged-findOneAndDelete-hint-clientError.json | 133 -------- ...ed-findOneAndReplace-hint-clientError.json | 139 -------- ...ged-findOneAndUpdate-hint-clientError.json | 143 -------- ...nowledged-replaceOne-hint-clientError.json | 143 -------- ...nowledged-updateMany-hint-clientError.json | 159 --------- ...knowledged-updateOne-hint-clientError.json | 147 --------- .../updateMany-hint-unacknowledged.json | 281 ++++++++++++++++ .../updateOne-hint-unacknowledged.json | 281 ++++++++++++++++ 25 files changed, 3549 insertions(+), 1664 deletions(-) create mode 100644 test/crud/unified/bulkWrite-deleteMany-hint-unacknowledged.json create mode 100644 test/crud/unified/bulkWrite-deleteOne-hint-unacknowledged.json create mode 100644 test/crud/unified/bulkWrite-replaceOne-hint-unacknowledged.json create mode 100644 test/crud/unified/bulkWrite-updateMany-hint-unacknowledged.json create mode 100644 test/crud/unified/bulkWrite-updateOne-hint-unacknowledged.json create mode 100644 test/crud/unified/deleteMany-hint-unacknowledged.json create mode 100644 test/crud/unified/deleteOne-hint-unacknowledged.json create mode 100644 test/crud/unified/findOneAndDelete-hint-unacknowledged.json create mode 100644 
test/crud/unified/findOneAndReplace-hint-unacknowledged.json create mode 100644 test/crud/unified/findOneAndUpdate-hint-unacknowledged.json create mode 100644 test/crud/unified/replaceOne-hint-unacknowledged.json delete mode 100644 test/crud/unified/unacknowledged-bulkWrite-delete-hint-clientError.json delete mode 100644 test/crud/unified/unacknowledged-bulkWrite-update-hint-clientError.json delete mode 100644 test/crud/unified/unacknowledged-deleteMany-hint-clientError.json delete mode 100644 test/crud/unified/unacknowledged-deleteOne-hint-clientError.json delete mode 100644 test/crud/unified/unacknowledged-findOneAndDelete-hint-clientError.json delete mode 100644 test/crud/unified/unacknowledged-findOneAndReplace-hint-clientError.json delete mode 100644 test/crud/unified/unacknowledged-findOneAndUpdate-hint-clientError.json delete mode 100644 test/crud/unified/unacknowledged-replaceOne-hint-clientError.json delete mode 100644 test/crud/unified/unacknowledged-updateMany-hint-clientError.json delete mode 100644 test/crud/unified/unacknowledged-updateOne-hint-clientError.json create mode 100644 test/crud/unified/updateMany-hint-unacknowledged.json create mode 100644 test/crud/unified/updateOne-hint-unacknowledged.json diff --git a/pymongo/bulk.py b/pymongo/bulk.py index 1bb8edf943..1921108a12 100644 --- a/pymongo/bulk.py +++ b/pymongo/bulk.py @@ -149,12 +149,14 @@ def __init__(self, collection, ordered, bypass_document_validation): self.bypass_doc_val = bypass_document_validation self.uses_collation = False self.uses_array_filters = False - self.uses_hint = False + self.uses_hint_update = False + self.uses_hint_delete = False self.is_retryable = True self.retrying = False self.started_retryable_write = False # Extra state so that we know where to pick up on a retry attempt. self.current_run = None + self.next_run = None @property def bulk_ctx_class(self): @@ -188,7 +190,7 @@ def add_update(self, selector, update, multi=False, upsert=False, self.uses_array_filters = True cmd['arrayFilters'] = array_filters if hint is not None: - self.uses_hint = True + self.uses_hint_update = True cmd['hint'] = hint if multi: # A bulk_write containing an update_many is not retryable. @@ -207,7 +209,7 @@ def add_replace(self, selector, replacement, upsert=False, self.uses_collation = True cmd['collation'] = collation if hint is not None: - self.uses_hint = True + self.uses_hint_update = True cmd['hint'] = hint self.ops.append((_UPDATE, cmd)) @@ -220,7 +222,7 @@ def add_delete(self, selector, limit, collation=None, hint=None): self.uses_collation = True cmd['collation'] = collation if hint is not None: - self.uses_hint = True + self.uses_hint_delete = True cmd['hint'] = hint if limit == _DELETE_ALL: # A bulk_write containing a delete_many is not retryable. @@ -254,25 +256,39 @@ def gen_unordered(self): yield run def _execute_command(self, generator, write_concern, session, - sock_info, op_id, retryable, full_result): + sock_info, op_id, retryable, full_result, + final_write_concern=None): db_name = self.collection.database.name client = self.collection.database.client listeners = client._event_listeners if not self.current_run: self.current_run = next(generator) + self.next_run = None run = self.current_run # sock_info.command validates the session, but we use # sock_info.write_command. 
sock_info.validate_session(client, session) + last_run = False + while run: + if not self.retrying: + self.next_run = next(generator, None) + if self.next_run is None: + last_run = True + cmd_name = _COMMANDS[run.op_type] bwc = self.bulk_ctx_class( db_name, cmd_name, sock_info, op_id, listeners, session, run.op_type, self.collection.codec_options) while run.idx_offset < len(run.ops): + # If this is the last possible operation, use the + # final write concern. + if last_run and (len(run.ops) - run.idx_offset) == 1: + write_concern = final_write_concern or write_concern + cmd = SON([(cmd_name, self.collection.name), ('ordered', self.ordered)]) if not write_concern.is_server_default: @@ -290,25 +306,31 @@ def _execute_command(self, generator, write_concern, session, sock_info.send_cluster_time(cmd, session, client) sock_info.add_server_api(cmd) ops = islice(run.ops, run.idx_offset, None) + # Run as many ops as possible in one command. - result, to_send = bwc.execute(cmd, ops, client) - - # Retryable writeConcernErrors halt the execution of this run. - wce = result.get('writeConcernError', {}) - if wce.get('code', 0) in _RETRYABLE_ERROR_CODES: - # Synthesize the full bulk result without modifying the - # current one because this write operation may be retried. - full = copy.deepcopy(full_result) - _merge_command(run, full, run.idx_offset, result) - _raise_bulk_write_error(full) - - _merge_command(run, full_result, run.idx_offset, result) - # We're no longer in a retry once a command succeeds. - self.retrying = False - self.started_retryable_write = False - - if self.ordered and "writeErrors" in result: - break + if write_concern.acknowledged: + result, to_send = bwc.execute(cmd, ops, client) + + # Retryable writeConcernErrors halt the execution of this run. + wce = result.get('writeConcernError', {}) + if wce.get('code', 0) in _RETRYABLE_ERROR_CODES: + # Synthesize the full bulk result without modifying the + # current one because this write operation may be retried. + full = copy.deepcopy(full_result) + _merge_command(run, full, run.idx_offset, result) + _raise_bulk_write_error(full) + + _merge_command(run, full_result, run.idx_offset, result) + + # We're no longer in a retry once a command succeeds. + self.retrying = False + self.started_retryable_write = False + + if self.ordered and "writeErrors" in result: + break + else: + to_send = bwc.execute_unack(cmd, ops, client) + run.idx_offset += len(to_send) # We're supposed to continue if errors are @@ -316,7 +338,7 @@ def _execute_command(self, generator, write_concern, session, if self.ordered and full_result['writeErrors']: break # Reset our state - self.current_run = run = next(generator, None) + self.current_run = run = self.next_run def execute_command(self, generator, write_concern, session): """Execute using write commands. @@ -377,7 +399,7 @@ def execute_op_msg_no_results(self, sock_info, generator): run.idx_offset += len(to_send) self.current_run = run = next(generator, None) - def execute_command_no_results(self, sock_info, generator): + def execute_command_no_results(self, sock_info, generator, write_concern): """Execute write commands with OP_MSG and w=0 WriteConcern, ordered. """ full_result = { @@ -393,16 +415,16 @@ def execute_command_no_results(self, sock_info, generator): # Ordered bulk writes have to be acknowledged so that we stop # processing at the first error, even when the application # specified unacknowledged writeConcern. 
- write_concern = WriteConcern() + initial_write_concern = WriteConcern() op_id = _randint() try: self._execute_command( - generator, write_concern, None, - sock_info, op_id, False, full_result) + generator, initial_write_concern, None, + sock_info, op_id, False, full_result, write_concern) except OperationFailure: pass - def execute_no_results(self, sock_info, generator): + def execute_no_results(self, sock_info, generator, write_concern): """Execute all operations, returning no results (w=0). """ if self.uses_collation: @@ -411,16 +433,21 @@ def execute_no_results(self, sock_info, generator): if self.uses_array_filters: raise ConfigurationError( 'arrayFilters is unsupported for unacknowledged writes.') - if self.uses_hint: + # Guard against unsupported unacknowledged writes. + unack = write_concern and not write_concern.acknowledged + if unack and self.uses_hint_delete and sock_info.max_wire_version < 9: + raise ConfigurationError( + 'Must be connected to MongoDB 4.4+ to use hint on unacknowledged delete commands.') + if unack and self.uses_hint_update and sock_info.max_wire_version < 8: raise ConfigurationError( - 'hint is unsupported for unacknowledged writes.') + 'Must be connected to MongoDB 4.2+ to use hint on unacknowledged update commands.') # Cannot have both unacknowledged writes and bypass document validation. if self.bypass_doc_val: raise OperationFailure("Cannot set bypass_document_validation with" " unacknowledged write concern") if self.ordered: - return self.execute_command_no_results(sock_info, generator) + return self.execute_command_no_results(sock_info, generator, write_concern) return self.execute_op_msg_no_results(sock_info, generator) def execute(self, write_concern, session): @@ -443,6 +470,6 @@ def execute(self, write_concern, session): client = self.collection.database.client if not write_concern.acknowledged: with client._socket_for_writes(session) as sock_info: - self.execute_no_results(sock_info, generator) + self.execute_no_results(sock_info, generator, write_concern) else: return self.execute_command(generator, write_concern, session) diff --git a/pymongo/collection.py b/pymongo/collection.py index 0a8d011217..ecb82a2cac 100644 --- a/pymongo/collection.py +++ b/pymongo/collection.py @@ -608,9 +608,9 @@ def _update(self, sock_info, criteria, document, upsert=False, else: update_doc['arrayFilters'] = array_filters if hint is not None: - if not acknowledged: + if not acknowledged and sock_info.max_wire_version < 8: raise ConfigurationError( - 'hint is unsupported for unacknowledged writes.') + 'Must be connected to MongoDB 4.2+ to use hint on unacknowledged update commands.') if not isinstance(hint, str): hint = helpers._index_document(hint) update_doc['hint'] = hint @@ -961,9 +961,9 @@ def _delete( else: delete_doc['collation'] = collation if hint is not None: - if not acknowledged: + if not acknowledged and sock_info.max_wire_version < 9: raise ConfigurationError( - 'hint is unsupported for unacknowledged writes.') + 'Must be connected to MongoDB 4.4+ to use hint on unacknowledged delete commands.') if not isinstance(hint, str): hint = helpers._index_document(hint) delete_doc['hint'] = hint @@ -2277,8 +2277,9 @@ def __find_and_modify(self, filter, projection, sort, upsert=None, write_concern = self._write_concern_for_cmd(cmd, session) def _find_and_modify(session, sock_info, retryable_write): + acknowledged = write_concern.acknowledged if array_filters is not None: - if not write_concern.acknowledged: + if not acknowledged: raise ConfigurationError( 
'arrayFilters is unsupported for unacknowledged ' 'writes.') @@ -2286,10 +2287,10 @@ def _find_and_modify(session, sock_info, retryable_write): if hint is not None: if sock_info.max_wire_version < 8: raise ConfigurationError( - 'Must be connected to MongoDB 4.2+ to use hint.') - if not write_concern.acknowledged: + 'Must be connected to MongoDB 4.2+ to use hint on find and modify commands.') + elif (not acknowledged and sock_info.max_wire_version < 9): raise ConfigurationError( - 'hint is unsupported for unacknowledged writes.') + 'Must be connected to MongoDB 4.4+ to use hint on unacknowledged find and modify commands.') cmd['hint'] = hint if not write_concern.is_server_default: cmd['writeConcern'] = write_concern.document diff --git a/test/crud/unified/bulkWrite-deleteMany-hint-unacknowledged.json b/test/crud/unified/bulkWrite-deleteMany-hint-unacknowledged.json new file mode 100644 index 0000000000..2dda9486e8 --- /dev/null +++ b/test/crud/unified/bulkWrite-deleteMany-hint-unacknowledged.json @@ -0,0 +1,269 @@ +{ + "description": "bulkWrite-deleteMany-hint-unacknowledged", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "db0" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0", + "collectionOptions": { + "writeConcern": { + "w": 0 + } + } + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "db0", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ], + "tests": [ + { + "description": "Unacknowledged deleteMany with hint string fails with client-side error on pre-4.4 server", + "runOnRequirements": [ + { + "maxServerVersion": "4.2.99" + } + ], + "operations": [ + { + "object": "collection0", + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "deleteMany": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "hint": "_id_" + } + } + ] + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [] + } + ] + }, + { + "description": "Unacknowledged deleteMany with hint document fails with client-side error on pre-4.4 server", + "runOnRequirements": [ + { + "maxServerVersion": "4.2.99" + } + ], + "operations": [ + { + "object": "collection0", + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "deleteMany": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "hint": { + "_id": 1 + } + } + } + ] + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [] + } + ] + }, + { + "description": "Unacknowledged deleteMany with hint string on 4.4+ server", + "runOnRequirements": [ + { + "minServerVersion": "4.4.0" + } + ], + "operations": [ + { + "object": "collection0", + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "deleteMany": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "hint": "_id_" + } + } + ] + }, + "expectResult": { + "$$unsetOrMatches": { + "acknowledged": { + "$$unsetOrMatches": false + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "delete": "coll0", + "deletes": [ + { + "q": { + "_id": { + "$gt": 1 + } + }, + "hint": { + "$$type": [ + "string", + "object" + ] + }, + "limit": 0 + } + ], + "writeConcern": { + "w": 0 + } + } + } + } 
+ ] + } + ] + }, + { + "description": "Unacknowledged deleteMany with hint document on 4.4+ server", + "runOnRequirements": [ + { + "minServerVersion": "4.4.0" + } + ], + "operations": [ + { + "object": "collection0", + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "deleteMany": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "hint": { + "_id": 1 + } + } + } + ] + }, + "expectResult": { + "$$unsetOrMatches": { + "acknowledged": { + "$$unsetOrMatches": false + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "delete": "coll0", + "deletes": [ + { + "q": { + "_id": { + "$gt": 1 + } + }, + "hint": { + "$$type": [ + "string", + "object" + ] + }, + "limit": 0 + } + ], + "writeConcern": { + "w": 0 + } + } + } + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/bulkWrite-deleteOne-hint-unacknowledged.json b/test/crud/unified/bulkWrite-deleteOne-hint-unacknowledged.json new file mode 100644 index 0000000000..aadf6d9e99 --- /dev/null +++ b/test/crud/unified/bulkWrite-deleteOne-hint-unacknowledged.json @@ -0,0 +1,265 @@ +{ + "description": "bulkWrite-deleteOne-hint-unacknowledged", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "db0" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0", + "collectionOptions": { + "writeConcern": { + "w": 0 + } + } + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "db0", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ], + "tests": [ + { + "description": "Unacknowledged deleteOne with hint string fails with client-side error on pre-4.4 server", + "runOnRequirements": [ + { + "maxServerVersion": "4.2.99" + } + ], + "operations": [ + { + "object": "collection0", + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "deleteOne": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "hint": "_id_" + } + } + ] + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [] + } + ] + }, + { + "description": "Unacknowledged deleteOne with hint document fails with client-side error on pre-4.4 server", + "runOnRequirements": [ + { + "maxServerVersion": "4.2.99" + } + ], + "operations": [ + { + "object": "collection0", + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "deleteOne": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "hint": { + "_id": 1 + } + } + } + ] + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [] + } + ] + }, + { + "description": "Unacknowledged deleteOne with hint string on 4.4+ server", + "runOnRequirements": [ + { + "minServerVersion": "4.4.0" + } + ], + "operations": [ + { + "object": "collection0", + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "deleteOne": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "hint": "_id_" + } + } + ] + }, + "expectResult": { + "$$unsetOrMatches": { + "acknowledged": { + "$$unsetOrMatches": false + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "delete": "coll0", + "deletes": [ + { + "q": { + "_id": { + "$gt": 1 + } + }, + "hint": { + "$$type": [ + "string", + "object" + ] + }, + 
"limit": 1 + } + ], + "writeConcern": { + "w": 0 + } + } + } + } + ] + } + ] + }, + { + "description": "Unacknowledged deleteOne with hint document on 4.4+ server", + "runOnRequirements": [ + { + "minServerVersion": "4.4.0" + } + ], + "operations": [ + { + "object": "collection0", + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "deleteOne": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "hint": { + "_id": 1 + } + } + } + ] + }, + "expectResult": { + "$$unsetOrMatches": { + "acknowledged": { + "$$unsetOrMatches": false + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "delete": "coll0", + "deletes": [ + { + "q": { + "_id": { + "$gt": 1 + } + }, + "hint": { + "$$type": [ + "string", + "object" + ] + }, + "limit": 1 + } + ], + "writeConcern": { + "w": 0 + } + } + } + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/bulkWrite-replaceOne-hint-unacknowledged.json b/test/crud/unified/bulkWrite-replaceOne-hint-unacknowledged.json new file mode 100644 index 0000000000..e54cd704df --- /dev/null +++ b/test/crud/unified/bulkWrite-replaceOne-hint-unacknowledged.json @@ -0,0 +1,293 @@ +{ + "description": "bulkWrite-replaceOne-hint-unacknowledged", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "db0" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0", + "collectionOptions": { + "writeConcern": { + "w": 0 + } + } + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "db0", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ], + "tests": [ + { + "description": "Unacknowledged replaceOne with hint string fails with client-side error on pre-4.2 server", + "runOnRequirements": [ + { + "maxServerVersion": "4.0.99" + } + ], + "operations": [ + { + "object": "collection0", + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "replaceOne": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "replacement": { + "x": 111 + }, + "hint": "_id_" + } + } + ] + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [] + } + ] + }, + { + "description": "Unacknowledged replaceOne with hint document fails with client-side error on pre-4.2 server", + "runOnRequirements": [ + { + "maxServerVersion": "4.0.99" + } + ], + "operations": [ + { + "object": "collection0", + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "replaceOne": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "replacement": { + "x": 111 + }, + "hint": { + "_id": 1 + } + } + } + ] + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [] + } + ] + }, + { + "description": "Unacknowledged replaceOne with hint string on 4.2+ server", + "runOnRequirements": [ + { + "minServerVersion": "4.2.0" + } + ], + "operations": [ + { + "object": "collection0", + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "replaceOne": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "replacement": { + "x": 111 + }, + "hint": "_id_" + } + } + ] + }, + "expectResult": { + "$$unsetOrMatches": { + "acknowledged": { + "$$unsetOrMatches": false + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + 
"commandStartedEvent": { + "command": { + "update": "coll0", + "updates": [ + { + "q": { + "_id": { + "$gt": 1 + } + }, + "u": { + "x": 111 + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + }, + "hint": { + "$$type": [ + "string", + "object" + ] + } + } + ], + "writeConcern": { + "w": 0 + } + } + } + } + ] + } + ] + }, + { + "description": "Unacknowledged replaceOne with hint document on 4.2+ server", + "runOnRequirements": [ + { + "minServerVersion": "4.2.0" + } + ], + "operations": [ + { + "object": "collection0", + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "replaceOne": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "replacement": { + "x": 111 + }, + "hint": { + "_id": 1 + } + } + } + ] + }, + "expectResult": { + "$$unsetOrMatches": { + "acknowledged": { + "$$unsetOrMatches": false + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "coll0", + "updates": [ + { + "q": { + "_id": { + "$gt": 1 + } + }, + "u": { + "x": 111 + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + }, + "hint": { + "$$type": [ + "string", + "object" + ] + } + } + ], + "writeConcern": { + "w": 0 + } + } + } + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/bulkWrite-updateMany-hint-unacknowledged.json b/test/crud/unified/bulkWrite-updateMany-hint-unacknowledged.json new file mode 100644 index 0000000000..87478918d2 --- /dev/null +++ b/test/crud/unified/bulkWrite-updateMany-hint-unacknowledged.json @@ -0,0 +1,305 @@ +{ + "description": "bulkWrite-updateMany-hint-unacknowledged", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "db0" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0", + "collectionOptions": { + "writeConcern": { + "w": 0 + } + } + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "db0", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ], + "tests": [ + { + "description": "Unacknowledged updateMany with hint string fails with client-side error on pre-4.2 server", + "runOnRequirements": [ + { + "maxServerVersion": "4.0.99" + } + ], + "operations": [ + { + "object": "collection0", + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "updateMany": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "update": { + "$inc": { + "x": 1 + } + }, + "hint": "_id_" + } + } + ] + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [] + } + ] + }, + { + "description": "Unacknowledged updateMany with hint document fails with client-side error on pre-4.2 server", + "runOnRequirements": [ + { + "maxServerVersion": "4.0.99" + } + ], + "operations": [ + { + "object": "collection0", + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "updateMany": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "update": { + "$inc": { + "x": 1 + } + }, + "hint": { + "_id": 1 + } + } + } + ] + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [] + } + ] + }, + { + "description": "Unacknowledged updateMany with hint string on 4.2+ server", + "runOnRequirements": [ 
+ { + "minServerVersion": "4.2.0" + } + ], + "operations": [ + { + "object": "collection0", + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "updateMany": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "update": { + "$inc": { + "x": 1 + } + }, + "hint": "_id_" + } + } + ] + }, + "expectResult": { + "$$unsetOrMatches": { + "acknowledged": { + "$$unsetOrMatches": false + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "coll0", + "updates": [ + { + "q": { + "_id": { + "$gt": 1 + } + }, + "u": { + "$inc": { + "x": 1 + } + }, + "multi": true, + "upsert": { + "$$unsetOrMatches": false + }, + "hint": { + "$$type": [ + "string", + "object" + ] + } + } + ], + "writeConcern": { + "w": 0 + } + } + } + } + ] + } + ] + }, + { + "description": "Unacknowledged updateMany with hint document on 4.2+ server", + "runOnRequirements": [ + { + "minServerVersion": "4.2.0" + } + ], + "operations": [ + { + "object": "collection0", + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "updateMany": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "update": { + "$inc": { + "x": 1 + } + }, + "hint": { + "_id": 1 + } + } + } + ] + }, + "expectResult": { + "$$unsetOrMatches": { + "acknowledged": { + "$$unsetOrMatches": false + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "coll0", + "updates": [ + { + "q": { + "_id": { + "$gt": 1 + } + }, + "u": { + "$inc": { + "x": 1 + } + }, + "multi": true, + "upsert": { + "$$unsetOrMatches": false + }, + "hint": { + "$$type": [ + "string", + "object" + ] + } + } + ], + "writeConcern": { + "w": 0 + } + } + } + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/bulkWrite-updateOne-hint-unacknowledged.json b/test/crud/unified/bulkWrite-updateOne-hint-unacknowledged.json new file mode 100644 index 0000000000..1345f6b536 --- /dev/null +++ b/test/crud/unified/bulkWrite-updateOne-hint-unacknowledged.json @@ -0,0 +1,305 @@ +{ + "description": "bulkWrite-updateOne-hint-unacknowledged", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "db0" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0", + "collectionOptions": { + "writeConcern": { + "w": 0 + } + } + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "db0", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ], + "tests": [ + { + "description": "Unacknowledged updateOne with hint string fails with client-side error on pre-4.2 server", + "runOnRequirements": [ + { + "maxServerVersion": "4.0.99" + } + ], + "operations": [ + { + "object": "collection0", + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "updateOne": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "update": { + "$inc": { + "x": 1 + } + }, + "hint": "_id_" + } + } + ] + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [] + } + ] + }, + { + "description": "Unacknowledged updateOne with hint document fails with client-side error on pre-4.2 server", + "runOnRequirements": [ + { + "maxServerVersion": "4.0.99" + } + ], + "operations": [ + { + "object": "collection0", + "name": "bulkWrite", + 
"arguments": { + "requests": [ + { + "updateOne": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "update": { + "$inc": { + "x": 1 + } + }, + "hint": { + "_id": 1 + } + } + } + ] + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [] + } + ] + }, + { + "description": "Unacknowledged updateOne with hint string on 4.2+ server", + "runOnRequirements": [ + { + "minServerVersion": "4.2.0" + } + ], + "operations": [ + { + "object": "collection0", + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "updateOne": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "update": { + "$inc": { + "x": 1 + } + }, + "hint": "_id_" + } + } + ] + }, + "expectResult": { + "$$unsetOrMatches": { + "acknowledged": { + "$$unsetOrMatches": false + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "coll0", + "updates": [ + { + "q": { + "_id": { + "$gt": 1 + } + }, + "u": { + "$inc": { + "x": 1 + } + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + }, + "hint": { + "$$type": [ + "string", + "object" + ] + } + } + ], + "writeConcern": { + "w": 0 + } + } + } + } + ] + } + ] + }, + { + "description": "Unacknowledged updateOne with hint document on 4.2+ server", + "runOnRequirements": [ + { + "minServerVersion": "4.2.0" + } + ], + "operations": [ + { + "object": "collection0", + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "updateOne": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "update": { + "$inc": { + "x": 1 + } + }, + "hint": { + "_id": 1 + } + } + } + ] + }, + "expectResult": { + "$$unsetOrMatches": { + "acknowledged": { + "$$unsetOrMatches": false + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "coll0", + "updates": [ + { + "q": { + "_id": { + "$gt": 1 + } + }, + "u": { + "$inc": { + "x": 1 + } + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + }, + "hint": { + "$$type": [ + "string", + "object" + ] + } + } + ], + "writeConcern": { + "w": 0 + } + } + } + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/deleteMany-hint-unacknowledged.json b/test/crud/unified/deleteMany-hint-unacknowledged.json new file mode 100644 index 0000000000..ab7e9c7c09 --- /dev/null +++ b/test/crud/unified/deleteMany-hint-unacknowledged.json @@ -0,0 +1,245 @@ +{ + "description": "deleteMany-hint-unacknowledged", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "db0" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0", + "collectionOptions": { + "writeConcern": { + "w": 0 + } + } + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "db0", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ], + "tests": [ + { + "description": "Unacknowledged deleteMany with hint string fails with client-side error on pre-4.4 server", + "runOnRequirements": [ + { + "maxServerVersion": "4.2.99" + } + ], + "operations": [ + { + "object": "collection0", + "name": "deleteMany", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "hint": "_id_" + }, + "expectError": 
{ + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [] + } + ] + }, + { + "description": "Unacknowledged deleteMany with hint document fails with client-side error on pre-4.4 server", + "runOnRequirements": [ + { + "maxServerVersion": "4.2.99" + } + ], + "operations": [ + { + "object": "collection0", + "name": "deleteMany", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "hint": { + "_id": 1 + } + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [] + } + ] + }, + { + "description": "Unacknowledged deleteMany with hint string on 4.4+ server", + "runOnRequirements": [ + { + "minServerVersion": "4.4.0" + } + ], + "operations": [ + { + "object": "collection0", + "name": "deleteMany", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "hint": "_id_" + }, + "expectResult": { + "$$unsetOrMatches": { + "acknowledged": { + "$$unsetOrMatches": false + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "delete": "coll0", + "deletes": [ + { + "q": { + "_id": { + "$gt": 1 + } + }, + "hint": { + "$$type": [ + "string", + "object" + ] + }, + "limit": 0 + } + ], + "writeConcern": { + "w": 0 + } + } + } + } + ] + } + ] + }, + { + "description": "Unacknowledged deleteMany with hint document on 4.4+ server", + "runOnRequirements": [ + { + "minServerVersion": "4.4.0" + } + ], + "operations": [ + { + "object": "collection0", + "name": "deleteMany", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "hint": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "acknowledged": { + "$$unsetOrMatches": false + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "delete": "coll0", + "deletes": [ + { + "q": { + "_id": { + "$gt": 1 + } + }, + "hint": { + "$$type": [ + "string", + "object" + ] + }, + "limit": 0 + } + ], + "writeConcern": { + "w": 0 + } + } + } + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/deleteOne-hint-unacknowledged.json b/test/crud/unified/deleteOne-hint-unacknowledged.json new file mode 100644 index 0000000000..1782f0f525 --- /dev/null +++ b/test/crud/unified/deleteOne-hint-unacknowledged.json @@ -0,0 +1,241 @@ +{ + "description": "deleteOne-hint-unacknowledged", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "db0" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0", + "collectionOptions": { + "writeConcern": { + "w": 0 + } + } + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "db0", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ], + "tests": [ + { + "description": "Unacknowledged deleteOne with hint string fails with client-side error on pre-4.4 server", + "runOnRequirements": [ + { + "maxServerVersion": "4.2.99" + } + ], + "operations": [ + { + "object": "collection0", + "name": "deleteOne", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "hint": "_id_" + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [] + } + ] + }, + { + "description": "Unacknowledged deleteOne with hint 
document fails with client-side error on pre-4.4 server", + "runOnRequirements": [ + { + "maxServerVersion": "4.2.99" + } + ], + "operations": [ + { + "object": "collection0", + "name": "deleteOne", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "hint": { + "_id": 1 + } + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [] + } + ] + }, + { + "description": "Unacknowledged deleteOne with hint string on 4.4+ server", + "runOnRequirements": [ + { + "minServerVersion": "4.4.0" + } + ], + "operations": [ + { + "object": "collection0", + "name": "deleteOne", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "hint": "_id_" + }, + "expectResult": { + "$$unsetOrMatches": { + "acknowledged": { + "$$unsetOrMatches": false + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "delete": "coll0", + "deletes": [ + { + "q": { + "_id": { + "$gt": 1 + } + }, + "hint": { + "$$type": [ + "string", + "object" + ] + }, + "limit": 1 + } + ], + "writeConcern": { + "w": 0 + } + } + } + } + ] + } + ] + }, + { + "description": "Unacknowledged deleteOne with hint document on 4.4+ server", + "runOnRequirements": [ + { + "minServerVersion": "4.4.0" + } + ], + "operations": [ + { + "object": "collection0", + "name": "deleteOne", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "hint": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "acknowledged": { + "$$unsetOrMatches": false + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "delete": "coll0", + "deletes": [ + { + "q": { + "_id": { + "$gt": 1 + } + }, + "hint": { + "$$type": [ + "string", + "object" + ] + }, + "limit": 1 + } + ], + "writeConcern": { + "w": 0 + } + } + } + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/findOneAndDelete-hint-unacknowledged.json b/test/crud/unified/findOneAndDelete-hint-unacknowledged.json new file mode 100644 index 0000000000..077f9892b9 --- /dev/null +++ b/test/crud/unified/findOneAndDelete-hint-unacknowledged.json @@ -0,0 +1,225 @@ +{ + "description": "findOneAndDelete-hint-unacknowledged", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "db0" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0", + "collectionOptions": { + "writeConcern": { + "w": 0 + } + } + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "db0", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ], + "tests": [ + { + "description": "Unacknowledged findOneAndDelete with hint string fails with client-side error on pre-4.4 server", + "runOnRequirements": [ + { + "maxServerVersion": "4.2.99" + } + ], + "operations": [ + { + "object": "collection0", + "name": "findOneAndDelete", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "hint": "_id_" + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [] + } + ] + }, + { + "description": "Unacknowledged findOneAndDelete with hint document fails with client-side error on pre-4.4 server", + "runOnRequirements": [ + { + "maxServerVersion": "4.2.99" + } + ], + 
"operations": [ + { + "object": "collection0", + "name": "findOneAndDelete", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "hint": { + "_id": 1 + } + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [] + } + ] + }, + { + "description": "Unacknowledged findOneAndDelete with hint string on 4.4+ server", + "runOnRequirements": [ + { + "minServerVersion": "4.4.0" + } + ], + "operations": [ + { + "object": "collection0", + "name": "findOneAndDelete", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "hint": "_id_" + }, + "expectResult": { + "$$unsetOrMatches": null + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "findAndModify": "coll0", + "query": { + "_id": { + "$gt": 1 + } + }, + "remove": true, + "hint": { + "$$type": [ + "string", + "object" + ] + }, + "writeConcern": { + "w": 0 + } + } + } + } + ] + } + ] + }, + { + "description": "Unacknowledged findOneAndDelete with hint document on 4.4+ server", + "runOnRequirements": [ + { + "minServerVersion": "4.4.0" + } + ], + "operations": [ + { + "object": "collection0", + "name": "findOneAndDelete", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "hint": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": null + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "findAndModify": "coll0", + "query": { + "_id": { + "$gt": 1 + } + }, + "remove": true, + "hint": { + "$$type": [ + "string", + "object" + ] + }, + "writeConcern": { + "w": 0 + } + } + } + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/findOneAndReplace-hint-unacknowledged.json b/test/crud/unified/findOneAndReplace-hint-unacknowledged.json new file mode 100644 index 0000000000..8228d8a2aa --- /dev/null +++ b/test/crud/unified/findOneAndReplace-hint-unacknowledged.json @@ -0,0 +1,248 @@ +{ + "description": "findOneAndReplace-hint-unacknowledged", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "db0" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0", + "collectionOptions": { + "writeConcern": { + "w": 0 + } + } + } + }, + { + "collection": { + "id": "collection1", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "db0", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ], + "tests": [ + { + "description": "Unacknowledged findOneAndReplace with hint string fails with client-side error on pre-4.4 server", + "runOnRequirements": [ + { + "maxServerVersion": "4.2.99" + } + ], + "operations": [ + { + "object": "collection0", + "name": "findOneAndReplace", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "replacement": { + "x": 111 + }, + "hint": "_id_" + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [] + } + ] + }, + { + "description": "Unacknowledged findOneAndReplace with hint document fails with client-side error on pre-4.4 server", + "runOnRequirements": [ + { + "maxServerVersion": "4.2.99" + } + ], + "operations": [ + { + "object": "collection0", + "name": 
"findOneAndReplace", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "replacement": { + "x": 111 + }, + "hint": { + "_id": 1 + } + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [] + } + ] + }, + { + "description": "Unacknowledged findOneAndReplace with hint string on 4.4+ server", + "runOnRequirements": [ + { + "minServerVersion": "4.4.0" + } + ], + "operations": [ + { + "object": "collection0", + "name": "findOneAndReplace", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "replacement": { + "x": 111 + }, + "hint": "_id_" + }, + "expectResult": { + "$$unsetOrMatches": null + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "findAndModify": "coll0", + "query": { + "_id": { + "$gt": 1 + } + }, + "update": { + "x": 111 + }, + "hint": { + "$$type": [ + "string", + "object" + ] + }, + "writeConcern": { + "w": 0 + } + } + } + } + ] + } + ] + }, + { + "description": "Unacknowledged findOneAndReplace with hint document on 4.4+ server", + "runOnRequirements": [ + { + "minServerVersion": "4.4.0" + } + ], + "operations": [ + { + "object": "collection0", + "name": "findOneAndReplace", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "replacement": { + "x": 111 + }, + "hint": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": null + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "findAndModify": "coll0", + "query": { + "_id": { + "$gt": 1 + } + }, + "update": { + "x": 111 + }, + "hint": { + "$$type": [ + "string", + "object" + ] + }, + "writeConcern": { + "w": 0 + } + } + } + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/findOneAndUpdate-hint-unacknowledged.json b/test/crud/unified/findOneAndUpdate-hint-unacknowledged.json new file mode 100644 index 0000000000..d116a06d0d --- /dev/null +++ b/test/crud/unified/findOneAndUpdate-hint-unacknowledged.json @@ -0,0 +1,253 @@ +{ + "description": "findOneAndUpdate-hint-unacknowledged", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "db0" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0", + "collectionOptions": { + "writeConcern": { + "w": 0 + } + } + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "db0", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ], + "tests": [ + { + "description": "Unacknowledged findOneAndUpdate with hint string fails with client-side error on pre-4.4 server", + "runOnRequirements": [ + { + "maxServerVersion": "4.2.99" + } + ], + "operations": [ + { + "object": "collection0", + "name": "findOneAndUpdate", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "update": { + "$inc": { + "x": 1 + } + }, + "hint": "_id_" + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [] + } + ] + }, + { + "description": "Unacknowledged findOneAndUpdate with hint document fails with client-side error on pre-4.4 server", + "runOnRequirements": [ + { + "maxServerVersion": "4.2.99" + } + ], + "operations": [ + { + "object": "collection0", + "name": "findOneAndUpdate", + "arguments": { + "filter": 
{ + "_id": { + "$gt": 1 + } + }, + "update": { + "$inc": { + "x": 1 + } + }, + "hint": { + "_id": 1 + } + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [] + } + ] + }, + { + "description": "Unacknowledged findOneAndUpdate with hint string on 4.4+ server", + "runOnRequirements": [ + { + "minServerVersion": "4.4.0" + } + ], + "operations": [ + { + "object": "collection0", + "name": "findOneAndUpdate", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "update": { + "$inc": { + "x": 1 + } + }, + "hint": "_id_" + }, + "expectResult": { + "$$unsetOrMatches": null + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "findAndModify": "coll0", + "query": { + "_id": { + "$gt": 1 + } + }, + "update": { + "$inc": { + "x": 1 + } + }, + "hint": { + "$$type": [ + "string", + "object" + ] + }, + "writeConcern": { + "w": 0 + } + } + } + } + ] + } + ] + }, + { + "description": "Unacknowledged findOneAndUpdate with hint document on 4.4+ server", + "runOnRequirements": [ + { + "minServerVersion": "4.4.0" + } + ], + "operations": [ + { + "object": "collection0", + "name": "findOneAndUpdate", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "update": { + "$inc": { + "x": 1 + } + }, + "hint": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": null + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "findAndModify": "coll0", + "query": { + "_id": { + "$gt": 1 + } + }, + "update": { + "$inc": { + "x": 1 + } + }, + "hint": { + "$$type": [ + "string", + "object" + ] + }, + "writeConcern": { + "w": 0 + } + } + } + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/replaceOne-hint-unacknowledged.json b/test/crud/unified/replaceOne-hint-unacknowledged.json new file mode 100644 index 0000000000..5c5dec64f6 --- /dev/null +++ b/test/crud/unified/replaceOne-hint-unacknowledged.json @@ -0,0 +1,269 @@ +{ + "description": "replaceOne-hint-unacknowledged", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "db0" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0", + "collectionOptions": { + "writeConcern": { + "w": 0 + } + } + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "db0", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ], + "tests": [ + { + "description": "Unacknowledged replaceOne with hint string fails with client-side error on pre-4.2 server", + "runOnRequirements": [ + { + "maxServerVersion": "4.0.99" + } + ], + "operations": [ + { + "object": "collection0", + "name": "replaceOne", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "replacement": { + "x": 111 + }, + "hint": "_id_" + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [] + } + ] + }, + { + "description": "Unacknowledged replaceOne with hint document fails with client-side error on pre-4.2 server", + "runOnRequirements": [ + { + "maxServerVersion": "4.0.99" + } + ], + "operations": [ + { + "object": "collection0", + "name": "replaceOne", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "replacement": { + 
"x": 111 + }, + "hint": { + "_id": 1 + } + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [] + } + ] + }, + { + "description": "Unacknowledged replaceOne with hint string on 4.2+ server", + "runOnRequirements": [ + { + "minServerVersion": "4.2.0" + } + ], + "operations": [ + { + "object": "collection0", + "name": "replaceOne", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "replacement": { + "x": 111 + }, + "hint": "_id_" + }, + "expectResult": { + "$$unsetOrMatches": { + "acknowledged": { + "$$unsetOrMatches": false + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "coll0", + "updates": [ + { + "q": { + "_id": { + "$gt": 1 + } + }, + "u": { + "x": 111 + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + }, + "hint": { + "$$type": [ + "string", + "object" + ] + } + } + ], + "writeConcern": { + "w": 0 + } + } + } + } + ] + } + ] + }, + { + "description": "Unacknowledged replaceOne with hint document on 4.2+ server", + "runOnRequirements": [ + { + "minServerVersion": "4.2.0" + } + ], + "operations": [ + { + "object": "collection0", + "name": "replaceOne", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "replacement": { + "x": 111 + }, + "hint": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "acknowledged": { + "$$unsetOrMatches": false + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "coll0", + "updates": [ + { + "q": { + "_id": { + "$gt": 1 + } + }, + "u": { + "x": 111 + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + }, + "hint": { + "$$type": [ + "string", + "object" + ] + } + } + ], + "writeConcern": { + "w": 0 + } + } + } + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/unacknowledged-bulkWrite-delete-hint-clientError.json b/test/crud/unified/unacknowledged-bulkWrite-delete-hint-clientError.json deleted file mode 100644 index dca8108109..0000000000 --- a/test/crud/unified/unacknowledged-bulkWrite-delete-hint-clientError.json +++ /dev/null @@ -1,193 +0,0 @@ -{ - "description": "unacknowledged-bulkWrite-delete-hint-clientError", - "schemaVersion": "1.1", - "createEntities": [ - { - "client": { - "id": "client0", - "observeEvents": [ - "commandStartedEvent" - ] - } - }, - { - "database": { - "id": "database0", - "client": "client0", - "databaseName": "crud-v2" - } - }, - { - "collection": { - "id": "collection0", - "database": "database0", - "collectionName": "BulkWrite_delete_hint", - "collectionOptions": { - "writeConcern": { - "w": 0 - } - } - } - } - ], - "initialData": [ - { - "collectionName": "BulkWrite_delete_hint", - "databaseName": "crud-v2", - "documents": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - }, - { - "_id": 4, - "x": 44 - } - ] - } - ], - "tests": [ - { - "description": "Unacknowledged bulkWrite deleteOne with hints fails with client-side error", - "operations": [ - { - "object": "collection0", - "name": "bulkWrite", - "arguments": { - "requests": [ - { - "deleteOne": { - "filter": { - "_id": 1 - }, - "hint": "_id_" - } - }, - { - "deleteOne": { - "filter": { - "_id": 2 - }, - "hint": { - "_id": 1 - } - } - } - ], - "ordered": true - }, - "expectError": { - "isError": true - } - } - ], - "expectEvents": [ - { - "client": 
"client0", - "events": [] - } - ], - "outcome": [ - { - "collectionName": "BulkWrite_delete_hint", - "databaseName": "crud-v2", - "documents": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - }, - { - "_id": 4, - "x": 44 - } - ] - } - ] - }, - { - "description": "Unacknowledged bulkWrite deleteMany with hints fails with client-side error", - "operations": [ - { - "object": "collection0", - "name": "bulkWrite", - "arguments": { - "requests": [ - { - "deleteMany": { - "filter": { - "_id": { - "$lt": 3 - } - }, - "hint": "_id_" - } - }, - { - "deleteMany": { - "filter": { - "_id": { - "$gte": 4 - } - }, - "hint": { - "_id": 1 - } - } - } - ], - "ordered": true - }, - "expectError": { - "isError": true - } - } - ], - "expectEvents": [ - { - "client": "client0", - "events": [] - } - ], - "outcome": [ - { - "collectionName": "BulkWrite_delete_hint", - "databaseName": "crud-v2", - "documents": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - }, - { - "_id": 4, - "x": 44 - } - ] - } - ] - } - ] -} diff --git a/test/crud/unified/unacknowledged-bulkWrite-update-hint-clientError.json b/test/crud/unified/unacknowledged-bulkWrite-update-hint-clientError.json deleted file mode 100644 index 22377b9ac1..0000000000 --- a/test/crud/unified/unacknowledged-bulkWrite-update-hint-clientError.json +++ /dev/null @@ -1,284 +0,0 @@ -{ - "description": "unacknowledged-bulkWrite-update-hint-clientError", - "schemaVersion": "1.1", - "createEntities": [ - { - "client": { - "id": "client0", - "observeEvents": [ - "commandStartedEvent" - ] - } - }, - { - "database": { - "id": "database0", - "client": "client0", - "databaseName": "crud-v2" - } - }, - { - "collection": { - "id": "collection0", - "database": "database0", - "collectionName": "Bulkwrite_update_hint", - "collectionOptions": { - "writeConcern": { - "w": 0 - } - } - } - } - ], - "initialData": [ - { - "collectionName": "Bulkwrite_update_hint", - "databaseName": "crud-v2", - "documents": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - }, - { - "_id": 4, - "x": 44 - } - ] - } - ], - "tests": [ - { - "description": "Unacknowledged bulkWrite updateOne with hints fails with client-side error", - "operations": [ - { - "object": "collection0", - "name": "bulkWrite", - "arguments": { - "requests": [ - { - "updateOne": { - "filter": { - "_id": 1 - }, - "update": { - "$inc": { - "x": 1 - } - }, - "hint": "_id_" - } - }, - { - "updateOne": { - "filter": { - "_id": 1 - }, - "update": { - "$inc": { - "x": 1 - } - }, - "hint": { - "_id": 1 - } - } - } - ], - "ordered": true - }, - "expectError": { - "isError": true - } - } - ], - "expectEvents": [ - { - "client": "client0", - "events": [] - } - ], - "outcome": [ - { - "collectionName": "Bulkwrite_update_hint", - "databaseName": "crud-v2", - "documents": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - }, - { - "_id": 4, - "x": 44 - } - ] - } - ] - }, - { - "description": "Unacknowledged bulkWrite updateMany with hints fails with client-side error", - "operations": [ - { - "object": "collection0", - "name": "bulkWrite", - "arguments": { - "requests": [ - { - "updateMany": { - "filter": { - "_id": { - "$lt": 3 - } - }, - "update": { - "$inc": { - "x": 1 - } - }, - "hint": "_id_" - } - }, - { - "updateMany": { - "filter": { - "_id": { - "$lt": 3 - } - }, - "update": { - "$inc": { - "x": 1 - } - }, - "hint": { - "_id": 1 - } - } - } - ], - "ordered": true - }, 
- "expectError": { - "isError": true - } - } - ], - "expectEvents": [ - { - "client": "client0", - "events": [] - } - ], - "outcome": [ - { - "collectionName": "Bulkwrite_update_hint", - "databaseName": "crud-v2", - "documents": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - }, - { - "_id": 4, - "x": 44 - } - ] - } - ] - }, - { - "description": "Unacknowledged bulkWrite replaceOne with hints fails with client-side error", - "operations": [ - { - "object": "collection0", - "name": "bulkWrite", - "arguments": { - "requests": [ - { - "replaceOne": { - "filter": { - "_id": 3 - }, - "replacement": { - "x": 333 - }, - "hint": "_id_" - } - }, - { - "replaceOne": { - "filter": { - "_id": 4 - }, - "replacement": { - "x": 444 - }, - "hint": { - "_id": 1 - } - } - } - ], - "ordered": true - }, - "expectError": { - "isError": true - } - } - ], - "expectEvents": [ - { - "client": "client0", - "events": [] - } - ], - "outcome": [ - { - "collectionName": "Bulkwrite_update_hint", - "databaseName": "crud-v2", - "documents": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - }, - { - "_id": 4, - "x": 44 - } - ] - } - ] - } - ] -} diff --git a/test/crud/unified/unacknowledged-deleteMany-hint-clientError.json b/test/crud/unified/unacknowledged-deleteMany-hint-clientError.json deleted file mode 100644 index 21776eae80..0000000000 --- a/test/crud/unified/unacknowledged-deleteMany-hint-clientError.json +++ /dev/null @@ -1,149 +0,0 @@ -{ - "description": "unacknowledged-deleteMany-hint-clientError", - "schemaVersion": "1.1", - "createEntities": [ - { - "client": { - "id": "client0", - "observeEvents": [ - "commandStartedEvent" - ] - } - }, - { - "database": { - "id": "database0", - "client": "client0", - "databaseName": "crud-v2" - } - }, - { - "collection": { - "id": "collection0", - "database": "database0", - "collectionName": "DeleteMany_hint", - "collectionOptions": { - "writeConcern": { - "w": 0 - } - } - } - } - ], - "initialData": [ - { - "collectionName": "DeleteMany_hint", - "databaseName": "crud-v2", - "documents": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - } - ] - } - ], - "tests": [ - { - "description": "Unacknowledged deleteMany with hint string fails with client-side error", - "operations": [ - { - "object": "collection0", - "name": "deleteMany", - "arguments": { - "filter": { - "_id": { - "$gt": 1 - } - }, - "hint": "_id_" - }, - "expectError": { - "isError": true - } - } - ], - "expectEvents": [ - { - "client": "client0", - "events": [] - } - ], - "outcome": [ - { - "collectionName": "DeleteMany_hint", - "databaseName": "crud-v2", - "documents": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - } - ] - } - ] - }, - { - "description": "Unacknowledged deleteMany with hint document fails with client-side error", - "operations": [ - { - "object": "collection0", - "name": "deleteMany", - "arguments": { - "filter": { - "_id": { - "$gt": 1 - } - }, - "hint": { - "_id": 1 - } - }, - "expectError": { - "isError": true - } - } - ], - "expectEvents": [ - { - "client": "client0", - "events": [] - } - ], - "outcome": [ - { - "collectionName": "DeleteMany_hint", - "databaseName": "crud-v2", - "documents": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - } - ] - } - ] - } - ] -} diff --git a/test/crud/unified/unacknowledged-deleteOne-hint-clientError.json 
b/test/crud/unified/unacknowledged-deleteOne-hint-clientError.json deleted file mode 100644 index 870c08339c..0000000000 --- a/test/crud/unified/unacknowledged-deleteOne-hint-clientError.json +++ /dev/null @@ -1,133 +0,0 @@ -{ - "description": "unacknowledged-deleteOne-hint-clientError", - "schemaVersion": "1.1", - "createEntities": [ - { - "client": { - "id": "client0", - "observeEvents": [ - "commandStartedEvent" - ] - } - }, - { - "database": { - "id": "database0", - "client": "client0", - "databaseName": "crud-v2" - } - }, - { - "collection": { - "id": "collection0", - "database": "database0", - "collectionName": "DeleteOne_hint", - "collectionOptions": { - "writeConcern": { - "w": 0 - } - } - } - } - ], - "initialData": [ - { - "collectionName": "DeleteOne_hint", - "databaseName": "crud-v2", - "documents": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - } - ] - } - ], - "tests": [ - { - "description": "Unacknowledged deleteOne with hint string fails with client-side error", - "operations": [ - { - "object": "collection0", - "name": "deleteOne", - "arguments": { - "filter": { - "_id": 1 - }, - "hint": "_id_" - }, - "expectError": { - "isError": true - } - } - ], - "expectEvents": [ - { - "client": "client0", - "events": [] - } - ], - "outcome": [ - { - "collectionName": "DeleteOne_hint", - "databaseName": "crud-v2", - "documents": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - } - ] - } - ] - }, - { - "description": "Unacknowledged deleteOne with hint document fails with client-side error", - "operations": [ - { - "object": "collection0", - "name": "deleteOne", - "arguments": { - "filter": { - "_id": 1 - }, - "hint": { - "_id": 1 - } - }, - "expectError": { - "isError": true - } - } - ], - "expectEvents": [ - { - "client": "client0", - "events": [] - } - ], - "outcome": [ - { - "collectionName": "DeleteOne_hint", - "databaseName": "crud-v2", - "documents": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - } - ] - } - ] - } - ] -} diff --git a/test/crud/unified/unacknowledged-findOneAndDelete-hint-clientError.json b/test/crud/unified/unacknowledged-findOneAndDelete-hint-clientError.json deleted file mode 100644 index a19cd77638..0000000000 --- a/test/crud/unified/unacknowledged-findOneAndDelete-hint-clientError.json +++ /dev/null @@ -1,133 +0,0 @@ -{ - "description": "unacknowledged-findOneAndDelete-hint-clientError", - "schemaVersion": "1.1", - "createEntities": [ - { - "client": { - "id": "client0", - "observeEvents": [ - "commandStartedEvent" - ] - } - }, - { - "database": { - "id": "database0", - "client": "client0", - "databaseName": "crud-v2" - } - }, - { - "collection": { - "id": "collection0", - "database": "database0", - "collectionName": "findOneAndDelete_hint", - "collectionOptions": { - "writeConcern": { - "w": 0 - } - } - } - } - ], - "initialData": [ - { - "collectionName": "findOneAndDelete_hint", - "databaseName": "crud-v2", - "documents": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - } - ] - } - ], - "tests": [ - { - "description": "Unacknowledged findOneAndDelete with hint string fails with client-side error", - "operations": [ - { - "object": "collection0", - "name": "findOneAndDelete", - "arguments": { - "filter": { - "_id": 1 - }, - "hint": "_id_" - }, - "expectError": { - "isError": true - } - } - ], - "expectEvents": [ - { - "client": "client0", - "events": [] - } - ], - "outcome": [ - { - "collectionName": "findOneAndDelete_hint", - "databaseName": "crud-v2", - "documents": [ - { - "_id": 1, - "x": 11 - }, - { 
- "_id": 2, - "x": 22 - } - ] - } - ] - }, - { - "description": "Unacknowledged findOneAndDelete with hint document fails with client-side error", - "operations": [ - { - "object": "collection0", - "name": "findOneAndDelete", - "arguments": { - "filter": { - "_id": 1 - }, - "hint": { - "_id": 1 - } - }, - "expectError": { - "isError": true - } - } - ], - "expectEvents": [ - { - "client": "client0", - "events": [] - } - ], - "outcome": [ - { - "collectionName": "findOneAndDelete_hint", - "databaseName": "crud-v2", - "documents": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - } - ] - } - ] - } - ] -} diff --git a/test/crud/unified/unacknowledged-findOneAndReplace-hint-clientError.json b/test/crud/unified/unacknowledged-findOneAndReplace-hint-clientError.json deleted file mode 100644 index c60bfdef17..0000000000 --- a/test/crud/unified/unacknowledged-findOneAndReplace-hint-clientError.json +++ /dev/null @@ -1,139 +0,0 @@ -{ - "description": "unacknowledged-findOneAndReplace-hint-clientError", - "schemaVersion": "1.1", - "createEntities": [ - { - "client": { - "id": "client0", - "observeEvents": [ - "commandStartedEvent" - ] - } - }, - { - "database": { - "id": "database0", - "client": "client0", - "databaseName": "crud-v2" - } - }, - { - "collection": { - "id": "collection0", - "database": "database0", - "collectionName": "FindOneAndReplace_hint", - "collectionOptions": { - "writeConcern": { - "w": 0 - } - } - } - } - ], - "initialData": [ - { - "collectionName": "FindOneAndReplace_hint", - "databaseName": "crud-v2", - "documents": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - } - ] - } - ], - "tests": [ - { - "description": "Unacknowledged findOneAndReplace with hint string fails with client-side error", - "operations": [ - { - "object": "collection0", - "name": "findOneAndReplace", - "arguments": { - "filter": { - "_id": 1 - }, - "replacement": { - "x": 33 - }, - "hint": "_id_" - }, - "expectError": { - "isError": true - } - } - ], - "expectEvents": [ - { - "client": "client0", - "events": [] - } - ], - "outcome": [ - { - "collectionName": "FindOneAndReplace_hint", - "databaseName": "crud-v2", - "documents": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - } - ] - } - ] - }, - { - "description": "Unacknowledged findOneAndReplace with hint document fails with client-side error", - "operations": [ - { - "object": "collection0", - "name": "findOneAndReplace", - "arguments": { - "filter": { - "_id": 1 - }, - "replacement": { - "x": 33 - }, - "hint": { - "_id": 1 - } - }, - "expectError": { - "isError": true - } - } - ], - "expectEvents": [ - { - "client": "client0", - "events": [] - } - ], - "outcome": [ - { - "collectionName": "FindOneAndReplace_hint", - "databaseName": "crud-v2", - "documents": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - } - ] - } - ] - } - ] -} diff --git a/test/crud/unified/unacknowledged-findOneAndUpdate-hint-clientError.json b/test/crud/unified/unacknowledged-findOneAndUpdate-hint-clientError.json deleted file mode 100644 index 506510a3c9..0000000000 --- a/test/crud/unified/unacknowledged-findOneAndUpdate-hint-clientError.json +++ /dev/null @@ -1,143 +0,0 @@ -{ - "description": "unacknowledged-findOneAndUpdate-hint-clientError", - "schemaVersion": "1.1", - "createEntities": [ - { - "client": { - "id": "client0", - "observeEvents": [ - "commandStartedEvent" - ] - } - }, - { - "database": { - "id": "database0", - "client": "client0", - "databaseName": "crud-v2" - } - }, - { - "collection": { - "id": "collection0", - 
"database": "database0", - "collectionName": "FindOneAndUpdate_hint", - "collectionOptions": { - "writeConcern": { - "w": 0 - } - } - } - } - ], - "initialData": [ - { - "collectionName": "FindOneAndUpdate_hint", - "databaseName": "crud-v2", - "documents": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - } - ] - } - ], - "tests": [ - { - "description": "Unacknowledged findOneAndUpdate with hint string fails with client-side error", - "operations": [ - { - "object": "collection0", - "name": "findOneAndUpdate", - "arguments": { - "filter": { - "_id": 1 - }, - "update": { - "$inc": { - "x": 1 - } - }, - "hint": "_id_" - }, - "expectError": { - "isError": true - } - } - ], - "expectEvents": [ - { - "client": "client0", - "events": [] - } - ], - "outcome": [ - { - "collectionName": "FindOneAndUpdate_hint", - "databaseName": "crud-v2", - "documents": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - } - ] - } - ] - }, - { - "description": "Unacknowledged findOneAndUpdate with hint document fails with client-side error", - "operations": [ - { - "object": "collection0", - "name": "findOneAndUpdate", - "arguments": { - "filter": { - "_id": 1 - }, - "update": { - "$inc": { - "x": 1 - } - }, - "hint": { - "_id": 1 - } - }, - "expectError": { - "isError": true - } - } - ], - "expectEvents": [ - { - "client": "client0", - "events": [] - } - ], - "outcome": [ - { - "collectionName": "FindOneAndUpdate_hint", - "databaseName": "crud-v2", - "documents": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - } - ] - } - ] - } - ] -} diff --git a/test/crud/unified/unacknowledged-replaceOne-hint-clientError.json b/test/crud/unified/unacknowledged-replaceOne-hint-clientError.json deleted file mode 100644 index b4f4bed5f9..0000000000 --- a/test/crud/unified/unacknowledged-replaceOne-hint-clientError.json +++ /dev/null @@ -1,143 +0,0 @@ -{ - "description": "unacknowledged-replaceOne-hint-clientError", - "schemaVersion": "1.1", - "createEntities": [ - { - "client": { - "id": "client0", - "observeEvents": [ - "commandStartedEvent" - ] - } - }, - { - "database": { - "id": "database0", - "client": "client0", - "databaseName": "crud-v2" - } - }, - { - "collection": { - "id": "collection0", - "database": "database0", - "collectionName": "ReplaceOne_hint", - "collectionOptions": { - "writeConcern": { - "w": 0 - } - } - } - } - ], - "initialData": [ - { - "collectionName": "ReplaceOne_hint", - "databaseName": "crud-v2", - "documents": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - } - ] - } - ], - "tests": [ - { - "description": "Unacknowledged ReplaceOne with hint string fails with client-side error", - "operations": [ - { - "object": "collection0", - "name": "replaceOne", - "arguments": { - "filter": { - "_id": { - "$gt": 1 - } - }, - "replacement": { - "x": 111 - }, - "hint": "_id_" - }, - "expectError": { - "isError": true - } - } - ], - "expectEvents": [ - { - "client": "client0", - "events": [] - } - ], - "outcome": [ - { - "collectionName": "ReplaceOne_hint", - "databaseName": "crud-v2", - "documents": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - } - ] - } - ] - }, - { - "description": "Unacknowledged ReplaceOne with hint document fails with client-side error", - "operations": [ - { - "object": "collection0", - "name": "replaceOne", - "arguments": { - "filter": { - "_id": { - "$gt": 1 - } - }, - "replacement": { - "x": 111 - }, - "hint": { - "_id": 1 - } - }, - "expectError": { - "isError": true - } - } - ], - "expectEvents": [ - { - "client": "client0", 
- "events": [] - } - ], - "outcome": [ - { - "collectionName": "ReplaceOne_hint", - "databaseName": "crud-v2", - "documents": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - } - ] - } - ] - } - ] -} diff --git a/test/crud/unified/unacknowledged-updateMany-hint-clientError.json b/test/crud/unified/unacknowledged-updateMany-hint-clientError.json deleted file mode 100644 index 3087dc4dbc..0000000000 --- a/test/crud/unified/unacknowledged-updateMany-hint-clientError.json +++ /dev/null @@ -1,159 +0,0 @@ -{ - "description": "unacknowledged-updateMany-hint-clientError", - "schemaVersion": "1.1", - "createEntities": [ - { - "client": { - "id": "client0", - "observeEvents": [ - "commandStartedEvent" - ] - } - }, - { - "database": { - "id": "database0", - "client": "client0", - "databaseName": "crud-v2" - } - }, - { - "collection": { - "id": "collection0", - "database": "database0", - "collectionName": "Updatemany_hint", - "collectionOptions": { - "writeConcern": { - "w": 0 - } - } - } - } - ], - "initialData": [ - { - "collectionName": "Updatemany_hint", - "databaseName": "crud-v2", - "documents": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - } - ] - } - ], - "tests": [ - { - "description": "Unacknowledged updateMany with hint string fails with client-side error", - "operations": [ - { - "object": "collection0", - "name": "updateMany", - "arguments": { - "filter": { - "_id": { - "$gt": 1 - } - }, - "update": { - "$inc": { - "x": 1 - } - }, - "hint": "_id_" - }, - "expectError": { - "isError": true - } - } - ], - "expectEvents": [ - { - "client": "client0", - "events": [] - } - ], - "outcome": [ - { - "collectionName": "Updatemany_hint", - "databaseName": "crud-v2", - "documents": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - } - ] - } - ] - }, - { - "description": "Unacknowledged updateMany with hint document fails with client-side error", - "operations": [ - { - "object": "collection0", - "name": "updateMany", - "arguments": { - "filter": { - "_id": { - "$gt": 1 - } - }, - "update": { - "$inc": { - "x": 1 - } - }, - "hint": { - "_id": 1 - } - }, - "expectError": { - "isError": true - } - } - ], - "expectEvents": [ - { - "client": "client0", - "events": [] - } - ], - "outcome": [ - { - "collectionName": "Updatemany_hint", - "databaseName": "crud-v2", - "documents": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - } - ] - } - ] - } - ] -} diff --git a/test/crud/unified/unacknowledged-updateOne-hint-clientError.json b/test/crud/unified/unacknowledged-updateOne-hint-clientError.json deleted file mode 100644 index 208703c26f..0000000000 --- a/test/crud/unified/unacknowledged-updateOne-hint-clientError.json +++ /dev/null @@ -1,147 +0,0 @@ -{ - "description": "unacknowledged-updateOne-hint-clientError", - "schemaVersion": "1.1", - "createEntities": [ - { - "client": { - "id": "client0", - "observeEvents": [ - "commandStartedEvent" - ] - } - }, - { - "database": { - "id": "database0", - "client": "client0", - "databaseName": "crud-v2" - } - }, - { - "collection": { - "id": "collection0", - "database": "database0", - "collectionName": "UpdateOne_hint", - "collectionOptions": { - "writeConcern": { - "w": 0 - } - } - } - } - ], - "initialData": [ - { - "collectionName": "UpdateOne_hint", - "databaseName": "crud-v2", - "documents": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - } - ] - } - ], - "tests": [ - { - "description": "Unacknowledged 
updateOne with hint string fails with client-side error", - "operations": [ - { - "object": "collection0", - "name": "updateOne", - "arguments": { - "filter": { - "_id": { - "$gt": 1 - } - }, - "update": { - "$inc": { - "x": 1 - } - }, - "hint": "_id_" - }, - "expectError": { - "isError": true - } - } - ], - "expectEvents": [ - { - "client": "client0", - "events": [] - } - ], - "outcome": [ - { - "collectionName": "UpdateOne_hint", - "databaseName": "crud-v2", - "documents": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - } - ] - } - ] - }, - { - "description": "Unacknowledged updateOne with hint document fails with client-side error", - "operations": [ - { - "object": "collection0", - "name": "updateOne", - "arguments": { - "filter": { - "_id": { - "$gt": 1 - } - }, - "update": { - "$inc": { - "x": 1 - } - }, - "hint": { - "_id": 1 - } - }, - "expectError": { - "isError": true - } - } - ], - "expectEvents": [ - { - "client": "client0", - "events": [] - } - ], - "outcome": [ - { - "collectionName": "UpdateOne_hint", - "databaseName": "crud-v2", - "documents": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - } - ] - } - ] - } - ] -} diff --git a/test/crud/unified/updateMany-hint-unacknowledged.json b/test/crud/unified/updateMany-hint-unacknowledged.json new file mode 100644 index 0000000000..e83838aac2 --- /dev/null +++ b/test/crud/unified/updateMany-hint-unacknowledged.json @@ -0,0 +1,281 @@ +{ + "description": "updateMany-hint-unacknowledged", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "db0" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0", + "collectionOptions": { + "writeConcern": { + "w": 0 + } + } + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "db0", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ], + "tests": [ + { + "description": "Unacknowledged updateMany with hint string fails with client-side error on pre-4.2 server", + "runOnRequirements": [ + { + "maxServerVersion": "4.0.99" + } + ], + "operations": [ + { + "object": "collection0", + "name": "updateMany", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "update": { + "$inc": { + "x": 1 + } + }, + "hint": "_id_" + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [] + } + ] + }, + { + "description": "Unacknowledged updateMany with hint document fails with client-side error on pre-4.2 server", + "runOnRequirements": [ + { + "maxServerVersion": "4.0.99" + } + ], + "operations": [ + { + "object": "collection0", + "name": "updateMany", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "update": { + "$inc": { + "x": 1 + } + }, + "hint": { + "_id": 1 + } + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [] + } + ] + }, + { + "description": "Unacknowledged updateMany with hint string on 4.2+ server", + "runOnRequirements": [ + { + "minServerVersion": "4.2.0" + } + ], + "operations": [ + { + "object": "collection0", + "name": "updateMany", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "update": { + "$inc": { + "x": 1 + } + }, + "hint": "_id_" + }, + "expectResult": { + 
"$$unsetOrMatches": { + "acknowledged": { + "$$unsetOrMatches": false + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "coll0", + "updates": [ + { + "q": { + "_id": { + "$gt": 1 + } + }, + "u": { + "$inc": { + "x": 1 + } + }, + "multi": true, + "upsert": { + "$$unsetOrMatches": false + }, + "hint": { + "$$type": [ + "string", + "object" + ] + } + } + ], + "writeConcern": { + "w": 0 + } + } + } + } + ] + } + ] + }, + { + "description": "Unacknowledged updateMany with hint document on 4.2+ server", + "runOnRequirements": [ + { + "minServerVersion": "4.2.0" + } + ], + "operations": [ + { + "object": "collection0", + "name": "updateMany", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "update": { + "$inc": { + "x": 1 + } + }, + "hint": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "acknowledged": { + "$$unsetOrMatches": false + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "coll0", + "updates": [ + { + "q": { + "_id": { + "$gt": 1 + } + }, + "u": { + "$inc": { + "x": 1 + } + }, + "multi": true, + "upsert": { + "$$unsetOrMatches": false + }, + "hint": { + "$$type": [ + "string", + "object" + ] + } + } + ], + "writeConcern": { + "w": 0 + } + } + } + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/updateOne-hint-unacknowledged.json b/test/crud/unified/updateOne-hint-unacknowledged.json new file mode 100644 index 0000000000..859b0f92f9 --- /dev/null +++ b/test/crud/unified/updateOne-hint-unacknowledged.json @@ -0,0 +1,281 @@ +{ + "description": "updateOne-hint-unacknowledged", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "db0" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0", + "collectionOptions": { + "writeConcern": { + "w": 0 + } + } + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "db0", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ], + "tests": [ + { + "description": "Unacknowledged updateOne with hint string fails with client-side error on pre-4.2 server", + "runOnRequirements": [ + { + "maxServerVersion": "4.0.99" + } + ], + "operations": [ + { + "object": "collection0", + "name": "updateOne", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "update": { + "$inc": { + "x": 1 + } + }, + "hint": "_id_" + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [] + } + ] + }, + { + "description": "Unacknowledged updateOne with hint document fails with client-side error on pre-4.2 server", + "runOnRequirements": [ + { + "maxServerVersion": "4.0.99" + } + ], + "operations": [ + { + "object": "collection0", + "name": "updateOne", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "update": { + "$inc": { + "x": 1 + } + }, + "hint": { + "_id": 1 + } + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [] + } + ] + }, + { + "description": "Unacknowledged updateOne with hint string on 4.2+ server", + "runOnRequirements": [ + { + "minServerVersion": "4.2.0" + } + ], + "operations": [ + { + "object": 
"collection0", + "name": "updateOne", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "update": { + "$inc": { + "x": 1 + } + }, + "hint": "_id_" + }, + "expectResult": { + "$$unsetOrMatches": { + "acknowledged": { + "$$unsetOrMatches": false + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "coll0", + "updates": [ + { + "q": { + "_id": { + "$gt": 1 + } + }, + "u": { + "$inc": { + "x": 1 + } + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + }, + "hint": { + "$$type": [ + "string", + "object" + ] + } + } + ], + "writeConcern": { + "w": 0 + } + } + } + } + ] + } + ] + }, + { + "description": "Unacknowledged updateOne with hint document on 4.2+ server", + "runOnRequirements": [ + { + "minServerVersion": "4.2.0" + } + ], + "operations": [ + { + "object": "collection0", + "name": "updateOne", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "update": { + "$inc": { + "x": 1 + } + }, + "hint": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "acknowledged": { + "$$unsetOrMatches": false + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "coll0", + "updates": [ + { + "q": { + "_id": { + "$gt": 1 + } + }, + "u": { + "$inc": { + "x": 1 + } + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + }, + "hint": { + "$$type": [ + "string", + "object" + ] + } + } + ], + "writeConcern": { + "w": 0 + } + } + } + } + ] + } + ] + } + ] +} From d6fc05ae49c8225f865b83c55ed7bfa33931611c Mon Sep 17 00:00:00 2001 From: Alexander Golin Date: Thu, 20 Jan 2022 17:03:43 -0500 Subject: [PATCH 0053/1588] Create CODEOWNERS (#834) --- .github/CODEOWNERS | 2 ++ 1 file changed, 2 insertions(+) create mode 100644 .github/CODEOWNERS diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS new file mode 100644 index 0000000000..15a41b6ce6 --- /dev/null +++ b/.github/CODEOWNERS @@ -0,0 +1,2 @@ +# Global owner for repo +* @blink1073 @juliusgeo @ShaneHarvey From b7c33debbf43cd2e2c65c66c646e1833db2ad01c Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Fri, 21 Jan 2022 10:08:48 -0800 Subject: [PATCH 0054/1588] PYTHON-3046 Document support for backslashreplace and surrogateescape (#836) --- bson/codec_options.py | 3 +- pymongo/mongo_client.py | 3 +- test/test_bson.py | 71 +++++++++++++---------------------------- 3 files changed, 27 insertions(+), 50 deletions(-) diff --git a/bson/codec_options.py b/bson/codec_options.py index 81e79158b4..27df48de8a 100644 --- a/bson/codec_options.py +++ b/bson/codec_options.py @@ -233,7 +233,8 @@ class CodecOptions(_options_base): - `unicode_decode_error_handler`: The error handler to apply when a Unicode-related error occurs during BSON decoding that would otherwise raise :exc:`UnicodeDecodeError`. Valid options include - 'strict', 'replace', and 'ignore'. Defaults to 'strict'. + 'strict', 'replace', 'backslashreplace', 'surrogateescape', and + 'ignore'. Defaults to 'strict'. - `tzinfo`: A :class:`~datetime.tzinfo` subclass that specifies the timezone to/from which :class:`~datetime.datetime` objects should be encoded/decoded. 
diff --git a/pymongo/mongo_client.py b/pymongo/mongo_client.py index a133c96a7f..052ade3853 100644 --- a/pymongo/mongo_client.py +++ b/pymongo/mongo_client.py @@ -330,7 +330,8 @@ def __init__( - `unicode_decode_error_handler`: The error handler to apply when a Unicode-related error occurs during BSON decoding that would otherwise raise :exc:`UnicodeDecodeError`. Valid options include - 'strict', 'replace', and 'ignore'. Defaults to 'strict'. + 'strict', 'replace', 'backslashreplace', 'surrogateescape', and + 'ignore'. Defaults to 'strict'. - `srvServiceName`: (string) The SRV service name to use for "mongodb+srv://" URIs. Defaults to "mongodb". Use it like so:: diff --git a/test/test_bson.py b/test/test_bson.py index b91bc7f5fb..eb4f4e47c2 100644 --- a/test/test_bson.py +++ b/test/test_bson.py @@ -994,57 +994,32 @@ def test_decode_all_defaults(self): def test_unicode_decode_error_handler(self): enc = encode({"keystr": "foobar"}) - # Test handling of bad key value. + # Test handling of bad key value, bad string value, and both. invalid_key = enc[:7] + b'\xe9' + enc[8:] - replaced_key = b'ke\xe9str'.decode('utf-8', 'replace') - ignored_key = b'ke\xe9str'.decode('utf-8', 'ignore') - - dec = decode(invalid_key, - CodecOptions(unicode_decode_error_handler="replace")) - self.assertEqual(dec, {replaced_key: "foobar"}) - - dec = decode(invalid_key, - CodecOptions(unicode_decode_error_handler="ignore")) - self.assertEqual(dec, {ignored_key: "foobar"}) - - self.assertRaises(InvalidBSON, decode, invalid_key, CodecOptions( - unicode_decode_error_handler="strict")) - self.assertRaises(InvalidBSON, decode, invalid_key, CodecOptions()) - self.assertRaises(InvalidBSON, decode, invalid_key) - - # Test handing of bad string value. - invalid_val = BSON(enc[:18] + b'\xe9' + enc[19:]) - replaced_val = b'fo\xe9bar'.decode('utf-8', 'replace') - ignored_val = b'fo\xe9bar'.decode('utf-8', 'ignore') - - dec = decode(invalid_val, - CodecOptions(unicode_decode_error_handler="replace")) - self.assertEqual(dec, {"keystr": replaced_val}) - - dec = decode(invalid_val, - CodecOptions(unicode_decode_error_handler="ignore")) - self.assertEqual(dec, {"keystr": ignored_val}) - - self.assertRaises(InvalidBSON, decode, invalid_val, CodecOptions( - unicode_decode_error_handler="strict")) - self.assertRaises(InvalidBSON, decode, invalid_val, CodecOptions()) - self.assertRaises(InvalidBSON, decode, invalid_val) - - # Test handing bad key + bad value. + invalid_val = enc[:18] + b'\xe9' + enc[19:] invalid_both = enc[:7] + b'\xe9' + enc[8:18] + b'\xe9' + enc[19:] - dec = decode(invalid_both, - CodecOptions(unicode_decode_error_handler="replace")) - self.assertEqual(dec, {replaced_key: replaced_val}) - - dec = decode(invalid_both, - CodecOptions(unicode_decode_error_handler="ignore")) - self.assertEqual(dec, {ignored_key: ignored_val}) - - self.assertRaises(InvalidBSON, decode, invalid_both, CodecOptions( - unicode_decode_error_handler="strict")) - self.assertRaises(InvalidBSON, decode, invalid_both, CodecOptions()) - self.assertRaises(InvalidBSON, decode, invalid_both) + # Ensure that strict mode raises an error. + for invalid in [invalid_key, invalid_val, invalid_both]: + self.assertRaises(InvalidBSON, decode, invalid, CodecOptions( + unicode_decode_error_handler="strict")) + self.assertRaises(InvalidBSON, decode, invalid, CodecOptions()) + self.assertRaises(InvalidBSON, decode, invalid) + + # Test all other error handlers. 
+ for handler in ['replace', 'backslashreplace', 'surrogateescape', + 'ignore']: + expected_key = b'ke\xe9str'.decode('utf-8', handler) + expected_val = b'fo\xe9bar'.decode('utf-8', handler) + doc = decode(invalid_key, + CodecOptions(unicode_decode_error_handler=handler)) + self.assertEqual(doc, {expected_key: "foobar"}) + doc = decode(invalid_val, + CodecOptions(unicode_decode_error_handler=handler)) + self.assertEqual(doc, {"keystr": expected_val}) + doc = decode(invalid_both, + CodecOptions(unicode_decode_error_handler=handler)) + self.assertEqual(doc, {expected_key: expected_val}) # Test handling bad error mode. dec = decode(enc, From bdafc357331813222b1e677b66041dad1fc852a5 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Fri, 21 Jan 2022 10:09:03 -0800 Subject: [PATCH 0055/1588] PYTHON-3041 Fix doc example for initializing a replica set (#835) --- doc/examples/high_availability.rst | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/doc/examples/high_availability.rst b/doc/examples/high_availability.rst index a5c252f8a3..19b48f7d01 100644 --- a/doc/examples/high_availability.rst +++ b/doc/examples/high_availability.rst @@ -52,11 +52,11 @@ At this point all of our nodes are up and running, but the set has yet to be initialized. Until the set is initialized no node will become the primary, and things are essentially "offline". -To initialize the set we need to connect to a single node and run the -initiate command:: +To initialize the set we need to connect directly to a single node and run the +initiate command using the ``directConnection`` option:: >>> from pymongo import MongoClient - >>> c = MongoClient('localhost', 27017) + >>> c = MongoClient('localhost', 27017, directConnection=True) .. note:: We could have connected to any of the other nodes instead, but only the node we initiate from is allowed to contain any @@ -81,15 +81,19 @@ The initial connection as made above is a special case for an uninitialized replica set. Normally we'll want to connect differently. A connection to a replica set can be made using the :meth:`~pymongo.mongo_client.MongoClient` constructor, specifying -one or more members of the set, along with the replica set name. Any of -the following connects to the replica set we just created:: +one or more members of the set and optionally the replica set name. +Any of the following connects to the replica set we just created:: + >>> MongoClient('localhost') + MongoClient(host=['localhost:27017'], ...) >>> MongoClient('localhost', replicaset='foo') MongoClient(host=['localhost:27017'], replicaset='foo', ...) >>> MongoClient('localhost:27018', replicaset='foo') MongoClient(['localhost:27018'], replicaset='foo', ...) >>> MongoClient('localhost', 27019, replicaset='foo') MongoClient(['localhost:27019'], replicaset='foo', ...) + >>> MongoClient('mongodb://localhost:27017,localhost:27018/') + MongoClient(['localhost:27017', 'localhost:27018'], ...) >>> MongoClient('mongodb://localhost:27017,localhost:27018/?replicaSet=foo') MongoClient(['localhost:27017', 'localhost:27018'], replicaset='foo', ...) 
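Putting the corrected example together, the complete initiation step might look like the sketch below (set name and ports assumed from the surrounding guide; ``replSetInitiate`` is the server command behind the shell's ``rs.initiate()`` helper):

    from pymongo import MongoClient

    # An uninitialized member cannot be discovered through replica set
    # monitoring, so connect directly to a single node.
    c = MongoClient("localhost", 27017, directConnection=True)
    config = {
        "_id": "foo",
        "members": [
            {"_id": 0, "host": "localhost:27017"},
            {"_id": 1, "host": "localhost:27018"},
            {"_id": 2, "host": "localhost:27019"},
        ],
    }
    c.admin.command("replSetInitiate", config)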
From 4eeb685c5794cc4fdac6103d083415603f987c90 Mon Sep 17 00:00:00 2001 From: Julius Park Date: Wed, 26 Jan 2022 11:57:36 -0800 Subject: [PATCH 0056/1588] PYTHON-3069 Require hello command + OP_MSG when 'loadBalanced=True' (#837) --- pymongo/pool.py | 5 ++- test/mockupdb/test_handshake.py | 62 ++++++++++++++++++++++++++++++++- 2 files changed, 65 insertions(+), 2 deletions(-) diff --git a/pymongo/pool.py b/pymongo/pool.py index 88b0e09737..a0868c9916 100644 --- a/pymongo/pool.py +++ b/pymongo/pool.py @@ -550,7 +550,10 @@ def unpin(self): self.close_socket(ConnectionClosedReason.STALE) def hello_cmd(self): - if self.opts.server_api or self.hello_ok: + # Handshake spec requires us to use OP_MSG+hello command for the + # initial handshake in load balanced or versioned api mode. + if self.opts.server_api or self.hello_ok or self.opts.load_balanced: + self.op_msg_enabled = True return SON([(HelloCompat.CMD, 1)]) else: return SON([(HelloCompat.LEGACY_CMD, 1), ('helloOk', True)]) diff --git a/test/mockupdb/test_handshake.py b/test/mockupdb/test_handshake.py index 621f01728f..34028a637f 100644 --- a/test/mockupdb/test_handshake.py +++ b/test/mockupdb/test_handshake.py @@ -12,14 +12,58 @@ # See the License for the specific language governing permissions and # limitations under the License. +from mockupdb import (MockupDB, OpReply, OpMsg, OpMsgReply, OpQuery, absent, + Command, go) -from mockupdb import MockupDB, OpReply, OpMsg, absent, Command, go from pymongo import MongoClient, version as pymongo_version from pymongo.errors import OperationFailure +from pymongo.server_api import ServerApi, ServerApiVersion +from bson.objectid import ObjectId import unittest +def test_hello_with_option(self, protocol, **kwargs): + hello = "ismaster" if isinstance(protocol(), OpQuery) else "hello" + # `db.command("hello"|"ismaster")` commands are the same for primaries and + # secondaries, so we only need one server. + primary = MockupDB() + # Set up a custom handler to save the first request from the driver. + self.handshake_req = None + def respond(r): + # Only save the very first request from the driver. + if self.handshake_req == None: + self.handshake_req = r + load_balanced_kwargs = {"serviceId": ObjectId()} if kwargs.get( + "loadBalanced") else {} + return r.reply(OpMsgReply(minWireVersion=0, maxWireVersion=13, + **kwargs, **load_balanced_kwargs)) + primary.autoresponds(respond) + primary.run() + self.addCleanup(primary.stop) + + # We need a special dict because MongoClient uses "server_api" and all + # of the commands use "apiVersion". + k_map = {("apiVersion", "1"):("server_api", ServerApi( + ServerApiVersion.V1))} + client = MongoClient("mongodb://"+primary.address_string, + appname='my app', # For _check_handshake_data() + **dict([k_map.get((k, v), (k, v)) for k, v + in kwargs.items()])) + + self.addCleanup(client.close) + + # We have an autoresponder luckily, so no need for `go()`. + assert client.db.command(hello) + + # We do this checking here rather than in the autoresponder `respond()` + # because it runs in another Python thread so there are some funky things + # with error handling within that thread, and we want to be able to use + # self.assertRaises(). 
+ self.handshake_req.assert_matches(protocol(hello, **kwargs)) + _check_handshake_data(self.handshake_req) + + def _check_handshake_data(request): assert 'client' in request data = request['client'] @@ -156,6 +200,22 @@ def test_client_handshake_saslSupportedMechs(self): future() return + def test_handshake_load_balanced(self): + test_hello_with_option(self, OpMsg, loadBalanced=True) + with self.assertRaisesRegex(AssertionError, "does not match"): + test_hello_with_option(self, Command, loadBalanced=True) + + def test_handshake_versioned_api(self): + test_hello_with_option(self, OpMsg, apiVersion="1") + with self.assertRaisesRegex(AssertionError, "does not match"): + test_hello_with_option(self, Command, apiVersion="1") + + def test_handshake_not_either(self): + # If we don't specify either option then it should be using + # OP_QUERY for the initial step of the handshake. + test_hello_with_option(self, Command) + with self.assertRaisesRegex(AssertionError, "does not match"): + test_hello_with_option(self, OpMsg) if __name__ == '__main__': unittest.main() From 308b4f4e08c9fe91210ee498adbbafe0c7ebebe4 Mon Sep 17 00:00:00 2001 From: Bernie Hackett Date: Wed, 26 Jan 2022 17:28:38 -0800 Subject: [PATCH 0057/1588] PYTHON-1596 Test on RHEL7 FIPS (#838) --- .evergreen/config.yml | 26 ++++++++++++++++++++++++++ 1 file changed, 26 insertions(+) diff --git a/.evergreen/config.yml b/.evergreen/config.yml index 44a075a727..bf96f220ff 100644 --- a/.evergreen/config.yml +++ b/.evergreen/config.yml @@ -1688,6 +1688,16 @@ tasks: TOPOLOGY: "sharded_cluster" - func: "run load-balancer" - func: "run tests" + + - name: "test-fips-standalone" + tags: ["fips"] + commands: + - func: "bootstrap mongo-orchestration" + vars: + VERSION: "latest" + TOPOLOGY: "server" + PYTHON_BINARY: "/opt/mongodbtoolchain/v3/bin/python3" + - func: "run tests" # }}} - name: "coverage-report" tags: ["coverage"] @@ -1758,6 +1768,12 @@ axes: batchtime: 10080 # 7 days variables: libmongocrypt_url: https://s3.amazonaws.com/mciuploads/libmongocrypt/rhel-70-64-bit/master/latest/libmongocrypt.tar.gz + - id: rhel70-fips + display_name: "RHEL 7.0 FIPS" + run_on: rhel70-fips + batchtime: 10080 # 7 days + variables: + libmongocrypt_url: https://s3.amazonaws.com/mciuploads/libmongocrypt/rhel-70-64-bit/master/latest/libmongocrypt.tar.gz - id: ubuntu-16.04 display_name: "Ubuntu 16.04" run_on: ubuntu1604-test @@ -2157,6 +2173,16 @@ buildvariants: - ".4.0" - ".3.6" +- matrix_name: "tests-fips" + matrix_spec: + platform: + - rhel70-fips + auth: "auth" + ssl: "ssl" + display_name: "${platform} ${auth} ${ssl}" + tasks: + - "test-fips-standalone" + - matrix_name: "test-macos" matrix_spec: platform: From aa60c2a2c051d9191aacf853405666bdeaa8d93a Mon Sep 17 00:00:00 2001 From: Julius Park Date: Tue, 1 Feb 2022 12:45:47 -0800 Subject: [PATCH 0058/1588] PYTHON-3071 [DevOps] Merge and improve resync_specs.sh (#839) --- .evergreen/resync-specs.sh | 145 +++++++++++++++++++++++++++++++++++++ CONTRIBUTING.rst | 20 +++++ 2 files changed, 165 insertions(+) create mode 100755 .evergreen/resync-specs.sh diff --git a/.evergreen/resync-specs.sh b/.evergreen/resync-specs.sh new file mode 100755 index 0000000000..1d0742258b --- /dev/null +++ b/.evergreen/resync-specs.sh @@ -0,0 +1,145 @@ +#!/bin/bash +# exit when any command fails +set -e +PYMONGO=$(dirname "$(cd "$(dirname "$0")"; pwd)") +SPECS=${MDB_SPECS:-~/Work/specifications} + +help (){ + echo "Usage: resync_specs.sh [-bcsp] spec" + echo "Required arguments:" + echo " spec determines which folder the spec tests will 
be copied from." + echo "Optional flags:" + echo " -b is used to add a string to the blocklist for that next run. Can be used" + echo " any number of times on a single command to block multiple patterns." + echo " You can use any regex pattern (it is passed to 'grep -Ev')." + echo " -c is used to set a branch or commit that will be checked out in the" + echo " specifications repo before copying." + echo " -s is used to set a unique path to the specs repo for that specific" + echo " run." + echo "Notes:" + echo "You can export the environment variable MDB_SPECS to set the specs" + echo " repo similar to -s, but this will persist between runs until you " + echo "unset it." +} + +# Parse flag args +BRANCH='' +BLOCKLIST='.*\.yml' +while getopts 'b:c:s:' flag; do + case "${flag}" in + b) BLOCKLIST+="|$OPTARG" + ;; + c) BRANCH="${OPTARG}" + ;; + s) SPECS="${OPTARG}" + ;; + *) help; exit 0 + ;; + esac +done +shift $((OPTIND-1)) + +if [ -z $BRANCH ] +then + git -C $SPECS checkout $BRANCH +fi + +# Ensure the JSON files are up to date. +cd $SPECS/source +make +cd - +# cpjson unified-test-format/tests/invalid unified-test-format/invalid +# * param1: Path to spec tests dir in specifications repo +# * param2: Path to where the corresponding tests live in Python. +cpjson () { + find "$PYMONGO"/test/$2 -type f -delete + cd "$SPECS"/source/$1 + find . -name '*.json' | grep -Ev "${BLOCKLIST}" | cpio -pdm \ + $PYMONGO/test/$2 + printf "\nIgnored files for ${PWD}" + printf "\n%s\n" "$(diff <(find . -name '*.json' | sort) \ + <(find . -name '*.json' | grep -Ev "${BLOCKLIST}" | sort))" | \ + sed -e '/^[0-9]/d' | sed -e 's|< ./||g' +} + +for spec in "$@" +do + case "$spec" in + bson*corpus) + cpjson bson-corpus/tests/ bson_corpus + ;; + max*staleness) + cpjson max-staleness/tests/ max_staleness + ;; + connection*string) + cpjson connection-string/tests/ connection_string/test + ;; + change*streams) + cpjson change-streams/tests/ change_streams/ + ;; + cmap|CMAP) + cpjson connection-monitoring-and-pooling/tests cmap + ;; + command*monitoring) + cpjson command-monitoring/tests command_monitoring + ;; + crud|CRUD) + cpjson crud/tests/ crud + ;; + load*balancer) + cpjson load-balancers/tests load_balancer + ;; + initial-dns-seedlist-discovery|srv_seedlist) + cpjson initial-dns-seedlist-discovery/tests/ srv_seedlist + ;; + old_srv_seedlist) + cpjson initial-dns-seedlist-discovery/tests srv_seedlist + ;; + retryable*reads) + cpjson retryable-reads/tests/ retryable_reads + ;; + retryable*writes) + cpjson retryable-writes/tests/ retryable_writes + ;; + sdam|SDAM) + cpjson server-discovery-and-monitoring/tests/errors \ + discovery_and_monitoring/errors + cpjson server-discovery-and-monitoring/tests/rs \ + discovery_and_monitoring/rs + cpjson server-discovery-and-monitoring/tests/sharded \ + discovery_and_monitoring/sharded + cpjson server-discovery-and-monitoring/tests/single \ + discovery_and_monitoring/single + cpjson server-discovery-and-monitoring/tests/integration \ + discovery_and_monitoring_integration + cpjson server-discovery-and-monitoring/tests/load-balanced \ + discovery_and_monitoring/load-balanced + ;; + sdam*monitoring) + cpjson server-discovery-and-monitoring/tests/monitoring sdam_monitoring + ;; + server*selection) + cpjson server-selection/tests/ server_selection + ;; + sessions) + cpjson sessions/tests/ sessions + ;; + transactions|transactions-convenient-api) + cpjson transactions/tests/ transactions + cpjson transactions-convenient-api/tests/ transactions-convenient-api + ;; + unified) + cpjson 
unified-test-format/tests/ unified-test-format/ + ;; + uri|uri*options) + cpjson uri-options/tests uri_options + ;; + versioned-api) + cpjson versioned-api/tests versioned-api + ;; + *) + echo "Do not know how to resync spec tests for '${spec}'" + help + ;; + esac +done diff --git a/CONTRIBUTING.rst b/CONTRIBUTING.rst index cf451172a4..40dca00e0c 100644 --- a/CONTRIBUTING.rst +++ b/CONTRIBUTING.rst @@ -48,3 +48,23 @@ just make your changes to the inline documentation of the appropriate branch and submit a `pull request `_. You might also use the GitHub `Edit `_ button. + +Re-sync Spec Tests +------------------ + +If you would like to re-sync the copy of the specification tests in the +PyMongo repository with the version inside the `specifications repo +<https://github.com/mongodb/specifications>`_, please +use the script provided in ``.evergreen/resync-specs.sh``:: + + git clone git@github.com:mongodb/specifications.git + export MDB_SPECS=~/specifications + cd ~/mongo-python-driver/.evergreen + ./resync-specs.sh -b "connection-string*" crud bson-corpus + cd .. + +The ``-b`` flag adds a regex pattern used to block files you do not wish to +update in PyMongo. +This is primarily helpful if you are implementing a new feature in PyMongo +that has spec tests already implemented, or if you are attempting to +validate new spec tests in PyMongo. \ No newline at end of file From abfa0d35bcbc99107dfac71f58c4b1606dc7656a Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Wed, 2 Feb 2022 13:53:58 -0600 Subject: [PATCH 0059/1588] PYTHON-3075 bulk_write does not apply CodecOptions to upserted_ids result (#840) --- pymongo/message.py | 13 +++---- pymongo/pool.py | 4 +-- test/test_bulk.py | 76 +++++++++++++++++++++++++++++++++++++++++ test/test_encryption.py | 33 +++++++++++++++++- 4 files changed, 117 insertions(+), 9 deletions(-) diff --git a/pymongo/message.py b/pymongo/message.py index 584528c2f2..f632214a08 100644 --- a/pymongo/message.py +++ b/pymongo/message.py @@ -32,6 +32,7 @@ _decode_selective, _dict_to_bson, _make_c_string) +from bson import codec_options from bson.int64 import Int64 from bson.raw_bson import (_inflate_bson, DEFAULT_RAW_BSON_OPTIONS, RawBSONDocument) @@ -798,7 +799,7 @@ def write_command(self, cmd, request_id, msg, docs): self._start(cmd, request_id, docs) start = datetime.datetime.now() try: - reply = self.sock_info.write_command(request_id, msg) + reply = self.sock_info.write_command(request_id, msg, self.codec) if self.publish: duration = (datetime.datetime.now() - start) + duration self._succeed(request_id, reply, duration) @@ -866,7 +867,7 @@ def execute(self, cmd, docs, client): batched_cmd, to_send = self._batch_command(cmd, docs) result = self.sock_info.command( self.db_name, batched_cmd, - codec_options=_UNICODE_REPLACE_CODEC_OPTIONS, + codec_options=self.codec, session=self.session, client=client) return result, to_send @@ -1205,9 +1206,9 @@ def unpack_response(self, cursor_id=None, return bson._decode_all_selective( self.documents, codec_options, user_fields) - def command_response(self): + def command_response(self, codec_options): """Unpack a command response.""" - docs = self.unpack_response() + docs = self.unpack_response(codec_options=codec_options) assert self.number_returned == 1 return docs[0] @@ -1273,9 +1274,9 @@ def unpack_response(self, cursor_id=None, return bson._decode_all_selective( self.payload_document, codec_options, user_fields) - def command_response(self): + def command_response(self, codec_options): """Unpack a command response.""" - return self.unpack_response()[0] +
return self.unpack_response(codec_options=codec_options)[0] def raw_command_response(self): """Return the bytes of the command response.""" diff --git a/pymongo/pool.py b/pymongo/pool.py index a0868c9916..70920d5b23 100644 --- a/pymongo/pool.py +++ b/pymongo/pool.py @@ -775,7 +775,7 @@ def unack_write(self, msg, max_doc_size): self._raise_if_not_writable(True) self.send_message(msg, max_doc_size) - def write_command(self, request_id, msg): + def write_command(self, request_id, msg, codec_options): """Send "insert" etc. command, returning response as a dict. Can raise ConnectionFailure or OperationFailure. @@ -786,7 +786,7 @@ def write_command(self, request_id, msg): """ self.send_message(msg, 0) reply = self.receive_message(request_id) - result = reply.command_response() + result = reply.command_response(codec_options) # Raises NotPrimaryError or OperationFailure. helpers._check_command_response(result, self.max_wire_version) diff --git a/test/test_bulk.py b/test/test_bulk.py index f93cd6c766..08740a437e 100644 --- a/test/test_bulk.py +++ b/test/test_bulk.py @@ -15,9 +15,13 @@ """Test the bulk API.""" import sys +import uuid +from bson.binary import UuidRepresentation +from bson.codec_options import CodecOptions sys.path[0:0] = [""] +from bson import Binary from bson.objectid import ObjectId from pymongo.common import partition_node from pymongo.errors import (BulkWriteError, @@ -376,6 +380,78 @@ def test_client_generated_upsert_id(self): {'index': 2, '_id': 2}]}, result.bulk_api_result) + def test_upsert_uuid_standard(self): + options = CodecOptions(uuid_representation=UuidRepresentation.STANDARD) + coll = self.coll.with_options(codec_options=options) + uuids = [uuid.uuid4() for _ in range(3)] + result = coll.bulk_write([ + UpdateOne({'_id': uuids[0]}, {'$set': {'a': 0}}, upsert=True), + ReplaceOne({'a': 1}, {'_id': uuids[1]}, upsert=True), + # This is just here to make the counts right in all cases. + ReplaceOne({'_id': uuids[2]}, {'_id': uuids[2]}, upsert=True), + ]) + self.assertEqualResponse( + {'nMatched': 0, + 'nModified': 0, + 'nUpserted': 3, + 'nInserted': 0, + 'nRemoved': 0, + 'upserted': [{'index': 0, '_id': uuids[0]}, + {'index': 1, '_id': uuids[1]}, + {'index': 2, '_id': uuids[2]}]}, + result.bulk_api_result) + + def test_upsert_uuid_unspecified(self): + options = CodecOptions(uuid_representation=UuidRepresentation.UNSPECIFIED) + coll = self.coll.with_options(codec_options=options) + uuids = [Binary.from_uuid(uuid.uuid4()) for _ in range(3)] + result = coll.bulk_write([ + UpdateOne({'_id': uuids[0]}, {'$set': {'a': 0}}, upsert=True), + ReplaceOne({'a': 1}, {'_id': uuids[1]}, upsert=True), + # This is just here to make the counts right in all cases. + ReplaceOne({'_id': uuids[2]}, {'_id': uuids[2]}, upsert=True), + ]) + self.assertEqualResponse( + {'nMatched': 0, + 'nModified': 0, + 'nUpserted': 3, + 'nInserted': 0, + 'nRemoved': 0, + 'upserted': [{'index': 0, '_id': uuids[0]}, + {'index': 1, '_id': uuids[1]}, + {'index': 2, '_id': uuids[2]}]}, + result.bulk_api_result) + + def test_upsert_uuid_standard_subdocuments(self): + options = CodecOptions(uuid_representation=UuidRepresentation.STANDARD) + coll = self.coll.with_options(codec_options=options) + ids = [ + {'f': Binary(bytes(i)), 'f2': uuid.uuid4()} + for i in range(3) + ] + + result = coll.bulk_write([ + UpdateOne({'_id': ids[0]}, {'$set': {'a': 0}}, upsert=True), + ReplaceOne({'a': 1}, {'_id': ids[1]}, upsert=True), + # This is just here to make the counts right in all cases. 
+            ReplaceOne({'_id': ids[2]}, {'_id': ids[2]}, upsert=True),
+        ])
+
+        # The `Binary` values are returned as `bytes` objects.
+        for _id in ids:
+            _id['f'] = bytes(_id['f'])
+
+        self.assertEqualResponse(
+            {'nMatched': 0,
+             'nModified': 0,
+             'nUpserted': 3,
+             'nInserted': 0,
+             'nRemoved': 0,
+             'upserted': [{'index': 0, '_id': ids[0]},
+                          {'index': 1, '_id': ids[1]},
+                          {'index': 2, '_id': ids[2]}]},
+            result.bulk_api_result)
+
     def test_single_ordered_batch(self):
         result = self.coll.bulk_write([
             InsertOne({'a': 1}),
diff --git a/test/test_encryption.py b/test/test_encryption.py
index 88acadfbaf..8e47d44525 100644
--- a/test/test_encryption.py
+++ b/test/test_encryption.py
@@ -29,6 +29,7 @@
 from bson import encode, json_util
 from bson.binary import (Binary,
+                         UuidRepresentation,
                          JAVA_LEGACY,
                          STANDARD,
                          UUID_SUBTYPE)
@@ -50,13 +51,14 @@
                             ServerSelectionTimeoutError,
                             WriteError)
 from pymongo.mongo_client import MongoClient
-from pymongo.operations import InsertOne
+from pymongo.operations import InsertOne, ReplaceOne, UpdateOne
 from pymongo.write_concern import WriteConcern
 from test import (unittest,
                   CA_PEM,
                   CLIENT_PEM,
                   client_context,
                   IntegrationTest,
                   PyMongoTestCase)
+from test.test_bulk import BulkTestBase
 from test.utils import (TestCreator,
                         camel_to_snake_args,
                         OvertCommandListener,
@@ -313,6 +315,35 @@ def test_use_after_close(self):
             client.admin.command('ping')


+class TestEncryptedBulkWrite(BulkTestBase, EncryptionIntegrationTest):
+
+    def test_upsert_uuid_standard_encrypted(self):
+        opts = AutoEncryptionOpts(KMS_PROVIDERS, 'keyvault.datakeys')
+        client = rs_or_single_client(auto_encryption_opts=opts)
+        self.addCleanup(client.close)
+
+        options = CodecOptions(uuid_representation=UuidRepresentation.STANDARD)
+        encrypted_coll = client.pymongo_test.test
+        coll = encrypted_coll.with_options(codec_options=options)
+        uuids = [uuid.uuid4() for _ in range(3)]
+        result = coll.bulk_write([
+            UpdateOne({'_id': uuids[0]}, {'$set': {'a': 0}}, upsert=True),
+            ReplaceOne({'a': 1}, {'_id': uuids[1]}, upsert=True),
+            # This is just here to make the counts right in all cases.
+ ReplaceOne({'_id': uuids[2]}, {'_id': uuids[2]}, upsert=True), + ]) + self.assertEqualResponse( + {'nMatched': 0, + 'nModified': 0, + 'nUpserted': 3, + 'nInserted': 0, + 'nRemoved': 0, + 'upserted': [{'index': 0, '_id': uuids[0]}, + {'index': 1, '_id': uuids[1]}, + {'index': 2, '_id': uuids[2]}]}, + result.bulk_api_result) + + class TestClientMaxWireVersion(IntegrationTest): @classmethod From dd6c140d438039e9f6df96cd3d4221f380a37e18 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Wed, 2 Feb 2022 21:12:36 -0600 Subject: [PATCH 0060/1588] PYTHON-3060 Add typings to pymongo package (#831) --- bson/__init__.py | 19 +- bson/binary.py | 9 +- gridfs/grid_file.py | 4 +- mypy.ini | 22 +++ pymongo/__init__.py | 28 +-- pymongo/aggregation.py | 6 +- pymongo/auth.py | 14 +- pymongo/auth_aws.py | 7 +- pymongo/bulk.py | 22 +-- pymongo/change_stream.py | 70 ++++--- pymongo/client_options.py | 2 +- pymongo/client_session.py | 106 ++++++----- pymongo/collation.py | 31 ++-- pymongo/collection.py | 311 +++++++++++++++++++++----------- pymongo/command_cursor.py | 71 +++++--- pymongo/common.py | 146 +++++++-------- pymongo/compression_support.py | 7 +- pymongo/cursor.py | 189 +++++++++++-------- pymongo/database.py | 157 +++++++++++----- pymongo/driver_info.py | 3 +- pymongo/encryption.py | 63 ++++--- pymongo/encryption_options.py | 30 +-- pymongo/errors.py | 51 ++++-- pymongo/event_loggers.py | 46 +++-- pymongo/hello.py | 66 ++++--- pymongo/helpers.py | 14 +- pymongo/message.py | 29 ++- pymongo/mongo_client.py | 181 +++++++++++-------- pymongo/monitor.py | 10 +- pymongo/monitoring.py | 203 +++++++++++++-------- pymongo/network.py | 9 +- pymongo/ocsp_cache.py | 2 +- pymongo/ocsp_support.py | 61 +++---- pymongo/operations.py | 47 ++--- pymongo/periodic_executor.py | 18 +- pymongo/pool.py | 60 +++--- pymongo/pyopenssl_context.py | 41 ++--- pymongo/read_concern.py | 12 +- pymongo/read_preferences.py | 74 ++++---- pymongo/results.py | 58 +++--- pymongo/saslprep.py | 9 +- pymongo/server.py | 7 +- pymongo/server_description.py | 88 ++++----- pymongo/server_type.py | 19 +- pymongo/socket_checker.py | 12 +- pymongo/srv_resolver.py | 3 +- pymongo/ssl_context.py | 1 + pymongo/ssl_support.py | 8 +- pymongo/topology.py | 41 ++--- pymongo/topology_description.py | 99 ++++++---- pymongo/typings.py | 29 +++ pymongo/uri_parser.py | 40 ++-- pymongo/write_concern.py | 16 +- test/performance/perf_test.py | 2 +- test/test_cursor.py | 2 +- test/test_grid_file.py | 2 +- tools/clean.py | 2 +- 57 files changed, 1579 insertions(+), 1100 deletions(-) create mode 100644 pymongo/typings.py diff --git a/bson/__init__.py b/bson/__init__.py index 5be673cfc3..e518cd91c9 100644 --- a/bson/__init__.py +++ b/bson/__init__.py @@ -61,10 +61,10 @@ import struct import sys import uuid -from codecs import utf_8_decode as _utf_8_decode # type: ignore -from codecs import utf_8_encode as _utf_8_encode # type: ignore +from codecs import utf_8_decode as _utf_8_decode # type: ignore[attr-defined] +from codecs import utf_8_encode as _utf_8_encode # type: ignore[attr-defined] from collections import abc as _abc -from typing import (TYPE_CHECKING, Any, BinaryIO, Callable, Dict, Generator, +from typing import (IO, TYPE_CHECKING, Any, BinaryIO, Callable, Dict, Generator, Iterator, List, Mapping, MutableMapping, NoReturn, Sequence, Tuple, Type, TypeVar, Union, cast) @@ -88,11 +88,13 @@ # Import RawBSONDocument for type-checking only to avoid circular dependency. 
if TYPE_CHECKING: + from array import array + from mmap import mmap from bson.raw_bson import RawBSONDocument try: - from bson import _cbson # type: ignore + from bson import _cbson # type: ignore[attr-defined] _USE_C = True except ImportError: _USE_C = False @@ -851,6 +853,7 @@ def _datetime_to_millis(dtm: datetime.datetime) -> int: _DocumentIn = Mapping[str, Any] _DocumentOut = Union[MutableMapping[str, Any], "RawBSONDocument"] +_ReadableBuffer = Union[bytes, memoryview, "mmap", "array"] def encode(document: _DocumentIn, check_keys: bool = False, codec_options: CodecOptions = DEFAULT_CODEC_OPTIONS) -> bytes: @@ -880,7 +883,7 @@ def encode(document: _DocumentIn, check_keys: bool = False, codec_options: Codec return _dict_to_bson(document, check_keys, codec_options) -def decode(data: bytes, codec_options: CodecOptions = DEFAULT_CODEC_OPTIONS) -> _DocumentOut: +def decode(data: _ReadableBuffer, codec_options: CodecOptions = DEFAULT_CODEC_OPTIONS) -> Dict[str, Any]: """Decode BSON to a document. By default, returns a BSON document represented as a Python @@ -912,7 +915,7 @@ def decode(data: bytes, codec_options: CodecOptions = DEFAULT_CODEC_OPTIONS) -> return _bson_to_dict(data, codec_options) -def decode_all(data: bytes, codec_options: CodecOptions = DEFAULT_CODEC_OPTIONS) -> List[_DocumentOut]: +def decode_all(data: _ReadableBuffer, codec_options: CodecOptions = DEFAULT_CODEC_OPTIONS) -> List[Dict[str, Any]]: """Decode BSON data to multiple documents. `data` must be a bytes-like object implementing the buffer protocol that @@ -1075,7 +1078,7 @@ def decode_iter(data: bytes, codec_options: CodecOptions = DEFAULT_CODEC_OPTIONS yield _bson_to_dict(elements, codec_options) -def decode_file_iter(file_obj: BinaryIO, codec_options: CodecOptions = DEFAULT_CODEC_OPTIONS) -> Iterator[_DocumentOut]: +def decode_file_iter(file_obj: Union[BinaryIO, IO], codec_options: CodecOptions = DEFAULT_CODEC_OPTIONS) -> Iterator[_DocumentOut]: """Decode bson data from a file to multiple documents as a generator. Works similarly to the decode_all function, but reads from the file object @@ -1158,7 +1161,7 @@ def encode(cls: Type["BSON"], document: _DocumentIn, check_keys: bool = False, """ return cls(encode(document, check_keys, codec_options)) - def decode(self, codec_options: CodecOptions = DEFAULT_CODEC_OPTIONS) -> _DocumentOut: # type: ignore[override] + def decode(self, codec_options: CodecOptions = DEFAULT_CODEC_OPTIONS) -> Dict[str, Any]: # type: ignore[override] """Decode this BSON data. By default, returns a BSON document represented as a Python diff --git a/bson/binary.py b/bson/binary.py index 53d5419b49..de44d48174 100644 --- a/bson/binary.py +++ b/bson/binary.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -from typing import Any, Tuple, Type +from typing import Any, Tuple, Type, Union, TYPE_CHECKING from uuid import UUID """Tools for representing BSON binary data. @@ -57,6 +57,11 @@ """ +if TYPE_CHECKING: + from array import array as _array + from mmap import mmap as _mmap + + class UuidRepresentation: UNSPECIFIED = 0 """An unspecified UUID representation. 
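The ``_ReadableBuffer`` union threaded through ``decode``/``decode_all`` above
(and the widened ``Binary.__new__`` in the next hunk) means any object
supporting the buffer protocol is accepted, not just ``bytes``. A minimal
standalone sketch of the effect::

    import bson

    raw = bson.encode({'x': 1})
    # memoryview (like mmap.mmap and array.array) satisfies _ReadableBuffer.
    assert bson.decode(memoryview(raw)) == {'x': 1}
    assert bson.decode_all(memoryview(raw)) == [{'x': 1}]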
@@ -211,7 +216,7 @@ class Binary(bytes): _type_marker = 5 __subtype: int - def __new__(cls: Type["Binary"], data: bytes, subtype: int = BINARY_SUBTYPE) -> "Binary": + def __new__(cls: Type["Binary"], data: Union[memoryview, bytes, "_mmap", "_array"], subtype: int = BINARY_SUBTYPE) -> "Binary": if not isinstance(subtype, int): raise TypeError("subtype must be an instance of int") if subtype >= 256 or subtype < 0: diff --git a/gridfs/grid_file.py b/gridfs/grid_file.py index 9353a97a1c..686d328a3c 100644 --- a/gridfs/grid_file.py +++ b/gridfs/grid_file.py @@ -874,10 +874,10 @@ def next(self) -> GridOut: __next__ = next - def add_option(self, *args: Any, **kwargs: Any) -> None: + def add_option(self, *args: Any, **kwargs: Any) -> None: # type: ignore[override] raise NotImplementedError("Method does not exist for GridOutCursor") - def remove_option(self, *args: Any, **kwargs: Any) -> None: + def remove_option(self, *args: Any, **kwargs: Any) -> None: # type: ignore[override] raise NotImplementedError("Method does not exist for GridOutCursor") def _clone_base(self, session: ClientSession) -> "GridOutCursor": diff --git a/mypy.ini b/mypy.ini index 2646febb6f..926bf95745 100644 --- a/mypy.ini +++ b/mypy.ini @@ -1,11 +1,33 @@ [mypy] +check_untyped_defs = true disallow_subclassing_any = true disallow_incomplete_defs = true no_implicit_optional = true +pretty = true +show_error_context = true +show_error_codes = true strict_equality = true warn_unused_configs = true warn_unused_ignores = true warn_redundant_casts = true +[mypy-kerberos.*] +ignore_missing_imports = True + [mypy-mockupdb] ignore_missing_imports = True + +[mypy-pymongo_auth_aws.*] +ignore_missing_imports = True + +[mypy-pymongocrypt.*] +ignore_missing_imports = True + +[mypy-service_identity.*] +ignore_missing_imports = True + +[mypy-snappy.*] +ignore_missing_imports = True + +[mypy-winkerberos.*] +ignore_missing_imports = True diff --git a/pymongo/__init__.py b/pymongo/__init__.py index 5db9363f90..54a962df57 100644 --- a/pymongo/__init__.py +++ b/pymongo/__init__.py @@ -14,6 +14,8 @@ """Python driver for MongoDB.""" +from typing import Tuple, Union + ASCENDING = 1 """Ascending sort order.""" DESCENDING = -1 @@ -53,35 +55,33 @@ .. _text index: http://docs.mongodb.org/manual/core/index-text/ """ -version_tuple = (4, 1, 0, '.dev0') +version_tuple: Tuple[Union[int, str], ...] 
= (4, 1, 0, '.dev0') -def get_version_string(): +def get_version_string() -> str: if isinstance(version_tuple[-1], str): return '.'.join(map(str, version_tuple[:-1])) + version_tuple[-1] return '.'.join(map(str, version_tuple)) -__version__ = version = get_version_string() +__version__: str = get_version_string() +version = __version__ + """Current version of PyMongo.""" from pymongo.collection import ReturnDocument -from pymongo.common import (MIN_SUPPORTED_WIRE_VERSION, - MAX_SUPPORTED_WIRE_VERSION) +from pymongo.common import (MAX_SUPPORTED_WIRE_VERSION, + MIN_SUPPORTED_WIRE_VERSION) from pymongo.cursor import CursorType from pymongo.mongo_client import MongoClient -from pymongo.operations import (IndexModel, - InsertOne, - DeleteOne, - DeleteMany, - UpdateOne, - UpdateMany, - ReplaceOne) +from pymongo.operations import (DeleteMany, DeleteOne, IndexModel, InsertOne, + ReplaceOne, UpdateMany, UpdateOne) from pymongo.read_preferences import ReadPreference from pymongo.write_concern import WriteConcern -def has_c(): + +def has_c() -> bool: """Is the C extension installed?""" try: - from pymongo import _cmessage + from pymongo import _cmessage # type: ignore[attr-defined] return True except ImportError: return False diff --git a/pymongo/aggregation.py b/pymongo/aggregation.py index 8fb0225eb3..b2e20e9ca5 100644 --- a/pymongo/aggregation.py +++ b/pymongo/aggregation.py @@ -15,11 +15,10 @@ """Perform aggregation operations on a collection or database.""" from bson.son import SON - from pymongo import common from pymongo.collation import validate_collation_or_none from pymongo.errors import ConfigurationError -from pymongo.read_preferences import _AggWritePref, ReadPreference +from pymongo.read_preferences import ReadPreference, _AggWritePref class _AggregationCommand(object): @@ -37,7 +36,7 @@ def __init__(self, target, cursor_class, pipeline, options, self._target = target - common.validate_list('pipeline', pipeline) + pipeline = common.validate_list('pipeline', pipeline) self._pipeline = pipeline self._performs_write = False if pipeline and ("$out" in pipeline[-1] or "$merge" in pipeline[-1]): @@ -82,7 +81,6 @@ def _cursor_namespace(self): """The namespace in which the aggregate command is run.""" raise NotImplementedError - @property def _cursor_collection(self, cursor_doc): """The Collection used for the aggregate command cursor.""" raise NotImplementedError diff --git a/pymongo/auth.py b/pymongo/auth.py index a2e206357c..34f1c7fc94 100644 --- a/pymongo/auth.py +++ b/pymongo/auth.py @@ -19,9 +19,9 @@ import hmac import os import socket - from base64 import standard_b64decode, standard_b64encode from collections import namedtuple +from typing import Callable, Mapping from urllib.parse import quote from bson.binary import Binary @@ -97,7 +97,7 @@ def __hash__(self): """Mechanism properties for GSSAPI authentication.""" -_AWSProperties = namedtuple('AWSProperties', ['aws_session_token']) +_AWSProperties = namedtuple('_AWSProperties', ['aws_session_token']) """Mechanism properties for MONGODB-AWS authentication.""" @@ -140,9 +140,9 @@ def _build_credentials_tuple(mech, source, user, passwd, extra, database): properties = extra.get('authmechanismproperties', {}) aws_session_token = properties.get('AWS_SESSION_TOKEN') - props = _AWSProperties(aws_session_token=aws_session_token) + aws_props = _AWSProperties(aws_session_token=aws_session_token) # user can be None for temporary link-local EC2 credentials. 
- return MongoCredential(mech, '$external', user, passwd, props, None) + return MongoCredential(mech, '$external', user, passwd, aws_props, None) elif mech == 'PLAIN': source_database = source or database or '$external' return MongoCredential(mech, source_database, user, passwd, None, None) @@ -471,7 +471,7 @@ def _authenticate_default(credentials, sock_info): return _authenticate_scram(credentials, sock_info, 'SCRAM-SHA-1') -_AUTH_MAP = { +_AUTH_MAP: Mapping[str, Callable] = { 'GSSAPI': _authenticate_gssapi, 'MONGODB-CR': _authenticate_mongo_cr, 'MONGODB-X509': _authenticate_x509, @@ -532,7 +532,7 @@ def speculate_command(self): return cmd -_SPECULATIVE_AUTH_MAP = { +_SPECULATIVE_AUTH_MAP: Mapping[str, Callable] = { 'MONGODB-X509': _X509Context, 'SCRAM-SHA-1': functools.partial(_ScramContext, mechanism='SCRAM-SHA-1'), 'SCRAM-SHA-256': functools.partial(_ScramContext, @@ -544,6 +544,6 @@ def speculate_command(self): def authenticate(credentials, sock_info): """Authenticate sock_info.""" mechanism = credentials.mechanism - auth_func = _AUTH_MAP.get(mechanism) + auth_func = _AUTH_MAP[mechanism] auth_func(credentials, sock_info) diff --git a/pymongo/auth_aws.py b/pymongo/auth_aws.py index ff07a12e7f..0233d192d4 100644 --- a/pymongo/auth_aws.py +++ b/pymongo/auth_aws.py @@ -16,12 +16,11 @@ try: import pymongo_auth_aws - from pymongo_auth_aws import (AwsCredential, - AwsSaslContext, + from pymongo_auth_aws import (AwsCredential, AwsSaslContext, PyMongoAuthAwsError) _HAVE_MONGODB_AWS = True except ImportError: - class AwsSaslContext(object): + class AwsSaslContext(object): # type: ignore def __init__(self, credentials): pass _HAVE_MONGODB_AWS = False @@ -32,7 +31,7 @@ def __init__(self, credentials): from pymongo.errors import ConfigurationError, OperationFailure -class _AwsSaslContext(AwsSaslContext): +class _AwsSaslContext(AwsSaslContext): # type: ignore # Dependency injection: def binary_type(self): """Return the bson.binary.Binary type.""" diff --git a/pymongo/bulk.py b/pymongo/bulk.py index 1921108a12..8d343bb2c6 100644 --- a/pymongo/bulk.py +++ b/pymongo/bulk.py @@ -17,31 +17,23 @@ .. 
versionadded:: 2.7 """ import copy - from itertools import islice from bson.objectid import ObjectId from bson.raw_bson import RawBSONDocument from bson.son import SON from pymongo.client_session import _validate_session_write_concern -from pymongo.common import (validate_is_mapping, - validate_is_document_type, - validate_ok_for_replace, - validate_ok_for_update) -from pymongo.helpers import _RETRYABLE_ERROR_CODES, _get_wce_doc from pymongo.collation import validate_collation_or_none -from pymongo.errors import (BulkWriteError, - ConfigurationError, - InvalidOperation, - OperationFailure) -from pymongo.message import (_INSERT, _UPDATE, _DELETE, - _randint, - _BulkWriteContext, - _EncryptedBulkWriteContext) +from pymongo.common import (validate_is_document_type, validate_is_mapping, + validate_ok_for_replace, validate_ok_for_update) +from pymongo.errors import (BulkWriteError, ConfigurationError, + InvalidOperation, OperationFailure) +from pymongo.helpers import _RETRYABLE_ERROR_CODES, _get_wce_doc +from pymongo.message import (_DELETE, _INSERT, _UPDATE, _BulkWriteContext, + _EncryptedBulkWriteContext, _randint) from pymongo.read_preferences import ReadPreference from pymongo.write_concern import WriteConcern - _DELETE_ALL = 0 _DELETE_ONE = 1 diff --git a/pymongo/change_stream.py b/pymongo/change_stream.py index 54bf98d83e..69446fdecf 100644 --- a/pymongo/change_stream.py +++ b/pymongo/change_stream.py @@ -15,21 +15,20 @@ """Watch changes on a collection, a database, or the entire cluster.""" import copy +from typing import (TYPE_CHECKING, Any, Dict, Generic, Iterator, Mapping, + Optional, Union) from bson import _bson_to_dict from bson.raw_bson import RawBSONDocument - +from bson.timestamp import Timestamp from pymongo import common from pymongo.aggregation import (_CollectionAggregationCommand, _DatabaseAggregationCommand) from pymongo.collation import validate_collation_or_none from pymongo.command_cursor import CommandCursor -from pymongo.errors import (ConnectionFailure, - CursorNotFound, - InvalidOperation, - OperationFailure, - PyMongoError) - +from pymongo.errors import (ConnectionFailure, CursorNotFound, + InvalidOperation, OperationFailure, PyMongoError) +from pymongo.typings import _CollationIn, _DocumentType, _Pipeline # The change streams spec considers the following server errors from the # getMore command non-resumable. All other getMore errors are resumable. @@ -55,7 +54,14 @@ ]) -class ChangeStream(object): +if TYPE_CHECKING: + from pymongo.client_session import ClientSession + from pymongo.collection import Collection + from pymongo.database import Database + from pymongo.mongo_client import MongoClient + + +class ChangeStream(Generic[_DocumentType]): """The internal abstract base class for change stream cursors. Should not be called directly by application developers. Use @@ -66,14 +72,22 @@ class ChangeStream(object): .. versionadded:: 3.6 .. seealso:: The MongoDB documentation on `changeStreams `_. 
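    A minimal sketch of the intended entry point (``client`` is an assumed
    name for a :class:`~pymongo.mongo_client.MongoClient` connected to a
    replica set, since change streams require one)::

        with client.db.collection.watch(
                [{'$match': {'operationType': 'insert'}}]) as stream:
            for change in stream:
                print(change['fullDocument'])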
""" - def __init__(self, target, pipeline, full_document, resume_after, - max_await_time_ms, batch_size, collation, - start_at_operation_time, session, start_after): + def __init__( + self, + target: Union["MongoClient[_DocumentType]", "Database[_DocumentType]", "Collection[_DocumentType]"], + pipeline: Optional[_Pipeline], + full_document: Optional[str], + resume_after: Optional[Mapping[str, Any]], + max_await_time_ms: Optional[int], + batch_size: Optional[int], + collation: Optional[_CollationIn], + start_at_operation_time: Optional[Timestamp], + session: Optional["ClientSession"], + start_after: Optional[Mapping[str, Any]], + ) -> None: if pipeline is None: pipeline = [] - elif not isinstance(pipeline, list): - raise TypeError("pipeline must be a list") - + pipeline = common.validate_list('pipeline', pipeline) common.validate_string_or_none('full_document', full_document) validate_collation_or_none(collation) common.validate_non_negative_integer_or_none("batchSize", batch_size) @@ -84,7 +98,7 @@ def __init__(self, target, pipeline, full_document, resume_after, self._decode_custom = True # Keep the type registry so that we support encoding custom types # in the pipeline. - self._target = target.with_options( + self._target = target.with_options( # type: ignore codec_options=target.codec_options.with_options( document_class=RawBSONDocument)) else: @@ -117,7 +131,7 @@ def _client(self): def _change_stream_options(self): """Return the options dict for the $changeStream pipeline stage.""" - options = {} + options: Dict[str, Any] = {} if self._full_document is not None: options['fullDocument'] = self._full_document @@ -144,7 +158,7 @@ def _command_options(self): def _aggregation_pipeline(self): """Return the full aggregation pipeline for this ChangeStream.""" options = self._change_stream_options() - full_pipeline = [{'$changeStream': options}] + full_pipeline: list = [{'$changeStream': options}] full_pipeline.extend(self._pipeline) return full_pipeline @@ -197,15 +211,15 @@ def _resume(self): pass self._cursor = self._create_cursor() - def close(self): + def close(self) -> None: """Close this ChangeStream.""" self._cursor.close() - def __iter__(self): + def __iter__(self) -> "ChangeStream[_DocumentType]": return self @property - def resume_token(self): + def resume_token(self) -> Optional[Mapping[str, Any]]: """The cached resume token that will be used to resume after the most recently returned change. @@ -213,7 +227,7 @@ def resume_token(self): """ return copy.deepcopy(self._resume_token) - def next(self): + def next(self) -> _DocumentType: """Advance the cursor. This method blocks until the next change document is returned or an @@ -255,7 +269,7 @@ def next(self): __next__ = next @property - def alive(self): + def alive(self) -> bool: """Does this cursor have the potential to return more data? .. note:: Even if :attr:`alive` is ``True``, :meth:`next` can raise @@ -265,7 +279,7 @@ def alive(self): """ return self._cursor.alive - def try_next(self): + def try_next(self) -> Optional[_DocumentType]: """Advance the cursor without blocking indefinitely. 
This method returns the next change document without waiting @@ -354,14 +368,14 @@ def try_next(self): return _bson_to_dict(change.raw, self._orig_codec_options) return change - def __enter__(self): + def __enter__(self) -> "ChangeStream": return self - def __exit__(self, exc_type, exc_val, exc_tb): + def __exit__(self, exc_type: Any, exc_val: Any, exc_tb: Any) -> None: self.close() -class CollectionChangeStream(ChangeStream): +class CollectionChangeStream(ChangeStream, Generic[_DocumentType]): """A change stream that watches changes on a single collection. Should not be called directly by application developers. Use @@ -378,7 +392,7 @@ def _client(self): return self._target.database.client -class DatabaseChangeStream(ChangeStream): +class DatabaseChangeStream(ChangeStream, Generic[_DocumentType]): """A change stream that watches changes on all collections in a database. Should not be called directly by application developers. Use @@ -395,7 +409,7 @@ def _client(self): return self._target.client -class ClusterChangeStream(DatabaseChangeStream): +class ClusterChangeStream(DatabaseChangeStream, Generic[_DocumentType]): """A change stream that watches changes on all collections in the cluster. Should not be called directly by application developers. Use diff --git a/pymongo/client_options.py b/pymongo/client_options.py index c2f5ae01cf..14ef0f781e 100644 --- a/pymongo/client_options.py +++ b/pymongo/client_options.py @@ -15,9 +15,9 @@ """Tools to parse mongo client options.""" from bson.codec_options import _parse_codec_options +from pymongo import common from pymongo.auth import _build_credentials_tuple from pymongo.common import validate_boolean -from pymongo import common from pymongo.compression_support import CompressionSettings from pymongo.errors import ConfigurationError from pymongo.monitoring import _EventListeners diff --git a/pymongo/client_session.py b/pymongo/client_session.py index 8c61623ae4..3d4ad514e5 100644 --- a/pymongo/client_session.py +++ b/pymongo/client_session.py @@ -134,25 +134,23 @@ import collections import time import uuid - from collections.abc import Mapping as _Mapping +from typing import (TYPE_CHECKING, Any, Callable, ContextManager, Generic, + Mapping, Optional, TypeVar) from bson.binary import Binary from bson.int64 import Int64 from bson.son import SON from bson.timestamp import Timestamp - from pymongo.cursor import _SocketManager -from pymongo.errors import (ConfigurationError, - ConnectionFailure, - InvalidOperation, - OperationFailure, - PyMongoError, +from pymongo.errors import (ConfigurationError, ConnectionFailure, + InvalidOperation, OperationFailure, PyMongoError, WTimeoutError) from pymongo.helpers import _RETRYABLE_ERROR_CODES from pymongo.read_concern import ReadConcern from pymongo.read_preferences import ReadPreference, _ServerMode from pymongo.server_type import SERVER_TYPE +from pymongo.typings import _DocumentType from pymongo.write_concern import WriteConcern @@ -172,10 +170,12 @@ class SessionOptions(object): .. versionchanged:: 3.12 Added the ``snapshot`` parameter. 
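    A minimal sketch of how these options are typically supplied, through
    :meth:`~pymongo.mongo_client.MongoClient.start_session` (``client`` is
    an assumed name; snapshot reads additionally require MongoDB 5.0+)::

        # Causally consistent session: reads observe this session's writes.
        with client.start_session(causal_consistency=True) as session:
            client.db.coll.insert_one({'x': 1}, session=session)
            assert client.db.coll.find_one({'x': 1}, session=session)

        # Snapshot session: all reads see a single point in time.
        with client.start_session(snapshot=True) as session:
            client.db.coll.find_one({}, session=session)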
""" - def __init__(self, - causal_consistency=None, - default_transaction_options=None, - snapshot=False): + def __init__( + self, + causal_consistency: Optional[bool] = None, + default_transaction_options: Optional["TransactionOptions"] = None, + snapshot: Optional[bool] = False, + ) -> None: if snapshot: if causal_consistency: raise ConfigurationError('snapshot reads do not support ' @@ -194,12 +194,12 @@ def __init__(self, self._snapshot = snapshot @property - def causal_consistency(self): + def causal_consistency(self) -> bool: """Whether causal consistency is configured.""" return self._causal_consistency @property - def default_transaction_options(self): + def default_transaction_options(self) -> Optional["TransactionOptions"]: """The default TransactionOptions to use for transactions started on this session. @@ -208,7 +208,7 @@ def default_transaction_options(self): return self._default_transaction_options @property - def snapshot(self): + def snapshot(self) -> Optional[bool]: """Whether snapshot reads are configured. .. versionadded:: 3.12 @@ -243,8 +243,13 @@ class TransactionOptions(object): .. versionadded:: 3.7 """ - def __init__(self, read_concern=None, write_concern=None, - read_preference=None, max_commit_time_ms=None): + def __init__( + self, + read_concern: Optional[ReadConcern] = None, + write_concern: Optional[WriteConcern] = None, + read_preference: Optional[_ServerMode] = None, + max_commit_time_ms: Optional[int] = None + ) -> None: self._read_concern = read_concern self._write_concern = write_concern self._read_preference = read_preference @@ -274,23 +279,23 @@ def __init__(self, read_concern=None, write_concern=None, "max_commit_time_ms must be an integer or None") @property - def read_concern(self): + def read_concern(self) -> Optional[ReadConcern]: """This transaction's :class:`~pymongo.read_concern.ReadConcern`.""" return self._read_concern @property - def write_concern(self): + def write_concern(self) -> Optional[WriteConcern]: """This transaction's :class:`~pymongo.write_concern.WriteConcern`.""" return self._write_concern @property - def read_preference(self): + def read_preference(self) -> Optional[_ServerMode]: """This transaction's :class:`~pymongo.read_preferences.ReadPreference`. """ return self._read_preference @property - def max_commit_time_ms(self): + def max_commit_time_ms(self) -> Optional[int]: """The maxTimeMS to use when running a commitTransaction command. .. versionadded:: 3.9 @@ -427,7 +432,13 @@ def _within_time_limit(start_time): return time.monotonic() - start_time < _WITH_TRANSACTION_RETRY_TIME_LIMIT -class ClientSession(object): +_T = TypeVar("_T") + +if TYPE_CHECKING: + from pymongo.mongo_client import MongoClient + + +class ClientSession(Generic[_DocumentType]): """A session for ordering sequential operations. :class:`ClientSession` instances are **not thread-safe or fork-safe**. @@ -439,9 +450,11 @@ class ClientSession(object): :class:`ClientSession`, call :meth:`~pymongo.mongo_client.MongoClient.start_session`. """ - def __init__(self, client, server_session, options, implicit): + def __init__( + self, client: "MongoClient[_DocumentType]", server_session: Any, options: SessionOptions, implicit: bool + ) -> None: # A MongoClient, a _ServerSession, a SessionOptions, and a set. 
- self._client = client + self._client: MongoClient[_DocumentType] = client self._server_session = server_session self._options = options self._cluster_time = None @@ -451,7 +464,7 @@ def __init__(self, client, server_session, options, implicit): self._implicit = implicit self._transaction = _Transaction(None, client) - def end_session(self): + def end_session(self) -> None: """Finish this session. If a transaction has started, abort it. It is an error to use the session after the session has ended. @@ -474,39 +487,39 @@ def _check_ended(self): if self._server_session is None: raise InvalidOperation("Cannot use ended session") - def __enter__(self): + def __enter__(self) -> "ClientSession[_DocumentType]": return self - def __exit__(self, exc_type, exc_val, exc_tb): + def __exit__(self, exc_type: Any, exc_val: Any, exc_tb: Any) -> None: self._end_session(lock=True) @property - def client(self): + def client(self) -> "MongoClient[_DocumentType]": """The :class:`~pymongo.mongo_client.MongoClient` this session was created from. """ return self._client @property - def options(self): + def options(self) -> SessionOptions: """The :class:`SessionOptions` this session was created with.""" return self._options @property - def session_id(self): + def session_id(self) -> Mapping[str, Any]: """A BSON document, the opaque server session identifier.""" self._check_ended() return self._server_session.session_id @property - def cluster_time(self): + def cluster_time(self) -> Optional[Mapping[str, Any]]: """The cluster time returned by the last operation executed in this session. """ return self._cluster_time @property - def operation_time(self): + def operation_time(self) -> Optional[Timestamp]: """The operation time returned by the last operation executed in this session. """ @@ -522,8 +535,14 @@ def _inherit_option(self, name, val): return val return getattr(self.client, name) - def with_transaction(self, callback, read_concern=None, write_concern=None, - read_preference=None, max_commit_time_ms=None): + def with_transaction( + self, + callback: Callable[["ClientSession"], _T], + read_concern: Optional[ReadConcern] = None, + write_concern: Optional[WriteConcern] = None, + read_preference: Optional[_ServerMode] = None, + max_commit_time_ms: Optional[int] = None, + ) -> _T: """Execute a callback in a transaction. This method starts a transaction on this session, executes ``callback`` @@ -649,8 +668,13 @@ def callback(session, custom_arg, custom_kwarg=None): # Commit succeeded. return ret - def start_transaction(self, read_concern=None, write_concern=None, - read_preference=None, max_commit_time_ms=None): + def start_transaction( + self, + read_concern: Optional[ReadConcern] = None, + write_concern: Optional[WriteConcern] = None, + read_preference: Optional[_ServerMode] = None, + max_commit_time_ms: Optional[int] = None, + ) -> ContextManager: """Start a multi-statement transaction. Takes the same arguments as :class:`TransactionOptions`. @@ -685,7 +709,7 @@ def start_transaction(self, read_concern=None, write_concern=None, self._start_retryable_write() return _TransactionContext(self) - def commit_transaction(self): + def commit_transaction(self) -> None: """Commit a multi-statement transaction. .. versionadded:: 3.7 @@ -729,7 +753,7 @@ def commit_transaction(self): finally: self._transaction.state = _TxnState.COMMITTED - def abort_transaction(self): + def abort_transaction(self) -> None: """Abort a multi-statement transaction. .. 
versionadded:: 3.7 @@ -804,7 +828,7 @@ def _advance_cluster_time(self, cluster_time): if cluster_time["clusterTime"] > self._cluster_time["clusterTime"]: self._cluster_time = cluster_time - def advance_cluster_time(self, cluster_time): + def advance_cluster_time(self, cluster_time: Mapping[str, Any]) -> None: """Update the cluster time for this session. :Parameters: @@ -827,7 +851,7 @@ def _advance_operation_time(self, operation_time): if operation_time > self._operation_time: self._operation_time = operation_time - def advance_operation_time(self, operation_time): + def advance_operation_time(self, operation_time: Timestamp) -> None: """Update the operation time for this session. :Parameters: @@ -856,12 +880,12 @@ def _process_response(self, reply): self._transaction.recovery_token = recovery_token @property - def has_ended(self): + def has_ended(self) -> bool: """True if this session is finished.""" return self._server_session is None @property - def in_transaction(self): + def in_transaction(self) -> bool: """True if this session has an active multi-statement transaction. .. versionadded:: 3.10 diff --git a/pymongo/collation.py b/pymongo/collation.py index 873d603336..e398264ac2 100644 --- a/pymongo/collation.py +++ b/pymongo/collation.py @@ -16,6 +16,7 @@ .. _collations: http://userguide.icu-project.org/collation/concepts """ +from typing import Any, Dict, Mapping, Optional, Union from pymongo import common @@ -151,18 +152,18 @@ class Collation(object): __slots__ = ("__document",) - def __init__(self, locale, - caseLevel=None, - caseFirst=None, - strength=None, - numericOrdering=None, - alternate=None, - maxVariable=None, - normalization=None, - backwards=None, - **kwargs): + def __init__(self, locale: str, + caseLevel: Optional[bool] = None, + caseFirst: Optional[str] = None, + strength: Optional[int] = None, + numericOrdering: Optional[bool] = None, + alternate: Optional[str] = None, + maxVariable: Optional[str] = None, + normalization: Optional[bool] = None, + backwards: Optional[bool] = None, + **kwargs: Any) -> None: locale = common.validate_string('locale', locale) - self.__document = {'locale': locale} + self.__document: Dict[str, Any] = {'locale': locale} if caseLevel is not None: self.__document['caseLevel'] = common.validate_boolean( 'caseLevel', caseLevel) @@ -190,7 +191,7 @@ def __init__(self, locale, self.__document.update(kwargs) @property - def document(self): + def document(self) -> Dict[str, Any]: """The document representation of this collation. .. 
note:: @@ -204,16 +205,16 @@ def __repr__(self): return 'Collation(%s)' % ( ', '.join('%s=%r' % (key, document[key]) for key in document),) - def __eq__(self, other): + def __eq__(self, other: Any) -> bool: if isinstance(other, Collation): return self.document == other.document return NotImplemented - def __ne__(self, other): + def __ne__(self, other: Any) -> bool: return not self == other -def validate_collation_or_none(value): +def validate_collation_or_none(value: Optional[Union[Mapping[str, Any], Collation]]) -> Optional[Dict[str, Any]]: if value is None: return None if isinstance(value, Collation): diff --git a/pymongo/collection.py b/pymongo/collection.py index ecb82a2cac..aa2d148fbe 100644 --- a/pymongo/collection.py +++ b/pymongo/collection.py @@ -14,44 +14,45 @@ """Collection level utilities for Mongo.""" -import datetime -import warnings - from collections import abc +from typing import (TYPE_CHECKING, Any, Generic, Iterable, List, Mapping, + MutableMapping, Optional, Sequence, Tuple, Union) from bson.code import Code +from bson.codec_options import CodecOptions from bson.objectid import ObjectId from bson.raw_bson import RawBSONDocument -from bson.codec_options import CodecOptions from bson.son import SON -from pymongo import (common, - helpers, - message) +from bson.timestamp import Timestamp +from pymongo import common, helpers, message from pymongo.aggregation import (_CollectionAggregationCommand, _CollectionRawAggregationCommand) from pymongo.bulk import _Bulk -from pymongo.command_cursor import CommandCursor, RawBatchCommandCursor -from pymongo.collation import validate_collation_or_none from pymongo.change_stream import CollectionChangeStream +from pymongo.collation import validate_collation_or_none +from pymongo.command_cursor import CommandCursor, RawBatchCommandCursor from pymongo.cursor import Cursor, RawBatchCursor -from pymongo.errors import (ConfigurationError, - InvalidName, - InvalidOperation, +from pymongo.errors import (ConfigurationError, InvalidName, InvalidOperation, OperationFailure) from pymongo.helpers import _check_write_command_response from pymongo.message import _UNICODE_REPLACE_CODEC_OPTIONS -from pymongo.operations import IndexModel -from pymongo.read_preferences import ReadPreference -from pymongo.results import (BulkWriteResult, - DeleteResult, - InsertOneResult, - InsertManyResult, - UpdateResult) +from pymongo.operations import (DeleteMany, DeleteOne, IndexModel, InsertOne, + ReplaceOne, UpdateMany, UpdateOne) +from pymongo.read_preferences import ReadPreference, _ServerMode +from pymongo.results import (BulkWriteResult, DeleteResult, InsertManyResult, + InsertOneResult, UpdateResult) +from pymongo.typings import _CollationIn, _DocumentIn, _DocumentType, _Pipeline from pymongo.write_concern import WriteConcern _FIND_AND_MODIFY_DOC_FIELDS = {'value': 1} +_WriteOp = Union[InsertOne, DeleteOne, DeleteMany, ReplaceOne, UpdateOne, UpdateMany] +# Hint supports index name, "myIndex", or list of index pairs: [('x', 1), ('y', -1)] +_IndexList = Sequence[Tuple[str, Union[int, str, Mapping[str, Any]]]] +_IndexKeyHint = Union[str, _IndexList] + + class ReturnDocument(object): """An enum used with :meth:`~pymongo.collection.Collection.find_one_and_replace` and @@ -65,13 +66,28 @@ class ReturnDocument(object): """Return the updated/replaced or inserted document.""" -class Collection(common.BaseObject): +if TYPE_CHECKING: + from pymongo.client_session import ClientSession + from pymongo.database import Database + from pymongo.read_concern import ReadConcern + 
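The aliases above give type checkers something concrete to verify bulk and
index arguments against; a minimal sketch of the resulting end-to-end
inference for the generic class defined below (a local ``mongod`` is
assumed)::

    from typing import Any, Dict
    from pymongo import DeleteOne, InsertOne, MongoClient

    client: MongoClient[Dict[str, Any]] = MongoClient()
    coll = client.test.test
    # Mixed operation types check against the _WriteOp union.
    result = coll.bulk_write([InsertOne({'x': 1}), DeleteOne({'x': 1})])
    doc = coll.find_one({'x': 1})  # inferred as Optional[Dict[str, Any]]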
+ +class Collection(common.BaseObject, Generic[_DocumentType]): """A Mongo collection. """ - def __init__(self, database, name, create=False, codec_options=None, - read_preference=None, write_concern=None, read_concern=None, - session=None, **kwargs): + def __init__( + self, + database: "Database[_DocumentType]", + name: str, + create: Optional[bool] = False, + codec_options: Optional[CodecOptions] = None, + read_preference: Optional[_ServerMode] = None, + write_concern: Optional[WriteConcern] = None, + read_concern: Optional["ReadConcern"] = None, + session: Optional["ClientSession"] = None, + **kwargs: Any, + ) -> None: """Get / create a Mongo collection. Raises :class:`TypeError` if `name` is not an instance of @@ -169,7 +185,7 @@ def __init__(self, database, name, create=False, codec_options=None, "null character") collation = validate_collation_or_none(kwargs.pop('collation', None)) - self.__database = database + self.__database: Database[_DocumentType] = database self.__name = name self.__full_name = "%s.%s" % (self.__database.name, self.__name) if create or kwargs or collation: @@ -252,7 +268,7 @@ def __create(self, options, collation, session): write_concern=self._write_concern_for(session), collation=collation, session=session) - def __getattr__(self, name): + def __getattr__(self, name: str) -> "Collection[_DocumentType]": """Get a sub-collection of this collection by name. Raises InvalidName if an invalid collection name is used. @@ -268,7 +284,7 @@ def __getattr__(self, name): name, full_name, full_name)) return self.__getitem__(name) - def __getitem__(self, name): + def __getitem__(self, name: str) -> "Collection[_DocumentType]": return Collection(self.__database, "%s.%s" % (self.__name, name), False, @@ -280,25 +296,25 @@ def __getitem__(self, name): def __repr__(self): return "Collection(%r, %r)" % (self.__database, self.__name) - def __eq__(self, other): + def __eq__(self, other: Any) -> bool: if isinstance(other, Collection): return (self.__database == other.database and self.__name == other.name) return NotImplemented - def __ne__(self, other): + def __ne__(self, other: Any) -> bool: return not self == other - def __hash__(self): + def __hash__(self) -> int: return hash((self.__database, self.__name)) - def __bool__(self): + def __bool__(self) -> bool: raise NotImplementedError("Collection objects do not implement truth " "value testing or bool(). Please compare " "with None instead: collection is not None") @property - def full_name(self): + def full_name(self) -> str: """The full name of this :class:`Collection`. The full name is of the form `database_name.collection_name`. @@ -306,19 +322,24 @@ def full_name(self): return self.__full_name @property - def name(self): + def name(self) -> str: """The name of this :class:`Collection`.""" return self.__name @property - def database(self): + def database(self) -> "Database[_DocumentType]": """The :class:`~pymongo.database.Database` that this :class:`Collection` is a part of. """ return self.__database - def with_options(self, codec_options=None, read_preference=None, - write_concern=None, read_concern=None): + def with_options( + self, + codec_options: Optional[CodecOptions] = None, + read_preference: Optional[_ServerMode] = None, + write_concern: Optional[WriteConcern] = None, + read_concern: Optional["ReadConcern"] = None, + ) -> "Collection[_DocumentType]": """Get a clone of this collection changing the specified settings. 
>>> coll1.read_preference @@ -356,8 +377,13 @@ def with_options(self, codec_options=None, read_preference=None, write_concern or self.write_concern, read_concern or self.read_concern) - def bulk_write(self, requests, ordered=True, - bypass_document_validation=False, session=None): + def bulk_write( + self, + requests: Sequence[_WriteOp], + ordered: bool = True, + bypass_document_validation: bool = False, + session: Optional["ClientSession"] = None + ) -> BulkWriteResult: """Send a batch of write operations to the server. Requests are passed as a list of write operation instances ( @@ -470,8 +496,10 @@ def _insert_command(session, sock_info, retryable_write): if not isinstance(doc, RawBSONDocument): return doc.get('_id') - def insert_one(self, document, bypass_document_validation=False, - session=None): + def insert_one(self, document: _DocumentIn, + bypass_document_validation: bool = False, + session: Optional["ClientSession"] = None + ) -> InsertOneResult: """Insert a single document. >>> db.test.count_documents({'x': 1}) @@ -520,8 +548,12 @@ def insert_one(self, document, bypass_document_validation=False, bypass_doc_val=bypass_document_validation, session=session), write_concern.acknowledged) - def insert_many(self, documents, ordered=True, - bypass_document_validation=False, session=None): + def insert_many(self, + documents: Iterable[_DocumentIn], + ordered: bool = True, + bypass_document_validation: bool = False, + session: Optional["ClientSession"] = None + ) -> InsertManyResult: """Insert an iterable of documents. >>> db.test.count_documents({}) @@ -565,7 +597,7 @@ def insert_many(self, documents, ordered=True, or isinstance(documents, abc.Mapping) or not documents): raise TypeError("documents must be a non-empty list") - inserted_ids = [] + inserted_ids: List[ObjectId] = [] def gen(): """A generator that validates documents and handles _ids.""" for document in documents: @@ -671,9 +703,16 @@ def _update(session, sock_info, retryable_write): (write_concern or self.write_concern).acknowledged and not multi, _update, session) - def replace_one(self, filter, replacement, upsert=False, - bypass_document_validation=False, collation=None, - hint=None, session=None, let=None): + def replace_one(self, + filter: Mapping[str, Any], + replacement: Mapping[str, Any], + upsert: bool = False, + bypass_document_validation: bool = False, + collation: Optional[_CollationIn] = None, + hint: Optional[_IndexKeyHint] = None, + session: Optional["ClientSession"] = None, + let: Optional[Mapping[str, Any]] = None + ) -> UpdateResult: """Replace a single document matching the filter. >>> for doc in db.test.find({}): @@ -755,10 +794,17 @@ def replace_one(self, filter, replacement, upsert=False, collation=collation, hint=hint, session=session, let=let), write_concern.acknowledged) - def update_one(self, filter, update, upsert=False, - bypass_document_validation=False, - collation=None, array_filters=None, hint=None, - session=None, let=None): + def update_one(self, + filter: Mapping[str, Any], + update: Union[Mapping[str, Any], _Pipeline], + upsert: bool = False, + bypass_document_validation: bool = False, + collation: Optional[_CollationIn] = None, + array_filters: Optional[Sequence[Mapping[str, Any]]] = None, + hint: Optional[_IndexKeyHint] = None, + session: Optional["ClientSession"] = None, + let: Optional[Mapping[str, Any]] = None + ) -> UpdateResult: """Update a single document matching the filter. 
>>> for doc in db.test.find(): @@ -800,8 +846,8 @@ def update_one(self, filter, update, upsert=False, - `session` (optional): a :class:`~pymongo.client_session.ClientSession`. - `let` (optional): Map of parameter names and values. Values must be - constant or closed expressions that do not reference document - fields. Parameters can then be accessed as variables in an + constant or closed expressions that do not reference document + fields. Parameters can then be accessed as variables in an aggregate expression context (e.g. "$$var"). :Returns: @@ -836,9 +882,17 @@ def update_one(self, filter, update, upsert=False, hint=hint, session=session, let=let), write_concern.acknowledged) - def update_many(self, filter, update, upsert=False, array_filters=None, - bypass_document_validation=False, collation=None, - hint=None, session=None, let=None): + def update_many(self, + filter: Mapping[str, Any], + update: Union[Mapping[str, Any], _Pipeline], + upsert: bool = False, + array_filters: Optional[Sequence[Mapping[str, Any]]] = None, + bypass_document_validation: Optional[bool] = None, + collation: Optional[_CollationIn] = None, + hint: Optional[_IndexKeyHint] = None, + session: Optional["ClientSession"] = None, + let: Optional[Mapping[str, Any]] = None + ) -> UpdateResult: """Update one or more documents that match the filter. >>> for doc in db.test.find(): @@ -880,8 +934,8 @@ def update_many(self, filter, update, upsert=False, array_filters=None, - `session` (optional): a :class:`~pymongo.client_session.ClientSession`. - `let` (optional): Map of parameter names and values. Values must be - constant or closed expressions that do not reference document - fields. Parameters can then be accessed as variables in an + constant or closed expressions that do not reference document + fields. Parameters can then be accessed as variables in an aggregate expression context (e.g. "$$var"). :Returns: @@ -916,7 +970,7 @@ def update_many(self, filter, update, upsert=False, array_filters=None, hint=hint, session=session, let=let), write_concern.acknowledged) - def drop(self, session=None): + def drop(self, session: Optional["ClientSession"] = None) -> None: """Alias for :meth:`~pymongo.database.Database.drop_collection`. :Parameters: @@ -1005,8 +1059,13 @@ def _delete(session, sock_info, retryable_write): (write_concern or self.write_concern).acknowledged and not multi, _delete, session) - def delete_one(self, filter, collation=None, hint=None, session=None, - let=None): + def delete_one(self, + filter: Mapping[str, Any], + collation: Optional[_CollationIn] = None, + hint: Optional[_IndexKeyHint] = None, + session: Optional["ClientSession"] = None, + let: Optional[Mapping[str, Any]] = None + ) -> DeleteResult: """Delete a single document matching the filter. >>> db.test.count_documents({'x': 1}) @@ -1030,8 +1089,8 @@ def delete_one(self, filter, collation=None, hint=None, session=None, - `session` (optional): a :class:`~pymongo.client_session.ClientSession`. - `let` (optional): Map of parameter names and values. Values must be - constant or closed expressions that do not reference document - fields. Parameters can then be accessed as variables in an + constant or closed expressions that do not reference document + fields. Parameters can then be accessed as variables in an aggregate expression context (e.g. "$$var"). 
:Returns: @@ -1055,8 +1114,13 @@ def delete_one(self, filter, collation=None, hint=None, session=None, collation=collation, hint=hint, session=session, let=let), write_concern.acknowledged) - def delete_many(self, filter, collation=None, hint=None, session=None, - let=None): + def delete_many(self, + filter: Mapping[str, Any], + collation: Optional[_CollationIn] = None, + hint: Optional[_IndexKeyHint] = None, + session: Optional["ClientSession"] = None, + let: Optional[Mapping[str, Any]] = None + ) -> DeleteResult: """Delete one or more documents matching the filter. >>> db.test.count_documents({'x': 1}) @@ -1080,8 +1144,8 @@ def delete_many(self, filter, collation=None, hint=None, session=None, - `session` (optional): a :class:`~pymongo.client_session.ClientSession`. - `let` (optional): Map of parameter names and values. Values must be - constant or closed expressions that do not reference document - fields. Parameters can then be accessed as variables in an + constant or closed expressions that do not reference document + fields. Parameters can then be accessed as variables in an aggregate expression context (e.g. "$$var"). :Returns: @@ -1105,7 +1169,7 @@ def delete_many(self, filter, collation=None, hint=None, session=None, collation=collation, hint=hint, session=session, let=let), write_concern.acknowledged) - def find_one(self, filter=None, *args, **kwargs): + def find_one(self, filter: Optional[Any] = None, *args: Any, **kwargs: Any) -> Optional[_DocumentType]: """Get a single document from the database. All arguments to :meth:`find` are also valid arguments for @@ -1139,7 +1203,7 @@ def find_one(self, filter=None, *args, **kwargs): return result return None - def find(self, *args, **kwargs): + def find(self, *args: Any, **kwargs: Any) -> Cursor[_DocumentType]: """Query the database. The `filter` argument is a prototype document that all results @@ -1328,7 +1392,7 @@ def find(self, *args, **kwargs): """ return Cursor(self, *args, **kwargs) - def find_raw_batches(self, *args, **kwargs): + def find_raw_batches(self, *args: Any, **kwargs: Any) -> RawBatchCursor[_DocumentType]: """Query the database and retrieve batches of raw BSON. Similar to the :meth:`find` method but returns a @@ -1396,7 +1460,7 @@ def _aggregate_one_result( batch = result['cursor']['firstBatch'] return batch[0] if batch else None - def estimated_document_count(self, **kwargs): + def estimated_document_count(self, **kwargs: Any) -> int: """Get an estimate of the number of documents in this collection using collection metadata. @@ -1445,7 +1509,7 @@ def _cmd(session, server, sock_info, read_preference): return self.__database.client._retryable_read( _cmd, self.read_preference, None) - def count_documents(self, filter, session=None, **kwargs): + def count_documents(self, filter: Mapping[str, Any], session: Optional["ClientSession"] = None, **kwargs: Any) -> int: """Count the number of documents in this collection. .. note:: For a fast count of the total documents in a collection see @@ -1523,7 +1587,7 @@ def _cmd(session, server, sock_info, read_preference): return self.__database.client._retryable_read( _cmd, self._read_preference_for(session), session) - def create_indexes(self, indexes, session=None, **kwargs): + def create_indexes(self, indexes: Sequence[IndexModel], session: Optional["ClientSession"] = None, **kwargs: Any) -> List[str]: """Create one or more indexes on this collection. 
>>> from pymongo import IndexModel, ASCENDING, DESCENDING @@ -1598,7 +1662,7 @@ def gen_indexes(): session=session) return names - def create_index(self, keys, session=None, **kwargs): + def create_index(self, keys: _IndexKeyHint, session: Optional["ClientSession"] = None, **kwargs: Any) -> str: """Creates an index on this collection. Takes either a single key or a list of (key, direction) pairs. @@ -1701,7 +1765,7 @@ def create_index(self, keys, session=None, **kwargs): index = IndexModel(keys, **kwargs) return self.__create_indexes([index], session, **cmd_options)[0] - def drop_indexes(self, session=None, **kwargs): + def drop_indexes(self, session: Optional["ClientSession"] = None, **kwargs: Any) -> None: """Drops all indexes on this collection. Can be used on non-existant collections or collections with no indexes. @@ -1727,7 +1791,7 @@ def drop_indexes(self, session=None, **kwargs): """ self.drop_index("*", session=session, **kwargs) - def drop_index(self, index_or_name, session=None, **kwargs): + def drop_index(self, index_or_name: _IndexKeyHint, session: Optional["ClientSession"] = None, **kwargs: Any) -> None: """Drops the specified index on this collection. Can be used on non-existant collections or collections with no @@ -1780,7 +1844,7 @@ def drop_index(self, index_or_name, session=None, **kwargs): write_concern=self._write_concern_for(session), session=session) - def list_indexes(self, session=None): + def list_indexes(self, session: Optional["ClientSession"] = None) -> CommandCursor[MutableMapping[str, Any]]: """Get a cursor over the index documents for this collection. >>> for index in db.test.list_indexes(): @@ -1829,7 +1893,7 @@ def _cmd(session, server, sock_info, read_preference): return self.__database.client._retryable_read( _cmd, read_pref, session) - def index_information(self, session=None): + def index_information(self, session: Optional["ClientSession"] = None) -> MutableMapping[str, Any]: """Get information on this collection's indexes. Returns a dictionary where the keys are index names (as @@ -1863,7 +1927,7 @@ def index_information(self, session=None): info[index.pop("name")] = index return info - def options(self, session=None): + def options(self, session: Optional["ClientSession"] = None) -> MutableMapping[str, Any]: """Get the options set on this collection. Returns a dictionary of options and their values - see @@ -1896,6 +1960,7 @@ def options(self, session=None): return {} options = result.get("options", {}) + assert options is not None if "create" in options: del options["create"] @@ -1911,7 +1976,7 @@ def _aggregate(self, aggregation_command, pipeline, cursor_class, session, cmd.get_cursor, cmd.get_read_preference(session), session, retryable=not cmd._performs_write) - def aggregate(self, pipeline, session=None, let=None, **kwargs): + def aggregate(self, pipeline: _Pipeline, session: Optional["ClientSession"] = None, let: Optional[Mapping[str, Any]] = None, **kwargs: Any) -> CommandCursor[_DocumentType]: """Perform an aggregation using the aggregation framework on this collection. @@ -1993,7 +2058,9 @@ def aggregate(self, pipeline, session=None, let=None, **kwargs): let=let, **kwargs) - def aggregate_raw_batches(self, pipeline, session=None, **kwargs): + def aggregate_raw_batches( + self, pipeline: _Pipeline, session: Optional["ClientSession"] = None, **kwargs: Any + ) -> RawBatchCursor[_DocumentType]: """Perform an aggregation and retrieve batches of raw BSON. 
Similar to the :meth:`aggregate` method but returns a @@ -2030,9 +2097,17 @@ def aggregate_raw_batches(self, pipeline, session=None, **kwargs): explicit_session=session is not None, **kwargs) - def watch(self, pipeline=None, full_document=None, resume_after=None, - max_await_time_ms=None, batch_size=None, collation=None, - start_at_operation_time=None, session=None, start_after=None): + def watch(self, + pipeline: Optional[_Pipeline] = None, + full_document: Optional[str] = None, + resume_after: Optional[Mapping[str, Any]] = None, + max_await_time_ms: Optional[int] = None, + batch_size: Optional[int] = None, + collation: Optional[_CollationIn] = None, + start_at_operation_time: Optional[Timestamp] = None, + session: Optional["ClientSession"] = None, + start_after: Optional[Mapping[str, Any]] = None, + ) -> CollectionChangeStream[_DocumentType]: """Watch changes on this collection. Performs an aggregation with an implicit initial ``$changeStream`` @@ -2132,7 +2207,7 @@ def watch(self, pipeline=None, full_document=None, resume_after=None, batch_size, collation, start_at_operation_time, session, start_after) - def rename(self, new_name, session=None, **kwargs): + def rename(self, new_name: str, session: Optional["ClientSession"] = None, **kwargs: Any) -> MutableMapping[str, Any]: """Rename this collection. If operating in auth mode, client must be authorized as an @@ -2183,7 +2258,9 @@ def rename(self, new_name, session=None, **kwargs): parse_write_concern_error=True, session=s, client=self.__database.client) - def distinct(self, key, filter=None, session=None, **kwargs): + def distinct( + self, key: str, filter: Optional[Mapping[str, Any]] = None, session: Optional["ClientSession"] = None, **kwargs: Any + ) -> List: """Get a list of distinct values for `key` among all documents in this collection. @@ -2283,7 +2360,7 @@ def _find_and_modify(session, sock_info, retryable_write): raise ConfigurationError( 'arrayFilters is unsupported for unacknowledged ' 'writes.') - cmd["arrayFilters"] = array_filters + cmd["arrayFilters"] = list(array_filters) if hint is not None: if sock_info.max_wire_version < 8: raise ConfigurationError( @@ -2307,9 +2384,15 @@ def _find_and_modify(session, sock_info, retryable_write): return self.__database.client._retryable_write( write_concern.acknowledged, _find_and_modify, session) - def find_one_and_delete(self, filter, - projection=None, sort=None, hint=None, - session=None, let=None, **kwargs): + def find_one_and_delete(self, + filter: Mapping[str, Any], + projection: Optional[Union[Mapping[str, Any], Iterable[str]]] = None, + sort: Optional[_IndexList] = None, + hint: Optional[_IndexKeyHint] = None, + session: Optional["ClientSession"] = None, + let: Optional[Mapping[str, Any]] = None, + **kwargs: Any, + ) -> _DocumentType: """Finds a single document and deletes it, returning the document. >>> db.test.count_documents({'x': 1}) @@ -2357,8 +2440,8 @@ def find_one_and_delete(self, filter, as keyword arguments (for example maxTimeMS can be used with recent server versions). - `let` (optional): Map of parameter names and values. Values must be - constant or closed expressions that do not reference document - fields. Parameters can then be accessed as variables in an + constant or closed expressions that do not reference document + fields. Parameters can then be accessed as variables in an aggregate expression context (e.g. "$$var"). .. 
versionchanged:: 4.1 @@ -2384,10 +2467,18 @@ def find_one_and_delete(self, filter, return self.__find_and_modify(filter, projection, sort, let=let, hint=hint, session=session, **kwargs) - def find_one_and_replace(self, filter, replacement, - projection=None, sort=None, upsert=False, - return_document=ReturnDocument.BEFORE, - hint=None, session=None, let=None, **kwargs): + def find_one_and_replace(self, + filter: Mapping[str, Any], + replacement: Mapping[str, Any], + projection: Optional[Union[Mapping[str, Any], Iterable[str]]] = None, + sort: Optional[_IndexList] = None, + upsert: bool = False, + return_document: bool = ReturnDocument.BEFORE, + hint: Optional[_IndexKeyHint] = None, + session: Optional["ClientSession"] = None, + let: Optional[Mapping[str, Any]] = None, + **kwargs: Any, + ) -> _DocumentType: """Finds a single document and replaces it, returning either the original or the replaced document. @@ -2438,8 +2529,8 @@ def find_one_and_replace(self, filter, replacement, - `session` (optional): a :class:`~pymongo.client_session.ClientSession`. - `let` (optional): Map of parameter names and values. Values must be - constant or closed expressions that do not reference document - fields. Parameters can then be accessed as variables in an + constant or closed expressions that do not reference document + fields. Parameters can then be accessed as variables in an aggregate expression context (e.g. "$$var"). - `**kwargs` (optional): additional command arguments can be passed as keyword arguments (for example maxTimeMS can be used with @@ -2470,11 +2561,19 @@ def find_one_and_replace(self, filter, replacement, sort, upsert, return_document, let=let, hint=hint, session=session, **kwargs) - def find_one_and_update(self, filter, update, - projection=None, sort=None, upsert=False, - return_document=ReturnDocument.BEFORE, - array_filters=None, hint=None, session=None, - let=None, **kwargs): + def find_one_and_update(self, + filter: Mapping[str, Any], + update: Union[Mapping[str, Any], _Pipeline], + projection: Optional[Union[Mapping[str, Any], Iterable[str]]] = None, + sort: Optional[_IndexList] = None, + upsert: bool = False, + return_document: bool = ReturnDocument.BEFORE, + array_filters: Optional[Sequence[Mapping[str, Any]]] = None, + hint: Optional[_IndexKeyHint] = None, + session: Optional["ClientSession"] = None, + let: Optional[Mapping[str, Any]] = None, + **kwargs: Any, + ) -> _DocumentType: """Finds a single document and updates it, returning either the original or the updated document. @@ -2564,8 +2663,8 @@ def find_one_and_update(self, filter, update, - `session` (optional): a :class:`~pymongo.client_session.ClientSession`. - `let` (optional): Map of parameter names and values. Values must be - constant or closed expressions that do not reference document - fields. Parameters can then be accessed as variables in an + constant or closed expressions that do not reference document + fields. Parameters can then be accessed as variables in an aggregate expression context (e.g. "$$var"). 
- `**kwargs` (optional): additional command arguments can be passed as keyword arguments (for example maxTimeMS can be used with @@ -2600,15 +2699,15 @@ def find_one_and_update(self, filter, update, array_filters, hint=hint, let=let, session=session, **kwargs) - def __iter__(self): + def __iter__(self) -> "Collection[_DocumentType]": return self - def __next__(self): + def __next__(self) -> None: raise TypeError("'Collection' object is not iterable") next = __next__ - def __call__(self, *args, **kwargs): + def __call__(self, *args: Any, **kwargs: Any) -> None: """This is only here so that some API misusages are easier to debug. """ if "." not in self.__name: diff --git a/pymongo/command_cursor.py b/pymongo/command_cursor.py index 21822ac61b..b7dbf7a8e7 100644 --- a/pymongo/command_cursor.py +++ b/pymongo/command_cursor.py @@ -15,28 +15,38 @@ """CommandCursor class to iterate over command results.""" from collections import deque +from typing import (TYPE_CHECKING, Any, Generic, Iterator, Mapping, Optional, + Tuple) from bson import _convert_raw_document_lists_to_streams -from pymongo.cursor import _SocketManager, _CURSOR_CLOSED_ERRORS -from pymongo.errors import (ConnectionFailure, - InvalidOperation, +from pymongo.cursor import _CURSOR_CLOSED_ERRORS, _SocketManager +from pymongo.errors import (ConnectionFailure, InvalidOperation, OperationFailure) -from pymongo.message import (_CursorAddress, - _GetMore, - _RawBatchGetMore) +from pymongo.message import _CursorAddress, _GetMore, _RawBatchGetMore from pymongo.response import PinnedResponse +from pymongo.typings import _DocumentType +if TYPE_CHECKING: + from pymongo.client_session import ClientSession + from pymongo.collection import Collection -class CommandCursor(object): + +class CommandCursor(Generic[_DocumentType]): """A cursor / iterator over command cursors.""" _getmore_class = _GetMore - def __init__(self, collection, cursor_info, address, - batch_size=0, max_await_time_ms=None, session=None, - explicit_session=False): + def __init__(self, + collection: "Collection[_DocumentType]", + cursor_info: Mapping[str, Any], + address: Optional[Tuple[str, Optional[int]]], + batch_size: int = 0, + max_await_time_ms: Optional[int] = None, + session: Optional["ClientSession"] = None, + explicit_session: bool = False, + ) -> None: """Create a new command cursor.""" - self.__sock_mgr = None - self.__collection = collection + self.__sock_mgr: Any = None + self.__collection: Collection[_DocumentType] = collection self.__id = cursor_info['id'] self.__data = deque(cursor_info['firstBatch']) self.__postbatchresumetoken = cursor_info.get('postBatchResumeToken') @@ -60,7 +70,7 @@ def __init__(self, collection, cursor_info, address, and max_await_time_ms is not None): raise TypeError("max_await_time_ms must be an integer or None") - def __del__(self): + def __del__(self) -> None: self.__die() def __die(self, synchronous=False): @@ -92,12 +102,12 @@ def __end_session(self, synchronous): self.__session._end_session(lock=synchronous) self.__session = None - def close(self): + def close(self) -> None: """Explicitly close / kill this cursor. """ self.__die(True) - def batch_size(self, batch_size): + def batch_size(self, batch_size: int) -> "CommandCursor[_DocumentType]": """Limits the number of documents returned in one batch. Each batch requires a round trip to the server. It can be adjusted to optimize performance and limit data transfer. 
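To make the `batch_size` behavior documented above concrete, a minimal sketch (illustrative only; assumes a mongod on localhost:27017 and an existing `test` collection). `list_indexes` returns a `CommandCursor`, and `batch_size` caps the documents fetched per getMore round trip::

    from pymongo import MongoClient

    client = MongoClient()  # assumes a locally running mongod
    cursor = client.db.test.list_indexes().batch_size(2)  # at most 2 docs per getMore
    for index_doc in cursor:
        print(index_doc["name"])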
@@ -222,7 +232,7 @@ def _refresh(self): return len(self.__data) @property - def alive(self): + def alive(self) -> bool: """Does this cursor have the potential to return more data? Even if :attr:`alive` is ``True``, :meth:`next` can raise @@ -239,12 +249,12 @@ def alive(self): return bool(len(self.__data) or (not self.__killed)) @property - def cursor_id(self): + def cursor_id(self) -> int: """Returns the id of the cursor.""" return self.__id @property - def address(self): + def address(self) -> Optional[Tuple[str, Optional[int]]]: """The (host, port) of the server used, or None. .. versionadded:: 3.0 @@ -252,18 +262,19 @@ def address(self): return self.__address @property - def session(self): + def session(self) -> Optional["ClientSession"]: """The cursor's :class:`~pymongo.client_session.ClientSession`, or None. .. versionadded:: 3.6 """ if self.__explicit_session: return self.__session + return None - def __iter__(self): + def __iter__(self) -> Iterator[_DocumentType]: return self - def next(self): + def next(self) -> _DocumentType: """Advance the cursor.""" # Block until a document is returnable. while self.alive: @@ -284,19 +295,25 @@ def _try_next(self, get_more_allowed): else: return None - def __enter__(self): + def __enter__(self) -> "CommandCursor[_DocumentType]": return self - def __exit__(self, exc_type, exc_val, exc_tb): + def __exit__(self, exc_type: Any, exc_val: Any, exc_tb: Any) -> None: self.close() -class RawBatchCommandCursor(CommandCursor): +class RawBatchCommandCursor(CommandCursor, Generic[_DocumentType]): _getmore_class = _RawBatchGetMore - def __init__(self, collection, cursor_info, address, - batch_size=0, max_await_time_ms=None, session=None, - explicit_session=False): + def __init__(self, + collection: "Collection[_DocumentType]", + cursor_info: Mapping[str, Any], + address: Optional[Tuple[str, Optional[int]]], + batch_size: int = 0, + max_await_time_ms: Optional[int] = None, + session: Optional["ClientSession"] = None, + explicit_session: bool = False, + ) -> None: """Create a new cursor / iterator over raw batches of BSON data. Should not be called directly by application developers - diff --git a/pymongo/common.py b/pymongo/common.py index 14789c8109..fa2fe9bf11 100644 --- a/pymongo/common.py +++ b/pymongo/common.py @@ -17,8 +17,9 @@ import datetime import warnings - -from collections import abc, OrderedDict +from collections import OrderedDict, abc +from typing import (Any, Callable, Dict, List, Mapping, MutableMapping, + Optional, Sequence, Tuple, Type, Union, cast) from urllib.parse import unquote_plus from bson import SON @@ -29,18 +30,18 @@ from pymongo.compression_support import (validate_compressors, validate_zlib_compression_level) from pymongo.driver_info import DriverInfo -from pymongo.server_api import ServerApi from pymongo.errors import ConfigurationError from pymongo.monitoring import _validate_event_listeners from pymongo.read_concern import ReadConcern from pymongo.read_preferences import _MONGOS_MODES, _ServerMode +from pymongo.server_api import ServerApi from pymongo.write_concern import DEFAULT_WRITE_CONCERN, WriteConcern -ORDERED_TYPES = (SON, OrderedDict) +ORDERED_TYPES: Sequence[Type] = (SON, OrderedDict) # Defaults until we connect to a server and get updated limits. MAX_BSON_SIZE = 16 * (1024 ** 2) -MAX_MESSAGE_SIZE = 2 * MAX_BSON_SIZE +MAX_MESSAGE_SIZE: int = 2 * MAX_BSON_SIZE MIN_WIRE_VERSION = 0 MAX_WIRE_VERSION = 0 MAX_WRITE_BATCH_SIZE = 1000 @@ -85,13 +86,13 @@ MAX_CONNECTING = 2 # Default value for maxIdleTimeMS. 
-MAX_IDLE_TIME_MS = None +MAX_IDLE_TIME_MS: Optional[int] = None # Default value for maxIdleTimeMS in seconds. -MAX_IDLE_TIME_SEC = None +MAX_IDLE_TIME_SEC: Optional[int] = None # Default value for waitQueueTimeoutMS in seconds. -WAIT_QUEUE_TIMEOUT = None +WAIT_QUEUE_TIMEOUT: Optional[int] = None # Default value for localThresholdMS. LOCAL_THRESHOLD_MS = 15 @@ -103,10 +104,10 @@ RETRY_READS = True # The error code returned when a command doesn't exist. -COMMAND_NOT_FOUND_CODES = (59,) +COMMAND_NOT_FOUND_CODES: Sequence[int] = (59,) # Error codes to ignore if GridFS calls createIndex on a secondary -UNAUTHORIZED_CODES = (13, 16547, 16548) +UNAUTHORIZED_CODES: Sequence[int] = (13, 16547, 16548) # Maximum number of sessions to send in a single endSessions command. # From the driver sessions spec. @@ -116,7 +117,7 @@ SRV_SERVICE_NAME = "mongodb" -def partition_node(node): +def partition_node(node: str) -> Tuple[str, int]: """Split a host:port string into (host, int(port)) pair.""" host = node port = 27017 @@ -128,7 +129,7 @@ def partition_node(node): return host, port -def clean_node(node): +def clean_node(node: str) -> Tuple[str, int]: """Split and normalize a node name from a hello response.""" host, port = partition_node(node) @@ -139,7 +140,7 @@ def clean_node(node): return host.lower(), port -def raise_config_error(key, dummy): +def raise_config_error(key: str, dummy: Any) -> None: """Raise ConfigurationError with the given key name.""" raise ConfigurationError("Unknown option %s" % (key,)) @@ -154,14 +155,14 @@ def raise_config_error(key, dummy): } -def validate_boolean(option, value): +def validate_boolean(option: str, value: Any) -> bool: """Validates that 'value' is True or False.""" if isinstance(value, bool): return value raise TypeError("%s must be True or False" % (option,)) -def validate_boolean_or_string(option, value): +def validate_boolean_or_string(option: str, value: Any) -> bool: """Validates that value is True, False, 'true', or 'false'.""" if isinstance(value, str): if value not in ('true', 'false'): @@ -171,7 +172,7 @@ def validate_boolean_or_string(option, value): return validate_boolean(option, value) -def validate_integer(option, value): +def validate_integer(option: str, value: Any) -> int: """Validates that 'value' is an integer (or basestring representation). """ if isinstance(value, int): @@ -185,7 +186,7 @@ def validate_integer(option, value): raise TypeError("Wrong type for %s, value must be an integer" % (option,)) -def validate_positive_integer(option, value): +def validate_positive_integer(option: str, value: Any) -> int: """Validate that 'value' is a positive integer, which does not include 0. """ val = validate_integer(option, value) @@ -195,7 +196,7 @@ def validate_positive_integer(option, value): return val -def validate_non_negative_integer(option, value): +def validate_non_negative_integer(option: str, value: Any) -> int: """Validate that 'value' is a positive integer or 0. """ val = validate_integer(option, value) @@ -205,7 +206,7 @@ def validate_non_negative_integer(option, value): return val -def validate_readable(option, value): +def validate_readable(option: str, value: Any) -> Optional[str]: """Validates that 'value' is file-like and readable. """ if value is None: @@ -217,7 +218,7 @@ def validate_readable(option, value): return value -def validate_positive_integer_or_none(option, value): +def validate_positive_integer_or_none(option: str, value: Any) -> Optional[int]: """Validate that 'value' is a positive integer or None. 
""" if value is None: @@ -225,7 +226,7 @@ def validate_positive_integer_or_none(option, value): return validate_positive_integer(option, value) -def validate_non_negative_integer_or_none(option, value): +def validate_non_negative_integer_or_none(option: str, value: Any) -> Optional[int]: """Validate that 'value' is a positive integer or 0 or None. """ if value is None: @@ -233,7 +234,7 @@ def validate_non_negative_integer_or_none(option, value): return validate_non_negative_integer(option, value) -def validate_string(option, value): +def validate_string(option: str, value: Any) -> str: """Validates that 'value' is an instance of `str`. """ if isinstance(value, str): @@ -242,7 +243,7 @@ def validate_string(option, value): "str" % (option,)) -def validate_string_or_none(option, value): +def validate_string_or_none(option: str, value: Any) -> Optional[str]: """Validates that 'value' is an instance of `basestring` or `None`. """ if value is None: @@ -250,7 +251,7 @@ def validate_string_or_none(option, value): return validate_string(option, value) -def validate_int_or_basestring(option, value): +def validate_int_or_basestring(option: str, value: Any) -> Union[int, str]: """Validates that 'value' is an integer or string. """ if isinstance(value, int): @@ -264,7 +265,7 @@ def validate_int_or_basestring(option, value): "integer or a string" % (option,)) -def validate_non_negative_int_or_basestring(option, value): +def validate_non_negative_int_or_basestring(option: Any, value: Any) -> Union[int, str]: """Validates that 'value' is an integer or string. """ if isinstance(value, int): @@ -279,7 +280,7 @@ def validate_non_negative_int_or_basestring(option, value): "non negative integer or a string" % (option,)) -def validate_positive_float(option, value): +def validate_positive_float(option: str, value: Any) -> float: """Validates that 'value' is a float, or can be converted to one, and is positive. """ @@ -299,7 +300,7 @@ def validate_positive_float(option, value): return value -def validate_positive_float_or_zero(option, value): +def validate_positive_float_or_zero(option: str, value: Any) -> float: """Validates that 'value' is 0 or a positive float, or can be converted to 0 or a positive float. """ @@ -308,7 +309,7 @@ def validate_positive_float_or_zero(option, value): return validate_positive_float(option, value) -def validate_timeout_or_none(option, value): +def validate_timeout_or_none(option: str, value: Any) -> Optional[float]: """Validates a timeout specified in milliseconds returning a value in floating point seconds. """ @@ -317,7 +318,7 @@ def validate_timeout_or_none(option, value): return validate_positive_float(option, value) / 1000.0 -def validate_timeout_or_zero(option, value): +def validate_timeout_or_zero(option: str, value: Any) -> float: """Validates a timeout specified in milliseconds returning a value in floating point seconds for the case where None is an error and 0 is valid. Setting the timeout to nothing in the URI string is a @@ -330,7 +331,7 @@ def validate_timeout_or_zero(option, value): return validate_positive_float(option, value) / 1000.0 -def validate_timeout_or_none_or_zero(option, value): +def validate_timeout_or_none_or_zero(option: Any, value: Any) -> Optional[float]: """Validates a timeout specified in milliseconds returning a value in floating point seconds. value=0 and value="0" are treated the same as value=None which means unlimited timeout. 
@@ -340,7 +341,7 @@ def validate_timeout_or_none_or_zero(option, value): return validate_positive_float(option, value) / 1000.0 -def validate_max_staleness(option, value): +def validate_max_staleness(option: str, value: Any) -> int: """Validates maxStalenessSeconds according to the Max Staleness Spec.""" if value == -1 or value == "-1": # Default: No maximum staleness. @@ -348,7 +349,7 @@ def validate_max_staleness(option, value): return validate_positive_integer(option, value) -def validate_read_preference(dummy, value): +def validate_read_preference(dummy: Any, value: Any) -> _ServerMode: """Validate a read preference. """ if not isinstance(value, _ServerMode): @@ -356,7 +357,7 @@ def validate_read_preference(dummy, value): return value -def validate_read_preference_mode(dummy, value): +def validate_read_preference_mode(dummy: Any, value: Any) -> _ServerMode: """Validate read preference mode for a MongoClient. .. versionchanged:: 3.5 @@ -368,7 +369,7 @@ def validate_read_preference_mode(dummy, value): return value -def validate_auth_mechanism(option, value): +def validate_auth_mechanism(option: str, value: Any) -> str: """Validate the authMechanism URI option. """ if value not in MECHANISMS: @@ -376,7 +377,7 @@ def validate_auth_mechanism(option, value): return value -def validate_uuid_representation(dummy, value): +def validate_uuid_representation(dummy: Any, value: Any) -> int: """Validate the uuid representation option selected in the URI. """ try: @@ -387,13 +388,13 @@ def validate_uuid_representation(dummy, value): "%s" % (value, tuple(_UUID_REPRESENTATIONS))) -def validate_read_preference_tags(name, value): +def validate_read_preference_tags(name: str, value: Any) -> List[Dict[str, str]]: """Parse readPreferenceTags if passed as a client kwarg. 
""" if not isinstance(value, list): value = [value] - tag_sets = [] + tag_sets: List = [] for tag_set in value: if tag_set == '': tag_sets.append({}) @@ -416,10 +417,10 @@ def validate_read_preference_tags(name, value): 'AWS_SESSION_TOKEN']) -def validate_auth_mechanism_properties(option, value): +def validate_auth_mechanism_properties(option: str, value: Any) -> Dict[str, Union[bool, str]]: """Validate authMechanismProperties.""" value = validate_string(option, value) - props = {} + props: Dict[str, Any] = {} for opt in value.split(','): try: key, val = opt.split(':') @@ -443,7 +444,7 @@ def validate_auth_mechanism_properties(option, value): return props -def validate_document_class(option, value): +def validate_document_class(option: str, value: Any) -> Union[Type[MutableMapping], Type[RawBSONDocument]]: """Validate the document_class option.""" if not issubclass(value, (abc.MutableMapping, RawBSONDocument)): raise TypeError("%s must be dict, bson.son.SON, " @@ -452,7 +453,7 @@ def validate_document_class(option, value): return value -def validate_type_registry(option, value): +def validate_type_registry(option: Any, value: Any) -> Optional[TypeRegistry]: """Validate the type_registry option.""" if value is not None and not isinstance(value, TypeRegistry): raise TypeError("%s must be an instance of %s" % ( @@ -460,21 +461,21 @@ def validate_type_registry(option, value): return value -def validate_list(option, value): +def validate_list(option: str, value: Any) -> List: """Validates that 'value' is a list.""" if not isinstance(value, list): raise TypeError("%s must be a list" % (option,)) return value -def validate_list_or_none(option, value): +def validate_list_or_none(option: Any, value: Any) -> Optional[List]: """Validates that 'value' is a list or None.""" if value is None: return value return validate_list(option, value) -def validate_list_or_mapping(option, value): +def validate_list_or_mapping(option: Any, value: Any) -> None: """Validates that 'value' is a list or a document.""" if not isinstance(value, (abc.Mapping, list)): raise TypeError("%s must either be a list or an instance of dict, " @@ -482,7 +483,7 @@ def validate_list_or_mapping(option, value): "collections.Mapping" % (option,)) -def validate_is_mapping(option, value): +def validate_is_mapping(option: str, value: Any) -> None: """Validate the type of method arguments that expect a document.""" if not isinstance(value, abc.Mapping): raise TypeError("%s must be an instance of dict, bson.son.SON, or " @@ -490,7 +491,7 @@ def validate_is_mapping(option, value): "collections.Mapping" % (option,)) -def validate_is_document_type(option, value): +def validate_is_document_type(option: str, value: Any) -> None: """Validate the type of method arguments that expect a MongoDB document.""" if not isinstance(value, (abc.MutableMapping, RawBSONDocument)): raise TypeError("%s must be an instance of dict, bson.son.SON, " @@ -499,7 +500,7 @@ def validate_is_document_type(option, value): "collections.MutableMapping" % (option,)) -def validate_appname_or_none(option, value): +def validate_appname_or_none(option: str, value: Any) -> Optional[str]: """Validate the appname option.""" if value is None: return value @@ -510,7 +511,7 @@ def validate_appname_or_none(option, value): return value -def validate_driver_or_none(option, value): +def validate_driver_or_none(option: Any, value: Any) -> Optional[DriverInfo]: """Validate the driver keyword arg.""" if value is None: return value @@ -519,7 +520,7 @@ def validate_driver_or_none(option, value): 
return value -def validate_server_api_or_none(option, value): +def validate_server_api_or_none(option: Any, value: Any) -> Optional[ServerApi]: """Validate the server_api keyword arg.""" if value is None: return value @@ -528,7 +529,7 @@ def validate_server_api_or_none(option, value): return value -def validate_is_callable_or_none(option, value): +def validate_is_callable_or_none(option: Any, value: Any) -> Optional[Callable]: """Validates that 'value' is a callable.""" if value is None: return value @@ -537,7 +538,7 @@ def validate_is_callable_or_none(option, value): return value -def validate_ok_for_replace(replacement): +def validate_ok_for_replace(replacement: Mapping[str, Any]) -> None: """Validate a replacement document.""" validate_is_mapping("replacement", replacement) # Replacement can be {} @@ -547,7 +548,7 @@ def validate_ok_for_replace(replacement): raise ValueError('replacement can not include $ operators') -def validate_ok_for_update(update): +def validate_ok_for_update(update: Any) -> None: """Validate an update document.""" validate_list_or_mapping("update", update) # Update cannot be {}. @@ -563,7 +564,7 @@ def validate_ok_for_update(update): _UNICODE_DECODE_ERROR_HANDLERS = frozenset(['strict', 'replace', 'ignore']) -def validate_unicode_decode_error_handler(dummy, value): +def validate_unicode_decode_error_handler(dummy: Any, value: str) -> str: """Validate the Unicode decode error handler option of CodecOptions. """ if value not in _UNICODE_DECODE_ERROR_HANDLERS: @@ -573,7 +574,7 @@ def validate_unicode_decode_error_handler(dummy, value): return value -def validate_tzinfo(dummy, value): +def validate_tzinfo(dummy: Any, value: Any) -> Optional[datetime.tzinfo]: """Validate the tzinfo option """ if value is not None and not isinstance(value, datetime.tzinfo): @@ -581,7 +582,7 @@ def validate_tzinfo(dummy, value): return value -def validate_auto_encryption_opts_or_none(option, value): +def validate_auto_encryption_opts_or_none(option: Any, value: Any) -> Optional[Any]: """Validate the driver keyword arg.""" if value is None: return value @@ -595,7 +596,7 @@ def validate_auto_encryption_opts_or_none(option, value): # Dictionary where keys are the names of public URI options, and values # are lists of aliases for that option. -URI_OPTIONS_ALIAS_MAP = { +URI_OPTIONS_ALIAS_MAP: Dict[str, List[str]] = { 'tls': ['ssl'], } @@ -603,7 +604,7 @@ def validate_auto_encryption_opts_or_none(option, value): # are functions that validate user-input values for that option. If an option # alias uses a different validator than its public counterpart, it should be # included here as a key, value pair. -URI_OPTIONS_VALIDATOR_MAP = { +URI_OPTIONS_VALIDATOR_MAP: Dict[str, Callable[[Any, Any], Any]] = { 'appname': validate_appname_or_none, 'authmechanism': validate_auth_mechanism, 'authmechanismproperties': validate_auth_mechanism_properties, @@ -644,7 +645,7 @@ def validate_auto_encryption_opts_or_none(option, value): # Dictionary where keys are the names of URI options specific to pymongo, # and values are functions that validate user-input values for those options. 
-NONSPEC_OPTIONS_VALIDATOR_MAP = { +NONSPEC_OPTIONS_VALIDATOR_MAP: Dict[str, Callable[[Any, Any], Any]] = { 'connect': validate_boolean_or_string, 'driver': validate_driver_or_none, 'server_api': validate_server_api_or_none, @@ -661,7 +662,7 @@ def validate_auto_encryption_opts_or_none(option, value): # Dictionary where keys are the names of keyword-only options for the # MongoClient constructor, and values are functions that validate user-input # values for those options. -KW_VALIDATORS = { +KW_VALIDATORS: Dict[str, Callable[[Any, Any], Any]] = { 'document_class': validate_document_class, 'type_registry': validate_type_registry, 'read_preference': validate_read_preference, @@ -677,14 +678,14 @@ def validate_auto_encryption_opts_or_none(option, value): # internally-used names of that URI option. Options with only one name # variant need not be included here. Options whose public and internal # names are the same need not be included here. -INTERNAL_URI_OPTION_NAME_MAP = { +INTERNAL_URI_OPTION_NAME_MAP: Dict[str, str] = { 'ssl': 'tls', } # Map from deprecated URI option names to a tuple indicating the method of # their deprecation and any additional information that may be needed to # construct the warning message. -URI_OPTIONS_DEPRECATION_MAP = { +URI_OPTIONS_DEPRECATION_MAP: Dict[str, Tuple[str, str]] = { # format: : (, ), # Supported values: # - 'renamed': should be the new option name. Note that case is @@ -704,11 +705,11 @@ def validate_auto_encryption_opts_or_none(option, value): URI_OPTIONS_VALIDATOR_MAP[optname]) # Map containing all URI option and keyword argument validators. -VALIDATORS = URI_OPTIONS_VALIDATOR_MAP.copy() +VALIDATORS: Dict[str, Callable[[Any, Any], Any]] = URI_OPTIONS_VALIDATOR_MAP.copy() VALIDATORS.update(KW_VALIDATORS) # List of timeout-related options. -TIMEOUT_OPTIONS = [ +TIMEOUT_OPTIONS: List[str] = [ 'connecttimeoutms', 'heartbeatfrequencyms', 'maxidletimems', @@ -722,7 +723,7 @@ def validate_auto_encryption_opts_or_none(option, value): _AUTH_OPTIONS = frozenset(['authmechanismproperties']) -def validate_auth_option(option, value): +def validate_auth_option(option: str, value: Any) -> Tuple[str, Any]: """Validate optional authentication parameters. """ lower, value = validate(option, value) @@ -732,7 +733,7 @@ def validate_auth_option(option, value): return option, value -def validate(option, value): +def validate(option: str, value: Any) -> Tuple[str, Any]: """Generic validation function. """ lower = option.lower() @@ -741,7 +742,7 @@ def validate(option, value): return option, value -def get_validated_options(options, warn=True): +def get_validated_options(options: Mapping[str, Any], warn: bool = True) -> MutableMapping[str, Any]: """Validate each entry in options and raise a warning if it is not valid. Returns a copy of options with invalid entries removed. @@ -751,6 +752,7 @@ def get_validated_options(options, warn=True): invalid options will be ignored. Otherwise, invalid options will cause errors. """ + validated_options: MutableMapping[str, Any] if isinstance(options, _CaseInsensitiveDictionary): validated_options = _CaseInsensitiveDictionary() get_normed_key = lambda x: x @@ -794,8 +796,8 @@ class BaseObject(object): SHOULD NOT BE USED BY DEVELOPERS EXTERNAL TO MONGODB. 
""" - def __init__(self, codec_options, read_preference, write_concern, - read_concern): + def __init__(self, codec_options: CodecOptions, read_preference: _ServerMode, write_concern: WriteConcern, + read_concern: ReadConcern) -> None: if not isinstance(codec_options, CodecOptions): raise TypeError("codec_options must be an instance of " @@ -819,14 +821,14 @@ def __init__(self, codec_options, read_preference, write_concern, self.__read_concern = read_concern @property - def codec_options(self): + def codec_options(self) -> CodecOptions: """Read only access to the :class:`~bson.codec_options.CodecOptions` of this instance. """ return self.__codec_options @property - def write_concern(self): + def write_concern(self) -> WriteConcern: """Read only access to the :class:`~pymongo.write_concern.WriteConcern` of this instance. @@ -844,7 +846,7 @@ def _write_concern_for(self, session): return self.write_concern @property - def read_preference(self): + def read_preference(self) -> _ServerMode: """Read only access to the read preference of this instance. .. versionchanged:: 3.0 @@ -861,7 +863,7 @@ def _read_preference_for(self, session): return self.__read_preference @property - def read_concern(self): + def read_concern(self) -> ReadConcern: """Read only access to the :class:`~pymongo.read_concern.ReadConcern` of this instance. diff --git a/pymongo/compression_support.py b/pymongo/compression_support.py index d367595288..c9cc041aff 100644 --- a/pymongo/compression_support.py +++ b/pymongo/compression_support.py @@ -13,6 +13,7 @@ # limitations under the License. import warnings +from typing import Callable try: import snappy @@ -99,7 +100,7 @@ def get_compression_context(self, compressors): return ZstdContext() -def _zlib_no_compress(data): +def _zlib_no_compress(data, level=None): """Compress data with zlib level 0.""" cobj = zlib.compressobj(0) return b"".join([cobj.compress(data), cobj.flush()]) @@ -117,6 +118,8 @@ class ZlibContext(object): compressor_id = 2 def __init__(self, level): + self.compress: Callable[[bytes], bytes] + # Jython zlib.compress doesn't support -1 if level == -1: self.compress = zlib.compress @@ -124,7 +127,7 @@ def __init__(self, level): elif level == 0: self.compress = _zlib_no_compress else: - self.compress = lambda data: zlib.compress(data, level) + self.compresss = lambda data, _: zlib.compress(data, level) class ZstdContext(object): diff --git a/pymongo/cursor.py b/pymongo/cursor.py index 3e78c2d97c..152acaca65 100644 --- a/pymongo/cursor.py +++ b/pymongo/cursor.py @@ -13,29 +13,26 @@ # limitations under the License. 
"""Cursor class to iterate over Mongo query results.""" - import copy import threading import warnings - from collections import deque +from typing import (TYPE_CHECKING, Any, Dict, Generic, Iterable, List, Mapping, + MutableMapping, Optional, Sequence, Tuple, Union, cast, overload) from bson import RE_TYPE, _convert_raw_document_lists_to_streams from bson.code import Code from bson.son import SON from pymongo import helpers -from pymongo.common import (validate_boolean, validate_is_mapping, - validate_is_document_type) from pymongo.collation import validate_collation_or_none -from pymongo.errors import (ConnectionFailure, - InvalidOperation, +from pymongo.common import (validate_boolean, validate_is_document_type, + validate_is_mapping) +from pymongo.errors import (ConnectionFailure, InvalidOperation, OperationFailure) -from pymongo.message import (_CursorAddress, - _GetMore, - _RawBatchGetMore, - _Query, - _RawBatchQuery) +from pymongo.message import (_CursorAddress, _GetMore, _Query, + _RawBatchGetMore, _RawBatchQuery) from pymongo.response import PinnedResponse +from pymongo.typings import _CollationIn, _DocumentType # These errors mean that the server has already killed the cursor so there is # no need to send killCursors. @@ -126,22 +123,47 @@ def close(self): self.sock.unpin() self.sock = None +_Sort = Sequence[Tuple[str, Union[int, str, Mapping[str, Any]]]] +_Hint = Union[str, _Sort] + + +if TYPE_CHECKING: + from pymongo.client_session import ClientSession + from pymongo.collection import Collection -class Cursor(object): + +class Cursor(Generic[_DocumentType]): """A cursor / iterator over Mongo query results. """ _query_class = _Query _getmore_class = _GetMore - def __init__(self, collection, filter=None, projection=None, skip=0, - limit=0, no_cursor_timeout=False, - cursor_type=CursorType.NON_TAILABLE, - sort=None, allow_partial_results=False, oplog_replay=False, - batch_size=0, - collation=None, hint=None, max_scan=None, max_time_ms=None, - max=None, min=None, return_key=None, show_record_id=None, - snapshot=None, comment=None, session=None, - allow_disk_use=None, let=None): + def __init__(self, + collection: "Collection[_DocumentType]", + filter: Optional[Mapping[str, Any]] = None, + projection: Optional[Union[Mapping[str, Any], Iterable[str]]] = None, + skip: int = 0, + limit: int = 0, + no_cursor_timeout: bool = False, + cursor_type: int = CursorType.NON_TAILABLE, + sort: Optional[_Sort] = None, + allow_partial_results: bool = False, + oplog_replay: bool = False, + batch_size: int = 0, + collation: Optional[_CollationIn] = None, + hint: Optional[_Hint] = None, + max_scan: Optional[int] = None, + max_time_ms: Optional[int] = None, + max: Optional[_Sort] = None, + min: Optional[_Sort] = None, + return_key: Optional[bool] = None, + show_record_id: Optional[bool] = None, + snapshot: Optional[bool] = None, + comment: Any = None, + session: Optional["ClientSession"] = None, + allow_disk_use: Optional[bool] = None, + let: Optional[bool] = None + ) -> None: """Create a new cursor. Should not be called directly by application developers - see @@ -151,11 +173,12 @@ def __init__(self, collection, filter=None, projection=None, skip=0, """ # Initialize all attributes used in __del__ before possibly raising # an error to avoid attribute errors during garbage collection. 
- self.__collection = collection - self.__id = None + self.__collection: Collection[_DocumentType] = collection + self.__id: Any = None self.__exhaust = False - self.__sock_mgr = None + self.__sock_mgr: Any = None self.__killed = False + self.__session: Optional["ClientSession"] if session: self.__session = session @@ -164,10 +187,7 @@ def __init__(self, collection, filter=None, projection=None, skip=0, self.__session = None self.__explicit_session = False - spec = filter - if spec is None: - spec = {} - + spec: Mapping[str, Any] = filter or {} validate_is_mapping("filter", spec) if not isinstance(skip, int): raise TypeError("skip must be an instance of int") @@ -203,6 +223,7 @@ def __init__(self, collection, filter=None, projection=None, skip=0, self.__let = let self.__spec = spec + self.__has_filter = filter is not None self.__projection = projection self.__skip = skip self.__limit = limit @@ -212,9 +233,9 @@ def __init__(self, collection, filter=None, projection=None, skip=0, self.__explain = False self.__comment = comment self.__max_time_ms = max_time_ms - self.__max_await_time_ms = None - self.__max = max - self.__min = min + self.__max_await_time_ms: Optional[int] = None + self.__max: Optional[Union[SON[Any, Any], _Sort]] = max + self.__min: Optional[Union[SON[Any, Any], _Sort]] = min self.__collation = validate_collation_or_none(collation) self.__return_key = return_key self.__show_record_id = show_record_id @@ -239,7 +260,7 @@ def __init__(self, collection, filter=None, projection=None, skip=0, # it anytime we change __limit. self.__empty = False - self.__data = deque() + self.__data: deque = deque() self.__address = None self.__retrieved = 0 @@ -261,22 +282,22 @@ def __init__(self, collection, filter=None, projection=None, skip=0, self.__collname = collection.name @property - def collection(self): + def collection(self) -> "Collection[_DocumentType]": """The :class:`~pymongo.collection.Collection` that this :class:`Cursor` is iterating. """ return self.__collection @property - def retrieved(self): + def retrieved(self) -> int: """The number of documents retrieved so far. """ return self.__retrieved - def __del__(self): + def __del__(self) -> None: self.__die() - def rewind(self): + def rewind(self) -> "Cursor[_DocumentType]": """Rewind this cursor to its unevaluated state. Reset this cursor if it has been partially or completely evaluated. @@ -294,7 +315,7 @@ def rewind(self): return self - def clone(self): + def clone(self) -> "Cursor[_DocumentType]": """Get a clone of this cursor. Returns a new Cursor instance with options matching those that have @@ -318,7 +339,7 @@ def _clone(self, deepcopy=True, base=None): "batch_size", "max_scan", "query_flags", "collation", "empty", "show_record_id", "return_key", "allow_disk_use", - "snapshot", "exhaust") + "snapshot", "exhaust", "has_filter") data = dict((k, v) for k, v in self.__dict__.items() if k.startswith('_Cursor__') and k[9:] in values_to_clone) if deepcopy: @@ -360,7 +381,7 @@ def __die(self, synchronous=False): self.__session = None self.__sock_mgr = None - def close(self): + def close(self) -> None: """Explicitly close / kill this cursor. """ self.__die(True) @@ -397,7 +418,7 @@ def __query_spec(self): if operators: # Make a shallow copy so we can cleanly rewind or clone. - spec = self.__spec.copy() + spec = copy.copy(self.__spec) # Allow-listed commands must be wrapped in $query. 
if "$query" not in spec: @@ -429,7 +450,7 @@ def __check_okay_to_chain(self): if self.__retrieved or self.__id is not None: raise InvalidOperation("cannot set options after executing query") - def add_option(self, mask): + def add_option(self, mask: int) -> "Cursor[_DocumentType]": """Set arbitrary query flags using a bitmask. To set the tailable flag: @@ -450,7 +471,7 @@ def add_option(self, mask): self.__query_flags |= mask return self - def remove_option(self, mask): + def remove_option(self, mask: int) -> "Cursor[_DocumentType]": """Unset arbitrary query flags using a bitmask. To unset the tailable flag: @@ -466,7 +487,7 @@ def remove_option(self, mask): self.__query_flags &= ~mask return self - def allow_disk_use(self, allow_disk_use): + def allow_disk_use(self, allow_disk_use: bool) -> "Cursor[_DocumentType]": """Specifies whether MongoDB can use temporary disk files while processing a blocking sort operation. @@ -488,7 +509,7 @@ def allow_disk_use(self, allow_disk_use): self.__allow_disk_use = allow_disk_use return self - def limit(self, limit): + def limit(self, limit: int) -> "Cursor[_DocumentType]": """Limits the number of results to be returned by this cursor. Raises :exc:`TypeError` if `limit` is not an integer. Raises @@ -511,7 +532,7 @@ def limit(self, limit): self.__limit = limit return self - def batch_size(self, batch_size): + def batch_size(self, batch_size: int) -> "Cursor[_DocumentType]": """Limits the number of documents returned in one batch. Each batch requires a round trip to the server. It can be adjusted to optimize performance and limit data transfer. @@ -539,7 +560,7 @@ def batch_size(self, batch_size): self.__batch_size = batch_size return self - def skip(self, skip): + def skip(self, skip: int) -> "Cursor[_DocumentType]": """Skips the first `skip` results of this cursor. Raises :exc:`TypeError` if `skip` is not an integer. Raises @@ -560,7 +581,7 @@ def skip(self, skip): self.__skip = skip return self - def max_time_ms(self, max_time_ms): + def max_time_ms(self, max_time_ms: Optional[int]) -> "Cursor[_DocumentType]": """Specifies a time limit for a query operation. If the specified time is exceeded, the operation will be aborted and :exc:`~pymongo.errors.ExecutionTimeout` is raised. If `max_time_ms` @@ -581,7 +602,7 @@ def max_time_ms(self, max_time_ms): self.__max_time_ms = max_time_ms return self - def max_await_time_ms(self, max_await_time_ms): + def max_await_time_ms(self, max_await_time_ms: Optional[int]) -> "Cursor[_DocumentType]": """Specifies a time limit for a getMore operation on a :attr:`~pymongo.cursor.CursorType.TAILABLE_AWAIT` cursor. For all other types of cursor max_await_time_ms is ignored. @@ -609,6 +630,14 @@ def max_await_time_ms(self, max_await_time_ms): return self + @overload + def __getitem__(self, index: int) -> _DocumentType: + ... + + @overload + def __getitem__(self, index: slice) -> "Cursor[_DocumentType]": + ... + def __getitem__(self, index): """Get a single document or a slice of documents from this cursor. @@ -691,7 +720,7 @@ def __getitem__(self, index): raise TypeError("index %r cannot be applied to Cursor " "instances" % index) - def max_scan(self, max_scan): + def max_scan(self, max_scan: Optional[int]) -> "Cursor[_DocumentType]": """**DEPRECATED** - Limit the number of documents to scan when performing the query. 
@@ -711,7 +740,7 @@ def max_scan(self, max_scan): self.__max_scan = max_scan return self - def max(self, spec): + def max(self, spec: _Sort) -> "Cursor[_DocumentType]": """Adds ``max`` operator that specifies upper bound for specific index. When using ``max``, :meth:`~hint` should also be configured to ensure @@ -734,7 +763,7 @@ def max(self, spec): self.__max = SON(spec) return self - def min(self, spec): + def min(self, spec: _Sort) -> "Cursor[_DocumentType]": """Adds ``min`` operator that specifies lower bound for specific index. When using ``min``, :meth:`~hint` should also be configured to ensure @@ -757,7 +786,7 @@ def min(self, spec): self.__min = SON(spec) return self - def sort(self, key_or_list, direction=None): + def sort(self, key_or_list: _Hint, direction: Optional[Union[int, str]] = None) -> "Cursor[_DocumentType]": """Sorts this cursor's results. Pass a field name and a direction, either @@ -803,7 +832,7 @@ def sort(self, key_or_list, direction=None): self.__ordering = helpers._index_document(keys) return self - def distinct(self, key): + def distinct(self, key: str) -> List: """Get a list of distinct values for `key` among all documents in the result set of this query. @@ -820,7 +849,7 @@ def distinct(self, key): .. seealso:: :meth:`pymongo.collection.Collection.distinct` """ - options = {} + options: Dict[str, Any] = {} if self.__spec: options["query"] = self.__spec if self.__max_time_ms is not None: @@ -833,7 +862,7 @@ def distinct(self, key): return self.__collection.distinct( key, session=self.__session, **options) - def explain(self): + def explain(self) -> _DocumentType: """Returns an explain plan record for this cursor. .. note:: This method uses the default verbosity mode of the @@ -863,7 +892,7 @@ def __set_hint(self, index): else: self.__hint = helpers._index_document(index) - def hint(self, index): + def hint(self, index: Optional[_Hint]) -> "Cursor[_DocumentType]": """Adds a 'hint', telling Mongo the proper index to use for the query. Judicious use of hints can greatly improve query @@ -888,7 +917,7 @@ def hint(self, index): self.__set_hint(index) return self - def comment(self, comment): + def comment(self, comment: Any) -> "Cursor[_DocumentType]": """Adds a 'comment' to the cursor. http://docs.mongodb.org/manual/reference/operator/comment/ @@ -903,7 +932,7 @@ def comment(self, comment): self.__comment = comment return self - def where(self, code): + def where(self, code: Union[str, Code]) -> "Cursor[_DocumentType]": """Adds a `$where`_ clause to this query. The `code` argument must be an instance of :class:`basestring` @@ -937,10 +966,18 @@ def where(self, code): if not isinstance(code, Code): code = Code(code) - self.__spec["$where"] = code + # Avoid overwriting a filter argument that was given by the user + # when updating the spec. + spec: Dict[str, Any] + if self.__has_filter: + spec = dict(self.__spec) + else: + spec = cast(Dict, self.__spec) + spec["$where"] = code + self.__spec = spec return self - def collation(self, collation): + def collation(self, collation: Optional[_CollationIn]) -> "Cursor[_DocumentType]": """Adds a :class:`~pymongo.collation.Collation` to this query. Raises :exc:`TypeError` if `collation` is not an instance of @@ -1106,7 +1143,7 @@ def _refresh(self): return len(self.__data) @property - def alive(self): + def alive(self) -> bool: """Does this cursor have the potential to return more data? 
This is mostly useful with `tailable cursors @@ -1128,7 +1165,7 @@ def alive(self): return bool(len(self.__data) or (not self.__killed)) @property - def cursor_id(self): + def cursor_id(self) -> Optional[int]: """Returns the id of the cursor .. versionadded:: 2.2 @@ -1136,7 +1173,7 @@ def cursor_id(self): return self.__id @property - def address(self): + def address(self) -> Optional[Tuple[str, Any]]: """The (host, port) of the server used, or None. .. versionchanged:: 3.0 @@ -1145,18 +1182,19 @@ def address(self): return self.__address @property - def session(self): + def session(self) -> Optional["ClientSession"]: """The cursor's :class:`~pymongo.client_session.ClientSession`, or None. .. versionadded:: 3.6 """ if self.__explicit_session: return self.__session + return None - def __iter__(self): + def __iter__(self) -> "Cursor[_DocumentType]": return self - def next(self): + def next(self) -> _DocumentType: """Advance the cursor.""" if self.__empty: raise StopIteration @@ -1167,20 +1205,20 @@ def next(self): __next__ = next - def __enter__(self): + def __enter__(self) -> "Cursor[_DocumentType]": return self - def __exit__(self, exc_type, exc_val, exc_tb): + def __exit__(self, exc_type: Any, exc_val: Any, exc_tb: Any) -> None: self.close() - def __copy__(self): + def __copy__(self) -> "Cursor[_DocumentType]": """Support function for `copy.copy()`. .. versionadded:: 2.4 """ return self._clone(deepcopy=False) - def __deepcopy__(self, memo): + def __deepcopy__(self, memo: Any) -> Any: """Support function for `copy.deepcopy()`. .. versionadded:: 2.4 @@ -1193,6 +1231,7 @@ def _deepcopy(self, x, memo=None): Regular expressions cannot be deep copied but as they are immutable we don't have to copy them when cloning. """ + y: Any if not hasattr(x, 'items'): y, is_list, iterator = [], True, enumerate(x) else: @@ -1220,13 +1259,13 @@ def _deepcopy(self, x, memo=None): return y -class RawBatchCursor(Cursor): +class RawBatchCursor(Cursor, Generic[_DocumentType]): """A cursor / iterator over raw batches of BSON data from a query result.""" _query_class = _RawBatchQuery _getmore_class = _RawBatchGetMore - def __init__(self, *args, **kwargs): + def __init__(self, collection: "Collection[_DocumentType]", *args: Any, **kwargs: Any) -> None: """Create a new cursor / iterator over raw batches of BSON data. Should not be called directly by application developers - @@ -1235,7 +1274,7 @@ def __init__(self, *args, **kwargs): .. seealso:: The MongoDB documentation on `cursors `_. """ - super(RawBatchCursor, self).__init__(*args, **kwargs) + super(RawBatchCursor, self).__init__(collection, *args, **kwargs) def _unpack_response(self, response, cursor_id, codec_options, user_fields=None, legacy_response=False): @@ -1247,7 +1286,7 @@ def _unpack_response(self, response, cursor_id, codec_options, _convert_raw_document_lists_to_streams(raw_response[0]) return raw_response - def explain(self): + def explain(self) -> _DocumentType: """Returns an explain plan record for this cursor. .. seealso:: The MongoDB documentation on `explain `_. @@ -1255,5 +1294,5 @@ def explain(self): clone = self._clone(deepcopy=True, base=Cursor(self.collection)) return clone.explain() - def __getitem__(self, index): + def __getitem__(self, index: Any) -> "Cursor[_DocumentType]": raise InvalidOperation("Cannot call __getitem__ on RawBatchCursor") diff --git a/pymongo/database.py b/pymongo/database.py index a6c1275126..4f5f931352 100644 --- a/pymongo/database.py +++ b/pymongo/database.py @@ -13,18 +13,21 @@ # limitations under the License. 
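For the `RawBatchCursor` typed above, iteration yields raw BSON batches rather than decoded documents; a hedged sketch using `Collection.find_raw_batches`, which constructs such a cursor (assumes a local mongod and a `test` collection)::

    from bson import decode_all
    from pymongo import MongoClient

    db = MongoClient().test  # assumes a locally running mongod
    for batch in db.test.find_raw_batches({"x": 1}):
        documents = decode_all(batch)  # each batch is one bytes blob of BSON documents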
"""Database level operations.""" +from typing import (TYPE_CHECKING, Any, Dict, Generic, List, Mapping, MutableMapping, Optional, + Sequence, Union) -from bson.codec_options import DEFAULT_CODEC_OPTIONS +from bson.codec_options import DEFAULT_CODEC_OPTIONS, CodecOptions from bson.dbref import DBRef from bson.son import SON +from bson.timestamp import Timestamp from pymongo import common from pymongo.aggregation import _DatabaseAggregationCommand from pymongo.change_stream import DatabaseChangeStream from pymongo.collection import Collection from pymongo.command_cursor import CommandCursor -from pymongo.errors import (CollectionInvalid, - InvalidName) -from pymongo.read_preferences import ReadPreference +from pymongo.errors import CollectionInvalid, InvalidName +from pymongo.read_preferences import ReadPreference, _ServerMode +from pymongo.typings import _CollationIn, _DocumentType, _Pipeline def _check_name(name): @@ -39,12 +42,24 @@ def _check_name(name): "character %r" % invalid_char) -class Database(common.BaseObject): +if TYPE_CHECKING: + from pymongo.client_session import ClientSession + from pymongo.mongo_client import MongoClient + from pymongo.read_concern import ReadConcern + from pymongo.write_concern import WriteConcern + + +class Database(common.BaseObject, Generic[_DocumentType]): """A Mongo database. """ - - def __init__(self, client, name, codec_options=None, read_preference=None, - write_concern=None, read_concern=None): + def __init__(self, + client: "MongoClient[_DocumentType]", + name: str, + codec_options: Optional[CodecOptions] = None, + read_preference: Optional[_ServerMode] = None, + write_concern: Optional["WriteConcern"] = None, + read_concern: Optional["ReadConcern"] = None, + ) -> None: """Get a database by client and name. Raises :class:`TypeError` if `name` is not an instance of @@ -104,20 +119,24 @@ def __init__(self, client, name, codec_options=None, read_preference=None, _check_name(name) self.__name = name - self.__client = client + self.__client: MongoClient[_DocumentType] = client @property - def client(self): + def client(self) -> "MongoClient[_DocumentType]": """The client instance for this :class:`Database`.""" return self.__client @property - def name(self): + def name(self) -> str: """The name of this :class:`Database`.""" return self.__name - def with_options(self, codec_options=None, read_preference=None, - write_concern=None, read_concern=None): + def with_options(self, + codec_options: Optional[CodecOptions] = None, + read_preference: Optional[_ServerMode] = None, + write_concern: Optional["WriteConcern"] = None, + read_concern: Optional["ReadConcern"] = None, + ) -> "Database[_DocumentType]": """Get a clone of this database changing the specified settings. >>> db1.read_preference @@ -156,22 +175,22 @@ def with_options(self, codec_options=None, read_preference=None, write_concern or self.write_concern, read_concern or self.read_concern) - def __eq__(self, other): + def __eq__(self, other: Any) -> bool: if isinstance(other, Database): return (self.__client == other.client and self.__name == other.name) return NotImplemented - def __ne__(self, other): + def __ne__(self, other: Any) -> bool: return not self == other - def __hash__(self): + def __hash__(self) -> int: return hash((self.__client, self.__name)) def __repr__(self): return "Database(%r, %r)" % (self.__client, self.__name) - def __getattr__(self, name): + def __getattr__(self, name: str) -> Collection[_DocumentType]: """Get a collection of this database by name. 
Raises InvalidName if an invalid collection name is used. @@ -185,7 +204,7 @@ def __getattr__(self, name): " collection, use database[%r]." % (name, name, name)) return self.__getitem__(name) - def __getitem__(self, name): + def __getitem__(self, name: str) -> "Collection[_DocumentType]": """Get a collection of this database by name. Raises InvalidName if an invalid collection name is used. @@ -195,8 +214,13 @@ def __getitem__(self, name): """ return Collection(self, name) - def get_collection(self, name, codec_options=None, read_preference=None, - write_concern=None, read_concern=None): + def get_collection(self, + name: str, + codec_options: Optional[CodecOptions] = None, + read_preference: Optional[_ServerMode] = None, + write_concern: Optional["WriteConcern"] = None, + read_concern: Optional["ReadConcern"] = None, + ) -> Collection[_DocumentType]: """Get a :class:`~pymongo.collection.Collection` with the given name and options. @@ -238,9 +262,15 @@ def get_collection(self, name, codec_options=None, read_preference=None, self, name, False, codec_options, read_preference, write_concern, read_concern) - def create_collection(self, name, codec_options=None, - read_preference=None, write_concern=None, - read_concern=None, session=None, **kwargs): + def create_collection(self, + name: str, + codec_options: Optional[CodecOptions] = None, + read_preference: Optional[_ServerMode] = None, + write_concern: Optional["WriteConcern"] = None, + read_concern: Optional["ReadConcern"] = None, + session: Optional["ClientSession"] = None, + **kwargs: Any, + ) -> Collection[_DocumentType]: """Create a new :class:`~pymongo.collection.Collection` in this database. @@ -286,20 +316,20 @@ def create_collection(self, name, codec_options=None, timeseries collections - ``expireAfterSeconds`` (int): the number of seconds after which a document in a timeseries collection expires - - ``validator`` (dict): a document specifying validation rules or expressions + - ``validator`` (dict): a document specifying validation rules or expressions for the collection - - ``validationLevel`` (str): how strictly to apply the + - ``validationLevel`` (str): how strictly to apply the validation rules to existing documents during an update. The default level is "strict" - ``validationAction`` (str): whether to "error" on invalid documents - (the default) or just "warn" about the violations but allow invalid + (the default) or just "warn" about the violations but allow invalid documents to be inserted - ``indexOptionDefaults`` (dict): a document specifying a default configuration for indexes when creating a collection - - ``viewOn`` (str): the name of the source collection or view from which + - ``viewOn`` (str): the name of the source collection or view from which to create the view - ``pipeline`` (list): a list of aggregation pipeline stages - - ``comment`` (str): a user-provided comment to attach to this command. + - ``comment`` (str): a user-provided comment to attach to this command. This option is only supported on MongoDB >= 4.4. .. versionchanged:: 3.11 @@ -330,7 +360,11 @@ def create_collection(self, name, codec_options=None, read_preference, write_concern, read_concern, session=s, **kwargs) - def aggregate(self, pipeline, session=None, **kwargs): + def aggregate(self, + pipeline: _Pipeline, + session: Optional["ClientSession"] = None, + **kwargs: Any + ) -> CommandCursor[_DocumentType]: """Perform a database-level aggregation. 
See the `aggregation pipeline`_ documentation for a list of stages @@ -400,9 +434,17 @@ def aggregate(self, pipeline, session=None, **kwargs): cmd.get_cursor, cmd.get_read_preference(s), s, retryable=not cmd._performs_write) - def watch(self, pipeline=None, full_document=None, resume_after=None, - max_await_time_ms=None, batch_size=None, collation=None, - start_at_operation_time=None, session=None, start_after=None): + def watch(self, + pipeline: Optional[_Pipeline] = None, + full_document: Optional[str] = None, + resume_after: Optional[Mapping[str, Any]] = None, + max_await_time_ms: Optional[int] = None, + batch_size: Optional[int] = None, + collation: Optional[_CollationIn] = None, + start_at_operation_time: Optional[Timestamp] = None, + session: Optional["ClientSession"] = None, + start_after: Optional[Mapping[str, Any]] = None, + ) -> DatabaseChangeStream[_DocumentType]: """Watch changes on this database. Performs an aggregation with an implicit initial ``$changeStream`` @@ -515,9 +557,16 @@ def _command(self, sock_info, command, value=1, check=True, session=s, client=self.__client) - def command(self, command, value=1, check=True, - allowable_errors=None, read_preference=None, - codec_options=DEFAULT_CODEC_OPTIONS, session=None, **kwargs): + def command(self, + command: Union[str, MutableMapping[str, Any]], + value: Any = 1, + check: bool = True, + allowable_errors: Optional[Sequence[Union[str, int]]] = None, + read_preference: Optional[_ServerMode] = None, + codec_options: Optional[CodecOptions] = DEFAULT_CODEC_OPTIONS, + session: Optional["ClientSession"] = None, + **kwargs: Any, + ) -> Dict[str, Any]: """Issue a MongoDB command. Send command `command` to the database and return the @@ -648,7 +697,11 @@ def _list_collections(self, sock_info, session, read_preference, **kwargs): cmd_cursor._maybe_pin_connection(sock_info) return cmd_cursor - def list_collections(self, session=None, filter=None, **kwargs): + def list_collections(self, + session: Optional["ClientSession"] = None, + filter: Optional[Mapping[str, Any]] = None, + **kwargs: Any + ) -> CommandCursor[Dict[str, Any]]: """Get a cursor over the collections of this database. :Parameters: @@ -680,7 +733,11 @@ def _cmd(session, server, sock_info, read_preference): return self.__client._retryable_read( _cmd, read_pref, session) - def list_collection_names(self, session=None, filter=None, **kwargs): + def list_collection_names(self, + session: Optional["ClientSession"] = None, + filter: Optional[Mapping[str, Any]] = None, + **kwargs: Any + ) -> List[str]: """Get a list of all the collection names in this database. For example, to list all non-system collections:: @@ -717,7 +774,10 @@ def list_collection_names(self, session=None, filter=None, **kwargs): return [result["name"] for result in self.list_collections(session=session, **kwargs)] - def drop_collection(self, name_or_collection, session=None): + def drop_collection(self, + name_or_collection: Union[str, Collection], + session: Optional["ClientSession"] = None + ) -> Dict[str, Any]: """Drop a collection. 
:Parameters: @@ -752,9 +812,13 @@ def drop_collection(self, name_or_collection, session=None): parse_write_concern_error=True, session=session) - def validate_collection(self, name_or_collection, - scandata=False, full=False, session=None, - background=None): + def validate_collection(self, + name_or_collection: Union[str, Collection], + scandata: bool = False, + full: bool = False, + session: Optional["ClientSession"] = None, + background: Optional[bool] = None, + ) -> Dict[str, Any]: """Validate a collection. Returns a dict of validation info. Raises CollectionInvalid if @@ -827,20 +891,23 @@ def validate_collection(self, name_or_collection, return result - def __iter__(self): + def __iter__(self) -> "Database[_DocumentType]": return self - def __next__(self): + def __next__(self) -> "Database[_DocumentType]": raise TypeError("'Database' object is not iterable") next = __next__ - def __bool__(self): + def __bool__(self) -> bool: raise NotImplementedError("Database objects do not implement truth " "value testing or bool(). Please compare " "with None instead: database is not None") - def dereference(self, dbref, session=None, **kwargs): + def dereference(self, dbref: DBRef, + session: Optional["ClientSession"] = None, + **kwargs: Any + ) -> Optional[_DocumentType]: """Dereference a :class:`~bson.dbref.DBRef`, getting the document it points to. diff --git a/pymongo/driver_info.py b/pymongo/driver_info.py index 5e0843e4df..1bb599af37 100644 --- a/pymongo/driver_info.py +++ b/pymongo/driver_info.py @@ -15,6 +15,7 @@ """Advanced options for MongoDB drivers implemented on top of PyMongo.""" from collections import namedtuple +from typing import Optional class DriverInfo(namedtuple('DriverInfo', ['name', 'version', 'platform'])): @@ -26,7 +27,7 @@ class DriverInfo(namedtuple('DriverInfo', ['name', 'version', 'platform'])): like 'MyDriver', '1.2.3', 'some platform info'. Any of these strings may be None to accept PyMongo's default. 
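For example, a wrapping library might identify itself like this (a
sketch; the strings shown are illustrative, passed via the ``driver``
keyword of :class:`~pymongo.mongo_client.MongoClient`)::

    client = MongoClient(driver=DriverInfo('MyFramework', '1.0', 'CPython 3.9'))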
""" - def __new__(cls, name, version=None, platform=None): + def __new__(cls, name: str, version: Optional[str] = None, platform: Optional[str] = None) -> "DriverInfo": self = super(DriverInfo, cls).__new__(cls, name, version, platform) for key, value in self._asdict().items(): if value is not None and not isinstance(value, str): diff --git a/pymongo/encryption.py b/pymongo/encryption.py index 4b08492ee9..b076f490f4 100644 --- a/pymongo/encryption.py +++ b/pymongo/encryption.py @@ -15,15 +15,16 @@ """Support for explicit client-side field level encryption.""" import contextlib -import os -import subprocess import uuid import weakref +from typing import Any, Mapping, Optional, Sequence try: from pymongocrypt.auto_encrypter import AutoEncrypter from pymongocrypt.errors import MongoCryptError - from pymongocrypt.explicit_encrypter import ExplicitEncrypter + from pymongocrypt.explicit_encrypter import ( + ExplicitEncrypter + ) from pymongocrypt.mongocrypt import MongoCryptOptions from pymongocrypt.state_machine import MongoCryptCallback _HAVE_PYMONGOCRYPT = True @@ -32,29 +33,22 @@ MongoCryptCallback = object from bson import _dict_to_bson, decode, encode +from bson.binary import STANDARD, UUID_SUBTYPE, Binary from bson.codec_options import CodecOptions -from bson.binary import (Binary, - STANDARD, - UUID_SUBTYPE) from bson.errors import BSONError -from bson.raw_bson import (DEFAULT_RAW_BSON_OPTIONS, - RawBSONDocument, +from bson.raw_bson import (DEFAULT_RAW_BSON_OPTIONS, RawBSONDocument, _inflate_bson) from bson.son import SON - -from pymongo.errors import (ConfigurationError, - EncryptionError, - InvalidOperation, - ServerSelectionTimeoutError) +from pymongo.daemon import _spawn_daemon from pymongo.encryption_options import AutoEncryptionOpts +from pymongo.errors import (ConfigurationError, EncryptionError, + InvalidOperation, ServerSelectionTimeoutError) from pymongo.mongo_client import MongoClient -from pymongo.pool import _configured_socket, PoolOptions +from pymongo.pool import PoolOptions, _configured_socket from pymongo.read_concern import ReadConcern from pymongo.ssl_support import get_ssl_context from pymongo.uri_parser import parse_host from pymongo.write_concern import WriteConcern -from pymongo.daemon import _spawn_daemon - _HTTPS_PORT = 443 _KMS_CONNECT_TIMEOUT = 10 # TODO: CDRIVER-3262 will define this value. @@ -80,9 +74,10 @@ def _wrap_encryption_errors(): raise EncryptionError(exc) -class _EncryptionIO(MongoCryptCallback): +class _EncryptionIO(MongoCryptCallback): # type: ignore def __init__(self, client, key_vault_coll, mongocryptd_client, opts): """Internal class to perform I/O on behalf of pymongocrypt.""" + self.client_ref: Any # Use a weak ref to break reference cycle. if client is not None: self.client_ref = weakref.ref(client) @@ -355,11 +350,17 @@ class Algorithm(object): "AEAD_AES_256_CBC_HMAC_SHA_512-Random") + class ClientEncryption(object): """Explicit client-side field level encryption.""" - def __init__(self, kms_providers, key_vault_namespace, key_vault_client, - codec_options, kms_tls_options=None): + def __init__(self, + kms_providers: Mapping[str, Any], + key_vault_namespace: str, + key_vault_client: MongoClient, + codec_options: CodecOptions, + kms_tls_options: Optional[Mapping[str, Any]] = None + ) -> None: """Explicit client-side field level encryption. 
The ClientEncryption class encapsulates explicit operations on a key @@ -449,12 +450,15 @@ def __init__(self, kms_providers, key_vault_namespace, key_vault_client, opts = AutoEncryptionOpts(kms_providers, key_vault_namespace, kms_tls_options=kms_tls_options) - self._io_callbacks = _EncryptionIO(None, key_vault_coll, None, opts) + self._io_callbacks: Optional[_EncryptionIO] = _EncryptionIO(None, key_vault_coll, None, opts) self._encryption = ExplicitEncrypter( self._io_callbacks, MongoCryptOptions(kms_providers, None)) - def create_data_key(self, kms_provider, master_key=None, - key_alt_names=None): + def create_data_key(self, + kms_provider: str, + master_key: Optional[Mapping[str, Any]] = None, + key_alt_names: Optional[Sequence[str]] = None + ) -> Binary: """Create and insert a new data key into the key vault collection. :Parameters: @@ -526,7 +530,12 @@ def create_data_key(self, kms_provider, master_key=None, kms_provider, master_key=master_key, key_alt_names=key_alt_names) - def encrypt(self, value, algorithm, key_id=None, key_alt_name=None): + def encrypt(self, + value: Any, + algorithm: str, + key_id: Optional[Binary] = None, + key_alt_name: Optional[str] = None + ) -> Binary: """Encrypt a BSON value with a given key and algorithm. Note that exactly one of ``key_id`` or ``key_alt_name`` must be @@ -557,7 +566,7 @@ def encrypt(self, value, algorithm, key_id=None, key_alt_name=None): doc, algorithm, key_id=key_id, key_alt_name=key_alt_name) return decode(encrypted_doc)['v'] - def decrypt(self, value): + def decrypt(self, value: Binary) -> Any: """Decrypt an encrypted value. :Parameters: @@ -578,17 +587,17 @@ def decrypt(self, value): return decode(decrypted_doc, codec_options=self._codec_options)['v'] - def __enter__(self): + def __enter__(self) -> "ClientEncryption": return self - def __exit__(self, exc_type, exc_val, exc_tb): + def __exit__(self, exc_type: Any, exc_val: Any, exc_tb: Any) -> None: self.close() def _check_closed(self): if self._encryption is None: raise InvalidOperation("Cannot use closed ClientEncryption") - def close(self): + def close(self) -> None: """Release resources. 
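For example, a sketch that releases the instance explicitly
(``client_encryption`` as constructed above)::

    key_id = client_encryption.create_data_key("local")
    client_encryption.close()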
Note that using this class in a with-statement will automatically call diff --git a/pymongo/encryption_options.py b/pymongo/encryption_options.py index d0c2d5ce72..21a13f6a5e 100644 --- a/pymongo/encryption_options.py +++ b/pymongo/encryption_options.py @@ -15,6 +15,7 @@ """Support for automatic client-side field level encryption.""" import copy +from typing import TYPE_CHECKING, Any, List, Mapping, Optional try: import pymongocrypt @@ -25,19 +26,25 @@ from pymongo.errors import ConfigurationError from pymongo.uri_parser import _parse_kms_tls_options +if TYPE_CHECKING: + from pymongo.mongo_client import MongoClient + class AutoEncryptionOpts(object): """Options to configure automatic client-side field level encryption.""" - def __init__(self, kms_providers, key_vault_namespace, - key_vault_client=None, - schema_map=None, - bypass_auto_encryption=False, - mongocryptd_uri='mongodb://localhost:27020', - mongocryptd_bypass_spawn=False, - mongocryptd_spawn_path='mongocryptd', - mongocryptd_spawn_args=None, - kms_tls_options=None): + def __init__(self, + kms_providers: Mapping[str, Any], + key_vault_namespace: str, + key_vault_client: Optional["MongoClient"] = None, + schema_map: Optional[Mapping[str, Any]] = None, + bypass_auto_encryption: Optional[bool] = False, + mongocryptd_uri: str = 'mongodb://localhost:27020', + mongocryptd_bypass_spawn: bool = False, + mongocryptd_spawn_path: str = 'mongocryptd', + mongocryptd_spawn_args: Optional[List[str]] = None, + kms_tls_options: Optional[Mapping[str, Any]] = None + ) -> None: """Options to configure automatic client-side field level encryption. Automatic client-side field level encryption requires MongoDB 4.2 @@ -152,8 +159,9 @@ def __init__(self, kms_providers, key_vault_namespace, self._mongocryptd_uri = mongocryptd_uri self._mongocryptd_bypass_spawn = mongocryptd_bypass_spawn self._mongocryptd_spawn_path = mongocryptd_spawn_path - self._mongocryptd_spawn_args = (copy.copy(mongocryptd_spawn_args) or - ['--idleShutdownTimeoutSecs=60']) + if mongocryptd_spawn_args is None: + mongocryptd_spawn_args = ['--idleShutdownTimeoutSecs=60'] + self._mongocryptd_spawn_args = mongocryptd_spawn_args if not isinstance(self._mongocryptd_spawn_args, list): raise TypeError('mongocryptd_spawn_args must be a list') if not any('idleShutdownTimeoutSecs' in s diff --git a/pymongo/errors.py b/pymongo/errors.py index 0ee35827a7..89c45730c9 100644 --- a/pymongo/errors.py +++ b/pymongo/errors.py @@ -13,6 +13,8 @@ # limitations under the License. """Exceptions raised by PyMongo.""" +from typing import (Any, Iterable, List, Mapping, Optional, Sequence, Tuple, + Union) from bson.errors import * @@ -23,18 +25,21 @@ try: from ssl import CertificateError as _CertificateError except ImportError: - class _CertificateError(ValueError): + class _CertificateError(ValueError): # type: ignore pass class PyMongoError(Exception): """Base class for all PyMongo exceptions.""" - def __init__(self, message='', error_labels=None): + def __init__(self, + message: str = '', + error_labels: Optional[Iterable[str]] = None + ) -> None: super(PyMongoError, self).__init__(message) self._message = message self._error_labels = set(error_labels or []) - def has_error_label(self, label): + def has_error_label(self, label: str) -> bool: """Return True if this error contains the given label. .. versionadded:: 3.7 @@ -70,10 +75,17 @@ class AutoReconnect(ConnectionFailure): Subclass of :exc:`~pymongo.errors.ConnectionFailure`. 
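For example, one common handling pattern (a sketch; retrying
blindly is not always appropriate)::

    try:
        client.admin.command("ping")
    except AutoReconnect:
        pass  # The driver reconnects in the background; retry if safe.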
""" - def __init__(self, message='', errors=None): + errors: Union[Mapping[str, Any], Sequence] + details: Union[Mapping[str, Any], Sequence] + + def __init__(self, + message: str = '', + errors: Optional[Union[Mapping[str, Any], Sequence]] = None + ) -> None: error_labels = None - if errors is not None and isinstance(errors, dict): - error_labels = errors.get('errorLabels') + if errors is not None: + if isinstance(errors, dict): + error_labels = errors.get('errorLabels') super(AutoReconnect, self).__init__(message, error_labels) self.errors = self.details = errors or [] @@ -109,7 +121,10 @@ class NotPrimaryError(AutoReconnect): .. versionadded:: 3.12 """ - def __init__(self, message='', errors=None): + def __init__(self, + message: str = '', + errors: Optional[Union[Mapping[str, Any], List]] = None + ) -> None: super(NotPrimaryError, self).__init__( _format_detailed_error(message, errors), errors=errors) @@ -139,7 +154,12 @@ class OperationFailure(PyMongoError): The :attr:`details` attribute. """ - def __init__(self, error, code=None, details=None, max_wire_version=None): + def __init__(self, + error: str, + code: Optional[int] = None, + details: Optional[Mapping[str, Any]] = None, + max_wire_version: Optional[int] = None, + ) -> None: error_labels = None if details is not None: error_labels = details.get('errorLabels') @@ -154,13 +174,13 @@ def _max_wire_version(self): return self.__max_wire_version @property - def code(self): + def code(self) -> Optional[int]: """The error code returned by the server, if any. """ return self.__code @property - def details(self): + def details(self) -> Optional[Mapping[str, Any]]: """The complete error document returned by the server. Depending on the error that occurred, the error document @@ -225,14 +245,17 @@ class BulkWriteError(OperationFailure): .. versionadded:: 2.7 """ - def __init__(self, results): + details: Mapping[str, Any] + + def __init__(self, results: Mapping[str, Any]) -> None: super(BulkWriteError, self).__init__( "batch op errors occurred", 65, results) - def __reduce__(self): + def __reduce__(self) -> Tuple[Any, Any]: return self.__class__, (self.details,) + class InvalidOperation(PyMongoError): """Raised when a client attempts to perform an invalid operation.""" @@ -264,12 +287,12 @@ class EncryptionError(PyMongoError): .. versionadded:: 3.9 """ - def __init__(self, cause): + def __init__(self, cause: Exception) -> None: super(EncryptionError, self).__init__(str(cause)) self.__cause = cause @property - def cause(self): + def cause(self) -> Exception: """The exception that caused this encryption or decryption error.""" return self.__cause diff --git a/pymongo/event_loggers.py b/pymongo/event_loggers.py index 7d5501c372..f0857f8f45 100644 --- a/pymongo/event_loggers.py +++ b/pymongo/event_loggers.py @@ -26,8 +26,6 @@ ``MongoClient(event_listeners=[CommandLogger()])`` """ - - import logging from pymongo import monitoring @@ -42,18 +40,18 @@ class CommandLogger(monitoring.CommandListener): logs them at the `INFO` severity level using :mod:`logging`. .. 
versionadded:: 3.11 """ - def started(self, event): + def started(self, event: monitoring.CommandStartedEvent) -> None: logging.info("Command {0.command_name} with request id " "{0.request_id} started on server " "{0.connection_id}".format(event)) - def succeeded(self, event): + def succeeded(self, event: monitoring.CommandSucceededEvent) -> None: logging.info("Command {0.command_name} with request id " "{0.request_id} on server {0.connection_id} " "succeeded in {0.duration_micros} " "microseconds".format(event)) - def failed(self, event): + def failed(self, event: monitoring.CommandFailedEvent) -> None: logging.info("Command {0.command_name} with request id " "{0.request_id} on server {0.connection_id} " "failed in {0.duration_micros} " @@ -70,11 +68,11 @@ class ServerLogger(monitoring.ServerListener): .. versionadded:: 3.11 """ - def opened(self, event): + def opened(self, event: monitoring.ServerOpeningEvent) -> None: logging.info("Server {0.server_address} added to topology " "{0.topology_id}".format(event)) - def description_changed(self, event): + def description_changed(self, event: monitoring.ServerDescriptionChangedEvent) -> None: previous_server_type = event.previous_description.server_type new_server_type = event.new_description.server_type if new_server_type != previous_server_type: @@ -84,7 +82,7 @@ def description_changed(self, event): "{0.previous_description.server_type_name} to " "{0.new_description.server_type_name}".format(event)) - def closed(self, event): + def closed(self, event: monitoring.ServerClosedEvent) -> None: logging.warning("Server {0.server_address} removed from topology " "{0.topology_id}".format(event)) @@ -99,17 +97,17 @@ class HeartbeatLogger(monitoring.ServerHeartbeatListener): .. versionadded:: 3.11 """ - def started(self, event): + def started(self, event: monitoring.ServerHeartbeatStartedEvent) -> None: logging.info("Heartbeat sent to server " "{0.connection_id}".format(event)) - def succeeded(self, event): + def succeeded(self, event: monitoring.ServerHeartbeatSucceededEvent) -> None: # The reply.document attribute was added in PyMongo 3.4. logging.info("Heartbeat to server {0.connection_id} " "succeeded with reply " "{0.reply.document}".format(event)) - def failed(self, event): + def failed(self, event: monitoring.ServerHeartbeatFailedEvent) -> None: logging.warning("Heartbeat to server {0.connection_id} " "failed with error {0.reply}".format(event)) @@ -124,11 +122,11 @@ class TopologyLogger(monitoring.TopologyListener): .. versionadded:: 3.11 """ - def opened(self, event): + def opened(self, event: monitoring.TopologyOpenedEvent) -> None: logging.info("Topology with id {0.topology_id} " "opened".format(event)) - def description_changed(self, event): + def description_changed(self, event: monitoring.TopologyDescriptionChangedEvent) -> None: logging.info("Topology description updated for " "topology id {0.topology_id}".format(event)) previous_topology_type = event.previous_description.topology_type @@ -146,7 +144,7 @@ def description_changed(self, event): if not event.new_description.has_readable_server(): logging.warning("No readable servers available.") - def closed(self, event): + def closed(self, event: monitoring.TopologyClosedEvent) -> None: logging.info("Topology with id {0.topology_id} " "closed".format(event)) @@ -168,43 +166,43 @@ class ConnectionPoolLogger(monitoring.ConnectionPoolListener): .. 
versionadded:: 3.11 """ - def pool_created(self, event): + def pool_created(self, event: monitoring.PoolCreatedEvent) -> None: logging.info("[pool {0.address}] pool created".format(event)) def pool_ready(self, event): logging.info("[pool {0.address}] pool ready".format(event)) - def pool_cleared(self, event): + def pool_cleared(self, event: monitoring.PoolClearedEvent) -> None: logging.info("[pool {0.address}] pool cleared".format(event)) - def pool_closed(self, event): + def pool_closed(self, event: monitoring.PoolClosedEvent) -> None: logging.info("[pool {0.address}] pool closed".format(event)) - def connection_created(self, event): + def connection_created(self, event: monitoring.ConnectionCreatedEvent) -> None: logging.info("[pool {0.address}][conn #{0.connection_id}] " "connection created".format(event)) - def connection_ready(self, event): + def connection_ready(self, event: monitoring.ConnectionReadyEvent) -> None: logging.info("[pool {0.address}][conn #{0.connection_id}] " "connection setup succeeded".format(event)) - def connection_closed(self, event): + def connection_closed(self, event: monitoring.ConnectionClosedEvent) -> None: logging.info("[pool {0.address}][conn #{0.connection_id}] " "connection closed, reason: " "{0.reason}".format(event)) - def connection_check_out_started(self, event): + def connection_check_out_started(self, event: monitoring.ConnectionCheckOutStartedEvent) -> None: logging.info("[pool {0.address}] connection check out " "started".format(event)) - def connection_check_out_failed(self, event): + def connection_check_out_failed(self, event: monitoring.ConnectionCheckOutFailedEvent) -> None: logging.info("[pool {0.address}] connection check out " "failed, reason: {0.reason}".format(event)) - def connection_checked_out(self, event): + def connection_checked_out(self, event: monitoring.ConnectionCheckedOutEvent) -> None: logging.info("[pool {0.address}][conn #{0.connection_id}] " "connection checked out of pool".format(event)) - def connection_checked_in(self, event): + def connection_checked_in(self, event: monitoring.ConnectionCheckedInEvent) -> None: logging.info("[pool {0.address}][conn #{0.connection_id}] " "connection checked into pool".format(event)) diff --git a/pymongo/hello.py b/pymongo/hello.py index 0ad06e9619..ba09d80e32 100644 --- a/pymongo/hello.py +++ b/pymongo/hello.py @@ -14,10 +14,15 @@ """Helpers for the 'hello' and legacy hello commands.""" +import copy +import datetime import itertools +from typing import Any, Generic, List, Mapping, Optional, Set, Tuple +from bson.objectid import ObjectId from pymongo import common from pymongo.server_type import SERVER_TYPE +from pymongo.typings import _DocumentType class HelloCompat: @@ -56,7 +61,7 @@ def _get_server_type(doc): return SERVER_TYPE.Standalone -class Hello(object): +class Hello(Generic[_DocumentType]): """Parse a hello response from the server. .. 
versionadded:: 3.12 @@ -64,9 +69,9 @@ class Hello(object): __slots__ = ('_doc', '_server_type', '_is_writable', '_is_readable', '_awaitable') - def __init__(self, doc, awaitable=False): + def __init__(self, doc: _DocumentType, awaitable: bool = False) -> None: self._server_type = _get_server_type(doc) - self._doc = doc + self._doc: _DocumentType = doc self._is_writable = self._server_type in ( SERVER_TYPE.RSPrimary, SERVER_TYPE.Standalone, @@ -79,19 +84,19 @@ def __init__(self, doc, awaitable=False): self._awaitable = awaitable @property - def document(self): + def document(self) -> _DocumentType: """The complete hello command response document. .. versionadded:: 3.4 """ - return self._doc.copy() + return copy.copy(self._doc) @property - def server_type(self): + def server_type(self) -> int: return self._server_type @property - def all_hosts(self): + def all_hosts(self) -> Set[Tuple[str, int]]: """List of hosts, passives, and arbiters known to this server.""" return set(map(common.clean_node, itertools.chain( self._doc.get('hosts', []), @@ -99,12 +104,12 @@ def all_hosts(self): self._doc.get('arbiters', [])))) @property - def tags(self): + def tags(self) -> Mapping[str, Any]: """Replica set member tags or empty dict.""" return self._doc.get('tags', {}) @property - def primary(self): + def primary(self) -> Optional[Tuple[str, int]]: """This server's opinion about who the primary is, or None.""" if self._doc.get('primary'): return common.partition_node(self._doc['primary']) @@ -112,70 +117,71 @@ def primary(self): return None @property - def replica_set_name(self): + def replica_set_name(self) -> Optional[str]: """Replica set name or None.""" return self._doc.get('setName') @property - def max_bson_size(self): + def max_bson_size(self) -> int: return self._doc.get('maxBsonObjectSize', common.MAX_BSON_SIZE) @property - def max_message_size(self): + def max_message_size(self) -> int: return self._doc.get('maxMessageSizeBytes', 2 * self.max_bson_size) @property - def max_write_batch_size(self): + def max_write_batch_size(self) -> int: return self._doc.get('maxWriteBatchSize', common.MAX_WRITE_BATCH_SIZE) @property - def min_wire_version(self): + def min_wire_version(self) -> int: return self._doc.get('minWireVersion', common.MIN_WIRE_VERSION) @property - def max_wire_version(self): + def max_wire_version(self) -> int: return self._doc.get('maxWireVersion', common.MAX_WIRE_VERSION) @property - def set_version(self): + def set_version(self) -> Optional[int]: return self._doc.get('setVersion') @property - def election_id(self): + def election_id(self) -> Optional[ObjectId]: return self._doc.get('electionId') @property - def cluster_time(self): + def cluster_time(self) -> Optional[Mapping[str, Any]]: return self._doc.get('$clusterTime') @property - def logical_session_timeout_minutes(self): + def logical_session_timeout_minutes(self) -> Optional[int]: return self._doc.get('logicalSessionTimeoutMinutes') @property - def is_writable(self): + def is_writable(self) -> bool: return self._is_writable @property - def is_readable(self): + def is_readable(self) -> bool: return self._is_readable @property - def me(self): + def me(self) -> Optional[Tuple[str, int]]: me = self._doc.get('me') if me: return common.clean_node(me) + return None @property - def last_write_date(self): + def last_write_date(self) -> Optional[datetime.datetime]: return self._doc.get('lastWrite', {}).get('lastWriteDate') @property - def compressors(self): + def compressors(self) -> Optional[List[str]]: return 
self._doc.get('compression') @property - def sasl_supported_mechs(self): + def sasl_supported_mechs(self) -> List[str]: """Supported authentication mechanisms for the current user. For example:: @@ -187,22 +193,22 @@ def sasl_supported_mechs(self): return self._doc.get('saslSupportedMechs', []) @property - def speculative_authenticate(self): + def speculative_authenticate(self) -> Optional[Mapping[str, Any]]: """The speculativeAuthenticate field.""" return self._doc.get('speculativeAuthenticate') @property - def topology_version(self): + def topology_version(self) -> Optional[Mapping[str, Any]]: return self._doc.get('topologyVersion') @property - def awaitable(self): + def awaitable(self) -> bool: return self._awaitable @property - def service_id(self): + def service_id(self) -> Optional[ObjectId]: return self._doc.get('serviceId') @property - def hello_ok(self): + def hello_ok(self) -> bool: return self._doc.get('helloOk', False) diff --git a/pymongo/helpers.py b/pymongo/helpers.py index a9d40d8103..b2726dca6b 100644 --- a/pymongo/helpers.py +++ b/pymongo/helpers.py @@ -16,18 +16,14 @@ import sys import traceback - from collections import abc +from typing import Any from bson.son import SON from pymongo import ASCENDING -from pymongo.errors import (CursorNotFound, - DuplicateKeyError, - ExecutionTimeout, - NotPrimaryError, - OperationFailure, - WriteError, - WriteConcernError, +from pymongo.errors import (CursorNotFound, DuplicateKeyError, + ExecutionTimeout, NotPrimaryError, + OperationFailure, WriteConcernError, WriteError, WTimeoutError) from pymongo.hello import HelloCompat @@ -95,7 +91,7 @@ def _index_document(index_list): if not len(index_list): raise ValueError("key_or_list must not be the empty list") - index = SON() + index: SON[str, Any] = SON() for (key, value) in index_list: if not isinstance(key, str): raise TypeError( diff --git a/pymongo/message.py b/pymongo/message.py index f632214a08..ac6000cfd2 100644 --- a/pymongo/message.py +++ b/pymongo/message.py @@ -23,8 +23,8 @@ import datetime import random import struct - from io import BytesIO as _BytesIO +from typing import Any import bson from bson import (CodecOptions, @@ -32,30 +32,24 @@ _decode_selective, _dict_to_bson, _make_c_string) -from bson import codec_options from bson.int64 import Int64 -from bson.raw_bson import (_inflate_bson, DEFAULT_RAW_BSON_OPTIONS, - RawBSONDocument) +from bson.raw_bson import (DEFAULT_RAW_BSON_OPTIONS, RawBSONDocument, + _inflate_bson) from bson.son import SON try: - from pymongo import _cmessage + from pymongo import _cmessage # type: ignore[attr-defined] _use_c = True except ImportError: _use_c = False -from pymongo.errors import (ConfigurationError, - CursorNotFound, - DocumentTooLarge, - ExecutionTimeout, - InvalidOperation, - NotPrimaryError, - OperationFailure, - ProtocolError) +from pymongo.errors import (ConfigurationError, CursorNotFound, + DocumentTooLarge, ExecutionTimeout, + InvalidOperation, NotPrimaryError, + OperationFailure, ProtocolError) from pymongo.hello import HelloCompat from pymongo.read_preferences import ReadPreference from pymongo.write_concern import WriteConcern - MAX_INT32 = 2147483647 MIN_INT32 = -2147483648 @@ -457,6 +451,7 @@ def use_command(self, sock_info): class _CursorAddress(tuple): """The server address (host, port) of a cursor, with namespace property.""" + __namespace: Any def __new__(cls, address, namespace): self = tuple.__new__(cls, address) @@ -762,6 +757,7 @@ def unack_write(self, cmd, request_id, msg, max_doc_size, docs): """A proxy for 
SocketInfo.unack_write that handles event publishing. """ if self.publish: + assert self.start_time is not None duration = datetime.datetime.now() - self.start_time cmd = self._start(cmd, request_id, docs) start = datetime.datetime.now() @@ -777,6 +773,7 @@ def unack_write(self, cmd, request_id, msg, max_doc_size, docs): self._succeed(request_id, reply, duration) except Exception as exc: if self.publish: + assert self.start_time is not None duration = (datetime.datetime.now() - start) + duration if isinstance(exc, OperationFailure): failure = _convert_write_result( @@ -795,6 +792,7 @@ def write_command(self, cmd, request_id, msg, docs): """A proxy for SocketInfo.write_command that handles event publishing. """ if self.publish: + assert self.start_time is not None duration = datetime.datetime.now() - self.start_time self._start(cmd, request_id, docs) start = datetime.datetime.now() @@ -1171,7 +1169,8 @@ def raw_response(self, cursor_id=None, user_fields=None): if error_object["$err"].startswith(HelloCompat.LEGACY_ERROR): raise NotPrimaryError(error_object["$err"], error_object) elif error_object.get("code") == 50: - raise ExecutionTimeout(error_object.get("$err"), + default_msg = "operation exceeded time limit" + raise ExecutionTimeout(error_object.get("$err", default_msg), error_object.get("code"), error_object) raise OperationFailure("database error: %s" % diff --git a/pymongo/mongo_client.py b/pymongo/mongo_client.py index 052ade3853..975fc87610 100644 --- a/pymongo/mongo_client.py +++ b/pymongo/mongo_client.py @@ -34,46 +34,41 @@ import contextlib import threading import weakref - from collections import defaultdict +from typing import (TYPE_CHECKING, Any, Dict, FrozenSet, Generic, List, + Mapping, Optional, Sequence, Set, Tuple, Type, Union, cast) -from bson.codec_options import DEFAULT_CODEC_OPTIONS +import bson +from bson.codec_options import (DEFAULT_CODEC_OPTIONS, CodecOptions, + TypeRegistry) from bson.son import SON -from pymongo import (common, - database, - helpers, - message, - periodic_executor, - uri_parser, - client_session) -from pymongo.change_stream import ClusterChangeStream +from bson.timestamp import Timestamp +from pymongo import (client_session, common, database, helpers, message, + periodic_executor, uri_parser) +from pymongo.change_stream import ChangeStream, ClusterChangeStream from pymongo.client_options import ClientOptions from pymongo.command_cursor import CommandCursor -from pymongo.errors import (AutoReconnect, - BulkWriteError, - ConfigurationError, - ConnectionFailure, - InvalidOperation, - NotPrimaryError, - OperationFailure, - PyMongoError, +from pymongo.errors import (AutoReconnect, BulkWriteError, ConfigurationError, + ConnectionFailure, InvalidOperation, + NotPrimaryError, OperationFailure, PyMongoError, ServerSelectionTimeoutError) from pymongo.pool import ConnectionClosedReason -from pymongo.read_preferences import ReadPreference +from pymongo.read_preferences import ReadPreference, _ServerMode from pymongo.server_selectors import writable_server_selector from pymongo.server_type import SERVER_TYPE -from pymongo.topology import (Topology, - _ErrorContext) -from pymongo.topology_description import TOPOLOGY_TYPE from pymongo.settings import TopologySettings -from pymongo.uri_parser import (_handle_option_deprecations, - _handle_security_options, - _normalize_options, - _check_options) -from pymongo.write_concern import DEFAULT_WRITE_CONCERN +from pymongo.topology import Topology, _ErrorContext +from pymongo.topology_description import TOPOLOGY_TYPE, 
TopologyDescription +from pymongo.typings import _CollationIn, _DocumentType, _Pipeline +from pymongo.uri_parser import (_check_options, _handle_option_deprecations, + _handle_security_options, _normalize_options) +from pymongo.write_concern import DEFAULT_WRITE_CONCERN, WriteConcern + +if TYPE_CHECKING: + from pymongo.read_concern import ReadConcern -class MongoClient(common.BaseObject): +class MongoClient(common.BaseObject, Generic[_DocumentType]): """ A client-side representation of a MongoDB cluster. @@ -89,15 +84,15 @@ class MongoClient(common.BaseObject): # No host/port; these are retrieved from TopologySettings. _constructor_args = ('document_class', 'tz_aware', 'connect') - def __init__( - self, - host=None, - port=None, - document_class=dict, - tz_aware=None, - connect=None, - type_registry=None, - **kwargs): + def __init__(self, + host: Optional[Union[str, Sequence[str]]] = None, + port: Optional[int] = None, + document_class: Type[_DocumentType] = dict, + tz_aware: Optional[bool] = None, + connect: Optional[bool] = None, + type_registry: Optional[TypeRegistry] = None, + **kwargs: Any, + ) -> None: """Client for a MongoDB instance, a replica set, or a set of mongoses. The client object is thread-safe and has connection-pooling built in. @@ -621,7 +616,7 @@ def __init__( client.__my_database__ """ - self.__init_kwargs = {'host': host, + self.__init_kwargs: Dict[str, Any] = {'host': host, 'port': port, 'document_class': document_class, 'tz_aware': tz_aware, @@ -722,7 +717,7 @@ def __init__( self.__default_database_name = dbase self.__lock = threading.Lock() - self.__kill_cursors_queue = [] + self.__kill_cursors_queue: List = [] self._event_listeners = options.pool_options._event_listeners super(MongoClient, self).__init__(options.codec_options, @@ -765,7 +760,7 @@ def target(): # We strongly reference the executor and it weakly references us via # this closure. When the client is freed, stop the executor soon. - self_ref = weakref.ref(self, executor.close) + self_ref: Any = weakref.ref(self, executor.close) self._kill_cursors_executor = executor if connect: @@ -798,9 +793,17 @@ def _server_property(self, attr_name): return getattr(server.description, attr_name) - def watch(self, pipeline=None, full_document=None, resume_after=None, - max_await_time_ms=None, batch_size=None, collation=None, - start_at_operation_time=None, session=None, start_after=None): + def watch(self, + pipeline: Optional[_Pipeline] = None, + full_document: Optional[str] = None, + resume_after: Optional[Mapping[str, Any]] = None, + max_await_time_ms: Optional[int] = None, + batch_size: Optional[int] = None, + collation: Optional[_CollationIn] = None, + start_at_operation_time: Optional[Timestamp] = None, + session: Optional[client_session.ClientSession] = None, + start_after: Optional[Mapping[str, Any]] = None, + ) -> ChangeStream[_DocumentType]: """Watch changes on this cluster. Performs an aggregation with an implicit initial ``$changeStream`` @@ -891,7 +894,7 @@ def watch(self, pipeline=None, full_document=None, resume_after=None, start_after) @property - def topology_description(self): + def topology_description(self) -> TopologyDescription: """The description of the connected MongoDB deployment. >>> client.topology_description @@ -913,7 +916,7 @@ def topology_description(self): return self._topology.description @property - def address(self): + def address(self) -> Optional[Tuple[str, int]]: """(host, port) of the current standalone, primary, or mongos, or None. 
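For example, once a connection has been established (host and port
shown are illustrative)::

    >>> client.address
    ('localhost', 27017)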
Accessing :attr:`address` raises :exc:`~.errors.InvalidOperation` if @@ -940,7 +943,7 @@ def address(self): return self._server_property('address') @property - def primary(self): + def primary(self) -> Optional[Tuple[str, int]]: """The (host, port) of the current primary of the replica set. Returns ``None`` if this client is not connected to a replica set, @@ -953,7 +956,7 @@ def primary(self): return self._topology.get_primary() @property - def secondaries(self): + def secondaries(self) -> Set[Tuple[str, int]]: """The secondary members known to this client. A sequence of (host, port) pairs. Empty if this client is not @@ -966,7 +969,7 @@ def secondaries(self): return self._topology.get_secondaries() @property - def arbiters(self): + def arbiters(self) -> Set[Tuple[str, int]]: """Arbiters in the replica set. A sequence of (host, port) pairs. Empty if this client is not @@ -976,7 +979,7 @@ def arbiters(self): return self._topology.get_arbiters() @property - def is_primary(self): + def is_primary(self) -> bool: """If this client is connected to a server that can accept writes. True if the current server is a standalone, mongos, or the primary of @@ -987,7 +990,7 @@ def is_primary(self): return self._server_property('is_writable') @property - def is_mongos(self): + def is_mongos(self) -> bool: """If this client is connected to mongos. If the client is not connected, this will block until a connection is established or raise ServerSelectionTimeoutError if no server is available.. @@ -995,7 +998,7 @@ def is_mongos(self): return self._server_property('server_type') == SERVER_TYPE.Mongos @property - def nodes(self): + def nodes(self) -> FrozenSet[Tuple[str, Optional[int]]]: """Set of all currently connected servers. .. warning:: When connected to a replica set the value of :attr:`nodes` @@ -1009,7 +1012,7 @@ def nodes(self): return frozenset(s.address for s in description.known_servers) @property - def options(self): + def options(self) -> ClientOptions: """The configuration options for this client. :Returns: @@ -1040,7 +1043,7 @@ def _end_sessions(self, session_ids): # command. pass - def close(self): + def close(self) -> None: """Cleanup client resources and disconnect from MongoDB. End all server sessions created by this client by sending one or more @@ -1214,7 +1217,7 @@ def _retry_with_session(self, retryable, func, session, bulk): def _retry_internal(self, retryable, func, session, bulk): """Internal retryable write helper.""" max_wire_version = 0 - last_error = None + last_error: Optional[Exception] = None retrying = False def is_retrying(): @@ -1239,6 +1242,7 @@ def is_retrying(): if is_retrying(): # A retry is not possible because this server does # not support sessions raise the last error. + assert last_error is not None raise last_error retryable = False return func(session, sock_info, retryable) @@ -1247,6 +1251,7 @@ def is_retrying(): # The application may think the write was never attempted # if we raise ServerSelectionTimeoutError on the retry # attempt. Raise the original exception instead. + assert last_error is not None raise last_error # A ServerSelectionTimeoutError error indicates that there may # be a persistent outage. 
Attempting to retry in this case will @@ -1280,7 +1285,7 @@ def _retryable_read(self, func, read_pref, session, address=None, retryable = (retryable and self.options.retry_reads and not (session and session.in_transaction)) - last_error = None + last_error: Optional[Exception] = None retrying = False while True: @@ -1292,6 +1297,7 @@ def _retryable_read(self, func, read_pref, session, address=None, if retrying and not retryable: # A retry is not possible because this server does # not support retryable reads, raise the last error. + assert last_error is not None raise last_error return func(session, server, sock_info, read_pref) except ServerSelectionTimeoutError: @@ -1299,6 +1305,7 @@ def _retryable_read(self, func, read_pref, session, address=None, # The application may think the write was never attempted # if we raise ServerSelectionTimeoutError on the retry # attempt. Raise the original exception instead. + assert last_error is not None raise last_error # A ServerSelectionTimeoutError error indicates that there may # be a persistent outage. Attempting to retry in this case will @@ -1322,15 +1329,15 @@ def _retryable_write(self, retryable, func, session): with self._tmp_session(session) as s: return self._retry_with_session(retryable, func, s, None) - def __eq__(self, other): + def __eq__(self, other: Any) -> bool: if isinstance(other, self.__class__): return self._topology == other._topology return NotImplemented - def __ne__(self, other): + def __ne__(self, other: Any) -> bool: return not self == other - def __hash__(self): + def __hash__(self) -> int: return hash(self._topology) def _repr_helper(self): @@ -1366,7 +1373,7 @@ def option_repr(option, value): def __repr__(self): return ("MongoClient(%s)" % (self._repr_helper(),)) - def __getattr__(self, name): + def __getattr__(self, name: str) -> database.Database[_DocumentType]: """Get a database by name. Raises :class:`~pymongo.errors.InvalidName` if an invalid @@ -1381,7 +1388,7 @@ def __getattr__(self, name): " database, use client[%r]." % (name, name, name)) return self.__getitem__(name) - def __getitem__(self, name): + def __getitem__(self, name: str) -> database.Database[_DocumentType]: """Get a database by name. Raises :class:`~pymongo.errors.InvalidName` if an invalid @@ -1539,9 +1546,10 @@ def __start_session(self, implicit, **kwargs): self, server_session, opts, implicit) def start_session(self, - causal_consistency=None, - default_transaction_options=None, - snapshot=False): + causal_consistency: Optional[bool] = None, + default_transaction_options: Optional[client_session.TransactionOptions] = None, + snapshot: Optional[bool] = False, + ) -> client_session.ClientSession[_DocumentType]: """Start a logical session. This method takes the same parameters as @@ -1630,7 +1638,9 @@ def _process_response(self, reply, session): if session is not None: session._process_response(reply) - def server_info(self, session=None): + def server_info(self, + session: Optional[client_session.ClientSession] = None + ) -> Dict[str, Any]: """Get information about the MongoDB server we're connected to. :Parameters: @@ -1644,7 +1654,10 @@ def server_info(self, session=None): read_preference=ReadPreference.PRIMARY, session=session) - def list_databases(self, session=None, **kwargs): + def list_databases(self, + session: Optional[client_session.ClientSession] = None, + **kwargs: Any + ) -> CommandCursor[Dict[str, Any]]: """Get a cursor over the databases of the connected server. 
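For example, to print each database name (a sketch; each result
document carries at least a ``name`` field)::

    for db_doc in client.list_databases():
        print(db_doc["name"])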
:Parameters: @@ -1673,7 +1686,9 @@ def list_databases(self, session=None, **kwargs): } return CommandCursor(admin["$cmd"], cursor, None) - def list_database_names(self, session=None): + def list_database_names(self, + session: Optional[client_session.ClientSession] = None + ) -> List[str]: """Get a list of the names of all databases on the connected server. :Parameters: @@ -1685,7 +1700,10 @@ def list_database_names(self, session=None): return [doc["name"] for doc in self.list_databases(session, nameOnly=True)] - def drop_database(self, name_or_database, session=None): + def drop_database(self, + name_or_database: Union[str, database.Database], + session: Optional[client_session.ClientSession] = None + ) -> None: """Drop a database. Raises :class:`TypeError` if `name_or_database` is not an instance of @@ -1727,8 +1745,13 @@ def drop_database(self, name_or_database, session=None): parse_write_concern_error=True, session=session) - def get_default_database(self, default=None, codec_options=None, - read_preference=None, write_concern=None, read_concern=None): + def get_default_database(self, + default: Optional[str] = None, + codec_options: Optional[CodecOptions] = None, + read_preference: Optional[_ServerMode] = None, + write_concern: Optional[WriteConcern] = None, + read_concern: Optional["ReadConcern"] = None, + ) -> database.Database[_DocumentType]: """Get the database named in the MongoDB connection URI. >>> uri = 'mongodb://host/my_database' @@ -1773,12 +1796,18 @@ def get_default_database(self, default=None, codec_options=None, raise ConfigurationError( 'No default database name defined or provided.') + name = cast(str, self.__default_database_name or default) return database.Database( - self, self.__default_database_name or default, codec_options, + self, name, codec_options, read_preference, write_concern, read_concern) - def get_database(self, name=None, codec_options=None, read_preference=None, - write_concern=None, read_concern=None): + def get_database(self, + name: Optional[str] = None, + codec_options: Optional[CodecOptions] = None, + read_preference: Optional[_ServerMode] = None, + write_concern: Optional[WriteConcern] = None, + read_concern: Optional["ReadConcern"] = None, + ) -> database.Database[_DocumentType]: """Get a :class:`~pymongo.database.Database` with the given name and options. 
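For example, to get the hypothetical ``test`` database with a
secondary read preference::

    from pymongo import ReadPreference
    db = client.get_database("test", read_preference=ReadPreference.SECONDARY)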
@@ -1838,16 +1867,16 @@ def _database_default_options(self, name): read_preference=ReadPreference.PRIMARY, write_concern=DEFAULT_WRITE_CONCERN) - def __enter__(self): + def __enter__(self) -> "MongoClient[_DocumentType]": return self - def __exit__(self, exc_type, exc_val, exc_tb): + def __exit__(self, exc_type: Any, exc_val: Any, exc_tb: Any) -> None: self.close() - def __iter__(self): + def __iter__(self) -> "MongoClient[_DocumentType]": return self - def __next__(self): + def __next__(self) -> None: raise TypeError("'MongoClient' object is not iterable") next = __next__ diff --git a/pymongo/monitor.py b/pymongo/monitor.py index 039ec51942..388ba61687 100644 --- a/pymongo/monitor.py +++ b/pymongo/monitor.py @@ -18,10 +18,10 @@ import threading import time import weakref +from typing import Any, Mapping, cast from pymongo import common, periodic_executor -from pymongo.errors import (NotPrimaryError, - OperationFailure, +from pymongo.errors import (NotPrimaryError, OperationFailure, _OperationCancelled) from pymongo.hello import Hello from pymongo.periodic_executor import _shutdown_executors @@ -50,7 +50,7 @@ def target(): monitor = self_ref() if monitor is None: return False # Stop the executor. - monitor._run() + monitor._run() # type:ignore[attr-defined] return True executor = periodic_executor.PeriodicExecutor( @@ -214,8 +214,8 @@ def _check_server(self): return self._check_once() except (OperationFailure, NotPrimaryError) as exc: # Update max cluster time even when hello fails. - self._topology.receive_cluster_time( - exc.details.get('$clusterTime')) + details = cast(Mapping[str, Any], exc.details) + self._topology.receive_cluster_time(details.get('$clusterTime')) raise except ReferenceError: raise diff --git a/pymongo/monitoring.py b/pymongo/monitoring.py index b877e19a23..6f57200a3b 100644 --- a/pymongo/monitoring.py +++ b/pymongo/monitoring.py @@ -180,12 +180,21 @@ def connection_checked_in(self, event): handler first. """ +import datetime from collections import abc, namedtuple +from typing import TYPE_CHECKING, Any, Dict, Mapping, Optional -from pymongo.hello import HelloCompat +from bson.objectid import ObjectId +from pymongo.hello import Hello, HelloCompat from pymongo.helpers import _handle_exception +from pymongo.typings import _Address -_Listeners = namedtuple('Listeners', +if TYPE_CHECKING: + from pymongo.server_description import ServerDescription + from pymongo.topology_description import TopologyDescription + + +_Listeners = namedtuple('_Listeners', ('command_listeners', 'server_listeners', 'server_heartbeat_listeners', 'topology_listeners', 'cmap_listeners')) @@ -193,6 +202,9 @@ def connection_checked_in(self, event): _LISTENERS = _Listeners([], [], [], [], []) +_DocumentOut = Mapping[str, Any] + + class _EventListener(object): """Abstract base class for all event listeners.""" @@ -204,7 +216,7 @@ class CommandListener(_EventListener): and `CommandFailedEvent`. """ - def started(self, event): + def started(self, event: "CommandStartedEvent") -> None: """Abstract method to handle a `CommandStartedEvent`. :Parameters: @@ -212,7 +224,7 @@ def started(self, event): """ raise NotImplementedError - def succeeded(self, event): + def succeeded(self, event: "CommandSucceededEvent") -> None: """Abstract method to handle a `CommandSucceededEvent`. :Parameters: @@ -220,7 +232,7 @@ def succeeded(self, event): """ raise NotImplementedError - def failed(self, event): + def failed(self, event: "CommandFailedEvent") -> None: """Abstract method to handle a `CommandFailedEvent`. 
:Parameters: @@ -245,7 +257,7 @@ class ConnectionPoolListener(_EventListener): .. versionadded:: 3.9 """ - def pool_created(self, event): + def pool_created(self, event: "PoolCreatedEvent") -> None: """Abstract method to handle a :class:`PoolCreatedEvent`. Emitted when a Connection Pool is created. @@ -255,7 +267,7 @@ def pool_created(self, event): """ raise NotImplementedError - def pool_ready(self, event): + def pool_ready(self, event: "PoolReadyEvent") -> None: """Abstract method to handle a :class:`PoolReadyEvent`. Emitted when a Connection Pool is marked ready. @@ -267,7 +279,7 @@ def pool_ready(self, event): """ raise NotImplementedError - def pool_cleared(self, event): + def pool_cleared(self, event: "PoolClearedEvent") -> None: """Abstract method to handle a `PoolClearedEvent`. Emitted when a Connection Pool is cleared. @@ -277,7 +289,7 @@ def pool_cleared(self, event): """ raise NotImplementedError - def pool_closed(self, event): + def pool_closed(self, event: "PoolClosedEvent") -> None: """Abstract method to handle a `PoolClosedEvent`. Emitted when a Connection Pool is closed. @@ -287,7 +299,7 @@ def pool_closed(self, event): """ raise NotImplementedError - def connection_created(self, event): + def connection_created(self, event: "ConnectionCreatedEvent") -> None: """Abstract method to handle a :class:`ConnectionCreatedEvent`. Emitted when a Connection Pool creates a Connection object. @@ -297,7 +309,7 @@ def connection_created(self, event): """ raise NotImplementedError - def connection_ready(self, event): + def connection_ready(self, event: "ConnectionReadyEvent") -> None: """Abstract method to handle a :class:`ConnectionReadyEvent`. Emitted when a Connection has finished its setup, and is now ready to @@ -308,7 +320,7 @@ def connection_ready(self, event): """ raise NotImplementedError - def connection_closed(self, event): + def connection_closed(self, event: "ConnectionClosedEvent") -> None: """Abstract method to handle a :class:`ConnectionClosedEvent`. Emitted when a Connection Pool closes a Connection. @@ -318,7 +330,7 @@ def connection_closed(self, event): """ raise NotImplementedError - def connection_check_out_started(self, event): + def connection_check_out_started(self, event: "ConnectionCheckOutStartedEvent") -> None: """Abstract method to handle a :class:`ConnectionCheckOutStartedEvent`. Emitted when the driver starts attempting to check out a connection. @@ -328,7 +340,7 @@ def connection_check_out_started(self, event): """ raise NotImplementedError - def connection_check_out_failed(self, event): + def connection_check_out_failed(self, event: "ConnectionCheckOutFailedEvent") -> None: """Abstract method to handle a :class:`ConnectionCheckOutFailedEvent`. Emitted when the driver's attempt to check out a connection fails. @@ -338,7 +350,7 @@ def connection_check_out_failed(self, event): """ raise NotImplementedError - def connection_checked_out(self, event): + def connection_checked_out(self, event: "ConnectionCheckedOutEvent") -> None: """Abstract method to handle a :class:`ConnectionCheckedOutEvent`. Emitted when the driver successfully checks out a Connection. @@ -348,7 +360,7 @@ def connection_checked_out(self, event): """ raise NotImplementedError - def connection_checked_in(self, event): + def connection_checked_in(self, event: "ConnectionCheckedInEvent") -> None: """Abstract method to handle a :class:`ConnectionCheckedInEvent`. 
Emitted when the driver checks in a Connection back to the Connection @@ -369,7 +381,7 @@ class ServerHeartbeatListener(_EventListener): .. versionadded:: 3.3 """ - def started(self, event): + def started(self, event: "ServerHeartbeatStartedEvent") -> None: """Abstract method to handle a `ServerHeartbeatStartedEvent`. :Parameters: @@ -377,7 +389,7 @@ def started(self, event): """ raise NotImplementedError - def succeeded(self, event): + def succeeded(self, event: "ServerHeartbeatSucceededEvent") -> None: """Abstract method to handle a `ServerHeartbeatSucceededEvent`. :Parameters: @@ -385,7 +397,7 @@ def succeeded(self, event): """ raise NotImplementedError - def failed(self, event): + def failed(self, event: "ServerHeartbeatFailedEvent") -> None: """Abstract method to handle a `ServerHeartbeatFailedEvent`. :Parameters: @@ -402,7 +414,7 @@ class TopologyListener(_EventListener): .. versionadded:: 3.3 """ - def opened(self, event): + def opened(self, event: "TopologyOpenedEvent") -> None: """Abstract method to handle a `TopologyOpenedEvent`. :Parameters: @@ -410,7 +422,7 @@ def opened(self, event): """ raise NotImplementedError - def description_changed(self, event): + def description_changed(self, event: "TopologyDescriptionChangedEvent") -> None: """Abstract method to handle a `TopologyDescriptionChangedEvent`. :Parameters: @@ -418,7 +430,7 @@ def description_changed(self, event): """ raise NotImplementedError - def closed(self, event): + def closed(self, event: "TopologyClosedEvent") -> None: """Abstract method to handle a `TopologyClosedEvent`. :Parameters: @@ -435,7 +447,7 @@ class ServerListener(_EventListener): .. versionadded:: 3.3 """ - def opened(self, event): + def opened(self, event: "ServerOpeningEvent") -> None: """Abstract method to handle a `ServerOpeningEvent`. :Parameters: @@ -443,7 +455,7 @@ def opened(self, event): """ raise NotImplementedError - def description_changed(self, event): + def description_changed(self, event: "ServerDescriptionChangedEvent") -> None: """Abstract method to handle a `ServerDescriptionChangedEvent`. :Parameters: @@ -451,7 +463,7 @@ def description_changed(self, event): """ raise NotImplementedError - def closed(self, event): + def closed(self, event: "ServerClosedEvent") -> None: """Abstract method to handle a `ServerClosedEvent`. :Parameters: @@ -478,7 +490,7 @@ def _validate_event_listeners(option, listeners): return listeners -def register(listener): +def register(listener: _EventListener) -> None: """Register a global event listener. 
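For example, to install the ``CommandLogger`` defined in
:mod:`~pymongo.event_loggers` globally (a sketch)::

    from pymongo import monitoring
    from pymongo.event_loggers import CommandLogger

    monitoring.register(CommandLogger())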
:Parameters: @@ -525,8 +537,14 @@ class _CommandEvent(object): __slots__ = ("__cmd_name", "__rqst_id", "__conn_id", "__op_id", "__service_id") - def __init__(self, command_name, request_id, connection_id, operation_id, - service_id=None): + def __init__( + self, + command_name: str, + request_id: int, + connection_id: _Address, + operation_id: Optional[int], + service_id: Optional[ObjectId] = None, + ) -> None: self.__cmd_name = command_name self.__rqst_id = request_id self.__conn_id = connection_id @@ -534,22 +552,22 @@ def __init__(self, command_name, request_id, connection_id, operation_id, self.__service_id = service_id @property - def command_name(self): + def command_name(self) -> str: """The command name.""" return self.__cmd_name @property - def request_id(self): + def request_id(self) -> int: """The request id for this operation.""" return self.__rqst_id @property - def connection_id(self): + def connection_id(self) -> _Address: """The address (host, port) of the server this command was sent to.""" return self.__conn_id @property - def service_id(self): + def service_id(self) -> Optional[ObjectId]: """The service_id this command was sent to, or ``None``. .. versionadded:: 3.12 @@ -557,7 +575,7 @@ def service_id(self): return self.__service_id @property - def operation_id(self): + def operation_id(self) -> Optional[int]: """An id for this series of events or None.""" return self.__op_id @@ -576,28 +594,36 @@ class CommandStartedEvent(_CommandEvent): """ __slots__ = ("__cmd", "__db") - def __init__(self, command, database_name, *args, service_id=None): + def __init__( + self, + command: _DocumentOut, + database_name: str, + request_id: int, + connection_id: _Address, + operation_id: Optional[int], + service_id: Optional[ObjectId] = None, + ) -> None: if not command: raise ValueError("%r is not a valid command" % (command,)) # Command name must be first key. 
command_name = next(iter(command)) super(CommandStartedEvent, self).__init__( - command_name, *args, service_id=service_id) + command_name, request_id, connection_id, operation_id, service_id=service_id) cmd_name, cmd_doc = command_name.lower(), command[command_name] if (cmd_name in _SENSITIVE_COMMANDS or _is_speculative_authenticate(cmd_name, command)): - self.__cmd = {} + self.__cmd: Mapping[str, Any] = {} else: self.__cmd = command self.__db = database_name @property - def command(self): + def command(self) -> _DocumentOut: """The command document.""" return self.__cmd @property - def database_name(self): + def database_name(self) -> str: """The name of the database this command was run against.""" return self.__db @@ -625,8 +651,16 @@ class CommandSucceededEvent(_CommandEvent): """ __slots__ = ("__duration_micros", "__reply") - def __init__(self, duration, reply, command_name, - request_id, connection_id, operation_id, service_id=None): + def __init__( + self, + duration: datetime.timedelta, + reply: _DocumentOut, + command_name: str, + request_id: int, + connection_id: _Address, + operation_id: Optional[int], + service_id: Optional[ObjectId] = None, + ) -> None: super(CommandSucceededEvent, self).__init__( command_name, request_id, connection_id, operation_id, service_id=service_id) @@ -634,17 +668,17 @@ def __init__(self, duration, reply, command_name, cmd_name = command_name.lower() if (cmd_name in _SENSITIVE_COMMANDS or _is_speculative_authenticate(cmd_name, reply)): - self.__reply = {} + self.__reply: Mapping[str, Any] = {} else: self.__reply = reply @property - def duration_micros(self): + def duration_micros(self) -> int: """The duration of this operation in microseconds.""" return self.__duration_micros @property - def reply(self): + def reply(self) -> _DocumentOut: """The server failure document for this operation.""" return self.__reply @@ -672,18 +706,27 @@ class CommandFailedEvent(_CommandEvent): """ __slots__ = ("__duration_micros", "__failure") - def __init__(self, duration, failure, *args, service_id=None): - super(CommandFailedEvent, self).__init__(*args, service_id=service_id) + def __init__( + self, + duration: datetime.timedelta, + failure: _DocumentOut, + command_name: str, + request_id: int, + connection_id: _Address, + operation_id: Optional[int], + service_id: Optional[ObjectId] = None, + ) -> None: + super(CommandFailedEvent, self).__init__(command_name, request_id, connection_id, operation_id, service_id=service_id) self.__duration_micros = _to_micros(duration) self.__failure = failure @property - def duration_micros(self): + def duration_micros(self) -> int: """The duration of this operation in microseconds.""" return self.__duration_micros @property - def failure(self): + def failure(self) -> _DocumentOut: """The server failure document for this operation.""" return self.__failure @@ -700,11 +743,11 @@ class _PoolEvent(object): """Base class for pool events.""" __slots__ = ("__address",) - def __init__(self, address): + def __init__(self, address: _Address) -> None: self.__address = address @property - def address(self): + def address(self) -> _Address: """The address (host, port) pair of the server the pool is attempting to connect to. 
""" @@ -725,12 +768,12 @@ class PoolCreatedEvent(_PoolEvent): """ __slots__ = ("__options",) - def __init__(self, address, options): + def __init__(self, address: _Address, options: Dict[str, Any]) -> None: super(PoolCreatedEvent, self).__init__(address) self.__options = options @property - def options(self): + def options(self) -> Dict[str, Any]: """Any non-default pool options that were set on this Connection Pool. """ return self.__options @@ -764,12 +807,12 @@ class PoolClearedEvent(_PoolEvent): """ __slots__ = ("__service_id",) - def __init__(self, address, service_id=None): + def __init__(self, address: _Address, service_id: Optional[ObjectId] = None) -> None: super(PoolClearedEvent, self).__init__(address) self.__service_id = service_id @property - def service_id(self): + def service_id(self) -> Optional[ObjectId]: """Connections with this service_id are cleared. When service_id is ``None``, all connections in the pool are cleared. @@ -839,19 +882,19 @@ class _ConnectionEvent(object): """Private base class for some connection events.""" __slots__ = ("__address", "__connection_id") - def __init__(self, address, connection_id): + def __init__(self, address: _Address, connection_id: int) -> None: self.__address = address self.__connection_id = connection_id @property - def address(self): + def address(self) -> _Address: """The address (host, port) pair of the server this connection is attempting to connect to. """ return self.__address @property - def connection_id(self): + def connection_id(self) -> int: """The ID of the Connection.""" return self.__connection_id @@ -958,19 +1001,19 @@ class ConnectionCheckOutFailedEvent(object): """ __slots__ = ("__address", "__reason") - def __init__(self, address, reason): + def __init__(self, address: _Address, reason: str) -> None: self.__address = address self.__reason = reason @property - def address(self): + def address(self) -> _Address: """The address (host, port) pair of the server this connection is attempting to connect to. """ return self.__address @property - def reason(self): + def reason(self) -> str: """A reason explaining why connection check out failed. 
The reason must be one of the strings from the @@ -1014,17 +1057,17 @@ class _ServerEvent(object): __slots__ = ("__server_address", "__topology_id") - def __init__(self, server_address, topology_id): + def __init__(self, server_address: _Address, topology_id: ObjectId) -> None: self.__server_address = server_address self.__topology_id = topology_id @property - def server_address(self): + def server_address(self) -> _Address: """The address (host, port) pair of the server""" return self.__server_address @property - def topology_id(self): + def topology_id(self) -> ObjectId: """A unique identifier for the topology this server is a part of.""" return self.__topology_id @@ -1041,19 +1084,19 @@ class ServerDescriptionChangedEvent(_ServerEvent): __slots__ = ('__previous_description', '__new_description') - def __init__(self, previous_description, new_description, *args): + def __init__(self, previous_description: "ServerDescription", new_description: "ServerDescription", *args: Any) -> None: super(ServerDescriptionChangedEvent, self).__init__(*args) self.__previous_description = previous_description self.__new_description = new_description @property - def previous_description(self): + def previous_description(self) -> "ServerDescription": """The previous :class:`~pymongo.server_description.ServerDescription`.""" return self.__previous_description @property - def new_description(self): + def new_description(self) -> "ServerDescription": """The new :class:`~pymongo.server_description.ServerDescription`.""" return self.__new_description @@ -1087,11 +1130,11 @@ class TopologyEvent(object): __slots__ = ('__topology_id') - def __init__(self, topology_id): + def __init__(self, topology_id: ObjectId) -> None: self.__topology_id = topology_id @property - def topology_id(self): + def topology_id(self) -> ObjectId: """A unique identifier for the topology this server is a part of.""" return self.__topology_id @@ -1108,19 +1151,19 @@ class TopologyDescriptionChangedEvent(TopologyEvent): __slots__ = ('__previous_description', '__new_description') - def __init__(self, previous_description, new_description, *args): + def __init__(self, previous_description: "TopologyDescription", new_description: "TopologyDescription", *args: Any) -> None: super(TopologyDescriptionChangedEvent, self).__init__(*args) self.__previous_description = previous_description self.__new_description = new_description @property - def previous_description(self): + def previous_description(self) -> "TopologyDescription": """The previous :class:`~pymongo.topology_description.TopologyDescription`.""" return self.__previous_description @property - def new_description(self): + def new_description(self) -> "TopologyDescription": """The new :class:`~pymongo.topology_description.TopologyDescription`.""" return self.__new_description @@ -1154,11 +1197,11 @@ class _ServerHeartbeatEvent(object): __slots__ = ('__connection_id') - def __init__(self, connection_id): + def __init__(self, connection_id: _Address) -> None: self.__connection_id = connection_id @property - def connection_id(self): + def connection_id(self) -> _Address: """The address (host, port) of the server this heartbeat was sent to.""" return self.__connection_id @@ -1184,24 +1227,24 @@ class ServerHeartbeatSucceededEvent(_ServerHeartbeatEvent): __slots__ = ('__duration', '__reply', '__awaited') - def __init__(self, duration, reply, connection_id, awaited=False): + def __init__(self, duration: float, reply: Hello, connection_id: _Address, awaited: bool = False) -> None: 
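+        # reply is the decoded hello response; when awaited is True the
+        # duration also includes the time the server waited to respond.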
        super(ServerHeartbeatSucceededEvent, self).__init__(connection_id)
         self.__duration = duration
         self.__reply = reply
         self.__awaited = awaited
 
     @property
-    def duration(self):
+    def duration(self) -> float:
         """The duration of this heartbeat in microseconds."""
         return self.__duration
 
     @property
-    def reply(self):
+    def reply(self) -> Hello:
         """An instance of :class:`~pymongo.hello.Hello`."""
         return self.__reply
 
     @property
-    def awaited(self):
+    def awaited(self) -> bool:
         """Whether the heartbeat was awaited.
 
         If true, then :meth:`duration` reflects the sum of the round trip time
@@ -1225,24 +1268,24 @@ class ServerHeartbeatFailedEvent(_ServerHeartbeatEvent):
 
     __slots__ = ('__duration', '__reply', '__awaited')
 
-    def __init__(self, duration, reply, connection_id, awaited=False):
+    def __init__(self, duration: float, reply: Exception, connection_id: _Address, awaited: bool = False) -> None:
         super(ServerHeartbeatFailedEvent, self).__init__(connection_id)
         self.__duration = duration
         self.__reply = reply
         self.__awaited = awaited
 
     @property
-    def duration(self):
+    def duration(self) -> float:
         """The duration of this heartbeat in microseconds."""
         return self.__duration
 
     @property
-    def reply(self):
+    def reply(self) -> Exception:
         """A subclass of :exc:`Exception`."""
         return self.__reply
 
     @property
-    def awaited(self):
+    def awaited(self) -> bool:
         """Whether the heartbeat was awaited.
 
         If true, then :meth:`duration` reflects the sum of the round trip time
diff --git a/pymongo/network.py b/pymongo/network.py
index a14e9924a4..48e5084e31 100644
--- a/pymongo/network.py
+++ b/pymongo/network.py
@@ -20,21 +20,16 @@
 import struct
 import time
 
-
 from bson import _decode_all_selective
-
 from pymongo import helpers, message
 from pymongo.common import MAX_MESSAGE_SIZE
-from pymongo.compression_support import decompress, _NO_COMPRESSION
-from pymongo.errors import (NotPrimaryError,
-                            OperationFailure,
-                            ProtocolError,
+from pymongo.compression_support import _NO_COMPRESSION, decompress
+from pymongo.errors import (NotPrimaryError, OperationFailure, ProtocolError,
                             _OperationCancelled)
 from pymongo.message import _UNPACK_REPLY, _OpMsg
 from pymongo.monitoring import _is_speculative_authenticate
 from pymongo.socket_checker import _errno_from_exception
 
-
 _UNPACK_HEADER = struct.Struct("<iiii").unpack
diff --git a/pymongo/operations.py b/pymongo/operations.py
--- a/pymongo/operations.py
+++ b/pymongo/operations.py
-    def __init__(self, document):
+    def __init__(self, document: _DocumentIn) -> None:
         """Create an InsertOne instance.
 
         For use with :meth:`~pymongo.collection.Collection.bulk_write`.
 
@@ -43,21 +45,25 @@ def _add_to_bulk(self, bulkobj):
     def __repr__(self):
         return "InsertOne(%r)" % (self._doc,)
 
-    def __eq__(self, other):
+    def __eq__(self, other: Any) -> bool:
         if type(other) == type(self):
             return other._doc == self._doc
         return NotImplemented
 
-    def __ne__(self, other):
+    def __ne__(self, other: Any) -> bool:
         return not self == other
 
 
+_IndexList = Sequence[Tuple[str, Union[int, str, Mapping[str, Any]]]]
+_IndexKeyHint = Union[str, _IndexList]
+
+
 class DeleteOne(object):
     """Represents a delete_one operation."""
 
     __slots__ = ("_filter", "_collation", "_hint")
 
-    def __init__(self, filter, collation=None, hint=None):
+    def __init__(self, filter: Mapping[str, Any], collation: Optional[_CollationIn] = None, hint: Optional[_IndexKeyHint] = None) -> None:
         """Create a DeleteOne instance.
 
         For use with :meth:`~pymongo.collection.Collection.bulk_write`.
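+
+        For example, assuming ``collection`` is a
+        :class:`~pymongo.collection.Collection`::
+
+            collection.bulk_write([DeleteOne({'status': 'stale'})])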
@@ -95,13 +101,13 @@ def _add_to_bulk(self, bulkobj): def __repr__(self): return "DeleteOne(%r, %r)" % (self._filter, self._collation) - def __eq__(self, other): + def __eq__(self, other: Any) -> bool: if type(other) == type(self): return ((other._filter, other._collation) == (self._filter, self._collation)) return NotImplemented - def __ne__(self, other): + def __ne__(self, other: Any) -> bool: return not self == other @@ -110,7 +116,7 @@ class DeleteMany(object): __slots__ = ("_filter", "_collation", "_hint") - def __init__(self, filter, collation=None, hint=None): + def __init__(self, filter: Mapping[str, Any], collation: Optional[_CollationIn] = None, hint: Optional[_IndexKeyHint] = None) -> None: """Create a DeleteMany instance. For use with :meth:`~pymongo.collection.Collection.bulk_write`. @@ -148,13 +154,13 @@ def _add_to_bulk(self, bulkobj): def __repr__(self): return "DeleteMany(%r, %r)" % (self._filter, self._collation) - def __eq__(self, other): + def __eq__(self, other: Any) -> bool: if type(other) == type(self): return ((other._filter, other._collation) == (self._filter, self._collation)) return NotImplemented - def __ne__(self, other): + def __ne__(self, other: Any) -> bool: return not self == other @@ -163,8 +169,8 @@ class ReplaceOne(object): __slots__ = ("_filter", "_doc", "_upsert", "_collation", "_hint") - def __init__(self, filter, replacement, upsert=False, collation=None, - hint=None): + def __init__(self, filter: Mapping[str, Any], replacement: Mapping[str, Any], upsert: bool = False, collation: Optional[_CollationIn] = None, + hint: Optional[_IndexKeyHint] = None) -> None: """Create a ReplaceOne instance. For use with :meth:`~pymongo.collection.Collection.bulk_write`. @@ -207,7 +213,7 @@ def _add_to_bulk(self, bulkobj): bulkobj.add_replace(self._filter, self._doc, self._upsert, collation=self._collation, hint=self._hint) - def __eq__(self, other): + def __eq__(self, other: Any) -> bool: if type(other) == type(self): return ( (other._filter, other._doc, other._upsert, other._collation, @@ -215,7 +221,7 @@ def __eq__(self, other): self._collation, other._hint)) return NotImplemented - def __ne__(self, other): + def __ne__(self, other: Any) -> bool: return not self == other def __repr__(self): @@ -241,7 +247,6 @@ def __init__(self, filter, doc, upsert, collation, array_filters, hint): if not isinstance(hint, str): hint = helpers._index_document(hint) - self._filter = filter self._doc = doc self._upsert = upsert @@ -272,8 +277,8 @@ class UpdateOne(_UpdateOp): __slots__ = () - def __init__(self, filter, update, upsert=False, collation=None, - array_filters=None, hint=None): + def __init__(self, filter: Mapping[str, Any], update: Union[Mapping[str, Any], _Pipeline], upsert: bool = False, collation: Optional[_CollationIn] = None, + array_filters: Optional[List[Mapping[str, Any]]] = None, hint: Optional[_IndexKeyHint] = None) -> None: """Represents an update_one operation. For use with :meth:`~pymongo.collection.Collection.bulk_write`. @@ -319,8 +324,8 @@ class UpdateMany(_UpdateOp): __slots__ = () - def __init__(self, filter, update, upsert=False, collation=None, - array_filters=None, hint=None): + def __init__(self, filter: Mapping[str, Any], update: Union[Mapping[str, Any], _Pipeline], upsert: bool = False, collation: Optional[_CollationIn] = None, + array_filters: Optional[List[Mapping[str, Any]]] = None, hint: Optional[_IndexKeyHint] = None) -> None: """Create an UpdateMany instance. For use with :meth:`~pymongo.collection.Collection.bulk_write`. 
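+
+        For example, assuming ``collection`` is a
+        :class:`~pymongo.collection.Collection`::
+
+            collection.bulk_write(
+                [UpdateMany({'qty': {'$lt': 10}}, {'$set': {'reorder': True}})])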
@@ -366,7 +371,7 @@ class IndexModel(object): __slots__ = ("__document",) - def __init__(self, keys, **kwargs): + def __init__(self, keys: _IndexKeyHint, **kwargs: Any) -> None: """Create an Index instance. For use with :meth:`~pymongo.collection.Collection.create_indexes`. @@ -437,7 +442,7 @@ def __init__(self, keys, **kwargs): self.__document['collation'] = collation @property - def document(self): + def document(self) -> Dict[str, Any]: """An index document suitable for passing to the createIndexes command. """ diff --git a/pymongo/periodic_executor.py b/pymongo/periodic_executor.py index e1690ee9b1..36e094c4cb 100644 --- a/pymongo/periodic_executor.py +++ b/pymongo/periodic_executor.py @@ -17,6 +17,7 @@ import threading import time import weakref +from typing import Any, Optional class PeriodicExecutor(object): @@ -41,7 +42,7 @@ def __init__(self, interval, min_interval, target, name=None): self._min_interval = min_interval self._target = target self._stopped = False - self._thread = None + self._thread: Optional[threading.Thread] = None self._name = name self._skip_sleep = False @@ -52,7 +53,7 @@ def __repr__(self): return '<%s(name=%s) object at 0x%x>' % ( self.__class__.__name__, self._name, id(self)) - def open(self): + def open(self) -> None: """Start. Multiple calls have no effect. Not safe to call from multiple threads at once. @@ -64,13 +65,14 @@ def open(self): # join should not block indefinitely because there is no # other work done outside the while loop in self._run. try: + assert self._thread is not None self._thread.join() except ReferenceError: # Thread terminated. pass self._thread_will_exit = False self._stopped = False - started = False + started: Any = False try: started = self._thread and self._thread.is_alive() except ReferenceError: @@ -84,7 +86,7 @@ def open(self): _register_executor(self) thread.start() - def close(self, dummy=None): + def close(self, dummy: Any = None) -> None: """Stop. To restart, call open(). The dummy parameter allows an executor's close method to be a weakref @@ -92,7 +94,7 @@ def close(self, dummy=None): """ self._stopped = True - def join(self, timeout=None): + def join(self, timeout: Optional[int] = None) -> None: if self._thread is not None: try: self._thread.join(timeout) @@ -100,14 +102,14 @@ def join(self, timeout=None): # Thread already terminated, or not yet started. 
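+                # Either way, there is nothing to wait on.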
pass - def wake(self): + def wake(self) -> None: """Execute the target function soon.""" self._event = True - def update_interval(self, new_interval): + def update_interval(self, new_interval: int) -> None: self._interval = new_interval - def skip_sleep(self): + def skip_sleep(self) -> None: self._skip_sleep = True def __should_stop(self): diff --git a/pymongo/pool.py b/pymongo/pool.py index 70920d5b23..c53c9f4736 100644 --- a/pymongo/pool.py +++ b/pymongo/pool.py @@ -18,50 +18,38 @@ import ipaddress import os import platform -import ssl import socket +import ssl import sys import threading import time import weakref +from typing import Any from bson import DEFAULT_CODEC_OPTIONS from bson.son import SON -from pymongo import auth, helpers, __version__ +from pymongo import __version__, auth, helpers from pymongo.client_session import _validate_session_write_concern -from pymongo.common import (MAX_BSON_SIZE, - MAX_CONNECTING, - MAX_IDLE_TIME_SEC, - MAX_MESSAGE_SIZE, - MAX_POOL_SIZE, - MAX_WIRE_VERSION, - MAX_WRITE_BATCH_SIZE, - MIN_POOL_SIZE, - ORDERED_TYPES, +from pymongo.common import (MAX_BSON_SIZE, MAX_CONNECTING, MAX_IDLE_TIME_SEC, + MAX_MESSAGE_SIZE, MAX_POOL_SIZE, MAX_WIRE_VERSION, + MAX_WRITE_BATCH_SIZE, MIN_POOL_SIZE, ORDERED_TYPES, WAIT_QUEUE_TIMEOUT) -from pymongo.errors import (AutoReconnect, - _CertificateError, - ConnectionFailure, - ConfigurationError, - InvalidOperation, - DocumentTooLarge, - NetworkTimeout, - NotPrimaryError, - OperationFailure, - PyMongoError) -from pymongo.hello import HelloCompat, Hello +from pymongo.errors import (AutoReconnect, ConfigurationError, + ConnectionFailure, DocumentTooLarge, + InvalidOperation, NetworkTimeout, NotPrimaryError, + OperationFailure, PyMongoError, _CertificateError) +from pymongo.hello import Hello, HelloCompat from pymongo.monitoring import (ConnectionCheckOutFailedReason, ConnectionClosedReason) -from pymongo.network import (command, - receive_message) +from pymongo.network import command, receive_message from pymongo.read_preferences import ReadPreference from pymongo.server_api import _add_to_command from pymongo.server_type import SERVER_TYPE from pymongo.socket_checker import SocketChecker -from pymongo.ssl_support import ( - SSLError as _SSLError, - HAS_SNI as _HAVE_SNI, - IPADDR_SAFE as _IPADDR_SAFE) +from pymongo.ssl_support import HAS_SNI as _HAVE_SNI +from pymongo.ssl_support import IPADDR_SAFE as _IPADDR_SAFE +from pymongo.ssl_support import SSLError as _SSLError + # For SNI support. According to RFC6066, section 3, IPv4 and IPv6 literals are # not permitted for SNI hostname. @@ -73,7 +61,7 @@ def is_ip_address(address): return False try: - from fcntl import fcntl, F_GETFD, F_SETFD, FD_CLOEXEC + from fcntl import F_GETFD, F_SETFD, FD_CLOEXEC, fcntl def _set_non_inheritable_non_atomic(fd): """Set the close-on-exec flag on the given file descriptor.""" flags = fcntl(fd, F_GETFD) @@ -82,7 +70,7 @@ def _set_non_inheritable_non_atomic(fd): # Windows, various platforms we don't claim to support # (Jython, IronPython, ...), systems that don't provide # everything we need from fcntl, etc. 
- def _set_non_inheritable_non_atomic(dummy): + def _set_non_inheritable_non_atomic(fd): """Dummy function for platforms that don't provide fcntl.""" pass @@ -145,7 +133,7 @@ def _set_keepalive_times(sock): _set_tcp_option(sock, 'TCP_KEEPINTVL', _MAX_TCP_KEEPINTVL) _set_tcp_option(sock, 'TCP_KEEPCNT', _MAX_TCP_KEEPCNT) -_METADATA = SON([ +_METADATA: SON[str, Any] = SON([ ('driver', SON([('name', 'PyMongo'), ('version', __version__)])), ]) @@ -205,7 +193,7 @@ def _set_keepalive_times(sock): if platform.python_implementation().startswith('PyPy'): _METADATA['platform'] = ' '.join( (platform.python_implementation(), - '.'.join(map(str, sys.pypy_version_info)), + '.'.join(map(str, sys.pypy_version_info)), # type: ignore '(Python %s)' % '.'.join(map(str, sys.version_info)))) elif sys.platform.startswith('java'): _METADATA['platform'] = ' '.join( @@ -688,7 +676,7 @@ def command(self, dbname, spec, session = _validate_session_write_concern(session, write_concern) # Ensure command name remains in first place. - if not isinstance(spec, ORDERED_TYPES): + if not isinstance(spec, ORDERED_TYPES): # type:ignore[arg-type] spec = SON(spec) if not (write_concern is None or write_concern.acknowledged or @@ -1088,7 +1076,7 @@ def __init__(self, address, options, handshake=True): # LIFO pool. Sockets are ordered on idle time. Sockets claimed # and returned to pool from the left side. Stale sockets removed # from the right side. - self.sockets = collections.deque() + self.sockets: collections.deque = collections.deque() self.lock = threading.Lock() self.active_sockets = 0 # Monotonically increasing connection ID required for CMAP Events. @@ -1165,8 +1153,8 @@ def _reset(self, close, pause=True, service_id=None): if service_id is None: sockets, self.sockets = self.sockets, collections.deque() else: - discard = collections.deque() - keep = collections.deque() + discard: collections.deque = collections.deque() + keep: collections.deque = collections.deque() for sock_info in self.sockets: if sock_info.service_id == service_id: discard.append(sock_info) diff --git a/pymongo/pyopenssl_context.py b/pymongo/pyopenssl_context.py index f7c53a59e5..c5a5f0936d 100644 --- a/pymongo/pyopenssl_context.py +++ b/pymongo/pyopenssl_context.py @@ -20,29 +20,28 @@ import ssl as _stdlibssl import sys as _sys import time as _time - from errno import EINTR as _EINTR - from ipaddress import ip_address as _ip_address -from cryptography.x509 import load_der_x509_certificate as _load_der_x509_certificate -from OpenSSL import crypto as _crypto, SSL as _SSL -from service_identity.pyopenssl import ( - verify_hostname as _verify_hostname, - verify_ip_address as _verify_ip_address) +from cryptography.x509 import \ + load_der_x509_certificate as _load_der_x509_certificate +from OpenSSL import SSL as _SSL +from OpenSSL import crypto as _crypto from service_identity import ( - CertificateError as _SICertificateError, - VerificationError as _SIVerificationError) - -from pymongo.errors import ( - _CertificateError, - ConfigurationError as _ConfigurationError) -from pymongo.ocsp_support import ( - _load_trusted_ca_certs, - _ocsp_callback) + CertificateError as _SICertificateError +) +from service_identity import VerificationError as _SIVerificationError +from service_identity.pyopenssl import ( # + verify_hostname as _verify_hostname +) +from service_identity.pyopenssl import verify_ip_address as _verify_ip_address + +from pymongo.errors import ConfigurationError as _ConfigurationError +from pymongo.errors import _CertificateError from 
pymongo.ocsp_cache import _OCSPCache -from pymongo.socket_checker import ( - _errno_from_exception, SocketChecker as _SocketChecker) +from pymongo.ocsp_support import _load_trusted_ca_certs, _ocsp_callback +from pymongo.socket_checker import SocketChecker as _SocketChecker +from pymongo.socket_checker import _errno_from_exception try: import certifi @@ -132,7 +131,7 @@ def recv(self, *args, **kwargs): def recv_into(self, *args, **kwargs): try: - return self._call(super(_sslConn, self).recv_into, *args, **kwargs) + return self._call(super(_sslConn, self).recv_into, *args, **kwargs) # type: ignore except _SSL.SysCallError as exc: # Suppress ragged EOFs to match the stdlib. if self.suppress_ragged_eofs and _ragged_eof(exc): @@ -147,7 +146,7 @@ def sendall(self, buf, flags=0): while total_sent < total_length: try: sent = self._call( - super(_sslConn, self).send, view[total_sent:], flags) + super(_sslConn, self).send, view[total_sent:], flags) # type: ignore # XXX: It's not clear if this can actually happen. PyOpenSSL # doesn't appear to have any interrupt handling, nor any interrupt # errors for OpenSSL connections. @@ -296,7 +295,7 @@ def _load_wincerts(self, store): """Attempt to load CA certs from Windows trust store.""" cert_store = self._ctx.get_cert_store() oid = _stdlibssl.Purpose.SERVER_AUTH.oid - for cert, encoding, trust in _stdlibssl.enum_certificates(store): + for cert, encoding, trust in _stdlibssl.enum_certificates(store): # type: ignore if encoding == "x509_asn": if trust is True or oid in trust: cert_store.add_cert( diff --git a/pymongo/read_concern.py b/pymongo/read_concern.py index 7e9cc4485c..aaf67ef5a6 100644 --- a/pymongo/read_concern.py +++ b/pymongo/read_concern.py @@ -14,6 +14,8 @@ """Tools for working with read concerns.""" +from typing import Any, Dict, Optional + class ReadConcern(object): """ReadConcern @@ -29,7 +31,7 @@ class ReadConcern(object): """ - def __init__(self, level=None): + def __init__(self, level: Optional[str] = None) -> None: if level is None or isinstance(level, str): self.__level = level else: @@ -37,18 +39,18 @@ def __init__(self, level=None): 'level must be a string or None.') @property - def level(self): + def level(self) -> Optional[str]: """The read concern level.""" return self.__level @property - def ok_for_legacy(self): + def ok_for_legacy(self) -> bool: """Return ``True`` if this read concern is compatible with old wire protocol versions.""" return self.level is None or self.level == 'local' @property - def document(self): + def document(self) -> Dict[str, Any]: """The document representation of this read concern. .. 
note:: @@ -60,7 +62,7 @@ def document(self): doc['level'] = self.level return doc - def __eq__(self, other): + def __eq__(self, other: Any) -> bool: if isinstance(other, ReadConcern): return self.document == other.document return NotImplemented diff --git a/pymongo/read_preferences.py b/pymongo/read_preferences.py index 2471d5834c..cc1317fb88 100644 --- a/pymongo/read_preferences.py +++ b/pymongo/read_preferences.py @@ -15,13 +15,13 @@ """Utilities for choosing which member of a replica set to read from.""" from collections import abc +from typing import Any, Dict, Mapping, Optional, Sequence from pymongo import max_staleness_selectors from pymongo.errors import ConfigurationError from pymongo.server_selectors import (member_with_tags_server_selector, secondary_with_tags_server_selector) - _PRIMARY = 0 _PRIMARY_PREFERRED = 1 _SECONDARY = 2 @@ -44,9 +44,9 @@ def _validate_tag_sets(tag_sets): if tag_sets is None: return tag_sets - if not isinstance(tag_sets, list): + if not isinstance(tag_sets, (list, tuple)): raise TypeError(( - "Tag sets %r invalid, must be a list") % (tag_sets,)) + "Tag sets %r invalid, must be a sequence") % (tag_sets,)) if len(tag_sets) == 0: raise ValueError(( "Tag sets %r invalid, must be None or contain at least one set of" @@ -59,7 +59,7 @@ def _validate_tag_sets(tag_sets): "bson.son.SON or other type that inherits from " "collection.Mapping" % (tags,)) - return tag_sets + return list(tag_sets) def _invalid_max_staleness_msg(max_staleness): @@ -93,6 +93,10 @@ def _validate_hedge(hedge): return hedge +_Hedge = Mapping[str, Any] +_TagSets = Sequence[Mapping[str, Any]] + + class _ServerMode(object): """Base class for all read preferences. """ @@ -100,7 +104,7 @@ class _ServerMode(object): __slots__ = ("__mongos_mode", "__mode", "__tag_sets", "__max_staleness", "__hedge") - def __init__(self, mode, tag_sets=None, max_staleness=-1, hedge=None): + def __init__(self, mode: int, tag_sets: Optional[_TagSets] = None, max_staleness: int = -1, hedge: Optional[_Hedge] = None) -> None: self.__mongos_mode = _MONGOS_MODES[mode] self.__mode = mode self.__tag_sets = _validate_tag_sets(tag_sets) @@ -108,22 +112,22 @@ def __init__(self, mode, tag_sets=None, max_staleness=-1, hedge=None): self.__hedge = _validate_hedge(hedge) @property - def name(self): + def name(self) -> str: """The name of this read preference. """ return self.__class__.__name__ @property - def mongos_mode(self): + def mongos_mode(self) -> str: """The mongos mode of this read preference. """ return self.__mongos_mode @property - def document(self): + def document(self) -> Dict[str, Any]: """Read preference as a document. """ - doc = {'mode': self.__mongos_mode} + doc: Dict[str, Any] = {'mode': self.__mongos_mode} if self.__tag_sets not in (None, [{}]): doc['tags'] = self.__tag_sets if self.__max_staleness != -1: @@ -133,13 +137,13 @@ def document(self): return doc @property - def mode(self): + def mode(self) -> int: """The mode of this read preference instance. """ return self.__mode @property - def tag_sets(self): + def tag_sets(self) -> _TagSets: """Set ``tag_sets`` to a list of dictionaries like [{'dc': 'ny'}] to read only from members whose ``dc`` tag has the value ``"ny"``. 
To specify a priority-order for tag sets, provide a list of @@ -154,14 +158,14 @@ def tag_sets(self): return list(self.__tag_sets) if self.__tag_sets else [{}] @property - def max_staleness(self): + def max_staleness(self) -> int: """The maximum estimated length of time (in seconds) a replica set secondary can fall behind the primary in replication before it will no longer be selected for operations, or -1 for no maximum.""" return self.__max_staleness @property - def hedge(self): + def hedge(self) -> Optional[_Hedge]: """The read preference ``hedge`` parameter. A dictionary that configures how the server will perform hedged reads. @@ -185,7 +189,7 @@ def hedge(self): return self.__hedge @property - def min_wire_version(self): + def min_wire_version(self) -> int: """The wire protocol version the server must support. Some read preferences impose version requirements on all servers (e.g. @@ -201,7 +205,7 @@ def __repr__(self): return "%s(tag_sets=%r, max_staleness=%r, hedge=%r)" % ( self.name, self.__tag_sets, self.__max_staleness, self.__hedge) - def __eq__(self, other): + def __eq__(self, other: Any) -> bool: if isinstance(other, _ServerMode): return (self.mode == other.mode and self.tag_sets == other.tag_sets and @@ -209,7 +213,7 @@ def __eq__(self, other): self.hedge == other.hedge) return NotImplemented - def __ne__(self, other): + def __ne__(self, other: Any) -> bool: return not self == other def __getstate__(self): @@ -243,17 +247,17 @@ class Primary(_ServerMode): __slots__ = () - def __init__(self): + def __init__(self) -> None: super(Primary, self).__init__(_PRIMARY) - def __call__(self, selection): + def __call__(self, selection: Any) -> Any: """Apply this read preference to a Selection.""" return selection.primary_selection def __repr__(self): return "Primary()" - def __eq__(self, other): + def __eq__(self, other: Any) -> bool: if isinstance(other, _ServerMode): return other.mode == _PRIMARY return NotImplemented @@ -289,11 +293,11 @@ class PrimaryPreferred(_ServerMode): __slots__ = () - def __init__(self, tag_sets=None, max_staleness=-1, hedge=None): + def __init__(self, tag_sets: Optional[_TagSets] = None, max_staleness: int = -1, hedge: Optional[_Hedge] = None) -> None: super(PrimaryPreferred, self).__init__( _PRIMARY_PREFERRED, tag_sets, max_staleness, hedge) - def __call__(self, selection): + def __call__(self, selection: Any) -> Any: """Apply this read preference to Selection.""" if selection.primary: return selection.primary_selection @@ -329,11 +333,11 @@ class Secondary(_ServerMode): __slots__ = () - def __init__(self, tag_sets=None, max_staleness=-1, hedge=None): + def __init__(self, tag_sets: Optional[_TagSets] = None, max_staleness: int = -1, hedge: Optional[_Hedge] = None) -> None: super(Secondary, self).__init__( _SECONDARY, tag_sets, max_staleness, hedge) - def __call__(self, selection): + def __call__(self, selection: Any) -> Any: """Apply this read preference to Selection.""" return secondary_with_tags_server_selector( self.tag_sets, @@ -370,11 +374,11 @@ class SecondaryPreferred(_ServerMode): __slots__ = () - def __init__(self, tag_sets=None, max_staleness=-1, hedge=None): + def __init__(self, tag_sets: Optional[_TagSets] = None, max_staleness: int = -1, hedge: Optional[_Hedge] = None) -> None: super(SecondaryPreferred, self).__init__( _SECONDARY_PREFERRED, tag_sets, max_staleness, hedge) - def __call__(self, selection): + def __call__(self, selection: Any) -> Any: """Apply this read preference to Selection.""" secondaries = secondary_with_tags_server_selector( 
self.tag_sets, @@ -412,11 +416,11 @@ class Nearest(_ServerMode): __slots__ = () - def __init__(self, tag_sets=None, max_staleness=-1, hedge=None): + def __init__(self, tag_sets: Optional[_TagSets] = None, max_staleness: int = -1, hedge: Optional[_Hedge] = None) -> None: super(Nearest, self).__init__( _NEAREST, tag_sets, max_staleness, hedge) - def __call__(self, selection): + def __call__(self, selection: Any) -> Any: """Apply this read preference to Selection.""" return member_with_tags_server_selector( self.tag_sets, @@ -467,7 +471,7 @@ def __getattr__(self, name): Secondary, SecondaryPreferred, Nearest) -def make_read_preference(mode, tag_sets, max_staleness=-1): +def make_read_preference(mode: int, tag_sets: Optional[_TagSets], max_staleness: int = -1) -> _ServerMode: if mode == _PRIMARY: if tag_sets not in (None, [{}]): raise ConfigurationError("Read preference primary " @@ -476,7 +480,7 @@ def make_read_preference(mode, tag_sets, max_staleness=-1): raise ConfigurationError("Read preference primary cannot be " "combined with maxStalenessSeconds") return Primary() - return _ALL_READ_PREFERENCES[mode](tag_sets, max_staleness) + return _ALL_READ_PREFERENCES[mode](tag_sets, max_staleness) # type: ignore _MODES = ( @@ -545,7 +549,7 @@ class ReadPreference(object): NEAREST = Nearest() -def read_pref_mode_from_name(name): +def read_pref_mode_from_name(name: str) -> int: """Get the read preference mode from mongos/uri name. """ return _MONGOS_MODES.index(name) @@ -553,10 +557,12 @@ def read_pref_mode_from_name(name): class MovingAverage(object): """Tracks an exponentially-weighted moving average.""" - def __init__(self): + average: Optional[float] + + def __init__(self) -> None: self.average = None - def add_sample(self, sample): + def add_sample(self, sample: float) -> None: if sample < 0: # Likely system time change while waiting for hello response # and not using time.monotonic. Ignore it, the next one will @@ -569,9 +575,9 @@ def add_sample(self, sample): # average with alpha = 0.2. self.average = 0.8 * self.average + 0.2 * sample - def get(self): + def get(self) -> Optional[float]: """Get the calculated average, or None if no samples yet.""" return self.average - def reset(self): + def reset(self) -> None: self.average = None diff --git a/pymongo/results.py b/pymongo/results.py index 0374803249..637bf73b0f 100644 --- a/pymongo/results.py +++ b/pymongo/results.py @@ -13,6 +13,7 @@ # limitations under the License. """Result class definitions.""" +from typing import Any, Dict, List, Mapping, Optional, Sequence, cast from pymongo.errors import InvalidOperation @@ -22,7 +23,7 @@ class _WriteResult(object): __slots__ = ("__acknowledged",) - def __init__(self, acknowledged): + def __init__(self, acknowledged: bool) -> None: self.__acknowledged = acknowledged def _raise_if_unacknowledged(self, property_name): @@ -34,7 +35,7 @@ def _raise_if_unacknowledged(self, property_name): "error." % (property_name,)) @property - def acknowledged(self): + def acknowledged(self) -> bool: """Is this the result of an acknowledged write operation? 
The :attr:`acknowledged` attribute will be ``False`` when using @@ -59,12 +60,12 @@ class InsertOneResult(_WriteResult): __slots__ = ("__inserted_id", "__acknowledged") - def __init__(self, inserted_id, acknowledged): + def __init__(self, inserted_id: Any, acknowledged: bool) -> None: self.__inserted_id = inserted_id super(InsertOneResult, self).__init__(acknowledged) @property - def inserted_id(self): + def inserted_id(self) -> Any: """The inserted document's _id.""" return self.__inserted_id @@ -75,12 +76,12 @@ class InsertManyResult(_WriteResult): __slots__ = ("__inserted_ids", "__acknowledged") - def __init__(self, inserted_ids, acknowledged): + def __init__(self, inserted_ids: List[Any], acknowledged: bool) -> None: self.__inserted_ids = inserted_ids super(InsertManyResult, self).__init__(acknowledged) @property - def inserted_ids(self): + def inserted_ids(self) -> List: """A list of _ids of the inserted documents, in the order provided. .. note:: If ``False`` is passed for the `ordered` parameter to @@ -99,17 +100,17 @@ class UpdateResult(_WriteResult): __slots__ = ("__raw_result", "__acknowledged") - def __init__(self, raw_result, acknowledged): + def __init__(self, raw_result: Dict[str, Any], acknowledged: bool) -> None: self.__raw_result = raw_result super(UpdateResult, self).__init__(acknowledged) @property - def raw_result(self): + def raw_result(self) -> Dict[str, Any]: """The raw result document returned by the server.""" return self.__raw_result @property - def matched_count(self): + def matched_count(self) -> int: """The number of documents matched for this update.""" self._raise_if_unacknowledged("matched_count") if self.upserted_id is not None: @@ -117,13 +118,13 @@ def matched_count(self): return self.__raw_result.get("n", 0) @property - def modified_count(self): + def modified_count(self) -> int: """The number of documents modified. """ self._raise_if_unacknowledged("modified_count") - return self.__raw_result.get("nModified") + return cast(int, self.__raw_result.get("nModified")) @property - def upserted_id(self): + def upserted_id(self) -> Any: """The _id of the inserted document if an upsert took place. Otherwise ``None``. """ @@ -137,17 +138,17 @@ class DeleteResult(_WriteResult): __slots__ = ("__raw_result", "__acknowledged") - def __init__(self, raw_result, acknowledged): + def __init__(self, raw_result: Dict[str, Any], acknowledged: bool) -> None: self.__raw_result = raw_result super(DeleteResult, self).__init__(acknowledged) @property - def raw_result(self): + def raw_result(self) -> Dict[str, Any]: """The raw result document returned by the server.""" return self.__raw_result @property - def deleted_count(self): + def deleted_count(self) -> int: """The number of documents deleted.""" self._raise_if_unacknowledged("deleted_count") return self.__raw_result.get("n", 0) @@ -158,7 +159,7 @@ class BulkWriteResult(_WriteResult): __slots__ = ("__bulk_api_result", "__acknowledged") - def __init__(self, bulk_api_result, acknowledged): + def __init__(self, bulk_api_result: Dict[str, Any], acknowledged: bool) -> None: """Create a BulkWriteResult instance. 
:Parameters: @@ -171,44 +172,45 @@ def __init__(self, bulk_api_result, acknowledged): super(BulkWriteResult, self).__init__(acknowledged) @property - def bulk_api_result(self): + def bulk_api_result(self) -> Dict[str, Any]: """The raw bulk API result.""" return self.__bulk_api_result @property - def inserted_count(self): + def inserted_count(self) -> int: """The number of documents inserted.""" self._raise_if_unacknowledged("inserted_count") - return self.__bulk_api_result.get("nInserted") + return cast(int, self.__bulk_api_result.get("nInserted")) @property - def matched_count(self): + def matched_count(self) -> int: """The number of documents matched for an update.""" self._raise_if_unacknowledged("matched_count") - return self.__bulk_api_result.get("nMatched") + return cast(int, self.__bulk_api_result.get("nMatched")) @property - def modified_count(self): + def modified_count(self) -> int: """The number of documents modified.""" self._raise_if_unacknowledged("modified_count") - return self.__bulk_api_result.get("nModified") + return cast(int, self.__bulk_api_result.get("nModified")) @property - def deleted_count(self): + def deleted_count(self) -> int: """The number of documents deleted.""" self._raise_if_unacknowledged("deleted_count") - return self.__bulk_api_result.get("nRemoved") + return cast(int, self.__bulk_api_result.get("nRemoved")) @property - def upserted_count(self): + def upserted_count(self) -> int: """The number of documents upserted.""" self._raise_if_unacknowledged("upserted_count") - return self.__bulk_api_result.get("nUpserted") + return cast(int, self.__bulk_api_result.get("nUpserted")) @property - def upserted_ids(self): + def upserted_ids(self) -> Optional[Dict[int, Any]]: """A map of operation index to the _id of the upserted document.""" self._raise_if_unacknowledged("upserted_ids") if self.__bulk_api_result: return dict((upsert["index"], upsert["_id"]) for upsert in self.bulk_api_result["upserted"]) + return None diff --git a/pymongo/saslprep.py b/pymongo/saslprep.py index 08a780c055..99445b06f0 100644 --- a/pymongo/saslprep.py +++ b/pymongo/saslprep.py @@ -13,13 +13,13 @@ # limitations under the License. """An implementation of RFC4013 SASLprep.""" - +from typing import Any, Optional try: import stringprep except ImportError: HAVE_STRINGPREP = False - def saslprep(data): + def saslprep(data: Any, prohibit_unassigned_code_points: Optional[bool] = True) -> str: """SASLprep dummy""" if isinstance(data, str): raise TypeError( @@ -29,6 +29,7 @@ def saslprep(data): else: HAVE_STRINGPREP = True import unicodedata + # RFC4013 section 2.3 prohibited output. _PROHIBITED = ( # A strict reading of RFC 4013 requires table c12 here, but @@ -44,7 +45,7 @@ def saslprep(data): stringprep.in_table_c8, stringprep.in_table_c9) - def saslprep(data, prohibit_unassigned_code_points=True): + def saslprep(data: Any, prohibit_unassigned_code_points: Optional[bool] = True) -> str: """An implementation of RFC4013 SASLprep. :Parameters: @@ -60,6 +61,8 @@ def saslprep(data, prohibit_unassigned_code_points=True): :Returns: The SASLprep'ed version of `data`. 
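+
+        For example, RFC 3454 maps the soft hyphen (U+00AD) to nothing,
+        so::
+
+            saslprep(u'I\u00adX') == u'IX'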
""" + prohibited: Any + if not isinstance(data, str): return data diff --git a/pymongo/server.py b/pymongo/server.py index cb9442d000..74093b05ed 100644 --- a/pymongo/server.py +++ b/pymongo/server.py @@ -17,11 +17,10 @@ from datetime import datetime from bson import _decode_all_selective - from pymongo.errors import NotPrimaryError, OperationFailure from pymongo.helpers import _check_command_response from pymongo.message import _convert_exception, _OpMsg -from pymongo.response import Response, PinnedResponse +from pymongo.response import PinnedResponse, Response from pymongo.server_type import SERVER_TYPE _CURSOR_DOC_FIELDS = {'cursor': {'firstBatch': 1, 'nextBatch': 1}} @@ -59,6 +58,8 @@ def close(self): Reconnect with open(). """ if self._publish: + assert self._listener is not None + assert self._events is not None self._events.put((self._listener.publish_server_closed, (self._description.address, self._topology_id))) self._monitor.close() @@ -169,6 +170,8 @@ def run_operation(self, sock_info, operation, read_preference, listeners, docs = _decode_all_selective( decrypted, operation.codec_options, user_fields) + response: Response + if client._should_pin_cursor(operation.session) or operation.exhaust: sock_info.pin_cursor() if isinstance(reply, _OpMsg): diff --git a/pymongo/server_description.py b/pymongo/server_description.py index 2cbf6d63cd..0a9b799165 100644 --- a/pymongo/server_description.py +++ b/pymongo/server_description.py @@ -15,10 +15,13 @@ """Represent one server the driver is connected to.""" import time +from typing import Any, Dict, Mapping, Optional, Set, Tuple, cast from bson import EPOCH_NAIVE -from pymongo.server_type import SERVER_TYPE +from bson.objectid import ObjectId from pymongo.hello import Hello +from pymongo.server_type import SERVER_TYPE +from pymongo.typings import _Address class ServerDescription(object): @@ -41,11 +44,12 @@ class ServerDescription(object): '_topology_version') def __init__( - self, - address, - hello=None, - round_trip_time=None, - error=None): + self, + address: _Address, + hello: Optional[Hello] = None, + round_trip_time: Optional[float] = None, + error: Optional[Exception] = None, + ) -> None: self._address = address if not hello: hello = Hello({}) @@ -72,9 +76,11 @@ def __init__( self._error = error self._topology_version = hello.topology_version if error: - if hasattr(error, 'details') and isinstance(error.details, dict): - self._topology_version = error.details.get('topologyVersion') + details = getattr(error, 'details', None) + if isinstance(details, dict): + self._topology_version = details.get('topologyVersion') + self._last_write_date: Optional[float] if hello.last_write_date: # Convert from datetime to seconds. delta = hello.last_write_date - EPOCH_NAIVE @@ -83,17 +89,17 @@ def __init__( self._last_write_date = None @property - def address(self): + def address(self) -> _Address: """The address (host, port) of this server.""" return self._address @property - def server_type(self): + def server_type(self) -> int: """The type of this server.""" return self._server_type @property - def server_type_name(self): + def server_type_name(self) -> str: """The server type as a human readable string. .. 
versionadded:: 3.4
@@ -101,78 +107,78 @@ def server_type_name(self):
         return SERVER_TYPE._fields[self._server_type]
 
     @property
-    def all_hosts(self):
+    def all_hosts(self) -> Set[Tuple[str, int]]:
         """List of hosts, passives, and arbiters known to this server."""
         return self._all_hosts
 
     @property
-    def tags(self):
+    def tags(self) -> Mapping[str, Any]:
         return self._tags
 
     @property
-    def replica_set_name(self):
+    def replica_set_name(self) -> Optional[str]:
         """Replica set name or None."""
         return self._replica_set_name
 
     @property
-    def primary(self):
+    def primary(self) -> Optional[Tuple[str, int]]:
         """This server's opinion about who the primary is, or None."""
         return self._primary
 
     @property
-    def max_bson_size(self):
+    def max_bson_size(self) -> int:
         return self._max_bson_size
 
     @property
-    def max_message_size(self):
+    def max_message_size(self) -> int:
         return self._max_message_size
 
     @property
-    def max_write_batch_size(self):
+    def max_write_batch_size(self) -> int:
         return self._max_write_batch_size
 
     @property
-    def min_wire_version(self):
+    def min_wire_version(self) -> int:
         return self._min_wire_version
 
     @property
-    def max_wire_version(self):
+    def max_wire_version(self) -> int:
         return self._max_wire_version
 
     @property
-    def set_version(self):
+    def set_version(self) -> Optional[int]:
         return self._set_version
 
     @property
-    def election_id(self):
+    def election_id(self) -> Optional[ObjectId]:
         return self._election_id
 
     @property
-    def cluster_time(self):
+    def cluster_time(self) -> Optional[Mapping[str, Any]]:
         return self._cluster_time
 
     @property
-    def election_tuple(self):
+    def election_tuple(self) -> Tuple[Optional[int], Optional[ObjectId]]:
         return self._set_version, self._election_id
 
     @property
-    def me(self):
+    def me(self) -> Optional[Tuple[str, int]]:
         return self._me
 
     @property
-    def logical_session_timeout_minutes(self):
+    def logical_session_timeout_minutes(self) -> Optional[int]:
         return self._ls_timeout_minutes
 
     @property
-    def last_write_date(self):
+    def last_write_date(self) -> Optional[float]:
         return self._last_write_date
 
     @property
-    def last_update_time(self):
+    def last_update_time(self) -> float:
         return self._last_update_time
 
     @property
-    def round_trip_time(self):
+    def round_trip_time(self) -> Optional[float]:
         """The current average latency or None."""
         # This override is for unittesting only!
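+        # Tests may patch the class-level _host_to_round_trip_time map to
+        # pin a fake RTT for a given address.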
        if self._address in self._host_to_round_trip_time:
@@ -181,28 +187,28 @@ def round_trip_time(self):
         return self._round_trip_time
 
     @property
-    def error(self):
+    def error(self) -> Optional[Exception]:
         """The last error attempting to connect to the server, or None."""
         return self._error
 
     @property
-    def is_writable(self):
+    def is_writable(self) -> bool:
         return self._is_writable
 
     @property
-    def is_readable(self):
+    def is_readable(self) -> bool:
         return self._is_readable
 
     @property
-    def mongos(self):
+    def mongos(self) -> bool:
         return self._server_type == SERVER_TYPE.Mongos
 
     @property
-    def is_server_type_known(self):
+    def is_server_type_known(self) -> bool:
         return self.server_type != SERVER_TYPE.Unknown
 
     @property
-    def retryable_writes_supported(self):
+    def retryable_writes_supported(self) -> bool:
         """Checks if this server supports retryable writes."""
         return ((
             self._ls_timeout_minutes is not None and
@@ -210,20 +216,20 @@ def retryable_writes_supported(self):
             or self._server_type == SERVER_TYPE.LoadBalancer)
 
     @property
-    def retryable_reads_supported(self):
-        """Checks if this server supports retryable writes."""
+    def retryable_reads_supported(self) -> bool:
+        """Checks if this server supports retryable reads."""
         return self._max_wire_version >= 6
 
     @property
-    def topology_version(self):
+    def topology_version(self) -> Optional[Mapping[str, Any]]:
         return self._topology_version
 
-    def to_unknown(self, error=None):
+    def to_unknown(self, error: Optional[Exception] = None) -> "ServerDescription":
         unknown = ServerDescription(self.address, error=error)
         unknown._topology_version = self.topology_version
         return unknown
 
-    def __eq__(self, other):
+    def __eq__(self, other: Any) -> bool:
         if isinstance(other, ServerDescription):
             return ((self._address == other.address) and
                     (self._server_type == other.server_type) and
@@ -242,7 +248,7 @@ def __eq__(self, other):
 
         return NotImplemented
 
-    def __ne__(self, other):
+    def __ne__(self, other: Any) -> bool:
         return not self == other
 
     def __repr__(self):
@@ -254,4 +260,4 @@ def __repr__(self):
             self.round_trip_time, errmsg)
 
     # For unittesting only. Use under no circumstances!
- _host_to_round_trip_time = {} + _host_to_round_trip_time: Dict = {} diff --git a/pymongo/server_type.py b/pymongo/server_type.py index 101f9dba4c..ee53b6b97d 100644 --- a/pymongo/server_type.py +++ b/pymongo/server_type.py @@ -14,10 +14,19 @@ """Type codes for MongoDB servers.""" -from collections import namedtuple +from typing import NamedTuple -SERVER_TYPE = namedtuple('ServerType', - ['Unknown', 'Mongos', 'RSPrimary', 'RSSecondary', - 'RSArbiter', 'RSOther', 'RSGhost', - 'Standalone', 'LoadBalancer'])(*range(9)) +class _ServerType(NamedTuple): + Unknown: int + Mongos: int + RSPrimary: int + RSSecondary: int + RSArbiter: int + RSOther: int + RSGhost: int + Standalone: int + LoadBalancer: int + + +SERVER_TYPE = _ServerType(*range(9)) diff --git a/pymongo/socket_checker.py b/pymongo/socket_checker.py index 48f168be48..9eb3d5f084 100644 --- a/pymongo/socket_checker.py +++ b/pymongo/socket_checker.py @@ -16,7 +16,9 @@ import errno import select +import socket import sys +from typing import Any, Optional # PYTHON-2320: Jython does not fully support poll on SSL sockets, # https://bugs.jython.org/issue2900 @@ -34,17 +36,19 @@ def _errno_from_exception(exc): class SocketChecker(object): - def __init__(self): + def __init__(self) -> None: + self._poller: Optional[select.poll] if _HAVE_POLL: self._poller = select.poll() else: self._poller = None - def select(self, sock, read=False, write=False, timeout=0): + def select(self, sock: Any, read: bool = False, write: bool = False, timeout: int = 0) -> bool: """Select for reads or writes with a timeout in seconds (or None). Returns True if the socket is readable/writable, False on timeout. """ + res: Any while True: try: if self._poller: @@ -74,12 +78,12 @@ def select(self, sock, read=False, write=False, timeout=0): # ready: subsets of the first three arguments. Return # True if any of the lists are not empty. return any(res) - except (_SelectError, IOError) as exc: + except (_SelectError, IOError) as exc: # type: ignore if _errno_from_exception(exc) in (errno.EINTR, errno.EAGAIN): continue raise - def socket_closed(self, sock): + def socket_closed(self, sock: Any) -> bool: """Return True if we know socket has been closed, False otherwise. """ try: diff --git a/pymongo/srv_resolver.py b/pymongo/srv_resolver.py index 69e075aec4..d9ee7b7c8a 100644 --- a/pymongo/srv_resolver.py +++ b/pymongo/srv_resolver.py @@ -26,6 +26,7 @@ from pymongo.common import CONNECT_TIMEOUT from pymongo.errors import ConfigurationError + # dnspython can return bytes or str from various parts # of its API depending on version. We always want str. 
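+# For example, maybe_decode(b'host.example.com') returns 'host.example.com'.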
def maybe_decode(text): @@ -38,7 +39,7 @@ def maybe_decode(text): def _resolve(*args, **kwargs): if hasattr(resolver, 'resolve'): # dnspython >= 2 - return resolver.resolve(*args, **kwargs) + return resolver.resolve(*args, **kwargs) # type: ignore # dnspython 1.X return resolver.query(*args, **kwargs) diff --git a/pymongo/ssl_context.py b/pymongo/ssl_context.py index 2f35676f87..e546105141 100644 --- a/pymongo/ssl_context.py +++ b/pymongo/ssl_context.py @@ -32,6 +32,7 @@ SSLError = _ssl.SSLError from ssl import SSLContext + if hasattr(_ssl, "VERIFY_CRL_CHECK_LEAF"): from ssl import VERIFY_CRL_CHECK_LEAF # Python 3.7 uses OpenSSL's hostname matching implementation diff --git a/pymongo/ssl_support.py b/pymongo/ssl_support.py index 5826f95801..b3428197b7 100644 --- a/pymongo/ssl_support.py +++ b/pymongo/ssl_support.py @@ -24,7 +24,7 @@ import pymongo.pyopenssl_context as _ssl except ImportError: try: - import pymongo.ssl_context as _ssl + import pymongo.ssl_context as _ssl # type: ignore[no-redef] except ImportError: HAVE_SSL = False @@ -74,7 +74,7 @@ def get_ssl_context(certfile, passphrase, ca_certs, crlfile, raise ConfigurationError( "tlsCRLFile cannot be used with PyOpenSSL") # Match the server's behavior. - ctx.verify_flags = getattr(_ssl, "VERIFY_CRL_CHECK_LEAF", 0) + setattr(ctx, 'verify_flags', getattr(_ssl, "VERIFY_CRL_CHECK_LEAF", 0)) ctx.load_verify_locations(crlfile) if ca_certs is not None: ctx.load_verify_locations(ca_certs) @@ -83,11 +83,11 @@ def get_ssl_context(certfile, passphrase, ca_certs, crlfile, ctx.verify_mode = verify_mode return ctx else: - class SSLError(Exception): + class SSLError(Exception): # type: ignore pass HAS_SNI = False IPADDR_SAFE = False - def get_ssl_context(*dummy): + def get_ssl_context(*dummy): # type: ignore """No ssl module, raise ConfigurationError.""" raise ConfigurationError("The ssl module is not available.") diff --git a/pymongo/topology.py b/pymongo/topology.py index 021a1dee60..b2d31ed314 100644 --- a/pymongo/topology.py +++ b/pymongo/topology.py @@ -21,35 +21,27 @@ import time import warnings import weakref +from typing import Any -from pymongo import (common, - helpers, - periodic_executor) +from pymongo import common, helpers, periodic_executor from pymongo.client_session import _ServerSessionPool -from pymongo.errors import (ConnectionFailure, - ConfigurationError, - NetworkTimeout, - NotPrimaryError, - OperationFailure, - PyMongoError, - ServerSelectionTimeoutError, - WriteError, - InvalidOperation) +from pymongo.errors import (ConfigurationError, ConnectionFailure, + InvalidOperation, NetworkTimeout, NotPrimaryError, + OperationFailure, PyMongoError, + ServerSelectionTimeoutError, WriteError) from pymongo.hello import Hello from pymongo.monitor import SrvMonitor from pymongo.pool import PoolOptions from pymongo.server import Server from pymongo.server_description import ServerDescription -from pymongo.server_selectors import (any_server_selector, +from pymongo.server_selectors import (Selection, any_server_selector, arbiter_server_selector, - secondary_server_selector, readable_server_selector, - writable_server_selector, - Selection) -from pymongo.topology_description import (updated_topology_description, - _updated_topology_description_srv_polling, - TopologyDescription, - SRV_POLLING_TOPOLOGIES, TOPOLOGY_TYPE) + secondary_server_selector, + writable_server_selector) +from pymongo.topology_description import ( + SRV_POLLING_TOPOLOGIES, TOPOLOGY_TYPE, TopologyDescription, + _updated_topology_description_srv_polling, 
updated_topology_description) def process_events_queue(queue_ref): @@ -80,12 +72,13 @@ def __init__(self, topology_settings): # Create events queue if there are publishers. self._events = None - self.__events_executor = None + self.__events_executor: Any = None if self._publish_server or self._publish_tp: self._events = queue.Queue(maxsize=100) if self._publish_tp: + assert self._events is not None self._events.put((self._listeners.publish_topology_opened, (self._topology_id,))) self._settings = topology_settings @@ -99,6 +92,7 @@ def __init__(self, topology_settings): self._description = topology_description if self._publish_tp: + assert self._events is not None initial_td = TopologyDescription(TOPOLOGY_TYPE.Unknown, {}, None, None, None, self._settings) self._events.put(( @@ -107,6 +101,7 @@ def __init__(self, topology_settings): for seed in topology_settings.seeds: if self._publish_server: + assert self._events is not None self._events.put((self._listeners.publish_server_opened, (seed, self._topology_id))) @@ -296,6 +291,7 @@ def _process_change(self, server_description, reset_pool=False): suppress_event = ((self._publish_server or self._publish_tp) and sd_old == server_description) if self._publish_server and not suppress_event: + assert self._events is not None self._events.put(( self._listeners.publish_server_description_changed, (sd_old, server_description, @@ -306,6 +302,7 @@ def _process_change(self, server_description, reset_pool=False): self._receive_cluster_time_no_lock(server_description.cluster_time) if self._publish_tp and not suppress_event: + assert self._events is not None self._events.put(( self._listeners.publish_topology_description_changed, (td_old, self._description, self._topology_id))) @@ -354,6 +351,7 @@ def _process_srv_update(self, seedlist): self._update_servers() if self._publish_tp: + assert self._events is not None self._events.put(( self._listeners.publish_topology_description_changed, (td_old, self._description, self._topology_id))) @@ -485,6 +483,7 @@ def close(self): # Publish only after releasing the lock. if self._publish_tp: + assert self._events is not None self._events.put((self._listeners.publish_topology_closed, (self._topology_id,))) if self._publish_server or self._publish_tp: diff --git a/pymongo/topology_description.py b/pymongo/topology_description.py index c13d00a64c..241ef5afbe 100644 --- a/pymongo/topology_description.py +++ b/pymongo/topology_description.py @@ -14,34 +14,48 @@ """Represent a deployment of MongoDB servers.""" -from collections import namedtuple from random import sample +from typing import Any, Callable, Dict, List, NamedTuple, Optional, Tuple +from bson.objectid import ObjectId from pymongo import common from pymongo.errors import ConfigurationError -from pymongo.read_preferences import ReadPreference, _AggWritePref +from pymongo.read_preferences import ReadPreference, _AggWritePref, _ServerMode from pymongo.server_description import ServerDescription from pymongo.server_selectors import Selection from pymongo.server_type import SERVER_TYPE +from pymongo.typings import _Address # Enumeration for various kinds of MongoDB cluster topologies. 
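+# With *range(6) below, TOPOLOGY_TYPE.Single == 0 and
+# TOPOLOGY_TYPE.LoadBalanced == 5.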
-TOPOLOGY_TYPE = namedtuple('TopologyType', [ - 'Single', 'ReplicaSetNoPrimary', 'ReplicaSetWithPrimary', 'Sharded', - 'Unknown', 'LoadBalanced'])(*range(6)) +class _TopologyType(NamedTuple): + Single: int + ReplicaSetNoPrimary: int + ReplicaSetWithPrimary: int + Sharded: int + Unknown: int + LoadBalanced: int + + +TOPOLOGY_TYPE = _TopologyType(*range(6)) # Topologies compatible with SRV record polling. -SRV_POLLING_TOPOLOGIES = (TOPOLOGY_TYPE.Unknown, TOPOLOGY_TYPE.Sharded) +SRV_POLLING_TOPOLOGIES: Tuple[int, int] = (TOPOLOGY_TYPE.Unknown, TOPOLOGY_TYPE.Sharded) + + +_ServerSelector = Callable[[List[ServerDescription]], List[ServerDescription]] class TopologyDescription(object): - def __init__(self, - topology_type, - server_descriptions, - replica_set_name, - max_set_version, - max_election_id, - topology_settings): + def __init__( + self, + topology_type: int, + server_descriptions: Dict[_Address, ServerDescription], + replica_set_name: Optional[str], + max_set_version: Optional[int], + max_election_id: Optional[ObjectId], + topology_settings: Any, + ) -> None: """Representation of a deployment of MongoDB servers. :Parameters: @@ -81,7 +95,7 @@ def __init__(self, for s in readable_servers): self._ls_timeout_minutes = None else: - self._ls_timeout_minutes = min(s.logical_session_timeout_minutes + self._ls_timeout_minutes = min(s.logical_session_timeout_minutes # type: ignore for s in readable_servers) def _init_incompatible_err(self): @@ -104,23 +118,23 @@ def _init_incompatible_err(self): if server_too_new: self._incompatible_err = ( - "Server at %s:%d requires wire version %d, but this " + "Server at %s:%d requires wire version %d, but this " # type: ignore "version of PyMongo only supports up to %d." - % (s.address[0], s.address[1], + % (s.address[0], s.address[1] or 0, s.min_wire_version, common.MAX_SUPPORTED_WIRE_VERSION)) elif server_too_old: self._incompatible_err = ( - "Server at %s:%d reports wire version %d, but this " + "Server at %s:%d reports wire version %d, but this " # type: ignore "version of PyMongo requires at least %d (MongoDB %s)." - % (s.address[0], s.address[1], + % (s.address[0], s.address[1] or 0, s.max_wire_version, common.MIN_SUPPORTED_WIRE_VERSION, common.MIN_SUPPORTED_SERVER_VERSION)) break - def check_compatible(self): + def check_compatible(self) -> None: """Raise ConfigurationError if any server is incompatible. 
A server is incompatible if its wire protocol version range does not @@ -129,15 +143,15 @@ def check_compatible(self): if self._incompatible_err: raise ConfigurationError(self._incompatible_err) - def has_server(self, address): + def has_server(self, address: _Address) -> bool: return address in self._server_descriptions - def reset_server(self, address): + def reset_server(self, address: _Address) -> "TopologyDescription": """A copy of this description, with one server marked Unknown.""" unknown_sd = self._server_descriptions[address].to_unknown() return updated_topology_description(self, unknown_sd) - def reset(self): + def reset(self) -> "TopologyDescription": """A copy of this description, with all servers marked Unknown.""" if self._topology_type == TOPOLOGY_TYPE.ReplicaSetWithPrimary: topology_type = TOPOLOGY_TYPE.ReplicaSetNoPrimary @@ -156,18 +170,18 @@ def reset(self): self._max_election_id, self._topology_settings) - def server_descriptions(self): + def server_descriptions(self) -> Dict[_Address, ServerDescription]: """Dict of (address, :class:`~pymongo.server_description.ServerDescription`).""" return self._server_descriptions.copy() @property - def topology_type(self): + def topology_type(self) -> int: """The type of this topology.""" return self._topology_type @property - def topology_type_name(self): + def topology_type_name(self) -> str: """The topology type as a human readable string. .. versionadded:: 3.4 @@ -175,44 +189,44 @@ def topology_type_name(self): return TOPOLOGY_TYPE._fields[self._topology_type] @property - def replica_set_name(self): + def replica_set_name(self) -> Optional[str]: """The replica set name.""" return self._replica_set_name @property - def max_set_version(self): + def max_set_version(self) -> Optional[int]: """Greatest setVersion seen from a primary, or None.""" return self._max_set_version @property - def max_election_id(self): + def max_election_id(self) -> Optional[ObjectId]: """Greatest electionId seen from a primary, or None.""" return self._max_election_id @property - def logical_session_timeout_minutes(self): + def logical_session_timeout_minutes(self) -> Optional[int]: """Minimum logical session timeout, or None.""" return self._ls_timeout_minutes @property - def known_servers(self): + def known_servers(self) -> List[ServerDescription]: """List of Servers of types besides Unknown.""" return [s for s in self._server_descriptions.values() if s.is_server_type_known] @property - def has_known_servers(self): + def has_known_servers(self) -> bool: """Whether there are any Servers of types besides Unknown.""" return any(s for s in self._server_descriptions.values() if s.is_server_type_known) @property - def readable_servers(self): + def readable_servers(self) -> List[ServerDescription]: """List of readable Servers.""" return [s for s in self._server_descriptions.values() if s.is_readable] @property - def common_wire_version(self): + def common_wire_version(self) -> Optional[int]: """Minimum of all servers' max wire versions, or None.""" servers = self.known_servers if servers: @@ -221,11 +235,11 @@ def common_wire_version(self): return None @property - def heartbeat_frequency(self): + def heartbeat_frequency(self) -> int: return self._topology_settings.heartbeat_frequency @property - def srv_max_hosts(self): + def srv_max_hosts(self) -> int: return self._topology_settings._srv_max_hosts def _apply_local_threshold(self, selection): @@ -238,7 +252,12 @@ def _apply_local_threshold(self, selection): return [s for s in selection.server_descriptions if 
                (s.round_trip_time - fastest) <= threshold]
 
-    def apply_selector(self, selector, address=None, custom_selector=None):
+    def apply_selector(
+        self,
+        selector: Any,
+        address: Optional[_Address] = None,
+        custom_selector: Optional[_ServerSelector] = None
+    ) -> List[ServerDescription]:
         """List of servers matching the provided selector(s).
 
         :Parameters:
@@ -288,7 +307,7 @@ def apply_selector(self, selector, address=None, custom_selector=None):
                 custom_selector(selection.server_descriptions))
         return self._apply_local_threshold(selection)
 
-    def has_readable_server(self, read_preference=ReadPreference.PRIMARY):
+    def has_readable_server(self, read_preference: _ServerMode = ReadPreference.PRIMARY) -> bool:
         """Does this topology have any readable servers available matching the
         given read preference?
 
@@ -305,7 +324,7 @@ def has_readable_server(self, read_preference=ReadPreference.PRIMARY):
         common.validate_read_preference("read_preference", read_preference)
         return any(self.apply_selector(read_preference))
 
-    def has_writable_server(self):
+    def has_writable_server(self) -> bool:
         """Does this topology have a writable server available?
 
         .. note:: When connected directly to a single server this method
@@ -336,7 +355,9 @@ def __repr__(self):
     }
 
 
-def updated_topology_description(topology_description, server_description):
+def updated_topology_description(
+    topology_description: TopologyDescription, server_description: ServerDescription
+) -> "TopologyDescription":
     """Return an updated copy of a TopologyDescription.
 
     :Parameters:
diff --git a/pymongo/typings.py b/pymongo/typings.py
new file mode 100644
index 0000000000..ae5aec3213
--- /dev/null
+++ b/pymongo/typings.py
@@ -0,0 +1,29 @@
+# Copyright 2022-Present MongoDB, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Type aliases used by PyMongo"""
+from typing import (TYPE_CHECKING, Any, Dict, List, Mapping, MutableMapping, Optional,
+                    Tuple, Type, TypeVar, Union)
+
+if TYPE_CHECKING:
+    from bson.raw_bson import RawBSONDocument
+    from pymongo.collation import Collation
+
+
+# Common Shared Types.
+_Address = Tuple[str, Optional[int]] +_CollationIn = Union[Mapping[str, Any], "Collation"] +_DocumentIn = Union[MutableMapping[str, Any], "RawBSONDocument"] +_Pipeline = List[Mapping[str, Any]] +_DocumentType = TypeVar('_DocumentType', Mapping[str, Any], MutableMapping[str, Any], Dict[str, Any]) diff --git a/pymongo/uri_parser.py b/pymongo/uri_parser.py index 8c43d51770..c213f4217c 100644 --- a/pymongo/uri_parser.py +++ b/pymongo/uri_parser.py @@ -15,20 +15,19 @@ """Tools to parse and validate a MongoDB URI.""" import re -import warnings import sys - +import warnings +from typing import (Any, Dict, List, Mapping, MutableMapping, Optional, Tuple, + Union, cast) from urllib.parse import unquote_plus from pymongo.client_options import _parse_ssl_options -from pymongo.common import ( - SRV_SERVICE_NAME, - get_validated_options, INTERNAL_URI_OPTION_NAME_MAP, - URI_OPTIONS_DEPRECATION_MAP, _CaseInsensitiveDictionary) +from pymongo.common import (INTERNAL_URI_OPTION_NAME_MAP, SRV_SERVICE_NAME, + URI_OPTIONS_DEPRECATION_MAP, + _CaseInsensitiveDictionary, get_validated_options) from pymongo.errors import ConfigurationError, InvalidURI from pymongo.srv_resolver import _HAVE_DNSPYTHON, _SrvResolver - SCHEME = 'mongodb://' SCHEME_LEN = len(SCHEME) SRV_SCHEME = 'mongodb+srv://' @@ -52,7 +51,7 @@ def _unquoted_percent(s): return True return False -def parse_userinfo(userinfo): +def parse_userinfo(userinfo: str) -> Tuple[str, str]: """Validates the format of user information in a MongoDB URI. Reserved characters that are gen-delimiters (":", "/", "?", "#", "[", "]", "@") as per RFC 3986 must be escaped. @@ -76,7 +75,7 @@ def parse_userinfo(userinfo): return unquote_plus(user), unquote_plus(passwd) -def parse_ipv6_literal_host(entity, default_port): +def parse_ipv6_literal_host(entity: str, default_port: Optional[int]) -> Tuple[str, Optional[Union[str, int]]]: """Validates an IPv6 literal host:port string. Returns a 2-tuple of IPv6 literal followed by port where @@ -98,7 +97,7 @@ def parse_ipv6_literal_host(entity, default_port): return entity[1: i], entity[i + 2:] -def parse_host(entity, default_port=DEFAULT_PORT): +def parse_host(entity: str, default_port: Optional[int] = DEFAULT_PORT) -> Tuple[str, Optional[int]]: """Validates a host string Returns a 2-tuple of host followed by port where port is default_port @@ -111,7 +110,7 @@ def parse_host(entity, default_port=DEFAULT_PORT): specified in entity. """ host = entity - port = default_port + port: Optional[Union[str, int]] = default_port if entity[0] == '[': host, port = parse_ipv6_literal_host(entity, default_port) elif entity.endswith(".sock"): @@ -279,7 +278,7 @@ def _normalize_options(options): return options -def validate_options(opts, warn=False): +def validate_options(opts: Mapping[str, Any], warn: bool = False) -> MutableMapping[str, Any]: """Validates and normalizes options passed in a MongoDB URI. Returns a new dictionary of validated and normalized options. If warn is @@ -295,7 +294,7 @@ def validate_options(opts, warn=False): return get_validated_options(opts, warn) -def split_options(opts, validate=True, warn=False, normalize=True): +def split_options(opts: str, validate: bool = True, warn: bool = False, normalize: bool = True) -> MutableMapping[str, Any]: """Takes the options portion of a MongoDB URI, validates each option and returns the options in a dictionary. 
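
With ``_Address`` spelled out as ``Tuple[str, Optional[int]]``, the annotated
parsing helpers above now state the host/port contract directly in their
signatures. A quick sketch of what the annotations promise, assuming the
module's default port of 27017 (the host names are placeholders)::

    from pymongo.typings import _Address
    from pymongo.uri_parser import parse_host

    addr: _Address = parse_host('example.local')      # ('example.local', 27017)
    assert parse_host('[::1]:9999') == ('::1', 9999)  # IPv6 literal; port coerced to int
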
@@ -340,7 +339,7 @@ def split_options(opts, validate=True, warn=False, normalize=True): return options -def split_hosts(hosts, default_port=DEFAULT_PORT): +def split_hosts(hosts: str, default_port: Optional[int] = DEFAULT_PORT) -> List[Tuple[str, Optional[int]]]: """Takes a string of the form host1[:port],host2[:port]... and splits it into (host, port) tuples. If [:port] isn't present the default_port is used. @@ -393,9 +392,16 @@ def _check_options(nodes, options): 'Cannot specify replicaSet with loadBalanced=true') -def parse_uri(uri, default_port=DEFAULT_PORT, validate=True, warn=False, - normalize=True, connect_timeout=None, srv_service_name=None, - srv_max_hosts=None): +def parse_uri( + uri: str, + default_port: Optional[int] = DEFAULT_PORT, + validate: bool = True, + warn: bool = False, + normalize: bool = True, + connect_timeout: Optional[float] = None, + srv_service_name: Optional[str] = None, + srv_max_hosts: Optional[int] = None +) -> Dict[str, Any]: """Parse and validate a MongoDB URI. Returns a dict of the form:: diff --git a/pymongo/write_concern.py b/pymongo/write_concern.py index 2075240f0a..5168948ee3 100644 --- a/pymongo/write_concern.py +++ b/pymongo/write_concern.py @@ -14,6 +14,8 @@ """Tools for working with write concerns.""" +from typing import Any, Dict, Optional, Union + from pymongo.errors import ConfigurationError @@ -45,8 +47,8 @@ class WriteConcern(object): __slots__ = ("__document", "__acknowledged", "__server_default") - def __init__(self, w=None, wtimeout=None, j=None, fsync=None): - self.__document = {} + def __init__(self, w: Optional[Union[int, str]] = None, wtimeout: Optional[int] = None, j: Optional[bool] = None, fsync: Optional[bool] = None) -> None: + self.__document: Dict[str, Any] = {} self.__acknowledged = True if wtimeout is not None: @@ -84,12 +86,12 @@ def __init__(self, w=None, wtimeout=None, j=None, fsync=None): self.__server_default = not self.__document @property - def is_server_default(self): + def is_server_default(self) -> bool: """Does this WriteConcern match the server default.""" return self.__server_default @property - def document(self): + def document(self) -> Dict[str, Any]: """The document representation of this write concern. .. note:: @@ -99,7 +101,7 @@ def document(self): return self.__document.copy() @property - def acknowledged(self): + def acknowledged(self) -> bool: """If ``True`` write operations will wait for acknowledgement before returning. """ @@ -109,12 +111,12 @@ def __repr__(self): return ("WriteConcern(%s)" % ( ", ".join("%s=%s" % kvt for kvt in self.__document.items()),)) - def __eq__(self, other): + def __eq__(self, other: Any) -> bool: if isinstance(other, WriteConcern): return self.__document == other.document return NotImplemented - def __ne__(self, other): + def __ne__(self, other: Any) -> bool: if isinstance(other, WriteConcern): return self.__document != other.document return NotImplemented diff --git a/test/performance/perf_test.py b/test/performance/perf_test.py index b752453f13..84c6baf60d 100644 --- a/test/performance/perf_test.py +++ b/test/performance/perf_test.py @@ -25,7 +25,7 @@ try: import simplejson as json except ImportError: - import json # type: ignore + import json # type: ignore[no-redef] sys.path[0:0] = [""] diff --git a/test/test_cursor.py b/test/test_cursor.py index 8bea12228d..0b8ba049c2 100644 --- a/test/test_cursor.py +++ b/test/test_cursor.py @@ -889,7 +889,7 @@ def test_clone(self): # Every attribute should be the same. 
         cursor2 = cursor.clone()
-        self.assertEqual(cursor.__dict__, cursor2.__dict__)
+        self.assertDictEqual(cursor.__dict__, cursor2.__dict__)
 
         # Shallow copies share state and so can mutate
         cursor2 = copy.copy(cursor)
diff --git a/test/test_grid_file.py b/test/test_grid_file.py
index a53e40c4c9..6d7cc7ba3b 100644
--- a/test/test_grid_file.py
+++ b/test/test_grid_file.py
@@ -238,7 +238,7 @@ def test_grid_out_cursor_options(self):
         cursor_dict.pop('_Cursor__session')
         cursor_clone_dict = cursor_clone.__dict__.copy()
         cursor_clone_dict.pop('_Cursor__session')
-        self.assertEqual(cursor_dict, cursor_clone_dict)
+        self.assertDictEqual(cursor_dict, cursor_clone_dict)
 
         self.assertRaises(NotImplementedError, cursor.add_option, 0)
         self.assertRaises(NotImplementedError, cursor.remove_option, 0)
diff --git a/tools/clean.py b/tools/clean.py
index 55896781a4..53729d6406 100644
--- a/tools/clean.py
+++ b/tools/clean.py
@@ -33,7 +33,7 @@
     pass
 
 try:
-    from pymongo import _cmessage  # type: ignore
+    from pymongo import _cmessage  # type: ignore[attr-defined]
     sys.exit("could still import _cmessage")
 except ImportError:
     pass

From 51691246e9b2ef8446f3716c9ba7bab1a9f4e1ad Mon Sep 17 00:00:00 2001
From: Julius Park
Date: Thu, 3 Feb 2022 15:25:14 -0800
Subject: [PATCH 0061/1588] PYTHON-2858 Use OP_MSG to authenticate if server supports OP_MSG (#843)

---
 test/mockupdb/test_handshake.py | 39 ++++++++++++++++++++++++++++++++-
 1 file changed, 38 insertions(+), 1 deletion(-)

diff --git a/test/mockupdb/test_handshake.py b/test/mockupdb/test_handshake.py
index 34028a637f..c15aaff9b8 100644
--- a/test/mockupdb/test_handshake.py
+++ b/test/mockupdb/test_handshake.py
@@ -50,7 +50,7 @@ def respond(r):
                          appname='my app', # For _check_handshake_data()
                          **dict([k_map.get((k, v), (k, v)) for k, v
                                  in kwargs.items()]))
-
+
     self.addCleanup(client.close)
 
     # We have an autoresponder luckily, so no need for `go()`.
@@ -217,5 +217,42 @@ def test_handshake_not_either(self):
         with self.assertRaisesRegex(AssertionError, "does not match"):
             test_hello_with_option(self, OpMsg)
 
+    def test_handshake_max_wire(self):
+        server = MockupDB()
+        primary_response = {"hello": 1, "ok": 1,
+                            "minWireVersion": 0, "maxWireVersion": 6}
+        self.found_auth_msg = False
+
+        def responder(request):
+            if request.matches(OpMsg, saslStart=1):
+                self.found_auth_msg = True
+                # Immediately closes the connection with
+                # OperationFailure: Server returned an invalid nonce.
+ request.reply(OpMsgReply(**primary_response, + **{'payload': + b'r=wPleNM8S5p8gMaffMDF7Py4ru9bnmmoqb0' + b'1WNPsil6o=pAvr6B1garhlwc6MKNQ93ZfFky' + b'tXdF9r,' + b's=4dcxugMJq2P4hQaDbGXZR8uR3ei' + b'PHrSmh4uhkg==,i=15000', + "saslSupportedMechs": [ + "SCRAM-SHA-1"]})) + else: + return request.reply(**primary_response) + + server.autoresponds(responder) + self.addCleanup(server.stop) + server.run() + client = MongoClient(server.uri, + username='username', + password='password', + ) + self.addCleanup(client.close) + self.assertRaises(OperationFailure, client.db.collection.find_one, + {"a": 1}) + self.assertTrue(self.found_auth_msg, "Could not find authentication " + "command with correct protocol") + + if __name__ == '__main__': unittest.main() From 561ee7cf77fcbdefb9e2f46691f2b2ba4c65198b Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Mon, 7 Feb 2022 17:33:16 -0800 Subject: [PATCH 0062/1588] PYTHON-3110 Remove use of example.com in CSFLE tests (#848) --- test/test_encryption.py | 51 ++++++++++++++++++++++++----------------- 1 file changed, 30 insertions(+), 21 deletions(-) diff --git a/test/test_encryption.py b/test/test_encryption.py index 8e47d44525..af4165f1d1 100644 --- a/test/test_encryption.py +++ b/test/test_encryption.py @@ -1205,8 +1205,8 @@ def setUp(self): kms_tls_options=KMS_TLS_OPTS) kms_providers_invalid = copy.deepcopy(kms_providers) - kms_providers_invalid['azure']['identityPlatformEndpoint'] = 'example.com:443' - kms_providers_invalid['gcp']['endpoint'] = 'example.com:443' + kms_providers_invalid['azure']['identityPlatformEndpoint'] = 'doesnotexist.invalid:443' + kms_providers_invalid['gcp']['endpoint'] = 'doesnotexist.invalid:443' kms_providers_invalid['kmip']['endpoint'] = 'doesnotexist.local:5698' self.client_encryption_invalid = ClientEncryption( kms_providers=kms_providers_invalid, @@ -1214,7 +1214,8 @@ def setUp(self): key_vault_client=client_context.client, codec_options=OPTS, kms_tls_options=KMS_TLS_OPTS) - self._kmip_host_error = '' + self._kmip_host_error = None + self._invalid_host_error = None def tearDown(self): self.client_encryption.close() @@ -1295,9 +1296,9 @@ def test_06_aws_endpoint_invalid_host(self): "region": "us-east-1", "key": ("arn:aws:kms:us-east-1:579766882180:key/" "89fcc2c4-08b0-4bd9-9f25-e30687b580d0"), - "endpoint": "example.com" + "endpoint": "doesnotexist.invalid" } - with self.assertRaisesRegex(EncryptionError, 'parse error'): + with self.assertRaisesRegex(EncryptionError, self.invalid_host_error): self.client_encryption.create_data_key( 'aws', master_key=master_key) @@ -1309,8 +1310,8 @@ def test_07_azure(self): self.run_test_expected_success('azure', master_key) # The full error should be something like: - # "Invalid JSON in KMS response. HTTP status=404. Error: Got parse error at '<', position 0: 'SPECIAL_EXPECTED'" - with self.assertRaisesRegex(EncryptionError, 'parse error'): + # "[Errno 8] nodename nor servname provided, or not known" + with self.assertRaisesRegex(EncryptionError, self.invalid_host_error): self.client_encryption_invalid.create_data_key( 'azure', master_key=master_key) @@ -1326,8 +1327,8 @@ def test_08_gcp_valid_endpoint(self): self.run_test_expected_success('gcp', master_key) # The full error should be something like: - # "Invalid JSON in KMS response. HTTP status=404. 
Error: Got parse error at '<', position 0: 'SPECIAL_EXPECTED'" - with self.assertRaisesRegex(EncryptionError, 'parse error'): + # "[Errno 8] nodename nor servname provided, or not known" + with self.assertRaisesRegex(EncryptionError, self.invalid_host_error): self.client_encryption_invalid.create_data_key( 'gcp', master_key=master_key) @@ -1339,7 +1340,7 @@ def test_09_gcp_invalid_endpoint(self): "location": "global", "keyRing": "key-ring-csfle", "keyName": "key-name-csfle", - "endpoint": "example.com:443"} + "endpoint": "doesnotexist.invalid:443"} # The full error should be something like: # "Invalid KMS response, no access_token returned. HTTP status=200" @@ -1347,22 +1348,30 @@ def test_09_gcp_invalid_endpoint(self): self.client_encryption.create_data_key( 'gcp', master_key=master_key) - def kmip_host_error(self): - if self._kmip_host_error: - return self._kmip_host_error + def dns_error(self, host, port): # The full error should be something like: # "[Errno 8] nodename nor servname provided, or not known" - try: - socket.getaddrinfo('doesnotexist.local', 5698, socket.AF_INET, - socket.SOCK_STREAM) - except Exception as exc: - self._kmip_host_error = re.escape(str(exc)) - return self._kmip_host_error + with self.assertRaises(Exception) as ctx: + socket.getaddrinfo(host, port, socket.AF_INET, socket.SOCK_STREAM) + return re.escape(str(ctx.exception)) + + @property + def invalid_host_error(self): + if self._invalid_host_error is None: + self._invalid_host_error = self.dns_error( + 'doesnotexist.invalid', 443) + return self._invalid_host_error + + @property + def kmip_host_error(self): + if self._kmip_host_error is None: + self._kmip_host_error = self.dns_error('doesnotexist.local', 5698) + return self._kmip_host_error def test_10_kmip_invalid_endpoint(self): key = {'keyId': '1'} self.run_test_expected_success('kmip', key) - with self.assertRaisesRegex(EncryptionError, self.kmip_host_error()): + with self.assertRaisesRegex(EncryptionError, self.kmip_host_error): self.client_encryption_invalid.create_data_key('kmip', key) def test_11_kmip_master_key_endpoint(self): @@ -1379,7 +1388,7 @@ def test_11_kmip_master_key_endpoint(self): def test_12_kmip_master_key_invalid_endpoint(self): key = {'keyId': '1', 'endpoint': 'doesnotexist.local:5698'} - with self.assertRaisesRegex(EncryptionError, self.kmip_host_error()): + with self.assertRaisesRegex(EncryptionError, self.kmip_host_error): self.client_encryption.create_data_key('kmip', key) From f4cef373283a95b00bc7b78626ae8fda23a472ed Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Mon, 7 Feb 2022 19:33:41 -0600 Subject: [PATCH 0063/1588] PYTHON-3064 Add typings to test package (#844) --- .github/workflows/test-python.yml | 3 +- bson/son.py | 2 +- mypy.ini | 10 ++ pymongo/socket_checker.py | 5 +- pymongo/srv_resolver.py | 2 +- pymongo/typings.py | 4 +- test/__init__.py | 29 ++++-- test/auth_aws/test_auth_aws.py | 1 + test/mockupdb/test_cursor_namespace.py | 6 ++ test/mockupdb/test_getmore_sharded.py | 2 +- test/mockupdb/test_handshake.py | 4 +- test/mockupdb/test_mixed_version_sharded.py | 4 +- test/mockupdb/test_op_msg.py | 2 + test/mockupdb/test_op_msg_read_preference.py | 5 +- test/mockupdb/test_reset_and_request_check.py | 12 ++- test/mockupdb/test_slave_okay_sharded.py | 2 +- test/performance/perf_test.py | 9 ++ test/test_auth.py | 6 +- test/test_binary.py | 6 ++ test/test_bson.py | 24 +++-- test/test_bulk.py | 91 ++++++++++--------- test/test_change_stream.py | 40 ++++++++ test/test_client.py | 16 ++-- test/test_cmap.py | 8 +- 
test/test_code.py | 3 +- test/test_collation.py | 6 ++ test/test_collection.py | 53 ++++++----- test/test_command_monitoring_legacy.py | 2 + test/test_common.py | 10 +- ...nnections_survive_primary_stepdown_spec.py | 6 +- test/test_crud_v1.py | 6 +- test/test_cursor.py | 20 ++-- test/test_custom_types.py | 43 +++++++-- test/test_database.py | 23 +++-- test/test_dbref.py | 7 +- test/test_decimal128.py | 1 + test/test_discovery_and_monitoring.py | 2 + test/test_encryption.py | 39 +++++--- test/test_examples.py | 7 ++ test/test_grid_file.py | 3 + test/test_gridfs.py | 35 +++++-- test/test_gridfs_bucket.py | 6 +- test/test_gridfs_spec.py | 3 + test/test_json_util.py | 4 +- test/test_max_staleness.py | 2 +- test/test_monitor.py | 2 +- test/test_monitoring.py | 14 ++- test/test_objectid.py | 4 +- test/test_ocsp_cache.py | 12 ++- test/test_raw_bson.py | 3 + test/test_read_concern.py | 1 + test/test_read_preferences.py | 22 +++-- test/test_read_write_concern_spec.py | 17 ++-- test/test_retryable_writes.py | 5 + test/test_sdam_monitoring_spec.py | 5 + test/test_server_selection.py | 5 +- test/test_server_selection_in_window.py | 2 +- test/test_session.py | 16 +++- test/test_srv_polling.py | 16 ++-- test/test_ssl.py | 34 ++++--- test/test_streaming_protocol.py | 2 +- test/test_transactions.py | 18 ++-- test/test_uri_parser.py | 4 +- test/test_write_concern.py | 2 +- test/unified_format.py | 9 +- test/utils.py | 27 ++---- test/utils_spec_runner.py | 9 +- 67 files changed, 542 insertions(+), 261 deletions(-) diff --git a/.github/workflows/test-python.yml b/.github/workflows/test-python.yml index 3ad5aa79fe..ca1845e2cd 100644 --- a/.github/workflows/test-python.yml +++ b/.github/workflows/test-python.yml @@ -46,4 +46,5 @@ jobs: pip install -e ".[zstd, srv]" - name: Run mypy run: | - mypy --install-types --non-interactive bson gridfs tools + mypy --install-types --non-interactive bson gridfs tools pymongo + mypy --install-types --non-interactive --disable-error-code var-annotated --disable-error-code attr-defined --disable-error-code union-attr --disable-error-code assignment --disable-error-code no-redef --disable-error-code index test diff --git a/bson/son.py b/bson/son.py index 7207367f3d..bb39644637 100644 --- a/bson/son.py +++ b/bson/son.py @@ -28,7 +28,7 @@ # This is essentially the same as re._pattern_type RE_TYPE: Type[Pattern[Any]] = type(re.compile("")) -_Key = TypeVar("_Key", bound=str) +_Key = TypeVar("_Key") _Value = TypeVar("_Value") _T = TypeVar("_T") diff --git a/mypy.ini b/mypy.ini index 926bf95745..91b1121cd5 100644 --- a/mypy.ini +++ b/mypy.ini @@ -11,6 +11,9 @@ warn_unused_configs = true warn_unused_ignores = true warn_redundant_casts = true +[mypy-gevent.*] +ignore_missing_imports = True + [mypy-kerberos.*] ignore_missing_imports = True @@ -29,5 +32,12 @@ ignore_missing_imports = True [mypy-snappy.*] ignore_missing_imports = True +[mypy-test.*] +allow_redefinition = true +allow_untyped_globals = true + [mypy-winkerberos.*] ignore_missing_imports = True + +[mypy-xmlrunner.*] +ignore_missing_imports = True diff --git a/pymongo/socket_checker.py b/pymongo/socket_checker.py index 9eb3d5f084..42db7b9373 100644 --- a/pymongo/socket_checker.py +++ b/pymongo/socket_checker.py @@ -16,9 +16,8 @@ import errno import select -import socket import sys -from typing import Any, Optional +from typing import Any, Optional, Union # PYTHON-2320: Jython does not fully support poll on SSL sockets, # https://bugs.jython.org/issue2900 @@ -43,7 +42,7 @@ def __init__(self) -> None: else: self._poller = None 
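
The change below widens ``select``'s ``timeout`` from ``int`` to
``Optional[float]``, matching what the underlying ``select``/``poll`` calls
accept: fractional seconds, or ``None`` to block until the socket is ready.
A rough illustration using a local socket pair::

    import socket
    from pymongo.socket_checker import SocketChecker

    checker = SocketChecker()
    a, b = socket.socketpair()
    checker.select(a, read=True)               # non-blocking poll, timeout=0
    checker.select(a, read=True, timeout=0.5)  # waits up to half a second
    b.sendall(b'x')
    assert checker.select(a, read=True, timeout=None)  # blocks until readable
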
- def select(self, sock: Any, read: bool = False, write: bool = False, timeout: int = 0) -> bool: + def select(self, sock: Any, read: bool = False, write: bool = False, timeout: Optional[float] = 0) -> bool: """Select for reads or writes with a timeout in seconds (or None). Returns True if the socket is readable/writable, False on timeout. diff --git a/pymongo/srv_resolver.py b/pymongo/srv_resolver.py index d9ee7b7c8a..989e79131c 100644 --- a/pymongo/srv_resolver.py +++ b/pymongo/srv_resolver.py @@ -39,7 +39,7 @@ def maybe_decode(text): def _resolve(*args, **kwargs): if hasattr(resolver, 'resolve'): # dnspython >= 2 - return resolver.resolve(*args, **kwargs) # type: ignore + return resolver.resolve(*args, **kwargs) # dnspython 1.X return resolver.query(*args, **kwargs) diff --git a/pymongo/typings.py b/pymongo/typings.py index ae5aec3213..767eed36c5 100644 --- a/pymongo/typings.py +++ b/pymongo/typings.py @@ -14,7 +14,7 @@ """Type aliases used by PyMongo""" from typing import (TYPE_CHECKING, Any, Dict, List, Mapping, MutableMapping, Optional, - Tuple, Type, TypeVar, Union) + Sequence, Tuple, Type, TypeVar, Union) if TYPE_CHECKING: from bson.raw_bson import RawBSONDocument @@ -25,5 +25,5 @@ _Address = Tuple[str, Optional[int]] _CollationIn = Union[Mapping[str, Any], "Collation"] _DocumentIn = Union[MutableMapping[str, Any], "RawBSONDocument"] -_Pipeline = List[Mapping[str, Any]] +_Pipeline = Sequence[Mapping[str, Any]] _DocumentType = TypeVar('_DocumentType', Mapping[str, Any], MutableMapping[str, Any], Dict[str, Any]) diff --git a/test/__init__.py b/test/__init__.py index ab53b7fdc5..c02eb97949 100644 --- a/test/__init__.py +++ b/test/__init__.py @@ -40,6 +40,7 @@ from contextlib import contextmanager from functools import wraps +from typing import Dict, no_type_check from unittest import SkipTest import pymongo @@ -48,7 +49,9 @@ from bson.son import SON from pymongo import common, message from pymongo.common import partition_node +from pymongo.database import Database from pymongo.hello import HelloCompat +from pymongo.mongo_client import MongoClient from pymongo.server_api import ServerApi from pymongo.ssl_support import HAVE_SSL, _ssl from pymongo.uri_parser import parse_uri @@ -86,7 +89,7 @@ os.path.join(CERT_PATH, 'client.pem')) CA_PEM = os.environ.get('CA_PEM', os.path.join(CERT_PATH, 'ca.pem')) -TLS_OPTIONS = dict(tls=True) +TLS_OPTIONS: Dict = dict(tls=True) if CLIENT_PEM: TLS_OPTIONS['tlsCertificateKeyFile'] = CLIENT_PEM if CA_PEM: @@ -102,13 +105,13 @@ # Remove after PYTHON-2712 from pymongo import pool pool._MOCK_SERVICE_ID = True - res = parse_uri(SINGLE_MONGOS_LB_URI) + res = parse_uri(SINGLE_MONGOS_LB_URI or "") host, port = res['nodelist'][0] db_user = res['username'] or db_user db_pwd = res['password'] or db_pwd elif TEST_SERVERLESS: TEST_LOADBALANCER = True - res = parse_uri(SINGLE_MONGOS_LB_URI) + res = parse_uri(SINGLE_MONGOS_LB_URI or "") host, port = res['nodelist'][0] db_user = res['username'] or db_user db_pwd = res['password'] or db_pwd @@ -184,6 +187,7 @@ def enable(self): def __enter__(self): self.enable() + @no_type_check def disable(self): common.HEARTBEAT_FREQUENCY = self.old_heartbeat_frequency common.MIN_HEARTBEAT_INTERVAL = self.old_min_heartbeat_interval @@ -224,6 +228,8 @@ def _all_users(db): class ClientContext(object): + client: MongoClient + MULTI_MONGOS_LB_URI = MULTI_MONGOS_LB_URI def __init__(self): @@ -247,9 +253,9 @@ def __init__(self): self.tls = False self.tlsCertificateKeyFile = False self.server_is_resolvable = is_server_resolvable() - 
self.default_client_options = {} + self.default_client_options: Dict = {} self.sessions_enabled = False - self.client = None + self.client = None # type: ignore self.conn_lock = threading.Lock() self.is_data_lake = False self.load_balancer = TEST_LOADBALANCER @@ -340,6 +346,7 @@ def _init_client(self): try: self.cmd_line = self.client.admin.command('getCmdLineOpts') except pymongo.errors.OperationFailure as e: + assert e.details is not None msg = e.details.get('errmsg', '') if e.code == 13 or 'unauthorized' in msg or 'login' in msg: # Unauthorized. @@ -418,6 +425,7 @@ def _init_client(self): else: self.server_parameters = self.client.admin.command( 'getParameter', '*') + assert self.cmd_line is not None if 'enableTestCommands=1' in self.cmd_line['argv']: self.test_commands_enabled = True elif 'parsed' in self.cmd_line: @@ -436,7 +444,8 @@ def _init_client(self): self.mongoses.append(address) if not self.serverless: # Check for another mongos on the next port. - next_address = address[0], address[1] + 1 + assert address is not None + next_address = address[0], address[1] + 1 mongos_client = self._connect( *next_address, **self.default_client_options) if mongos_client: @@ -496,6 +505,7 @@ def _check_user_provided(self): try: return db_user in _all_users(client.admin) except pymongo.errors.OperationFailure as e: + assert e.details is not None msg = e.details.get('errmsg', '') if e.code == 18 or 'auth fails' in msg: # Auth failed. @@ -505,6 +515,7 @@ def _check_user_provided(self): def _server_started_with_auth(self): # MongoDB >= 2.0 + assert self.cmd_line is not None if 'parsed' in self.cmd_line: parsed = self.cmd_line['parsed'] # MongoDB >= 2.6 @@ -525,6 +536,7 @@ def _server_started_with_ipv6(self): if not socket.has_ipv6: return False + assert self.cmd_line is not None if 'parsed' in self.cmd_line: if not self.cmd_line['parsed'].get('net', {}).get('ipv6'): return False @@ -932,6 +944,9 @@ def fail_point(self, command_args): class IntegrationTest(PyMongoTestCase): """Base class for TestCases that need a connection to MongoDB to pass.""" + client: MongoClient + db: Database + credentials: Dict[str, str] @classmethod @client_context.require_connection @@ -1073,7 +1088,7 @@ def run(self, test): if HAVE_XML: - class PymongoXMLTestRunner(XMLTestRunner): + class PymongoXMLTestRunner(XMLTestRunner): # type: ignore[misc] def run(self, test): setup() result = super(PymongoXMLTestRunner, self).run(test) diff --git a/test/auth_aws/test_auth_aws.py b/test/auth_aws/test_auth_aws.py index 0522201097..f096d0569a 100644 --- a/test/auth_aws/test_auth_aws.py +++ b/test/auth_aws/test_auth_aws.py @@ -26,6 +26,7 @@ class TestAuthAWS(unittest.TestCase): + uri: str @classmethod def setUpClass(cls): diff --git a/test/mockupdb/test_cursor_namespace.py b/test/mockupdb/test_cursor_namespace.py index 10605601cf..a52e2fb4e7 100644 --- a/test/mockupdb/test_cursor_namespace.py +++ b/test/mockupdb/test_cursor_namespace.py @@ -21,6 +21,9 @@ class TestCursorNamespace(unittest.TestCase): + server: MockupDB + client: MongoClient + @classmethod def setUpClass(cls): cls.server = MockupDB(auto_ismaster={'maxWireVersion': 6}) @@ -69,6 +72,9 @@ def op(): class TestKillCursorsNamespace(unittest.TestCase): + server: MockupDB + client: MongoClient + @classmethod def setUpClass(cls): cls.server = MockupDB(auto_ismaster={'maxWireVersion': 6}) diff --git a/test/mockupdb/test_getmore_sharded.py b/test/mockupdb/test_getmore_sharded.py index 5461a13e35..0d91583378 100644 --- a/test/mockupdb/test_getmore_sharded.py +++ 
b/test/mockupdb/test_getmore_sharded.py @@ -27,7 +27,7 @@ def test_getmore_sharded(self): servers = [MockupDB(), MockupDB()] # Collect queries to either server in one queue. - q = Queue() + q: Queue = Queue() for server in servers: server.subscribe(q.put) server.autoresponds('ismaster', ismaster=True, msg='isdbgrid', diff --git a/test/mockupdb/test_handshake.py b/test/mockupdb/test_handshake.py index c15aaff9b8..29313de8c2 100644 --- a/test/mockupdb/test_handshake.py +++ b/test/mockupdb/test_handshake.py @@ -48,7 +48,7 @@ def respond(r): ServerApiVersion.V1))} client = MongoClient("mongodb://"+primary.address_string, appname='my app', # For _check_handshake_data() - **dict([k_map.get((k, v), (k, v)) for k, v + **dict([k_map.get((k, v), (k, v)) for k, v # type: ignore[arg-type] in kwargs.items()])) self.addCleanup(client.close) @@ -58,7 +58,7 @@ def respond(r): # We do this checking here rather than in the autoresponder `respond()` # because it runs in another Python thread so there are some funky things - # with error handling within that thread, and we want to be able to use + # with error handling within that thread, and we want to be able to use # self.assertRaises(). self.handshake_req.assert_matches(protocol(hello, **kwargs)) _check_handshake_data(self.handshake_req) diff --git a/test/mockupdb/test_mixed_version_sharded.py b/test/mockupdb/test_mixed_version_sharded.py index 2b6ea6a513..c3af907404 100644 --- a/test/mockupdb/test_mixed_version_sharded.py +++ b/test/mockupdb/test_mixed_version_sharded.py @@ -30,7 +30,7 @@ def setup_server(self, upgrade): self.mongos_old, self.mongos_new = MockupDB(), MockupDB() # Collect queries to either server in one queue. - self.q = Queue() + self.q: Queue = Queue() for server in self.mongos_old, self.mongos_new: server.subscribe(self.q.put) server.autoresponds('getlasterror') @@ -59,7 +59,7 @@ def create_mixed_version_sharded_test(upgrade): def test(self): self.setup_server(upgrade) start = time.time() - servers_used = set() + servers_used: set = set() while len(servers_used) < 2: go(upgrade.function, self.client) request = self.q.get(timeout=1) diff --git a/test/mockupdb/test_op_msg.py b/test/mockupdb/test_op_msg.py index 35e70cebfc..78397a3336 100755 --- a/test/mockupdb/test_op_msg.py +++ b/test/mockupdb/test_op_msg.py @@ -233,6 +233,8 @@ class TestOpMsg(unittest.TestCase): + server: MockupDB + client: MongoClient @classmethod def setUpClass(cls): diff --git a/test/mockupdb/test_op_msg_read_preference.py b/test/mockupdb/test_op_msg_read_preference.py index d9adfe17eb..eb3a14fa01 100644 --- a/test/mockupdb/test_op_msg_read_preference.py +++ b/test/mockupdb/test_op_msg_read_preference.py @@ -14,6 +14,7 @@ import copy import itertools +from typing import Any from mockupdb import MockupDB, going, CommandBase from pymongo import MongoClient, ReadPreference @@ -27,6 +28,8 @@ class OpMsgReadPrefBase(unittest.TestCase): single_mongod = False + primary: MockupDB + secondary: MockupDB @classmethod def setUpClass(cls): @@ -142,7 +145,7 @@ def test(self): tag_sets=None) client = self.setup_client(read_preference=pref) - + expected_pref: Any if operation.op_type == 'always-use-secondary': expected_server = self.secondary expected_pref = ReadPreference.SECONDARY diff --git a/test/mockupdb/test_reset_and_request_check.py b/test/mockupdb/test_reset_and_request_check.py index 86c2085e39..48f9486544 100755 --- a/test/mockupdb/test_reset_and_request_check.py +++ b/test/mockupdb/test_reset_and_request_check.py @@ -27,7 +27,7 @@ class 
TestResetAndRequestCheck(unittest.TestCase): def __init__(self, *args, **kwargs): super(TestResetAndRequestCheck, self).__init__(*args, **kwargs) - self.ismaster_time = 0 + self.ismaster_time = 0.0 self.client = None self.server = None @@ -45,7 +45,7 @@ def responder(request): kwargs = {'socketTimeoutMS': 100} # Disable retryable reads when pymongo supports it. kwargs['retryReads'] = False - self.client = MongoClient(self.server.uri, **kwargs) + self.client = MongoClient(self.server.uri, **kwargs) # type: ignore wait_until(lambda: self.client.nodes, 'connect to standalone') def tearDown(self): @@ -56,6 +56,8 @@ def _test_disconnect(self, operation): # Application operation fails. Test that client resets server # description and does *not* schedule immediate check. self.setup_server() + assert self.server is not None + assert self.client is not None # Network error on application operation. with self.assertRaises(ConnectionFailure): @@ -81,6 +83,8 @@ def _test_timeout(self, operation): # Application operation times out. Test that client does *not* reset # server description and does *not* schedule immediate check. self.setup_server() + assert self.server is not None + assert self.client is not None with self.assertRaises(ConnectionFailure): with going(operation.function, self.client): @@ -91,6 +95,7 @@ def _test_timeout(self, operation): # Server is *not* Unknown. topology = self.client._topology server = topology.select_server_by_address(self.server.address, 0) + assert server is not None self.assertEqual(SERVER_TYPE.Standalone, server.description.server_type) after = self.ismaster_time @@ -99,6 +104,8 @@ def _test_timeout(self, operation): def _test_not_master(self, operation): # Application operation gets a "not master" error. self.setup_server() + assert self.server is not None + assert self.client is not None with self.assertRaises(ConnectionFailure): with going(operation.function, self.client): @@ -110,6 +117,7 @@ def _test_not_master(self, operation): # Server is rediscovered. topology = self.client._topology server = topology.select_server_by_address(self.server.address, 0) + assert server is not None self.assertEqual(SERVER_TYPE.Standalone, server.description.server_type) after = self.ismaster_time diff --git a/test/mockupdb/test_slave_okay_sharded.py b/test/mockupdb/test_slave_okay_sharded.py index 63bb0fe303..07e05bfece 100644 --- a/test/mockupdb/test_slave_okay_sharded.py +++ b/test/mockupdb/test_slave_okay_sharded.py @@ -37,7 +37,7 @@ def setup_server(self): self.mongos1, self.mongos2 = MockupDB(), MockupDB() # Collect queries to either server in one queue. 
- self.q = Queue() + self.q: Queue = Queue() for server in self.mongos1, self.mongos2: server.subscribe(self.q.put) server.run() diff --git a/test/performance/perf_test.py b/test/performance/perf_test.py index 84c6baf60d..7effa1c1ee 100644 --- a/test/performance/perf_test.py +++ b/test/performance/perf_test.py @@ -67,6 +67,10 @@ def __exit__(self, *args): class PerformanceTest(object): + dataset: Any + data_size: Any + do_task: Any + fail: Any @classmethod def setUpClass(cls): @@ -386,6 +390,7 @@ def mp_map(map_func, files): def insert_json_file(filename): + assert proc_client is not None with open(filename, 'r') as data: coll = proc_client.perftest.corpus coll.insert_many([json.loads(line) for line in data]) @@ -398,11 +403,13 @@ def insert_json_file_with_file_id(filename): doc = json.loads(line) doc['file'] = filename documents.append(doc) + assert proc_client is not None coll = proc_client.perftest.corpus coll.insert_many(documents) def read_json_file(filename): + assert proc_client is not None coll = proc_client.perftest.corpus temp = tempfile.TemporaryFile(mode='w') try: @@ -414,6 +421,7 @@ def read_json_file(filename): def insert_gridfs_file(filename): + assert proc_client is not None bucket = GridFSBucket(proc_client.perftest) with open(filename, 'rb') as gfile: @@ -421,6 +429,7 @@ def insert_gridfs_file(filename): def read_gridfs_file(filename): + assert proc_client is not None bucket = GridFSBucket(proc_client.perftest) temp = tempfile.TemporaryFile() diff --git a/test/test_auth.py b/test/test_auth.py index 35f198574b..5b4ef0c51f 100644 --- a/test/test_auth.py +++ b/test/test_auth.py @@ -76,6 +76,8 @@ def run(self): class TestGSSAPI(unittest.TestCase): + mech_properties: str + service_realm_required: bool @classmethod def setUpClass(cls): @@ -116,6 +118,7 @@ def test_credentials_hashing(self): @ignore_deprecations def test_gssapi_simple(self): + assert GSSAPI_PRINCIPAL is not None if GSSAPI_PASS is not None: uri = ('mongodb://%s:%s@%s:%d/?authMechanism=' 'GSSAPI' % (quote_plus(GSSAPI_PRINCIPAL), @@ -264,6 +267,8 @@ def test_sasl_plain(self): authMechanism='PLAIN') client.ldap.test.find_one() + assert SASL_USER is not None + assert SASL_PASS is not None uri = ('mongodb://%s:%s@%s:%d/?authMechanism=PLAIN;' 'authSource=%s' % (quote_plus(SASL_USER), quote_plus(SASL_PASS), @@ -540,7 +545,6 @@ def test_cache(self): self.assertIsInstance(iterations, int) def test_scram_threaded(self): - coll = client_context.client.db.test coll.drop() coll.insert_one({'_id': 1}) diff --git a/test/test_binary.py b/test/test_binary.py index e6b681fc51..4bbda0c9d4 100644 --- a/test/test_binary.py +++ b/test/test_binary.py @@ -41,6 +41,8 @@ class TestBinary(unittest.TestCase): + csharp_data: bytes + java_data: bytes @classmethod def setUpClass(cls): @@ -354,6 +356,8 @@ def test_buffer_protocol(self): class TestUuidSpecExplicitCoding(unittest.TestCase): + uuid: uuid.UUID + @classmethod def setUpClass(cls): super(TestUuidSpecExplicitCoding, cls).setUpClass() @@ -457,6 +461,8 @@ def test_decoding_4(self): class TestUuidSpecImplicitCoding(IntegrationTest): + uuid: uuid.UUID + @classmethod def setUpClass(cls): super(TestUuidSpecImplicitCoding, cls).setUpClass() diff --git a/test/test_bson.py b/test/test_bson.py index eb4f4e47c2..7052042ca8 100644 --- a/test/test_bson.py +++ b/test/test_bson.py @@ -186,7 +186,7 @@ def test_encode_then_decode_any_mapping_legacy(self): decoder=lambda *args: BSON(args[0]).decode(*args[1:])) def test_encoding_defaultdict(self): - dct = collections.defaultdict(dict, [('foo', 'bar')]) + 
dct = collections.defaultdict(dict, [('foo', 'bar')]) # type: ignore[arg-type] encode(dct) self.assertEqual(dct, collections.defaultdict(dict, [('foo', 'bar')])) @@ -302,7 +302,7 @@ def test_basic_decode(self): def test_decode_all_buffer_protocol(self): docs = [{'foo': 'bar'}, {}] - bs = b"".join(map(encode, docs)) + bs = b"".join(map(encode, docs)) # type: ignore[arg-type] self.assertEqual(docs, decode_all(bytearray(bs))) self.assertEqual(docs, decode_all(memoryview(bs))) self.assertEqual(docs, decode_all(memoryview(b'1' + bs + b'1')[1:-1])) @@ -530,7 +530,9 @@ def test_large_datetime_truncation(self): def test_aware_datetime(self): aware = datetime.datetime(1993, 4, 4, 2, tzinfo=FixedOffset(555, "SomeZone")) - as_utc = (aware - aware.utcoffset()).replace(tzinfo=utc) + offset = aware.utcoffset() + assert offset is not None + as_utc = (aware - offset).replace(tzinfo=utc) self.assertEqual(datetime.datetime(1993, 4, 3, 16, 45, tzinfo=utc), as_utc) after = decode(encode({"date": aware}), CodecOptions(tz_aware=True))[ @@ -591,7 +593,9 @@ def test_local_datetime(self): def test_naive_decode(self): aware = datetime.datetime(1993, 4, 4, 2, tzinfo=FixedOffset(555, "SomeZone")) - naive_utc = (aware - aware.utcoffset()).replace(tzinfo=None) + offset = aware.utcoffset() + assert offset is not None + naive_utc = (aware - offset).replace(tzinfo=None) self.assertEqual(datetime.datetime(1993, 4, 3, 16, 45), naive_utc) after = decode(encode({"date": aware}))["date"] self.assertEqual(None, after.tzinfo) @@ -603,9 +607,9 @@ def test_dst(self): @unittest.skip('Disabled due to http://bugs.python.org/issue25222') def test_bad_encode(self): - evil_list = {'a': []} + evil_list: dict = {'a': []} evil_list['a'].append(evil_list) - evil_dict = {} + evil_dict: dict = {} evil_dict['a'] = evil_dict for evil_data in [evil_dict, evil_list]: self.assertRaises(Exception, encode, evil_data) @@ -1039,8 +1043,8 @@ def round_trip_pickle(self, obj, pickled_with_older): def test_regex_pickling(self): reg = Regex(".?") - pickled_with_3 = (b'\x80\x04\x959\x00\x00\x00\x00\x00\x00\x00\x8c\n' - b'bson.regex\x94\x8c\x05Regex\x94\x93\x94)\x81\x94}' + pickled_with_3 = (b'\x80\x04\x959\x00\x00\x00\x00\x00\x00\x00\x8c\n' + b'bson.regex\x94\x8c\x05Regex\x94\x93\x94)\x81\x94}' b'\x94(\x8c\x07pattern\x94\x8c\x02.?\x94\x8c\x05flag' b's\x94K\x00ub.') self.round_trip_pickle(reg, pickled_with_3) @@ -1083,8 +1087,8 @@ def test_minkey_pickling(self): def test_maxkey_pickling(self): maxk = MaxKey() - pickled_with_3 = (b'\x80\x04\x95\x1e\x00\x00\x00\x00\x00\x00\x00\x8c' - b'\x0cbson.max_key\x94\x8c\x06MaxKey\x94\x93\x94)' + pickled_with_3 = (b'\x80\x04\x95\x1e\x00\x00\x00\x00\x00\x00\x00\x8c' + b'\x0cbson.max_key\x94\x8c\x06MaxKey\x94\x93\x94)' b'\x81\x94.') self.round_trip_pickle(maxk, pickled_with_3) diff --git a/test/test_bulk.py b/test/test_bulk.py index 08740a437e..a895dfddc3 100644 --- a/test/test_bulk.py +++ b/test/test_bulk.py @@ -16,13 +16,15 @@ import sys import uuid -from bson.binary import UuidRepresentation -from bson.codec_options import CodecOptions + +from pymongo.mongo_client import MongoClient sys.path[0:0] = [""] -from bson import Binary +from bson.binary import Binary, UuidRepresentation +from bson.codec_options import CodecOptions from bson.objectid import ObjectId +from pymongo.collection import Collection from pymongo.common import partition_node from pymongo.errors import (BulkWriteError, ConfigurationError, @@ -40,6 +42,8 @@ class BulkTestBase(IntegrationTest): + coll: Collection + coll_w0: Collection @classmethod def 
setUpClass(cls): @@ -280,6 +284,7 @@ def test_upsert(self): upsert=True)]) self.assertEqualResponse(expected, result.bulk_api_result) self.assertEqual(1, result.upserted_count) + assert result.upserted_ids is not None self.assertEqual(1, len(result.upserted_ids)) self.assertTrue(isinstance(result.upserted_ids.get(0), ObjectId)) @@ -341,11 +346,11 @@ def test_bulk_write_invalid_arguments(self): # The requests argument must be a list. generator = (InsertOne({}) for _ in range(10)) with self.assertRaises(TypeError): - self.coll.bulk_write(generator) + self.coll.bulk_write(generator) # type: ignore[arg-type] # Document is not wrapped in a bulk write operation. with self.assertRaises(TypeError): - self.coll.bulk_write([{}]) + self.coll.bulk_write([{}]) # type: ignore[list-item] def test_upsert_large(self): big = 'a' * (client_context.max_bson_size - 37) @@ -425,7 +430,7 @@ def test_upsert_uuid_unspecified(self): def test_upsert_uuid_standard_subdocuments(self): options = CodecOptions(uuid_representation=UuidRepresentation.STANDARD) coll = self.coll.with_options(codec_options=options) - ids = [ + ids: list = [ {'f': Binary(bytes(i)), 'f2': uuid.uuid4()} for i in range(3) ] @@ -472,7 +477,7 @@ def test_single_ordered_batch(self): def test_single_error_ordered_batch(self): self.coll.create_index('a', unique=True) self.addCleanup(self.coll.drop_index, [('a', 1)]) - requests = [ + requests: list = [ InsertOne({'b': 1, 'a': 1}), UpdateOne({'b': 2}, {'$set': {'a': 1}}, upsert=True), InsertOne({'b': 3, 'a': 2}), @@ -506,7 +511,7 @@ def test_single_error_ordered_batch(self): def test_multiple_error_ordered_batch(self): self.coll.create_index('a', unique=True) self.addCleanup(self.coll.drop_index, [('a', 1)]) - requests = [ + requests: list = [ InsertOne({'b': 1, 'a': 1}), UpdateOne({'b': 2}, {'$set': {'a': 1}}, upsert=True), UpdateOne({'b': 3}, {'$set': {'a': 2}}, upsert=True), @@ -542,7 +547,7 @@ def test_multiple_error_ordered_batch(self): result) def test_single_unordered_batch(self): - requests = [ + requests: list = [ InsertOne({'a': 1}), UpdateOne({'a': 1}, {'$set': {'b': 1}}), UpdateOne({'a': 2}, {'$set': {'b': 2}}, upsert=True), @@ -564,7 +569,7 @@ def test_single_unordered_batch(self): def test_single_error_unordered_batch(self): self.coll.create_index('a', unique=True) self.addCleanup(self.coll.drop_index, [('a', 1)]) - requests = [ + requests: list = [ InsertOne({'b': 1, 'a': 1}), UpdateOne({'b': 2}, {'$set': {'a': 1}}, upsert=True), InsertOne({'b': 3, 'a': 2}), @@ -599,7 +604,7 @@ def test_single_error_unordered_batch(self): def test_multiple_error_unordered_batch(self): self.coll.create_index('a', unique=True) self.addCleanup(self.coll.drop_index, [('a', 1)]) - requests = [ + requests: list = [ InsertOne({'b': 1, 'a': 1}), UpdateOne({'b': 2}, {'$set': {'a': 3}}, upsert=True), UpdateOne({'b': 3}, {'$set': {'a': 4}}, upsert=True), @@ -662,7 +667,7 @@ def test_large_inserts_ordered(self): self.coll.delete_many({}) big = 'x' * (1024 * 1024 * 4) - result = self.coll.bulk_write([ + write_result = self.coll.bulk_write([ InsertOne({'a': 1, 'big': big}), InsertOne({'a': 2, 'big': big}), InsertOne({'a': 3, 'big': big}), @@ -671,7 +676,7 @@ def test_large_inserts_ordered(self): InsertOne({'a': 6, 'big': big}), ]) - self.assertEqual(6, result.inserted_count) + self.assertEqual(6, write_result.inserted_count) self.assertEqual(6, self.coll.count_documents({})) def test_large_inserts_unordered(self): @@ -685,12 +690,12 @@ def test_large_inserts_unordered(self): try: self.coll.bulk_write(requests, 
ordered=False) except BulkWriteError as exc: - result = exc.details + details = exc.details self.assertEqual(exc.code, 65) else: self.fail("Error not raised") - self.assertEqual(2, result['nInserted']) + self.assertEqual(2, details['nInserted']) self.coll.delete_many({}) @@ -741,7 +746,7 @@ def tearDown(self): self.coll.delete_many({}) def test_no_results_ordered_success(self): - requests = [ + requests: list = [ InsertOne({'a': 1}), UpdateOne({'a': 3}, {'$set': {'b': 1}}, upsert=True), InsertOne({'a': 2}), @@ -755,7 +760,7 @@ def test_no_results_ordered_success(self): 'removed {"_id": 1}') def test_no_results_ordered_failure(self): - requests = [ + requests: list = [ InsertOne({'_id': 1}), UpdateOne({'_id': 3}, {'$set': {'b': 1}}, upsert=True), InsertOne({'_id': 2}), @@ -771,7 +776,7 @@ def test_no_results_ordered_failure(self): self.assertEqual({'_id': 1}, self.coll.find_one({'_id': 1})) def test_no_results_unordered_success(self): - requests = [ + requests: list = [ InsertOne({'a': 1}), UpdateOne({'a': 3}, {'$set': {'b': 1}}, upsert=True), InsertOne({'a': 2}), @@ -785,7 +790,7 @@ def test_no_results_unordered_success(self): 'removed {"_id": 1}') def test_no_results_unordered_failure(self): - requests = [ + requests: list = [ InsertOne({'_id': 1}), UpdateOne({'_id': 3}, {'$set': {'b': 1}}, upsert=True), InsertOne({'_id': 2}), @@ -832,13 +837,15 @@ def test_no_remove(self): class TestBulkWriteConcern(BulkTestBase): + w: Optional[int] + secondary: MongoClient @classmethod def setUpClass(cls): super(TestBulkWriteConcern, cls).setUpClass() cls.w = client_context.w cls.secondary = None - if cls.w > 1: + if cls.w is not None and cls.w > 1: for member in client_context.hello['hosts']: if member != client_context.hello['primary']: cls.secondary = single_client(*partition_node(member)) @@ -886,7 +893,7 @@ def test_write_concern_failure_ordered(self): try: self.cause_wtimeout(requests, ordered=True) except BulkWriteError as exc: - result = exc.details + details = exc.details self.assertEqual(exc.code, 65) else: self.fail("Error not raised") @@ -899,13 +906,13 @@ def test_write_concern_failure_ordered(self): 'nRemoved': 0, 'upserted': [], 'writeErrors': []}, - result) + details) # When talking to legacy servers there will be a # write concern error for each operation. 
- self.assertTrue(len(result['writeConcernErrors']) > 0) + self.assertTrue(len(details['writeConcernErrors']) > 0) - failed = result['writeConcernErrors'][0] + failed = details['writeConcernErrors'][0] self.assertEqual(64, failed['code']) self.assertTrue(isinstance(failed['errmsg'], str)) @@ -924,7 +931,7 @@ def test_write_concern_failure_ordered(self): try: self.cause_wtimeout(requests, ordered=True) except BulkWriteError as exc: - result = exc.details + details = exc.details self.assertEqual(exc.code, 65) else: self.fail("Error not raised") @@ -941,10 +948,10 @@ def test_write_concern_failure_ordered(self): 'code': 11000, 'errmsg': '...', 'op': {'_id': '...', 'a': 1}}]}, - result) + details) - self.assertTrue(len(result['writeConcernErrors']) > 1) - failed = result['writeErrors'][0] + self.assertTrue(len(details['writeConcernErrors']) > 1) + failed = details['writeErrors'][0] self.assertTrue("duplicate" in failed['errmsg']) @client_context.require_replica_set @@ -966,17 +973,17 @@ def test_write_concern_failure_unordered(self): try: self.cause_wtimeout(requests, ordered=False) except BulkWriteError as exc: - result = exc.details + details = exc.details self.assertEqual(exc.code, 65) else: self.fail("Error not raised") - self.assertEqual(2, result['nInserted']) - self.assertEqual(1, result['nUpserted']) - self.assertEqual(0, len(result['writeErrors'])) + self.assertEqual(2, details['nInserted']) + self.assertEqual(1, details['nUpserted']) + self.assertEqual(0, len(details['writeErrors'])) # When talking to legacy servers there will be a # write concern error for each operation. - self.assertTrue(len(result['writeConcernErrors']) > 1) + self.assertTrue(len(details['writeConcernErrors']) > 1) self.coll.delete_many({}) self.coll.create_index('a', unique=True) @@ -984,7 +991,7 @@ def test_write_concern_failure_unordered(self): # Fail due to write concern support as well # as duplicate key error on unordered batch. - requests = [ + requests: list = [ InsertOne({'a': 1}), UpdateOne({'a': 3}, {'$set': {'a': 3, 'b': 1}}, upsert=True), InsertOne({'a': 1}), @@ -993,29 +1000,29 @@ def test_write_concern_failure_unordered(self): try: self.cause_wtimeout(requests, ordered=False) except BulkWriteError as exc: - result = exc.details + details = exc.details self.assertEqual(exc.code, 65) else: self.fail("Error not raised") - self.assertEqual(2, result['nInserted']) - self.assertEqual(1, result['nUpserted']) - self.assertEqual(1, len(result['writeErrors'])) + self.assertEqual(2, details['nInserted']) + self.assertEqual(1, details['nUpserted']) + self.assertEqual(1, len(details['writeErrors'])) # When talking to legacy servers there will be a # write concern error for each operation. 
- self.assertTrue(len(result['writeConcernErrors']) > 1) + self.assertTrue(len(details['writeConcernErrors']) > 1) - failed = result['writeErrors'][0] + failed = details['writeErrors'][0] self.assertEqual(2, failed['index']) self.assertEqual(11000, failed['code']) self.assertTrue(isinstance(failed['errmsg'], str)) self.assertEqual(1, failed['op']['a']) - failed = result['writeConcernErrors'][0] + failed = details['writeConcernErrors'][0] self.assertEqual(64, failed['code']) self.assertTrue(isinstance(failed['errmsg'], str)) - upserts = result['upserted'] + upserts = details['upserted'] self.assertEqual(1, len(upserts)) self.assertEqual(1, upserts[0]['index']) self.assertTrue(upserts[0].get('_id')) diff --git a/test/test_change_stream.py b/test/test_change_stream.py index a49f6972b2..655b99e801 100644 --- a/test/test_change_stream.py +++ b/test/test_change_stream.py @@ -24,6 +24,7 @@ import uuid from itertools import product +from typing import no_type_check sys.path[0:0] = [''] @@ -121,6 +122,7 @@ def kill_change_stream_cursor(self, change_stream): class APITestsMixin(object): + @no_type_check def test_watch(self): with self.change_stream( [{'$project': {'foo': 0}}], full_document='updateLookup', @@ -145,6 +147,7 @@ def test_watch(self): with self.change_stream(resume_after=resume_token): pass + @no_type_check def test_try_next(self): # ChangeStreams only read majority committed data so use w:majority. coll = self.watched_collection().with_options( @@ -161,6 +164,7 @@ def test_try_next(self): wait_until(lambda: stream.try_next() is not None, "get change from try_next") + @no_type_check def test_try_next_runs_one_getmore(self): listener = EventListener() client = rs_or_single_client(event_listeners=[listener]) @@ -216,6 +220,7 @@ def test_try_next_runs_one_getmore(self): set(["getMore"])) self.assertIsNone(stream.try_next()) + @no_type_check def test_batch_size_is_honored(self): listener = EventListener() client = rs_or_single_client(event_listeners=[listener]) @@ -245,6 +250,7 @@ def test_batch_size_is_honored(self): self.assertEqual(expected[key], cmd[key]) # $changeStream.startAtOperationTime was added in 4.0.0. + @no_type_check @client_context.require_version_min(4, 0, 0) def test_start_at_operation_time(self): optime = self.get_start_at_operation_time() @@ -258,6 +264,7 @@ def test_start_at_operation_time(self): for i in range(ndocs): cs.next() + @no_type_check def _test_full_pipeline(self, expected_cs_stage): client, listener = self.client_with_listener("aggregate") results = listener.results @@ -273,12 +280,14 @@ def _test_full_pipeline(self, expected_cs_stage): {'$project': {'foo': 0}}], command.command['pipeline']) + @no_type_check def test_full_pipeline(self): """$changeStream must be the first stage in a change stream pipeline sent to the server. """ self._test_full_pipeline({}) + @no_type_check def test_iteration(self): with self.change_stream(batch_size=2) as change_stream: num_inserted = 10 @@ -292,6 +301,7 @@ def test_iteration(self): break self._test_invalidate_stops_iteration(change_stream) + @no_type_check def _test_next_blocks(self, change_stream): inserted_doc = {'_id': ObjectId()} changes = [] @@ -311,18 +321,21 @@ def _test_next_blocks(self, change_stream): self.assertEqual(changes[0]['operationType'], 'insert') self.assertEqual(changes[0]['fullDocument'], inserted_doc) + @no_type_check def test_next_blocks(self): """Test that next blocks until a change is readable""" # Use a short await time to speed up the test. 
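The try_next/max_await_time_ms behavior these tests drive looks like this in application code (a minimal sketch, assuming a replica-set deployment, which change streams require, and a test.entries collection):

from pymongo import MongoClient

client = MongoClient()  # assumes a replica set; change streams need one
coll = client.test.entries

# max_await_time_ms bounds how long each getMore blocks on the server.
with coll.watch(max_await_time_ms=250) as stream:
    coll.insert_one({"msg": "hello"})
    change = stream.try_next()  # returns None if nothing has arrived yet
    while change is None:
        change = stream.try_next()
    print(change["operationType"], change["fullDocument"])
    # stream.resume_token can seed a later watch(resume_after=...) call.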
with self.change_stream(max_await_time_ms=250) as change_stream: self._test_next_blocks(change_stream) + @no_type_check def test_aggregate_cursor_blocks(self): """Test that an aggregate cursor blocks until a change is readable.""" with self.watched_collection().aggregate( [{'$changeStream': {}}], maxAwaitTimeMS=250) as change_stream: self._test_next_blocks(change_stream) + @no_type_check def test_concurrent_close(self): """Ensure a ChangeStream can be closed from another thread.""" # Use a short await time to speed up the test. @@ -338,6 +351,7 @@ def iterate_cursor(): t.join(3) self.assertFalse(t.is_alive()) + @no_type_check def test_unknown_full_document(self): """Must rely on the server to raise an error on unknown fullDocument. """ @@ -347,6 +361,7 @@ def test_unknown_full_document(self): except OperationFailure: pass + @no_type_check def test_change_operations(self): """Test each operation type.""" expected_ns = {'db': self.watched_collection().database.name, @@ -393,6 +408,7 @@ def test_change_operations(self): # Invalidate. self._test_get_invalidate_event(change_stream) + @no_type_check @client_context.require_version_min(4, 1, 1) def test_start_after(self): resume_token = self.get_resume_token(invalidate=True) @@ -408,6 +424,7 @@ def test_start_after(self): self.assertEqual(change['operationType'], 'insert') self.assertEqual(change['fullDocument'], {'_id': 2}) + @no_type_check @client_context.require_version_min(4, 1, 1) def test_start_after_resume_process_with_changes(self): resume_token = self.get_resume_token(invalidate=True) @@ -427,6 +444,7 @@ def test_start_after_resume_process_with_changes(self): self.assertEqual(change['operationType'], 'insert') self.assertEqual(change['fullDocument'], {'_id': 3}) + @no_type_check @client_context.require_no_mongos # Remove after SERVER-41196 @client_context.require_version_min(4, 1, 1) def test_start_after_resume_process_without_changes(self): @@ -444,12 +462,14 @@ def test_start_after_resume_process_without_changes(self): class ProseSpecTestsMixin(object): + @no_type_check def _client_with_listener(self, *commands): listener = AllowListEventListener(*commands) client = rs_or_single_client(event_listeners=[listener]) self.addCleanup(client.close) return client, listener + @no_type_check def _populate_and_exhaust_change_stream(self, change_stream, batch_size=3): self.watched_collection().insert_many( [{"data": k} for k in range(batch_size)]) @@ -485,6 +505,7 @@ def _get_expected_resume_token(self, stream, listener, response = listener.results['succeeded'][-1].reply return response['cursor']['postBatchResumeToken'] + @no_type_check def _test_raises_error_on_missing_id(self, expected_exception): """ChangeStream will raise an exception if the server response is missing the resume token. @@ -497,6 +518,7 @@ def _test_raises_error_on_missing_id(self, expected_exception): with self.assertRaises(StopIteration): next(change_stream) + @no_type_check def _test_update_resume_token(self, expected_rt_getter): """ChangeStream must continuously track the last seen resumeToken.""" client, listener = self._client_with_listener("aggregate", "getMore") @@ -536,6 +558,7 @@ def test_raises_error_on_missing_id_418minus(self): self._test_raises_error_on_missing_id(InvalidOperation) # Prose test no. 3 + @no_type_check def test_resume_on_error(self): with self.change_stream() as change_stream: self.insert_one_and_check(change_stream, {'_id': 1}) @@ -544,6 +567,7 @@ def test_resume_on_error(self): self.insert_one_and_check(change_stream, {'_id': 2}) # Prose test no. 
4 + @no_type_check @client_context.require_failCommand_fail_point def test_no_resume_attempt_if_aggregate_command_fails(self): # Set non-retryable error on aggregate command. @@ -568,6 +592,7 @@ def test_no_resume_attempt_if_aggregate_command_fails(self): # each operation which ensure compliance with this prose test. # Prose test no. 7 + @no_type_check def test_initial_empty_batch(self): with self.change_stream() as change_stream: # The first batch should be empty. @@ -579,6 +604,7 @@ def test_initial_empty_batch(self): self.assertEqual(cursor_id, change_stream._cursor.cursor_id) # Prose test no. 8 + @no_type_check def test_kill_cursors(self): def raise_error(): raise ServerSelectionTimeoutError('mock error') @@ -591,6 +617,7 @@ def raise_error(): self.insert_one_and_check(change_stream, {'_id': 2}) # Prose test no. 9 + @no_type_check @client_context.require_version_min(4, 0, 0) @client_context.require_version_max(4, 0, 7) def test_start_at_operation_time_caching(self): @@ -619,6 +646,7 @@ def test_start_at_operation_time_caching(self): # This test is identical to prose test no. 3. # Prose test no. 11 + @no_type_check @client_context.require_version_min(4, 0, 7) def test_resumetoken_empty_batch(self): client, listener = self._client_with_listener("getMore") @@ -631,6 +659,7 @@ def test_resumetoken_empty_batch(self): response["cursor"]["postBatchResumeToken"]) # Prose test no. 11 + @no_type_check @client_context.require_version_min(4, 0, 7) def test_resumetoken_exhausted_batch(self): client, listener = self._client_with_listener("getMore") @@ -643,6 +672,7 @@ def test_resumetoken_exhausted_batch(self): response["cursor"]["postBatchResumeToken"]) # Prose test no. 12 + @no_type_check @client_context.require_version_max(4, 0, 7) def test_resumetoken_empty_batch_legacy(self): resume_point = self.get_resume_token() @@ -659,6 +689,7 @@ def test_resumetoken_empty_batch_legacy(self): self.assertEqual(resume_token, resume_point) # Prose test no. 12 + @no_type_check @client_context.require_version_max(4, 0, 7) def test_resumetoken_exhausted_batch_legacy(self): # Resume token is _id of last change. @@ -673,6 +704,7 @@ def test_resumetoken_exhausted_batch_legacy(self): self.assertEqual(change_stream.resume_token, change["_id"]) # Prose test no. 13 + @no_type_check def test_resumetoken_partially_iterated_batch(self): # When batch has been iterated up to but not including the last element. # Resume token should be _id of previous change document. @@ -686,6 +718,7 @@ def test_resumetoken_partially_iterated_batch(self): self.assertEqual(resume_token, change["_id"]) + @no_type_check def _test_resumetoken_uniterated_nonempty_batch(self, resume_option): # When the batch is not empty and hasn't been iterated at all. # Resume token should be same as the resume option used. @@ -704,17 +737,20 @@ def _test_resumetoken_uniterated_nonempty_batch(self, resume_option): self.assertEqual(resume_token, resume_point) # Prose test no. 14 + @no_type_check @client_context.require_no_mongos def test_resumetoken_uniterated_nonempty_batch_resumeafter(self): self._test_resumetoken_uniterated_nonempty_batch("resume_after") # Prose test no. 14 + @no_type_check @client_context.require_no_mongos @client_context.require_version_min(4, 1, 1) def test_resumetoken_uniterated_nonempty_batch_startafter(self): self._test_resumetoken_uniterated_nonempty_batch("start_after") # Prose test no. 
17 + @no_type_check @client_context.require_version_min(4, 1, 1) def test_startafter_resume_uses_startafter_after_empty_getMore(self): # Resume should use startAfter after no changes have been returned. @@ -735,6 +771,7 @@ def test_startafter_resume_uses_startafter_after_empty_getMore(self): response.command["pipeline"][0]["$changeStream"].get("startAfter")) # Prose test no. 18 + @no_type_check @client_context.require_version_min(4, 1, 1) def test_startafter_resume_uses_resumeafter_after_nonempty_getMore(self): # Resume should use resumeAfter after some changes have been returned. @@ -757,6 +794,8 @@ def test_startafter_resume_uses_resumeafter_after_nonempty_getMore(self): class TestClusterChangeStream(TestChangeStreamBase, APITestsMixin): + dbs: list + @classmethod @client_context.require_version_min(4, 0, 0, -1) @client_context.require_no_mmap @@ -1045,6 +1084,7 @@ def test_read_concern(self): class TestAllLegacyScenarios(IntegrationTest): RUN_ON_LOAD_BALANCER = True + listener: AllowListEventListener @classmethod @client_context.require_connection diff --git a/test/test_client.py b/test/test_client.py index 8db1cb5621..9ca9989052 100644 --- a/test/test_client.py +++ b/test/test_client.py @@ -28,6 +28,8 @@ import threading import warnings +from typing import no_type_check, Type + sys.path[0:0] = [""] from bson import encode @@ -99,6 +101,7 @@ class ClientUnitTest(unittest.TestCase): """MongoClient tests that don't require a server.""" + client: MongoClient @classmethod @client_context.require_connection @@ -341,7 +344,7 @@ def transform_python(self, value): return int(value) # Ensure codec options are passed in correctly - document_class = SON + document_class: Type[SON] = SON type_registry = TypeRegistry([MyFloatAsIntEncoder()]) tz_aware = True uuid_representation_label = 'javaLegacy' @@ -614,7 +617,7 @@ def test_constants(self): port are not overloaded. 
""" host, port = client_context.host, client_context.port - kwargs = client_context.default_client_options.copy() + kwargs: dict = client_context.default_client_options.copy() if client_context.auth_enabled: kwargs['username'] = db_user kwargs['password'] = db_pwd @@ -1111,6 +1114,7 @@ def test_socketKeepAlive(self): socket.SO_KEEPALIVE) self.assertTrue(keepalive) + @no_type_check def test_tz_aware(self): self.assertRaises(ValueError, MongoClient, tz_aware='foo') @@ -1140,7 +1144,7 @@ def test_ipv6(self): uri = "mongodb://%s[::1]:%d" % (auth_str, client_context.port) if client_context.is_rs: - uri += '/?replicaSet=' + client_context.replica_set_name + uri += '/?replicaSet=' + (client_context.replica_set_name or "") client = rs_or_single_client_noauth(uri) client.pymongo_test.test.insert_one({"dummy": "object"}) @@ -1379,7 +1383,7 @@ def init(self, *args): heartbeat_times.append(time.time()) try: - ServerHeartbeatStartedEvent.__init__ = init + ServerHeartbeatStartedEvent.__init__ = init # type: ignore listener = HeartbeatStartedListener() uri = "mongodb://%s:%d/?heartbeatFrequencyMS=500" % ( client_context.host, client_context.port) @@ -1394,7 +1398,7 @@ def init(self, *args): client.close() finally: - ServerHeartbeatStartedEvent.__init__ = old_init + ServerHeartbeatStartedEvent.__init__ = old_init # type: ignore def test_small_heartbeat_frequency_ms(self): uri = "mongodb://example/?heartbeatFrequencyMS=499" @@ -1847,7 +1851,7 @@ def test(collection): lazy_client_trial(reset, delete_one, test, self._get_client) def test_find_one(self): - results = [] + results: list = [] def reset(collection): collection.drop() diff --git a/test/test_cmap.py b/test/test_cmap.py index 20ed7f31ec..bfc600f19f 100644 --- a/test/test_cmap.py +++ b/test/test_cmap.py @@ -213,11 +213,11 @@ def set_fail_point(self, command_args): def run_scenario(self, scenario_def, test): """Run a CMAP spec test.""" - self.logs = [] + self.logs: list = [] self.assertEqual(scenario_def['version'], 1) self.assertIn(scenario_def['style'], ['unit', 'integration']) self.listener = CMAPListener() - self._ops = [] + self._ops: list = [] # Configure the fail point before creating the client. if 'failPoint' in test: @@ -259,9 +259,9 @@ def run_scenario(self, scenario_def, test): self.pool = list(client._topology._servers.values())[0].pool # Map of target names to Thread objects. 
- self.targets = dict() + self.targets: dict = dict() # Map of label names to Connection objects - self.labels = dict() + self.labels: dict = dict() def cleanup(): for t in self.targets.values(): diff --git a/test/test_code.py b/test/test_code.py index c5e190f363..1c4b5be1fe 100644 --- a/test/test_code.py +++ b/test/test_code.py @@ -17,6 +17,7 @@ """Tests for the Code wrapper.""" import sys + sys.path[0:0] = [""] from bson.code import Code @@ -35,7 +36,7 @@ def test_read_only(self): c = Code("blah") def set_c(): - c.scope = 5 + c.scope = 5 # type: ignore self.assertRaises(AttributeError, set_c) def test_code(self): diff --git a/test/test_collation.py b/test/test_collation.py index f0139b4a22..9c4f4f6576 100644 --- a/test/test_collation.py +++ b/test/test_collation.py @@ -17,6 +17,8 @@ import functools import warnings +from typing import Any + from pymongo.collation import ( Collation, CollationCaseFirst, CollationStrength, CollationAlternate, @@ -78,6 +80,10 @@ def test_constructor(self): class TestCollation(IntegrationTest): + listener: EventListener + warn_context: Any + collation: Collation + @classmethod @client_context.require_connection def setUpClass(cls): diff --git a/test/test_collection.py b/test/test_collection.py index 4a167bacb3..3d4a107aa9 100644 --- a/test/test_collection.py +++ b/test/test_collection.py @@ -20,8 +20,11 @@ import re import sys -from codecs import utf_8_decode +from codecs import utf_8_decode # type: ignore from collections import defaultdict +from typing import no_type_check + +from pymongo.database import Database sys.path[0:0] = [""] @@ -66,6 +69,7 @@ class TestCollectionNoConnect(unittest.TestCase): """Test Collection features on a client that does not connect. """ + db: Database @classmethod def setUpClass(cls): @@ -116,11 +120,12 @@ def test_iteration(self): class TestCollection(IntegrationTest): + w: int @classmethod def setUpClass(cls): super(TestCollection, cls).setUpClass() - cls.w = client_context.w + cls.w = client_context.w # type: ignore @classmethod def tearDownClass(cls): @@ -726,7 +731,7 @@ def test_insert_many(self): db = self.db db.test.drop() - docs = [{} for _ in range(5)] + docs: list = [{} for _ in range(5)] result = db.test.insert_many(docs) self.assertTrue(isinstance(result, InsertManyResult)) self.assertTrue(isinstance(result.inserted_ids, list)) @@ -759,7 +764,7 @@ def test_insert_many(self): db = db.client.get_database(db.name, write_concern=WriteConcern(w=0)) - docs = [{} for _ in range(5)] + docs: list = [{} for _ in range(5)] result = db.test.insert_many(docs) self.assertTrue(isinstance(result, InsertManyResult)) self.assertFalse(result.acknowledged) @@ -792,11 +797,11 @@ def test_insert_many_invalid(self): with self.assertRaisesRegex( TypeError, "documents must be a non-empty list"): - db.test.insert_many(1) + db.test.insert_many(1) # type: ignore[arg-type] with self.assertRaisesRegex( TypeError, "documents must be a non-empty list"): - db.test.insert_many(RawBSONDocument(encode({'_id': 2}))) + db.test.insert_many(RawBSONDocument(encode({'_id': 2}))) # type: ignore[arg-type] def test_delete_one(self): self.db.test.drop() @@ -1064,7 +1069,7 @@ def test_bypass_document_validation_bulk_write(self): db_w0 = self.db.client.get_database( self.db.name, write_concern=WriteConcern(w=0)) - ops = [InsertOne({"a": -10}), + ops: list = [InsertOne({"a": -10}), InsertOne({"a": -11}), InsertOne({"a": -12}), UpdateOne({"a": {"$lte": -10}}, {"$inc": {"a": 1}}), @@ -1087,7 +1092,7 @@ def test_bypass_document_validation_bulk_write(self): def 
test_find_by_default_dct(self): db = self.db db.test.insert_one({'foo': 'bar'}) - dct = defaultdict(dict, [('foo', 'bar')]) + dct = defaultdict(dict, [('foo', 'bar')]) # type: ignore[arg-type] self.assertIsNotNone(db.test.find_one(dct)) self.assertEqual(dct, defaultdict(dict, [('foo', 'bar')])) @@ -1117,6 +1122,7 @@ def test_find_w_fields(self): doc = next(db.test.find({}, ["mike"])) self.assertFalse("extra thing" in doc) + @no_type_check def test_fields_specifier_as_dict(self): db = self.db db.test.delete_many({}) @@ -1333,7 +1339,7 @@ def test_replace_one(self): self.assertTrue(result.acknowledged) self.assertEqual(1, db.test.count_documents({"y": 1})) self.assertEqual(0, db.test.count_documents({"x": 1})) - self.assertEqual(db.test.find_one(id1)["y"], 1) + self.assertEqual(db.test.find_one(id1)["y"], 1) # type: ignore replacement = RawBSONDocument(encode({"_id": id1, "z": 1})) result = db.test.replace_one({"y": 1}, replacement, True) @@ -1344,7 +1350,7 @@ def test_replace_one(self): self.assertTrue(result.acknowledged) self.assertEqual(1, db.test.count_documents({"z": 1})) self.assertEqual(0, db.test.count_documents({"y": 1})) - self.assertEqual(db.test.find_one(id1)["z"], 1) + self.assertEqual(db.test.find_one(id1)["z"], 1) # type: ignore result = db.test.replace_one({"x": 2}, {"y": 2}, True) self.assertTrue(isinstance(result, UpdateResult)) @@ -1377,7 +1383,7 @@ def test_update_one(self): self.assertTrue(result.modified_count in (None, 1)) self.assertIsNone(result.upserted_id) self.assertTrue(result.acknowledged) - self.assertEqual(db.test.find_one(id1)["x"], 6) + self.assertEqual(db.test.find_one(id1)["x"], 6) # type: ignore id2 = db.test.insert_one({"x": 1}).inserted_id result = db.test.update_one({"x": 6}, {"$inc": {"x": 1}}) @@ -1386,8 +1392,8 @@ def test_update_one(self): self.assertTrue(result.modified_count in (None, 1)) self.assertIsNone(result.upserted_id) self.assertTrue(result.acknowledged) - self.assertEqual(db.test.find_one(id1)["x"], 7) - self.assertEqual(db.test.find_one(id2)["x"], 1) + self.assertEqual(db.test.find_one(id1)["x"], 7) # type: ignore + self.assertEqual(db.test.find_one(id2)["x"], 1) # type: ignore result = db.test.update_one({"x": 2}, {"$set": {"y": 1}}, True) self.assertTrue(isinstance(result, UpdateResult)) @@ -1587,12 +1593,12 @@ def test_aggregation_cursor(self): # Test that batchSize is handled properly. cursor = db.test.aggregate([], batchSize=5) - self.assertEqual(5, len(cursor._CommandCursor__data)) + self.assertEqual(5, len(cursor._CommandCursor__data)) # type: ignore # Force a getMore - cursor._CommandCursor__data.clear() + cursor._CommandCursor__data.clear() # type: ignore next(cursor) # batchSize - 1 - self.assertEqual(4, len(cursor._CommandCursor__data)) + self.assertEqual(4, len(cursor._CommandCursor__data)) # type: ignore # Exhaust the cursor. There shouldn't be any errors. for doc in cursor: pass @@ -1679,6 +1685,7 @@ def test_rename(self): with self.write_concern_collection() as coll: coll.rename('foo') + @no_type_check def test_find_one(self): db = self.db db.drop_collection("test") @@ -1973,17 +1980,17 @@ def __getattr__(self, name): bad = BadGetAttr([('foo', 'bar')]) c.insert_one({'bad': bad}) - self.assertEqual('bar', c.find_one()['bad']['foo']) + self.assertEqual('bar', c.find_one()['bad']['foo']) # type: ignore def test_array_filters_validation(self): # array_filters must be a list. 
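For contrast with the TypeError cases asserted below, a valid call passes array_filters as a list of filter documents (a sketch, assuming a local mongod and documents containing a grades array; filtered positional updates require MongoDB 3.6+):

from pymongo import MongoClient

coll = MongoClient().test.students  # assumes mongod on localhost:27017
coll.insert_one({"_id": 1, "grades": [95, 102, 90]})

# array_filters must be a list; each entry defines an identifier ($[g])
# referenced in the update document.
coll.update_one(
    {"_id": 1},
    {"$set": {"grades.$[g]": 100}},
    array_filters=[{"g": {"$gt": 100}}],
)
doc = coll.find_one({"_id": 1})
print(doc["grades"])  # [95, 100, 90]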
c = self.db.test with self.assertRaises(TypeError): - c.update_one({}, {'$set': {'a': 1}}, array_filters={}) + c.update_one({}, {'$set': {'a': 1}}, array_filters={}) # type: ignore[arg-type] with self.assertRaises(TypeError): - c.update_many({}, {'$set': {'a': 1}}, array_filters={}) + c.update_many({}, {'$set': {'a': 1}}, array_filters={} ) # type: ignore[arg-type] with self.assertRaises(TypeError): - c.find_one_and_update({}, {'$set': {'a': 1}}, array_filters={}) + c.find_one_and_update({}, {'$set': {'a': 1}}, array_filters={}) # type: ignore[arg-type] def test_array_filters_unacknowledged(self): c_w0 = self.db.test.with_options(write_concern=WriteConcern(w=0)) @@ -2158,7 +2165,7 @@ def test_find_regex(self): c.drop() c.insert_one({'r': re.compile('.*')}) - self.assertTrue(isinstance(c.find_one()['r'], Regex)) + self.assertTrue(isinstance(c.find_one()['r'], Regex)) # type: ignore for doc in c.find(): self.assertTrue(isinstance(doc['r'], Regex)) @@ -2189,9 +2196,9 @@ def test_helpers_with_let(self): for helper, args in helpers: with self.assertRaisesRegex(TypeError, "let must be an instance of dict"): - helper(*args, let=let) + helper(*args, let=let) # type: ignore for helper, args in helpers: - helper(*args, let={}) + helper(*args, let={}) # type: ignore if __name__ == "__main__": diff --git a/test/test_command_monitoring_legacy.py b/test/test_command_monitoring_legacy.py index 7ff80d75e5..a05dbd9668 100644 --- a/test/test_command_monitoring_legacy.py +++ b/test/test_command_monitoring_legacy.py @@ -43,6 +43,8 @@ def camel_to_snake(camel): class TestAllScenarios(unittest.TestCase): + listener: EventListener + client: MongoClient @classmethod @client_context.require_connection diff --git a/test/test_common.py b/test/test_common.py index dcd618c509..7d7a26c278 100644 --- a/test/test_common.py +++ b/test/test_common.py @@ -50,13 +50,13 @@ def test_uuid_representation(self): "uuid", CodecOptions(uuid_representation=PYTHON_LEGACY)) legacy_opts = coll.codec_options coll.insert_one({'uu': uu}) - self.assertEqual(uu, coll.find_one({'uu': uu})['uu']) + self.assertEqual(uu, coll.find_one({'uu': uu})['uu']) # type: ignore coll = self.db.get_collection( "uuid", CodecOptions(uuid_representation=STANDARD)) self.assertEqual(STANDARD, coll.codec_options.uuid_representation) self.assertEqual(None, coll.find_one({'uu': uu})) uul = Binary.from_uuid(uu, PYTHON_LEGACY) - self.assertEqual(uul, coll.find_one({'uu': uul})['uu']) + self.assertEqual(uul, coll.find_one({'uu': uul})['uu']) # type: ignore # Test count_documents self.assertEqual(0, coll.count_documents({'uu': uu})) @@ -81,9 +81,9 @@ def test_uuid_representation(self): coll.update_one({'_id': uu}, {'$set': {'i': 2}}) coll = self.db.get_collection( "uuid", CodecOptions(uuid_representation=PYTHON_LEGACY)) - self.assertEqual(1, coll.find_one({'_id': uu})['i']) + self.assertEqual(1, coll.find_one({'_id': uu})['i']) # type: ignore coll.update_one({'_id': uu}, {'$set': {'i': 2}}) - self.assertEqual(2, coll.find_one({'_id': uu})['i']) + self.assertEqual(2, coll.find_one({'_id': uu})['i']) # type: ignore # Test Cursor.distinct self.assertEqual([2], coll.find({'_id': uu}).distinct('i')) @@ -98,7 +98,7 @@ def test_uuid_representation(self): "uuid", CodecOptions(uuid_representation=PYTHON_LEGACY)) self.assertEqual(2, coll.find_one_and_update({'_id': uu}, {'$set': {'i': 5}})['i']) - self.assertEqual(5, coll.find_one({'_id': uu})['i']) + self.assertEqual(5, coll.find_one({'_id': uu})['i']) # type: ignore # Test command self.assertEqual(5, self.db.command( diff 
--git a/test/test_connections_survive_primary_stepdown_spec.py b/test/test_connections_survive_primary_stepdown_spec.py index 894b14becd..e683974b03 100644 --- a/test/test_connections_survive_primary_stepdown_spec.py +++ b/test/test_connections_survive_primary_stepdown_spec.py @@ -20,6 +20,7 @@ from bson import SON from pymongo import monitoring +from pymongo.collection import Collection from pymongo.errors import NotPrimaryError from pymongo.write_concern import WriteConcern @@ -33,6 +34,9 @@ class TestConnectionsSurvivePrimaryStepDown(IntegrationTest): + listener: CMAPListener + coll: Collection + @classmethod @client_context.require_replica_set def setUpClass(cls): @@ -111,7 +115,7 @@ def run_scenario(self, error_code, retry, pool_status_checker): # Insert record and verify failure. with self.assertRaises(NotPrimaryError) as exc: self.coll.insert_one({"test": 1}) - self.assertEqual(exc.exception.details['code'], error_code) + self.assertEqual(exc.exception.details['code'], error_code) # type: ignore # Retry before CMAPListener assertion if retry_before=True. if retry: self.coll.insert_one({"test": 1}) diff --git a/test/test_crud_v1.py b/test/test_crud_v1.py index 5a63e030fe..4399d9f223 100644 --- a/test/test_crud_v1.py +++ b/test/test_crud_v1.py @@ -53,7 +53,7 @@ def check_result(self, expected_result, result): # SPEC-869: Only BulkWriteResult has upserted_count. if (prop == "upserted_count" and not isinstance(result, BulkWriteResult)): - if result.upserted_id is not None: + if result.upserted_id is not None: # type: ignore upserted_count = 1 else: upserted_count = 0 @@ -69,14 +69,14 @@ def check_result(self, expected_result, result): ids = expected_result[res] if isinstance(ids, dict): ids = [ids[str(i)] for i in range(len(ids))] - self.assertEqual(ids, result.inserted_ids, msg) + self.assertEqual(ids, result.inserted_ids, msg) # type: ignore elif prop == "upserted_ids": # Convert indexes from strings to integers. 
ids = expected_result[res] expected_ids = {} for str_index in ids: expected_ids[int(str_index)] = ids[str_index] - self.assertEqual(expected_ids, result.upserted_ids, msg) + self.assertEqual(expected_ids, result.upserted_ids, msg) # type: ignore else: self.assertEqual( getattr(result, prop), expected_result[res], msg) diff --git a/test/test_cursor.py b/test/test_cursor.py index 0b8ba049c2..f741b8b0cc 100644 --- a/test/test_cursor.py +++ b/test/test_cursor.py @@ -57,7 +57,7 @@ def test_deepcopy_cursor_littered_with_regexes(self): re.compile("^key.*"): {"a": [re.compile("^hm.*")]}}) cursor2 = copy.deepcopy(cursor) - self.assertEqual(cursor._Cursor__spec, cursor2._Cursor__spec) + self.assertEqual(cursor._Cursor__spec, cursor2._Cursor__spec) # type: ignore def test_add_remove_option(self): cursor = self.db.test.find() @@ -149,9 +149,9 @@ def test_allow_disk_use(self): self.assertRaises(TypeError, coll.find().allow_disk_use, 'baz') cursor = coll.find().allow_disk_use(True) - self.assertEqual(True, cursor._Cursor__allow_disk_use) + self.assertEqual(True, cursor._Cursor__allow_disk_use) # type: ignore cursor = coll.find().allow_disk_use(False) - self.assertEqual(False, cursor._Cursor__allow_disk_use) + self.assertEqual(False, cursor._Cursor__allow_disk_use) # type: ignore def test_max_time_ms(self): db = self.db @@ -165,15 +165,15 @@ def test_max_time_ms(self): coll.find().max_time_ms(1) cursor = coll.find().max_time_ms(999) - self.assertEqual(999, cursor._Cursor__max_time_ms) + self.assertEqual(999, cursor._Cursor__max_time_ms) # type: ignore cursor = coll.find().max_time_ms(10).max_time_ms(1000) - self.assertEqual(1000, cursor._Cursor__max_time_ms) + self.assertEqual(1000, cursor._Cursor__max_time_ms) # type: ignore cursor = coll.find().max_time_ms(999) c2 = cursor.clone() - self.assertEqual(999, c2._Cursor__max_time_ms) - self.assertTrue("$maxTimeMS" in cursor._Cursor__query_spec()) - self.assertTrue("$maxTimeMS" in c2._Cursor__query_spec()) + self.assertEqual(999, c2._Cursor__max_time_ms) # type: ignore + self.assertTrue("$maxTimeMS" in cursor._Cursor__query_spec()) # type: ignore + self.assertTrue("$maxTimeMS" in c2._Cursor__query_spec()) # type: ignore self.assertTrue(coll.find_one(max_time_ms=1000)) @@ -889,7 +889,7 @@ def test_clone(self): # Every attribute should be the same. 
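Outside the test suite, the same cursor knobs are plain chained methods (a sketch against a hypothetical test.docs collection on a local mongod):

from pymongo import MongoClient

coll = MongoClient().test.docs

# max_time_ms caps server-side execution via $maxTimeMS; allow_disk_use
# lets large sorts spill to disk (find support requires MongoDB 4.4+).
cursor = coll.find({}).sort("x", 1).max_time_ms(999).allow_disk_use(True)
for doc in cursor:
    print(doc)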
cursor2 = cursor.clone() - self.assertDictEqual(cursor.__dict__, cursor2.__dict__) + self.assertEqual(cursor.__dict__, cursor2.__dict__) # Shallow copies can so can mutate cursor2 = copy.copy(cursor) @@ -1025,7 +1025,7 @@ def test_properties(self): self.assertEqual(self.db.test, self.db.test.find().collection) def set_coll(): - self.db.test.find().collection = "hello" + self.db.test.find().collection = "hello" # type: ignore self.assertRaises(AttributeError, set_coll) diff --git a/test/test_custom_types.py b/test/test_custom_types.py index 5db208ab7e..eee47b9d2b 100644 --- a/test/test_custom_types.py +++ b/test/test_custom_types.py @@ -21,6 +21,7 @@ from collections import OrderedDict from decimal import Decimal from random import random +from typing import Any, Tuple, Type, no_type_check sys.path[0:0] = [""] @@ -127,6 +128,7 @@ def transform_bson(self, value): class CustomBSONTypeTests(object): + @no_type_check def roundtrip(self, doc): bsonbytes = encode(doc, codec_options=self.codecopts) rt_document = decode(bsonbytes, codec_options=self.codecopts) @@ -139,6 +141,7 @@ def test_encode_decode_roundtrip(self): self.roundtrip({'average': [[Decimal('56.47')]]}) self.roundtrip({'average': [{'b': Decimal('56.47')}]}) + @no_type_check def test_decode_all(self): documents = [] for dec in range(3): @@ -151,12 +154,14 @@ def test_decode_all(self): self.assertEqual( decode_all(bsonstream, self.codecopts), documents) + @no_type_check def test__bson_to_dict(self): document = {'average': Decimal('56.47')} rawbytes = encode(document, codec_options=self.codecopts) decoded_document = _bson_to_dict(rawbytes, self.codecopts) self.assertEqual(document, decoded_document) + @no_type_check def test__dict_to_bson(self): document = {'average': Decimal('56.47')} rawbytes = encode(document, codec_options=self.codecopts) @@ -172,12 +177,14 @@ def _generate_multidocument_bson_stream(self): bsonstream += encode(doc) return edocs, bsonstream + @no_type_check def test_decode_iter(self): expected, bson_data = self._generate_multidocument_bson_stream() for expected_doc, decoded_doc in zip( expected, decode_iter(bson_data, self.codecopts)): self.assertEqual(expected_doc, decoded_doc) + @no_type_check def test_decode_file_iter(self): expected, bson_data = self._generate_multidocument_bson_stream() fileobj = tempfile.TemporaryFile() @@ -293,6 +300,15 @@ def test_type_checks(self): class TestBSONCustomTypeEncoderAndFallbackEncoderTandem(unittest.TestCase): + + TypeA: Any + TypeB: Any + fallback_encoder_A2B: Any + fallback_encoder_A2BSON: Any + B2BSON: Type[TypeEncoder] + B2A: Type[TypeEncoder] + A2B: Type[TypeEncoder] + @classmethod def setUpClass(cls): class TypeA(object): @@ -378,6 +394,10 @@ def test_infinite_loop_exceeds_max_recursion_depth(self): class TestTypeRegistry(unittest.TestCase): + types: Tuple[object, object] + codecs: Tuple[Type[TypeCodec], Type[TypeCodec]] + fallback_encoder: Any + @classmethod def setUpClass(cls): class MyIntType(object): @@ -466,32 +486,32 @@ class MyIntDecoder(TypeDecoder): def transform_bson(self, value): return self.types[0](value) - codec_instances = [MyIntDecoder(), MyIntEncoder()] + codec_instances: list = [MyIntDecoder(), MyIntEncoder()] type_registry = TypeRegistry(codec_instances) self.assertEqual( type_registry._encoder_map, - {MyIntEncoder.python_type: codec_instances[1].transform_python}) + {MyIntEncoder.python_type: codec_instances[1].transform_python}) # type: ignore self.assertEqual( type_registry._decoder_map, - {MyIntDecoder.bson_type: codec_instances[0].transform_bson}) + 
{MyIntDecoder.bson_type: codec_instances[0].transform_bson}) # type: ignore def test_initialize_fail(self): err_msg = ("Expected an instance of TypeEncoder, TypeDecoder, " "or TypeCodec, got .* instead") with self.assertRaisesRegex(TypeError, err_msg): - TypeRegistry(self.codecs) + TypeRegistry(self.codecs) # type: ignore[arg-type] with self.assertRaisesRegex(TypeError, err_msg): TypeRegistry([type('AnyType', (object,), {})()]) err_msg = "fallback_encoder %r is not a callable" % (True,) with self.assertRaisesRegex(TypeError, err_msg): - TypeRegistry([], True) + TypeRegistry([], True) # type: ignore[arg-type] err_msg = "fallback_encoder %r is not a callable" % ('hello',) with self.assertRaisesRegex(TypeError, err_msg): - TypeRegistry(fallback_encoder='hello') + TypeRegistry(fallback_encoder='hello') # type: ignore[arg-type] def test_type_registry_repr(self): codec_instances = [codec() for codec in self.codecs] @@ -525,7 +545,7 @@ def run_test(base, attrs): if pytype in [bool, type(None), RE_TYPE,]: continue - class MyType(pytype): + class MyType(pytype): # type: ignore pass attrs.update({'python_type': MyType, 'transform_python': lambda x: x}) @@ -598,7 +618,7 @@ def test_aggregate_w_custom_type_decoder(self): test = db.get_collection( 'test', codec_options=UNINT_DECODER_CODECOPTS) - pipeline = [ + pipeline: list = [ {'$match': {'status': 'complete'}}, {'$group': {'_id': "$status", 'total_qty': {"$sum": "$qty"}}},] result = test.aggregate(pipeline) @@ -680,15 +700,18 @@ def test_grid_out_custom_opts(self): class ChangeStreamsWCustomTypesTestMixin(object): + @no_type_check def change_stream(self, *args, **kwargs): return self.watched_target.watch(*args, **kwargs) + @no_type_check def insert_and_check(self, change_stream, insert_doc, expected_doc): self.input_target.insert_one(insert_doc) change = next(change_stream) self.assertEqual(change['fullDocument'], expected_doc) + @no_type_check def kill_change_stream_cursor(self, change_stream): # Cause a cursor not found error on the next getMore. cursor = change_stream._cursor @@ -696,6 +719,7 @@ def kill_change_stream_cursor(self, change_stream): client = self.input_target.database.client client._close_cursor_now(cursor.cursor_id, address) + @no_type_check def test_simple(self): codecopts = CodecOptions(type_registry=TypeRegistry([ UndecipherableIntEncoder(), UppercaseTextDecoder()])) @@ -718,6 +742,7 @@ def test_simple(self): self.kill_change_stream_cursor(change_stream) self.insert_and_check(change_stream, input_docs[2], expected_docs[2]) + @no_type_check def test_custom_type_in_pipeline(self): codecopts = CodecOptions(type_registry=TypeRegistry([ UndecipherableIntEncoder(), UppercaseTextDecoder()])) @@ -741,6 +766,7 @@ def test_custom_type_in_pipeline(self): self.kill_change_stream_cursor(change_stream) self.insert_and_check(change_stream, input_docs[2], expected_docs[1]) + @no_type_check def test_break_resume_token(self): # Get one document from a change stream to determine resumeToken type. 
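The machinery these custom-type tests exercise reduces to one pattern: a TypeCodec registered in a TypeRegistry, wired into CodecOptions. A minimal Decimal round-trip sketch, assuming a local mongod:

from decimal import Decimal

from bson.codec_options import CodecOptions, TypeCodec, TypeRegistry
from bson.decimal128 import Decimal128
from pymongo import MongoClient


class DecimalCodec(TypeCodec):
    python_type = Decimal   # encode: Decimal -> Decimal128
    bson_type = Decimal128  # decode: Decimal128 -> Decimal

    def transform_python(self, value):
        return Decimal128(value)

    def transform_bson(self, value):
        return value.to_decimal()


opts = CodecOptions(type_registry=TypeRegistry([DecimalCodec()]))
coll = MongoClient().test.get_collection("prices", codec_options=opts)
coll.insert_one({"amount": Decimal("56.47")})
print(coll.find_one()["amount"])  # Decimal('56.47')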
self.create_targets() @@ -766,6 +792,7 @@ def test_break_resume_token(self): self.kill_change_stream_cursor(change_stream) self.insert_and_check(change_stream, docs[2], docs[2]) + @no_type_check def test_document_class(self): def run_test(doc_cls): codecopts = CodecOptions(type_registry=TypeRegistry([ diff --git a/test/test_database.py b/test/test_database.py index 4adccc1b58..096eb5b979 100644 --- a/test/test_database.py +++ b/test/test_database.py @@ -17,6 +17,7 @@ import datetime import re import sys +from typing import Any, List, Mapping sys.path[0:0] = [""] @@ -57,6 +58,7 @@ class TestDatabaseNoConnect(unittest.TestCase): """Test Database features on a client that does not connect. """ + client: MongoClient @classmethod def setUpClass(cls): @@ -143,7 +145,7 @@ def test_create_collection(self): test = db.create_collection("test") self.assertTrue("test" in db.list_collection_names()) test.insert_one({"hello": "world"}) - self.assertEqual(db.test.find_one()["hello"], "world") + self.assertEqual(db.test.find_one()["hello"], "world") # type: ignore db.drop_collection("test.foo") db.create_collection("test.foo") @@ -198,6 +200,7 @@ def test_list_collection_names_filter(self): self.assertNotIn("nameOnly", results["started"][0].command) # Should send nameOnly (except on 2.6). + filter: Any for filter in (None, {}, {'name': {'$in': ['capped', 'non_capped']}}): results.clear() names = db.list_collection_names(filter=filter) @@ -225,7 +228,7 @@ def test_list_collections(self): self.assertTrue("$" not in coll) # Duplicate check. - coll_cnt = {} + coll_cnt: dict = {} for coll in colls: try: # Found duplicate. @@ -233,7 +236,7 @@ def test_list_collections(self): self.assertTrue(False) except KeyError: coll_cnt[coll] = 1 - coll_cnt = {} + coll_cnt: dict = {} # Checking if is there any collection which don't exists. 
if (len(set(colls) - set(["test","test.mike"])) == 0 or @@ -466,6 +469,7 @@ def test_insert_find_one(self): self.assertEqual(None, db.test.find_one({"hello": "test"})) b = db.test.find_one() + assert b is not None b["hello"] = "mike" db.test.replace_one({"_id": b["_id"]}, b) @@ -482,12 +486,12 @@ def test_long(self): db = self.client.pymongo_test db.test.drop() db.test.insert_one({"x": 9223372036854775807}) - retrieved = db.test.find_one()['x'] + retrieved = db.test.find_one()['x'] # type: ignore self.assertEqual(Int64(9223372036854775807), retrieved) self.assertIsInstance(retrieved, Int64) db.test.delete_many({}) db.test.insert_one({"x": Int64(1)}) - retrieved = db.test.find_one()['x'] + retrieved = db.test.find_one()['x'] # type: ignore self.assertEqual(Int64(1), retrieved) self.assertIsInstance(retrieved, Int64) @@ -509,8 +513,8 @@ def test_delete(self): length += 1 self.assertEqual(length, 2) - db.test.delete_one(db.test.find_one()) - db.test.delete_one(db.test.find_one()) + db.test.delete_one(db.test.find_one()) # type: ignore[arg-type] + db.test.delete_one(db.test.find_one()) # type: ignore[arg-type] self.assertEqual(db.test.find_one(), None) db.test.insert_one({"x": 1}) @@ -625,7 +629,7 @@ def test_with_options(self): 'read_preference': ReadPreference.PRIMARY, 'write_concern': WriteConcern(w=1), 'read_concern': ReadConcern(level="local")} - db2 = db1.with_options(**newopts) + db2 = db1.with_options(**newopts) # type: ignore[arg-type] for opt in newopts: self.assertEqual( getattr(db2, opt), newopts.get(opt, getattr(db1, opt))) @@ -633,7 +637,7 @@ def test_with_options(self): class TestDatabaseAggregation(IntegrationTest): def setUp(self): - self.pipeline = [{"$listLocalSessions": {}}, + self.pipeline: List[Mapping[str, Any]] = [{"$listLocalSessions": {}}, {"$limit": 1}, {"$addFields": {"dummy": "dummy field"}}, {"$project": {"_id": 0, "dummy": 1}}] @@ -648,6 +652,7 @@ def test_database_aggregation(self): @client_context.require_no_mongos def test_database_aggregation_fake_cursor(self): coll_name = "test_output" + write_stage: dict if client_context.version < (4, 3): db_name = "admin" write_stage = {"$out": coll_name} diff --git a/test/test_dbref.py b/test/test_dbref.py index 964947351e..348b1d14de 100644 --- a/test/test_dbref.py +++ b/test/test_dbref.py @@ -16,6 +16,7 @@ import pickle import sys +from typing import Any sys.path[0:0] = [""] from bson import encode, decode @@ -44,10 +45,10 @@ def test_read_only(self): a = DBRef("coll", ObjectId()) def foo(): - a.collection = "blah" + a.collection = "blah" # type: ignore[misc] def bar(): - a.id = "aoeu" + a.id = "aoeu" # type: ignore[misc] self.assertEqual("coll", a.collection) a.id @@ -136,6 +137,7 @@ def test_dbref_hash(self): # https://github.com/mongodb/specifications/blob/master/source/dbref.rst#test-plan class TestDBRefSpec(unittest.TestCase): def test_decoding_1_2_3(self): + doc: Any for doc in [ # 1, Valid documents MUST be decoded to a DBRef: {"$ref": "coll0", "$id": ObjectId("60a6fe9a54f4180c86309efa")}, @@ -183,6 +185,7 @@ def test_decoding_4_5(self): self.assertIsInstance(dbref, dict) def test_encoding_1_2(self): + doc: Any for doc in [ # 1, Encoding DBRefs with basic fields: {"$ref": "coll0", "$id": ObjectId("60a6fe9a54f4180c86309efa")}, diff --git a/test/test_decimal128.py b/test/test_decimal128.py index 4ff25935dd..3988a4559a 100644 --- a/test/test_decimal128.py +++ b/test/test_decimal128.py @@ -35,6 +35,7 @@ def test_round_trip(self): b'\x00@cR\xbf\xc6\x01\x00\x00\x00\x00\x00\x00\x00\x1c0') 
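The recurring `assert ... is not None` additions exist because find_one is typed as returning an optional document; the assert narrows the type for mypy in tests that already guarantee a match. Application code narrows the same way (a sketch, assuming a local mongod):

from pymongo import MongoClient

coll = MongoClient().test.things
coll.insert_one({"_id": 1, "x": 9})

doc = coll.find_one({"_id": 1})
if doc is None:
    raise LookupError("expected {'_id': 1} to exist")
# mypy now treats doc as a Mapping, so indexing type-checks cleanly.
print(doc["x"])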
coll.insert_one({'dec128': dec128}) doc = coll.find_one({'dec128': dec128}) + assert doc is not None self.assertIsNotNone(doc) self.assertEqual(doc['dec128'], dec128) diff --git a/test/test_discovery_and_monitoring.py b/test/test_discovery_and_monitoring.py index 107168f294..c3a50709ac 100644 --- a/test/test_discovery_and_monitoring.py +++ b/test/test_discovery_and_monitoring.py @@ -364,10 +364,12 @@ def _event_count(self, event): def marked_unknown(e): return (isinstance(e, monitoring.ServerDescriptionChangedEvent) and not e.new_description.is_server_type_known) + assert self.server_listener is not None return len(self.server_listener.matching(marked_unknown)) # Only support CMAP events for now. self.assertTrue(event.startswith('Pool') or event.startswith('Conn')) event_type = getattr(monitoring, event) + assert self.pool_listener is not None return self.pool_listener.event_count(event_type) def assert_event_count(self, event, count): diff --git a/test/test_encryption.py b/test/test_encryption.py index af4165f1d1..966d9b5815 100644 --- a/test/test_encryption.py +++ b/test/test_encryption.py @@ -25,6 +25,10 @@ import traceback import uuid +from typing import Any + +from pymongo.collection import Collection + sys.path[0:0] = [""] from bson import encode, json_util @@ -126,6 +130,7 @@ def test_init_kms_tls_options(self): with self.assertRaisesRegex( TypeError, r'kms_tls_options\["kmip"\] must be a dict'): AutoEncryptionOpts({}, 'k.d', kms_tls_options={'kmip': 1}) + tls_opts: Any for tls_opts in [ {'kmip': {'tls': True, 'tlsInsecure': True}}, {'kmip': {'tls': True, 'tlsAllowInvalidCertificates': True}}, @@ -138,6 +143,7 @@ def test_init_kms_tls_options(self): AutoEncryptionOpts({}, 'k.d', kms_tls_options={ 'kmip': {'tlsCAFile': 'does-not-exist'}}) # Success cases: + tls_opts: Any for tls_opts in [None, {}]: opts = AutoEncryptionOpts({}, 'k.d', kms_tls_options=tls_opts) self.assertEqual(opts._kms_ssl_contexts, {}) @@ -432,14 +438,14 @@ def test_validation(self): msg = 'value to decrypt must be a bson.binary.Binary with subtype 6' with self.assertRaisesRegex(TypeError, msg): - client_encryption.decrypt('str') + client_encryption.decrypt('str') # type: ignore[arg-type] with self.assertRaisesRegex(TypeError, msg): client_encryption.decrypt(Binary(b'123')) msg = 'key_id must be a bson.binary.Binary with subtype 4' algo = Algorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic with self.assertRaisesRegex(TypeError, msg): - client_encryption.encrypt('str', algo, key_id=uuid.uuid4()) + client_encryption.encrypt('str', algo, key_id=uuid.uuid4()) # type: ignore[arg-type] with self.assertRaisesRegex(TypeError, msg): client_encryption.encrypt('str', algo, key_id=Binary(b'123')) @@ -459,7 +465,7 @@ def test_bson_errors(self): def test_codec_options(self): with self.assertRaisesRegex(TypeError, 'codec_options must be'): ClientEncryption( - KMS_PROVIDERS, 'keyvault.datakeys', client_context.client, None) + KMS_PROVIDERS, 'keyvault.datakeys', client_context.client, None) # type: ignore[arg-type] opts = CodecOptions(uuid_representation=JAVA_LEGACY) client_encryption_legacy = ClientEncryption( @@ -708,6 +714,10 @@ def create_key_vault(vault, *data_keys): class TestDataKeyDoubleEncryption(EncryptionIntegrationTest): + client_encrypted: MongoClient + client_encryption: ClientEncryption + listener: OvertCommandListener + vault: Any KMS_PROVIDERS = ALL_KMS_PROVIDERS @@ -776,7 +786,7 @@ def setUp(self): def run_test(self, provider_name): # Create data key. 
- master_key = self.MASTER_KEYS[provider_name] + master_key: Any = self.MASTER_KEYS[provider_name] datakey_id = self.client_encryption.create_data_key( provider_name, master_key=master_key, key_alt_names=['%s_altname' % (provider_name,)]) @@ -798,7 +808,7 @@ def run_test(self, provider_name): {'_id': provider_name, 'value': encrypted}) doc_decrypted = self.client_encrypted.db.coll.find_one( {'_id': provider_name}) - self.assertEqual(doc_decrypted['value'], 'hello %s' % (provider_name,)) + self.assertEqual(doc_decrypted['value'], 'hello %s' % (provider_name,)) # type: ignore # Encrypt by key_alt_name. encrypted_altname = self.client_encryption.encrypt( @@ -985,7 +995,7 @@ def _test_corpus(self, opts): self.addCleanup(client_encryption.close) corpus = self.fix_up_curpus(json_data('corpus', 'corpus.json')) - corpus_copied = SON() + corpus_copied: SON = SON() for key, value in corpus.items(): corpus_copied[key] = copy.deepcopy(value) if key in ('_id', 'altname_aws', 'altname_azure', 'altname_gcp', @@ -1021,7 +1031,7 @@ def _test_corpus(self, opts): try: encrypted_val = client_encryption.encrypt( - value['value'], algo, **kwargs) + value['value'], algo, **kwargs) # type: ignore[arg-type] if not value['allowed']: self.fail('encrypt should have failed: %r: %r' % ( key, value)) @@ -1082,6 +1092,10 @@ def test_corpus_local_schema(self): class TestBsonSizeBatches(EncryptionIntegrationTest): """Prose tests for BSON size limits and batch splitting.""" + coll: Collection + coll_encrypted: Collection + client_encrypted: MongoClient + listener: OvertCommandListener @classmethod def setUpClass(cls): @@ -1397,6 +1411,7 @@ class AzureGCPEncryptionTestMixin(object): KMS_PROVIDER_MAP = None KEYVAULT_DB = 'keyvault' KEYVAULT_COLL = 'datakeys' + client: MongoClient def setUp(self): keyvault = self.client.get_database( @@ -1406,7 +1421,7 @@ def setUp(self): def _test_explicit(self, expectation): client_encryption = ClientEncryption( - self.KMS_PROVIDER_MAP, + self.KMS_PROVIDER_MAP, # type: ignore[arg-type] '.'.join([self.KEYVAULT_DB, self.KEYVAULT_COLL]), client_context.client, OPTS) @@ -1426,7 +1441,7 @@ def _test_automatic(self, expectation_extjson, payload): keyvault_namespace = '.'.join([self.KEYVAULT_DB, self.KEYVAULT_COLL]) encryption_opts = AutoEncryptionOpts( - self.KMS_PROVIDER_MAP, + self.KMS_PROVIDER_MAP, # type: ignore[arg-type] keyvault_namespace, schema_map=self.SCHEMA_MAP) @@ -1818,7 +1833,7 @@ class TestKmsTLSOptions(EncryptionIntegrationTest): def setUp(self): super(TestKmsTLSOptions, self).setUp() # 1, create client with only tlsCAFile. - providers = copy.deepcopy(ALL_KMS_PROVIDERS) + providers: dict = copy.deepcopy(ALL_KMS_PROVIDERS) providers['azure']['identityPlatformEndpoint'] = '127.0.0.1:8002' providers['gcp']['endpoint'] = '127.0.0.1:8002' kms_tls_opts_ca_only = { @@ -1840,7 +1855,7 @@ def setUp(self): kms_tls_options=kms_tls_opts) self.addCleanup(self.client_encryption_with_tls.close) # 3, update endpoints to expired host. - providers = copy.deepcopy(providers) + providers: dict = copy.deepcopy(providers) providers['azure']['identityPlatformEndpoint'] = '127.0.0.1:8000' providers['gcp']['endpoint'] = '127.0.0.1:8000' providers['kmip']['endpoint'] = '127.0.0.1:8000' @@ -1849,7 +1864,7 @@ def setUp(self): kms_tls_options=kms_tls_opts_ca_only) self.addCleanup(self.client_encryption_expired.close) # 3, update endpoints to invalid host. 
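These fixtures thread kms_tls_options through ClientEncryption; in application code the option rides the constructor. A sketch only, assuming pymongocrypt is installed, a KMIP KMS listening at a hypothetical localhost:5698, and a CA file on disk:

from bson.codec_options import CodecOptions
from pymongo import MongoClient
from pymongo.encryption import ClientEncryption

client = MongoClient()  # assumes a local mongod hosting the key vault
kms_providers = {"kmip": {"endpoint": "localhost:5698"}}  # hypothetical endpoint

client_encryption = ClientEncryption(
    kms_providers,
    "keyvault.datakeys",
    client,
    CodecOptions(),
    # TLS settings used when connecting to each KMS provider, keyed by name.
    kms_tls_options={"kmip": {"tlsCAFile": "/path/to/ca.pem"}},
)
key_id = client_encryption.create_data_key("kmip")
client_encryption.close()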
- providers = copy.deepcopy(providers) + providers: dict = copy.deepcopy(providers) providers['azure']['identityPlatformEndpoint'] = '127.0.0.1:8001' providers['gcp']['endpoint'] = '127.0.0.1:8001' providers['kmip']['endpoint'] = '127.0.0.1:8001' diff --git a/test/test_examples.py b/test/test_examples.py index dcf9dd2de3..ed12c8bcc1 100644 --- a/test/test_examples.py +++ b/test/test_examples.py @@ -890,6 +890,7 @@ def update_employee_info(session): update_employee_info(session) employee = employees.find_one({"employee": 3}) + assert employee is not None self.assertIsNotNone(employee) self.assertEqual(employee['status'], 'Inactive') @@ -916,6 +917,7 @@ def run_transaction_with_retry(txn_func, session): run_transaction_with_retry(update_employee_info, session) employee = employees.find_one({"employee": 3}) + assert employee is not None self.assertIsNotNone(employee) self.assertEqual(employee['status'], 'Inactive') @@ -954,6 +956,7 @@ def _insert_employee_retry_commit(session): run_transaction_with_retry(_insert_employee_retry_commit, session) employee = employees.find_one({"employee": 4}) + assert employee is not None self.assertIsNotNone(employee) self.assertEqual(employee['status'], 'Active') @@ -1021,6 +1024,7 @@ def update_employee_info(session): # End Transactions Retry Example 3 employee = employees.find_one({"employee": 3}) + assert employee is not None self.assertIsNotNone(employee) self.assertEqual(employee['status'], 'Inactive') @@ -1089,6 +1093,9 @@ def test_causal_consistency(self): 'start': current_date}, session=s1) # End Causal Consistency Example 1 + assert s1.cluster_time is not None + assert s1.operation_time is not None + # Start Causal Consistency Example 2 with client.start_session(causal_consistency=True) as s2: s2.advance_cluster_time(s1.cluster_time) diff --git a/test/test_grid_file.py b/test/test_grid_file.py index 6d7cc7ba3b..2208e97b42 100644 --- a/test/test_grid_file.py +++ b/test/test_grid_file.py @@ -24,6 +24,8 @@ from io import BytesIO +from pymongo.database import Database + sys.path[0:0] = [""] from bson.objectid import ObjectId @@ -47,6 +49,7 @@ class TestGridFileNoConnect(unittest.TestCase): """Test GridFile features on a client that does not connect. 
""" + db: Database @classmethod def setUpClass(cls): diff --git a/test/test_gridfs.py b/test/test_gridfs.py index d7d5a74e5f..3d8a7d8f6b 100644 --- a/test/test_gridfs.py +++ b/test/test_gridfs.py @@ -27,6 +27,7 @@ sys.path[0:0] = [""] from bson.binary import Binary +from pymongo.database import Database from pymongo.mongo_client import MongoClient from pymongo.errors import (ConfigurationError, NotPrimaryError, @@ -78,6 +79,7 @@ def run(self): class TestGridfsNoConnect(unittest.TestCase): + db: Database @classmethod def setUpClass(cls): @@ -89,6 +91,8 @@ def test_gridfs(self): class TestGridfs(IntegrationTest): + fs: gridfs.GridFS + alt: gridfs.GridFS @classmethod def setUpClass(cls): @@ -152,6 +156,7 @@ def test_empty_file(self): self.assertEqual(0, self.db.fs.chunks.count_documents({})) raw = self.db.fs.files.find_one() + assert raw is not None self.assertEqual(0, raw["length"]) self.assertEqual(oid, raw["_id"]) self.assertTrue(isinstance(raw["uploadDate"], datetime.datetime)) @@ -213,7 +218,7 @@ def test_threaded_reads(self): self.fs.put(b"hello", _id="test") threads = [] - results = [] + results: list = [] for i in range(10): threads.append(JustRead(self.fs, 10, results)) threads[i].start() @@ -396,6 +401,7 @@ def test_missing_length_iter(self): # Test fix that guards against PHP-237 self.fs.put(b"", filename="empty") doc = self.db.fs.files.find_one({"filename": "empty"}) + assert doc is not None doc.pop("length") self.db.fs.files.replace_one({"_id": doc["_id"]}, doc) f = self.fs.get_last_version(filename="empty") @@ -447,23 +453,32 @@ def test_delete_not_initialized(self): # but will still call __del__. cursor = GridOutCursor.__new__(GridOutCursor) # Skip calling __init__ with self.assertRaises(TypeError): - cursor.__init__(self.db.fs.files, {}, {"_id": True}) + cursor.__init__(self.db.fs.files, {}, {"_id": True}) # type: ignore cursor.__del__() # no error def test_gridfs_find_one(self): self.assertEqual(None, self.fs.find_one()) id1 = self.fs.put(b'test1', filename='file1') - self.assertEqual(b'test1', self.fs.find_one().read()) + res = self.fs.find_one() + assert res is not None + self.assertEqual(b'test1', res.read()) id2 = self.fs.put(b'test2', filename='file2', meta='data') - self.assertEqual(b'test1', self.fs.find_one(id1).read()) - self.assertEqual(b'test2', self.fs.find_one(id2).read()) - - self.assertEqual(b'test1', - self.fs.find_one({'filename': 'file1'}).read()) - - self.assertEqual('data', self.fs.find_one(id2).meta) + res1 = self.fs.find_one(id1) + assert res1 is not None + self.assertEqual(b'test1', res1.read()) + res2 = self.fs.find_one(id2) + assert res2 is not None + self.assertEqual(b'test2', res2.read()) + + res3 = self.fs.find_one({'filename': 'file1'}) + assert res3 is not None + self.assertEqual(b'test1', res3.read()) + + res4 = self.fs.find_one(id2) + assert res4 is not None + self.assertEqual('data', res4.meta) def test_grid_in_non_int_chunksize(self): # Lua, and perhaps other buggy GridFS clients, store size as a float. 
diff --git a/test/test_gridfs_bucket.py b/test/test_gridfs_bucket.py index 499643f673..53f94991d3 100644 --- a/test/test_gridfs_bucket.py +++ b/test/test_gridfs_bucket.py @@ -77,6 +77,8 @@ def run(self): class TestGridfs(IntegrationTest): + fs: gridfs.GridFSBucket + alt: gridfs.GridFSBucket @classmethod def setUpClass(cls): @@ -123,6 +125,7 @@ def test_empty_file(self): self.assertEqual(0, self.db.fs.chunks.count_documents({})) raw = self.db.fs.files.find_one() + assert raw is not None self.assertEqual(0, raw["length"]) self.assertEqual(oid, raw["_id"]) self.assertTrue(isinstance(raw["uploadDate"], datetime.datetime)) @@ -208,7 +211,7 @@ def test_threaded_reads(self): self.fs.upload_from_stream("test", b"hello") threads = [] - results = [] + results: list = [] for i in range(10): threads.append(JustRead(self.fs, 10, results)) threads[i].start() @@ -322,6 +325,7 @@ def test_missing_length_iter(self): # Test fix that guards against PHP-237 self.fs.upload_from_stream("empty", b"") doc = self.db.fs.files.find_one({"filename": "empty"}) + assert doc is not None doc.pop("length") self.db.fs.files.replace_one({"_id": doc["_id"]}, doc) fstr = self.fs.open_download_stream_by_name("empty") diff --git a/test/test_gridfs_spec.py b/test/test_gridfs_spec.py index 86449db370..057a7b4841 100644 --- a/test/test_gridfs_spec.py +++ b/test/test_gridfs_spec.py @@ -55,6 +55,9 @@ def camel_to_snake(camel): class TestAllScenarios(IntegrationTest): + fs: gridfs.GridFSBucket + str_to_cmd: dict + @classmethod def setUpClass(cls): super(TestAllScenarios, cls).setUpClass() diff --git a/test/test_json_util.py b/test/test_json_util.py index dbf4f1c26a..16c7d96a2f 100644 --- a/test/test_json_util.py +++ b/test/test_json_util.py @@ -20,6 +20,8 @@ import sys import uuid +from typing import Any, List, MutableMapping + sys.path[0:0] = [""] from bson import json_util, EPOCH_AWARE, EPOCH_NAIVE, SON @@ -466,7 +468,7 @@ def test_cursor(self): db = self.db db.drop_collection("test") - docs = [ + docs: List[MutableMapping[str, Any]] = [ {'foo': [1, 2]}, {'bar': {'hello': 'world'}}, {'code': Code("function x() { return 1; }")}, diff --git a/test/test_max_staleness.py b/test/test_max_staleness.py index 1fd82884f1..5c484fe334 100644 --- a/test/test_max_staleness.py +++ b/test/test_max_staleness.py @@ -35,7 +35,7 @@ 'max_staleness') -class TestAllScenarios(create_selection_tests(_TEST_PATH)): +class TestAllScenarios(create_selection_tests(_TEST_PATH)): # type: ignore pass diff --git a/test/test_monitor.py b/test/test_monitor.py index 61e2057b52..ed0d4543f8 100644 --- a/test/test_monitor.py +++ b/test/test_monitor.py @@ -59,7 +59,7 @@ def test_cleanup_executors_on_client_del(self): # Each executor stores a weakref to itself in _EXECUTORS. 
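The GridFSBucket tests touched above exercise the same round trip through the stream-oriented API (a sketch, same local-server assumption):

import gridfs
from pymongo import MongoClient

db = MongoClient().gridfs_example
bucket = gridfs.GridFSBucket(db)

bucket.upload_from_stream("notes.txt", b"some bytes")

stream = bucket.open_download_stream_by_name("notes.txt")
print(stream.read())  # b'some bytes'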
executor_refs = [ - (r, r()._name) for r in _EXECUTORS.copy() if r() in executors] + (r, r()._name) for r in _EXECUTORS.copy() if r() in executors] # type: ignore del executors del client diff --git a/test/test_monitoring.py b/test/test_monitoring.py index 0d925b04bf..4e513c5c69 100644 --- a/test/test_monitoring.py +++ b/test/test_monitoring.py @@ -16,6 +16,7 @@ import datetime import sys import time +from typing import Any import warnings sys.path[0:0] = [""] @@ -43,6 +44,7 @@ class TestCommandMonitoring(IntegrationTest): + listener: EventListener @classmethod @client_context.require_connection @@ -754,7 +756,7 @@ def test_non_bulk_writes(self): # delete_one self.listener.results.clear() - res = coll.delete_one({'x': 3}) + res2 = coll.delete_one({'x': 3}) results = self.listener.results started = results['started'][0] succeeded = results['succeeded'][0] @@ -1091,6 +1093,8 @@ def test_sensitive_commands(self): class TestGlobalListener(IntegrationTest): + listener: EventListener + saved_listeners: Any @classmethod @client_context.require_connection @@ -1167,13 +1171,13 @@ def test_server_heartbeat_event_repr(self): "") delta = 0.1 event = monitoring.ServerHeartbeatSucceededEvent( - delta, {'ok': 1}, connection_id) + delta, {'ok': 1}, connection_id) # type: ignore[arg-type] self.assertEqual( repr(event), "") event = monitoring.ServerHeartbeatFailedEvent( - delta, 'ERROR', connection_id) + delta, 'ERROR', connection_id) # type: ignore[arg-type] self.assertEqual( repr(event), "") event = monitoring.ServerDescriptionChangedEvent( - 'PREV', 'NEW', server_address, topology_id) + 'PREV', 'NEW', server_address, topology_id) # type: ignore[arg-type] self.assertEqual( repr(event), "") event = monitoring.TopologyDescriptionChangedEvent( - 'PREV', 'NEW', topology_id) + 'PREV', 'NEW', topology_id) # type: ignore[arg-type] self.assertEqual( repr(event), " Date: Tue, 8 Feb 2022 10:01:46 -0800 Subject: [PATCH 0064/1588] PYTHON-3043 Test mod_wsgi with Python 3.10 (#850) --- .evergreen/config.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.evergreen/config.yml b/.evergreen/config.yml index bf96f220ff..d681815c12 100644 --- a/.evergreen/config.yml +++ b/.evergreen/config.yml @@ -2448,12 +2448,12 @@ buildvariants: - matrix_name: "tests-mod-wsgi" matrix_spec: platform: ubuntu-18.04 - python-version: ["3.6", "3.7", "3.8", "3.9"] + python-version: ["3.6", "3.7", "3.8", "3.9", "3.10"] mod-wsgi-version: "*" exclude_spec: # mod-wsgi 3.5 won't build against CPython 3.8+ - platform: ubuntu-18.04 - python-version: ["3.8", "3.9"] + python-version: ["3.8", "3.9", "3.10"] mod-wsgi-version: "3" display_name: "${mod-wsgi-version} ${python-version} ${platform}" tasks: From 5578999a90e439fbca06fc0ffc98f4d04e96f7b4 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Wed, 9 Feb 2022 06:44:28 -0600 Subject: [PATCH 0065/1588] PYTHON-1834 Use a code formatter (#852) --- bson/__init__.py | 382 ++-- bson/_helpers.py | 4 +- bson/binary.py | 58 +- bson/code.py | 10 +- bson/codec_options.py | 201 +- bson/dbref.py | 49 +- bson/decimal128.py | 56 +- bson/errors.py | 15 +- bson/int64.py | 1 + bson/json_util.py | 313 ++- bson/max_key.py | 1 + bson/min_key.py | 1 + bson/objectid.py | 32 +- bson/raw_bson.py | 14 +- bson/regex.py | 8 +- bson/son.py | 39 +- bson/timestamp.py | 12 +- doc/conf.py | 91 +- green_framework_test.py | 32 +- gridfs/__init__.py | 202 +- gridfs/grid_file.py | 251 +-- pymongo/__init__.py | 23 +- pymongo/aggregation.py | 58 +- pymongo/auth.py | 376 ++-- pymongo/auth_aws.py | 54 +- 
 pymongo/bulk.py | 228 ++-
 pymongo/change_stream.py | 141 +-
 pymongo/client_options.py | 166 +-
 pymongo/client_session.py | 205 +-
 pymongo/collation.py | 80 +-
 pymongo/collection.py | 944 +++++----
 pymongo/command_cursor.py | 109 +-
 pymongo/common.py | 439 ++---
 pymongo/compression_support.py | 15 +-
 pymongo/cursor.py | 358 ++--
 pymongo/daemon.py | 43 +-
 pymongo/database.py | 312 +--
 pymongo/driver_info.py | 13 +-
 pymongo/encryption.py | 154 +-
 pymongo/encryption_options.py | 24 +-
 pymongo/errors.py | 55 +-
 pymongo/event_loggers.py | 117 +-
 pymongo/hello.py | 93 +-
 pymongo/helpers.py | 128 +-
 pymongo/max_staleness_selectors.py | 24 +-
 pymongo/message.py | 730 +++----
 pymongo/mongo_client.py | 456 +++--
 pymongo/monitor.py | 77 +-
 pymongo/monitoring.py | 362 ++--
 pymongo/network.py | 136 +-
 pymongo/ocsp_cache.py | 20 +-
 pymongo/ocsp_support.py | 99 +-
 pymongo/operations.py | 150 +-
 pymongo/periodic_executor.py | 5 +-
 pymongo/pool.py | 606 +++---
 pymongo/pyopenssl_context.py | 70 +-
 pymongo/read_concern.py | 11 +-
 pymongo/read_preferences.py | 203 +-
 pymongo/response.py | 21 +-
 pymongo/results.py | 23 +-
 pymongo/saslprep.py | 17 +-
 pymongo/server.py | 76 +-
 pymongo/server_api.py | 16 +-
 pymongo/server_description.py | 88 +-
 pymongo/server_selectors.py | 49 +-
 pymongo/settings.py | 47 +-
 pymongo/socket_checker.py | 12 +-
 pymongo/srv_resolver.py | 31 +-
 pymongo/ssl_support.py | 24 +-
 pymongo/topology.py | 294 +--
 pymongo/topology_description.py | 209 +-
 pymongo/typings.py | 20 +-
 pymongo/uri_parser.py | 327 ++--
 pymongo/write_concern.py | 14 +-
 setup.py | 170 +-
 test/__init__.py | 560 +++---
 test/atlas/test_connection.py | 57 +-
 test/auth_aws/test_auth_aws.py | 15 +-
 test/crud_v2_format.py | 10 +-
 test/mockupdb/operations.py | 94 +-
 test/mockupdb/test_auth_recovering_member.py | 31 +-
 test/mockupdb/test_cluster_time.py | 116 +-
 test/mockupdb/test_cursor_namespace.py | 92 +-
 test/mockupdb/test_getmore_sharded.py | 24 +-
 test/mockupdb/test_handshake.py | 195 +-
 test/mockupdb/test_initial_ismaster.py | 16 +-
 test/mockupdb/test_list_indexes.py | 30 +-
 test/mockupdb/test_max_staleness.py | 30 +-
 test/mockupdb/test_mixed_version_sharded.py | 42 +-
 .../mockupdb/test_mongos_command_read_mode.py | 80 +-
 .../test_network_disconnect_primary.py | 63 +-
 test/mockupdb/test_op_msg.py | 290 +--
 test/mockupdb/test_op_msg_read_preference.py | 89 +-
 test/mockupdb/test_query_read_pref_sharded.py | 44 +-
 test/mockupdb/test_reset_and_request_check.py | 42 +-
 test/mockupdb/test_rsghost.py | 35 +-
 test/mockupdb/test_slave_okay_rs.py | 36 +-
 test/mockupdb/test_slave_okay_sharded.py | 46 +-
 test/mockupdb/test_slave_okay_single.py | 53 +-
 test/mod_wsgi_test/test_client.py | 87 +-
 test/ocsp/test_ocsp.py | 25 +-
 test/performance/perf_test.py | 213 +--
 test/pymongo_mocks.py | 100 +-
 test/qcheck.py | 60 +-
 test/test_auth.py | 575 +++---
 test/test_auth_spec.py | 70 +-
 test/test_binary.py | 364 ++--
 test/test_bson.py | 811 ++++----
 test/test_bson_corpus.py | 156 +-
 test/test_bulk.py | 1033 +++++-----
 test/test_change_stream.py | 615 +++---
 test/test_client.py | 999 +++++-----
 test/test_client_context.py | 48 +-
 test/test_cmap.py | 280 ++-
 test/test_code.py | 19 +-
 test/test_collation.py | 267 ++-
 test/test_collection.py | 1190 ++++++------
 test/test_collection_management.py | 4 +-
 test/test_command_monitoring_legacy.py | 182 +-
 test/test_command_monitoring_unified.py | 14 +-
 test/test_common.py | 120 +-
 ...nnections_survive_primary_stepdown_spec.py | 44 +-
 test/test_create_entities.py | 71 +-
 test/test_crud_unified.py | 7 +-
 test/test_crud_v1.py | 129 +-
 test/test_cursor.py | 696 ++++---
 test/test_custom_types.py | 549 +++---
 test/test_data_lake.py | 46 +-
 test/test_database.py | 277 +--
 test/test_dbref.py | 95 +-
 test/test_decimal128.py | 28 +-
 test/test_discovery_and_monitoring.py | 305 ++-
 test/test_dns.py | 116 +-
 test/test_encryption.py | 1684 +++++++++--------
 test/test_errors.py | 40 +-
 test/test_examples.py | 928 +++++----
 test/test_grid_file.py | 152 +-
 test/test_gridfs.py | 169 +-
 test/test_gridfs_bucket.py | 267 ++-
 test/test_gridfs_spec.py | 123 +-
 test/test_heartbeat_monitoring.py | 59 +-
 test/test_json_util.py | 429 +++--
 test/test_load_balancer.py | 37 +-
 test/test_max_staleness.py | 41 +-
 test/test_mongos_load_balancing.py | 73 +-
 test/test_monitor.py | 27 +-
 test/test_monitoring.py | 1136 +++++------
 test/test_objectid.py | 54 +-
 test/test_ocsp_cache.py | 32 +-
 test/test_pooling.py | 101 +-
 test/test_pymongo.py | 7 +-
 test/test_raw_bson.py | 128 +-
 test/test_read_concern.py | 72 +-
 test/test_read_preferences.py | 472 ++---
 test/test_read_write_concern_spec.py | 272 ++-
 test/test_replica_set_reconfig.py | 136 +-
 test/test_retryable_reads.py | 126 +-
 test/test_retryable_writes.py | 446 ++---
 test/test_retryable_writes_unified.py | 3 +-
 test/test_saslprep.py | 5 +-
 test/test_sdam_monitoring_spec.py | 232 +--
 test/test_server.py | 9 +-
 test/test_server_description.py | 136 +-
 test/test_server_selection.py | 150 +-
 test/test_server_selection_in_window.py | 60 +-
 test/test_server_selection_rtt.py | 18 +-
 test/test_session.py | 639 +++---
 test/test_sessions_unified.py | 3 +-
 test/test_son.py | 71 +-
 test/test_srv_polling.py | 146 +-
 test/test_ssl.py | 533 +++---
 test/test_streaming_protocol.py | 145 +-
 test/test_threads.py | 19 +-
 test/test_timestamp.py | 9 +-
 test/test_topology.py | 810 ++++----
 test/test_transactions.py | 284 +--
 test/test_transactions_unified.py | 3 +-
 test/test_unified_format.py | 73 +-
 test/test_uri_parser.py | 728 ++++---
 test/test_uri_spec.py | 137 +-
 test/test_versioned_api.py | 48 +-
 test/test_write_concern.py | 9 +-
 test/unicode/test_utf8.py | 13 +-
 test/unified_format.py | 870 ++++-----
 test/utils.py | 387 ++--
 test/utils_selection_tests.py | 146 +-
 test/utils_spec_runner.py | 314 ++-
 test/version.py | 16 +-
 tools/clean.py | 2 +
 tools/fail_if_no_c.py | 1 +
 tools/ocsptest.py | 33 +-
 191 files changed, 18246 insertions(+), 16796 deletions(-)

diff --git a/bson/__init__.py b/bson/__init__.py
index e518cd91c9..9431909f9c 100644
--- a/bson/__init__.py
+++ b/bson/__init__.py
@@ -64,16 +64,39 @@
 from codecs import utf_8_decode as _utf_8_decode  # type: ignore[attr-defined]
 from codecs import utf_8_encode as _utf_8_encode  # type: ignore[attr-defined]
 from collections import abc as _abc
-from typing import (IO, TYPE_CHECKING, Any, BinaryIO, Callable, Dict, Generator,
-                    Iterator, List, Mapping, MutableMapping, NoReturn,
-                    Sequence, Tuple, Type, TypeVar, Union, cast)
-
-from bson.binary import (ALL_UUID_SUBTYPES, CSHARP_LEGACY, JAVA_LEGACY,
-                         OLD_UUID_SUBTYPE, STANDARD, UUID_SUBTYPE, Binary,
-                         UuidRepresentation)
+from typing import (
+    IO,
+    TYPE_CHECKING,
+    Any,
+    BinaryIO,
+    Callable,
+    Dict,
+    Generator,
+    Iterator,
+    List,
+    Mapping,
+    MutableMapping,
+    NoReturn,
+    Sequence,
+    Tuple,
+    Type,
+    TypeVar,
+    Union,
+    cast,
+)
+
+from bson.binary import (
+    ALL_UUID_SUBTYPES,
+    CSHARP_LEGACY,
+    JAVA_LEGACY,
+    OLD_UUID_SUBTYPE,
+    STANDARD,
+    UUID_SUBTYPE,
+    Binary,
+    UuidRepresentation,
+)
 from bson.code import Code
-from bson.codec_options import (DEFAULT_CODEC_OPTIONS, CodecOptions,
- _raw_document_class) +from bson.codec_options import DEFAULT_CODEC_OPTIONS, CodecOptions, _raw_document_class from bson.dbref import DBRef from bson.decimal128 import Decimal128 from bson.errors import InvalidBSON, InvalidDocument, InvalidStringData @@ -90,11 +113,13 @@ if TYPE_CHECKING: from array import array from mmap import mmap + from bson.raw_bson import RawBSONDocument try: from bson import _cbson # type: ignore[attr-defined] + _USE_C = True except ImportError: _USE_C = False @@ -104,27 +129,27 @@ EPOCH_NAIVE = datetime.datetime.utcfromtimestamp(0) -BSONNUM = b"\x01" # Floating point -BSONSTR = b"\x02" # UTF-8 string -BSONOBJ = b"\x03" # Embedded document -BSONARR = b"\x04" # Array -BSONBIN = b"\x05" # Binary -BSONUND = b"\x06" # Undefined -BSONOID = b"\x07" # ObjectId -BSONBOO = b"\x08" # Boolean -BSONDAT = b"\x09" # UTC Datetime -BSONNUL = b"\x0A" # Null -BSONRGX = b"\x0B" # Regex -BSONREF = b"\x0C" # DBRef -BSONCOD = b"\x0D" # Javascript code -BSONSYM = b"\x0E" # Symbol -BSONCWS = b"\x0F" # Javascript code with scope -BSONINT = b"\x10" # 32bit int -BSONTIM = b"\x11" # Timestamp -BSONLON = b"\x12" # 64bit int -BSONDEC = b"\x13" # Decimal128 -BSONMIN = b"\xFF" # Min key -BSONMAX = b"\x7F" # Max key +BSONNUM = b"\x01" # Floating point +BSONSTR = b"\x02" # UTF-8 string +BSONOBJ = b"\x03" # Embedded document +BSONARR = b"\x04" # Array +BSONBIN = b"\x05" # Binary +BSONUND = b"\x06" # Undefined +BSONOID = b"\x07" # ObjectId +BSONBOO = b"\x08" # Boolean +BSONDAT = b"\x09" # UTC Datetime +BSONNUL = b"\x0A" # Null +BSONRGX = b"\x0B" # Regex +BSONREF = b"\x0C" # DBRef +BSONCOD = b"\x0D" # Javascript code +BSONSYM = b"\x0E" # Symbol +BSONCWS = b"\x0F" # Javascript code with scope +BSONINT = b"\x10" # 32bit int +BSONTIM = b"\x11" # Timestamp +BSONLON = b"\x12" # 64bit int +BSONDEC = b"\x13" # Decimal128 +BSONMIN = b"\xFF" # Min key +BSONMAX = b"\x7F" # Max key _UNPACK_FLOAT_FROM = struct.Struct(" Tuple[Any, memoryview]: def _raise_unknown_type(element_type: int, element_name: str) -> NoReturn: """Unknown type helper.""" - raise InvalidBSON("Detected unknown BSON type %r for fieldname '%s'. Are " - "you using the latest driver version?" % ( - chr(element_type).encode(), element_name)) + raise InvalidBSON( + "Detected unknown BSON type %r for fieldname '%s'. Are " + "you using the latest driver version?" 
% (chr(element_type).encode(), element_name) + ) -def _get_int(data: Any, view: Any, position: int, dummy0: Any, dummy1: Any, dummy2: Any) -> Tuple[int, int]: +def _get_int( + data: Any, view: Any, position: int, dummy0: Any, dummy1: Any, dummy2: Any +) -> Tuple[int, int]: """Decode a BSON int32 to python int.""" return _UNPACK_INT_FROM(data, position)[0], position + 4 @@ -157,16 +185,19 @@ def _get_int(data: Any, view: Any, position: int, dummy0: Any, dummy1: Any, dumm def _get_c_string(data: Any, view: Any, position: int, opts: Any) -> Tuple[str, int]: """Decode a BSON 'C' string to python str.""" end = data.index(b"\x00", position) - return _utf_8_decode(view[position:end], - opts.unicode_decode_error_handler, True)[0], end + 1 + return _utf_8_decode(view[position:end], opts.unicode_decode_error_handler, True)[0], end + 1 -def _get_float(data: Any, view: Any, position: int, dummy0: Any, dummy1: Any, dummy2: Any) -> Tuple[float, int]: +def _get_float( + data: Any, view: Any, position: int, dummy0: Any, dummy1: Any, dummy2: Any +) -> Tuple[float, int]: """Decode a BSON double to python float.""" return _UNPACK_FLOAT_FROM(data, position)[0], position + 8 -def _get_string(data: Any, view: Any, position: int, obj_end: int, opts: Any, dummy: Any) -> Tuple[str, int]: +def _get_string( + data: Any, view: Any, position: int, obj_end: int, opts: Any, dummy: Any +) -> Tuple[str, int]: """Decode a BSON string to python str.""" length = _UNPACK_INT_FROM(data, position)[0] position += 4 @@ -175,8 +206,7 @@ def _get_string(data: Any, view: Any, position: int, obj_end: int, opts: Any, du end = position + length - 1 if data[end] != 0: raise InvalidBSON("invalid end of string") - return _utf_8_decode(view[position:end], - opts.unicode_decode_error_handler, True)[0], end + 1 + return _utf_8_decode(view[position:end], opts.unicode_decode_error_handler, True)[0], end + 1 def _get_object_size(data: Any, position: int, obj_end: int) -> Tuple[int, int]: @@ -196,26 +226,30 @@ def _get_object_size(data: Any, position: int, obj_end: int) -> Tuple[int, int]: return obj_size, end -def _get_object(data: Any, view: Any, position: int, obj_end: int, opts: Any, dummy: Any) -> Tuple[Any, int]: +def _get_object( + data: Any, view: Any, position: int, obj_end: int, opts: Any, dummy: Any +) -> Tuple[Any, int]: """Decode a BSON subdocument to opts.document_class or bson.dbref.DBRef.""" obj_size, end = _get_object_size(data, position, obj_end) if _raw_document_class(opts.document_class): - return (opts.document_class(data[position:end + 1], opts), - position + obj_size) + return (opts.document_class(data[position : end + 1], opts), position + obj_size) obj = _elements_to_dict(data, view, position + 4, end, opts) position += obj_size # If DBRef validation fails, return a normal doc. 
- if (isinstance(obj.get('$ref'), str) and - "$id" in obj and - isinstance(obj.get('$db'), (str, type(None)))): - return (DBRef(obj.pop("$ref"), obj.pop("$id", None), - obj.pop("$db", None), obj), position) + if ( + isinstance(obj.get("$ref"), str) + and "$id" in obj + and isinstance(obj.get("$db"), (str, type(None))) + ): + return (DBRef(obj.pop("$ref"), obj.pop("$id", None), obj.pop("$db", None), obj), position) return obj, position -def _get_array(data: Any, view: Any, position: int, obj_end: int, opts: Any, element_name: str) -> Tuple[Any, int]: +def _get_array( + data: Any, view: Any, position: int, obj_end: int, opts: Any, element_name: str +) -> Tuple[Any, int]: """Decode a BSON array to python list.""" size = _UNPACK_INT_FROM(data, position)[0] end = position + size - 1 @@ -235,10 +269,11 @@ def _get_array(data: Any, view: Any, position: int, obj_end: int, opts: Any, ele while position < end: element_type = data[position] # Just skip the keys. - position = index(b'\x00', position) + 1 + position = index(b"\x00", position) + 1 try: value, position = getter[element_type]( - data, view, position, obj_end, opts, element_name) + data, view, position, obj_end, opts, element_name + ) except KeyError: _raise_unknown_type(element_type, element_name) @@ -250,11 +285,13 @@ def _get_array(data: Any, view: Any, position: int, obj_end: int, opts: Any, ele append(value) if position != end + 1: - raise InvalidBSON('bad array length') + raise InvalidBSON("bad array length") return result, position + 1 -def _get_binary(data: Any, view: Any, position: int, obj_end: int, opts: Any, dummy1: Any) -> Tuple[Union[Binary, uuid.UUID], int]: +def _get_binary( + data: Any, view: Any, position: int, obj_end: int, opts: Any, dummy1: Any +) -> Tuple[Union[Binary, uuid.UUID], int]: """Decode a BSON binary to bson.binary.Binary or python UUID.""" length, subtype = _UNPACK_LENGTH_SUBTYPE_FROM(data, position) position += 5 @@ -266,15 +303,17 @@ def _get_binary(data: Any, view: Any, position: int, obj_end: int, opts: Any, du length = length2 end = position + length if length < 0 or end > obj_end: - raise InvalidBSON('bad binary object length') + raise InvalidBSON("bad binary object length") # Convert UUID subtypes to native UUIDs. 
if subtype in ALL_UUID_SUBTYPES: uuid_rep = opts.uuid_representation binary_value = Binary(data[position:end], subtype) - if ((uuid_rep == UuidRepresentation.UNSPECIFIED) or - (subtype == UUID_SUBTYPE and uuid_rep != STANDARD) or - (subtype == OLD_UUID_SUBTYPE and uuid_rep == STANDARD)): + if ( + (uuid_rep == UuidRepresentation.UNSPECIFIED) + or (subtype == UUID_SUBTYPE and uuid_rep != STANDARD) + or (subtype == OLD_UUID_SUBTYPE and uuid_rep == STANDARD) + ): return binary_value, end return binary_value.as_uuid(uuid_rep), end @@ -287,47 +326,57 @@ def _get_binary(data: Any, view: Any, position: int, obj_end: int, opts: Any, du return value, end -def _get_oid(data: Any, view: Any, position: int, dummy0: Any, dummy1: Any, dummy2: Any) -> Tuple[ObjectId, int]: +def _get_oid( + data: Any, view: Any, position: int, dummy0: Any, dummy1: Any, dummy2: Any +) -> Tuple[ObjectId, int]: """Decode a BSON ObjectId to bson.objectid.ObjectId.""" end = position + 12 return ObjectId(data[position:end]), end -def _get_boolean(data: Any, view: Any, position: int, dummy0: Any, dummy1: Any, dummy2: Any) -> Tuple[bool, int]: +def _get_boolean( + data: Any, view: Any, position: int, dummy0: Any, dummy1: Any, dummy2: Any +) -> Tuple[bool, int]: """Decode a BSON true/false to python True/False.""" end = position + 1 boolean_byte = data[position:end] - if boolean_byte == b'\x00': + if boolean_byte == b"\x00": return False, end - elif boolean_byte == b'\x01': + elif boolean_byte == b"\x01": return True, end - raise InvalidBSON('invalid boolean value: %r' % boolean_byte) + raise InvalidBSON("invalid boolean value: %r" % boolean_byte) -def _get_date(data: Any, view: Any, position: int, dummy0: int, opts: Any, dummy1: Any) -> Tuple[datetime.datetime, int]: +def _get_date( + data: Any, view: Any, position: int, dummy0: int, opts: Any, dummy1: Any +) -> Tuple[datetime.datetime, int]: """Decode a BSON datetime to python datetime.datetime.""" - return _millis_to_datetime( - _UNPACK_LONG_FROM(data, position)[0], opts), position + 8 + return _millis_to_datetime(_UNPACK_LONG_FROM(data, position)[0], opts), position + 8 -def _get_code(data: Any, view: Any, position: int, obj_end: int, opts: Any, element_name: str) -> Tuple[Code, int]: +def _get_code( + data: Any, view: Any, position: int, obj_end: int, opts: Any, element_name: str +) -> Tuple[Code, int]: """Decode a BSON code to bson.code.Code.""" code, position = _get_string(data, view, position, obj_end, opts, element_name) return Code(code), position -def _get_code_w_scope(data: Any, view: Any, position: int, obj_end: int, opts: Any, element_name: str) -> Tuple[Code, int]: +def _get_code_w_scope( + data: Any, view: Any, position: int, obj_end: int, opts: Any, element_name: str +) -> Tuple[Code, int]: """Decode a BSON code_w_scope to bson.code.Code.""" code_end = position + _UNPACK_INT_FROM(data, position)[0] - code, position = _get_string( - data, view, position + 4, code_end, opts, element_name) + code, position = _get_string(data, view, position + 4, code_end, opts, element_name) scope, position = _get_object(data, view, position, code_end, opts, element_name) if position != code_end: - raise InvalidBSON('scope outside of javascript code boundaries') + raise InvalidBSON("scope outside of javascript code boundaries") return Code(code, scope), position -def _get_regex(data: Any, view: Any, position: int, dummy0: Any, opts: Any, dummy1: Any) -> Tuple[Regex, int]: +def _get_regex( + data: Any, view: Any, position: int, dummy0: Any, opts: Any, dummy1: Any +) -> Tuple[Regex, 
int]: """Decode a BSON regex to bson.regex.Regex or a python pattern object.""" pattern, position = _get_c_string(data, view, position, opts) bson_flags, position = _get_c_string(data, view, position, opts) @@ -335,26 +384,33 @@ def _get_regex(data: Any, view: Any, position: int, dummy0: Any, opts: Any, dumm return bson_re, position -def _get_ref(data: Any, view: Any, position: int, obj_end: int, opts: Any, element_name: str) -> Tuple[DBRef, int]: +def _get_ref( + data: Any, view: Any, position: int, obj_end: int, opts: Any, element_name: str +) -> Tuple[DBRef, int]: """Decode (deprecated) BSON DBPointer to bson.dbref.DBRef.""" - collection, position = _get_string( - data, view, position, obj_end, opts, element_name) + collection, position = _get_string(data, view, position, obj_end, opts, element_name) oid, position = _get_oid(data, view, position, obj_end, opts, element_name) return DBRef(collection, oid), position -def _get_timestamp(data: Any, view: Any, position: int, dummy0: Any, dummy1: Any, dummy2: Any) -> Tuple[Timestamp, int]: +def _get_timestamp( + data: Any, view: Any, position: int, dummy0: Any, dummy1: Any, dummy2: Any +) -> Tuple[Timestamp, int]: """Decode a BSON timestamp to bson.timestamp.Timestamp.""" inc, timestamp = _UNPACK_TIMESTAMP_FROM(data, position) return Timestamp(timestamp, inc), position + 8 -def _get_int64(data: Any, view: Any, position: int, dummy0: Any, dummy1: Any, dummy2: Any) -> Tuple[Int64, int]: +def _get_int64( + data: Any, view: Any, position: int, dummy0: Any, dummy1: Any, dummy2: Any +) -> Tuple[Int64, int]: """Decode a BSON int64 to bson.int64.Int64.""" return Int64(_UNPACK_LONG_FROM(data, position)[0]), position + 8 -def _get_decimal128(data: Any, view: Any, position: int, dummy0: Any, dummy1: Any, dummy2: Any) -> Tuple[Decimal128, int]: +def _get_decimal128( + data: Any, view: Any, position: int, dummy0: Any, dummy1: Any, dummy2: Any +) -> Tuple[Decimal128, int]: """Decode a BSON decimal128 to bson.decimal128.Decimal128.""" end = position + 16 return Decimal128.from_bid(data[position:end]), end @@ -366,7 +422,7 @@ def _get_decimal128(data: Any, view: Any, position: int, dummy0: Any, dummy1: An # - position: int, beginning of object in 'data' to decode # - obj_end: int, end of object to decode in 'data' if variable-length type # - opts: a CodecOptions -_ELEMENT_GETTER: Dict[int, Callable[..., Tuple[Any, int]]]= { +_ELEMENT_GETTER: Dict[int, Callable[..., Tuple[Any, int]]] = { ord(BSONNUM): _get_float, ord(BSONSTR): _get_string, ord(BSONOBJ): _get_object, @@ -387,22 +443,26 @@ def _get_decimal128(data: Any, view: Any, position: int, dummy0: Any, dummy1: An ord(BSONLON): _get_int64, ord(BSONDEC): _get_decimal128, ord(BSONMIN): lambda u, v, w, x, y, z: (MinKey(), w), - ord(BSONMAX): lambda u, v, w, x, y, z: (MaxKey(), w)} + ord(BSONMAX): lambda u, v, w, x, y, z: (MaxKey(), w), +} if _USE_C: + def _element_to_dict(data: Any, view: Any, position: int, obj_end: int, opts: Any) -> Any: return _cbson._element_to_dict(data, position, obj_end, opts) + else: + def _element_to_dict(data: Any, view: Any, position: int, obj_end: int, opts: Any) -> Any: """Decode a single key, value pair.""" element_type = data[position] position += 1 element_name, position = _get_c_string(data, view, position, opts) try: - value, position = _ELEMENT_GETTER[element_type](data, view, position, - obj_end, opts, - element_name) + value, position = _ELEMENT_GETTER[element_type]( + data, view, position, obj_end, opts, element_name + ) except KeyError: 
_raise_unknown_type(element_type, element_name) @@ -422,7 +482,9 @@ def _raw_to_dict(data: Any, position: int, obj_end: int, opts: Any, result: _T) return _elements_to_dict(data, view, position, obj_end, opts, result) -def _elements_to_dict(data: Any, view: Any, position: int, obj_end: int, opts: Any, result: Any = None) -> Any: +def _elements_to_dict( + data: Any, view: Any, position: int, obj_end: int, opts: Any, result: Any = None +) -> Any: """Decode a BSON document into result.""" if result is None: result = opts.document_class() @@ -431,7 +493,7 @@ def _elements_to_dict(data: Any, view: Any, position: int, obj_end: int, opts: A key, value, position = _element_to_dict(data, view, position, obj_end, opts) result[key] = value if position != obj_end: - raise InvalidBSON('bad object or element length') + raise InvalidBSON("bad object or element length") return result @@ -449,6 +511,8 @@ def _bson_to_dict(data: Any, opts: Any) -> Any: # Change exception type to InvalidBSON but preserve traceback. _, exc_value, exc_tb = sys.exc_info() raise InvalidBSON(str(exc_value)).with_traceback(exc_tb) + + if _USE_C: _bson_to_dict = _cbson._bson_to_dict @@ -458,7 +522,7 @@ def _bson_to_dict(data: Any, opts: Any) -> Any: _PACK_LENGTH_SUBTYPE = struct.Struct(" Generator[bytes, None, None]: @@ -473,25 +537,22 @@ def gen_list_name() -> Generator[bytes, None, None]: counter = itertools.count(1000) while True: - yield (str(next(counter)) + "\x00").encode('utf8') + yield (str(next(counter)) + "\x00").encode("utf8") def _make_c_string_check(string: Union[str, bytes]) -> bytes: """Make a 'C' string, checking for embedded NUL characters.""" if isinstance(string, bytes): if b"\x00" in string: - raise InvalidDocument("BSON keys / regex patterns must not " - "contain a NUL character") + raise InvalidDocument("BSON keys / regex patterns must not " "contain a NUL character") try: _utf_8_decode(string, None, True) return string + b"\x00" except UnicodeError: - raise InvalidStringData("strings in documents must be valid " - "UTF-8: %r" % string) + raise InvalidStringData("strings in documents must be valid " "UTF-8: %r" % string) else: if "\x00" in string: - raise InvalidDocument("BSON keys / regex patterns must not " - "contain a NUL character") + raise InvalidDocument("BSON keys / regex patterns must not " "contain a NUL character") return cast(bytes, _utf_8_encode(string)[0]) + b"\x00" @@ -502,8 +563,7 @@ def _make_c_string(string: Union[str, bytes]) -> bytes: _utf_8_decode(string, None, True) return string + b"\x00" except UnicodeError: - raise InvalidStringData("strings in documents must be valid " - "UTF-8: %r" % string) + raise InvalidStringData("strings in documents must be valid " "UTF-8: %r" % string) else: return cast(bytes, _utf_8_encode(string)[0]) + b"\x00" @@ -512,8 +572,7 @@ def _make_name(string: str) -> bytes: """Make a 'C' string suitable for a BSON key.""" # Keys can only be text in python 3. 
if "\x00" in string: - raise InvalidDocument("BSON keys / regex patterns must not " - "contain a NUL character") + raise InvalidDocument("BSON keys / regex patterns must not " "contain a NUL character") return cast(bytes, _utf_8_encode(string)[0]) + b"\x00" @@ -531,9 +590,8 @@ def _encode_bytes(name: bytes, value: bytes, dummy0: Any, dummy1: Any) -> bytes: def _encode_mapping(name: bytes, value: Any, check_keys: bool, opts: Any) -> bytes: """Encode a mapping type.""" if _raw_document_class(value): - return b'\x03' + name + value.raw - data = b"".join([_element_to_bson(key, val, check_keys, opts) - for key, val in value.items()]) + return b"\x03" + name + value.raw + data = b"".join([_element_to_bson(key, val, check_keys, opts) for key, val in value.items()]) return b"\x03" + name + _PACK_INT(len(data) + 5) + data + b"\x00" @@ -542,27 +600,22 @@ def _encode_dbref(name: bytes, value: DBRef, check_keys: bool, opts: Any) -> byt buf = bytearray(b"\x03" + name + b"\x00\x00\x00\x00") begin = len(buf) - 4 - buf += _name_value_to_bson(b"$ref\x00", - value.collection, check_keys, opts) - buf += _name_value_to_bson(b"$id\x00", - value.id, check_keys, opts) + buf += _name_value_to_bson(b"$ref\x00", value.collection, check_keys, opts) + buf += _name_value_to_bson(b"$id\x00", value.id, check_keys, opts) if value.database is not None: - buf += _name_value_to_bson( - b"$db\x00", value.database, check_keys, opts) + buf += _name_value_to_bson(b"$db\x00", value.database, check_keys, opts) for key, val in value._DBRef__kwargs.items(): buf += _element_to_bson(key, val, check_keys, opts) buf += b"\x00" - buf[begin:begin + 4] = _PACK_INT(len(buf) - begin) + buf[begin : begin + 4] = _PACK_INT(len(buf) - begin) return bytes(buf) def _encode_list(name: bytes, value: Sequence[Any], check_keys: bool, opts: Any) -> bytes: """Encode a list/tuple.""" lname = gen_list_name() - data = b"".join([_name_value_to_bson(next(lname), item, - check_keys, opts) - for item in value]) + data = b"".join([_name_value_to_bson(next(lname), item, check_keys, opts) for item in value]) return b"\x04" + name + _PACK_INT(len(data) + 5) + data + b"\x00" @@ -586,6 +639,7 @@ def _encode_uuid(name: bytes, value: uuid.UUID, dummy: Any, opts: Any) -> bytes: binval = Binary.from_uuid(value, uuid_representation=uuid_representation) return _encode_binary(name, binval, dummy, opts) + def _encode_objectid(name: bytes, value: ObjectId, dummy: Any, dummy1: Any) -> bytes: """Encode bson.objectid.ObjectId.""" return b"\x07" + name + value.binary @@ -733,9 +787,14 @@ def _encode_maxkey(name: bytes, dummy0: Any, dummy1: Any, dummy2: Any) -> bytes: _BUILT_IN_TYPES = tuple(t for t in _ENCODERS) -def _name_value_to_bson(name: bytes, value: Any, check_keys: bool, opts: Any, - in_custom_call: bool = False, - in_fallback_call: bool = False) -> bytes: +def _name_value_to_bson( + name: bytes, + value: Any, + check_keys: bool, + opts: Any, + in_custom_call: bool = False, + in_fallback_call: bool = False, +) -> bytes: """Encode a single name, value pair.""" # First see if the type is already cached. KeyError will only ever # happen once per subtype. @@ -760,8 +819,8 @@ def _name_value_to_bson(name: bytes, value: Any, check_keys: bool, opts: Any, custom_encoder = opts.type_registry._encoder_map.get(type(value)) if custom_encoder is not None: return _name_value_to_bson( - name, custom_encoder(value), check_keys, opts, - in_custom_call=True) + name, custom_encoder(value), check_keys, opts, in_custom_call=True + ) # Fourth, test each base type. 
This will only happen once for # a subtype of a supported base type. Unlike in the C-extensions, this @@ -779,18 +838,16 @@ def _name_value_to_bson(name: bytes, value: Any, check_keys: bool, opts: Any, fallback_encoder = opts.type_registry._fallback_encoder if not in_fallback_call and fallback_encoder is not None: return _name_value_to_bson( - name, fallback_encoder(value), check_keys, opts, - in_fallback_call=True) + name, fallback_encoder(value), check_keys, opts, in_fallback_call=True + ) - raise InvalidDocument( - "cannot encode object: %r, of type: %r" % (value, type(value))) + raise InvalidDocument("cannot encode object: %r, of type: %r" % (value, type(value))) def _element_to_bson(key: Any, value: Any, check_keys: bool, opts: Any) -> bytes: """Encode a single key, value pair.""" if not isinstance(key, str): - raise InvalidDocument("documents must have only string keys, " - "key was %r" % (key,)) + raise InvalidDocument("documents must have only string keys, " "key was %r" % (key,)) if check_keys: if key.startswith("$"): raise InvalidDocument("key %r must not start with '$'" % (key,)) @@ -808,17 +865,17 @@ def _dict_to_bson(doc: Any, check_keys: bool, opts: Any, top_level: bool = True) try: elements = [] if top_level and "_id" in doc: - elements.append(_name_value_to_bson(b"_id\x00", doc["_id"], - check_keys, opts)) + elements.append(_name_value_to_bson(b"_id\x00", doc["_id"], check_keys, opts)) for key, value in doc.items(): if not top_level or key != "_id": - elements.append(_element_to_bson(key, value, - check_keys, opts)) + elements.append(_element_to_bson(key, value, check_keys, opts)) except AttributeError: raise TypeError("encoder expected a mapping type but got: %r" % (doc,)) encoded = b"".join(elements) return _PACK_INT(len(encoded) + 5) + encoded + b"\x00" + + if _USE_C: _dict_to_bson = _cbson._dict_to_bson @@ -829,26 +886,22 @@ def _millis_to_datetime(millis: int, opts: Any) -> datetime.datetime: seconds = (millis - diff) // 1000 micros = diff * 1000 if opts.tz_aware: - dt = EPOCH_AWARE + datetime.timedelta(seconds=seconds, - microseconds=micros) + dt = EPOCH_AWARE + datetime.timedelta(seconds=seconds, microseconds=micros) if opts.tzinfo: dt = dt.astimezone(opts.tzinfo) return dt else: - return EPOCH_NAIVE + datetime.timedelta(seconds=seconds, - microseconds=micros) + return EPOCH_NAIVE + datetime.timedelta(seconds=seconds, microseconds=micros) def _datetime_to_millis(dtm: datetime.datetime) -> int: """Convert datetime to milliseconds since epoch UTC.""" if dtm.utcoffset() is not None: dtm = dtm - dtm.utcoffset() # type: ignore - return int(calendar.timegm(dtm.timetuple()) * 1000 + - dtm.microsecond // 1000) + return int(calendar.timegm(dtm.timetuple()) * 1000 + dtm.microsecond // 1000) -_CODEC_OPTIONS_TYPE_ERROR = TypeError( - "codec_options must be an instance of CodecOptions") +_CODEC_OPTIONS_TYPE_ERROR = TypeError("codec_options must be an instance of CodecOptions") _DocumentIn = Mapping[str, Any] @@ -856,7 +909,11 @@ def _datetime_to_millis(dtm: datetime.datetime) -> int: _ReadableBuffer = Union[bytes, memoryview, "mmap", "array"] -def encode(document: _DocumentIn, check_keys: bool = False, codec_options: CodecOptions = DEFAULT_CODEC_OPTIONS) -> bytes: +def encode( + document: _DocumentIn, + check_keys: bool = False, + codec_options: CodecOptions = DEFAULT_CODEC_OPTIONS, +) -> bytes: """Encode a document to BSON. A document can be any mapping type (like :class:`dict`). 
@@ -883,7 +940,9 @@ def encode(document: _DocumentIn, check_keys: bool = False, codec_options: Codec return _dict_to_bson(document, check_keys, codec_options) -def decode(data: _ReadableBuffer, codec_options: CodecOptions = DEFAULT_CODEC_OPTIONS) -> Dict[str, Any]: +def decode( + data: _ReadableBuffer, codec_options: CodecOptions = DEFAULT_CODEC_OPTIONS +) -> Dict[str, Any]: """Decode BSON to a document. By default, returns a BSON document represented as a Python @@ -915,7 +974,9 @@ def decode(data: _ReadableBuffer, codec_options: CodecOptions = DEFAULT_CODEC_OP return _bson_to_dict(data, codec_options) -def decode_all(data: _ReadableBuffer, codec_options: CodecOptions = DEFAULT_CODEC_OPTIONS) -> List[Dict[str, Any]]: +def decode_all( + data: _ReadableBuffer, codec_options: CodecOptions = DEFAULT_CODEC_OPTIONS +) -> List[Dict[str, Any]]: """Decode BSON data to multiple documents. `data` must be a bytes-like object implementing the buffer protocol that @@ -957,14 +1018,10 @@ def decode_all(data: _ReadableBuffer, codec_options: CodecOptions = DEFAULT_CODE raise InvalidBSON("bad eoo") if use_raw: docs.append( - codec_options.document_class( - data[position:obj_end + 1], codec_options)) + codec_options.document_class(data[position : obj_end + 1], codec_options) + ) else: - docs.append(_elements_to_dict(data, - view, - position + 4, - obj_end, - codec_options)) + docs.append(_elements_to_dict(data, view, position + 4, obj_end, codec_options)) position += obj_size return docs except InvalidBSON: @@ -999,9 +1056,9 @@ def _decode_selective(rawdoc: Any, fields: Any, codec_options: Any) -> Mapping[A def _convert_raw_document_lists_to_streams(document: Any) -> None: - cursor = document.get('cursor') + cursor = document.get("cursor") if cursor: - for key in ('firstBatch', 'nextBatch'): + for key in ("firstBatch", "nextBatch"): batch = cursor.get(key) if batch: stream = b"".join(doc.raw for doc in batch) @@ -1039,13 +1096,23 @@ def _decode_all_selective(data: Any, codec_options: CodecOptions, fields: Any) - # Decode documents for internal use. from bson.raw_bson import RawBSONDocument + internal_codec_options = codec_options.with_options( - document_class=RawBSONDocument, type_registry=None) + document_class=RawBSONDocument, type_registry=None + ) _doc = _bson_to_dict(data, internal_codec_options) - return [_decode_selective(_doc, fields, codec_options,)] - - -def decode_iter(data: bytes, codec_options: CodecOptions = DEFAULT_CODEC_OPTIONS) -> Iterator[_DocumentOut]: + return [ + _decode_selective( + _doc, + fields, + codec_options, + ) + ] + + +def decode_iter( + data: bytes, codec_options: CodecOptions = DEFAULT_CODEC_OPTIONS +) -> Iterator[_DocumentOut]: """Decode BSON data to multiple documents as a generator. Works similarly to the decode_all function, but yields one document at a @@ -1072,13 +1139,15 @@ def decode_iter(data: bytes, codec_options: CodecOptions = DEFAULT_CODEC_OPTIONS end = len(data) - 1 while position < end: obj_size = _UNPACK_INT_FROM(data, position)[0] - elements = data[position:position + obj_size] + elements = data[position : position + obj_size] position += obj_size yield _bson_to_dict(elements, codec_options) -def decode_file_iter(file_obj: Union[BinaryIO, IO], codec_options: CodecOptions = DEFAULT_CODEC_OPTIONS) -> Iterator[_DocumentOut]: +def decode_file_iter( + file_obj: Union[BinaryIO, IO], codec_options: CodecOptions = DEFAULT_CODEC_OPTIONS +) -> Iterator[_DocumentOut]: """Decode bson data from a file to multiple documents as a generator. 
Works similarly to the decode_all function, but reads from the file object @@ -1136,8 +1205,12 @@ class BSON(bytes): """ @classmethod - def encode(cls: Type["BSON"], document: _DocumentIn, check_keys: bool = False, - codec_options: CodecOptions = DEFAULT_CODEC_OPTIONS) -> "BSON": + def encode( + cls: Type["BSON"], + document: _DocumentIn, + check_keys: bool = False, + codec_options: CodecOptions = DEFAULT_CODEC_OPTIONS, + ) -> "BSON": """Encode a document to a new :class:`BSON` instance. A document can be any mapping type (like :class:`dict`). @@ -1196,6 +1269,5 @@ def decode(self, codec_options: CodecOptions = DEFAULT_CODEC_OPTIONS) -> Dict[st def has_c() -> bool: - """Is the C extension installed? - """ + """Is the C extension installed?""" return _USE_C diff --git a/bson/_helpers.py b/bson/_helpers.py index 2d89789586..ee3b0f1099 100644 --- a/bson/_helpers.py +++ b/bson/_helpers.py @@ -25,7 +25,7 @@ def _setstate_slots(self: Any, state: Any) -> None: def _mangle_name(name: str, prefix: str) -> str: if name.startswith("__"): - prefix = "_"+prefix + prefix = "_" + prefix else: prefix = "" return prefix + name @@ -37,5 +37,5 @@ def _getstate_slots(self: Any) -> Mapping[Any, Any]: for name in self.__slots__: mangled_name = _mangle_name(name, prefix) if hasattr(self, mangled_name): - ret[mangled_name] = getattr(self, mangled_name) + ret[mangled_name] = getattr(self, mangled_name) return ret diff --git a/bson/binary.py b/bson/binary.py index de44d48174..e20bf87af3 100644 --- a/bson/binary.py +++ b/bson/binary.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -from typing import Any, Tuple, Type, Union, TYPE_CHECKING +from typing import TYPE_CHECKING, Any, Tuple, Type, Union from uuid import UUID """Tools for representing BSON binary data. @@ -163,13 +163,15 @@ class UuidRepresentation: UuidRepresentation.STANDARD, UuidRepresentation.PYTHON_LEGACY, UuidRepresentation.JAVA_LEGACY, - UuidRepresentation.CSHARP_LEGACY) + UuidRepresentation.CSHARP_LEGACY, +) UUID_REPRESENTATION_NAMES = { - UuidRepresentation.UNSPECIFIED: 'UuidRepresentation.UNSPECIFIED', - UuidRepresentation.STANDARD: 'UuidRepresentation.STANDARD', - UuidRepresentation.PYTHON_LEGACY: 'UuidRepresentation.PYTHON_LEGACY', - UuidRepresentation.JAVA_LEGACY: 'UuidRepresentation.JAVA_LEGACY', - UuidRepresentation.CSHARP_LEGACY: 'UuidRepresentation.CSHARP_LEGACY'} + UuidRepresentation.UNSPECIFIED: "UuidRepresentation.UNSPECIFIED", + UuidRepresentation.STANDARD: "UuidRepresentation.STANDARD", + UuidRepresentation.PYTHON_LEGACY: "UuidRepresentation.PYTHON_LEGACY", + UuidRepresentation.JAVA_LEGACY: "UuidRepresentation.JAVA_LEGACY", + UuidRepresentation.CSHARP_LEGACY: "UuidRepresentation.CSHARP_LEGACY", +} MD5_SUBTYPE = 5 """BSON binary subtype for an MD5 hash. 
@@ -216,7 +218,11 @@ class Binary(bytes): _type_marker = 5 __subtype: int - def __new__(cls: Type["Binary"], data: Union[memoryview, bytes, "_mmap", "_array"], subtype: int = BINARY_SUBTYPE) -> "Binary": + def __new__( + cls: Type["Binary"], + data: Union[memoryview, bytes, "_mmap", "_array"], + subtype: int = BINARY_SUBTYPE, + ) -> "Binary": if not isinstance(subtype, int): raise TypeError("subtype must be an instance of int") if subtype >= 256 or subtype < 0: @@ -227,7 +233,9 @@ def __new__(cls: Type["Binary"], data: Union[memoryview, bytes, "_mmap", "_array return self @classmethod - def from_uuid(cls: Type["Binary"], uuid: UUID, uuid_representation: int = UuidRepresentation.STANDARD) -> "Binary": + def from_uuid( + cls: Type["Binary"], uuid: UUID, uuid_representation: int = UuidRepresentation.STANDARD + ) -> "Binary": """Create a BSON Binary object from a Python UUID. Creates a :class:`~bson.binary.Binary` object from a @@ -251,8 +259,9 @@ def from_uuid(cls: Type["Binary"], uuid: UUID, uuid_representation: int = UuidRe raise TypeError("uuid must be an instance of uuid.UUID") if uuid_representation not in ALL_UUID_REPRESENTATIONS: - raise ValueError("uuid_representation must be a value " - "from bson.binary.UuidRepresentation") + raise ValueError( + "uuid_representation must be a value " "from bson.binary.UuidRepresentation" + ) if uuid_representation == UuidRepresentation.UNSPECIFIED: raise ValueError( @@ -261,7 +270,8 @@ def from_uuid(cls: Type["Binary"], uuid: UUID, uuid_representation: int = UuidRe "converted to bson.Binary instances using " "bson.Binary.from_uuid() or a different UuidRepresentation " "can be configured. See the documentation for " - "UuidRepresentation for more information.") + "UuidRepresentation for more information." + ) subtype = OLD_UUID_SUBTYPE if uuid_representation == UuidRepresentation.PYTHON_LEGACY: @@ -296,12 +306,12 @@ def as_uuid(self, uuid_representation: int = UuidRepresentation.STANDARD) -> UUI .. versionadded:: 3.11 """ if self.subtype not in ALL_UUID_SUBTYPES: - raise ValueError("cannot decode subtype %s as a uuid" % ( - self.subtype,)) + raise ValueError("cannot decode subtype %s as a uuid" % (self.subtype,)) if uuid_representation not in ALL_UUID_REPRESENTATIONS: - raise ValueError("uuid_representation must be a value from " - "bson.binary.UuidRepresentation") + raise ValueError( + "uuid_representation must be a value from " "bson.binary.UuidRepresentation" + ) if uuid_representation == UuidRepresentation.UNSPECIFIED: raise ValueError("uuid_representation cannot be UNSPECIFIED") @@ -319,26 +329,26 @@ def as_uuid(self, uuid_representation: int = UuidRepresentation.STANDARD) -> UUI if self.subtype == UUID_SUBTYPE: return UUID(bytes=self) - raise ValueError("cannot decode subtype %s to %s" % ( - self.subtype, UUID_REPRESENTATION_NAMES[uuid_representation])) + raise ValueError( + "cannot decode subtype %s to %s" + % (self.subtype, UUID_REPRESENTATION_NAMES[uuid_representation]) + ) @property def subtype(self) -> int: - """Subtype of this binary data. 
- """ + """Subtype of this binary data.""" return self.__subtype def __getnewargs__(self) -> Tuple[bytes, int]: # type: ignore[override] # Work around http://bugs.python.org/issue7382 data = super(Binary, self).__getnewargs__()[0] if not isinstance(data, bytes): - data = data.encode('latin-1') + data = data.encode("latin-1") return data, self.__subtype - def __eq__(self, other : Any) -> bool: + def __eq__(self, other: Any) -> bool: if isinstance(other, Binary): - return ((self.__subtype, bytes(self)) == - (other.subtype, bytes(other))) + return (self.__subtype, bytes(self)) == (other.subtype, bytes(other)) # We don't return NotImplemented here because if we did then # Binary("foo") == "foo" would return True, since Binary is a # subclass of str... diff --git a/bson/code.py b/bson/code.py index 6f4b1838d8..b732e82469 100644 --- a/bson/code.py +++ b/bson/code.py @@ -50,7 +50,12 @@ class Code(str): _type_marker = 13 __scope: Union[Mapping[str, Any], None] - def __new__(cls: Type["Code"], code: Union[str, "Code"], scope: Optional[Mapping[str, Any]] = None, **kwargs: Any) -> "Code": + def __new__( + cls: Type["Code"], + code: Union[str, "Code"], + scope: Optional[Mapping[str, Any]] = None, + **kwargs: Any + ) -> "Code": if not isinstance(code, str): raise TypeError("code must be an instance of str") @@ -79,8 +84,7 @@ def __new__(cls: Type["Code"], code: Union[str, "Code"], scope: Optional[Mapping @property def scope(self) -> Optional[Mapping[str, Any]]: - """Scope dictionary for this instance or ``None``. - """ + """Scope dictionary for this instance or ``None``.""" return self.__scope def __repr__(self): diff --git a/bson/codec_options.py b/bson/codec_options.py index 27df48de8a..b43a0275d8 100644 --- a/bson/codec_options.py +++ b/bson/codec_options.py @@ -18,11 +18,26 @@ import datetime from collections import namedtuple from collections.abc import MutableMapping as _MutableMapping -from typing import (TYPE_CHECKING, Any, Callable, Dict, Generic, Iterable, - MutableMapping, Optional, Type, TypeVar, Union, cast) - -from bson.binary import (ALL_UUID_REPRESENTATIONS, UUID_REPRESENTATION_NAMES, - UuidRepresentation) +from typing import ( + TYPE_CHECKING, + Any, + Callable, + Dict, + Generic, + Iterable, + MutableMapping, + Optional, + Type, + TypeVar, + Union, + cast, +) + +from bson.binary import ( + ALL_UUID_REPRESENTATIONS, + UUID_REPRESENTATION_NAMES, + UuidRepresentation, +) # Import RawBSONDocument for type-checking only to avoid circular dependency. if TYPE_CHECKING: @@ -32,12 +47,13 @@ def _abstractproperty(func: Callable[..., Any]) -> property: return property(abc.abstractmethod(func)) + _RAW_BSON_DOCUMENT_MARKER = 101 def _raw_document_class(document_class: Any) -> bool: """Determine if a document_class is a RawBSONDocument class.""" - marker = getattr(document_class, '_type_marker', None) + marker = getattr(document_class, "_type_marker", None) return marker == _RAW_BSON_DOCUMENT_MARKER @@ -50,6 +66,7 @@ class TypeEncoder(abc.ABC): See :ref:`custom-type-type-codec` documentation for an example. """ + @_abstractproperty def python_type(self) -> Any: """The Python type to be converted into something serializable.""" @@ -70,6 +87,7 @@ class TypeDecoder(abc.ABC): See :ref:`custom-type-type-codec` documentation for an example. """ + @_abstractproperty def bson_type(self) -> Any: """The BSON type to be converted into our own type.""" @@ -93,12 +111,14 @@ class TypeCodec(TypeEncoder, TypeDecoder): See :ref:`custom-type-type-codec` documentation for an example. 
""" + pass _Codec = Union[TypeEncoder, TypeDecoder, TypeCodec] _Fallback = Callable[[Any], Any] + class TypeRegistry(object): """Encapsulates type codecs used in encoding and / or decoding BSON, as well as the fallback encoder. Type registries cannot be modified after @@ -125,7 +145,12 @@ class TypeRegistry(object): :mod:`bson` can encode. See :ref:`fallback-encoder-callable` documentation for an example. """ - def __init__(self, type_codecs: Optional[Iterable[_Codec]] = None, fallback_encoder: Optional[_Fallback] = None) -> None: + + def __init__( + self, + type_codecs: Optional[Iterable[_Codec]] = None, + fallback_encoder: Optional[_Fallback] = None, + ) -> None: self.__type_codecs = list(type_codecs or []) self._fallback_encoder = fallback_encoder self._encoder_map = {} @@ -133,8 +158,7 @@ def __init__(self, type_codecs: Optional[Iterable[_Codec]] = None, fallback_enco if self._fallback_encoder is not None: if not callable(fallback_encoder): - raise TypeError("fallback_encoder %r is not a callable" % ( - fallback_encoder)) + raise TypeError("fallback_encoder %r is not a callable" % (fallback_encoder)) for codec in self.__type_codecs: is_valid_codec = False @@ -147,36 +171,49 @@ def __init__(self, type_codecs: Optional[Iterable[_Codec]] = None, fallback_enco self._decoder_map[codec.bson_type] = codec.transform_bson if not is_valid_codec: raise TypeError( - "Expected an instance of %s, %s, or %s, got %r instead" % ( - TypeEncoder.__name__, TypeDecoder.__name__, - TypeCodec.__name__, codec)) + "Expected an instance of %s, %s, or %s, got %r instead" + % (TypeEncoder.__name__, TypeDecoder.__name__, TypeCodec.__name__, codec) + ) def _validate_type_encoder(self, codec: _Codec) -> None: from bson import _BUILT_IN_TYPES + for pytype in _BUILT_IN_TYPES: if issubclass(cast(TypeCodec, codec).python_type, pytype): - err_msg = ("TypeEncoders cannot change how built-in types are " - "encoded (encoder %s transforms type %s)" % - (codec, pytype)) + err_msg = ( + "TypeEncoders cannot change how built-in types are " + "encoded (encoder %s transforms type %s)" % (codec, pytype) + ) raise TypeError(err_msg) def __repr__(self): - return ('%s(type_codecs=%r, fallback_encoder=%r)' % ( - self.__class__.__name__, self.__type_codecs, - self._fallback_encoder)) + return "%s(type_codecs=%r, fallback_encoder=%r)" % ( + self.__class__.__name__, + self.__type_codecs, + self._fallback_encoder, + ) def __eq__(self, other: Any) -> Any: if not isinstance(other, type(self)): return NotImplemented - return ((self._decoder_map == other._decoder_map) and - (self._encoder_map == other._encoder_map) and - (self._fallback_encoder == other._fallback_encoder)) + return ( + (self._decoder_map == other._decoder_map) + and (self._encoder_map == other._encoder_map) + and (self._fallback_encoder == other._fallback_encoder) + ) _options_base = namedtuple( # type: ignore - 'CodecOptions', - ('document_class', 'tz_aware', 'uuid_representation', - 'unicode_decode_error_handler', 'tzinfo', 'type_registry')) + "CodecOptions", + ( + "document_class", + "tz_aware", + "uuid_representation", + "unicode_decode_error_handler", + "tzinfo", + "type_registry", + ), +) class CodecOptions(_options_base): @@ -255,32 +292,35 @@ class CodecOptions(_options_base): retrieved from the server will be modified in the client application and stored back to the server. 
""" - def __new__(cls: Type["CodecOptions"], document_class: Union[Type[MutableMapping], Type["RawBSONDocument"]] = dict, - tz_aware: bool = False, - uuid_representation: Optional[int] = UuidRepresentation.UNSPECIFIED, - unicode_decode_error_handler: Optional[str] = "strict", - tzinfo: Optional[datetime.tzinfo] = None, - type_registry: Optional[TypeRegistry] = None) -> "CodecOptions": - if not (issubclass(document_class, _MutableMapping) or - _raw_document_class(document_class)): - raise TypeError("document_class must be dict, bson.son.SON, " - "bson.raw_bson.RawBSONDocument, or a " - "sublass of collections.abc.MutableMapping") + + def __new__( + cls: Type["CodecOptions"], + document_class: Union[Type[MutableMapping], Type["RawBSONDocument"]] = dict, + tz_aware: bool = False, + uuid_representation: Optional[int] = UuidRepresentation.UNSPECIFIED, + unicode_decode_error_handler: Optional[str] = "strict", + tzinfo: Optional[datetime.tzinfo] = None, + type_registry: Optional[TypeRegistry] = None, + ) -> "CodecOptions": + if not (issubclass(document_class, _MutableMapping) or _raw_document_class(document_class)): + raise TypeError( + "document_class must be dict, bson.son.SON, " + "bson.raw_bson.RawBSONDocument, or a " + "sublass of collections.abc.MutableMapping" + ) if not isinstance(tz_aware, bool): raise TypeError("tz_aware must be True or False") if uuid_representation not in ALL_UUID_REPRESENTATIONS: - raise ValueError("uuid_representation must be a value " - "from bson.binary.UuidRepresentation") + raise ValueError( + "uuid_representation must be a value " "from bson.binary.UuidRepresentation" + ) if not isinstance(unicode_decode_error_handler, (str, None)): # type: ignore - raise ValueError("unicode_decode_error_handler must be a string " - "or None") + raise ValueError("unicode_decode_error_handler must be a string " "or None") if tzinfo is not None: if not isinstance(tzinfo, datetime.tzinfo): - raise TypeError( - "tzinfo must be an instance of datetime.tzinfo") + raise TypeError("tzinfo must be an instance of datetime.tzinfo") if not tz_aware: - raise ValueError( - "cannot specify tzinfo without also setting tz_aware=True") + raise ValueError("cannot specify tzinfo without also setting tz_aware=True") type_registry = type_registry or TypeRegistry() @@ -288,38 +328,53 @@ def __new__(cls: Type["CodecOptions"], document_class: Union[Type[MutableMapping raise TypeError("type_registry must be an instance of TypeRegistry") return tuple.__new__( - cls, (document_class, tz_aware, uuid_representation, - unicode_decode_error_handler, tzinfo, type_registry)) + cls, + ( + document_class, + tz_aware, + uuid_representation, + unicode_decode_error_handler, + tzinfo, + type_registry, + ), + ) def _arguments_repr(self) -> str: """Representation of the arguments used to create this object.""" - document_class_repr = ( - 'dict' if self.document_class is dict - else repr(self.document_class)) - - uuid_rep_repr = UUID_REPRESENTATION_NAMES.get(self.uuid_representation, - self.uuid_representation) - - return ('document_class=%s, tz_aware=%r, uuid_representation=%s, ' - 'unicode_decode_error_handler=%r, tzinfo=%r, ' - 'type_registry=%r' % - (document_class_repr, self.tz_aware, uuid_rep_repr, - self.unicode_decode_error_handler, self.tzinfo, - self.type_registry)) + document_class_repr = "dict" if self.document_class is dict else repr(self.document_class) + + uuid_rep_repr = UUID_REPRESENTATION_NAMES.get( + self.uuid_representation, self.uuid_representation + ) + + return ( + "document_class=%s, tz_aware=%r, 
uuid_representation=%s, " + "unicode_decode_error_handler=%r, tzinfo=%r, " + "type_registry=%r" + % ( + document_class_repr, + self.tz_aware, + uuid_rep_repr, + self.unicode_decode_error_handler, + self.tzinfo, + self.type_registry, + ) + ) def _options_dict(self) -> Dict[str, Any]: """Dictionary of the arguments used to create this object.""" # TODO: PYTHON-2442 use _asdict() instead return { - 'document_class': self.document_class, - 'tz_aware': self.tz_aware, - 'uuid_representation': self.uuid_representation, - 'unicode_decode_error_handler': self.unicode_decode_error_handler, - 'tzinfo': self.tzinfo, - 'type_registry': self.type_registry} + "document_class": self.document_class, + "tz_aware": self.tz_aware, + "uuid_representation": self.uuid_representation, + "unicode_decode_error_handler": self.unicode_decode_error_handler, + "tzinfo": self.tzinfo, + "type_registry": self.type_registry, + } def __repr__(self): - return '%s(%s)' % (self.__class__.__name__, self._arguments_repr()) + return "%s(%s)" % (self.__class__.__name__, self._arguments_repr()) def with_options(self, **kwargs: Any) -> "CodecOptions": """Make a copy of this CodecOptions, overriding some options:: @@ -344,12 +399,16 @@ def with_options(self, **kwargs: Any) -> "CodecOptions": def _parse_codec_options(options: Any) -> CodecOptions: """Parse BSON codec options.""" kwargs = {} - for k in set(options) & {'document_class', 'tz_aware', - 'uuidrepresentation', - 'unicode_decode_error_handler', 'tzinfo', - 'type_registry'}: - if k == 'uuidrepresentation': - kwargs['uuid_representation'] = options[k] + for k in set(options) & { + "document_class", + "tz_aware", + "uuidrepresentation", + "unicode_decode_error_handler", + "tzinfo", + "type_registry", + }: + if k == "uuidrepresentation": + kwargs["uuid_representation"] = options[k] else: kwargs[k] = options[k] return CodecOptions(**kwargs) diff --git a/bson/dbref.py b/bson/dbref.py index 92a3a68367..773c95f59d 100644 --- a/bson/dbref.py +++ b/bson/dbref.py @@ -22,15 +22,22 @@ class DBRef(object): - """A reference to a document stored in MongoDB. - """ + """A reference to a document stored in MongoDB.""" + __slots__ = "__collection", "__id", "__database", "__kwargs" __getstate__ = _getstate_slots __setstate__ = _setstate_slots # DBRef isn't actually a BSON "type" so this number was arbitrarily chosen. _type_marker = 100 - def __init__(self, collection: str, id: Any, database: Optional[str] = None, _extra: Mapping[str, Any] = {}, **kwargs: Any) -> None: + def __init__( + self, + collection: str, + id: Any, + database: Optional[str] = None, + _extra: Mapping[str, Any] = {}, + **kwargs: Any + ) -> None: """Initialize a new :class:`DBRef`. Raises :class:`TypeError` if `collection` or `database` is not @@ -61,14 +68,12 @@ def __init__(self, collection: str, id: Any, database: Optional[str] = None, _ex @property def collection(self) -> str: - """Get the name of this DBRef's collection. - """ + """Get the name of this DBRef's collection.""" return self.__collection @property def id(self) -> Any: - """Get this DBRef's _id. 
- """ + """Get this DBRef's _id.""" return self.__id @property @@ -90,27 +95,22 @@ def as_doc(self) -> SON[str, Any]: Generally not needed by application developers """ - doc = SON([("$ref", self.collection), - ("$id", self.id)]) + doc = SON([("$ref", self.collection), ("$id", self.id)]) if self.database is not None: doc["$db"] = self.database doc.update(self.__kwargs) return doc def __repr__(self): - extra = "".join([", %s=%r" % (k, v) - for k, v in self.__kwargs.items()]) + extra = "".join([", %s=%r" % (k, v) for k, v in self.__kwargs.items()]) if self.database is None: return "DBRef(%r, %r%s)" % (self.collection, self.id, extra) - return "DBRef(%r, %r, %r%s)" % (self.collection, self.id, - self.database, extra) + return "DBRef(%r, %r, %r%s)" % (self.collection, self.id, self.database, extra) def __eq__(self, other: Any) -> bool: if isinstance(other, DBRef): - us = (self.__database, self.__collection, - self.__id, self.__kwargs) - them = (other.__database, other.__collection, - other.__id, other.__kwargs) + us = (self.__database, self.__collection, self.__id, self.__kwargs) + them = (other.__database, other.__collection, other.__id, other.__kwargs) return us == them return NotImplemented @@ -119,12 +119,15 @@ def __ne__(self, other: Any) -> bool: def __hash__(self) -> int: """Get a hash value for this :class:`DBRef`.""" - return hash((self.__collection, self.__id, self.__database, - tuple(sorted(self.__kwargs.items())))) + return hash( + (self.__collection, self.__id, self.__database, tuple(sorted(self.__kwargs.items()))) + ) def __deepcopy__(self, memo: Any) -> "DBRef": """Support function for `copy.deepcopy()`.""" - return DBRef(deepcopy(self.__collection, memo), - deepcopy(self.__id, memo), - deepcopy(self.__database, memo), - deepcopy(self.__kwargs, memo)) + return DBRef( + deepcopy(self.__collection, memo), + deepcopy(self.__id, memo), + deepcopy(self.__database, memo), + deepcopy(self.__kwargs, memo), + ) diff --git a/bson/decimal128.py b/bson/decimal128.py index bbf5d326e4..ab2d1a24ac 100644 --- a/bson/decimal128.py +++ b/bson/decimal128.py @@ -31,8 +31,8 @@ _MAX_DIGITS = 34 _INF = 0x7800000000000000 -_NAN = 0x7c00000000000000 -_SNAN = 0x7e00000000000000 +_NAN = 0x7C00000000000000 +_SNAN = 0x7E00000000000000 _SIGN = 0x8000000000000000 _NINF = (_INF + _SIGN, 0) @@ -43,16 +43,14 @@ _PSNAN = (_SNAN, 0) _CTX_OPTIONS = { - 'prec': _MAX_DIGITS, - 'rounding': decimal.ROUND_HALF_EVEN, - 'Emin': _EXPONENT_MIN, - 'Emax': _EXPONENT_MAX, - 'capitals': 1, - 'flags': [], - 'traps': [decimal.InvalidOperation, - decimal.Overflow, - decimal.Inexact], - 'clamp': 1 + "prec": _MAX_DIGITS, + "rounding": decimal.ROUND_HALF_EVEN, + "Emin": _EXPONENT_MIN, + "Emax": _EXPONENT_MAX, + "capitals": 1, + "flags": [], + "traps": [decimal.InvalidOperation, decimal.Overflow, decimal.Inexact], + "clamp": 1, } _DEC128_CTX = decimal.Context(**_CTX_OPTIONS.copy()) # type: ignore @@ -64,7 +62,7 @@ def create_decimal128_context() -> decimal.Context: for working with IEEE-754 128-bit decimal floating point values. 
""" opts = _CTX_OPTIONS.copy() - opts['traps'] = [] + opts["traps"] = [] return decimal.Context(**opts) # type: ignore @@ -105,9 +103,9 @@ def _decimal_to_128(value: _VALUE_OPTIONS) -> Tuple[int, int]: biased_exponent = exponent + _EXPONENT_BIAS if high >> 49 == 1: - high = high & 0x7fffffffffff + high = high & 0x7FFFFFFFFFFF high |= _EXPONENT_MASK - high |= (biased_exponent & 0x3fff) << 47 + high |= (biased_exponent & 0x3FFF) << 47 else: high |= biased_exponent << 49 @@ -211,7 +209,8 @@ class Decimal128(object): >>> Decimal('NaN') == Decimal('NaN') False """ - __slots__ = ('__high', '__low') + + __slots__ = ("__high", "__low") _type_marker = 19 @@ -220,9 +219,11 @@ def __init__(self, value: _VALUE_OPTIONS) -> None: self.__high, self.__low = _decimal_to_128(value) elif isinstance(value, (list, tuple)): if len(value) != 2: - raise ValueError('Invalid size for creation of Decimal128 ' - 'from list or tuple. Must have exactly 2 ' - 'elements.') + raise ValueError( + "Invalid size for creation of Decimal128 " + "from list or tuple. Must have exactly 2 " + "elements." + ) self.__high, self.__low = value # type: ignore else: raise TypeError("Cannot convert %r to Decimal128" % (value,)) @@ -236,25 +237,25 @@ def to_decimal(self) -> decimal.Decimal: sign = 1 if (high & _SIGN) else 0 if (high & _SNAN) == _SNAN: - return decimal.Decimal((sign, (), 'N')) # type: ignore + return decimal.Decimal((sign, (), "N")) # type: ignore elif (high & _NAN) == _NAN: - return decimal.Decimal((sign, (), 'n')) # type: ignore + return decimal.Decimal((sign, (), "n")) # type: ignore elif (high & _INF) == _INF: - return decimal.Decimal((sign, (), 'F')) # type: ignore + return decimal.Decimal((sign, (), "F")) # type: ignore if (high & _EXPONENT_MASK) == _EXPONENT_MASK: - exponent = ((high & 0x1fffe00000000000) >> 47) - _EXPONENT_BIAS + exponent = ((high & 0x1FFFE00000000000) >> 47) - _EXPONENT_BIAS return decimal.Decimal((sign, (0,), exponent)) else: - exponent = ((high & 0x7fff800000000000) >> 49) - _EXPONENT_BIAS + exponent = ((high & 0x7FFF800000000000) >> 49) - _EXPONENT_BIAS arr = bytearray(15) - mask = 0x00000000000000ff + mask = 0x00000000000000FF for i in range(14, 6, -1): arr[i] = (low & mask) >> ((14 - i) << 3) mask = mask << 8 - mask = 0x00000000000000ff + mask = 0x00000000000000FF for i in range(6, 0, -1): arr[i] = (high & mask) >> ((6 - i) << 3) mask = mask << 8 @@ -263,8 +264,7 @@ def to_decimal(self) -> decimal.Decimal: arr[0] = (high & mask) >> 48 # cdecimal only accepts a tuple for digits. - digits = tuple( - int(digit) for digit in str(int.from_bytes(arr, 'big'))) + digits = tuple(int(digit) for digit in str(int.from_bytes(arr, "big"))) with decimal.localcontext(_DEC128_CTX) as ctx: return ctx.create_decimal((sign, digits, exponent)) diff --git a/bson/errors.py b/bson/errors.py index 9bdb741371..7333b27b58 100644 --- a/bson/errors.py +++ b/bson/errors.py @@ -16,25 +16,20 @@ class BSONError(Exception): - """Base class for all BSON exceptions. - """ + """Base class for all BSON exceptions.""" class InvalidBSON(BSONError): - """Raised when trying to create a BSON object from invalid data. - """ + """Raised when trying to create a BSON object from invalid data.""" class InvalidStringData(BSONError): - """Raised when trying to encode a string containing non-UTF8 data. - """ + """Raised when trying to encode a string containing non-UTF8 data.""" class InvalidDocument(BSONError): - """Raised when trying to create a BSON object from an invalid document. 
- """ + """Raised when trying to create a BSON object from an invalid document.""" class InvalidId(BSONError): - """Raised when trying to create an ObjectId from invalid data. - """ + """Raised when trying to create an ObjectId from invalid data.""" diff --git a/bson/int64.py b/bson/int64.py index f1424c8812..ed4dfa5661 100644 --- a/bson/int64.py +++ b/bson/int64.py @@ -27,6 +27,7 @@ class Int64(int): :Parameters: - `value`: the numeric value to represent """ + __slots__ = () _type_marker = 18 diff --git a/bson/json_util.py b/bson/json_util.py index d7f501f120..3cdf701f70 100644 --- a/bson/json_util.py +++ b/bson/json_util.py @@ -92,13 +92,11 @@ import math import re import uuid -from typing import (Any, Dict, Mapping, Optional, Sequence, Tuple, Type, Union, - cast) +from typing import Any, Dict, Mapping, Optional, Sequence, Tuple, Type, Union, cast import bson from bson import EPOCH_AWARE -from bson.binary import (ALL_UUID_SUBTYPES, UUID_SUBTYPE, Binary, - UuidRepresentation) +from bson.binary import ALL_UUID_SUBTYPES, UUID_SUBTYPE, Binary, UuidRepresentation from bson.code import Code from bson.codec_options import CodecOptions from bson.dbref import DBRef @@ -248,64 +246,64 @@ class JSONOptions(CodecOptions): .. versionchanged:: 4.0 Changed default value of `tz_aware` to False. """ + json_mode: int strict_number_long: bool datetime_representation: int strict_uuid: bool - def __new__(cls: Type["JSONOptions"], - strict_number_long: Optional[bool] = None, - datetime_representation: Optional[int] = None, - strict_uuid: Optional[bool] = None, - json_mode: int = JSONMode.RELAXED, - *args: Any, **kwargs: Any) -> "JSONOptions": + def __new__( + cls: Type["JSONOptions"], + strict_number_long: Optional[bool] = None, + datetime_representation: Optional[int] = None, + strict_uuid: Optional[bool] = None, + json_mode: int = JSONMode.RELAXED, + *args: Any, + **kwargs: Any + ) -> "JSONOptions": kwargs["tz_aware"] = kwargs.get("tz_aware", False) if kwargs["tz_aware"]: kwargs["tzinfo"] = kwargs.get("tzinfo", utc) - if datetime_representation not in (DatetimeRepresentation.LEGACY, - DatetimeRepresentation.NUMBERLONG, - DatetimeRepresentation.ISO8601, - None): + if datetime_representation not in ( + DatetimeRepresentation.LEGACY, + DatetimeRepresentation.NUMBERLONG, + DatetimeRepresentation.ISO8601, + None, + ): raise ValueError( "JSONOptions.datetime_representation must be one of LEGACY, " - "NUMBERLONG, or ISO8601 from DatetimeRepresentation.") + "NUMBERLONG, or ISO8601 from DatetimeRepresentation." + ) self = cast(JSONOptions, super(JSONOptions, cls).__new__(cls, *args, **kwargs)) - if json_mode not in (JSONMode.LEGACY, - JSONMode.RELAXED, - JSONMode.CANONICAL): + if json_mode not in (JSONMode.LEGACY, JSONMode.RELAXED, JSONMode.CANONICAL): raise ValueError( "JSONOptions.json_mode must be one of LEGACY, RELAXED, " - "or CANONICAL from JSONMode.") + "or CANONICAL from JSONMode." + ) self.json_mode = json_mode if self.json_mode == JSONMode.RELAXED: if strict_number_long: - raise ValueError( - "Cannot specify strict_number_long=True with" - " JSONMode.RELAXED") - if datetime_representation not in (None, - DatetimeRepresentation.ISO8601): + raise ValueError("Cannot specify strict_number_long=True with" " JSONMode.RELAXED") + if datetime_representation not in (None, DatetimeRepresentation.ISO8601): raise ValueError( "datetime_representation must be DatetimeRepresentation." 
-                    "ISO8601 or omitted with JSONMode.RELAXED")
+                    "ISO8601 or omitted with JSONMode.RELAXED"
+                )
             if strict_uuid not in (None, True):
-                raise ValueError(
-                    "Cannot specify strict_uuid=False with JSONMode.RELAXED")
+                raise ValueError("Cannot specify strict_uuid=False with JSONMode.RELAXED")
             self.strict_number_long = False
             self.datetime_representation = DatetimeRepresentation.ISO8601
             self.strict_uuid = True
         elif self.json_mode == JSONMode.CANONICAL:
             if strict_number_long not in (None, True):
-                raise ValueError(
-                    "Cannot specify strict_number_long=False with"
-                    " JSONMode.RELAXED")
-            if datetime_representation not in (
-                    None, DatetimeRepresentation.NUMBERLONG):
+                raise ValueError("Cannot specify strict_number_long=False with JSONMode.CANONICAL")
+            if datetime_representation not in (None, DatetimeRepresentation.NUMBERLONG):
                 raise ValueError(
                     "datetime_representation must be DatetimeRepresentation."
-                    "NUMBERLONG or omitted with JSONMode.RELAXED")
+                    "NUMBERLONG or omitted with JSONMode.CANONICAL"
+                )
             if strict_uuid not in (None, True):
-                raise ValueError(
-                    "Cannot specify strict_uuid=False with JSONMode.RELAXED")
+                raise ValueError("Cannot specify strict_uuid=False with JSONMode.CANONICAL")
             self.strict_number_long = True
             self.datetime_representation = DatetimeRepresentation.NUMBERLONG
             self.strict_uuid = True
@@ -322,23 +320,30 @@ def __new__(cls: Type["JSONOptions"],
         return self
 
     def _arguments_repr(self) -> str:
-        return ('strict_number_long=%r, '
-                'datetime_representation=%r, '
-                'strict_uuid=%r, json_mode=%r, %s' % (
-                    self.strict_number_long,
-                    self.datetime_representation,
-                    self.strict_uuid,
-                    self.json_mode,
-                    super(JSONOptions, self)._arguments_repr()))
+        return (
+            "strict_number_long=%r, "
+            "datetime_representation=%r, "
+            "strict_uuid=%r, json_mode=%r, %s"
+            % (
+                self.strict_number_long,
+                self.datetime_representation,
+                self.strict_uuid,
+                self.json_mode,
+                super(JSONOptions, self)._arguments_repr(),
+            )
+        )
 
     def _options_dict(self) -> Dict[Any, Any]:
         # TODO: PYTHON-2442 use _asdict() instead
         options_dict = super(JSONOptions, self)._options_dict()
-        options_dict.update({
-            'strict_number_long': self.strict_number_long,
-            'datetime_representation': self.datetime_representation,
-            'strict_uuid': self.strict_uuid,
-            'json_mode': self.json_mode})
+        options_dict.update(
+            {
+                "strict_number_long": self.strict_number_long,
+                "datetime_representation": self.datetime_representation,
+                "strict_uuid": self.strict_uuid,
+                "json_mode": self.json_mode,
+            }
+        )
         return options_dict
 
     def with_options(self, **kwargs: Any) -> "JSONOptions":
@@ -355,8 +360,7 @@ def with_options(self, **kwargs: Any) -> "JSONOptions":
         .. versionadded:: 3.12
         """
        opts = self._options_dict()
-        for opt in ('strict_number_long', 'datetime_representation',
-                    'strict_uuid', 'json_mode'):
+        for opt in ("strict_number_long", "datetime_representation", "strict_uuid", "json_mode"):
            opts[opt] = kwargs.get(opt, getattr(self, opt))
         opts.update(kwargs)
         return JSONOptions(**opts)
@@ -443,8 +447,7 @@ def loads(s: str, *args: Any, **kwargs: Any) -> Any:
 
     Accepts optional parameter `json_options`. See :class:`JSONOptions`.
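To make the mode semantics concrete, a minimal round-trip sketch (illustrative)::

    from bson import json_util
    from bson.json_util import CANONICAL_JSON_OPTIONS, RELAXED_JSON_OPTIONS

    json_util.dumps({"n": 1}, json_options=CANONICAL_JSON_OPTIONS)
    # '{"n": {"$numberInt": "1"}}'
    json_util.dumps({"n": 1}, json_options=RELAXED_JSON_OPTIONS)
    # '{"n": 1}'
    json_util.loads('{"n": {"$numberInt": "1"}}')
    # {'n': 1}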
""" json_options = kwargs.pop("json_options", DEFAULT_JSON_OPTIONS) - kwargs["object_pairs_hook"] = lambda pairs: object_pairs_hook( - pairs, json_options) + kwargs["object_pairs_hook"] = lambda pairs: object_pairs_hook(pairs, json_options) return json.loads(s, *args, **kwargs) @@ -452,10 +455,9 @@ def _json_convert(obj: Any, json_options: JSONOptions = DEFAULT_JSON_OPTIONS) -> """Recursive helper method that converts BSON types so they can be converted into json. """ - if hasattr(obj, 'items'): - return SON(((k, _json_convert(v, json_options)) - for k, v in obj.items())) - elif hasattr(obj, '__iter__') and not isinstance(obj, (str, bytes)): + if hasattr(obj, "items"): + return SON(((k, _json_convert(v, json_options)) for k, v in obj.items())) + elif hasattr(obj, "__iter__") and not isinstance(obj, (str, bytes)): return list((_json_convert(v, json_options) for v in obj)) try: return default(obj, json_options) @@ -463,16 +465,20 @@ def _json_convert(obj: Any, json_options: JSONOptions = DEFAULT_JSON_OPTIONS) -> return obj -def object_pairs_hook(pairs: Sequence[Tuple[str, Any]], json_options: JSONOptions = DEFAULT_JSON_OPTIONS) -> Any: +def object_pairs_hook( + pairs: Sequence[Tuple[str, Any]], json_options: JSONOptions = DEFAULT_JSON_OPTIONS +) -> Any: return object_hook(json_options.document_class(pairs), json_options) def object_hook(dct: Mapping[str, Any], json_options: JSONOptions = DEFAULT_JSON_OPTIONS) -> Any: if "$oid" in dct: return _parse_canonical_oid(dct) - if (isinstance(dct.get('$ref'), str) and - "$id" in dct and - isinstance(dct.get('$db'), (str, type(None)))): + if ( + isinstance(dct.get("$ref"), str) + and "$id" in dct + and isinstance(dct.get("$db"), (str, type(None))) + ): return _parse_canonical_dbref(dct) if "$date" in dct: return _parse_canonical_datetime(dct, json_options) @@ -528,9 +534,9 @@ def _parse_legacy_regex(doc: Any) -> Any: def _parse_legacy_uuid(doc: Any, json_options: JSONOptions) -> Union[Binary, uuid.UUID]: """Decode a JSON legacy $uuid to Python UUID.""" if len(doc) != 1: - raise TypeError('Bad $uuid, extra field(s): %s' % (doc,)) + raise TypeError("Bad $uuid, extra field(s): %s" % (doc,)) if not isinstance(doc["$uuid"], str): - raise TypeError('$uuid must be a string: %s' % (doc,)) + raise TypeError("$uuid must be a string: %s" % (doc,)) if json_options.uuid_representation == UuidRepresentation.UNSPECIFIED: return Binary.from_uuid(uuid.UUID(doc["$uuid"])) else: @@ -562,7 +568,7 @@ def _parse_legacy_binary(doc: Any, json_options: JSONOptions) -> Union[Binary, u if isinstance(doc["$type"], int): doc["$type"] = "%02x" % doc["$type"] subtype = int(doc["$type"], 16) - if subtype >= 0xffffff80: # Handle mongoexport values + if subtype >= 0xFFFFFF80: # Handle mongoexport values subtype = int(doc["$type"][6:], 16) data = base64.b64decode(doc["$binary"].encode()) return _binary_or_uuid(data, subtype, json_options) @@ -573,13 +579,13 @@ def _parse_canonical_binary(doc: Any, json_options: JSONOptions) -> Union[Binary b64 = binary["base64"] subtype = binary["subType"] if not isinstance(b64, str): - raise TypeError('$binary base64 must be a string: %s' % (doc,)) + raise TypeError("$binary base64 must be a string: %s" % (doc,)) if not isinstance(subtype, str) or len(subtype) > 2: - raise TypeError('$binary subType must be a string at most 2 ' - 'characters: %s' % (doc,)) + raise TypeError("$binary subType must be a string at most 2 " "characters: %s" % (doc,)) if len(binary) != 2: - raise TypeError('$binary must include only "base64" and "subType" ' - 'components: 
%s' % (doc,))
+    raise TypeError(
+        '$binary must include only "base64" and "subType" ' "components: %s" % (doc,)
+    )
     data = base64.b64decode(b64.encode())
     return _binary_or_uuid(data, int(subtype, 16), json_options)
 
 
@@ -589,46 +595,46 @@ def _parse_canonical_datetime(doc: Any, json_options: JSONOptions) -> datetime.d
     """Decode a JSON datetime to python datetime.datetime."""
     dtm = doc["$date"]
     if len(doc) != 1:
-        raise TypeError('Bad $date, extra field(s): %s' % (doc,))
+        raise TypeError("Bad $date, extra field(s): %s" % (doc,))
     # mongoexport 2.6 and newer
     if isinstance(dtm, str):
         # Parse offset
-        if dtm[-1] == 'Z':
+        if dtm[-1] == "Z":
             dt = dtm[:-1]
-            offset = 'Z'
-        elif dtm[-6] in ('+', '-') and dtm[-3] == ':':
+            offset = "Z"
+        elif dtm[-6] in ("+", "-") and dtm[-3] == ":":
             # (+|-)HH:MM
             dt = dtm[:-6]
             offset = dtm[-6:]
-        elif dtm[-5] in ('+', '-'):
+        elif dtm[-5] in ("+", "-"):
             # (+|-)HHMM
             dt = dtm[:-5]
             offset = dtm[-5:]
-        elif dtm[-3] in ('+', '-'):
+        elif dtm[-3] in ("+", "-"):
             # (+|-)HH
             dt = dtm[:-3]
             offset = dtm[-3:]
         else:
             dt = dtm
-            offset = ''
+            offset = ""
 
         # Parse the optional fractional seconds portion.
-        dot_index = dt.rfind('.')
+        dot_index = dt.rfind(".")
         microsecond = 0
         if dot_index != -1:
             microsecond = int(float(dt[dot_index:]) * 1000000)
             dt = dt[:dot_index]
 
-        aware = datetime.datetime.strptime(
-            dt, "%Y-%m-%dT%H:%M:%S").replace(microsecond=microsecond,
-                                             tzinfo=utc)
+        aware = datetime.datetime.strptime(dt, "%Y-%m-%dT%H:%M:%S").replace(
+            microsecond=microsecond, tzinfo=utc
+        )
 
-        if offset and offset != 'Z':
+        if offset and offset != "Z":
             if len(offset) == 6:
-                hours, minutes = offset[1:].split(':')
-                secs = (int(hours) * 3600 + int(minutes) * 60)
+                hours, minutes = offset[1:].split(":")
+                secs = int(hours) * 3600 + int(minutes) * 60
             elif len(offset) == 5:
-                secs = (int(offset[1:3]) * 3600 + int(offset[3:]) * 60)
+                secs = int(offset[1:3]) * 3600 + int(offset[3:]) * 60
             elif len(offset) == 3:
                 secs = int(offset[1:3]) * 3600
                 if offset[0] == "-":
@@ -647,133 +653,130 @@ def _parse_canonical_datetime(doc: Any, json_options: JSONOptions) -> datetime.d
 def _parse_canonical_oid(doc: Any) -> ObjectId:
     """Decode a JSON ObjectId to bson.objectid.ObjectId."""
     if len(doc) != 1:
-        raise TypeError('Bad $oid, extra field(s): %s' % (doc,))
-    return ObjectId(doc['$oid'])
+        raise TypeError("Bad $oid, extra field(s): %s" % (doc,))
+    return ObjectId(doc["$oid"])
 
 
 def _parse_canonical_symbol(doc: Any) -> str:
     """Decode a JSON symbol to Python string."""
-    symbol = doc['$symbol']
+    symbol = doc["$symbol"]
     if len(doc) != 1:
-        raise TypeError('Bad $symbol, extra field(s): %s' % (doc,))
+        raise TypeError("Bad $symbol, extra field(s): %s" % (doc,))
     return str(symbol)
 
 
 def _parse_canonical_code(doc: Any) -> Code:
     """Decode a JSON code to bson.code.Code."""
     for key in doc:
-        if key not in ('$code', '$scope'):
-            raise TypeError('Bad $code, extra field(s): %s' % (doc,))
-    return Code(doc['$code'], scope=doc.get('$scope'))
+        if key not in ("$code", "$scope"):
+            raise TypeError("Bad $code, extra field(s): %s" % (doc,))
+    return Code(doc["$code"], scope=doc.get("$scope"))
 
 
 def _parse_canonical_regex(doc: Any) -> Regex:
     """Decode a JSON regex to bson.regex.Regex."""
-    regex = doc['$regularExpression']
+    regex = doc["$regularExpression"]
    if len(doc) != 1:
-        raise TypeError('Bad $regularExpression, extra field(s): %s' % (doc,))
+        raise TypeError("Bad $regularExpression, extra field(s): %s" % (doc,))
     if len(regex) != 2:
-        raise TypeError('Bad $regularExpression must include only "pattern"'
-                        'and "options" components: 
%s' % (doc,)) - opts = regex['options'] + raise TypeError( + 'Bad $regularExpression must include only "pattern"' + 'and "options" components: %s' % (doc,) + ) + opts = regex["options"] if not isinstance(opts, str): - raise TypeError('Bad $regularExpression options, options must be ' - 'string, was type %s' % (type(opts))) - return Regex(regex['pattern'], opts) + raise TypeError( + "Bad $regularExpression options, options must be " "string, was type %s" % (type(opts)) + ) + return Regex(regex["pattern"], opts) def _parse_canonical_dbref(doc: Any) -> DBRef: """Decode a JSON DBRef to bson.dbref.DBRef.""" - return DBRef(doc.pop('$ref'), doc.pop('$id'), - database=doc.pop('$db', None), **doc) + return DBRef(doc.pop("$ref"), doc.pop("$id"), database=doc.pop("$db", None), **doc) def _parse_canonical_dbpointer(doc: Any) -> Any: """Decode a JSON (deprecated) DBPointer to bson.dbref.DBRef.""" - dbref = doc['$dbPointer'] + dbref = doc["$dbPointer"] if len(doc) != 1: - raise TypeError('Bad $dbPointer, extra field(s): %s' % (doc,)) + raise TypeError("Bad $dbPointer, extra field(s): %s" % (doc,)) if isinstance(dbref, DBRef): dbref_doc = dbref.as_doc() # DBPointer must not contain $db in its value. if dbref.database is not None: - raise TypeError( - 'Bad $dbPointer, extra field $db: %s' % (dbref_doc,)) + raise TypeError("Bad $dbPointer, extra field $db: %s" % (dbref_doc,)) if not isinstance(dbref.id, ObjectId): - raise TypeError( - 'Bad $dbPointer, $id must be an ObjectId: %s' % (dbref_doc,)) + raise TypeError("Bad $dbPointer, $id must be an ObjectId: %s" % (dbref_doc,)) if len(dbref_doc) != 2: - raise TypeError( - 'Bad $dbPointer, extra field(s) in DBRef: %s' % (dbref_doc,)) + raise TypeError("Bad $dbPointer, extra field(s) in DBRef: %s" % (dbref_doc,)) return dbref else: - raise TypeError('Bad $dbPointer, expected a DBRef: %s' % (doc,)) + raise TypeError("Bad $dbPointer, expected a DBRef: %s" % (doc,)) def _parse_canonical_int32(doc: Any) -> int: """Decode a JSON int32 to python int.""" - i_str = doc['$numberInt'] + i_str = doc["$numberInt"] if len(doc) != 1: - raise TypeError('Bad $numberInt, extra field(s): %s' % (doc,)) + raise TypeError("Bad $numberInt, extra field(s): %s" % (doc,)) if not isinstance(i_str, str): - raise TypeError('$numberInt must be string: %s' % (doc,)) + raise TypeError("$numberInt must be string: %s" % (doc,)) return int(i_str) def _parse_canonical_int64(doc: Any) -> Int64: """Decode a JSON int64 to bson.int64.Int64.""" - l_str = doc['$numberLong'] + l_str = doc["$numberLong"] if len(doc) != 1: - raise TypeError('Bad $numberLong, extra field(s): %s' % (doc,)) + raise TypeError("Bad $numberLong, extra field(s): %s" % (doc,)) return Int64(l_str) def _parse_canonical_double(doc: Any) -> float: """Decode a JSON double to python float.""" - d_str = doc['$numberDouble'] + d_str = doc["$numberDouble"] if len(doc) != 1: - raise TypeError('Bad $numberDouble, extra field(s): %s' % (doc,)) + raise TypeError("Bad $numberDouble, extra field(s): %s" % (doc,)) if not isinstance(d_str, str): - raise TypeError('$numberDouble must be string: %s' % (doc,)) + raise TypeError("$numberDouble must be string: %s" % (doc,)) return float(d_str) def _parse_canonical_decimal128(doc: Any) -> Decimal128: """Decode a JSON decimal128 to bson.decimal128.Decimal128.""" - d_str = doc['$numberDecimal'] + d_str = doc["$numberDecimal"] if len(doc) != 1: - raise TypeError('Bad $numberDecimal, extra field(s): %s' % (doc,)) + raise TypeError("Bad $numberDecimal, extra field(s): %s" % (doc,)) if not isinstance(d_str, 
str):
-        raise TypeError('$numberDecimal must be string: %s' % (doc,))
+        raise TypeError("$numberDecimal must be string: %s" % (doc,))
     return Decimal128(d_str)
 
 
 def _parse_canonical_minkey(doc: Any) -> MinKey:
     """Decode a JSON MinKey to bson.min_key.MinKey."""
-    if type(doc['$minKey']) is not int or doc['$minKey'] != 1:
-        raise TypeError('$minKey value must be 1: %s' % (doc,))
+    if type(doc["$minKey"]) is not int or doc["$minKey"] != 1:
+        raise TypeError("$minKey value must be 1: %s" % (doc,))
     if len(doc) != 1:
-        raise TypeError('Bad $minKey, extra field(s): %s' % (doc,))
+        raise TypeError("Bad $minKey, extra field(s): %s" % (doc,))
     return MinKey()
 
 
 def _parse_canonical_maxkey(doc: Any) -> MaxKey:
     """Decode a JSON MaxKey to bson.max_key.MaxKey."""
-    if type(doc['$maxKey']) is not int or doc['$maxKey'] != 1:
-        raise TypeError('$maxKey value must be 1: %s', (doc,))
+    if type(doc["$maxKey"]) is not int or doc["$maxKey"] != 1:
+        raise TypeError("$maxKey value must be 1: %s" % (doc,))
     if len(doc) != 1:
-        raise TypeError('Bad $minKey, extra field(s): %s' % (doc,))
+        raise TypeError("Bad $maxKey, extra field(s): %s" % (doc,))
     return MaxKey()
 
 
 def _encode_binary(data: bytes, subtype: int, json_options: JSONOptions) -> Any:
     if json_options.json_mode == JSONMode.LEGACY:
-        return SON([
-            ('$binary', base64.b64encode(data).decode()),
-            ('$type', "%02x" % subtype)])
-    return {'$binary': SON([
-        ('base64', base64.b64encode(data).decode()),
-        ('subType', "%02x" % subtype)])}
+        return SON([("$binary", base64.b64encode(data).decode()), ("$type", "%02x" % subtype)])
+    return {
+        "$binary": SON([("base64", base64.b64encode(data).decode()), ("subType", "%02x" % subtype)])
+    }
 
 
 def default(obj: Any, json_options: JSONOptions = DEFAULT_JSON_OPTIONS) -> Any:
@@ -784,25 +787,24 @@ def default(obj: Any, json_options: JSONOptions = DEFAULT_JSON_OPTIONS) -> Any:
     if isinstance(obj, DBRef):
         return _json_convert(obj.as_doc(), json_options=json_options)
     if isinstance(obj, datetime.datetime):
-        if (json_options.datetime_representation ==
-                DatetimeRepresentation.ISO8601):
+        if json_options.datetime_representation == DatetimeRepresentation.ISO8601:
             if not obj.tzinfo:
                 obj = obj.replace(tzinfo=utc)
             assert obj.tzinfo is not None
             if obj >= EPOCH_AWARE:
                 off = obj.tzinfo.utcoffset(obj)
                 if (off.days, off.seconds, off.microseconds) == (0, 0, 0):  # type: ignore
-                    tz_string = 'Z'
+                    tz_string = "Z"
                 else:
-                    tz_string = obj.strftime('%z')
+                    tz_string = obj.strftime("%z")
                 millis = int(obj.microsecond / 1000)
                 fracsecs = ".%03d" % (millis,) if millis else ""
-                return {"$date": "%s%s%s" % (
-                    obj.strftime("%Y-%m-%dT%H:%M:%S"), fracsecs, tz_string)}
+                return {
+                    "$date": "%s%s%s" % (obj.strftime("%Y-%m-%dT%H:%M:%S"), fracsecs, tz_string)
+                }
 
             millis = bson._datetime_to_millis(obj)
-            if (json_options.datetime_representation ==
-                    DatetimeRepresentation.LEGACY):
+            if json_options.datetime_representation == DatetimeRepresentation.LEGACY:
                 return {"$date": millis}
             return {"$date": {"$numberLong": str(millis)}}
     if json_options.strict_number_long and isinstance(obj, Int64):
@@ -824,11 +826,10 @@ def default(obj: Any, json_options: JSONOptions = DEFAULT_JSON_OPTIONS) -> Any:
         if isinstance(obj.pattern, str):
             pattern = obj.pattern
         else:
-            pattern = obj.pattern.decode('utf-8')
+            pattern = obj.pattern.decode("utf-8")
         if json_options.json_mode == JSONMode.LEGACY:
             return SON([("$regex", pattern), ("$options", flags)])
-        return {'$regularExpression': SON([("pattern", pattern),
-                                           ("options", flags)])}
+        return {"$regularExpression": SON([("pattern", pattern), 
("options", flags)])} if isinstance(obj, MinKey): return {"$minKey": 1} if isinstance(obj, MaxKey): @@ -837,18 +838,15 @@ def default(obj: Any, json_options: JSONOptions = DEFAULT_JSON_OPTIONS) -> Any: return {"$timestamp": SON([("t", obj.time), ("i", obj.inc)])} if isinstance(obj, Code): if obj.scope is None: - return {'$code': str(obj)} - return SON([ - ('$code', str(obj)), - ('$scope', _json_convert(obj.scope, json_options))]) + return {"$code": str(obj)} + return SON([("$code", str(obj)), ("$scope", _json_convert(obj.scope, json_options))]) if isinstance(obj, Binary): return _encode_binary(obj, obj.subtype, json_options) if isinstance(obj, bytes): return _encode_binary(obj, 0, json_options) if isinstance(obj, uuid.UUID): if json_options.strict_uuid: - binval = Binary.from_uuid( - obj, uuid_representation=json_options.uuid_representation) + binval = Binary.from_uuid(obj, uuid_representation=json_options.uuid_representation) return _encode_binary(binval, binval.subtype, json_options) else: return {"$uuid": obj.hex} @@ -856,19 +854,18 @@ def default(obj: Any, json_options: JSONOptions = DEFAULT_JSON_OPTIONS) -> Any: return {"$numberDecimal": str(obj)} if isinstance(obj, bool): return obj - if (json_options.json_mode == JSONMode.CANONICAL and - isinstance(obj, int)): - if -2 ** 31 <= obj < 2 ** 31: - return {'$numberInt': str(obj)} - return {'$numberLong': str(obj)} + if json_options.json_mode == JSONMode.CANONICAL and isinstance(obj, int): + if -(2**31) <= obj < 2**31: + return {"$numberInt": str(obj)} + return {"$numberLong": str(obj)} if json_options.json_mode != JSONMode.LEGACY and isinstance(obj, float): if math.isnan(obj): - return {'$numberDouble': 'NaN'} + return {"$numberDouble": "NaN"} elif math.isinf(obj): - representation = 'Infinity' if obj > 0 else '-Infinity' - return {'$numberDouble': representation} + representation = "Infinity" if obj > 0 else "-Infinity" + return {"$numberDouble": representation} elif json_options.json_mode == JSONMode.CANONICAL: # repr() will return the shortest string guaranteed to produce the # original value, when float() is called on it. - return {'$numberDouble': str(repr(obj))} + return {"$numberDouble": str(repr(obj))} raise TypeError("%r is not JSON serializable" % obj) diff --git a/bson/max_key.py b/bson/max_key.py index 107dc9dec6..b4f38d072e 100644 --- a/bson/max_key.py +++ b/bson/max_key.py @@ -19,6 +19,7 @@ class MaxKey(object): """MongoDB internal MaxKey type.""" + __slots__ = () _type_marker = 127 diff --git a/bson/min_key.py b/bson/min_key.py index 5483eb6cf8..babc655e43 100644 --- a/bson/min_key.py +++ b/bson/min_key.py @@ -19,6 +19,7 @@ class MinKey(object): """MongoDB internal MinKey type.""" + __slots__ = () _type_marker = 255 diff --git a/bson/objectid.py b/bson/objectid.py index baf1966bce..9ad3ed60be 100644 --- a/bson/objectid.py +++ b/bson/objectid.py @@ -35,7 +35,8 @@ def _raise_invalid_id(oid: str) -> NoReturn: raise InvalidId( "%r is not a valid ObjectId, it must be a 12-byte input" - " or a 24-character hex string" % oid) + " or a 24-character hex string" % oid + ) def _random_bytes() -> bytes: @@ -44,8 +45,7 @@ def _random_bytes() -> bytes: class ObjectId(object): - """A MongoDB ObjectId. 
- """ + """A MongoDB ObjectId.""" _pid = os.getpid() @@ -54,7 +54,7 @@ class ObjectId(object): __random = _random_bytes() - __slots__ = ('__id',) + __slots__ = ("__id",) _type_marker = 7 @@ -131,12 +131,11 @@ def from_datetime(cls: Type["ObjectId"], generation_time: datetime.datetime) -> - `generation_time`: :class:`~datetime.datetime` to be used as the generation time for the resulting ObjectId. """ - offset = generation_time.utcoffset() + offset = generation_time.utcoffset() if offset is not None: generation_time = generation_time - offset timestamp = calendar.timegm(generation_time.timetuple()) - oid = struct.pack( - ">I", int(timestamp)) + b"\x00\x00\x00\x00\x00\x00\x00\x00" + oid = struct.pack(">I", int(timestamp)) + b"\x00\x00\x00\x00\x00\x00\x00\x00" return cls(oid) @classmethod @@ -159,8 +158,7 @@ def is_valid(cls: Type["ObjectId"], oid: Any) -> bool: @classmethod def _random(cls) -> bytes: - """Generate a 5-byte random number once per process. - """ + """Generate a 5-byte random number once per process.""" pid = os.getpid() if pid != cls._pid: cls._pid = pid @@ -168,8 +166,7 @@ def _random(cls) -> bytes: return cls.__random def __generate(self) -> None: - """Generate a new value for this ObjectId. - """ + """Generate a new value for this ObjectId.""" # 4 bytes current time oid = struct.pack(">I", int(time.time())) @@ -206,13 +203,13 @@ def __validate(self, oid: Any) -> None: else: _raise_invalid_id(oid) else: - raise TypeError("id must be an instance of (bytes, str, ObjectId), " - "not %s" % (type(oid),)) + raise TypeError( + "id must be an instance of (bytes, str, ObjectId), " "not %s" % (type(oid),) + ) @property def binary(self) -> bytes: - """12-byte binary representation of this ObjectId. - """ + """12-byte binary representation of this ObjectId.""" return self.__id @property @@ -234,8 +231,7 @@ def __getstate__(self) -> bytes: return self.__id def __setstate__(self, value: Any) -> None: - """explicit state set from pickling - """ + """explicit state set from pickling""" # Provide backwards compatability with OIDs # pickled with pymongo-1.9 or older. if isinstance(value, dict): @@ -246,7 +242,7 @@ def __setstate__(self, value: Any) -> None: # In python 3.x this has to be converted to `bytes` # by encoding latin-1. if isinstance(oid, str): - self.__id = oid.encode('latin-1') + self.__id = oid.encode("latin-1") else: self.__id = oid diff --git a/bson/raw_bson.py b/bson/raw_bson.py index 8a3b0cb4fb..c102b367a2 100644 --- a/bson/raw_bson.py +++ b/bson/raw_bson.py @@ -69,7 +69,7 @@ class RawBSONDocument(Mapping[str, Any]): RawBSONDocument decode its bytes. """ - __slots__ = ('__raw', '__inflated_doc', '__codec_options') + __slots__ = ("__raw", "__inflated_doc", "__codec_options") _type_marker = _RAW_BSON_DOCUMENT_MARKER def __init__(self, bson_bytes: bytes, codec_options: Optional[CodecOptions] = None) -> None: @@ -115,7 +115,8 @@ class from the standard library so it can be used like a read-only elif codec_options.document_class is not RawBSONDocument: raise TypeError( "RawBSONDocument cannot use CodecOptions with document " - "class %s" % (codec_options.document_class, )) + "class %s" % (codec_options.document_class,) + ) self.__codec_options = codec_options # Validate the bson object size. _get_object_size(bson_bytes, 0, len(bson_bytes)) @@ -135,8 +136,7 @@ def __inflated(self) -> Mapping[str, Any]: # We already validated the object's size when this document was # created, so no need to do that again. # Use SON to preserve ordering of elements. 
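A sketch of the lazy decoding this hunk touches (illustrative)::

    import bson
    from bson.raw_bson import RawBSONDocument

    raw = RawBSONDocument(bson.encode({"a": 1}))
    raw.raw   # the original, unmodified BSON bytes
    raw["a"]  # 1 -- the bytes are only inflated on first access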
- self.__inflated_doc = _inflate_bson( - self.__raw, self.__codec_options) + self.__inflated_doc = _inflate_bson(self.__raw, self.__codec_options) return self.__inflated_doc def __getitem__(self, item: str) -> Any: @@ -154,8 +154,7 @@ def __eq__(self, other: Any) -> bool: return NotImplemented def __repr__(self): - return ("RawBSONDocument(%r, codec_options=%r)" - % (self.raw, self.__codec_options)) + return "RawBSONDocument(%r, codec_options=%r)" % (self.raw, self.__codec_options) def _inflate_bson(bson_bytes: bytes, codec_options: CodecOptions) -> Mapping[Any, Any]: @@ -168,8 +167,7 @@ def _inflate_bson(bson_bytes: bytes, codec_options: CodecOptions) -> Mapping[Any must be :class:`RawBSONDocument`. """ # Use SON to preserve ordering of elements. - return _raw_to_dict( - bson_bytes, 4, len(bson_bytes)-1, codec_options, SON()) + return _raw_to_dict(bson_bytes, 4, len(bson_bytes) - 1, codec_options, SON()) DEFAULT_RAW_BSON_OPTIONS: CodecOptions = DEFAULT.with_options(document_class=RawBSONDocument) diff --git a/bson/regex.py b/bson/regex.py index 454aca3cec..317c65049f 100644 --- a/bson/regex.py +++ b/bson/regex.py @@ -42,6 +42,7 @@ def str_flags_to_int(str_flags: str) -> int: class Regex(object): """BSON regular expression data.""" + __slots__ = ("pattern", "flags") __getstate__ = _getstate_slots @@ -75,9 +76,7 @@ def from_native(cls: Type["Regex"], regex: Pattern[Any]) -> "Regex": .. _PCRE: http://www.pcre.org/ """ if not isinstance(regex, RE_TYPE): - raise TypeError( - "regex must be a compiled regular expression, not %s" - % type(regex)) + raise TypeError("regex must be a compiled regular expression, not %s" % type(regex)) return Regex(regex.pattern, regex.flags) @@ -101,8 +100,7 @@ def __init__(self, pattern: Union[str, bytes], flags: Union[str, int] = 0) -> No elif isinstance(flags, int): self.flags = flags else: - raise TypeError( - "flags must be a string or int, not %s" % type(flags)) + raise TypeError("flags must be a string or int, not %s" % type(flags)) def __eq__(self, other: Any) -> bool: if isinstance(other, Regex): diff --git a/bson/son.py b/bson/son.py index bb39644637..e4238b4058 100644 --- a/bson/son.py +++ b/bson/son.py @@ -21,8 +21,20 @@ import copy import re from collections.abc import Mapping as _Mapping -from typing import (Any, Dict, Iterable, Iterator, List, Mapping, - Optional, Pattern, Tuple, Type, TypeVar, Union) +from typing import ( + Any, + Dict, + Iterable, + Iterator, + List, + Mapping, + Optional, + Pattern, + Tuple, + Type, + TypeVar, + Union, +) # This sort of sucks, but seems to be as good as it gets... # This is essentially the same as re._pattern_type @@ -40,9 +52,14 @@ class SON(Dict[_Key, _Value]): few extra niceties for dealing with SON. SON provides an API similar to collections.OrderedDict. 
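For reference, the ordering behavior that SON guarantees (illustrative)::

    from bson.son import SON

    doc = SON([("b", 2), ("a", 1)])
    list(doc)      # ['b', 'a'] -- insertion order is preserved
    doc["c"] = 3
    doc.to_dict()  # a plain dict, with any nested SONs converted as well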
""" + __keys: List[Any] - def __init__(self, data: Optional[Union[Mapping[_Key, _Value], Iterable[Tuple[_Key, _Value]]]] = None, **kwargs: Any) -> None: + def __init__( + self, + data: Optional[Union[Mapping[_Key, _Value], Iterable[Tuple[_Key, _Value]]]] = None, + **kwargs: Any + ) -> None: self.__keys = [] dict.__init__(self) self.update(data) @@ -107,8 +124,7 @@ def setdefault(self, key: _Key, default: _Value) -> _Value: # type: ignore[over def pop(self, key: _Key, *args: Union[_Value, _T]) -> Union[_Value, _T]: if len(args) > 1: - raise TypeError("pop expected at most 2 arguments, got "\ - + repr(1 + len(args))) + raise TypeError("pop expected at most 2 arguments, got " + repr(1 + len(args))) try: value = self[key] except KeyError: @@ -122,7 +138,7 @@ def popitem(self) -> Tuple[_Key, _Value]: try: k, v = next(iter(self.items())) except StopIteration: - raise KeyError('container is empty') + raise KeyError("container is empty") del self[k] return (k, v) @@ -130,10 +146,10 @@ def update(self, other: Optional[Any] = None, **kwargs: _Value) -> None: # type # Make progressively weaker assumptions about "other" if other is None: pass - elif hasattr(other, 'items'): + elif hasattr(other, "items"): for k, v in other.items(): self[k] = v - elif hasattr(other, 'keys'): + elif hasattr(other, "keys"): for k in other.keys(): self[k] = other[k] else: @@ -153,8 +169,7 @@ def __eq__(self, other: Any) -> bool: regular dictionary is order-insensitive. """ if isinstance(other, SON): - return len(self) == len(other) and list(self.items()) == \ - list(other.items()) + return len(self) == len(other) and list(self.items()) == list(other.items()) return self.to_dict() == other def __ne__(self, other: Any) -> bool: @@ -174,9 +189,7 @@ def transform_value(value: Any) -> Any: if isinstance(value, list): return [transform_value(v) for v in value] elif isinstance(value, _Mapping): - return dict([ - (k, transform_value(v)) - for k, v in value.items()]) + return dict([(k, transform_value(v)) for k, v in value.items()]) else: return value diff --git a/bson/timestamp.py b/bson/timestamp.py index 93c7540fd0..a333b9fa3e 100644 --- a/bson/timestamp.py +++ b/bson/timestamp.py @@ -26,8 +26,8 @@ class Timestamp(object): - """MongoDB internal timestamps used in the opLog. - """ + """MongoDB internal timestamps used in the opLog.""" + __slots__ = ("__time", "__inc") __getstate__ = _getstate_slots @@ -72,19 +72,17 @@ def __init__(self, time: Union[datetime.datetime, int], inc: int) -> None: @property def time(self) -> int: - """Get the time portion of this :class:`Timestamp`. - """ + """Get the time portion of this :class:`Timestamp`.""" return self.__time @property def inc(self) -> int: - """Get the inc portion of this :class:`Timestamp`. - """ + """Get the inc portion of this :class:`Timestamp`.""" return self.__inc def __eq__(self, other: Any) -> bool: if isinstance(other, Timestamp): - return (self.__time == other.time and self.__inc == other.inc) + return self.__time == other.time and self.__inc == other.inc else: return NotImplemented diff --git a/doc/conf.py b/doc/conf.py index facb74f470..47debcf14c 100644 --- a/doc/conf.py +++ b/doc/conf.py @@ -4,8 +4,10 @@ # # This file is execfile()d with the current directory set to its containing dir. -import sys, os -sys.path[0:0] = [os.path.abspath('..')] +import os +import sys + +sys.path[0:0] = [os.path.abspath("..")] import pymongo @@ -13,21 +15,26 @@ # Add any Sphinx extension module names here, as strings. 
They can be extensions # coming with Sphinx (named 'sphinx.ext.*') or your custom ones. -extensions = ['sphinx.ext.autodoc', 'sphinx.ext.doctest', 'sphinx.ext.coverage', - 'sphinx.ext.todo', 'sphinx.ext.intersphinx'] +extensions = [ + "sphinx.ext.autodoc", + "sphinx.ext.doctest", + "sphinx.ext.coverage", + "sphinx.ext.todo", + "sphinx.ext.intersphinx", +] # Add any paths that contain templates here, relative to this directory. -templates_path = ['_templates'] +templates_path = ["_templates"] # The suffix of source filenames. -source_suffix = '.rst' +source_suffix = ".rst" # The master toctree document. -master_doc = 'index' +master_doc = "index" # General information about the project. -project = 'PyMongo' -copyright = 'MongoDB, Inc. 2008-present. MongoDB, Mongo, and the leaf logo are registered trademarks of MongoDB, Inc' +project = "PyMongo" +copyright = "MongoDB, Inc. 2008-present. MongoDB, Mongo, and the leaf logo are registered trademarks of MongoDB, Inc" html_show_sphinx = False # The version info for the project you're documenting, acts as replacement for @@ -44,31 +51,31 @@ # List of directories, relative to source directory, that shouldn't be searched # for source files. -exclude_trees = ['_build'] +exclude_trees = ["_build"] # The reST default role (used for this markup: `text`) to use for all documents. -#default_role = None +# default_role = None # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. -#show_authors = False +# show_authors = False # If true, the current module name will be prepended to all description # unit titles (such as .. function::). add_module_names = True # The name of the Pygments (syntax highlighting) style to use. -pygments_style = 'sphinx' +pygments_style = "sphinx" # A list of ignored prefixes for module index sorting. -#modindex_common_prefix = [] +# modindex_common_prefix = [] # -- Options for extensions ---------------------------------------------------- -autoclass_content = 'init' +autoclass_content = "init" -doctest_path = [os.path.abspath('..')] +doctest_path = [os.path.abspath("..")] -doctest_test_doctest_blocks = '' +doctest_test_doctest_blocks = "" doctest_global_setup = """ from pymongo.mongo_client import MongoClient @@ -82,91 +89,87 @@ # Theme gratefully vendored from CPython source. html_theme = "pydoctheme" html_theme_path = ["."] -html_theme_options = { - 'collapsiblesidebar': True, - 'googletag': False -} +html_theme_options = {"collapsiblesidebar": True, "googletag": False} # Additional static files. -html_static_path = ['static'] +html_static_path = ["static"] # The name for this set of Sphinx documents. If None, it defaults to # " v documentation". -#html_title = None +# html_title = None # A shorter title for the navigation bar. Default is the same as html_title. -#html_short_title = None +# html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. -#html_logo = None +# html_logo = None # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. -#html_favicon = None +# html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". 
-#html_static_path = ['_static'] +# html_static_path = ['_static'] # Custom sidebar templates, maps document names to template names. -#html_sidebars = {} +# html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. -#html_additional_pages = {} +# html_additional_pages = {} # If true, links to the reST sources are added to the pages. -#html_show_sourcelink = True +# html_show_sourcelink = True # If true, an OpenSearch description file will be output, and all pages will # contain a tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. -#html_use_opensearch = '' +# html_use_opensearch = '' # If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml"). -#html_file_suffix = '' +# html_file_suffix = '' # Output file base name for HTML help builder. -htmlhelp_basename = 'PyMongo' + release.replace('.', '_') +htmlhelp_basename = "PyMongo" + release.replace(".", "_") # -- Options for LaTeX output -------------------------------------------------- # The paper size ('letter' or 'a4'). -#latex_paper_size = 'letter' +# latex_paper_size = 'letter' # The font size ('10pt', '11pt' or '12pt'). -#latex_font_size = '10pt' +# latex_font_size = '10pt' # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, author, documentclass [howto/manual]). latex_documents = [ - ('index', 'PyMongo.tex', 'PyMongo Documentation', - 'Michael Dirolf', 'manual'), + ("index", "PyMongo.tex", "PyMongo Documentation", "Michael Dirolf", "manual"), ] # The name of an image file (relative to this directory) to place at the top of # the title page. -#latex_logo = None +# latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. -#latex_use_parts = False +# latex_use_parts = False # Additional stuff for the LaTeX preamble. -#latex_preamble = '' +# latex_preamble = '' # Documents to append as an appendix to all manuals. -#latex_appendices = [] +# latex_appendices = [] # If false, no module index is generated. -#latex_use_modindex = True +# latex_use_modindex = True intersphinx_mapping = { - 'gevent': ('http://www.gevent.org/', None), - 'py': ('https://docs.python.org/3/', None), + "gevent": ("http://www.gevent.org/", None), + "py": ("https://docs.python.org/3/", None), } diff --git a/green_framework_test.py b/green_framework_test.py index baffe21b15..610845a9f6 100644 --- a/green_framework_test.py +++ b/green_framework_test.py @@ -21,30 +21,35 @@ def run_gevent(): """Prepare to run tests with Gevent. Can raise ImportError.""" from gevent import monkey + monkey.patch_all() def run_eventlet(): """Prepare to run tests with Eventlet. Can raise ImportError.""" import eventlet + # https://github.com/eventlet/eventlet/issues/401 eventlet.sleep() eventlet.monkey_patch() FRAMEWORKS = { - 'gevent': run_gevent, - 'eventlet': run_eventlet, + "gevent": run_gevent, + "eventlet": run_eventlet, } def list_frameworks(): """Tell the user what framework names are valid.""" - sys.stdout.write("""Testable frameworks: %s + sys.stdout.write( + """Testable frameworks: %s Note that membership in this list means the framework can be tested with PyMongo, not necessarily that it is officially supported. -""" % ", ".join(sorted(FRAMEWORKS))) +""" + % ", ".join(sorted(FRAMEWORKS)) + ) def run(framework_name, *args): @@ -53,7 +58,7 @@ def run(framework_name, *args): FRAMEWORKS[framework_name]() # Run the tests. 
- sys.argv[:] = ['setup.py', 'test'] + list(args) + sys.argv[:] = ["setup.py", "test"] + list(args) import setup @@ -62,11 +67,13 @@ def main(): usage = """python %s FRAMEWORK_NAME Test PyMongo with a variety of greenlet-based monkey-patching frameworks. See -python %s --help-frameworks.""" % (sys.argv[0], sys.argv[0]) +python %s --help-frameworks.""" % ( + sys.argv[0], + sys.argv[0], + ) try: - opts, args = getopt.getopt( - sys.argv[1:], "h", ["help", "help-frameworks"]) + opts, args = getopt.getopt(sys.argv[1:], "h", ["help", "help-frameworks"]) except getopt.GetoptError as err: print(str(err)) print(usage) @@ -87,13 +94,14 @@ def main(): sys.exit(1) if args[0] not in FRAMEWORKS: - print('%r is not a testable framework.\n' % args[0]) + print("%r is not a testable framework.\n" % args[0]) list_frameworks() sys.exit(1) - run(args[0], # Framework name. - *args[1:]) # Command line args to setup.py, like what test to run. + run( + args[0], *args[1:] # Framework name. + ) # Command line args to setup.py, like what test to run. -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/gridfs/__init__.py b/gridfs/__init__.py index 02c42d6eb6..22b28af1a7 100644 --- a/gridfs/__init__.py +++ b/gridfs/__init__.py @@ -25,9 +25,14 @@ from bson.objectid import ObjectId from gridfs.errors import NoFile -from gridfs.grid_file import (DEFAULT_CHUNK_SIZE, GridIn, GridOut, - GridOutCursor, _clear_entity_type_registry, - _disallow_transactions) +from gridfs.grid_file import ( + DEFAULT_CHUNK_SIZE, + GridIn, + GridOut, + GridOutCursor, + _clear_entity_type_registry, + _disallow_transactions, +) from pymongo import ASCENDING, DESCENDING from pymongo.client_session import ClientSession from pymongo.collation import Collation @@ -40,8 +45,8 @@ class GridFS(object): - """An instance of GridFS on top of a single Database. - """ + """An instance of GridFS on top of a single Database.""" + def __init__(self, database: Database, collection: str = "fs"): """Create a new instance of :class:`GridFS`. @@ -78,8 +83,7 @@ def __init__(self, database: Database, collection: str = "fs"): database = _clear_entity_type_registry(database) if not database.write_concern.acknowledged: - raise ConfigurationError('database must use ' - 'acknowledged write_concern') + raise ConfigurationError("database must use " "acknowledged write_concern") self.__collection = database[collection] self.__files = self.__collection.files @@ -159,7 +163,13 @@ def get(self, file_id: Any, session: Optional[ClientSession] = None) -> GridOut: gout._ensure_file() return gout - def get_version(self, filename: Optional[str] = None, version: Optional[int] = -1, session: Optional[ClientSession] = None, **kwargs: Any) -> GridOut: + def get_version( + self, + filename: Optional[str] = None, + version: Optional[int] = -1, + session: Optional[ClientSession] = None, + **kwargs: Any + ) -> GridOut: """Get a file from GridFS by ``"filename"`` or metadata fields. 
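A sketch of the version numbering handled below (illustrative; assumes a reachable mongod on the default port)::

    import gridfs
    from pymongo import MongoClient

    fs = gridfs.GridFS(MongoClient().test_database)
    fs.put(b"first", filename="notes.txt")
    fs.put(b"second", filename="notes.txt")
    fs.get_version("notes.txt", 0).read()   # b'first'  (oldest revision)
    fs.get_version("notes.txt", -1).read()  # b'second' (most recent)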
Returns a version of the file in GridFS whose filename matches @@ -201,7 +211,7 @@ def get_version(self, filename: Optional[str] = None, version: Optional[int] = - _disallow_transactions(session) cursor = self.__files.find(query, session=session) if version is None: - version = -1 + version = -1 if version < 0: skip = abs(version) - 1 cursor.limit(-1).skip(skip).sort("uploadDate", DESCENDING) @@ -209,12 +219,13 @@ def get_version(self, filename: Optional[str] = None, version: Optional[int] = - cursor.limit(-1).skip(version).sort("uploadDate", ASCENDING) try: doc = next(cursor) - return GridOut( - self.__collection, file_document=doc, session=session) + return GridOut(self.__collection, file_document=doc, session=session) except StopIteration: raise NoFile("no version %d for filename %r" % (version, filename)) - def get_last_version(self, filename: Optional[str] = None, session: Optional[ClientSession] = None, **kwargs: Any) -> GridOut: + def get_last_version( + self, filename: Optional[str] = None, session: Optional[ClientSession] = None, **kwargs: Any + ) -> GridOut: """Get the most recent version of a file in GridFS by ``"filename"`` or metadata fields. @@ -280,10 +291,16 @@ def list(self, session: Optional[ClientSession] = None) -> List[str]: # With an index, distinct includes documents with no filename # as None. return [ - name for name in self.__files.distinct("filename", session=session) - if name is not None] - - def find_one(self, filter: Optional[Any] = None, session: Optional[ClientSession] = None, *args: Any, **kwargs: Any) -> Optional[GridOut]: + name for name in self.__files.distinct("filename", session=session) if name is not None + ] + + def find_one( + self, + filter: Optional[Any] = None, + session: Optional[ClientSession] = None, + *args: Any, + **kwargs: Any + ) -> Optional[GridOut]: """Get a single file from gridfs. All arguments to :meth:`find` are also valid arguments for @@ -377,7 +394,12 @@ def find(self, *args: Any, **kwargs: Any) -> GridOutCursor: """ return GridOutCursor(self.__collection, *args, **kwargs) - def exists(self, document_or_id: Optional[Any] = None, session: Optional[ClientSession] = None, **kwargs: Any) -> bool: + def exists( + self, + document_or_id: Optional[Any] = None, + session: Optional[ClientSession] = None, + **kwargs: Any + ) -> bool: """Check if a file exists in this instance of :class:`GridFS`. The file to check for can be specified by the value of its @@ -427,10 +449,14 @@ def exists(self, document_or_id: Optional[Any] = None, session: Optional[ClientS class GridFSBucket(object): """An instance of GridFS on top of a single Database.""" - def __init__(self, db: Database, bucket_name: str = "fs", - chunk_size_bytes: int = DEFAULT_CHUNK_SIZE, - write_concern: Optional[WriteConcern] = None, - read_preference: Optional[_ServerMode] = None) -> None: + def __init__( + self, + db: Database, + bucket_name: str = "fs", + chunk_size_bytes: int = DEFAULT_CHUNK_SIZE, + write_concern: Optional[WriteConcern] = None, + read_preference: Optional[_ServerMode] = None, + ) -> None: """Create a new instance of :class:`GridFSBucket`. 
Raises :exc:`TypeError` if `database` is not an instance of @@ -472,23 +498,27 @@ def __init__(self, db: Database, bucket_name: str = "fs", wtc = write_concern if write_concern is not None else db.write_concern if not wtc.acknowledged: - raise ConfigurationError('write concern must be acknowledged') + raise ConfigurationError("write concern must be acknowledged") self._bucket_name = bucket_name self._collection = db[bucket_name] self._chunks: Collection = self._collection.chunks.with_options( - write_concern=write_concern, - read_preference=read_preference) + write_concern=write_concern, read_preference=read_preference + ) self._files: Collection = self._collection.files.with_options( - write_concern=write_concern, - read_preference=read_preference) + write_concern=write_concern, read_preference=read_preference + ) self._chunk_size_bytes = chunk_size_bytes - def open_upload_stream(self, filename: str, chunk_size_bytes: Optional[int] = None, - metadata: Optional[Mapping[str, Any]] = None, - session: Optional[ClientSession] = None) -> GridIn: + def open_upload_stream( + self, + filename: str, + chunk_size_bytes: Optional[int] = None, + metadata: Optional[Mapping[str, Any]] = None, + session: Optional[ClientSession] = None, + ) -> GridIn: """Opens a Stream that the application can write the contents of the file to. @@ -526,18 +556,25 @@ def open_upload_stream(self, filename: str, chunk_size_bytes: Optional[int] = No """ validate_string("filename", filename) - opts = {"filename": filename, - "chunk_size": (chunk_size_bytes if chunk_size_bytes - is not None else self._chunk_size_bytes)} + opts = { + "filename": filename, + "chunk_size": ( + chunk_size_bytes if chunk_size_bytes is not None else self._chunk_size_bytes + ), + } if metadata is not None: opts["metadata"] = metadata return GridIn(self._collection, session=session, **opts) def open_upload_stream_with_id( - self, file_id: Any, filename: str, chunk_size_bytes: Optional[int] = None, - metadata: Optional[Mapping[str, Any]] = None, - session: Optional[ClientSession] = None) -> GridIn: + self, + file_id: Any, + filename: str, + chunk_size_bytes: Optional[int] = None, + metadata: Optional[Mapping[str, Any]] = None, + session: Optional[ClientSession] = None, + ) -> GridIn: """Opens a Stream that the application can write the contents of the file to. @@ -579,19 +616,26 @@ def open_upload_stream_with_id( """ validate_string("filename", filename) - opts = {"_id": file_id, - "filename": filename, - "chunk_size": (chunk_size_bytes if chunk_size_bytes - is not None else self._chunk_size_bytes)} + opts = { + "_id": file_id, + "filename": filename, + "chunk_size": ( + chunk_size_bytes if chunk_size_bytes is not None else self._chunk_size_bytes + ), + } if metadata is not None: opts["metadata"] = metadata return GridIn(self._collection, session=session, **opts) - def upload_from_stream(self, filename: str, source: Any, - chunk_size_bytes: Optional[int] = None, - metadata: Optional[Mapping[str, Any]] = None, - session: Optional[ClientSession] = None) -> ObjectId: + def upload_from_stream( + self, + filename: str, + source: Any, + chunk_size_bytes: Optional[int] = None, + metadata: Optional[Mapping[str, Any]] = None, + session: Optional[ClientSession] = None, + ) -> ObjectId: """Uploads a user file to a GridFS bucket. Reads the contents of the user file from `source` and uploads @@ -627,16 +671,20 @@ def upload_from_stream(self, filename: str, source: Any, .. versionchanged:: 3.6 Added ``session`` parameter. 
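A sketch of the upload helpers documented above (illustrative; assumes a reachable mongod)::

    from gridfs import GridFSBucket
    from pymongo import MongoClient

    bucket = GridFSBucket(MongoClient().test_database)
    # `source` may be bytes or a file-like object.
    file_id = bucket.upload_from_stream("report.txt", b"hello gridfs")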
""" - with self.open_upload_stream( - filename, chunk_size_bytes, metadata, session=session) as gin: + with self.open_upload_stream(filename, chunk_size_bytes, metadata, session=session) as gin: gin.write(source) return cast(ObjectId, gin._id) - def upload_from_stream_with_id(self, file_id: Any, filename: str, source: Any, - chunk_size_bytes: Optional[int] = None, - metadata: Optional[Mapping[str, Any]] = None, - session: Optional[ClientSession] = None) -> None: + def upload_from_stream_with_id( + self, + file_id: Any, + filename: str, + source: Any, + chunk_size_bytes: Optional[int] = None, + metadata: Optional[Mapping[str, Any]] = None, + session: Optional[ClientSession] = None, + ) -> None: """Uploads a user file to a GridFS bucket with a custom file id. Reads the contents of the user file from `source` and uploads @@ -674,11 +722,13 @@ def upload_from_stream_with_id(self, file_id: Any, filename: str, source: Any, Added ``session`` parameter. """ with self.open_upload_stream_with_id( - file_id, filename, chunk_size_bytes, metadata, - session=session) as gin: + file_id, filename, chunk_size_bytes, metadata, session=session + ) as gin: gin.write(source) - def open_download_stream(self, file_id: Any, session: Optional[ClientSession] = None) -> GridOut: + def open_download_stream( + self, file_id: Any, session: Optional[ClientSession] = None + ) -> GridOut: """Opens a Stream from which the application can read the contents of the stored file specified by file_id. @@ -709,7 +759,9 @@ def open_download_stream(self, file_id: Any, session: Optional[ClientSession] = gout._ensure_file() return gout - def download_to_stream(self, file_id: Any, destination: Any, session: Optional[ClientSession] = None) -> None: + def download_to_stream( + self, file_id: Any, destination: Any, session: Optional[ClientSession] = None + ) -> None: """Downloads the contents of the stored file specified by file_id and writes the contents to `destination`. @@ -766,8 +818,7 @@ def delete(self, file_id: Any, session: Optional[ClientSession] = None) -> None: res = self._files.delete_one({"_id": file_id}, session=session) self._chunks.delete_many({"files_id": file_id}, session=session) if not res.deleted_count: - raise NoFile( - "no file could be deleted because none matched %s" % file_id) + raise NoFile("no file could be deleted because none matched %s" % file_id) def find(self, *args: Any, **kwargs: Any) -> GridOutCursor: """Find and return the files collection documents that match ``filter`` @@ -817,7 +868,9 @@ def find(self, *args: Any, **kwargs: Any) -> GridOutCursor: """ return GridOutCursor(self._collection, *args, **kwargs) - def open_download_stream_by_name(self, filename: str, revision: int = -1, session: Optional[ClientSession] = None) -> GridOut: + def open_download_stream_by_name( + self, filename: str, revision: int = -1, session: Optional[ClientSession] = None + ) -> GridOut: """Opens a Stream from which the application can read the contents of `filename` and optional `revision`. 
@@ -866,15 +919,17 @@ def open_download_stream_by_name(self, filename: str, revision: int = -1, sessio cursor.limit(-1).skip(revision).sort("uploadDate", ASCENDING) try: grid_file = next(cursor) - return GridOut( - self._collection, file_document=grid_file, session=session) + return GridOut(self._collection, file_document=grid_file, session=session) except StopIteration: - raise NoFile( - "no version %d for filename %r" % (revision, filename)) - - def download_to_stream_by_name(self, filename: str, destination: Any, - revision: int = -1, - session: Optional[ClientSession] = None) -> None: + raise NoFile("no version %d for filename %r" % (revision, filename)) + + def download_to_stream_by_name( + self, + filename: str, + destination: Any, + revision: int = -1, + session: Optional[ClientSession] = None, + ) -> None: """Write the contents of `filename` (with optional `revision`) to `destination`. @@ -912,12 +967,13 @@ def download_to_stream_by_name(self, filename: str, destination: Any, .. versionchanged:: 3.6 Added ``session`` parameter. """ - with self.open_download_stream_by_name( - filename, revision, session=session) as gout: + with self.open_download_stream_by_name(filename, revision, session=session) as gout: for chunk in gout: destination.write(chunk) - def rename(self, file_id: Any, new_filename: str, session: Optional[ClientSession] = None) -> None: + def rename( + self, file_id: Any, new_filename: str, session: Optional[ClientSession] = None + ) -> None: """Renames the stored file with the specified file_id. For example:: @@ -940,9 +996,11 @@ def rename(self, file_id: Any, new_filename: str, session: Optional[ClientSessio Added ``session`` parameter. """ _disallow_transactions(session) - result = self._files.update_one({"_id": file_id}, - {"$set": {"filename": new_filename}}, - session=session) + result = self._files.update_one( + {"_id": file_id}, {"$set": {"filename": new_filename}}, session=session + ) if not result.matched_count: - raise NoFile("no files could be renamed %r because none " - "matched file_id %i" % (new_filename, file_id)) + raise NoFile( + "no files could be renamed %r because none " + "matched file_id %i" % (new_filename, file_id) + ) diff --git a/gridfs/grid_file.py b/gridfs/grid_file.py index 686d328a3c..93a97158ae 100644 --- a/gridfs/grid_file.py +++ b/gridfs/grid_file.py @@ -28,9 +28,13 @@ from pymongo.client_session import ClientSession from pymongo.collection import Collection from pymongo.cursor import Cursor -from pymongo.errors import (ConfigurationError, CursorNotFound, - DuplicateKeyError, InvalidOperation, - OperationFailure) +from pymongo.errors import ( + ConfigurationError, + CursorNotFound, + DuplicateKeyError, + InvalidOperation, + OperationFailure, +) from pymongo.read_preferences import ReadPreference _SEEK_SET = os.SEEK_SET @@ -48,30 +52,36 @@ _F_INDEX: SON[str, Any] = SON([("filename", ASCENDING), ("uploadDate", ASCENDING)]) -def _grid_in_property(field_name: str, docstring: str, read_only: Optional[bool] = False, - closed_only: Optional[bool] = False) -> Any: +def _grid_in_property( + field_name: str, + docstring: str, + read_only: Optional[bool] = False, + closed_only: Optional[bool] = False, +) -> Any: """Create a GridIn property.""" + def getter(self: Any) -> Any: if closed_only and not self._closed: - raise AttributeError("can only get %r on a closed file" % - field_name) + raise AttributeError("can only get %r on a closed file" % field_name) # Protect against PHP-237 - if field_name == 'length': + if field_name == "length": return 
self._file.get(field_name, 0) return self._file.get(field_name, None) def setter(self: Any, value: Any) -> Any: if self._closed: - self._coll.files.update_one({"_id": self._file["_id"]}, - {"$set": {field_name: value}}) + self._coll.files.update_one({"_id": self._file["_id"]}, {"$set": {field_name: value}}) self._file[field_name] = value if read_only: docstring += "\n\nThis attribute is read-only." elif closed_only: - docstring = "%s\n\n%s" % (docstring, "This attribute is read-only and " - "can only be read after :meth:`close` " - "has been called.") + docstring = "%s\n\n%s" % ( + docstring, + "This attribute is read-only and " + "can only be read after :meth:`close` " + "has been called.", + ) if not read_only and not closed_only: return property(getter, setter, doc=docstring) @@ -80,11 +90,12 @@ def setter(self: Any, value: Any) -> Any: def _grid_out_property(field_name: str, docstring: str) -> Any: """Create a GridOut property.""" + def getter(self: Any) -> Any: self._ensure_file() # Protect against PHP-237 - if field_name == 'length': + if field_name == "length": return self._file.get(field_name, 0) return self._file.get(field_name, None) @@ -100,14 +111,15 @@ def _clear_entity_type_registry(entity: Any, **kwargs: Any) -> Any: def _disallow_transactions(session: Optional[ClientSession]) -> None: if session and session.in_transaction: - raise InvalidOperation( - 'GridFS does not support multi-document transactions') + raise InvalidOperation("GridFS does not support multi-document transactions") class GridIn(object): - """Class to write data to GridFS. - """ - def __init__(self, root_collection: Collection, session: Optional[ClientSession] = None, **kwargs: Any) -> None: + """Class to write data to GridFS.""" + + def __init__( + self, root_collection: Collection, session: Optional[ClientSession] = None, **kwargs: Any + ) -> None: """Write a file to GridFS Application developers should generally not need to @@ -160,12 +172,10 @@ def __init__(self, root_collection: Collection, session: Optional[ClientSession] :attr:`~pymongo.collection.Collection.write_concern` """ if not isinstance(root_collection, Collection): - raise TypeError("root_collection must be an " - "instance of Collection") + raise TypeError("root_collection must be an " "instance of Collection") if not root_collection.write_concern.acknowledged: - raise ConfigurationError('root_collection must use ' - 'acknowledged write_concern') + raise ConfigurationError("root_collection must use " "acknowledged write_concern") _disallow_transactions(session) # Handle alternative naming @@ -174,8 +184,7 @@ def __init__(self, root_collection: Collection, session: Optional[ClientSession] if "chunk_size" in kwargs: kwargs["chunkSize"] = kwargs.pop("chunk_size") - coll = _clear_entity_type_registry( - root_collection, read_preference=ReadPreference.PRIMARY) + coll = _clear_entity_type_registry(root_collection, read_preference=ReadPreference.PRIMARY) # Defaults kwargs["_id"] = kwargs.get("_id", ObjectId()) @@ -194,13 +203,14 @@ def __create_index(self, collection: Collection, index_key: Any, unique: bool) - doc = collection.find_one(projection={"_id": 1}, session=self._session) if doc is None: try: - index_keys = [index_spec['key'] for index_spec in - collection.list_indexes(session=self._session)] + index_keys = [ + index_spec["key"] + for index_spec in collection.list_indexes(session=self._session) + ] except OperationFailure: index_keys = [] if index_key not in index_keys: - collection.create_index( - index_key.items(), unique=unique, 
session=self._session) + collection.create_index(index_key.items(), unique=unique, session=self._session) def __ensure_indexes(self) -> None: if not object.__getattribute__(self, "_ensured_index"): @@ -210,35 +220,28 @@ def __ensure_indexes(self) -> None: object.__setattr__(self, "_ensured_index", True) def abort(self) -> None: - """Remove all chunks/files that may have been uploaded and close. - """ - self._coll.chunks.delete_many( - {"files_id": self._file['_id']}, session=self._session) - self._coll.files.delete_one( - {"_id": self._file['_id']}, session=self._session) + """Remove all chunks/files that may have been uploaded and close.""" + self._coll.chunks.delete_many({"files_id": self._file["_id"]}, session=self._session) + self._coll.files.delete_one({"_id": self._file["_id"]}, session=self._session) object.__setattr__(self, "_closed", True) @property def closed(self) -> bool: - """Is this file closed? - """ + """Is this file closed?""" return self._closed - _id: Any = _grid_in_property("_id", "The ``'_id'`` value for this file.", - read_only=True) + _id: Any = _grid_in_property("_id", "The ``'_id'`` value for this file.", read_only=True) filename: Optional[str] = _grid_in_property("filename", "Name of this file.") name: Optional[str] = _grid_in_property("filename", "Alias for `filename`.") content_type: Optional[str] = _grid_in_property("contentType", "Mime-type for this file.") - length: int = _grid_in_property("length", "Length (in bytes) of this file.", - closed_only=True) - chunk_size: int = _grid_in_property("chunkSize", "Chunk size for this file.", - read_only=True) - upload_date: datetime.datetime = _grid_in_property("uploadDate", - "Date that this file was uploaded.", - closed_only=True) - md5: Optional[str] = _grid_in_property("md5", "MD5 of the contents of this file " - "if an md5 sum was created.", - closed_only=True) + length: int = _grid_in_property("length", "Length (in bytes) of this file.", closed_only=True) + chunk_size: int = _grid_in_property("chunkSize", "Chunk size for this file.", read_only=True) + upload_date: datetime.datetime = _grid_in_property( + "uploadDate", "Date that this file was uploaded.", closed_only=True + ) + md5: Optional[str] = _grid_in_property( + "md5", "MD5 of the contents of this file " "if an md5 sum was created.", closed_only=True + ) _buffer: io.BytesIO _closed: bool @@ -259,46 +262,39 @@ def __setattr__(self, name: str, value: Any) -> None: # them now. self._file[name] = value if self._closed: - self._coll.files.update_one({"_id": self._file["_id"]}, - {"$set": {name: value}}) + self._coll.files.update_one({"_id": self._file["_id"]}, {"$set": {name: value}}) def __flush_data(self, data: Any) -> None: - """Flush `data` to a chunk. - """ + """Flush `data` to a chunk.""" self.__ensure_indexes() if not data: return - assert(len(data) <= self.chunk_size) + assert len(data) <= self.chunk_size - chunk = {"files_id": self._file["_id"], - "n": self._chunk_number, - "data": Binary(data)} + chunk = {"files_id": self._file["_id"], "n": self._chunk_number, "data": Binary(data)} try: self._chunks.insert_one(chunk, session=self._session) except DuplicateKeyError: - self._raise_file_exists(self._file['_id']) + self._raise_file_exists(self._file["_id"]) self._chunk_number += 1 self._position += len(data) def __flush_buffer(self) -> None: - """Flush the buffer contents out to a chunk. 
- """ + """Flush the buffer contents out to a chunk.""" self.__flush_data(self._buffer.getvalue()) self._buffer.close() self._buffer = io.BytesIO() def __flush(self) -> Any: - """Flush the file to the database. - """ + """Flush the file to the database.""" try: self.__flush_buffer() # The GridFS spec says length SHOULD be an Int64. self._file["length"] = Int64(self._position) self._file["uploadDate"] = datetime.datetime.utcnow() - return self._coll.files.insert_one( - self._file, session=self._session) + return self._coll.files.insert_one(self._file, session=self._session) except DuplicateKeyError: self._raise_file_exists(self._id) @@ -317,12 +313,12 @@ def close(self) -> None: object.__setattr__(self, "_closed", True) def read(self, size: Optional[int] = -1) -> None: - raise io.UnsupportedOperation('read') + raise io.UnsupportedOperation("read") def readable(self) -> bool: return False - def seekable(self)-> bool: + def seekable(self) -> bool: return False def write(self, data: Any) -> None: @@ -360,8 +356,7 @@ def write(self, data: Any) -> None: try: data = data.encode(self.encoding) except AttributeError: - raise TypeError("must specify an encoding for file in " - "order to write str") + raise TypeError("must specify an encoding for file in " "order to write str") read = io.BytesIO(data).read if self._buffer.tell() > 0: @@ -395,8 +390,7 @@ def writeable(self) -> bool: return True def __enter__(self) -> "GridIn": - """Support for the context manager protocol. - """ + """Support for the context manager protocol.""" return self def __exit__(self, exc_type: Any, exc_val: Any, exc_tb: Any) -> Any: @@ -411,11 +405,15 @@ def __exit__(self, exc_type: Any, exc_val: Any, exc_tb: Any) -> Any: class GridOut(io.IOBase): - """Class to read data out of GridFS. - """ - def __init__(self, root_collection: Collection, file_id: Optional[int] = None, - file_document: Optional[Any] = None, - session: Optional[ClientSession] = None) -> None: + """Class to read data out of GridFS.""" + + def __init__( + self, + root_collection: Collection, + file_id: Optional[int] = None, + file_document: Optional[Any] = None, + session: Optional[ClientSession] = None, + ) -> None: """Read a file from GridFS Application developers should generally not need to @@ -449,8 +447,7 @@ def __init__(self, root_collection: Collection, file_id: Optional[int] = None, from the server. Metadata is fetched when first needed. """ if not isinstance(root_collection, Collection): - raise TypeError("root_collection must be an " - "instance of Collection") + raise TypeError("root_collection must be an " "instance of Collection") _disallow_transactions(session) root_collection = _clear_entity_type_registry(root_collection) @@ -472,12 +469,16 @@ def __init__(self, root_collection: Collection, file_id: Optional[int] = None, content_type: Optional[str] = _grid_out_property("contentType", "Mime-type for this file.") length: int = _grid_out_property("length", "Length (in bytes) of this file.") chunk_size: int = _grid_out_property("chunkSize", "Chunk size for this file.") - upload_date: datetime.datetime = _grid_out_property("uploadDate", - "Date that this file was first uploaded.") + upload_date: datetime.datetime = _grid_out_property( + "uploadDate", "Date that this file was first uploaded." 
+ ) aliases: Optional[List[str]] = _grid_out_property("aliases", "List of aliases for this file.") - metadata: Optional[Mapping[str, Any]] = _grid_out_property("metadata", "Metadata attached to this file.") - md5: Optional[str] = _grid_out_property("md5", "MD5 of the contents of this file " - "if an md5 sum was created.") + metadata: Optional[Mapping[str, Any]] = _grid_out_property( + "metadata", "Metadata attached to this file." + ) + md5: Optional[str] = _grid_out_property( + "md5", "MD5 of the contents of this file " "if an md5 sum was created." + ) _file: Any __chunk_iter: Any @@ -485,11 +486,11 @@ def __init__(self, root_collection: Collection, file_id: Optional[int] = None, def _ensure_file(self) -> None: if not self._file: _disallow_transactions(self._session) - self._file = self.__files.find_one({"_id": self.__file_id}, - session=self._session) + self._file = self.__files.find_one({"_id": self.__file_id}, session=self._session) if not self._file: - raise NoFile("no file in gridfs collection %r with _id %r" % - (self.__files, self.__file_id)) + raise NoFile( + "no file in gridfs collection %r with _id %r" % (self.__files, self.__file_id) + ) def __getattr__(self, name: str) -> Any: self._ensure_file() @@ -514,10 +515,11 @@ def readchunk(self) -> bytes: chunk_number = int((received + self.__position) / chunk_size) if self.__chunk_iter is None: self.__chunk_iter = _GridOutChunkIterator( - self, self.__chunks, self._session, chunk_number) + self, self.__chunks, self._session, chunk_number + ) chunk = self.__chunk_iter.next() - chunk_data = chunk["data"][self.__position % chunk_size:] + chunk_data = chunk["data"][self.__position % chunk_size :] if not chunk_data: raise CorruptGridFile("truncated chunk") @@ -607,8 +609,7 @@ def readline(self, size: int = -1) -> bytes: # type: ignore[override] return data.read(size) def tell(self) -> int: - """Return the current position of this file. - """ + """Return the current position of this file.""" return self.__position def seek(self, pos: int, whence: int = _SEEK_SET) -> int: @@ -682,10 +683,10 @@ def close(self) -> None: super().close() def write(self, value: Any) -> None: - raise io.UnsupportedOperation('write') + raise io.UnsupportedOperation("write") def writelines(self, lines: Any) -> None: - raise io.UnsupportedOperation('writelines') + raise io.UnsupportedOperation("writelines") def writable(self) -> bool: return False @@ -704,7 +705,7 @@ def __exit__(self, exc_type: Any, exc_val: Any, exc_tb: Any) -> Any: return False def fileno(self) -> int: - raise io.UnsupportedOperation('fileno') + raise io.UnsupportedOperation("fileno") def flush(self) -> None: # GridOut is read-only, so flush does nothing. @@ -716,7 +717,7 @@ def isatty(self) -> bool: def truncate(self, size: Optional[int] = None) -> int: # See https://docs.python.org/3/library/io.html#io.IOBase.writable # for why truncate has to raise. - raise io.UnsupportedOperation('truncate') + raise io.UnsupportedOperation("truncate") # Override IOBase.__del__ otherwise it will lead to __getattr__ on # __IOBase_closed which calls _ensure_file and potentially performs I/O. @@ -731,7 +732,14 @@ class _GridOutChunkIterator(object): Raises CorruptGridFile when encountering any truncated, missing, or extra chunk in a file. 
""" - def __init__(self, grid_out: GridOut, chunks: Collection, session: Optional[ClientSession], next_chunk: Any) -> None: + + def __init__( + self, + grid_out: GridOut, + chunks: Collection, + session: Optional[ClientSession], + next_chunk: Any, + ) -> None: self._id = grid_out._id self._chunk_size = int(grid_out.chunk_size) self._length = int(grid_out.length) @@ -756,8 +764,7 @@ def _create_cursor(self) -> None: if self._next_chunk > 0: filter["n"] = {"$gte": self._next_chunk} _disallow_transactions(self._session) - self._cursor = self._chunks.find(filter, sort=[("n", 1)], - session=self._session) + self._cursor = self._chunks.find(filter, sort=[("n", 1)], session=self._session) def _next_with_retry(self) -> Mapping[str, Any]: """Return the next chunk and retry once on CursorNotFound. @@ -788,7 +795,8 @@ def next(self) -> Mapping[str, Any]: self.close() raise CorruptGridFile( "Missing chunk: expected chunk #%d but found " - "chunk with n=%d" % (self._next_chunk, chunk["n"])) + "chunk with n=%d" % (self._next_chunk, chunk["n"]) + ) if chunk["n"] >= self._num_chunks: # According to spec, ignore extra chunks if they are empty. @@ -796,15 +804,16 @@ def next(self) -> Mapping[str, Any]: self.close() raise CorruptGridFile( "Extra chunk found: expected %d chunks but found " - "chunk with n=%d" % (self._num_chunks, chunk["n"])) + "chunk with n=%d" % (self._num_chunks, chunk["n"]) + ) expected_length = self.expected_chunk_length(chunk["n"]) if len(chunk["data"]) != expected_length: self.close() raise CorruptGridFile( "truncated chunk #%d: expected chunk length to be %d but " - "found chunk with length %d" % ( - chunk["n"], expected_length, len(chunk["data"]))) + "found chunk with length %d" % (chunk["n"], expected_length, len(chunk["data"])) + ) self._next_chunk += 1 return chunk @@ -835,13 +844,18 @@ class GridOutCursor(Cursor): """A cursor / iterator for returning GridOut objects as the result of an arbitrary query against the GridFS files collection. """ - def __init__(self, collection: Collection, filter: Optional[Mapping[str, Any]] = None, - skip: int = 0, - limit: int = 0, - no_cursor_timeout: bool = False, - sort: Optional[Any] = None, - batch_size: int = 0, - session: Optional[ClientSession] = None) -> None: + + def __init__( + self, + collection: Collection, + filter: Optional[Mapping[str, Any]] = None, + skip: int = 0, + limit: int = 0, + no_cursor_timeout: bool = False, + sort: Optional[Any] = None, + batch_size: int = 0, + session: Optional[ClientSession] = None, + ) -> None: """Create a new cursor, similar to the normal :class:`~pymongo.cursor.Cursor`. @@ -859,18 +873,22 @@ def __init__(self, collection: Collection, filter: Optional[Mapping[str, Any]] = self.__root_collection = collection super(GridOutCursor, self).__init__( - collection.files, filter, skip=skip, limit=limit, - no_cursor_timeout=no_cursor_timeout, sort=sort, - batch_size=batch_size, session=session) + collection.files, + filter, + skip=skip, + limit=limit, + no_cursor_timeout=no_cursor_timeout, + sort=sort, + batch_size=batch_size, + session=session, + ) def next(self) -> GridOut: - """Get next GridOut object from cursor. 
- """ + """Get next GridOut object from cursor.""" _disallow_transactions(self.session) # Work around "super is not iterable" issue in Python 3.x next_file = super(GridOutCursor, self).next() - return GridOut(self.__root_collection, file_document=next_file, - session=self.session) + return GridOut(self.__root_collection, file_document=next_file, session=self.session) __next__ = next @@ -881,6 +899,5 @@ def remove_option(self, *args: Any, **kwargs: Any) -> None: # type: ignore[over raise NotImplementedError("Method does not exist for GridOutCursor") def _clone_base(self, session: ClientSession) -> "GridOutCursor": - """Creates an empty GridOutCursor for information to be copied into. - """ + """Creates an empty GridOutCursor for information to be copied into.""" return GridOutCursor(self.__root_collection, session=session) diff --git a/pymongo/__init__.py b/pymongo/__init__.py index 54a962df57..f8baa91971 100644 --- a/pymongo/__init__.py +++ b/pymongo/__init__.py @@ -55,12 +55,14 @@ .. _text index: http://docs.mongodb.org/manual/core/index-text/ """ -version_tuple: Tuple[Union[int, str], ...] = (4, 1, 0, '.dev0') +version_tuple: Tuple[Union[int, str], ...] = (4, 1, 0, ".dev0") + def get_version_string() -> str: if isinstance(version_tuple[-1], str): - return '.'.join(map(str, version_tuple[:-1])) + version_tuple[-1] - return '.'.join(map(str, version_tuple)) + return ".".join(map(str, version_tuple[:-1])) + version_tuple[-1] + return ".".join(map(str, version_tuple)) + __version__: str = get_version_string() version = __version__ @@ -68,12 +70,18 @@ def get_version_string() -> str: """Current version of PyMongo.""" from pymongo.collection import ReturnDocument -from pymongo.common import (MAX_SUPPORTED_WIRE_VERSION, - MIN_SUPPORTED_WIRE_VERSION) +from pymongo.common import MAX_SUPPORTED_WIRE_VERSION, MIN_SUPPORTED_WIRE_VERSION from pymongo.cursor import CursorType from pymongo.mongo_client import MongoClient -from pymongo.operations import (DeleteMany, DeleteOne, IndexModel, InsertOne, - ReplaceOne, UpdateMany, UpdateOne) +from pymongo.operations import ( + DeleteMany, + DeleteOne, + IndexModel, + InsertOne, + ReplaceOne, + UpdateMany, + UpdateOne, +) from pymongo.read_preferences import ReadPreference from pymongo.write_concern import WriteConcern @@ -82,6 +90,7 @@ def has_c() -> bool: """Is the C extension installed?""" try: from pymongo import _cmessage # type: ignore[attr-defined] + return True except ImportError: return False diff --git a/pymongo/aggregation.py b/pymongo/aggregation.py index b2e20e9ca5..2b8cafe7cb 100644 --- a/pymongo/aggregation.py +++ b/pymongo/aggregation.py @@ -28,21 +28,32 @@ class _AggregationCommand(object): :meth:`pymongo.collection.Collection.aggregate`, or :meth:`pymongo.database.Database.aggregate` instead. """ - def __init__(self, target, cursor_class, pipeline, options, - explicit_session, let=None, user_fields=None, result_processor=None): + + def __init__( + self, + target, + cursor_class, + pipeline, + options, + explicit_session, + let=None, + user_fields=None, + result_processor=None, + ): if "explain" in options: - raise ConfigurationError("The explain option is not supported. " - "Use Database.command instead.") + raise ConfigurationError( + "The explain option is not supported. " "Use Database.command instead." 
+ ) self._target = target - pipeline = common.validate_list('pipeline', pipeline) + pipeline = common.validate_list("pipeline", pipeline) self._pipeline = pipeline self._performs_write = False if pipeline and ("$out" in pipeline[-1] or "$merge" in pipeline[-1]): self._performs_write = True - common.validate_is_mapping('options', options) + common.validate_is_mapping("options", options) if let: common.validate_is_mapping("let", let) options["let"] = let @@ -51,7 +62,8 @@ def __init__(self, target, cursor_class, pipeline, options, # This is the batchSize that will be used for setting the initial # batchSize for the cursor, as well as the subsequent getMores. self._batch_size = common.validate_non_negative_integer_or_none( - "batchSize", self._options.pop("batchSize", None)) + "batchSize", self._options.pop("batchSize", None) + ) # If the cursor option is already specified, avoid overriding it. self._options.setdefault("cursor", {}) @@ -65,10 +77,9 @@ def __init__(self, target, cursor_class, pipeline, options, self._user_fields = user_fields self._result_processor = result_processor - self._collation = validate_collation_or_none( - options.pop('collation', None)) + self._collation = validate_collation_or_none(options.pop("collation", None)) - self._max_await_time_ms = options.pop('maxAwaitTimeMS', None) + self._max_await_time_ms = options.pop("maxAwaitTimeMS", None) self._write_preference = None @property @@ -100,17 +111,16 @@ def get_read_preference(self, session): def get_cursor(self, session, server, sock_info, read_preference): # Serialize command. - cmd = SON([("aggregate", self._aggregation_target), - ("pipeline", self._pipeline)]) + cmd = SON([("aggregate", self._aggregation_target), ("pipeline", self._pipeline)]) cmd.update(self._options) # Apply this target's read concern if: # readConcern has not been specified as a kwarg and either # - server version is >= 4.2 or # - server version is >= 3.2 and pipeline doesn't use $out - if (('readConcern' not in cmd) and - (not self._performs_write or - (sock_info.max_wire_version >= 8))): + if ("readConcern" not in cmd) and ( + not self._performs_write or (sock_info.max_wire_version >= 8) + ): read_concern = self._target.read_concern else: read_concern = None @@ -118,7 +128,7 @@ def get_cursor(self, session, server, sock_info, read_preference): # Apply this target's write concern if: # writeConcern has not been specified as a kwarg and pipeline doesn't # perform a write operation - if 'writeConcern' not in cmd and self._performs_write: + if "writeConcern" not in cmd and self._performs_write: write_concern = self._target._write_concern_for(session) else: write_concern = None @@ -135,14 +145,15 @@ def get_cursor(self, session, server, sock_info, read_preference): collation=self._collation, session=session, client=self._database.client, - user_fields=self._user_fields) + user_fields=self._user_fields, + ) if self._result_processor: self._result_processor(result, sock_info) # Extract cursor from result or mock/fake one if necessary. - if 'cursor' in result: - cursor = result['cursor'] + if "cursor" in result: + cursor = result["cursor"] else: # Unacknowledged $out/$merge write. Fake a cursor. cursor = { @@ -153,16 +164,19 @@ def get_cursor(self, session, server, sock_info, read_preference): # Create and return cursor instance. 
cmd_cursor = self._cursor_class( - self._cursor_collection(cursor), cursor, sock_info.address, + self._cursor_collection(cursor), + cursor, + sock_info.address, batch_size=self._batch_size or 0, max_await_time_ms=self._max_await_time_ms, - session=session, explicit_session=self._explicit_session) + session=session, + explicit_session=self._explicit_session, + ) cmd_cursor._maybe_pin_connection(sock_info) return cmd_cursor class _CollectionAggregationCommand(_AggregationCommand): - @property def _aggregation_target(self): return self._target.name diff --git a/pymongo/auth.py b/pymongo/auth.py index 34f1c7fc94..0a4e7e7324 100644 --- a/pymongo/auth.py +++ b/pymongo/auth.py @@ -34,7 +34,8 @@ _USE_PRINCIPAL = False try: import winkerberos as kerberos - if tuple(map(int, kerberos.__version__.split('.')[:2])) >= (0, 5): + + if tuple(map(int, kerberos.__version__.split(".")[:2])) >= (0, 5): _USE_PRINCIPAL = True except ImportError: try: @@ -44,21 +45,24 @@ MECHANISMS = frozenset( - ['GSSAPI', - 'MONGODB-CR', - 'MONGODB-X509', - 'MONGODB-AWS', - 'PLAIN', - 'SCRAM-SHA-1', - 'SCRAM-SHA-256', - 'DEFAULT']) + [ + "GSSAPI", + "MONGODB-CR", + "MONGODB-X509", + "MONGODB-AWS", + "PLAIN", + "SCRAM-SHA-1", + "SCRAM-SHA-256", + "DEFAULT", + ] +) """The authentication mechanisms supported by PyMongo.""" class _Cache(object): __slots__ = ("data",) - _hash_val = hash('_Cache') + _hash_val = hash("_Cache") def __init__(self): self.data = None @@ -78,80 +82,69 @@ def __hash__(self): return self._hash_val - MongoCredential = namedtuple( - 'MongoCredential', - ['mechanism', - 'source', - 'username', - 'password', - 'mechanism_properties', - 'cache']) + "MongoCredential", + ["mechanism", "source", "username", "password", "mechanism_properties", "cache"], +) """A hashable namedtuple of values used for authentication.""" -GSSAPIProperties = namedtuple('GSSAPIProperties', - ['service_name', - 'canonicalize_host_name', - 'service_realm']) +GSSAPIProperties = namedtuple( + "GSSAPIProperties", ["service_name", "canonicalize_host_name", "service_realm"] +) """Mechanism properties for GSSAPI authentication.""" -_AWSProperties = namedtuple('_AWSProperties', ['aws_session_token']) +_AWSProperties = namedtuple("_AWSProperties", ["aws_session_token"]) """Mechanism properties for MONGODB-AWS authentication.""" def _build_credentials_tuple(mech, source, user, passwd, extra, database): - """Build and return a mechanism specific credentials tuple. - """ - if mech not in ('MONGODB-X509', 'MONGODB-AWS') and user is None: + """Build and return a mechanism specific credentials tuple.""" + if mech not in ("MONGODB-X509", "MONGODB-AWS") and user is None: raise ConfigurationError("%s requires a username." 
% (mech,)) - if mech == 'GSSAPI': - if source is not None and source != '$external': - raise ValueError( - "authentication source must be $external or None for GSSAPI") - properties = extra.get('authmechanismproperties', {}) - service_name = properties.get('SERVICE_NAME', 'mongodb') - canonicalize = properties.get('CANONICALIZE_HOST_NAME', False) - service_realm = properties.get('SERVICE_REALM') - props = GSSAPIProperties(service_name=service_name, - canonicalize_host_name=canonicalize, - service_realm=service_realm) + if mech == "GSSAPI": + if source is not None and source != "$external": + raise ValueError("authentication source must be $external or None for GSSAPI") + properties = extra.get("authmechanismproperties", {}) + service_name = properties.get("SERVICE_NAME", "mongodb") + canonicalize = properties.get("CANONICALIZE_HOST_NAME", False) + service_realm = properties.get("SERVICE_REALM") + props = GSSAPIProperties( + service_name=service_name, + canonicalize_host_name=canonicalize, + service_realm=service_realm, + ) # Source is always $external. - return MongoCredential(mech, '$external', user, passwd, props, None) - elif mech == 'MONGODB-X509': + return MongoCredential(mech, "$external", user, passwd, props, None) + elif mech == "MONGODB-X509": if passwd is not None: - raise ConfigurationError( - "Passwords are not supported by MONGODB-X509") - if source is not None and source != '$external': - raise ValueError( - "authentication source must be " - "$external or None for MONGODB-X509") + raise ConfigurationError("Passwords are not supported by MONGODB-X509") + if source is not None and source != "$external": + raise ValueError("authentication source must be " "$external or None for MONGODB-X509") # Source is always $external, user can be None. - return MongoCredential(mech, '$external', user, None, None, None) - elif mech == 'MONGODB-AWS': + return MongoCredential(mech, "$external", user, None, None, None) + elif mech == "MONGODB-AWS": if user is not None and passwd is None: + raise ConfigurationError("username without a password is not supported by MONGODB-AWS") + if source is not None and source != "$external": raise ConfigurationError( - "username without a password is not supported by MONGODB-AWS") - if source is not None and source != '$external': - raise ConfigurationError( - "authentication source must be " - "$external or None for MONGODB-AWS") + "authentication source must be " "$external or None for MONGODB-AWS" + ) - properties = extra.get('authmechanismproperties', {}) - aws_session_token = properties.get('AWS_SESSION_TOKEN') + properties = extra.get("authmechanismproperties", {}) + aws_session_token = properties.get("AWS_SESSION_TOKEN") aws_props = _AWSProperties(aws_session_token=aws_session_token) # user can be None for temporary link-local EC2 credentials. 
- return MongoCredential(mech, '$external', user, passwd, aws_props, None) - elif mech == 'PLAIN': - source_database = source or database or '$external' + return MongoCredential(mech, "$external", user, passwd, aws_props, None) + elif mech == "PLAIN": + source_database = source or database or "$external" return MongoCredential(mech, source_database, user, passwd, None, None) else: - source_database = source or database or 'admin' + source_database = source or database or "admin" if passwd is None: raise ConfigurationError("A password is required.") - return MongoCredential( - mech, source_database, user, passwd, None, _Cache()) + return MongoCredential(mech, source_database, user, passwd, None, _Cache()) def _xor(fir, sec): @@ -170,18 +163,22 @@ def _authenticate_scram_start(credentials, mechanism): nonce = standard_b64encode(os.urandom(32)) first_bare = b"n=" + user + b",r=" + nonce - cmd = SON([('saslStart', 1), - ('mechanism', mechanism), - ('payload', Binary(b"n,," + first_bare)), - ('autoAuthorize', 1), - ('options', {'skipEmptyExchange': True})]) + cmd = SON( + [ + ("saslStart", 1), + ("mechanism", mechanism), + ("payload", Binary(b"n,," + first_bare)), + ("autoAuthorize", 1), + ("options", {"skipEmptyExchange": True}), + ] + ) return nonce, first_bare, cmd def _authenticate_scram(credentials, sock_info, mechanism): """Authenticate using SCRAM.""" username = credentials.username - if mechanism == 'SCRAM-SHA-256': + if mechanism == "SCRAM-SHA-256": digest = "sha256" digestmod = hashlib.sha256 data = saslprep(credentials.password).encode("utf-8") @@ -200,17 +197,16 @@ def _authenticate_scram(credentials, sock_info, mechanism): nonce, first_bare = ctx.scram_data res = ctx.speculative_authenticate else: - nonce, first_bare, cmd = _authenticate_scram_start( - credentials, mechanism) + nonce, first_bare, cmd = _authenticate_scram_start(credentials, mechanism) res = sock_info.command(source, cmd) - server_first = res['payload'] + server_first = res["payload"] parsed = _parse_scram_response(server_first) - iterations = int(parsed[b'i']) + iterations = int(parsed[b"i"]) if iterations < 4096: raise OperationFailure("Server returned an invalid iteration count.") - salt = parsed[b's'] - rnonce = parsed[b'r'] + salt = parsed[b"s"] + rnonce = parsed[b"r"] if not rnonce.startswith(nonce): raise OperationFailure("Server returned an invalid nonce.") @@ -223,8 +219,7 @@ def _authenticate_scram(credentials, sock_info, mechanism): # Salt and / or iterations could change for a number of different # reasons. Either changing invalidates the cache. 
if not client_key or salt != csalt or iterations != citerations: - salted_pass = hashlib.pbkdf2_hmac( - digest, data, standard_b64decode(salt), iterations) + salted_pass = hashlib.pbkdf2_hmac(digest, data, standard_b64decode(salt), iterations) client_key = _hmac(salted_pass, b"Client Key", digestmod).digest() server_key = _hmac(salted_pass, b"Server Key", digestmod).digest() cache.data = (client_key, server_key, salt, iterations) @@ -234,32 +229,38 @@ def _authenticate_scram(credentials, sock_info, mechanism): client_proof = b"p=" + standard_b64encode(_xor(client_key, client_sig)) client_final = b",".join((without_proof, client_proof)) - server_sig = standard_b64encode( - _hmac(server_key, auth_msg, digestmod).digest()) + server_sig = standard_b64encode(_hmac(server_key, auth_msg, digestmod).digest()) - cmd = SON([('saslContinue', 1), - ('conversationId', res['conversationId']), - ('payload', Binary(client_final))]) + cmd = SON( + [ + ("saslContinue", 1), + ("conversationId", res["conversationId"]), + ("payload", Binary(client_final)), + ] + ) res = sock_info.command(source, cmd) - parsed = _parse_scram_response(res['payload']) - if not hmac.compare_digest(parsed[b'v'], server_sig): + parsed = _parse_scram_response(res["payload"]) + if not hmac.compare_digest(parsed[b"v"], server_sig): raise OperationFailure("Server returned an invalid signature.") # A third empty challenge may be required if the server does not support # skipEmptyExchange: SERVER-44857. - if not res['done']: - cmd = SON([('saslContinue', 1), - ('conversationId', res['conversationId']), - ('payload', Binary(b''))]) + if not res["done"]: + cmd = SON( + [ + ("saslContinue", 1), + ("conversationId", res["conversationId"]), + ("payload", Binary(b"")), + ] + ) res = sock_info.command(source, cmd) - if not res['done']: - raise OperationFailure('SASL conversation failed to complete.') + if not res["done"]: + raise OperationFailure("SASL conversation failed to complete.") def _password_digest(username, password): - """Get a password digest to use for authentication. - """ + """Get a password digest to use for authentication.""" if not isinstance(password, str): raise TypeError("password must be an instance of str") if len(password) == 0: @@ -269,17 +270,16 @@ def _password_digest(username, password): md5hash = hashlib.md5() data = "%s:mongo:%s" % (username, password) - md5hash.update(data.encode('utf-8')) + md5hash.update(data.encode("utf-8")) return md5hash.hexdigest() def _auth_key(nonce, username, password): - """Get an auth key to use for authentication. - """ + """Get an auth key to use for authentication.""" digest = _password_digest(username, password) md5hash = hashlib.md5() data = "%s%s%s" % (nonce, username, digest) - md5hash.update(data.encode('utf-8')) + md5hash.update(data.encode("utf-8")) return md5hash.hexdigest() @@ -287,7 +287,8 @@ def _canonicalize_hostname(hostname): """Canonicalize hostname following MIT-krb5 behavior.""" # https://github.com/krb5/krb5/blob/d406afa363554097ac48646a29249c04f498c88e/src/util/k5test.py#L505-L520 af, socktype, proto, canonname, sockaddr = socket.getaddrinfo( - hostname, None, 0, 0, socket.IPPROTO_TCP, socket.AI_CANONNAME)[0] + hostname, None, 0, 0, socket.IPPROTO_TCP, socket.AI_CANONNAME + )[0] try: name = socket.getnameinfo(sockaddr, socket.NI_NAMEREQD) @@ -298,11 +299,11 @@ def _canonicalize_hostname(hostname): def _authenticate_gssapi(credentials, sock_info): - """Authenticate using GSSAPI. 
- """ + """Authenticate using GSSAPI.""" if not HAVE_KERBEROS: - raise ConfigurationError('The "kerberos" module must be ' - 'installed to use GSSAPI authentication.') + raise ConfigurationError( + 'The "kerberos" module must be ' "installed to use GSSAPI authentication." + ) try: username = credentials.username @@ -313,9 +314,9 @@ def _authenticate_gssapi(credentials, sock_info): host = sock_info.address[0] if props.canonicalize_host_name: host = _canonicalize_hostname(host) - service = props.service_name + '@' + host + service = props.service_name + "@" + host if props.service_realm is not None: - service = service + '@' + props.service_realm + service = service + "@" + props.service_realm if password is not None: if _USE_PRINCIPAL: @@ -324,81 +325,88 @@ def _authenticate_gssapi(credentials, sock_info): # by WinKerberos) doesn't support +. principal = ":".join((quote(username), quote(password))) result, ctx = kerberos.authGSSClientInit( - service, principal, gssflags=kerberos.GSS_C_MUTUAL_FLAG) + service, principal, gssflags=kerberos.GSS_C_MUTUAL_FLAG + ) else: - if '@' in username: - user, domain = username.split('@', 1) + if "@" in username: + user, domain = username.split("@", 1) else: user, domain = username, None result, ctx = kerberos.authGSSClientInit( - service, gssflags=kerberos.GSS_C_MUTUAL_FLAG, - user=user, domain=domain, password=password) + service, + gssflags=kerberos.GSS_C_MUTUAL_FLAG, + user=user, + domain=domain, + password=password, + ) else: - result, ctx = kerberos.authGSSClientInit( - service, gssflags=kerberos.GSS_C_MUTUAL_FLAG) + result, ctx = kerberos.authGSSClientInit(service, gssflags=kerberos.GSS_C_MUTUAL_FLAG) if result != kerberos.AUTH_GSS_COMPLETE: - raise OperationFailure('Kerberos context failed to initialize.') + raise OperationFailure("Kerberos context failed to initialize.") try: # pykerberos uses a weird mix of exceptions and return values # to indicate errors. # 0 == continue, 1 == complete, -1 == error # Only authGSSClientStep can return 0. - if kerberos.authGSSClientStep(ctx, '') != 0: - raise OperationFailure('Unknown kerberos ' - 'failure in step function.') + if kerberos.authGSSClientStep(ctx, "") != 0: + raise OperationFailure("Unknown kerberos " "failure in step function.") # Start a SASL conversation with mongod/s # Note: pykerberos deals with base64 encoded byte strings. # Since mongo accepts base64 strings as the payload we don't # have to use bson.binary.Binary. 
payload = kerberos.authGSSClientResponse(ctx) - cmd = SON([('saslStart', 1), - ('mechanism', 'GSSAPI'), - ('payload', payload), - ('autoAuthorize', 1)]) - response = sock_info.command('$external', cmd) + cmd = SON( + [ + ("saslStart", 1), + ("mechanism", "GSSAPI"), + ("payload", payload), + ("autoAuthorize", 1), + ] + ) + response = sock_info.command("$external", cmd) # Limit how many times we loop to catch protocol / library issues for _ in range(10): - result = kerberos.authGSSClientStep(ctx, - str(response['payload'])) + result = kerberos.authGSSClientStep(ctx, str(response["payload"])) if result == -1: - raise OperationFailure('Unknown kerberos ' - 'failure in step function.') + raise OperationFailure("Unknown kerberos " "failure in step function.") - payload = kerberos.authGSSClientResponse(ctx) or '' + payload = kerberos.authGSSClientResponse(ctx) or "" - cmd = SON([('saslContinue', 1), - ('conversationId', response['conversationId']), - ('payload', payload)]) - response = sock_info.command('$external', cmd) + cmd = SON( + [ + ("saslContinue", 1), + ("conversationId", response["conversationId"]), + ("payload", payload), + ] + ) + response = sock_info.command("$external", cmd) if result == kerberos.AUTH_GSS_COMPLETE: break else: - raise OperationFailure('Kerberos ' - 'authentication failed to complete.') + raise OperationFailure("Kerberos " "authentication failed to complete.") # Once the security context is established actually authenticate. # See RFC 4752, Section 3.1, last two paragraphs. - if kerberos.authGSSClientUnwrap(ctx, - str(response['payload'])) != 1: - raise OperationFailure('Unknown kerberos ' - 'failure during GSS_Unwrap step.') + if kerberos.authGSSClientUnwrap(ctx, str(response["payload"])) != 1: + raise OperationFailure("Unknown kerberos " "failure during GSS_Unwrap step.") - if kerberos.authGSSClientWrap(ctx, - kerberos.authGSSClientResponse(ctx), - username) != 1: - raise OperationFailure('Unknown kerberos ' - 'failure during GSS_Wrap step.') + if kerberos.authGSSClientWrap(ctx, kerberos.authGSSClientResponse(ctx), username) != 1: + raise OperationFailure("Unknown kerberos " "failure during GSS_Wrap step.") payload = kerberos.authGSSClientResponse(ctx) - cmd = SON([('saslContinue', 1), - ('conversationId', response['conversationId']), - ('payload', payload)]) - sock_info.command('$external', cmd) + cmd = SON( + [ + ("saslContinue", 1), + ("conversationId", response["conversationId"]), + ("payload", payload), + ] + ) + sock_info.command("$external", cmd) finally: kerberos.authGSSClientClean(ctx) @@ -408,47 +416,45 @@ def _authenticate_gssapi(credentials, sock_info): def _authenticate_plain(credentials, sock_info): - """Authenticate using SASL PLAIN (RFC 4616) - """ + """Authenticate using SASL PLAIN (RFC 4616)""" source = credentials.source username = credentials.username password = credentials.password - payload = ('\x00%s\x00%s' % (username, password)).encode('utf-8') - cmd = SON([('saslStart', 1), - ('mechanism', 'PLAIN'), - ('payload', Binary(payload)), - ('autoAuthorize', 1)]) + payload = ("\x00%s\x00%s" % (username, password)).encode("utf-8") + cmd = SON( + [ + ("saslStart", 1), + ("mechanism", "PLAIN"), + ("payload", Binary(payload)), + ("autoAuthorize", 1), + ] + ) sock_info.command(source, cmd) def _authenticate_x509(credentials, sock_info): - """Authenticate using MONGODB-X509. - """ + """Authenticate using MONGODB-X509.""" ctx = sock_info.auth_ctx if ctx and ctx.speculate_succeeded(): # MONGODB-X509 is done after the speculative auth step. 
return cmd = _X509Context(credentials).speculate_command() - sock_info.command('$external', cmd) + sock_info.command("$external", cmd) def _authenticate_mongo_cr(credentials, sock_info): - """Authenticate using MONGODB-CR. - """ + """Authenticate using MONGODB-CR.""" source = credentials.source username = credentials.username password = credentials.password # Get a nonce - response = sock_info.command(source, {'getnonce': 1}) - nonce = response['nonce'] + response = sock_info.command(source, {"getnonce": 1}) + nonce = response["nonce"] key = _auth_key(nonce, username, password) # Actually authenticate - query = SON([('authenticate', 1), - ('user', username), - ('nonce', nonce), - ('key', key)]) + query = SON([("authenticate", 1), ("user", username), ("nonce", nonce), ("key", key)]) sock_info.command(source, query) @@ -459,29 +465,27 @@ def _authenticate_default(credentials, sock_info): else: source = credentials.source cmd = sock_info.hello_cmd() - cmd['saslSupportedMechs'] = source + '.' + credentials.username - mechs = sock_info.command( - source, cmd, publish_events=False).get( - 'saslSupportedMechs', []) - if 'SCRAM-SHA-256' in mechs: - return _authenticate_scram(credentials, sock_info, 'SCRAM-SHA-256') + cmd["saslSupportedMechs"] = source + "." + credentials.username + mechs = sock_info.command(source, cmd, publish_events=False).get( + "saslSupportedMechs", [] + ) + if "SCRAM-SHA-256" in mechs: + return _authenticate_scram(credentials, sock_info, "SCRAM-SHA-256") else: - return _authenticate_scram(credentials, sock_info, 'SCRAM-SHA-1') + return _authenticate_scram(credentials, sock_info, "SCRAM-SHA-1") else: - return _authenticate_scram(credentials, sock_info, 'SCRAM-SHA-1') + return _authenticate_scram(credentials, sock_info, "SCRAM-SHA-1") _AUTH_MAP: Mapping[str, Callable] = { - 'GSSAPI': _authenticate_gssapi, - 'MONGODB-CR': _authenticate_mongo_cr, - 'MONGODB-X509': _authenticate_x509, - 'MONGODB-AWS': _authenticate_aws, - 'PLAIN': _authenticate_plain, - 'SCRAM-SHA-1': functools.partial( - _authenticate_scram, mechanism='SCRAM-SHA-1'), - 'SCRAM-SHA-256': functools.partial( - _authenticate_scram, mechanism='SCRAM-SHA-256'), - 'DEFAULT': _authenticate_default, + "GSSAPI": _authenticate_gssapi, + "MONGODB-CR": _authenticate_mongo_cr, + "MONGODB-X509": _authenticate_x509, + "MONGODB-AWS": _authenticate_aws, + "PLAIN": _authenticate_plain, + "SCRAM-SHA-1": functools.partial(_authenticate_scram, mechanism="SCRAM-SHA-1"), + "SCRAM-SHA-256": functools.partial(_authenticate_scram, mechanism="SCRAM-SHA-256"), + "DEFAULT": _authenticate_default, } @@ -514,10 +518,9 @@ def __init__(self, credentials, mechanism): self.mechanism = mechanism def speculate_command(self): - nonce, first_bare, cmd = _authenticate_scram_start( - self.credentials, self.mechanism) + nonce, first_bare, cmd = _authenticate_scram_start(self.credentials, self.mechanism) # The 'db' field is included only on the speculative command. - cmd['db'] = self.credentials.source + cmd["db"] = self.credentials.source # Save for later use. 
self.scram_data = (nonce, first_bare) return cmd @@ -525,19 +528,17 @@ def speculate_command(self): class _X509Context(_AuthContext): def speculate_command(self): - cmd = SON([('authenticate', 1), - ('mechanism', 'MONGODB-X509')]) + cmd = SON([("authenticate", 1), ("mechanism", "MONGODB-X509")]) if self.credentials.username is not None: - cmd['user'] = self.credentials.username + cmd["user"] = self.credentials.username return cmd _SPECULATIVE_AUTH_MAP: Mapping[str, Callable] = { - 'MONGODB-X509': _X509Context, - 'SCRAM-SHA-1': functools.partial(_ScramContext, mechanism='SCRAM-SHA-1'), - 'SCRAM-SHA-256': functools.partial(_ScramContext, - mechanism='SCRAM-SHA-256'), - 'DEFAULT': functools.partial(_ScramContext, mechanism='SCRAM-SHA-256'), + "MONGODB-X509": _X509Context, + "SCRAM-SHA-1": functools.partial(_ScramContext, mechanism="SCRAM-SHA-1"), + "SCRAM-SHA-256": functools.partial(_ScramContext, mechanism="SCRAM-SHA-256"), + "DEFAULT": functools.partial(_ScramContext, mechanism="SCRAM-SHA-256"), } @@ -546,4 +547,3 @@ def authenticate(credentials, sock_info): mechanism = credentials.mechanism auth_func = _AUTH_MAP[mechanism] auth_func(credentials, sock_info) - diff --git a/pymongo/auth_aws.py b/pymongo/auth_aws.py index 0233d192d4..4b2af35ea4 100644 --- a/pymongo/auth_aws.py +++ b/pymongo/auth_aws.py @@ -16,13 +16,15 @@ try: import pymongo_auth_aws - from pymongo_auth_aws import (AwsCredential, AwsSaslContext, - PyMongoAuthAwsError) + from pymongo_auth_aws import AwsCredential, AwsSaslContext, PyMongoAuthAwsError + _HAVE_MONGODB_AWS = True except ImportError: + class AwsSaslContext(object): # type: ignore def __init__(self, credentials): pass + _HAVE_MONGODB_AWS = False import bson @@ -47,38 +49,46 @@ def bson_decode(self, data): def _authenticate_aws(credentials, sock_info): - """Authenticate using MONGODB-AWS. 
- """ + """Authenticate using MONGODB-AWS.""" if not _HAVE_MONGODB_AWS: raise ConfigurationError( "MONGODB-AWS authentication requires pymongo-auth-aws: " - "install with: python -m pip install 'pymongo[aws]'") + "install with: python -m pip install 'pymongo[aws]'" + ) if sock_info.max_wire_version < 9: - raise ConfigurationError( - "MONGODB-AWS authentication requires MongoDB version 4.4 or later") + raise ConfigurationError("MONGODB-AWS authentication requires MongoDB version 4.4 or later") try: - ctx = _AwsSaslContext(AwsCredential( - credentials.username, credentials.password, - credentials.mechanism_properties.aws_session_token)) + ctx = _AwsSaslContext( + AwsCredential( + credentials.username, + credentials.password, + credentials.mechanism_properties.aws_session_token, + ) + ) client_payload = ctx.step(None) - client_first = SON([('saslStart', 1), - ('mechanism', 'MONGODB-AWS'), - ('payload', client_payload)]) - server_first = sock_info.command('$external', client_first) + client_first = SON( + [("saslStart", 1), ("mechanism", "MONGODB-AWS"), ("payload", client_payload)] + ) + server_first = sock_info.command("$external", client_first) res = server_first # Limit how many times we loop to catch protocol / library issues for _ in range(10): - client_payload = ctx.step(res['payload']) - cmd = SON([('saslContinue', 1), - ('conversationId', server_first['conversationId']), - ('payload', client_payload)]) - res = sock_info.command('$external', cmd) - if res['done']: + client_payload = ctx.step(res["payload"]) + cmd = SON( + [ + ("saslContinue", 1), + ("conversationId", server_first["conversationId"]), + ("payload", client_payload), + ] + ) + res = sock_info.command("$external", cmd) + if res["done"]: # SASL complete. break except PyMongoAuthAwsError as exc: # Convert to OperationFailure and include pymongo-auth-aws version. - raise OperationFailure('%s (pymongo-auth-aws version %s)' % ( - exc, pymongo_auth_aws.__version__)) + raise OperationFailure( + "%s (pymongo-auth-aws version %s)" % (exc, pymongo_auth_aws.__version__) + ) diff --git a/pymongo/bulk.py b/pymongo/bulk.py index 8d343bb2c6..e043e09fdd 100644 --- a/pymongo/bulk.py +++ b/pymongo/bulk.py @@ -24,13 +24,27 @@ from bson.son import SON from pymongo.client_session import _validate_session_write_concern from pymongo.collation import validate_collation_or_none -from pymongo.common import (validate_is_document_type, validate_is_mapping, - validate_ok_for_replace, validate_ok_for_update) -from pymongo.errors import (BulkWriteError, ConfigurationError, - InvalidOperation, OperationFailure) +from pymongo.common import ( + validate_is_document_type, + validate_is_mapping, + validate_ok_for_replace, + validate_ok_for_update, +) +from pymongo.errors import ( + BulkWriteError, + ConfigurationError, + InvalidOperation, + OperationFailure, +) from pymongo.helpers import _RETRYABLE_ERROR_CODES, _get_wce_doc -from pymongo.message import (_DELETE, _INSERT, _UPDATE, _BulkWriteContext, - _EncryptedBulkWriteContext, _randint) +from pymongo.message import ( + _DELETE, + _INSERT, + _UPDATE, + _BulkWriteContext, + _EncryptedBulkWriteContext, + _randint, +) from pymongo.read_preferences import ReadPreference from pymongo.write_concern import WriteConcern @@ -42,15 +56,14 @@ _UNKNOWN_ERROR = 8 _WRITE_CONCERN_ERROR = 64 -_COMMANDS = ('insert', 'update', 'delete') +_COMMANDS = ("insert", "update", "delete") class _Run(object): - """Represents a batch of write operations. 
- """ + """Represents a batch of write operations.""" + def __init__(self, op_type): - """Initialize a new Run object. - """ + """Initialize a new Run object.""" self.op_type = op_type self.index_map = [] self.ops = [] @@ -77,8 +90,7 @@ def add(self, original_index, operation): def _merge_command(run, full_result, offset, result): - """Merge a write command result into the full bulk result. - """ + """Merge a write command result into the full bulk result.""" affected = result.get("n", 0) if run.op_type == _INSERT: @@ -95,7 +107,7 @@ def _merge_command(run, full_result, offset, result): doc["index"] = run.index(doc["index"] + offset) full_result["upserted"].extend(upserted) full_result["nUpserted"] += n_upserted - full_result["nMatched"] += (affected - n_upserted) + full_result["nMatched"] += affected - n_upserted else: full_result["nMatched"] += affected full_result["nModified"] += result["nModified"] @@ -117,24 +129,22 @@ def _merge_command(run, full_result, offset, result): def _raise_bulk_write_error(full_result): - """Raise a BulkWriteError from the full bulk api result. - """ + """Raise a BulkWriteError from the full bulk api result.""" if full_result["writeErrors"]: - full_result["writeErrors"].sort( - key=lambda error: error["index"]) + full_result["writeErrors"].sort(key=lambda error: error["index"]) raise BulkWriteError(full_result) class _Bulk(object): - """The private guts of the bulk write API. - """ + """The private guts of the bulk write API.""" + def __init__(self, collection, ordered, bypass_document_validation): - """Initialize a _Bulk instance. - """ + """Initialize a _Bulk instance.""" self.collection = collection.with_options( codec_options=collection.codec_options._replace( - unicode_decode_error_handler='replace', - document_class=dict)) + unicode_decode_error_handler="replace", document_class=dict + ) + ) self.ordered = ordered self.ops = [] self.executed = False @@ -159,63 +169,64 @@ def bulk_ctx_class(self): return _BulkWriteContext def add_insert(self, document): - """Add an insert document to the list of ops. - """ + """Add an insert document to the list of ops.""" validate_is_document_type("document", document) # Generate ObjectId client side. - if not (isinstance(document, RawBSONDocument) or '_id' in document): - document['_id'] = ObjectId() + if not (isinstance(document, RawBSONDocument) or "_id" in document): + document["_id"] = ObjectId() self.ops.append((_INSERT, document)) - def add_update(self, selector, update, multi=False, upsert=False, - collation=None, array_filters=None, hint=None): - """Create an update document and add it to the list of ops. - """ + def add_update( + self, + selector, + update, + multi=False, + upsert=False, + collation=None, + array_filters=None, + hint=None, + ): + """Create an update document and add it to the list of ops.""" validate_ok_for_update(update) - cmd = SON([('q', selector), ('u', update), - ('multi', multi), ('upsert', upsert)]) + cmd = SON([("q", selector), ("u", update), ("multi", multi), ("upsert", upsert)]) collation = validate_collation_or_none(collation) if collation is not None: self.uses_collation = True - cmd['collation'] = collation + cmd["collation"] = collation if array_filters is not None: self.uses_array_filters = True - cmd['arrayFilters'] = array_filters + cmd["arrayFilters"] = array_filters if hint is not None: self.uses_hint_update = True - cmd['hint'] = hint + cmd["hint"] = hint if multi: # A bulk_write containing an update_many is not retryable. 
self.is_retryable = False self.ops.append((_UPDATE, cmd)) - def add_replace(self, selector, replacement, upsert=False, - collation=None, hint=None): - """Create a replace document and add it to the list of ops. - """ + def add_replace(self, selector, replacement, upsert=False, collation=None, hint=None): + """Create a replace document and add it to the list of ops.""" validate_ok_for_replace(replacement) - cmd = SON([('q', selector), ('u', replacement), - ('multi', False), ('upsert', upsert)]) + cmd = SON([("q", selector), ("u", replacement), ("multi", False), ("upsert", upsert)]) collation = validate_collation_or_none(collation) if collation is not None: self.uses_collation = True - cmd['collation'] = collation + cmd["collation"] = collation if hint is not None: self.uses_hint_update = True - cmd['hint'] = hint + cmd["hint"] = hint self.ops.append((_UPDATE, cmd)) def add_delete(self, selector, limit, collation=None, hint=None): - """Create a delete document and add it to the list of ops. - """ - cmd = SON([('q', selector), ('limit', limit)]) + """Create a delete document and add it to the list of ops.""" + cmd = SON([("q", selector), ("limit", limit)]) collation = validate_collation_or_none(collation) if collation is not None: self.uses_collation = True - cmd['collation'] = collation + cmd["collation"] = collation if hint is not None: self.uses_hint_delete = True - cmd['hint'] = hint + cmd["hint"] = hint if limit == _DELETE_ALL: # A bulk_write containing a delete_many is not retryable. self.is_retryable = False @@ -247,9 +258,17 @@ def gen_unordered(self): if run.ops: yield run - def _execute_command(self, generator, write_concern, session, - sock_info, op_id, retryable, full_result, - final_write_concern=None): + def _execute_command( + self, + generator, + write_concern, + session, + sock_info, + op_id, + retryable, + full_result, + final_write_concern=None, + ): db_name = self.collection.database.name client = self.collection.database.client listeners = client._event_listeners @@ -272,8 +291,15 @@ def _execute_command(self, generator, write_concern, session, cmd_name = _COMMANDS[run.op_type] bwc = self.bulk_ctx_class( - db_name, cmd_name, sock_info, op_id, listeners, session, - run.op_type, self.collection.codec_options) + db_name, + cmd_name, + sock_info, + op_id, + listeners, + session, + run.op_type, + self.collection.codec_options, + ) while run.idx_offset < len(run.ops): # If this is the last possible operation, use the @@ -281,20 +307,18 @@ def _execute_command(self, generator, write_concern, session, if last_run and (len(run.ops) - run.idx_offset) == 1: write_concern = final_write_concern or write_concern - cmd = SON([(cmd_name, self.collection.name), - ('ordered', self.ordered)]) + cmd = SON([(cmd_name, self.collection.name), ("ordered", self.ordered)]) if not write_concern.is_server_default: - cmd['writeConcern'] = write_concern.document + cmd["writeConcern"] = write_concern.document if self.bypass_doc_val: - cmd['bypassDocumentValidation'] = True + cmd["bypassDocumentValidation"] = True if session: # Start a new retryable write unless one was already # started for this command. 
if retryable and not self.started_retryable_write: session._start_retryable_write() self.started_retryable_write = True - session._apply_to(cmd, retryable, ReadPreference.PRIMARY, - sock_info) + session._apply_to(cmd, retryable, ReadPreference.PRIMARY, sock_info) sock_info.send_cluster_time(cmd, session, client) sock_info.add_server_api(cmd) ops = islice(run.ops, run.idx_offset, None) @@ -304,8 +328,8 @@ def _execute_command(self, generator, write_concern, session, result, to_send = bwc.execute(cmd, ops, client) # Retryable writeConcernErrors halt the execution of this run. - wce = result.get('writeConcernError', {}) - if wce.get('code', 0) in _RETRYABLE_ERROR_CODES: + wce = result.get("writeConcernError", {}) + if wce.get("code", 0) in _RETRYABLE_ERROR_CODES: # Synthesize the full bulk result without modifying the # current one because this write operation may be retried. full = copy.deepcopy(full_result) @@ -327,14 +351,13 @@ def _execute_command(self, generator, write_concern, session, # We're supposed to continue if errors are # at the write concern level (e.g. wtimeout) - if self.ordered and full_result['writeErrors']: + if self.ordered and full_result["writeErrors"]: break # Reset our state self.current_run = run = self.next_run def execute_command(self, generator, write_concern, session): - """Execute using write commands. - """ + """Execute using write commands.""" # nModified is only reported for write commands, not legacy ops. full_result = { "writeErrors": [], @@ -350,21 +373,19 @@ def execute_command(self, generator, write_concern, session): def retryable_bulk(session, sock_info, retryable): self._execute_command( - generator, write_concern, session, sock_info, op_id, - retryable, full_result) + generator, write_concern, session, sock_info, op_id, retryable, full_result + ) client = self.collection.database.client with client._tmp_session(session) as s: - client._retry_with_session( - self.is_retryable, retryable_bulk, s, self) + client._retry_with_session(self.is_retryable, retryable_bulk, s, self) if full_result["writeErrors"] or full_result["writeConcernErrors"]: _raise_bulk_write_error(full_result) return full_result def execute_op_msg_no_results(self, sock_info, generator): - """Execute write commands with OP_MSG and w=0 writeConcern, unordered. - """ + """Execute write commands with OP_MSG and w=0 writeConcern, unordered.""" db_name = self.collection.database.name client = self.collection.database.client listeners = client._event_listeners @@ -377,13 +398,24 @@ def execute_op_msg_no_results(self, sock_info, generator): while run: cmd_name = _COMMANDS[run.op_type] bwc = self.bulk_ctx_class( - db_name, cmd_name, sock_info, op_id, listeners, None, - run.op_type, self.collection.codec_options) + db_name, + cmd_name, + sock_info, + op_id, + listeners, + None, + run.op_type, + self.collection.codec_options, + ) while run.idx_offset < len(run.ops): - cmd = SON([(cmd_name, self.collection.name), - ('ordered', False), - ('writeConcern', {'w': 0})]) + cmd = SON( + [ + (cmd_name, self.collection.name), + ("ordered", False), + ("writeConcern", {"w": 0}), + ] + ) sock_info.add_server_api(cmd) ops = islice(run.ops, run.idx_offset, None) # Run as many ops as possible. @@ -392,8 +424,7 @@ def execute_op_msg_no_results(self, sock_info, generator): self.current_run = run = next(generator, None) def execute_command_no_results(self, sock_info, generator, write_concern): - """Execute write commands with OP_MSG and w=0 WriteConcern, ordered. 
- """ + """Execute write commands with OP_MSG and w=0 WriteConcern, ordered.""" full_result = { "writeErrors": [], "writeConcernErrors": [], @@ -411,45 +442,50 @@ def execute_command_no_results(self, sock_info, generator, write_concern): op_id = _randint() try: self._execute_command( - generator, initial_write_concern, None, - sock_info, op_id, False, full_result, write_concern) + generator, + initial_write_concern, + None, + sock_info, + op_id, + False, + full_result, + write_concern, + ) except OperationFailure: pass def execute_no_results(self, sock_info, generator, write_concern): - """Execute all operations, returning no results (w=0). - """ + """Execute all operations, returning no results (w=0).""" if self.uses_collation: - raise ConfigurationError( - 'Collation is unsupported for unacknowledged writes.') + raise ConfigurationError("Collation is unsupported for unacknowledged writes.") if self.uses_array_filters: - raise ConfigurationError( - 'arrayFilters is unsupported for unacknowledged writes.') + raise ConfigurationError("arrayFilters is unsupported for unacknowledged writes.") # Guard against unsupported unacknowledged writes. unack = write_concern and not write_concern.acknowledged if unack and self.uses_hint_delete and sock_info.max_wire_version < 9: raise ConfigurationError( - 'Must be connected to MongoDB 4.4+ to use hint on unacknowledged delete commands.') + "Must be connected to MongoDB 4.4+ to use hint on unacknowledged delete commands." + ) if unack and self.uses_hint_update and sock_info.max_wire_version < 8: raise ConfigurationError( - 'Must be connected to MongoDB 4.2+ to use hint on unacknowledged update commands.') + "Must be connected to MongoDB 4.2+ to use hint on unacknowledged update commands." + ) # Cannot have both unacknowledged writes and bypass document validation. if self.bypass_doc_val: - raise OperationFailure("Cannot set bypass_document_validation with" - " unacknowledged write concern") + raise OperationFailure( + "Cannot set bypass_document_validation with" " unacknowledged write concern" + ) if self.ordered: return self.execute_command_no_results(sock_info, generator, write_concern) return self.execute_op_msg_no_results(sock_info, generator) def execute(self, write_concern, session): - """Execute operations. 
- """ + """Execute operations.""" if not self.ops: - raise InvalidOperation('No operations to execute') + raise InvalidOperation("No operations to execute") if self.executed: - raise InvalidOperation('Bulk operations can ' - 'only be executed once.') + raise InvalidOperation("Bulk operations can " "only be executed once.") self.executed = True write_concern = write_concern or self.collection.write_concern session = _validate_session_write_concern(session, write_concern) diff --git a/pymongo/change_stream.py b/pymongo/change_stream.py index 69446fdecf..a35c9cb844 100644 --- a/pymongo/change_stream.py +++ b/pymongo/change_stream.py @@ -15,43 +15,51 @@ """Watch changes on a collection, a database, or the entire cluster.""" import copy -from typing import (TYPE_CHECKING, Any, Dict, Generic, Iterator, Mapping, - Optional, Union) +from typing import TYPE_CHECKING, Any, Dict, Generic, Iterator, Mapping, Optional, Union from bson import _bson_to_dict from bson.raw_bson import RawBSONDocument from bson.timestamp import Timestamp from pymongo import common -from pymongo.aggregation import (_CollectionAggregationCommand, - _DatabaseAggregationCommand) +from pymongo.aggregation import ( + _CollectionAggregationCommand, + _DatabaseAggregationCommand, +) from pymongo.collation import validate_collation_or_none from pymongo.command_cursor import CommandCursor -from pymongo.errors import (ConnectionFailure, CursorNotFound, - InvalidOperation, OperationFailure, PyMongoError) +from pymongo.errors import ( + ConnectionFailure, + CursorNotFound, + InvalidOperation, + OperationFailure, + PyMongoError, +) from pymongo.typings import _CollationIn, _DocumentType, _Pipeline # The change streams spec considers the following server errors from the # getMore command non-resumable. All other getMore errors are resumable. -_RESUMABLE_GETMORE_ERRORS = frozenset([ - 6, # HostUnreachable - 7, # HostNotFound - 89, # NetworkTimeout - 91, # ShutdownInProgress - 189, # PrimarySteppedDown - 262, # ExceededTimeLimit - 9001, # SocketException - 10107, # NotWritablePrimary - 11600, # InterruptedAtShutdown - 11602, # InterruptedDueToReplStateChange - 13435, # NotPrimaryNoSecondaryOk - 13436, # NotPrimaryOrSecondary - 63, # StaleShardVersion - 150, # StaleEpoch - 13388, # StaleConfig - 234, # RetryChangeStream - 133, # FailedToSatisfyReadPreference - 216, # ElectionInProgress -]) +_RESUMABLE_GETMORE_ERRORS = frozenset( + [ + 6, # HostUnreachable + 7, # HostNotFound + 89, # NetworkTimeout + 91, # ShutdownInProgress + 189, # PrimarySteppedDown + 262, # ExceededTimeLimit + 9001, # SocketException + 10107, # NotWritablePrimary + 11600, # InterruptedAtShutdown + 11602, # InterruptedDueToReplStateChange + 13435, # NotPrimaryNoSecondaryOk + 13436, # NotPrimaryOrSecondary + 63, # StaleShardVersion + 150, # StaleEpoch + 13388, # StaleConfig + 234, # RetryChangeStream + 133, # FailedToSatisfyReadPreference + 216, # ElectionInProgress + ] +) if TYPE_CHECKING: @@ -72,9 +80,12 @@ class ChangeStream(Generic[_DocumentType]): .. versionadded:: 3.6 .. seealso:: The MongoDB documentation on `changeStreams `_. 
""" + def __init__( self, - target: Union["MongoClient[_DocumentType]", "Database[_DocumentType]", "Collection[_DocumentType]"], + target: Union[ + "MongoClient[_DocumentType]", "Database[_DocumentType]", "Collection[_DocumentType]" + ], pipeline: Optional[_Pipeline], full_document: Optional[str], resume_after: Optional[Mapping[str, Any]], @@ -87,8 +98,8 @@ def __init__( ) -> None: if pipeline is None: pipeline = [] - pipeline = common.validate_list('pipeline', pipeline) - common.validate_string_or_none('full_document', full_document) + pipeline = common.validate_list("pipeline", pipeline) + common.validate_string_or_none("full_document", full_document) validate_collation_or_none(collation) common.validate_non_negative_integer_or_none("batchSize", batch_size) @@ -99,8 +110,8 @@ def __init__( # Keep the type registry so that we support encoding custom types # in the pipeline. self._target = target.with_options( # type: ignore - codec_options=target.codec_options.with_options( - document_class=RawBSONDocument)) + codec_options=target.codec_options.with_options(document_class=RawBSONDocument) + ) else: self._target = target @@ -126,24 +137,24 @@ def _aggregation_command_class(self): @property def _client(self): """The client against which the aggregation commands for - this ChangeStream will be run. """ + this ChangeStream will be run.""" raise NotImplementedError def _change_stream_options(self): """Return the options dict for the $changeStream pipeline stage.""" options: Dict[str, Any] = {} if self._full_document is not None: - options['fullDocument'] = self._full_document + options["fullDocument"] = self._full_document resume_token = self.resume_token if resume_token is not None: if self._uses_start_after: - options['startAfter'] = resume_token + options["startAfter"] = resume_token else: - options['resumeAfter'] = resume_token + options["resumeAfter"] = resume_token if self._start_at_operation_time is not None: - options['startAtOperationTime'] = self._start_at_operation_time + options["startAtOperationTime"] = self._start_at_operation_time return options def _command_options(self): @@ -158,7 +169,7 @@ def _command_options(self): def _aggregation_pipeline(self): """Return the full aggregation pipeline for this ChangeStream.""" options = self._change_stream_options() - full_pipeline: list = [{'$changeStream': options}] + full_pipeline: list = [{"$changeStream": options}] full_pipeline.extend(self._pipeline) return full_pipeline @@ -170,38 +181,43 @@ def _process_result(self, result, sock_info): This is implemented as a callback because we need access to the wire version in order to determine whether to cache this value. """ - if not result['cursor']['firstBatch']: - if 'postBatchResumeToken' in result['cursor']: - self._resume_token = result['cursor']['postBatchResumeToken'] - elif (self._start_at_operation_time is None and - self._uses_resume_after is False and - self._uses_start_after is False and - sock_info.max_wire_version >= 7): + if not result["cursor"]["firstBatch"]: + if "postBatchResumeToken" in result["cursor"]: + self._resume_token = result["cursor"]["postBatchResumeToken"] + elif ( + self._start_at_operation_time is None + and self._uses_resume_after is False + and self._uses_start_after is False + and sock_info.max_wire_version >= 7 + ): self._start_at_operation_time = result.get("operationTime") # PYTHON-2181: informative error on missing operationTime. 
if self._start_at_operation_time is None: raise OperationFailure( "Expected field 'operationTime' missing from command " - "response : %r" % (result, )) + "response : %r" % (result,) + ) def _run_aggregation_cmd(self, session, explicit_session): """Run the full aggregation pipeline for this ChangeStream and return the corresponding CommandCursor. """ cmd = self._aggregation_command_class( - self._target, CommandCursor, self._aggregation_pipeline(), - self._command_options(), explicit_session, - result_processor=self._process_result) + self._target, + CommandCursor, + self._aggregation_pipeline(), + self._command_options(), + explicit_session, + result_processor=self._process_result, + ) return self._client._retryable_read( - cmd.get_cursor, self._target._read_preference_for(session), - session) + cmd.get_cursor, self._target._read_preference_for(session), session + ) def _create_cursor(self): with self._client._tmp_session(self._session, close=False) as s: - return self._run_aggregation_cmd( - session=s, - explicit_session=self._session is not None) + return self._run_aggregation_cmd(session=s, explicit_session=self._session is not None) def _resume(self): """Reestablish this change stream after a resumable error.""" @@ -321,10 +337,9 @@ def try_next(self) -> Optional[_DocumentType]: except OperationFailure as exc: if exc._max_wire_version is None: raise - is_resumable = ((exc._max_wire_version >= 9 and - exc.has_error_label("ResumableChangeStreamError")) or - (exc._max_wire_version < 9 and - exc.code in _RESUMABLE_GETMORE_ERRORS)) + is_resumable = ( + exc._max_wire_version >= 9 and exc.has_error_label("ResumableChangeStreamError") + ) or (exc._max_wire_version < 9 and exc.code in _RESUMABLE_GETMORE_ERRORS) if not is_resumable: raise self._resume() @@ -343,17 +358,16 @@ def try_next(self) -> Optional[_DocumentType]: # Else, changes are available. try: - resume_token = change['_id'] + resume_token = change["_id"] except KeyError: self.close() raise InvalidOperation( - "Cannot provide resume functionality when the resume " - "token is missing.") + "Cannot provide resume functionality when the resume " "token is missing." + ) # If this is the last change document from the current batch, cache the # postBatchResumeToken. - if (not self._cursor._has_next() and - self._cursor._post_batch_resume_token): + if not self._cursor._has_next() and self._cursor._post_batch_resume_token: resume_token = self._cursor._post_batch_resume_token # Hereafter, don't use startAfter; instead use resumeAfter. @@ -383,6 +397,7 @@ class CollectionChangeStream(ChangeStream, Generic[_DocumentType]): .. versionadded:: 3.7 """ + @property def _aggregation_command_class(self): return _CollectionAggregationCommand @@ -400,6 +415,7 @@ class DatabaseChangeStream(ChangeStream, Generic[_DocumentType]): .. versionadded:: 3.7 """ + @property def _aggregation_command_class(self): return _DatabaseAggregationCommand @@ -417,6 +433,7 @@ class ClusterChangeStream(DatabaseChangeStream, Generic[_DocumentType]): .. 
versionadded:: 3.7 """ + def _change_stream_options(self): options = super(ClusterChangeStream, self)._change_stream_options() options["allChangesForCluster"] = True diff --git a/pymongo/client_options.py b/pymongo/client_options.py index 14ef0f781e..4987601d5c 100644 --- a/pymongo/client_options.py +++ b/pymongo/client_options.py @@ -23,8 +23,7 @@ from pymongo.monitoring import _EventListeners from pymongo.pool import PoolOptions from pymongo.read_concern import ReadConcern -from pymongo.read_preferences import (make_read_preference, - read_pref_mode_from_name) +from pymongo.read_preferences import make_read_preference, read_pref_mode_from_name from pymongo.server_selectors import any_server_selector from pymongo.ssl_support import get_ssl_context from pymongo.write_concern import WriteConcern @@ -32,63 +31,69 @@ def _parse_credentials(username, password, database, options): """Parse authentication credentials.""" - mechanism = options.get('authmechanism', 'DEFAULT' if username else None) - source = options.get('authsource') + mechanism = options.get("authmechanism", "DEFAULT" if username else None) + source = options.get("authsource") if username or mechanism: - return _build_credentials_tuple( - mechanism, source, username, password, options, database) + return _build_credentials_tuple(mechanism, source, username, password, options, database) return None def _parse_read_preference(options): """Parse read preference options.""" - if 'read_preference' in options: - return options['read_preference'] + if "read_preference" in options: + return options["read_preference"] - name = options.get('readpreference', 'primary') + name = options.get("readpreference", "primary") mode = read_pref_mode_from_name(name) - tags = options.get('readpreferencetags') - max_staleness = options.get('maxstalenessseconds', -1) + tags = options.get("readpreferencetags") + max_staleness = options.get("maxstalenessseconds", -1) return make_read_preference(mode, tags, max_staleness) def _parse_write_concern(options): """Parse write concern options.""" - concern = options.get('w') - wtimeout = options.get('wtimeoutms') - j = options.get('journal') - fsync = options.get('fsync') + concern = options.get("w") + wtimeout = options.get("wtimeoutms") + j = options.get("journal") + fsync = options.get("fsync") return WriteConcern(concern, wtimeout, j, fsync) def _parse_read_concern(options): """Parse read concern options.""" - concern = options.get('readconcernlevel') + concern = options.get("readconcernlevel") return ReadConcern(concern) def _parse_ssl_options(options): """Parse ssl options.""" - use_tls = options.get('tls') + use_tls = options.get("tls") if use_tls is not None: - validate_boolean('tls', use_tls) + validate_boolean("tls", use_tls) - certfile = options.get('tlscertificatekeyfile') - passphrase = options.get('tlscertificatekeyfilepassword') - ca_certs = options.get('tlscafile') - crlfile = options.get('tlscrlfile') - allow_invalid_certificates = options.get('tlsallowinvalidcertificates', False) - allow_invalid_hostnames = options.get('tlsallowinvalidhostnames', False) - disable_ocsp_endpoint_check = options.get('tlsdisableocspendpointcheck', False) + certfile = options.get("tlscertificatekeyfile") + passphrase = options.get("tlscertificatekeyfilepassword") + ca_certs = options.get("tlscafile") + crlfile = options.get("tlscrlfile") + allow_invalid_certificates = options.get("tlsallowinvalidcertificates", False) + allow_invalid_hostnames = options.get("tlsallowinvalidhostnames", False) + 
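# Sketch (assumption) of the behavior being parsed above: any non-null
# tls* option implies tls=True, while tls=False combined with one of them
# raises ConfigurationError.
from pymongo import MongoClient

client = MongoClient("mongodb://example.com/?tls=true&tlsCAFile=/etc/ssl/ca.pem")
client = MongoClient("example.com", tlsCAFile="/etc/ssl/ca.pem")  # implies tls=True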
disable_ocsp_endpoint_check = options.get("tlsdisableocspendpointcheck", False) enabled_tls_opts = [] - for opt in ('tlscertificatekeyfile', 'tlscertificatekeyfilepassword', - 'tlscafile', 'tlscrlfile'): + for opt in ( + "tlscertificatekeyfile", + "tlscertificatekeyfilepassword", + "tlscafile", + "tlscrlfile", + ): # Any non-null value of these options implies tls=True. if opt in options and options[opt]: enabled_tls_opts.append(opt) - for opt in ('tlsallowinvalidcertificates', 'tlsallowinvalidhostnames', - 'tlsdisableocspendpointcheck'): + for opt in ( + "tlsallowinvalidcertificates", + "tlsallowinvalidhostnames", + "tlsdisableocspendpointcheck", + ): # A value of False for these options implies tls=True. if opt in options and not options[opt]: enabled_tls_opts.append(opt) @@ -99,10 +104,11 @@ def _parse_ssl_options(options): use_tls = True elif not use_tls: # Error since tls is explicitly disabled but a tls option is set. - raise ConfigurationError("TLS has not been enabled but the " - "following tls parameters have been set: " - "%s. Please set `tls=True` or remove." - % ', '.join(enabled_tls_opts)) + raise ConfigurationError( + "TLS has not been enabled but the " + "following tls parameters have been set: " + "%s. Please set `tls=True` or remove." % ", ".join(enabled_tls_opts) + ) if use_tls: ctx = get_ssl_context( @@ -112,7 +118,8 @@ def _parse_ssl_options(options): crlfile, allow_invalid_certificates, allow_invalid_hostnames, - disable_ocsp_endpoint_check) + disable_ocsp_endpoint_check, + ) return ctx, allow_invalid_hostnames return None, allow_invalid_hostnames @@ -120,40 +127,42 @@ def _parse_ssl_options(options): def _parse_pool_options(username, password, database, options): """Parse connection pool options.""" credentials = _parse_credentials(username, password, database, options) - max_pool_size = options.get('maxpoolsize', common.MAX_POOL_SIZE) - min_pool_size = options.get('minpoolsize', common.MIN_POOL_SIZE) - max_idle_time_seconds = options.get( - 'maxidletimems', common.MAX_IDLE_TIME_SEC) + max_pool_size = options.get("maxpoolsize", common.MAX_POOL_SIZE) + min_pool_size = options.get("minpoolsize", common.MIN_POOL_SIZE) + max_idle_time_seconds = options.get("maxidletimems", common.MAX_IDLE_TIME_SEC) if max_pool_size is not None and min_pool_size > max_pool_size: raise ValueError("minPoolSize must be smaller or equal to maxPoolSize") - connect_timeout = options.get('connecttimeoutms', common.CONNECT_TIMEOUT) - socket_timeout = options.get('sockettimeoutms') - wait_queue_timeout = options.get( - 'waitqueuetimeoutms', common.WAIT_QUEUE_TIMEOUT) - event_listeners = options.get('event_listeners') - appname = options.get('appname') - driver = options.get('driver') - server_api = options.get('server_api') + connect_timeout = options.get("connecttimeoutms", common.CONNECT_TIMEOUT) + socket_timeout = options.get("sockettimeoutms") + wait_queue_timeout = options.get("waitqueuetimeoutms", common.WAIT_QUEUE_TIMEOUT) + event_listeners = options.get("event_listeners") + appname = options.get("appname") + driver = options.get("driver") + server_api = options.get("server_api") compression_settings = CompressionSettings( - options.get('compressors', []), - options.get('zlibcompressionlevel', -1)) + options.get("compressors", []), options.get("zlibcompressionlevel", -1) + ) ssl_context, tls_allow_invalid_hostnames = _parse_ssl_options(options) - load_balanced = options.get('loadbalanced') - max_connecting = options.get('maxconnecting', common.MAX_CONNECTING) - return 
PoolOptions(max_pool_size, - min_pool_size, - max_idle_time_seconds, - connect_timeout, socket_timeout, - wait_queue_timeout, - ssl_context, tls_allow_invalid_hostnames, - _EventListeners(event_listeners), - appname, - driver, - compression_settings, - max_connecting=max_connecting, - server_api=server_api, - load_balanced=load_balanced, - credentials=credentials) + load_balanced = options.get("loadbalanced") + max_connecting = options.get("maxconnecting", common.MAX_CONNECTING) + return PoolOptions( + max_pool_size, + min_pool_size, + max_idle_time_seconds, + connect_timeout, + socket_timeout, + wait_queue_timeout, + ssl_context, + tls_allow_invalid_hostnames, + _EventListeners(event_listeners), + appname, + driver, + compression_settings, + max_connecting=max_connecting, + server_api=server_api, + load_balanced=load_balanced, + credentials=credentials, + ) class ClientOptions(object): @@ -167,28 +176,25 @@ class ClientOptions(object): def __init__(self, username, password, database, options): self.__options = options self.__codec_options = _parse_codec_options(options) - self.__direct_connection = options.get('directconnection') - self.__local_threshold_ms = options.get( - 'localthresholdms', common.LOCAL_THRESHOLD_MS) + self.__direct_connection = options.get("directconnection") + self.__local_threshold_ms = options.get("localthresholdms", common.LOCAL_THRESHOLD_MS) # self.__server_selection_timeout is in seconds. Must use full name for # common.SERVER_SELECTION_TIMEOUT because it is set directly by tests. self.__server_selection_timeout = options.get( - 'serverselectiontimeoutms', common.SERVER_SELECTION_TIMEOUT) - self.__pool_options = _parse_pool_options( - username, password, database, options) + "serverselectiontimeoutms", common.SERVER_SELECTION_TIMEOUT + ) + self.__pool_options = _parse_pool_options(username, password, database, options) self.__read_preference = _parse_read_preference(options) - self.__replica_set_name = options.get('replicaset') + self.__replica_set_name = options.get("replicaset") self.__write_concern = _parse_write_concern(options) self.__read_concern = _parse_read_concern(options) - self.__connect = options.get('connect') - self.__heartbeat_frequency = options.get( - 'heartbeatfrequencyms', common.HEARTBEAT_FREQUENCY) - self.__retry_writes = options.get('retrywrites', common.RETRY_WRITES) - self.__retry_reads = options.get('retryreads', common.RETRY_READS) - self.__server_selector = options.get( - 'server_selector', any_server_selector) - self.__auto_encryption_opts = options.get('auto_encryption_opts') - self.__load_balanced = options.get('loadbalanced') + self.__connect = options.get("connect") + self.__heartbeat_frequency = options.get("heartbeatfrequencyms", common.HEARTBEAT_FREQUENCY) + self.__retry_writes = options.get("retrywrites", common.RETRY_WRITES) + self.__retry_reads = options.get("retryreads", common.RETRY_READS) + self.__server_selector = options.get("server_selector", any_server_selector) + self.__auto_encryption_opts = options.get("auto_encryption_opts") + self.__load_balanced = options.get("loadbalanced") @property def _options(self): diff --git a/pymongo/client_session.py b/pymongo/client_session.py index 3d4ad514e5..44381c0241 100644 --- a/pymongo/client_session.py +++ b/pymongo/client_session.py @@ -135,17 +135,30 @@ import time import uuid from collections.abc import Mapping as _Mapping -from typing import (TYPE_CHECKING, Any, Callable, ContextManager, Generic, - Mapping, Optional, TypeVar) +from typing import ( + TYPE_CHECKING, + Any, + 
Callable, + ContextManager, + Generic, + Mapping, + Optional, + TypeVar, +) from bson.binary import Binary from bson.int64 import Int64 from bson.son import SON from bson.timestamp import Timestamp from pymongo.cursor import _SocketManager -from pymongo.errors import (ConfigurationError, ConnectionFailure, - InvalidOperation, OperationFailure, PyMongoError, - WTimeoutError) +from pymongo.errors import ( + ConfigurationError, + ConnectionFailure, + InvalidOperation, + OperationFailure, + PyMongoError, + WTimeoutError, +) from pymongo.helpers import _RETRYABLE_ERROR_CODES from pymongo.read_concern import ReadConcern from pymongo.read_preferences import ReadPreference, _ServerMode @@ -170,6 +183,7 @@ class SessionOptions(object): .. versionchanged:: 3.12 Added the ``snapshot`` parameter. """ + def __init__( self, causal_consistency: Optional[bool] = None, @@ -178,8 +192,7 @@ def __init__( ) -> None: if snapshot: if causal_consistency: - raise ConfigurationError('snapshot reads do not support ' - 'causal_consistency=True') + raise ConfigurationError("snapshot reads do not support " "causal_consistency=True") causal_consistency = False elif causal_consistency is None: causal_consistency = True @@ -188,8 +201,9 @@ def __init__( if not isinstance(default_transaction_options, TransactionOptions): raise TypeError( "default_transaction_options must be an instance of " - "pymongo.client_session.TransactionOptions, not: %r" % - (default_transaction_options,)) + "pymongo.client_session.TransactionOptions, not: %r" + % (default_transaction_options,) + ) self._default_transaction_options = default_transaction_options self._snapshot = snapshot @@ -243,12 +257,13 @@ class TransactionOptions(object): .. versionadded:: 3.7 """ + def __init__( self, read_concern: Optional[ReadConcern] = None, write_concern: Optional[WriteConcern] = None, read_preference: Optional[_ServerMode] = None, - max_commit_time_ms: Optional[int] = None + max_commit_time_ms: Optional[int] = None, ) -> None: self._read_concern = read_concern self._write_concern = write_concern @@ -256,27 +271,31 @@ def __init__( self._max_commit_time_ms = max_commit_time_ms if read_concern is not None: if not isinstance(read_concern, ReadConcern): - raise TypeError("read_concern must be an instance of " - "pymongo.read_concern.ReadConcern, not: %r" % - (read_concern,)) + raise TypeError( + "read_concern must be an instance of " + "pymongo.read_concern.ReadConcern, not: %r" % (read_concern,) + ) if write_concern is not None: if not isinstance(write_concern, WriteConcern): - raise TypeError("write_concern must be an instance of " - "pymongo.write_concern.WriteConcern, not: %r" % - (write_concern,)) + raise TypeError( + "write_concern must be an instance of " + "pymongo.write_concern.WriteConcern, not: %r" % (write_concern,) + ) if not write_concern.acknowledged: raise ConfigurationError( "transactions do not support unacknowledged write concern" - ": %r" % (write_concern,)) + ": %r" % (write_concern,) + ) if read_preference is not None: if not isinstance(read_preference, _ServerMode): - raise TypeError("%r is not valid for read_preference. See " - "pymongo.read_preferences for valid " - "options." % (read_preference,)) + raise TypeError( + "%r is not valid for read_preference. See " + "pymongo.read_preferences for valid " + "options." 
% (read_preference,) + ) if max_commit_time_ms is not None: if not isinstance(max_commit_time_ms, int): - raise TypeError( - "max_commit_time_ms must be an integer or None") + raise TypeError("max_commit_time_ms must be an integer or None") @property def read_concern(self) -> Optional[ReadConcern]: @@ -290,8 +309,7 @@ def write_concern(self) -> Optional[WriteConcern]: @property def read_preference(self) -> Optional[_ServerMode]: - """This transaction's :class:`~pymongo.read_preferences.ReadPreference`. - """ + """This transaction's :class:`~pymongo.read_preferences.ReadPreference`.""" return self._read_preference @property @@ -319,14 +337,15 @@ def _validate_session_write_concern(session, write_concern): return None else: raise ConfigurationError( - 'Explicit sessions are incompatible with ' - 'unacknowledged write concern: %r' % ( - write_concern,)) + "Explicit sessions are incompatible with " + "unacknowledged write concern: %r" % (write_concern,) + ) return session class _TransactionContext(object): """Internal transaction context manager for start_transaction.""" + def __init__(self, session): self.__session = session @@ -352,6 +371,7 @@ class _TxnState(object): class _Transaction(object): """Internal class to hold transaction information in a ClientSession.""" + def __init__(self, opts, client): self.opts = opts self.state = _TxnState.NONE @@ -415,10 +435,12 @@ def _max_time_expired_error(exc): # From the transactions spec, all the retryable writes errors plus # WriteConcernFailed. -_UNKNOWN_COMMIT_ERROR_CODES = _RETRYABLE_ERROR_CODES | frozenset([ - 64, # WriteConcernFailed - 50, # MaxTimeMSExpired -]) +_UNKNOWN_COMMIT_ERROR_CODES = _RETRYABLE_ERROR_CODES | frozenset( + [ + 64, # WriteConcernFailed + 50, # MaxTimeMSExpired + ] +) # From the Convenient API for Transactions spec, with_transaction must # halt retries after 120 seconds. @@ -450,8 +472,13 @@ class ClientSession(Generic[_DocumentType]): :class:`ClientSession`, call :meth:`~pymongo.mongo_client.MongoClient.start_session`. """ + def __init__( - self, client: "MongoClient[_DocumentType]", server_session: Any, options: SessionOptions, implicit: bool + self, + client: "MongoClient[_DocumentType]", + server_session: Any, + options: SessionOptions, + implicit: bool, ) -> None: # A MongoClient, a _ServerSession, a SessionOptions, and a set. self._client: MongoClient[_DocumentType] = client @@ -630,17 +657,17 @@ def callback(session, custom_arg, custom_kwarg=None): """ start_time = time.monotonic() while True: - self.start_transaction( - read_concern, write_concern, read_preference, - max_commit_time_ms) + self.start_transaction(read_concern, write_concern, read_preference, max_commit_time_ms) try: ret = callback(self) except Exception as exc: if self.in_transaction: self.abort_transaction() - if (isinstance(exc, PyMongoError) and - exc.has_error_label("TransientTransactionError") and - _within_time_limit(start_time)): + if ( + isinstance(exc, PyMongoError) + and exc.has_error_label("TransientTransactionError") + and _within_time_limit(start_time) + ): # Retry the entire transaction. continue raise @@ -653,14 +680,17 @@ def callback(session, custom_arg, custom_kwarg=None): try: self.commit_transaction() except PyMongoError as exc: - if (exc.has_error_label("UnknownTransactionCommitResult") - and _within_time_limit(start_time) - and not _max_time_expired_error(exc)): + if ( + exc.has_error_label("UnknownTransactionCommitResult") + and _within_time_limit(start_time) + and not _max_time_expired_error(exc) + ): # Retry the commit. 
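# Hedged sketch (assumption) of the retry loop above: with_transaction
# re-runs the callback on TransientTransactionError and re-commits on
# UnknownTransactionCommitResult, subject to the 120-second limit.
from pymongo import MongoClient

client = MongoClient()

def callback(session):
    client.db.orders.insert_one({"sku": "abc123", "qty": 100}, session=session)
    client.db.inventory.update_one(
        {"sku": "abc123"}, {"$inc": {"qty": -100}}, session=session
    )

with client.start_session() as session:
    session.with_transaction(callback)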
continue - if (exc.has_error_label("TransientTransactionError") and - _within_time_limit(start_time)): + if exc.has_error_label("TransientTransactionError") and _within_time_limit( + start_time + ): # Retry the entire transaction. break raise @@ -687,23 +717,22 @@ def start_transaction( self._check_ended() if self.options.snapshot: - raise InvalidOperation("Transactions are not supported in " - "snapshot sessions") + raise InvalidOperation("Transactions are not supported in " "snapshot sessions") if self.in_transaction: raise InvalidOperation("Transaction already in progress") read_concern = self._inherit_option("read_concern", read_concern) write_concern = self._inherit_option("write_concern", write_concern) - read_preference = self._inherit_option( - "read_preference", read_preference) + read_preference = self._inherit_option("read_preference", read_preference) if max_commit_time_ms is None: opts = self.options.default_transaction_options if opts: max_commit_time_ms = opts.max_commit_time_ms self._transaction.opts = TransactionOptions( - read_concern, write_concern, read_preference, max_commit_time_ms) + read_concern, write_concern, read_preference, max_commit_time_ms + ) self._transaction.reset() self._transaction.state = _TxnState.STARTING self._start_retryable_write() @@ -723,8 +752,7 @@ def commit_transaction(self) -> None: self._transaction.state = _TxnState.COMMITTED_EMPTY return elif state is _TxnState.ABORTED: - raise InvalidOperation( - "Cannot call commitTransaction after calling abortTransaction") + raise InvalidOperation("Cannot call commitTransaction after calling abortTransaction") elif state is _TxnState.COMMITTED: # We're explicitly retrying the commit, move the state back to # "in progress" so that in_transaction returns true. @@ -770,8 +798,7 @@ def abort_transaction(self) -> None: elif state is _TxnState.ABORTED: raise InvalidOperation("Cannot call abortTransaction twice") elif state in (_TxnState.COMMITTED, _TxnState.COMMITTED_EMPTY): - raise InvalidOperation( - "Cannot call abortTransaction after calling commitTransaction") + raise InvalidOperation("Cannot call abortTransaction after calling commitTransaction") try: self._finish_transaction_with_retry("abortTransaction") @@ -788,8 +815,10 @@ def _finish_transaction_with_retry(self, command_name): :Parameters: - `command_name`: Either "commitTransaction" or "abortTransaction". 
""" + def func(session, sock_info, retryable): return self._finish_transaction(sock_info, command_name) + return self._client._retry_internal(True, func, self, None) def _finish_transaction(self, sock_info, command_name): @@ -799,7 +828,7 @@ def _finish_transaction(self, sock_info, command_name): cmd = SON([(command_name, 1)]) if command_name == "commitTransaction": if opts.max_commit_time_ms: - cmd['maxTimeMS'] = opts.max_commit_time_ms + cmd["maxTimeMS"] = opts.max_commit_time_ms # Transaction spec says that after the initial commit attempt, # subsequent commitTransaction commands should be upgraded to use @@ -811,14 +840,11 @@ def _finish_transaction(self, sock_info, command_name): wc = WriteConcern(**wc_doc) if self._transaction.recovery_token: - cmd['recoveryToken'] = self._transaction.recovery_token + cmd["recoveryToken"] = self._transaction.recovery_token return self._client.admin._command( - sock_info, - cmd, - session=self, - write_concern=wc, - parse_write_concern_error=True) + sock_info, cmd, session=self, write_concern=wc, parse_write_concern_error=True + ) def _advance_cluster_time(self, cluster_time): """Internal cluster time helper.""" @@ -837,8 +863,7 @@ def advance_cluster_time(self, cluster_time: Mapping[str, Any]) -> None: another `ClientSession` instance. """ if not isinstance(cluster_time, _Mapping): - raise TypeError( - "cluster_time must be a subclass of collections.Mapping") + raise TypeError("cluster_time must be a subclass of collections.Mapping") if not isinstance(cluster_time.get("clusterTime"), Timestamp): raise ValueError("Invalid cluster_time") self._advance_cluster_time(cluster_time) @@ -860,22 +885,21 @@ def advance_operation_time(self, operation_time: Timestamp) -> None: another `ClientSession` instance. """ if not isinstance(operation_time, Timestamp): - raise TypeError("operation_time must be an instance " - "of bson.timestamp.Timestamp") + raise TypeError("operation_time must be an instance " "of bson.timestamp.Timestamp") self._advance_operation_time(operation_time) def _process_response(self, reply): """Process a response to a command that was run with this session.""" - self._advance_cluster_time(reply.get('$clusterTime')) - self._advance_operation_time(reply.get('operationTime')) + self._advance_cluster_time(reply.get("$clusterTime")) + self._advance_operation_time(reply.get("operationTime")) if self._options.snapshot and self._snapshot_time is None: - if 'cursor' in reply: - ct = reply['cursor'].get('atClusterTime') + if "cursor" in reply: + ct = reply["cursor"].get("atClusterTime") else: - ct = reply.get('atClusterTime') + ct = reply.get("atClusterTime") self._snapshot_time = ct if self.in_transaction and self._transaction.sharded: - recovery_token = reply.get('recoveryToken') + recovery_token = reply.get("recoveryToken") if recovery_token: self._transaction.recovery_token = recovery_token @@ -894,8 +918,7 @@ def in_transaction(self) -> bool: @property def _starting_transaction(self): - """True if this session is starting a multi-statement transaction. 
- """ + """True if this session is starting a multi-statement transaction.""" return self._transaction.starting() @property @@ -931,58 +954,56 @@ def _apply_to(self, command, is_retryable, read_preference, sock_info): self._update_read_concern(command, sock_info) self._server_session.last_use = time.monotonic() - command['lsid'] = self._server_session.session_id + command["lsid"] = self._server_session.session_id if is_retryable: - command['txnNumber'] = self._server_session.transaction_id + command["txnNumber"] = self._server_session.transaction_id return if self.in_transaction: if read_preference != ReadPreference.PRIMARY: raise InvalidOperation( - 'read preference in a transaction must be primary, not: ' - '%r' % (read_preference,)) + "read preference in a transaction must be primary, not: " + "%r" % (read_preference,) + ) if self._transaction.state == _TxnState.STARTING: # First command begins a new transaction. self._transaction.state = _TxnState.IN_PROGRESS - command['startTransaction'] = True + command["startTransaction"] = True if self._transaction.opts.read_concern: rc = self._transaction.opts.read_concern.document if rc: - command['readConcern'] = rc + command["readConcern"] = rc self._update_read_concern(command, sock_info) - command['txnNumber'] = self._server_session.transaction_id - command['autocommit'] = False + command["txnNumber"] = self._server_session.transaction_id + command["autocommit"] = False def _start_retryable_write(self): self._check_ended() self._server_session.inc_transaction_id() def _update_read_concern(self, cmd, sock_info): - if (self.options.causal_consistency - and self.operation_time is not None): - cmd.setdefault('readConcern', {})[ - 'afterClusterTime'] = self.operation_time + if self.options.causal_consistency and self.operation_time is not None: + cmd.setdefault("readConcern", {})["afterClusterTime"] = self.operation_time if self.options.snapshot: if sock_info.max_wire_version < 13: - raise ConfigurationError( - 'Snapshot reads require MongoDB 5.0 or later') - rc = cmd.setdefault('readConcern', {}) - rc['level'] = 'snapshot' + raise ConfigurationError("Snapshot reads require MongoDB 5.0 or later") + rc = cmd.setdefault("readConcern", {}) + rc["level"] = "snapshot" if self._snapshot_time is not None: - rc['atClusterTime'] = self._snapshot_time + rc["atClusterTime"] = self._snapshot_time def __copy__(self): - raise TypeError('A ClientSession cannot be copied, create a new session instead') + raise TypeError("A ClientSession cannot be copied, create a new session instead") class _ServerSession(object): def __init__(self, generation): # Ensure id is type 4, regardless of CodecOptions.uuid_representation. - self.session_id = {'id': Binary(uuid.uuid4().bytes, 4)} + self.session_id = {"id": Binary(uuid.uuid4().bytes, 4)} self.last_use = time.monotonic() self._transaction_id = 0 self.dirty = False @@ -1016,6 +1037,7 @@ class _ServerSessionPool(collections.deque): This class is not thread-safe, access it while holding the Topology lock. """ + def __init__(self, *args, **kwargs): super(_ServerSessionPool, self).__init__(*args, **kwargs) self.generation = 0 @@ -1056,8 +1078,7 @@ def return_server_session(self, server_session, session_timeout_minutes): def return_server_session_no_lock(self, server_session): # Discard sessions from an old pool to avoid duplicate sessions in the # child process after a fork. 
- if (server_session.generation == self.generation and - not server_session.dirty): + if server_session.generation == self.generation and not server_session.dirty: self.appendleft(server_session) def _clear_stale(self, session_timeout_minutes): diff --git a/pymongo/collation.py b/pymongo/collation.py index e398264ac2..aef480b932 100644 --- a/pymongo/collation.py +++ b/pymongo/collation.py @@ -49,10 +49,10 @@ class CollationAlternate(object): :class:`~pymongo.collation.Collation`. """ - NON_IGNORABLE = 'non-ignorable' + NON_IGNORABLE = "non-ignorable" """Spaces and punctuation are treated as base characters.""" - SHIFTED = 'shifted' + SHIFTED = "shifted" """Spaces and punctuation are *not* considered base characters. Spaces and punctuation are distinguished regardless when the @@ -68,10 +68,10 @@ class CollationMaxVariable(object): :class:`~pymongo.collation.Collation`. """ - PUNCT = 'punct' + PUNCT = "punct" """Both punctuation and spaces are ignored.""" - SPACE = 'space' + SPACE = "space" """Spaces alone are ignored.""" @@ -81,13 +81,13 @@ class CollationCaseFirst(object): :class:`~pymongo.collation.Collation`. """ - UPPER = 'upper' + UPPER = "upper" """Sort uppercase characters first.""" - LOWER = 'lower' + LOWER = "lower" """Sort lowercase characters first.""" - OFF = 'off' + OFF = "off" """Default for locale or collation strength.""" @@ -152,42 +152,41 @@ class Collation(object): __slots__ = ("__document",) - def __init__(self, locale: str, - caseLevel: Optional[bool] = None, - caseFirst: Optional[str] = None, - strength: Optional[int] = None, - numericOrdering: Optional[bool] = None, - alternate: Optional[str] = None, - maxVariable: Optional[str] = None, - normalization: Optional[bool] = None, - backwards: Optional[bool] = None, - **kwargs: Any) -> None: - locale = common.validate_string('locale', locale) - self.__document: Dict[str, Any] = {'locale': locale} + def __init__( + self, + locale: str, + caseLevel: Optional[bool] = None, + caseFirst: Optional[str] = None, + strength: Optional[int] = None, + numericOrdering: Optional[bool] = None, + alternate: Optional[str] = None, + maxVariable: Optional[str] = None, + normalization: Optional[bool] = None, + backwards: Optional[bool] = None, + **kwargs: Any + ) -> None: + locale = common.validate_string("locale", locale) + self.__document: Dict[str, Any] = {"locale": locale} if caseLevel is not None: - self.__document['caseLevel'] = common.validate_boolean( - 'caseLevel', caseLevel) + self.__document["caseLevel"] = common.validate_boolean("caseLevel", caseLevel) if caseFirst is not None: - self.__document['caseFirst'] = common.validate_string( - 'caseFirst', caseFirst) + self.__document["caseFirst"] = common.validate_string("caseFirst", caseFirst) if strength is not None: - self.__document['strength'] = common.validate_integer( - 'strength', strength) + self.__document["strength"] = common.validate_integer("strength", strength) if numericOrdering is not None: - self.__document['numericOrdering'] = common.validate_boolean( - 'numericOrdering', numericOrdering) + self.__document["numericOrdering"] = common.validate_boolean( + "numericOrdering", numericOrdering + ) if alternate is not None: - self.__document['alternate'] = common.validate_string( - 'alternate', alternate) + self.__document["alternate"] = common.validate_string("alternate", alternate) if maxVariable is not None: - self.__document['maxVariable'] = common.validate_string( - 'maxVariable', maxVariable) + self.__document["maxVariable"] = common.validate_string("maxVariable", 
maxVariable) if normalization is not None: - self.__document['normalization'] = common.validate_boolean( - 'normalization', normalization) + self.__document["normalization"] = common.validate_boolean( + "normalization", normalization + ) if backwards is not None: - self.__document['backwards'] = common.validate_boolean( - 'backwards', backwards) + self.__document["backwards"] = common.validate_boolean("backwards", backwards) self.__document.update(kwargs) @property @@ -202,8 +201,7 @@ def document(self) -> Dict[str, Any]: def __repr__(self): document = self.document - return 'Collation(%s)' % ( - ', '.join('%s=%r' % (key, document[key]) for key in document),) + return "Collation(%s)" % (", ".join("%s=%r" % (key, document[key]) for key in document),) def __eq__(self, other: Any) -> bool: if isinstance(other, Collation): @@ -214,13 +212,13 @@ def __ne__(self, other: Any) -> bool: return not self == other -def validate_collation_or_none(value: Optional[Union[Mapping[str, Any], Collation]]) -> Optional[Dict[str, Any]]: +def validate_collation_or_none( + value: Optional[Union[Mapping[str, Any], Collation]] +) -> Optional[Dict[str, Any]]: if value is None: return None if isinstance(value, Collation): return value.document if isinstance(value, dict): return value - raise TypeError( - 'collation must be a dict, an instance of collation.Collation, ' - 'or None.') + raise TypeError("collation must be a dict, an instance of collation.Collation, " "or None.") diff --git a/pymongo/collection.py b/pymongo/collection.py index aa2d148fbe..b17bb61f34 100644 --- a/pymongo/collection.py +++ b/pymongo/collection.py @@ -15,8 +15,19 @@ """Collection level utilities for Mongo.""" from collections import abc -from typing import (TYPE_CHECKING, Any, Generic, Iterable, List, Mapping, - MutableMapping, Optional, Sequence, Tuple, Union) +from typing import ( + TYPE_CHECKING, + Any, + Generic, + Iterable, + List, + Mapping, + MutableMapping, + Optional, + Sequence, + Tuple, + Union, +) from bson.code import Code from bson.codec_options import CodecOptions @@ -25,26 +36,44 @@ from bson.son import SON from bson.timestamp import Timestamp from pymongo import common, helpers, message -from pymongo.aggregation import (_CollectionAggregationCommand, - _CollectionRawAggregationCommand) +from pymongo.aggregation import ( + _CollectionAggregationCommand, + _CollectionRawAggregationCommand, +) from pymongo.bulk import _Bulk from pymongo.change_stream import CollectionChangeStream from pymongo.collation import validate_collation_or_none from pymongo.command_cursor import CommandCursor, RawBatchCommandCursor from pymongo.cursor import Cursor, RawBatchCursor -from pymongo.errors import (ConfigurationError, InvalidName, InvalidOperation, - OperationFailure) +from pymongo.errors import ( + ConfigurationError, + InvalidName, + InvalidOperation, + OperationFailure, +) from pymongo.helpers import _check_write_command_response from pymongo.message import _UNICODE_REPLACE_CODEC_OPTIONS -from pymongo.operations import (DeleteMany, DeleteOne, IndexModel, InsertOne, - ReplaceOne, UpdateMany, UpdateOne) +from pymongo.operations import ( + DeleteMany, + DeleteOne, + IndexModel, + InsertOne, + ReplaceOne, + UpdateMany, + UpdateOne, +) from pymongo.read_preferences import ReadPreference, _ServerMode -from pymongo.results import (BulkWriteResult, DeleteResult, InsertManyResult, - InsertOneResult, UpdateResult) +from pymongo.results import ( + BulkWriteResult, + DeleteResult, + InsertManyResult, + InsertOneResult, + UpdateResult, +) from 
pymongo.typings import _CollationIn, _DocumentIn, _DocumentType, _Pipeline from pymongo.write_concern import WriteConcern -_FIND_AND_MODIFY_DOC_FIELDS = {'value': 1} +_FIND_AND_MODIFY_DOC_FIELDS = {"value": 1} _WriteOp = Union[InsertOne, DeleteOne, DeleteMany, ReplaceOne, UpdateOne, UpdateMany] @@ -58,6 +87,7 @@ class ReturnDocument(object): :meth:`~pymongo.collection.Collection.find_one_and_replace` and :meth:`~pymongo.collection.Collection.find_one_and_update`. """ + BEFORE = False """Return the original document before it was updated/replaced, or ``None`` if no document matches the query. @@ -73,8 +103,7 @@ class ReturnDocument(object): class Collection(common.BaseObject, Generic[_DocumentType]): - """A Mongo collection. - """ + """A Mongo collection.""" def __init__( self, @@ -166,24 +195,21 @@ def __init__( codec_options or database.codec_options, read_preference or database.read_preference, write_concern or database.write_concern, - read_concern or database.read_concern) + read_concern or database.read_concern, + ) if not isinstance(name, str): raise TypeError("name must be an instance of str") if not name or ".." in name: raise InvalidName("collection names cannot be empty") - if "$" in name and not (name.startswith("oplog.$main") or - name.startswith("$cmd")): - raise InvalidName("collection names must not " - "contain '$': %r" % name) + if "$" in name and not (name.startswith("oplog.$main") or name.startswith("$cmd")): + raise InvalidName("collection names must not " "contain '$': %r" % name) if name[0] == "." or name[-1] == ".": - raise InvalidName("collection names must not start " - "or end with '.': %r" % name) + raise InvalidName("collection names must not start " "or end with '.': %r" % name) if "\x00" in name: - raise InvalidName("collection names must not contain the " - "null character") - collation = validate_collation_or_none(kwargs.pop('collation', None)) + raise InvalidName("collection names must not contain the " "null character") + collation = validate_collation_or_none(kwargs.pop("collation", None)) self.__database: Database[_DocumentType] = database self.__name = name @@ -192,25 +218,30 @@ def __init__( self.__create(kwargs, collation, session) self.__write_response_codec_options = self.codec_options._replace( - unicode_decode_error_handler='replace', - document_class=dict) + unicode_decode_error_handler="replace", document_class=dict + ) def _socket_for_reads(self, session): - return self.__database.client._socket_for_reads( - self._read_preference_for(session), session) + return self.__database.client._socket_for_reads(self._read_preference_for(session), session) def _socket_for_writes(self, session): return self.__database.client._socket_for_writes(session) - def _command(self, sock_info, command, - read_preference=None, - codec_options=None, check=True, allowable_errors=None, - read_concern=None, - write_concern=None, - collation=None, - session=None, - retryable_write=False, - user_fields=None): + def _command( + self, + sock_info, + command, + read_preference=None, + codec_options=None, + check=True, + allowable_errors=None, + read_concern=None, + write_concern=None, + collation=None, + session=None, + retryable_write=False, + user_fields=None, + ): """Internal command helper. 
:Parameters: @@ -252,11 +283,11 @@ def _command(self, sock_info, command, session=s, client=self.__database.client, retryable_write=retryable_write, - user_fields=user_fields) + user_fields=user_fields, + ) def __create(self, options, collation, session): - """Sends a create command with the given options. - """ + """Sends a create command with the given options.""" cmd = SON([("create", self.__name)]) if options: if "size" in options: @@ -264,9 +295,13 @@ def __create(self, options, collation, session): cmd.update(options) with self._socket_for_writes(session) as sock_info: self._command( - sock_info, cmd, read_preference=ReadPreference.PRIMARY, + sock_info, + cmd, + read_preference=ReadPreference.PRIMARY, write_concern=self._write_concern_for(session), - collation=collation, session=session) + collation=collation, + session=session, + ) def __getattr__(self, name: str) -> "Collection[_DocumentType]": """Get a sub-collection of this collection by name. @@ -276,30 +311,31 @@ def __getattr__(self, name: str) -> "Collection[_DocumentType]": :Parameters: - `name`: the name of the collection to get """ - if name.startswith('_'): + if name.startswith("_"): full_name = "%s.%s" % (self.__name, name) raise AttributeError( "Collection has no attribute %r. To access the %s" - " collection, use database['%s']." % ( - name, full_name, full_name)) + " collection, use database['%s']." % (name, full_name, full_name) + ) return self.__getitem__(name) def __getitem__(self, name: str) -> "Collection[_DocumentType]": - return Collection(self.__database, - "%s.%s" % (self.__name, name), - False, - self.codec_options, - self.read_preference, - self.write_concern, - self.read_concern) + return Collection( + self.__database, + "%s.%s" % (self.__name, name), + False, + self.codec_options, + self.read_preference, + self.write_concern, + self.read_concern, + ) def __repr__(self): return "Collection(%r, %r)" % (self.__database, self.__name) def __eq__(self, other: Any) -> bool: if isinstance(other, Collection): - return (self.__database == other.database and - self.__name == other.name) + return self.__database == other.database and self.__name == other.name return NotImplemented def __ne__(self, other: Any) -> bool: @@ -309,9 +345,11 @@ def __hash__(self) -> int: return hash((self.__database, self.__name)) def __bool__(self) -> bool: - raise NotImplementedError("Collection objects do not implement truth " - "value testing or bool(). Please compare " - "with None instead: collection is not None") + raise NotImplementedError( + "Collection objects do not implement truth " + "value testing or bool(). Please compare " + "with None instead: collection is not None" + ) @property def full_name(self) -> str: @@ -369,20 +407,22 @@ def with_options( default) the :attr:`read_concern` of this :class:`Collection` is used. 
""" - return Collection(self.__database, - self.__name, - False, - codec_options or self.codec_options, - read_preference or self.read_preference, - write_concern or self.write_concern, - read_concern or self.read_concern) + return Collection( + self.__database, + self.__name, + False, + codec_options or self.codec_options, + read_preference or self.read_preference, + write_concern or self.write_concern, + read_concern or self.read_concern, + ) def bulk_write( self, requests: Sequence[_WriteOp], ordered: bool = True, bypass_document_validation: bool = False, - session: Optional["ClientSession"] = None + session: Optional["ClientSession"] = None, ) -> BulkWriteResult: """Send a batch of write operations to the server. @@ -464,20 +504,17 @@ def bulk_write( return BulkWriteResult(bulk_api_result, True) return BulkWriteResult({}, False) - def _insert_one( - self, doc, ordered, write_concern, op_id, bypass_doc_val, session): + def _insert_one(self, doc, ordered, write_concern, op_id, bypass_doc_val, session): """Internal helper for inserting a single document.""" write_concern = write_concern or self.write_concern acknowledged = write_concern.acknowledged - command = SON([('insert', self.name), - ('ordered', ordered), - ('documents', [doc])]) + command = SON([("insert", self.name), ("ordered", ordered), ("documents", [doc])]) if not write_concern.is_server_default: - command['writeConcern'] = write_concern.document + command["writeConcern"] = write_concern.document def _insert_command(session, sock_info, retryable_write): if bypass_doc_val: - command['bypassDocumentValidation'] = True + command["bypassDocumentValidation"] = True result = sock_info.command( self.__database.name, @@ -486,19 +523,21 @@ def _insert_command(session, sock_info, retryable_write): codec_options=self.__write_response_codec_options, session=session, client=self.__database.client, - retryable_write=retryable_write) + retryable_write=retryable_write, + ) _check_write_command_response(result) - self.__database.client._retryable_write( - acknowledged, _insert_command, session) + self.__database.client._retryable_write(acknowledged, _insert_command, session) if not isinstance(doc, RawBSONDocument): - return doc.get('_id') + return doc.get("_id") - def insert_one(self, document: _DocumentIn, + def insert_one( + self, + document: _DocumentIn, bypass_document_validation: bool = False, - session: Optional["ClientSession"] = None + session: Optional["ClientSession"] = None, ) -> InsertOneResult: """Insert a single document. @@ -543,16 +582,22 @@ def insert_one(self, document: _DocumentIn, write_concern = self._write_concern_for(session) return InsertOneResult( self._insert_one( - document, ordered=True, - write_concern=write_concern, op_id=None, - bypass_doc_val=bypass_document_validation, session=session), - write_concern.acknowledged) + document, + ordered=True, + write_concern=write_concern, + op_id=None, + bypass_doc_val=bypass_document_validation, + session=session, + ), + write_concern.acknowledged, + ) - def insert_many(self, + def insert_many( + self, documents: Iterable[_DocumentIn], ordered: bool = True, bypass_document_validation: bool = False, - session: Optional["ClientSession"] = None + session: Optional["ClientSession"] = None, ) -> InsertManyResult: """Insert an iterable of documents. @@ -593,11 +638,14 @@ def insert_many(self, .. 
versionadded:: 3.0 """ - if (not isinstance(documents, abc.Iterable) - or isinstance(documents, abc.Mapping) - or not documents): + if ( + not isinstance(documents, abc.Iterable) + or isinstance(documents, abc.Mapping) + or not documents + ): raise TypeError("documents must be a non-empty list") inserted_ids: List[ObjectId] = [] + def gen(): """A generator that validates documents and handles _ids.""" for document in documents: @@ -614,51 +662,59 @@ def gen(): blk.execute(write_concern, session=session) return InsertManyResult(inserted_ids, write_concern.acknowledged) - def _update(self, sock_info, criteria, document, upsert=False, - multi=False, write_concern=None, op_id=None, ordered=True, - bypass_doc_val=False, collation=None, array_filters=None, - hint=None, session=None, retryable_write=False, let=None): + def _update( + self, + sock_info, + criteria, + document, + upsert=False, + multi=False, + write_concern=None, + op_id=None, + ordered=True, + bypass_doc_val=False, + collation=None, + array_filters=None, + hint=None, + session=None, + retryable_write=False, + let=None, + ): """Internal update / replace helper.""" common.validate_boolean("upsert", upsert) collation = validate_collation_or_none(collation) write_concern = write_concern or self.write_concern acknowledged = write_concern.acknowledged - update_doc = SON([('q', criteria), - ('u', document), - ('multi', multi), - ('upsert', upsert)]) + update_doc = SON([("q", criteria), ("u", document), ("multi", multi), ("upsert", upsert)]) if collation is not None: if not acknowledged: - raise ConfigurationError( - 'Collation is unsupported for unacknowledged writes.') + raise ConfigurationError("Collation is unsupported for unacknowledged writes.") else: - update_doc['collation'] = collation + update_doc["collation"] = collation if array_filters is not None: if not acknowledged: - raise ConfigurationError( - 'arrayFilters is unsupported for unacknowledged writes.') + raise ConfigurationError("arrayFilters is unsupported for unacknowledged writes.") else: - update_doc['arrayFilters'] = array_filters + update_doc["arrayFilters"] = array_filters if hint is not None: if not acknowledged and sock_info.max_wire_version < 8: raise ConfigurationError( - 'Must be connected to MongoDB 4.2+ to use hint on unacknowledged update commands.') + "Must be connected to MongoDB 4.2+ to use hint on unacknowledged update commands." + ) if not isinstance(hint, str): hint = helpers._index_document(hint) - update_doc['hint'] = hint + update_doc["hint"] = hint - command = SON([('update', self.name), - ('ordered', ordered), - ('updates', [update_doc])]) + command = SON([("update", self.name), ("ordered", ordered), ("updates", [update_doc])]) if let: common.validate_is_mapping("let", let) command["let"] = let if not write_concern.is_server_default: - command['writeConcern'] = write_concern.document + command["writeConcern"] = write_concern.document # Update command. if bypass_doc_val: - command['bypassDocumentValidation'] = True + command["bypassDocumentValidation"] = True # The command result has to be published for APM unmodified # so we make a shallow copy here before adding updatedExisting. @@ -669,41 +725,66 @@ def _update(self, sock_info, criteria, document, upsert=False, codec_options=self.__write_response_codec_options, session=session, client=self.__database.client, - retryable_write=retryable_write).copy() + retryable_write=retryable_write, + ).copy() _check_write_command_response(result) # Add the updatedExisting field for compatibility. 
- if result.get('n') and 'upserted' not in result: - result['updatedExisting'] = True + if result.get("n") and "upserted" not in result: + result["updatedExisting"] = True else: - result['updatedExisting'] = False + result["updatedExisting"] = False # MongoDB >= 2.6.0 returns the upsert _id in an array # element. Break it out for backward compatibility. - if 'upserted' in result: - result['upserted'] = result['upserted'][0]['_id'] + if "upserted" in result: + result["upserted"] = result["upserted"][0]["_id"] if not acknowledged: return None return result def _update_retryable( - self, criteria, document, upsert=False, multi=False, - write_concern=None, op_id=None, ordered=True, - bypass_doc_val=False, collation=None, array_filters=None, - hint=None, session=None, let=None): + self, + criteria, + document, + upsert=False, + multi=False, + write_concern=None, + op_id=None, + ordered=True, + bypass_doc_val=False, + collation=None, + array_filters=None, + hint=None, + session=None, + let=None, + ): """Internal update / replace helper.""" + def _update(session, sock_info, retryable_write): return self._update( - sock_info, criteria, document, upsert=upsert, multi=multi, - write_concern=write_concern, op_id=op_id, ordered=ordered, - bypass_doc_val=bypass_doc_val, collation=collation, - array_filters=array_filters, hint=hint, session=session, - retryable_write=retryable_write, let=let) + sock_info, + criteria, + document, + upsert=upsert, + multi=multi, + write_concern=write_concern, + op_id=op_id, + ordered=ordered, + bypass_doc_val=bypass_doc_val, + collation=collation, + array_filters=array_filters, + hint=hint, + session=session, + retryable_write=retryable_write, + let=let, + ) return self.__database.client._retryable_write( - (write_concern or self.write_concern).acknowledged and not multi, - _update, session) + (write_concern or self.write_concern).acknowledged and not multi, _update, session + ) - def replace_one(self, + def replace_one( + self, filter: Mapping[str, Any], replacement: Mapping[str, Any], upsert: bool = False, @@ -711,7 +792,7 @@ def replace_one(self, collation: Optional[_CollationIn] = None, hint: Optional[_IndexKeyHint] = None, session: Optional["ClientSession"] = None, - let: Optional[Mapping[str, Any]] = None + let: Optional[Mapping[str, Any]] = None, ) -> UpdateResult: """Replace a single document matching the filter. @@ -788,13 +869,21 @@ def replace_one(self, write_concern = self._write_concern_for(session) return UpdateResult( self._update_retryable( - filter, replacement, upsert, + filter, + replacement, + upsert, write_concern=write_concern, bypass_doc_val=bypass_document_validation, - collation=collation, hint=hint, session=session, let=let), - write_concern.acknowledged) + collation=collation, + hint=hint, + session=session, + let=let, + ), + write_concern.acknowledged, + ) - def update_one(self, + def update_one( + self, filter: Mapping[str, Any], update: Union[Mapping[str, Any], _Pipeline], upsert: bool = False, @@ -803,7 +892,7 @@ def update_one(self, array_filters: Optional[Sequence[Mapping[str, Any]]] = None, hint: Optional[_IndexKeyHint] = None, session: Optional["ClientSession"] = None, - let: Optional[Mapping[str, Any]] = None + let: Optional[Mapping[str, Any]] = None, ) -> UpdateResult: """Update a single document matching the filter. 
@@ -870,19 +959,27 @@ def update_one(self, """ common.validate_is_mapping("filter", filter) common.validate_ok_for_update(update) - common.validate_list_or_none('array_filters', array_filters) + common.validate_list_or_none("array_filters", array_filters) write_concern = self._write_concern_for(session) return UpdateResult( self._update_retryable( - filter, update, upsert, + filter, + update, + upsert, write_concern=write_concern, bypass_doc_val=bypass_document_validation, - collation=collation, array_filters=array_filters, - hint=hint, session=session, let=let), - write_concern.acknowledged) + collation=collation, + array_filters=array_filters, + hint=hint, + session=session, + let=let, + ), + write_concern.acknowledged, + ) - def update_many(self, + def update_many( + self, filter: Mapping[str, Any], update: Union[Mapping[str, Any], _Pipeline], upsert: bool = False, @@ -891,7 +988,7 @@ def update_many(self, collation: Optional[_CollationIn] = None, hint: Optional[_IndexKeyHint] = None, session: Optional["ClientSession"] = None, - let: Optional[Mapping[str, Any]] = None + let: Optional[Mapping[str, Any]] = None, ) -> UpdateResult: """Update one or more documents that match the filter. @@ -958,17 +1055,25 @@ def update_many(self, """ common.validate_is_mapping("filter", filter) common.validate_ok_for_update(update) - common.validate_list_or_none('array_filters', array_filters) + common.validate_list_or_none("array_filters", array_filters) write_concern = self._write_concern_for(session) return UpdateResult( self._update_retryable( - filter, update, upsert, multi=True, + filter, + update, + upsert, + multi=True, write_concern=write_concern, bypass_doc_val=bypass_document_validation, - collation=collation, array_filters=array_filters, - hint=hint, session=session, let=let), - write_concern.acknowledged) + collation=collation, + array_filters=array_filters, + hint=hint, + session=session, + let=let, + ), + write_concern.acknowledged, + ) def drop(self, session: Optional["ClientSession"] = None) -> None: """Alias for :meth:`~pymongo.database.Database.drop_collection`. 
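The hunks above re-wrap the write helpers, and they also show the ``let`` parameter threaded through ``update_one``, ``update_many``, and ``replace_one``. A minimal usage sketch, assuming a mongod reachable at the default URI and MongoDB 5.0+ for ``let`` support (the collection and field names are illustrative)::

    from pymongo import MongoClient

    client = MongoClient()  # assumes mongodb://localhost:27017
    coll = client.test.users

    coll.insert_one({"name": "alice", "active": False})

    # "let" defines server-side variables; reference them via $$ inside $expr.
    result = coll.update_one(
        {"$expr": {"$eq": ["$name", "$$target"]}},
        {"$set": {"active": True}},
        let={"target": "alice"},
    )
    print(result.matched_count, result.modified_count)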
@@ -993,39 +1098,46 @@ def drop(self, session: Optional["ClientSession"] = None) -> None: self.codec_options, self.read_preference, self.write_concern, - self.read_concern) + self.read_concern, + ) dbo.drop_collection(self.__name, session=session) def _delete( - self, sock_info, criteria, multi, - write_concern=None, op_id=None, ordered=True, - collation=None, hint=None, session=None, retryable_write=False, - let=None): + self, + sock_info, + criteria, + multi, + write_concern=None, + op_id=None, + ordered=True, + collation=None, + hint=None, + session=None, + retryable_write=False, + let=None, + ): """Internal delete helper.""" common.validate_is_mapping("filter", criteria) write_concern = write_concern or self.write_concern acknowledged = write_concern.acknowledged - delete_doc = SON([('q', criteria), - ('limit', int(not multi))]) + delete_doc = SON([("q", criteria), ("limit", int(not multi))]) collation = validate_collation_or_none(collation) if collation is not None: if not acknowledged: - raise ConfigurationError( - 'Collation is unsupported for unacknowledged writes.') + raise ConfigurationError("Collation is unsupported for unacknowledged writes.") else: - delete_doc['collation'] = collation + delete_doc["collation"] = collation if hint is not None: if not acknowledged and sock_info.max_wire_version < 9: raise ConfigurationError( - 'Must be connected to MongoDB 4.4+ to use hint on unacknowledged delete commands.') + "Must be connected to MongoDB 4.4+ to use hint on unacknowledged delete commands." + ) if not isinstance(hint, str): hint = helpers._index_document(hint) - delete_doc['hint'] = hint - command = SON([('delete', self.name), - ('ordered', ordered), - ('deletes', [delete_doc])]) + delete_doc["hint"] = hint + command = SON([("delete", self.name), ("ordered", ordered), ("deletes", [delete_doc])]) if not write_concern.is_server_default: - command['writeConcern'] = write_concern.document + command["writeConcern"] = write_concern.document if let: common.validate_is_document_type("let", let) @@ -1039,32 +1151,51 @@ def _delete( codec_options=self.__write_response_codec_options, session=session, client=self.__database.client, - retryable_write=retryable_write) + retryable_write=retryable_write, + ) _check_write_command_response(result) return result def _delete_retryable( - self, criteria, multi, - write_concern=None, op_id=None, ordered=True, - collation=None, hint=None, session=None, let=None): + self, + criteria, + multi, + write_concern=None, + op_id=None, + ordered=True, + collation=None, + hint=None, + session=None, + let=None, + ): """Internal delete helper.""" + def _delete(session, sock_info, retryable_write): return self._delete( - sock_info, criteria, multi, - write_concern=write_concern, op_id=op_id, ordered=ordered, - collation=collation, hint=hint, session=session, - retryable_write=retryable_write, let=let) + sock_info, + criteria, + multi, + write_concern=write_concern, + op_id=op_id, + ordered=ordered, + collation=collation, + hint=hint, + session=session, + retryable_write=retryable_write, + let=let, + ) return self.__database.client._retryable_write( - (write_concern or self.write_concern).acknowledged and not multi, - _delete, session) + (write_concern or self.write_concern).acknowledged and not multi, _delete, session + ) - def delete_one(self, + def delete_one( + self, filter: Mapping[str, Any], collation: Optional[_CollationIn] = None, hint: Optional[_IndexKeyHint] = None, session: Optional["ClientSession"] = None, - let: Optional[Mapping[str, Any]] = None + 
let: Optional[Mapping[str, Any]] = None, ) -> DeleteResult: """Delete a single document matching the filter. @@ -1109,17 +1240,24 @@ def delete_one(self, write_concern = self._write_concern_for(session) return DeleteResult( self._delete_retryable( - filter, False, + filter, + False, write_concern=write_concern, - collation=collation, hint=hint, session=session, let=let), - write_concern.acknowledged) + collation=collation, + hint=hint, + session=session, + let=let, + ), + write_concern.acknowledged, + ) - def delete_many(self, + def delete_many( + self, filter: Mapping[str, Any], collation: Optional[_CollationIn] = None, hint: Optional[_IndexKeyHint] = None, session: Optional["ClientSession"] = None, - let: Optional[Mapping[str, Any]] = None + let: Optional[Mapping[str, Any]] = None, ) -> DeleteResult: """Delete one or more documents matching the filter. @@ -1164,12 +1302,20 @@ def delete_many(self, write_concern = self._write_concern_for(session) return DeleteResult( self._delete_retryable( - filter, True, + filter, + True, write_concern=write_concern, - collation=collation, hint=hint, session=session, let=let), - write_concern.acknowledged) - - def find_one(self, filter: Optional[Any] = None, *args: Any, **kwargs: Any) -> Optional[_DocumentType]: + collation=collation, + hint=hint, + session=session, + let=let, + ), + write_concern.acknowledged, + ) + + def find_one( + self, filter: Optional[Any] = None, *args: Any, **kwargs: Any + ) -> Optional[_DocumentType]: """Get a single document from the database. All arguments to :meth:`find` are also valid arguments for @@ -1194,8 +1340,7 @@ def find_one(self, filter: Optional[Any] = None, *args: Any, **kwargs: Any) -> O >>> collection.find_one(max_time_ms=100) """ - if (filter is not None and not - isinstance(filter, abc.Mapping)): + if filter is not None and not isinstance(filter, abc.Mapping): filter = {"_id": filter} cursor = self.find(filter, *args, **kwargs) @@ -1420,8 +1565,7 @@ def find_raw_batches(self, *args: Any, **kwargs: Any) -> RawBatchCursor[_Documen """ # OP_MSG is required to support encryption. if self.__database.client._encrypter: - raise InvalidOperation( - "find_raw_batches does not support auto encryption") + raise InvalidOperation("find_raw_batches does not support auto encryption") return RawBatchCursor(self, *args, **kwargs) @@ -1437,13 +1581,13 @@ def _count_cmd(self, session, sock_info, read_preference, cmd, collation): codec_options=self.__write_response_codec_options, read_concern=self.read_concern, collation=collation, - session=session) + session=session, + ) if res.get("errmsg", "") == "ns missing": return 0 return int(res["n"]) - def _aggregate_one_result( - self, sock_info, read_preference, cmd, collation, session): + def _aggregate_one_result(self, sock_info, read_preference, cmd, collation, session): """Internal helper to run an aggregate that returns a single result.""" result = self._command( sock_info, @@ -1453,11 +1597,12 @@ def _aggregate_one_result( codec_options=self.__write_response_codec_options, read_concern=self.read_concern, collation=collation, - session=session) + session=session, + ) # cursor will not be present for NamespaceNotFound errors. - if 'cursor' not in result: + if "cursor" not in result: return None - batch = result['cursor']['firstBatch'] + batch = result["cursor"]["firstBatch"] return batch[0] if batch else None def estimated_document_count(self, **kwargs: Any) -> int: @@ -1478,38 +1623,35 @@ def estimated_document_count(self, **kwargs: Any) -> int: .. 
versionadded:: 3.7 """ - if 'session' in kwargs: - raise ConfigurationError( - 'estimated_document_count does not support sessions') + if "session" in kwargs: + raise ConfigurationError("estimated_document_count does not support sessions") def _cmd(session, server, sock_info, read_preference): if sock_info.max_wire_version >= 12: # MongoDB 4.9+ pipeline = [ - {'$collStats': {'count': {}}}, - {'$group': {'_id': 1, 'n': {'$sum': '$count'}}}, + {"$collStats": {"count": {}}}, + {"$group": {"_id": 1, "n": {"$sum": "$count"}}}, ] - cmd = SON([('aggregate', self.__name), - ('pipeline', pipeline), - ('cursor', {})]) + cmd = SON([("aggregate", self.__name), ("pipeline", pipeline), ("cursor", {})]) cmd.update(kwargs) result = self._aggregate_one_result( - sock_info, read_preference, cmd, collation=None, - session=session) + sock_info, read_preference, cmd, collation=None, session=session + ) if not result: return 0 - return int(result['n']) + return int(result["n"]) else: # MongoDB < 4.9 - cmd = SON([('count', self.__name)]) + cmd = SON([("count", self.__name)]) cmd.update(kwargs) - return self._count_cmd( - None, sock_info, read_preference, cmd, collation=None) + return self._count_cmd(None, sock_info, read_preference, cmd, collation=None) - return self.__database.client._retryable_read( - _cmd, self.read_preference, None) + return self.__database.client._retryable_read(_cmd, self.read_preference, None) - def count_documents(self, filter: Mapping[str, Any], session: Optional["ClientSession"] = None, **kwargs: Any) -> int: + def count_documents( + self, filter: Mapping[str, Any], session: Optional["ClientSession"] = None, **kwargs: Any + ) -> int: """Count the number of documents in this collection. .. note:: For a fast count of the total documents in a collection see @@ -1563,31 +1705,34 @@ def count_documents(self, filter: Mapping[str, Any], session: Optional["ClientSe .. _$center: https://docs.mongodb.com/manual/reference/operator/query/center/#op._S_center .. 
_$centerSphere: https://docs.mongodb.com/manual/reference/operator/query/centerSphere/#op._S_centerSphere """ - pipeline = [{'$match': filter}] - if 'skip' in kwargs: - pipeline.append({'$skip': kwargs.pop('skip')}) - if 'limit' in kwargs: - pipeline.append({'$limit': kwargs.pop('limit')}) - pipeline.append({'$group': {'_id': 1, 'n': {'$sum': 1}}}) - cmd = SON([('aggregate', self.__name), - ('pipeline', pipeline), - ('cursor', {})]) + pipeline = [{"$match": filter}] + if "skip" in kwargs: + pipeline.append({"$skip": kwargs.pop("skip")}) + if "limit" in kwargs: + pipeline.append({"$limit": kwargs.pop("limit")}) + pipeline.append({"$group": {"_id": 1, "n": {"$sum": 1}}}) + cmd = SON([("aggregate", self.__name), ("pipeline", pipeline), ("cursor", {})]) if "hint" in kwargs and not isinstance(kwargs["hint"], str): kwargs["hint"] = helpers._index_document(kwargs["hint"]) - collation = validate_collation_or_none(kwargs.pop('collation', None)) + collation = validate_collation_or_none(kwargs.pop("collation", None)) cmd.update(kwargs) def _cmd(session, server, sock_info, read_preference): - result = self._aggregate_one_result( - sock_info, read_preference, cmd, collation, session) + result = self._aggregate_one_result(sock_info, read_preference, cmd, collation, session) if not result: return 0 - return result['n'] + return result["n"] return self.__database.client._retryable_read( - _cmd, self._read_preference_for(session), session) + _cmd, self._read_preference_for(session), session + ) - def create_indexes(self, indexes: Sequence[IndexModel], session: Optional["ClientSession"] = None, **kwargs: Any) -> List[str]: + def create_indexes( + self, + indexes: Sequence[IndexModel], + session: Optional["ClientSession"] = None, + **kwargs: Any, + ) -> List[str]: """Create one or more indexes on this collection. >>> from pymongo import IndexModel, ASCENDING, DESCENDING @@ -1619,7 +1764,7 @@ def create_indexes(self, indexes: Sequence[IndexModel], session: Optional["Clien .. 
_createIndexes: https://docs.mongodb.com/manual/reference/command/createIndexes/ """ - common.validate_list('indexes', indexes) + common.validate_list("indexes", indexes) return self.__create_indexes(indexes, session, **kwargs) def __create_indexes(self, indexes, session, **kwargs): @@ -1641,28 +1786,33 @@ def gen_indexes(): for index in indexes: if not isinstance(index, IndexModel): raise TypeError( - "%r is not an instance of " - "pymongo.operations.IndexModel" % (index,)) + "%r is not an instance of " "pymongo.operations.IndexModel" % (index,) + ) document = index.document names.append(document["name"]) yield document - cmd = SON([('createIndexes', self.name), - ('indexes', list(gen_indexes()))]) + cmd = SON([("createIndexes", self.name), ("indexes", list(gen_indexes()))]) cmd.update(kwargs) - if 'commitQuorum' in kwargs and not supports_quorum: + if "commitQuorum" in kwargs and not supports_quorum: raise ConfigurationError( "Must be connected to MongoDB 4.4+ to use the " - "commitQuorum option for createIndexes") + "commitQuorum option for createIndexes" + ) self._command( - sock_info, cmd, read_preference=ReadPreference.PRIMARY, + sock_info, + cmd, + read_preference=ReadPreference.PRIMARY, codec_options=_UNICODE_REPLACE_CODEC_OPTIONS, write_concern=self._write_concern_for(session), - session=session) + session=session, + ) return names - def create_index(self, keys: _IndexKeyHint, session: Optional["ClientSession"] = None, **kwargs: Any) -> str: + def create_index( + self, keys: _IndexKeyHint, session: Optional["ClientSession"] = None, **kwargs: Any + ) -> str: """Creates an index on this collection. Takes either a single key or a list of (key, direction) pairs. @@ -1791,7 +1941,9 @@ def drop_indexes(self, session: Optional["ClientSession"] = None, **kwargs: Any) """ self.drop_index("*", session=session, **kwargs) - def drop_index(self, index_or_name: _IndexKeyHint, session: Optional["ClientSession"] = None, **kwargs: Any) -> None: + def drop_index( + self, index_or_name: _IndexKeyHint, session: Optional["ClientSession"] = None, **kwargs: Any + ) -> None: """Drops the specified index on this collection. Can be used on non-existant collections or collections with no @@ -1837,14 +1989,18 @@ def drop_index(self, index_or_name: _IndexKeyHint, session: Optional["ClientSess cmd = SON([("dropIndexes", self.__name), ("index", name)]) cmd.update(kwargs) with self._socket_for_writes(session) as sock_info: - self._command(sock_info, - cmd, - read_preference=ReadPreference.PRIMARY, - allowable_errors=["ns not found", 26], - write_concern=self._write_concern_for(session), - session=session) - - def list_indexes(self, session: Optional["ClientSession"] = None) -> CommandCursor[MutableMapping[str, Any]]: + self._command( + sock_info, + cmd, + read_preference=ReadPreference.PRIMARY, + allowable_errors=["ns not found", 26], + write_concern=self._write_concern_for(session), + session=session, + ) + + def list_indexes( + self, session: Optional["ClientSession"] = None + ) -> CommandCursor[MutableMapping[str, Any]]: """Get a cursor over the index documents for this collection. >>> for index in db.test.list_indexes(): @@ -1865,35 +2021,35 @@ def list_indexes(self, session: Optional["ClientSession"] = None) -> CommandCurs .. 
versionadded:: 3.0 """ codec_options = CodecOptions(SON) - coll = self.with_options(codec_options=codec_options, - read_preference=ReadPreference.PRIMARY) - read_pref = ((session and session._txn_read_preference()) - or ReadPreference.PRIMARY) + coll = self.with_options( + codec_options=codec_options, read_preference=ReadPreference.PRIMARY + ) + read_pref = (session and session._txn_read_preference()) or ReadPreference.PRIMARY def _cmd(session, server, sock_info, read_preference): cmd = SON([("listIndexes", self.__name), ("cursor", {})]) with self.__database.client._tmp_session(session, False) as s: try: - cursor = self._command(sock_info, cmd, - read_preference, - codec_options, - session=s)["cursor"] + cursor = self._command( + sock_info, cmd, read_preference, codec_options, session=s + )["cursor"] except OperationFailure as exc: # Ignore NamespaceNotFound errors to match the behavior # of reading from *.system.indexes. if exc.code != 26: raise - cursor = {'id': 0, 'firstBatch': []} + cursor = {"id": 0, "firstBatch": []} cmd_cursor = CommandCursor( - coll, cursor, sock_info.address, session=s, - explicit_session=session is not None) + coll, cursor, sock_info.address, session=s, explicit_session=session is not None + ) cmd_cursor._maybe_pin_connection(sock_info) return cmd_cursor - return self.__database.client._retryable_read( - _cmd, read_pref, session) + return self.__database.client._retryable_read(_cmd, read_pref, session) - def index_information(self, session: Optional["ClientSession"] = None) -> MutableMapping[str, Any]: + def index_information( + self, session: Optional["ClientSession"] = None + ) -> MutableMapping[str, Any]: """Get information on this collection's indexes. Returns a dictionary where the keys are index names (as @@ -1947,9 +2103,9 @@ def options(self, session: Optional["ClientSession"] = None) -> MutableMapping[s self.codec_options, self.read_preference, self.write_concern, - self.read_concern) - cursor = dbo.list_collections( - session=session, filter={"name": self.__name}) + self.read_concern, + ) + cursor = dbo.list_collections(session=session, filter={"name": self.__name}) result = None for doc in cursor: @@ -1966,17 +2122,40 @@ def options(self, session: Optional["ClientSession"] = None) -> MutableMapping[s return options - def _aggregate(self, aggregation_command, pipeline, cursor_class, session, - explicit_session, let=None, **kwargs): + def _aggregate( + self, + aggregation_command, + pipeline, + cursor_class, + session, + explicit_session, + let=None, + **kwargs, + ): cmd = aggregation_command( - self, cursor_class, pipeline, kwargs, explicit_session, let, - user_fields={'cursor': {'firstBatch': 1}}) + self, + cursor_class, + pipeline, + kwargs, + explicit_session, + let, + user_fields={"cursor": {"firstBatch": 1}}, + ) return self.__database.client._retryable_read( - cmd.get_cursor, cmd.get_read_preference(session), session, - retryable=not cmd._performs_write) + cmd.get_cursor, + cmd.get_read_preference(session), + session, + retryable=not cmd._performs_write, + ) - def aggregate(self, pipeline: _Pipeline, session: Optional["ClientSession"] = None, let: Optional[Mapping[str, Any]] = None, **kwargs: Any) -> CommandCursor[_DocumentType]: + def aggregate( + self, + pipeline: _Pipeline, + session: Optional["ClientSession"] = None, + let: Optional[Mapping[str, Any]] = None, + **kwargs: Any, + ) -> CommandCursor[_DocumentType]: """Perform an aggregation using the aggregation framework on this collection. 
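Since this hunk re-wraps the ``aggregate`` entry point, a short sketch of how it is typically driven may help orient the reader; the collection and documents are hypothetical and a reachable server is assumed::

    from pymongo import MongoClient

    orders = MongoClient().test.orders

    pipeline = [
        {"$match": {"status": "shipped"}},
        {"$group": {"_id": "$customer", "total": {"$sum": "$amount"}}},
    ]
    # aggregate() returns a CommandCursor over the server's result batches.
    for doc in orders.aggregate(pipeline):
        print(doc["_id"], doc["total"])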
@@ -2050,13 +2229,15 @@ def aggregate(self, pipeline: _Pipeline, session: Optional["ClientSession"] = No https://docs.mongodb.com/manual/reference/command/aggregate """ with self.__database.client._tmp_session(session, close=False) as s: - return self._aggregate(_CollectionAggregationCommand, - pipeline, - CommandCursor, - session=s, - explicit_session=session is not None, - let=let, - **kwargs) + return self._aggregate( + _CollectionAggregationCommand, + pipeline, + CommandCursor, + session=s, + explicit_session=session is not None, + let=let, + **kwargs, + ) def aggregate_raw_batches( self, pipeline: _Pipeline, session: Optional["ClientSession"] = None, **kwargs: Any @@ -2086,18 +2267,20 @@ def aggregate_raw_batches( """ # OP_MSG is required to support encryption. if self.__database.client._encrypter: - raise InvalidOperation( - "aggregate_raw_batches does not support auto encryption") + raise InvalidOperation("aggregate_raw_batches does not support auto encryption") with self.__database.client._tmp_session(session, close=False) as s: - return self._aggregate(_CollectionRawAggregationCommand, - pipeline, - RawBatchCommandCursor, - session=s, - explicit_session=session is not None, - **kwargs) - - def watch(self, + return self._aggregate( + _CollectionRawAggregationCommand, + pipeline, + RawBatchCommandCursor, + session=s, + explicit_session=session is not None, + **kwargs, + ) + + def watch( + self, pipeline: Optional[_Pipeline] = None, full_document: Optional[str] = None, resume_after: Optional[Mapping[str, Any]] = None, @@ -2203,11 +2386,21 @@ def watch(self, https://github.com/mongodb/specifications/blob/master/source/change-streams/change-streams.rst """ return CollectionChangeStream( - self, pipeline, full_document, resume_after, max_await_time_ms, - batch_size, collation, start_at_operation_time, session, - start_after) - - def rename(self, new_name: str, session: Optional["ClientSession"] = None, **kwargs: Any) -> MutableMapping[str, Any]: + self, + pipeline, + full_document, + resume_after, + max_await_time_ms, + batch_size, + collation, + start_at_operation_time, + session, + start_after, + ) + + def rename( + self, new_name: str, session: Optional["ClientSession"] = None, **kwargs: Any + ) -> MutableMapping[str, Any]: """Rename this collection. If operating in auth mode, client must be authorized as an @@ -2253,13 +2446,20 @@ def rename(self, new_name: str, session: Optional["ClientSession"] = None, **kwa with self._socket_for_writes(session) as sock_info: with self.__database.client._tmp_session(session) as s: return sock_info.command( - 'admin', cmd, + "admin", + cmd, write_concern=write_concern, parse_write_concern_error=True, - session=s, client=self.__database.client) + session=s, + client=self.__database.client, + ) def distinct( - self, key: str, filter: Optional[Mapping[str, Any]] = None, session: Optional["ClientSession"] = None, **kwargs: Any + self, + key: str, + filter: Optional[Mapping[str, Any]] = None, + session: Optional["ClientSession"] = None, + **kwargs: Any, ) -> List: """Get a list of distinct values for `key` among all documents in this collection. 
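``distinct`` above accepts an optional filter that narrows which documents are scanned. A quick sketch, reusing the illustrative ``orders`` collection from earlier (server required)::

    from pymongo import MongoClient

    coll = MongoClient().test.orders

    # Distinct customers among matching documents only.
    customers = coll.distinct("customer", {"status": "shipped"})
    print(sorted(customers))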
@@ -2296,52 +2496,64 @@ def distinct( """ if not isinstance(key, str): raise TypeError("key must be an instance of str") - cmd = SON([("distinct", self.__name), - ("key", key)]) + cmd = SON([("distinct", self.__name), ("key", key)]) if filter is not None: if "query" in kwargs: raise ConfigurationError("can't pass both filter and query") kwargs["query"] = filter - collation = validate_collation_or_none(kwargs.pop('collation', None)) + collation = validate_collation_or_none(kwargs.pop("collation", None)) cmd.update(kwargs) + def _cmd(session, server, sock_info, read_preference): return self._command( - sock_info, cmd, read_preference=read_preference, + sock_info, + cmd, + read_preference=read_preference, read_concern=self.read_concern, - collation=collation, session=session, - user_fields={"values": 1})["values"] + collation=collation, + session=session, + user_fields={"values": 1}, + )["values"] return self.__database.client._retryable_read( - _cmd, self._read_preference_for(session), session) + _cmd, self._read_preference_for(session), session + ) def _write_concern_for_cmd(self, cmd, session): - raw_wc = cmd.get('writeConcern') + raw_wc = cmd.get("writeConcern") if raw_wc is not None: return WriteConcern(**raw_wc) else: return self._write_concern_for(session) - def __find_and_modify(self, filter, projection, sort, upsert=None, - return_document=ReturnDocument.BEFORE, - array_filters=None, hint=None, session=None, - let=None, **kwargs): + def __find_and_modify( + self, + filter, + projection, + sort, + upsert=None, + return_document=ReturnDocument.BEFORE, + array_filters=None, + hint=None, + session=None, + let=None, + **kwargs, + ): """Internal findAndModify helper.""" common.validate_is_mapping("filter", filter) if not isinstance(return_document, bool): - raise ValueError("return_document must be " - "ReturnDocument.BEFORE or ReturnDocument.AFTER") - collation = validate_collation_or_none(kwargs.pop('collation', None)) - cmd = SON([("findAndModify", self.__name), - ("query", filter), - ("new", return_document)]) + raise ValueError( + "return_document must be " "ReturnDocument.BEFORE or ReturnDocument.AFTER" + ) + collation = validate_collation_or_none(kwargs.pop("collation", None)) + cmd = SON([("findAndModify", self.__name), ("query", filter), ("new", return_document)]) if let: common.validate_is_mapping("let", let) cmd["let"] = let cmd.update(kwargs) if projection is not None: - cmd["fields"] = helpers._fields_list_to_dict(projection, - "projection") + cmd["fields"] = helpers._fields_list_to_dict(projection, "projection") if sort is not None: cmd["sort"] = helpers._index_document(sort) if upsert is not None: @@ -2358,33 +2570,41 @@ def _find_and_modify(session, sock_info, retryable_write): if array_filters is not None: if not acknowledged: raise ConfigurationError( - 'arrayFilters is unsupported for unacknowledged ' - 'writes.') + "arrayFilters is unsupported for unacknowledged " "writes." + ) cmd["arrayFilters"] = list(array_filters) if hint is not None: if sock_info.max_wire_version < 8: raise ConfigurationError( - 'Must be connected to MongoDB 4.2+ to use hint on find and modify commands.') - elif (not acknowledged and sock_info.max_wire_version < 9): + "Must be connected to MongoDB 4.2+ to use hint on find and modify commands." 
+ ) + elif not acknowledged and sock_info.max_wire_version < 9: raise ConfigurationError( - 'Must be connected to MongoDB 4.4+ to use hint on unacknowledged find and modify commands.') - cmd['hint'] = hint + "Must be connected to MongoDB 4.4+ to use hint on unacknowledged find and modify commands." + ) + cmd["hint"] = hint if not write_concern.is_server_default: - cmd['writeConcern'] = write_concern.document - out = self._command(sock_info, cmd, - read_preference=ReadPreference.PRIMARY, - write_concern=write_concern, - collation=collation, session=session, - retryable_write=retryable_write, - user_fields=_FIND_AND_MODIFY_DOC_FIELDS) + cmd["writeConcern"] = write_concern.document + out = self._command( + sock_info, + cmd, + read_preference=ReadPreference.PRIMARY, + write_concern=write_concern, + collation=collation, + session=session, + retryable_write=retryable_write, + user_fields=_FIND_AND_MODIFY_DOC_FIELDS, + ) _check_write_command_response(out) return out.get("value") return self.__database.client._retryable_write( - write_concern.acknowledged, _find_and_modify, session) + write_concern.acknowledged, _find_and_modify, session + ) - def find_one_and_delete(self, + def find_one_and_delete( + self, filter: Mapping[str, Any], projection: Optional[Union[Mapping[str, Any], Iterable[str]]] = None, sort: Optional[_IndexList] = None, @@ -2463,11 +2683,13 @@ def find_one_and_delete(self, Added the `collation` option. .. versionadded:: 3.0 """ - kwargs['remove'] = True - return self.__find_and_modify(filter, projection, sort, let=let, - hint=hint, session=session, **kwargs) + kwargs["remove"] = True + return self.__find_and_modify( + filter, projection, sort, let=let, hint=hint, session=session, **kwargs + ) - def find_one_and_replace(self, + def find_one_and_replace( + self, filter: Mapping[str, Any], replacement: Mapping[str, Any], projection: Optional[Union[Mapping[str, Any], Iterable[str]]] = None, @@ -2556,12 +2778,21 @@ def find_one_and_replace(self, .. versionadded:: 3.0 """ common.validate_ok_for_replace(replacement) - kwargs['update'] = replacement - return self.__find_and_modify(filter, projection, - sort, upsert, return_document, let=let, - hint=hint, session=session, **kwargs) + kwargs["update"] = replacement + return self.__find_and_modify( + filter, + projection, + sort, + upsert, + return_document, + let=let, + hint=hint, + session=session, + **kwargs, + ) - def find_one_and_update(self, + def find_one_and_update( + self, filter: Mapping[str, Any], update: Union[Mapping[str, Any], _Pipeline], projection: Optional[Union[Mapping[str, Any], Iterable[str]]] = None, @@ -2692,12 +2923,20 @@ def find_one_and_update(self, .. versionadded:: 3.0 """ common.validate_ok_for_update(update) - common.validate_list_or_none('array_filters', array_filters) - kwargs['update'] = update - return self.__find_and_modify(filter, projection, - sort, upsert, return_document, - array_filters, hint=hint, let=let, - session=session, **kwargs) + common.validate_list_or_none("array_filters", array_filters) + kwargs["update"] = update + return self.__find_and_modify( + filter, + projection, + sort, + upsert, + return_document, + array_filters, + hint=hint, + let=let, + session=session, + **kwargs, + ) def __iter__(self) -> "Collection[_DocumentType]": return self @@ -2708,15 +2947,16 @@ def __next__(self) -> None: next = __next__ def __call__(self, *args: Any, **kwargs: Any) -> None: - """This is only here so that some API misusages are easier to debug. 
- """ + """This is only here so that some API misusages are easier to debug.""" if "." not in self.__name: - raise TypeError("'Collection' object is not callable. If you " - "meant to call the '%s' method on a 'Database' " - "object it is failing because no such method " - "exists." % - self.__name) - raise TypeError("'Collection' object is not callable. If you meant to " - "call the '%s' method on a 'Collection' object it is " - "failing because no such method exists." % - self.__name.split(".")[-1]) + raise TypeError( + "'Collection' object is not callable. If you " + "meant to call the '%s' method on a 'Database' " + "object it is failing because no such method " + "exists." % self.__name + ) + raise TypeError( + "'Collection' object is not callable. If you meant to " + "call the '%s' method on a 'Collection' object it is " + "failing because no such method exists." % self.__name.split(".")[-1] + ) diff --git a/pymongo/command_cursor.py b/pymongo/command_cursor.py index b7dbf7a8e7..d7a37766b2 100644 --- a/pymongo/command_cursor.py +++ b/pymongo/command_cursor.py @@ -15,13 +15,11 @@ """CommandCursor class to iterate over command results.""" from collections import deque -from typing import (TYPE_CHECKING, Any, Generic, Iterator, Mapping, Optional, - Tuple) +from typing import TYPE_CHECKING, Any, Generic, Iterator, Mapping, Optional, Tuple from bson import _convert_raw_document_lists_to_streams from pymongo.cursor import _CURSOR_CLOSED_ERRORS, _SocketManager -from pymongo.errors import (ConnectionFailure, InvalidOperation, - OperationFailure) +from pymongo.errors import ConnectionFailure, InvalidOperation, OperationFailure from pymongo.message import _CursorAddress, _GetMore, _RawBatchGetMore from pymongo.response import PinnedResponse from pymongo.typings import _DocumentType @@ -33,9 +31,11 @@ class CommandCursor(Generic[_DocumentType]): """A cursor / iterator over command cursors.""" + _getmore_class = _GetMore - def __init__(self, + def __init__( + self, collection: "Collection[_DocumentType]", cursor_info: Mapping[str, Any], address: Optional[Tuple[str, Optional[int]]], @@ -47,15 +47,15 @@ def __init__(self, """Create a new command cursor.""" self.__sock_mgr: Any = None self.__collection: Collection[_DocumentType] = collection - self.__id = cursor_info['id'] - self.__data = deque(cursor_info['firstBatch']) - self.__postbatchresumetoken = cursor_info.get('postBatchResumeToken') + self.__id = cursor_info["id"] + self.__data = deque(cursor_info["firstBatch"]) + self.__postbatchresumetoken = cursor_info.get("postBatchResumeToken") self.__address = address self.__batch_size = batch_size self.__max_await_time_ms = max_await_time_ms self.__session = session self.__explicit_session = explicit_session - self.__killed = (self.__id == 0) + self.__killed = self.__id == 0 if self.__killed: self.__end_session(True) @@ -66,22 +66,19 @@ def __init__(self, self.batch_size(batch_size) - if (not isinstance(max_await_time_ms, int) - and max_await_time_ms is not None): + if not isinstance(max_await_time_ms, int) and max_await_time_ms is not None: raise TypeError("max_await_time_ms must be an integer or None") def __del__(self) -> None: self.__die() def __die(self, synchronous=False): - """Closes this cursor. - """ + """Closes this cursor.""" already_killed = self.__killed self.__killed = True if self.__id and not already_killed: cursor_id = self.__id - address = _CursorAddress( - self.__address, self.__ns) + address = _CursorAddress(self.__address, self.__ns) else: # Skip killCursors. 
cursor_id = 0 @@ -92,7 +89,8 @@ def __die(self, synchronous=False): address, self.__sock_mgr, self.__session, - self.__explicit_session) + self.__explicit_session, + ) if not self.__explicit_session: self.__session = None self.__sock_mgr = None @@ -103,8 +101,7 @@ def __end_session(self, synchronous): self.__session = None def close(self) -> None: - """Explicitly close / kill this cursor. - """ + """Explicitly close / kill this cursor.""" self.__die(True) def batch_size(self, batch_size: int) -> "CommandCursor[_DocumentType]": @@ -157,12 +154,12 @@ def _maybe_pin_connection(self, sock_info): self.__sock_mgr = sock_mgr def __send_message(self, operation): - """Send a getmore message and handle the response. - """ + """Send a getmore message and handle the response.""" client = self.__collection.database.client try: response = client._run_operation( - operation, self._unpack_response, address=self.__address) + operation, self._unpack_response, address=self.__address + ) except OperationFailure as exc: if exc.code in _CURSOR_CLOSED_ERRORS: # Don't send killCursors because the cursor is already closed. @@ -182,13 +179,12 @@ def __send_message(self, operation): if isinstance(response, PinnedResponse): if not self.__sock_mgr: - self.__sock_mgr = _SocketManager(response.socket_info, - response.more_to_come) + self.__sock_mgr = _SocketManager(response.socket_info, response.more_to_come) if response.from_command: - cursor = response.docs[0]['cursor'] - documents = cursor['nextBatch'] - self.__postbatchresumetoken = cursor.get('postBatchResumeToken') - self.__id = cursor['id'] + cursor = response.docs[0]["cursor"] + documents = cursor["nextBatch"] + self.__postbatchresumetoken = cursor.get("postBatchResumeToken") + self.__id = cursor["id"] else: documents = response.docs self.__id = response.data.cursor_id @@ -197,10 +193,10 @@ def __send_message(self, operation): self.close() self.__data = deque(documents) - def _unpack_response(self, response, cursor_id, codec_options, - user_fields=None, legacy_response=False): - return response.unpack_response(cursor_id, codec_options, user_fields, - legacy_response) + def _unpack_response( + self, response, cursor_id, codec_options, user_fields=None, legacy_response=False + ): + return response.unpack_response(cursor_id, codec_options, user_fields, legacy_response) def _refresh(self): """Refreshes the cursor with more data from the server. 
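``CommandCursor`` is what helpers such as ``list_indexes`` and ``aggregate`` hand back; ``_refresh`` above issues the getMore once the current batch is exhausted. A small sketch of driving one explicitly (assumes a local server; the index names depend on the collection)::

    from pymongo import MongoClient

    coll = MongoClient().test.orders
    cursor = coll.list_indexes()
    # batch_size() caps how many documents each getMore returns.
    for index in cursor.batch_size(10):
        print(index["name"])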
@@ -213,19 +209,23 @@ def _refresh(self): return len(self.__data) if self.__id: # Get More - dbname, collname = self.__ns.split('.', 1) + dbname, collname = self.__ns.split(".", 1) read_pref = self.__collection._read_preference_for(self.session) self.__send_message( - self._getmore_class(dbname, - collname, - self.__batch_size, - self.__id, - self.__collection.codec_options, - read_pref, - self.__session, - self.__collection.database.client, - self.__max_await_time_ms, - self.__sock_mgr, False)) + self._getmore_class( + dbname, + collname, + self.__batch_size, + self.__id, + self.__collection.codec_options, + read_pref, + self.__session, + self.__collection.database.client, + self.__max_await_time_ms, + self.__sock_mgr, + False, + ) + ) else: # Cursor id is zero nothing else to return self.__die(True) @@ -305,7 +305,8 @@ def __exit__(self, exc_type: Any, exc_val: Any, exc_tb: Any) -> None: class RawBatchCommandCursor(CommandCursor, Generic[_DocumentType]): _getmore_class = _RawBatchGetMore - def __init__(self, + def __init__( + self, collection: "Collection[_DocumentType]", cursor_info: Mapping[str, Any], address: Optional[Tuple[str, Optional[int]]], @@ -322,15 +323,21 @@ def __init__(self, .. seealso:: The MongoDB documentation on `cursors `_. """ - assert not cursor_info.get('firstBatch') + assert not cursor_info.get("firstBatch") super(RawBatchCommandCursor, self).__init__( - collection, cursor_info, address, batch_size, - max_await_time_ms, session, explicit_session) - - def _unpack_response(self, response, cursor_id, codec_options, - user_fields=None, legacy_response=False): - raw_response = response.raw_response( - cursor_id, user_fields=user_fields) + collection, + cursor_info, + address, + batch_size, + max_await_time_ms, + session, + explicit_session, + ) + + def _unpack_response( + self, response, cursor_id, codec_options, user_fields=None, legacy_response=False + ): + raw_response = response.raw_response(cursor_id, user_fields=user_fields) if not legacy_response: # OP_MSG returns firstBatch/nextBatch documents as a BSON array # Re-assemble the array of documents into a document stream diff --git a/pymongo/common.py b/pymongo/common.py index fa2fe9bf11..769b277cf3 100644 --- a/pymongo/common.py +++ b/pymongo/common.py @@ -18,8 +18,20 @@ import datetime import warnings from collections import OrderedDict, abc -from typing import (Any, Callable, Dict, List, Mapping, MutableMapping, - Optional, Sequence, Tuple, Type, Union, cast) +from typing import ( + Any, + Callable, + Dict, + List, + Mapping, + MutableMapping, + Optional, + Sequence, + Tuple, + Type, + Union, + cast, +) from urllib.parse import unquote_plus from bson import SON @@ -27,8 +39,10 @@ from bson.codec_options import CodecOptions, TypeRegistry from bson.raw_bson import RawBSONDocument from pymongo.auth import MECHANISMS -from pymongo.compression_support import (validate_compressors, - validate_zlib_compression_level) +from pymongo.compression_support import ( + validate_compressors, + validate_zlib_compression_level, +) from pymongo.driver_info import DriverInfo from pymongo.errors import ConfigurationError from pymongo.monitoring import _validate_event_listeners @@ -40,7 +54,7 @@ ORDERED_TYPES: Sequence[Type] = (SON, OrderedDict) # Defaults until we connect to a server and get updated limits. 
-MAX_BSON_SIZE = 16 * (1024 ** 2) +MAX_BSON_SIZE = 16 * (1024**2) MAX_MESSAGE_SIZE: int = 2 * MAX_BSON_SIZE MIN_WIRE_VERSION = 0 MAX_WIRE_VERSION = 0 @@ -121,10 +135,10 @@ def partition_node(node: str) -> Tuple[str, int]: """Split a host:port string into (host, int(port)) pair.""" host = node port = 27017 - idx = node.rfind(':') + idx = node.rfind(":") if idx != -1: - host, port = node[:idx], int(node[idx + 1:]) - if host.startswith('['): + host, port = node[:idx], int(node[idx + 1 :]) + if host.startswith("["): host = host[1:-1] return host, port @@ -147,11 +161,11 @@ def raise_config_error(key: str, dummy: Any) -> None: # Mapping of URI uuid representation options to valid subtypes. _UUID_REPRESENTATIONS = { - 'unspecified': UuidRepresentation.UNSPECIFIED, - 'standard': UuidRepresentation.STANDARD, - 'pythonLegacy': UuidRepresentation.PYTHON_LEGACY, - 'javaLegacy': UuidRepresentation.JAVA_LEGACY, - 'csharpLegacy': UuidRepresentation.CSHARP_LEGACY + "unspecified": UuidRepresentation.UNSPECIFIED, + "standard": UuidRepresentation.STANDARD, + "pythonLegacy": UuidRepresentation.PYTHON_LEGACY, + "javaLegacy": UuidRepresentation.JAVA_LEGACY, + "csharpLegacy": UuidRepresentation.CSHARP_LEGACY, } @@ -165,95 +179,81 @@ def validate_boolean(option: str, value: Any) -> bool: def validate_boolean_or_string(option: str, value: Any) -> bool: """Validates that value is True, False, 'true', or 'false'.""" if isinstance(value, str): - if value not in ('true', 'false'): - raise ValueError("The value of %s must be " - "'true' or 'false'" % (option,)) - return value == 'true' + if value not in ("true", "false"): + raise ValueError("The value of %s must be " "'true' or 'false'" % (option,)) + return value == "true" return validate_boolean(option, value) def validate_integer(option: str, value: Any) -> int: - """Validates that 'value' is an integer (or basestring representation). - """ + """Validates that 'value' is an integer (or basestring representation).""" if isinstance(value, int): return value elif isinstance(value, str): try: return int(value) except ValueError: - raise ValueError("The value of %s must be " - "an integer" % (option,)) + raise ValueError("The value of %s must be " "an integer" % (option,)) raise TypeError("Wrong type for %s, value must be an integer" % (option,)) def validate_positive_integer(option: str, value: Any) -> int: - """Validate that 'value' is a positive integer, which does not include 0. - """ + """Validate that 'value' is a positive integer, which does not include 0.""" val = validate_integer(option, value) if val <= 0: - raise ValueError("The value of %s must be " - "a positive integer" % (option,)) + raise ValueError("The value of %s must be " "a positive integer" % (option,)) return val def validate_non_negative_integer(option: str, value: Any) -> int: - """Validate that 'value' is a positive integer or 0. - """ + """Validate that 'value' is a positive integer or 0.""" val = validate_integer(option, value) if val < 0: - raise ValueError("The value of %s must be " - "a non negative integer" % (option,)) + raise ValueError("The value of %s must be " "a non negative integer" % (option,)) return val def validate_readable(option: str, value: Any) -> Optional[str]: - """Validates that 'value' is file-like and readable. 
- """ + """Validates that 'value' is file-like and readable.""" if value is None: return value # First make sure its a string py3.3 open(True, 'r') succeeds # Used in ssl cert checking due to poor ssl module error reporting value = validate_string(option, value) - open(value, 'r').close() + open(value, "r").close() return value def validate_positive_integer_or_none(option: str, value: Any) -> Optional[int]: - """Validate that 'value' is a positive integer or None. - """ + """Validate that 'value' is a positive integer or None.""" if value is None: return value return validate_positive_integer(option, value) def validate_non_negative_integer_or_none(option: str, value: Any) -> Optional[int]: - """Validate that 'value' is a positive integer or 0 or None. - """ + """Validate that 'value' is a positive integer or 0 or None.""" if value is None: return value return validate_non_negative_integer(option, value) def validate_string(option: str, value: Any) -> str: - """Validates that 'value' is an instance of `str`. - """ + """Validates that 'value' is an instance of `str`.""" if isinstance(value, str): return value - raise TypeError("Wrong type for %s, value must be an instance of " - "str" % (option,)) + raise TypeError("Wrong type for %s, value must be an instance of " "str" % (option,)) def validate_string_or_none(option: str, value: Any) -> Optional[str]: - """Validates that 'value' is an instance of `basestring` or `None`. - """ + """Validates that 'value' is an instance of `basestring` or `None`.""" if value is None: return value return validate_string(option, value) def validate_int_or_basestring(option: str, value: Any) -> Union[int, str]: - """Validates that 'value' is an integer or string. - """ + """Validates that 'value' is an integer or string.""" if isinstance(value, int): return value elif isinstance(value, str): @@ -261,13 +261,11 @@ def validate_int_or_basestring(option: str, value: Any) -> Union[int, str]: return int(value) except ValueError: return value - raise TypeError("Wrong type for %s, value must be an " - "integer or a string" % (option,)) + raise TypeError("Wrong type for %s, value must be an " "integer or a string" % (option,)) def validate_non_negative_int_or_basestring(option: Any, value: Any) -> Union[int, str]: - """Validates that 'value' is an integer or string. - """ + """Validates that 'value' is an integer or string.""" if isinstance(value, int): return value elif isinstance(value, str): @@ -276,13 +274,14 @@ def validate_non_negative_int_or_basestring(option: Any, value: Any) -> Union[in except ValueError: return value return validate_non_negative_integer(option, val) - raise TypeError("Wrong type for %s, value must be an " - "non negative integer or a string" % (option,)) + raise TypeError( + "Wrong type for %s, value must be an " "non negative integer or a string" % (option,) + ) def validate_positive_float(option: str, value: Any) -> float: """Validates that 'value' is a float, or can be converted to one, and is - positive. + positive. 
""" errmsg = "%s must be an integer or float" % (option,) try: @@ -295,8 +294,7 @@ def validate_positive_float(option: str, value: Any) -> float: # float('inf') doesn't work in 2.4 or 2.5 on Windows, so just cap floats at # one billion - this is a reasonable approximation for infinity if not 0 < value < 1e9: - raise ValueError("%s must be greater than 0 and " - "less than one billion" % (option,)) + raise ValueError("%s must be greater than 0 and " "less than one billion" % (option,)) return value @@ -325,7 +323,7 @@ def validate_timeout_or_zero(option: str, value: Any) -> float: config error. """ if value is None: - raise ConfigurationError("%s cannot be None" % (option, )) + raise ConfigurationError("%s cannot be None" % (option,)) if value == 0 or value == "0": return 0 return validate_positive_float(option, value) / 1000.0 @@ -350,8 +348,7 @@ def validate_max_staleness(option: str, value: Any) -> int: def validate_read_preference(dummy: Any, value: Any) -> _ServerMode: - """Validate a read preference. - """ + """Validate a read preference.""" if not isinstance(value, _ServerMode): raise TypeError("%r is not a read preference." % (value,)) return value @@ -370,33 +367,32 @@ def validate_read_preference_mode(dummy: Any, value: Any) -> _ServerMode: def validate_auth_mechanism(option: str, value: Any) -> str: - """Validate the authMechanism URI option. - """ + """Validate the authMechanism URI option.""" if value not in MECHANISMS: raise ValueError("%s must be in %s" % (option, tuple(MECHANISMS))) return value def validate_uuid_representation(dummy: Any, value: Any) -> int: - """Validate the uuid representation option selected in the URI. - """ + """Validate the uuid representation option selected in the URI.""" try: return _UUID_REPRESENTATIONS[value] except KeyError: - raise ValueError("%s is an invalid UUID representation. " - "Must be one of " - "%s" % (value, tuple(_UUID_REPRESENTATIONS))) + raise ValueError( + "%s is an invalid UUID representation. " + "Must be one of " + "%s" % (value, tuple(_UUID_REPRESENTATIONS)) + ) def validate_read_preference_tags(name: str, value: Any) -> List[Dict[str, str]]: - """Parse readPreferenceTags if passed as a client kwarg. - """ + """Parse readPreferenceTags if passed as a client kwarg.""" if not isinstance(value, list): value = [value] tag_sets: List = [] for tag_set in value: - if tag_set == '': + if tag_set == "": tag_sets.append({}) continue try: @@ -406,37 +402,41 @@ def validate_read_preference_tags(name: str, value: Any) -> List[Dict[str, str]] tags[unquote_plus(key)] = unquote_plus(val) tag_sets.append(tags) except Exception: - raise ValueError("%r not a valid " - "value for %s" % (tag_set, name)) + raise ValueError("%r not a valid " "value for %s" % (tag_set, name)) return tag_sets -_MECHANISM_PROPS = frozenset(['SERVICE_NAME', - 'CANONICALIZE_HOST_NAME', - 'SERVICE_REALM', - 'AWS_SESSION_TOKEN']) +_MECHANISM_PROPS = frozenset( + ["SERVICE_NAME", "CANONICALIZE_HOST_NAME", "SERVICE_REALM", "AWS_SESSION_TOKEN"] +) def validate_auth_mechanism_properties(option: str, value: Any) -> Dict[str, Union[bool, str]]: """Validate authMechanismProperties.""" value = validate_string(option, value) props: Dict[str, Any] = {} - for opt in value.split(','): + for opt in value.split(","): try: - key, val = opt.split(':') + key, val = opt.split(":") except ValueError: # Try not to leak the token. 
- if 'AWS_SESSION_TOKEN' in opt: - opt = ('AWS_SESSION_TOKEN:, did you forget ' - 'to percent-escape the token with quote_plus?') - raise ValueError("auth mechanism properties must be " - "key:value pairs like SERVICE_NAME:" - "mongodb, not %s." % (opt,)) + if "AWS_SESSION_TOKEN" in opt: + opt = ( + "AWS_SESSION_TOKEN:, did you forget " + "to percent-escape the token with quote_plus?" + ) + raise ValueError( + "auth mechanism properties must be " + "key:value pairs like SERVICE_NAME:" + "mongodb, not %s." % (opt,) + ) if key not in _MECHANISM_PROPS: - raise ValueError("%s is not a supported auth " - "mechanism property. Must be one of " - "%s." % (key, tuple(_MECHANISM_PROPS))) - if key == 'CANONICALIZE_HOST_NAME': + raise ValueError( + "%s is not a supported auth " + "mechanism property. Must be one of " + "%s." % (key, tuple(_MECHANISM_PROPS)) + ) + if key == "CANONICALIZE_HOST_NAME": props[key] = validate_boolean_or_string(key, val) else: props[key] = unquote_plus(val) @@ -444,20 +444,23 @@ def validate_auth_mechanism_properties(option: str, value: Any) -> Dict[str, Uni return props -def validate_document_class(option: str, value: Any) -> Union[Type[MutableMapping], Type[RawBSONDocument]]: +def validate_document_class( + option: str, value: Any +) -> Union[Type[MutableMapping], Type[RawBSONDocument]]: """Validate the document_class option.""" if not issubclass(value, (abc.MutableMapping, RawBSONDocument)): - raise TypeError("%s must be dict, bson.son.SON, " - "bson.raw_bson.RawBSONDocument, or a " - "sublass of collections.MutableMapping" % (option,)) + raise TypeError( + "%s must be dict, bson.son.SON, " + "bson.raw_bson.RawBSONDocument, or a " + "sublass of collections.MutableMapping" % (option,) + ) return value def validate_type_registry(option: Any, value: Any) -> Optional[TypeRegistry]: """Validate the type_registry option.""" if value is not None and not isinstance(value, TypeRegistry): - raise TypeError("%s must be an instance of %s" % ( - option, TypeRegistry)) + raise TypeError("%s must be an instance of %s" % (option, TypeRegistry)) return value @@ -478,26 +481,32 @@ def validate_list_or_none(option: Any, value: Any) -> Optional[List]: def validate_list_or_mapping(option: Any, value: Any) -> None: """Validates that 'value' is a list or a document.""" if not isinstance(value, (abc.Mapping, list)): - raise TypeError("%s must either be a list or an instance of dict, " - "bson.son.SON, or any other type that inherits from " - "collections.Mapping" % (option,)) + raise TypeError( + "%s must either be a list or an instance of dict, " + "bson.son.SON, or any other type that inherits from " + "collections.Mapping" % (option,) + ) def validate_is_mapping(option: str, value: Any) -> None: """Validate the type of method arguments that expect a document.""" if not isinstance(value, abc.Mapping): - raise TypeError("%s must be an instance of dict, bson.son.SON, or " - "any other type that inherits from " - "collections.Mapping" % (option,)) + raise TypeError( + "%s must be an instance of dict, bson.son.SON, or " + "any other type that inherits from " + "collections.Mapping" % (option,) + ) def validate_is_document_type(option: str, value: Any) -> None: """Validate the type of method arguments that expect a MongoDB document.""" if not isinstance(value, (abc.MutableMapping, RawBSONDocument)): - raise TypeError("%s must be an instance of dict, bson.son.SON, " - "bson.raw_bson.RawBSONDocument, or " - "a type that inherits from " - "collections.MutableMapping" % (option,)) + raise TypeError( + 
"%s must be an instance of dict, bson.son.SON, " + "bson.raw_bson.RawBSONDocument, or " + "a type that inherits from " + "collections.MutableMapping" % (option,) + ) def validate_appname_or_none(option: str, value: Any) -> Optional[str]: @@ -506,7 +515,7 @@ def validate_appname_or_none(option: str, value: Any) -> Optional[str]: return value validate_string(option, value) # We need length in bytes, so encode utf8 first. - if len(value.encode('utf-8')) > 128: + if len(value.encode("utf-8")) > 128: raise ValueError("%s must be <= 128 bytes" % (option,)) return value @@ -544,8 +553,8 @@ def validate_ok_for_replace(replacement: Mapping[str, Any]) -> None: # Replacement can be {} if replacement and not isinstance(replacement, RawBSONDocument): first = next(iter(replacement)) - if first.startswith('$'): - raise ValueError('replacement can not include $ operators') + if first.startswith("$"): + raise ValueError("replacement can not include $ operators") def validate_ok_for_update(update: Any) -> None: @@ -553,30 +562,30 @@ def validate_ok_for_update(update: Any) -> None: validate_list_or_mapping("update", update) # Update cannot be {}. if not update: - raise ValueError('update cannot be empty') + raise ValueError("update cannot be empty") is_document = not isinstance(update, list) first = next(iter(update)) - if is_document and not first.startswith('$'): - raise ValueError('update only works with $ operators') + if is_document and not first.startswith("$"): + raise ValueError("update only works with $ operators") -_UNICODE_DECODE_ERROR_HANDLERS = frozenset(['strict', 'replace', 'ignore']) +_UNICODE_DECODE_ERROR_HANDLERS = frozenset(["strict", "replace", "ignore"]) def validate_unicode_decode_error_handler(dummy: Any, value: str) -> str: - """Validate the Unicode decode error handler option of CodecOptions. - """ + """Validate the Unicode decode error handler option of CodecOptions.""" if value not in _UNICODE_DECODE_ERROR_HANDLERS: - raise ValueError("%s is an invalid Unicode decode error handler. " - "Must be one of " - "%s" % (value, tuple(_UNICODE_DECODE_ERROR_HANDLERS))) + raise ValueError( + "%s is an invalid Unicode decode error handler. " + "Must be one of " + "%s" % (value, tuple(_UNICODE_DECODE_ERROR_HANDLERS)) + ) return value def validate_tzinfo(dummy: Any, value: Any) -> Optional[datetime.tzinfo]: - """Validate the tzinfo option - """ + """Validate the tzinfo option""" if value is not None and not isinstance(value, datetime.tzinfo): raise TypeError("%s must be an instance of datetime.tzinfo" % value) return value @@ -587,9 +596,9 @@ def validate_auto_encryption_opts_or_none(option: Any, value: Any) -> Optional[A if value is None: return value from pymongo.encryption_options import AutoEncryptionOpts + if not isinstance(value, AutoEncryptionOpts): - raise TypeError("%s must be an instance of AutoEncryptionOpts" % ( - option,)) + raise TypeError("%s must be an instance of AutoEncryptionOpts" % (option,)) return value @@ -597,7 +606,7 @@ def validate_auto_encryption_opts_or_none(option: Any, value: Any) -> Optional[A # Dictionary where keys are the names of public URI options, and values # are lists of aliases for that option. URI_OPTIONS_ALIAS_MAP: Dict[str, List[str]] = { - 'tls': ['ssl'], + "tls": ["ssl"], } # Dictionary where keys are the names of URI options, and values @@ -605,73 +614,73 @@ def validate_auto_encryption_opts_or_none(option: Any, value: Any) -> Optional[A # alias uses a different validator than its public counterpart, it should be # included here as a key, value pair. 
URI_OPTIONS_VALIDATOR_MAP: Dict[str, Callable[[Any, Any], Any]] = { - 'appname': validate_appname_or_none, - 'authmechanism': validate_auth_mechanism, - 'authmechanismproperties': validate_auth_mechanism_properties, - 'authsource': validate_string, - 'compressors': validate_compressors, - 'connecttimeoutms': validate_timeout_or_none_or_zero, - 'directconnection': validate_boolean_or_string, - 'heartbeatfrequencyms': validate_timeout_or_none, - 'journal': validate_boolean_or_string, - 'localthresholdms': validate_positive_float_or_zero, - 'maxidletimems': validate_timeout_or_none, - 'maxconnecting': validate_positive_integer, - 'maxpoolsize': validate_non_negative_integer_or_none, - 'maxstalenessseconds': validate_max_staleness, - 'readconcernlevel': validate_string_or_none, - 'readpreference': validate_read_preference_mode, - 'readpreferencetags': validate_read_preference_tags, - 'replicaset': validate_string_or_none, - 'retryreads': validate_boolean_or_string, - 'retrywrites': validate_boolean_or_string, - 'loadbalanced': validate_boolean_or_string, - 'serverselectiontimeoutms': validate_timeout_or_zero, - 'sockettimeoutms': validate_timeout_or_none_or_zero, - 'tls': validate_boolean_or_string, - 'tlsallowinvalidcertificates': validate_boolean_or_string, - 'tlsallowinvalidhostnames': validate_boolean_or_string, - 'tlscafile': validate_readable, - 'tlscertificatekeyfile': validate_readable, - 'tlscertificatekeyfilepassword': validate_string_or_none, - 'tlsdisableocspendpointcheck': validate_boolean_or_string, - 'tlsinsecure': validate_boolean_or_string, - 'w': validate_non_negative_int_or_basestring, - 'wtimeoutms': validate_non_negative_integer, - 'zlibcompressionlevel': validate_zlib_compression_level, - 'srvservicename': validate_string, - 'srvmaxhosts': validate_non_negative_integer + "appname": validate_appname_or_none, + "authmechanism": validate_auth_mechanism, + "authmechanismproperties": validate_auth_mechanism_properties, + "authsource": validate_string, + "compressors": validate_compressors, + "connecttimeoutms": validate_timeout_or_none_or_zero, + "directconnection": validate_boolean_or_string, + "heartbeatfrequencyms": validate_timeout_or_none, + "journal": validate_boolean_or_string, + "localthresholdms": validate_positive_float_or_zero, + "maxidletimems": validate_timeout_or_none, + "maxconnecting": validate_positive_integer, + "maxpoolsize": validate_non_negative_integer_or_none, + "maxstalenessseconds": validate_max_staleness, + "readconcernlevel": validate_string_or_none, + "readpreference": validate_read_preference_mode, + "readpreferencetags": validate_read_preference_tags, + "replicaset": validate_string_or_none, + "retryreads": validate_boolean_or_string, + "retrywrites": validate_boolean_or_string, + "loadbalanced": validate_boolean_or_string, + "serverselectiontimeoutms": validate_timeout_or_zero, + "sockettimeoutms": validate_timeout_or_none_or_zero, + "tls": validate_boolean_or_string, + "tlsallowinvalidcertificates": validate_boolean_or_string, + "tlsallowinvalidhostnames": validate_boolean_or_string, + "tlscafile": validate_readable, + "tlscertificatekeyfile": validate_readable, + "tlscertificatekeyfilepassword": validate_string_or_none, + "tlsdisableocspendpointcheck": validate_boolean_or_string, + "tlsinsecure": validate_boolean_or_string, + "w": validate_non_negative_int_or_basestring, + "wtimeoutms": validate_non_negative_integer, + "zlibcompressionlevel": validate_zlib_compression_level, + "srvservicename": validate_string, + "srvmaxhosts": 
validate_non_negative_integer, } # Dictionary where keys are the names of URI options specific to pymongo, # and values are functions that validate user-input values for those options. NONSPEC_OPTIONS_VALIDATOR_MAP: Dict[str, Callable[[Any, Any], Any]] = { - 'connect': validate_boolean_or_string, - 'driver': validate_driver_or_none, - 'server_api': validate_server_api_or_none, - 'fsync': validate_boolean_or_string, - 'minpoolsize': validate_non_negative_integer, - 'tlscrlfile': validate_readable, - 'tz_aware': validate_boolean_or_string, - 'unicode_decode_error_handler': validate_unicode_decode_error_handler, - 'uuidrepresentation': validate_uuid_representation, - 'waitqueuemultiple': validate_non_negative_integer_or_none, - 'waitqueuetimeoutms': validate_timeout_or_none, + "connect": validate_boolean_or_string, + "driver": validate_driver_or_none, + "server_api": validate_server_api_or_none, + "fsync": validate_boolean_or_string, + "minpoolsize": validate_non_negative_integer, + "tlscrlfile": validate_readable, + "tz_aware": validate_boolean_or_string, + "unicode_decode_error_handler": validate_unicode_decode_error_handler, + "uuidrepresentation": validate_uuid_representation, + "waitqueuemultiple": validate_non_negative_integer_or_none, + "waitqueuetimeoutms": validate_timeout_or_none, } # Dictionary where keys are the names of keyword-only options for the # MongoClient constructor, and values are functions that validate user-input # values for those options. KW_VALIDATORS: Dict[str, Callable[[Any, Any], Any]] = { - 'document_class': validate_document_class, - 'type_registry': validate_type_registry, - 'read_preference': validate_read_preference, - 'event_listeners': _validate_event_listeners, - 'tzinfo': validate_tzinfo, - 'username': validate_string_or_none, - 'password': validate_string_or_none, - 'server_selector': validate_is_callable_or_none, - 'auto_encryption_opts': validate_auto_encryption_opts_or_none, + "document_class": validate_document_class, + "type_registry": validate_type_registry, + "read_preference": validate_read_preference, + "event_listeners": _validate_event_listeners, + "tzinfo": validate_tzinfo, + "username": validate_string_or_none, + "password": validate_string_or_none, + "server_selector": validate_is_callable_or_none, + "auto_encryption_opts": validate_auto_encryption_opts_or_none, } # Dictionary where keys are any URI option name, and values are the @@ -679,7 +688,7 @@ def validate_auto_encryption_opts_or_none(option: Any, value: Any) -> Optional[A # variant need not be included here. Options whose public and internal # names are the same need not be included here. INTERNAL_URI_OPTION_NAME_MAP: Dict[str, str] = { - 'ssl': 'tls', + "ssl": "tls", } # Map from deprecated URI option names to a tuple indicating the method of @@ -701,8 +710,7 @@ def validate_auto_encryption_opts_or_none(option: Any, value: Any) -> Optional[A for optname, aliases in URI_OPTIONS_ALIAS_MAP.items(): for alias in aliases: if alias not in URI_OPTIONS_VALIDATOR_MAP: - URI_OPTIONS_VALIDATOR_MAP[alias] = ( - URI_OPTIONS_VALIDATOR_MAP[optname]) + URI_OPTIONS_VALIDATOR_MAP[alias] = URI_OPTIONS_VALIDATOR_MAP[optname] # Map containing all URI option and keyword argument validators. VALIDATORS: Dict[str, Callable[[Any, Any], Any]] = URI_OPTIONS_VALIDATOR_MAP.copy() @@ -710,39 +718,38 @@ def validate_auto_encryption_opts_or_none(option: Any, value: Any) -> Optional[A # List of timeout-related options. 
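# (Illustration only, not part of this patch: each name below is a key in the
# combined VALIDATORS map assembled above, so for instance
# VALIDATORS["connecttimeoutms"]("connectTimeoutMS", 20000) dispatches to the
# shared timeout validation these options have in common.)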
TIMEOUT_OPTIONS: List[str] = [ - 'connecttimeoutms', - 'heartbeatfrequencyms', - 'maxidletimems', - 'maxstalenessseconds', - 'serverselectiontimeoutms', - 'sockettimeoutms', - 'waitqueuetimeoutms', + "connecttimeoutms", + "heartbeatfrequencyms", + "maxidletimems", + "maxstalenessseconds", + "serverselectiontimeoutms", + "sockettimeoutms", + "waitqueuetimeoutms", ] -_AUTH_OPTIONS = frozenset(['authmechanismproperties']) +_AUTH_OPTIONS = frozenset(["authmechanismproperties"]) def validate_auth_option(option: str, value: Any) -> Tuple[str, Any]: - """Validate optional authentication parameters. - """ + """Validate optional authentication parameters.""" lower, value = validate(option, value) if lower not in _AUTH_OPTIONS: - raise ConfigurationError('Unknown ' - 'authentication option: %s' % (option,)) + raise ConfigurationError("Unknown " "authentication option: %s" % (option,)) return option, value def validate(option: str, value: Any) -> Tuple[str, Any]: - """Generic validation function. - """ + """Generic validation function.""" lower = option.lower() validator = VALIDATORS.get(lower, raise_config_error) value = validator(option, value) return option, value -def get_validated_options(options: Mapping[str, Any], warn: bool = True) -> MutableMapping[str, Any]: +def get_validated_options( + options: Mapping[str, Any], warn: bool = True +) -> MutableMapping[str, Any]: """Validate each entry in options and raise a warning if it is not valid. Returns a copy of options with invalid entries removed. @@ -765,8 +772,7 @@ def get_validated_options(options: Mapping[str, Any], warn: bool = True) -> Muta for opt, value in options.items(): normed_key = get_normed_key(opt) try: - validator = URI_OPTIONS_VALIDATOR_MAP.get( - normed_key, raise_config_error) + validator = URI_OPTIONS_VALIDATOR_MAP.get(normed_key, raise_config_error) value = validator(opt, value) except (ValueError, TypeError, ConfigurationError) as exc: if warn: @@ -779,14 +785,7 @@ def get_validated_options(options: Mapping[str, Any], warn: bool = True) -> Muta # List of write-concern-related options. -WRITE_CONCERN_OPTIONS = frozenset([ - 'w', - 'wtimeout', - 'wtimeoutms', - 'fsync', - 'j', - 'journal' -]) +WRITE_CONCERN_OPTIONS = frozenset(["w", "wtimeout", "wtimeoutms", "fsync", "j", "journal"]) class BaseObject(object): @@ -796,28 +795,38 @@ class BaseObject(object): SHOULD NOT BE USED BY DEVELOPERS EXTERNAL TO MONGODB. """ - def __init__(self, codec_options: CodecOptions, read_preference: _ServerMode, write_concern: WriteConcern, - read_concern: ReadConcern) -> None: + def __init__( + self, + codec_options: CodecOptions, + read_preference: _ServerMode, + write_concern: WriteConcern, + read_concern: ReadConcern, + ) -> None: if not isinstance(codec_options, CodecOptions): - raise TypeError("codec_options must be an instance of " - "bson.codec_options.CodecOptions") + raise TypeError( + "codec_options must be an instance of " "bson.codec_options.CodecOptions" + ) self.__codec_options = codec_options if not isinstance(read_preference, _ServerMode): - raise TypeError("%r is not valid for read_preference. See " - "pymongo.read_preferences for valid " - "options." % (read_preference,)) + raise TypeError( + "%r is not valid for read_preference. See " + "pymongo.read_preferences for valid " + "options." 
% (read_preference,) + ) self.__read_preference = read_preference if not isinstance(write_concern, WriteConcern): - raise TypeError("write_concern must be an instance of " - "pymongo.write_concern.WriteConcern") + raise TypeError( + "write_concern must be an instance of " "pymongo.write_concern.WriteConcern" + ) self.__write_concern = write_concern if not isinstance(read_concern, ReadConcern): - raise TypeError("read_concern must be an instance of " - "pymongo.read_concern.ReadConcern") + raise TypeError( + "read_concern must be an instance of " "pymongo.read_concern.ReadConcern" + ) self.__read_concern = read_concern @property @@ -838,8 +847,7 @@ def write_concern(self) -> WriteConcern: return self.__write_concern def _write_concern_for(self, session): - """Read only access to the write concern of this instance or session. - """ + """Read only access to the write concern of this instance or session.""" # Override this operation's write concern with the transaction's. if session and session.in_transaction: return DEFAULT_WRITE_CONCERN @@ -855,8 +863,7 @@ def read_preference(self) -> _ServerMode: return self.__read_preference def _read_preference_for(self, session): - """Read only access to the read preference of this instance or session. - """ + """Read only access to the read preference of this instance or session.""" # Override this operation's read preference with the transaction's. if session: return session._txn_read_preference() or self.__read_preference diff --git a/pymongo/compression_support.py b/pymongo/compression_support.py index c9cc041aff..72cc232867 100644 --- a/pymongo/compression_support.py +++ b/pymongo/compression_support.py @@ -17,6 +17,7 @@ try: import snappy + _HAVE_SNAPPY = True except ImportError: # python-snappy isn't available. @@ -24,6 +25,7 @@ try: import zlib + _HAVE_ZLIB = True except ImportError: # Python built without zlib support. @@ -31,6 +33,7 @@ try: from zstandard import ZstdCompressor, ZstdDecompressor + _HAVE_ZSTD = True except ImportError: _HAVE_ZSTD = False @@ -59,17 +62,20 @@ def validate_compressors(dummy, value): compressors.remove(compressor) warnings.warn( "Wire protocol compression with snappy is not available. " - "You must install the python-snappy module for snappy support.") + "You must install the python-snappy module for snappy support." + ) elif compressor == "zlib" and not _HAVE_ZLIB: compressors.remove(compressor) warnings.warn( "Wire protocol compression with zlib is not available. " - "The zlib module is not available.") + "The zlib module is not available." + ) elif compressor == "zstd" and not _HAVE_ZSTD: compressors.remove(compressor) warnings.warn( "Wire protocol compression with zstandard is not available. " - "You must install the zstandard module for zstandard support.") + "You must install the zstandard module for zstandard support." + ) return compressors @@ -79,8 +85,7 @@ def validate_zlib_compression_level(option, value): except: raise TypeError("%s must be an integer, not %r." % (option, value)) if level < -1 or level > 9: - raise ValueError( - "%s must be between -1 and 9, not %d." % (option, level)) + raise ValueError("%s must be between -1 and 9, not %d." 
% (option, level)) return level diff --git a/pymongo/cursor.py b/pymongo/cursor.py index 152acaca65..ba9e5956f2 100644 --- a/pymongo/cursor.py +++ b/pymongo/cursor.py @@ -17,52 +17,74 @@ import threading import warnings from collections import deque -from typing import (TYPE_CHECKING, Any, Dict, Generic, Iterable, List, Mapping, - MutableMapping, Optional, Sequence, Tuple, Union, cast, overload) +from typing import ( + TYPE_CHECKING, + Any, + Dict, + Generic, + Iterable, + List, + Mapping, + MutableMapping, + Optional, + Sequence, + Tuple, + Union, + cast, + overload, +) from bson import RE_TYPE, _convert_raw_document_lists_to_streams from bson.code import Code from bson.son import SON from pymongo import helpers from pymongo.collation import validate_collation_or_none -from pymongo.common import (validate_boolean, validate_is_document_type, - validate_is_mapping) -from pymongo.errors import (ConnectionFailure, InvalidOperation, - OperationFailure) -from pymongo.message import (_CursorAddress, _GetMore, _Query, - _RawBatchGetMore, _RawBatchQuery) +from pymongo.common import ( + validate_boolean, + validate_is_document_type, + validate_is_mapping, +) +from pymongo.errors import ConnectionFailure, InvalidOperation, OperationFailure +from pymongo.message import ( + _CursorAddress, + _GetMore, + _Query, + _RawBatchGetMore, + _RawBatchQuery, +) from pymongo.response import PinnedResponse from pymongo.typings import _CollationIn, _DocumentType # These errors mean that the server has already killed the cursor so there is # no need to send killCursors. -_CURSOR_CLOSED_ERRORS = frozenset([ - 43, # CursorNotFound - 50, # MaxTimeMSExpired - 175, # QueryPlanKilled - 237, # CursorKilled - - # On a tailable cursor, the following errors mean the capped collection - # rolled over. - # MongoDB 2.6: - # {'$err': 'Runner killed during getMore', 'code': 28617, 'ok': 0} - 28617, - # MongoDB 3.0: - # {'$err': 'getMore executor error: UnknownError no details available', - # 'code': 17406, 'ok': 0} - 17406, - # MongoDB 3.2 + 3.4: - # {'ok': 0.0, 'errmsg': 'GetMore command executor error: - # CappedPositionLost: CollectionScan died due to failure to restore - # tailable cursor position. Last seen record id: RecordId(3)', - # 'code': 96} - 96, - # MongoDB 3.6+: - # {'ok': 0.0, 'errmsg': 'errmsg: "CollectionScan died due to failure to - # restore tailable cursor position. Last seen record id: RecordId(3)"', - # 'code': 136, 'codeName': 'CappedPositionLost'} - 136, -]) +_CURSOR_CLOSED_ERRORS = frozenset( + [ + 43, # CursorNotFound + 50, # MaxTimeMSExpired + 175, # QueryPlanKilled + 237, # CursorKilled + # On a tailable cursor, the following errors mean the capped collection + # rolled over. + # MongoDB 2.6: + # {'$err': 'Runner killed during getMore', 'code': 28617, 'ok': 0} + 28617, + # MongoDB 3.0: + # {'$err': 'getMore executor error: UnknownError no details available', + # 'code': 17406, 'ok': 0} + 17406, + # MongoDB 3.2 + 3.4: + # {'ok': 0.0, 'errmsg': 'GetMore command executor error: + # CappedPositionLost: CollectionScan died due to failure to restore + # tailable cursor position. Last seen record id: RecordId(3)', + # 'code': 96} + 96, + # MongoDB 3.6+: + # {'ok': 0.0, 'errmsg': 'errmsg: "CollectionScan died due to failure to + # restore tailable cursor position. 
Last seen record id: RecordId(3)"', + # 'code': 136, 'codeName': 'CappedPositionLost'} + 136, + ] +) _QUERY_OPTIONS = { "tailable_cursor": 2, @@ -71,7 +93,8 @@ "no_timeout": 16, "await_data": 32, "exhaust": 64, - "partial": 128} + "partial": 128, +} class CursorType(object): @@ -104,8 +127,8 @@ class CursorType(object): class _SocketManager(object): - """Used with exhaust cursors to ensure the socket is returned. - """ + """Used with exhaust cursors to ensure the socket is returned.""" + def __init__(self, sock, more_to_come): self.sock = sock self.more_to_come = more_to_come @@ -116,13 +139,13 @@ def update_exhaust(self, more_to_come): self.more_to_come = more_to_come def close(self): - """Return this instance's socket to the connection pool. - """ + """Return this instance's socket to the connection pool.""" if not self.closed: self.closed = True self.sock.unpin() self.sock = None + _Sort = Sequence[Tuple[str, Union[int, str, Mapping[str, Any]]]] _Hint = Union[str, _Sort] @@ -133,12 +156,13 @@ def close(self): class Cursor(Generic[_DocumentType]): - """A cursor / iterator over Mongo query results. - """ + """A cursor / iterator over Mongo query results.""" + _query_class = _Query _getmore_class = _GetMore - def __init__(self, + def __init__( + self, collection: "Collection[_DocumentType]", filter: Optional[Mapping[str, Any]] = None, projection: Optional[Union[Mapping[str, Any], Iterable[str]]] = None, @@ -162,7 +186,7 @@ def __init__(self, comment: Any = None, session: Optional["ClientSession"] = None, allow_disk_use: Optional[bool] = None, - let: Optional[bool] = None + let: Optional[bool] = None, ) -> None: """Create a new cursor. @@ -195,15 +219,22 @@ def __init__(self, raise TypeError("limit must be an instance of int") validate_boolean("no_cursor_timeout", no_cursor_timeout) if no_cursor_timeout and not self.__explicit_session: - warnings.warn("use an explicit session with no_cursor_timeout=True " - "otherwise the cursor may still timeout after " - "30 minutes, for more info see " - "https://docs.mongodb.com/v4.4/reference/method/" - "cursor.noCursorTimeout/" - "#session-idle-timeout-overrides-nocursortimeout", - UserWarning, stacklevel=2) - if cursor_type not in (CursorType.NON_TAILABLE, CursorType.TAILABLE, - CursorType.TAILABLE_AWAIT, CursorType.EXHAUST): + warnings.warn( + "use an explicit session with no_cursor_timeout=True " + "otherwise the cursor may still timeout after " + "30 minutes, for more info see " + "https://docs.mongodb.com/v4.4/reference/method/" + "cursor.noCursorTimeout/" + "#session-idle-timeout-overrides-nocursortimeout", + UserWarning, + stacklevel=2, + ) + if cursor_type not in ( + CursorType.NON_TAILABLE, + CursorType.TAILABLE, + CursorType.TAILABLE_AWAIT, + CursorType.EXHAUST, + ): raise ValueError("not a valid value for cursor_type") validate_boolean("allow_partial_results", allow_partial_results) validate_boolean("oplog_replay", oplog_replay) @@ -246,8 +277,7 @@ def __init__(self, # Exhaust cursor support if cursor_type == CursorType.EXHAUST: if self.__collection.database.client.is_mongos: - raise InvalidOperation('Exhaust cursors are ' - 'not supported by mongos') + raise InvalidOperation("Exhaust cursors are " "not supported by mongos") if limit: raise InvalidOperation("Can't use limit and exhaust together.") self.__exhaust = True @@ -290,8 +320,7 @@ def collection(self) -> "Collection[_DocumentType]": @property def retrieved(self) -> int: - """The number of documents retrieved so far. 
- """ + """The number of documents retrieved so far.""" return self.__retrieved def __del__(self) -> None: @@ -333,28 +362,47 @@ def _clone(self, deepcopy=True, base=None): else: base = self._clone_base(None) - values_to_clone = ("spec", "projection", "skip", "limit", - "max_time_ms", "max_await_time_ms", "comment", - "max", "min", "ordering", "explain", "hint", - "batch_size", "max_scan", - "query_flags", "collation", "empty", - "show_record_id", "return_key", "allow_disk_use", - "snapshot", "exhaust", "has_filter") - data = dict((k, v) for k, v in self.__dict__.items() - if k.startswith('_Cursor__') and k[9:] in values_to_clone) + values_to_clone = ( + "spec", + "projection", + "skip", + "limit", + "max_time_ms", + "max_await_time_ms", + "comment", + "max", + "min", + "ordering", + "explain", + "hint", + "batch_size", + "max_scan", + "query_flags", + "collation", + "empty", + "show_record_id", + "return_key", + "allow_disk_use", + "snapshot", + "exhaust", + "has_filter", + ) + data = dict( + (k, v) + for k, v in self.__dict__.items() + if k.startswith("_Cursor__") and k[9:] in values_to_clone + ) if deepcopy: data = self._deepcopy(data) base.__dict__.update(data) return base def _clone_base(self, session): - """Creates an empty Cursor object for information to be copied into. - """ + """Creates an empty Cursor object for information to be copied into.""" return self.__class__(self.__collection, session=session) def __die(self, synchronous=False): - """Closes this cursor. - """ + """Closes this cursor.""" try: already_killed = self.__killed except AttributeError: @@ -364,8 +412,7 @@ def __die(self, synchronous=False): self.__killed = True if self.__id and not already_killed: cursor_id = self.__id - address = _CursorAddress( - self.__address, "%s.%s" % (self.__dbname, self.__collname)) + address = _CursorAddress(self.__address, "%s.%s" % (self.__dbname, self.__collname)) else: # Skip killCursors. cursor_id = 0 @@ -376,19 +423,18 @@ def __die(self, synchronous=False): address, self.__sock_mgr, self.__session, - self.__explicit_session) + self.__explicit_session, + ) if not self.__explicit_session: self.__session = None self.__sock_mgr = None def close(self) -> None: - """Explicitly close / kill this cursor. - """ + """Explicitly close / kill this cursor.""" self.__die(True) def __query_spec(self): - """Get the spec to use for a query. - """ + """Get the spec to use for a query.""" operators = {} if self.__ordering: operators["$orderby"] = self.__ordering @@ -437,16 +483,15 @@ def __query_spec(self): # that breaks commands like count and find_and_modify. # Checking spec.keys()[0] covers the case that the spec # was passed as an instance of SON or OrderedDict. - elif ("query" in self.__spec and - (len(self.__spec) == 1 or - next(iter(self.__spec)) == "query")): + elif "query" in self.__spec and ( + len(self.__spec) == 1 or next(iter(self.__spec)) == "query" + ): return SON({"$query": self.__spec}) return self.__spec def __check_okay_to_chain(self): - """Check if it is okay to chain more options onto this cursor. 
- """ + """Check if it is okay to chain more options onto this cursor.""" if self.__retrieved or self.__id is not None: raise InvalidOperation("cannot set options after executing query") @@ -464,8 +509,7 @@ def add_option(self, mask: int) -> "Cursor[_DocumentType]": if self.__limit: raise InvalidOperation("Can't use limit and exhaust together.") if self.__collection.database.client.is_mongos: - raise InvalidOperation('Exhaust cursors are ' - 'not supported by mongos') + raise InvalidOperation("Exhaust cursors are " "not supported by mongos") self.__exhaust = True self.__query_flags |= mask @@ -503,7 +547,7 @@ def allow_disk_use(self, allow_disk_use: bool) -> "Cursor[_DocumentType]": .. versionadded:: 3.11 """ if not isinstance(allow_disk_use, bool): - raise TypeError('allow_disk_use must be a bool') + raise TypeError("allow_disk_use must be a bool") self.__check_okay_to_chain() self.__allow_disk_use = allow_disk_use @@ -594,8 +638,7 @@ def max_time_ms(self, max_time_ms: Optional[int]) -> "Cursor[_DocumentType]": :Parameters: - `max_time_ms`: the time limit after which the operation is aborted """ - if (not isinstance(max_time_ms, int) - and max_time_ms is not None): + if not isinstance(max_time_ms, int) and max_time_ms is not None: raise TypeError("max_time_ms must be an integer or None") self.__check_okay_to_chain() @@ -619,8 +662,7 @@ def max_await_time_ms(self, max_await_time_ms: Optional[int]) -> "Cursor[_Docume .. versionadded:: 3.2 """ - if (not isinstance(max_await_time_ms, int) - and max_await_time_ms is not None): + if not isinstance(max_await_time_ms, int) and max_await_time_ms is not None: raise TypeError("max_await_time_ms must be an integer or None") self.__check_okay_to_chain() @@ -688,15 +730,15 @@ def __getitem__(self, index): skip = 0 if index.start is not None: if index.start < 0: - raise IndexError("Cursor instances do not support " - "negative indices") + raise IndexError("Cursor instances do not support " "negative indices") skip = index.start if index.stop is not None: limit = index.stop - skip if limit < 0: - raise IndexError("stop index must be greater than start " - "index for slice %r" % index) + raise IndexError( + "stop index must be greater than start " "index for slice %r" % index + ) if limit == 0: self.__empty = True else: @@ -708,8 +750,7 @@ def __getitem__(self, index): if isinstance(index, int): if index < 0: - raise IndexError("Cursor instances do not support negative " - "indices") + raise IndexError("Cursor instances do not support negative " "indices") clone = self.clone() clone.skip(index + self.__skip) clone.limit(-1) # use a hard limit @@ -717,8 +758,7 @@ def __getitem__(self, index): for doc in clone: return doc raise IndexError("no such item for Cursor instance") - raise TypeError("index %r cannot be applied to Cursor " - "instances" % index) + raise TypeError("index %r cannot be applied to Cursor " "instances" % index) def max_scan(self, max_scan: Optional[int]) -> "Cursor[_DocumentType]": """**DEPRECATED** - Limit the number of documents to scan when @@ -786,7 +826,9 @@ def min(self, spec: _Sort) -> "Cursor[_DocumentType]": self.__min = SON(spec) return self - def sort(self, key_or_list: _Hint, direction: Optional[Union[int, str]] = None) -> "Cursor[_DocumentType]": + def sort( + self, key_or_list: _Hint, direction: Optional[Union[int, str]] = None + ) -> "Cursor[_DocumentType]": """Sorts this cursor's results. 
Pass a field name and a direction, either @@ -853,14 +895,13 @@ def distinct(self, key: str) -> List: if self.__spec: options["query"] = self.__spec if self.__max_time_ms is not None: - options['maxTimeMS'] = self.__max_time_ms + options["maxTimeMS"] = self.__max_time_ms if self.__comment: - options['comment'] = self.__comment + options["comment"] = self.__comment if self.__collation is not None: - options['collation'] = self.__collation + options["collation"] = self.__collation - return self.__collection.distinct( - key, session=self.__session, **options) + return self.__collection.distinct(key, session=self.__session, **options) def explain(self) -> _DocumentType: """Returns an explain plan record for this cursor. @@ -1005,12 +1046,12 @@ def __send_message(self, operation): client = self.__collection.database.client # OP_MSG is required to support exhaust cursors with encryption. if client._encrypter and self.__exhaust: - raise InvalidOperation( - "exhaust cursors do not support auto encryption") + raise InvalidOperation("exhaust cursors do not support auto encryption") try: response = client._run_operation( - operation, self._unpack_response, address=self.__address) + operation, self._unpack_response, address=self.__address + ) except OperationFailure as exc: if exc.code in _CURSOR_CLOSED_ERRORS or self.__exhaust: # Don't send killCursors because the cursor is already closed. @@ -1020,8 +1061,10 @@ def __send_message(self, operation): # due to capped collection roll over. Setting # self.__killed to True ensures Cursor.alive will be # False. No need to re-raise. - if (exc.code in _CURSOR_CLOSED_ERRORS and - self.__query_flags & _QUERY_OPTIONS["tailable_cursor"]): + if ( + exc.code in _CURSOR_CLOSED_ERRORS + and self.__query_flags & _QUERY_OPTIONS["tailable_cursor"] + ): return raise except ConnectionFailure: @@ -1036,23 +1079,22 @@ def __send_message(self, operation): self.__address = response.address if isinstance(response, PinnedResponse): if not self.__sock_mgr: - self.__sock_mgr = _SocketManager(response.socket_info, - response.more_to_come) + self.__sock_mgr = _SocketManager(response.socket_info, response.more_to_come) cmd_name = operation.name docs = response.docs if response.from_command: if cmd_name != "explain": - cursor = docs[0]['cursor'] - self.__id = cursor['id'] - if cmd_name == 'find': - documents = cursor['firstBatch'] + cursor = docs[0]["cursor"] + self.__id = cursor["id"] + if cmd_name == "find": + documents = cursor["firstBatch"] # Update the namespace used for future getMore commands. - ns = cursor.get('ns') + ns = cursor.get("ns") if ns: - self.__dbname, self.__collname = ns.split('.', 1) + self.__dbname, self.__collname = ns.split(".", 1) else: - documents = cursor['nextBatch'] + documents = cursor["nextBatch"] self.__data = deque(documents) self.__retrieved += len(documents) else: @@ -1072,16 +1114,15 @@ def __send_message(self, operation): if self.__limit and self.__id and self.__limit <= self.__retrieved: self.close() - def _unpack_response(self, response, cursor_id, codec_options, - user_fields=None, legacy_response=False): - return response.unpack_response(cursor_id, codec_options, user_fields, - legacy_response) + def _unpack_response( + self, response, cursor_id, codec_options, user_fields=None, legacy_response=False + ): + return response.unpack_response(cursor_id, codec_options, user_fields, legacy_response) def _read_preference(self): if self.__read_preference is None: # Save the read preference for getMore commands. 
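# (A getMore must run with the same read preference that selected the
# server for the initial query, so the resolved preference is cached on
# the cursor here instead of being re-resolved for every batch.)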
- self.__read_preference = self.__collection._read_preference_for( - self.session) + self.__read_preference = self.__collection._read_preference_for(self.session) return self.__read_preference def _refresh(self): @@ -1101,23 +1142,26 @@ def _refresh(self): if (self.__min or self.__max) and not self.__hint: raise InvalidOperation( "Passing a 'hint' is required when using the min/max query" - " option to ensure the query utilizes the correct index") - q = self._query_class(self.__query_flags, - self.__collection.database.name, - self.__collection.name, - self.__skip, - self.__query_spec(), - self.__projection, - self.__codec_options, - self._read_preference(), - self.__limit, - self.__batch_size, - self.__read_concern, - self.__collation, - self.__session, - self.__collection.database.client, - self.__allow_disk_use, - self.__exhaust) + " option to ensure the query utilizes the correct index" + ) + q = self._query_class( + self.__query_flags, + self.__collection.database.name, + self.__collection.name, + self.__skip, + self.__query_spec(), + self.__projection, + self.__codec_options, + self._read_preference(), + self.__limit, + self.__batch_size, + self.__read_concern, + self.__collation, + self.__session, + self.__collection.database.client, + self.__allow_disk_use, + self.__exhaust, + ) self.__send_message(q) elif self.__id: # Get More if self.__limit: @@ -1127,17 +1171,19 @@ def _refresh(self): else: limit = self.__batch_size # Exhaust cursors don't send getMore messages. - g = self._getmore_class(self.__dbname, - self.__collname, - limit, - self.__id, - self.__codec_options, - self._read_preference(), - self.__session, - self.__collection.database.client, - self.__max_await_time_ms, - self.__sock_mgr, - self.__exhaust) + g = self._getmore_class( + self.__dbname, + self.__collname, + limit, + self.__id, + self.__codec_options, + self._read_preference(), + self.__session, + self.__collection.database.client, + self.__max_await_time_ms, + self.__sock_mgr, + self.__exhaust, + ) self.__send_message(g) return len(self.__data) @@ -1232,7 +1278,7 @@ def _deepcopy(self, x, memo=None): don't have to copy them when cloning. """ y: Any - if not hasattr(x, 'items'): + if not hasattr(x, "items"): y, is_list, iterator = [], True, enumerate(x) else: y, is_list, iterator = {}, False, x.items() @@ -1276,10 +1322,10 @@ def __init__(self, collection: "Collection[_DocumentType]", *args: Any, **kwargs """ super(RawBatchCursor, self).__init__(collection, *args, **kwargs) - def _unpack_response(self, response, cursor_id, codec_options, - user_fields=None, legacy_response=False): - raw_response = response.raw_response( - cursor_id, user_fields=user_fields) + def _unpack_response( + self, response, cursor_id, codec_options, user_fields=None, legacy_response=False + ): + raw_response = response.raw_response(cursor_id, user_fields=user_fields) if not legacy_response: # OP_MSG returns firstBatch/nextBatch documents as a BSON array # Re-assemble the array of documents into a document stream diff --git a/pymongo/daemon.py b/pymongo/daemon.py index f0253547d9..53141751ac 100644 --- a/pymongo/daemon.py +++ b/pymongo/daemon.py @@ -24,7 +24,6 @@ import sys import warnings - # The maximum amount of time to wait for the intermediate subprocess. _WAIT_TIMEOUT = 10 _THIS_FILE = os.path.realpath(__file__) @@ -53,23 +52,29 @@ def _silence_resource_warning(popen): popen.returncode = 0 -if sys.platform == 'win32': +if sys.platform == "win32": # On Windows we spawn the daemon process simply by using DETACHED_PROCESS. 
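# (DETACHED_PROCESS starts the child without inheriting the parent's
# console, letting the spawned daemon outlive the application; the getattr
# below falls back to the documented flag value 0x00000008 on Pythons
# whose subprocess module does not export the constant.)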
- _DETACHED_PROCESS = getattr(subprocess, 'DETACHED_PROCESS', 0x00000008) + _DETACHED_PROCESS = getattr(subprocess, "DETACHED_PROCESS", 0x00000008) def _spawn_daemon(args): """Spawn a daemon process (Windows).""" try: - with open(os.devnull, 'r+b') as devnull: + with open(os.devnull, "r+b") as devnull: popen = subprocess.Popen( args, creationflags=_DETACHED_PROCESS, - stdin=devnull, stderr=devnull, stdout=devnull) + stdin=devnull, + stderr=devnull, + stdout=devnull, + ) _silence_resource_warning(popen) except FileNotFoundError as exc: - warnings.warn(f'Failed to start {args[0]}: is it on your $PATH?\n' - f'Original exception: {exc}', RuntimeWarning, - stacklevel=2) + warnings.warn( + f"Failed to start {args[0]}: is it on your $PATH?\n" f"Original exception: {exc}", + RuntimeWarning, + stacklevel=2, + ) + else: # On Unix we spawn the daemon process with a double Popen. # 1) The first Popen runs this file as a Python script using the current @@ -85,16 +90,16 @@ def _spawn_daemon(args): def _spawn(args): """Spawn the process and silence stdout/stderr.""" try: - with open(os.devnull, 'r+b') as devnull: + with open(os.devnull, "r+b") as devnull: return subprocess.Popen( - args, - close_fds=True, - stdin=devnull, stderr=devnull, stdout=devnull) + args, close_fds=True, stdin=devnull, stderr=devnull, stdout=devnull + ) except FileNotFoundError as exc: - warnings.warn(f'Failed to start {args[0]}: is it on your $PATH?\n' - f'Original exception: {exc}', RuntimeWarning, - stacklevel=2) - + warnings.warn( + f"Failed to start {args[0]}: is it on your $PATH?\n" f"Original exception: {exc}", + RuntimeWarning, + stacklevel=2, + ) def _spawn_daemon_double_popen(args): """Spawn a daemon process using a double subprocess.Popen.""" @@ -105,7 +110,6 @@ def _spawn_daemon_double_popen(args): # processes. _popen_wait(temp_proc, _WAIT_TIMEOUT) - def _spawn_daemon(args): """Spawn a daemon process (Unix).""" # "If Python is unable to retrieve the real path to its executable, @@ -123,10 +127,9 @@ def _spawn_daemon(args): # until the main application exits. _spawn(args) - - if __name__ == '__main__': + if __name__ == "__main__": # Attempt to start a new session to decouple from the parent. - if hasattr(os, 'setsid'): + if hasattr(os, "setsid"): try: os.setsid() except OSError: diff --git a/pymongo/database.py b/pymongo/database.py index 4f5f931352..675db132f7 100644 --- a/pymongo/database.py +++ b/pymongo/database.py @@ -13,8 +13,18 @@ # limitations under the License. """Database level operations.""" -from typing import (TYPE_CHECKING, Any, Dict, Generic, List, Mapping, MutableMapping, Optional, - Sequence, Union) +from typing import ( + TYPE_CHECKING, + Any, + Dict, + Generic, + List, + Mapping, + MutableMapping, + Optional, + Sequence, + Union, +) from bson.codec_options import DEFAULT_CODEC_OPTIONS, CodecOptions from bson.dbref import DBRef @@ -31,15 +41,13 @@ def _check_name(name): - """Check if a database name is valid. - """ + """Check if a database name is valid.""" if not name: raise InvalidName("database name cannot be the empty string") - for invalid_char in [' ', '.', '$', '/', '\\', '\x00', '"']: + for invalid_char in [" ", ".", "$", "/", "\\", "\x00", '"']: if invalid_char in name: - raise InvalidName("database names cannot contain the " - "character %r" % invalid_char) + raise InvalidName("database names cannot contain the " "character %r" % invalid_char) if TYPE_CHECKING: @@ -50,9 +58,10 @@ def _check_name(name): class Database(common.BaseObject, Generic[_DocumentType]): - """A Mongo database. 
- """ - def __init__(self, + """A Mongo database.""" + + def __init__( + self, client: "MongoClient[_DocumentType]", name: str, codec_options: Optional[CodecOptions] = None, @@ -110,12 +119,13 @@ def __init__(self, codec_options or client.codec_options, read_preference or client.read_preference, write_concern or client.write_concern, - read_concern or client.read_concern) + read_concern or client.read_concern, + ) if not isinstance(name, str): raise TypeError("name must be an instance of str") - if name != '$external': + if name != "$external": _check_name(name) self.__name = name @@ -131,7 +141,8 @@ def name(self) -> str: """The name of this :class:`Database`.""" return self.__name - def with_options(self, + def with_options( + self, codec_options: Optional[CodecOptions] = None, read_preference: Optional[_ServerMode] = None, write_concern: Optional["WriteConcern"] = None, @@ -168,17 +179,18 @@ def with_options(self, .. versionadded:: 3.8 """ - return Database(self.client, - self.__name, - codec_options or self.codec_options, - read_preference or self.read_preference, - write_concern or self.write_concern, - read_concern or self.read_concern) + return Database( + self.client, + self.__name, + codec_options or self.codec_options, + read_preference or self.read_preference, + write_concern or self.write_concern, + read_concern or self.read_concern, + ) def __eq__(self, other: Any) -> bool: if isinstance(other, Database): - return (self.__client == other.client and - self.__name == other.name) + return self.__client == other.client and self.__name == other.name return NotImplemented def __ne__(self, other: Any) -> bool: @@ -198,10 +210,11 @@ def __getattr__(self, name: str) -> Collection[_DocumentType]: :Parameters: - `name`: the name of the collection to get """ - if name.startswith('_'): + if name.startswith("_"): raise AttributeError( "Database has no attribute %r. To access the %s" - " collection, use database[%r]." % (name, name, name)) + " collection, use database[%r]." % (name, name, name) + ) return self.__getitem__(name) def __getitem__(self, name: str) -> "Collection[_DocumentType]": @@ -214,7 +227,8 @@ def __getitem__(self, name: str) -> "Collection[_DocumentType]": """ return Collection(self, name) - def get_collection(self, + def get_collection( + self, name: str, codec_options: Optional[CodecOptions] = None, read_preference: Optional[_ServerMode] = None, @@ -259,10 +273,11 @@ def get_collection(self, used. """ return Collection( - self, name, False, codec_options, read_preference, - write_concern, read_concern) + self, name, False, codec_options, read_preference, write_concern, read_concern + ) - def create_collection(self, + def create_collection( + self, name: str, codec_options: Optional[CodecOptions] = None, read_preference: Optional[_ServerMode] = None, @@ -351,19 +366,25 @@ def create_collection(self, with self.__client._tmp_session(session) as s: # Skip this check in a transaction where listCollections is not # supported. 
- if ((not s or not s.in_transaction) and - name in self.list_collection_names( - filter={"name": name}, session=s)): + if (not s or not s.in_transaction) and name in self.list_collection_names( + filter={"name": name}, session=s + ): raise CollectionInvalid("collection %s already exists" % name) - return Collection(self, name, True, codec_options, - read_preference, write_concern, - read_concern, session=s, **kwargs) + return Collection( + self, + name, + True, + codec_options, + read_preference, + write_concern, + read_concern, + session=s, + **kwargs, + ) - def aggregate(self, - pipeline: _Pipeline, - session: Optional["ClientSession"] = None, - **kwargs: Any + def aggregate( + self, pipeline: _Pipeline, session: Optional["ClientSession"] = None, **kwargs: Any ) -> CommandCursor[_DocumentType]: """Perform a database-level aggregation. @@ -428,13 +449,19 @@ def aggregate(self, """ with self.client._tmp_session(session, close=False) as s: cmd = _DatabaseAggregationCommand( - self, CommandCursor, pipeline, kwargs, session is not None, - user_fields={'cursor': {'firstBatch': 1}}) + self, + CommandCursor, + pipeline, + kwargs, + session is not None, + user_fields={"cursor": {"firstBatch": 1}}, + ) return self.client._retryable_read( - cmd.get_cursor, cmd.get_read_preference(s), s, - retryable=not cmd._performs_write) + cmd.get_cursor, cmd.get_read_preference(s), s, retryable=not cmd._performs_write + ) - def watch(self, + def watch( + self, pipeline: Optional[_Pipeline] = None, full_document: Optional[str] = None, resume_after: Optional[Mapping[str, Any]] = None, @@ -530,15 +557,32 @@ def watch(self, https://github.com/mongodb/specifications/blob/master/source/change-streams/change-streams.rst """ return DatabaseChangeStream( - self, pipeline, full_document, resume_after, max_await_time_ms, - batch_size, collation, start_at_operation_time, session, - start_after) - - def _command(self, sock_info, command, value=1, check=True, - allowable_errors=None, read_preference=ReadPreference.PRIMARY, - codec_options=DEFAULT_CODEC_OPTIONS, - write_concern=None, - parse_write_concern_error=False, session=None, **kwargs): + self, + pipeline, + full_document, + resume_after, + max_await_time_ms, + batch_size, + collation, + start_at_operation_time, + session, + start_after, + ) + + def _command( + self, + sock_info, + command, + value=1, + check=True, + allowable_errors=None, + read_preference=ReadPreference.PRIMARY, + codec_options=DEFAULT_CODEC_OPTIONS, + write_concern=None, + parse_write_concern_error=False, + session=None, + **kwargs, + ): """Internal command helper.""" if isinstance(command, str): command = SON([(command, value)]) @@ -555,9 +599,11 @@ def _command(self, sock_info, command, value=1, check=True, write_concern=write_concern, parse_write_concern_error=parse_write_concern_error, session=s, - client=self.__client) + client=self.__client, + ) - def command(self, + def command( + self, command: Union[str, MutableMapping[str, Any]], value: Any = 1, check: bool = True, @@ -650,57 +696,78 @@ def command(self, .. seealso:: The MongoDB documentation on `commands `_. 
""" if read_preference is None: - read_preference = ((session and session._txn_read_preference()) - or ReadPreference.PRIMARY) - with self.__client._socket_for_reads( - read_preference, session) as (sock_info, read_preference): - return self._command(sock_info, command, value, - check, allowable_errors, read_preference, - codec_options, session=session, **kwargs) - - def _retryable_read_command(self, command, value=1, check=True, - allowable_errors=None, read_preference=None, - codec_options=DEFAULT_CODEC_OPTIONS, session=None, **kwargs): + read_preference = (session and session._txn_read_preference()) or ReadPreference.PRIMARY + with self.__client._socket_for_reads(read_preference, session) as ( + sock_info, + read_preference, + ): + return self._command( + sock_info, + command, + value, + check, + allowable_errors, + read_preference, + codec_options, + session=session, + **kwargs, + ) + + def _retryable_read_command( + self, + command, + value=1, + check=True, + allowable_errors=None, + read_preference=None, + codec_options=DEFAULT_CODEC_OPTIONS, + session=None, + **kwargs, + ): """Same as command but used for retryable read commands.""" if read_preference is None: - read_preference = ((session and session._txn_read_preference()) - or ReadPreference.PRIMARY) + read_preference = (session and session._txn_read_preference()) or ReadPreference.PRIMARY def _cmd(session, server, sock_info, read_preference): - return self._command(sock_info, command, value, - check, allowable_errors, read_preference, - codec_options, session=session, **kwargs) + return self._command( + sock_info, + command, + value, + check, + allowable_errors, + read_preference, + codec_options, + session=session, + **kwargs, + ) - return self.__client._retryable_read( - _cmd, read_preference, session) + return self.__client._retryable_read(_cmd, read_preference, session) def _list_collections(self, sock_info, session, read_preference, **kwargs): """Internal listCollections helper.""" - coll = self.get_collection( - "$cmd", read_preference=read_preference) - cmd = SON([("listCollections", 1), - ("cursor", {})]) + coll = self.get_collection("$cmd", read_preference=read_preference) + cmd = SON([("listCollections", 1), ("cursor", {})]) cmd.update(kwargs) - with self.__client._tmp_session( - session, close=False) as tmp_session: + with self.__client._tmp_session(session, close=False) as tmp_session: cursor = self._command( - sock_info, cmd, - read_preference=read_preference, - session=tmp_session)["cursor"] + sock_info, cmd, read_preference=read_preference, session=tmp_session + )["cursor"] cmd_cursor = CommandCursor( coll, cursor, sock_info.address, session=tmp_session, - explicit_session=session is not None) + explicit_session=session is not None, + ) cmd_cursor._maybe_pin_connection(sock_info) return cmd_cursor - def list_collections(self, - session: Optional["ClientSession"] = None, - filter: Optional[Mapping[str, Any]] = None, - **kwargs: Any + def list_collections( + self, + session: Optional["ClientSession"] = None, + filter: Optional[Mapping[str, Any]] = None, + **kwargs: Any, ) -> CommandCursor[Dict[str, Any]]: """Get a cursor over the collections of this database. @@ -721,22 +788,21 @@ def list_collections(self, .. 
versionadded:: 3.6 """ if filter is not None: - kwargs['filter'] = filter - read_pref = ((session and session._txn_read_preference()) - or ReadPreference.PRIMARY) + kwargs["filter"] = filter + read_pref = (session and session._txn_read_preference()) or ReadPreference.PRIMARY def _cmd(session, server, sock_info, read_preference): return self._list_collections( - sock_info, session, read_preference=read_preference, - **kwargs) + sock_info, session, read_preference=read_preference, **kwargs + ) - return self.__client._retryable_read( - _cmd, read_pref, session) + return self.__client._retryable_read(_cmd, read_pref, session) - def list_collection_names(self, + def list_collection_names( + self, session: Optional["ClientSession"] = None, filter: Optional[Mapping[str, Any]] = None, - **kwargs: Any + **kwargs: Any, ) -> List[str]: """Get a list of all the collection names in this database. @@ -771,12 +837,10 @@ def list_collection_names(self, if not filter or (len(filter) == 1 and "name" in filter): kwargs["nameOnly"] = True - return [result["name"] - for result in self.list_collections(session=session, **kwargs)] + return [result["name"] for result in self.list_collections(session=session, **kwargs)] - def drop_collection(self, - name_or_collection: Union[str, Collection], - session: Optional["ClientSession"] = None + def drop_collection( + self, name_or_collection: Union[str, Collection], session: Optional["ClientSession"] = None ) -> Dict[str, Any]: """Drop a collection. @@ -806,13 +870,17 @@ def drop_collection(self, with self.__client._socket_for_writes(session) as sock_info: return self._command( - sock_info, 'drop', value=name, - allowable_errors=['ns not found', 26], + sock_info, + "drop", + value=name, + allowable_errors=["ns not found", 26], write_concern=self._write_concern_for(session), parse_write_concern_error=True, - session=session) + session=session, + ) - def validate_collection(self, + def validate_collection( + self, name_or_collection: Union[str, Collection], scandata: bool = False, full: bool = False, @@ -853,12 +921,9 @@ def validate_collection(self, name = name.name if not isinstance(name, str): - raise TypeError("name_or_collection must be an instance of str or " - "Collection") + raise TypeError("name_or_collection must be an instance of str or " "Collection") - cmd = SON([("validate", name), - ("scandata", scandata), - ("full", full)]) + cmd = SON([("validate", name), ("scandata", scandata), ("full", full)]) if background is not None: cmd["background"] = background @@ -875,10 +940,8 @@ def validate_collection(self, for _, res in result["raw"].items(): if "result" in res: info = res["result"] - if (info.find("exception") != -1 or - info.find("corrupt") != -1): - raise CollectionInvalid("%s invalid: " - "%s" % (name, info)) + if info.find("exception") != -1 or info.find("corrupt") != -1: + raise CollectionInvalid("%s invalid: " "%s" % (name, info)) elif not res.get("valid", False): valid = False break @@ -900,13 +963,14 @@ def __next__(self) -> "Database[_DocumentType]": next = __next__ def __bool__(self) -> bool: - raise NotImplementedError("Database objects do not implement truth " - "value testing or bool(). Please compare " - "with None instead: database is not None") - - def dereference(self, dbref: DBRef, - session: Optional["ClientSession"] = None, - **kwargs: Any + raise NotImplementedError( + "Database objects do not implement truth " + "value testing or bool(). 
Please compare " + "with None instead: database is not None" + ) + + def dereference( + self, dbref: DBRef, session: Optional["ClientSession"] = None, **kwargs: Any ) -> Optional[_DocumentType]: """Dereference a :class:`~bson.dbref.DBRef`, getting the document it points to. @@ -931,8 +995,8 @@ def dereference(self, dbref: DBRef, if not isinstance(dbref, DBRef): raise TypeError("cannot dereference a %s" % type(dbref)) if dbref.database is not None and dbref.database != self.__name: - raise ValueError("trying to dereference a DBRef that points to " - "another database (%r not %r)" % (dbref.database, - self.__name)) - return self[dbref.collection].find_one( - {"_id": dbref.id}, session=session, **kwargs) + raise ValueError( + "trying to dereference a DBRef that points to " + "another database (%r not %r)" % (dbref.database, self.__name) + ) + return self[dbref.collection].find_one({"_id": dbref.id}, session=session, **kwargs) diff --git a/pymongo/driver_info.py b/pymongo/driver_info.py index 1bb599af37..53fbfd3428 100644 --- a/pymongo/driver_info.py +++ b/pymongo/driver_info.py @@ -18,7 +18,7 @@ from typing import Optional -class DriverInfo(namedtuple('DriverInfo', ['name', 'version', 'platform'])): +class DriverInfo(namedtuple("DriverInfo", ["name", "version", "platform"])): """Info about a driver wrapping PyMongo. The MongoDB server logs PyMongo's name, version, and platform whenever @@ -27,11 +27,16 @@ class DriverInfo(namedtuple('DriverInfo', ['name', 'version', 'platform'])): like 'MyDriver', '1.2.3', 'some platform info'. Any of these strings may be None to accept PyMongo's default. """ - def __new__(cls, name: str, version: Optional[str] = None, platform: Optional[str] = None) -> "DriverInfo": + + def __new__( + cls, name: str, version: Optional[str] = None, platform: Optional[str] = None + ) -> "DriverInfo": self = super(DriverInfo, cls).__new__(cls, name, version, platform) for key, value in self._asdict().items(): if value is not None and not isinstance(value, str): - raise TypeError("Wrong type for DriverInfo %s option, value " - "must be an instance of str" % (key,)) + raise TypeError( + "Wrong type for DriverInfo %s option, value " + "must be an instance of str" % (key,) + ) return self diff --git a/pymongo/encryption.py b/pymongo/encryption.py index b076f490f4..4a6653f959 100644 --- a/pymongo/encryption.py +++ b/pymongo/encryption.py @@ -22,11 +22,10 @@ try: from pymongocrypt.auto_encrypter import AutoEncrypter from pymongocrypt.errors import MongoCryptError - from pymongocrypt.explicit_encrypter import ( - ExplicitEncrypter - ) + from pymongocrypt.explicit_encrypter import ExplicitEncrypter from pymongocrypt.mongocrypt import MongoCryptOptions from pymongocrypt.state_machine import MongoCryptCallback + _HAVE_PYMONGOCRYPT = True except ImportError: _HAVE_PYMONGOCRYPT = False @@ -36,13 +35,16 @@ from bson.binary import STANDARD, UUID_SUBTYPE, Binary from bson.codec_options import CodecOptions from bson.errors import BSONError -from bson.raw_bson import (DEFAULT_RAW_BSON_OPTIONS, RawBSONDocument, - _inflate_bson) +from bson.raw_bson import DEFAULT_RAW_BSON_OPTIONS, RawBSONDocument, _inflate_bson from bson.son import SON from pymongo.daemon import _spawn_daemon from pymongo.encryption_options import AutoEncryptionOpts -from pymongo.errors import (ConfigurationError, EncryptionError, - InvalidOperation, ServerSelectionTimeoutError) +from pymongo.errors import ( + ConfigurationError, + EncryptionError, + InvalidOperation, + ServerSelectionTimeoutError, +) from pymongo.mongo_client 
import MongoClient from pymongo.pool import PoolOptions, _configured_socket from pymongo.read_concern import ReadConcern @@ -57,8 +59,7 @@ _DATA_KEY_OPTS = CodecOptions(document_class=SON, uuid_representation=STANDARD) # Use RawBSONDocument codec options to avoid needlessly decoding # documents from the key vault. -_KEY_VAULT_OPTS = CodecOptions(document_class=RawBSONDocument, - uuid_representation=STANDARD) +_KEY_VAULT_OPTS = CodecOptions(document_class=RawBSONDocument, uuid_representation=STANDARD) @contextlib.contextmanager @@ -85,8 +86,9 @@ def __init__(self, client, key_vault_coll, mongocryptd_client, opts): self.client_ref = None self.key_vault_coll = key_vault_coll.with_options( codec_options=_KEY_VAULT_OPTS, - read_concern=ReadConcern(level='majority'), - write_concern=WriteConcern(w='majority')) + read_concern=ReadConcern(level="majority"), + write_concern=WriteConcern(w="majority"), + ) self.mongocryptd_client = mongocryptd_client self.opts = opts self._spawned = False @@ -108,16 +110,19 @@ def kms_request(self, kms_context): # Enable strict certificate verification, OCSP, match hostname, and # SNI using the system default CA certificates. ctx = get_ssl_context( - None, # certfile - None, # passphrase - None, # ca_certs - None, # crlfile + None, # certfile + None, # passphrase + None, # ca_certs + None, # crlfile False, # allow_invalid_certificates False, # allow_invalid_hostnames - False) # disable_ocsp_endpoint_check - opts = PoolOptions(connect_timeout=_KMS_CONNECT_TIMEOUT, - socket_timeout=_KMS_CONNECT_TIMEOUT, - ssl_context=ctx) + False, + ) # disable_ocsp_endpoint_check + opts = PoolOptions( + connect_timeout=_KMS_CONNECT_TIMEOUT, + socket_timeout=_KMS_CONNECT_TIMEOUT, + ssl_context=ctx, + ) host, port = parse_host(endpoint, _HTTPS_PORT) conn = _configured_socket((host, port), opts) try: @@ -125,7 +130,7 @@ def kms_request(self, kms_context): while kms_context.bytes_needed > 0: data = conn.recv(kms_context.bytes_needed) if not data: - raise OSError('KMS connection closed') + raise OSError("KMS connection closed") kms_context.feed(data) finally: conn.close() @@ -143,8 +148,7 @@ def collection_info(self, database, filter): :Returns: The first document from the listCollections command response as BSON. """ - with self.client_ref()[database].list_collections( - filter=RawBSONDocument(filter)) as cursor: + with self.client_ref()[database].list_collections(filter=RawBSONDocument(filter)) as cursor: for doc in cursor: return _dict_to_bson(doc, False, _DATA_KEY_OPTS) @@ -155,7 +159,7 @@ def spawn(self): successfully. """ self._spawned = True - args = [self.opts._mongocryptd_spawn_path or 'mongocryptd'] + args = [self.opts._mongocryptd_spawn_path or "mongocryptd"] args.extend(self.opts._mongocryptd_spawn_args) _spawn_daemon(args) @@ -176,15 +180,15 @@ def mark_command(self, database, cmd): inflated_cmd = _inflate_bson(cmd, DEFAULT_RAW_BSON_OPTIONS) try: res = self.mongocryptd_client[database].command( - inflated_cmd, - codec_options=DEFAULT_RAW_BSON_OPTIONS) + inflated_cmd, codec_options=DEFAULT_RAW_BSON_OPTIONS + ) except ServerSelectionTimeoutError: if self.opts._mongocryptd_bypass_spawn: raise self.spawn() res = self.mongocryptd_client[database].command( - inflated_cmd, - codec_options=DEFAULT_RAW_BSON_OPTIONS) + inflated_cmd, codec_options=DEFAULT_RAW_BSON_OPTIONS + ) return res.raw def fetch_keys(self, filter): @@ -210,9 +214,9 @@ def insert_data_key(self, data_key): The _id of the inserted data key document. 
""" raw_doc = RawBSONDocument(data_key, _KEY_VAULT_OPTS) - data_key_id = raw_doc.get('_id') + data_key_id = raw_doc.get("_id") if not isinstance(data_key_id, uuid.UUID): - raise TypeError('data_key _id must be a UUID') + raise TypeError("data_key _id must be a UUID") self.key_vault_coll.insert_one(raw_doc) return Binary(data_key_id.bytes, subtype=UUID_SUBTYPE) @@ -247,6 +251,7 @@ class _Encrypter(object): This class is used to support automatic encryption and decryption of MongoDB commands.""" + def __init__(self, client, opts): """Create a _Encrypter for a client. @@ -268,8 +273,7 @@ def _get_internal_client(encrypter, mongo_client): # Else - limited pool size, use an internal client. if encrypter._internal_client is not None: return encrypter._internal_client - internal_client = mongo_client._duplicate( - minPoolSize=0, auto_encryption_opts=None) + internal_client = mongo_client._duplicate(minPoolSize=0, auto_encryption_opts=None) encrypter._internal_client = internal_client return internal_client @@ -283,17 +287,17 @@ def _get_internal_client(encrypter, mongo_client): else: metadata_client = _get_internal_client(self, client) - db, coll = opts._key_vault_namespace.split('.', 1) + db, coll = opts._key_vault_namespace.split(".", 1) key_vault_coll = key_vault_client[db][coll] mongocryptd_client = MongoClient( - opts._mongocryptd_uri, connect=False, - serverSelectionTimeoutMS=_MONGOCRYPTD_TIMEOUT_MS) + opts._mongocryptd_uri, connect=False, serverSelectionTimeoutMS=_MONGOCRYPTD_TIMEOUT_MS + ) - io_callbacks = _EncryptionIO( - metadata_client, key_vault_coll, mongocryptd_client, opts) - self._auto_encrypter = AutoEncrypter(io_callbacks, MongoCryptOptions( - opts._kms_providers, schema_map)) + io_callbacks = _EncryptionIO(metadata_client, key_vault_coll, mongocryptd_client, opts) + self._auto_encrypter = AutoEncrypter( + io_callbacks, MongoCryptOptions(opts._kms_providers, schema_map) + ) self._closed = False def encrypt(self, database, cmd, codec_options): @@ -312,8 +316,7 @@ def encrypt(self, database, cmd, codec_options): with _wrap_encryption_errors(): encrypted_cmd = self._auto_encrypter.encrypt(database, encoded_cmd) # TODO: PYTHON-1922 avoid decoding the encrypted_cmd. - encrypt_cmd = _inflate_bson( - encrypted_cmd, DEFAULT_RAW_BSON_OPTIONS) + encrypt_cmd = _inflate_bson(encrypted_cmd, DEFAULT_RAW_BSON_OPTIONS) return encrypt_cmd def decrypt(self, response): @@ -344,22 +347,21 @@ def close(self): class Algorithm(object): """An enum that defines the supported encryption algorithms.""" - AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic = ( - "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic") - AEAD_AES_256_CBC_HMAC_SHA_512_Random = ( - "AEAD_AES_256_CBC_HMAC_SHA_512-Random") + AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic = "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + AEAD_AES_256_CBC_HMAC_SHA_512_Random = "AEAD_AES_256_CBC_HMAC_SHA_512-Random" class ClientEncryption(object): """Explicit client-side field level encryption.""" - def __init__(self, + def __init__( + self, kms_providers: Mapping[str, Any], key_vault_namespace: str, key_vault_client: MongoClient, codec_options: CodecOptions, - kms_tls_options: Optional[Mapping[str, Any]] = None + kms_tls_options: Optional[Mapping[str, Any]] = None, ) -> None: """Explicit client-side field level encryption. 
@@ -434,30 +436,37 @@ def __init__(self, raise ConfigurationError( "client-side field level encryption requires the pymongocrypt " "library: install a compatible version with: " - "python -m pip install 'pymongo[encryption]'") + "python -m pip install 'pymongo[encryption]'" + ) if not isinstance(codec_options, CodecOptions): - raise TypeError("codec_options must be an instance of " - "bson.codec_options.CodecOptions") + raise TypeError( + "codec_options must be an instance of " "bson.codec_options.CodecOptions" + ) self._kms_providers = kms_providers self._key_vault_namespace = key_vault_namespace self._key_vault_client = key_vault_client self._codec_options = codec_options - db, coll = key_vault_namespace.split('.', 1) + db, coll = key_vault_namespace.split(".", 1) key_vault_coll = key_vault_client[db][coll] - opts = AutoEncryptionOpts(kms_providers, key_vault_namespace, - kms_tls_options=kms_tls_options) - self._io_callbacks: Optional[_EncryptionIO] = _EncryptionIO(None, key_vault_coll, None, opts) + opts = AutoEncryptionOpts( + kms_providers, key_vault_namespace, kms_tls_options=kms_tls_options + ) + self._io_callbacks: Optional[_EncryptionIO] = _EncryptionIO( + None, key_vault_coll, None, opts + ) self._encryption = ExplicitEncrypter( - self._io_callbacks, MongoCryptOptions(kms_providers, None)) + self._io_callbacks, MongoCryptOptions(kms_providers, None) + ) - def create_data_key(self, + def create_data_key( + self, kms_provider: str, master_key: Optional[Mapping[str, Any]] = None, - key_alt_names: Optional[Sequence[str]] = None + key_alt_names: Optional[Sequence[str]] = None, ) -> Binary: """Create and insert a new data key into the key vault collection. @@ -527,14 +536,15 @@ def create_data_key(self, self._check_closed() with _wrap_encryption_errors(): return self._encryption.create_data_key( - kms_provider, master_key=master_key, - key_alt_names=key_alt_names) + kms_provider, master_key=master_key, key_alt_names=key_alt_names + ) - def encrypt(self, + def encrypt( + self, value: Any, algorithm: str, key_id: Optional[Binary] = None, - key_alt_name: Optional[str] = None + key_alt_name: Optional[str] = None, ) -> Binary: """Encrypt a BSON value with a given key and algorithm. @@ -554,17 +564,17 @@ def encrypt(self, The encrypted value, a :class:`~bson.binary.Binary` with subtype 6. """ self._check_closed() - if (key_id is not None and not ( - isinstance(key_id, Binary) and - key_id.subtype == UUID_SUBTYPE)): - raise TypeError( - 'key_id must be a bson.binary.Binary with subtype 4') + if key_id is not None and not ( + isinstance(key_id, Binary) and key_id.subtype == UUID_SUBTYPE + ): + raise TypeError("key_id must be a bson.binary.Binary with subtype 4") - doc = encode({'v': value}, codec_options=self._codec_options) + doc = encode({"v": value}, codec_options=self._codec_options) with _wrap_encryption_errors(): encrypted_doc = self._encryption.encrypt( - doc, algorithm, key_id=key_id, key_alt_name=key_alt_name) - return decode(encrypted_doc)['v'] + doc, algorithm, key_id=key_id, key_alt_name=key_alt_name + ) + return decode(encrypted_doc)["v"] def decrypt(self, value: Binary) -> Any: """Decrypt an encrypted value. 
@@ -578,14 +588,12 @@ def decrypt(self, value: Binary) -> Any: """ self._check_closed() if not (isinstance(value, Binary) and value.subtype == 6): - raise TypeError( - 'value to decrypt must be a bson.binary.Binary with subtype 6') + raise TypeError("value to decrypt must be a bson.binary.Binary with subtype 6") with _wrap_encryption_errors(): - doc = encode({'v': value}) + doc = encode({"v": value}) decrypted_doc = self._encryption.decrypt(doc) - return decode(decrypted_doc, - codec_options=self._codec_options)['v'] + return decode(decrypted_doc, codec_options=self._codec_options)["v"] def __enter__(self) -> "ClientEncryption": return self diff --git a/pymongo/encryption_options.py b/pymongo/encryption_options.py index 21a13f6a5e..c206b4c8b5 100644 --- a/pymongo/encryption_options.py +++ b/pymongo/encryption_options.py @@ -19,6 +19,7 @@ try: import pymongocrypt + _HAVE_PYMONGOCRYPT = True except ImportError: _HAVE_PYMONGOCRYPT = False @@ -27,23 +28,24 @@ from pymongo.uri_parser import _parse_kms_tls_options if TYPE_CHECKING: - from pymongo.mongo_client import MongoClient + from pymongo.mongo_client import MongoClient class AutoEncryptionOpts(object): """Options to configure automatic client-side field level encryption.""" - def __init__(self, + def __init__( + self, kms_providers: Mapping[str, Any], key_vault_namespace: str, key_vault_client: Optional["MongoClient"] = None, schema_map: Optional[Mapping[str, Any]] = None, bypass_auto_encryption: Optional[bool] = False, - mongocryptd_uri: str = 'mongodb://localhost:27020', + mongocryptd_uri: str = "mongodb://localhost:27020", mongocryptd_bypass_spawn: bool = False, - mongocryptd_spawn_path: str = 'mongocryptd', + mongocryptd_spawn_path: str = "mongocryptd", mongocryptd_spawn_args: Optional[List[str]] = None, - kms_tls_options: Optional[Mapping[str, Any]] = None + kms_tls_options: Optional[Mapping[str, Any]] = None, ) -> None: """Options to configure automatic client-side field level encryption. @@ -149,7 +151,8 @@ def __init__(self, raise ConfigurationError( "client side encryption requires the pymongocrypt library: " "install a compatible version with: " - "python -m pip install 'pymongo[encryption]'") + "python -m pip install 'pymongo[encryption]'" + ) self._kms_providers = kms_providers self._key_vault_namespace = key_vault_namespace @@ -160,12 +163,11 @@ def __init__(self, self._mongocryptd_bypass_spawn = mongocryptd_bypass_spawn self._mongocryptd_spawn_path = mongocryptd_spawn_path if mongocryptd_spawn_args is None: - mongocryptd_spawn_args = ['--idleShutdownTimeoutSecs=60'] + mongocryptd_spawn_args = ["--idleShutdownTimeoutSecs=60"] self._mongocryptd_spawn_args = mongocryptd_spawn_args if not isinstance(self._mongocryptd_spawn_args, list): - raise TypeError('mongocryptd_spawn_args must be a list') - if not any('idleShutdownTimeoutSecs' in s - for s in self._mongocryptd_spawn_args): - self._mongocryptd_spawn_args.append('--idleShutdownTimeoutSecs=60') + raise TypeError("mongocryptd_spawn_args must be a list") + if not any("idleShutdownTimeoutSecs" in s for s in self._mongocryptd_spawn_args): + self._mongocryptd_spawn_args.append("--idleShutdownTimeoutSecs=60") # Maps KMS provider name to a SSLContext. self._kms_ssl_contexts = _parse_kms_tls_options(kms_tls_options) diff --git a/pymongo/errors.py b/pymongo/errors.py index 89c45730c9..a98a5a7fb8 100644 --- a/pymongo/errors.py +++ b/pymongo/errors.py @@ -13,8 +13,7 @@ # limitations under the License. 
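# Illustrative sketch, not part of the patch: the explicit-encryption round
# trip exercised by the ClientEncryption methods reformatted above. Assumes
# pymongocrypt is installed and a mongod is reachable on localhost; the key
# vault namespace and the 96-byte local master key are invented for the demo.
import os

from bson.codec_options import CodecOptions
from pymongo import MongoClient
from pymongo.encryption import Algorithm, ClientEncryption

client = MongoClient()
kms_providers = {"local": {"key": os.urandom(96)}}
with ClientEncryption(
    kms_providers, "keyvault.datakeys", client, CodecOptions()
) as client_encryption:
    key_id = client_encryption.create_data_key("local", key_alt_names=["demo"])
    ciphertext = client_encryption.encrypt(
        "secret text",
        Algorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic,
        key_id=key_id,
    )
    assert client_encryption.decrypt(ciphertext) == "secret text"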
"""Exceptions raised by PyMongo.""" -from typing import (Any, Iterable, List, Mapping, Optional, Sequence, Tuple, - Union) +from typing import Any, Iterable, List, Mapping, Optional, Sequence, Tuple, Union from bson.errors import * @@ -25,16 +24,15 @@ try: from ssl import CertificateError as _CertificateError except ImportError: + class _CertificateError(ValueError): # type: ignore pass class PyMongoError(Exception): """Base class for all PyMongo exceptions.""" - def __init__(self, - message: str = '', - error_labels: Optional[Iterable[str]] = None - ) -> None: + + def __init__(self, message: str = "", error_labels: Optional[Iterable[str]] = None) -> None: super(PyMongoError, self).__init__(message) self._message = message self._error_labels = set(error_labels or []) @@ -75,17 +73,17 @@ class AutoReconnect(ConnectionFailure): Subclass of :exc:`~pymongo.errors.ConnectionFailure`. """ + errors: Union[Mapping[str, Any], Sequence] details: Union[Mapping[str, Any], Sequence] - def __init__(self, - message: str = '', - errors: Optional[Union[Mapping[str, Any], Sequence]] = None + def __init__( + self, message: str = "", errors: Optional[Union[Mapping[str, Any], Sequence]] = None ) -> None: error_labels = None if errors is not None: if isinstance(errors, dict): - error_labels = errors.get('errorLabels') + error_labels = errors.get("errorLabels") super(AutoReconnect, self).__init__(message, error_labels) self.errors = self.details = errors or [] @@ -121,12 +119,13 @@ class NotPrimaryError(AutoReconnect): .. versionadded:: 3.12 """ - def __init__(self, - message: str = '', - errors: Optional[Union[Mapping[str, Any], List]] = None + + def __init__( + self, message: str = "", errors: Optional[Union[Mapping[str, Any], List]] = None ) -> None: super(NotPrimaryError, self).__init__( - _format_detailed_error(message, errors), errors=errors) + _format_detailed_error(message, errors), errors=errors + ) class ServerSelectionTimeoutError(AutoReconnect): @@ -143,8 +142,7 @@ class ServerSelectionTimeoutError(AutoReconnect): class ConfigurationError(PyMongoError): - """Raised when something is incorrectly configured. - """ + """Raised when something is incorrectly configured.""" class OperationFailure(PyMongoError): @@ -154,7 +152,8 @@ class OperationFailure(PyMongoError): The :attr:`details` attribute. """ - def __init__(self, + def __init__( + self, error: str, code: Optional[int] = None, details: Optional[Mapping[str, Any]] = None, @@ -162,9 +161,10 @@ def __init__(self, ) -> None: error_labels = None if details is not None: - error_labels = details.get('errorLabels') + error_labels = details.get("errorLabels") super(OperationFailure, self).__init__( - _format_detailed_error(error, details), error_labels=error_labels) + _format_detailed_error(error, details), error_labels=error_labels + ) self.__code = code self.__details = details self.__max_wire_version = max_wire_version @@ -175,8 +175,7 @@ def _max_wire_version(self): @property def code(self) -> Optional[int]: - """The error code returned by the server, if any. - """ + """The error code returned by the server, if any.""" return self.__code @property @@ -192,7 +191,6 @@ def details(self) -> Optional[Mapping[str, Any]]: return self.__details - class CursorNotFound(OperationFailure): """Raised while iterating query results if the cursor is invalidated on the server. @@ -245,17 +243,16 @@ class BulkWriteError(OperationFailure): .. 
versionadded:: 2.7 """ + details: Mapping[str, Any] def __init__(self, results: Mapping[str, Any]) -> None: - super(BulkWriteError, self).__init__( - "batch op errors occurred", 65, results) + super(BulkWriteError, self).__init__("batch op errors occurred", 65, results) def __reduce__(self) -> Tuple[Any, Any]: return self.__class__, (self.details,) - class InvalidOperation(PyMongoError): """Raised when a client attempts to perform an invalid operation.""" @@ -273,8 +270,8 @@ class InvalidURI(ConfigurationError): class DocumentTooLarge(InvalidDocument): - """Raised when an encoded document is too large for the connected server. - """ + """Raised when an encoded document is too large for the connected server.""" + pass @@ -298,6 +295,6 @@ def cause(self) -> Exception: class _OperationCancelled(AutoReconnect): - """Internal error raised when a socket operation is cancelled. - """ + """Internal error raised when a socket operation is cancelled.""" + pass diff --git a/pymongo/event_loggers.py b/pymongo/event_loggers.py index f0857f8f45..0b92d9fa2b 100644 --- a/pymongo/event_loggers.py +++ b/pymongo/event_loggers.py @@ -40,22 +40,29 @@ class CommandLogger(monitoring.CommandListener): logs them at the `INFO` severity level using :mod:`logging`. .. versionadded:: 3.11 """ + def started(self, event: monitoring.CommandStartedEvent) -> None: - logging.info("Command {0.command_name} with request id " - "{0.request_id} started on server " - "{0.connection_id}".format(event)) + logging.info( + "Command {0.command_name} with request id " + "{0.request_id} started on server " + "{0.connection_id}".format(event) + ) def succeeded(self, event: monitoring.CommandSucceededEvent) -> None: - logging.info("Command {0.command_name} with request id " - "{0.request_id} on server {0.connection_id} " - "succeeded in {0.duration_micros} " - "microseconds".format(event)) + logging.info( + "Command {0.command_name} with request id " + "{0.request_id} on server {0.connection_id} " + "succeeded in {0.duration_micros} " + "microseconds".format(event) + ) def failed(self, event: monitoring.CommandFailedEvent) -> None: - logging.info("Command {0.command_name} with request id " - "{0.request_id} on server {0.connection_id} " - "failed in {0.duration_micros} " - "microseconds".format(event)) + logging.info( + "Command {0.command_name} with request id " + "{0.request_id} on server {0.connection_id} " + "failed in {0.duration_micros} " + "microseconds".format(event) + ) class ServerLogger(monitoring.ServerListener): @@ -68,9 +75,9 @@ class ServerLogger(monitoring.ServerListener): .. 
versionadded:: 3.11 """ + def opened(self, event: monitoring.ServerOpeningEvent) -> None: - logging.info("Server {0.server_address} added to topology " - "{0.topology_id}".format(event)) + logging.info("Server {0.server_address} added to topology " "{0.topology_id}".format(event)) def description_changed(self, event: monitoring.ServerDescriptionChangedEvent) -> None: previous_server_type = event.previous_description.server_type @@ -80,11 +87,13 @@ def description_changed(self, event: monitoring.ServerDescriptionChangedEvent) - logging.info( "Server {0.server_address} changed type from " "{0.previous_description.server_type_name} to " - "{0.new_description.server_type_name}".format(event)) + "{0.new_description.server_type_name}".format(event) + ) def closed(self, event: monitoring.ServerClosedEvent) -> None: - logging.warning("Server {0.server_address} removed from topology " - "{0.topology_id}".format(event)) + logging.warning( + "Server {0.server_address} removed from topology " "{0.topology_id}".format(event) + ) class HeartbeatLogger(monitoring.ServerHeartbeatListener): @@ -97,19 +106,22 @@ class HeartbeatLogger(monitoring.ServerHeartbeatListener): .. versionadded:: 3.11 """ + def started(self, event: monitoring.ServerHeartbeatStartedEvent) -> None: - logging.info("Heartbeat sent to server " - "{0.connection_id}".format(event)) + logging.info("Heartbeat sent to server " "{0.connection_id}".format(event)) def succeeded(self, event: monitoring.ServerHeartbeatSucceededEvent) -> None: # The reply.document attribute was added in PyMongo 3.4. - logging.info("Heartbeat to server {0.connection_id} " - "succeeded with reply " - "{0.reply.document}".format(event)) + logging.info( + "Heartbeat to server {0.connection_id} " + "succeeded with reply " + "{0.reply.document}".format(event) + ) def failed(self, event: monitoring.ServerHeartbeatFailedEvent) -> None: - logging.warning("Heartbeat to server {0.connection_id} " - "failed with error {0.reply}".format(event)) + logging.warning( + "Heartbeat to server {0.connection_id} " "failed with error {0.reply}".format(event) + ) class TopologyLogger(monitoring.TopologyListener): @@ -122,13 +134,14 @@ class TopologyLogger(monitoring.TopologyListener): .. versionadded:: 3.11 """ + def opened(self, event: monitoring.TopologyOpenedEvent) -> None: - logging.info("Topology with id {0.topology_id} " - "opened".format(event)) + logging.info("Topology with id {0.topology_id} " "opened".format(event)) def description_changed(self, event: monitoring.TopologyDescriptionChangedEvent) -> None: - logging.info("Topology description updated for " - "topology id {0.topology_id}".format(event)) + logging.info( + "Topology description updated for " "topology id {0.topology_id}".format(event) + ) previous_topology_type = event.previous_description.topology_type new_topology_type = event.new_description.topology_type if new_topology_type != previous_topology_type: @@ -136,7 +149,8 @@ def description_changed(self, event: monitoring.TopologyDescriptionChangedEvent) logging.info( "Topology {0.topology_id} changed type from " "{0.previous_description.topology_type_name} to " - "{0.new_description.topology_type_name}".format(event)) + "{0.new_description.topology_type_name}".format(event) + ) # The has_writable_server and has_readable_server methods # were added in PyMongo 3.4. 
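# Illustrative sketch, not part of the patch: the logger classes reformatted
# above are activated by passing instances to MongoClient's event_listeners
# option. The URI is an assumption for the example.
import logging

from pymongo import MongoClient
from pymongo.event_loggers import CommandLogger, HeartbeatLogger, ServerLogger

logging.basicConfig(level=logging.INFO)
client = MongoClient(
    "mongodb://localhost:27017",
    event_listeners=[CommandLogger(), ServerLogger(), HeartbeatLogger()],
)
client.admin.command("ping")  # emits command started/succeeded lines at INFO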
if not event.new_description.has_writable_server(): @@ -145,8 +159,7 @@ def description_changed(self, event: monitoring.TopologyDescriptionChangedEvent) logging.warning("No readable servers available.") def closed(self, event: monitoring.TopologyClosedEvent) -> None: - logging.info("Topology with id {0.topology_id} " - "closed".format(event)) + logging.info("Topology with id {0.topology_id} " "closed".format(event)) class ConnectionPoolLogger(monitoring.ConnectionPoolListener): @@ -166,6 +179,7 @@ class ConnectionPoolLogger(monitoring.ConnectionPoolListener): .. versionadded:: 3.11 """ + def pool_created(self, event: monitoring.PoolCreatedEvent) -> None: logging.info("[pool {0.address}] pool created".format(event)) @@ -179,30 +193,41 @@ def pool_closed(self, event: monitoring.PoolClosedEvent) -> None: logging.info("[pool {0.address}] pool closed".format(event)) def connection_created(self, event: monitoring.ConnectionCreatedEvent) -> None: - logging.info("[pool {0.address}][conn #{0.connection_id}] " - "connection created".format(event)) + logging.info( + "[pool {0.address}][conn #{0.connection_id}] " "connection created".format(event) + ) def connection_ready(self, event: monitoring.ConnectionReadyEvent) -> None: - logging.info("[pool {0.address}][conn #{0.connection_id}] " - "connection setup succeeded".format(event)) + logging.info( + "[pool {0.address}][conn #{0.connection_id}] " + "connection setup succeeded".format(event) + ) def connection_closed(self, event: monitoring.ConnectionClosedEvent) -> None: - logging.info("[pool {0.address}][conn #{0.connection_id}] " - "connection closed, reason: " - "{0.reason}".format(event)) + logging.info( + "[pool {0.address}][conn #{0.connection_id}] " + "connection closed, reason: " + "{0.reason}".format(event) + ) - def connection_check_out_started(self, event: monitoring.ConnectionCheckOutStartedEvent) -> None: - logging.info("[pool {0.address}] connection check out " - "started".format(event)) + def connection_check_out_started( + self, event: monitoring.ConnectionCheckOutStartedEvent + ) -> None: + logging.info("[pool {0.address}] connection check out " "started".format(event)) def connection_check_out_failed(self, event: monitoring.ConnectionCheckOutFailedEvent) -> None: - logging.info("[pool {0.address}] connection check out " - "failed, reason: {0.reason}".format(event)) + logging.info( + "[pool {0.address}] connection check out " "failed, reason: {0.reason}".format(event) + ) def connection_checked_out(self, event: monitoring.ConnectionCheckedOutEvent) -> None: - logging.info("[pool {0.address}][conn #{0.connection_id}] " - "connection checked out of pool".format(event)) + logging.info( + "[pool {0.address}][conn #{0.connection_id}] " + "connection checked out of pool".format(event) + ) def connection_checked_in(self, event: monitoring.ConnectionCheckedInEvent) -> None: - logging.info("[pool {0.address}][conn #{0.connection_id}] " - "connection checked into pool".format(event)) + logging.info( + "[pool {0.address}][conn #{0.connection_id}] " + "connection checked into pool".format(event) + ) diff --git a/pymongo/hello.py b/pymongo/hello.py index ba09d80e32..92e9b426c0 100644 --- a/pymongo/hello.py +++ b/pymongo/hello.py @@ -26,36 +26,36 @@ class HelloCompat: - CMD = 'hello' - LEGACY_CMD = 'ismaster' - PRIMARY = 'isWritablePrimary' - LEGACY_PRIMARY = 'ismaster' - LEGACY_ERROR = 'not master' + CMD = "hello" + LEGACY_CMD = "ismaster" + PRIMARY = "isWritablePrimary" + LEGACY_PRIMARY = "ismaster" + LEGACY_ERROR = "not master" def 
_get_server_type(doc): """Determine the server type from a hello response.""" - if not doc.get('ok'): + if not doc.get("ok"): return SERVER_TYPE.Unknown - if doc.get('serviceId'): + if doc.get("serviceId"): return SERVER_TYPE.LoadBalancer - elif doc.get('isreplicaset'): + elif doc.get("isreplicaset"): return SERVER_TYPE.RSGhost - elif doc.get('setName'): - if doc.get('hidden'): + elif doc.get("setName"): + if doc.get("hidden"): return SERVER_TYPE.RSOther elif doc.get(HelloCompat.PRIMARY): return SERVER_TYPE.RSPrimary elif doc.get(HelloCompat.LEGACY_PRIMARY): return SERVER_TYPE.RSPrimary - elif doc.get('secondary'): + elif doc.get("secondary"): return SERVER_TYPE.RSSecondary - elif doc.get('arbiterOnly'): + elif doc.get("arbiterOnly"): return SERVER_TYPE.RSArbiter else: return SERVER_TYPE.RSOther - elif doc.get('msg') == 'isdbgrid': + elif doc.get("msg") == "isdbgrid": return SERVER_TYPE.Mongos else: return SERVER_TYPE.Standalone @@ -66,8 +66,8 @@ class Hello(Generic[_DocumentType]): .. versionadded:: 3.12 """ - __slots__ = ('_doc', '_server_type', '_is_writable', '_is_readable', - '_awaitable') + + __slots__ = ("_doc", "_server_type", "_is_writable", "_is_readable", "_awaitable") def __init__(self, doc: _DocumentType, awaitable: bool = False) -> None: self._server_type = _get_server_type(doc) @@ -76,11 +76,10 @@ def __init__(self, doc: _DocumentType, awaitable: bool = False) -> None: SERVER_TYPE.RSPrimary, SERVER_TYPE.Standalone, SERVER_TYPE.Mongos, - SERVER_TYPE.LoadBalancer) + SERVER_TYPE.LoadBalancer, + ) - self._is_readable = ( - self.server_type == SERVER_TYPE.RSSecondary - or self._is_writable) + self._is_readable = self.server_type == SERVER_TYPE.RSSecondary or self._is_writable self._awaitable = awaitable @property @@ -98,64 +97,70 @@ def server_type(self) -> int: @property def all_hosts(self) -> Set[Tuple[str, int]]: """List of hosts, passives, and arbiters known to this server.""" - return set(map(common.clean_node, itertools.chain( - self._doc.get('hosts', []), - self._doc.get('passives', []), - self._doc.get('arbiters', [])))) + return set( + map( + common.clean_node, + itertools.chain( + self._doc.get("hosts", []), + self._doc.get("passives", []), + self._doc.get("arbiters", []), + ), + ) + ) @property def tags(self) -> Mapping[str, Any]: """Replica set member tags or empty dict.""" - return self._doc.get('tags', {}) + return self._doc.get("tags", {}) @property def primary(self) -> Optional[Tuple[str, int]]: """This server's opinion about who the primary is, or None.""" - if self._doc.get('primary'): - return common.partition_node(self._doc['primary']) + if self._doc.get("primary"): + return common.partition_node(self._doc["primary"]) else: return None @property def replica_set_name(self) -> Optional[str]: """Replica set name or None.""" - return self._doc.get('setName') + return self._doc.get("setName") @property def max_bson_size(self) -> int: - return self._doc.get('maxBsonObjectSize', common.MAX_BSON_SIZE) + return self._doc.get("maxBsonObjectSize", common.MAX_BSON_SIZE) @property def max_message_size(self) -> int: - return self._doc.get('maxMessageSizeBytes', 2 * self.max_bson_size) + return self._doc.get("maxMessageSizeBytes", 2 * self.max_bson_size) @property def max_write_batch_size(self) -> int: - return self._doc.get('maxWriteBatchSize', common.MAX_WRITE_BATCH_SIZE) + return self._doc.get("maxWriteBatchSize", common.MAX_WRITE_BATCH_SIZE) @property def min_wire_version(self) -> int: - return self._doc.get('minWireVersion', common.MIN_WIRE_VERSION) + return 
self._doc.get("minWireVersion", common.MIN_WIRE_VERSION) @property def max_wire_version(self) -> int: - return self._doc.get('maxWireVersion', common.MAX_WIRE_VERSION) + return self._doc.get("maxWireVersion", common.MAX_WIRE_VERSION) @property def set_version(self) -> Optional[int]: - return self._doc.get('setVersion') + return self._doc.get("setVersion") @property def election_id(self) -> Optional[ObjectId]: - return self._doc.get('electionId') + return self._doc.get("electionId") @property def cluster_time(self) -> Optional[Mapping[str, Any]]: - return self._doc.get('$clusterTime') + return self._doc.get("$clusterTime") @property def logical_session_timeout_minutes(self) -> Optional[int]: - return self._doc.get('logicalSessionTimeoutMinutes') + return self._doc.get("logicalSessionTimeoutMinutes") @property def is_writable(self) -> bool: @@ -167,18 +172,18 @@ def is_readable(self) -> bool: @property def me(self) -> Optional[Tuple[str, int]]: - me = self._doc.get('me') + me = self._doc.get("me") if me: return common.clean_node(me) return None @property def last_write_date(self) -> Optional[datetime.datetime]: - return self._doc.get('lastWrite', {}).get('lastWriteDate') + return self._doc.get("lastWrite", {}).get("lastWriteDate") @property def compressors(self) -> Optional[List[str]]: - return self._doc.get('compression') + return self._doc.get("compression") @property def sasl_supported_mechs(self) -> List[str]: @@ -190,16 +195,16 @@ def sasl_supported_mechs(self) -> List[str]: ["SCRAM-SHA-1", "SCRAM-SHA-256"] """ - return self._doc.get('saslSupportedMechs', []) + return self._doc.get("saslSupportedMechs", []) @property def speculative_authenticate(self) -> Optional[Mapping[str, Any]]: """The speculativeAuthenticate field.""" - return self._doc.get('speculativeAuthenticate') + return self._doc.get("speculativeAuthenticate") @property def topology_version(self) -> Optional[Mapping[str, Any]]: - return self._doc.get('topologyVersion') + return self._doc.get("topologyVersion") @property def awaitable(self) -> bool: @@ -207,8 +212,8 @@ def awaitable(self) -> bool: @property def service_id(self) -> Optional[ObjectId]: - return self._doc.get('serviceId') + return self._doc.get("serviceId") @property def hello_ok(self) -> bool: - return self._doc.get('helloOk', False) + return self._doc.get("helloOk", False) diff --git a/pymongo/helpers.py b/pymongo/helpers.py index b2726dca6b..f12c1e1655 100644 --- a/pymongo/helpers.py +++ b/pymongo/helpers.py @@ -21,36 +21,51 @@ from bson.son import SON from pymongo import ASCENDING -from pymongo.errors import (CursorNotFound, DuplicateKeyError, - ExecutionTimeout, NotPrimaryError, - OperationFailure, WriteConcernError, WriteError, - WTimeoutError) +from pymongo.errors import ( + CursorNotFound, + DuplicateKeyError, + ExecutionTimeout, + NotPrimaryError, + OperationFailure, + WriteConcernError, + WriteError, + WTimeoutError, +) from pymongo.hello import HelloCompat # From the SDAM spec, the "node is shutting down" codes. -_SHUTDOWN_CODES = frozenset([ - 11600, # InterruptedAtShutdown - 91, # ShutdownInProgress -]) +_SHUTDOWN_CODES = frozenset( + [ + 11600, # InterruptedAtShutdown + 91, # ShutdownInProgress + ] +) # From the SDAM spec, the "not primary" error codes are combined with the # "node is recovering" error codes (of which the "node is shutting down" # errors are a subset). 
-_NOT_PRIMARY_CODES = frozenset([ - 10058, # LegacyNotPrimary <=3.2 "not primary" error code - 10107, # NotWritablePrimary - 13435, # NotPrimaryNoSecondaryOk - 11602, # InterruptedDueToReplStateChange - 13436, # NotPrimaryOrSecondary - 189, # PrimarySteppedDown -]) | _SHUTDOWN_CODES +_NOT_PRIMARY_CODES = ( + frozenset( + [ + 10058, # LegacyNotPrimary <=3.2 "not primary" error code + 10107, # NotWritablePrimary + 13435, # NotPrimaryNoSecondaryOk + 11602, # InterruptedDueToReplStateChange + 13436, # NotPrimaryOrSecondary + 189, # PrimarySteppedDown + ] + ) + | _SHUTDOWN_CODES +) # From the retryable writes spec. -_RETRYABLE_ERROR_CODES = _NOT_PRIMARY_CODES | frozenset([ - 7, # HostNotFound - 6, # HostUnreachable - 89, # NetworkTimeout - 9001, # SocketException - 262, # ExceededTimeLimit -]) +_RETRYABLE_ERROR_CODES = _NOT_PRIMARY_CODES | frozenset( + [ + 7, # HostNotFound + 6, # HostUnreachable + 89, # NetworkTimeout + 9001, # SocketException + 262, # ExceededTimeLimit + ] +) def _gen_index_name(keys): @@ -71,8 +86,9 @@ def _index_list(key_or_list, direction=None): if isinstance(key_or_list, abc.ItemsView): return list(key_or_list) elif not isinstance(key_or_list, (list, tuple)): - raise TypeError("if no direction is specified, " - "key_or_list must be an instance of list") + raise TypeError( + "if no direction is specified, " "key_or_list must be an instance of list" + ) return key_or_list @@ -82,44 +98,44 @@ def _index_document(index_list): Takes a list of (key, direction) pairs. """ if isinstance(index_list, abc.Mapping): - raise TypeError("passing a dict to sort/create_index/hint is not " - "allowed - use a list of tuples instead. did you " - "mean %r?" % list(index_list.items())) + raise TypeError( + "passing a dict to sort/create_index/hint is not " + "allowed - use a list of tuples instead. did you " + "mean %r?" % list(index_list.items()) + ) elif not isinstance(index_list, (list, tuple)): - raise TypeError("must use a list of (key, direction) pairs, " - "not: " + repr(index_list)) + raise TypeError("must use a list of (key, direction) pairs, " "not: " + repr(index_list)) if not len(index_list): raise ValueError("key_or_list must not be the empty list") index: SON[str, Any] = SON() for (key, value) in index_list: if not isinstance(key, str): - raise TypeError( - "first item in each key pair must be an instance of str") + raise TypeError("first item in each key pair must be an instance of str") if not isinstance(value, (str, int, abc.Mapping)): - raise TypeError("second item in each key pair must be 1, -1, " - "'2d', or another valid MongoDB index specifier.") + raise TypeError( + "second item in each key pair must be 1, -1, " + "'2d', or another valid MongoDB index specifier." + ) index[key] = value return index -def _check_command_response(response, max_wire_version, - allowable_errors=None, - parse_write_concern_error=False): - """Check the response to a command for errors. - """ +def _check_command_response( + response, max_wire_version, allowable_errors=None, parse_write_concern_error=False +): + """Check the response to a command for errors.""" if "ok" not in response: # Server didn't recognize our message as a command. 
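# Illustrative sketch, not part of the patch: the validation performed by the
# private _index_document helper reformatted above; inputs are invented.
from pymongo.helpers import _index_document

print(_index_document([("name", 1), ("score", -1)]))
# SON([('name', 1), ('score', -1)])

try:
    _index_document({"name": 1})  # dicts are rejected: key order is ambiguous
except TypeError as exc:
    print(exc)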
- raise OperationFailure(response.get("$err"), - response.get("code"), - response, - max_wire_version) + raise OperationFailure( + response.get("$err"), response.get("code"), response, max_wire_version + ) - if parse_write_concern_error and 'writeConcernError' in response: + if parse_write_concern_error and "writeConcernError" in response: _error = response["writeConcernError"] _labels = response.get("errorLabels") if _labels: - _error.update({'errorLabels': _labels}) + _error.update({"errorLabels": _labels}) _raise_write_concern_error(_error) if response["ok"]: @@ -176,12 +192,10 @@ def _raise_last_write_error(write_errors): def _raise_write_concern_error(error): - if "errInfo" in error and error["errInfo"].get('wtimeout'): + if "errInfo" in error and error["errInfo"].get("wtimeout"): # Make sure we raise WTimeoutError - raise WTimeoutError( - error.get("errmsg"), error.get("code"), error) - raise WriteConcernError( - error.get("errmsg"), error.get("code"), error) + raise WTimeoutError(error.get("errmsg"), error.get("code"), error) + raise WriteConcernError(error.get("errmsg"), error.get("code"), error) def _get_wce_doc(result): @@ -197,8 +211,7 @@ def _get_wce_doc(result): def _check_write_command_response(result): - """Backward compatibility helper for write command error handling. - """ + """Backward compatibility helper for write command error handling.""" # Prefer write errors over write concern errors write_errors = result.get("writeErrors") if write_errors: @@ -223,12 +236,12 @@ def _fields_list_to_dict(fields, option_name): if isinstance(fields, (abc.Sequence, abc.Set)): if not all(isinstance(field, str) for field in fields): - raise TypeError("%s must be a list of key names, each an " - "instance of str" % (option_name,)) + raise TypeError( + "%s must be a list of key names, each an " "instance of str" % (option_name,) + ) return dict.fromkeys(fields, 1) - raise TypeError("%s must be a mapping or " - "list of key names" % (option_name,)) + raise TypeError("%s must be a mapping or " "list of key names" % (option_name,)) def _handle_exception(): @@ -240,8 +253,7 @@ def _handle_exception(): if sys.stderr: einfo = sys.exc_info() try: - traceback.print_exception(einfo[0], einfo[1], einfo[2], - None, sys.stderr) + traceback.print_exception(einfo[0], einfo[1], einfo[2], None, sys.stderr) except IOError: pass finally: diff --git a/pymongo/max_staleness_selectors.py b/pymongo/max_staleness_selectors.py index 6bc2fe7232..28b0bb615e 100644 --- a/pymongo/max_staleness_selectors.py +++ b/pymongo/max_staleness_selectors.py @@ -30,28 +30,27 @@ from pymongo.errors import ConfigurationError from pymongo.server_type import SERVER_TYPE - # Constant defined in Max Staleness Spec: An idle primary writes a no-op every # 10 seconds to refresh secondaries' lastWriteDate values. IDLE_WRITE_PERIOD = 10 SMALLEST_MAX_STALENESS = 90 -def _validate_max_staleness(max_staleness, - heartbeat_frequency): +def _validate_max_staleness(max_staleness, heartbeat_frequency): # We checked for max staleness -1 before this, it must be positive here. if max_staleness < heartbeat_frequency + IDLE_WRITE_PERIOD: raise ConfigurationError( "maxStalenessSeconds must be at least heartbeatFrequencyMS +" " %d seconds. maxStalenessSeconds is set to %d," - " heartbeatFrequencyMS is set to %d." % ( - IDLE_WRITE_PERIOD, max_staleness, heartbeat_frequency * 1000)) + " heartbeatFrequencyMS is set to %d." 
+ % (IDLE_WRITE_PERIOD, max_staleness, heartbeat_frequency * 1000) + ) if max_staleness < SMALLEST_MAX_STALENESS: raise ConfigurationError( "maxStalenessSeconds must be at least %d. " - "maxStalenessSeconds is set to %d." % ( - SMALLEST_MAX_STALENESS, max_staleness)) + "maxStalenessSeconds is set to %d." % (SMALLEST_MAX_STALENESS, max_staleness) + ) def _with_primary(max_staleness, selection): @@ -63,9 +62,10 @@ def _with_primary(max_staleness, selection): if s.server_type == SERVER_TYPE.RSSecondary: # See max-staleness.rst for explanation of this formula. staleness = ( - (s.last_update_time - s.last_write_date) - - (primary.last_update_time - primary.last_write_date) + - selection.heartbeat_frequency) + (s.last_update_time - s.last_write_date) + - (primary.last_update_time - primary.last_write_date) + + selection.heartbeat_frequency + ) if staleness <= max_staleness: sds.append(s) @@ -88,9 +88,7 @@ def _no_primary(max_staleness, selection): for s in selection.server_descriptions: if s.server_type == SERVER_TYPE.RSSecondary: # See max-staleness.rst for explanation of this formula. - staleness = (smax.last_write_date - - s.last_write_date + - selection.heartbeat_frequency) + staleness = smax.last_write_date - s.last_write_date + selection.heartbeat_frequency if staleness <= max_staleness: sds.append(s) diff --git a/pymongo/message.py b/pymongo/message.py index ac6000cfd2..18cf0a6bf3 100644 --- a/pymongo/message.py +++ b/pymongo/message.py @@ -27,25 +27,27 @@ from typing import Any import bson -from bson import (CodecOptions, - encode, - _decode_selective, - _dict_to_bson, - _make_c_string) +from bson import CodecOptions, _decode_selective, _dict_to_bson, _make_c_string, encode from bson.int64 import Int64 -from bson.raw_bson import (DEFAULT_RAW_BSON_OPTIONS, RawBSONDocument, - _inflate_bson) +from bson.raw_bson import DEFAULT_RAW_BSON_OPTIONS, RawBSONDocument, _inflate_bson from bson.son import SON try: from pymongo import _cmessage # type: ignore[attr-defined] + _use_c = True except ImportError: _use_c = False -from pymongo.errors import (ConfigurationError, CursorNotFound, - DocumentTooLarge, ExecutionTimeout, - InvalidOperation, NotPrimaryError, - OperationFailure, ProtocolError) +from pymongo.errors import ( + ConfigurationError, + CursorNotFound, + DocumentTooLarge, + ExecutionTimeout, + InvalidOperation, + NotPrimaryError, + OperationFailure, + ProtocolError, +) from pymongo.hello import HelloCompat from pymongo.read_preferences import ReadPreference from pymongo.write_concern import WriteConcern @@ -60,26 +62,21 @@ _UPDATE = 1 _DELETE = 2 -_EMPTY = b'' -_BSONOBJ = b'\x03' -_ZERO_8 = b'\x00' -_ZERO_16 = b'\x00\x00' -_ZERO_32 = b'\x00\x00\x00\x00' -_ZERO_64 = b'\x00\x00\x00\x00\x00\x00\x00\x00' -_SKIPLIM = b'\x00\x00\x00\x00\xff\xff\xff\xff' +_EMPTY = b"" +_BSONOBJ = b"\x03" +_ZERO_8 = b"\x00" +_ZERO_16 = b"\x00\x00" +_ZERO_32 = b"\x00\x00\x00\x00" +_ZERO_64 = b"\x00\x00\x00\x00\x00\x00\x00\x00" +_SKIPLIM = b"\x00\x00\x00\x00\xff\xff\xff\xff" _OP_MAP = { - _INSERT: b'\x04documents\x00\x00\x00\x00\x00', - _UPDATE: b'\x04updates\x00\x00\x00\x00\x00', - _DELETE: b'\x04deletes\x00\x00\x00\x00\x00', -} -_FIELD_MAP = { - 'insert': 'documents', - 'update': 'updates', - 'delete': 'deletes' + _INSERT: b"\x04documents\x00\x00\x00\x00\x00", + _UPDATE: b"\x04updates\x00\x00\x00\x00\x00", + _DELETE: b"\x04deletes\x00\x00\x00\x00\x00", } +_FIELD_MAP = {"insert": "documents", "update": "updates", "delete": "deletes"} -_UNICODE_REPLACE_CODEC_OPTIONS = CodecOptions( - 
unicode_decode_error_handler='replace') +_UNICODE_REPLACE_CODEC_OPTIONS = CodecOptions(unicode_decode_error_handler="replace") def _randint(): @@ -96,9 +93,7 @@ def _maybe_add_read_preference(spec, read_preference): # for maximum backwards compatibility, don't add $readPreference for # secondaryPreferred unless tags or maxStalenessSeconds are in use (setting # the secondaryOkay bit has the same effect). - if mode and ( - mode != ReadPreference.SECONDARY_PREFERRED.mode or - len(document) > 1): + if mode and (mode != ReadPreference.SECONDARY_PREFERRED.mode or len(document) > 1): if "$query" not in spec: spec = SON([("$query", spec)]) spec["$readPreference"] = document @@ -107,8 +102,7 @@ def _maybe_add_read_preference(spec, read_preference): def _convert_exception(exception): """Convert an Exception into a failure document for publishing.""" - return {'errmsg': str(exception), - 'errtype': exception.__class__.__name__} + return {"errmsg": str(exception), "errtype": exception.__class__.__name__} def _convert_write_result(operation, command, result): @@ -121,21 +115,17 @@ def _convert_write_result(operation, command, result): if errmsg: # The write was successful on at least the primary so don't return. if result.get("wtimeout"): - res["writeConcernError"] = {"errmsg": errmsg, - "code": 64, - "errInfo": {"wtimeout": True}} + res["writeConcernError"] = {"errmsg": errmsg, "code": 64, "errInfo": {"wtimeout": True}} else: # The write failed. - error = {"index": 0, - "code": result.get("code", 8), - "errmsg": errmsg} + error = {"index": 0, "code": result.get("code", 8), "errmsg": errmsg} if "errInfo" in result: error["errInfo"] = result["errInfo"] res["writeErrors"] = [error] return res if operation == "insert": # GLE result for insert is always 0 in most MongoDB versions. - res["n"] = len(command['documents']) + res["n"] = len(command["documents"]) elif operation == "update": if "upserted" in result: res["upserted"] = [{"index": 0, "_id": result["upserted"]}] @@ -144,102 +134,149 @@ def _convert_write_result(operation, command, result): elif result.get("updatedExisting") is False and affected == 1: # If _id is in both the update document *and* the query spec # the update document _id takes precedence. 
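# Illustrative sketch, not part of the patch: the private query-shaping
# helpers reformatted above; the filter and the exception are invented.
from pymongo.message import _convert_exception, _maybe_add_read_preference
from pymongo.read_preferences import Secondary

print(_maybe_add_read_preference({"x": 1}, Secondary()))
# SON([('$query', {'x': 1}), ('$readPreference', {'mode': 'secondary'})])

print(_convert_exception(ValueError("boom")))
# {'errmsg': 'boom', 'errtype': 'ValueError'}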
- update = command['updates'][0] + update = command["updates"][0] _id = update["u"].get("_id", update["q"].get("_id")) res["upserted"] = [{"index": 0, "_id": _id}] return res -_OPTIONS = SON([ - ('tailable', 2), - ('oplogReplay', 8), - ('noCursorTimeout', 16), - ('awaitData', 32), - ('allowPartialResults', 128)]) - - -_MODIFIERS = SON([ - ('$query', 'filter'), - ('$orderby', 'sort'), - ('$hint', 'hint'), - ('$comment', 'comment'), - ('$maxScan', 'maxScan'), - ('$maxTimeMS', 'maxTimeMS'), - ('$max', 'max'), - ('$min', 'min'), - ('$returnKey', 'returnKey'), - ('$showRecordId', 'showRecordId'), - ('$showDiskLoc', 'showRecordId'), # <= MongoDb 3.0 - ('$snapshot', 'snapshot')]) - - -def _gen_find_command(coll, spec, projection, skip, limit, batch_size, options, - read_concern, collation=None, session=None, - allow_disk_use=None): +_OPTIONS = SON( + [ + ("tailable", 2), + ("oplogReplay", 8), + ("noCursorTimeout", 16), + ("awaitData", 32), + ("allowPartialResults", 128), + ] +) + + +_MODIFIERS = SON( + [ + ("$query", "filter"), + ("$orderby", "sort"), + ("$hint", "hint"), + ("$comment", "comment"), + ("$maxScan", "maxScan"), + ("$maxTimeMS", "maxTimeMS"), + ("$max", "max"), + ("$min", "min"), + ("$returnKey", "returnKey"), + ("$showRecordId", "showRecordId"), + ("$showDiskLoc", "showRecordId"), # <= MongoDb 3.0 + ("$snapshot", "snapshot"), + ] +) + + +def _gen_find_command( + coll, + spec, + projection, + skip, + limit, + batch_size, + options, + read_concern, + collation=None, + session=None, + allow_disk_use=None, +): """Generate a find command document.""" - cmd = SON([('find', coll)]) - if '$query' in spec: - cmd.update([(_MODIFIERS[key], val) if key in _MODIFIERS else (key, val) - for key, val in spec.items()]) - if '$explain' in cmd: - cmd.pop('$explain') - if '$readPreference' in cmd: - cmd.pop('$readPreference') + cmd = SON([("find", coll)]) + if "$query" in spec: + cmd.update( + [ + (_MODIFIERS[key], val) if key in _MODIFIERS else (key, val) + for key, val in spec.items() + ] + ) + if "$explain" in cmd: + cmd.pop("$explain") + if "$readPreference" in cmd: + cmd.pop("$readPreference") else: - cmd['filter'] = spec + cmd["filter"] = spec if projection: - cmd['projection'] = projection + cmd["projection"] = projection if skip: - cmd['skip'] = skip + cmd["skip"] = skip if limit: - cmd['limit'] = abs(limit) + cmd["limit"] = abs(limit) if limit < 0: - cmd['singleBatch'] = True + cmd["singleBatch"] = True if batch_size: - cmd['batchSize'] = batch_size + cmd["batchSize"] = batch_size if read_concern.level and not (session and session.in_transaction): - cmd['readConcern'] = read_concern.document + cmd["readConcern"] = read_concern.document if collation: - cmd['collation'] = collation + cmd["collation"] = collation if allow_disk_use is not None: - cmd['allowDiskUse'] = allow_disk_use + cmd["allowDiskUse"] = allow_disk_use if options: - cmd.update([(opt, True) - for opt, val in _OPTIONS.items() - if options & val]) + cmd.update([(opt, True) for opt, val in _OPTIONS.items() if options & val]) return cmd def _gen_get_more_command(cursor_id, coll, batch_size, max_await_time_ms): """Generate a getMore command document.""" - cmd = SON([('getMore', cursor_id), - ('collection', coll)]) + cmd = SON([("getMore", cursor_id), ("collection", coll)]) if batch_size: - cmd['batchSize'] = batch_size + cmd["batchSize"] = batch_size if max_await_time_ms is not None: - cmd['maxTimeMS'] = max_await_time_ms + cmd["maxTimeMS"] = max_await_time_ms return cmd class _Query(object): """A query operation.""" - __slots__ = 
('flags', 'db', 'coll', 'ntoskip', 'spec', - 'fields', 'codec_options', 'read_preference', 'limit', - 'batch_size', 'name', 'read_concern', 'collation', - 'session', 'client', 'allow_disk_use', '_as_command', - 'exhaust') + __slots__ = ( + "flags", + "db", + "coll", + "ntoskip", + "spec", + "fields", + "codec_options", + "read_preference", + "limit", + "batch_size", + "name", + "read_concern", + "collation", + "session", + "client", + "allow_disk_use", + "_as_command", + "exhaust", + ) # For compatibility with the _GetMore class. sock_mgr = None cursor_id = None - def __init__(self, flags, db, coll, ntoskip, spec, fields, - codec_options, read_preference, limit, - batch_size, read_concern, collation, session, client, - allow_disk_use, exhaust): + def __init__( + self, + flags, + db, + coll, + ntoskip, + spec, + fields, + codec_options, + read_preference, + limit, + batch_size, + read_concern, + collation, + session, + client, + allow_disk_use, + exhaust, + ): self.flags = flags self.db = db self.coll = coll @@ -255,7 +292,7 @@ def __init__(self, flags, db, coll, ntoskip, spec, fields, self.session = session self.client = client self.allow_disk_use = allow_disk_use - self.name = 'find' + self.name = "find" self._as_command = None self.exhaust = exhaust @@ -271,10 +308,10 @@ def use_command(self, sock_info): use_find_cmd = True elif not self.read_concern.ok_for_legacy: raise ConfigurationError( - 'read concern level of %s is not valid ' - 'with a max wire version of %d.' - % (self.read_concern.level, - sock_info.max_wire_version)) + "read concern level of %s is not valid " + "with a max wire version of %d." + % (self.read_concern.level, sock_info.max_wire_version) + ) sock_info.validate_session(self.client, self.session) return use_find_cmd @@ -286,14 +323,23 @@ def as_command(self, sock_info): if self._as_command is not None: return self._as_command - explain = '$explain' in self.spec + explain = "$explain" in self.spec cmd = _gen_find_command( - self.coll, self.spec, self.fields, self.ntoskip, - self.limit, self.batch_size, self.flags, self.read_concern, - self.collation, self.session, self.allow_disk_use) + self.coll, + self.spec, + self.fields, + self.ntoskip, + self.limit, + self.batch_size, + self.flags, + self.read_concern, + self.collation, + self.session, + self.allow_disk_use, + ) if explain: - self.name = 'explain' - cmd = SON([('explain', cmd)]) + self.name = "explain" + cmd = SON([("explain", cmd)]) session = self.session sock_info.add_server_api(cmd) if session: @@ -323,8 +369,13 @@ def get_message(self, read_preference, sock_info, use_cmd=False): if use_cmd: spec = self.as_command(sock_info)[0] request_id, msg, size, _ = _op_msg( - 0, spec, self.db, read_preference, self.codec_options, - ctx=sock_info.compression_context) + 0, + spec, + self.db, + read_preference, + self.codec_options, + ctx=sock_info.compression_context, + ) return request_id, msg, size # OP_QUERY treats ntoreturn of -1 and 1 the same, return @@ -340,23 +391,52 @@ def get_message(self, read_preference, sock_info, use_cmd=False): if sock_info.is_mongos: spec = _maybe_add_read_preference(spec, read_preference) - return _query(flags, ns, self.ntoskip, ntoreturn, - spec, None if use_cmd else self.fields, - self.codec_options, ctx=sock_info.compression_context) + return _query( + flags, + ns, + self.ntoskip, + ntoreturn, + spec, + None if use_cmd else self.fields, + self.codec_options, + ctx=sock_info.compression_context, + ) class _GetMore(object): """A getmore operation.""" - __slots__ = ('db', 'coll', 
'ntoreturn', 'cursor_id', 'max_await_time_ms', - 'codec_options', 'read_preference', 'session', 'client', - 'sock_mgr', '_as_command', 'exhaust') - - name = 'getMore' - - def __init__(self, db, coll, ntoreturn, cursor_id, codec_options, - read_preference, session, client, max_await_time_ms, - sock_mgr, exhaust): + __slots__ = ( + "db", + "coll", + "ntoreturn", + "cursor_id", + "max_await_time_ms", + "codec_options", + "read_preference", + "session", + "client", + "sock_mgr", + "_as_command", + "exhaust", + ) + + name = "getMore" + + def __init__( + self, + db, + coll, + ntoreturn, + cursor_id, + codec_options, + read_preference, + session, + client, + max_await_time_ms, + sock_mgr, + exhaust, + ): self.db = db self.coll = coll self.ntoreturn = ntoreturn @@ -390,9 +470,9 @@ def as_command(self, sock_info): if self._as_command is not None: return self._as_command - cmd = _gen_get_more_command(self.cursor_id, self.coll, - self.ntoreturn, - self.max_await_time_ms) + cmd = _gen_get_more_command( + self.cursor_id, self.coll, self.ntoreturn, self.max_await_time_ms + ) if self.session: self.session._apply_to(cmd, False, self.read_preference, sock_info) @@ -418,8 +498,8 @@ def get_message(self, dummy0, sock_info, use_cmd=False): else: flags = 0 request_id, msg, size, _ = _op_msg( - flags, spec, self.db, None, self.codec_options, - ctx=sock_info.compression_context) + flags, spec, self.db, None, self.codec_options, ctx=sock_info.compression_context + ) return request_id, msg, size return _get_more(ns, self.ntoreturn, self.cursor_id, ctx) @@ -451,6 +531,7 @@ def use_command(self, sock_info): class _CursorAddress(tuple): """The server address (host, port) of a cursor, with namespace property.""" + __namespace: Any def __new__(cls, address, namespace): @@ -470,8 +551,7 @@ def __hash__(self): def __eq__(self, other): if isinstance(other, _CursorAddress): - return (tuple(self) == tuple(other) - and self.namespace == other.namespace) + return tuple(self) == tuple(other) and self.namespace == other.namespace return NotImplemented def __ne__(self, other): @@ -481,19 +561,21 @@ def __ne__(self, other): _pack_compression_header = struct.Struct(" max_message_size)) + doc_too_large = idx == 0 and (new_message_size > max_message_size) # When OP_MSG is used unacknowleged we have to check # document size client side or applications won't be notified. # Otherwise we let the server deal with documents that are too large # since ordered=False causes those documents to be skipped instead of # halting the bulk write operation. - unacked_doc_too_large = (not ack and (doc_length > max_bson_size)) + unacked_doc_too_large = not ack and (doc_length > max_bson_size) if doc_too_large or unacked_doc_too_large: write_op = list(_FIELD_MAP.keys())[operation] - _raise_document_too_large( - write_op, len(value), max_bson_size) + _raise_document_too_large(write_op, len(value), max_bson_size) # We have enough data, return this batch. if new_message_size > max_message_size: break @@ -966,37 +1074,31 @@ def _batched_op_msg_impl( return to_send, length -def _encode_batched_op_msg( - operation, command, docs, ack, opts, ctx): +def _encode_batched_op_msg(operation, command, docs, ack, opts, ctx): """Encode the next batched insert, update, or delete operation as OP_MSG. 
""" buf = _BytesIO() - to_send, _ = _batched_op_msg_impl( - operation, command, docs, ack, opts, ctx, buf) + to_send, _ = _batched_op_msg_impl(operation, command, docs, ack, opts, ctx, buf) return buf.getvalue(), to_send + + if _use_c: _encode_batched_op_msg = _cmessage._encode_batched_op_msg -def _batched_op_msg_compressed( - operation, command, docs, ack, opts, ctx): +def _batched_op_msg_compressed(operation, command, docs, ack, opts, ctx): """Create the next batched insert, update, or delete operation with OP_MSG, compressed. """ - data, to_send = _encode_batched_op_msg( - operation, command, docs, ack, opts, ctx) + data, to_send = _encode_batched_op_msg(operation, command, docs, ack, opts, ctx) - request_id, msg = _compress( - 2013, - data, - ctx.sock_info.compression_context) + request_id, msg = _compress(2013, data, ctx.sock_info.compression_context) return request_id, msg, to_send -def _batched_op_msg( - operation, command, docs, ack, opts, ctx): +def _batched_op_msg(operation, command, docs, ack, opts, ctx): """OP_MSG implementation entry point.""" buf = _BytesIO() @@ -1005,8 +1107,7 @@ def _batched_op_msg( # responseTo, opCode buf.write(b"\x00\x00\x00\x00\xdd\x07\x00\x00") - to_send, length = _batched_op_msg_impl( - operation, command, docs, ack, opts, ctx, buf) + to_send, length = _batched_op_msg_impl(operation, command, docs, ack, opts, ctx, buf) # Header - request id and message length buf.seek(4) @@ -1016,45 +1117,42 @@ def _batched_op_msg( buf.write(_pack_int(length)) return request_id, buf.getvalue(), to_send + + if _use_c: _batched_op_msg = _cmessage._batched_op_msg -def _do_batched_op_msg( - namespace, operation, command, docs, opts, ctx): +def _do_batched_op_msg(namespace, operation, command, docs, opts, ctx): """Create the next batched insert, update, or delete operation using OP_MSG. """ - command['$db'] = namespace.split('.', 1)[0] - if 'writeConcern' in command: - ack = bool(command['writeConcern'].get('w', 1)) + command["$db"] = namespace.split(".", 1)[0] + if "writeConcern" in command: + ack = bool(command["writeConcern"].get("w", 1)) else: ack = True if ctx.sock_info.compression_context: - return _batched_op_msg_compressed( - operation, command, docs, ack, opts, ctx) - return _batched_op_msg( - operation, command, docs, ack, opts, ctx) + return _batched_op_msg_compressed(operation, command, docs, ack, opts, ctx) + return _batched_op_msg(operation, command, docs, ack, opts, ctx) # End OP_MSG ----------------------------------------------------- -def _encode_batched_write_command( - namespace, operation, command, docs, opts, ctx): - """Encode the next batched insert, update, or delete command. 
- """ +def _encode_batched_write_command(namespace, operation, command, docs, opts, ctx): + """Encode the next batched insert, update, or delete command.""" buf = _BytesIO() - to_send, _ = _batched_write_command_impl( - namespace, operation, command, docs, opts, ctx, buf) + to_send, _ = _batched_write_command_impl(namespace, operation, command, docs, opts, ctx, buf) return buf.getvalue(), to_send + + if _use_c: _encode_batched_write_command = _cmessage._encode_batched_write_command -def _batched_write_command_impl( - namespace, operation, command, docs, opts, ctx, buf): +def _batched_write_command_impl(namespace, operation, command, docs, opts, ctx, buf): """Create a batched OP_QUERY write command.""" max_bson_size = ctx.max_bson_size max_write_batch_size = ctx.max_write_batch_size @@ -1066,7 +1164,7 @@ def _batched_write_command_impl( # No options buf.write(_ZERO_32) # Namespace as C string - buf.write(namespace.encode('utf8')) + buf.write(namespace.encode("utf8")) buf.write(_ZERO_8) # Skip: 0, Limit: -1 buf.write(_SKIPLIM) @@ -1082,7 +1180,7 @@ def _batched_write_command_impl( try: buf.write(_OP_MAP[operation]) except KeyError: - raise InvalidOperation('Unknown command') + raise InvalidOperation("Unknown command") # Where to write list document length list_start = buf.tell() - 4 @@ -1090,18 +1188,16 @@ def _batched_write_command_impl( idx = 0 for doc in docs: # Encode the current operation - key = str(idx).encode('utf8') + key = str(idx).encode("utf8") value = _dict_to_bson(doc, False, opts) # Is there enough room to add this document? max_cmd_size accounts for # the two trailing null bytes. doc_too_large = len(value) > max_cmd_size if doc_too_large: write_op = list(_FIELD_MAP.keys())[operation] - _raise_document_too_large( - write_op, len(value), max_bson_size) - enough_data = (idx >= 1 and - (buf.tell() + len(key) + len(value)) >= max_split_size) - enough_documents = (idx >= max_write_batch_size) + _raise_document_too_large(write_op, len(value), max_bson_size) + enough_data = idx >= 1 and (buf.tell() + len(key) + len(value)) >= max_split_size + enough_documents = idx >= max_write_batch_size if enough_data or enough_documents: break buf.write(_BSONOBJ) @@ -1170,20 +1266,25 @@ def raw_response(self, cursor_id=None, user_fields=None): raise NotPrimaryError(error_object["$err"], error_object) elif error_object.get("code") == 50: default_msg = "operation exceeded time limit" - raise ExecutionTimeout(error_object.get("$err", default_msg), - error_object.get("code"), - error_object) - raise OperationFailure("database error: %s" % - error_object.get("$err"), - error_object.get("code"), - error_object) + raise ExecutionTimeout( + error_object.get("$err", default_msg), error_object.get("code"), error_object + ) + raise OperationFailure( + "database error: %s" % error_object.get("$err"), + error_object.get("code"), + error_object, + ) if self.documents: return [self.documents] return [] - def unpack_response(self, cursor_id=None, - codec_options=_UNICODE_REPLACE_CODEC_OPTIONS, - user_fields=None, legacy_response=False): + def unpack_response( + self, + cursor_id=None, + codec_options=_UNICODE_REPLACE_CODEC_OPTIONS, + user_fields=None, + legacy_response=False, + ): """Unpack a response from the database and decode the BSON document(s). 
Check the response for errors and unpack, returning a dictionary @@ -1202,8 +1303,7 @@ def unpack_response(self, cursor_id=None, self.raw_response(cursor_id) if legacy_response: return bson.decode_all(self.documents, codec_options) - return bson._decode_all_selective( - self.documents, codec_options, user_fields) + return bson._decode_all_selective(self.documents, codec_options, user_fields) def command_response(self, codec_options): """Unpack a command response.""" @@ -1254,13 +1354,17 @@ def raw_response(self, cursor_id=None, user_fields={}): user_fields is used to determine which fields must not be decoded """ inflated_response = _decode_selective( - RawBSONDocument(self.payload_document), user_fields, - DEFAULT_RAW_BSON_OPTIONS) + RawBSONDocument(self.payload_document), user_fields, DEFAULT_RAW_BSON_OPTIONS + ) return [inflated_response] - def unpack_response(self, cursor_id=None, - codec_options=_UNICODE_REPLACE_CODEC_OPTIONS, - user_fields=None, legacy_response=False): + def unpack_response( + self, + cursor_id=None, + codec_options=_UNICODE_REPLACE_CODEC_OPTIONS, + user_fields=None, + legacy_response=False, + ): """Unpack a OP_MSG command response. :Parameters: @@ -1270,8 +1374,7 @@ def unpack_response(self, cursor_id=None, """ # If _OpMsg is in-use, this cannot be a legacy response. assert not legacy_response - return bson._decode_all_selective( - self.payload_document, codec_options, user_fields) + return bson._decode_all_selective(self.payload_document, codec_options, user_fields) def command_response(self, codec_options): """Unpack a command response.""" @@ -1292,17 +1395,12 @@ def unpack(cls, msg): flags, first_payload_type, first_payload_size = cls.UNPACK_FROM(msg) if flags != 0: if flags & cls.CHECKSUM_PRESENT: - raise ProtocolError( - "Unsupported OP_MSG flag checksumPresent: " - "0x%x" % (flags,)) + raise ProtocolError("Unsupported OP_MSG flag checksumPresent: " "0x%x" % (flags,)) if flags ^ cls.MORE_TO_COME: - raise ProtocolError( - "Unsupported OP_MSG flags: 0x%x" % (flags,)) + raise ProtocolError("Unsupported OP_MSG flags: 0x%x" % (flags,)) if first_payload_type != 0: - raise ProtocolError( - "Unsupported OP_MSG payload type: " - "0x%x" % (first_payload_type,)) + raise ProtocolError("Unsupported OP_MSG payload type: " "0x%x" % (first_payload_type,)) if len(msg) != first_payload_size + 5: raise ProtocolError("Unsupported OP_MSG reply: >1 section") diff --git a/pymongo/mongo_client.py b/pymongo/mongo_client.py index 975fc87610..3fa2946c7c 100644 --- a/pymongo/mongo_client.py +++ b/pymongo/mongo_client.py @@ -35,23 +35,50 @@ import threading import weakref from collections import defaultdict -from typing import (TYPE_CHECKING, Any, Dict, FrozenSet, Generic, List, - Mapping, Optional, Sequence, Set, Tuple, Type, Union, cast) +from typing import ( + TYPE_CHECKING, + Any, + Dict, + FrozenSet, + Generic, + List, + Mapping, + Optional, + Sequence, + Set, + Tuple, + Type, + Union, + cast, +) import bson -from bson.codec_options import (DEFAULT_CODEC_OPTIONS, CodecOptions, - TypeRegistry) +from bson.codec_options import DEFAULT_CODEC_OPTIONS, CodecOptions, TypeRegistry from bson.son import SON from bson.timestamp import Timestamp -from pymongo import (client_session, common, database, helpers, message, - periodic_executor, uri_parser) +from pymongo import ( + client_session, + common, + database, + helpers, + message, + periodic_executor, + uri_parser, +) from pymongo.change_stream import ChangeStream, ClusterChangeStream from pymongo.client_options import ClientOptions from 
pymongo.command_cursor import CommandCursor -from pymongo.errors import (AutoReconnect, BulkWriteError, ConfigurationError, - ConnectionFailure, InvalidOperation, - NotPrimaryError, OperationFailure, PyMongoError, - ServerSelectionTimeoutError) +from pymongo.errors import ( + AutoReconnect, + BulkWriteError, + ConfigurationError, + ConnectionFailure, + InvalidOperation, + NotPrimaryError, + OperationFailure, + PyMongoError, + ServerSelectionTimeoutError, +) from pymongo.pool import ConnectionClosedReason from pymongo.read_preferences import ReadPreference, _ServerMode from pymongo.server_selectors import writable_server_selector @@ -60,8 +87,12 @@ from pymongo.topology import Topology, _ErrorContext from pymongo.topology_description import TOPOLOGY_TYPE, TopologyDescription from pymongo.typings import _CollationIn, _DocumentType, _Pipeline -from pymongo.uri_parser import (_check_options, _handle_option_deprecations, - _handle_security_options, _normalize_options) +from pymongo.uri_parser import ( + _check_options, + _handle_option_deprecations, + _handle_security_options, + _normalize_options, +) from pymongo.write_concern import DEFAULT_WRITE_CONCERN, WriteConcern if TYPE_CHECKING: @@ -78,13 +109,15 @@ class MongoClient(common.BaseObject, Generic[_DocumentType]): resources related to this, including background threads for monitoring, and connection pools. """ + HOST = "localhost" PORT = 27017 # Define order to retrieve options from ClientOptions for __repr__. # No host/port; these are retrieved from TopologySettings. - _constructor_args = ('document_class', 'tz_aware', 'connect') + _constructor_args = ("document_class", "tz_aware", "connect") - def __init__(self, + def __init__( + self, host: Optional[Union[str, Sequence[str]]] = None, port: Optional[int] = None, document_class: Type[_DocumentType] = dict, @@ -616,13 +649,15 @@ def __init__(self, client.__my_database__ """ - self.__init_kwargs: Dict[str, Any] = {'host': host, - 'port': port, - 'document_class': document_class, - 'tz_aware': tz_aware, - 'connect': connect, - 'type_registry': type_registry, - **kwargs} + self.__init_kwargs: Dict[str, Any] = { + "host": host, + "port": port, + "document_class": document_class, + "tz_aware": tz_aware, + "connect": connect, + "type_registry": type_registry, + **kwargs, + } if host is None: host = self.HOST @@ -635,13 +670,13 @@ def __init__(self, # _pool_class, _monitor_class, and _condition_class are for deep # customization of PyMongo, e.g. Motor. - pool_class = kwargs.pop('_pool_class', None) - monitor_class = kwargs.pop('_monitor_class', None) - condition_class = kwargs.pop('_condition_class', None) + pool_class = kwargs.pop("_pool_class", None) + monitor_class = kwargs.pop("_monitor_class", None) + condition_class = kwargs.pop("_condition_class", None) # Parse options passed as kwargs. keyword_opts = common._CaseInsensitiveDictionary(kwargs) - keyword_opts['document_class'] = document_class + keyword_opts["document_class"] = document_class seeds = set() username = None @@ -652,8 +687,7 @@ def __init__(self, srv_service_name = keyword_opts.get("srvservicename") srv_max_hosts = keyword_opts.get("srvmaxhosts") if len([h for h in host if "/" in h]) > 1: - raise ConfigurationError("host must not contain multiple MongoDB " - "URIs") + raise ConfigurationError("host must not contain multiple MongoDB " "URIs") for entity in host: # A hostname can only include a-z, 0-9, '-' and '.'. 
If we find a '/' # it must be a URI, @@ -663,12 +697,18 @@ def __init__(self, timeout = keyword_opts.get("connecttimeoutms") if timeout is not None: timeout = common.validate_timeout_or_none_or_zero( - keyword_opts.cased_key("connecttimeoutms"), timeout) + keyword_opts.cased_key("connecttimeoutms"), timeout + ) res = uri_parser.parse_uri( - entity, port, validate=True, warn=True, normalize=False, + entity, + port, + validate=True, + warn=True, + normalize=False, connect_timeout=timeout, srv_service_name=srv_service_name, - srv_max_hosts=srv_max_hosts) + srv_max_hosts=srv_max_hosts, + ) seeds.update(res["nodelist"]) username = res["username"] or username password = res["password"] or password @@ -682,19 +722,20 @@ def __init__(self, # Add options with named keyword arguments to the parsed kwarg options. if type_registry is not None: - keyword_opts['type_registry'] = type_registry + keyword_opts["type_registry"] = type_registry if tz_aware is None: - tz_aware = opts.get('tz_aware', False) + tz_aware = opts.get("tz_aware", False) if connect is None: - connect = opts.get('connect', True) - keyword_opts['tz_aware'] = tz_aware - keyword_opts['connect'] = connect + connect = opts.get("connect", True) + keyword_opts["tz_aware"] = tz_aware + keyword_opts["connect"] = connect # Handle deprecated options in kwarg options. keyword_opts = _handle_option_deprecations(keyword_opts) # Validate kwarg options. - keyword_opts = common._CaseInsensitiveDictionary(dict(common.validate( - keyword_opts.cased_key(k), v) for k, v in keyword_opts.items())) + keyword_opts = common._CaseInsensitiveDictionary( + dict(common.validate(keyword_opts.cased_key(k), v) for k, v in keyword_opts.items()) + ) # Override connection string options with kwarg options. opts.update(keyword_opts) @@ -712,18 +753,19 @@ def __init__(self, # Username and password passed as kwargs override user info in URI. username = opts.get("username", username) password = opts.get("password", password) - self.__options = options = ClientOptions( - username, password, dbase, opts) + self.__options = options = ClientOptions(username, password, dbase, opts) self.__default_database_name = dbase self.__lock = threading.Lock() self.__kill_cursors_queue: List = [] self._event_listeners = options.pool_options._event_listeners - super(MongoClient, self).__init__(options.codec_options, - options.read_preference, - options.write_concern, - options.read_concern) + super(MongoClient, self).__init__( + options.codec_options, + options.read_preference, + options.write_concern, + options.read_concern, + ) self._topology_settings = TopologySettings( seeds=seeds, @@ -740,7 +782,7 @@ def __init__(self, direct_connection=options.direct_connection, load_balanced=options.load_balanced, srv_service_name=srv_service_name, - srv_max_hosts=srv_max_hosts + srv_max_hosts=srv_max_hosts, ) self._topology = Topology(self._topology_settings) @@ -756,7 +798,8 @@ def target(): interval=common.KILL_CURSOR_FREQUENCY, min_interval=common.MIN_HEARTBEAT_INTERVAL, target=target, - name="pymongo_kill_cursors_thread") + name="pymongo_kill_cursors_thread", + ) # We strongly reference the executor and it weakly references us via # this closure. When the client is freed, stop the executor soon. 
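The option merging above fixes an observable precedence rule: keyword arguments are validated and then override the corresponding URI options. A small demonstration (the client connects lazily, so no running server is needed just to construct it)::

    from pymongo import MongoClient

    # The w kwarg overrides the w=1 from the connection string.
    client = MongoClient("mongodb://localhost:27017/?w=1", w="majority")
    print(client.write_concern)  # WriteConcern(w='majority')
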
@@ -769,8 +812,8 @@ def target(): self._encrypter = None if self.__options.auto_encryption_opts: from pymongo.encryption import _Encrypter - self._encrypter = _Encrypter( - self, self.__options.auto_encryption_opts) + + self._encrypter = _Encrypter(self, self.__options.auto_encryption_opts) def _duplicate(self, **kwargs): args = self.__init_kwargs.copy() @@ -788,12 +831,12 @@ def _server_property(self, attr_name): the server may change. In such cases, store a local reference to a ServerDescription first, then use its properties. """ - server = self._topology.select_server( - writable_server_selector) + server = self._topology.select_server(writable_server_selector) return getattr(server.description, attr_name) - def watch(self, + def watch( + self, pipeline: Optional[_Pipeline] = None, full_document: Optional[str] = None, resume_after: Optional[Mapping[str, Any]] = None, @@ -889,9 +932,17 @@ def watch(self, https://github.com/mongodb/specifications/blob/master/source/change-streams/change-streams.rst """ return ClusterChangeStream( - self.admin, pipeline, full_document, resume_after, max_await_time_ms, - batch_size, collation, start_at_operation_time, session, - start_after) + self.admin, + pipeline, + full_document, + resume_after, + max_await_time_ms, + batch_size, + collation, + start_at_operation_time, + session, + start_after, + ) @property def topology_description(self) -> TopologyDescription: @@ -916,7 +967,7 @@ def topology_description(self) -> TopologyDescription: return self._topology.description @property - def address(self) -> Optional[Tuple[str, int]]: + def address(self) -> Optional[Tuple[str, int]]: """(host, port) of the current standalone, primary, or mongos, or None. Accessing :attr:`address` raises :exc:`~.errors.InvalidOperation` if @@ -930,17 +981,22 @@ def address(self) -> Optional[Tuple[str, int]]: .. versionadded:: 3.0 """ topology_type = self._topology._description.topology_type - if (topology_type == TOPOLOGY_TYPE.Sharded and - len(self.topology_description.server_descriptions()) > 1): + if ( + topology_type == TOPOLOGY_TYPE.Sharded + and len(self.topology_description.server_descriptions()) > 1 + ): raise InvalidOperation( 'Cannot use "address" property when load balancing among' - ' mongoses, use "nodes" instead.') - if topology_type not in (TOPOLOGY_TYPE.ReplicaSetWithPrimary, - TOPOLOGY_TYPE.Single, - TOPOLOGY_TYPE.LoadBalanced, - TOPOLOGY_TYPE.Sharded): + ' mongoses, use "nodes" instead.' + ) + if topology_type not in ( + TOPOLOGY_TYPE.ReplicaSetWithPrimary, + TOPOLOGY_TYPE.Single, + TOPOLOGY_TYPE.LoadBalanced, + TOPOLOGY_TYPE.Sharded, + ): return None - return self._server_property('address') + return self._server_property("address") @property def primary(self) -> Optional[Tuple[str, int]]: @@ -987,7 +1043,7 @@ def is_primary(self) -> bool: connection is established or raise ServerSelectionTimeoutError if no server is available. """ - return self._server_property('is_writable') + return self._server_property("is_writable") @property def is_mongos(self) -> bool: @@ -995,7 +1051,7 @@ def is_mongos(self) -> bool: connected, this will block until a connection is established or raise ServerSelectionTimeoutError if no server is available.. 
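The ``address``, ``primary``, ``is_primary``, and ``is_mongos`` properties above all route through ``_server_property``, so each one blocks on server selection. A short demonstration, assuming nothing is listening on the default port::

    from pymongo import MongoClient
    from pymongo.errors import ServerSelectionTimeoutError

    client = MongoClient(serverSelectionTimeoutMS=500)
    try:
        # Blocks until a writable server is selected, then reads the
        # attribute off the selected ServerDescription.
        print(client.is_primary)
    except ServerSelectionTimeoutError:
        print("no server available within 500ms")
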
""" - return self._server_property('server_type') == SERVER_TYPE.Mongos + return self._server_property("server_type") == SERVER_TYPE.Mongos @property def nodes(self) -> FrozenSet[Tuple[str, Optional[int]]]: @@ -1027,17 +1083,16 @@ def _end_sessions(self, session_ids): try: # Use SocketInfo.command directly to avoid implicitly creating # another session. - with self._socket_for_reads( - ReadPreference.PRIMARY_PREFERRED, - None) as (sock_info, read_pref): + with self._socket_for_reads(ReadPreference.PRIMARY_PREFERRED, None) as ( + sock_info, + read_pref, + ): if not sock_info.supports_sessions: return for i in range(0, len(session_ids), common._MAX_END_SESSIONS): - spec = SON([('endSessions', - session_ids[i:i + common._MAX_END_SESSIONS])]) - sock_info.command( - 'admin', spec, read_preference=read_pref, client=self) + spec = SON([("endSessions", session_ids[i : i + common._MAX_END_SESSIONS])]) + sock_info.command("admin", spec, read_preference=read_pref, client=self) except PyMongoError: # Drivers MUST ignore any errors returned by the endSessions # command. @@ -1091,16 +1146,20 @@ def _get_socket(self, server, session): return with server.get_socket(handler=err_handler) as sock_info: # Pin this session to the selected server or connection. - if (in_txn and server.description.server_type in ( - SERVER_TYPE.Mongos, SERVER_TYPE.LoadBalancer)): + if in_txn and server.description.server_type in ( + SERVER_TYPE.Mongos, + SERVER_TYPE.LoadBalancer, + ): session._pin(server, sock_info) err_handler.contribute_socket(sock_info) - if (self._encrypter and - not self._encrypter._bypass_auto_encryption and - sock_info.max_wire_version < 8): + if ( + self._encrypter + and not self._encrypter._bypass_auto_encryption + and sock_info.max_wire_version < 8 + ): raise ConfigurationError( - 'Auto-encryption requires a minimum MongoDB version ' - 'of 4.2') + "Auto-encryption requires a minimum MongoDB version " "of 4.2" + ) yield sock_info def _select_server(self, server_selector, session, address=None): @@ -1123,8 +1182,7 @@ def _select_server(self, server_selector, session, address=None): # We're running a getMore or this session is pinned to a mongos. server = topology.select_server_by_address(address) if not server: - raise AutoReconnect('server %s:%d no longer available' - % address) + raise AutoReconnect("server %s:%d no longer available" % address) else: server = topology.select_server(server_selector) return server @@ -1169,8 +1227,7 @@ def _socket_for_reads(self, read_preference, session): return self._socket_from_server(read_preference, server, session) def _should_pin_cursor(self, session): - return (self.__options.load_balanced and - not (session and session.in_transaction)) + return self.__options.load_balanced and not (session and session.in_transaction) def _run_operation(self, operation, unpack_res, address=None): """Run a _Query/_GetMore operation and return a Response. 
@@ -1183,24 +1240,28 @@ def _run_operation(self, operation, unpack_res, address=None): """ if operation.sock_mgr: server = self._select_server( - operation.read_preference, operation.session, address=address) + operation.read_preference, operation.session, address=address + ) with operation.sock_mgr.lock: - with _MongoClientErrorHandler( - self, server, operation.session) as err_handler: + with _MongoClientErrorHandler(self, server, operation.session) as err_handler: err_handler.contribute_socket(operation.sock_mgr.sock) return server.run_operation( - operation.sock_mgr.sock, operation, True, - self._event_listeners, unpack_res) + operation.sock_mgr.sock, operation, True, self._event_listeners, unpack_res + ) def _cmd(session, server, sock_info, read_preference): return server.run_operation( - sock_info, operation, read_preference, self._event_listeners, - unpack_res) + sock_info, operation, read_preference, self._event_listeners, unpack_res + ) return self._retryable_read( - _cmd, operation.read_preference, operation.session, - address=address, retryable=isinstance(operation, message._Query)) + _cmd, + operation.read_preference, + operation.session, + address=address, + retryable=isinstance(operation, message._Query), + ) def _retry_with_session(self, retryable, func, session, bulk): """Execute an operation with at most one consecutive retries @@ -1210,8 +1271,9 @@ def _retry_with_session(self, retryable, func, session, bulk): Re-raises any exception thrown by func(). """ - retryable = (retryable and self.options.retry_writes - and session and not session.in_transaction) + retryable = ( + retryable and self.options.retry_writes and session and not session.in_transaction + ) return self._retry_internal(retryable, func, session, bulk) def _retry_internal(self, retryable, func, session, bulk): @@ -1222,6 +1284,7 @@ def _retry_internal(self, retryable, func, session, bulk): def is_retrying(): return bulk.retrying if bulk else retrying + # Increment the transaction id up front to ensure any retry attempt # will use the proper txnNumber, even if server or socket selection # fails before the command can be sent. @@ -1234,8 +1297,8 @@ def is_retrying(): try: server = self._select_server(writable_server_selector, session) supports_session = ( - session is not None and - server.description.retryable_writes_supported) + session is not None and server.description.retryable_writes_supported + ) with self._get_socket(server, session) as sock_info: max_wire_version = sock_info.max_wire_version if retryable and not supports_session: @@ -1273,8 +1336,7 @@ def is_retrying(): retrying = True last_error = exc - def _retryable_read(self, func, read_pref, session, address=None, - retryable=True): + def _retryable_read(self, func, read_pref, session, address=None, retryable=True): """Execute an operation with at most one consecutive retries Returns func()'s return value on success. On error retries the same @@ -1282,18 +1344,16 @@ def _retryable_read(self, func, read_pref, session, address=None, Re-raises any exception thrown by func(). 
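Both retry paths above share one shape: run the operation once and, on a retryable error, run it exactly once more against a freshly selected server. A stripped-down sketch of that loop, omitting the server re-selection and txnNumber bookkeeping (``func`` and ``is_retryable`` stand in for the real callables)::

    def retry_once(func, is_retryable):
        retrying = False
        while True:
            try:
                return func()
            except Exception as exc:
                if retrying or not is_retryable(exc):
                    raise
                # At most one consecutive retry.
                retrying = True
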
""" - retryable = (retryable and - self.options.retry_reads - and not (session and session.in_transaction)) + retryable = ( + retryable and self.options.retry_reads and not (session and session.in_transaction) + ) last_error: Optional[Exception] = None retrying = False while True: try: - server = self._select_server( - read_pref, session, address=address) - with self._socket_from_server(read_pref, server, session) as ( - sock_info, read_pref): + server = self._select_server(read_pref, session, address=address) + with self._socket_from_server(read_pref, server, session) as (sock_info, read_pref): if retrying and not retryable: # A retry is not possible because this server does # not support retryable reads, raise the last error. @@ -1343,35 +1403,38 @@ def __hash__(self) -> int: def _repr_helper(self): def option_repr(option, value): """Fix options whose __repr__ isn't usable in a constructor.""" - if option == 'document_class': + if option == "document_class": if value is dict: - return 'document_class=dict' + return "document_class=dict" else: - return 'document_class=%s.%s' % (value.__module__, - value.__name__) + return "document_class=%s.%s" % (value.__module__, value.__name__) if option in common.TIMEOUT_OPTIONS and value is not None: return "%s=%s" % (option, int(value * 1000)) - return '%s=%r' % (option, value) + return "%s=%r" % (option, value) # Host first... - options = ['host=%r' % [ - '%s:%d' % (host, port) if port is not None else host - for host, port in self._topology_settings.seeds]] + options = [ + "host=%r" + % [ + "%s:%d" % (host, port) if port is not None else host + for host, port in self._topology_settings.seeds + ] + ] # ... then everything in self._constructor_args... options.extend( - option_repr(key, self.__options._options[key]) - for key in self._constructor_args) + option_repr(key, self.__options._options[key]) for key in self._constructor_args + ) # ... then everything else. options.extend( option_repr(key, self.__options._options[key]) for key in self.__options._options - if key not in set(self._constructor_args) - and key != 'username' and key != 'password') - return ', '.join(options) + if key not in set(self._constructor_args) and key != "username" and key != "password" + ) + return ", ".join(options) def __repr__(self): - return ("MongoClient(%s)" % (self._repr_helper(),)) + return "MongoClient(%s)" % (self._repr_helper(),) def __getattr__(self, name: str) -> database.Database[_DocumentType]: """Get a database by name. @@ -1382,10 +1445,11 @@ def __getattr__(self, name: str) -> database.Database[_DocumentType]: :Parameters: - `name`: the name of the database to get """ - if name.startswith('_'): + if name.startswith("_"): raise AttributeError( "MongoClient has no attribute %r. To access the %s" - " database, use client[%r]." % (name, name, name)) + " database, use client[%r]." % (name, name, name) + ) return self.__getitem__(name) def __getitem__(self, name: str) -> database.Database[_DocumentType]: @@ -1399,8 +1463,9 @@ def __getitem__(self, name: str) -> database.Database[_DocumentType]: """ return database.Database(self, name) - def _cleanup_cursor(self, locks_allowed, cursor_id, address, sock_mgr, - session, explicit_session): + def _cleanup_cursor( + self, locks_allowed, cursor_id, address, sock_mgr, session, explicit_session + ): """Cleanup a cursor from cursor.close() or __del__. 
This method handles cleanup for Cursors/CommandCursors including any @@ -1421,12 +1486,9 @@ def _cleanup_cursor(self, locks_allowed, cursor_id, address, sock_mgr, # If this is an exhaust cursor and we haven't completely # exhausted the result set we *must* close the socket # to stop the server from sending more data. - sock_mgr.sock.close_socket( - ConnectionClosedReason.ERROR) + sock_mgr.sock.close_socket(ConnectionClosedReason.ERROR) else: - self._close_cursor_now( - cursor_id, address, session=session, - sock_mgr=sock_mgr) + self._close_cursor_now(cursor_id, address, session=session, sock_mgr=sock_mgr) if sock_mgr: sock_mgr.close() else: @@ -1440,8 +1502,7 @@ def _close_cursor_soon(self, cursor_id, address, sock_mgr=None): """Request that a cursor and/or connection be cleaned up soon.""" self.__kill_cursors_queue.append((address, cursor_id, sock_mgr)) - def _close_cursor_now(self, cursor_id, address=None, session=None, - sock_mgr=None): + def _close_cursor_now(self, cursor_id, address=None, session=None, sock_mgr=None): """Send a kill cursors message with the given id. The cursor is closed synchronously on the current thread. @@ -1453,11 +1514,9 @@ def _close_cursor_now(self, cursor_id, address=None, session=None, if sock_mgr: with sock_mgr.lock: # Cursor is pinned to LB outside of a transaction. - self._kill_cursor_impl( - [cursor_id], address, session, sock_mgr.sock) + self._kill_cursor_impl([cursor_id], address, session, sock_mgr.sock) else: - self._kill_cursors( - [cursor_id], address, self._get_topology(), session) + self._kill_cursors([cursor_id], address, self._get_topology(), session) except PyMongoError: # Make another attempt to kill the cursor later. self._close_cursor_soon(cursor_id, address) @@ -1477,8 +1536,8 @@ def _kill_cursors(self, cursor_ids, address, topology, session): def _kill_cursor_impl(self, cursor_ids, address, session, sock_info): namespace = address.namespace - db, coll = namespace.split('.', 1) - spec = SON([('killCursors', coll), ('cursors', cursor_ids)]) + db, coll = namespace.split(".", 1) + spec = SON([("killCursors", coll), ("cursors", cursor_ids)]) sock_info.command(db, spec, session=session, client=self) def _process_kill_cursors(self): @@ -1500,11 +1559,9 @@ def _process_kill_cursors(self): for address, cursor_id, sock_mgr in pinned_cursors: try: - self._cleanup_cursor(True, cursor_id, address, sock_mgr, - None, False) + self._cleanup_cursor(True, cursor_id, address, sock_mgr, None, False) except Exception as exc: - if (isinstance(exc, InvalidOperation) - and self._topology._closed): + if isinstance(exc, InvalidOperation) and self._topology._closed: # Raise the exception when client is closed so that it # can be caught in _process_periodic_tasks raise @@ -1516,11 +1573,9 @@ def _process_kill_cursors(self): topology = self._get_topology() for address, cursor_ids in address_to_cursor_ids.items(): try: - self._kill_cursors( - cursor_ids, address, topology, session=None) + self._kill_cursors(cursor_ids, address, topology, session=None) except Exception as exc: - if (isinstance(exc, InvalidOperation) and - self._topology._closed): + if isinstance(exc, InvalidOperation) and self._topology._closed: raise else: helpers._handle_exception() @@ -1542,10 +1597,10 @@ def __start_session(self, implicit, **kwargs): # Raises ConfigurationError if sessions are not supported. 
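The ``killCursors`` spec built in ``_kill_cursor_impl`` above uses ``SON`` because a command's name must be its first key. The resulting document, with placeholder values::

    from bson.son import SON

    # Command name first, then the batch of cursor ids to close.
    spec = SON([("killCursors", "my_collection"), ("cursors", [1234567890])])
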
server_session = self._get_server_session() opts = client_session.SessionOptions(**kwargs) - return client_session.ClientSession( - self, server_session, opts, implicit) + return client_session.ClientSession(self, server_session, opts, implicit) - def start_session(self, + def start_session( + self, causal_consistency: Optional[bool] = None, default_transaction_options: Optional[client_session.TransactionOptions] = None, snapshot: Optional[bool] = False, @@ -1571,7 +1626,8 @@ def start_session(self, False, causal_consistency=causal_consistency, default_transaction_options=default_transaction_options, - snapshot=snapshot) + snapshot=snapshot, + ) def _get_server_session(self): """Internal: start or resume a _ServerSession.""" @@ -1624,23 +1680,21 @@ def _send_cluster_time(self, command, session): topology_time = self._topology.max_cluster_time() session_time = session.cluster_time if session else None if topology_time and session_time: - if topology_time['clusterTime'] > session_time['clusterTime']: + if topology_time["clusterTime"] > session_time["clusterTime"]: cluster_time = topology_time else: cluster_time = session_time else: cluster_time = topology_time or session_time if cluster_time: - command['$clusterTime'] = cluster_time + command["$clusterTime"] = cluster_time def _process_response(self, reply, session): - self._topology.receive_cluster_time(reply.get('$clusterTime')) + self._topology.receive_cluster_time(reply.get("$clusterTime")) if session is not None: session._process_response(reply) - def server_info(self, - session: Optional[client_session.ClientSession] = None - ) -> Dict[str, Any]: + def server_info(self, session: Optional[client_session.ClientSession] = None) -> Dict[str, Any]: """Get information about the MongoDB server we're connected to. :Parameters: @@ -1650,13 +1704,12 @@ def server_info(self, .. versionchanged:: 3.6 Added ``session`` parameter. """ - return self.admin.command("buildinfo", - read_preference=ReadPreference.PRIMARY, - session=session) + return self.admin.command( + "buildinfo", read_preference=ReadPreference.PRIMARY, session=session + ) - def list_databases(self, - session: Optional[client_session.ClientSession] = None, - **kwargs: Any + def list_databases( + self, session: Optional[client_session.ClientSession] = None, **kwargs: Any ) -> CommandCursor[Dict[str, Any]]: """Get a cursor over the databases of the connected server. @@ -1686,8 +1739,8 @@ def list_databases(self, } return CommandCursor(admin["$cmd"], cursor, None) - def list_database_names(self, - session: Optional[client_session.ClientSession] = None + def list_database_names( + self, session: Optional[client_session.ClientSession] = None ) -> List[str]: """Get a list of the names of all databases on the connected server. @@ -1697,12 +1750,12 @@ def list_database_names(self, .. versionadded:: 3.6 """ - return [doc["name"] - for doc in self.list_databases(session, nameOnly=True)] + return [doc["name"] for doc in self.list_databases(session, nameOnly=True)] - def drop_database(self, + def drop_database( + self, name_or_database: Union[str, database.Database], - session: Optional[client_session.ClientSession] = None + session: Optional[client_session.ClientSession] = None, ) -> None: """Drop a database. 
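``_send_cluster_time`` above gossips whichever ``$clusterTime`` is greater when both the topology and the session have one, relying on BSON ``Timestamp`` ordering. An equivalent formulation of that comparison::

    from bson.timestamp import Timestamp

    topology_time = {"clusterTime": Timestamp(1636000000, 5)}
    session_time = {"clusterTime": Timestamp(1636000000, 9)}

    # Timestamps order by (time, inc), so the later one wins and is
    # attached to the outgoing command.
    cluster_time = max(topology_time, session_time, key=lambda ct: ct["clusterTime"])
    command = {"ping": 1}
    command["$clusterTime"] = cluster_time
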
@@ -1733,8 +1786,7 @@ def drop_database(self, name = name.name if not isinstance(name, str): - raise TypeError("name_or_database must be an instance " - "of str or a Database") + raise TypeError("name_or_database must be an instance " "of str or a Database") with self._socket_for_writes(session) as sock_info: self[name]._command( @@ -1743,9 +1795,11 @@ def drop_database(self, read_preference=ReadPreference.PRIMARY, write_concern=self._write_concern_for(session), parse_write_concern_error=True, - session=session) + session=session, + ) - def get_default_database(self, + def get_default_database( + self, default: Optional[str] = None, codec_options: Optional[CodecOptions] = None, read_preference: Optional[_ServerMode] = None, @@ -1793,15 +1847,15 @@ def get_default_database(self, Deprecated, use :meth:`get_database` instead. """ if self.__default_database_name is None and default is None: - raise ConfigurationError( - 'No default database name defined or provided.') + raise ConfigurationError("No default database name defined or provided.") name = cast(str, self.__default_database_name or default) return database.Database( - self, name, codec_options, - read_preference, write_concern, read_concern) + self, name, codec_options, read_preference, write_concern, read_concern + ) - def get_database(self, + def get_database( + self, name: Optional[str] = None, codec_options: Optional[CodecOptions] = None, read_preference: Optional[_ServerMode] = None, @@ -1853,19 +1907,21 @@ def get_database(self, """ if name is None: if self.__default_database_name is None: - raise ConfigurationError('No default database defined') + raise ConfigurationError("No default database defined") name = self.__default_database_name return database.Database( - self, name, codec_options, read_preference, - write_concern, read_concern) + self, name, codec_options, read_preference, write_concern, read_concern + ) def _database_default_options(self, name): """Get a Database instance with the default settings.""" return self.get_database( - name, codec_options=DEFAULT_CODEC_OPTIONS, + name, + codec_options=DEFAULT_CODEC_OPTIONS, read_preference=ReadPreference.PRIMARY, - write_concern=DEFAULT_WRITE_CONCERN) + write_concern=DEFAULT_WRITE_CONCERN, + ) def __enter__(self) -> "MongoClient[_DocumentType]": return self @@ -1887,7 +1943,7 @@ def _retryable_error_doc(exc): if isinstance(exc, BulkWriteError): # Check the last writeConcernError to determine if this # BulkWriteError is retryable. - wces = exc.details['writeConcernErrors'] + wces = exc.details["writeConcernErrors"] wce = wces[-1] if wces else None return wce if isinstance(exc, (NotPrimaryError, OperationFailure)): @@ -1898,18 +1954,18 @@ def _retryable_error_doc(exc): def _add_retryable_write_error(exc, max_wire_version): doc = _retryable_error_doc(exc) if doc: - code = doc.get('code', 0) + code = doc.get("code", 0) # retryWrites on MMAPv1 should raise an actionable error. - if (code == 20 and - str(exc).startswith("Transaction numbers")): + if code == 20 and str(exc).startswith("Transaction numbers"): errmsg = ( "This MongoDB deployment does not support " "retryable writes. Please add retryWrites=false " - "to your connection string.") + "to your connection string." + ) raise OperationFailure(errmsg, code, exc.details) if max_wire_version >= 9: # In MongoDB 4.4+, the server reports the error labels. 
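``_add_retryable_write_error`` above splits labeling by wire version: MongoDB 4.4+ (wire version 9) reports error labels itself, while older servers leave the driver to apply ``RetryableWriteError`` from a fixed code list. A sketch of that rule with illustrative parameters (``_add_error_label`` is the private API used in the hunk)::

    def label_write_error(exc, code, server_labels, max_wire_version, retryable_codes):
        # exc is a PyMongoError instance.
        if max_wire_version >= 9:
            for label in server_labels:
                exc._add_error_label(label)
        elif code in retryable_codes:
            exc._add_error_label("RetryableWriteError")
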
- for label in doc.get('errorLabels', []): + for label in doc.get("errorLabels", []): exc._add_error_label(label) else: if code in helpers._RETRYABLE_ERROR_CODES: @@ -1917,16 +1973,23 @@ def _add_retryable_write_error(exc, max_wire_version): # Connection errors are always retryable except NotPrimaryError which is # handled above. - if (isinstance(exc, ConnectionFailure) and - not isinstance(exc, NotPrimaryError)): + if isinstance(exc, ConnectionFailure) and not isinstance(exc, NotPrimaryError): exc._add_error_label("RetryableWriteError") class _MongoClientErrorHandler(object): """Handle errors raised when executing an operation.""" - __slots__ = ('client', 'server_address', 'session', 'max_wire_version', - 'sock_generation', 'completed_handshake', 'service_id', - 'handled') + + __slots__ = ( + "client", + "server_address", + "session", + "max_wire_version", + "sock_generation", + "completed_handshake", + "service_id", + "handled", + ) def __init__(self, client, server, session): self.client = client @@ -1960,13 +2023,18 @@ def handle(self, exc_type, exc_val): self.session._server_session.mark_dirty() if issubclass(exc_type, PyMongoError): - if (exc_val.has_error_label("TransientTransactionError") or - exc_val.has_error_label("RetryableWriteError")): + if exc_val.has_error_label("TransientTransactionError") or exc_val.has_error_label( + "RetryableWriteError" + ): self.session._unpin() err_ctx = _ErrorContext( - exc_val, self.max_wire_version, self.sock_generation, - self.completed_handshake, self.service_id) + exc_val, + self.max_wire_version, + self.sock_generation, + self.completed_handshake, + self.service_id, + ) self.client._topology.handle_error(self.server_address, err_ctx) def __enter__(self): diff --git a/pymongo/monitor.py b/pymongo/monitor.py index 388ba61687..844ad02262 100644 --- a/pymongo/monitor.py +++ b/pymongo/monitor.py @@ -21,8 +21,7 @@ from typing import Any, Mapping, cast from pymongo import common, periodic_executor -from pymongo.errors import (NotPrimaryError, OperationFailure, - _OperationCancelled) +from pymongo.errors import NotPrimaryError, OperationFailure, _OperationCancelled from pymongo.hello import Hello from pymongo.periodic_executor import _shutdown_executors from pymongo.read_preferences import MovingAverage @@ -54,10 +53,8 @@ def target(): return True executor = periodic_executor.PeriodicExecutor( - interval=interval, - min_interval=min_interval, - target=target, - name=name) + interval=interval, min_interval=min_interval, target=target, name=name + ) self._executor = executor @@ -101,12 +98,7 @@ def request_check(self): class Monitor(MonitorBase): - def __init__( - self, - server_description, - topology, - pool, - topology_settings): + def __init__(self, server_description, topology, pool, topology_settings): """Class to monitor a MongoDB server on a background thread. 
Pass an initial ServerDescription, a Topology, a Pool, and @@ -119,7 +111,8 @@ def __init__( topology, "pymongo_server_monitor_thread", topology_settings.heartbeat_frequency, - common.MIN_HEARTBEAT_INTERVAL) + common.MIN_HEARTBEAT_INTERVAL, + ) self._server_description = server_description self._pool = pool self._settings = topology_settings @@ -128,8 +121,10 @@ def __init__( self._publish = pub and self._listeners.enabled_for_server_heartbeat self._cancel_context = None self._rtt_monitor = _RttMonitor( - topology, topology_settings, topology._create_pool_for_monitor( - server_description.address)) + topology, + topology_settings, + topology._create_pool_for_monitor(server_description.address), + ) self.heartbeater = None def cancel_check(self): @@ -179,7 +174,8 @@ def _run(self): _sanitize(exc) # Already closed the connection, wait for the next check. self._server_description = ServerDescription( - self._server_description.address, error=exc) + self._server_description.address, error=exc + ) if prev_sd.is_server_type_known: # Immediately retry since we've already waited 500ms to # discover that we've been cancelled. @@ -187,11 +183,14 @@ def _run(self): return # Update the Topology and clear the server pool on error. - self._topology.on_change(self._server_description, - reset_pool=self._server_description.error) - - if (self._server_description.is_server_type_known and - self._server_description.topology_version): + self._topology.on_change( + self._server_description, reset_pool=self._server_description.error + ) + + if ( + self._server_description.is_server_type_known + and self._server_description.topology_version + ): self._start_rtt_monitor() # Immediately check for the next streaming response. self._executor.skip_sleep() @@ -215,7 +214,7 @@ def _check_server(self): except (OperationFailure, NotPrimaryError) as exc: # Update max cluster time even when hello fails. details = cast(Mapping[str, Any], exc.details) - self._topology.receive_cluster_time(details.get('$clusterTime')) + self._topology.receive_cluster_time(details.get("$clusterTime")) raise except ReferenceError: raise @@ -226,8 +225,7 @@ def _check_server(self): duration = time.monotonic() - start if self._publish: awaited = sd.is_server_type_known and sd.topology_version - self._listeners.publish_server_heartbeat_failed( - address, duration, error, awaited) + self._listeners.publish_server_heartbeat_failed(address, duration, error, awaited) self._reset_connection() if isinstance(error, _OperationCancelled): raise @@ -252,11 +250,11 @@ def _check_once(self): if not response.awaitable: self._rtt_monitor.add_sample(round_trip_time) - sd = ServerDescription(address, response, - self._rtt_monitor.average()) + sd = ServerDescription(address, response, self._rtt_monitor.average()) if self._publish: self._listeners.publish_server_heartbeat_succeeded( - address, round_trip_time, response, response.awaitable) + address, round_trip_time, response, response.awaitable + ) return sd def _check_with_socket(self, conn): @@ -269,13 +267,13 @@ def _check_with_socket(self, conn): if conn.more_to_come: # Read the next streaming hello (MongoDB 4.4+). response = Hello(conn._next_reply(), awaitable=True) - elif (conn.performed_handshake and - self._server_description.topology_version): + elif conn.performed_handshake and self._server_description.topology_version: # Initiate streaming hello (MongoDB 4.4+). 
response = conn._hello( cluster_time, self._server_description.topology_version, - self._settings.heartbeat_frequency) + self._settings.heartbeat_frequency, + ) else: # New connection handshake or polling hello (MongoDB <4.4). response = conn._hello(cluster_time, None, None) @@ -294,7 +292,8 @@ def __init__(self, topology, topology_settings): topology, "pymongo_srv_polling_thread", common.MIN_SRV_RESCAN_INTERVAL, - topology_settings.heartbeat_frequency) + topology_settings.heartbeat_frequency, + ) self._settings = topology_settings self._seedlist = self._settings._seeds self._fqdn = self._settings.fqdn @@ -315,9 +314,11 @@ def _get_seedlist(self): Returns a list of ServerDescriptions. """ try: - resolver = _SrvResolver(self._fqdn, - self._settings.pool_options.connect_timeout, - self._settings.srv_service_name) + resolver = _SrvResolver( + self._fqdn, + self._settings.pool_options.connect_timeout, + self._settings.srv_service_name, + ) seedlist, ttl = resolver.get_hosts_and_min_ttl() if len(seedlist) == 0: # As per the spec: this should be treated as a failure. @@ -330,8 +331,7 @@ def _get_seedlist(self): self.request_check() return None else: - self._executor.update_interval( - max(ttl, common.MIN_SRV_RESCAN_INTERVAL)) + self._executor.update_interval(max(ttl, common.MIN_SRV_RESCAN_INTERVAL)) return seedlist @@ -345,7 +345,8 @@ def __init__(self, topology, topology_settings, pool): topology, "pymongo_server_rtt_thread", topology_settings.heartbeat_frequency, - common.MIN_HEARTBEAT_INTERVAL) + common.MIN_HEARTBEAT_INTERVAL, + ) self._pool = pool self._moving_average = MovingAverage() @@ -389,7 +390,7 @@ def _ping(self): """Run a "hello" command and return the RTT.""" with self._pool.get_socket() as sock_info: if self._executor._stopped: - raise Exception('_RttMonitor closed') + raise Exception("_RttMonitor closed") start = time.monotonic() sock_info.hello() return time.monotonic() - start diff --git a/pymongo/monitoring.py b/pymongo/monitoring.py index 6f57200a3b..6a3ed6d07e 100644 --- a/pymongo/monitoring.py +++ b/pymongo/monitoring.py @@ -194,10 +194,16 @@ def connection_checked_in(self, event): from pymongo.topology_description import TopologyDescription -_Listeners = namedtuple('_Listeners', - ('command_listeners', 'server_listeners', - 'server_heartbeat_listeners', 'topology_listeners', - 'cmap_listeners')) +_Listeners = namedtuple( + "_Listeners", + ( + "command_listeners", + "server_listeners", + "server_heartbeat_listeners", + "topology_listeners", + "cmap_listeners", + ), +) _LISTENERS = _Listeners([], [], [], [], []) @@ -483,10 +489,12 @@ def _validate_event_listeners(option, listeners): raise TypeError("%s must be a list or tuple" % (option,)) for listener in listeners: if not isinstance(listener, _EventListener): - raise TypeError("Listeners for %s must be either a " - "CommandListener, ServerHeartbeatListener, " - "ServerListener, TopologyListener, or " - "ConnectionPoolListener." % (option,)) + raise TypeError( + "Listeners for %s must be either a " + "CommandListener, ServerHeartbeatListener, " + "ServerListener, TopologyListener, or " + "ConnectionPoolListener." % (option,) + ) return listeners @@ -499,10 +507,12 @@ def register(listener: _EventListener) -> None: :class:`TopologyListener`, or :class:`ConnectionPoolListener`. """ if not isinstance(listener, _EventListener): - raise TypeError("Listeners for %s must be either a " - "CommandListener, ServerHeartbeatListener, " - "ServerListener, TopologyListener, or " - "ConnectionPoolListener." 
% (listener,)) + raise TypeError( + "Listeners for %s must be either a " + "CommandListener, ServerHeartbeatListener, " + "ServerListener, TopologyListener, or " + "ConnectionPoolListener." % (listener,) + ) if isinstance(listener, CommandListener): _LISTENERS.command_listeners.append(listener) if isinstance(listener, ServerHeartbeatListener): @@ -514,19 +524,32 @@ def register(listener: _EventListener) -> None: if isinstance(listener, ConnectionPoolListener): _LISTENERS.cmap_listeners.append(listener) + # Note - to avoid bugs from forgetting which if these is all lowercase and # which are camelCase, and at the same time avoid having to add a test for # every command, use all lowercase here and test against command_name.lower(). _SENSITIVE_COMMANDS = set( - ["authenticate", "saslstart", "saslcontinue", "getnonce", "createuser", - "updateuser", "copydbgetnonce", "copydbsaslstart", "copydb"]) + [ + "authenticate", + "saslstart", + "saslcontinue", + "getnonce", + "createuser", + "updateuser", + "copydbgetnonce", + "copydbsaslstart", + "copydb", + ] +) # The "hello" command is also deemed sensitive when attempting speculative # authentication. def _is_speculative_authenticate(command_name, doc): - if (command_name.lower() in ('hello', HelloCompat.LEGACY_CMD) and - 'speculativeAuthenticate' in doc): + if ( + command_name.lower() in ("hello", HelloCompat.LEGACY_CMD) + and "speculativeAuthenticate" in doc + ): return True return False @@ -534,8 +557,7 @@ def _is_speculative_authenticate(command_name, doc): class _CommandEvent(object): """Base class for command events.""" - __slots__ = ("__cmd_name", "__rqst_id", "__conn_id", "__op_id", - "__service_id") + __slots__ = ("__cmd_name", "__rqst_id", "__conn_id", "__op_id", "__service_id") def __init__( self, @@ -592,6 +614,7 @@ class CommandStartedEvent(_CommandEvent): - `operation_id`: An optional identifier for a series of related events. - `service_id`: The service_id this command was sent to, or ``None``. """ + __slots__ = ("__cmd", "__db") def __init__( @@ -608,10 +631,10 @@ def __init__( # Command name must be first key. command_name = next(iter(command)) super(CommandStartedEvent, self).__init__( - command_name, request_id, connection_id, operation_id, service_id=service_id) + command_name, request_id, connection_id, operation_id, service_id=service_id + ) cmd_name, cmd_doc = command_name.lower(), command[command_name] - if (cmd_name in _SENSITIVE_COMMANDS or - _is_speculative_authenticate(cmd_name, command)): + if cmd_name in _SENSITIVE_COMMANDS or _is_speculative_authenticate(cmd_name, command): self.__cmd: Mapping[str, Any] = {} else: self.__cmd = command @@ -628,12 +651,14 @@ def database_name(self) -> str: return self.__db def __repr__(self): - return ( - "<%s %s db: %r, command: %r, operation_id: %s, " - "service_id: %s>") % ( - self.__class__.__name__, self.connection_id, - self.database_name, self.command_name, self.operation_id, - self.service_id) + return ("<%s %s db: %r, command: %r, operation_id: %s, " "service_id: %s>") % ( + self.__class__.__name__, + self.connection_id, + self.database_name, + self.command_name, + self.operation_id, + self.service_id, + ) class CommandSucceededEvent(_CommandEvent): @@ -649,6 +674,7 @@ class CommandSucceededEvent(_CommandEvent): - `operation_id`: An optional identifier for a series of related events. - `service_id`: The service_id this command was sent to, or ``None``. 
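The redaction logic above is what application code observes through a registered listener: sensitive commands arrive with empty ``command`` and ``reply`` documents, everything else passes through. A typical listener, registered globally via the ``register()`` helper shown above::

    from pymongo import monitoring

    class CommandLogger(monitoring.CommandListener):
        def started(self, event):
            print("%s started on request %s" % (event.command_name, event.request_id))

        def succeeded(self, event):
            print("%s took %s us" % (event.command_name, event.duration_micros))

        def failed(self, event):
            print("%s failed: %r" % (event.command_name, event.failure))

    # Listeners may also be passed per client:
    # MongoClient(event_listeners=[CommandLogger()])
    monitoring.register(CommandLogger())
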
""" + __slots__ = ("__duration_micros", "__reply") def __init__( @@ -662,12 +688,11 @@ def __init__( service_id: Optional[ObjectId] = None, ) -> None: super(CommandSucceededEvent, self).__init__( - command_name, request_id, connection_id, operation_id, - service_id=service_id) + command_name, request_id, connection_id, operation_id, service_id=service_id + ) self.__duration_micros = _to_micros(duration) cmd_name = command_name.lower() - if (cmd_name in _SENSITIVE_COMMANDS or - _is_speculative_authenticate(cmd_name, reply)): + if cmd_name in _SENSITIVE_COMMANDS or _is_speculative_authenticate(cmd_name, reply): self.__reply: Mapping[str, Any] = {} else: self.__reply = reply @@ -683,12 +708,14 @@ def reply(self) -> _DocumentOut: return self.__reply def __repr__(self): - return ( - "<%s %s command: %r, operation_id: %s, duration_micros: %s, " - "service_id: %s>") % ( - self.__class__.__name__, self.connection_id, - self.command_name, self.operation_id, self.duration_micros, - self.service_id) + return ("<%s %s command: %r, operation_id: %s, duration_micros: %s, " "service_id: %s>") % ( + self.__class__.__name__, + self.connection_id, + self.command_name, + self.operation_id, + self.duration_micros, + self.service_id, + ) class CommandFailedEvent(_CommandEvent): @@ -704,6 +731,7 @@ class CommandFailedEvent(_CommandEvent): - `operation_id`: An optional identifier for a series of related events. - `service_id`: The service_id this command was sent to, or ``None``. """ + __slots__ = ("__duration_micros", "__failure") def __init__( @@ -716,7 +744,9 @@ def __init__( operation_id: Optional[int], service_id: Optional[ObjectId] = None, ) -> None: - super(CommandFailedEvent, self).__init__(command_name, request_id, connection_id, operation_id, service_id=service_id) + super(CommandFailedEvent, self).__init__( + command_name, request_id, connection_id, operation_id, service_id=service_id + ) self.__duration_micros = _to_micros(duration) self.__failure = failure @@ -733,14 +763,21 @@ def failure(self) -> _DocumentOut: def __repr__(self): return ( "<%s %s command: %r, operation_id: %s, duration_micros: %s, " - "failure: %r, service_id: %s>") % ( - self.__class__.__name__, self.connection_id, self.command_name, - self.operation_id, self.duration_micros, self.failure, - self.service_id) + "failure: %r, service_id: %s>" + ) % ( + self.__class__.__name__, + self.connection_id, + self.command_name, + self.operation_id, + self.duration_micros, + self.failure, + self.service_id, + ) class _PoolEvent(object): """Base class for pool events.""" + __slots__ = ("__address",) def __init__(self, address: _Address) -> None: @@ -754,7 +791,7 @@ def address(self) -> _Address: return self.__address def __repr__(self): - return '%s(%r)' % (self.__class__.__name__, self.__address) + return "%s(%r)" % (self.__class__.__name__, self.__address) class PoolCreatedEvent(_PoolEvent): @@ -766,6 +803,7 @@ class PoolCreatedEvent(_PoolEvent): .. versionadded:: 3.9 """ + __slots__ = ("__options",) def __init__(self, address: _Address, options: Dict[str, Any]) -> None: @@ -774,13 +812,11 @@ def __init__(self, address: _Address, options: Dict[str, Any]) -> None: @property def options(self) -> Dict[str, Any]: - """Any non-default pool options that were set on this Connection Pool. 
- """ + """Any non-default pool options that were set on this Connection Pool.""" return self.__options def __repr__(self): - return '%s(%r, %r)' % ( - self.__class__.__name__, self.address, self.__options) + return "%s(%r, %r)" % (self.__class__.__name__, self.address, self.__options) class PoolReadyEvent(_PoolEvent): @@ -792,6 +828,7 @@ class PoolReadyEvent(_PoolEvent): .. versionadded:: 4.0 """ + __slots__ = () @@ -805,6 +842,7 @@ class PoolClearedEvent(_PoolEvent): .. versionadded:: 3.9 """ + __slots__ = ("__service_id",) def __init__(self, address: _Address, service_id: Optional[ObjectId] = None) -> None: @@ -822,8 +860,7 @@ def service_id(self) -> Optional[ObjectId]: return self.__service_id def __repr__(self): - return '%s(%r, %r)' % ( - self.__class__.__name__, self.address, self.__service_id) + return "%s(%r, %r)" % (self.__class__.__name__, self.address, self.__service_id) class PoolClosedEvent(_PoolEvent): @@ -835,6 +872,7 @@ class PoolClosedEvent(_PoolEvent): .. versionadded:: 3.9 """ + __slots__ = () @@ -845,17 +883,17 @@ class ConnectionClosedReason(object): .. versionadded:: 3.9 """ - STALE = 'stale' + STALE = "stale" """The pool was cleared, making the connection no longer valid.""" - IDLE = 'idle' + IDLE = "idle" """The connection became stale by being idle for too long (maxIdleTimeMS). """ - ERROR = 'error' + ERROR = "error" """The connection experienced an error, making it no longer valid.""" - POOL_CLOSED = 'poolClosed' + POOL_CLOSED = "poolClosed" """The pool was closed, making the connection no longer valid.""" @@ -866,13 +904,13 @@ class ConnectionCheckOutFailedReason(object): .. versionadded:: 3.9 """ - TIMEOUT = 'timeout' + TIMEOUT = "timeout" """The connection check out attempt exceeded the specified timeout.""" - POOL_CLOSED = 'poolClosed' + POOL_CLOSED = "poolClosed" """The pool was previously closed, and cannot provide new connections.""" - CONN_ERROR = 'connectionError' + CONN_ERROR = "connectionError" """The connection check out attempt experienced an error while setting up a new connection. """ @@ -880,6 +918,7 @@ class ConnectionCheckOutFailedReason(object): class _ConnectionEvent(object): """Private base class for some connection events.""" + __slots__ = ("__address", "__connection_id") def __init__(self, address: _Address, connection_id: int) -> None: @@ -899,8 +938,7 @@ def connection_id(self) -> int: return self.__connection_id def __repr__(self): - return '%s(%r, %r)' % ( - self.__class__.__name__, self.__address, self.__connection_id) + return "%s(%r, %r)" % (self.__class__.__name__, self.__address, self.__connection_id) class ConnectionCreatedEvent(_ConnectionEvent): @@ -916,6 +954,7 @@ class ConnectionCreatedEvent(_ConnectionEvent): .. versionadded:: 3.9 """ + __slots__ = () @@ -929,6 +968,7 @@ class ConnectionReadyEvent(_ConnectionEvent): .. versionadded:: 3.9 """ + __slots__ = () @@ -943,6 +983,7 @@ class ConnectionClosedEvent(_ConnectionEvent): .. versionadded:: 3.9 """ + __slots__ = ("__reason",) def __init__(self, address, connection_id, reason): @@ -959,9 +1000,12 @@ def reason(self): return self.__reason def __repr__(self): - return '%s(%r, %r, %r)' % ( - self.__class__.__name__, self.address, self.connection_id, - self.__reason) + return "%s(%r, %r, %r)" % ( + self.__class__.__name__, + self.address, + self.connection_id, + self.__reason, + ) class ConnectionCheckOutStartedEvent(object): @@ -973,6 +1017,7 @@ class ConnectionCheckOutStartedEvent(object): .. 
versionadded:: 3.9 """ + __slots__ = ("__address",) def __init__(self, address): @@ -986,7 +1031,7 @@ def address(self): return self.__address def __repr__(self): - return '%s(%r)' % (self.__class__.__name__, self.__address) + return "%s(%r)" % (self.__class__.__name__, self.__address) class ConnectionCheckOutFailedEvent(object): @@ -999,6 +1044,7 @@ class ConnectionCheckOutFailedEvent(object): .. versionadded:: 3.9 """ + __slots__ = ("__address", "__reason") def __init__(self, address: _Address, reason: str) -> None: @@ -1022,8 +1068,7 @@ def reason(self) -> str: return self.__reason def __repr__(self): - return '%s(%r, %r)' % ( - self.__class__.__name__, self.__address, self.__reason) + return "%s(%r, %r)" % (self.__class__.__name__, self.__address, self.__reason) class ConnectionCheckedOutEvent(_ConnectionEvent): @@ -1036,6 +1081,7 @@ class ConnectionCheckedOutEvent(_ConnectionEvent): .. versionadded:: 3.9 """ + __slots__ = () @@ -1049,6 +1095,7 @@ class ConnectionCheckedInEvent(_ConnectionEvent): .. versionadded:: 3.9 """ + __slots__ = () @@ -1073,7 +1120,10 @@ def topology_id(self) -> ObjectId: def __repr__(self): return "<%s %s topology_id: %s>" % ( - self.__class__.__name__, self.server_address, self.topology_id) + self.__class__.__name__, + self.server_address, + self.topology_id, + ) class ServerDescriptionChangedEvent(_ServerEvent): @@ -1082,9 +1132,14 @@ class ServerDescriptionChangedEvent(_ServerEvent): .. versionadded:: 3.3 """ - __slots__ = ('__previous_description', '__new_description') + __slots__ = ("__previous_description", "__new_description") - def __init__(self, previous_description: "ServerDescription", new_description: "ServerDescription", *args: Any) -> None: + def __init__( + self, + previous_description: "ServerDescription", + new_description: "ServerDescription", + *args: Any + ) -> None: super(ServerDescriptionChangedEvent, self).__init__(*args) self.__previous_description = previous_description self.__new_description = new_description @@ -1103,8 +1158,11 @@ def new_description(self) -> "ServerDescription": def __repr__(self): return "<%s %s changed from: %s, to: %s>" % ( - self.__class__.__name__, self.server_address, - self.previous_description, self.new_description) + self.__class__.__name__, + self.server_address, + self.previous_description, + self.new_description, + ) class ServerOpeningEvent(_ServerEvent): @@ -1128,7 +1186,7 @@ class ServerClosedEvent(_ServerEvent): class TopologyEvent(object): """Base class for topology description events.""" - __slots__ = ('__topology_id') + __slots__ = "__topology_id" def __init__(self, topology_id: ObjectId) -> None: self.__topology_id = topology_id @@ -1139,8 +1197,7 @@ def topology_id(self) -> ObjectId: return self.__topology_id def __repr__(self): - return "<%s topology_id: %s>" % ( - self.__class__.__name__, self.topology_id) + return "<%s topology_id: %s>" % (self.__class__.__name__, self.topology_id) class TopologyDescriptionChangedEvent(TopologyEvent): @@ -1149,9 +1206,14 @@ class TopologyDescriptionChangedEvent(TopologyEvent): .. 
versionadded:: 3.3 """ - __slots__ = ('__previous_description', '__new_description') + __slots__ = ("__previous_description", "__new_description") - def __init__(self, previous_description: "TopologyDescription", new_description: "TopologyDescription", *args: Any) -> None: + def __init__( + self, + previous_description: "TopologyDescription", + new_description: "TopologyDescription", + *args: Any + ) -> None: super(TopologyDescriptionChangedEvent, self).__init__(*args) self.__previous_description = previous_description self.__new_description = new_description @@ -1170,8 +1232,11 @@ def new_description(self) -> "TopologyDescription": def __repr__(self): return "<%s topology_id: %s changed from: %s, to: %s>" % ( - self.__class__.__name__, self.topology_id, - self.previous_description, self.new_description) + self.__class__.__name__, + self.topology_id, + self.previous_description, + self.new_description, + ) class TopologyOpenedEvent(TopologyEvent): @@ -1195,7 +1260,7 @@ class TopologyClosedEvent(TopologyEvent): class _ServerHeartbeatEvent(object): """Base class for server heartbeat events.""" - __slots__ = ('__connection_id') + __slots__ = "__connection_id" def __init__(self, connection_id: _Address) -> None: self.__connection_id = connection_id @@ -1225,9 +1290,11 @@ class ServerHeartbeatSucceededEvent(_ServerHeartbeatEvent): .. versionadded:: 3.3 """ - __slots__ = ('__duration', '__reply', '__awaited') + __slots__ = ("__duration", "__reply", "__awaited") - def __init__(self, duration: float, reply: Hello, connection_id: _Address, awaited: bool = False) -> None: + def __init__( + self, duration: float, reply: Hello, connection_id: _Address, awaited: bool = False + ) -> None: super(ServerHeartbeatSucceededEvent, self).__init__(connection_id) self.__duration = duration self.__reply = reply @@ -1255,8 +1322,12 @@ def awaited(self) -> bool: def __repr__(self): return "<%s %s duration: %s, awaited: %s, reply: %s>" % ( - self.__class__.__name__, self.connection_id, - self.duration, self.awaited, self.reply) + self.__class__.__name__, + self.connection_id, + self.duration, + self.awaited, + self.reply, + ) class ServerHeartbeatFailedEvent(_ServerHeartbeatEvent): @@ -1266,9 +1337,11 @@ class ServerHeartbeatFailedEvent(_ServerHeartbeatEvent): .. versionadded:: 3.3 """ - __slots__ = ('__duration', '__reply', '__awaited') + __slots__ = ("__duration", "__reply", "__awaited") - def __init__(self, duration: float, reply: Exception, connection_id: _Address, awaited: bool = False) -> None: + def __init__( + self, duration: float, reply: Exception, connection_id: _Address, awaited: bool = False + ) -> None: super(ServerHeartbeatFailedEvent, self).__init__(connection_id) self.__duration = duration self.__reply = reply @@ -1296,8 +1369,12 @@ def awaited(self) -> bool: def __repr__(self): return "<%s %s duration: %s, awaited: %s, reply: %r>" % ( - self.__class__.__name__, self.connection_id, - self.duration, self.awaited, self.reply) + self.__class__.__name__, + self.connection_id, + self.duration, + self.awaited, + self.reply, + ) class _EventListeners(object): @@ -1308,6 +1385,7 @@ class _EventListeners(object): :Parameters: - `listeners`: A list of event listeners. 
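The topology events above pair naturally with a ``TopologyListener``. A minimal subscriber that logs description transitions (``topology_type_name`` is the human-readable form of the topology type)::

    from pymongo import monitoring

    class TopologyLogger(monitoring.TopologyListener):
        def opened(self, event):
            print("Topology %s opened" % (event.topology_id,))

        def description_changed(self, event):
            print("Topology changed: %s -> %s" % (
                event.previous_description.topology_type_name,
                event.new_description.topology_type_name))

        def closed(self, event):
            print("Topology %s closed" % (event.topology_id,))
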
""" + def __init__(self, listeners): self.__command_listeners = _LISTENERS.command_listeners[:] self.__server_listeners = _LISTENERS.server_listeners[:] @@ -1329,8 +1407,7 @@ def __init__(self, listeners): self.__cmap_listeners.append(lst) self.__enabled_for_commands = bool(self.__command_listeners) self.__enabled_for_server = bool(self.__server_listeners) - self.__enabled_for_server_heartbeat = bool( - self.__server_heartbeat_listeners) + self.__enabled_for_server_heartbeat = bool(self.__server_heartbeat_listeners) self.__enabled_for_topology = bool(self.__topology_listeners) self.__enabled_for_cmap = bool(self.__cmap_listeners) @@ -1361,15 +1438,17 @@ def enabled_for_cmap(self): def event_listeners(self): """List of registered event listeners.""" - return (self.__command_listeners + - self.__server_heartbeat_listeners + - self.__server_listeners + - self.__topology_listeners + - self.__cmap_listeners) - - def publish_command_start(self, command, database_name, - request_id, connection_id, op_id=None, - service_id=None): + return ( + self.__command_listeners + + self.__server_heartbeat_listeners + + self.__server_listeners + + self.__topology_listeners + + self.__cmap_listeners + ) + + def publish_command_start( + self, command, database_name, request_id, connection_id, op_id=None, service_id=None + ): """Publish a CommandStartedEvent to all command listeners. :Parameters: @@ -1385,18 +1464,25 @@ def publish_command_start(self, command, database_name, if op_id is None: op_id = request_id event = CommandStartedEvent( - command, database_name, request_id, connection_id, op_id, - service_id=service_id) + command, database_name, request_id, connection_id, op_id, service_id=service_id + ) for subscriber in self.__command_listeners: try: subscriber.started(event) except Exception: _handle_exception() - def publish_command_success(self, duration, reply, command_name, - request_id, connection_id, op_id=None, - service_id=None, - speculative_hello=False): + def publish_command_success( + self, + duration, + reply, + command_name, + request_id, + connection_id, + op_id=None, + service_id=None, + speculative_hello=False, + ): """Publish a CommandSucceededEvent to all command listeners. :Parameters: @@ -1417,17 +1503,24 @@ def publish_command_success(self, duration, reply, command_name, # speculativeAuthenticate. reply = {} event = CommandSucceededEvent( - duration, reply, command_name, request_id, connection_id, op_id, - service_id) + duration, reply, command_name, request_id, connection_id, op_id, service_id + ) for subscriber in self.__command_listeners: try: subscriber.succeeded(event) except Exception: _handle_exception() - def publish_command_failure(self, duration, failure, command_name, - request_id, connection_id, op_id=None, - service_id=None): + def publish_command_failure( + self, + duration, + failure, + command_name, + request_id, + connection_id, + op_id=None, + service_id=None, + ): """Publish a CommandFailedEvent to all command listeners. 
:Parameters: @@ -1444,8 +1537,8 @@ def publish_command_failure(self, duration, failure, command_name, if op_id is None: op_id = request_id event = CommandFailedEvent( - duration, failure, command_name, request_id, connection_id, op_id, - service_id=service_id) + duration, failure, command_name, request_id, connection_id, op_id, service_id=service_id + ) for subscriber in self.__command_listeners: try: subscriber.failed(event) @@ -1466,8 +1559,7 @@ def publish_server_heartbeat_started(self, connection_id): except Exception: _handle_exception() - def publish_server_heartbeat_succeeded(self, connection_id, duration, - reply, awaited): + def publish_server_heartbeat_succeeded(self, connection_id, duration, reply, awaited): """Publish a ServerHeartbeatSucceededEvent to all server heartbeat listeners. @@ -1477,17 +1569,15 @@ def publish_server_heartbeat_succeeded(self, connection_id, duration, resolution for the platform. - `reply`: The command reply. - `awaited`: True if the response was awaited. - """ - event = ServerHeartbeatSucceededEvent(duration, reply, connection_id, - awaited) + """ + event = ServerHeartbeatSucceededEvent(duration, reply, connection_id, awaited) for subscriber in self.__server_heartbeat_listeners: try: subscriber.succeeded(event) except Exception: _handle_exception() - def publish_server_heartbeat_failed(self, connection_id, duration, reply, - awaited): + def publish_server_heartbeat_failed(self, connection_id, duration, reply, awaited): """Publish a ServerHeartbeatFailedEvent to all server heartbeat listeners. @@ -1497,9 +1587,8 @@ def publish_server_heartbeat_failed(self, connection_id, duration, reply, resolution for the platform. - `reply`: The command reply. - `awaited`: True if the response was awaited. - """ - event = ServerHeartbeatFailedEvent(duration, reply, connection_id, - awaited) + """ + event = ServerHeartbeatFailedEvent(duration, reply, connection_id, awaited) for subscriber in self.__server_heartbeat_listeners: try: subscriber.failed(event) @@ -1536,9 +1625,9 @@ def publish_server_closed(self, server_address, topology_id): except Exception: _handle_exception() - def publish_server_description_changed(self, previous_description, - new_description, server_address, - topology_id): + def publish_server_description_changed( + self, previous_description, new_description, server_address, topology_id + ): """Publish a ServerDescriptionChangedEvent to all server listeners. :Parameters: @@ -1548,9 +1637,9 @@ def publish_server_description_changed(self, previous_description, - `topology_id`: A unique identifier for the topology this server is a part of. """ - event = ServerDescriptionChangedEvent(previous_description, - new_description, server_address, - topology_id) + event = ServerDescriptionChangedEvent( + previous_description, new_description, server_address, topology_id + ) for subscriber in self.__server_listeners: try: subscriber.description_changed(event) @@ -1585,8 +1674,9 @@ def publish_topology_closed(self, topology_id): except Exception: _handle_exception() - def publish_topology_description_changed(self, previous_description, - new_description, topology_id): + def publish_topology_description_changed( + self, previous_description, new_description, topology_id + ): """Publish a TopologyDescriptionChangedEvent to all topology listeners. :Parameters: @@ -1595,8 +1685,7 @@ def publish_topology_description_changed(self, previous_description, - `topology_id`: A unique identifier for the topology this server is a part of. 
""" - event = TopologyDescriptionChangedEvent(previous_description, - new_description, topology_id) + event = TopologyDescriptionChangedEvent(previous_description, new_description, topology_id) for subscriber in self.__topology_listeners: try: subscriber.description_changed(event) @@ -1604,8 +1693,7 @@ def publish_topology_description_changed(self, previous_description, _handle_exception() def publish_pool_created(self, address, options): - """Publish a :class:`PoolCreatedEvent` to all pool listeners. - """ + """Publish a :class:`PoolCreatedEvent` to all pool listeners.""" event = PoolCreatedEvent(address, options) for subscriber in self.__cmap_listeners: try: @@ -1614,8 +1702,7 @@ def publish_pool_created(self, address, options): _handle_exception() def publish_pool_ready(self, address): - """Publish a :class:`PoolReadyEvent` to all pool listeners. - """ + """Publish a :class:`PoolReadyEvent` to all pool listeners.""" event = PoolReadyEvent(address) for subscriber in self.__cmap_listeners: try: @@ -1624,8 +1711,7 @@ def publish_pool_ready(self, address): _handle_exception() def publish_pool_cleared(self, address, service_id): - """Publish a :class:`PoolClearedEvent` to all pool listeners. - """ + """Publish a :class:`PoolClearedEvent` to all pool listeners.""" event = PoolClearedEvent(address, service_id) for subscriber in self.__cmap_listeners: try: @@ -1634,8 +1720,7 @@ def publish_pool_cleared(self, address, service_id): _handle_exception() def publish_pool_closed(self, address): - """Publish a :class:`PoolClosedEvent` to all pool listeners. - """ + """Publish a :class:`PoolClosedEvent` to all pool listeners.""" event = PoolClosedEvent(address) for subscriber in self.__cmap_listeners: try: @@ -1655,8 +1740,7 @@ def publish_connection_created(self, address, connection_id): _handle_exception() def publish_connection_ready(self, address, connection_id): - """Publish a :class:`ConnectionReadyEvent` to all connection listeners. 
- """ + """Publish a :class:`ConnectionReadyEvent` to all connection listeners.""" event = ConnectionReadyEvent(address, connection_id) for subscriber in self.__cmap_listeners: try: diff --git a/pymongo/network.py b/pymongo/network.py index 48e5084e31..db952af731 100644 --- a/pymongo/network.py +++ b/pymongo/network.py @@ -24,8 +24,12 @@ from pymongo import helpers, message from pymongo.common import MAX_MESSAGE_SIZE from pymongo.compression_support import _NO_COMPRESSION, decompress -from pymongo.errors import (NotPrimaryError, OperationFailure, ProtocolError, - _OperationCancelled) +from pymongo.errors import ( + NotPrimaryError, + OperationFailure, + ProtocolError, + _OperationCancelled, +) from pymongo.message import _UNPACK_REPLY, _OpMsg from pymongo.monitoring import _is_speculative_authenticate from pymongo.socket_checker import _errno_from_exception @@ -33,18 +37,29 @@ _UNPACK_HEADER = struct.Struct(" max_bson_size): + if unacknowledged and max_bson_size is not None and max_doc_size > max_bson_size: message._raise_document_too_large(name, size, max_bson_size) else: request_id, msg, size = message._query( - 0, ns, 0, -1, spec, None, codec_options, compression_ctx) + 0, ns, 0, -1, spec, None, codec_options, compression_ctx + ) - if (max_bson_size is not None - and size > max_bson_size + message._COMMAND_OVERHEAD): - message._raise_document_too_large( - name, size, max_bson_size + message._COMMAND_OVERHEAD) + if max_bson_size is not None and size > max_bson_size + message._COMMAND_OVERHEAD: + message._raise_document_too_large(name, size, max_bson_size + message._COMMAND_OVERHEAD) if publish: encoding_duration = datetime.datetime.now() - start - listeners.publish_command_start(orig, dbname, request_id, address, - service_id=sock_info.service_id) + listeners.publish_command_start( + orig, dbname, request_id, address, service_id=sock_info.service_id + ) start = datetime.datetime.now() try: @@ -137,15 +150,19 @@ def command(sock_info, dbname, spec, is_mongos, reply = receive_message(sock_info, request_id) sock_info.more_to_come = reply.more_to_come unpacked_docs = reply.unpack_response( - codec_options=codec_options, user_fields=user_fields) + codec_options=codec_options, user_fields=user_fields + ) response_doc = unpacked_docs[0] if client: client._process_response(response_doc, session) if check: helpers._check_command_response( - response_doc, sock_info.max_wire_version, allowable_errors, - parse_write_concern_error=parse_write_concern_error) + response_doc, + sock_info.max_wire_version, + allowable_errors, + parse_write_concern_error=parse_write_concern_error, + ) except Exception as exc: if publish: duration = (datetime.datetime.now() - start) + encoding_duration @@ -154,25 +171,31 @@ def command(sock_info, dbname, spec, is_mongos, else: failure = message._convert_exception(exc) listeners.publish_command_failure( - duration, failure, name, request_id, address, - service_id=sock_info.service_id) + duration, failure, name, request_id, address, service_id=sock_info.service_id + ) raise if publish: duration = (datetime.datetime.now() - start) + encoding_duration listeners.publish_command_success( - duration, response_doc, name, request_id, address, + duration, + response_doc, + name, + request_id, + address, service_id=sock_info.service_id, - speculative_hello=speculative_hello) + speculative_hello=speculative_hello, + ) if client and client._encrypter and reply: decrypted = client._encrypter.decrypt(reply.raw_command_response()) - response_doc = _decode_all_selective(decrypted, 
codec_options, - user_fields)[0] + response_doc = _decode_all_selective(decrypted, codec_options, user_fields)[0] return response_doc + _UNPACK_COMPRESSION_HEADER = struct.Struct(" max_message_size: - raise ProtocolError("Message length (%r) is larger than server max " - "message size (%r)" % (length, max_message_size)) + raise ProtocolError( + "Message length (%r) is larger than server max " + "message size (%r)" % (length, max_message_size) + ) if op_code == 2012: op_code, _, compressor_id = _UNPACK_COMPRESSION_HEADER( - _receive_data_on_socket(sock_info, 9, deadline)) - data = decompress( - _receive_data_on_socket(sock_info, length - 25, deadline), - compressor_id) + _receive_data_on_socket(sock_info, 9, deadline) + ) + data = decompress(_receive_data_on_socket(sock_info, length - 25, deadline), compressor_id) else: data = _receive_data_on_socket(sock_info, length - 16, deadline) try: unpack_reply = _UNPACK_REPLY[op_code] except KeyError: - raise ProtocolError("Got opcode %r but expected " - "%r" % (op_code, _UNPACK_REPLY.keys())) + raise ProtocolError("Got opcode %r but expected " "%r" % (op_code, _UNPACK_REPLY.keys())) return unpack_reply(data) @@ -222,7 +246,7 @@ def wait_for_read(sock_info, deadline): sock = sock_info.sock while True: # SSLSocket can have buffered data which won't be caught by select. - if hasattr(sock, 'pending') and sock.pending() > 0: + if hasattr(sock, "pending") and sock.pending() > 0: readable = True else: # Wait up to 500ms for the socket to become readable and then @@ -231,15 +255,15 @@ def wait_for_read(sock_info, deadline): timeout = max(min(deadline - time.monotonic(), _POLL_TIMEOUT), 0.001) else: timeout = _POLL_TIMEOUT - readable = sock_info.socket_checker.select( - sock, read=True, timeout=timeout) + readable = sock_info.socket_checker.select(sock, read=True, timeout=timeout) if context.cancelled: - raise _OperationCancelled('hello cancelled') + raise _OperationCancelled("hello cancelled") if readable: return if deadline and time.monotonic() > deadline: raise socket.timeout("timed out") + def _receive_data_on_socket(sock_info, length, deadline): buf = bytearray(length) mv = memoryview(buf) diff --git a/pymongo/ocsp_cache.py b/pymongo/ocsp_cache.py index 0db1014774..24507260ed 100644 --- a/pymongo/ocsp_cache.py +++ b/pymongo/ocsp_cache.py @@ -21,9 +21,11 @@ class _OCSPCache(object): """A cache for OCSP responses.""" - CACHE_KEY_TYPE = namedtuple('OcspResponseCacheKey', # type: ignore - ['hash_algorithm', 'issuer_name_hash', - 'issuer_key_hash', 'serial_number']) + + CACHE_KEY_TYPE = namedtuple( # type: ignore + "OcspResponseCacheKey", + ["hash_algorithm", "issuer_name_hash", "issuer_key_hash", "serial_number"], + ) def __init__(self): self._data = {} @@ -35,7 +37,8 @@ def _get_cache_key(self, ocsp_request): hash_algorithm=ocsp_request.hash_algorithm.name.lower(), issuer_name_hash=ocsp_request.issuer_name_hash, issuer_key_hash=ocsp_request.issuer_key_hash, - serial_number=ocsp_request.serial_number) + serial_number=ocsp_request.serial_number, + ) def __setitem__(self, key, value): """Add/update a cache entry. @@ -56,15 +59,13 @@ def __setitem__(self, key, value): return # Do nothing if the response is invalid. - if not (value.this_update <= _datetime.utcnow() - < value.next_update): + if not (value.this_update <= _datetime.utcnow() < value.next_update): return # Cache new response OR update cached response if new response # has longer validity. 
cached_value = self._data.get(cache_key, None) - if (cached_value is None or - cached_value.next_update < value.next_update): + if cached_value is None or cached_value.next_update < value.next_update: self._data[cache_key] = value def __getitem__(self, item): @@ -79,8 +80,7 @@ def __getitem__(self, item): value = self._data[cache_key] # Return cached response if it is still valid. - if (value.this_update <= _datetime.utcnow() < - value.next_update): + if value.this_update <= _datetime.utcnow() < value.next_update: return value self._data.pop(cache_key, None) diff --git a/pymongo/ocsp_support.py b/pymongo/ocsp_support.py index a24fcc5730..369055ea8d 100644 --- a/pymongo/ocsp_support.py +++ b/pymongo/ocsp_support.py @@ -20,35 +20,30 @@ from cryptography.exceptions import InvalidSignature as _InvalidSignature from cryptography.hazmat.backends import default_backend as _default_backend -from cryptography.hazmat.primitives.asymmetric.dsa import \ - DSAPublicKey as _DSAPublicKey +from cryptography.hazmat.primitives.asymmetric.dsa import DSAPublicKey as _DSAPublicKey from cryptography.hazmat.primitives.asymmetric.ec import ECDSA as _ECDSA -from cryptography.hazmat.primitives.asymmetric.ec import \ - EllipticCurvePublicKey as _EllipticCurvePublicKey -from cryptography.hazmat.primitives.asymmetric.padding import \ - PKCS1v15 as _PKCS1v15 -from cryptography.hazmat.primitives.asymmetric.rsa import \ - RSAPublicKey as _RSAPublicKey +from cryptography.hazmat.primitives.asymmetric.ec import ( + EllipticCurvePublicKey as _EllipticCurvePublicKey, +) +from cryptography.hazmat.primitives.asymmetric.padding import PKCS1v15 as _PKCS1v15 +from cryptography.hazmat.primitives.asymmetric.rsa import RSAPublicKey as _RSAPublicKey from cryptography.hazmat.primitives.hashes import SHA1 as _SHA1 from cryptography.hazmat.primitives.hashes import Hash as _Hash from cryptography.hazmat.primitives.serialization import Encoding as _Encoding -from cryptography.hazmat.primitives.serialization import \ - PublicFormat as _PublicFormat -from cryptography.x509 import \ - AuthorityInformationAccess as _AuthorityInformationAccess +from cryptography.hazmat.primitives.serialization import PublicFormat as _PublicFormat +from cryptography.x509 import AuthorityInformationAccess as _AuthorityInformationAccess from cryptography.x509 import ExtendedKeyUsage as _ExtendedKeyUsage from cryptography.x509 import ExtensionNotFound as _ExtensionNotFound from cryptography.x509 import TLSFeature as _TLSFeature from cryptography.x509 import TLSFeatureType as _TLSFeatureType -from cryptography.x509 import \ - load_pem_x509_certificate as _load_pem_x509_certificate +from cryptography.x509 import load_pem_x509_certificate as _load_pem_x509_certificate from cryptography.x509.ocsp import OCSPCertStatus as _OCSPCertStatus from cryptography.x509.ocsp import OCSPRequestBuilder as _OCSPRequestBuilder from cryptography.x509.ocsp import OCSPResponseStatus as _OCSPResponseStatus -from cryptography.x509.ocsp import \ - load_der_ocsp_response as _load_der_ocsp_response -from cryptography.x509.oid import \ - AuthorityInformationAccessOID as _AuthorityInformationAccessOID +from cryptography.x509.ocsp import load_der_ocsp_response as _load_der_ocsp_response +from cryptography.x509.oid import ( + AuthorityInformationAccessOID as _AuthorityInformationAccessOID, +) from cryptography.x509.oid import ExtendedKeyUsageOID as _ExtendedKeyUsageOID from requests import post as _post from requests.exceptions import RequestException as _RequestException @@ -61,21 +56,20 @@ 
_LOGGER = _logging.getLogger(__name__) _CERT_REGEX = _re.compile( - b'-----BEGIN CERTIFICATE[^\r\n]+.+?-----END CERTIFICATE[^\r\n]+', - _re.DOTALL) + b"-----BEGIN CERTIFICATE[^\r\n]+.+?-----END CERTIFICATE[^\r\n]+", _re.DOTALL +) def _load_trusted_ca_certs(cafile): """Parse the tlsCAFile into a list of certificates.""" - with open(cafile, 'rb') as f: + with open(cafile, "rb") as f: data = f.read() # Load all the certs in the file. trusted_ca_certs = [] backend = _default_backend() for cert_data in _re.findall(_CERT_REGEX, data): - trusted_ca_certs.append( - _load_pem_x509_certificate(cert_data, backend)) + trusted_ca_certs.append(_load_pem_x509_certificate(cert_data, backend)) return trusted_ca_certs @@ -127,14 +121,11 @@ def _public_key_hash(cert): # (excluding the tag and length fields)" # https://stackoverflow.com/a/46309453/600498 if isinstance(public_key, _RSAPublicKey): - pbytes = public_key.public_bytes( - _Encoding.DER, _PublicFormat.PKCS1) + pbytes = public_key.public_bytes(_Encoding.DER, _PublicFormat.PKCS1) elif isinstance(public_key, _EllipticCurvePublicKey): - pbytes = public_key.public_bytes( - _Encoding.X962, _PublicFormat.UncompressedPoint) + pbytes = public_key.public_bytes(_Encoding.X962, _PublicFormat.UncompressedPoint) else: - pbytes = public_key.public_bytes( - _Encoding.DER, _PublicFormat.SubjectPublicKeyInfo) + pbytes = public_key.public_bytes(_Encoding.DER, _PublicFormat.SubjectPublicKeyInfo) digest = _Hash(_SHA1(), backend=_default_backend()) digest.update(pbytes) return digest.finalize() @@ -142,16 +133,18 @@ def _public_key_hash(cert): def _get_certs_by_key_hash(certificates, issuer, responder_key_hash): return [ - cert for cert in certificates - if _public_key_hash(cert) == responder_key_hash and - cert.issuer == issuer.subject] + cert + for cert in certificates + if _public_key_hash(cert) == responder_key_hash and cert.issuer == issuer.subject + ] def _get_certs_by_name(certificates, issuer, responder_name): return [ - cert for cert in certificates - if cert.subject == responder_name and - cert.issuer == issuer.subject] + cert + for cert in certificates + if cert.subject == responder_name and cert.issuer == issuer.subject + ] def _verify_response_signature(issuer, response): @@ -189,10 +182,11 @@ def _verify_response_signature(issuer, response): _LOGGER.debug("Delegate not authorized for OCSP signing") return 0 if not _verify_signature( - issuer.public_key(), - responder_cert.signature, - responder_cert.signature_hash_algorithm, - responder_cert.tbs_certificate_bytes): + issuer.public_key(), + responder_cert.signature, + responder_cert.signature_hash_algorithm, + responder_cert.tbs_certificate_bytes, + ): _LOGGER.debug("Delegate signature verification failed") return 0 # RFC6960, Section 3.2, Number 2 @@ -200,7 +194,8 @@ def _verify_response_signature(issuer, response): responder_cert.public_key(), response.signature, response.signature_hash_algorithm, - response.tbs_response_bytes) + response.tbs_response_bytes, + ) if not ret: _LOGGER.debug("Response signature verification failed") return ret @@ -244,8 +239,9 @@ def _get_ocsp_response(cert, issuer, uri, ocsp_response_cache): response = _post( uri, data=ocsp_request.public_bytes(_Encoding.DER), - headers={'Content-Type': 'application/ocsp-request'}, - timeout=5) + headers={"Content-Type": "application/ocsp-request"}, + timeout=5, + ) except _RequestException as exc: _LOGGER.debug("HTTP request failed: %s", exc) return None @@ -253,8 +249,7 @@ def _get_ocsp_response(cert, issuer, uri, ocsp_response_cache): 
_LOGGER.debug("HTTP request returned %d", response.status_code) return None ocsp_response = _load_der_ocsp_response(response.content) - _LOGGER.debug( - "OCSP response status: %r", ocsp_response.response_status) + _LOGGER.debug("OCSP response status: %r", ocsp_response.response_status) if ocsp_response.response_status != _OCSPResponseStatus.SUCCESSFUL: return None # RFC6960, Section 3.2, Number 1. Only relevant if we need to @@ -298,7 +293,7 @@ def _ocsp_callback(conn, ocsp_bytes, user_data): ocsp_response_cache = user_data.ocsp_response_cache # No stapled OCSP response - if ocsp_bytes == b'': + if ocsp_bytes == b"": _LOGGER.debug("Peer did not staple an OCSP response") if must_staple: _LOGGER.debug("Must-staple cert with no stapled response, hard fail.") @@ -313,9 +308,11 @@ def _ocsp_callback(conn, ocsp_bytes, user_data): _LOGGER.debug("No authority access information, soft fail") # No stapled OCSP response, no responder URI, soft fail. return 1 - uris = [desc.access_location.value - for desc in ext.value - if desc.access_method == _AuthorityInformationAccessOID.OCSP] + uris = [ + desc.access_location.value + for desc in ext.value + if desc.access_method == _AuthorityInformationAccessOID.OCSP + ] if not uris: _LOGGER.debug("No OCSP URI, soft fail") # No responder URI, soft fail. @@ -328,8 +325,7 @@ def _ocsp_callback(conn, ocsp_bytes, user_data): # successful, valid responses with a certificate status of REVOKED. for uri in uris: _LOGGER.debug("Trying %s", uri) - response = _get_ocsp_response( - cert, issuer, uri, ocsp_response_cache) + response = _get_ocsp_response(cert, issuer, uri, ocsp_response_cache) if response is None: # The endpoint didn't respond in time, or the response was # unsuccessful or didn't match the request, or the response @@ -349,8 +345,7 @@ def _ocsp_callback(conn, ocsp_bytes, user_data): _LOGGER.debug("No issuer cert?") return 0 response = _load_der_ocsp_response(ocsp_bytes) - _LOGGER.debug( - "OCSP response status: %r", response.response_status) + _LOGGER.debug("OCSP response status: %r", response.response_status) # This happens in _request_ocsp when there is no stapled response so # we know if we can compare serial numbers for the request and response. if response.response_status != _OCSPResponseStatus.SUCCESSFUL: diff --git a/pymongo/operations.py b/pymongo/operations.py index d07f027e24..8f264c48c2 100644 --- a/pymongo/operations.py +++ b/pymongo/operations.py @@ -63,7 +63,12 @@ class DeleteOne(object): __slots__ = ("_filter", "_collation", "_hint") - def __init__(self, filter: Mapping[str, Any], collation: Optional[_CollationIn] = None, hint: Optional[_IndexKeyHint] = None) -> None: + def __init__( + self, + filter: Mapping[str, Any], + collation: Optional[_CollationIn] = None, + hint: Optional[_IndexKeyHint] = None, + ) -> None: """Create a DeleteOne instance. For use with :meth:`~pymongo.collection.Collection.bulk_write`. 
@@ -95,16 +100,14 @@ def __init__(self, filter: Mapping[str, Any], collation: Optional[_CollationIn] def _add_to_bulk(self, bulkobj): """Add this operation to the _Bulk instance `bulkobj`.""" - bulkobj.add_delete(self._filter, 1, collation=self._collation, - hint=self._hint) + bulkobj.add_delete(self._filter, 1, collation=self._collation, hint=self._hint) def __repr__(self): return "DeleteOne(%r, %r)" % (self._filter, self._collation) def __eq__(self, other: Any) -> bool: if type(other) == type(self): - return ((other._filter, other._collation) == - (self._filter, self._collation)) + return (other._filter, other._collation) == (self._filter, self._collation) return NotImplemented def __ne__(self, other: Any) -> bool: @@ -116,7 +119,12 @@ class DeleteMany(object): __slots__ = ("_filter", "_collation", "_hint") - def __init__(self, filter: Mapping[str, Any], collation: Optional[_CollationIn] = None, hint: Optional[_IndexKeyHint] = None) -> None: + def __init__( + self, + filter: Mapping[str, Any], + collation: Optional[_CollationIn] = None, + hint: Optional[_IndexKeyHint] = None, + ) -> None: """Create a DeleteMany instance. For use with :meth:`~pymongo.collection.Collection.bulk_write`. @@ -148,16 +156,14 @@ def __init__(self, filter: Mapping[str, Any], collation: Optional[_CollationIn] def _add_to_bulk(self, bulkobj): """Add this operation to the _Bulk instance `bulkobj`.""" - bulkobj.add_delete(self._filter, 0, collation=self._collation, - hint=self._hint) + bulkobj.add_delete(self._filter, 0, collation=self._collation, hint=self._hint) def __repr__(self): return "DeleteMany(%r, %r)" % (self._filter, self._collation) def __eq__(self, other: Any) -> bool: if type(other) == type(self): - return ((other._filter, other._collation) == - (self._filter, self._collation)) + return (other._filter, other._collation) == (self._filter, self._collation) return NotImplemented def __ne__(self, other: Any) -> bool: @@ -169,8 +175,14 @@ class ReplaceOne(object): __slots__ = ("_filter", "_doc", "_upsert", "_collation", "_hint") - def __init__(self, filter: Mapping[str, Any], replacement: Mapping[str, Any], upsert: bool = False, collation: Optional[_CollationIn] = None, - hint: Optional[_IndexKeyHint] = None) -> None: + def __init__( + self, + filter: Mapping[str, Any], + replacement: Mapping[str, Any], + upsert: bool = False, + collation: Optional[_CollationIn] = None, + hint: Optional[_IndexKeyHint] = None, + ) -> None: """Create a ReplaceOne instance. For use with :meth:`~pymongo.collection.Collection.bulk_write`. 
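ReplaceOne follows the same pattern, with ``upsert=True`` turning an unmatched filter into an insert. A sketch under the same illustrative-names caveat::

    from pymongo import MongoClient, ReplaceOne

    coll = MongoClient().test.users

    result = coll.bulk_write(
        [
            # Replaces the first matching document, or inserts if none matches.
            ReplaceOne({"_id": 1}, {"_id": 1, "name": "Ada"}, upsert=True),
        ]
    )
    # Maps each upserting operation's index to the inserted _id.
    print(result.upserted_ids)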
@@ -210,15 +222,19 @@ def __init__(self, filter: Mapping[str, Any], replacement: Mapping[str, Any], up def _add_to_bulk(self, bulkobj): """Add this operation to the _Bulk instance `bulkobj`.""" - bulkobj.add_replace(self._filter, self._doc, self._upsert, - collation=self._collation, hint=self._hint) + bulkobj.add_replace( + self._filter, self._doc, self._upsert, collation=self._collation, hint=self._hint + ) def __eq__(self, other: Any) -> bool: if type(other) == type(self): - return ( - (other._filter, other._doc, other._upsert, other._collation, - other._hint) == (self._filter, self._doc, self._upsert, - self._collation, other._hint)) + return (other._filter, other._doc, other._upsert, other._collation, other._hint) == ( + self._filter, + self._doc, + self._upsert, + self._collation, + self._hint, + ) return NotImplemented def __ne__(self, other: Any) -> bool: @@ -226,15 +242,19 @@ def __ne__(self, other: Any) -> bool: def __repr__(self): return "%s(%r, %r, %r, %r, %r)" % ( - self.__class__.__name__, self._filter, self._doc, self._upsert, - self._collation, self._hint) + self.__class__.__name__, + self._filter, + self._doc, + self._upsert, + self._collation, + self._hint, + ) class _UpdateOp(object): """Private base class for update operations.""" - __slots__ = ("_filter", "_doc", "_upsert", "_collation", "_array_filters", - "_hint") + __slots__ = ("_filter", "_doc", "_upsert", "_collation", "_array_filters", "_hint") def __init__(self, filter, doc, upsert, collation, array_filters, hint): if filter is not None: @@ -257,10 +277,20 @@ def __init__(self, filter, doc, upsert, collation, array_filters, hint): def __eq__(self, other): if type(other) == type(self): return ( - (other._filter, other._doc, other._upsert, other._collation, - other._array_filters, other._hint) == - (self._filter, self._doc, self._upsert, self._collation, - self._array_filters, self._hint)) + other._filter, + other._doc, + other._upsert, + other._collation, + other._array_filters, + other._hint, + ) == ( + self._filter, + self._doc, + self._upsert, + self._collation, + self._array_filters, + self._hint, + ) return NotImplemented def __ne__(self, other): @@ -268,8 +298,14 @@ def __ne__(self, other): def __repr__(self): return "%s(%r, %r, %r, %r, %r, %r)" % ( - self.__class__.__name__, self._filter, self._doc, self._upsert, - self._collation, self._array_filters, self._hint) + self.__class__.__name__, + self._filter, + self._doc, + self._upsert, + self._collation, + self._array_filters, + self._hint, + ) class UpdateOne(_UpdateOp): @@ -277,8 +313,15 @@ class UpdateOne(_UpdateOp): __slots__ = () - def __init__(self, filter: Mapping[str, Any], update: Union[Mapping[str, Any], _Pipeline], upsert: bool = False, collation: Optional[_CollationIn] = None, - array_filters: Optional[List[Mapping[str, Any]]] = None, hint: Optional[_IndexKeyHint] = None) -> None: + def __init__( + self, + filter: Mapping[str, Any], + update: Union[Mapping[str, Any], _Pipeline], + upsert: bool = False, + collation: Optional[_CollationIn] = None, + array_filters: Optional[List[Mapping[str, Any]]] = None, + hint: Optional[_IndexKeyHint] = None, + ) -> None: """Represents an update_one operation. For use with :meth:`~pymongo.collection.Collection.bulk_write`. @@ -308,15 +351,19 @@ def __init__(self, filter: Mapping[str, Any], update: Union[Mapping[str, Any], _ .. versionchanged:: 3.5 Added the `collation` option. 
""" - super(UpdateOne, self).__init__(filter, update, upsert, collation, - array_filters, hint) + super(UpdateOne, self).__init__(filter, update, upsert, collation, array_filters, hint) def _add_to_bulk(self, bulkobj): """Add this operation to the _Bulk instance `bulkobj`.""" - bulkobj.add_update(self._filter, self._doc, False, self._upsert, - collation=self._collation, - array_filters=self._array_filters, - hint=self._hint) + bulkobj.add_update( + self._filter, + self._doc, + False, + self._upsert, + collation=self._collation, + array_filters=self._array_filters, + hint=self._hint, + ) class UpdateMany(_UpdateOp): @@ -324,8 +371,15 @@ class UpdateMany(_UpdateOp): __slots__ = () - def __init__(self, filter: Mapping[str, Any], update: Union[Mapping[str, Any], _Pipeline], upsert: bool = False, collation: Optional[_CollationIn] = None, - array_filters: Optional[List[Mapping[str, Any]]] = None, hint: Optional[_IndexKeyHint] = None) -> None: + def __init__( + self, + filter: Mapping[str, Any], + update: Union[Mapping[str, Any], _Pipeline], + upsert: bool = False, + collation: Optional[_CollationIn] = None, + array_filters: Optional[List[Mapping[str, Any]]] = None, + hint: Optional[_IndexKeyHint] = None, + ) -> None: """Create an UpdateMany instance. For use with :meth:`~pymongo.collection.Collection.bulk_write`. @@ -355,15 +409,19 @@ def __init__(self, filter: Mapping[str, Any], update: Union[Mapping[str, Any], _ .. versionchanged:: 3.5 Added the `collation` option. """ - super(UpdateMany, self).__init__(filter, update, upsert, collation, - array_filters, hint) + super(UpdateMany, self).__init__(filter, update, upsert, collation, array_filters, hint) def _add_to_bulk(self, bulkobj): """Add this operation to the _Bulk instance `bulkobj`.""" - bulkobj.add_update(self._filter, self._doc, True, self._upsert, - collation=self._collation, - array_filters=self._array_filters, - hint=self._hint) + bulkobj.add_update( + self._filter, + self._doc, + True, + self._upsert, + collation=self._collation, + array_filters=self._array_filters, + hint=self._hint, + ) class IndexModel(object): @@ -436,10 +494,10 @@ def __init__(self, keys: _IndexKeyHint, **kwargs: Any) -> None: if "name" not in kwargs: kwargs["name"] = _gen_index_name(keys) kwargs["key"] = _index_document(keys) - collation = validate_collation_or_none(kwargs.pop('collation', None)) + collation = validate_collation_or_none(kwargs.pop("collation", None)) self.__document = kwargs if collation is not None: - self.__document['collation'] = collation + self.__document["collation"] = collation @property def document(self) -> Dict[str, Any]: diff --git a/pymongo/periodic_executor.py b/pymongo/periodic_executor.py index 36e094c4cb..5bb08ec23f 100644 --- a/pymongo/periodic_executor.py +++ b/pymongo/periodic_executor.py @@ -22,7 +22,7 @@ class PeriodicExecutor(object): def __init__(self, interval, min_interval, target, name=None): - """"Run a target function periodically on a background thread. + """ "Run a target function periodically on a background thread. If the target's return value is false, the executor stops. @@ -50,8 +50,7 @@ def __init__(self, interval, min_interval, target, name=None): self._lock = threading.Lock() def __repr__(self): - return '<%s(name=%s) object at 0x%x>' % ( - self.__class__.__name__, self._name, id(self)) + return "<%s(name=%s) object at 0x%x>" % (self.__class__.__name__, self._name, id(self)) def open(self) -> None: """Start. Multiple calls have no effect. 
diff --git a/pymongo/pool.py b/pymongo/pool.py index c53c9f4736..d616408ef8 100644 --- a/pymongo/pool.py +++ b/pymongo/pool.py @@ -30,17 +30,32 @@ from bson.son import SON from pymongo import __version__, auth, helpers from pymongo.client_session import _validate_session_write_concern -from pymongo.common import (MAX_BSON_SIZE, MAX_CONNECTING, MAX_IDLE_TIME_SEC, - MAX_MESSAGE_SIZE, MAX_POOL_SIZE, MAX_WIRE_VERSION, - MAX_WRITE_BATCH_SIZE, MIN_POOL_SIZE, ORDERED_TYPES, - WAIT_QUEUE_TIMEOUT) -from pymongo.errors import (AutoReconnect, ConfigurationError, - ConnectionFailure, DocumentTooLarge, - InvalidOperation, NetworkTimeout, NotPrimaryError, - OperationFailure, PyMongoError, _CertificateError) +from pymongo.common import ( + MAX_BSON_SIZE, + MAX_CONNECTING, + MAX_IDLE_TIME_SEC, + MAX_MESSAGE_SIZE, + MAX_POOL_SIZE, + MAX_WIRE_VERSION, + MAX_WRITE_BATCH_SIZE, + MIN_POOL_SIZE, + ORDERED_TYPES, + WAIT_QUEUE_TIMEOUT, +) +from pymongo.errors import ( + AutoReconnect, + ConfigurationError, + ConnectionFailure, + DocumentTooLarge, + InvalidOperation, + NetworkTimeout, + NotPrimaryError, + OperationFailure, + PyMongoError, + _CertificateError, +) from pymongo.hello import Hello, HelloCompat -from pymongo.monitoring import (ConnectionCheckOutFailedReason, - ConnectionClosedReason) +from pymongo.monitoring import ConnectionCheckOutFailedReason, ConnectionClosedReason from pymongo.network import command, receive_message from pymongo.read_preferences import ReadPreference from pymongo.server_api import _add_to_command @@ -60,12 +75,15 @@ def is_ip_address(address): except (ValueError, UnicodeError): return False + try: from fcntl import F_GETFD, F_SETFD, FD_CLOEXEC, fcntl + def _set_non_inheritable_non_atomic(fd): """Set the close-on-exec flag on the given file descriptor.""" flags = fcntl(fd, F_GETFD) fcntl(fd, F_SETFD, flags | FD_CLOEXEC) + except ImportError: # Windows, various platforms we don't claim to support # (Jython, IronPython, ...), systems that don't provide @@ -74,11 +92,12 @@ def _set_non_inheritable_non_atomic(fd): """Dummy function for platforms that don't provide fcntl.""" pass + _MAX_TCP_KEEPIDLE = 120 _MAX_TCP_KEEPINTVL = 10 _MAX_TCP_KEEPCNT = 9 -if sys.platform == 'win32': +if sys.platform == "win32": try: import _winreg as winreg except ImportError: @@ -96,8 +115,8 @@ def _query(key, name, default): try: with winreg.OpenKey( - winreg.HKEY_LOCAL_MACHINE, - r"SYSTEM\CurrentControlSet\Services\Tcpip\Parameters") as key: + winreg.HKEY_LOCAL_MACHINE, r"SYSTEM\CurrentControlSet\Services\Tcpip\Parameters" + ) as key: _WINDOWS_TCP_IDLE_MS = _query(key, "KeepAliveTime", 7200000) _WINDOWS_TCP_INTERVAL_MS = _query(key, "KeepAliveInterval", 1000) except OSError: @@ -108,13 +127,12 @@ def _query(key, name, default): def _set_keepalive_times(sock): idle_ms = min(_WINDOWS_TCP_IDLE_MS, _MAX_TCP_KEEPIDLE * 1000) - interval_ms = min(_WINDOWS_TCP_INTERVAL_MS, - _MAX_TCP_KEEPINTVL * 1000) - if (idle_ms < _WINDOWS_TCP_IDLE_MS or - interval_ms < _WINDOWS_TCP_INTERVAL_MS): - sock.ioctl(socket.SIO_KEEPALIVE_VALS, - (1, idle_ms, interval_ms)) + interval_ms = min(_WINDOWS_TCP_INTERVAL_MS, _MAX_TCP_KEEPINTVL * 1000) + if idle_ms < _WINDOWS_TCP_IDLE_MS or interval_ms < _WINDOWS_TCP_INTERVAL_MS: + sock.ioctl(socket.SIO_KEEPALIVE_VALS, (1, idle_ms, interval_ms)) + else: + def _set_tcp_option(sock, tcp_option, max_value): if hasattr(socket, tcp_option): sockopt = getattr(socket, tcp_option) @@ -129,88 +147,106 @@ def _set_tcp_option(sock, tcp_option, max_value): pass def _set_keepalive_times(sock): - 
_set_tcp_option(sock, 'TCP_KEEPIDLE', _MAX_TCP_KEEPIDLE) - _set_tcp_option(sock, 'TCP_KEEPINTVL', _MAX_TCP_KEEPINTVL) - _set_tcp_option(sock, 'TCP_KEEPCNT', _MAX_TCP_KEEPCNT) + _set_tcp_option(sock, "TCP_KEEPIDLE", _MAX_TCP_KEEPIDLE) + _set_tcp_option(sock, "TCP_KEEPINTVL", _MAX_TCP_KEEPINTVL) + _set_tcp_option(sock, "TCP_KEEPCNT", _MAX_TCP_KEEPCNT) -_METADATA: SON[str, Any] = SON([ - ('driver', SON([('name', 'PyMongo'), ('version', __version__)])), -]) -if sys.platform.startswith('linux'): +_METADATA: SON[str, Any] = SON( + [ + ("driver", SON([("name", "PyMongo"), ("version", __version__)])), + ] +) + +if sys.platform.startswith("linux"): # platform.linux_distribution was deprecated in Python 3.5 # and removed in Python 3.8. Starting in Python 3.5 it # raises DeprecationWarning # DeprecationWarning: dist() and linux_distribution() functions are deprecated in Python 3.5 _name = platform.system() - _METADATA['os'] = SON([ - ('type', _name), - ('name', _name), - ('architecture', platform.machine()), - # Kernel version (e.g. 4.4.0-17-generic). - ('version', platform.release()) - ]) -elif sys.platform == 'darwin': - _METADATA['os'] = SON([ - ('type', platform.system()), - ('name', platform.system()), - ('architecture', platform.machine()), - # (mac|i|tv)OS(X) version (e.g. 10.11.6) instead of darwin - # kernel version. - ('version', platform.mac_ver()[0]) - ]) -elif sys.platform == 'win32': - _METADATA['os'] = SON([ - ('type', platform.system()), - # "Windows XP", "Windows 7", "Windows 10", etc. - ('name', ' '.join((platform.system(), platform.release()))), - ('architecture', platform.machine()), - # Windows patch level (e.g. 5.1.2600-SP3) - ('version', '-'.join(platform.win32_ver()[1:3])) - ]) -elif sys.platform.startswith('java'): + _METADATA["os"] = SON( + [ + ("type", _name), + ("name", _name), + ("architecture", platform.machine()), + # Kernel version (e.g. 4.4.0-17-generic). + ("version", platform.release()), + ] + ) +elif sys.platform == "darwin": + _METADATA["os"] = SON( + [ + ("type", platform.system()), + ("name", platform.system()), + ("architecture", platform.machine()), + # (mac|i|tv)OS(X) version (e.g. 10.11.6) instead of darwin + # kernel version. + ("version", platform.mac_ver()[0]), + ] + ) +elif sys.platform == "win32": + _METADATA["os"] = SON( + [ + ("type", platform.system()), + # "Windows XP", "Windows 7", "Windows 10", etc. + ("name", " ".join((platform.system(), platform.release()))), + ("architecture", platform.machine()), + # Windows patch level (e.g. 5.1.2600-SP3) + ("version", "-".join(platform.win32_ver()[1:3])), + ] + ) +elif sys.platform.startswith("java"): _name, _ver, _arch = platform.java_ver()[-1] - _METADATA['os'] = SON([ - # Linux, Windows 7, Mac OS X, etc. - ('type', _name), - ('name', _name), - # x86, x86_64, AMD64, etc. - ('architecture', _arch), - # Linux kernel version, OSX version, etc. - ('version', _ver) - ]) + _METADATA["os"] = SON( + [ + # Linux, Windows 7, Mac OS X, etc. + ("type", _name), + ("name", _name), + # x86, x86_64, AMD64, etc. + ("architecture", _arch), + # Linux kernel version, OSX version, etc. + ("version", _ver), + ] + ) else: # Get potential alias (e.g. 
SunOS 5.11 becomes Solaris 2.11) - _aliased = platform.system_alias( - platform.system(), platform.release(), platform.version()) - _METADATA['os'] = SON([ - ('type', platform.system()), - ('name', ' '.join([part for part in _aliased[:2] if part])), - ('architecture', platform.machine()), - ('version', _aliased[2]) - ]) - -if platform.python_implementation().startswith('PyPy'): - _METADATA['platform'] = ' '.join( - (platform.python_implementation(), - '.'.join(map(str, sys.pypy_version_info)), # type: ignore - '(Python %s)' % '.'.join(map(str, sys.version_info)))) -elif sys.platform.startswith('java'): - _METADATA['platform'] = ' '.join( - (platform.python_implementation(), - '.'.join(map(str, sys.version_info)), - '(%s)' % ' '.join((platform.system(), platform.release())))) + _aliased = platform.system_alias(platform.system(), platform.release(), platform.version()) + _METADATA["os"] = SON( + [ + ("type", platform.system()), + ("name", " ".join([part for part in _aliased[:2] if part])), + ("architecture", platform.machine()), + ("version", _aliased[2]), + ] + ) + +if platform.python_implementation().startswith("PyPy"): + _METADATA["platform"] = " ".join( + ( + platform.python_implementation(), + ".".join(map(str, sys.pypy_version_info)), # type: ignore + "(Python %s)" % ".".join(map(str, sys.version_info)), + ) + ) +elif sys.platform.startswith("java"): + _METADATA["platform"] = " ".join( + ( + platform.python_implementation(), + ".".join(map(str, sys.version_info)), + "(%s)" % " ".join((platform.system(), platform.release())), + ) + ) else: - _METADATA['platform'] = ' '.join( - (platform.python_implementation(), - '.'.join(map(str, sys.version_info)))) + _METADATA["platform"] = " ".join( + (platform.python_implementation(), ".".join(map(str, sys.version_info))) + ) # If the first getaddrinfo call of this interpreter's life is on a thread, # while the main thread holds the import lock, getaddrinfo deadlocks trying # to import the IDNA codec. Import it here, where presumably we're on the # main thread, to avoid the deadlock. See PYTHON-607. -'foo'.encode('idna') +"foo".encode("idna") # Remove after PYTHON-2712 _MOCK_SERVICE_ID = False @@ -221,14 +257,14 @@ def _raise_connection_failure(address, error, msg_prefix=None): host, port = address # If connecting to a Unix socket, port will be None. if port is not None: - msg = '%s:%d: %s' % (host, port, error) + msg = "%s:%d: %s" % (host, port, error) else: - msg = '%s: %s' % (host, error) + msg = "%s: %s" % (host, error) if msg_prefix: msg = msg_prefix + msg if isinstance(error, socket.timeout): raise NetworkTimeout(msg) from error - elif isinstance(error, _SSLError) and 'timed out' in str(error): + elif isinstance(error, _SSLError) and "timed out" in str(error): # Eventlet does not distinguish TLS network timeouts from other # SSLErrors (https://github.com/eventlet/eventlet/issues/692). 
# Luckily, we can work around this limitation because the phrase @@ -256,26 +292,47 @@ class PoolOptions(object): """ - __slots__ = ('__max_pool_size', '__min_pool_size', - '__max_idle_time_seconds', - '__connect_timeout', '__socket_timeout', - '__wait_queue_timeout', - '__ssl_context', '__tls_allow_invalid_hostnames', - '__event_listeners', '__appname', '__driver', '__metadata', - '__compression_settings', '__max_connecting', - '__pause_enabled', '__server_api', '__load_balanced', - '__credentials') - - def __init__(self, max_pool_size=MAX_POOL_SIZE, - min_pool_size=MIN_POOL_SIZE, - max_idle_time_seconds=MAX_IDLE_TIME_SEC, connect_timeout=None, - socket_timeout=None, wait_queue_timeout=WAIT_QUEUE_TIMEOUT, - ssl_context=None, - tls_allow_invalid_hostnames=False, - event_listeners=None, appname=None, driver=None, - compression_settings=None, max_connecting=MAX_CONNECTING, - pause_enabled=True, server_api=None, load_balanced=None, - credentials=None): + __slots__ = ( + "__max_pool_size", + "__min_pool_size", + "__max_idle_time_seconds", + "__connect_timeout", + "__socket_timeout", + "__wait_queue_timeout", + "__ssl_context", + "__tls_allow_invalid_hostnames", + "__event_listeners", + "__appname", + "__driver", + "__metadata", + "__compression_settings", + "__max_connecting", + "__pause_enabled", + "__server_api", + "__load_balanced", + "__credentials", + ) + + def __init__( + self, + max_pool_size=MAX_POOL_SIZE, + min_pool_size=MIN_POOL_SIZE, + max_idle_time_seconds=MAX_IDLE_TIME_SEC, + connect_timeout=None, + socket_timeout=None, + wait_queue_timeout=WAIT_QUEUE_TIMEOUT, + ssl_context=None, + tls_allow_invalid_hostnames=False, + event_listeners=None, + appname=None, + driver=None, + compression_settings=None, + max_connecting=MAX_CONNECTING, + pause_enabled=True, + server_api=None, + load_balanced=None, + credentials=None, + ): self.__max_pool_size = max_pool_size self.__min_pool_size = min_pool_size self.__max_idle_time_seconds = max_idle_time_seconds @@ -295,7 +352,7 @@ def __init__(self, max_pool_size=MAX_POOL_SIZE, self.__credentials = credentials self.__metadata = copy.deepcopy(_METADATA) if appname: - self.__metadata['application'] = {'name': appname} + self.__metadata["application"] = {"name": appname} # Combine the "driver" MongoClient option with PyMongo's info, like: # { @@ -307,14 +364,17 @@ def __init__(self, max_pool_size=MAX_POOL_SIZE, # } if driver: if driver.name: - self.__metadata['driver']['name'] = "%s|%s" % ( - _METADATA['driver']['name'], driver.name) + self.__metadata["driver"]["name"] = "%s|%s" % ( + _METADATA["driver"]["name"], + driver.name, + ) if driver.version: - self.__metadata['driver']['version'] = "%s|%s" % ( - _METADATA['driver']['version'], driver.version) + self.__metadata["driver"]["version"] = "%s|%s" % ( + _METADATA["driver"]["version"], + driver.version, + ) if driver.platform: - self.__metadata['platform'] = "%s|%s" % ( - _METADATA['platform'], driver.platform) + self.__metadata["platform"] = "%s|%s" % (_METADATA["platform"], driver.platform) @property def _credentials(self): @@ -329,15 +389,15 @@ def non_default_options(self): """ opts = {} if self.__max_pool_size != MAX_POOL_SIZE: - opts['maxPoolSize'] = self.__max_pool_size + opts["maxPoolSize"] = self.__max_pool_size if self.__min_pool_size != MIN_POOL_SIZE: - opts['minPoolSize'] = self.__min_pool_size + opts["minPoolSize"] = self.__min_pool_size if self.__max_idle_time_seconds != MAX_IDLE_TIME_SEC: - opts['maxIdleTimeMS'] = self.__max_idle_time_seconds * 1000 + opts["maxIdleTimeMS"] = 
self.__max_idle_time_seconds * 1000 if self.__wait_queue_timeout != WAIT_QUEUE_TIMEOUT: - opts['waitQueueTimeoutMS'] = self.__wait_queue_timeout * 1000 + opts["waitQueueTimeoutMS"] = self.__wait_queue_timeout * 1000 if self.__max_connecting != MAX_CONNECTING: - opts['maxConnecting'] = self.__max_connecting + opts["maxConnecting"] = self.__max_connecting return opts @property @@ -383,14 +443,12 @@ def max_idle_time_seconds(self): @property def connect_timeout(self): - """How long a connection can take to be opened before timing out. - """ + """How long a connection can take to be opened before timing out.""" return self.__connect_timeout @property def socket_timeout(self): - """How long a send or receive on a socket can take before timing out. - """ + """How long a send or receive on a socket can take before timing out.""" return self.__socket_timeout @property @@ -402,32 +460,27 @@ def wait_queue_timeout(self): @property def _ssl_context(self): - """An SSLContext instance or None. - """ + """An SSLContext instance or None.""" return self.__ssl_context @property def tls_allow_invalid_hostnames(self): - """If True skip ssl.match_hostname. - """ + """If True skip ssl.match_hostname.""" return self.__tls_allow_invalid_hostnames @property def _event_listeners(self): - """An instance of pymongo.monitoring._EventListeners. - """ + """An instance of pymongo.monitoring._EventListeners.""" return self.__event_listeners @property def appname(self): - """The application name, for sending with hello in server handshake. - """ + """The application name, for sending with hello in server handshake.""" return self.__appname @property def driver(self): - """Driver name and version, for sending with hello in handshake. - """ + """Driver name and version, for sending with hello in handshake.""" return self.__driver @property @@ -436,20 +489,17 @@ def _compression_settings(self): @property def metadata(self): - """A dict of metadata about the application, driver, os, and platform. - """ + """A dict of metadata about the application, driver, os, and platform.""" return self.__metadata.copy() @property def server_api(self): - """A pymongo.server_api.ServerApi or None. - """ + """A pymongo.server_api.ServerApi or None.""" return self.__server_api @property def load_balanced(self): - """True if this Pool is configured in load balanced mode. 
- """ + """True if this Pool is configured in load balanced mode.""" return self.__load_balanced @@ -476,6 +526,7 @@ class SocketInfo(object): - `address`: the server's (host, port) - `id`: the id of this socket in it's pool """ + def __init__(self, sock, pool, address, id): self.pool_ref = weakref.ref(pool) self.sock = sock @@ -544,7 +595,7 @@ def hello_cmd(self): self.op_msg_enabled = True return SON([(HelloCompat.CMD, 1)]) else: - return SON([(HelloCompat.LEGACY_CMD, 1), ('helloOk', True)]) + return SON([(HelloCompat.LEGACY_CMD, 1), ("helloOk", True)]) def hello(self): return self._hello(None, None, None) @@ -555,58 +606,58 @@ def _hello(self, cluster_time, topology_version, heartbeat_frequency): awaitable = False if performing_handshake: self.performed_handshake = True - cmd['client'] = self.opts.metadata + cmd["client"] = self.opts.metadata if self.compression_settings: - cmd['compression'] = self.compression_settings.compressors + cmd["compression"] = self.compression_settings.compressors if self.opts.load_balanced: - cmd['loadBalanced'] = True + cmd["loadBalanced"] = True elif topology_version is not None: - cmd['topologyVersion'] = topology_version - cmd['maxAwaitTimeMS'] = int(heartbeat_frequency*1000) + cmd["topologyVersion"] = topology_version + cmd["maxAwaitTimeMS"] = int(heartbeat_frequency * 1000) awaitable = True # If connect_timeout is None there is no timeout. if self.opts.connect_timeout: - self.sock.settimeout( - self.opts.connect_timeout + heartbeat_frequency) + self.sock.settimeout(self.opts.connect_timeout + heartbeat_frequency) if not performing_handshake and cluster_time is not None: - cmd['$clusterTime'] = cluster_time + cmd["$clusterTime"] = cluster_time creds = self.opts._credentials if creds: - if creds.mechanism == 'DEFAULT' and creds.username: - cmd['saslSupportedMechs'] = creds.source + '.' + creds.username + if creds.mechanism == "DEFAULT" and creds.username: + cmd["saslSupportedMechs"] = creds.source + "." + creds.username auth_ctx = auth._AuthContext.from_credentials(creds) if auth_ctx: - cmd['speculativeAuthenticate'] = auth_ctx.speculate_command() + cmd["speculativeAuthenticate"] = auth_ctx.speculate_command() else: auth_ctx = None - doc = self.command('admin', cmd, publish_events=False, - exhaust_allowed=awaitable) + doc = self.command("admin", cmd, publish_events=False, exhaust_allowed=awaitable) # PYTHON-2712 will remove this topologyVersion fallback logic. 
if self.opts.load_balanced and _MOCK_SERVICE_ID: - process_id = doc.get('topologyVersion', {}).get('processId') - doc.setdefault('serviceId', process_id) + process_id = doc.get("topologyVersion", {}).get("processId") + doc.setdefault("serviceId", process_id) if not self.opts.load_balanced: - doc.pop('serviceId', None) + doc.pop("serviceId", None) hello = Hello(doc, awaitable=awaitable) self.is_writable = hello.is_writable self.max_wire_version = hello.max_wire_version self.max_bson_size = hello.max_bson_size self.max_message_size = hello.max_message_size self.max_write_batch_size = hello.max_write_batch_size - self.supports_sessions = ( - hello.logical_session_timeout_minutes is not None) + self.supports_sessions = hello.logical_session_timeout_minutes is not None self.hello_ok = hello.hello_ok self.is_repl = hello.server_type in ( - SERVER_TYPE.RSPrimary, SERVER_TYPE.RSSecondary, - SERVER_TYPE.RSArbiter, SERVER_TYPE.RSOther, SERVER_TYPE.RSGhost) + SERVER_TYPE.RSPrimary, + SERVER_TYPE.RSSecondary, + SERVER_TYPE.RSArbiter, + SERVER_TYPE.RSOther, + SERVER_TYPE.RSGhost, + ) self.is_standalone = hello.server_type == SERVER_TYPE.Standalone self.is_mongos = hello.server_type == SERVER_TYPE.Mongos if performing_handshake and self.compression_settings: - ctx = self.compression_settings.get_compression_context( - hello.compressors) + ctx = self.compression_settings.get_compression_context(hello.compressors) self.compression_context = ctx self.op_msg_enabled = True @@ -619,8 +670,9 @@ def _hello(self, cluster_time, topology_version, heartbeat_frequency): if self.opts.load_balanced: if not hello.service_id: raise ConfigurationError( - 'Driver attempted to initialize in load balancing mode,' - ' but the server does not support this mode') + "Driver attempted to initialize in load balancing mode," + " but the server does not support this mode" + ) self.service_id = hello.service_id self.generation = self.pool_gen.get(self.service_id) return hello @@ -633,23 +685,28 @@ def _next_reply(self): helpers._check_command_response(response_doc, self.max_wire_version) # Remove after PYTHON-2712. if not self.opts.load_balanced: - response_doc.pop('serviceId', None) + response_doc.pop("serviceId", None) return response_doc - def command(self, dbname, spec, - read_preference=ReadPreference.PRIMARY, - codec_options=DEFAULT_CODEC_OPTIONS, check=True, - allowable_errors=None, - read_concern=None, - write_concern=None, - parse_write_concern_error=False, - collation=None, - session=None, - client=None, - retryable_write=False, - publish_events=True, - user_fields=None, - exhaust_allowed=False): + def command( + self, + dbname, + spec, + read_preference=ReadPreference.PRIMARY, + codec_options=DEFAULT_CODEC_OPTIONS, + check=True, + allowable_errors=None, + read_concern=None, + write_concern=None, + parse_write_concern_error=False, + collation=None, + session=None, + client=None, + retryable_write=False, + publish_events=True, + user_fields=None, + exhaust_allowed=False, + ): """Execute a command or raise an error. 
:Parameters: @@ -679,36 +736,43 @@ def command(self, dbname, spec, if not isinstance(spec, ORDERED_TYPES): # type:ignore[arg-type] spec = SON(spec) - if not (write_concern is None or write_concern.acknowledged or - collation is None): - raise ConfigurationError( - 'Collation is unsupported for unacknowledged writes.') - if (write_concern and - not write_concern.is_server_default): - spec['writeConcern'] = write_concern.document + if not (write_concern is None or write_concern.acknowledged or collation is None): + raise ConfigurationError("Collation is unsupported for unacknowledged writes.") + if write_concern and not write_concern.is_server_default: + spec["writeConcern"] = write_concern.document self.add_server_api(spec) if session: - session._apply_to(spec, retryable_write, read_preference, - self) + session._apply_to(spec, retryable_write, read_preference, self) self.send_cluster_time(spec, session, client) listeners = self.listeners if publish_events else None unacknowledged = write_concern and not write_concern.acknowledged if self.op_msg_enabled: self._raise_if_not_writable(unacknowledged) try: - return command(self, dbname, spec, - self.is_mongos, read_preference, codec_options, - session, client, check, allowable_errors, - self.address, listeners, - self.max_bson_size, read_concern, - parse_write_concern_error=parse_write_concern_error, - collation=collation, - compression_ctx=self.compression_context, - use_op_msg=self.op_msg_enabled, - unacknowledged=unacknowledged, - user_fields=user_fields, - exhaust_allowed=exhaust_allowed) + return command( + self, + dbname, + spec, + self.is_mongos, + read_preference, + codec_options, + session, + client, + check, + allowable_errors, + self.address, + listeners, + self.max_bson_size, + read_concern, + parse_write_concern_error=parse_write_concern_error, + collation=collation, + compression_ctx=self.compression_context, + use_op_msg=self.op_msg_enabled, + unacknowledged=unacknowledged, + user_fields=user_fields, + exhaust_allowed=exhaust_allowed, + ) except (OperationFailure, NotPrimaryError): raise # Catch socket.error, KeyboardInterrupt, etc. and close ourselves. @@ -720,12 +784,11 @@ def send_message(self, message, max_doc_size): If a network exception is raised, the socket is closed. """ - if (self.max_bson_size is not None - and max_doc_size > self.max_bson_size): + if self.max_bson_size is not None and max_doc_size > self.max_bson_size: raise DocumentTooLarge( "BSON document too large (%d bytes) - the connected server " - "supports BSON document sizes up to %d bytes." % - (max_doc_size, self.max_bson_size)) + "supports BSON document sizes up to %d bytes." % (max_doc_size, self.max_bson_size) + ) try: self.sock.sendall(message) @@ -748,8 +811,7 @@ def _raise_if_not_writable(self, unacknowledged): """ if unacknowledged and not self.is_writable: # Write won't succeed, bail as if we'd received a not primary error. - raise NotPrimaryError("not primary", { - "ok": 0, "errmsg": "not primary", "code": 10107}) + raise NotPrimaryError("not primary", {"ok": 0, "errmsg": "not primary", "code": 10107}) def unack_write(self, msg, max_doc_size): """Send unack OP_MSG. 
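Most of the PoolOptions slots reformatted earlier in this file map one-to-one onto MongoClient keyword/URI options, and only non-default values surface in ``non_default_options`` (the dict later attached to ``PoolCreatedEvent``). For example, with illustrative values::

    from pymongo import MongoClient

    client = MongoClient(
        "mongodb://localhost:27017",
        maxPoolSize=50,  # __max_pool_size
        minPoolSize=5,  # __min_pool_size
        maxIdleTimeMS=60000,  # __max_idle_time_seconds, reported in ms
        waitQueueTimeoutMS=2000,  # __wait_queue_timeout, reported in ms
    )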
@@ -803,8 +865,8 @@ def validate_session(self, client, session): if session: if session._client is not client: raise InvalidOperation( - 'Can only use session with the MongoClient that' - ' started it') + "Can only use session with the MongoClient that" " started it" + ) def close_socket(self, reason): """Close this connection with a reason.""" @@ -812,8 +874,7 @@ def close_socket(self, reason): return self._close_socket() if reason and self.enabled_for_cmap: - self.listeners.publish_connection_closed( - self.address, self.id, reason) + self.listeners.publish_connection_closed(self.address, self.id, reason) def _close_socket(self): """Close this connection.""" @@ -893,7 +954,7 @@ def __repr__(self): return "SocketInfo(%s)%s at %s" % ( repr(self.sock), self.closed and " CLOSED" or "", - id(self) + id(self), ) @@ -907,10 +968,9 @@ def _create_connection(address, options): host, port = address # Check if dealing with a unix domain socket - if host.endswith('.sock'): + if host.endswith(".sock"): if not hasattr(socket, "AF_UNIX"): - raise ConnectionFailure("UNIX-sockets are not supported " - "on this system") + raise ConnectionFailure("UNIX-sockets are not supported " "on this system") sock = socket.socket(socket.AF_UNIX) # SOCK_CLOEXEC not supported for Unix sockets. _set_non_inheritable_non_atomic(sock.fileno()) @@ -925,7 +985,7 @@ def _create_connection(address, options): # is 'localhost' (::1 is fine). Avoids slow connect issues # like PYTHON-356. family = socket.AF_INET - if socket.has_ipv6 and host != 'localhost': + if socket.has_ipv6 and host != "localhost": family = socket.AF_UNSPEC err = None @@ -935,8 +995,7 @@ def _create_connection(address, options): # number of platforms (newer Linux and *BSD). Starting with CPython 3.4 # all file descriptors are created non-inheritable. See PEP 446. try: - sock = socket.socket( - af, socktype | getattr(socket, 'SOCK_CLOEXEC', 0), proto) + sock = socket.socket(af, socktype | getattr(socket, "SOCK_CLOEXEC", 0), proto) except socket.error: # Can SOCK_CLOEXEC be defined even if the kernel doesn't support # it? @@ -961,7 +1020,7 @@ def _create_connection(address, options): # host with an OS/kernel or Python interpreter that doesn't # support IPv6. The test case is Jython2.5.1 which doesn't # support IPv6 at all. - raise socket.error('getaddrinfo failed') + raise socket.error("getaddrinfo failed") def _configured_socket(address, options): @@ -999,9 +1058,11 @@ def _configured_socket(address, options): # failures alike. Permanent handshake failures, like protocol # mismatch, will be turned into ServerSelectionTimeoutErrors later. _raise_connection_failure(address, exc, "SSL handshake failed: ") - if (ssl_context.verify_mode and not - getattr(ssl_context, "check_hostname", False) and - not options.tls_allow_invalid_hostnames): + if ( + ssl_context.verify_mode + and not getattr(ssl_context, "check_hostname", False) + and not options.tls_allow_invalid_hostnames + ): try: ssl.match_hostname(sock.getpeercert(), hostname=host) except _CertificateError: @@ -1016,6 +1077,7 @@ class _PoolClosedError(PyMongoError): """Internal error raised when a thread tries to get a connection from a closed pool. """ + pass @@ -1094,9 +1156,10 @@ def __init__(self, address, options, handshake=True): self.handshake = handshake # Don't publish events in Monitor pools. 
self.enabled_for_cmap = ( - self.handshake and - self.opts._event_listeners is not None and - self.opts._event_listeners.enabled_for_cmap) + self.handshake + and self.opts._event_listeners is not None + and self.opts._event_listeners.enabled_for_cmap + ) # The first portion of the wait queue. # Enforces: maxPoolSize @@ -1105,7 +1168,7 @@ def __init__(self, address, options, handshake=True): self.requests = 0 self.max_pool_size = self.opts.max_pool_size if not self.max_pool_size: - self.max_pool_size = float('inf') + self.max_pool_size = float("inf") # The second portion of the wait queue. # Enforces: maxConnecting # Also used for: clearing the wait queue @@ -1114,7 +1177,8 @@ def __init__(self, address, options, handshake=True): self._pending = 0 if self.enabled_for_cmap: self.opts._event_listeners.publish_pool_created( - self.address, self.opts.non_default_options) + self.address, self.opts.non_default_options + ) # Similar to active_sockets but includes threads in the wait queue. self.operation_count = 0 # Retain references to pinned connections to prevent the CPython GC @@ -1141,8 +1205,7 @@ def _reset(self, close, pause=True, service_id=None): with self.size_cond: if self.closed: return - if (self.opts.pause_enabled and pause and - not self.opts.load_balanced): + if self.opts.pause_enabled and pause and not self.opts.load_balanced: old_state, self.state = self.state, PoolState.PAUSED self.gen.inc(service_id) newpid = os.getpid() @@ -1180,8 +1243,7 @@ def _reset(self, close, pause=True, service_id=None): listeners.publish_pool_closed(self.address) else: if old_state != PoolState.PAUSED and self.enabled_for_cmap: - listeners.publish_pool_cleared(self.address, - service_id=service_id) + listeners.publish_pool_cleared(self.address, service_id=service_id) for sock_info in sockets: sock_info.close_socket(ConnectionClosedReason.STALE) @@ -1219,16 +1281,17 @@ def remove_stale_sockets(self, reference_generation): if self.opts.max_idle_time_seconds is not None: with self.lock: - while (self.sockets and - self.sockets[-1].idle_time_seconds() > self.opts.max_idle_time_seconds): + while ( + self.sockets + and self.sockets[-1].idle_time_seconds() > self.opts.max_idle_time_seconds + ): sock_info = self.sockets.pop() sock_info.close_socket(ConnectionClosedReason.IDLE) while True: with self.size_cond: # There are enough sockets in the pool. 
- if (len(self.sockets) + self.active_sockets >= - self.opts.min_pool_size): + if len(self.sockets) + self.active_sockets >= self.opts.min_pool_size: return if self.requests >= self.opts.min_pool_size: return @@ -1282,7 +1345,8 @@ def connect(self): except BaseException as error: if self.enabled_for_cmap: listeners.publish_connection_closed( - self.address, conn_id, ConnectionClosedReason.ERROR) + self.address, conn_id, ConnectionClosedReason.ERROR + ) if isinstance(error, (IOError, OSError, _SSLError)): _raise_connection_failure(self.address, error) @@ -1326,8 +1390,7 @@ def get_socket(self, handler=None): sock_info = self._get_socket() if self.enabled_for_cmap: - listeners.publish_connection_checked_out( - self.address, sock_info.id) + listeners.publish_connection_checked_out(self.address, sock_info.id) try: yield sock_info except: @@ -1359,9 +1422,9 @@ def _raise_if_not_ready(self, emit_event): if self.state != PoolState.READY: if self.enabled_for_cmap and emit_event: self.opts._event_listeners.publish_connection_check_out_failed( - self.address, ConnectionCheckOutFailedReason.CONN_ERROR) - _raise_connection_failure( - self.address, AutoReconnect('connection pool paused')) + self.address, ConnectionCheckOutFailedReason.CONN_ERROR + ) + _raise_connection_failure(self.address, AutoReconnect("connection pool paused")) def _get_socket(self): """Get or create a SocketInfo. Can raise ConnectionFailure.""" @@ -1374,10 +1437,11 @@ def _get_socket(self): if self.closed: if self.enabled_for_cmap: self.opts._event_listeners.publish_connection_check_out_failed( - self.address, ConnectionCheckOutFailedReason.POOL_CLOSED) + self.address, ConnectionCheckOutFailedReason.POOL_CLOSED + ) raise _PoolClosedError( - 'Attempted to check out a connection from closed connection ' - 'pool') + "Attempted to check out a connection from closed connection " "pool" + ) with self.lock: self.operation_count += 1 @@ -1414,13 +1478,11 @@ def _get_socket(self): # to be checked back into the pool. with self._max_connecting_cond: self._raise_if_not_ready(emit_event=False) - while not (self.sockets or - self._pending < self._max_connecting): + while not (self.sockets or self._pending < self._max_connecting): if not _cond_wait(self._max_connecting_cond, deadline): # Timed out, notify the next thread to ensure a # timeout doesn't consume the condition. - if (self.sockets or - self._pending < self._max_connecting): + if self.sockets or self._pending < self._max_connecting: self._max_connecting_cond.notify() emitted_event = True self._raise_wait_queue_timeout() @@ -1453,7 +1515,8 @@ def _get_socket(self): if self.enabled_for_cmap and not emitted_event: self.opts._event_listeners.publish_connection_check_out_failed( - self.address, ConnectionCheckOutFailedReason.CONN_ERROR) + self.address, ConnectionCheckOutFailedReason.CONN_ERROR + ) raise sock_info.active = True @@ -1483,14 +1546,13 @@ def return_socket(self, sock_info): # CMAP requires the closed event be emitted after the check in. if self.enabled_for_cmap: listeners.publish_connection_closed( - self.address, sock_info.id, - ConnectionClosedReason.ERROR) + self.address, sock_info.id, ConnectionClosedReason.ERROR + ) else: with self.lock: # Hold the lock to ensure this section does not race with # Pool.reset(). 
- if self.stale_generation(sock_info.generation, - sock_info.service_id): + if self.stale_generation(sock_info.generation, sock_info.service_id): sock_info.close_socket(ConnectionClosedReason.STALE) else: sock_info.update_last_checkin_time() @@ -1525,14 +1587,16 @@ def _perished(self, sock_info): """ idle_time_seconds = sock_info.idle_time_seconds() # If socket is idle, open a new one. - if (self.opts.max_idle_time_seconds is not None and - idle_time_seconds > self.opts.max_idle_time_seconds): + if ( + self.opts.max_idle_time_seconds is not None + and idle_time_seconds > self.opts.max_idle_time_seconds + ): sock_info.close_socket(ConnectionClosedReason.IDLE) return True - if (self._check_interval_seconds is not None and ( - 0 == self._check_interval_seconds or - idle_time_seconds > self._check_interval_seconds)): + if self._check_interval_seconds is not None and ( + 0 == self._check_interval_seconds or idle_time_seconds > self._check_interval_seconds + ): if sock_info.socket_closed(): sock_info.close_socket(ConnectionClosedReason.ERROR) return True @@ -1547,20 +1611,28 @@ def _raise_wait_queue_timeout(self): listeners = self.opts._event_listeners if self.enabled_for_cmap: listeners.publish_connection_check_out_failed( - self.address, ConnectionCheckOutFailedReason.TIMEOUT) + self.address, ConnectionCheckOutFailedReason.TIMEOUT + ) if self.opts.load_balanced: other_ops = self.active_sockets - self.ncursors - self.ntxns raise ConnectionFailure( - 'Timeout waiting for connection from the connection pool. ' - 'maxPoolSize: %s, connections in use by cursors: %s, ' - 'connections in use by transactions: %s, connections in use ' - 'by other operations: %s, wait_queue_timeout: %s' % ( - self.opts.max_pool_size, self.ncursors, self.ntxns, - other_ops, self.opts.wait_queue_timeout)) + "Timeout waiting for connection from the connection pool. " + "maxPoolSize: %s, connections in use by cursors: %s, " + "connections in use by transactions: %s, connections in use " + "by other operations: %s, wait_queue_timeout: %s" + % ( + self.opts.max_pool_size, + self.ncursors, + self.ntxns, + other_ops, + self.opts.wait_queue_timeout, + ) + ) raise ConnectionFailure( - 'Timed out while checking out a connection from connection pool. ' - 'maxPoolSize: %s, wait_queue_timeout: %s' % ( - self.opts.max_pool_size, self.opts.wait_queue_timeout)) + "Timed out while checking out a connection from connection pool. 
" + "maxPoolSize: %s, wait_queue_timeout: %s" + % (self.opts.max_pool_size, self.opts.wait_queue_timeout) + ) def __del__(self): # Avoid ResourceWarnings in Python 3 diff --git a/pymongo/pyopenssl_context.py b/pymongo/pyopenssl_context.py index c5a5f0936d..d42cafb084 100644 --- a/pymongo/pyopenssl_context.py +++ b/pymongo/pyopenssl_context.py @@ -23,17 +23,12 @@ from errno import EINTR as _EINTR from ipaddress import ip_address as _ip_address -from cryptography.x509 import \ - load_der_x509_certificate as _load_der_x509_certificate +from cryptography.x509 import load_der_x509_certificate as _load_der_x509_certificate from OpenSSL import SSL as _SSL from OpenSSL import crypto as _crypto -from service_identity import ( - CertificateError as _SICertificateError -) +from service_identity import CertificateError as _SICertificateError from service_identity import VerificationError as _SIVerificationError -from service_identity.pyopenssl import ( # - verify_hostname as _verify_hostname -) +from service_identity.pyopenssl import verify_hostname as _verify_hostname from service_identity.pyopenssl import verify_ip_address as _verify_ip_address from pymongo.errors import ConfigurationError as _ConfigurationError @@ -45,6 +40,7 @@ try: import certifi + _HAVE_CERTIFI = True except ImportError: _HAVE_CERTIFI = False @@ -69,11 +65,11 @@ _VERIFY_MAP = { _stdlibssl.CERT_NONE: _SSL.VERIFY_NONE, _stdlibssl.CERT_OPTIONAL: _SSL.VERIFY_PEER, - _stdlibssl.CERT_REQUIRED: _SSL.VERIFY_PEER | _SSL.VERIFY_FAIL_IF_NO_PEER_CERT + _stdlibssl.CERT_REQUIRED: _SSL.VERIFY_PEER | _SSL.VERIFY_FAIL_IF_NO_PEER_CERT, } -_REVERSE_VERIFY_MAP = dict( - (value, key) for key, value in _VERIFY_MAP.items()) +_REVERSE_VERIFY_MAP = dict((value, key) for key, value in _VERIFY_MAP.items()) + def _is_ip_address(address): try: @@ -82,22 +78,21 @@ def _is_ip_address(address): except (ValueError, UnicodeError): return False + # According to the docs for Connection.send it can raise # WantX509LookupError and should be retried. -_RETRY_ERRORS = ( - _SSL.WantReadError, _SSL.WantWriteError, _SSL.WantX509LookupError) +_RETRY_ERRORS = (_SSL.WantReadError, _SSL.WantWriteError, _SSL.WantX509LookupError) def _ragged_eof(exc): """Return True if the OpenSSL.SSL.SysCallError is a ragged EOF.""" - return exc.args == (-1, 'Unexpected EOF') + return exc.args == (-1, "Unexpected EOF") # https://github.com/pyca/pyopenssl/issues/168 # https://github.com/pyca/pyopenssl/issues/176 # https://docs.python.org/3/library/ssl.html#notes-on-non-blocking-sockets class _sslConn(_SSL.Connection): - def __init__(self, ctx, sock, suppress_ragged_eofs): self.socket_checker = _SocketChecker() self.suppress_ragged_eofs = suppress_ragged_eofs @@ -111,8 +106,7 @@ def _call(self, call, *args, **kwargs): try: return call(*args, **kwargs) except _RETRY_ERRORS: - self.socket_checker.select( - self, True, True, timeout) + self.socket_checker.select(self, True, True, timeout) if timeout and _time.monotonic() - start > timeout: raise _socket.timeout("timed out") continue @@ -146,7 +140,8 @@ def sendall(self, buf, flags=0): while total_sent < total_length: try: sent = self._call( - super(_sslConn, self).send, view[total_sent:], flags) # type: ignore + super(_sslConn, self).send, view[total_sent:], flags # type: ignore + ) # XXX: It's not clear if this can actually happen. PyOpenSSL # doesn't appear to have any interrupt handling, nor any interrupt # errors for OpenSSL connections. 
@@ -163,6 +158,7 @@ def sendall(self, buf, flags=0):
 class _CallbackData(object):
     """Data class which is passed to the OCSP callback."""
+
     def __init__(self):
         self.trusted_ca_certs = None
         self.check_ocsp_endpoint = None
@@ -174,7 +170,7 @@ class SSLContext(object):
     context.
     """
 
-    __slots__ = ('_protocol', '_ctx', '_callback_data', '_check_hostname')
+    __slots__ = ("_protocol", "_ctx", "_callback_data", "_check_hostname")
 
     def __init__(self, protocol):
         self._protocol = protocol
@@ -186,8 +182,7 @@ def __init__(self, protocol):
         # side configuration and wrap_socket tries to support both client and
         # server side sockets.
         self._callback_data.check_ocsp_endpoint = True
-        self._ctx.set_ocsp_client_callback(
-            callback=_ocsp_callback, data=self._callback_data)
+        self._ctx.set_ocsp_client_callback(callback=_ocsp_callback, data=self._callback_data)
 
     @property
     def protocol(self):
@@ -205,12 +200,14 @@ def __get_verify_mode(self):
 
     def __set_verify_mode(self, value):
         """Setter for verify_mode."""
+
        def _cb(connobj, x509obj, errnum, errdepth, retcode):
             # It seems we don't need to do anything here. Twisted doesn't,
             # and OpenSSL's SSL_CTX_set_verify lets you pass NULL
             # for the callback option. It's weird that PyOpenSSL requires
             # this.
             return retcode
+
         self._ctx.set_verify(_VERIFY_MAP[value], _cb)
 
     verify_mode = property(__get_verify_mode, __set_verify_mode)
@@ -233,8 +230,7 @@ def __set_check_ocsp_endpoint(self, value):
             raise TypeError("check_ocsp must be True or False")
         self._callback_data.check_ocsp_endpoint = value
 
-    check_ocsp_endpoint = property(__get_check_ocsp_endpoint,
-                                   __set_check_ocsp_endpoint)
+    check_ocsp_endpoint = property(__get_check_ocsp_endpoint, __set_check_ocsp_endpoint)
 
     def __get_options(self):
         # Calling set_options adds the option to the existing bitmask and
@@ -262,11 +258,13 @@ def load_cert_chain(self, certfile, keyfile=None, password=None):
         # https://github.com/python/cpython/blob/v3.8.0/Modules/_ssl.c#L3930-L3971
         # Password callback MUST be set first or it will be ignored.
         if password:
+
             def _pwcb(max_length, prompt_twice, user_data):
                 # XXX: We could check the password length against what OpenSSL
                 # tells us is the max, but we can't raise an exception, so...
                 # warn?
-                return password.encode('utf-8')
+                return password.encode("utf-8")
+
             self._ctx.set_passwd_cb(_pwcb)
         self._ctx.use_certificate_chain_file(certfile)
         self._ctx.use_privatekey_file(keyfile or certfile)
@@ -289,7 +287,8 @@ def _load_certifi(self):
                 "tlsAllowInvalidCertificates is False but no system "
                 "CA certificates could be loaded. 
Please install the " "certifi package, or provide a path to a CA file using " - "the tlsCAFile option") + "the tlsCAFile option" + ) def _load_wincerts(self, store): """Attempt to load CA certs from Windows trust store.""" @@ -299,8 +298,8 @@ def _load_wincerts(self, store): if encoding == "x509_asn": if trust is True or oid in trust: cert_store.add_cert( - _crypto.X509.from_cryptography( - _load_der_x509_certificate(cert))) + _crypto.X509.from_cryptography(_load_der_x509_certificate(cert)) + ) def load_default_certs(self): """A PyOpenSSL version of load_default_certs from CPython.""" @@ -309,7 +308,7 @@ def load_default_certs(self): # https://www.pyopenssl.org/en/stable/api/ssl.html#OpenSSL.SSL.Context.set_default_verify_paths if _sys.platform == "win32": try: - for storename in ('CA', 'ROOT'): + for storename in ("CA", "ROOT"): self._load_wincerts(storename) except PermissionError: # Fall back to certifi @@ -325,10 +324,15 @@ def set_default_verify_paths(self): # but not that same as CPython's. self._ctx.set_default_verify_paths() - def wrap_socket(self, sock, server_side=False, - do_handshake_on_connect=True, - suppress_ragged_eofs=True, - server_hostname=None, session=None): + def wrap_socket( + self, + sock, + server_side=False, + do_handshake_on_connect=True, + suppress_ragged_eofs=True, + server_hostname=None, + session=None, + ): """Wrap an existing Python socket sock and return a TLS socket object. """ @@ -342,7 +346,7 @@ def wrap_socket(self, sock, server_side=False, if server_hostname and not _is_ip_address(server_hostname): # XXX: Do this in a callback registered with # SSLContext.set_info_callback? See Twisted for an example. - ssl_conn.set_tlsext_host_name(server_hostname.encode('idna')) + ssl_conn.set_tlsext_host_name(server_hostname.encode("idna")) if self.verify_mode != _stdlibssl.CERT_NONE: # Request a stapled OCSP response. 
ssl_conn.request_ocsp() diff --git a/pymongo/read_concern.py b/pymongo/read_concern.py index aaf67ef5a6..dfb3930ab0 100644 --- a/pymongo/read_concern.py +++ b/pymongo/read_concern.py @@ -35,8 +35,7 @@ def __init__(self, level: Optional[str] = None) -> None: if level is None or isinstance(level, str): self.__level = level else: - raise TypeError( - 'level must be a string or None.') + raise TypeError("level must be a string or None.") @property def level(self) -> Optional[str]: @@ -47,7 +46,7 @@ def level(self) -> Optional[str]: def ok_for_legacy(self) -> bool: """Return ``True`` if this read concern is compatible with old wire protocol versions.""" - return self.level is None or self.level == 'local' + return self.level is None or self.level == "local" @property def document(self) -> Dict[str, Any]: @@ -59,7 +58,7 @@ def document(self) -> Dict[str, Any]: """ doc = {} if self.__level: - doc['level'] = self.level + doc["level"] = self.level return doc def __eq__(self, other: Any) -> bool: @@ -69,8 +68,8 @@ def __eq__(self, other: Any) -> bool: def __repr__(self): if self.level: - return 'ReadConcern(%s)' % self.level - return 'ReadConcern()' + return "ReadConcern(%s)" % self.level + return "ReadConcern()" DEFAULT_READ_CONCERN = ReadConcern() diff --git a/pymongo/read_preferences.py b/pymongo/read_preferences.py index cc1317fb88..02a2e88bf0 100644 --- a/pymongo/read_preferences.py +++ b/pymongo/read_preferences.py @@ -19,8 +19,10 @@ from pymongo import max_staleness_selectors from pymongo.errors import ConfigurationError -from pymongo.server_selectors import (member_with_tags_server_selector, - secondary_with_tags_server_selector) +from pymongo.server_selectors import ( + member_with_tags_server_selector, + secondary_with_tags_server_selector, +) _PRIMARY = 0 _PRIMARY_PREFERRED = 1 @@ -30,41 +32,40 @@ _MONGOS_MODES = ( - 'primary', - 'primaryPreferred', - 'secondary', - 'secondaryPreferred', - 'nearest', + "primary", + "primaryPreferred", + "secondary", + "secondaryPreferred", + "nearest", ) def _validate_tag_sets(tag_sets): - """Validate tag sets for a MongoClient. - """ + """Validate tag sets for a MongoClient.""" if tag_sets is None: return tag_sets if not isinstance(tag_sets, (list, tuple)): - raise TypeError(( - "Tag sets %r invalid, must be a sequence") % (tag_sets,)) + raise TypeError(("Tag sets %r invalid, must be a sequence") % (tag_sets,)) if len(tag_sets) == 0: - raise ValueError(( - "Tag sets %r invalid, must be None or contain at least one set of" - " tags") % (tag_sets,)) + raise ValueError( + ("Tag sets %r invalid, must be None or contain at least one set of" " tags") + % (tag_sets,) + ) for tags in tag_sets: if not isinstance(tags, abc.Mapping): raise TypeError( "Tag set %r invalid, must be an instance of dict, " "bson.son.SON or other type that inherits from " - "collection.Mapping" % (tags,)) + "collection.Mapping" % (tags,) + ) return list(tag_sets) def _invalid_max_staleness_msg(max_staleness): - return ("maxStalenessSeconds must be a positive integer, not %s" % - max_staleness) + return "maxStalenessSeconds must be a positive integer, not %s" % max_staleness # Some duplication with common.py to avoid import cycle. @@ -98,13 +99,17 @@ def _validate_hedge(hedge): class _ServerMode(object): - """Base class for all read preferences. 
- """ + """Base class for all read preferences.""" - __slots__ = ("__mongos_mode", "__mode", "__tag_sets", "__max_staleness", - "__hedge") + __slots__ = ("__mongos_mode", "__mode", "__tag_sets", "__max_staleness", "__hedge") - def __init__(self, mode: int, tag_sets: Optional[_TagSets] = None, max_staleness: int = -1, hedge: Optional[_Hedge] = None) -> None: + def __init__( + self, + mode: int, + tag_sets: Optional[_TagSets] = None, + max_staleness: int = -1, + hedge: Optional[_Hedge] = None, + ) -> None: self.__mongos_mode = _MONGOS_MODES[mode] self.__mode = mode self.__tag_sets = _validate_tag_sets(tag_sets) @@ -113,33 +118,29 @@ def __init__(self, mode: int, tag_sets: Optional[_TagSets] = None, max_staleness @property def name(self) -> str: - """The name of this read preference. - """ + """The name of this read preference.""" return self.__class__.__name__ @property def mongos_mode(self) -> str: - """The mongos mode of this read preference. - """ + """The mongos mode of this read preference.""" return self.__mongos_mode @property def document(self) -> Dict[str, Any]: - """Read preference as a document. - """ - doc: Dict[str, Any] = {'mode': self.__mongos_mode} + """Read preference as a document.""" + doc: Dict[str, Any] = {"mode": self.__mongos_mode} if self.__tag_sets not in (None, [{}]): - doc['tags'] = self.__tag_sets + doc["tags"] = self.__tag_sets if self.__max_staleness != -1: - doc['maxStalenessSeconds'] = self.__max_staleness + doc["maxStalenessSeconds"] = self.__max_staleness if self.__hedge not in (None, {}): - doc['hedge'] = self.__hedge + doc["hedge"] = self.__hedge return doc @property def mode(self) -> int: - """The mode of this read preference instance. - """ + """The mode of this read preference instance.""" return self.__mode @property @@ -203,14 +204,20 @@ def min_wire_version(self) -> int: def __repr__(self): return "%s(tag_sets=%r, max_staleness=%r, hedge=%r)" % ( - self.name, self.__tag_sets, self.__max_staleness, self.__hedge) + self.name, + self.__tag_sets, + self.__max_staleness, + self.__hedge, + ) def __eq__(self, other: Any) -> bool: if isinstance(other, _ServerMode): - return (self.mode == other.mode and - self.tag_sets == other.tag_sets and - self.max_staleness == other.max_staleness and - self.hedge == other.hedge) + return ( + self.mode == other.mode + and self.tag_sets == other.tag_sets + and self.max_staleness == other.max_staleness + and self.hedge == other.hedge + ) return NotImplemented def __ne__(self, other: Any) -> bool: @@ -221,18 +228,20 @@ def __getstate__(self): Needed explicitly because __slots__() defined. 
""" - return {'mode': self.__mode, - 'tag_sets': self.__tag_sets, - 'max_staleness': self.__max_staleness, - 'hedge': self.__hedge} + return { + "mode": self.__mode, + "tag_sets": self.__tag_sets, + "max_staleness": self.__max_staleness, + "hedge": self.__hedge, + } def __setstate__(self, value): """Restore from pickling.""" - self.__mode = value['mode'] + self.__mode = value["mode"] self.__mongos_mode = _MONGOS_MODES[self.__mode] - self.__tag_sets = _validate_tag_sets(value['tag_sets']) - self.__max_staleness = _validate_max_staleness(value['max_staleness']) - self.__hedge = _validate_hedge(value['hedge']) + self.__tag_sets = _validate_tag_sets(value["tag_sets"]) + self.__max_staleness = _validate_max_staleness(value["max_staleness"]) + self.__hedge = _validate_hedge(value["hedge"]) class Primary(_ServerMode): @@ -293,9 +302,13 @@ class PrimaryPreferred(_ServerMode): __slots__ = () - def __init__(self, tag_sets: Optional[_TagSets] = None, max_staleness: int = -1, hedge: Optional[_Hedge] = None) -> None: - super(PrimaryPreferred, self).__init__( - _PRIMARY_PREFERRED, tag_sets, max_staleness, hedge) + def __init__( + self, + tag_sets: Optional[_TagSets] = None, + max_staleness: int = -1, + hedge: Optional[_Hedge] = None, + ) -> None: + super(PrimaryPreferred, self).__init__(_PRIMARY_PREFERRED, tag_sets, max_staleness, hedge) def __call__(self, selection: Any) -> Any: """Apply this read preference to Selection.""" @@ -303,9 +316,8 @@ def __call__(self, selection: Any) -> Any: return selection.primary_selection else: return secondary_with_tags_server_selector( - self.tag_sets, - max_staleness_selectors.select( - self.max_staleness, selection)) + self.tag_sets, max_staleness_selectors.select(self.max_staleness, selection) + ) class Secondary(_ServerMode): @@ -333,16 +345,19 @@ class Secondary(_ServerMode): __slots__ = () - def __init__(self, tag_sets: Optional[_TagSets] = None, max_staleness: int = -1, hedge: Optional[_Hedge] = None) -> None: - super(Secondary, self).__init__( - _SECONDARY, tag_sets, max_staleness, hedge) + def __init__( + self, + tag_sets: Optional[_TagSets] = None, + max_staleness: int = -1, + hedge: Optional[_Hedge] = None, + ) -> None: + super(Secondary, self).__init__(_SECONDARY, tag_sets, max_staleness, hedge) def __call__(self, selection: Any) -> Any: """Apply this read preference to Selection.""" return secondary_with_tags_server_selector( - self.tag_sets, - max_staleness_selectors.select( - self.max_staleness, selection)) + self.tag_sets, max_staleness_selectors.select(self.max_staleness, selection) + ) class SecondaryPreferred(_ServerMode): @@ -374,16 +389,21 @@ class SecondaryPreferred(_ServerMode): __slots__ = () - def __init__(self, tag_sets: Optional[_TagSets] = None, max_staleness: int = -1, hedge: Optional[_Hedge] = None) -> None: + def __init__( + self, + tag_sets: Optional[_TagSets] = None, + max_staleness: int = -1, + hedge: Optional[_Hedge] = None, + ) -> None: super(SecondaryPreferred, self).__init__( - _SECONDARY_PREFERRED, tag_sets, max_staleness, hedge) + _SECONDARY_PREFERRED, tag_sets, max_staleness, hedge + ) def __call__(self, selection: Any) -> Any: """Apply this read preference to Selection.""" secondaries = secondary_with_tags_server_selector( - self.tag_sets, - max_staleness_selectors.select( - self.max_staleness, selection)) + self.tag_sets, max_staleness_selectors.select(self.max_staleness, selection) + ) if secondaries: return secondaries @@ -416,16 +436,19 @@ class Nearest(_ServerMode): __slots__ = () - def __init__(self, tag_sets: 
Optional[_TagSets] = None, max_staleness: int = -1, hedge: Optional[_Hedge] = None) -> None: - super(Nearest, self).__init__( - _NEAREST, tag_sets, max_staleness, hedge) + def __init__( + self, + tag_sets: Optional[_TagSets] = None, + max_staleness: int = -1, + hedge: Optional[_Hedge] = None, + ) -> None: + super(Nearest, self).__init__(_NEAREST, tag_sets, max_staleness, hedge) def __call__(self, selection: Any) -> Any: """Apply this read preference to Selection.""" return member_with_tags_server_selector( - self.tag_sets, - max_staleness_selectors.select( - self.max_staleness, selection)) + self.tag_sets, max_staleness_selectors.select(self.max_staleness, selection) + ) class _AggWritePref: @@ -439,7 +462,7 @@ class _AggWritePref: - `pref`: The read preference to use on MongoDB 5.0+. """ - __slots__ = ('pref', 'effective_pref') + __slots__ = ("pref", "effective_pref") def __init__(self, pref): self.pref = pref @@ -447,9 +470,11 @@ def __init__(self, pref): def selection_hook(self, topology_description): common_wv = topology_description.common_wire_version - if (topology_description.has_readable_server( - ReadPreference.PRIMARY_PREFERRED) and - common_wv and common_wv < 13): + if ( + topology_description.has_readable_server(ReadPreference.PRIMARY_PREFERRED) + and common_wv + and common_wv < 13 + ): self.effective_pref = ReadPreference.PRIMARY else: self.effective_pref = self.pref @@ -467,28 +492,29 @@ def __getattr__(self, name): return getattr(self.effective_pref, name) -_ALL_READ_PREFERENCES = (Primary, PrimaryPreferred, - Secondary, SecondaryPreferred, Nearest) +_ALL_READ_PREFERENCES = (Primary, PrimaryPreferred, Secondary, SecondaryPreferred, Nearest) -def make_read_preference(mode: int, tag_sets: Optional[_TagSets], max_staleness: int = -1) -> _ServerMode: +def make_read_preference( + mode: int, tag_sets: Optional[_TagSets], max_staleness: int = -1 +) -> _ServerMode: if mode == _PRIMARY: if tag_sets not in (None, [{}]): - raise ConfigurationError("Read preference primary " - "cannot be combined with tags") + raise ConfigurationError("Read preference primary " "cannot be combined with tags") if max_staleness != -1: - raise ConfigurationError("Read preference primary cannot be " - "combined with maxStalenessSeconds") + raise ConfigurationError( + "Read preference primary cannot be " "combined with maxStalenessSeconds" + ) return Primary() return _ALL_READ_PREFERENCES[mode](tag_sets, max_staleness) # type: ignore _MODES = ( - 'PRIMARY', - 'PRIMARY_PREFERRED', - 'SECONDARY', - 'SECONDARY_PREFERRED', - 'NEAREST', + "PRIMARY", + "PRIMARY_PREFERRED", + "SECONDARY", + "SECONDARY_PREFERRED", + "NEAREST", ) @@ -542,6 +568,7 @@ class ReadPreference(object): - ``NEAREST``: Read from any shard member. """ + PRIMARY = Primary() PRIMARY_PREFERRED = PrimaryPreferred() SECONDARY = Secondary() @@ -550,13 +577,13 @@ class ReadPreference(object): def read_pref_mode_from_name(name: str) -> int: - """Get the read preference mode from mongos/uri name. 
- """ + """Get the read preference mode from mongos/uri name.""" return _MONGOS_MODES.index(name) class MovingAverage(object): """Tracks an exponentially-weighted moving average.""" + average: Optional[float] def __init__(self) -> None: diff --git a/pymongo/response.py b/pymongo/response.py index 3094399da6..1369eac4e0 100644 --- a/pymongo/response.py +++ b/pymongo/response.py @@ -16,11 +16,9 @@ class Response(object): - __slots__ = ('_data', '_address', '_request_id', '_duration', - '_from_command', '_docs') + __slots__ = ("_data", "_address", "_request_id", "_duration", "_from_command", "_docs") - def __init__(self, data, address, request_id, duration, from_command, - docs): + def __init__(self, data, address, request_id, duration, from_command, docs): """Represent a response from the server. :Parameters: @@ -69,10 +67,11 @@ def docs(self): class PinnedResponse(Response): - __slots__ = ('_socket_info', '_more_to_come') + __slots__ = ("_socket_info", "_more_to_come") - def __init__(self, data, address, socket_info, request_id, duration, - from_command, docs, more_to_come): + def __init__( + self, data, address, socket_info, request_id, duration, from_command, docs, more_to_come + ): """Represent a response to an exhaust cursor's initial query. :Parameters: @@ -87,11 +86,9 @@ def __init__(self, data, address, socket_info, request_id, duration, - `more_to_come`: Bool indicating whether cursor is ready to be exhausted. """ - super(PinnedResponse, self).__init__(data, - address, - request_id, - duration, - from_command, docs) + super(PinnedResponse, self).__init__( + data, address, request_id, duration, from_command, docs + ) self._socket_info = socket_info self._more_to_come = more_to_come diff --git a/pymongo/results.py b/pymongo/results.py index 637bf73b0f..127f574184 100644 --- a/pymongo/results.py +++ b/pymongo/results.py @@ -29,10 +29,12 @@ def __init__(self, acknowledged: bool) -> None: def _raise_if_unacknowledged(self, property_name): """Raise an exception on property access if unacknowledged.""" if not self.__acknowledged: - raise InvalidOperation("A value for %s is not available when " - "the write is unacknowledged. Check the " - "acknowledged attribute to avoid this " - "error." % (property_name,)) + raise InvalidOperation( + "A value for %s is not available when " + "the write is unacknowledged. Check the " + "acknowledged attribute to avoid this " + "error." % (property_name,) + ) @property def acknowledged(self) -> bool: @@ -55,8 +57,7 @@ def acknowledged(self) -> bool: class InsertOneResult(_WriteResult): - """The return type for :meth:`~pymongo.collection.Collection.insert_one`. - """ + """The return type for :meth:`~pymongo.collection.Collection.insert_one`.""" __slots__ = ("__inserted_id", "__acknowledged") @@ -71,8 +72,7 @@ def inserted_id(self) -> Any: class InsertManyResult(_WriteResult): - """The return type for :meth:`~pymongo.collection.Collection.insert_many`. - """ + """The return type for :meth:`~pymongo.collection.Collection.insert_many`.""" __slots__ = ("__inserted_ids", "__acknowledged") @@ -119,7 +119,7 @@ def matched_count(self) -> int: @property def modified_count(self) -> int: - """The number of documents modified. 
""" + """The number of documents modified.""" self._raise_if_unacknowledged("modified_count") return cast(int, self.__raw_result.get("nModified")) @@ -211,6 +211,7 @@ def upserted_ids(self) -> Optional[Dict[int, Any]]: """A map of operation index to the _id of the upserted document.""" self._raise_if_unacknowledged("upserted_ids") if self.__bulk_api_result: - return dict((upsert["index"], upsert["_id"]) - for upsert in self.bulk_api_result["upserted"]) + return dict( + (upsert["index"], upsert["_id"]) for upsert in self.bulk_api_result["upserted"] + ) return None diff --git a/pymongo/saslprep.py b/pymongo/saslprep.py index 99445b06f0..b96d6fcb56 100644 --- a/pymongo/saslprep.py +++ b/pymongo/saslprep.py @@ -19,13 +19,16 @@ import stringprep except ImportError: HAVE_STRINGPREP = False + def saslprep(data: Any, prohibit_unassigned_code_points: Optional[bool] = True) -> str: """SASLprep dummy""" if isinstance(data, str): raise TypeError( "The stringprep module is not available. Usernames and " - "passwords must be instances of bytes.") + "passwords must be instances of bytes." + ) return data + else: HAVE_STRINGPREP = True import unicodedata @@ -43,7 +46,8 @@ def saslprep(data: Any, prohibit_unassigned_code_points: Optional[bool] = True) stringprep.in_table_c6, stringprep.in_table_c7, stringprep.in_table_c8, - stringprep.in_table_c9) + stringprep.in_table_c9, + ) def saslprep(data: Any, prohibit_unassigned_code_points: Optional[bool] = True) -> str: """An implementation of RFC4013 SASLprep. @@ -78,12 +82,12 @@ def saslprep(data: Any, prohibit_unassigned_code_points: Optional[bool] = True) in_table_c12 = stringprep.in_table_c12 in_table_b1 = stringprep.in_table_b1 data = "".join( - ["\u0020" if in_table_c12(elt) else elt - for elt in data if not in_table_b1(elt)]) + ["\u0020" if in_table_c12(elt) else elt for elt in data if not in_table_b1(elt)] + ) # RFC3454 section 2, step 2 - Normalize # RFC4013 section 2.2 normalization - data = unicodedata.ucd_3_2_0.normalize('NFKC', data) + data = unicodedata.ucd_3_2_0.normalize("NFKC", data) in_table_d1 = stringprep.in_table_d1 if in_table_d1(data[0]): @@ -104,7 +108,6 @@ def saslprep(data: Any, prohibit_unassigned_code_points: Optional[bool] = True) # RFC3454 section 2, step 3 and 4 - Prohibit and check bidi for char in data: if any(in_table(char) for in_table in prohibited): - raise ValueError( - "SASLprep: failed prohibited character check") + raise ValueError("SASLprep: failed prohibited character check") return data diff --git a/pymongo/server.py b/pymongo/server.py index 74093b05ed..be1e7da89c 100644 --- a/pymongo/server.py +++ b/pymongo/server.py @@ -23,12 +23,13 @@ from pymongo.response import PinnedResponse, Response from pymongo.server_type import SERVER_TYPE -_CURSOR_DOC_FIELDS = {'cursor': {'firstBatch': 1, 'nextBatch': 1}} +_CURSOR_DOC_FIELDS = {"cursor": {"firstBatch": 1, "nextBatch": 1}} class Server(object): - def __init__(self, server_description, pool, monitor, topology_id=None, - listeners=None, events=None): + def __init__( + self, server_description, pool, monitor, topology_id=None, listeners=None, events=None + ): """Represent one MongoDB server.""" self._description = server_description self._pool = pool @@ -60,8 +61,12 @@ def close(self): if self._publish: assert self._listener is not None assert self._events is not None - self._events.put((self._listener.publish_server_closed, - (self._description.address, self._topology_id))) + self._events.put( + ( + self._listener.publish_server_closed, + (self._description.address, 
self._topology_id), + ) + ) self._monitor.close() self._pool.reset_without_pause() @@ -69,8 +74,7 @@ def request_check(self): """Check the server's state soon.""" self._monitor.request_check() - def run_operation(self, sock_info, operation, read_preference, listeners, - unpack_res): + def run_operation(self, sock_info, operation, read_preference, listeners, unpack_res): """Run a _Query or _GetMore operation and return a Response object. This method is used only to run _Query/_GetMore operations from @@ -90,20 +94,18 @@ def run_operation(self, sock_info, operation, read_preference, listeners, start = datetime.now() use_cmd = operation.use_command(sock_info) - more_to_come = (operation.sock_mgr - and operation.sock_mgr.more_to_come) + more_to_come = operation.sock_mgr and operation.sock_mgr.more_to_come if more_to_come: request_id = 0 else: - message = operation.get_message( - read_preference, sock_info, use_cmd) + message = operation.get_message(read_preference, sock_info, use_cmd) request_id, data, max_doc_size = self._split_message(message) if publish: cmd, dbn = operation.as_command(sock_info) listeners.publish_command_start( - cmd, dbn, request_id, sock_info.address, - service_id=sock_info.service_id) + cmd, dbn, request_id, sock_info.address, service_id=sock_info.service_id + ) start = datetime.now() try: @@ -120,10 +122,13 @@ def run_operation(self, sock_info, operation, read_preference, listeners, else: user_fields = None legacy_response = True - docs = unpack_res(reply, operation.cursor_id, - operation.codec_options, - legacy_response=legacy_response, - user_fields=user_fields) + docs = unpack_res( + reply, + operation.cursor_id, + operation.codec_options, + legacy_response=legacy_response, + user_fields=user_fields, + ) if use_cmd: first = docs[0] operation.client._process_response(first, operation.session) @@ -136,9 +141,13 @@ def run_operation(self, sock_info, operation, read_preference, listeners, else: failure = _convert_exception(exc) listeners.publish_command_failure( - duration, failure, operation.name, - request_id, sock_info.address, - service_id=sock_info.service_id) + duration, + failure, + operation.name, + request_id, + sock_info.address, + service_id=sock_info.service_id, + ) raise if publish: @@ -150,25 +159,26 @@ def run_operation(self, sock_info, operation, read_preference, listeners, elif operation.name == "explain": res = docs[0] if docs else {} else: - res = {"cursor": {"id": reply.cursor_id, - "ns": operation.namespace()}, - "ok": 1} + res = {"cursor": {"id": reply.cursor_id, "ns": operation.namespace()}, "ok": 1} if operation.name == "find": res["cursor"]["firstBatch"] = docs else: res["cursor"]["nextBatch"] = docs listeners.publish_command_success( - duration, res, operation.name, request_id, - sock_info.address, service_id=sock_info.service_id) + duration, + res, + operation.name, + request_id, + sock_info.address, + service_id=sock_info.service_id, + ) # Decrypt response. 
client = operation.client if client and client._encrypter: if use_cmd: - decrypted = client._encrypter.decrypt( - reply.raw_command_response()) - docs = _decode_all_selective( - decrypted, operation.codec_options, user_fields) + decrypted = client._encrypter.decrypt(reply.raw_command_response()) + docs = _decode_all_selective(decrypted, operation.codec_options, user_fields) response: Response @@ -191,7 +201,8 @@ def run_operation(self, sock_info, operation, read_preference, listeners, request_id=request_id, from_command=use_cmd, docs=docs, - more_to_come=more_to_come) + more_to_come=more_to_come, + ) else: response = Response( data=reply, @@ -199,7 +210,8 @@ def run_operation(self, sock_info, operation, read_preference, listeners, duration=duration, request_id=request_id, from_command=use_cmd, - docs=docs) + docs=docs, + ) return response @@ -233,4 +245,4 @@ def _split_message(self, message): return request_id, data, 0 def __repr__(self): - return '<%s %r>' % (self.__class__.__name__, self._description) + return "<%s %r>" % (self.__class__.__name__, self._description) diff --git a/pymongo/server_api.py b/pymongo/server_api.py index 4a1b925ca9..110406366a 100644 --- a/pymongo/server_api.py +++ b/pymongo/server_api.py @@ -97,6 +97,7 @@ class ServerApiVersion: class ServerApi(object): """MongoDB Versioned API.""" + def __init__(self, version, strict=None, deprecation_errors=None): """Options to configure MongoDB Versioned API. @@ -116,12 +117,13 @@ def __init__(self, version, strict=None, deprecation_errors=None): if strict is not None and not isinstance(strict, bool): raise TypeError( "Wrong type for ServerApi strict, value must be an instance " - "of bool, not %s" % (type(strict),)) - if (deprecation_errors is not None and - not isinstance(deprecation_errors, bool)): + "of bool, not %s" % (type(strict),) + ) + if deprecation_errors is not None and not isinstance(deprecation_errors, bool): raise TypeError( "Wrong type for ServerApi deprecation_errors, value must be " - "an instance of bool, not %s" % (type(deprecation_errors),)) + "an instance of bool, not %s" % (type(deprecation_errors),) + ) self._version = version self._strict = strict self._deprecation_errors = deprecation_errors @@ -161,8 +163,8 @@ def _add_to_command(cmd, server_api): """ if not server_api: return - cmd['apiVersion'] = server_api.version + cmd["apiVersion"] = server_api.version if server_api.strict is not None: - cmd['apiStrict'] = server_api.strict + cmd["apiStrict"] = server_api.strict if server_api.deprecation_errors is not None: - cmd['apiDeprecationErrors'] = server_api.deprecation_errors + cmd["apiDeprecationErrors"] = server_api.deprecation_errors diff --git a/pymongo/server_description.py b/pymongo/server_description.py index 0a9b799165..6b2a71df0b 100644 --- a/pymongo/server_description.py +++ b/pymongo/server_description.py @@ -35,13 +35,30 @@ class ServerDescription(object): """ __slots__ = ( - '_address', '_server_type', '_all_hosts', '_tags', '_replica_set_name', - '_primary', '_max_bson_size', '_max_message_size', - '_max_write_batch_size', '_min_wire_version', '_max_wire_version', - '_round_trip_time', '_me', '_is_writable', '_is_readable', - '_ls_timeout_minutes', '_error', '_set_version', '_election_id', - '_cluster_time', '_last_write_date', '_last_update_time', - '_topology_version') + "_address", + "_server_type", + "_all_hosts", + "_tags", + "_replica_set_name", + "_primary", + "_max_bson_size", + "_max_message_size", + "_max_write_batch_size", + "_min_wire_version", + "_max_wire_version", + 
"_round_trip_time", + "_me", + "_is_writable", + "_is_readable", + "_ls_timeout_minutes", + "_error", + "_set_version", + "_election_id", + "_cluster_time", + "_last_write_date", + "_last_update_time", + "_topology_version", + ) def __init__( self, @@ -76,9 +93,9 @@ def __init__( self._error = error self._topology_version = hello.topology_version if error: - details = getattr(error, 'details', None) + details = getattr(error, "details", None) if isinstance(details, dict): - self._topology_version = details.get('topologyVersion') + self._topology_version = details.get("topologyVersion") self._last_write_date: Optional[float] if hello.last_write_date: @@ -154,7 +171,7 @@ def election_id(self) -> Optional[ObjectId]: return self._election_id @property - def cluster_time(self)-> Optional[Mapping[str, Any]]: + def cluster_time(self) -> Optional[Mapping[str, Any]]: return self._cluster_time @property @@ -210,10 +227,10 @@ def is_server_type_known(self) -> bool: @property def retryable_writes_supported(self) -> bool: """Checks if this server supports retryable writes.""" - return (( - self._ls_timeout_minutes is not None and - self._server_type in (SERVER_TYPE.Mongos, SERVER_TYPE.RSPrimary)) - or self._server_type == SERVER_TYPE.LoadBalancer) + return ( + self._ls_timeout_minutes is not None + and self._server_type in (SERVER_TYPE.Mongos, SERVER_TYPE.RSPrimary) + ) or self._server_type == SERVER_TYPE.LoadBalancer @property def retryable_reads_supported(self) -> bool: @@ -224,27 +241,28 @@ def retryable_reads_supported(self) -> bool: def topology_version(self) -> Optional[Mapping[str, Any]]: return self._topology_version - def to_unknown(self, error: Optional[Exception] = None) -> "ServerDescription": + def to_unknown(self, error: Optional[Exception] = None) -> "ServerDescription": unknown = ServerDescription(self.address, error=error) unknown._topology_version = self.topology_version return unknown def __eq__(self, other: Any) -> bool: if isinstance(other, ServerDescription): - return ((self._address == other.address) and - (self._server_type == other.server_type) and - (self._min_wire_version == other.min_wire_version) and - (self._max_wire_version == other.max_wire_version) and - (self._me == other.me) and - (self._all_hosts == other.all_hosts) and - (self._tags == other.tags) and - (self._replica_set_name == other.replica_set_name) and - (self._set_version == other.set_version) and - (self._election_id == other.election_id) and - (self._primary == other.primary) and - (self._ls_timeout_minutes == - other.logical_session_timeout_minutes) and - (self._error == other.error)) + return ( + (self._address == other.address) + and (self._server_type == other.server_type) + and (self._min_wire_version == other.min_wire_version) + and (self._max_wire_version == other.max_wire_version) + and (self._me == other.me) + and (self._all_hosts == other.all_hosts) + and (self._tags == other.tags) + and (self._replica_set_name == other.replica_set_name) + and (self._set_version == other.set_version) + and (self._election_id == other.election_id) + and (self._primary == other.primary) + and (self._ls_timeout_minutes == other.logical_session_timeout_minutes) + and (self._error == other.error) + ) return NotImplemented @@ -252,12 +270,16 @@ def __ne__(self, other: Any) -> bool: return not self == other def __repr__(self): - errmsg = '' + errmsg = "" if self.error: - errmsg = ', error=%r' % (self.error,) + errmsg = ", error=%r" % (self.error,) return "<%s %s server_type: %s, rtt: %s%s>" % ( - self.__class__.__name__, 
self.address, self.server_type_name, - self.round_trip_time, errmsg) + self.__class__.__name__, + self.address, + self.server_type_name, + self.round_trip_time, + errmsg, + ) # For unittesting only. Use under no circumstances! _host_to_round_trip_time: Dict = {} diff --git a/pymongo/server_selectors.py b/pymongo/server_selectors.py index cc18450ad8..313566cb83 100644 --- a/pymongo/server_selectors.py +++ b/pymongo/server_selectors.py @@ -29,32 +29,28 @@ def from_topology_description(cls, topology_description): primary = sd break - return Selection(topology_description, - topology_description.known_servers, - topology_description.common_wire_version, - primary) - - def __init__(self, - topology_description, - server_descriptions, - common_wire_version, - primary): + return Selection( + topology_description, + topology_description.known_servers, + topology_description.common_wire_version, + primary, + ) + + def __init__(self, topology_description, server_descriptions, common_wire_version, primary): self.topology_description = topology_description self.server_descriptions = server_descriptions self.primary = primary self.common_wire_version = common_wire_version def with_server_descriptions(self, server_descriptions): - return Selection(self.topology_description, - server_descriptions, - self.common_wire_version, - self.primary) + return Selection( + self.topology_description, server_descriptions, self.common_wire_version, self.primary + ) def secondary_with_max_last_write_date(self): secondaries = secondary_server_selector(self) if secondaries.server_descriptions: - return max(secondaries.server_descriptions, - key=lambda sd: sd.last_write_date) + return max(secondaries.server_descriptions, key=lambda sd: sd.last_write_date) @property def primary_selection(self): @@ -82,30 +78,31 @@ def any_server_selector(selection): def readable_server_selector(selection): return selection.with_server_descriptions( - [s for s in selection.server_descriptions if s.is_readable]) + [s for s in selection.server_descriptions if s.is_readable] + ) def writable_server_selector(selection): return selection.with_server_descriptions( - [s for s in selection.server_descriptions if s.is_writable]) + [s for s in selection.server_descriptions if s.is_writable] + ) def secondary_server_selector(selection): return selection.with_server_descriptions( - [s for s in selection.server_descriptions - if s.server_type == SERVER_TYPE.RSSecondary]) + [s for s in selection.server_descriptions if s.server_type == SERVER_TYPE.RSSecondary] + ) def arbiter_server_selector(selection): return selection.with_server_descriptions( - [s for s in selection.server_descriptions - if s.server_type == SERVER_TYPE.RSArbiter]) + [s for s in selection.server_descriptions if s.server_type == SERVER_TYPE.RSArbiter] + ) def writable_preferred_server_selector(selection): """Like PrimaryPreferred but doesn't use tags or latency.""" - return (writable_server_selector(selection) or - secondary_server_selector(selection)) + return writable_server_selector(selection) or secondary_server_selector(selection) def apply_single_tag_set(tag_set, selection): @@ -116,6 +113,7 @@ def apply_single_tag_set(tag_set, selection): The empty tag set {} matches any server. 
""" + def tags_match(server_tags): for key, value in tag_set.items(): if key not in server_tags or server_tags[key] != value: @@ -124,7 +122,8 @@ def tags_match(server_tags): return True return selection.with_server_descriptions( - [s for s in selection.server_descriptions if tags_match(s.tags)]) + [s for s in selection.server_descriptions if tags_match(s.tags)] + ) def apply_tag_sets(tag_sets, selection): diff --git a/pymongo/settings.py b/pymongo/settings.py index d17b5e8b86..2bd2527cdf 100644 --- a/pymongo/settings.py +++ b/pymongo/settings.py @@ -27,32 +27,35 @@ class TopologySettings(object): - def __init__(self, - seeds=None, - replica_set_name=None, - pool_class=None, - pool_options=None, - monitor_class=None, - condition_class=None, - local_threshold_ms=LOCAL_THRESHOLD_MS, - server_selection_timeout=SERVER_SELECTION_TIMEOUT, - heartbeat_frequency=common.HEARTBEAT_FREQUENCY, - server_selector=None, - fqdn=None, - direct_connection=False, - load_balanced=None, - srv_service_name=common.SRV_SERVICE_NAME, - srv_max_hosts=0): + def __init__( + self, + seeds=None, + replica_set_name=None, + pool_class=None, + pool_options=None, + monitor_class=None, + condition_class=None, + local_threshold_ms=LOCAL_THRESHOLD_MS, + server_selection_timeout=SERVER_SELECTION_TIMEOUT, + heartbeat_frequency=common.HEARTBEAT_FREQUENCY, + server_selector=None, + fqdn=None, + direct_connection=False, + load_balanced=None, + srv_service_name=common.SRV_SERVICE_NAME, + srv_max_hosts=0, + ): """Represent MongoClient's configuration. Take a list of (host, port) pairs and optional replica set name. """ if heartbeat_frequency < common.MIN_HEARTBEAT_INTERVAL: raise ConfigurationError( - "heartbeatFrequencyMS cannot be less than %d" % ( - common.MIN_HEARTBEAT_INTERVAL * 1000,)) + "heartbeatFrequencyMS cannot be less than %d" + % (common.MIN_HEARTBEAT_INTERVAL * 1000,) + ) - self._seeds = seeds or [('localhost', 27017)] + self._seeds = seeds or [("localhost", 27017)] self._replica_set_name = replica_set_name self._pool_class = pool_class or pool.Pool self._pool_options = pool_options or PoolOptions() @@ -71,7 +74,7 @@ def __init__(self, self._topology_id = ObjectId() # Store the allocation traceback to catch unclosed clients in the # test suite. 
- self._stack = ''.join(traceback.format_stack()) + self._stack = "".join(traceback.format_stack()) @property def seeds(self): @@ -153,6 +156,4 @@ def get_topology_type(self): def get_server_descriptions(self): """Initial dict of (address, ServerDescription) for all seeds.""" - return dict([ - (address, ServerDescription(address)) - for address in self.seeds]) + return dict([(address, ServerDescription(address)) for address in self.seeds]) diff --git a/pymongo/socket_checker.py b/pymongo/socket_checker.py index 42db7b9373..70c12f0699 100644 --- a/pymongo/socket_checker.py +++ b/pymongo/socket_checker.py @@ -21,12 +21,12 @@ # PYTHON-2320: Jython does not fully support poll on SSL sockets, # https://bugs.jython.org/issue2900 -_HAVE_POLL = hasattr(select, "poll") and not sys.platform.startswith('java') +_HAVE_POLL = hasattr(select, "poll") and not sys.platform.startswith("java") _SelectError = getattr(select, "error", OSError) def _errno_from_exception(exc): - if hasattr(exc, 'errno'): + if hasattr(exc, "errno"): return exc.errno if exc.args: return exc.args[0] @@ -34,7 +34,6 @@ def _errno_from_exception(exc): class SocketChecker(object): - def __init__(self) -> None: self._poller: Optional[select.poll] if _HAVE_POLL: @@ -42,7 +41,9 @@ def __init__(self) -> None: else: self._poller = None - def select(self, sock: Any, read: bool = False, write: bool = False, timeout: Optional[float] = 0) -> bool: + def select( + self, sock: Any, read: bool = False, write: bool = False, timeout: Optional[float] = 0 + ) -> bool: """Select for reads or writes with a timeout in seconds (or None). Returns True if the socket is readable/writable, False on timeout. @@ -83,8 +84,7 @@ def select(self, sock: Any, read: bool = False, write: bool = False, timeout: Op raise def socket_closed(self, sock: Any) -> bool: - """Return True if we know socket has been closed, False otherwise. - """ + """Return True if we know socket has been closed, False otherwise.""" try: return self.select(sock, read=True) except (RuntimeError, KeyError): diff --git a/pymongo/srv_resolver.py b/pymongo/srv_resolver.py index 989e79131c..fe2dd49aa0 100644 --- a/pymongo/srv_resolver.py +++ b/pymongo/srv_resolver.py @@ -19,6 +19,7 @@ try: from dns import resolver + _HAVE_DNSPYTHON = True except ImportError: _HAVE_DNSPYTHON = False @@ -37,19 +38,21 @@ def maybe_decode(text): # PYTHON-2667 Lazily call dns.resolver methods for compatibility with eventlet. def _resolve(*args, **kwargs): - if hasattr(resolver, 'resolve'): + if hasattr(resolver, "resolve"): # dnspython >= 2 return resolver.resolve(*args, **kwargs) # dnspython 1.X return resolver.query(*args, **kwargs) + _INVALID_HOST_MSG = ( "Invalid URI host: %s is not a valid hostname for 'mongodb+srv://'. " - "Did you mean to use 'mongodb://'?") + "Did you mean to use 'mongodb://'?" 
+) + class _SrvResolver(object): - def __init__(self, fqdn, - connect_timeout, srv_service_name, srv_max_hosts=0): + def __init__(self, fqdn, connect_timeout, srv_service_name, srv_max_hosts=0): self.__fqdn = fqdn self.__srv = srv_service_name self.__connect_timeout = connect_timeout or CONNECT_TIMEOUT @@ -71,23 +74,21 @@ def __init__(self, fqdn, def get_options(self): try: - results = _resolve(self.__fqdn, 'TXT', - lifetime=self.__connect_timeout) + results = _resolve(self.__fqdn, "TXT", lifetime=self.__connect_timeout) except (resolver.NoAnswer, resolver.NXDOMAIN): # No TXT records return None except Exception as exc: raise ConfigurationError(str(exc)) if len(results) > 1: - raise ConfigurationError('Only one TXT record is supported') - return ( - b'&'.join([b''.join(res.strings) for res in results])).decode( - 'utf-8') + raise ConfigurationError("Only one TXT record is supported") + return (b"&".join([b"".join(res.strings) for res in results])).decode("utf-8") def _resolve_uri(self, encapsulate_errors): try: - results = _resolve('_' + self.__srv + '._tcp.' + self.__fqdn, - 'SRV', lifetime=self.__connect_timeout) + results = _resolve( + "_" + self.__srv + "._tcp." + self.__fqdn, "SRV", lifetime=self.__connect_timeout + ) except Exception as exc: if not encapsulate_errors: # Raise the original error. @@ -101,13 +102,13 @@ def _get_srv_response_and_hosts(self, encapsulate_errors): # Construct address tuples nodes = [ - (maybe_decode(res.target.to_text(omit_final_dot=True)), res.port) - for res in results] + (maybe_decode(res.target.to_text(omit_final_dot=True)), res.port) for res in results + ] # Validate hosts for node in nodes: try: - nlist = node[0].split(".")[1:][-self.__slen:] + nlist = node[0].split(".")[1:][-self.__slen :] except Exception: raise ConfigurationError("Invalid SRV host: %s" % (node[0],)) if self.__plist != nlist: diff --git a/pymongo/ssl_support.py b/pymongo/ssl_support.py index b3428197b7..7b5417fefa 100644 --- a/pymongo/ssl_support.py +++ b/pymongo/ssl_support.py @@ -36,13 +36,20 @@ # import the ssl module even if we're only using it for this purpose. import ssl as _stdlibssl from ssl import CERT_NONE, CERT_REQUIRED + HAS_SNI = _ssl.HAS_SNI IPADDR_SAFE = _ssl.IS_PYOPENSSL or sys.version_info[:2] >= (3, 7) SSLError = _ssl.SSLError - def get_ssl_context(certfile, passphrase, ca_certs, crlfile, - allow_invalid_certificates, allow_invalid_hostnames, - disable_ocsp_endpoint_check): + def get_ssl_context( + certfile, + passphrase, + ca_certs, + crlfile, + allow_invalid_certificates, + allow_invalid_hostnames, + disable_ocsp_endpoint_check, + ): """Create and return an SSLContext object.""" verify_mode = CERT_NONE if allow_invalid_certificates else CERT_REQUIRED ctx = _ssl.SSLContext(_ssl.PROTOCOL_SSLv23) @@ -67,14 +74,12 @@ def get_ssl_context(certfile, passphrase, ca_certs, crlfile, try: ctx.load_cert_chain(certfile, None, passphrase) except _ssl.SSLError as exc: - raise ConfigurationError( - "Private key doesn't match certificate: %s" % (exc,)) + raise ConfigurationError("Private key doesn't match certificate: %s" % (exc,)) if crlfile is not None: if _ssl.IS_PYOPENSSL: - raise ConfigurationError( - "tlsCRLFile cannot be used with PyOpenSSL") + raise ConfigurationError("tlsCRLFile cannot be used with PyOpenSSL") # Match the server's behavior. 
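# For context, the CRL handling below has roughly this stdlib-level effect
# (a sketch; VERIFY_CRL_CHECK_LEAF is absent from some SSL builds, hence
# the getattr default of 0, and "revoked.pem" is a hypothetical CRL file):
#
#     import ssl
#     ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
#     ctx.verify_flags |= getattr(ssl, "VERIFY_CRL_CHECK_LEAF", 0)
#     ctx.load_verify_locations("revoked.pem")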
- setattr(ctx, 'verify_flags', getattr(_ssl, "VERIFY_CRL_CHECK_LEAF", 0)) + setattr(ctx, "verify_flags", getattr(_ssl, "VERIFY_CRL_CHECK_LEAF", 0)) ctx.load_verify_locations(crlfile) if ca_certs is not None: ctx.load_verify_locations(ca_certs) @@ -82,9 +87,12 @@ def get_ssl_context(certfile, passphrase, ca_certs, crlfile, ctx.load_default_certs() ctx.verify_mode = verify_mode return ctx + else: + class SSLError(Exception): # type: ignore pass + HAS_SNI = False IPADDR_SAFE = False diff --git a/pymongo/topology.py b/pymongo/topology.py index b2d31ed314..6134b8201b 100644 --- a/pymongo/topology.py +++ b/pymongo/topology.py @@ -25,23 +25,37 @@ from pymongo import common, helpers, periodic_executor from pymongo.client_session import _ServerSessionPool -from pymongo.errors import (ConfigurationError, ConnectionFailure, - InvalidOperation, NetworkTimeout, NotPrimaryError, - OperationFailure, PyMongoError, - ServerSelectionTimeoutError, WriteError) +from pymongo.errors import ( + ConfigurationError, + ConnectionFailure, + InvalidOperation, + NetworkTimeout, + NotPrimaryError, + OperationFailure, + PyMongoError, + ServerSelectionTimeoutError, + WriteError, +) from pymongo.hello import Hello from pymongo.monitor import SrvMonitor from pymongo.pool import PoolOptions from pymongo.server import Server from pymongo.server_description import ServerDescription -from pymongo.server_selectors import (Selection, any_server_selector, - arbiter_server_selector, - readable_server_selector, - secondary_server_selector, - writable_server_selector) +from pymongo.server_selectors import ( + Selection, + any_server_selector, + arbiter_server_selector, + readable_server_selector, + secondary_server_selector, + writable_server_selector, +) from pymongo.topology_description import ( - SRV_POLLING_TOPOLOGIES, TOPOLOGY_TYPE, TopologyDescription, - _updated_topology_description_srv_polling, updated_topology_description) + SRV_POLLING_TOPOLOGIES, + TOPOLOGY_TYPE, + TopologyDescription, + _updated_topology_description_srv_polling, + updated_topology_description, +) def process_events_queue(queue_ref): @@ -63,6 +77,7 @@ def process_events_queue(queue_ref): class Topology(object): """Monitor a topology of one or more servers.""" + def __init__(self, topology_settings): self._topology_id = topology_settings._topology_id self._listeners = topology_settings._pool_options._event_listeners @@ -79,8 +94,7 @@ def __init__(self, topology_settings): if self._publish_tp: assert self._events is not None - self._events.put((self._listeners.publish_topology_opened, - (self._topology_id,))) + self._events.put((self._listeners.publish_topology_opened, (self._topology_id,))) self._settings = topology_settings topology_description = TopologyDescription( topology_settings.get_topology_type(), @@ -88,22 +102,26 @@ def __init__(self, topology_settings): topology_settings.replica_set_name, None, None, - topology_settings) + topology_settings, + ) self._description = topology_description if self._publish_tp: assert self._events is not None - initial_td = TopologyDescription(TOPOLOGY_TYPE.Unknown, {}, None, - None, None, self._settings) - self._events.put(( - self._listeners.publish_topology_description_changed, - (initial_td, self._description, self._topology_id))) + initial_td = TopologyDescription( + TOPOLOGY_TYPE.Unknown, {}, None, None, None, self._settings + ) + self._events.put( + ( + self._listeners.publish_topology_description_changed, + (initial_td, self._description, self._topology_id), + ) + ) for seed in topology_settings.seeds: if 
self._publish_server: assert self._events is not None - self._events.put((self._listeners.publish_server_opened, - (seed, self._topology_id))) + self._events.put((self._listeners.publish_server_opened, (seed, self._topology_id))) # Store the seed list to help diagnose errors in _error_message(). self._seed_addresses = list(topology_description.server_descriptions()) @@ -117,6 +135,7 @@ def __init__(self, topology_settings): self._session_pool = _ServerSessionPool() if self._publish_server or self._publish_tp: + def target(): return process_events_queue(weak) @@ -124,7 +143,8 @@ def target(): interval=common.EVENTS_QUEUE_FREQUENCY, min_interval=common.MIN_HEARTBEAT_INTERVAL, target=target, - name="pymongo_events_thread") + name="pymongo_events_thread", + ) # We strongly reference the executor and it weakly references # the queue via this closure. When the topology is freed, stop @@ -134,8 +154,7 @@ def target(): executor.open() self._srv_monitor = None - if (self._settings.fqdn is not None and - not self._settings.load_balanced): + if self._settings.fqdn is not None and not self._settings.load_balanced: self._srv_monitor = SrvMonitor(self, self._settings) def open(self): @@ -158,7 +177,8 @@ def open(self): "MongoClient opened before fork. Create MongoClient only " "after forking. See PyMongo's documentation for details: " "https://pymongo.readthedocs.io/en/stable/faq.html#" - "is-pymongo-fork-safe") + "is-pymongo-fork-safe" + ) with self._lock: # Reset the session pool to avoid duplicate sessions in # the child process. @@ -167,10 +187,7 @@ def open(self): with self._lock: self._ensure_opened() - def select_servers(self, - selector, - server_selection_timeout=None, - address=None): + def select_servers(self, selector, server_selection_timeout=None, address=None): """Return a list of Servers matching selector, or time out. :Parameters: @@ -192,25 +209,25 @@ def select_servers(self, server_timeout = server_selection_timeout with self._lock: - server_descriptions = self._select_servers_loop( - selector, server_timeout, address) + server_descriptions = self._select_servers_loop(selector, server_timeout, address) - return [self.get_server_by_address(sd.address) - for sd in server_descriptions] + return [self.get_server_by_address(sd.address) for sd in server_descriptions] def _select_servers_loop(self, selector, timeout, address): """select_servers() guts. Hold the lock when calling this.""" now = time.monotonic() end_time = now + timeout server_descriptions = self._description.apply_selector( - selector, address, custom_selector=self._settings.server_selector) + selector, address, custom_selector=self._settings.server_selector + ) while not server_descriptions: # No suitable servers. 
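# A typical entry point into _select_servers_loop() here is
# select_servers() above, e.g. (a sketch, assuming an already-opened
# Topology instance named topology):
#
#     from pymongo.server_selectors import writable_server_selector
#
#     servers = topology.select_servers(
#         writable_server_selector, server_selection_timeout=30
#     )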
if timeout == 0 or now > end_time: raise ServerSelectionTimeoutError( - "%s, Timeout: %ss, Topology Description: %r" % - (self._error_message(selector), timeout, self.description)) + "%s, Timeout: %ss, Topology Description: %r" + % (self._error_message(selector), timeout, self.description) + ) self._ensure_opened() self._request_check_all() @@ -223,19 +240,15 @@ def _select_servers_loop(self, selector, timeout, address): self._description.check_compatible() now = time.monotonic() server_descriptions = self._description.apply_selector( - selector, address, - custom_selector=self._settings.server_selector) + selector, address, custom_selector=self._settings.server_selector + ) self._description.check_compatible() return server_descriptions - def select_server(self, - selector, - server_selection_timeout=None, - address=None): + def select_server(self, selector, server_selection_timeout=None, address=None): """Like select_servers, but choose a random server if several match.""" - servers = self.select_servers( - selector, server_selection_timeout, address) + servers = self.select_servers(selector, server_selection_timeout, address) if len(servers) == 1: return servers[0] server1, server2 = random.sample(servers, 2) @@ -244,8 +257,7 @@ def select_server(self, else: return server2 - def select_server_by_address(self, address, - server_selection_timeout=None): + def select_server_by_address(self, address, server_selection_timeout=None): """Return a Server for "address", reconnecting if necessary. If the server's type is not known, request an immediate check of all @@ -263,9 +275,7 @@ def select_server_by_address(self, address, Raises exc:`ServerSelectionTimeoutError` after `server_selection_timeout` if no matching servers are found. """ - return self.select_server(any_server_selector, - server_selection_timeout, - address) + return self.select_server(any_server_selector, server_selection_timeout, address) def _process_change(self, server_description, reset_pool=False): """Process a new ServerDescription on an opened topology. @@ -278,24 +288,24 @@ def _process_change(self, server_description, reset_pool=False): # This is a stale hello response. Ignore it. return - new_td = updated_topology_description( - self._description, server_description) + new_td = updated_topology_description(self._description, server_description) # CMAP: Ensure the pool is "ready" when the server is selectable. 
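# Note on select_server() above: with several matching servers it samples
# two at random and keeps the less loaded one ("power of two choices").
# A sketch of the comparison elided by the hunk boundary (assumes the
# pool exposes an active_sockets count, as PyMongo's Pool does):
#
#     server1, server2 = random.sample(servers, 2)
#     if server1.pool.active_sockets < server2.pool.active_sockets:
#         return server1
#     return server2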
- if (server_description.is_readable - or (server_description.is_server_type_known and - new_td.topology_type == TOPOLOGY_TYPE.Single)): + if server_description.is_readable or ( + server_description.is_server_type_known and new_td.topology_type == TOPOLOGY_TYPE.Single + ): server = self._servers.get(server_description.address) if server: server.pool.ready() - suppress_event = ((self._publish_server or self._publish_tp) - and sd_old == server_description) + suppress_event = (self._publish_server or self._publish_tp) and sd_old == server_description if self._publish_server and not suppress_event: assert self._events is not None - self._events.put(( - self._listeners.publish_server_description_changed, - (sd_old, server_description, - server_description.address, self._topology_id))) + self._events.put( + ( + self._listeners.publish_server_description_changed, + (sd_old, server_description, server_description.address, self._topology_id), + ) + ) self._description = new_td self._update_servers() @@ -303,16 +313,20 @@ def _process_change(self, server_description, reset_pool=False): if self._publish_tp and not suppress_event: assert self._events is not None - self._events.put(( - self._listeners.publish_topology_description_changed, - (td_old, self._description, self._topology_id))) + self._events.put( + ( + self._listeners.publish_topology_description_changed, + (td_old, self._description, self._topology_id), + ) + ) # Shutdown SRV polling for unsupported cluster types. # This is only applicable if the old topology was Unknown, and the # new one is something other than Unknown or Sharded. - if self._srv_monitor and (td_old.topology_type == TOPOLOGY_TYPE.Unknown - and self._description.topology_type not in - SRV_POLLING_TOPOLOGIES): + if self._srv_monitor and ( + td_old.topology_type == TOPOLOGY_TYPE.Unknown + and self._description.topology_type not in SRV_POLLING_TOPOLOGIES + ): self._srv_monitor.close() # Clear the pool from a failed heartbeat. @@ -336,8 +350,7 @@ def on_change(self, server_description, reset_pool=False): # once. Check if it's still in the description or if some state- # change removed it. E.g., we got a host list from the primary # that didn't include this server. - if (self._opened and - self._description.has_server(server_description.address)): + if self._opened and self._description.has_server(server_description.address): self._process_change(server_description, reset_pool) def _process_srv_update(self, seedlist): @@ -345,16 +358,18 @@ def _process_srv_update(self, seedlist): Hold the lock when calling this. """ td_old = self._description - self._description = _updated_topology_description_srv_polling( - self._description, seedlist) + self._description = _updated_topology_description_srv_polling(self._description, seedlist) self._update_servers() if self._publish_tp: assert self._events is not None - self._events.put(( - self._listeners.publish_topology_description_changed, - (td_old, self._description, self._topology_id))) + self._events.put( + ( + self._listeners.publish_topology_description_changed, + (td_old, self._description, self._topology_id), + ) + ) def on_srv_update(self, seedlist): """Process a new list of nodes obtained from scanning SRV records.""" @@ -391,8 +406,10 @@ def _get_replica_set_members(self, selector): # Implemented here in Topology instead of MongoClient, so it can lock. 
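# on_srv_update() above is the SRV polling thread's entry point; it swaps
# in a freshly resolved seedlist under the lock. A sketch (hypothetical
# hostnames):
#
#     topology.on_srv_update(
#         [("shard1.example.com", 27017), ("shard2.example.com", 27017)]
#     )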
with self._lock: topology_type = self._description.topology_type - if topology_type not in (TOPOLOGY_TYPE.ReplicaSetWithPrimary, - TOPOLOGY_TYPE.ReplicaSetNoPrimary): + if topology_type not in ( + TOPOLOGY_TYPE.ReplicaSetWithPrimary, + TOPOLOGY_TYPE.ReplicaSetNoPrimary, + ): return set() return set([sd.address for sd in selector(self._new_selection())]) @@ -418,9 +435,10 @@ def _receive_cluster_time_no_lock(self, cluster_time): # value of the clusterTime embedded field." if cluster_time: # ">" uses bson.timestamp.Timestamp's comparison operator. - if (not self._max_cluster_time - or cluster_time['clusterTime'] > - self._max_cluster_time['clusterTime']): + if ( + not self._max_cluster_time + or cluster_time["clusterTime"] > self._max_cluster_time["clusterTime"] + ): self._max_cluster_time = cluster_time def receive_cluster_time(self, cluster_time): @@ -449,8 +467,7 @@ def update_pool(self): # Only update pools for data-bearing servers. for sd in self.data_bearing_servers(): server = self._servers[sd.address] - servers.append((server, - server.pool.gen.get_overall())) + servers.append((server, server.pool.gen.get_overall())) for server, generation in servers: try: @@ -463,7 +480,7 @@ def update_pool(self): def close(self): """Clear pools and terminate monitors. Topology does not reopen on demand. Any further operations will raise - :exc:`~.errors.InvalidOperation`. """ + :exc:`~.errors.InvalidOperation`.""" with self._lock: for server in self._servers.values(): server.close() @@ -484,8 +501,7 @@ def close(self): # Publish only after releasing the lock. if self._publish_tp: assert self._events is not None - self._events.put((self._listeners.publish_topology_closed, - (self._topology_id,))) + self._events.put((self._listeners.publish_topology_closed, (self._topology_id,))) if self._publish_server or self._publish_tp: self.__events_executor.close() @@ -506,19 +522,16 @@ def _check_session_support(self): if self._description.topology_type == TOPOLOGY_TYPE.Single: if not self._description.has_known_servers: self._select_servers_loop( - any_server_selector, - self._settings.server_selection_timeout, - None) + any_server_selector, self._settings.server_selection_timeout, None + ) elif not self._description.readable_servers: self._select_servers_loop( - readable_server_selector, - self._settings.server_selection_timeout, - None) + readable_server_selector, self._settings.server_selection_timeout, None + ) session_timeout = self._description.logical_session_timeout_minutes if session_timeout is None: - raise ConfigurationError( - "Sessions are not supported by this MongoDB deployment") + raise ConfigurationError("Sessions are not supported by this MongoDB deployment") return session_timeout def get_server_session(self): @@ -529,15 +542,15 @@ def get_server_session(self): session_timeout = self._check_session_support() else: # Sessions never time out in load balanced mode. - session_timeout = float('inf') + session_timeout = float("inf") return self._session_pool.get_server_session(session_timeout) def return_server_session(self, server_session, lock): if lock: with self._lock: self._session_pool.return_server_session( - server_session, - self._description.logical_session_timeout_minutes) + server_session, self._description.logical_session_timeout_minutes + ) else: # Called from a __del__ method, can't use a lock. self._session_pool.return_server_session_no_lock(server_session) @@ -566,16 +579,17 @@ def _ensure_opened(self): self.__events_executor.open() # Start the SRV polling thread. 
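# Server sessions handed out by get_server_session() above are pooled and
# should be returned when done. A sketch (assumes the deployment supports
# sessions; otherwise _check_session_support() raises ConfigurationError):
#
#     session = topology.get_server_session()
#     try:
#         ...  # run operations with the session
#     finally:
#         topology.return_server_session(session, lock=True)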
- if self._srv_monitor and (self.description.topology_type in - SRV_POLLING_TOPOLOGIES): + if self._srv_monitor and (self.description.topology_type in SRV_POLLING_TOPOLOGIES): self._srv_monitor.open() if self._settings.load_balanced: # Emit initial SDAM events for load balancer mode. - self._process_change(ServerDescription( - self._seed_addresses[0], - Hello({'ok': 1, 'serviceId': self._topology_id, - 'maxWireVersion': 13}))) + self._process_change( + ServerDescription( + self._seed_addresses[0], + Hello({"ok": 1, "serviceId": self._topology_id, "maxWireVersion": 13}), + ) + ) # Ensure that the monitors are open. for server in self._servers.values(): @@ -587,8 +601,7 @@ def _is_stale_error(self, address, err_ctx): # Another thread removed this server from the topology. return True - if server._pool.stale_generation( - err_ctx.sock_generation, err_ctx.service_id): + if server._pool.stale_generation(err_ctx.sock_generation, err_ctx.service_id): # This is an outdated error from a previous pool version. return True @@ -596,9 +609,9 @@ def _is_stale_error(self, address, err_ctx): cur_tv = server.description.topology_version error = err_ctx.error error_tv = None - if error and hasattr(error, 'details'): + if error and hasattr(error, "details"): if isinstance(error.details, dict): - error_tv = error.details.get('topologyVersion') + error_tv = error.details.get("topologyVersion") return _is_stale_error_topology_version(cur_tv, error_tv) @@ -610,8 +623,7 @@ def _handle_error(self, address, err_ctx): error = err_ctx.error exc_type = type(error) service_id = err_ctx.service_id - if (issubclass(exc_type, NetworkTimeout) and - err_ctx.completed_handshake): + if issubclass(exc_type, NetworkTimeout) and err_ctx.completed_handshake: # The socket has been closed. Don't reset the server. # Server Discovery And Monitoring Spec: "When an application # operation fails because of any network error besides a socket @@ -629,12 +641,12 @@ def _handle_error(self, address, err_ctx): # as Unknown and request an immediate check of the server. # Otherwise, we clear the connection pool, mark the server as # Unknown and request an immediate check of the server. - if hasattr(error, 'code'): + if hasattr(error, "code"): err_code = error.code else: # Default error code if one does not exist. default = 10107 if isinstance(error, NotPrimaryError) else None - err_code = error.details.get('code', default) + err_code = error.details.get("code", default) if err_code in helpers._NOT_PRIMARY_CODES: is_shutting_down = err_code in helpers._SHUTDOWN_CODES # Mark server Unknown, clear the pool, and request check. @@ -687,7 +699,8 @@ def _update_servers(self): server_description=sd, topology=self, pool=self._create_pool_for_monitor(address), - topology_settings=self._settings) + topology_settings=self._settings, + ) weak = None if self._publish_server: @@ -698,7 +711,8 @@ def _update_servers(self): monitor=monitor, topology_id=self._topology_id, listeners=self._listeners, - events=weak) + events=weak, + ) self._servers[address] = server server.open() @@ -709,8 +723,7 @@ def _update_servers(self): self._servers[address].description = sd # Update is_writable value of the pool, if it changed. 
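# In _handle_error() above, "not primary" and shutdown codes decide how
# aggressively to react. A sketch of the lookup (10107 is the default
# code substituted when a NotPrimaryError reply omits one):
#
#     if hasattr(error, "code"):
#         err_code = error.code
#     else:
#         default = 10107 if isinstance(error, NotPrimaryError) else None
#         err_code = error.details.get("code", default)
#     is_shutting_down = err_code in helpers._SHUTDOWN_CODES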
if was_writable != sd.is_writable: - self._servers[address].pool.update_is_writable( - sd.is_writable) + self._servers[address].pool.update_is_writable(sd.is_writable) for address, server in list(self._servers.items()): if not self._description.has_server(address): @@ -738,8 +751,7 @@ def _create_pool_for_monitor(self, address): server_api=options.server_api, ) - return self._settings.pool_class(address, monitor_pool_options, - handshake=False) + return self._settings.pool_class(address, monitor_pool_options, handshake=False) def _error_message(self, selector): """Format an error message if server selection fails. @@ -748,22 +760,23 @@ def _error_message(self, selector): """ is_replica_set = self._description.topology_type in ( TOPOLOGY_TYPE.ReplicaSetWithPrimary, - TOPOLOGY_TYPE.ReplicaSetNoPrimary) + TOPOLOGY_TYPE.ReplicaSetNoPrimary, + ) if is_replica_set: - server_plural = 'replica set members' + server_plural = "replica set members" elif self._description.topology_type == TOPOLOGY_TYPE.Sharded: - server_plural = 'mongoses' + server_plural = "mongoses" else: - server_plural = 'servers' + server_plural = "servers" if self._description.known_servers: # We've connected, but no servers match the selector. if selector is writable_server_selector: if is_replica_set: - return 'No primary available for writes' + return "No primary available for writes" else: - return 'No %s available for writes' % server_plural + return "No %s available for writes" % server_plural else: return 'No %s match selector "%s"' % (server_plural, selector) else: @@ -773,9 +786,11 @@ def _error_message(self, selector): if is_replica_set: # We removed all servers because of the wrong setName? return 'No %s available for replica set name "%s"' % ( - server_plural, self._settings.replica_set_name) + server_plural, + self._settings.replica_set_name, + ) else: - return 'No %s available' % server_plural + return "No %s available" % server_plural # 1 or more servers, all Unknown. Are they unknown for one reason? error = servers[0].error @@ -783,32 +798,29 @@ def _error_message(self, selector): if same: if error is None: # We're still discovering. - return 'No %s found yet' % server_plural + return "No %s found yet" % server_plural - if (is_replica_set and not - set(addresses).intersection(self._seed_addresses)): + if is_replica_set and not set(addresses).intersection(self._seed_addresses): # We replaced our seeds with new hosts but can't reach any. return ( - 'Could not reach any servers in %s. Replica set is' - ' configured with internal hostnames or IPs?' % - addresses) + "Could not reach any servers in %s. Replica set is" + " configured with internal hostnames or IPs?" 
% addresses + ) return str(error) else: - return ','.join(str(server.error) for server in servers - if server.error) + return ",".join(str(server.error) for server in servers if server.error) def __repr__(self): - msg = '' + msg = "" if not self._opened: - msg = 'CLOSED ' - return '<%s %s%r>' % (self.__class__.__name__, msg, self._description) + msg = "CLOSED " + return "<%s %s%r>" % (self.__class__.__name__, msg, self._description) def eq_props(self): """The properties to use for MongoClient/Topology equality checks.""" ts = self._settings - return (tuple(sorted(ts.seeds)), ts.replica_set_name, ts.fqdn, - ts.srv_service_name) + return (tuple(sorted(ts.seeds)), ts.replica_set_name, ts.fqdn, ts.srv_service_name) def __eq__(self, other): if isinstance(other, self.__class__): @@ -821,8 +833,8 @@ def __hash__(self): class _ErrorContext(object): """An error with context for SDAM error handling.""" - def __init__(self, error, max_wire_version, sock_generation, - completed_handshake, service_id): + + def __init__(self, error, max_wire_version, sock_generation, completed_handshake, service_id): self.error = error self.max_wire_version = max_wire_version self.sock_generation = sock_generation @@ -834,9 +846,9 @@ def _is_stale_error_topology_version(current_tv, error_tv): """Return True if the error's topologyVersion is <= current.""" if current_tv is None or error_tv is None: return False - if current_tv['processId'] != error_tv['processId']: + if current_tv["processId"] != error_tv["processId"]: return False - return current_tv['counter'] >= error_tv['counter'] + return current_tv["counter"] >= error_tv["counter"] def _is_stale_server_description(current_sd, new_sd): @@ -844,6 +856,6 @@ def _is_stale_server_description(current_sd, new_sd): current_tv, new_tv = current_sd.topology_version, new_sd.topology_version if current_tv is None or new_tv is None: return False - if current_tv['processId'] != new_tv['processId']: + if current_tv["processId"] != new_tv["processId"]: return False - return current_tv['counter'] > new_tv['counter'] + return current_tv["counter"] > new_tv["counter"] diff --git a/pymongo/topology_description.py b/pymongo/topology_description.py index 241ef5afbe..b3dd60680f 100644 --- a/pymongo/topology_description.py +++ b/pymongo/topology_description.py @@ -91,12 +91,12 @@ def __init__( readable_servers = self.readable_servers if not readable_servers: self._ls_timeout_minutes = None - elif any(s.logical_session_timeout_minutes is None - for s in readable_servers): + elif any(s.logical_session_timeout_minutes is None for s in readable_servers): self._ls_timeout_minutes = None else: - self._ls_timeout_minutes = min(s.logical_session_timeout_minutes # type: ignore - for s in readable_servers) + self._ls_timeout_minutes = min( # type: ignore[type-var] + s.logical_session_timeout_minutes for s in readable_servers + ) def _init_incompatible_err(self): """Internal compatibility check for non-load balanced topologies.""" @@ -109,28 +109,39 @@ def _init_incompatible_err(self): server_too_new = ( # Server too new. s.min_wire_version is not None - and s.min_wire_version > common.MAX_SUPPORTED_WIRE_VERSION) + and s.min_wire_version > common.MAX_SUPPORTED_WIRE_VERSION + ) server_too_old = ( # Server too old. 
s.max_wire_version is not None - and s.max_wire_version < common.MIN_SUPPORTED_WIRE_VERSION) + and s.max_wire_version < common.MIN_SUPPORTED_WIRE_VERSION + ) if server_too_new: self._incompatible_err = ( "Server at %s:%d requires wire version %d, but this " # type: ignore "version of PyMongo only supports up to %d." - % (s.address[0], s.address[1] or 0, - s.min_wire_version, common.MAX_SUPPORTED_WIRE_VERSION)) + % ( + s.address[0], + s.address[1] or 0, + s.min_wire_version, + common.MAX_SUPPORTED_WIRE_VERSION, + ) + ) elif server_too_old: self._incompatible_err = ( "Server at %s:%d reports wire version %d, but this " # type: ignore "version of PyMongo requires at least %d (MongoDB %s)." - % (s.address[0], s.address[1] or 0, - s.max_wire_version, - common.MIN_SUPPORTED_WIRE_VERSION, - common.MIN_SUPPORTED_SERVER_VERSION)) + % ( + s.address[0], + s.address[1] or 0, + s.max_wire_version, + common.MIN_SUPPORTED_WIRE_VERSION, + common.MIN_SUPPORTED_SERVER_VERSION, + ) + ) break @@ -159,8 +170,7 @@ def reset(self) -> "TopologyDescription": topology_type = self._topology_type # The default ServerDescription's type is Unknown. - sds = dict((address, ServerDescription(address)) - for address in self._server_descriptions) + sds = dict((address, ServerDescription(address)) for address in self._server_descriptions) return TopologyDescription( topology_type, @@ -168,7 +178,8 @@ def reset(self) -> "TopologyDescription": self._replica_set_name, self._max_set_version, self._max_election_id, - self._topology_settings) + self._topology_settings, + ) def server_descriptions(self) -> Dict[_Address, ServerDescription]: """Dict of (address, @@ -211,14 +222,12 @@ def logical_session_timeout_minutes(self) -> Optional[int]: @property def known_servers(self) -> List[ServerDescription]: """List of Servers of types besides Unknown.""" - return [s for s in self._server_descriptions.values() - if s.is_server_type_known] + return [s for s in self._server_descriptions.values() if s.is_server_type_known] @property def has_known_servers(self) -> bool: """Whether there are any Servers of types besides Unknown.""" - return any(s for s in self._server_descriptions.values() - if s.is_server_type_known) + return any(s for s in self._server_descriptions.values() if s.is_server_type_known) @property def readable_servers(self) -> List[ServerDescription]: @@ -246,17 +255,17 @@ def _apply_local_threshold(self, selection): if not selection: return [] # Round trip time in seconds. - fastest = min( - s.round_trip_time for s in selection.server_descriptions) + fastest = min(s.round_trip_time for s in selection.server_descriptions) threshold = self._topology_settings.local_threshold_ms / 1000.0 - return [s for s in selection.server_descriptions - if (s.round_trip_time - fastest) <= threshold] + return [ + s for s in selection.server_descriptions if (s.round_trip_time - fastest) <= threshold + ] def apply_selector( self, selector: Any, address: Optional[_Address] = None, - custom_selector: Optional[_ServerSelector] = None + custom_selector: Optional[_ServerSelector] = None, ) -> List[ServerDescription]: """List of servers matching the provided selector(s). @@ -273,22 +282,20 @@ def apply_selector( .. 
versionadded:: 3.4 """ - if getattr(selector, 'min_wire_version', 0): + if getattr(selector, "min_wire_version", 0): common_wv = self.common_wire_version if common_wv and common_wv < selector.min_wire_version: raise ConfigurationError( "%s requires min wire version %d, but topology's min" - " wire version is %d" % (selector, - selector.min_wire_version, - common_wv)) + " wire version is %d" % (selector, selector.min_wire_version, common_wv) + ) if isinstance(selector, _AggWritePref): selector.selection_hook(self) if self.topology_type == TOPOLOGY_TYPE.Unknown: return [] - elif self.topology_type in (TOPOLOGY_TYPE.Single, - TOPOLOGY_TYPE.LoadBalanced): + elif self.topology_type in (TOPOLOGY_TYPE.Single, TOPOLOGY_TYPE.LoadBalanced): # Ignore selectors for standalone and load balancer mode. return self.known_servers if address: @@ -304,10 +311,11 @@ def apply_selector( # Apply custom selector followed by localThresholdMS. if custom_selector is not None and selection: selection = selection.with_server_descriptions( - custom_selector(selection.server_descriptions)) + custom_selector(selection.server_descriptions) + ) return self._apply_local_threshold(selection) - def has_readable_server(self, read_preference: _ServerMode =ReadPreference.PRIMARY) -> bool: + def has_readable_server(self, read_preference: _ServerMode = ReadPreference.PRIMARY) -> bool: """Does this topology have any readable servers available matching the given read preference? @@ -336,11 +344,13 @@ def has_writable_server(self) -> bool: def __repr__(self): # Sort the servers by address. - servers = sorted(self._server_descriptions.values(), - key=lambda sd: sd.address) + servers = sorted(self._server_descriptions.values(), key=lambda sd: sd.address) return "<%s id: %s, topology_type: %s, servers: %r>" % ( - self.__class__.__name__, self._topology_settings._topology_id, - self.topology_type_name, servers) + self.__class__.__name__, + self._topology_settings._topology_id, + self.topology_type_name, + servers, + ) # If topology type is Unknown and we receive a hello response, what should @@ -386,12 +396,12 @@ def updated_topology_description( if topology_type == TOPOLOGY_TYPE.Single: # Set server type to Unknown if replica set name does not match. - if (set_name is not None and - set_name != server_description.replica_set_name): + if set_name is not None and set_name != server_description.replica_set_name: error = ConfigurationError( "client is configured to connect to a replica set named " - "'%s' but this node belongs to a set named '%s'" % ( - set_name, server_description.replica_set_name)) + "'%s' but this node belongs to a set named '%s'" + % (set_name, server_description.replica_set_name) + ) sds[address] = server_description.to_unknown(error=error) # Single type never changes. 
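# has_readable_server() above reduces to apply_selector() plus a read
# preference. A sketch (td is a hypothetical TopologyDescription):
#
#     from pymongo.read_preferences import ReadPreference
#
#     if td.has_readable_server(ReadPreference.SECONDARY_PREFERRED):
#         ...  # at least one secondary (or the primary) is selectable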
return TopologyDescription( @@ -400,7 +410,8 @@ def updated_topology_description( set_name, max_set_version, max_election_id, - topology_description._topology_settings) + topology_description._topology_settings, + ) if topology_type == TOPOLOGY_TYPE.Unknown: if server_type in (SERVER_TYPE.Standalone, SERVER_TYPE.LoadBalancer): @@ -421,21 +432,14 @@ def updated_topology_description( sds.pop(address) elif server_type == SERVER_TYPE.RSPrimary: - (topology_type, - set_name, - max_set_version, - max_election_id) = _update_rs_from_primary(sds, - set_name, - server_description, - max_set_version, - max_election_id) - - elif server_type in ( - SERVER_TYPE.RSSecondary, - SERVER_TYPE.RSArbiter, - SERVER_TYPE.RSOther): + (topology_type, set_name, max_set_version, max_election_id) = _update_rs_from_primary( + sds, set_name, server_description, max_set_version, max_election_id + ) + + elif server_type in (SERVER_TYPE.RSSecondary, SERVER_TYPE.RSArbiter, SERVER_TYPE.RSOther): topology_type, set_name = _update_rs_no_primary_from_member( - sds, set_name, server_description) + sds, set_name, server_description + ) elif topology_type == TOPOLOGY_TYPE.ReplicaSetWithPrimary: if server_type in (SERVER_TYPE.Standalone, SERVER_TYPE.Mongos): @@ -443,33 +447,26 @@ def updated_topology_description( topology_type = _check_has_primary(sds) elif server_type == SERVER_TYPE.RSPrimary: - (topology_type, - set_name, - max_set_version, - max_election_id) = _update_rs_from_primary(sds, - set_name, - server_description, - max_set_version, - max_election_id) - - elif server_type in ( - SERVER_TYPE.RSSecondary, - SERVER_TYPE.RSArbiter, - SERVER_TYPE.RSOther): - topology_type = _update_rs_with_primary_from_member( - sds, set_name, server_description) + (topology_type, set_name, max_set_version, max_election_id) = _update_rs_from_primary( + sds, set_name, server_description, max_set_version, max_election_id + ) + + elif server_type in (SERVER_TYPE.RSSecondary, SERVER_TYPE.RSArbiter, SERVER_TYPE.RSOther): + topology_type = _update_rs_with_primary_from_member(sds, set_name, server_description) else: # Server type is Unknown or RSGhost: did we just lose the primary? topology_type = _check_has_primary(sds) # Return updated copy. - return TopologyDescription(topology_type, - sds, - set_name, - max_set_version, - max_election_id, - topology_description._topology_settings) + return TopologyDescription( + topology_type, + sds, + set_name, + max_set_version, + max_election_id, + topology_description._topology_settings, + ) def _updated_topology_description_srv_polling(topology_description, seedlist): @@ -487,7 +484,6 @@ def _updated_topology_description_srv_polling(topology_description, seedlist): if set(sds.keys()) == set(seedlist): return topology_description - # Remove SDs corresponding to servers no longer part of the SRV record. for address in list(sds.keys()): if address not in seedlist: @@ -510,15 +506,13 @@ def _updated_topology_description_srv_polling(topology_description, seedlist): topology_description.replica_set_name, topology_description.max_set_version, topology_description.max_election_id, - topology_description._topology_settings) + topology_description._topology_settings, + ) def _update_rs_from_primary( - sds, - replica_set_name, - server_description, - max_set_version, - max_election_id): + sds, replica_set_name, server_description, max_set_version, max_election_id +): """Update topology description from a primary's hello response. 
Pass in a dict of ServerDescriptions, current replica set name, the @@ -535,35 +529,33 @@ def _update_rs_from_primary( # We found a primary but it doesn't have the replica_set_name # provided by the user. sds.pop(server_description.address) - return (_check_has_primary(sds), - replica_set_name, - max_set_version, - max_election_id) + return (_check_has_primary(sds), replica_set_name, max_set_version, max_election_id) max_election_tuple = max_set_version, max_election_id if None not in server_description.election_tuple: - if (None not in max_election_tuple and - max_election_tuple > server_description.election_tuple): + if ( + None not in max_election_tuple + and max_election_tuple > server_description.election_tuple + ): # Stale primary, set to type Unknown. sds[server_description.address] = server_description.to_unknown() - return (_check_has_primary(sds), - replica_set_name, - max_set_version, - max_election_id) + return (_check_has_primary(sds), replica_set_name, max_set_version, max_election_id) max_election_id = server_description.election_id - if (server_description.set_version is not None and - (max_set_version is None or - server_description.set_version > max_set_version)): + if server_description.set_version is not None and ( + max_set_version is None or server_description.set_version > max_set_version + ): max_set_version = server_description.set_version # We've heard from the primary. Is it the same primary as before? for server in sds.values(): - if (server.server_type is SERVER_TYPE.RSPrimary - and server.address != server_description.address): + if ( + server.server_type is SERVER_TYPE.RSPrimary + and server.address != server_description.address + ): # Reset old primary's type to Unknown. sds[server.address] = server.to_unknown() @@ -582,16 +574,10 @@ def _update_rs_from_primary( # If the host list differs from the seed list, we may not have a primary # after all. - return (_check_has_primary(sds), - replica_set_name, - max_set_version, - max_election_id) + return (_check_has_primary(sds), replica_set_name, max_set_version, max_election_id) -def _update_rs_with_primary_from_member( - sds, - replica_set_name, - server_description): +def _update_rs_with_primary_from_member(sds, replica_set_name, server_description): """RS with known primary. Process a response from a non-primary. Pass in a dict of ServerDescriptions, current replica set name, and the @@ -603,18 +589,14 @@ def _update_rs_with_primary_from_member( if replica_set_name != server_description.replica_set_name: sds.pop(server_description.address) - elif (server_description.me and - server_description.address != server_description.me): + elif server_description.me and server_description.address != server_description.me: sds.pop(server_description.address) # Had this member been the primary? return _check_has_primary(sds) -def _update_rs_no_primary_from_member( - sds, - replica_set_name, - server_description): +def _update_rs_no_primary_from_member(sds, replica_set_name, server_description): """RS without known primary. Update from a non-primary's response. 
Pass in a dict of ServerDescriptions, current replica set name, and the @@ -636,8 +618,7 @@ def _update_rs_no_primary_from_member( if address not in sds: sds[address] = ServerDescription(address) - if (server_description.me and - server_description.address != server_description.me): + if server_description.me and server_description.address != server_description.me: sds.pop(server_description.address) return topology_type, replica_set_name diff --git a/pymongo/typings.py b/pymongo/typings.py index 767eed36c5..263b591e24 100644 --- a/pymongo/typings.py +++ b/pymongo/typings.py @@ -13,8 +13,20 @@ # limitations under the License. """Type aliases used by PyMongo""" -from typing import (TYPE_CHECKING, Any, Dict, List, Mapping, MutableMapping, Optional, - Sequence, Tuple, Type, TypeVar, Union) +from typing import ( + TYPE_CHECKING, + Any, + Dict, + List, + Mapping, + MutableMapping, + Optional, + Sequence, + Tuple, + Type, + TypeVar, + Union, +) if TYPE_CHECKING: from bson.raw_bson import RawBSONDocument @@ -26,4 +38,6 @@ _CollationIn = Union[Mapping[str, Any], "Collation"] _DocumentIn = Union[MutableMapping[str, Any], "RawBSONDocument"] _Pipeline = Sequence[Mapping[str, Any]] -_DocumentType = TypeVar('_DocumentType', Mapping[str, Any], MutableMapping[str, Any], Dict[str, Any]) +_DocumentType = TypeVar( + "_DocumentType", Mapping[str, Any], MutableMapping[str, Any], Dict[str, Any] +) diff --git a/pymongo/uri_parser.py b/pymongo/uri_parser.py index c213f4217c..76c6e4d513 100644 --- a/pymongo/uri_parser.py +++ b/pymongo/uri_parser.py @@ -17,20 +17,33 @@ import re import sys import warnings -from typing import (Any, Dict, List, Mapping, MutableMapping, Optional, Tuple, - Union, cast) +from typing import ( + Any, + Dict, + List, + Mapping, + MutableMapping, + Optional, + Tuple, + Union, + cast, +) from urllib.parse import unquote_plus from pymongo.client_options import _parse_ssl_options -from pymongo.common import (INTERNAL_URI_OPTION_NAME_MAP, SRV_SERVICE_NAME, - URI_OPTIONS_DEPRECATION_MAP, - _CaseInsensitiveDictionary, get_validated_options) +from pymongo.common import ( + INTERNAL_URI_OPTION_NAME_MAP, + SRV_SERVICE_NAME, + URI_OPTIONS_DEPRECATION_MAP, + _CaseInsensitiveDictionary, + get_validated_options, +) from pymongo.errors import ConfigurationError, InvalidURI from pymongo.srv_resolver import _HAVE_DNSPYTHON, _SrvResolver -SCHEME = 'mongodb://' +SCHEME = "mongodb://" SCHEME_LEN = len(SCHEME) -SRV_SCHEME = 'mongodb+srv://' +SRV_SCHEME = "mongodb+srv://" SRV_SCHEME_LEN = len(SRV_SCHEME) DEFAULT_PORT = 27017 @@ -43,14 +56,15 @@ def _unquoted_percent(s): and '%E2%85%A8' but cannot have unquoted percent like '%foo'. """ for i in range(len(s)): - if s[i] == '%': - sub = s[i:i+3] + if s[i] == "%": + sub = s[i : i + 3] # If unquoting yields the same string this means there was an # unquoted %. if unquote_plus(sub) == sub: return True return False + def parse_userinfo(userinfo: str) -> Tuple[str, str]: """Validates the format of user information in a MongoDB URI. 
    Reserved characters that are gen-delimiters (":", "/", "?", "#", "[",
@@ -62,10 +76,11 @@ def parse_userinfo(userinfo: str) -> Tuple[str, str]:
     :Parameters:
         - `userinfo`: A string of the form <username>:<password>
     """
-    if ('@' in userinfo or userinfo.count(':') > 1 or
-            _unquoted_percent(userinfo)):
-        raise InvalidURI("Username and password must be escaped according to "
-                         "RFC 3986, use urllib.parse.quote_plus")
+    if "@" in userinfo or userinfo.count(":") > 1 or _unquoted_percent(userinfo):
+        raise InvalidURI(
+            "Username and password must be escaped according to "
+            "RFC 3986, use urllib.parse.quote_plus"
+        )

     user, _, passwd = userinfo.partition(":")
     # No password is expected with GSSAPI authentication.
@@ -75,7 +90,9 @@ def parse_userinfo(userinfo: str) -> Tuple[str, str]:
     return unquote_plus(user), unquote_plus(passwd)


-def parse_ipv6_literal_host(entity: str, default_port: Optional[int]) -> Tuple[str, Optional[Union[str, int]]]:
+def parse_ipv6_literal_host(
+    entity: str, default_port: Optional[int]
+) -> Tuple[str, Optional[Union[str, int]]]:
     """Validates an IPv6 literal host:port string.

     Returns a 2-tuple of IPv6 literal followed by port where
@@ -87,17 +104,19 @@ def parse_ipv6_literal_host(entity: str, default_port: Optional[int]) -> Tuple[s
         - `default_port`: The port number to use when one wasn't specified
           in entity.
     """
-    if entity.find(']') == -1:
-        raise ValueError("an IPv6 address literal must be "
-                         "enclosed in '[' and ']' according "
-                         "to RFC 2732.")
-    i = entity.find(']:')
+    if entity.find("]") == -1:
+        raise ValueError(
+            "an IPv6 address literal must be " "enclosed in '[' and ']' according " "to RFC 2732."
+        )
+    i = entity.find("]:")
     if i == -1:
         return entity[1:-1], default_port
-    return entity[1: i], entity[i + 2:]
+    return entity[1:i], entity[i + 2 :]


-def parse_host(entity: str, default_port: Optional[int] = DEFAULT_PORT) -> Tuple[str, Optional[int]]:
+def parse_host(
+    entity: str, default_port: Optional[int] = DEFAULT_PORT
+) -> Tuple[str, Optional[int]]:
     """Validates a host string

     Returns a 2-tuple of host followed by port where port is default_port
@@ -111,21 +130,22 @@ def parse_host(entity: str, default_port: Optional[int] = DEFAULT_PORT) -> Tuple
     """
     host = entity
     port: Optional[Union[str, int]] = default_port
-    if entity[0] == '[':
+    if entity[0] == "[":
         host, port = parse_ipv6_literal_host(entity, default_port)
     elif entity.endswith(".sock"):
         return entity, default_port
-    elif entity.find(':') != -1:
-        if entity.count(':') > 1:
-            raise ValueError("Reserved characters such as ':' must be "
-                             "escaped according to RFC 2396. An IPv6 "
-                             "address literal must be enclosed in '[' "
-                             "and ']' according to RFC 2732.")
-        host, port = host.split(':', 1)
+    elif entity.find(":") != -1:
+        if entity.count(":") > 1:
+            raise ValueError(
+                "Reserved characters such as ':' must be "
+                "escaped according to RFC 2396. An IPv6 "
+                "address literal must be enclosed in '[' "
+                "and ']' according to RFC 2732."
+            )
+        host, port = host.split(":", 1)
     if isinstance(port, str):
         if not port.isdigit() or int(port) > 65535 or int(port) <= 0:
-            raise ValueError("Port must be an integer between 0 and 65535: %s"
-                             % (port,))
+            raise ValueError("Port must be an integer between 0 and 65535: %s" % (port,))
         port = int(port)

     # Normalize hostname to lowercase, since DNS is case-insensitive:
@@ -139,7 +159,8 @@ def parse_host(entity: str, default_port: Optional[int] = DEFAULT_PORT) -> Tuple
 _IMPLICIT_TLSINSECURE_OPTS = {
     "tlsallowinvalidcertificates",
     "tlsallowinvalidhostnames",
-    "tlsdisableocspendpointcheck"}
+    "tlsdisableocspendpointcheck",
+}


 def _parse_options(opts, delim):
@@ -149,12 +170,12 @@ def _parse_options(opts, delim):
     options = _CaseInsensitiveDictionary()
     for uriopt in opts.split(delim):
         key, value = uriopt.split("=")
-        if key.lower() == 'readpreferencetags':
+        if key.lower() == "readpreferencetags":
             options.setdefault(key, []).append(value)
         else:
             if key in options:
                 warnings.warn("Duplicate URI option '%s'." % (key,))
-            if key.lower() == 'authmechanismproperties':
+            if key.lower() == "authmechanismproperties":
                 val = value
             else:
                 val = unquote_plus(value)
@@ -172,49 +193,47 @@ def _handle_security_options(options):
       MongoDB URI options.
     """
     # Implicitly defined options must not be explicitly specified.
-    tlsinsecure = options.get('tlsinsecure')
+    tlsinsecure = options.get("tlsinsecure")
     if tlsinsecure is not None:
         for opt in _IMPLICIT_TLSINSECURE_OPTS:
             if opt in options:
-                err_msg = ("URI options %s and %s cannot be specified "
-                           "simultaneously.")
-                raise InvalidURI(err_msg % (
-                    options.cased_key('tlsinsecure'), options.cased_key(opt)))
+                err_msg = "URI options %s and %s cannot be specified " "simultaneously."
+                raise InvalidURI(
+                    err_msg % (options.cased_key("tlsinsecure"), options.cased_key(opt))
+                )

     # Handle co-occurrence of OCSP & tlsAllowInvalidCertificates options.
-    tlsallowinvalidcerts = options.get('tlsallowinvalidcertificates')
+    tlsallowinvalidcerts = options.get("tlsallowinvalidcertificates")
     if tlsallowinvalidcerts is not None:
-        if 'tlsdisableocspendpointcheck' in options:
-            err_msg = ("URI options %s and %s cannot be specified "
-                       "simultaneously.")
-            raise InvalidURI(err_msg % (
-                'tlsallowinvalidcertificates', options.cased_key(
-                    'tlsdisableocspendpointcheck')))
+        if "tlsdisableocspendpointcheck" in options:
+            err_msg = "URI options %s and %s cannot be specified " "simultaneously."
+            raise InvalidURI(
+                err_msg
+                % ("tlsallowinvalidcertificates", options.cased_key("tlsdisableocspendpointcheck"))
+            )
         if tlsallowinvalidcerts is True:
-            options['tlsdisableocspendpointcheck'] = True
+            options["tlsdisableocspendpointcheck"] = True

     # Handle co-occurrence of CRL and OCSP-related options.
-    tlscrlfile = options.get('tlscrlfile')
+    tlscrlfile = options.get("tlscrlfile")
     if tlscrlfile is not None:
-        for opt in ('tlsinsecure', 'tlsallowinvalidcertificates',
-                    'tlsdisableocspendpointcheck'):
+        for opt in ("tlsinsecure", "tlsallowinvalidcertificates", "tlsdisableocspendpointcheck"):
             if options.get(opt) is True:
-                err_msg = ("URI option %s=True cannot be specified when "
-                           "CRL checking is enabled.")
+                err_msg = "URI option %s=True cannot be specified when " "CRL checking is enabled."
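# End to end, the guards in _handle_security_options() above reject
# contradictory TLS settings. A sketch of the observable behavior
# (assumes the options arrive through a URI option string routed via
# split_options()):
#
#     split_options("tlsInsecure=true&tlsAllowInvalidCertificates=true")
#     # InvalidURI: URI options tlsInsecure and
#     # tlsAllowInvalidCertificates cannot be specified simultaneously.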
raise InvalidURI(err_msg % (opt,)) - if 'ssl' in options and 'tls' in options: + if "ssl" in options and "tls" in options: + def truth_value(val): - if val in ('true', 'false'): - return val == 'true' + if val in ("true", "false"): + return val == "true" if isinstance(val, bool): return val return val - if truth_value(options.get('ssl')) != truth_value(options.get('tls')): - err_msg = ("Can not specify conflicting values for URI options %s " - "and %s.") - raise InvalidURI(err_msg % ( - options.cased_key('ssl'), options.cased_key('tls'))) + + if truth_value(options.get("ssl")) != truth_value(options.get("tls")): + err_msg = "Can not specify conflicting values for URI options %s " "and %s." + raise InvalidURI(err_msg % (options.cased_key("ssl"), options.cased_key("tls"))) return options @@ -231,26 +250,30 @@ def _handle_option_deprecations(options): for optname in list(options): if optname in URI_OPTIONS_DEPRECATION_MAP: mode, message = URI_OPTIONS_DEPRECATION_MAP[optname] - if mode == 'renamed': + if mode == "renamed": newoptname = message if newoptname in options: - warn_msg = ("Deprecated option '%s' ignored in favor of " - "'%s'.") + warn_msg = "Deprecated option '%s' ignored in favor of " "'%s'." warnings.warn( - warn_msg % (options.cased_key(optname), - options.cased_key(newoptname)), - DeprecationWarning, stacklevel=2) + warn_msg % (options.cased_key(optname), options.cased_key(newoptname)), + DeprecationWarning, + stacklevel=2, + ) options.pop(optname) continue warn_msg = "Option '%s' is deprecated, use '%s' instead." warnings.warn( warn_msg % (options.cased_key(optname), newoptname), - DeprecationWarning, stacklevel=2) - elif mode == 'removed': + DeprecationWarning, + stacklevel=2, + ) + elif mode == "removed": warn_msg = "Option '%s' is deprecated. %s." warnings.warn( warn_msg % (options.cased_key(optname), message), - DeprecationWarning, stacklevel=2) + DeprecationWarning, + stacklevel=2, + ) return options @@ -264,7 +287,7 @@ def _normalize_options(options): MongoDB URI options. """ # Expand the tlsInsecure option. - tlsinsecure = options.get('tlsinsecure') + tlsinsecure = options.get("tlsinsecure") if tlsinsecure is not None: for opt in _IMPLICIT_TLSINSECURE_OPTS: # Implicit options are logically the same as tlsInsecure. @@ -294,7 +317,9 @@ def validate_options(opts: Mapping[str, Any], warn: bool = False) -> MutableMapp return get_validated_options(opts, warn) -def split_options(opts: str, validate: bool = True, warn: bool = False, normalize: bool = True) -> MutableMapping[str, Any]: +def split_options( + opts: str, validate: bool = True, warn: bool = False, normalize: bool = True +) -> MutableMapping[str, Any]: """Takes the options portion of a MongoDB URI, validates each option and returns the options in a dictionary. @@ -332,14 +357,15 @@ def split_options(opts: str, validate: bool = True, warn: bool = False, normaliz if validate: options = validate_options(options, warn) - if options.get('authsource') == '': - raise InvalidURI( - "the authSource database cannot be an empty string") + if options.get("authsource") == "": + raise InvalidURI("the authSource database cannot be an empty string") return options -def split_hosts(hosts: str, default_port: Optional[int] = DEFAULT_PORT) -> List[Tuple[str, Optional[int]]]: +def split_hosts( + hosts: str, default_port: Optional[int] = DEFAULT_PORT +) -> List[Tuple[str, Optional[int]]]: """Takes a string of the form host1[:port],host2[:port]... and splits it into (host, port) tuples. If [:port] isn't present the default_port is used. 
@@ -353,13 +379,12 @@ def split_hosts(hosts: str, default_port: Optional[int] = DEFAULT_PORT) -> List[ for a host. """ nodes = [] - for entity in hosts.split(','): + for entity in hosts.split(","): if not entity: - raise ConfigurationError("Empty host " - "(or extra comma in host list).") + raise ConfigurationError("Empty host " "(or extra comma in host list).") port = default_port # Unix socket entities don't have ports - if entity.endswith('.sock'): + if entity.endswith(".sock"): port = None nodes.append(parse_host(entity, port)) return nodes @@ -367,29 +392,25 @@ def split_hosts(hosts: str, default_port: Optional[int] = DEFAULT_PORT) -> List[ # Prohibited characters in database name. DB names also can't have ".", but for # backward-compat we allow "db.collection" in URI. -_BAD_DB_CHARS = re.compile('[' + re.escape(r'/ "$') + ']') +_BAD_DB_CHARS = re.compile("[" + re.escape(r'/ "$') + "]") _ALLOWED_TXT_OPTS = frozenset( - ['authsource', 'authSource', 'replicaset', 'replicaSet', 'loadbalanced', - 'loadBalanced']) + ["authsource", "authSource", "replicaset", "replicaSet", "loadbalanced", "loadBalanced"] +) def _check_options(nodes, options): # Ensure directConnection was not True if there are multiple seeds. - if len(nodes) > 1 and options.get('directconnection'): - raise ConfigurationError( - 'Cannot specify multiple hosts with directConnection=true') + if len(nodes) > 1 and options.get("directconnection"): + raise ConfigurationError("Cannot specify multiple hosts with directConnection=true") - if options.get('loadbalanced'): + if options.get("loadbalanced"): if len(nodes) > 1: - raise ConfigurationError( - 'Cannot specify multiple hosts with loadBalanced=true') - if options.get('directconnection'): - raise ConfigurationError( - 'Cannot specify directConnection=true with loadBalanced=true') - if options.get('replicaset'): - raise ConfigurationError( - 'Cannot specify replicaSet with loadBalanced=true') + raise ConfigurationError("Cannot specify multiple hosts with loadBalanced=true") + if options.get("directconnection"): + raise ConfigurationError("Cannot specify directConnection=true with loadBalanced=true") + if options.get("replicaset"): + raise ConfigurationError("Cannot specify replicaSet with loadBalanced=true") def parse_uri( @@ -400,7 +421,7 @@ def parse_uri( normalize: bool = True, connect_timeout: Optional[float] = None, srv_service_name: Optional[str] = None, - srv_max_hosts: Optional[int] = None + srv_max_hosts: Optional[int] = None, ) -> Dict[str, Any]: """Parse and validate a MongoDB URI. @@ -460,14 +481,16 @@ def parse_uri( python_path = sys.executable or "python" raise ConfigurationError( 'The "dnspython" module must be ' - 'installed to use mongodb+srv:// URIs. ' - 'To fix this error install pymongo with the srv extra:\n ' - '%s -m pip install "pymongo[srv]"' % (python_path)) + "installed to use mongodb+srv:// URIs. 
" + "To fix this error install pymongo with the srv extra:\n " + '%s -m pip install "pymongo[srv]"' % (python_path) + ) is_srv = True scheme_free = uri[SRV_SCHEME_LEN:] else: - raise InvalidURI("Invalid URI scheme: URI must " - "begin with '%s' or '%s'" % (SCHEME, SRV_SCHEME)) + raise InvalidURI( + "Invalid URI scheme: URI must " "begin with '%s' or '%s'" % (SCHEME, SRV_SCHEME) + ) if not scheme_free: raise InvalidURI("Must provide at least one hostname or IP.") @@ -478,21 +501,20 @@ def parse_uri( collection = None options = _CaseInsensitiveDictionary() - host_part, _, path_part = scheme_free.partition('/') + host_part, _, path_part = scheme_free.partition("/") if not host_part: host_part = path_part path_part = "" - if not path_part and '?' in host_part: - raise InvalidURI("A '/' is required between " - "the host list and any options.") + if not path_part and "?" in host_part: + raise InvalidURI("A '/' is required between " "the host list and any options.") if path_part: - dbase, _, opts = path_part.partition('?') + dbase, _, opts = path_part.partition("?") if dbase: dbase = unquote_plus(dbase) - if '.' in dbase: - dbase, collection = dbase.split('.', 1) + if "." in dbase: + dbase, collection = dbase.split(".", 1) if _BAD_DB_CHARS.search(dbase): raise InvalidURI('Bad database name "%s"' % dbase) else: @@ -502,77 +524,74 @@ def parse_uri( options.update(split_options(opts, validate, warn, normalize)) if srv_service_name is None: srv_service_name = options.get("srvServiceName", SRV_SERVICE_NAME) - if '@' in host_part: - userinfo, _, hosts = host_part.rpartition('@') + if "@" in host_part: + userinfo, _, hosts = host_part.rpartition("@") user, passwd = parse_userinfo(userinfo) else: hosts = host_part - if '/' in hosts: - raise InvalidURI("Any '/' in a unix domain socket must be" - " percent-encoded: %s" % host_part) + if "/" in hosts: + raise InvalidURI( + "Any '/' in a unix domain socket must be" " percent-encoded: %s" % host_part + ) hosts = unquote_plus(hosts) fqdn = None srv_max_hosts = srv_max_hosts or options.get("srvMaxHosts") if is_srv: - if options.get('directConnection'): + if options.get("directConnection"): raise ConfigurationError( - "Cannot specify directConnection=true with " - "%s URIs" % (SRV_SCHEME,)) + "Cannot specify directConnection=true with " "%s URIs" % (SRV_SCHEME,) + ) nodes = split_hosts(hosts, default_port=None) if len(nodes) != 1: - raise InvalidURI( - "%s URIs must include one, " - "and only one, hostname" % (SRV_SCHEME,)) + raise InvalidURI("%s URIs must include one, " "and only one, hostname" % (SRV_SCHEME,)) fqdn, port = nodes[0] if port is not None: - raise InvalidURI( - "%s URIs must not include a port number" % (SRV_SCHEME,)) + raise InvalidURI("%s URIs must not include a port number" % (SRV_SCHEME,)) # Use the connection timeout. connectTimeoutMS passed as a keyword # argument overrides the same option passed in the connection string. 
connect_timeout = connect_timeout or options.get("connectTimeoutMS") - dns_resolver = _SrvResolver(fqdn, connect_timeout, srv_service_name, - srv_max_hosts) + dns_resolver = _SrvResolver(fqdn, connect_timeout, srv_service_name, srv_max_hosts) nodes = dns_resolver.get_hosts() dns_options = dns_resolver.get_options() if dns_options: - parsed_dns_options = split_options( - dns_options, validate, warn, normalize) + parsed_dns_options = split_options(dns_options, validate, warn, normalize) if set(parsed_dns_options) - _ALLOWED_TXT_OPTS: raise ConfigurationError( - "Only authSource, replicaSet, and loadBalanced are " - "supported from DNS") + "Only authSource, replicaSet, and loadBalanced are " "supported from DNS" + ) for opt, val in parsed_dns_options.items(): if opt not in options: options[opt] = val if options.get("loadBalanced") and srv_max_hosts: - raise InvalidURI( - "You cannot specify loadBalanced with srvMaxHosts") + raise InvalidURI("You cannot specify loadBalanced with srvMaxHosts") if options.get("replicaSet") and srv_max_hosts: raise InvalidURI("You cannot specify replicaSet with srvMaxHosts") if "tls" not in options and "ssl" not in options: - options["tls"] = True if validate else 'true' + options["tls"] = True if validate else "true" elif not is_srv and options.get("srvServiceName") is not None: - raise ConfigurationError("The srvServiceName option is only allowed " - "with 'mongodb+srv://' URIs") + raise ConfigurationError( + "The srvServiceName option is only allowed " "with 'mongodb+srv://' URIs" + ) elif not is_srv and srv_max_hosts: - raise ConfigurationError("The srvMaxHosts option is only allowed " - "with 'mongodb+srv://' URIs") + raise ConfigurationError( + "The srvMaxHosts option is only allowed " "with 'mongodb+srv://' URIs" + ) else: nodes = split_hosts(hosts, default_port=default_port) _check_options(nodes, options) return { - 'nodelist': nodes, - 'username': user, - 'password': passwd, - 'database': dbase, - 'collection': collection, - 'options': options, - 'fqdn': fqdn + "nodelist": nodes, + "username": user, + "password": passwd, + "database": dbase, + "collection": collection, + "options": options, + "fqdn": fqdn, } @@ -581,37 +600,39 @@ def _parse_kms_tls_options(kms_tls_options): if not kms_tls_options: return {} if not isinstance(kms_tls_options, dict): - raise TypeError('kms_tls_options must be a dict') + raise TypeError("kms_tls_options must be a dict") contexts = {} for provider, opts in kms_tls_options.items(): if not isinstance(opts, dict): raise TypeError(f'kms_tls_options["{provider}"] must be a dict') - opts.setdefault('tls', True) + opts.setdefault("tls", True) opts = _CaseInsensitiveDictionary(opts) opts = _handle_security_options(opts) opts = _normalize_options(opts) opts = validate_options(opts) ssl_context, allow_invalid_hostnames = _parse_ssl_options(opts) if ssl_context is None: - raise ConfigurationError('TLS is required for KMS providers') + raise ConfigurationError("TLS is required for KMS providers") if allow_invalid_hostnames: - raise ConfigurationError('Insecure TLS options prohibited') - - for n in ['tlsInsecure', - 'tlsAllowInvalidCertificates', - 'tlsAllowInvalidHostnames', - 'tlsDisableOCSPEndpointCheck', - 'tlsDisableCertificateRevocationCheck']: + raise ConfigurationError("Insecure TLS options prohibited") + + for n in [ + "tlsInsecure", + "tlsAllowInvalidCertificates", + "tlsAllowInvalidHostnames", + "tlsDisableOCSPEndpointCheck", + "tlsDisableCertificateRevocationCheck", + ]: if n in opts: - raise ConfigurationError( - f'Insecure 
TLS options prohibited: {n}') + raise ConfigurationError(f"Insecure TLS options prohibited: {n}") contexts[provider] = ssl_context return contexts -if __name__ == '__main__': +if __name__ == "__main__": import pprint import sys + try: pprint.pprint(parse_uri(sys.argv[1])) except InvalidURI as exc: diff --git a/pymongo/write_concern.py b/pymongo/write_concern.py index 5168948ee3..fea912d569 100644 --- a/pymongo/write_concern.py +++ b/pymongo/write_concern.py @@ -47,7 +47,13 @@ class WriteConcern(object): __slots__ = ("__document", "__acknowledged", "__server_default") - def __init__(self, w: Optional[Union[int, str]] = None, wtimeout: Optional[int] = None, j: Optional[bool] = None, fsync: Optional[bool] = None) -> None: + def __init__( + self, + w: Optional[Union[int, str]] = None, + wtimeout: Optional[int] = None, + j: Optional[bool] = None, + fsync: Optional[bool] = None, + ) -> None: self.__document: Dict[str, Any] = {} self.__acknowledged = True @@ -67,8 +73,7 @@ def __init__(self, w: Optional[Union[int, str]] = None, wtimeout: Optional[int] if not isinstance(fsync, bool): raise TypeError("fsync must be True or False") if j and fsync: - raise ConfigurationError("Can't set both j " - "and fsync at the same time") + raise ConfigurationError("Can't set both j " "and fsync at the same time") self.__document["fsync"] = fsync if w == 0 and j is True: @@ -108,8 +113,7 @@ def acknowledged(self) -> bool: return self.__acknowledged def __repr__(self): - return ("WriteConcern(%s)" % ( - ", ".join("%s=%s" % kvt for kvt in self.__document.items()),)) + return "WriteConcern(%s)" % (", ".join("%s=%s" % kvt for kvt in self.__document.items()),) def __eq__(self, other: Any) -> bool: if isinstance(other, WriteConcern): diff --git a/setup.py b/setup.py index fde9ae1b3f..5dbbdde22b 100755 --- a/setup.py +++ b/setup.py @@ -4,7 +4,6 @@ import sys import warnings - if sys.version_info[:2] < (3, 6): raise RuntimeError("Python version >= 3.6 required.") @@ -15,8 +14,8 @@ except ImportError: pass -from setuptools import setup, __version__ as _setuptools_version - +from setuptools import __version__ as _setuptools_version +from setuptools import setup if sys.version_info[:2] < (3, 10): from distutils.cmd import Command @@ -54,13 +53,14 @@ # generated by distutils for Apple provided pythons, allowing C extension # builds to complete without error. The inspiration comes from older # versions of distutils.sysconfig.get_config_vars. -if sys.platform == 'darwin' and 'clang' in platform.python_compiler().lower(): +if sys.platform == "darwin" and "clang" in platform.python_compiler().lower(): from distutils.sysconfig import get_config_vars + res = get_config_vars() - for key in ('CFLAGS', 'PY_CFLAGS'): + for key in ("CFLAGS", "PY_CFLAGS"): if key in res: flags = res[key] - flags = re.sub('-mno-fused-madd', '', flags) + flags = re.sub("-mno-fused-madd", "", flags) res[key] = flags @@ -69,11 +69,9 @@ class test(Command): user_options = [ ("test-module=", "m", "Discover tests in specified module"), - ("test-suite=", "s", - "Test suite to run (e.g. 'some_module.test_suite')"), + ("test-suite=", "s", "Test suite to run (e.g. 
'some_module.test_suite')"), ("failfast", "f", "Stop running tests on first failure or error"), - ("xunit-output=", "x", - "Generate a results directory with XUnit XML format") + ("xunit-output=", "x", "Generate a results directory with XUnit XML format"), ] def initialize_options(self): @@ -84,44 +82,42 @@ def initialize_options(self): def finalize_options(self): if self.test_suite is None and self.test_module is None: - self.test_module = 'test' + self.test_module = "test" elif self.test_module is not None and self.test_suite is not None: - raise Exception( - "You may specify a module or suite, but not both" - ) + raise Exception("You may specify a module or suite, but not both") def run(self): # Installing required packages, running egg_info and build_ext are # part of normal operation for setuptools.command.test.test if self.distribution.install_requires: - self.distribution.fetch_build_eggs( - self.distribution.install_requires) + self.distribution.fetch_build_eggs(self.distribution.install_requires) if self.distribution.tests_require: self.distribution.fetch_build_eggs(self.distribution.tests_require) if self.xunit_output: self.distribution.fetch_build_eggs(["unittest-xml-reporting"]) - self.run_command('egg_info') - build_ext_cmd = self.reinitialize_command('build_ext') + self.run_command("egg_info") + build_ext_cmd = self.reinitialize_command("build_ext") build_ext_cmd.inplace = 1 - self.run_command('build_ext') + self.run_command("build_ext") # Construct a TextTestRunner directly from the unittest imported from # test, which creates a TestResult that supports the 'addSkip' method. # setuptools will by default create a TextTestRunner that uses the old # TestResult class. - from test import unittest, PymongoTestRunner, test_cases + from test import PymongoTestRunner, test_cases, unittest + if self.test_suite is None: all_tests = unittest.defaultTestLoader.discover(self.test_module) suite = unittest.TestSuite() - suite.addTests(sorted(test_cases(all_tests), - key=lambda x: x.__module__)) + suite.addTests(sorted(test_cases(all_tests), key=lambda x: x.__module__)) else: - suite = unittest.defaultTestLoader.loadTestsFromName( - self.test_suite) + suite = unittest.defaultTestLoader.loadTestsFromName(self.test_suite) if self.xunit_output: from test import PymongoXMLTestRunner - runner = PymongoXMLTestRunner(verbosity=2, failfast=self.failfast, - output=self.xunit_output) + + runner = PymongoXMLTestRunner( + verbosity=2, failfast=self.failfast, output=self.xunit_output + ) else: runner = PymongoTestRunner(verbosity=2, failfast=self.failfast) result = runner.run(suite) @@ -132,8 +128,7 @@ class doc(Command): description = "generate or test documentation" - user_options = [("test", "t", - "run doctests instead of generating documentation")] + user_options = [("test", "t", "run doctests instead of generating documentation")] boolean_options = ["test"] @@ -146,16 +141,13 @@ def finalize_options(self): def run(self): if not _HAVE_SPHINX: - raise RuntimeError( - "You must install Sphinx to build or test the documentation.") + raise RuntimeError("You must install Sphinx to build or test the documentation.") if self.test: - path = os.path.join( - os.path.abspath('.'), "doc", "_build", "doctest") + path = os.path.join(os.path.abspath("."), "doc", "_build", "doctest") mode = "doctest" else: - path = os.path.join( - os.path.abspath('.'), "doc", "_build", version) + path = os.path.join(os.path.abspath("."), "doc", "_build", version) mode = "html" try: @@ -168,7 +160,7 @@ def run(self): # sphinx.main 
calls sys.exit when sphinx.build_main exists. # Call build_main directly so we can check status and print # the full path to the built docs. - if hasattr(sphinx, 'build_main'): + if hasattr(sphinx, "build_main"): status = sphinx.build_main(sphinx_args) else: status = sphinx.main(sphinx_args) @@ -176,8 +168,9 @@ def run(self): if status: raise RuntimeError("documentation step '%s' failed" % (mode,)) - sys.stdout.write("\nDocumentation step '%s' performed, results here:\n" - " %s/\n" % (mode, path)) + sys.stdout.write( + "\nDocumentation step '%s' performed, results here:\n" " %s/\n" % (mode, path) + ) class custom_build_ext(build_ext): @@ -234,11 +227,14 @@ def run(self): build_ext.run(self) except Exception: e = sys.exc_info()[1] - sys.stdout.write('%s\n' % str(e)) - warnings.warn(self.warning_message % ("Extension modules", - "There was an issue with " - "your platform configuration" - " - see above.")) + sys.stdout.write("%s\n" % str(e)) + warnings.warn( + self.warning_message + % ( + "Extension modules", + "There was an issue with " "your platform configuration" " - see above.", + ) + ) def build_extension(self, ext): name = ext.name @@ -246,68 +242,75 @@ def build_extension(self, ext): build_ext.build_extension(self, ext) except Exception: e = sys.exc_info()[1] - sys.stdout.write('%s\n' % str(e)) - warnings.warn(self.warning_message % ("The %s extension " - "module" % (name,), - "The output above " - "this warning shows how " - "the compilation " - "failed.")) - -ext_modules = [Extension('bson._cbson', - include_dirs=['bson'], - sources=['bson/_cbsonmodule.c', - 'bson/time64.c', - 'bson/buffer.c', - 'bson/encoding_helpers.c']), - Extension('pymongo._cmessage', - include_dirs=['bson'], - sources=['pymongo/_cmessagemodule.c', - 'bson/buffer.c'])] + sys.stdout.write("%s\n" % str(e)) + warnings.warn( + self.warning_message + % ( + "The %s extension " "module" % (name,), + "The output above " "this warning shows how " "the compilation " "failed.", + ) + ) + + +ext_modules = [ + Extension( + "bson._cbson", + include_dirs=["bson"], + sources=[ + "bson/_cbsonmodule.c", + "bson/time64.c", + "bson/buffer.c", + "bson/encoding_helpers.c", + ], + ), + Extension( + "pymongo._cmessage", + include_dirs=["bson"], + sources=["pymongo/_cmessagemodule.c", "bson/buffer.c"], + ), +] # PyOpenSSL 17.0.0 introduced support for OCSP. 17.1.0 introduced # a related feature we need. 17.2.0 fixes a bug # in set_default_verify_paths we should really avoid. # service_identity 18.1.0 introduced support for IP addr matching. pyopenssl_reqs = ["pyopenssl>=17.2.0", "requests<3.0.0", "service_identity>=18.1.0"] -if sys.platform in ('win32', 'darwin'): +if sys.platform in ("win32", "darwin"): # Fallback to certifi on Windows if we can't load CA certs from the system # store and just use certifi on macOS. 
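
A hedged aside on the requirements above: the PyPI distribution names differ from the importable module names, so a runtime sanity check for the "ocsp" extra might look like this (the distribution-to-module mapping is an assumption, e.g. pyopenssl imports as ``OpenSSL``)::

    import importlib

    for mod in ("OpenSSL", "requests", "service_identity"):
        try:
            importlib.import_module(mod)
        except ImportError as exc:
            print("missing 'ocsp' extra dependency: %s (%s)" % (mod, exc))
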
# https://www.pyopenssl.org/en/stable/api/ssl.html#OpenSSL.SSL.Context.set_default_verify_paths - pyopenssl_reqs.append('certifi') + pyopenssl_reqs.append("certifi") extras_require = { - 'encryption': ['pymongocrypt>=1.2.0,<2.0.0'], - 'ocsp': pyopenssl_reqs, - 'snappy': ['python-snappy'], - 'zstd': ['zstandard'], - 'aws': ['pymongo-auth-aws<2.0.0'], - 'srv': ["dnspython>=1.16.0,<3.0.0"] + "encryption": ["pymongocrypt>=1.2.0,<2.0.0"], + "ocsp": pyopenssl_reqs, + "snappy": ["python-snappy"], + "zstd": ["zstandard"], + "aws": ["pymongo-auth-aws<2.0.0"], + "srv": ["dnspython>=1.16.0,<3.0.0"], } # GSSAPI extras -if sys.platform == 'win32': - extras_require['gssapi'] = ["winkerberos>=0.5.0"] +if sys.platform == "win32": + extras_require["gssapi"] = ["winkerberos>=0.5.0"] else: - extras_require['gssapi'] = ["pykerberos"] + extras_require["gssapi"] = ["pykerberos"] -extra_opts = { - "packages": ["bson", "pymongo", "gridfs"] -} +extra_opts = {"packages": ["bson", "pymongo", "gridfs"]} if "--no_ext" in sys.argv: sys.argv.remove("--no_ext") -elif (sys.platform.startswith("java") or - sys.platform == "cli" or - "PyPy" in sys.version): - sys.stdout.write(""" +elif sys.platform.startswith("java") or sys.platform == "cli" or "PyPy" in sys.version: + sys.stdout.write( + """ *****************************************************\n The optional C extensions are currently not supported\n by this python implementation.\n *****************************************************\n -""") +""" + ) else: - extra_opts['ext_modules'] = ext_modules + extra_opts["ext_modules"] = ext_modules setup( name="pymongo", @@ -336,10 +339,9 @@ def build_extension(self, ext): "Programming Language :: Python :: 3.10", "Programming Language :: Python :: Implementation :: CPython", "Programming Language :: Python :: Implementation :: PyPy", - "Topic :: Database"], - cmdclass={"build_ext": custom_build_ext, - "doc": doc, - "test": test}, + "Topic :: Database", + ], + cmdclass={"build_ext": custom_build_ext, "doc": doc, "test": test}, extras_require=extras_require, **extra_opts ) diff --git a/test/__init__.py b/test/__init__.py index c02eb97949..32220cfff3 100644 --- a/test/__init__.py +++ b/test/__init__.py @@ -27,6 +27,7 @@ try: from xmlrunner import XMLTestRunner + HAVE_XML = True # ValueError is raised when version 3+ is installed on Jython 2.7. except (ImportError, ValueError): @@ -34,18 +35,19 @@ try: import ipaddress + HAVE_IPADDRESS = True except ImportError: HAVE_IPADDRESS = False from contextlib import contextmanager from functools import wraps +from test.version import Version from typing import Dict, no_type_check from unittest import SkipTest import pymongo import pymongo.errors - from bson.son import SON from pymongo import common, message from pymongo.common import partition_node @@ -55,7 +57,6 @@ from pymongo.server_api import ServerApi from pymongo.ssl_support import HAVE_SSL, _ssl from pymongo.uri_parser import parse_uri -from test.version import Version if HAVE_SSL: import ssl @@ -64,36 +65,34 @@ # Enable the fault handler to dump the traceback of each running thread # after a segfault. import faulthandler + faulthandler.enable() except ImportError: pass # Enable debug output for uncollectable objects. PyPy does not have set_debug. 
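
A minimal sketch of what ``DEBUG_UNCOLLECTABLE`` buys here: after a collection, objects the collector could not free land in ``gc.garbage``, which the ``teardown()`` helper later in this file asserts is empty::

    import gc

    gc.set_debug(gc.DEBUG_UNCOLLECTABLE)
    gc.collect()
    assert not gc.garbage, "uncollectable objects: %r" % (gc.garbage,)
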
-if hasattr(gc, 'set_debug'): +if hasattr(gc, "set_debug"): gc.set_debug( - gc.DEBUG_UNCOLLECTABLE | - getattr(gc, 'DEBUG_OBJECTS', 0) | - getattr(gc, 'DEBUG_INSTANCES', 0)) + gc.DEBUG_UNCOLLECTABLE | getattr(gc, "DEBUG_OBJECTS", 0) | getattr(gc, "DEBUG_INSTANCES", 0) + ) # The host and port of a single mongod or mongos, or the seed host # for a replica set. -host = os.environ.get("DB_IP", 'localhost') +host = os.environ.get("DB_IP", "localhost") port = int(os.environ.get("DB_PORT", 27017)) db_user = os.environ.get("DB_USER", "user") db_pwd = os.environ.get("DB_PASSWORD", "password") -CERT_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), - 'certificates') -CLIENT_PEM = os.environ.get('CLIENT_PEM', - os.path.join(CERT_PATH, 'client.pem')) -CA_PEM = os.environ.get('CA_PEM', os.path.join(CERT_PATH, 'ca.pem')) +CERT_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "certificates") +CLIENT_PEM = os.environ.get("CLIENT_PEM", os.path.join(CERT_PATH, "client.pem")) +CA_PEM = os.environ.get("CA_PEM", os.path.join(CERT_PATH, "ca.pem")) TLS_OPTIONS: Dict = dict(tls=True) if CLIENT_PEM: - TLS_OPTIONS['tlsCertificateKeyFile'] = CLIENT_PEM + TLS_OPTIONS["tlsCertificateKeyFile"] = CLIENT_PEM if CA_PEM: - TLS_OPTIONS['tlsCAFile'] = CA_PEM + TLS_OPTIONS["tlsCAFile"] = CA_PEM COMPRESSORS = os.environ.get("COMPRESSORS") MONGODB_API_VERSION = os.environ.get("MONGODB_API_VERSION") @@ -104,20 +103,21 @@ if TEST_LOADBALANCER: # Remove after PYTHON-2712 from pymongo import pool + pool._MOCK_SERVICE_ID = True res = parse_uri(SINGLE_MONGOS_LB_URI or "") - host, port = res['nodelist'][0] - db_user = res['username'] or db_user - db_pwd = res['password'] or db_pwd + host, port = res["nodelist"][0] + db_user = res["username"] or db_user + db_pwd = res["password"] or db_pwd elif TEST_SERVERLESS: TEST_LOADBALANCER = True res = parse_uri(SINGLE_MONGOS_LB_URI or "") - host, port = res['nodelist'][0] - db_user = res['username'] or db_user - db_pwd = res['password'] or db_pwd - TLS_OPTIONS = {'tls': True} + host, port = res["nodelist"][0] + db_user = res["username"] or db_user + db_pwd = res["password"] or db_pwd + TLS_OPTIONS = {"tls": True} # Spec says serverless tests must be run with compression. 
- COMPRESSORS = COMPRESSORS or 'zlib' + COMPRESSORS = COMPRESSORS or "zlib" def is_server_resolvable(): @@ -126,7 +126,7 @@ def is_server_resolvable(): socket.setdefaulttimeout(1) try: try: - socket.gethostbyname('server') + socket.gethostbyname("server") return True except socket.error: return False @@ -135,22 +135,23 @@ def is_server_resolvable(): def _create_user(authdb, user, pwd=None, roles=None, **kwargs): - cmd = SON([('createUser', user)]) + cmd = SON([("createUser", user)]) # X509 doesn't use a password if pwd: - cmd['pwd'] = pwd - cmd['roles'] = roles or ['root'] + cmd["pwd"] = pwd + cmd["roles"] = roles or ["root"] cmd.update(**kwargs) return authdb.command(cmd) class client_knobs(object): def __init__( - self, - heartbeat_frequency=None, - min_heartbeat_interval=None, - kill_cursor_frequency=None, - events_queue_frequency=None): + self, + heartbeat_frequency=None, + min_heartbeat_interval=None, + kill_cursor_frequency=None, + events_queue_frequency=None, + ): self.heartbeat_frequency = heartbeat_frequency self.min_heartbeat_interval = min_heartbeat_interval self.kill_cursor_frequency = kill_cursor_frequency @@ -182,7 +183,7 @@ def enable(self): common.EVENTS_QUEUE_FREQUENCY = self.events_queue_frequency self._enabled = True # Store the allocation traceback to catch non-disabled client_knobs. - self._stack = ''.join(traceback.format_stack()) + self._stack = "".join(traceback.format_stack()) def __enter__(self): self.enable() @@ -204,6 +205,7 @@ def make_wrapper(f): def wrap(*args, **kwargs): with self: return f(*args, **kwargs) + return wrap return make_wrapper(func) @@ -211,20 +213,23 @@ def wrap(*args, **kwargs): def __del__(self): if self._enabled: msg = ( - 'ERROR: client_knobs still enabled! HEARTBEAT_FREQUENCY=%s, ' - 'MIN_HEARTBEAT_INTERVAL=%s, KILL_CURSOR_FREQUENCY=%s, ' - 'EVENTS_QUEUE_FREQUENCY=%s, stack:\n%s' % ( + "ERROR: client_knobs still enabled! HEARTBEAT_FREQUENCY=%s, " + "MIN_HEARTBEAT_INTERVAL=%s, KILL_CURSOR_FREQUENCY=%s, " + "EVENTS_QUEUE_FREQUENCY=%s, stack:\n%s" + % ( common.HEARTBEAT_FREQUENCY, common.MIN_HEARTBEAT_INTERVAL, common.KILL_CURSOR_FREQUENCY, common.EVENTS_QUEUE_FREQUENCY, - self._stack)) + self._stack, + ) + ) self.disable() raise Exception(msg) def _all_users(db): - return set(u['user'] for u in db.command('usersInfo').get('users', [])) + return set(u["user"] for u in db.command("usersInfo").get("users", [])) class ClientContext(object): @@ -273,10 +278,10 @@ def client_options(self): """Return the MongoClient options for creating a duplicate client.""" opts = client_context.default_client_options.copy() if client_context.auth_enabled: - opts['username'] = db_user - opts['password'] = db_pwd + opts["username"] = db_user + opts["password"] = db_pwd if self.replica_set_name: - opts['replicaSet'] = self.replica_set_name + opts["replicaSet"] = self.replica_set_name return opts @property @@ -287,29 +292,26 @@ def hello(self): def _connect(self, host, port, **kwargs): # Jython takes a long time to connect. - if sys.platform.startswith('java'): + if sys.platform.startswith("java"): timeout_ms = 10000 else: timeout_ms = 5000 kwargs.update(self.default_client_options) - client = pymongo.MongoClient( - host, port, serverSelectionTimeoutMS=timeout_ms, **kwargs) + client = pymongo.MongoClient(host, port, serverSelectionTimeoutMS=timeout_ms, **kwargs) try: try: client.admin.command(HelloCompat.LEGACY_CMD) # Can we connect? 
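
The probe-then-reconnect pattern used in ``_connect`` above, reduced to a standalone hedged sketch (placeholder address; requires a reachable mongod)::

    from pymongo import MongoClient
    from pymongo.errors import ConnectionFailure

    client = MongoClient("localhost", 27017, serverSelectionTimeoutMS=5000)
    try:
        client.admin.command("ping")  # cheap connectivity round trip
    except ConnectionFailure as exc:
        print("cannot connect: %s" % (exc,))
    finally:
        client.close()
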
except pymongo.errors.OperationFailure as exc: # SERVER-32063 self.connection_attempts.append( - 'connected client %r, but legacy hello failed: %s' % ( - client, exc)) + "connected client %r, but legacy hello failed: %s" % (client, exc) + ) else: - self.connection_attempts.append( - 'successfully connected client %r' % (client,)) + self.connection_attempts.append("successfully connected client %r" % (client,)) # If connected, then return client with default timeout return pymongo.MongoClient(host, port, **kwargs) except pymongo.errors.ConnectionFailure as exc: - self.connection_attempts.append( - 'failed to connect client %r: %s' % (client, exc)) + self.connection_attempts.append("failed to connect client %r: %s" % (client, exc)) return None finally: client.close() @@ -320,12 +322,11 @@ def _init_client(self): if self.client is not None: # Return early when connected to dataLake as mongohoused does not # support the getCmdLineOpts command and is tested without TLS. - build_info = self.client.admin.command('buildInfo') - if 'dataLake' in build_info: + build_info = self.client.admin.command("buildInfo") + if "dataLake" in build_info: self.is_data_lake = True self.auth_enabled = True - self.client = self._connect( - host, port, username=db_user, password=db_pwd) + self.client = self._connect(host, port, username=db_user, password=db_pwd) self.connected = True return @@ -344,11 +345,11 @@ def _init_client(self): self.auth_enabled = True else: try: - self.cmd_line = self.client.admin.command('getCmdLineOpts') + self.cmd_line = self.client.admin.command("getCmdLineOpts") except pymongo.errors.OperationFailure as e: assert e.details is not None - msg = e.details.get('errmsg', '') - if e.code == 13 or 'unauthorized' in msg or 'login' in msg: + msg = e.details.get("errmsg", "") + if e.code == 13 or "unauthorized" in msg or "login" in msg: # Unauthorized. self.auth_enabled = True else: @@ -363,26 +364,30 @@ def _init_client(self): _create_user(self.client.admin, db_user, db_pwd) self.client = self._connect( - host, port, username=db_user, password=db_pwd, + host, + port, + username=db_user, + password=db_pwd, replicaSet=self.replica_set_name, - **self.default_client_options) + **self.default_client_options + ) # May not have this if OperationFailure was raised earlier. - self.cmd_line = self.client.admin.command('getCmdLineOpts') + self.cmd_line = self.client.admin.command("getCmdLineOpts") if self.serverless: self.server_status = {} else: - self.server_status = self.client.admin.command('serverStatus') + self.server_status = self.client.admin.command("serverStatus") if self.storage_engine == "mmapv1": # MMAPv1 does not support retryWrites=True. - self.default_client_options['retryWrites'] = False + self.default_client_options["retryWrites"] = False hello = self.hello - self.sessions_enabled = 'logicalSessionTimeoutMinutes' in hello + self.sessions_enabled = "logicalSessionTimeoutMinutes" in hello - if 'setName' in hello: - self.replica_set_name = str(hello['setName']) + if "setName" in hello: + self.replica_set_name = str(hello["setName"]) self.is_rs = True if self.auth_enabled: # It doesn't matter which member we use as the seed here. 
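
The replica-set probe above, as a hedged standalone sketch ("hello" is the modern command name; older servers only answer the legacy "ismaster" spelling behind ``HelloCompat.LEGACY_CMD``)::

    from pymongo import MongoClient

    client = MongoClient("localhost", 27017)  # placeholder seed address
    hello = client.admin.command("hello")
    if "setName" in hello:
        print("replica set %s, members: %s" % (hello["setName"], hello.get("hosts")))
    client.close()
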
@@ -392,23 +397,19 @@ def _init_client(self): username=db_user, password=db_pwd, replicaSet=self.replica_set_name, - **self.default_client_options) + **self.default_client_options + ) else: self.client = pymongo.MongoClient( - host, - port, - replicaSet=self.replica_set_name, - **self.default_client_options) + host, port, replicaSet=self.replica_set_name, **self.default_client_options + ) # Get the authoritative hello result from the primary. self._hello = None hello = self.hello - nodes = [partition_node(node.lower()) - for node in hello.get('hosts', [])] - nodes.extend([partition_node(node.lower()) - for node in hello.get('passives', [])]) - nodes.extend([partition_node(node.lower()) - for node in hello.get('arbiters', [])]) + nodes = [partition_node(node.lower()) for node in hello.get("hosts", [])] + nodes.extend([partition_node(node.lower()) for node in hello.get("passives", [])]) + nodes.extend([partition_node(node.lower()) for node in hello.get("arbiters", [])]) self.nodes = set(nodes) else: self.nodes = set([(host, port)]) @@ -417,40 +418,38 @@ def _init_client(self): if self.serverless: self.server_parameters = { - 'requireApiVersion': False, - 'enableTestCommands': True, + "requireApiVersion": False, + "enableTestCommands": True, } self.test_commands_enabled = True self.has_ipv6 = False else: - self.server_parameters = self.client.admin.command( - 'getParameter', '*') + self.server_parameters = self.client.admin.command("getParameter", "*") assert self.cmd_line is not None - if 'enableTestCommands=1' in self.cmd_line['argv']: + if "enableTestCommands=1" in self.cmd_line["argv"]: self.test_commands_enabled = True - elif 'parsed' in self.cmd_line: - params = self.cmd_line['parsed'].get('setParameter', []) - if 'enableTestCommands=1' in params: + elif "parsed" in self.cmd_line: + params = self.cmd_line["parsed"].get("setParameter", []) + if "enableTestCommands=1" in params: self.test_commands_enabled = True else: - params = self.cmd_line['parsed'].get('setParameter', {}) - if params.get('enableTestCommands') == '1': + params = self.cmd_line["parsed"].get("setParameter", {}) + if params.get("enableTestCommands") == "1": self.test_commands_enabled = True self.has_ipv6 = self._server_started_with_ipv6() - self.is_mongos = (self.hello.get('msg') == 'isdbgrid') + self.is_mongos = self.hello.get("msg") == "isdbgrid" if self.is_mongos: address = self.client.address self.mongoses.append(address) if not self.serverless: # Check for another mongos on the next port. 
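
For orientation, the ``enableTestCommands`` detection above flattened into one hedged sketch (reuses a connected, authorized ``client``; ``parsed.setParameter`` may be a list or a dict depending on server version)::

    opts = client.admin.command("getCmdLineOpts")
    params = opts.get("parsed", {}).get("setParameter", {})
    if isinstance(params, dict):
        enabled = params.get("enableTestCommands") == "1"
    else:
        enabled = "enableTestCommands=1" in params
    enabled = enabled or "enableTestCommands=1" in opts.get("argv", [])
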
assert address is not None - next_address = address[0], address[1] + 1 - mongos_client = self._connect( - *next_address, **self.default_client_options) + next_address = address[0], address[1] + 1 + mongos_client = self._connect(*next_address, **self.default_client_options) if mongos_client: hello = mongos_client.admin.command(HelloCompat.LEGACY_CMD) - if hello.get('msg') == 'isdbgrid': + if hello.get("msg") == "isdbgrid": self.mongoses.append(next_address) def init(self): @@ -459,7 +458,7 @@ def init(self): self._init_client() def connection_attempt_info(self): - return '\n'.join(self.connection_attempts) + return "\n".join(self.connection_attempts) @property def host(self): @@ -496,18 +495,20 @@ def storage_engine(self): def _check_user_provided(self): """Return True if db_user/db_password is already an admin user.""" client = pymongo.MongoClient( - host, port, + host, + port, username=db_user, password=db_pwd, serverSelectionTimeoutMS=100, - **self.default_client_options) + **self.default_client_options + ) try: return db_user in _all_users(client.admin) except pymongo.errors.OperationFailure as e: assert e.details is not None - msg = e.details.get('errmsg', '') - if e.code == 18 or 'auth fails' in msg: + msg = e.details.get("errmsg", "") + if e.code == 18 or "auth fails" in msg: # Auth failed. return False else: @@ -516,32 +517,31 @@ def _check_user_provided(self): def _server_started_with_auth(self): # MongoDB >= 2.0 assert self.cmd_line is not None - if 'parsed' in self.cmd_line: - parsed = self.cmd_line['parsed'] + if "parsed" in self.cmd_line: + parsed = self.cmd_line["parsed"] # MongoDB >= 2.6 - if 'security' in parsed: - security = parsed['security'] + if "security" in parsed: + security = parsed["security"] # >= rc3 - if 'authorization' in security: - return security['authorization'] == 'enabled' + if "authorization" in security: + return security["authorization"] == "enabled" # < rc3 - return (security.get('auth', False) or - bool(security.get('keyFile'))) - return parsed.get('auth', False) or bool(parsed.get('keyFile')) + return security.get("auth", False) or bool(security.get("keyFile")) + return parsed.get("auth", False) or bool(parsed.get("keyFile")) # Legacy - argv = self.cmd_line['argv'] - return '--auth' in argv or '--keyFile' in argv + argv = self.cmd_line["argv"] + return "--auth" in argv or "--keyFile" in argv def _server_started_with_ipv6(self): if not socket.has_ipv6: return False assert self.cmd_line is not None - if 'parsed' in self.cmd_line: - if not self.cmd_line['parsed'].get('net', {}).get('ipv6'): + if "parsed" in self.cmd_line: + if not self.cmd_line["parsed"].get("net", {}).get("ipv6"): return False else: - if '--ipv6' not in self.cmd_line['argv']: + if "--ipv6" not in self.cmd_line["argv"]: return False # The server was started with --ipv6. Is there an IPv6 route to it? 
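
One hedged way to answer that question with only the standard library (address and port are placeholders)::

    import socket

    def has_ipv6_route(host="::1", port=27017, timeout=1.0):
        sock = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
        sock.settimeout(timeout)
        try:
            sock.connect((host, port))
            return True
        except OSError:
            return False
        finally:
            sock.close()
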
@@ -561,101 +561,107 @@ def wrap(*args, **kwargs): self.init() # Always raise SkipTest if we can't connect to MongoDB if not self.connected: - raise SkipTest( - "Cannot connect to MongoDB on %s" % (self.pair,)) + raise SkipTest("Cannot connect to MongoDB on %s" % (self.pair,)) if condition(): return f(*args, **kwargs) raise SkipTest(msg) + return wrap if func is None: + def decorate(f): return make_wrapper(f) + return decorate return make_wrapper(func) def create_user(self, dbname, user, pwd=None, roles=None, **kwargs): - kwargs['writeConcern'] = {'w': self.w} + kwargs["writeConcern"] = {"w": self.w} return _create_user(self.client[dbname], user, pwd, roles, **kwargs) def drop_user(self, dbname, user): - self.client[dbname].command( - 'dropUser', user, writeConcern={'w': self.w}) + self.client[dbname].command("dropUser", user, writeConcern={"w": self.w}) def require_connection(self, func): """Run a test only if we can connect to MongoDB.""" return self._require( lambda: True, # _require checks if we're connected "Cannot connect to MongoDB on %s" % (self.pair,), - func=func) + func=func, + ) def require_data_lake(self, func): """Run a test only if we are connected to Atlas Data Lake.""" return self._require( lambda: self.is_data_lake, "Not connected to Atlas Data Lake on %s" % (self.pair,), - func=func) + func=func, + ) def require_no_mmap(self, func): """Run a test only if the server is not using the MMAPv1 storage engine. Only works for standalone and replica sets; tests are - run regardless of storage engine on sharded clusters. """ + run regardless of storage engine on sharded clusters.""" + def is_not_mmap(): if self.is_mongos: return True - return self.storage_engine != 'mmapv1' + return self.storage_engine != "mmapv1" - return self._require( - is_not_mmap, "Storage engine must not be MMAPv1", func=func) + return self._require(is_not_mmap, "Storage engine must not be MMAPv1", func=func) def require_version_min(self, *ver): """Run a test only if the server version is at least ``version``.""" other_version = Version(*ver) - return self._require(lambda: self.version >= other_version, - "Server version must be at least %s" - % str(other_version)) + return self._require( + lambda: self.version >= other_version, + "Server version must be at least %s" % str(other_version), + ) def require_version_max(self, *ver): """Run a test only if the server version is at most ``version``.""" other_version = Version(*ver) - return self._require(lambda: self.version <= other_version, - "Server version must be at most %s" - % str(other_version)) + return self._require( + lambda: self.version <= other_version, + "Server version must be at most %s" % str(other_version), + ) def require_auth(self, func): """Run a test only if the server is running with auth enabled.""" - return self._require(lambda: self.auth_enabled, - "Authentication is not enabled on the server", - func=func) + return self._require( + lambda: self.auth_enabled, "Authentication is not enabled on the server", func=func + ) def require_no_auth(self, func): """Run a test only if the server is running without auth enabled.""" - return self._require(lambda: not self.auth_enabled, - "Authentication must not be enabled on the server", - func=func) + return self._require( + lambda: not self.auth_enabled, + "Authentication must not be enabled on the server", + func=func, + ) def require_replica_set(self, func): """Run a test only if the client is connected to a replica set.""" - return self._require(lambda: self.is_rs, - "Not connected to a replica 
set", - func=func) + return self._require(lambda: self.is_rs, "Not connected to a replica set", func=func) def require_secondaries_count(self, count): """Run a test only if the client is connected to a replica set that has `count` secondaries. """ + def sec_count(): return 0 if not self.client else len(self.client.secondaries) - return self._require(lambda: sec_count() >= count, - "Not enough secondaries available") + + return self._require(lambda: sec_count() >= count, "Not enough secondaries available") @property def supports_secondary_read_pref(self): if self.has_secondaries: return True if self.is_mongos: - shard = self.client.config.shards.find_one()['host'] - num_members = shard.count(',') + 1 + shard = self.client.config.shards.find_one()["host"] + num_members = shard.count(",") + 1 return num_members > 1 return False @@ -663,90 +669,94 @@ def require_secondary_read_pref(self): """Run a test only if the client is connected to a cluster that supports secondary read preference """ - return self._require(lambda: self.supports_secondary_read_pref, - "This cluster does not support secondary read " - "preference") + return self._require( + lambda: self.supports_secondary_read_pref, + "This cluster does not support secondary read " "preference", + ) def require_no_replica_set(self, func): """Run a test if the client is *not* connected to a replica set.""" return self._require( - lambda: not self.is_rs, - "Connected to a replica set, not a standalone mongod", - func=func) + lambda: not self.is_rs, "Connected to a replica set, not a standalone mongod", func=func + ) def require_ipv6(self, func): """Run a test only if the client can connect to a server via IPv6.""" - return self._require(lambda: self.has_ipv6, - "No IPv6", - func=func) + return self._require(lambda: self.has_ipv6, "No IPv6", func=func) def require_no_mongos(self, func): """Run a test only if the client is not connected to a mongos.""" - return self._require(lambda: not self.is_mongos, - "Must be connected to a mongod, not a mongos", - func=func) + return self._require( + lambda: not self.is_mongos, "Must be connected to a mongod, not a mongos", func=func + ) def require_mongos(self, func): """Run a test only if the client is connected to a mongos.""" - return self._require(lambda: self.is_mongos, - "Must be connected to a mongos", - func=func) + return self._require(lambda: self.is_mongos, "Must be connected to a mongos", func=func) def require_multiple_mongoses(self, func): """Run a test only if the client is connected to a sharded cluster that has 2 mongos nodes.""" - return self._require(lambda: len(self.mongoses) > 1, - "Must have multiple mongoses available", - func=func) + return self._require( + lambda: len(self.mongoses) > 1, "Must have multiple mongoses available", func=func + ) def require_standalone(self, func): """Run a test only if the client is connected to a standalone.""" - return self._require(lambda: not (self.is_mongos or self.is_rs), - "Must be connected to a standalone", - func=func) + return self._require( + lambda: not (self.is_mongos or self.is_rs), + "Must be connected to a standalone", + func=func, + ) def require_no_standalone(self, func): """Run a test only if the client is not connected to a standalone.""" - return self._require(lambda: self.is_mongos or self.is_rs, - "Must be connected to a replica set or mongos", - func=func) + return self._require( + lambda: self.is_mongos or self.is_rs, + "Must be connected to a replica set or mongos", + func=func, + ) def require_load_balancer(self, func): 
"""Run a test only if the client is connected to a load balancer.""" - return self._require(lambda: self.load_balancer, - "Must be connected to a load balancer", - func=func) + return self._require( + lambda: self.load_balancer, "Must be connected to a load balancer", func=func + ) def require_no_load_balancer(self, func): - """Run a test only if the client is not connected to a load balancer. - """ - return self._require(lambda: not self.load_balancer, - "Must not be connected to a load balancer", - func=func) + """Run a test only if the client is not connected to a load balancer.""" + return self._require( + lambda: not self.load_balancer, "Must not be connected to a load balancer", func=func + ) def is_topology_type(self, topologies): - unknown = set(topologies) - {'single', 'replicaset', 'sharded', - 'sharded-replicaset', 'load-balanced'} + unknown = set(topologies) - { + "single", + "replicaset", + "sharded", + "sharded-replicaset", + "load-balanced", + } if unknown: - raise AssertionError('Unknown topologies: %r' % (unknown,)) + raise AssertionError("Unknown topologies: %r" % (unknown,)) if self.load_balancer: - if 'load-balanced' in topologies: + if "load-balanced" in topologies: return True return False - if 'single' in topologies and not (self.is_mongos or self.is_rs): + if "single" in topologies and not (self.is_mongos or self.is_rs): return True - if 'replicaset' in topologies and self.is_rs: + if "replicaset" in topologies and self.is_rs: return True - if 'sharded' in topologies and self.is_mongos: + if "sharded" in topologies and self.is_mongos: return True - if 'sharded-replicaset' in topologies and self.is_mongos: + if "sharded-replicaset" in topologies and self.is_mongos: shards = list(client_context.client.config.shards.find()) for shard in shards: # For a 3-member RS-backed sharded cluster, shard['host'] # will be 'replicaName/ip1:port1,ip2:port2,ip3:port3' # Otherwise it will be 'ip1:port1' - host_spec = shard['host'] - if not len(host_spec.split('/')) > 1: + host_spec = shard["host"] + if not len(host_spec.split("/")) > 1: return False return True return False @@ -755,76 +765,80 @@ def require_cluster_type(self, topologies=[]): """Run a test only if the client is connected to a cluster that conforms to one of the specified topologies. 
Acceptable topologies are 'single', 'replicaset', and 'sharded'.""" + def _is_valid_topology(): return self.is_topology_type(topologies) - return self._require( - _is_valid_topology, - "Cluster type not in %s" % (topologies)) + + return self._require(_is_valid_topology, "Cluster type not in %s" % (topologies)) def require_test_commands(self, func): """Run a test only if the server has test commands enabled.""" - return self._require(lambda: self.test_commands_enabled, - "Test commands must be enabled", - func=func) + return self._require( + lambda: self.test_commands_enabled, "Test commands must be enabled", func=func + ) def require_failCommand_fail_point(self, func): """Run a test only if the server supports the failCommand fail point.""" - return self._require(lambda: self.supports_failCommand_fail_point, - "failCommand fail point must be supported", - func=func) + return self._require( + lambda: self.supports_failCommand_fail_point, + "failCommand fail point must be supported", + func=func, + ) def require_failCommand_appName(self, func): """Run a test only if the server supports the failCommand appName.""" # SERVER-47195 - return self._require(lambda: (self.test_commands_enabled and - self.version >= (4, 4, -1)), - "failCommand appName must be supported", - func=func) + return self._require( + lambda: (self.test_commands_enabled and self.version >= (4, 4, -1)), + "failCommand appName must be supported", + func=func, + ) def require_failCommand_blockConnection(self, func): - """Run a test only if the server supports failCommand blockConnection. - """ + """Run a test only if the server supports failCommand blockConnection.""" return self._require( - lambda: (self.test_commands_enabled and ( - (not self.is_mongos and self.version >= (4, 2, 9)) or - (self.is_mongos and self.version >= (4, 4)))), + lambda: ( + self.test_commands_enabled + and ( + (not self.is_mongos and self.version >= (4, 2, 9)) + or (self.is_mongos and self.version >= (4, 4)) + ) + ), "failCommand blockConnection is not supported", - func=func) + func=func, + ) def require_tls(self, func): """Run a test only if the client can connect over TLS.""" - return self._require(lambda: self.tls, - "Must be able to connect via TLS", - func=func) + return self._require(lambda: self.tls, "Must be able to connect via TLS", func=func) def require_no_tls(self, func): """Run a test only if the client can connect over TLS.""" - return self._require(lambda: not self.tls, - "Must be able to connect without TLS", - func=func) + return self._require(lambda: not self.tls, "Must be able to connect without TLS", func=func) def require_tlsCertificateKeyFile(self, func): """Run a test only if the client can connect with tlsCertificateKeyFile.""" - return self._require(lambda: self.tlsCertificateKeyFile, - "Must be able to connect with tlsCertificateKeyFile", - func=func) + return self._require( + lambda: self.tlsCertificateKeyFile, + "Must be able to connect with tlsCertificateKeyFile", + func=func, + ) def require_server_resolvable(self, func): """Run a test only if the hostname 'server' is resolvable.""" - return self._require(lambda: self.server_is_resolvable, - "No hosts entry for 'server'. Cannot validate " - "hostname in the certificate", - func=func) + return self._require( + lambda: self.server_is_resolvable, + "No hosts entry for 'server'. 
Cannot validate " "hostname in the certificate", + func=func, + ) def require_sessions(self, func): """Run a test only if the deployment supports sessions.""" - return self._require(lambda: self.sessions_enabled, - "Sessions not supported", - func=func) + return self._require(lambda: self.sessions_enabled, "Sessions not supported", func=func) def supports_retryable_writes(self): - if self.storage_engine == 'mmapv1': + if self.storage_engine == "mmapv1": return False if not self.sessions_enabled: return False @@ -832,12 +846,14 @@ def supports_retryable_writes(self): def require_retryable_writes(self, func): """Run a test only if the deployment supports retryable writes.""" - return self._require(self.supports_retryable_writes, - "This server does not support retryable writes", - func=func) + return self._require( + self.supports_retryable_writes, + "This server does not support retryable writes", + func=func, + ) def supports_transactions(self): - if self.storage_engine == 'mmapv1': + if self.storage_engine == "mmapv1": return False if self.version.at_least(4, 1, 8): @@ -853,28 +869,28 @@ def require_transactions(self, func): *Might* because this does not test the storage engine or FCV. """ - return self._require(self.supports_transactions, - "Transactions are not supported", - func=func) + return self._require( + self.supports_transactions, "Transactions are not supported", func=func + ) def require_no_api_version(self, func): """Skip this test when testing with requireApiVersion.""" - return self._require(lambda: not MONGODB_API_VERSION, - "This test does not work with requireApiVersion", - func=func) + return self._require( + lambda: not MONGODB_API_VERSION, + "This test does not work with requireApiVersion", + func=func, + ) def mongos_seeds(self): - return ','.join('%s:%s' % address for address in self.mongoses) + return ",".join("%s:%s" % address for address in self.mongoses) @property def supports_failCommand_fail_point(self): """Does the server support the failCommand fail point?""" if self.is_mongos: - return (self.version.at_least(4, 1, 5) and - self.test_commands_enabled) + return self.version.at_least(4, 1, 5) and self.test_commands_enabled else: - return (self.version.at_least(4, 0) and - self.test_commands_enabled) + return self.version.at_least(4, 0) and self.test_commands_enabled @property def requires_hint_with_min_max_queries(self): @@ -884,11 +900,11 @@ def requires_hint_with_min_max_queries(self): @property def max_bson_size(self): - return self.hello['maxBsonObjectSize'] + return self.hello["maxBsonObjectSize"] @property def max_write_batch_size(self): - return self.hello['maxWriteBatchSize'] + return self.hello["maxWriteBatchSize"] # Reusable client context @@ -897,13 +913,13 @@ def max_write_batch_size(self): def sanitize_cmd(cmd): cp = cmd.copy() - cp.pop('$clusterTime', None) - cp.pop('$db', None) - cp.pop('$readPreference', None) - cp.pop('lsid', None) + cp.pop("$clusterTime", None) + cp.pop("$db", None) + cp.pop("$readPreference", None) + cp.pop("lsid", None) if MONGODB_API_VERSION: # Versioned api parameters - cp.pop('apiVersion', None) + cp.pop("apiVersion", None) # OP_MSG encoding may move the payload type one field to the # end of the command. Do the same here. 
name = next(iter(cp)) @@ -918,8 +934,8 @@ def sanitize_cmd(cmd): def sanitize_reply(reply): cp = reply.copy() - cp.pop('$clusterTime', None) - cp.pop('operationTime', None) + cp.pop("$clusterTime", None) + cp.pop("operationTime", None) return cp @@ -932,18 +948,20 @@ def assertEqualReply(self, expected, actual, msg=None): @contextmanager def fail_point(self, command_args): - cmd_on = SON([('configureFailPoint', 'failCommand')]) + cmd_on = SON([("configureFailPoint", "failCommand")]) cmd_on.update(command_args) client_context.client.admin.command(cmd_on) try: yield finally: client_context.client.admin.command( - 'configureFailPoint', cmd_on['configureFailPoint'], mode='off') + "configureFailPoint", cmd_on["configureFailPoint"], mode="off" + ) class IntegrationTest(PyMongoTestCase): """Base class for TestCases that need a connection to MongoDB to pass.""" + client: MongoClient db: Database credentials: Dict[str, str] @@ -951,16 +969,14 @@ class IntegrationTest(PyMongoTestCase): @classmethod @client_context.require_connection def setUpClass(cls): - if (client_context.load_balancer and - not getattr(cls, 'RUN_ON_LOAD_BALANCER', False)): - raise SkipTest('this test does not support load balancers') - if (client_context.serverless and - not getattr(cls, 'RUN_ON_SERVERLESS', False)): - raise SkipTest('this test does not support serverless') + if client_context.load_balancer and not getattr(cls, "RUN_ON_LOAD_BALANCER", False): + raise SkipTest("this test does not support load balancers") + if client_context.serverless and not getattr(cls, "RUN_ON_SERVERLESS", False): + raise SkipTest("this test does not support serverless") cls.client = client_context.client cls.db = cls.client.pymongo_test if client_context.auth_enabled: - cls.credentials = {'username': db_user, 'password': db_pwd} + cls.credentials = {"username": db_user, "password": db_pwd} else: cls.credentials = {} @@ -996,9 +1012,7 @@ def setUpClass(cls): def setUp(self): super(MockClientTest, self).setUp() - self.client_knobs = client_knobs( - heartbeat_frequency=0.001, - min_heartbeat_interval=0.001) + self.client_knobs = client_knobs(heartbeat_frequency=0.001, min_heartbeat_interval=0.001) self.client_knobs.enable() @@ -1017,9 +1031,9 @@ def _get_executors(topology): executors = [] for server in topology._servers.values(): # Some MockMonitor do not have an _executor. - if hasattr(server._monitor, '_executor'): + if hasattr(server._monitor, "_executor"): executors.append(server._monitor._executor) - if hasattr(server._monitor, '_rtt_monitor'): + if hasattr(server._monitor, "_rtt_monitor"): executors.append(server._monitor._rtt_monitor._executor) executors.append(topology._Topology__events_executor) if topology._srv_monitor: @@ -1031,14 +1045,17 @@ def _get_executors(topology): def all_executors_stopped(topology): running = [e for e in _get_executors(topology) if not e._stopped] if running: - print(' Topology %s has THREADS RUNNING: %s, created at: %s' % ( - topology, running, topology._settings._stack)) + print( + " Topology %s has THREADS RUNNING: %s, created at: %s" + % (topology, running, topology._settings._stack) + ) return False return True def print_unclosed_clients(): from pymongo.topology import Topology + processed = set() # Call collect to manually cleanup any would-be gc'd clients to avoid # false positives. 
@@ -1058,11 +1075,11 @@ def print_unclosed_clients(): def teardown(): garbage = [] for g in gc.garbage: - garbage.append('GARBAGE: %r' % (g,)) - garbage.append(' gc.get_referents: %r' % (gc.get_referents(g),)) - garbage.append(' gc.get_referrers: %r' % (gc.get_referrers(g),)) + garbage.append("GARBAGE: %r" % (g,)) + garbage.append(" gc.get_referents: %r" % (gc.get_referents(g),)) + garbage.append(" gc.get_referrers: %r" % (gc.get_referrers(g),)) if garbage: - assert False, '\n'.join(garbage) + assert False, "\n".join(garbage) c = client_context.client if c: if not client_context.is_data_lake: @@ -1075,7 +1092,7 @@ def teardown(): c.close() # Jython does not support gc.get_objects. - if not sys.platform.startswith('java'): + if not sys.platform.startswith("java"): print_unclosed_clients() @@ -1088,6 +1105,7 @@ def run(self, test): if HAVE_XML: + class PymongoXMLTestRunner(XMLTestRunner): # type: ignore[misc] def run(self, test): setup() @@ -1118,17 +1136,21 @@ def clear_warning_registry(): class SystemCertsPatcher(object): def __init__(self, ca_certs): - if (ssl.OPENSSL_VERSION.lower().startswith('libressl') and - sys.platform == 'darwin' and not _ssl.IS_PYOPENSSL): + if ( + ssl.OPENSSL_VERSION.lower().startswith("libressl") + and sys.platform == "darwin" + and not _ssl.IS_PYOPENSSL + ): raise SkipTest( "LibreSSL on OSX doesn't support setting CA certificates " - "using SSL_CERT_FILE environment variable.") - self.original_certs = os.environ.get('SSL_CERT_FILE') + "using SSL_CERT_FILE environment variable." + ) + self.original_certs = os.environ.get("SSL_CERT_FILE") # Tell OpenSSL where CA certificates live. - os.environ['SSL_CERT_FILE'] = ca_certs + os.environ["SSL_CERT_FILE"] = ca_certs def disable(self): if self.original_certs is None: - os.environ.pop('SSL_CERT_FILE') + os.environ.pop("SSL_CERT_FILE") else: - os.environ['SSL_CERT_FILE'] = self.original_certs + os.environ["SSL_CERT_FILE"] = self.original_certs diff --git a/test/atlas/test_connection.py b/test/atlas/test_connection.py index 1ad84068ed..cad2b10683 100644 --- a/test/atlas/test_connection.py +++ b/test/atlas/test_connection.py @@ -17,7 +17,6 @@ import os import sys import unittest - from collections import defaultdict sys.path[0:0] = [""] @@ -27,6 +26,7 @@ try: import dns + HAS_DNS = True except ImportError: HAS_DNS = False @@ -57,59 +57,59 @@ def connect(uri): raise Exception("Must set env variable to test.") client = pymongo.MongoClient(uri) # No TLS error - client.admin.command('ping') + client.admin.command("ping") # No auth error client.test.test.count_documents({}) class TestAtlasConnect(unittest.TestCase): - @unittest.skipUnless(HAS_SNI, 'Free tier requires SNI support') + @unittest.skipUnless(HAS_SNI, "Free tier requires SNI support") def test_free_tier(self): - connect(URIS['ATLAS_FREE']) + connect(URIS["ATLAS_FREE"]) def test_replica_set(self): - connect(URIS['ATLAS_REPL']) + connect(URIS["ATLAS_REPL"]) def test_sharded_cluster(self): - connect(URIS['ATLAS_SHRD']) + connect(URIS["ATLAS_SHRD"]) def test_tls_11(self): - connect(URIS['ATLAS_TLS11']) + connect(URIS["ATLAS_TLS11"]) def test_tls_12(self): - connect(URIS['ATLAS_TLS12']) + connect(URIS["ATLAS_TLS12"]) def test_serverless(self): - connect(URIS['ATLAS_SERVERLESS']) + connect(URIS["ATLAS_SERVERLESS"]) def connect_srv(self, uri): connect(uri) - self.assertIn('mongodb+srv://', uri) + self.assertIn("mongodb+srv://", uri) - @unittest.skipUnless(HAS_SNI, 'Free tier requires SNI support') - @unittest.skipUnless(HAS_DNS or MUST_TEST_SRV, 'SRV requires dnspython') + 
@unittest.skipUnless(HAS_SNI, "Free tier requires SNI support") + @unittest.skipUnless(HAS_DNS or MUST_TEST_SRV, "SRV requires dnspython") def test_srv_free_tier(self): - self.connect_srv(URIS['ATLAS_SRV_FREE']) + self.connect_srv(URIS["ATLAS_SRV_FREE"]) - @unittest.skipUnless(HAS_DNS or MUST_TEST_SRV, 'SRV requires dnspython') + @unittest.skipUnless(HAS_DNS or MUST_TEST_SRV, "SRV requires dnspython") def test_srv_replica_set(self): - self.connect_srv(URIS['ATLAS_SRV_REPL']) + self.connect_srv(URIS["ATLAS_SRV_REPL"]) - @unittest.skipUnless(HAS_DNS or MUST_TEST_SRV, 'SRV requires dnspython') + @unittest.skipUnless(HAS_DNS or MUST_TEST_SRV, "SRV requires dnspython") def test_srv_sharded_cluster(self): - self.connect_srv(URIS['ATLAS_SRV_SHRD']) + self.connect_srv(URIS["ATLAS_SRV_SHRD"]) - @unittest.skipUnless(HAS_DNS or MUST_TEST_SRV, 'SRV requires dnspython') + @unittest.skipUnless(HAS_DNS or MUST_TEST_SRV, "SRV requires dnspython") def test_srv_tls_11(self): - self.connect_srv(URIS['ATLAS_SRV_TLS11']) + self.connect_srv(URIS["ATLAS_SRV_TLS11"]) - @unittest.skipUnless(HAS_DNS or MUST_TEST_SRV, 'SRV requires dnspython') + @unittest.skipUnless(HAS_DNS or MUST_TEST_SRV, "SRV requires dnspython") def test_srv_tls_12(self): - self.connect_srv(URIS['ATLAS_SRV_TLS12']) + self.connect_srv(URIS["ATLAS_SRV_TLS12"]) - @unittest.skipUnless(HAS_DNS or MUST_TEST_SRV, 'SRV requires dnspython') + @unittest.skipUnless(HAS_DNS or MUST_TEST_SRV, "SRV requires dnspython") def test_srv_serverless(self): - self.connect_srv(URIS['ATLAS_SRV_SERVERLESS']) + self.connect_srv(URIS["ATLAS_SRV_SERVERLESS"]) def test_uniqueness(self): """Ensure that we don't accidentally duplicate the test URIs.""" @@ -117,11 +117,12 @@ def test_uniqueness(self): for name, uri in URIS.items(): if uri: uri_to_names[uri].append(name) - duplicates = [names for names in uri_to_names.values() - if len(names) > 1] - self.assertFalse(duplicates, 'Error: the following env variables have ' - 'duplicate values: %s' % (duplicates,)) + duplicates = [names for names in uri_to_names.values() if len(names) > 1] + self.assertFalse( + duplicates, + "Error: the following env variables have " "duplicate values: %s" % (duplicates,), + ) -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() diff --git a/test/auth_aws/test_auth_aws.py b/test/auth_aws/test_auth_aws.py index f096d0569a..750d18c4fe 100644 --- a/test/auth_aws/test_auth_aws.py +++ b/test/auth_aws/test_auth_aws.py @@ -30,21 +30,22 @@ class TestAuthAWS(unittest.TestCase): @classmethod def setUpClass(cls): - cls.uri = os.environ['MONGODB_URI'] + cls.uri = os.environ["MONGODB_URI"] def test_should_fail_without_credentials(self): - if '@' not in self.uri: - self.skipTest('MONGODB_URI already has no credentials') + if "@" not in self.uri: + self.skipTest("MONGODB_URI already has no credentials") - hosts = ['%s:%s' % addr for addr in parse_uri(self.uri)['nodelist']] + hosts = ["%s:%s" % addr for addr in parse_uri(self.uri)["nodelist"]] self.assertTrue(hosts) with MongoClient(hosts) as client: with self.assertRaises(OperationFailure): client.aws.test.find_one() def test_should_fail_incorrect_credentials(self): - with MongoClient(self.uri, username='fake', password='fake', - authMechanism='MONGODB-AWS') as client: + with MongoClient( + self.uri, username="fake", password="fake", authMechanism="MONGODB-AWS" + ) as client: with self.assertRaises(OperationFailure): client.get_database().test.find_one() @@ -53,5 +54,5 @@ def test_connect_uri(self): client.get_database().test.find_one() -if 
__name__ == '__main__': +if __name__ == "__main__": unittest.main() diff --git a/test/crud_v2_format.py b/test/crud_v2_format.py index dbdea40d46..4118dfef9f 100644 --- a/test/crud_v2_format.py +++ b/test/crud_v2_format.py @@ -33,22 +33,22 @@ def allowable_errors(self, op): def get_scenario_db_name(self, scenario_def): """Crud spec says database_name is optional.""" - return scenario_def.get('database_name', self.TEST_DB) + return scenario_def.get("database_name", self.TEST_DB) def get_scenario_coll_name(self, scenario_def): """Crud spec says collection_name is optional.""" - return scenario_def.get('collection_name', self.TEST_COLLECTION) + return scenario_def.get("collection_name", self.TEST_COLLECTION) def get_object_name(self, op): """Crud spec says object is optional and defaults to 'collection'.""" - return op.get('object', 'collection') + return op.get("object", "collection") def get_outcome_coll_name(self, outcome, collection): """Crud spec says outcome has an optional 'collection.name'.""" - return outcome['collection'].get('name', collection.name) + return outcome["collection"].get("name", collection.name) def setup_scenario(self, scenario_def): """Allow specs to override a test's setup.""" # PYTHON-1935 Only create the collection if there is data to insert. - if scenario_def['data']: + if scenario_def["data"]: super(TestCrudV2, self).setup_scenario(scenario_def) diff --git a/test/mockupdb/operations.py b/test/mockupdb/operations.py index 138c059ac6..efb9e5084e 100644 --- a/test/mockupdb/operations.py +++ b/test/mockupdb/operations.py @@ -14,15 +14,14 @@ from collections import namedtuple -from mockupdb import OpMsgReply, OpMsg, OpReply +from mockupdb import OpMsg, OpMsgReply, OpReply + from pymongo import ReadPreference -__all__ = ['operations', 'upgrades'] +__all__ = ["operations", "upgrades"] -Operation = namedtuple( - 'Operation', - ['name', 'function', 'reply', 'op_type', 'not_master']) +Operation = namedtuple("Operation", ["name", "function", "reply", "op_type", "not_master"]) """Client operations on MongoDB. Each has a human-readable name, a function that actually executes a test, and @@ -51,64 +50,71 @@ sharded cluster (PYTHON-868). 
""" -not_master_reply = OpMsgReply(ok=0, errmsg='not master') +not_master_reply = OpMsgReply(ok=0, errmsg="not master") operations = [ Operation( - 'find_one', + "find_one", lambda client: client.db.collection.find_one(), - reply={'cursor': {'id': 0, 'firstBatch': []}}, - op_type='may-use-secondary', - not_master=not_master_reply), + reply={"cursor": {"id": 0, "firstBatch": []}}, + op_type="may-use-secondary", + not_master=not_master_reply, + ), Operation( - 'count', + "count", lambda client: client.db.collection.count_documents({}), - reply={'n': 1}, - op_type='may-use-secondary', - not_master=not_master_reply), + reply={"n": 1}, + op_type="may-use-secondary", + not_master=not_master_reply, + ), Operation( - 'aggregate', + "aggregate", lambda client: client.db.collection.aggregate([]), - reply={'cursor': {'id': 0, 'firstBatch': []}}, - op_type='may-use-secondary', - not_master=not_master_reply), + reply={"cursor": {"id": 0, "firstBatch": []}}, + op_type="may-use-secondary", + not_master=not_master_reply, + ), Operation( - 'options', + "options", lambda client: client.db.collection.options(), - reply={'cursor': {'id': 0, 'firstBatch': []}}, - op_type='must-use-primary', - not_master=not_master_reply), + reply={"cursor": {"id": 0, "firstBatch": []}}, + op_type="must-use-primary", + not_master=not_master_reply, + ), Operation( - 'command', - lambda client: client.db.command('foo'), - reply={'ok': 1}, - op_type='must-use-primary', # Ignores client's read preference. - not_master=not_master_reply), + "command", + lambda client: client.db.command("foo"), + reply={"ok": 1}, + op_type="must-use-primary", # Ignores client's read preference. + not_master=not_master_reply, + ), Operation( - 'secondary command', - lambda client: - client.db.command('foo', read_preference=ReadPreference.SECONDARY), - reply={'ok': 1}, - op_type='always-use-secondary', - not_master=OpReply(ok=0, errmsg='node is recovering')), + "secondary command", + lambda client: client.db.command("foo", read_preference=ReadPreference.SECONDARY), + reply={"ok": 1}, + op_type="always-use-secondary", + not_master=OpReply(ok=0, errmsg="node is recovering"), + ), Operation( - 'listIndexes', + "listIndexes", lambda client: client.db.collection.index_information(), - reply={'cursor': {'id': 0, 'firstBatch': []}}, - op_type='must-use-primary', - not_master=not_master_reply), + reply={"cursor": {"id": 0, "firstBatch": []}}, + op_type="must-use-primary", + not_master=not_master_reply, + ), ] _ops_by_name = dict([(op.name, op) for op in operations]) -Upgrade = namedtuple('Upgrade', - ['name', 'function', 'old', 'new', 'wire_version']) +Upgrade = namedtuple("Upgrade", ["name", "function", "old", "new", "wire_version"]) upgrades = [ - Upgrade('estimated_document_count', - lambda client: client.db.collection.estimated_document_count(), - old=OpMsg('count', 'collection', namespace='db'), - new=OpMsg('aggregate', 'collection', namespace='db'), - wire_version=12), + Upgrade( + "estimated_document_count", + lambda client: client.db.collection.estimated_document_count(), + old=OpMsg("count", "collection", namespace="db"), + new=OpMsg("aggregate", "collection", namespace="db"), + wire_version=12, + ), ] diff --git a/test/mockupdb/test_auth_recovering_member.py b/test/mockupdb/test_auth_recovering_member.py index 6fb983b37f..33d33da24c 100755 --- a/test/mockupdb/test_auth_recovering_member.py +++ b/test/mockupdb/test_auth_recovering_member.py @@ -12,31 +12,35 @@ # See the License for the specific language governing permissions and # limitations under the 
License. +import unittest + from mockupdb import MockupDB + from pymongo import MongoClient from pymongo.errors import ServerSelectionTimeoutError -import unittest - class TestAuthRecoveringMember(unittest.TestCase): def test_auth_recovering_member(self): # Test that we don't attempt auth against a recovering RS member. server = MockupDB() - server.autoresponds('ismaster', { - 'minWireVersion': 2, - 'maxWireVersion': 6, - 'ismaster': False, - 'secondary': False, - 'setName': 'rs'}) + server.autoresponds( + "ismaster", + { + "minWireVersion": 2, + "maxWireVersion": 6, + "ismaster": False, + "secondary": False, + "setName": "rs", + }, + ) server.run() self.addCleanup(server.stop) - client = MongoClient(server.uri, - replicaSet='rs', - serverSelectionTimeoutMS=100, - socketTimeoutMS=100) + client = MongoClient( + server.uri, replicaSet="rs", serverSelectionTimeoutMS=100, socketTimeoutMS=100 + ) self.addCleanup(client.close) @@ -46,5 +50,6 @@ def test_auth_recovering_member(self): with self.assertRaises(ServerSelectionTimeoutError): client.db.command("ping") -if __name__ == '__main__': + +if __name__ == "__main__": unittest.main() diff --git a/test/mockupdb/test_cluster_time.py b/test/mockupdb/test_cluster_time.py index 858e32a0fa..e6d8c2126c 100644 --- a/test/mockupdb/test_cluster_time.py +++ b/test/mockupdb/test_cluster_time.py @@ -14,15 +14,13 @@ """Test $clusterTime handling.""" -from bson import Timestamp -from mockupdb import going, MockupDB -from pymongo import (MongoClient, - InsertOne, - UpdateOne, - DeleteMany) - import unittest +from mockupdb import MockupDB, going + +from bson import Timestamp +from pymongo import DeleteMany, InsertOne, MongoClient, UpdateOne + class TestClusterTime(unittest.TestCase): def cluster_time_conversation(self, callback, replies): @@ -31,10 +29,13 @@ def cluster_time_conversation(self, callback, replies): # First test all commands include $clusterTime with wire version 6. 
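        # The gossip rule under test: every command carries the newest
        # $clusterTime the client has observed, and a newer clusterTime in any
        # reply must be adopted and echoed on the next request, e.g.
        # Timestamp(0, 1) is replaced once Timestamp(0, 2) is seen. The loop
        # below bumps the Timestamp's inc by one per reply to verify that
        # hand-off.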
responder = server.autoresponds( - 'ismaster', - {'minWireVersion': 0, - 'maxWireVersion': 6, - '$clusterTime': {'clusterTime': cluster_time}}) + "ismaster", + { + "minWireVersion": 0, + "maxWireVersion": 6, + "$clusterTime": {"clusterTime": cluster_time}, + }, + ) server.run() self.addCleanup(server.stop) @@ -45,39 +46,35 @@ def cluster_time_conversation(self, callback, replies): with going(callback, client): for reply in replies: request = server.receives() - self.assertIn('$clusterTime', request) - self.assertEqual(request['$clusterTime']['clusterTime'], - cluster_time) - cluster_time = Timestamp(cluster_time.time, - cluster_time.inc + 1) - reply['$clusterTime'] = {'clusterTime': cluster_time} + self.assertIn("$clusterTime", request) + self.assertEqual(request["$clusterTime"]["clusterTime"], cluster_time) + cluster_time = Timestamp(cluster_time.time, cluster_time.inc + 1) + reply["$clusterTime"] = {"clusterTime": cluster_time} request.reply(reply) def test_command(self): def callback(client): - client.db.command('ping') - client.db.command('ping') + client.db.command("ping") + client.db.command("ping") - self.cluster_time_conversation(callback, [{'ok': 1}] * 2) + self.cluster_time_conversation(callback, [{"ok": 1}] * 2) def test_bulk(self): def callback(client): - client.db.collection.bulk_write([ - InsertOne({}), - InsertOne({}), - UpdateOne({}, {'$inc': {'x': 1}}), - DeleteMany({})]) + client.db.collection.bulk_write( + [InsertOne({}), InsertOne({}), UpdateOne({}, {"$inc": {"x": 1}}), DeleteMany({})] + ) self.cluster_time_conversation( callback, - [{'ok': 1, 'nInserted': 2}, - {'ok': 1, 'nModified': 1}, - {'ok': 1, 'nDeleted': 2}]) + [{"ok": 1, "nInserted": 2}, {"ok": 1, "nModified": 1}, {"ok": 1, "nDeleted": 2}], + ) batches = [ - {'cursor': {'id': 123, 'firstBatch': [{'a': 1}]}}, - {'cursor': {'id': 123, 'nextBatch': [{'a': 2}]}}, - {'cursor': {'id': 0, 'nextBatch': [{'a': 3}]}}] + {"cursor": {"id": 123, "firstBatch": [{"a": 1}]}}, + {"cursor": {"id": 123, "nextBatch": [{"a": 2}]}}, + {"cursor": {"id": 0, "nextBatch": [{"a": 3}]}}, + ] def test_cursor(self): def callback(client): @@ -95,13 +92,15 @@ def test_explain(self): def callback(client): client.db.collection.find().explain() - self.cluster_time_conversation(callback, [{'ok': 1}]) + self.cluster_time_conversation(callback, [{"ok": 1}]) def test_monitor(self): cluster_time = Timestamp(0, 0) - reply = {'minWireVersion': 0, - 'maxWireVersion': 6, - '$clusterTime': {'clusterTime': cluster_time}} + reply = { + "minWireVersion": 0, + "maxWireVersion": 6, + "$clusterTime": {"clusterTime": cluster_time}, + } server = MockupDB() server.run() @@ -110,55 +109,52 @@ def test_monitor(self): client = MongoClient(server.uri, heartbeatFrequencyMS=500) self.addCleanup(client.close) - request = server.receives('ismaster') + request = server.receives("ismaster") # No $clusterTime in first ismaster, only in subsequent ones - self.assertNotIn('$clusterTime', request) + self.assertNotIn("$clusterTime", request) request.ok(reply) # Next exchange: client returns first clusterTime, we send the second. 
- request = server.receives('ismaster') - self.assertIn('$clusterTime', request) - self.assertEqual(request['$clusterTime']['clusterTime'], - cluster_time) - cluster_time = Timestamp(cluster_time.time, - cluster_time.inc + 1) - reply['$clusterTime'] = {'clusterTime': cluster_time} + request = server.receives("ismaster") + self.assertIn("$clusterTime", request) + self.assertEqual(request["$clusterTime"]["clusterTime"], cluster_time) + cluster_time = Timestamp(cluster_time.time, cluster_time.inc + 1) + reply["$clusterTime"] = {"clusterTime": cluster_time} request.reply(reply) # Third exchange: client returns second clusterTime. - request = server.receives('ismaster') - self.assertEqual(request['$clusterTime']['clusterTime'], - cluster_time) + request = server.receives("ismaster") + self.assertEqual(request["$clusterTime"]["clusterTime"], cluster_time) # Return command error with a new clusterTime. - cluster_time = Timestamp(cluster_time.time, - cluster_time.inc + 1) - error = {'ok': 0, - 'code': 211, - 'errmsg': 'Cache Reader No keys found for HMAC ...', - '$clusterTime': {'clusterTime': cluster_time}} + cluster_time = Timestamp(cluster_time.time, cluster_time.inc + 1) + error = { + "ok": 0, + "code": 211, + "errmsg": "Cache Reader No keys found for HMAC ...", + "$clusterTime": {"clusterTime": cluster_time}, + } request.reply(error) # PyMongo 3.11+ closes the monitoring connection on command errors. # Fourth exchange: the Monitor closes the connection and runs the # handshake on a new connection. - request = server.receives('ismaster') + request = server.receives("ismaster") # No $clusterTime in first ismaster, only in subsequent ones - self.assertNotIn('$clusterTime', request) + self.assertNotIn("$clusterTime", request) # Reply without $clusterTime. - reply.pop('$clusterTime') + reply.pop("$clusterTime") request.reply(reply) # Fifth exchange: the Monitor attempt uses the clusterTime from # the previous isMaster error. - request = server.receives('ismaster') - self.assertEqual(request['$clusterTime']['clusterTime'], - cluster_time) + request = server.receives("ismaster") + self.assertEqual(request["$clusterTime"]["clusterTime"], cluster_time) request.reply(reply) client.close() -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() diff --git a/test/mockupdb/test_cursor_namespace.py b/test/mockupdb/test_cursor_namespace.py index a52e2fb4e7..10788ac0f9 100644 --- a/test/mockupdb/test_cursor_namespace.py +++ b/test/mockupdb/test_cursor_namespace.py @@ -14,11 +14,12 @@ """Test list_indexes with more than one batch.""" -from mockupdb import going, MockupDB -from pymongo import MongoClient - import unittest +from mockupdb import MockupDB, going + +from pymongo import MongoClient + class TestCursorNamespace(unittest.TestCase): server: MockupDB @@ -26,7 +27,7 @@ class TestCursorNamespace(unittest.TestCase): @classmethod def setUpClass(cls): - cls.server = MockupDB(auto_ismaster={'maxWireVersion': 6}) + cls.server = MockupDB(auto_ismaster={"maxWireVersion": 6}) cls.server.run() cls.client = MongoClient(cls.server.uri) @@ -37,38 +38,43 @@ def tearDownClass(cls): def _test_cursor_namespace(self, cursor_op, command): with going(cursor_op) as docs: - request = self.server.receives( - **{command: 'collection', 'namespace': 'test'}) + request = self.server.receives(**{command: "collection", "namespace": "test"}) # Respond with a different namespace. 
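        # Drivers must honor the "ns" field returned in the cursor reply
        # rather than the namespace they originally queried. Roughly, the
        # client does:
        #   db_name, coll = "different_db.different.coll".split(".", 1)
        # and the getMore assertion below checks both pieces were used.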
- request.reply({'cursor': { - 'firstBatch': [{'doc': 1}], - 'id': 123, - 'ns': 'different_db.different.coll'}}) + request.reply( + { + "cursor": { + "firstBatch": [{"doc": 1}], + "id": 123, + "ns": "different_db.different.coll", + } + } + ) # Client uses the namespace we returned. request = self.server.receives( - getMore=123, namespace='different_db', - collection='different.coll') + getMore=123, namespace="different_db", collection="different.coll" + ) - request.reply({'cursor': { - 'nextBatch': [{'doc': 2}], - 'id': 0}}) + request.reply({"cursor": {"nextBatch": [{"doc": 2}], "id": 0}}) - self.assertEqual([{'doc': 1}, {'doc': 2}], docs()) + self.assertEqual([{"doc": 1}, {"doc": 2}], docs()) def test_aggregate_cursor(self): def op(): return list(self.client.test.collection.aggregate([])) - self._test_cursor_namespace(op, 'aggregate') + + self._test_cursor_namespace(op, "aggregate") def test_find_cursor(self): def op(): return list(self.client.test.collection.find()) - self._test_cursor_namespace(op, 'find') + + self._test_cursor_namespace(op, "find") def test_list_indexes(self): def op(): return list(self.client.test.collection.list_indexes()) - self._test_cursor_namespace(op, 'listIndexes') + + self._test_cursor_namespace(op, "listIndexes") class TestKillCursorsNamespace(unittest.TestCase): @@ -77,7 +83,7 @@ class TestKillCursorsNamespace(unittest.TestCase): @classmethod def setUpClass(cls): - cls.server = MockupDB(auto_ismaster={'maxWireVersion': 6}) + cls.server = MockupDB(auto_ismaster={"maxWireVersion": 6}) cls.server.run() cls.client = MongoClient(cls.server.uri) @@ -88,39 +94,47 @@ def tearDownClass(cls): def _test_killCursors_namespace(self, cursor_op, command): with going(cursor_op): - request = self.server.receives( - **{command: 'collection', 'namespace': 'test'}) + request = self.server.receives(**{command: "collection", "namespace": "test"}) # Respond with a different namespace. - request.reply({'cursor': { - 'firstBatch': [{'doc': 1}], - 'id': 123, - 'ns': 'different_db.different.coll'}}) + request.reply( + { + "cursor": { + "firstBatch": [{"doc": 1}], + "id": 123, + "ns": "different_db.different.coll", + } + } + ) # Client uses the namespace we returned for killCursors. - request = self.server.receives(**{ - 'killCursors': 'different.coll', - 'cursors': [123], - '$db': 'different_db'}) - request.reply({ - 'ok': 1, - 'cursorsKilled': [123], - 'cursorsNotFound': [], - 'cursorsAlive': [], - 'cursorsUnknown': []}) + request = self.server.receives( + **{"killCursors": "different.coll", "cursors": [123], "$db": "different_db"} + ) + request.reply( + { + "ok": 1, + "cursorsKilled": [123], + "cursorsNotFound": [], + "cursorsAlive": [], + "cursorsUnknown": [], + } + ) def test_aggregate_killCursor(self): def op(): cursor = self.client.test.collection.aggregate([], batchSize=1) next(cursor) cursor.close() - self._test_killCursors_namespace(op, 'aggregate') + + self._test_killCursors_namespace(op, "aggregate") def test_find_killCursor(self): def op(): cursor = self.client.test.collection.find(batch_size=1) next(cursor) cursor.close() - self._test_killCursors_namespace(op, 'find') + + self._test_killCursors_namespace(op, "find") -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() diff --git a/test/mockupdb/test_getmore_sharded.py b/test/mockupdb/test_getmore_sharded.py index 0d91583378..5f5400ab07 100644 --- a/test/mockupdb/test_getmore_sharded.py +++ b/test/mockupdb/test_getmore_sharded.py @@ -13,13 +13,12 @@ # limitations under the License. 
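# The test below verifies cursor "pinning": after the initial query is
# answered by one mongos, every getMore for that cursor must be routed to the
# same mongos, because cursor ids are only meaningful on the server that
# created them.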
"""Test PyMongo cursor with a sharded cluster.""" -from pymongo import MongoClient - +import unittest from queue import Queue from mockupdb import MockupDB, going -import unittest +from pymongo import MongoClient class TestGetmoreSharded(unittest.TestCase): @@ -30,20 +29,22 @@ def test_getmore_sharded(self): q: Queue = Queue() for server in servers: server.subscribe(q.put) - server.autoresponds('ismaster', ismaster=True, msg='isdbgrid', - minWireVersion=2, maxWireVersion=6) + server.autoresponds( + "ismaster", ismaster=True, msg="isdbgrid", minWireVersion=2, maxWireVersion=6 + ) server.run() self.addCleanup(server.stop) - client = MongoClient('mongodb://%s:%d,%s:%d' % ( - servers[0].host, servers[0].port, - servers[1].host, servers[1].port)) + client = MongoClient( + "mongodb://%s:%d,%s:%d" + % (servers[0].host, servers[0].port, servers[1].host, servers[1].port) + ) self.addCleanup(client.close) collection = client.db.collection cursor = collection.find() with going(next, cursor): query = q.get(timeout=1) - query.replies({'cursor': {'id': 123, 'firstBatch': [{}]}}) + query.replies({"cursor": {"id": 123, "firstBatch": [{}]}}) # 10 batches, all getMores go to same server. for i in range(1, 10): @@ -51,9 +52,8 @@ def test_getmore_sharded(self): getmore = q.get(timeout=1) self.assertEqual(query.server, getmore.server) cursor_id = 123 if i < 9 else 0 - getmore.replies({'cursor': {'id': cursor_id, - 'nextBatch': [{}]}}) + getmore.replies({"cursor": {"id": cursor_id, "nextBatch": [{}]}}) -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() diff --git a/test/mockupdb/test_handshake.py b/test/mockupdb/test_handshake.py index 29313de8c2..c9799fa21e 100644 --- a/test/mockupdb/test_handshake.py +++ b/test/mockupdb/test_handshake.py @@ -12,15 +12,15 @@ # See the License for the specific language governing permissions and # limitations under the License. -from mockupdb import (MockupDB, OpReply, OpMsg, OpMsgReply, OpQuery, absent, - Command, go) +import unittest + +from mockupdb import Command, MockupDB, OpMsg, OpMsgReply, OpQuery, OpReply, absent, go -from pymongo import MongoClient, version as pymongo_version +from bson.objectid import ObjectId +from pymongo import MongoClient +from pymongo import version as pymongo_version from pymongo.errors import OperationFailure from pymongo.server_api import ServerApi, ServerApiVersion -from bson.objectid import ObjectId - -import unittest def test_hello_with_option(self, protocol, **kwargs): @@ -30,26 +30,28 @@ def test_hello_with_option(self, protocol, **kwargs): primary = MockupDB() # Set up a custom handler to save the first request from the driver. self.handshake_req = None + def respond(r): # Only save the very first request from the driver. if self.handshake_req == None: self.handshake_req = r - load_balanced_kwargs = {"serviceId": ObjectId()} if kwargs.get( - "loadBalanced") else {} - return r.reply(OpMsgReply(minWireVersion=0, maxWireVersion=13, - **kwargs, **load_balanced_kwargs)) + load_balanced_kwargs = {"serviceId": ObjectId()} if kwargs.get("loadBalanced") else {} + return r.reply( + OpMsgReply(minWireVersion=0, maxWireVersion=13, **kwargs, **load_balanced_kwargs) + ) + primary.autoresponds(respond) primary.run() self.addCleanup(primary.stop) # We need a special dict because MongoClient uses "server_api" and all # of the commands use "apiVersion". 
- k_map = {("apiVersion", "1"):("server_api", ServerApi( - ServerApiVersion.V1))} - client = MongoClient("mongodb://"+primary.address_string, - appname='my app', # For _check_handshake_data() - **dict([k_map.get((k, v), (k, v)) for k, v # type: ignore[arg-type] - in kwargs.items()])) + k_map = {("apiVersion", "1"): ("server_api", ServerApi(ServerApiVersion.V1))} + client = MongoClient( + "mongodb://" + primary.address_string, + appname="my app", # For _check_handshake_data() + **dict([k_map.get((k, v), (k, v)) for k, v in kwargs.items()]) # type: ignore[arg-type] + ) self.addCleanup(client.close) @@ -65,15 +67,15 @@ def respond(r): def _check_handshake_data(request): - assert 'client' in request - data = request['client'] + assert "client" in request + data = request["client"] - assert data['application'] == {'name': 'my app'} - assert data['driver'] == {'name': 'PyMongo', 'version': pymongo_version} + assert data["application"] == {"name": "my app"} + assert data["driver"] == {"name": "PyMongo", "version": pymongo_version} # Keep it simple, just check these fields exist. - assert 'os' in data - assert 'platform' in data + assert "os" in data + assert "platform" in data class TestHandshake(unittest.TestCase): @@ -84,63 +86,66 @@ def test_client_handshake_data(self): self.addCleanup(server.stop) hosts = [server.address_string for server in (primary, secondary)] - primary_response = OpReply('ismaster', True, - setName='rs', hosts=hosts, - minWireVersion=2, maxWireVersion=6) - error_response = OpReply( - 0, errmsg='Cache Reader No keys found for HMAC ...', code=211) - - secondary_response = OpReply('ismaster', False, - setName='rs', hosts=hosts, - secondary=True, - minWireVersion=2, maxWireVersion=6) - - client = MongoClient(primary.uri, - replicaSet='rs', - appname='my app', - heartbeatFrequencyMS=500) # Speed up the test. + primary_response = OpReply( + "ismaster", True, setName="rs", hosts=hosts, minWireVersion=2, maxWireVersion=6 + ) + error_response = OpReply(0, errmsg="Cache Reader No keys found for HMAC ...", code=211) + + secondary_response = OpReply( + "ismaster", + False, + setName="rs", + hosts=hosts, + secondary=True, + minWireVersion=2, + maxWireVersion=6, + ) + + client = MongoClient( + primary.uri, replicaSet="rs", appname="my app", heartbeatFrequencyMS=500 + ) # Speed up the test. self.addCleanup(client.close) # New monitoring sockets send data during handshake. - heartbeat = primary.receives('ismaster') + heartbeat = primary.receives("ismaster") _check_handshake_data(heartbeat) heartbeat.ok(primary_response) - heartbeat = secondary.receives('ismaster') + heartbeat = secondary.receives("ismaster") _check_handshake_data(heartbeat) heartbeat.ok(secondary_response) # Subsequent heartbeats have no client data. - primary.receives('ismaster', 1, client=absent).ok(error_response) - secondary.receives('ismaster', 1, client=absent).ok(error_response) + primary.receives("ismaster", 1, client=absent).ok(error_response) + secondary.receives("ismaster", 1, client=absent).ok(error_response) # The heartbeat retry (on a new connection) does have client data. - heartbeat = primary.receives('ismaster') + heartbeat = primary.receives("ismaster") _check_handshake_data(heartbeat) heartbeat.ok(primary_response) - heartbeat = secondary.receives('ismaster') + heartbeat = secondary.receives("ismaster") _check_handshake_data(heartbeat) heartbeat.ok(secondary_response) # Still no client data. 
- primary.receives('ismaster', 1, client=absent).ok(primary_response) - secondary.receives('ismaster', 1, client=absent).ok(secondary_response) + primary.receives("ismaster", 1, client=absent).ok(primary_response) + secondary.receives("ismaster", 1, client=absent).ok(secondary_response) # After a disconnect, next ismaster has client data again. - primary.receives('ismaster', 1, client=absent).hangup() - heartbeat = primary.receives('ismaster') + primary.receives("ismaster", 1, client=absent).hangup() + heartbeat = primary.receives("ismaster") _check_handshake_data(heartbeat) heartbeat.ok(primary_response) - secondary.autoresponds('ismaster', secondary_response) + secondary.autoresponds("ismaster", secondary_response) # Start a command, so the client opens an application socket. - future = go(client.db.command, 'whatever') + future = go(client.db.command, "whatever") for request in primary: - if request.matches(Command('ismaster')): + if request.matches(Command("ismaster")): if request.client_port == heartbeat.client_port: # This is the monitor again, keep going. request.ok(primary_response) @@ -150,7 +155,7 @@ def test_client_handshake_data(self): request.ok(primary_response) else: # Command succeeds. - request.assert_matches(OpMsg('whatever')) + request.assert_matches(OpMsg("whatever")) request.ok() assert future() return @@ -160,40 +165,42 @@ def test_client_handshake_saslSupportedMechs(self): server.run() self.addCleanup(server.stop) - primary_response = OpReply('ismaster', True, - minWireVersion=2, maxWireVersion=6) - client = MongoClient(server.uri, - username='username', - password='password') + primary_response = OpReply("ismaster", True, minWireVersion=2, maxWireVersion=6) + client = MongoClient(server.uri, username="username", password="password") self.addCleanup(client.close) # New monitoring sockets send data during handshake. - heartbeat = server.receives('ismaster') + heartbeat = server.receives("ismaster") heartbeat.ok(primary_response) - future = go(client.db.command, 'whatever') + future = go(client.db.command, "whatever") for request in server: - if request.matches('ismaster'): + if request.matches("ismaster"): if request.client_port == heartbeat.client_port: # This is the monitor again, keep going. request.ok(primary_response) else: # Handshaking a new application socket should send # saslSupportedMechs and speculativeAuthenticate. - self.assertEqual(request['saslSupportedMechs'], - 'admin.username') - self.assertIn( - 'saslStart', request['speculativeAuthenticate']) - auth = {'conversationId': 1, 'done': False, - 'payload': b'r=wPleNM8S5p8gMaffMDF7Py4ru9bnmmoqb0' - b'1WNPsil6o=pAvr6B1garhlwc6MKNQ93ZfFky' - b'tXdF9r,s=4dcxugMJq2P4hQaDbGXZR8uR3ei' - b'PHrSmh4uhkg==,i=15000'} - request.ok('ismaster', True, - saslSupportedMechs=['SCRAM-SHA-256'], - speculativeAuthenticate=auth, - minWireVersion=2, maxWireVersion=6) + self.assertEqual(request["saslSupportedMechs"], "admin.username") + self.assertIn("saslStart", request["speculativeAuthenticate"]) + auth = { + "conversationId": 1, + "done": False, + "payload": b"r=wPleNM8S5p8gMaffMDF7Py4ru9bnmmoqb0" + b"1WNPsil6o=pAvr6B1garhlwc6MKNQ93ZfFky" + b"tXdF9r,s=4dcxugMJq2P4hQaDbGXZR8uR3ei" + b"PHrSmh4uhkg==,i=15000", + } + request.ok( + "ismaster", + True, + saslSupportedMechs=["SCRAM-SHA-256"], + speculativeAuthenticate=auth, + minWireVersion=2, + maxWireVersion=6, + ) # Authentication should immediately fail with: # OperationFailure: Server returned an invalid nonce. 
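                # (The canned SCRAM payload above cannot contain the client's
                # randomly generated nonce, so the client rejects the
                # server-first message; the failure itself is enough to prove
                # speculative authentication was attempted.)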
with self.assertRaises(OperationFailure): @@ -219,8 +226,7 @@ def test_handshake_not_either(self): def test_handshake_max_wire(self): server = MockupDB() - primary_response = {"hello": 1, "ok": 1, - "minWireVersion": 0, "maxWireVersion": 6} + primary_response = {"hello": 1, "ok": 1, "minWireVersion": 0, "maxWireVersion": 6} self.found_auth_msg = False def responder(request): @@ -228,31 +234,36 @@ def responder(request): self.found_auth_msg = True # Immediately closes the connection with # OperationFailure: Server returned an invalid nonce. - request.reply(OpMsgReply(**primary_response, - **{'payload': - b'r=wPleNM8S5p8gMaffMDF7Py4ru9bnmmoqb0' - b'1WNPsil6o=pAvr6B1garhlwc6MKNQ93ZfFky' - b'tXdF9r,' - b's=4dcxugMJq2P4hQaDbGXZR8uR3ei' - b'PHrSmh4uhkg==,i=15000', - "saslSupportedMechs": [ - "SCRAM-SHA-1"]})) + request.reply( + OpMsgReply( + **primary_response, + **{ + "payload": b"r=wPleNM8S5p8gMaffMDF7Py4ru9bnmmoqb0" + b"1WNPsil6o=pAvr6B1garhlwc6MKNQ93ZfFky" + b"tXdF9r," + b"s=4dcxugMJq2P4hQaDbGXZR8uR3ei" + b"PHrSmh4uhkg==,i=15000", + "saslSupportedMechs": ["SCRAM-SHA-1"], + } + ) + ) else: return request.reply(**primary_response) server.autoresponds(responder) self.addCleanup(server.stop) server.run() - client = MongoClient(server.uri, - username='username', - password='password', - ) + client = MongoClient( + server.uri, + username="username", + password="password", + ) self.addCleanup(client.close) - self.assertRaises(OperationFailure, client.db.collection.find_one, - {"a": 1}) - self.assertTrue(self.found_auth_msg, "Could not find authentication " - "command with correct protocol") + self.assertRaises(OperationFailure, client.db.collection.find_one, {"a": 1}) + self.assertTrue( + self.found_auth_msg, "Could not find authentication " "command with correct protocol" + ) -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() diff --git a/test/mockupdb/test_initial_ismaster.py b/test/mockupdb/test_initial_ismaster.py index c67fcbf9e1..155ae6152e 100644 --- a/test/mockupdb/test_initial_ismaster.py +++ b/test/mockupdb/test_initial_ismaster.py @@ -13,11 +13,11 @@ # limitations under the License. import time +import unittest from mockupdb import MockupDB, wait_until -from pymongo import MongoClient -import unittest +from pymongo import MongoClient class TestInitialIsMaster(unittest.TestCase): @@ -32,15 +32,13 @@ def test_initial_ismaster(self): # A single ismaster is enough for the client to be connected. self.assertFalse(client.nodes) - server.receives('ismaster').ok(ismaster=True, - minWireVersion=2, maxWireVersion=6) - wait_until(lambda: client.nodes, - 'update nodes', timeout=1) + server.receives("ismaster").ok(ismaster=True, minWireVersion=2, maxWireVersion=6) + wait_until(lambda: client.nodes, "update nodes", timeout=1) # At least 10 seconds before next heartbeat. 
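        # Ten seconds is PyMongo's default heartbeatFrequencyMS, which this
        # client does not override; the elapsed-time assertion below depends
        # on that default.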
- server.receives('ismaster').ok(ismaster=True, - minWireVersion=2, maxWireVersion=6) + server.receives("ismaster").ok(ismaster=True, minWireVersion=2, maxWireVersion=6) self.assertGreaterEqual(time.time() - start, 10) -if __name__ == '__main__': + +if __name__ == "__main__": unittest.main() diff --git a/test/mockupdb/test_list_indexes.py b/test/mockupdb/test_list_indexes.py index b4787ff624..2bdbd7b910 100644 --- a/test/mockupdb/test_list_indexes.py +++ b/test/mockupdb/test_list_indexes.py @@ -14,42 +14,34 @@ """Test list_indexes with more than one batch.""" -from bson import SON +import unittest -from mockupdb import going, MockupDB, OpGetMore -from pymongo import MongoClient +from mockupdb import MockupDB, OpGetMore, going -import unittest +from bson import SON +from pymongo import MongoClient class TestListIndexes(unittest.TestCase): - def test_list_indexes_command(self): - server = MockupDB(auto_ismaster={'maxWireVersion': 6}) + server = MockupDB(auto_ismaster={"maxWireVersion": 6}) server.run() self.addCleanup(server.stop) client = MongoClient(server.uri) self.addCleanup(client.close) with going(client.test.collection.list_indexes) as cursor: - request = server.receives( - listIndexes='collection', namespace='test') - request.reply({'cursor': { - 'firstBatch': [{'name': 'index_0'}], - 'id': 123}}) + request = server.receives(listIndexes="collection", namespace="test") + request.reply({"cursor": {"firstBatch": [{"name": "index_0"}], "id": 123}}) with going(list, cursor()) as indexes: - request = server.receives(getMore=123, - namespace='test', - collection='collection') + request = server.receives(getMore=123, namespace="test", collection="collection") - request.reply({'cursor': { - 'nextBatch': [{'name': 'index_1'}], - 'id': 0}}) + request.reply({"cursor": {"nextBatch": [{"name": "index_1"}], "id": 0}}) - self.assertEqual([{'name': 'index_0'}, {'name': 'index_1'}], indexes()) + self.assertEqual([{"name": "index_0"}, {"name": "index_1"}], indexes()) for index_info in indexes(): self.assertIsInstance(index_info, SON) -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() diff --git a/test/mockupdb/test_max_staleness.py b/test/mockupdb/test_max_staleness.py index 9bd65a1764..02efb6a718 100644 --- a/test/mockupdb/test_max_staleness.py +++ b/test/mockupdb/test_max_staleness.py @@ -12,33 +12,31 @@ # See the License for the specific language governing permissions and # limitations under the License. +import unittest + from mockupdb import MockupDB, going -from pymongo import MongoClient -import unittest +from pymongo import MongoClient class TestMaxStalenessMongos(unittest.TestCase): def test_mongos(self): mongos = MockupDB() - mongos.autoresponds('ismaster', maxWireVersion=6, - ismaster=True, msg='isdbgrid') + mongos.autoresponds("ismaster", maxWireVersion=6, ismaster=True, msg="isdbgrid") mongos.run() self.addCleanup(mongos.stop) # No maxStalenessSeconds. - uri = 'mongodb://localhost:%d/?readPreference=secondary' % mongos.port + uri = "mongodb://localhost:%d/?readPreference=secondary" % mongos.port client = MongoClient(uri) self.addCleanup(client.close) with going(client.db.coll.find_one) as future: request = mongos.receives() - self.assertNotIn( - 'maxStalenessSeconds', - request.doc['$readPreference']) + self.assertNotIn("maxStalenessSeconds", request.doc["$readPreference"]) self.assertTrue(request.slave_okay) - request.ok(cursor={'firstBatch': [], 'id': 0}) + request.ok(cursor={"firstBatch": [], "id": 0}) # find_one succeeds with no result. 
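        # going() runs find_one on a background thread and yields a handle;
        # calling future() joins that thread and returns the operation's
        # result, None here because the batch was empty.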
self.assertIsNone(future()) @@ -46,22 +44,22 @@ def test_mongos(self): # Set maxStalenessSeconds to 1. Client has no minimum with mongos, # we let mongos enforce the 90-second minimum and return an error: # SERVER-27146. - uri = 'mongodb://localhost:%d/?readPreference=secondary' \ - '&maxStalenessSeconds=1' % mongos.port + uri = ( + "mongodb://localhost:%d/?readPreference=secondary" + "&maxStalenessSeconds=1" % mongos.port + ) client = MongoClient(uri) self.addCleanup(client.close) with going(client.db.coll.find_one) as future: request = mongos.receives() - self.assertEqual( - 1, - request.doc['$readPreference']['maxStalenessSeconds']) + self.assertEqual(1, request.doc["$readPreference"]["maxStalenessSeconds"]) self.assertTrue(request.slave_okay) - request.ok(cursor={'firstBatch': [], 'id': 0}) + request.ok(cursor={"firstBatch": [], "id": 0}) self.assertIsNone(future()) -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() diff --git a/test/mockupdb/test_mixed_version_sharded.py b/test/mockupdb/test_mixed_version_sharded.py index c3af907404..ce91794ee4 100644 --- a/test/mockupdb/test_mixed_version_sharded.py +++ b/test/mockupdb/test_mixed_version_sharded.py @@ -15,15 +15,14 @@ """Test PyMongo with a mixed-version cluster.""" import time - +import unittest from queue import Queue -from mockupdb import MockupDB, go, OpMsg -from pymongo import MongoClient - -import unittest +from mockupdb import MockupDB, OpMsg, go from operations import upgrades +from pymongo import MongoClient + class TestMixedVersionSharded(unittest.TestCase): def setup_server(self, upgrade): @@ -33,25 +32,29 @@ def setup_server(self, upgrade): self.q: Queue = Queue() for server in self.mongos_old, self.mongos_new: server.subscribe(self.q.put) - server.autoresponds('getlasterror') + server.autoresponds("getlasterror") server.run() self.addCleanup(server.stop) # Max wire version is too old for the upgraded operation. - self.mongos_old.autoresponds('ismaster', ismaster=True, msg='isdbgrid', - maxWireVersion=upgrade.wire_version - 1) + self.mongos_old.autoresponds( + "ismaster", ismaster=True, msg="isdbgrid", maxWireVersion=upgrade.wire_version - 1 + ) # Up-to-date max wire version. 
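        # With one mongos advertising wire_version - 1 and the other
        # wire_version, the test drives operations until both mongoses have
        # been used, asserting the legacy command shape goes to the old one
        # and the upgraded shape to the new one.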
- self.mongos_new.autoresponds('ismaster', ismaster=True, msg='isdbgrid', - maxWireVersion=upgrade.wire_version) + self.mongos_new.autoresponds( + "ismaster", ismaster=True, msg="isdbgrid", maxWireVersion=upgrade.wire_version + ) - self.mongoses_uri = 'mongodb://%s,%s' % (self.mongos_old.address_string, - self.mongos_new.address_string) + self.mongoses_uri = "mongodb://%s,%s" % ( + self.mongos_old.address_string, + self.mongos_new.address_string, + ) self.client = MongoClient(self.mongoses_uri) def tearDown(self): - if hasattr(self, 'client') and self.client: + if hasattr(self, "client") and self.client: self.client.close() @@ -64,23 +67,24 @@ def test(self): go(upgrade.function, self.client) request = self.q.get(timeout=1) servers_used.add(request.server) - request.assert_matches(upgrade.old - if request.server is self.mongos_old - else upgrade.new) + request.assert_matches( + upgrade.old if request.server is self.mongos_old else upgrade.new + ) if time.time() > start + 10: - self.fail('never used both mongoses') + self.fail("never used both mongoses") + return test def generate_mixed_version_sharded_tests(): for upgrade in upgrades: test = create_mixed_version_sharded_test(upgrade) - test_name = 'test_%s' % upgrade.name.replace(' ', '_') + test_name = "test_%s" % upgrade.name.replace(" ", "_") test.__name__ = test_name setattr(TestMixedVersionSharded, test_name, test) generate_mixed_version_sharded_tests() -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() diff --git a/test/mockupdb/test_mongos_command_read_mode.py b/test/mockupdb/test_mongos_command_read_mode.py index 49aee27047..d2c3bfc1b0 100644 --- a/test/mockupdb/test_mongos_command_read_mode.py +++ b/test/mockupdb/test_mongos_command_read_mode.py @@ -13,23 +13,26 @@ # limitations under the License. 
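# The tests below cross read-preference modes with operation types:
# must-use-primary ops clear slaveOkay, may-use-secondary ops set it for any
# mode except primary and attach the client's $readPreference, and
# always-use-secondary ops send mode "secondary" regardless of the client.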
import itertools +import unittest + +from mockupdb import MockupDB, OpMsg, go, going +from operations import operations from bson import SON -from mockupdb import MockupDB, going, OpMsg, go from pymongo import MongoClient, ReadPreference -from pymongo.read_preferences import (make_read_preference, - read_pref_mode_from_name, - _MONGOS_MODES) - -import unittest -from operations import operations +from pymongo.read_preferences import ( + _MONGOS_MODES, + make_read_preference, + read_pref_mode_from_name, +) class TestMongosCommandReadMode(unittest.TestCase): def test_aggregate(self): server = MockupDB() - server.autoresponds('ismaster', ismaster=True, msg='isdbgrid', - minWireVersion=2, maxWireVersion=6) + server.autoresponds( + "ismaster", ismaster=True, msg="isdbgrid", minWireVersion=2, maxWireVersion=6 + ) self.addCleanup(server.stop) server.run() @@ -37,20 +40,25 @@ def test_aggregate(self): self.addCleanup(client.close) collection = client.test.collection with going(collection.aggregate, []): - command = server.receives(aggregate='collection', pipeline=[]) - self.assertFalse(command.slave_ok, 'SlaveOkay set') + command = server.receives(aggregate="collection", pipeline=[]) + self.assertFalse(command.slave_ok, "SlaveOkay set") command.ok(result=[{}]) - secondary_collection = collection.with_options( - read_preference=ReadPreference.SECONDARY) + secondary_collection = collection.with_options(read_preference=ReadPreference.SECONDARY) with going(secondary_collection.aggregate, []): - command = server.receives(OpMsg({"aggregate": "collection", - "pipeline": [], - '$readPreference': {'mode': 'secondary'}})) + command = server.receives( + OpMsg( + { + "aggregate": "collection", + "pipeline": [], + "$readPreference": {"mode": "secondary"}, + } + ) + ) command.ok(result=[{}]) - self.assertTrue(command.slave_ok, 'SlaveOkay not set') + self.assertTrue(command.slave_ok, "SlaveOkay not set") def create_mongos_read_mode_test(mode, operation): @@ -58,11 +66,11 @@ def test(self): server = MockupDB() self.addCleanup(server.stop) server.run() - server.autoresponds('ismaster', ismaster=True, msg='isdbgrid', - minWireVersion=2, maxWireVersion=6) + server.autoresponds( + "ismaster", ismaster=True, msg="isdbgrid", minWireVersion=2, maxWireVersion=6 + ) - pref = make_read_preference(read_pref_mode_from_name(mode), - tag_sets=None) + pref = make_read_preference(read_pref_mode_from_name(mode), tag_sets=None) client = MongoClient(server.uri, read_preference=pref) self.addCleanup(client.close) @@ -71,26 +79,25 @@ def test(self): request = server.receive() request.reply(operation.reply) - if operation.op_type == 'always-use-secondary': - self.assertEqual(ReadPreference.SECONDARY.document, - request.doc.get('$readPreference')) - slave_ok = mode != 'primary' - elif operation.op_type == 'must-use-primary': + if operation.op_type == "always-use-secondary": + self.assertEqual(ReadPreference.SECONDARY.document, request.doc.get("$readPreference")) + slave_ok = mode != "primary" + elif operation.op_type == "must-use-primary": slave_ok = False - elif operation.op_type == 'may-use-secondary': - slave_ok = mode != 'primary' - actual_pref = request.doc.get('$readPreference') - if mode == 'primary': + elif operation.op_type == "may-use-secondary": + slave_ok = mode != "primary" + actual_pref = request.doc.get("$readPreference") + if mode == "primary": self.assertIsNone(actual_pref) else: self.assertEqual(pref.document, actual_pref) else: - self.fail('unrecognized op_type %r' % operation.op_type) + self.fail("unrecognized 
op_type %r" % operation.op_type) if slave_ok: - self.assertTrue(request.slave_ok, 'SlaveOkay not set') + self.assertTrue(request.slave_ok, "SlaveOkay not set") else: - self.assertFalse(request.slave_ok, 'SlaveOkay set') + self.assertFalse(request.slave_ok, "SlaveOkay set") return test @@ -100,12 +107,11 @@ def generate_mongos_read_mode_tests(): for entry in matrix: mode, operation = entry - if mode == 'primary' and operation.op_type == 'always-use-secondary': + if mode == "primary" and operation.op_type == "always-use-secondary": # Skip something like command('foo', read_preference=SECONDARY). continue test = create_mongos_read_mode_test(mode, operation) - test_name = 'test_%s_with_mode_%s' % ( - operation.name.replace(' ', '_'), mode) + test_name = "test_%s_with_mode_%s" % (operation.name.replace(" ", "_"), mode) test.__name__ = test_name setattr(TestMongosCommandReadMode, test_name, test) @@ -113,5 +119,5 @@ def generate_mongos_read_mode_tests(): generate_mongos_read_mode_tests() -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() diff --git a/test/mockupdb/test_network_disconnect_primary.py b/test/mockupdb/test_network_disconnect_primary.py index bc29ce5f0f..dcf5256fac 100755 --- a/test/mockupdb/test_network_disconnect_primary.py +++ b/test/mockupdb/test_network_disconnect_primary.py @@ -12,14 +12,14 @@ # See the License for the specific language governing permissions and # limitations under the License. +import unittest from queue import Queue -from mockupdb import MockupDB, wait_until, OpReply, going, Future +from mockupdb import Future, MockupDB, OpReply, going, wait_until + +from pymongo import MongoClient from pymongo.errors import ConnectionFailure from pymongo.topology_description import TOPOLOGY_TYPE -from pymongo import MongoClient - -import unittest class TestNetworkDisconnectPrimary(unittest.TestCase): @@ -33,52 +33,53 @@ def test_network_disconnect_primary(self): self.addCleanup(server.stop) hosts = [server.address_string for server in servers] - primary_response = OpReply(ismaster=True, setName='rs', hosts=hosts, - minWireVersion=2, maxWireVersion=6) - primary.autoresponds('ismaster', primary_response) + primary_response = OpReply( + ismaster=True, setName="rs", hosts=hosts, minWireVersion=2, maxWireVersion=6 + ) + primary.autoresponds("ismaster", primary_response) secondary.autoresponds( - 'ismaster', - ismaster=False, secondary=True, setName='rs', hosts=hosts, - minWireVersion=2, maxWireVersion=6) - - client = MongoClient(primary.uri, replicaSet='rs') + "ismaster", + ismaster=False, + secondary=True, + setName="rs", + hosts=hosts, + minWireVersion=2, + maxWireVersion=6, + ) + + client = MongoClient(primary.uri, replicaSet="rs") self.addCleanup(client.close) - wait_until(lambda: client.primary == primary.address, - 'discover primary') + wait_until(lambda: client.primary == primary.address, "discover primary") topology = client._topology - self.assertEqual(TOPOLOGY_TYPE.ReplicaSetWithPrimary, - topology.description.topology_type) + self.assertEqual(TOPOLOGY_TYPE.ReplicaSetWithPrimary, topology.description.topology_type) # Open a socket in the application pool (calls ismaster). - with going(client.db.command, 'buildinfo'): - primary.receives('buildinfo').ok() + with going(client.db.command, "buildinfo"): + primary.receives("buildinfo").ok() # The primary hangs replying to ismaster. 
ismaster_future = Future() - primary.autoresponds('ismaster', - lambda r: r.ok(ismaster_future.result())) + primary.autoresponds("ismaster", lambda r: r.ok(ismaster_future.result())) # Network error on application operation. with self.assertRaises(ConnectionFailure): - with going(client.db.command, 'buildinfo'): - primary.receives('buildinfo').hangup() + with going(client.db.command, "buildinfo"): + primary.receives("buildinfo").hangup() # Topology type is updated. - self.assertEqual(TOPOLOGY_TYPE.ReplicaSetNoPrimary, - topology.description.topology_type) + self.assertEqual(TOPOLOGY_TYPE.ReplicaSetNoPrimary, topology.description.topology_type) # Let ismasters through again. ismaster_future.set_result(primary_response) # Demand a primary. - with going(client.db.command, 'buildinfo'): - wait_until(lambda: client.primary == primary.address, - 'rediscover primary') - primary.receives('buildinfo').ok() + with going(client.db.command, "buildinfo"): + wait_until(lambda: client.primary == primary.address, "rediscover primary") + primary.receives("buildinfo").ok() + + self.assertEqual(TOPOLOGY_TYPE.ReplicaSetWithPrimary, topology.description.topology_type) - self.assertEqual(TOPOLOGY_TYPE.ReplicaSetWithPrimary, - topology.description.topology_type) -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() diff --git a/test/mockupdb/test_op_msg.py b/test/mockupdb/test_op_msg.py index 78397a3336..da7ff3d33e 100755 --- a/test/mockupdb/test_op_msg.py +++ b/test/mockupdb/test_op_msg.py @@ -12,223 +12,248 @@ # See the License for the specific language governing permissions and # limitations under the License. +import unittest from collections import namedtuple -from mockupdb import MockupDB, going, OpMsg, OpMsgReply, OP_MSG_FLAGS +from mockupdb import OP_MSG_FLAGS, MockupDB, OpMsg, OpMsgReply, going + from pymongo import MongoClient, WriteConcern -from pymongo.operations import InsertOne, UpdateOne, DeleteOne from pymongo.cursor import CursorType +from pymongo.operations import DeleteOne, InsertOne, UpdateOne -import unittest - - -Operation = namedtuple( - 'Operation', - ['name', 'function', 'request', 'reply']) +Operation = namedtuple("Operation", ["name", "function", "request", "reply"]) operations = [ Operation( - 'find_one', + "find_one", lambda coll: coll.find_one({}), request=OpMsg({"find": "coll"}, flags=0), - reply={'ok': 1, 'cursor': {'firstBatch': [], 'id': 0}}), + reply={"ok": 1, "cursor": {"firstBatch": [], "id": 0}}, + ), Operation( - 'aggregate', + "aggregate", lambda coll: coll.aggregate([]), request=OpMsg({"aggregate": "coll"}, flags=0), - reply={'ok': 1, 'cursor': {'firstBatch': [], 'id': 0}}), + reply={"ok": 1, "cursor": {"firstBatch": [], "id": 0}}, + ), Operation( - 'insert_one', + "insert_one", lambda coll: coll.insert_one({}), request=OpMsg({"insert": "coll"}, flags=0), - reply={'ok': 1, 'n': 1}), + reply={"ok": 1, "n": 1}, + ), Operation( - 'insert_one-w0', - lambda coll: coll.with_options( - write_concern=WriteConcern(w=0)).insert_one({}), - request=OpMsg({"insert": "coll"}, flags=OP_MSG_FLAGS['moreToCome']), - reply=None), + "insert_one-w0", + lambda coll: coll.with_options(write_concern=WriteConcern(w=0)).insert_one({}), + request=OpMsg({"insert": "coll"}, flags=OP_MSG_FLAGS["moreToCome"]), + reply=None, + ), Operation( - 'insert_many', + "insert_many", lambda coll: coll.insert_many([{}, {}, {}]), request=OpMsg({"insert": "coll"}, flags=0), - reply={'ok': 1, 'n': 3}), + reply={"ok": 1, "n": 3}, + ), Operation( - 'insert_many-w0', - lambda coll: coll.with_options( - 
write_concern=WriteConcern(w=0)).insert_many([{}, {}, {}]), + "insert_many-w0", + lambda coll: coll.with_options(write_concern=WriteConcern(w=0)).insert_many([{}, {}, {}]), request=OpMsg({"insert": "coll"}, flags=0), - reply={'ok': 1, 'n': 3}), + reply={"ok": 1, "n": 3}, + ), Operation( - 'insert_many-w0-unordered', - lambda coll: coll.with_options( - write_concern=WriteConcern(w=0)).insert_many( - [{}, {}, {}], ordered=False), - request=OpMsg({"insert": "coll"}, flags=OP_MSG_FLAGS['moreToCome']), - reply=None), + "insert_many-w0-unordered", + lambda coll: coll.with_options(write_concern=WriteConcern(w=0)).insert_many( + [{}, {}, {}], ordered=False + ), + request=OpMsg({"insert": "coll"}, flags=OP_MSG_FLAGS["moreToCome"]), + reply=None, + ), Operation( - 'replace_one', + "replace_one", lambda coll: coll.replace_one({"_id": 1}, {"new": 1}), request=OpMsg({"update": "coll"}, flags=0), - reply={'ok': 1, 'n': 1, 'nModified': 1}), + reply={"ok": 1, "n": 1, "nModified": 1}, + ), Operation( - 'replace_one-w0', - lambda coll: coll.with_options( - write_concern=WriteConcern(w=0)).replace_one({"_id": 1}, - {"new": 1}), - request=OpMsg({"update": "coll"}, flags=OP_MSG_FLAGS['moreToCome']), - reply=None), + "replace_one-w0", + lambda coll: coll.with_options(write_concern=WriteConcern(w=0)).replace_one( + {"_id": 1}, {"new": 1} + ), + request=OpMsg({"update": "coll"}, flags=OP_MSG_FLAGS["moreToCome"]), + reply=None, + ), Operation( - 'update_one', + "update_one", lambda coll: coll.update_one({"_id": 1}, {"$set": {"new": 1}}), request=OpMsg({"update": "coll"}, flags=0), - reply={'ok': 1, 'n': 1, 'nModified': 1}), + reply={"ok": 1, "n": 1, "nModified": 1}, + ), Operation( - 'replace_one-w0', - lambda coll: coll.with_options( - write_concern=WriteConcern(w=0)).update_one({"_id": 1}, - {"$set": {"new": 1}}), - request=OpMsg({"update": "coll"}, flags=OP_MSG_FLAGS['moreToCome']), - reply=None), + "replace_one-w0", + lambda coll: coll.with_options(write_concern=WriteConcern(w=0)).update_one( + {"_id": 1}, {"$set": {"new": 1}} + ), + request=OpMsg({"update": "coll"}, flags=OP_MSG_FLAGS["moreToCome"]), + reply=None, + ), Operation( - 'update_many', + "update_many", lambda coll: coll.update_many({"_id": 1}, {"$set": {"new": 1}}), request=OpMsg({"update": "coll"}, flags=0), - reply={'ok': 1, 'n': 1, 'nModified': 1}), + reply={"ok": 1, "n": 1, "nModified": 1}, + ), Operation( - 'update_many-w0', - lambda coll: coll.with_options( - write_concern=WriteConcern(w=0)).update_many({"_id": 1}, - {"$set": {"new": 1}}), - request=OpMsg({"update": "coll"}, flags=OP_MSG_FLAGS['moreToCome']), - reply=None), + "update_many-w0", + lambda coll: coll.with_options(write_concern=WriteConcern(w=0)).update_many( + {"_id": 1}, {"$set": {"new": 1}} + ), + request=OpMsg({"update": "coll"}, flags=OP_MSG_FLAGS["moreToCome"]), + reply=None, + ), Operation( - 'delete_one', + "delete_one", lambda coll: coll.delete_one({"a": 1}), request=OpMsg({"delete": "coll"}, flags=0), - reply={'ok': 1, 'n': 1}), + reply={"ok": 1, "n": 1}, + ), Operation( - 'delete_one-w0', - lambda coll: coll.with_options( - write_concern=WriteConcern(w=0)).delete_one({"a": 1}), - request=OpMsg({"delete": "coll"}, flags=OP_MSG_FLAGS['moreToCome']), - reply=None), + "delete_one-w0", + lambda coll: coll.with_options(write_concern=WriteConcern(w=0)).delete_one({"a": 1}), + request=OpMsg({"delete": "coll"}, flags=OP_MSG_FLAGS["moreToCome"]), + reply=None, + ), Operation( - 'delete_many', + "delete_many", lambda coll: coll.delete_many({"a": 1}), request=OpMsg({"delete": 
"coll"}, flags=0), - reply={'ok': 1, 'n': 1}), + reply={"ok": 1, "n": 1}, + ), Operation( - 'delete_many-w0', - lambda coll: coll.with_options( - write_concern=WriteConcern(w=0)).delete_many({"a": 1}), - request=OpMsg({"delete": "coll"}, flags=OP_MSG_FLAGS['moreToCome']), - reply=None), + "delete_many-w0", + lambda coll: coll.with_options(write_concern=WriteConcern(w=0)).delete_many({"a": 1}), + request=OpMsg({"delete": "coll"}, flags=OP_MSG_FLAGS["moreToCome"]), + reply=None, + ), # Legacy methods Operation( - 'bulk_write_insert', + "bulk_write_insert", lambda coll: coll.bulk_write([InsertOne({}), InsertOne({})]), request=OpMsg({"insert": "coll"}, flags=0), - reply={'ok': 1, 'n': 2}), + reply={"ok": 1, "n": 2}, + ), Operation( - 'bulk_write_insert-w0', - lambda coll: coll.with_options( - write_concern=WriteConcern(w=0)).bulk_write([InsertOne({}), - InsertOne({})]), + "bulk_write_insert-w0", + lambda coll: coll.with_options(write_concern=WriteConcern(w=0)).bulk_write( + [InsertOne({}), InsertOne({})] + ), request=OpMsg({"insert": "coll"}, flags=0), - reply={'ok': 1, 'n': 2}), + reply={"ok": 1, "n": 2}, + ), Operation( - 'bulk_write_insert-w0-unordered', - lambda coll: coll.with_options( - write_concern=WriteConcern(w=0)).bulk_write( - [InsertOne({}), InsertOne({})], ordered=False), - request=OpMsg({"insert": "coll"}, flags=OP_MSG_FLAGS['moreToCome']), - reply=None), + "bulk_write_insert-w0-unordered", + lambda coll: coll.with_options(write_concern=WriteConcern(w=0)).bulk_write( + [InsertOne({}), InsertOne({})], ordered=False + ), + request=OpMsg({"insert": "coll"}, flags=OP_MSG_FLAGS["moreToCome"]), + reply=None, + ), Operation( - 'bulk_write_update', - lambda coll: coll.bulk_write([ - UpdateOne({"_id": 1}, {"$set": {"new": 1}}), - UpdateOne({"_id": 2}, {"$set": {"new": 1}})]), + "bulk_write_update", + lambda coll: coll.bulk_write( + [ + UpdateOne({"_id": 1}, {"$set": {"new": 1}}), + UpdateOne({"_id": 2}, {"$set": {"new": 1}}), + ] + ), request=OpMsg({"update": "coll"}, flags=0), - reply={'ok': 1, 'n': 2, 'nModified': 2}), + reply={"ok": 1, "n": 2, "nModified": 2}, + ), Operation( - 'bulk_write_update-w0', - lambda coll: coll.with_options( - write_concern=WriteConcern(w=0)).bulk_write([ + "bulk_write_update-w0", + lambda coll: coll.with_options(write_concern=WriteConcern(w=0)).bulk_write( + [ UpdateOne({"_id": 1}, {"$set": {"new": 1}}), - UpdateOne({"_id": 2}, {"$set": {"new": 1}})]), + UpdateOne({"_id": 2}, {"$set": {"new": 1}}), + ] + ), request=OpMsg({"update": "coll"}, flags=0), - reply={'ok': 1, 'n': 2, 'nModified': 2}), + reply={"ok": 1, "n": 2, "nModified": 2}, + ), Operation( - 'bulk_write_update-w0-unordered', - lambda coll: coll.with_options( - write_concern=WriteConcern(w=0)).bulk_write([ + "bulk_write_update-w0-unordered", + lambda coll: coll.with_options(write_concern=WriteConcern(w=0)).bulk_write( + [ UpdateOne({"_id": 1}, {"$set": {"new": 1}}), - UpdateOne({"_id": 2}, {"$set": {"new": 1}})], ordered=False), - request=OpMsg({"update": "coll"}, flags=OP_MSG_FLAGS['moreToCome']), - reply=None), + UpdateOne({"_id": 2}, {"$set": {"new": 1}}), + ], + ordered=False, + ), + request=OpMsg({"update": "coll"}, flags=OP_MSG_FLAGS["moreToCome"]), + reply=None, + ), Operation( - 'bulk_write_delete', - lambda coll: coll.bulk_write([ - DeleteOne({"_id": 1}), DeleteOne({"_id": 2})]), + "bulk_write_delete", + lambda coll: coll.bulk_write([DeleteOne({"_id": 1}), DeleteOne({"_id": 2})]), request=OpMsg({"delete": "coll"}, flags=0), - reply={'ok': 1, 'n': 2}), + reply={"ok": 1, "n": 2}, + ), 
Operation( - 'bulk_write_delete-w0', - lambda coll: coll.with_options( - write_concern=WriteConcern(w=0)).bulk_write([ - DeleteOne({"_id": 1}), DeleteOne({"_id": 2})]), + "bulk_write_delete-w0", + lambda coll: coll.with_options(write_concern=WriteConcern(w=0)).bulk_write( + [DeleteOne({"_id": 1}), DeleteOne({"_id": 2})] + ), request=OpMsg({"delete": "coll"}, flags=0), - reply={'ok': 1, 'n': 2}), + reply={"ok": 1, "n": 2}, + ), Operation( - 'bulk_write_delete-w0-unordered', - lambda coll: coll.with_options( - write_concern=WriteConcern(w=0)).bulk_write([ - DeleteOne({"_id": 1}), DeleteOne({"_id": 2})], ordered=False), - request=OpMsg({"delete": "coll"}, flags=OP_MSG_FLAGS['moreToCome']), - reply=None), + "bulk_write_delete-w0-unordered", + lambda coll: coll.with_options(write_concern=WriteConcern(w=0)).bulk_write( + [DeleteOne({"_id": 1}), DeleteOne({"_id": 2})], ordered=False + ), + request=OpMsg({"delete": "coll"}, flags=OP_MSG_FLAGS["moreToCome"]), + reply=None, + ), ] operations_312 = [ Operation( - 'find_raw_batches', + "find_raw_batches", lambda coll: list(coll.find_raw_batches({})), request=[ OpMsg({"find": "coll"}, flags=0), OpMsg({"getMore": 7}, flags=0), ], reply=[ - {'ok': 1, 'cursor': {'firstBatch': [{}], 'id': 7}}, - {'ok': 1, 'cursor': {'nextBatch': [{}], 'id': 0}}, - ]), + {"ok": 1, "cursor": {"firstBatch": [{}], "id": 7}}, + {"ok": 1, "cursor": {"nextBatch": [{}], "id": 0}}, + ], + ), Operation( - 'aggregate_raw_batches', + "aggregate_raw_batches", lambda coll: list(coll.aggregate_raw_batches([])), request=[ OpMsg({"aggregate": "coll"}, flags=0), OpMsg({"getMore": 7}, flags=0), ], reply=[ - {'ok': 1, 'cursor': {'firstBatch': [], 'id': 7}}, - {'ok': 1, 'cursor': {'nextBatch': [{}], 'id': 0}}, - ]), + {"ok": 1, "cursor": {"firstBatch": [], "id": 7}}, + {"ok": 1, "cursor": {"nextBatch": [{}], "id": 0}}, + ], + ), Operation( - 'find_exhaust_cursor', + "find_exhaust_cursor", lambda coll: list(coll.find({}, cursor_type=CursorType.EXHAUST)), request=[ OpMsg({"find": "coll"}, flags=0), OpMsg({"getMore": 7}, flags=1 << 16), ], reply=[ - OpMsgReply( - {'ok': 1, 'cursor': {'firstBatch': [{}], 'id': 7}}, flags=0), - OpMsgReply( - {'ok': 1, 'cursor': {'nextBatch': [{}], 'id': 7}}, flags=2), - OpMsgReply( - {'ok': 1, 'cursor': {'nextBatch': [{}], 'id': 7}}, flags=2), - OpMsgReply( - {'ok': 1, 'cursor': {'nextBatch': [{}], 'id': 0}}, flags=0), - ]), + OpMsgReply({"ok": 1, "cursor": {"firstBatch": [{}], "id": 7}}, flags=0), + OpMsgReply({"ok": 1, "cursor": {"nextBatch": [{}], "id": 7}}, flags=2), + OpMsgReply({"ok": 1, "cursor": {"nextBatch": [{}], "id": 7}}, flags=2), + OpMsgReply({"ok": 1, "cursor": {"nextBatch": [{}], "id": 0}}, flags=0), + ], + ), ] @@ -273,6 +298,7 @@ def _test_operation(self, op): def operation_test(op): def test(self): self._test_operation(op) + return test @@ -286,5 +312,5 @@ def create_tests(ops): create_tests(operations_312) -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() diff --git a/test/mockupdb/test_op_msg_read_preference.py b/test/mockupdb/test_op_msg_read_preference.py index eb3a14fa01..b8d1348b97 100644 --- a/test/mockupdb/test_op_msg_read_preference.py +++ b/test/mockupdb/test_op_msg_read_preference.py @@ -14,17 +14,19 @@ import copy import itertools +import unittest from typing import Any -from mockupdb import MockupDB, going, CommandBase -from pymongo import MongoClient, ReadPreference -from pymongo.read_preferences import (make_read_preference, - read_pref_mode_from_name, - _MONGOS_MODES) - -import unittest +from mockupdb import 
CommandBase, MockupDB, going from operations import operations +from pymongo import MongoClient, ReadPreference +from pymongo.read_preferences import ( + _MONGOS_MODES, + make_read_preference, + read_pref_mode_from_name, +) + class OpMsgReadPrefBase(unittest.TestCase): single_mongod = False @@ -40,22 +42,20 @@ def add_test(cls, mode, test_name, test): setattr(cls, test_name, test) def setup_client(self, read_preference): - client = MongoClient(self.primary.uri, - read_preference=read_preference) + client = MongoClient(self.primary.uri, read_preference=read_preference) self.addCleanup(client.close) return client class TestOpMsgMongos(OpMsgReadPrefBase): - @classmethod def setUpClass(cls): super(TestOpMsgMongos, cls).setUpClass() auto_ismaster = { - 'ismaster': True, - 'msg': 'isdbgrid', # Mongos. - 'minWireVersion': 2, - 'maxWireVersion': 6, + "ismaster": True, + "msg": "isdbgrid", # Mongos. + "minWireVersion": 2, + "maxWireVersion": 6, } cls.primary = MockupDB(auto_ismaster=auto_ismaster) cls.primary.run() @@ -68,7 +68,6 @@ def tearDownClass(cls): class TestOpMsgReplicaSet(OpMsgReadPrefBase): - @classmethod def setUpClass(cls): super(TestOpMsgReplicaSet, cls).setUpClass() @@ -76,21 +75,20 @@ def setUpClass(cls): for server in cls.primary, cls.secondary: server.run() - hosts = [server.address_string - for server in (cls.primary, cls.secondary)] + hosts = [server.address_string for server in (cls.primary, cls.secondary)] primary_ismaster = { - 'ismaster': True, - 'setName': 'rs', - 'hosts': hosts, - 'minWireVersion': 2, - 'maxWireVersion': 6, + "ismaster": True, + "setName": "rs", + "hosts": hosts, + "minWireVersion": 2, + "maxWireVersion": 6, } - cls.primary.autoresponds(CommandBase('ismaster'), primary_ismaster) + cls.primary.autoresponds(CommandBase("ismaster"), primary_ismaster) secondary_ismaster = copy.copy(primary_ismaster) - secondary_ismaster['ismaster'] = False - secondary_ismaster['secondary'] = True - cls.secondary.autoresponds(CommandBase('ismaster'), secondary_ismaster) + secondary_ismaster["ismaster"] = False + secondary_ismaster["secondary"] = True + cls.secondary.autoresponds(CommandBase("ismaster"), secondary_ismaster) @classmethod def tearDownClass(cls): @@ -102,18 +100,15 @@ def tearDownClass(cls): def add_test(cls, mode, test_name, test): # Skip nearest tests since we don't know if we will select the primary # or secondary. - if mode != 'nearest': + if mode != "nearest": setattr(cls, test_name, test) def setup_client(self, read_preference): - client = MongoClient(self.primary.uri, - replicaSet='rs', - read_preference=read_preference) + client = MongoClient(self.primary.uri, replicaSet="rs", read_preference=read_preference) # Run a command on a secondary to discover the topology. This ensures # that secondaryPreferred commands will select the secondary. 
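# An aside on the priming pattern above: outside MockupDB, the same trick is
# a single throwaway command with an explicit read preference. A minimal
# sketch (the URI and set name below are assumptions, not from this test):
#
#     from pymongo import MongoClient, ReadPreference
#
#     client = MongoClient("mongodb://localhost:27017", replicaSet="rs")
#     # Reading from a secondary forces discovery of every member first.
#     client.admin.command("ping", read_preference=ReadPreference.SECONDARY)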
- client.admin.command('ismaster', - read_preference=ReadPreference.SECONDARY) + client.admin.command("ismaster", read_preference=ReadPreference.SECONDARY) self.addCleanup(client.close) return client @@ -125,9 +120,9 @@ class TestOpMsgSingle(OpMsgReadPrefBase): def setUpClass(cls): super(TestOpMsgSingle, cls).setUpClass() auto_ismaster = { - 'ismaster': True, - 'minWireVersion': 2, - 'maxWireVersion': 6, + "ismaster": True, + "minWireVersion": 2, + "maxWireVersion": 6, } cls.primary = MockupDB(auto_ismaster=auto_ismaster) cls.primary.run() @@ -141,29 +136,28 @@ def tearDownClass(cls): def create_op_msg_read_mode_test(mode, operation): def test(self): - pref = make_read_preference(read_pref_mode_from_name(mode), - tag_sets=None) + pref = make_read_preference(read_pref_mode_from_name(mode), tag_sets=None) client = self.setup_client(read_preference=pref) expected_pref: Any - if operation.op_type == 'always-use-secondary': + if operation.op_type == "always-use-secondary": expected_server = self.secondary expected_pref = ReadPreference.SECONDARY - elif operation.op_type == 'must-use-primary': + elif operation.op_type == "must-use-primary": expected_server = self.primary expected_pref = None - elif operation.op_type == 'may-use-secondary': - if mode == 'primary': + elif operation.op_type == "may-use-secondary": + if mode == "primary": expected_server = self.primary expected_pref = None - elif mode == 'primaryPreferred': + elif mode == "primaryPreferred": expected_server = self.primary expected_pref = pref else: expected_server = self.secondary expected_pref = pref else: - self.fail('unrecognized op_type %r' % operation.op_type) + self.fail("unrecognized op_type %r" % operation.op_type) # For single mongod we omit the read preference. if self.single_mongod: expected_pref = None @@ -171,12 +165,12 @@ def test(self): request = expected_server.receive() request.reply(operation.reply) - actual_pref = request.doc.get('$readPreference') + actual_pref = request.doc.get("$readPreference") if expected_pref: self.assertEqual(expected_pref.document, actual_pref) else: self.assertIsNone(actual_pref) - self.assertNotIn('$query', request.doc) + self.assertNotIn("$query", request.doc) return test @@ -187,8 +181,7 @@ def generate_op_msg_read_mode_tests(): for entry in matrix: mode, operation = entry test = create_op_msg_read_mode_test(mode, operation) - test_name = 'test_%s_with_mode_%s' % ( - operation.name.replace(' ', '_'), mode) + test_name = "test_%s_with_mode_%s" % (operation.name.replace(" ", "_"), mode) test.__name__ = test_name for cls in TestOpMsgMongos, TestOpMsgReplicaSet, TestOpMsgSingle: cls.add_test(mode, test_name, test) @@ -197,5 +190,5 @@ def generate_op_msg_read_mode_tests(): generate_op_msg_read_mode_tests() -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() diff --git a/test/mockupdb/test_query_read_pref_sharded.py b/test/mockupdb/test_query_read_pref_sharded.py index 88dcdd8351..7ad4f2afc8 100644 --- a/test/mockupdb/test_query_read_pref_sharded.py +++ b/test/mockupdb/test_query_read_pref_sharded.py @@ -14,24 +14,28 @@ """Test PyMongo query and read preference with a sharded cluster.""" +import unittest + +from mockupdb import MockupDB, OpMsg, going + from bson import SON from pymongo import MongoClient -from pymongo.read_preferences import (Primary, - PrimaryPreferred, - Secondary, - SecondaryPreferred, - Nearest) -from mockupdb import MockupDB, going, OpMsg - -import unittest +from pymongo.read_preferences import ( + Nearest, + Primary, + PrimaryPreferred, + Secondary, 
+ SecondaryPreferred, +) class TestQueryAndReadModeSharded(unittest.TestCase): def test_query_and_read_mode_sharded_op_msg(self): """Test OP_MSG sends non-primary $readPreference and never $query.""" server = MockupDB() - server.autoresponds('ismaster', ismaster=True, msg='isdbgrid', - minWireVersion=2, maxWireVersion=6) + server.autoresponds( + "ismaster", ismaster=True, msg="isdbgrid", minWireVersion=2, maxWireVersion=6 + ) server.run() self.addCleanup(server.stop) @@ -44,24 +48,26 @@ def test_query_and_read_mode_sharded_op_msg(self): PrimaryPreferred(), Secondary(), Nearest(), - SecondaryPreferred([{'tag': 'value'}]),) + SecondaryPreferred([{"tag": "value"}]), + ) - for query in ({'a': 1}, {'$query': {'a': 1}},): + for query in ( + {"a": 1}, + {"$query": {"a": 1}}, + ): for pref in read_prefs: - collection = client.db.get_collection('test', - read_preference=pref) + collection = client.db.get_collection("test", read_preference=pref) cursor = collection.find(query.copy()) with going(next, cursor): request = server.receives() # Command is not nested in $query. - expected_cmd = SON([('find', 'test'), - ('filter', {'a': 1})]) + expected_cmd = SON([("find", "test"), ("filter", {"a": 1})]) if pref.mode: - expected_cmd['$readPreference'] = pref.document + expected_cmd["$readPreference"] = pref.document request.assert_matches(OpMsg(expected_cmd)) - request.replies({'cursor': {'id': 0, 'firstBatch': [{}]}}) + request.replies({"cursor": {"id": 0, "firstBatch": [{}]}}) -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() diff --git a/test/mockupdb/test_reset_and_request_check.py b/test/mockupdb/test_reset_and_request_check.py index 48f9486544..778be3d5ca 100755 --- a/test/mockupdb/test_reset_and_request_check.py +++ b/test/mockupdb/test_reset_and_request_check.py @@ -12,17 +12,17 @@ # See the License for the specific language governing permissions and # limitations under the License. -import time import itertools +import time +import unittest from mockupdb import MockupDB, going, wait_until -from pymongo.server_type import SERVER_TYPE -from pymongo.errors import ConnectionFailure -from pymongo import MongoClient - -import unittest from operations import operations +from pymongo import MongoClient +from pymongo.errors import ConnectionFailure +from pymongo.server_type import SERVER_TYPE + class TestResetAndRequestCheck(unittest.TestCase): def __init__(self, *args, **kwargs): @@ -38,18 +38,18 @@ def responder(request): self.ismaster_time = time.time() return request.ok(ismaster=True, minWireVersion=2, maxWireVersion=6) - self.server.autoresponds('ismaster', responder) + self.server.autoresponds("ismaster", responder) self.server.run() self.addCleanup(self.server.stop) - kwargs = {'socketTimeoutMS': 100} + kwargs = {"socketTimeoutMS": 100} # Disable retryable reads when pymongo supports it. - kwargs['retryReads'] = False + kwargs["retryReads"] = False self.client = MongoClient(self.server.uri, **kwargs) # type: ignore - wait_until(lambda: self.client.nodes, 'connect to standalone') + wait_until(lambda: self.client.nodes, "connect to standalone") def tearDown(self): - if hasattr(self, 'client') and self.client: + if hasattr(self, "client") and self.client: self.client.close() def _test_disconnect(self, operation): @@ -73,11 +73,11 @@ def _test_disconnect(self, operation): after = time.time() # Demand a reconnect. 
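# The going() helper used below runs the client call on a background thread
# so the test can script the server's wire replies in the foreground. A
# self-contained sketch of that pattern (the handshake dict is an assumption):
#
#     from mockupdb import MockupDB, going
#     from pymongo import MongoClient
#
#     server = MockupDB(auto_ismaster={"minWireVersion": 2, "maxWireVersion": 6})
#     server.run()
#     client = MongoClient(server.uri)
#     with going(client.db.command, "buildinfo") as future:
#         server.receives("buildinfo").ok()  # reply {'ok': 1} to the command
#     future()  # join the background thread, re-raising any exception
#     server.stop()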
- with going(self.client.db.command, 'buildinfo'): - self.server.receives('buildinfo').ok() + with going(self.client.db.command, "buildinfo"): + self.server.receives("buildinfo").ok() last = self.ismaster_time - self.assertGreaterEqual(last, after, 'called ismaster before needed') + self.assertGreaterEqual(last, after, "called ismaster before needed") def _test_timeout(self, operation): # Application operation times out. Test that client does *not* reset @@ -99,7 +99,7 @@ def _test_timeout(self, operation): self.assertEqual(SERVER_TYPE.Standalone, server.description.server_type) after = self.ismaster_time - self.assertEqual(after, before, 'unneeded ismaster call') + self.assertEqual(after, before, "unneeded ismaster call") def _test_not_master(self, operation): # Application operation gets a "not master" error. @@ -121,7 +121,7 @@ def _test_not_master(self, operation): self.assertEqual(SERVER_TYPE.Standalone, server.description.server_type) after = self.ismaster_time - self.assertGreater(after, before, 'ismaster not called') + self.assertGreater(after, before, "ismaster not called") def create_reset_test(operation, test_method): @@ -133,9 +133,9 @@ def test(self): def generate_reset_tests(): test_methods = [ - (TestResetAndRequestCheck._test_disconnect, 'test_disconnect'), - (TestResetAndRequestCheck._test_timeout, 'test_timeout'), - (TestResetAndRequestCheck._test_not_master, 'test_not_master'), + (TestResetAndRequestCheck._test_disconnect, "test_disconnect"), + (TestResetAndRequestCheck._test_timeout, "test_timeout"), + (TestResetAndRequestCheck._test_not_master, "test_not_master"), ] matrix = itertools.product(operations, test_methods) @@ -143,12 +143,12 @@ def generate_reset_tests(): for entry in matrix: operation, (test_method, name) = entry test = create_reset_test(operation, test_method) - test_name = '%s_%s' % (name, operation.name.replace(' ', '_')) + test_name = "%s_%s" % (name, operation.name.replace(" ", "_")) test.__name__ = test_name setattr(TestResetAndRequestCheck, test_name, test) generate_reset_tests() -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() diff --git a/test/mockupdb/test_rsghost.py b/test/mockupdb/test_rsghost.py index 2f02503f54..354399728d 100644 --- a/test/mockupdb/test_rsghost.py +++ b/test/mockupdb/test_rsghost.py @@ -15,38 +15,45 @@ """Test connections to RSGhost nodes.""" import datetime +import unittest + +from mockupdb import MockupDB, going -from mockupdb import going, MockupDB from pymongo import MongoClient from pymongo.errors import ServerSelectionTimeoutError -import unittest - class TestRSGhost(unittest.TestCase): - def test_rsghost(self): rsother_response = { - 'ok': 1.0, 'ismaster': False, 'secondary': False, - 'info': 'Does not have a valid replica set config', - 'isreplicaset': True, 'maxBsonObjectSize': 16777216, - 'maxMessageSizeBytes': 48000000, 'maxWriteBatchSize': 100000, - 'localTime': datetime.datetime(2021, 11, 30, 0, 53, 4, 99000), - 'logicalSessionTimeoutMinutes': 30, 'connectionId': 3, - 'minWireVersion': 0, 'maxWireVersion': 15, 'readOnly': False} + "ok": 1.0, + "ismaster": False, + "secondary": False, + "info": "Does not have a valid replica set config", + "isreplicaset": True, + "maxBsonObjectSize": 16777216, + "maxMessageSizeBytes": 48000000, + "maxWriteBatchSize": 100000, + "localTime": datetime.datetime(2021, 11, 30, 0, 53, 4, 99000), + "logicalSessionTimeoutMinutes": 30, + "connectionId": 3, + "minWireVersion": 0, + "maxWireVersion": 15, + "readOnly": False, + } server = 
MockupDB(auto_ismaster=rsother_response) server.run() self.addCleanup(server.stop) # Default auto discovery yields a server selection timeout. with MongoClient(server.uri, serverSelectionTimeoutMS=250) as client: with self.assertRaises(ServerSelectionTimeoutError): - client.test.command('ping') + client.test.command("ping") # Direct connection succeeds. with MongoClient(server.uri, directConnection=True) as client: - with going(client.test.command, 'ping'): + with going(client.test.command, "ping"): request = server.receives(ping=1) request.reply() -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() diff --git a/test/mockupdb/test_slave_okay_rs.py b/test/mockupdb/test_slave_okay_rs.py index 5ff6fced4e..5a162c08e3 100644 --- a/test/mockupdb/test_slave_okay_rs.py +++ b/test/mockupdb/test_slave_okay_rs.py @@ -17,12 +17,13 @@ Just make sure SlaveOkay is *not* set on primary reads. """ -from mockupdb import MockupDB, going -from pymongo import MongoClient - import unittest + +from mockupdb import MockupDB, going from operations import operations +from pymongo import MongoClient + class TestSlaveOkayRS(unittest.TestCase): def setup_server(self): @@ -31,24 +32,27 @@ def setup_server(self): server.run() self.addCleanup(server.stop) - hosts = [server.address_string - for server in (self.primary, self.secondary)] + hosts = [server.address_string for server in (self.primary, self.secondary)] self.primary.autoresponds( - 'ismaster', - ismaster=True, setName='rs', hosts=hosts, - minWireVersion=2, maxWireVersion=6) + "ismaster", ismaster=True, setName="rs", hosts=hosts, minWireVersion=2, maxWireVersion=6 + ) self.secondary.autoresponds( - 'ismaster', - ismaster=False, secondary=True, setName='rs', hosts=hosts, - minWireVersion=2, maxWireVersion=6) + "ismaster", + ismaster=False, + secondary=True, + setName="rs", + hosts=hosts, + minWireVersion=2, + maxWireVersion=6, + ) def create_slave_ok_rs_test(operation): def test(self): self.setup_server() - assert not operation.op_type == 'always-use-secondary' + assert not operation.op_type == "always-use-secondary" - client = MongoClient(self.primary.uri, replicaSet='rs') + client = MongoClient(self.primary.uri, replicaSet="rs") self.addCleanup(client.close) with going(operation.function, client): request = self.primary.receive() @@ -63,11 +67,11 @@ def generate_slave_ok_rs_tests(): for operation in operations: # Don't test secondary operations with MockupDB, the server enforces the # SlaveOkay bit so integration tests prove we set it. - if operation.op_type == 'always-use-secondary': + if operation.op_type == "always-use-secondary": continue test = create_slave_ok_rs_test(operation) - test_name = 'test_%s' % operation.name.replace(' ', '_') + test_name = "test_%s" % operation.name.replace(" ", "_") test.__name__ = test_name setattr(TestSlaveOkayRS, test_name, test) @@ -75,5 +79,5 @@ def generate_slave_ok_rs_tests(): generate_slave_ok_rs_tests() -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() diff --git a/test/mockupdb/test_slave_okay_sharded.py b/test/mockupdb/test_slave_okay_sharded.py index 07e05bfece..52c643b417 100644 --- a/test/mockupdb/test_slave_okay_sharded.py +++ b/test/mockupdb/test_slave_okay_sharded.py @@ -19,18 +19,15 @@ - A direct connection to a mongos. 
""" import itertools - -from pymongo.read_preferences import make_read_preference -from pymongo.read_preferences import read_pref_mode_from_name - +import unittest from queue import Queue from mockupdb import MockupDB, going -from pymongo import MongoClient - -import unittest from operations import operations +from pymongo import MongoClient +from pymongo.read_preferences import make_read_preference, read_pref_mode_from_name + class TestSlaveOkaySharded(unittest.TestCase): def setup_server(self): @@ -42,27 +39,29 @@ def setup_server(self): server.subscribe(self.q.put) server.run() self.addCleanup(server.stop) - server.autoresponds('ismaster', minWireVersion=2, maxWireVersion=6, - ismaster=True, msg='isdbgrid') + server.autoresponds( + "ismaster", minWireVersion=2, maxWireVersion=6, ismaster=True, msg="isdbgrid" + ) - self.mongoses_uri = 'mongodb://%s,%s' % (self.mongos1.address_string, - self.mongos2.address_string) + self.mongoses_uri = "mongodb://%s,%s" % ( + self.mongos1.address_string, + self.mongos2.address_string, + ) def create_slave_ok_sharded_test(mode, operation): def test(self): self.setup_server() - if operation.op_type == 'always-use-secondary': + if operation.op_type == "always-use-secondary": slave_ok = True - elif operation.op_type == 'may-use-secondary': - slave_ok = mode != 'primary' - elif operation.op_type == 'must-use-primary': + elif operation.op_type == "may-use-secondary": + slave_ok = mode != "primary" + elif operation.op_type == "must-use-primary": slave_ok = False else: - assert False, 'unrecognized op_type %r' % operation.op_type + assert False, "unrecognized op_type %r" % operation.op_type - pref = make_read_preference(read_pref_mode_from_name(mode), - tag_sets=None) + pref = make_read_preference(read_pref_mode_from_name(mode), tag_sets=None) client = MongoClient(self.mongoses_uri, read_preference=pref) self.addCleanup(client.close) @@ -71,22 +70,21 @@ def test(self): request.reply(operation.reply) if slave_ok: - self.assertTrue(request.slave_ok, 'SlaveOkay not set') + self.assertTrue(request.slave_ok, "SlaveOkay not set") else: - self.assertFalse(request.slave_ok, 'SlaveOkay set') + self.assertFalse(request.slave_ok, "SlaveOkay set") return test def generate_slave_ok_sharded_tests(): - modes = 'primary', 'secondary', 'nearest' + modes = "primary", "secondary", "nearest" matrix = itertools.product(modes, operations) for entry in matrix: mode, operation = entry test = create_slave_ok_sharded_test(mode, operation) - test_name = 'test_%s_with_mode_%s' % ( - operation.name.replace(' ', '_'), mode) + test_name = "test_%s_with_mode_%s" % (operation.name.replace(" ", "_"), mode) test.__name__ = test_name setattr(TestSlaveOkaySharded, test_name, test) @@ -94,5 +92,5 @@ def generate_slave_ok_sharded_tests(): generate_slave_ok_sharded_tests() -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() diff --git a/test/mockupdb/test_slave_okay_single.py b/test/mockupdb/test_slave_okay_single.py index 83c0f925a4..98cd1f2706 100644 --- a/test/mockupdb/test_slave_okay_single.py +++ b/test/mockupdb/test_slave_okay_single.py @@ -20,16 +20,15 @@ """ import itertools +import unittest from mockupdb import MockupDB, going +from operations import operations + from pymongo import MongoClient -from pymongo.read_preferences import (make_read_preference, - read_pref_mode_from_name) +from pymongo.read_preferences import make_read_preference, read_pref_mode_from_name from pymongo.topology_description import TOPOLOGY_TYPE -import unittest -from operations import operations - 
def topology_type_name(client): topology_type = client._topology._description.topology_type @@ -46,20 +45,19 @@ def setUp(self): def create_slave_ok_single_test(mode, server_type, ismaster, operation): def test(self): ismaster_with_version = ismaster.copy() - ismaster_with_version['minWireVersion'] = 2 - ismaster_with_version['maxWireVersion'] = 6 - self.server.autoresponds('ismaster', **ismaster_with_version) - if operation.op_type == 'always-use-secondary': + ismaster_with_version["minWireVersion"] = 2 + ismaster_with_version["maxWireVersion"] = 6 + self.server.autoresponds("ismaster", **ismaster_with_version) + if operation.op_type == "always-use-secondary": slave_ok = True - elif operation.op_type == 'may-use-secondary': - slave_ok = mode != 'primary' or server_type != 'mongos' - elif operation.op_type == 'must-use-primary': - slave_ok = server_type != 'mongos' + elif operation.op_type == "may-use-secondary": + slave_ok = mode != "primary" or server_type != "mongos" + elif operation.op_type == "must-use-primary": + slave_ok = server_type != "mongos" else: - assert False, 'unrecognized op_type %r' % operation.op_type + assert False, "unrecognized op_type %r" % operation.op_type - pref = make_read_preference(read_pref_mode_from_name(mode), - tag_sets=None) + pref = make_read_preference(read_pref_mode_from_name(mode), tag_sets=None) client = MongoClient(self.server.uri, read_preference=pref) self.addCleanup(client.close) @@ -67,27 +65,30 @@ def test(self): request = self.server.receive() request.reply(operation.reply) - self.assertIn(topology_type_name(client), ['Sharded', 'Single']) + self.assertIn(topology_type_name(client), ["Sharded", "Single"]) return test def generate_slave_ok_single_tests(): - modes = 'primary', 'secondary', 'nearest' + modes = "primary", "secondary", "nearest" server_types = [ - ('standalone', {'ismaster': True}), - ('slave', {'ismaster': False}), - ('mongos', {'ismaster': True, 'msg': 'isdbgrid'})] + ("standalone", {"ismaster": True}), + ("slave", {"ismaster": False}), + ("mongos", {"ismaster": True, "msg": "isdbgrid"}), + ] matrix = itertools.product(modes, server_types, operations) for entry in matrix: mode, (server_type, ismaster), operation = entry - test = create_slave_ok_single_test(mode, server_type, ismaster, - operation) + test = create_slave_ok_single_test(mode, server_type, ismaster, operation) - test_name = 'test_%s_%s_with_mode_%s' % ( - operation.name.replace(' ', '_'), server_type, mode) + test_name = "test_%s_%s_with_mode_%s" % ( + operation.name.replace(" ", "_"), + server_type, + mode, + ) test.__name__ = test_name setattr(TestSlaveOkaySingle, test_name, test) @@ -96,5 +97,5 @@ def generate_slave_ok_single_tests(): generate_slave_ok_single_tests() -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() diff --git a/test/mod_wsgi_test/test_client.py b/test/mod_wsgi_test/test_client.py index f99ac0054e..bfdae9e824 100644 --- a/test/mod_wsgi_test/test_client.py +++ b/test/mod_wsgi_test/test_client.py @@ -15,42 +15,58 @@ """Test client for mod_wsgi application, see bug PYTHON-353. """ +import _thread as thread import sys import threading import time - from optparse import OptionParser - from urllib.request import urlopen -import _thread as thread def parse_args(): - parser = OptionParser("""usage: %prog [options] mode url + parser = OptionParser( + """usage: %prog [options] mode url - mode:\tparallel or serial""") + mode:\tparallel or serial""" + ) # Should be enough that any connection leak will exhaust available file # descriptors. 
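# For reference, the options defined below drive invocations like the
# following (the URL is an assumption about the mod_wsgi deployment):
#
#     python test_client.py parallel http://localhost/mod_wsgi_test
#     python test_client.py -n 1000 -t 10 parallel http://localhost/mod_wsgi_test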
parser.add_option( - "-n", "--nrequests", type="int", - dest="nrequests", default=50 * 1000, - help="Number of times to GET the URL, in total") + "-n", + "--nrequests", + type="int", + dest="nrequests", + default=50 * 1000, + help="Number of times to GET the URL, in total", + ) parser.add_option( - "-t", "--nthreads", type="int", - dest="nthreads", default=100, - help="Number of threads with mode 'parallel'") + "-t", + "--nthreads", + type="int", + dest="nthreads", + default=100, + help="Number of threads with mode 'parallel'", + ) parser.add_option( - "-q", "--quiet", - action="store_false", dest="verbose", default=True, - help="Don't print status messages to stdout") + "-q", + "--quiet", + action="store_false", + dest="verbose", + default=True, + help="Don't print status messages to stdout", + ) parser.add_option( - "-c", "--continue", - action="store_true", dest="continue_", default=False, - help="Continue after HTTP errors") + "-c", + "--continue", + action="store_true", + dest="continue_", + default=False, + help="Continue after HTTP errors", + ) try: options, (mode, url) = parser.parse_args() @@ -58,7 +74,7 @@ def parse_args(): parser.print_usage() sys.exit(1) - if mode not in ('parallel', 'serial'): + if mode not in ("parallel", "serial"): parser.print_usage() sys.exit(1) @@ -107,18 +123,22 @@ def run(self): def main(options, mode, url): start_time = time.time() errors = 0 - if mode == 'parallel': + if mode == "parallel": nrequests_per_thread = options.nrequests // options.nthreads if options.verbose: - print ( - 'Getting %s %s times total in %s threads, ' - '%s times per thread' % ( - url, nrequests_per_thread * options.nthreads, - options.nthreads, nrequests_per_thread)) + print( + "Getting %s %s times total in %s threads, " + "%s times per thread" + % ( + url, + nrequests_per_thread * options.nthreads, + options.nthreads, + nrequests_per_thread, + ) + ) threads = [ - URLGetterThread(options, url, nrequests_per_thread) - for _ in range(options.nthreads) + URLGetterThread(options, url, nrequests_per_thread) for _ in range(options.nthreads) ] for t in threads: @@ -130,14 +150,11 @@ def main(options, mode, url): errors = sum([t.errors for t in threads]) nthreads_with_errors = len([t for t in threads if t.errors]) if nthreads_with_errors: - print('%d threads had errors! %d errors in total' % ( - nthreads_with_errors, errors)) + print("%d threads had errors! %d errors in total" % (nthreads_with_errors, errors)) else: - assert mode == 'serial' + assert mode == "serial" if options.verbose: - print('Getting %s %s times in one thread' % ( - url, options.nrequests - )) + print("Getting %s %s times in one thread" % (url, options.nrequests)) for i in range(1, options.nrequests + 1): try: @@ -153,16 +170,16 @@ def main(options, mode, url): print(i) if errors: - print('%d errors!' % errors) + print("%d errors!" 
% errors) if options.verbose: - print('Completed in %.2f seconds' % (time.time() - start_time)) + print("Completed in %.2f seconds" % (time.time() - start_time)) if errors: # Failure sys.exit(1) -if __name__ == '__main__': +if __name__ == "__main__": options, mode, url = parse_args() main(options, mode, url) diff --git a/test/ocsp/test_ocsp.py b/test/ocsp/test_ocsp.py index 07197e73b6..cce846feac 100644 --- a/test/ocsp/test_ocsp.py +++ b/test/ocsp/test_ocsp.py @@ -22,19 +22,17 @@ sys.path[0:0] = [""] import pymongo - from pymongo.errors import ServerSelectionTimeoutError - CA_FILE = os.environ.get("CA_FILE") -OCSP_TLS_SHOULD_SUCCEED = (os.environ.get('OCSP_TLS_SHOULD_SUCCEED') == 'true') +OCSP_TLS_SHOULD_SUCCEED = os.environ.get("OCSP_TLS_SHOULD_SUCCEED") == "true" # Enable logs in this format: # 2020-06-08 23:49:35,982 DEBUG ocsp_support Peer did not staple an OCSP response -FORMAT = '%(asctime)s %(levelname)s %(module)s %(message)s' +FORMAT = "%(asctime)s %(levelname)s %(module)s %(message)s" logging.basicConfig(format=FORMAT, level=logging.DEBUG) -if sys.platform == 'win32': +if sys.platform == "win32": # The non-stapled OCSP endpoint check is slow on Windows. TIMEOUT_MS = 5000 else: @@ -42,15 +40,17 @@ def _connect(options): - uri = ("mongodb://localhost:27017/?serverSelectionTimeoutMS=%s" - "&tlsCAFile=%s&%s") % (TIMEOUT_MS, CA_FILE, options) + uri = ("mongodb://localhost:27017/?serverSelectionTimeoutMS=%s" "&tlsCAFile=%s&%s") % ( + TIMEOUT_MS, + CA_FILE, + options, + ) print(uri) client = pymongo.MongoClient(uri) - client.admin.command('ping') + client.admin.command("ping") class TestOCSP(unittest.TestCase): - def test_tls_insecure(self): # Should always succeed options = "tls=true&tlsInsecure=true" @@ -65,12 +65,11 @@ def test_tls(self): options = "tls=true" if not OCSP_TLS_SHOULD_SUCCEED: self.assertRaisesRegex( - ServerSelectionTimeoutError, - "invalid status response", - _connect, options) + ServerSelectionTimeoutError, "invalid status response", _connect, options + ) else: _connect(options) -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() diff --git a/test/performance/perf_test.py b/test/performance/perf_test.py index 7effa1c1ee..3cb4b5d5d1 100644 --- a/test/performance/perf_test.py +++ b/test/performance/perf_test.py @@ -19,8 +19,8 @@ import sys import tempfile import time -from typing import Any, List import warnings +from typing import Any, List try: import simplejson as json @@ -29,28 +29,30 @@ sys.path[0:0] = [""] +from test import client_context, host, port, unittest + from bson import decode, encode from bson.json_util import loads from gridfs import GridFSBucket from pymongo import MongoClient -from test import client_context, host, port, unittest NUM_ITERATIONS = 100 MAX_ITERATION_TIME = 300 NUM_DOCS = 10000 -TEST_PATH = os.environ.get('TEST_PATH', os.path.join( - os.path.dirname(os.path.realpath(__file__)), - os.path.join('data'))) +TEST_PATH = os.environ.get( + "TEST_PATH", os.path.join(os.path.dirname(os.path.realpath(__file__)), os.path.join("data")) +) -OUTPUT_FILE = os.environ.get('OUTPUT_FILE') +OUTPUT_FILE = os.environ.get("OUTPUT_FILE") result_data: List = [] + def tearDownModule(): output = json.dumps(result_data, indent=4) if OUTPUT_FILE: - with open(OUTPUT_FILE, 'w') as opf: + with open(OUTPUT_FILE, "w") as opf: opf.write(output) else: print(output) @@ -83,22 +85,20 @@ def tearDown(self): name = self.__class__.__name__ median = self.percentile(50) bytes_per_sec = self.data_size / median - print('Running %s. 
MEDIAN=%s' % (self.__class__.__name__, - self.percentile(50))) - result_data.append({ - 'info': { - 'test_name': name, - 'args': { - 'threads': 1, + print("Running %s. MEDIAN=%s" % (self.__class__.__name__, self.percentile(50))) + result_data.append( + { + "info": { + "test_name": name, + "args": { + "threads": 1, + }, }, - }, - 'metrics': [ - { - 'name': 'bytes_per_sec', - 'value': bytes_per_sec - }, - ] - }) + "metrics": [ + {"name": "bytes_per_sec", "value": bytes_per_sec}, + ], + } + ) def before(self): pass @@ -107,12 +107,12 @@ def after(self): pass def percentile(self, percentile): - if hasattr(self, 'results'): + if hasattr(self, "results"): sorted_results = sorted(self.results) percentile_index = int(len(sorted_results) * percentile / 100) - 1 return sorted_results[percentile_index] else: - self.fail('Test execution failed') + self.fail("Test execution failed") def runTest(self): results = [] @@ -120,7 +120,7 @@ def runTest(self): self.max_iterations = NUM_ITERATIONS for i in range(NUM_ITERATIONS): if time.monotonic() - start > MAX_ITERATION_TIME: - warnings.warn('Test timed out, completed %s iterations.' % i) + warnings.warn("Test timed out, completed %s iterations." % i) break self.before() with Timer() as timer: @@ -135,9 +135,7 @@ def runTest(self): class BsonEncodingTest(PerformanceTest): def setUp(self): # Location of test data. - with open( - os.path.join(TEST_PATH, - os.path.join('extended_bson', self.dataset))) as data: + with open(os.path.join(TEST_PATH, os.path.join("extended_bson", self.dataset))) as data: self.document = loads(data.read()) def do_task(self): @@ -148,9 +146,7 @@ def do_task(self): class BsonDecodingTest(PerformanceTest): def setUp(self): # Location of test data. - with open( - os.path.join(TEST_PATH, - os.path.join('extended_bson', self.dataset))) as data: + with open(os.path.join(TEST_PATH, os.path.join("extended_bson", self.dataset))) as data: self.document = encode(json.loads(data.read())) def do_task(self): @@ -159,41 +155,42 @@ def do_task(self): class TestFlatEncoding(BsonEncodingTest, unittest.TestCase): - dataset = 'flat_bson.json' + dataset = "flat_bson.json" data_size = 75310000 class TestFlatDecoding(BsonDecodingTest, unittest.TestCase): - dataset = 'flat_bson.json' + dataset = "flat_bson.json" data_size = 75310000 class TestDeepEncoding(BsonEncodingTest, unittest.TestCase): - dataset = 'deep_bson.json' + dataset = "deep_bson.json" data_size = 19640000 class TestDeepDecoding(BsonDecodingTest, unittest.TestCase): - dataset = 'deep_bson.json' + dataset = "deep_bson.json" data_size = 19640000 class TestFullEncoding(BsonEncodingTest, unittest.TestCase): - dataset = 'full_bson.json' + dataset = "full_bson.json" data_size = 57340000 class TestFullDecoding(BsonDecodingTest, unittest.TestCase): - dataset = 'full_bson.json' + dataset = "full_bson.json" data_size = 57340000 # SINGLE-DOC BENCHMARKS class TestRunCommand(PerformanceTest, unittest.TestCase): data_size = 160000 + def setUp(self): self.client = client_context.client - self.client.drop_database('perftest') + self.client.drop_database("perftest") def do_task(self): command = self.client.perftest.command @@ -205,29 +202,29 @@ class TestDocument(PerformanceTest): def setUp(self): # Location of test data. 
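# A worked example of the harness arithmetic above: percentile() is a
# nearest-rank lookup over the sorted per-iteration times, so with
# results == [1.0, 2.0, 3.0, 4.0] the reported MEDIAN is:
#
#     sorted_results = [1.0, 2.0, 3.0, 4.0]
#     index = int(len(sorted_results) * 50 / 100) - 1  # -> 1
#     median = sorted_results[index]                   # -> 2.0
#     # and bytes_per_sec in tearDown() is data_size / median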
with open( - os.path.join( - TEST_PATH, os.path.join( - 'single_and_multi_document', self.dataset)), 'r') as data: + os.path.join(TEST_PATH, os.path.join("single_and_multi_document", self.dataset)), "r" + ) as data: self.document = json.loads(data.read()) self.client = client_context.client - self.client.drop_database('perftest') + self.client.drop_database("perftest") def tearDown(self): super(TestDocument, self).tearDown() - self.client.drop_database('perftest') + self.client.drop_database("perftest") def before(self): - self.corpus = self.client.perftest.create_collection('corpus') + self.corpus = self.client.perftest.create_collection("corpus") def after(self): - self.client.perftest.drop_collection('corpus') + self.client.perftest.drop_collection("corpus") class TestFindOneByID(TestDocument, unittest.TestCase): data_size = 16220000 + def setUp(self): - self.dataset = 'tweet.json' + self.dataset = "tweet.json" super(TestFindOneByID, self).setUp() documents = [self.document.copy() for _ in range(NUM_DOCS)] @@ -238,7 +235,7 @@ def setUp(self): def do_task(self): find_one = self.corpus.find_one for _id in self.inserted_ids: - find_one({'_id': _id}) + find_one({"_id": _id}) def before(self): pass @@ -249,8 +246,9 @@ def after(self): class TestSmallDocInsertOne(TestDocument, unittest.TestCase): data_size = 2750000 + def setUp(self): - self.dataset = 'small_doc.json' + self.dataset = "small_doc.json" super(TestSmallDocInsertOne, self).setUp() self.documents = [self.document.copy() for _ in range(NUM_DOCS)] @@ -263,8 +261,9 @@ def do_task(self): class TestLargeDocInsertOne(TestDocument, unittest.TestCase): data_size = 27310890 + def setUp(self): - self.dataset = 'large_doc.json' + self.dataset = "large_doc.json" super(TestLargeDocInsertOne, self).setUp() self.documents = [self.document.copy() for _ in range(10)] @@ -278,14 +277,13 @@ def do_task(self): # MULTI-DOC BENCHMARKS class TestFindManyAndEmptyCursor(TestDocument, unittest.TestCase): data_size = 16220000 + def setUp(self): - self.dataset = 'tweet.json' + self.dataset = "tweet.json" super(TestFindManyAndEmptyCursor, self).setUp() for _ in range(10): - self.client.perftest.command( - 'insert', 'corpus', - documents=[self.document] * 1000) + self.client.perftest.command("insert", "corpus", documents=[self.document] * 1000) self.corpus = self.client.perftest.corpus def do_task(self): @@ -300,13 +298,14 @@ def after(self): class TestSmallDocBulkInsert(TestDocument, unittest.TestCase): data_size = 2750000 + def setUp(self): - self.dataset = 'small_doc.json' + self.dataset = "small_doc.json" super(TestSmallDocBulkInsert, self).setUp() self.documents = [self.document.copy() for _ in range(NUM_DOCS)] def before(self): - self.corpus = self.client.perftest.create_collection('corpus') + self.corpus = self.client.perftest.create_collection("corpus") def do_task(self): self.corpus.insert_many(self.documents, ordered=True) @@ -314,13 +313,14 @@ def do_task(self): class TestLargeDocBulkInsert(TestDocument, unittest.TestCase): data_size = 27310890 + def setUp(self): - self.dataset = 'large_doc.json' + self.dataset = "large_doc.json" super(TestLargeDocBulkInsert, self).setUp() self.documents = [self.document.copy() for _ in range(10)] def before(self): - self.corpus = self.client.perftest.create_collection('corpus') + self.corpus = self.client.perftest.create_collection("corpus") def do_task(self): self.corpus.insert_many(self.documents, ordered=True) @@ -328,47 +328,48 @@ def do_task(self): class TestGridFsUpload(PerformanceTest, unittest.TestCase): 
data_size = 52428800 + def setUp(self): self.client = client_context.client - self.client.drop_database('perftest') + self.client.drop_database("perftest") gridfs_path = os.path.join( - TEST_PATH, - os.path.join('single_and_multi_document', 'gridfs_large.bin')) - with open(gridfs_path, 'rb') as data: + TEST_PATH, os.path.join("single_and_multi_document", "gridfs_large.bin") + ) + with open(gridfs_path, "rb") as data: self.document = data.read() self.bucket = GridFSBucket(self.client.perftest) def tearDown(self): super(TestGridFsUpload, self).tearDown() - self.client.drop_database('perftest') + self.client.drop_database("perftest") def before(self): - self.bucket.upload_from_stream('init', b'x') + self.bucket.upload_from_stream("init", b"x") def do_task(self): - self.bucket.upload_from_stream('gridfstest', self.document) + self.bucket.upload_from_stream("gridfstest", self.document) class TestGridFsDownload(PerformanceTest, unittest.TestCase): data_size = 52428800 + def setUp(self): self.client = client_context.client - self.client.drop_database('perftest') + self.client.drop_database("perftest") gridfs_path = os.path.join( - TEST_PATH, - os.path.join('single_and_multi_document', 'gridfs_large.bin')) + TEST_PATH, os.path.join("single_and_multi_document", "gridfs_large.bin") + ) self.bucket = GridFSBucket(self.client.perftest) - with open(gridfs_path, 'rb') as gfile: - self.uploaded_id = self.bucket.upload_from_stream( - 'gridfstest', gfile) + with open(gridfs_path, "rb") as gfile: + self.uploaded_id = self.bucket.upload_from_stream("gridfstest", gfile) def tearDown(self): super(TestGridFsDownload, self).tearDown() - self.client.drop_database('perftest') + self.client.drop_database("perftest") def do_task(self): self.bucket.open_download_stream(self.uploaded_id).read() @@ -391,17 +392,17 @@ def mp_map(map_func, files): def insert_json_file(filename): assert proc_client is not None - with open(filename, 'r') as data: + with open(filename, "r") as data: coll = proc_client.perftest.corpus coll.insert_many([json.loads(line) for line in data]) def insert_json_file_with_file_id(filename): documents = [] - with open(filename, 'r') as data: + with open(filename, "r") as data: for line in data: doc = json.loads(line) - doc['file'] = filename + doc["file"] = filename documents.append(doc) assert proc_client is not None coll = proc_client.perftest.corpus @@ -411,11 +412,11 @@ def insert_json_file_with_file_id(filename): def read_json_file(filename): assert proc_client is not None coll = proc_client.perftest.corpus - temp = tempfile.TemporaryFile(mode='w') + temp = tempfile.TemporaryFile(mode="w") try: temp.writelines( - [json.dumps(doc) + '\n' for - doc in coll.find({'file': filename}, {'_id': False})]) + [json.dumps(doc) + "\n" for doc in coll.find({"file": filename}, {"_id": False})] + ) finally: temp.close() @@ -424,7 +425,7 @@ def insert_gridfs_file(filename): assert proc_client is not None bucket = GridFSBucket(proc_client.perftest) - with open(filename, 'rb') as gfile: + with open(filename, "rb") as gfile: bucket.upload_from_stream(filename, gfile) @@ -441,41 +442,39 @@ def read_gridfs_file(filename): class TestJsonMultiImport(PerformanceTest, unittest.TestCase): data_size = 565000000 + def setUp(self): self.client = client_context.client - self.client.drop_database('perftest') + self.client.drop_database("perftest") def before(self): - self.client.perftest.command({'create': 'corpus'}) + self.client.perftest.command({"create": "corpus"}) self.corpus = self.client.perftest.corpus - ldjson_path = 
os.path.join( - TEST_PATH, os.path.join('parallel', 'ldjson_multi')) - self.files = [os.path.join( - ldjson_path, s) for s in os.listdir(ldjson_path)] + ldjson_path = os.path.join(TEST_PATH, os.path.join("parallel", "ldjson_multi")) + self.files = [os.path.join(ldjson_path, s) for s in os.listdir(ldjson_path)] def do_task(self): mp_map(insert_json_file, self.files) def after(self): - self.client.perftest.drop_collection('corpus') + self.client.perftest.drop_collection("corpus") def tearDown(self): super(TestJsonMultiImport, self).tearDown() - self.client.drop_database('perftest') + self.client.drop_database("perftest") class TestJsonMultiExport(PerformanceTest, unittest.TestCase): data_size = 565000000 + def setUp(self): self.client = client_context.client - self.client.drop_database('perftest') - self.client.perfest.corpus.create_index('file') + self.client.drop_database("perftest") + self.client.perfest.corpus.create_index("file") - ldjson_path = os.path.join( - TEST_PATH, os.path.join('parallel', 'ldjson_multi')) - self.files = [os.path.join( - ldjson_path, s) for s in os.listdir(ldjson_path)] + ldjson_path = os.path.join(TEST_PATH, os.path.join("parallel", "ldjson_multi")) + self.files = [os.path.join(ldjson_path, s) for s in os.listdir(ldjson_path)] mp_map(insert_json_file_with_file_id, self.files) @@ -484,48 +483,46 @@ def do_task(self): def tearDown(self): super(TestJsonMultiExport, self).tearDown() - self.client.drop_database('perftest') + self.client.drop_database("perftest") class TestGridFsMultiFileUpload(PerformanceTest, unittest.TestCase): data_size = 262144000 + def setUp(self): self.client = client_context.client - self.client.drop_database('perftest') + self.client.drop_database("perftest") def before(self): - self.client.perftest.drop_collection('fs.files') - self.client.perftest.drop_collection('fs.chunks') + self.client.perftest.drop_collection("fs.files") + self.client.perftest.drop_collection("fs.chunks") self.bucket = GridFSBucket(self.client.perftest) - gridfs_path = os.path.join( - TEST_PATH, os.path.join('parallel', 'gridfs_multi')) - self.files = [os.path.join( - gridfs_path, s) for s in os.listdir(gridfs_path)] + gridfs_path = os.path.join(TEST_PATH, os.path.join("parallel", "gridfs_multi")) + self.files = [os.path.join(gridfs_path, s) for s in os.listdir(gridfs_path)] def do_task(self): mp_map(insert_gridfs_file, self.files) def tearDown(self): super(TestGridFsMultiFileUpload, self).tearDown() - self.client.drop_database('perftest') + self.client.drop_database("perftest") class TestGridFsMultiFileDownload(PerformanceTest, unittest.TestCase): data_size = 262144000 + def setUp(self): self.client = client_context.client - self.client.drop_database('perftest') + self.client.drop_database("perftest") bucket = GridFSBucket(self.client.perftest) - gridfs_path = os.path.join( - TEST_PATH, os.path.join('parallel', 'gridfs_multi')) - self.files = [os.path.join( - gridfs_path, s) for s in os.listdir(gridfs_path)] + gridfs_path = os.path.join(TEST_PATH, os.path.join("parallel", "gridfs_multi")) + self.files = [os.path.join(gridfs_path, s) for s in os.listdir(gridfs_path)] for fname in self.files: - with open(fname, 'rb') as gfile: + with open(fname, "rb") as gfile: bucket.upload_from_stream(fname, gfile) def do_task(self): @@ -533,7 +530,7 @@ def do_task(self): def tearDown(self): super(TestGridFsMultiFileDownload, self).tearDown() - self.client.drop_database('perftest') + self.client.drop_database("perftest") if __name__ == "__main__": diff --git a/test/pymongo_mocks.py 
b/test/pymongo_mocks.py index 1494fbedcc..580c5da993 100644 --- a/test/pymongo_mocks.py +++ b/test/pymongo_mocks.py @@ -15,19 +15,17 @@ """Tools for mocking parts of PyMongo to test other parts.""" import contextlib -from functools import partial import weakref +from functools import partial +from test import client_context -from pymongo import common -from pymongo import MongoClient +from pymongo import MongoClient, common from pymongo.errors import AutoReconnect, NetworkTimeout from pymongo.hello import Hello, HelloCompat from pymongo.monitor import Monitor from pymongo.pool import Pool from pymongo.server_description import ServerDescription -from test import client_context - class MockPool(Pool): def __init__(self, client, pair, *args, **kwargs): @@ -42,14 +40,13 @@ def __init__(self, client, pair, *args, **kwargs): @contextlib.contextmanager def get_socket(self, handler=None): client = self.client - host_and_port = '%s:%s' % (self.mock_host, self.mock_port) + host_and_port = "%s:%s" % (self.mock_host, self.mock_port) if host_and_port in client.mock_down_hosts: - raise AutoReconnect('mock error') + raise AutoReconnect("mock error") assert host_and_port in ( - client.mock_standalones - + client.mock_members - + client.mock_mongoses), "bad host: %s" % host_and_port + client.mock_standalones + client.mock_members + client.mock_mongoses + ), ("bad host: %s" % host_and_port) with Pool.get_socket(self, handler) as sock_info: sock_info.mock_host = self.mock_host @@ -79,34 +76,31 @@ def close(self): class MockMonitor(Monitor): - def __init__( - self, - client, - server_description, - topology, - pool, - topology_settings): + def __init__(self, client, server_description, topology, pool, topology_settings): # MockMonitor gets a 'client' arg, regular monitors don't. Weakref it # to avoid cycles. self.client = weakref.proxy(client) - Monitor.__init__( - self, - server_description, - topology, - pool, - topology_settings) + Monitor.__init__(self, server_description, topology, pool, topology_settings) def _check_once(self): client = self.client address = self._server_description.address - response, rtt = client.mock_hello('%s:%d' % address) + response, rtt = client.mock_hello("%s:%d" % address) return ServerDescription(address, Hello(response), rtt) class MockClient(MongoClient): def __init__( - self, standalones, members, mongoses, hello_hosts=None, - arbiters=None, down_hosts=None, *args, **kwargs): + self, + standalones, + members, + mongoses, + hello_hosts=None, + arbiters=None, + down_hosts=None, + *args, + **kwargs + ): """A MongoClient connected to the default server, with a mock topology. standalones, members, mongoses, arbiters, and down_hosts determine the @@ -144,8 +138,8 @@ def __init__( # Hostname -> round trip time self.mock_rtts = {} - kwargs['_pool_class'] = partial(MockPool, self) - kwargs['_monitor_class'] = partial(MockMonitor, self) + kwargs["_pool_class"] = partial(MockPool, self) + kwargs["_monitor_class"] = partial(MockMonitor, self) client_options = client_context.default_client_options.copy() client_options.update(kwargs) @@ -175,53 +169,57 @@ def mock_hello(self, host): max_wire_version = common.MAX_SUPPORTED_WIRE_VERSION max_write_batch_size = self.mock_max_write_batch_sizes.get( - host, common.MAX_WRITE_BATCH_SIZE) + host, common.MAX_WRITE_BATCH_SIZE + ) rtt = self.mock_rtts.get(host, 0) # host is like 'a:1'. 
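# The branches below are driven entirely by the mock topology configured at
# construction time. A hedged sketch of a typical setup (the hosts and
# keyword arguments here are illustrative):
#
#     client = MockClient(
#         standalones=[],
#         members=["a:1", "b:2"],
#         mongoses=[],
#         host="a:1",
#         replicaSet="rs",
#     )
#     client.mock_primary = "a:1"  # "a:1" now answers hello as the primary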
if host in self.mock_down_hosts: - raise NetworkTimeout('mock timeout') + raise NetworkTimeout("mock timeout") elif host in self.mock_standalones: response = { - 'ok': 1, + "ok": 1, HelloCompat.LEGACY_CMD: True, - 'minWireVersion': min_wire_version, - 'maxWireVersion': max_wire_version, - 'maxWriteBatchSize': max_write_batch_size} + "minWireVersion": min_wire_version, + "maxWireVersion": max_wire_version, + "maxWriteBatchSize": max_write_batch_size, + } elif host in self.mock_members: - primary = (host == self.mock_primary) + primary = host == self.mock_primary # Simulate a replica set member. response = { - 'ok': 1, + "ok": 1, HelloCompat.LEGACY_CMD: primary, - 'secondary': not primary, - 'setName': 'rs', - 'hosts': self.mock_hello_hosts, - 'minWireVersion': min_wire_version, - 'maxWireVersion': max_wire_version, - 'maxWriteBatchSize': max_write_batch_size} + "secondary": not primary, + "setName": "rs", + "hosts": self.mock_hello_hosts, + "minWireVersion": min_wire_version, + "maxWireVersion": max_wire_version, + "maxWriteBatchSize": max_write_batch_size, + } if self.mock_primary: - response['primary'] = self.mock_primary + response["primary"] = self.mock_primary if host in self.mock_arbiters: - response['arbiterOnly'] = True - response['secondary'] = False + response["arbiterOnly"] = True + response["secondary"] = False elif host in self.mock_mongoses: response = { - 'ok': 1, + "ok": 1, HelloCompat.LEGACY_CMD: True, - 'minWireVersion': min_wire_version, - 'maxWireVersion': max_wire_version, - 'msg': 'isdbgrid', - 'maxWriteBatchSize': max_write_batch_size} + "minWireVersion": min_wire_version, + "maxWireVersion": max_wire_version, + "msg": "isdbgrid", + "maxWriteBatchSize": max_write_batch_size, + } else: # In test_internal_ips(), we try to connect to a host listed # in hello['hosts'] but not publicly accessible. 
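# Concretely, with mock_primary == "a:1" and mock_hello_hosts == ["a:1",
# "b:2"], the member branch above yields a secondary hello for "b:2" of:
#
#     {
#         "ok": 1,
#         HelloCompat.LEGACY_CMD: False,  # not the primary
#         "secondary": True,
#         "setName": "rs",
#         "hosts": ["a:1", "b:2"],
#         "minWireVersion": min_wire_version,
#         "maxWireVersion": max_wire_version,
#         "maxWriteBatchSize": max_write_batch_size,
#         "primary": "a:1",
#     }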
- raise AutoReconnect('Unknown host: %s' % host) + raise AutoReconnect("Unknown host: %s" % host) return response, rtt diff --git a/test/qcheck.py b/test/qcheck.py index 57e0940b72..4cce7b5bc8 100644 --- a/test/qcheck.py +++ b/test/qcheck.py @@ -83,9 +83,7 @@ def gen_unichar(): def gen_unicode(gen_length): - return lambda: "".join([x for x in - gen_list(gen_unichar(), gen_length)() if - x not in ".$"]) + return lambda: "".join([x for x in gen_list(gen_unichar(), gen_length)() if x not in ".$"]) def gen_list(generator, gen_length): @@ -93,22 +91,24 @@ def gen_list(generator, gen_length): def gen_datetime(): - return lambda: datetime.datetime(random.randint(1970, 2037), - random.randint(1, 12), - random.randint(1, 28), - random.randint(0, 23), - random.randint(0, 59), - random.randint(0, 59), - random.randint(0, 999) * 1000) + return lambda: datetime.datetime( + random.randint(1970, 2037), + random.randint(1, 12), + random.randint(1, 28), + random.randint(0, 23), + random.randint(0, 59), + random.randint(0, 59), + random.randint(0, 999) * 1000, + ) def gen_dict(gen_key, gen_value, gen_length): - def a_dict(gen_key, gen_value, length): result = {} for _ in range(length): result[gen_key()] = gen_value() return result + return lambda: a_dict(gen_key, gen_value, gen_length()) @@ -128,6 +128,7 @@ def gen_flags(): flags = flags | re.VERBOSE return flags + return lambda: re.compile(pattern(), gen_flags()) @@ -142,15 +143,17 @@ def gen_dbref(): def gen_mongo_value(depth, ref): - choices = [gen_unicode(gen_range(0, 50)), - gen_printable_string(gen_range(0, 50)), - my_map(gen_string(gen_range(0, 1000)), bytes), - gen_int(), - gen_float(), - gen_boolean(), - gen_datetime(), - gen_objectid(), - lift(None)] + choices = [ + gen_unicode(gen_range(0, 50)), + gen_printable_string(gen_range(0, 50)), + my_map(gen_string(gen_range(0, 1000)), bytes), + gen_int(), + gen_float(), + gen_boolean(), + gen_datetime(), + gen_objectid(), + lift(None), + ] if ref: choices.append(gen_dbref()) if depth > 0: @@ -164,9 +167,10 @@ def gen_mongo_list(depth, ref): def gen_mongo_dict(depth, ref=True): - return my_map(gen_dict(gen_unicode(gen_range(0, 20)), - gen_mongo_value(depth - 1, ref), - gen_range(0, 10)), SON) + return my_map( + gen_dict(gen_unicode(gen_range(0, 20)), gen_mongo_value(depth - 1, ref), gen_range(0, 10)), + SON, + ) def simplify(case): # TODO this is a hack @@ -236,8 +240,10 @@ def check_unittest(test, predicate, generator): counter_examples = check(predicate, generator) if counter_examples: failures = len(counter_examples) - message = "\n".join([" -> %s" % f for f in - counter_examples[:examples]]) - message = ("found %d counter examples, displaying first %d:\n%s" % - (failures, min(failures, examples), message)) + message = "\n".join([" -> %s" % f for f in counter_examples[:examples]]) + message = "found %d counter examples, displaying first %d:\n%s" % ( + failures, + min(failures, examples), + message, + ) test.fail(message) diff --git a/test/test_auth.py b/test/test_auth.py index 5b4ef0c51f..5abdbef3dc 100644 --- a/test/test_auth.py +++ b/test/test_auth.py @@ -17,42 +17,44 @@ import os import sys import threading - from urllib.parse import quote_plus sys.path[0:0] = [""] +from test import IntegrationTest, SkipTest, Version, client_context, unittest +from test.utils import ( + AllowListEventListener, + delay, + get_pool, + ignore_deprecations, + rs_or_single_client, + rs_or_single_client_noauth, + single_client, + single_client_noauth, +) + from pymongo import MongoClient, monitoring from pymongo.auth 
import HAVE_KERBEROS, _build_credentials_tuple from pymongo.errors import OperationFailure from pymongo.hello import HelloCompat from pymongo.read_preferences import ReadPreference from pymongo.saslprep import HAVE_STRINGPREP -from test import client_context, IntegrationTest, SkipTest, unittest, Version -from test.utils import (delay, - get_pool, - ignore_deprecations, - single_client, - rs_or_single_client, - rs_or_single_client_noauth, - single_client_noauth, - AllowListEventListener) # YOU MUST RUN KINIT BEFORE RUNNING GSSAPI TESTS ON UNIX. -GSSAPI_HOST = os.environ.get('GSSAPI_HOST') -GSSAPI_PORT = int(os.environ.get('GSSAPI_PORT', '27017')) -GSSAPI_PRINCIPAL = os.environ.get('GSSAPI_PRINCIPAL') -GSSAPI_SERVICE_NAME = os.environ.get('GSSAPI_SERVICE_NAME', 'mongodb') -GSSAPI_CANONICALIZE = os.environ.get('GSSAPI_CANONICALIZE', 'false') -GSSAPI_SERVICE_REALM = os.environ.get('GSSAPI_SERVICE_REALM') -GSSAPI_PASS = os.environ.get('GSSAPI_PASS') -GSSAPI_DB = os.environ.get('GSSAPI_DB', 'test') - -SASL_HOST = os.environ.get('SASL_HOST') -SASL_PORT = int(os.environ.get('SASL_PORT', '27017')) -SASL_USER = os.environ.get('SASL_USER') -SASL_PASS = os.environ.get('SASL_PASS') -SASL_DB = os.environ.get('SASL_DB', '$external') +GSSAPI_HOST = os.environ.get("GSSAPI_HOST") +GSSAPI_PORT = int(os.environ.get("GSSAPI_PORT", "27017")) +GSSAPI_PRINCIPAL = os.environ.get("GSSAPI_PRINCIPAL") +GSSAPI_SERVICE_NAME = os.environ.get("GSSAPI_SERVICE_NAME", "mongodb") +GSSAPI_CANONICALIZE = os.environ.get("GSSAPI_CANONICALIZE", "false") +GSSAPI_SERVICE_REALM = os.environ.get("GSSAPI_SERVICE_REALM") +GSSAPI_PASS = os.environ.get("GSSAPI_PASS") +GSSAPI_DB = os.environ.get("GSSAPI_DB", "test") + +SASL_HOST = os.environ.get("SASL_HOST") +SASL_PORT = int(os.environ.get("SASL_PORT", "27017")) +SASL_USER = os.environ.get("SASL_USER") +SASL_PASS = os.environ.get("SASL_PASS") +SASL_DB = os.environ.get("SASL_DB", "$external") class AutoAuthenticateThread(threading.Thread): @@ -71,7 +73,7 @@ def __init__(self, collection): self.success = False def run(self): - assert self.collection.find_one({'$where': delay(1)}) is not None + assert self.collection.find_one({"$where": delay(1)}) is not None self.success = True @@ -82,36 +84,33 @@ class TestGSSAPI(unittest.TestCase): @classmethod def setUpClass(cls): if not HAVE_KERBEROS: - raise SkipTest('Kerberos module not available.') + raise SkipTest("Kerberos module not available.") if not GSSAPI_HOST or not GSSAPI_PRINCIPAL: - raise SkipTest( - 'Must set GSSAPI_HOST and GSSAPI_PRINCIPAL to test GSSAPI') + raise SkipTest("Must set GSSAPI_HOST and GSSAPI_PRINCIPAL to test GSSAPI") cls.service_realm_required = ( - GSSAPI_SERVICE_REALM is not None and - GSSAPI_SERVICE_REALM not in GSSAPI_PRINCIPAL) - mech_properties = 'SERVICE_NAME:%s' % (GSSAPI_SERVICE_NAME,) - mech_properties += ( - ',CANONICALIZE_HOST_NAME:%s' % (GSSAPI_CANONICALIZE,)) + GSSAPI_SERVICE_REALM is not None and GSSAPI_SERVICE_REALM not in GSSAPI_PRINCIPAL + ) + mech_properties = "SERVICE_NAME:%s" % (GSSAPI_SERVICE_NAME,) + mech_properties += ",CANONICALIZE_HOST_NAME:%s" % (GSSAPI_CANONICALIZE,) if GSSAPI_SERVICE_REALM is not None: - mech_properties += ',SERVICE_REALM:%s' % (GSSAPI_SERVICE_REALM,) + mech_properties += ",SERVICE_REALM:%s" % (GSSAPI_SERVICE_REALM,) cls.mech_properties = mech_properties def test_credentials_hashing(self): # GSSAPI credentials are properly hashed. 
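# For reference, the mech_properties string assembled in setUpClass above is
# a comma-separated key:value list. With illustrative values
# GSSAPI_SERVICE_NAME="mongodb", GSSAPI_CANONICALIZE="true", and
# GSSAPI_SERVICE_REALM="EXAMPLE.COM" it comes out as:
#
#     "SERVICE_NAME:mongodb,CANONICALIZE_HOST_NAME:true,SERVICE_REALM:EXAMPLE.COM"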
- creds0 = _build_credentials_tuple( - 'GSSAPI', None, 'user', 'pass', {}, None) + creds0 = _build_credentials_tuple("GSSAPI", None, "user", "pass", {}, None) creds1 = _build_credentials_tuple( - 'GSSAPI', None, 'user', 'pass', - {'authmechanismproperties': {'SERVICE_NAME': 'A'}}, None) + "GSSAPI", None, "user", "pass", {"authmechanismproperties": {"SERVICE_NAME": "A"}}, None + ) creds2 = _build_credentials_tuple( - 'GSSAPI', None, 'user', 'pass', - {'authmechanismproperties': {'SERVICE_NAME': 'A'}}, None) + "GSSAPI", None, "user", "pass", {"authmechanismproperties": {"SERVICE_NAME": "A"}}, None + ) creds3 = _build_credentials_tuple( - 'GSSAPI', None, 'user', 'pass', - {'authmechanismproperties': {'SERVICE_NAME': 'B'}}, None) + "GSSAPI", None, "user", "pass", {"authmechanismproperties": {"SERVICE_NAME": "B"}}, None + ) self.assertEqual(1, len(set([creds1, creds2]))) self.assertEqual(3, len(set([creds0, creds1, creds2, creds3]))) @@ -120,24 +119,28 @@ def test_credentials_hashing(self): def test_gssapi_simple(self): assert GSSAPI_PRINCIPAL is not None if GSSAPI_PASS is not None: - uri = ('mongodb://%s:%s@%s:%d/?authMechanism=' - 'GSSAPI' % (quote_plus(GSSAPI_PRINCIPAL), - GSSAPI_PASS, - GSSAPI_HOST, - GSSAPI_PORT)) + uri = "mongodb://%s:%s@%s:%d/?authMechanism=" "GSSAPI" % ( + quote_plus(GSSAPI_PRINCIPAL), + GSSAPI_PASS, + GSSAPI_HOST, + GSSAPI_PORT, + ) else: - uri = ('mongodb://%s@%s:%d/?authMechanism=' - 'GSSAPI' % (quote_plus(GSSAPI_PRINCIPAL), - GSSAPI_HOST, - GSSAPI_PORT)) + uri = "mongodb://%s@%s:%d/?authMechanism=" "GSSAPI" % ( + quote_plus(GSSAPI_PRINCIPAL), + GSSAPI_HOST, + GSSAPI_PORT, + ) if not self.service_realm_required: # Without authMechanismProperties. - client = MongoClient(GSSAPI_HOST, - GSSAPI_PORT, - username=GSSAPI_PRINCIPAL, - password=GSSAPI_PASS, - authMechanism='GSSAPI') + client = MongoClient( + GSSAPI_HOST, + GSSAPI_PORT, + username=GSSAPI_PRINCIPAL, + password=GSSAPI_PASS, + authMechanism="GSSAPI", + ) client[GSSAPI_DB].collection.find_one() @@ -146,60 +149,68 @@ def test_gssapi_simple(self): client[GSSAPI_DB].collection.find_one() # Authenticate with authMechanismProperties. - client = MongoClient(GSSAPI_HOST, - GSSAPI_PORT, - username=GSSAPI_PRINCIPAL, - password=GSSAPI_PASS, - authMechanism='GSSAPI', - authMechanismProperties=self.mech_properties) + client = MongoClient( + GSSAPI_HOST, + GSSAPI_PORT, + username=GSSAPI_PRINCIPAL, + password=GSSAPI_PASS, + authMechanism="GSSAPI", + authMechanismProperties=self.mech_properties, + ) client[GSSAPI_DB].collection.find_one() # Log in using URI, with authMechanismProperties. 
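# With illustrative values (principal "user@EXAMPLE.COM", host "kdc-host",
# port 27017, no password), the mech_uri built below is shaped like this;
# quote_plus percent-encodes the "@" in the principal:
#
#     mongodb://user%40EXAMPLE.COM@kdc-host:27017/?authMechanism=GSSAPI&authMechanismProperties=SERVICE_NAME:mongodb,CANONICALIZE_HOST_NAME:false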
- mech_uri = uri + '&authMechanismProperties=%s' % (self.mech_properties,) + mech_uri = uri + "&authMechanismProperties=%s" % (self.mech_properties,) client = MongoClient(mech_uri) client[GSSAPI_DB].collection.find_one() - set_name = client.admin.command(HelloCompat.LEGACY_CMD).get('setName') + set_name = client.admin.command(HelloCompat.LEGACY_CMD).get("setName") if set_name: if not self.service_realm_required: # Without authMechanismProperties - client = MongoClient(GSSAPI_HOST, - GSSAPI_PORT, - username=GSSAPI_PRINCIPAL, - password=GSSAPI_PASS, - authMechanism='GSSAPI', - replicaSet=set_name) + client = MongoClient( + GSSAPI_HOST, + GSSAPI_PORT, + username=GSSAPI_PRINCIPAL, + password=GSSAPI_PASS, + authMechanism="GSSAPI", + replicaSet=set_name, + ) client[GSSAPI_DB].list_collection_names() - uri = uri + '&replicaSet=%s' % (str(set_name),) + uri = uri + "&replicaSet=%s" % (str(set_name),) client = MongoClient(uri) client[GSSAPI_DB].list_collection_names() # With authMechanismProperties - client = MongoClient(GSSAPI_HOST, - GSSAPI_PORT, - username=GSSAPI_PRINCIPAL, - password=GSSAPI_PASS, - authMechanism='GSSAPI', - authMechanismProperties=self.mech_properties, - replicaSet=set_name) + client = MongoClient( + GSSAPI_HOST, + GSSAPI_PORT, + username=GSSAPI_PRINCIPAL, + password=GSSAPI_PASS, + authMechanism="GSSAPI", + authMechanismProperties=self.mech_properties, + replicaSet=set_name, + ) client[GSSAPI_DB].list_collection_names() - mech_uri = mech_uri + '&replicaSet=%s' % (str(set_name),) + mech_uri = mech_uri + "&replicaSet=%s" % (str(set_name),) client = MongoClient(mech_uri) client[GSSAPI_DB].list_collection_names() @ignore_deprecations def test_gssapi_threaded(self): - client = MongoClient(GSSAPI_HOST, - GSSAPI_PORT, - username=GSSAPI_PRINCIPAL, - password=GSSAPI_PASS, - authMechanism='GSSAPI', - authMechanismProperties=self.mech_properties) + client = MongoClient( + GSSAPI_HOST, + GSSAPI_PORT, + username=GSSAPI_PRINCIPAL, + password=GSSAPI_PASS, + authMechanism="GSSAPI", + authMechanismProperties=self.mech_properties, + ) # Authentication succeeded? client.server_info() @@ -213,7 +224,7 @@ def test_gssapi_threaded(self): if not collection.count_documents({}): try: collection.drop() - collection.insert_one({'_id': 1}) + collection.insert_one({"_id": 1}) except OperationFailure: raise SkipTest("User must be able to write.") @@ -226,15 +237,17 @@ def test_gssapi_threaded(self): thread.join() self.assertTrue(thread.success) - set_name = client.admin.command(HelloCompat.LEGACY_CMD).get('setName') + set_name = client.admin.command(HelloCompat.LEGACY_CMD).get("setName") if set_name: - client = MongoClient(GSSAPI_HOST, - GSSAPI_PORT, - username=GSSAPI_PRINCIPAL, - password=GSSAPI_PASS, - authMechanism='GSSAPI', - authMechanismProperties=self.mech_properties, - replicaSet=set_name) + client = MongoClient( + GSSAPI_HOST, + GSSAPI_PORT, + username=GSSAPI_PRINCIPAL, + password=GSSAPI_PASS, + authMechanism="GSSAPI", + authMechanismProperties=self.mech_properties, + replicaSet=set_name, + ) # Succeeded? 
client.server_info() @@ -250,101 +263,109 @@ def test_gssapi_threaded(self): class TestSASLPlain(unittest.TestCase): - @classmethod def setUpClass(cls): if not SASL_HOST or not SASL_USER or not SASL_PASS: - raise SkipTest('Must set SASL_HOST, ' - 'SASL_USER, and SASL_PASS to test SASL') + raise SkipTest("Must set SASL_HOST, " "SASL_USER, and SASL_PASS to test SASL") def test_sasl_plain(self): - client = MongoClient(SASL_HOST, - SASL_PORT, - username=SASL_USER, - password=SASL_PASS, - authSource=SASL_DB, - authMechanism='PLAIN') + client = MongoClient( + SASL_HOST, + SASL_PORT, + username=SASL_USER, + password=SASL_PASS, + authSource=SASL_DB, + authMechanism="PLAIN", + ) client.ldap.test.find_one() assert SASL_USER is not None assert SASL_PASS is not None - uri = ('mongodb://%s:%s@%s:%d/?authMechanism=PLAIN;' - 'authSource=%s' % (quote_plus(SASL_USER), - quote_plus(SASL_PASS), - SASL_HOST, SASL_PORT, SASL_DB)) + uri = "mongodb://%s:%s@%s:%d/?authMechanism=PLAIN;" "authSource=%s" % ( + quote_plus(SASL_USER), + quote_plus(SASL_PASS), + SASL_HOST, + SASL_PORT, + SASL_DB, + ) client = MongoClient(uri) client.ldap.test.find_one() - set_name = client.admin.command(HelloCompat.LEGACY_CMD).get('setName') + set_name = client.admin.command(HelloCompat.LEGACY_CMD).get("setName") if set_name: - client = MongoClient(SASL_HOST, - SASL_PORT, - replicaSet=set_name, - username=SASL_USER, - password=SASL_PASS, - authSource=SASL_DB, - authMechanism='PLAIN') + client = MongoClient( + SASL_HOST, + SASL_PORT, + replicaSet=set_name, + username=SASL_USER, + password=SASL_PASS, + authSource=SASL_DB, + authMechanism="PLAIN", + ) client.ldap.test.find_one() - uri = ('mongodb://%s:%s@%s:%d/?authMechanism=PLAIN;' - 'authSource=%s;replicaSet=%s' % (quote_plus(SASL_USER), - quote_plus(SASL_PASS), - SASL_HOST, SASL_PORT, - SASL_DB, str(set_name))) + uri = "mongodb://%s:%s@%s:%d/?authMechanism=PLAIN;" "authSource=%s;replicaSet=%s" % ( + quote_plus(SASL_USER), + quote_plus(SASL_PASS), + SASL_HOST, + SASL_PORT, + SASL_DB, + str(set_name), + ) client = MongoClient(uri) client.ldap.test.find_one() def test_sasl_plain_bad_credentials(self): def auth_string(user, password): - uri = ('mongodb://%s:%s@%s:%d/?authMechanism=PLAIN;' - 'authSource=%s' % (quote_plus(user), - quote_plus(password), - SASL_HOST, SASL_PORT, SASL_DB)) + uri = "mongodb://%s:%s@%s:%d/?authMechanism=PLAIN;" "authSource=%s" % ( + quote_plus(user), + quote_plus(password), + SASL_HOST, + SASL_PORT, + SASL_DB, + ) return uri - bad_user = MongoClient(auth_string('not-user', SASL_PASS)) - bad_pwd = MongoClient(auth_string(SASL_USER, 'not-pwd')) + bad_user = MongoClient(auth_string("not-user", SASL_PASS)) + bad_pwd = MongoClient(auth_string(SASL_USER, "not-pwd")) # OperationFailure raised upon connecting. 
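Note the pattern being asserted just below: constructing a MongoClient with bad credentials never raises, because connections (and therefore authentication) are deferred until the first operation. A minimal sketch of the same behavior, assuming a reachable PLAIN-enabled server at the placeholder address::

    from pymongo import MongoClient
    from pymongo.errors import OperationFailure

    # Constructing the client performs no I/O, so this line cannot fail.
    client = MongoClient("mongodb://not-user:not-pwd@ldap.example.com/?authMechanism=PLAIN")
    try:
        client.admin.command("ping")  # authentication happens here instead
    except OperationFailure as exc:
        print("auth rejected:", exc)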
- self.assertRaises(OperationFailure, bad_user.admin.command, 'ping') - self.assertRaises(OperationFailure, bad_pwd.admin.command, 'ping') + self.assertRaises(OperationFailure, bad_user.admin.command, "ping") + self.assertRaises(OperationFailure, bad_pwd.admin.command, "ping") class TestSCRAMSHA1(IntegrationTest): - @client_context.require_auth def setUp(self): super(TestSCRAMSHA1, self).setUp() - client_context.create_user( - 'pymongo_test', 'user', 'pass', roles=['userAdmin', 'readWrite']) + client_context.create_user("pymongo_test", "user", "pass", roles=["userAdmin", "readWrite"]) def tearDown(self): - client_context.drop_user('pymongo_test', 'user') + client_context.drop_user("pymongo_test", "user") super(TestSCRAMSHA1, self).tearDown() def test_scram_sha1(self): host, port = client_context.host, client_context.port client = rs_or_single_client_noauth( - 'mongodb://user:pass@%s:%d/pymongo_test?authMechanism=SCRAM-SHA-1' - % (host, port)) - client.pymongo_test.command('dbstats') + "mongodb://user:pass@%s:%d/pymongo_test?authMechanism=SCRAM-SHA-1" % (host, port) + ) + client.pymongo_test.command("dbstats") if client_context.is_rs: - uri = ('mongodb://user:pass' - '@%s:%d/pymongo_test?authMechanism=SCRAM-SHA-1' - '&replicaSet=%s' % (host, port, - client_context.replica_set_name)) + uri = ( + "mongodb://user:pass" + "@%s:%d/pymongo_test?authMechanism=SCRAM-SHA-1" + "&replicaSet=%s" % (host, port, client_context.replica_set_name) + ) client = single_client_noauth(uri) - client.pymongo_test.command('dbstats') - db = client.get_database( - 'pymongo_test', read_preference=ReadPreference.SECONDARY) - db.command('dbstats') + client.pymongo_test.command("dbstats") + db = client.get_database("pymongo_test", read_preference=ReadPreference.SECONDARY) + db.command("dbstats") # https://github.com/mongodb/specifications/blob/master/source/auth/auth.rst#scram-sha-256-and-mechanism-negotiation class TestSCRAM(IntegrationTest): - @client_context.require_auth @client_context.require_version_min(3, 7, 2) def setUp(self): @@ -362,114 +383,118 @@ def tearDown(self): def test_scram_skip_empty_exchange(self): listener = AllowListEventListener("saslStart", "saslContinue") client_context.create_user( - 'testscram', 'sha256', 'pwd', roles=['dbOwner'], - mechanisms=['SCRAM-SHA-256']) + "testscram", "sha256", "pwd", roles=["dbOwner"], mechanisms=["SCRAM-SHA-256"] + ) client = rs_or_single_client_noauth( - username='sha256', password='pwd', authSource='testscram', - event_listeners=[listener]) - client.testscram.command('dbstats') + username="sha256", password="pwd", authSource="testscram", event_listeners=[listener] + ) + client.testscram.command("dbstats") if client_context.version < (4, 4, -1): # Assert we sent the skipEmptyExchange option. - first_event = listener.results['started'][0] - self.assertEqual(first_event.command_name, 'saslStart') - self.assertEqual( - first_event.command['options'], {'skipEmptyExchange': True}) + first_event = listener.results["started"][0] + self.assertEqual(first_event.command_name, "saslStart") + self.assertEqual(first_event.command["options"], {"skipEmptyExchange": True}) # Assert the third exchange was skipped on servers that support it. # Note that the first exchange occurs on the connection handshake. 
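AllowListEventListener is a test helper, but the same observation can be made with PyMongo's public monitoring API. A rough sketch that records only the SASL conversation::

    from pymongo import monitoring

    class SaslListener(monitoring.CommandListener):
        def __init__(self):
            self.commands = []

        def started(self, event):
            # Keep only the authentication round trips.
            if event.command_name in ("saslStart", "saslContinue"):
                self.commands.append(event.command_name)

        def succeeded(self, event):
            pass

        def failed(self, event):
            pass

Passed to a client via ``event_listeners=[SaslListener()]``, a SCRAM-SHA-256 login against a 4.4+ server should record a single ``saslContinue``, matching the assertion just below.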
started = listener.started_command_names() if client_context.version.at_least(4, 4, -1): - self.assertEqual(started, ['saslContinue']) + self.assertEqual(started, ["saslContinue"]) else: - self.assertEqual( - started, ['saslStart', 'saslContinue', 'saslContinue']) + self.assertEqual(started, ["saslStart", "saslContinue", "saslContinue"]) def test_scram(self): # Step 1: create users client_context.create_user( - 'testscram', 'sha1', 'pwd', roles=['dbOwner'], - mechanisms=['SCRAM-SHA-1']) + "testscram", "sha1", "pwd", roles=["dbOwner"], mechanisms=["SCRAM-SHA-1"] + ) client_context.create_user( - 'testscram', 'sha256', 'pwd', roles=['dbOwner'], - mechanisms=['SCRAM-SHA-256']) + "testscram", "sha256", "pwd", roles=["dbOwner"], mechanisms=["SCRAM-SHA-256"] + ) client_context.create_user( - 'testscram', 'both', 'pwd', roles=['dbOwner'], - mechanisms=['SCRAM-SHA-1', 'SCRAM-SHA-256']) + "testscram", + "both", + "pwd", + roles=["dbOwner"], + mechanisms=["SCRAM-SHA-1", "SCRAM-SHA-256"], + ) # Step 2: verify auth success cases - client = rs_or_single_client_noauth( - username='sha1', password='pwd', authSource='testscram') - client.testscram.command('dbstats') + client = rs_or_single_client_noauth(username="sha1", password="pwd", authSource="testscram") + client.testscram.command("dbstats") client = rs_or_single_client_noauth( - username='sha1', password='pwd', authSource='testscram', - authMechanism='SCRAM-SHA-1') - client.testscram.command('dbstats') + username="sha1", password="pwd", authSource="testscram", authMechanism="SCRAM-SHA-1" + ) + client.testscram.command("dbstats") client = rs_or_single_client_noauth( - username='sha256', password='pwd', authSource='testscram') - client.testscram.command('dbstats') + username="sha256", password="pwd", authSource="testscram" + ) + client.testscram.command("dbstats") client = rs_or_single_client_noauth( - username='sha256', password='pwd', authSource='testscram', - authMechanism='SCRAM-SHA-256') - client.testscram.command('dbstats') + username="sha256", password="pwd", authSource="testscram", authMechanism="SCRAM-SHA-256" + ) + client.testscram.command("dbstats") # Step 2: SCRAM-SHA-1 and SCRAM-SHA-256 client = rs_or_single_client_noauth( - username='both', password='pwd', authSource='testscram', - authMechanism='SCRAM-SHA-1') - client.testscram.command('dbstats') + username="both", password="pwd", authSource="testscram", authMechanism="SCRAM-SHA-1" + ) + client.testscram.command("dbstats") client = rs_or_single_client_noauth( - username='both', password='pwd', authSource='testscram', - authMechanism='SCRAM-SHA-256') - client.testscram.command('dbstats') + username="both", password="pwd", authSource="testscram", authMechanism="SCRAM-SHA-256" + ) + client.testscram.command("dbstats") self.listener.results.clear() client = rs_or_single_client_noauth( - username='both', password='pwd', authSource='testscram', - event_listeners=[self.listener]) - client.testscram.command('dbstats') + username="both", password="pwd", authSource="testscram", event_listeners=[self.listener] + ) + client.testscram.command("dbstats") if client_context.version.at_least(4, 4, -1): # Speculative authentication in 4.4+ sends saslStart with the # handshake. 
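For context on the comment above: on 4.4+ the driver piggybacks the first SASL payload on the connection handshake, so no standalone saslStart command is observed by the listener. Roughly, the initial hello carries a document of this shape (field names per the MongoDB auth specification; the payload below is a placeholder, not a real SCRAM message)::

    handshake = {
        "hello": 1,
        "speculativeAuthenticate": {
            "saslStart": 1,
            "mechanism": "SCRAM-SHA-256",
            "payload": b"<client-first-message>",  # placeholder
            "db": "testscram",
            "options": {"skipEmptyExchange": True},
        },
    }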
- self.assertEqual(self.listener.results['started'], []) + self.assertEqual(self.listener.results["started"], []) else: - started = self.listener.results['started'][0] - self.assertEqual(started.command.get('mechanism'), 'SCRAM-SHA-256') + started = self.listener.results["started"][0] + self.assertEqual(started.command.get("mechanism"), "SCRAM-SHA-256") # Step 3: verify auth failure conditions client = rs_or_single_client_noauth( - username='sha1', password='pwd', authSource='testscram', - authMechanism='SCRAM-SHA-256') + username="sha1", password="pwd", authSource="testscram", authMechanism="SCRAM-SHA-256" + ) with self.assertRaises(OperationFailure): - client.testscram.command('dbstats') + client.testscram.command("dbstats") client = rs_or_single_client_noauth( - username='sha256', password='pwd', authSource='testscram', - authMechanism='SCRAM-SHA-1') + username="sha256", password="pwd", authSource="testscram", authMechanism="SCRAM-SHA-1" + ) with self.assertRaises(OperationFailure): - client.testscram.command('dbstats') + client.testscram.command("dbstats") client = rs_or_single_client_noauth( - username='not-a-user', password='pwd', authSource='testscram') + username="not-a-user", password="pwd", authSource="testscram" + ) with self.assertRaises(OperationFailure): - client.testscram.command('dbstats') + client.testscram.command("dbstats") if client_context.is_rs: host, port = client_context.host, client_context.port - uri = ('mongodb://both:pwd@%s:%d/testscram' - '?replicaSet=%s' % (host, port, - client_context.replica_set_name)) + uri = "mongodb://both:pwd@%s:%d/testscram" "?replicaSet=%s" % ( + host, + port, + client_context.replica_set_name, + ) client = single_client_noauth(uri) - client.testscram.command('dbstats') - db = client.get_database( - 'testscram', read_preference=ReadPreference.SECONDARY) - db.command('dbstats') + client.testscram.command("dbstats") + db = client.get_database("testscram", read_preference=ReadPreference.SECONDARY) + db.command("dbstats") - @unittest.skipUnless(HAVE_STRINGPREP, 'Cannot test without stringprep') + @unittest.skipUnless(HAVE_STRINGPREP, "Cannot test without stringprep") def test_scram_saslprep(self): # Step 4: test SASLprep host, port = client_context.host, client_context.port @@ -478,52 +503,59 @@ def test_scram_saslprep(self): # becomes 'IX'. SASLprep is only supported when the standard # library provides stringprep. 
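The two equivalences exercised below can be checked directly against pymongo's own saslprep helper, provided the standard library ships the ``stringprep`` module, as the comment above notes::

    from pymongo.saslprep import saslprep

    assert saslprep("\u2168") == "IX"    # U+2168 ROMAN NUMERAL NINE -> "IX" under NFKC
    assert saslprep("I\u00adX") == "IX"  # U+00AD SOFT HYPHEN is mapped to nothing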
client_context.create_user( - 'testscram', '\u2168', '\u2163', roles=['dbOwner'], - mechanisms=['SCRAM-SHA-256']) + "testscram", "\u2168", "\u2163", roles=["dbOwner"], mechanisms=["SCRAM-SHA-256"] + ) client_context.create_user( - 'testscram', 'IX', 'IX', roles=['dbOwner'], - mechanisms=['SCRAM-SHA-256']) + "testscram", "IX", "IX", roles=["dbOwner"], mechanisms=["SCRAM-SHA-256"] + ) client = rs_or_single_client_noauth( - username='\u2168', password='\u2163', authSource='testscram') - client.testscram.command('dbstats') + username="\u2168", password="\u2163", authSource="testscram" + ) + client.testscram.command("dbstats") client = rs_or_single_client_noauth( - username='\u2168', password='\u2163', authSource='testscram', - authMechanism='SCRAM-SHA-256') - client.testscram.command('dbstats') + username="\u2168", + password="\u2163", + authSource="testscram", + authMechanism="SCRAM-SHA-256", + ) + client.testscram.command("dbstats") client = rs_or_single_client_noauth( - username='\u2168', password='IV', authSource='testscram') - client.testscram.command('dbstats') + username="\u2168", password="IV", authSource="testscram" + ) + client.testscram.command("dbstats") client = rs_or_single_client_noauth( - username='IX', password='I\u00ADX', authSource='testscram') - client.testscram.command('dbstats') + username="IX", password="I\u00ADX", authSource="testscram" + ) + client.testscram.command("dbstats") client = rs_or_single_client_noauth( - username='IX', password='I\u00ADX', authSource='testscram', - authMechanism='SCRAM-SHA-256') - client.testscram.command('dbstats') + username="IX", + password="I\u00ADX", + authSource="testscram", + authMechanism="SCRAM-SHA-256", + ) + client.testscram.command("dbstats") client = rs_or_single_client_noauth( - username='IX', password='IX', authSource='testscram', - authMechanism='SCRAM-SHA-256') - client.testscram.command('dbstats') + username="IX", password="IX", authSource="testscram", authMechanism="SCRAM-SHA-256" + ) + client.testscram.command("dbstats") client = rs_or_single_client_noauth( - 'mongodb://\u2168:\u2163@%s:%d/testscram' % (host, port)) - client.testscram.command('dbstats') - client = rs_or_single_client_noauth( - 'mongodb://\u2168:IV@%s:%d/testscram' % (host, port)) - client.testscram.command('dbstats') + "mongodb://\u2168:\u2163@%s:%d/testscram" % (host, port) + ) + client.testscram.command("dbstats") + client = rs_or_single_client_noauth("mongodb://\u2168:IV@%s:%d/testscram" % (host, port)) + client.testscram.command("dbstats") - client = rs_or_single_client_noauth( - 'mongodb://IX:I\u00ADX@%s:%d/testscram' % (host, port)) - client.testscram.command('dbstats') - client = rs_or_single_client_noauth( - 'mongodb://IX:IX@%s:%d/testscram' % (host, port)) - client.testscram.command('dbstats') + client = rs_or_single_client_noauth("mongodb://IX:I\u00ADX@%s:%d/testscram" % (host, port)) + client.testscram.command("dbstats") + client = rs_or_single_client_noauth("mongodb://IX:IX@%s:%d/testscram" % (host, port)) + client.testscram.command("dbstats") def test_cache(self): client = single_client() @@ -532,7 +564,7 @@ def test_cache(self): self.assertIsNotNone(cache) self.assertIsNone(cache.data) # Force authentication. 
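What the cache inspected here avoids recomputing is the SCRAM salted password, a deliberately slow PBKDF2 derivation. A stdlib-only sketch of the cost involved; the password, salt, and iteration count below are illustrative, though server-chosen SCRAM-SHA-256 counts are in this ballpark::

    import hashlib
    import os

    salt = os.urandom(16)
    salted_password = hashlib.pbkdf2_hmac("sha256", b"pwd", salt, 15000)
    assert len(salted_password) == 32  # this digest is what gets cached after first auth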
- client.admin.command('ping') + client.admin.command("ping") cache = credentials.cache self.assertIsNotNone(cache) data = cache.data @@ -547,7 +579,7 @@ def test_cache(self): def test_scram_threaded(self): coll = client_context.client.db.test coll.drop() - coll.insert_one({'_id': 1}) + coll.insert_one({"_id": 1}) # The first thread to call find() will authenticate coll = rs_or_single_client().db.test @@ -562,71 +594,68 @@ def test_scram_threaded(self): class TestAuthURIOptions(IntegrationTest): - @client_context.require_auth def setUp(self): super(TestAuthURIOptions, self).setUp() - client_context.create_user('admin', 'admin', 'pass') - client_context.create_user( - 'pymongo_test', 'user', 'pass', ['userAdmin', 'readWrite']) + client_context.create_user("admin", "admin", "pass") + client_context.create_user("pymongo_test", "user", "pass", ["userAdmin", "readWrite"]) def tearDown(self): - client_context.drop_user('pymongo_test', 'user') - client_context.drop_user('admin', 'admin') + client_context.drop_user("pymongo_test", "user") + client_context.drop_user("admin", "admin") super(TestAuthURIOptions, self).tearDown() def test_uri_options(self): # Test default to admin host, port = client_context.host, client_context.port - client = rs_or_single_client_noauth( - 'mongodb://admin:pass@%s:%d' % (host, port)) - self.assertTrue(client.admin.command('dbstats')) + client = rs_or_single_client_noauth("mongodb://admin:pass@%s:%d" % (host, port)) + self.assertTrue(client.admin.command("dbstats")) if client_context.is_rs: - uri = ('mongodb://admin:pass@%s:%d/?replicaSet=%s' % ( - host, port, client_context.replica_set_name)) + uri = "mongodb://admin:pass@%s:%d/?replicaSet=%s" % ( + host, + port, + client_context.replica_set_name, + ) client = single_client_noauth(uri) - self.assertTrue(client.admin.command('dbstats')) - db = client.get_database( - 'admin', read_preference=ReadPreference.SECONDARY) - self.assertTrue(db.command('dbstats')) + self.assertTrue(client.admin.command("dbstats")) + db = client.get_database("admin", read_preference=ReadPreference.SECONDARY) + self.assertTrue(db.command("dbstats")) # Test explicit database - uri = 'mongodb://user:pass@%s:%d/pymongo_test' % (host, port) + uri = "mongodb://user:pass@%s:%d/pymongo_test" % (host, port) client = rs_or_single_client_noauth(uri) - self.assertRaises(OperationFailure, client.admin.command, 'dbstats') - self.assertTrue(client.pymongo_test.command('dbstats')) + self.assertRaises(OperationFailure, client.admin.command, "dbstats") + self.assertTrue(client.pymongo_test.command("dbstats")) if client_context.is_rs: - uri = ('mongodb://user:pass@%s:%d/pymongo_test?replicaSet=%s' % ( - host, port, client_context.replica_set_name)) + uri = "mongodb://user:pass@%s:%d/pymongo_test?replicaSet=%s" % ( + host, + port, + client_context.replica_set_name, + ) client = single_client_noauth(uri) - self.assertRaises(OperationFailure, - client.admin.command, 'dbstats') - self.assertTrue(client.pymongo_test.command('dbstats')) - db = client.get_database( - 'pymongo_test', read_preference=ReadPreference.SECONDARY) - self.assertTrue(db.command('dbstats')) + self.assertRaises(OperationFailure, client.admin.command, "dbstats") + self.assertTrue(client.pymongo_test.command("dbstats")) + db = client.get_database("pymongo_test", read_preference=ReadPreference.SECONDARY) + self.assertTrue(db.command("dbstats")) # Test authSource - uri = ('mongodb://user:pass@%s:%d' - '/pymongo_test2?authSource=pymongo_test' % (host, port)) + uri = "mongodb://user:pass@%s:%d" 
"/pymongo_test2?authSource=pymongo_test" % (host, port) client = rs_or_single_client_noauth(uri) - self.assertRaises(OperationFailure, - client.pymongo_test2.command, 'dbstats') - self.assertTrue(client.pymongo_test.command('dbstats')) + self.assertRaises(OperationFailure, client.pymongo_test2.command, "dbstats") + self.assertTrue(client.pymongo_test.command("dbstats")) if client_context.is_rs: - uri = ('mongodb://user:pass@%s:%d/pymongo_test2?replicaSet=' - '%s;authSource=pymongo_test' % ( - host, port, client_context.replica_set_name)) + uri = ( + "mongodb://user:pass@%s:%d/pymongo_test2?replicaSet=" + "%s;authSource=pymongo_test" % (host, port, client_context.replica_set_name) + ) client = single_client_noauth(uri) - self.assertRaises(OperationFailure, - client.pymongo_test2.command, 'dbstats') - self.assertTrue(client.pymongo_test.command('dbstats')) - db = client.get_database( - 'pymongo_test', read_preference=ReadPreference.SECONDARY) - self.assertTrue(db.command('dbstats')) + self.assertRaises(OperationFailure, client.pymongo_test2.command, "dbstats") + self.assertTrue(client.pymongo_test.command("dbstats")) + db = client.get_database("pymongo_test", read_preference=ReadPreference.SECONDARY) + self.assertTrue(db.command("dbstats")) if __name__ == "__main__": diff --git a/test/test_auth_spec.py b/test/test_auth_spec.py index e78b4b209a..9f2fa374ac 100644 --- a/test/test_auth_spec.py +++ b/test/test_auth_spec.py @@ -21,12 +21,11 @@ sys.path[0:0] = [""] -from pymongo import MongoClient from test import unittest +from pymongo import MongoClient -_TEST_PATH = os.path.join( - os.path.dirname(os.path.realpath(__file__)), 'auth') +_TEST_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "auth") class TestAuthSpec(unittest.TestCase): @@ -34,11 +33,10 @@ class TestAuthSpec(unittest.TestCase): def create_test(test_case): - def run_test(self): - uri = test_case['uri'] - valid = test_case['valid'] - credential = test_case.get('credential') + uri = test_case["uri"] + valid = test_case["valid"] + credential = test_case.get("credential") if not valid: self.assertRaises(Exception, MongoClient, uri, connect=False) @@ -49,39 +47,34 @@ def run_test(self): self.assertIsNone(credentials) else: self.assertIsNotNone(credentials) - self.assertEqual(credentials.username, credential['username']) - self.assertEqual(credentials.password, credential['password']) - self.assertEqual(credentials.source, credential['source']) - if credential['mechanism'] is not None: - self.assertEqual( - credentials.mechanism, credential['mechanism']) + self.assertEqual(credentials.username, credential["username"]) + self.assertEqual(credentials.password, credential["password"]) + self.assertEqual(credentials.source, credential["source"]) + if credential["mechanism"] is not None: + self.assertEqual(credentials.mechanism, credential["mechanism"]) else: - self.assertEqual(credentials.mechanism, 'DEFAULT') - expected = credential['mechanism_properties'] + self.assertEqual(credentials.mechanism, "DEFAULT") + expected = credential["mechanism_properties"] if expected is not None: actual = credentials.mechanism_properties for key, val in expected.items(): - if 'SERVICE_NAME' in expected: - self.assertEqual( - actual.service_name, expected['SERVICE_NAME']) - elif 'CANONICALIZE_HOST_NAME' in expected: - self.assertEqual( - actual.canonicalize_host_name, - expected['CANONICALIZE_HOST_NAME']) - elif 'SERVICE_REALM' in expected: + if "SERVICE_NAME" in expected: + self.assertEqual(actual.service_name, expected["SERVICE_NAME"]) + 
elif "CANONICALIZE_HOST_NAME" in expected: self.assertEqual( - actual.service_realm, - expected['SERVICE_REALM']) - elif 'AWS_SESSION_TOKEN' in expected: + actual.canonicalize_host_name, expected["CANONICALIZE_HOST_NAME"] + ) + elif "SERVICE_REALM" in expected: + self.assertEqual(actual.service_realm, expected["SERVICE_REALM"]) + elif "AWS_SESSION_TOKEN" in expected: self.assertEqual( - actual.aws_session_token, - expected['AWS_SESSION_TOKEN']) + actual.aws_session_token, expected["AWS_SESSION_TOKEN"] + ) else: - self.fail('Unhandled property: %s' % (key,)) + self.fail("Unhandled property: %s" % (key,)) else: - if credential['mechanism'] == 'MONGODB-AWS': - self.assertIsNone( - credentials.mechanism_properties.aws_session_token) + if credential["mechanism"] == "MONGODB-AWS": + self.assertIsNone(credentials.mechanism_properties.aws_session_token) else: self.assertIsNone(credentials.mechanism_properties) @@ -89,19 +82,16 @@ def run_test(self): def create_tests(): - for filename in glob.glob(os.path.join(_TEST_PATH, '*.json')): + for filename in glob.glob(os.path.join(_TEST_PATH, "*.json")): test_suffix, _ = os.path.splitext(os.path.basename(filename)) with open(filename) as auth_tests: - test_cases = json.load(auth_tests)['tests'] + test_cases = json.load(auth_tests)["tests"] for test_case in test_cases: - if test_case.get('optional', False): + if test_case.get("optional", False): continue test_method = create_test(test_case) - name = str(test_case['description'].lower().replace(' ', '_')) - setattr( - TestAuthSpec, - 'test_%s_%s' % (test_suffix, name), - test_method) + name = str(test_case["description"].lower().replace(" ", "_")) + setattr(TestAuthSpec, "test_%s_%s" % (test_suffix, name), test_method) create_tests() diff --git a/test/test_binary.py b/test/test_binary.py index 4bbda0c9d4..6352e93d2c 100644 --- a/test/test_binary.py +++ b/test/test_binary.py @@ -25,20 +25,18 @@ sys.path[0:0] = [""] -import bson +from test import IntegrationTest, client_context, unittest +from test.utils import ignore_deprecations +import bson from bson import decode, encode from bson.binary import * from bson.codec_options import CodecOptions from bson.son import SON - from pymongo.common import validate_uuid_representation from pymongo.mongo_client import MongoClient from pymongo.write_concern import WriteConcern -from test import client_context, unittest, IntegrationTest -from test.utils import ignore_deprecations - class TestBinary(unittest.TestCase): csharp_data: bytes @@ -48,37 +46,39 @@ class TestBinary(unittest.TestCase): def setUpClass(cls): # Generated by the Java driver from_java = ( - b'bAAAAAdfaWQAUCBQxkVm+XdxJ9tOBW5ld2d1aWQAEAAAAAMIQkfACFu' - b'Z/0RustLOU/G6Am5ld2d1aWRzdHJpbmcAJQAAAGZmOTk1YjA4LWMwND' - b'ctNDIwOC1iYWYxLTUzY2VkMmIyNmU0NAAAbAAAAAdfaWQAUCBQxkVm+' - b'XdxJ9tPBW5ld2d1aWQAEAAAAANgS/xhRXXv8kfIec+dYdyCAm5ld2d1' - b'aWRzdHJpbmcAJQAAAGYyZWY3NTQ1LTYxZmMtNGI2MC04MmRjLTYxOWR' - b'jZjc5Yzg0NwAAbAAAAAdfaWQAUCBQxkVm+XdxJ9tQBW5ld2d1aWQAEA' - b'AAAAPqREIbhZPUJOSdHCJIgaqNAm5ld2d1aWRzdHJpbmcAJQAAADI0Z' - b'DQ5Mzg1LTFiNDItNDRlYS04ZGFhLTgxNDgyMjFjOWRlNAAAbAAAAAdf' - b'aWQAUCBQxkVm+XdxJ9tRBW5ld2d1aWQAEAAAAANjQBn/aQuNfRyfNyx' - b'29COkAm5ld2d1aWRzdHJpbmcAJQAAADdkOGQwYjY5LWZmMTktNDA2My' - b'1hNDIzLWY0NzYyYzM3OWYxYwAAbAAAAAdfaWQAUCBQxkVm+XdxJ9tSB' - b'W5ld2d1aWQAEAAAAAMtSv/Et1cAQUFHUYevqxaLAm5ld2d1aWRzdHJp' - b'bmcAJQAAADQxMDA1N2I3LWM0ZmYtNGEyZC04YjE2LWFiYWY4NzUxNDc' - b'0MQAA') + b"bAAAAAdfaWQAUCBQxkVm+XdxJ9tOBW5ld2d1aWQAEAAAAAMIQkfACFu" + b"Z/0RustLOU/G6Am5ld2d1aWRzdHJpbmcAJQAAAGZmOTk1YjA4LWMwND" + 
b"ctNDIwOC1iYWYxLTUzY2VkMmIyNmU0NAAAbAAAAAdfaWQAUCBQxkVm+" + b"XdxJ9tPBW5ld2d1aWQAEAAAAANgS/xhRXXv8kfIec+dYdyCAm5ld2d1" + b"aWRzdHJpbmcAJQAAAGYyZWY3NTQ1LTYxZmMtNGI2MC04MmRjLTYxOWR" + b"jZjc5Yzg0NwAAbAAAAAdfaWQAUCBQxkVm+XdxJ9tQBW5ld2d1aWQAEA" + b"AAAAPqREIbhZPUJOSdHCJIgaqNAm5ld2d1aWRzdHJpbmcAJQAAADI0Z" + b"DQ5Mzg1LTFiNDItNDRlYS04ZGFhLTgxNDgyMjFjOWRlNAAAbAAAAAdf" + b"aWQAUCBQxkVm+XdxJ9tRBW5ld2d1aWQAEAAAAANjQBn/aQuNfRyfNyx" + b"29COkAm5ld2d1aWRzdHJpbmcAJQAAADdkOGQwYjY5LWZmMTktNDA2My" + b"1hNDIzLWY0NzYyYzM3OWYxYwAAbAAAAAdfaWQAUCBQxkVm+XdxJ9tSB" + b"W5ld2d1aWQAEAAAAAMtSv/Et1cAQUFHUYevqxaLAm5ld2d1aWRzdHJp" + b"bmcAJQAAADQxMDA1N2I3LWM0ZmYtNGEyZC04YjE2LWFiYWY4NzUxNDc" + b"0MQAA" + ) cls.java_data = base64.b64decode(from_java) # Generated by the .net driver from_csharp = ( - b'ZAAAABBfaWQAAAAAAAVuZXdndWlkABAAAAAD+MkoCd/Jy0iYJ7Vhl' - b'iF3BAJuZXdndWlkc3RyaW5nACUAAAAwOTI4YzlmOC1jOWRmLTQ4Y2' - b'ItOTgyNy1iNTYxOTYyMTc3MDQAAGQAAAAQX2lkAAEAAAAFbmV3Z3V' - b'pZAAQAAAAA9MD0oXQe6VOp7mK4jkttWUCbmV3Z3VpZHN0cmluZwAl' - b'AAAAODVkMjAzZDMtN2JkMC00ZWE1LWE3YjktOGFlMjM5MmRiNTY1A' - b'ABkAAAAEF9pZAACAAAABW5ld2d1aWQAEAAAAAPRmIO2auc/Tprq1Z' - b'oQ1oNYAm5ld2d1aWRzdHJpbmcAJQAAAGI2ODM5OGQxLWU3NmEtNGU' - b'zZi05YWVhLWQ1OWExMGQ2ODM1OAAAZAAAABBfaWQAAwAAAAVuZXdn' - b'dWlkABAAAAADISpriopuTEaXIa7arYOCFAJuZXdndWlkc3RyaW5nA' - b'CUAAAA4YTZiMmEyMS02ZThhLTQ2NGMtOTcyMS1hZWRhYWQ4MzgyMT' - b'QAAGQAAAAQX2lkAAQAAAAFbmV3Z3VpZAAQAAAAA98eg0CFpGlPihP' - b'MwOmYGOMCbmV3Z3VpZHN0cmluZwAlAAAANDA4MzFlZGYtYTQ4NS00' - b'ZjY5LThhMTMtY2NjMGU5OTgxOGUzAAA=') + b"ZAAAABBfaWQAAAAAAAVuZXdndWlkABAAAAAD+MkoCd/Jy0iYJ7Vhl" + b"iF3BAJuZXdndWlkc3RyaW5nACUAAAAwOTI4YzlmOC1jOWRmLTQ4Y2" + b"ItOTgyNy1iNTYxOTYyMTc3MDQAAGQAAAAQX2lkAAEAAAAFbmV3Z3V" + b"pZAAQAAAAA9MD0oXQe6VOp7mK4jkttWUCbmV3Z3VpZHN0cmluZwAl" + b"AAAAODVkMjAzZDMtN2JkMC00ZWE1LWE3YjktOGFlMjM5MmRiNTY1A" + b"ABkAAAAEF9pZAACAAAABW5ld2d1aWQAEAAAAAPRmIO2auc/Tprq1Z" + b"oQ1oNYAm5ld2d1aWRzdHJpbmcAJQAAAGI2ODM5OGQxLWU3NmEtNGU" + b"zZi05YWVhLWQ1OWExMGQ2ODM1OAAAZAAAABBfaWQAAwAAAAVuZXdn" + b"dWlkABAAAAADISpriopuTEaXIa7arYOCFAJuZXdndWlkc3RyaW5nA" + b"CUAAAA4YTZiMmEyMS02ZThhLTQ2NGMtOTcyMS1hZWRhYWQ4MzgyMT" + b"QAAGQAAAAQX2lkAAQAAAAFbmV3Z3VpZAAQAAAAA98eg0CFpGlPihP" + b"MwOmYGOMCbmV3Z3VpZHN0cmluZwAlAAAANDA4MzFlZGYtYTQ4NS00" + b"ZjY5LThhMTMtY2NjMGU5OTgxOGUzAAA=" + ) cls.csharp_data = base64.b64decode(from_csharp) def test_binary(self): @@ -124,20 +124,15 @@ def test_equality(self): def test_repr(self): one = Binary(b"hello world") - self.assertEqual(repr(one), - "Binary(%s, 0)" % (repr(b"hello world"),)) + self.assertEqual(repr(one), "Binary(%s, 0)" % (repr(b"hello world"),)) two = Binary(b"hello world", 2) - self.assertEqual(repr(two), - "Binary(%s, 2)" % (repr(b"hello world"),)) + self.assertEqual(repr(two), "Binary(%s, 2)" % (repr(b"hello world"),)) three = Binary(b"\x08\xFF") - self.assertEqual(repr(three), - "Binary(%s, 0)" % (repr(b"\x08\xFF"),)) + self.assertEqual(repr(three), "Binary(%s, 0)" % (repr(b"\x08\xFF"),)) four = Binary(b"\x08\xFF", 2) - self.assertEqual(repr(four), - "Binary(%s, 2)" % (repr(b"\x08\xFF"),)) + self.assertEqual(repr(four), "Binary(%s, 2)" % (repr(b"\x08\xFF"),)) five = Binary(b"test", 100) - self.assertEqual(repr(five), - "Binary(%s, 100)" % (repr(b"test"),)) + self.assertEqual(repr(five), "Binary(%s, 100)" % (repr(b"test"),)) def test_hash(self): one = Binary(b"hello world") @@ -152,9 +147,11 @@ def test_uuid_subtype_4(self): expected_bin = Binary(expected_uuid.bytes, 4) doc = {"uuid": expected_bin} encoded = encode(doc) - for uuid_rep in (UuidRepresentation.PYTHON_LEGACY, - 
UuidRepresentation.JAVA_LEGACY, - UuidRepresentation.CSHARP_LEGACY): + for uuid_rep in ( + UuidRepresentation.PYTHON_LEGACY, + UuidRepresentation.JAVA_LEGACY, + UuidRepresentation.CSHARP_LEGACY, + ): opts = CodecOptions(uuid_representation=uuid_rep) self.assertEqual(expected_bin, decode(encoded, opts)["uuid"]) opts = CodecOptions(uuid_representation=UuidRepresentation.STANDARD) @@ -165,39 +162,39 @@ def test_legacy_java_uuid(self): data = self.java_data docs = bson.decode_all(data, CodecOptions(SON, False, PYTHON_LEGACY)) for d in docs: - self.assertNotEqual(d['newguid'], uuid.UUID(d['newguidstring'])) + self.assertNotEqual(d["newguid"], uuid.UUID(d["newguidstring"])) docs = bson.decode_all(data, CodecOptions(SON, False, STANDARD)) for d in docs: - self.assertNotEqual(d['newguid'], uuid.UUID(d['newguidstring'])) + self.assertNotEqual(d["newguid"], uuid.UUID(d["newguidstring"])) docs = bson.decode_all(data, CodecOptions(SON, False, CSHARP_LEGACY)) for d in docs: - self.assertNotEqual(d['newguid'], uuid.UUID(d['newguidstring'])) + self.assertNotEqual(d["newguid"], uuid.UUID(d["newguidstring"])) docs = bson.decode_all(data, CodecOptions(SON, False, JAVA_LEGACY)) for d in docs: - self.assertEqual(d['newguid'], uuid.UUID(d['newguidstring'])) + self.assertEqual(d["newguid"], uuid.UUID(d["newguidstring"])) # Test encoding - encoded = b''.join([ - encode(doc, False, CodecOptions(uuid_representation=PYTHON_LEGACY)) - for doc in docs]) + encoded = b"".join( + [encode(doc, False, CodecOptions(uuid_representation=PYTHON_LEGACY)) for doc in docs] + ) self.assertNotEqual(data, encoded) - encoded = b''.join([ - encode(doc, False, CodecOptions(uuid_representation=STANDARD)) - for doc in docs]) + encoded = b"".join( + [encode(doc, False, CodecOptions(uuid_representation=STANDARD)) for doc in docs] + ) self.assertNotEqual(data, encoded) - encoded = b''.join([ - encode(doc, False, CodecOptions(uuid_representation=CSHARP_LEGACY)) - for doc in docs]) + encoded = b"".join( + [encode(doc, False, CodecOptions(uuid_representation=CSHARP_LEGACY)) for doc in docs] + ) self.assertNotEqual(data, encoded) - encoded = b''.join([ - encode(doc, False, CodecOptions(uuid_representation=JAVA_LEGACY)) - for doc in docs]) + encoded = b"".join( + [encode(doc, False, CodecOptions(uuid_representation=JAVA_LEGACY)) for doc in docs] + ) self.assertEqual(data, encoded) @client_context.require_connection @@ -205,21 +202,19 @@ def test_legacy_java_uuid_roundtrip(self): data = self.java_data docs = bson.decode_all(data, CodecOptions(SON, False, JAVA_LEGACY)) - client_context.client.pymongo_test.drop_collection('java_uuid') + client_context.client.pymongo_test.drop_collection("java_uuid") db = client_context.client.pymongo_test - coll = db.get_collection( - 'java_uuid', CodecOptions(uuid_representation=JAVA_LEGACY)) + coll = db.get_collection("java_uuid", CodecOptions(uuid_representation=JAVA_LEGACY)) coll.insert_many(docs) self.assertEqual(5, coll.count_documents({})) for d in coll.find(): - self.assertEqual(d['newguid'], uuid.UUID(d['newguidstring'])) + self.assertEqual(d["newguid"], uuid.UUID(d["newguidstring"])) - coll = db.get_collection( - 'java_uuid', CodecOptions(uuid_representation=PYTHON_LEGACY)) + coll = db.get_collection("java_uuid", CodecOptions(uuid_representation=PYTHON_LEGACY)) for d in coll.find(): - self.assertNotEqual(d['newguid'], d['newguidstring']) - client_context.client.pymongo_test.drop_collection('java_uuid') + self.assertNotEqual(d["newguid"], d["newguidstring"]) + 
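The reason the Java-encoded GUIDs only round-trip under JAVA_LEGACY is byte order: that representation stores each 8-byte half of the UUID reversed relative to RFC 4122 order. A small sketch using the same reference UUID as the explicit-coding prose tests further down::

    import uuid
    from bson.binary import Binary, UuidRepresentation

    u = uuid.UUID("00112233-4455-6677-8899-aabbccddeeff")
    b = Binary.from_uuid(u, UuidRepresentation.JAVA_LEGACY)
    assert b.hex().upper() == "7766554433221100FFEEDDCCBBAA9988"  # each half reversed
    assert b.subtype == 3  # legacy UUID binary subtype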
client_context.client.pymongo_test.drop_collection("java_uuid") def test_legacy_csharp_uuid(self): data = self.csharp_data @@ -227,39 +222,39 @@ def test_legacy_csharp_uuid(self): # Test decoding docs = bson.decode_all(data, CodecOptions(SON, False, PYTHON_LEGACY)) for d in docs: - self.assertNotEqual(d['newguid'], uuid.UUID(d['newguidstring'])) + self.assertNotEqual(d["newguid"], uuid.UUID(d["newguidstring"])) docs = bson.decode_all(data, CodecOptions(SON, False, STANDARD)) for d in docs: - self.assertNotEqual(d['newguid'], uuid.UUID(d['newguidstring'])) + self.assertNotEqual(d["newguid"], uuid.UUID(d["newguidstring"])) docs = bson.decode_all(data, CodecOptions(SON, False, JAVA_LEGACY)) for d in docs: - self.assertNotEqual(d['newguid'], uuid.UUID(d['newguidstring'])) + self.assertNotEqual(d["newguid"], uuid.UUID(d["newguidstring"])) docs = bson.decode_all(data, CodecOptions(SON, False, CSHARP_LEGACY)) for d in docs: - self.assertEqual(d['newguid'], uuid.UUID(d['newguidstring'])) + self.assertEqual(d["newguid"], uuid.UUID(d["newguidstring"])) # Test encoding - encoded = b''.join([ - encode(doc, False, CodecOptions(uuid_representation=PYTHON_LEGACY)) - for doc in docs]) + encoded = b"".join( + [encode(doc, False, CodecOptions(uuid_representation=PYTHON_LEGACY)) for doc in docs] + ) self.assertNotEqual(data, encoded) - encoded = b''.join([ - encode(doc, False, CodecOptions(uuid_representation=STANDARD)) - for doc in docs]) + encoded = b"".join( + [encode(doc, False, CodecOptions(uuid_representation=STANDARD)) for doc in docs] + ) self.assertNotEqual(data, encoded) - encoded = b''.join([ - encode(doc, False, CodecOptions(uuid_representation=JAVA_LEGACY)) - for doc in docs]) + encoded = b"".join( + [encode(doc, False, CodecOptions(uuid_representation=JAVA_LEGACY)) for doc in docs] + ) self.assertNotEqual(data, encoded) - encoded = b''.join([ - encode(doc, False, CodecOptions(uuid_representation=CSHARP_LEGACY)) - for doc in docs]) + encoded = b"".join( + [encode(doc, False, CodecOptions(uuid_representation=CSHARP_LEGACY)) for doc in docs] + ) self.assertEqual(data, encoded) @client_context.require_connection @@ -267,29 +262,25 @@ def test_legacy_csharp_uuid_roundtrip(self): data = self.csharp_data docs = bson.decode_all(data, CodecOptions(SON, False, CSHARP_LEGACY)) - client_context.client.pymongo_test.drop_collection('csharp_uuid') + client_context.client.pymongo_test.drop_collection("csharp_uuid") db = client_context.client.pymongo_test - coll = db.get_collection( - 'csharp_uuid', CodecOptions(uuid_representation=CSHARP_LEGACY)) + coll = db.get_collection("csharp_uuid", CodecOptions(uuid_representation=CSHARP_LEGACY)) coll.insert_many(docs) self.assertEqual(5, coll.count_documents({})) for d in coll.find(): - self.assertEqual(d['newguid'], uuid.UUID(d['newguidstring'])) + self.assertEqual(d["newguid"], uuid.UUID(d["newguidstring"])) - coll = db.get_collection( - 'csharp_uuid', CodecOptions(uuid_representation=PYTHON_LEGACY)) + coll = db.get_collection("csharp_uuid", CodecOptions(uuid_representation=PYTHON_LEGACY)) for d in coll.find(): - self.assertNotEqual(d['newguid'], d['newguidstring']) - client_context.client.pymongo_test.drop_collection('csharp_uuid') + self.assertNotEqual(d["newguid"], d["newguidstring"]) + client_context.client.pymongo_test.drop_collection("csharp_uuid") def test_uri_to_uuid(self): uri = "mongodb://foo/?uuidrepresentation=csharpLegacy" client = MongoClient(uri, connect=False) - self.assertEqual( - client.pymongo_test.test.codec_options.uuid_representation, - 
CSHARP_LEGACY) + self.assertEqual(client.pymongo_test.test.codec_options.uuid_representation, CSHARP_LEGACY) @client_context.require_connection def test_uuid_queries(self): @@ -298,37 +289,39 @@ def test_uuid_queries(self): coll.drop() uu = uuid.uuid4() - coll.insert_one({'uuid': Binary(uu.bytes, 3)}) + coll.insert_one({"uuid": Binary(uu.bytes, 3)}) self.assertEqual(1, coll.count_documents({})) # Test regular UUID queries (using subtype 4). coll = db.get_collection( - "test", CodecOptions( - uuid_representation=UuidRepresentation.STANDARD)) - self.assertEqual(0, coll.count_documents({'uuid': uu})) - coll.insert_one({'uuid': uu}) + "test", CodecOptions(uuid_representation=UuidRepresentation.STANDARD) + ) + self.assertEqual(0, coll.count_documents({"uuid": uu})) + coll.insert_one({"uuid": uu}) self.assertEqual(2, coll.count_documents({})) - docs = list(coll.find({'uuid': uu})) + docs = list(coll.find({"uuid": uu})) self.assertEqual(1, len(docs)) - self.assertEqual(uu, docs[0]['uuid']) + self.assertEqual(uu, docs[0]["uuid"]) # Test both. uu_legacy = Binary.from_uuid(uu, UuidRepresentation.PYTHON_LEGACY) - predicate = {'uuid': {'$in': [uu, uu_legacy]}} + predicate = {"uuid": {"$in": [uu, uu_legacy]}} self.assertEqual(2, coll.count_documents(predicate)) docs = list(coll.find(predicate)) self.assertEqual(2, len(docs)) coll.drop() def test_pickle(self): - b1 = Binary(b'123', 2) + b1 = Binary(b"123", 2) # For testing backwards compatibility with pre-2.4 pymongo - p = (b"\x80\x03cbson.binary\nBinary\nq\x00C\x03123q\x01\x85q" - b"\x02\x81q\x03}q\x04X\x10\x00\x00\x00_Binary__subtypeq" - b"\x05K\x02sb.") + p = ( + b"\x80\x03cbson.binary\nBinary\nq\x00C\x03123q\x01\x85q" + b"\x02\x81q\x03}q\x04X\x10\x00\x00\x00_Binary__subtypeq" + b"\x05K\x02sb." + ) - if not sys.version.startswith('3.0'): + if not sys.version.startswith("3.0"): self.assertEqual(b1, pickle.loads(p)) for proto in range(pickle.HIGHEST_PROTOCOL + 1): @@ -344,15 +337,15 @@ def test_pickle(self): self.assertEqual(uul, pickle.loads(pickle.dumps(uul, proto))) def test_buffer_protocol(self): - b0 = Binary(b'123', 2) + b0 = Binary(b"123", 2) - self.assertEqual(b0, Binary(memoryview(b'123'), 2)) - self.assertEqual(b0, Binary(bytearray(b'123'), 2)) - with mmap.mmap(-1, len(b'123')) as mm: - mm.write(b'123') + self.assertEqual(b0, Binary(memoryview(b"123"), 2)) + self.assertEqual(b0, Binary(bytearray(b"123"), 2)) + with mmap.mmap(-1, len(b"123")) as mm: + mm.write(b"123") mm.seek(0) self.assertEqual(b0, Binary(mm, 2)) - self.assertEqual(b0, Binary(array.array('B', b'123'), 2)) + self.assertEqual(b0, Binary(array.array("B", b"123"), 2)) class TestUuidSpecExplicitCoding(unittest.TestCase): @@ -370,40 +363,37 @@ def _hex_to_bytes(hexstring): # Explicit encoding prose test #1 def test_encoding_1(self): obj = Binary.from_uuid(self.uuid) - expected_obj = Binary( - self._hex_to_bytes("00112233445566778899AABBCCDDEEFF"), 4) + expected_obj = Binary(self._hex_to_bytes("00112233445566778899AABBCCDDEEFF"), 4) self.assertEqual(obj, expected_obj) - def _test_encoding_w_uuid_rep( - self, uuid_rep, expected_hexstring, expected_subtype): + def _test_encoding_w_uuid_rep(self, uuid_rep, expected_hexstring, expected_subtype): obj = Binary.from_uuid(self.uuid, uuid_rep) - expected_obj = Binary( - self._hex_to_bytes(expected_hexstring), expected_subtype) + expected_obj = Binary(self._hex_to_bytes(expected_hexstring), expected_subtype) self.assertEqual(obj, expected_obj) # Explicit encoding prose test #2 def test_encoding_2(self): self._test_encoding_w_uuid_rep( - 
UuidRepresentation.STANDARD, - "00112233445566778899AABBCCDDEEFF", 4) + UuidRepresentation.STANDARD, "00112233445566778899AABBCCDDEEFF", 4 + ) # Explicit encoding prose test #3 def test_encoding_3(self): self._test_encoding_w_uuid_rep( - UuidRepresentation.JAVA_LEGACY, - "7766554433221100FFEEDDCCBBAA9988", 3) + UuidRepresentation.JAVA_LEGACY, "7766554433221100FFEEDDCCBBAA9988", 3 + ) # Explicit encoding prose test #4 def test_encoding_4(self): self._test_encoding_w_uuid_rep( - UuidRepresentation.CSHARP_LEGACY, - "33221100554477668899AABBCCDDEEFF", 3) + UuidRepresentation.CSHARP_LEGACY, "33221100554477668899AABBCCDDEEFF", 3 + ) # Explicit encoding prose test #5 def test_encoding_5(self): self._test_encoding_w_uuid_rep( - UuidRepresentation.PYTHON_LEGACY, - "00112233445566778899AABBCCDDEEFF", 3) + UuidRepresentation.PYTHON_LEGACY, "00112233445566778899AABBCCDDEEFF", 3 + ) # Explicit encoding prose test #6 def test_encoding_6(self): @@ -412,17 +402,18 @@ def test_encoding_6(self): # Explicit decoding prose test #1 def test_decoding_1(self): - obj = Binary( - self._hex_to_bytes("00112233445566778899AABBCCDDEEFF"), 4) + obj = Binary(self._hex_to_bytes("00112233445566778899AABBCCDDEEFF"), 4) # Case i: self.assertEqual(obj.as_uuid(), self.uuid) # Case ii: self.assertEqual(obj.as_uuid(UuidRepresentation.STANDARD), self.uuid) # Cases iii-vi: - for uuid_rep in (UuidRepresentation.JAVA_LEGACY, - UuidRepresentation.CSHARP_LEGACY, - UuidRepresentation.PYTHON_LEGACY): + for uuid_rep in ( + UuidRepresentation.JAVA_LEGACY, + UuidRepresentation.CSHARP_LEGACY, + UuidRepresentation.PYTHON_LEGACY, + ): with self.assertRaises(ValueError): obj.as_uuid(uuid_rep) @@ -433,31 +424,29 @@ def _test_decoding_legacy(self, hexstring, uuid_rep): with self.assertRaises(ValueError): obj.as_uuid() # Cases ii-iii: - for rep in (UuidRepresentation.STANDARD, - UuidRepresentation.UNSPECIFIED): + for rep in (UuidRepresentation.STANDARD, UuidRepresentation.UNSPECIFIED): with self.assertRaises(ValueError): obj.as_uuid(rep) # Case iv: - self.assertEqual(obj.as_uuid(uuid_rep), - self.uuid) + self.assertEqual(obj.as_uuid(uuid_rep), self.uuid) # Explicit decoding prose test #2 def test_decoding_2(self): self._test_decoding_legacy( - "7766554433221100FFEEDDCCBBAA9988", - UuidRepresentation.JAVA_LEGACY) + "7766554433221100FFEEDDCCBBAA9988", UuidRepresentation.JAVA_LEGACY + ) # Explicit decoding prose test #3 def test_decoding_3(self): self._test_decoding_legacy( - "33221100554477668899AABBCCDDEEFF", - UuidRepresentation.CSHARP_LEGACY) + "33221100554477668899AABBCCDDEEFF", UuidRepresentation.CSHARP_LEGACY + ) # Explicit decoding prose test #4 def test_decoding_4(self): self._test_decoding_legacy( - "00112233445566778899AABBCCDDEEFF", - UuidRepresentation.PYTHON_LEGACY) + "00112233445566778899AABBCCDDEEFF", UuidRepresentation.PYTHON_LEGACY + ) class TestUuidSpecImplicitCoding(IntegrationTest): @@ -474,95 +463,90 @@ def _hex_to_bytes(hexstring): def _get_coll_w_uuid_rep(self, uuid_rep): codec_options = self.client.codec_options.with_options( - uuid_representation=validate_uuid_representation(None, uuid_rep)) + uuid_representation=validate_uuid_representation(None, uuid_rep) + ) coll = self.db.get_collection( - 'pymongo_test', codec_options=codec_options, - write_concern=WriteConcern("majority")) + "pymongo_test", codec_options=codec_options, write_concern=WriteConcern("majority") + ) return coll def _test_encoding(self, uuid_rep, expected_hexstring, expected_subtype): coll = self._get_coll_w_uuid_rep(uuid_rep) coll.delete_many({}) - 
coll.insert_one({'_id': self.uuid}) + coll.insert_one({"_id": self.uuid}) self.assertTrue( - coll.find_one({"_id": Binary( - self._hex_to_bytes(expected_hexstring), expected_subtype)})) + coll.find_one({"_id": Binary(self._hex_to_bytes(expected_hexstring), expected_subtype)}) + ) # Implicit encoding prose test #1 def test_encoding_1(self): - self._test_encoding( - "javaLegacy", "7766554433221100FFEEDDCCBBAA9988", 3) + self._test_encoding("javaLegacy", "7766554433221100FFEEDDCCBBAA9988", 3) # Implicit encoding prose test #2 def test_encoding_2(self): - self._test_encoding( - "csharpLegacy", "33221100554477668899AABBCCDDEEFF", 3) + self._test_encoding("csharpLegacy", "33221100554477668899AABBCCDDEEFF", 3) # Implicit encoding prose test #3 def test_encoding_3(self): - self._test_encoding( - "pythonLegacy", "00112233445566778899AABBCCDDEEFF", 3) + self._test_encoding("pythonLegacy", "00112233445566778899AABBCCDDEEFF", 3) # Implicit encoding prose test #4 def test_encoding_4(self): - self._test_encoding( - "standard", "00112233445566778899AABBCCDDEEFF", 4) + self._test_encoding("standard", "00112233445566778899AABBCCDDEEFF", 4) # Implicit encoding prose test #5 def test_encoding_5(self): with self.assertRaises(ValueError): - self._test_encoding( - "unspecifed", "dummy", -1) - - def _test_decoding(self, client_uuid_representation_string, - legacy_field_uuid_representation, - expected_standard_field_value, - expected_legacy_field_value): + self._test_encoding("unspecifed", "dummy", -1) + + def _test_decoding( + self, + client_uuid_representation_string, + legacy_field_uuid_representation, + expected_standard_field_value, + expected_legacy_field_value, + ): coll = self._get_coll_w_uuid_rep(client_uuid_representation_string) coll.drop() standard_val = Binary.from_uuid(self.uuid, UuidRepresentation.STANDARD) legacy_val = Binary.from_uuid(self.uuid, legacy_field_uuid_representation) - coll.insert_one({'standard': standard_val, 'legacy': legacy_val}) + coll.insert_one({"standard": standard_val, "legacy": legacy_val}) doc = coll.find_one() - self.assertEqual(doc['standard'], expected_standard_field_value) - self.assertEqual(doc['legacy'], expected_legacy_field_value) + self.assertEqual(doc["standard"], expected_standard_field_value) + self.assertEqual(doc["legacy"], expected_legacy_field_value) # Implicit decoding prose test #1 def test_decoding_1(self): - standard_binary = Binary.from_uuid( - self.uuid, UuidRepresentation.STANDARD) + standard_binary = Binary.from_uuid(self.uuid, UuidRepresentation.STANDARD) self._test_decoding( - "javaLegacy", UuidRepresentation.JAVA_LEGACY, - standard_binary, self.uuid) + "javaLegacy", UuidRepresentation.JAVA_LEGACY, standard_binary, self.uuid + ) self._test_decoding( - "csharpLegacy", UuidRepresentation.CSHARP_LEGACY, - standard_binary, self.uuid) + "csharpLegacy", UuidRepresentation.CSHARP_LEGACY, standard_binary, self.uuid + ) self._test_decoding( - "pythonLegacy", UuidRepresentation.PYTHON_LEGACY, - standard_binary, self.uuid) + "pythonLegacy", UuidRepresentation.PYTHON_LEGACY, standard_binary, self.uuid + ) # Implicit decoding pose test #2 def test_decoding_2(self): - legacy_binary = Binary.from_uuid( - self.uuid, UuidRepresentation.PYTHON_LEGACY) - self._test_decoding( - "standard", UuidRepresentation.PYTHON_LEGACY, - self.uuid, legacy_binary) + legacy_binary = Binary.from_uuid(self.uuid, UuidRepresentation.PYTHON_LEGACY) + self._test_decoding("standard", UuidRepresentation.PYTHON_LEGACY, self.uuid, legacy_binary) # Implicit decoding pose test #3 def 
test_decoding_3(self): - expected_standard_value = Binary.from_uuid( - self.uuid, UuidRepresentation.STANDARD) - for legacy_uuid_rep in (UuidRepresentation.PYTHON_LEGACY, - UuidRepresentation.CSHARP_LEGACY, - UuidRepresentation.JAVA_LEGACY): - expected_legacy_value = Binary.from_uuid( - self.uuid, legacy_uuid_rep) + expected_standard_value = Binary.from_uuid(self.uuid, UuidRepresentation.STANDARD) + for legacy_uuid_rep in ( + UuidRepresentation.PYTHON_LEGACY, + UuidRepresentation.CSHARP_LEGACY, + UuidRepresentation.JAVA_LEGACY, + ): + expected_legacy_value = Binary.from_uuid(self.uuid, legacy_uuid_rep) self._test_decoding( - "unspecified", legacy_uuid_rep, - expected_standard_value, expected_legacy_value) + "unspecified", legacy_uuid_rep, expected_standard_value, expected_legacy_value + ) if __name__ == "__main__": diff --git a/test/test_bson.py b/test/test_bson.py index 7052042ca8..f8f587567d 100644 --- a/test/test_bson.py +++ b/test/test_bson.py @@ -21,44 +21,43 @@ import datetime import mmap import os +import pickle import re import sys import tempfile import uuid -import pickle - -from collections import abc, OrderedDict +from collections import OrderedDict, abc from io import BytesIO sys.path[0:0] = [""] +from test import qcheck, unittest +from test.utils import ExceptionCatchingThread + import bson -from bson import (BSON, - decode, - decode_all, - decode_file_iter, - decode_iter, - encode, - EPOCH_AWARE, - is_valid, - Regex) +from bson import ( + BSON, + EPOCH_AWARE, + Regex, + decode, + decode_all, + decode_file_iter, + decode_iter, + encode, + is_valid, +) from bson.binary import Binary, UuidRepresentation from bson.code import Code from bson.codec_options import CodecOptions +from bson.dbref import DBRef +from bson.errors import InvalidBSON, InvalidDocument from bson.int64 import Int64 +from bson.max_key import MaxKey +from bson.min_key import MinKey from bson.objectid import ObjectId -from bson.dbref import DBRef from bson.son import SON from bson.timestamp import Timestamp -from bson.errors import (InvalidBSON, - InvalidDocument) -from bson.max_key import MaxKey -from bson.min_key import MinKey -from bson.tz_util import (FixedOffset, - utc) - -from test import qcheck, unittest -from test.utils import ExceptionCatchingThread +from bson.tz_util import FixedOffset, utc class NotADict(abc.MutableMapping): @@ -95,7 +94,6 @@ def __repr__(self): class DSTAwareTimezone(datetime.tzinfo): - def __init__(self, offset, name, dst_start_month, dst_end_month): self.__offset = offset self.__dst_start_month = dst_start_month @@ -121,11 +119,10 @@ class TestBSON(unittest.TestCase): def assertInvalid(self, data): self.assertRaises(InvalidBSON, decode, data) - def check_encode_then_decode(self, doc_class=dict, decoder=decode, - encoder=encode): + def check_encode_then_decode(self, doc_class=dict, decoder=decode, encoder=encode): # Work around http://bugs.jython.org/issue1728 - if sys.platform.startswith('java'): + if sys.platform.startswith("java"): doc_class = SON def helper(doc): @@ -134,8 +131,7 @@ def helper(doc): helper({}) helper({"test": "hello"}) - self.assertTrue(isinstance(decoder(encoder( - {"hello": "world"}))["hello"], str)) + self.assertTrue(isinstance(decoder(encoder({"hello": "world"}))["hello"], str)) helper({"mike": -10120}) helper({"long": Int64(10)}) helper({"really big long": 2147483648}) @@ -148,9 +144,8 @@ def helper(doc): helper({"a binary": Binary(b"test", 128)}) helper({"a binary": Binary(b"test", 254)}) helper({"another binary": Binary(b"test", 2)}) - helper(SON([('test 
dst', datetime.datetime(1993, 4, 4, 2))])) - helper(SON([('test negative dst', - datetime.datetime(1, 1, 1, 1, 1, 1))])) + helper(SON([("test dst", datetime.datetime(1993, 4, 4, 2))])) + helper(SON([("test negative dst", datetime.datetime(1, 1, 1, 1, 1, 1))])) helper({"big float": float(10000000000)}) helper({"ref": DBRef("coll", 5)}) helper({"ref": DBRef("coll", 5, foo="bar", bar=4)}) @@ -160,14 +155,12 @@ def helper(doc): helper({"foo": MinKey()}) helper({"foo": MaxKey()}) helper({"$field": Code("function(){ return true; }")}) - helper({"$field": Code("return function(){ return x; }", scope={'x': False})}) + helper({"$field": Code("return function(){ return x; }", scope={"x": False})}) def encode_then_decode(doc): - return doc_class(doc) == decoder(encode(doc), CodecOptions( - document_class=doc_class)) + return doc_class(doc) == decoder(encode(doc), CodecOptions(document_class=doc_class)) - qcheck.check_unittest(self, encode_then_decode, - qcheck.gen_mongo_dict(3)) + qcheck.check_unittest(self, encode_then_decode, qcheck.gen_mongo_dict(3)) def test_encode_then_decode(self): self.check_encode_then_decode() @@ -177,18 +170,20 @@ def test_encode_then_decode_any_mapping(self): def test_encode_then_decode_legacy(self): self.check_encode_then_decode( - encoder=BSON.encode, - decoder=lambda *args: BSON(args[0]).decode(*args[1:])) + encoder=BSON.encode, decoder=lambda *args: BSON(args[0]).decode(*args[1:]) + ) def test_encode_then_decode_any_mapping_legacy(self): self.check_encode_then_decode( - doc_class=NotADict, encoder=BSON.encode, - decoder=lambda *args: BSON(args[0]).decode(*args[1:])) + doc_class=NotADict, + encoder=BSON.encode, + decoder=lambda *args: BSON(args[0]).decode(*args[1:]), + ) def test_encoding_defaultdict(self): - dct = collections.defaultdict(dict, [('foo', 'bar')]) # type: ignore[arg-type] + dct = collections.defaultdict(dict, [("foo", "bar")]) # type: ignore[arg-type] encode(dct) - self.assertEqual(dct, collections.defaultdict(dict, [('foo', 'bar')])) + self.assertEqual(dct, collections.defaultdict(dict, [("foo", "bar")])) def test_basic_validation(self): self.assertRaises(TypeError, is_valid, 100) @@ -209,117 +204,132 @@ def test_basic_validation(self): self.assertInvalid(b"\x07\x00\x00\x00\x02a\x00\x78\x56\x34\x12") self.assertInvalid(b"\x09\x00\x00\x00\x10a\x00\x05\x00") self.assertInvalid(b"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00") - self.assertInvalid(b"\x13\x00\x00\x00\x02foo\x00" - b"\x04\x00\x00\x00bar\x00\x00") - self.assertInvalid(b"\x18\x00\x00\x00\x03foo\x00\x0f\x00\x00" - b"\x00\x10bar\x00\xff\xff\xff\x7f\x00\x00") - self.assertInvalid(b"\x15\x00\x00\x00\x03foo\x00\x0c" - b"\x00\x00\x00\x08bar\x00\x01\x00\x00") - self.assertInvalid(b"\x1c\x00\x00\x00\x03foo\x00" - b"\x12\x00\x00\x00\x02bar\x00" - b"\x05\x00\x00\x00baz\x00\x00\x00") - self.assertInvalid(b"\x10\x00\x00\x00\x02a\x00" - b"\x04\x00\x00\x00abc\xff\x00") - - def test_bad_string_lengths(self): - self.assertInvalid( - b"\x0c\x00\x00\x00\x02\x00" - b"\x00\x00\x00\x00\x00\x00") + self.assertInvalid(b"\x13\x00\x00\x00\x02foo\x00" b"\x04\x00\x00\x00bar\x00\x00") self.assertInvalid( - b"\x12\x00\x00\x00\x02\x00" - b"\xff\xff\xff\xfffoobar\x00\x00") + b"\x18\x00\x00\x00\x03foo\x00\x0f\x00\x00" b"\x00\x10bar\x00\xff\xff\xff\x7f\x00\x00" + ) self.assertInvalid( - b"\x0c\x00\x00\x00\x0e\x00" - b"\x00\x00\x00\x00\x00\x00") + b"\x15\x00\x00\x00\x03foo\x00\x0c" b"\x00\x00\x00\x08bar\x00\x01\x00\x00" + ) self.assertInvalid( - b"\x12\x00\x00\x00\x0e\x00" - b"\xff\xff\xff\xfffoobar\x00\x00") + 
b"\x1c\x00\x00\x00\x03foo\x00" + b"\x12\x00\x00\x00\x02bar\x00" + b"\x05\x00\x00\x00baz\x00\x00\x00" + ) + self.assertInvalid(b"\x10\x00\x00\x00\x02a\x00" b"\x04\x00\x00\x00abc\xff\x00") + + def test_bad_string_lengths(self): + self.assertInvalid(b"\x0c\x00\x00\x00\x02\x00" b"\x00\x00\x00\x00\x00\x00") + self.assertInvalid(b"\x12\x00\x00\x00\x02\x00" b"\xff\xff\xff\xfffoobar\x00\x00") + self.assertInvalid(b"\x0c\x00\x00\x00\x0e\x00" b"\x00\x00\x00\x00\x00\x00") + self.assertInvalid(b"\x12\x00\x00\x00\x0e\x00" b"\xff\xff\xff\xfffoobar\x00\x00") self.assertInvalid( - b"\x18\x00\x00\x00\x0c\x00" - b"\x00\x00\x00\x00\x00RY\xb5j" - b"\xfa[\xd8A\xd6X]\x99\x00") + b"\x18\x00\x00\x00\x0c\x00" b"\x00\x00\x00\x00\x00RY\xb5j" b"\xfa[\xd8A\xd6X]\x99\x00" + ) self.assertInvalid( b"\x1e\x00\x00\x00\x0c\x00" b"\xff\xff\xff\xfffoobar\x00" - b"RY\xb5j\xfa[\xd8A\xd6X]\x99\x00") - self.assertInvalid( - b"\x0c\x00\x00\x00\r\x00" - b"\x00\x00\x00\x00\x00\x00") - self.assertInvalid( - b"\x0c\x00\x00\x00\r\x00" - b"\xff\xff\xff\xff\x00\x00") + b"RY\xb5j\xfa[\xd8A\xd6X]\x99\x00" + ) + self.assertInvalid(b"\x0c\x00\x00\x00\r\x00" b"\x00\x00\x00\x00\x00\x00") + self.assertInvalid(b"\x0c\x00\x00\x00\r\x00" b"\xff\xff\xff\xff\x00\x00") self.assertInvalid( b"\x1c\x00\x00\x00\x0f\x00" b"\x15\x00\x00\x00\x00\x00" b"\x00\x00\x00\x0c\x00\x00" b"\x00\x02\x00\x01\x00\x00" - b"\x00\x00\x00\x00") + b"\x00\x00\x00\x00" + ) self.assertInvalid( b"\x1c\x00\x00\x00\x0f\x00" b"\x15\x00\x00\x00\xff\xff" b"\xff\xff\x00\x0c\x00\x00" b"\x00\x02\x00\x01\x00\x00" - b"\x00\x00\x00\x00") + b"\x00\x00\x00\x00" + ) self.assertInvalid( b"\x1c\x00\x00\x00\x0f\x00" b"\x15\x00\x00\x00\x01\x00" b"\x00\x00\x00\x0c\x00\x00" b"\x00\x02\x00\x00\x00\x00" - b"\x00\x00\x00\x00") + b"\x00\x00\x00\x00" + ) self.assertInvalid( b"\x1c\x00\x00\x00\x0f\x00" b"\x15\x00\x00\x00\x01\x00" b"\x00\x00\x00\x0c\x00\x00" b"\x00\x02\x00\xff\xff\xff" - b"\xff\x00\x00\x00") + b"\xff\x00\x00\x00" + ) def test_random_data_is_not_bson(self): - qcheck.check_unittest(self, qcheck.isnt(is_valid), - qcheck.gen_string(qcheck.gen_range(0, 40))) + qcheck.check_unittest( + self, qcheck.isnt(is_valid), qcheck.gen_string(qcheck.gen_range(0, 40)) + ) def test_basic_decode(self): - self.assertEqual({"test": "hello world"}, - decode(b"\x1B\x00\x00\x00\x0E\x74\x65\x73\x74\x00\x0C" - b"\x00\x00\x00\x68\x65\x6C\x6C\x6F\x20\x77\x6F" - b"\x72\x6C\x64\x00\x00")) - self.assertEqual([{"test": "hello world"}, {}], - decode_all(b"\x1B\x00\x00\x00\x0E\x74\x65\x73\x74" - b"\x00\x0C\x00\x00\x00\x68\x65\x6C\x6C" - b"\x6f\x20\x77\x6F\x72\x6C\x64\x00\x00" - b"\x05\x00\x00\x00\x00")) - self.assertEqual([{"test": "hello world"}, {}], - list(decode_iter( - b"\x1B\x00\x00\x00\x0E\x74\x65\x73\x74" - b"\x00\x0C\x00\x00\x00\x68\x65\x6C\x6C" - b"\x6f\x20\x77\x6F\x72\x6C\x64\x00\x00" - b"\x05\x00\x00\x00\x00"))) - self.assertEqual([{"test": "hello world"}, {}], - list(decode_file_iter(BytesIO( - b"\x1B\x00\x00\x00\x0E\x74\x65\x73\x74" - b"\x00\x0C\x00\x00\x00\x68\x65\x6C\x6C" - b"\x6f\x20\x77\x6F\x72\x6C\x64\x00\x00" - b"\x05\x00\x00\x00\x00")))) + self.assertEqual( + {"test": "hello world"}, + decode( + b"\x1B\x00\x00\x00\x0E\x74\x65\x73\x74\x00\x0C" + b"\x00\x00\x00\x68\x65\x6C\x6C\x6F\x20\x77\x6F" + b"\x72\x6C\x64\x00\x00" + ), + ) + self.assertEqual( + [{"test": "hello world"}, {}], + decode_all( + b"\x1B\x00\x00\x00\x0E\x74\x65\x73\x74" + b"\x00\x0C\x00\x00\x00\x68\x65\x6C\x6C" + b"\x6f\x20\x77\x6F\x72\x6C\x64\x00\x00" + b"\x05\x00\x00\x00\x00" + ), + ) + self.assertEqual( + [{"test": "hello world"}, {}], 
+ list( + decode_iter( + b"\x1B\x00\x00\x00\x0E\x74\x65\x73\x74" + b"\x00\x0C\x00\x00\x00\x68\x65\x6C\x6C" + b"\x6f\x20\x77\x6F\x72\x6C\x64\x00\x00" + b"\x05\x00\x00\x00\x00" + ) + ), + ) + self.assertEqual( + [{"test": "hello world"}, {}], + list( + decode_file_iter( + BytesIO( + b"\x1B\x00\x00\x00\x0E\x74\x65\x73\x74" + b"\x00\x0C\x00\x00\x00\x68\x65\x6C\x6C" + b"\x6f\x20\x77\x6F\x72\x6C\x64\x00\x00" + b"\x05\x00\x00\x00\x00" + ) + ) + ), + ) def test_decode_all_buffer_protocol(self): - docs = [{'foo': 'bar'}, {}] + docs = [{"foo": "bar"}, {}] bs = b"".join(map(encode, docs)) # type: ignore[arg-type] self.assertEqual(docs, decode_all(bytearray(bs))) self.assertEqual(docs, decode_all(memoryview(bs))) - self.assertEqual(docs, decode_all(memoryview(b'1' + bs + b'1')[1:-1])) - self.assertEqual(docs, decode_all(array.array('B', bs))) + self.assertEqual(docs, decode_all(memoryview(b"1" + bs + b"1")[1:-1])) + self.assertEqual(docs, decode_all(array.array("B", bs))) with mmap.mmap(-1, len(bs)) as mm: mm.write(bs) mm.seek(0) self.assertEqual(docs, decode_all(mm)) def test_decode_buffer_protocol(self): - doc = {'foo': 'bar'} + doc = {"foo": "bar"} bs = encode(doc) self.assertEqual(doc, decode(bs)) self.assertEqual(doc, decode(bytearray(bs))) self.assertEqual(doc, decode(memoryview(bs))) - self.assertEqual(doc, decode(memoryview(b'1' + bs + b'1')[1:-1])) - self.assertEqual(doc, decode(array.array('B', bs))) + self.assertEqual(doc, decode(memoryview(b"1" + bs + b"1")[1:-1])) + self.assertEqual(doc, decode(array.array("B", bs))) with mmap.mmap(-1, len(bs)) as mm: mm.write(bs) mm.seek(0) @@ -329,8 +339,7 @@ def test_invalid_decodes(self): # Invalid object size (not enough bytes in document for even # an object size of first object. # NOTE: decode_all and decode_iter don't care, not sure if they should? - self.assertRaises(InvalidBSON, list, - decode_file_iter(BytesIO(b"\x1B"))) + self.assertRaises(InvalidBSON, list, decode_file_iter(BytesIO(b"\x1B"))) bad_bsons = [ # An object size that's too small to even include the object size, @@ -338,21 +347,27 @@ def test_invalid_decodes(self): b"\x01\x00\x00\x00\x00", # One object, but with object size listed smaller than it is in the # data. - (b"\x1A\x00\x00\x00\x0E\x74\x65\x73\x74" - b"\x00\x0C\x00\x00\x00\x68\x65\x6C\x6C" - b"\x6f\x20\x77\x6F\x72\x6C\x64\x00\x00" - b"\x05\x00\x00\x00\x00"), + ( + b"\x1A\x00\x00\x00\x0E\x74\x65\x73\x74" + b"\x00\x0C\x00\x00\x00\x68\x65\x6C\x6C" + b"\x6f\x20\x77\x6F\x72\x6C\x64\x00\x00" + b"\x05\x00\x00\x00\x00" + ), # One object, missing the EOO at the end. - (b"\x1B\x00\x00\x00\x0E\x74\x65\x73\x74" - b"\x00\x0C\x00\x00\x00\x68\x65\x6C\x6C" - b"\x6f\x20\x77\x6F\x72\x6C\x64\x00\x00" - b"\x05\x00\x00\x00"), + ( + b"\x1B\x00\x00\x00\x0E\x74\x65\x73\x74" + b"\x00\x0C\x00\x00\x00\x68\x65\x6C\x6C" + b"\x6f\x20\x77\x6F\x72\x6C\x64\x00\x00" + b"\x05\x00\x00\x00" + ), # One object, sized correctly, with a spot for an EOO, but the EOO # isn't 0x00. 
- (b"\x1B\x00\x00\x00\x0E\x74\x65\x73\x74" - b"\x00\x0C\x00\x00\x00\x68\x65\x6C\x6C" - b"\x6f\x20\x77\x6F\x72\x6C\x64\x00\x00" - b"\x05\x00\x00\x00\xFF"), + ( + b"\x1B\x00\x00\x00\x0E\x74\x65\x73\x74" + b"\x00\x0C\x00\x00\x00\x68\x65\x6C\x6C" + b"\x6f\x20\x77\x6F\x72\x6C\x64\x00\x00" + b"\x05\x00\x00\x00\xFF" + ), ] for i, data in enumerate(bad_bsons): msg = "bad_bson[{}]".format(i) @@ -371,14 +386,17 @@ def test_invalid_decodes(self): def test_invalid_field_name(self): # Decode a truncated field with self.assertRaises(InvalidBSON) as ctx: - decode(b'\x0b\x00\x00\x00\x02field\x00') + decode(b"\x0b\x00\x00\x00\x02field\x00") # Assert that the InvalidBSON error message is not empty. self.assertTrue(str(ctx.exception)) def test_data_timestamp(self): - self.assertEqual({"test": Timestamp(4, 20)}, - decode(b"\x13\x00\x00\x00\x11\x74\x65\x73\x74\x00\x14" - b"\x00\x00\x00\x04\x00\x00\x00\x00")) + self.assertEqual( + {"test": Timestamp(4, 20)}, + decode( + b"\x13\x00\x00\x00\x11\x74\x65\x73\x74\x00\x14" b"\x00\x00\x00\x04\x00\x00\x00\x00" + ), + ) def test_basic_encode(self): self.assertRaises(TypeError, encode, 100) @@ -388,83 +406,102 @@ def test_basic_encode(self): self.assertEqual(encode({}), BSON(b"\x05\x00\x00\x00\x00")) self.assertEqual(encode({}), b"\x05\x00\x00\x00\x00") - self.assertEqual(encode({"test": "hello world"}), - b"\x1B\x00\x00\x00\x02\x74\x65\x73\x74\x00\x0C\x00" - b"\x00\x00\x68\x65\x6C\x6C\x6F\x20\x77\x6F\x72\x6C" - b"\x64\x00\x00") - self.assertEqual(encode({"mike": 100}), - b"\x0F\x00\x00\x00\x10\x6D\x69\x6B\x65\x00\x64\x00" - b"\x00\x00\x00") - self.assertEqual(encode({"hello": 1.5}), - b"\x14\x00\x00\x00\x01\x68\x65\x6C\x6C\x6F\x00\x00" - b"\x00\x00\x00\x00\x00\xF8\x3F\x00") - self.assertEqual(encode({"true": True}), - b"\x0C\x00\x00\x00\x08\x74\x72\x75\x65\x00\x01\x00") - self.assertEqual(encode({"false": False}), - b"\x0D\x00\x00\x00\x08\x66\x61\x6C\x73\x65\x00\x00" - b"\x00") - self.assertEqual(encode({"empty": []}), - b"\x11\x00\x00\x00\x04\x65\x6D\x70\x74\x79\x00\x05" - b"\x00\x00\x00\x00\x00") - self.assertEqual(encode({"none": {}}), - b"\x10\x00\x00\x00\x03\x6E\x6F\x6E\x65\x00\x05\x00" - b"\x00\x00\x00\x00") - self.assertEqual(encode({"test": Binary(b"test", 0)}), - b"\x14\x00\x00\x00\x05\x74\x65\x73\x74\x00\x04\x00" - b"\x00\x00\x00\x74\x65\x73\x74\x00") - self.assertEqual(encode({"test": Binary(b"test", 2)}), - b"\x18\x00\x00\x00\x05\x74\x65\x73\x74\x00\x08\x00" - b"\x00\x00\x02\x04\x00\x00\x00\x74\x65\x73\x74\x00") - self.assertEqual(encode({"test": Binary(b"test", 128)}), - b"\x14\x00\x00\x00\x05\x74\x65\x73\x74\x00\x04\x00" - b"\x00\x00\x80\x74\x65\x73\x74\x00") - self.assertEqual(encode({"test": None}), - b"\x0B\x00\x00\x00\x0A\x74\x65\x73\x74\x00\x00") - self.assertEqual(encode({"date": datetime.datetime(2007, 1, 8, - 0, 30, 11)}), - b"\x13\x00\x00\x00\x09\x64\x61\x74\x65\x00\x38\xBE" - b"\x1C\xFF\x0F\x01\x00\x00\x00") - self.assertEqual(encode({"regex": re.compile(b"a*b", - re.IGNORECASE)}), - b"\x12\x00\x00\x00\x0B\x72\x65\x67\x65\x78\x00\x61" - b"\x2A\x62\x00\x69\x00\x00") - self.assertEqual(encode({"$where": Code("test")}), - b"\x16\x00\x00\x00\r$where\x00\x05\x00\x00\x00test" - b"\x00\x00") - self.assertEqual(encode({"$field": - Code("function(){ return true;}", scope=None)}), - b"+\x00\x00\x00\r$field\x00\x1a\x00\x00\x00" - b"function(){ return true;}\x00\x00") - self.assertEqual(encode({"$field": - Code("return function(){ return x; }", - scope={'x': False})}), - b"=\x00\x00\x00\x0f$field\x000\x00\x00\x00\x1f\x00" - b"\x00\x00return function(){ 
return x; }\x00\t\x00" - b"\x00\x00\x08x\x00\x00\x00\x00") + self.assertEqual( + encode({"test": "hello world"}), + b"\x1B\x00\x00\x00\x02\x74\x65\x73\x74\x00\x0C\x00" + b"\x00\x00\x68\x65\x6C\x6C\x6F\x20\x77\x6F\x72\x6C" + b"\x64\x00\x00", + ) + self.assertEqual( + encode({"mike": 100}), + b"\x0F\x00\x00\x00\x10\x6D\x69\x6B\x65\x00\x64\x00" b"\x00\x00\x00", + ) + self.assertEqual( + encode({"hello": 1.5}), + b"\x14\x00\x00\x00\x01\x68\x65\x6C\x6C\x6F\x00\x00" b"\x00\x00\x00\x00\x00\xF8\x3F\x00", + ) + self.assertEqual( + encode({"true": True}), b"\x0C\x00\x00\x00\x08\x74\x72\x75\x65\x00\x01\x00" + ) + self.assertEqual( + encode({"false": False}), b"\x0D\x00\x00\x00\x08\x66\x61\x6C\x73\x65\x00\x00" b"\x00" + ) + self.assertEqual( + encode({"empty": []}), + b"\x11\x00\x00\x00\x04\x65\x6D\x70\x74\x79\x00\x05" b"\x00\x00\x00\x00\x00", + ) + self.assertEqual( + encode({"none": {}}), + b"\x10\x00\x00\x00\x03\x6E\x6F\x6E\x65\x00\x05\x00" b"\x00\x00\x00\x00", + ) + self.assertEqual( + encode({"test": Binary(b"test", 0)}), + b"\x14\x00\x00\x00\x05\x74\x65\x73\x74\x00\x04\x00" b"\x00\x00\x00\x74\x65\x73\x74\x00", + ) + self.assertEqual( + encode({"test": Binary(b"test", 2)}), + b"\x18\x00\x00\x00\x05\x74\x65\x73\x74\x00\x08\x00" + b"\x00\x00\x02\x04\x00\x00\x00\x74\x65\x73\x74\x00", + ) + self.assertEqual( + encode({"test": Binary(b"test", 128)}), + b"\x14\x00\x00\x00\x05\x74\x65\x73\x74\x00\x04\x00" b"\x00\x00\x80\x74\x65\x73\x74\x00", + ) + self.assertEqual(encode({"test": None}), b"\x0B\x00\x00\x00\x0A\x74\x65\x73\x74\x00\x00") + self.assertEqual( + encode({"date": datetime.datetime(2007, 1, 8, 0, 30, 11)}), + b"\x13\x00\x00\x00\x09\x64\x61\x74\x65\x00\x38\xBE" b"\x1C\xFF\x0F\x01\x00\x00\x00", + ) + self.assertEqual( + encode({"regex": re.compile(b"a*b", re.IGNORECASE)}), + b"\x12\x00\x00\x00\x0B\x72\x65\x67\x65\x78\x00\x61" b"\x2A\x62\x00\x69\x00\x00", + ) + self.assertEqual( + encode({"$where": Code("test")}), + b"\x16\x00\x00\x00\r$where\x00\x05\x00\x00\x00test" b"\x00\x00", + ) + self.assertEqual( + encode({"$field": Code("function(){ return true;}", scope=None)}), + b"+\x00\x00\x00\r$field\x00\x1a\x00\x00\x00" b"function(){ return true;}\x00\x00", + ) + self.assertEqual( + encode({"$field": Code("return function(){ return x; }", scope={"x": False})}), + b"=\x00\x00\x00\x0f$field\x000\x00\x00\x00\x1f\x00" + b"\x00\x00return function(){ return x; }\x00\t\x00" + b"\x00\x00\x08x\x00\x00\x00\x00", + ) unicode_empty_scope = Code("function(){ return 'héllo';}", {}) - self.assertEqual(encode({'$field': unicode_empty_scope}), - b"8\x00\x00\x00\x0f$field\x00+\x00\x00\x00\x1e\x00" - b"\x00\x00function(){ return 'h\xc3\xa9llo';}\x00\x05" - b"\x00\x00\x00\x00\x00") + self.assertEqual( + encode({"$field": unicode_empty_scope}), + b"8\x00\x00\x00\x0f$field\x00+\x00\x00\x00\x1e\x00" + b"\x00\x00function(){ return 'h\xc3\xa9llo';}\x00\x05" + b"\x00\x00\x00\x00\x00", + ) a = ObjectId(b"\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0A\x0B") - self.assertEqual(encode({"oid": a}), - b"\x16\x00\x00\x00\x07\x6F\x69\x64\x00\x00\x01\x02" - b"\x03\x04\x05\x06\x07\x08\x09\x0A\x0B\x00") - self.assertEqual(encode({"ref": DBRef("coll", a)}), - b"\x2F\x00\x00\x00\x03ref\x00\x25\x00\x00\x00\x02" - b"$ref\x00\x05\x00\x00\x00coll\x00\x07$id\x00\x00" - b"\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0A\x0B\x00" - b"\x00") + self.assertEqual( + encode({"oid": a}), + b"\x16\x00\x00\x00\x07\x6F\x69\x64\x00\x00\x01\x02" + b"\x03\x04\x05\x06\x07\x08\x09\x0A\x0B\x00", + ) + self.assertEqual( + encode({"ref": DBRef("coll", a)}), + 
b"\x2F\x00\x00\x00\x03ref\x00\x25\x00\x00\x00\x02" + b"$ref\x00\x05\x00\x00\x00coll\x00\x07$id\x00\x00" + b"\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0A\x0B\x00" + b"\x00", + ) def test_unknown_type(self): # Repr value differs with major python version - part = "type %r for fieldname 'foo'" % (b'\x14',) + part = "type %r for fieldname 'foo'" % (b"\x14",) docs = [ - b'\x0e\x00\x00\x00\x14foo\x00\x01\x00\x00\x00\x00', - (b'\x16\x00\x00\x00\x04foo\x00\x0c\x00\x00\x00\x140' - b'\x00\x01\x00\x00\x00\x00\x00'), - (b' \x00\x00\x00\x04bar\x00\x16\x00\x00\x00\x030\x00\x0e\x00\x00' - b'\x00\x14foo\x00\x01\x00\x00\x00\x00\x00\x00')] + b"\x0e\x00\x00\x00\x14foo\x00\x01\x00\x00\x00\x00", + (b"\x16\x00\x00\x00\x04foo\x00\x0c\x00\x00\x00\x140" b"\x00\x01\x00\x00\x00\x00\x00"), + ( + b" \x00\x00\x00\x04bar\x00\x16\x00\x00\x00\x030\x00\x0e\x00\x00" + b"\x00\x14foo\x00\x01\x00\x00\x00\x00\x00\x00" + ), + ] for bs in docs: try: decode(bs) @@ -481,21 +518,19 @@ def test_dbpointer(self): # not support creation of the DBPointer type, but will decode # DBPointer to DBRef. - bs = (b"\x18\x00\x00\x00\x0c\x00\x01\x00\x00" - b"\x00\x00RY\xb5j\xfa[\xd8A\xd6X]\x99\x00") + bs = b"\x18\x00\x00\x00\x0c\x00\x01\x00\x00" b"\x00\x00RY\xb5j\xfa[\xd8A\xd6X]\x99\x00" - self.assertEqual({'': DBRef('', ObjectId('5259b56afa5bd841d6585d99'))}, - decode(bs)) + self.assertEqual({"": DBRef("", ObjectId("5259b56afa5bd841d6585d99"))}, decode(bs)) def test_bad_dbref(self): - ref_only = {'ref': {'$ref': 'collection'}} - id_only = {'ref': {'$id': ObjectId()}} + ref_only = {"ref": {"$ref": "collection"}} + id_only = {"ref": {"$id": ObjectId()}} self.assertEqual(ref_only, decode(encode(ref_only))) self.assertEqual(id_only, decode(encode(id_only))) def test_bytes_as_keys(self): - doc = {b"foo": 'bar'} + doc = {b"foo": "bar"} # Since `bytes` are stored as Binary you can't use them # as keys in python 3.x. Using binary data as a key makes # no sense in BSON anyway and little sense in python. @@ -528,15 +563,12 @@ def test_large_datetime_truncation(self): self.assertEqual(dt2.second, dt1.second) def test_aware_datetime(self): - aware = datetime.datetime(1993, 4, 4, 2, - tzinfo=FixedOffset(555, "SomeZone")) + aware = datetime.datetime(1993, 4, 4, 2, tzinfo=FixedOffset(555, "SomeZone")) offset = aware.utcoffset() assert offset is not None as_utc = (aware - offset).replace(tzinfo=utc) - self.assertEqual(datetime.datetime(1993, 4, 3, 16, 45, tzinfo=utc), - as_utc) - after = decode(encode({"date": aware}), CodecOptions(tz_aware=True))[ - "date"] + self.assertEqual(datetime.datetime(1993, 4, 3, 16, 45, tzinfo=utc), as_utc) + after = decode(encode({"date": aware}), CodecOptions(tz_aware=True))["date"] self.assertEqual(utc, after.tzinfo) self.assertEqual(as_utc, after) @@ -545,54 +577,47 @@ def test_local_datetime(self): tz = DSTAwareTimezone(60, "sixty-minutes", 4, 7) # It's not DST. - local = datetime.datetime(year=2025, month=12, hour=2, day=1, - tzinfo=tz) + local = datetime.datetime(year=2025, month=12, hour=2, day=1, tzinfo=tz) options = CodecOptions(tz_aware=True, tzinfo=tz) # Encode with this timezone, then decode to UTC. - encoded = encode({'date': local}, codec_options=options) - self.assertEqual(local.replace(hour=1, tzinfo=None), - decode(encoded)['date']) + encoded = encode({"date": local}, codec_options=options) + self.assertEqual(local.replace(hour=1, tzinfo=None), decode(encoded)["date"]) # It's DST. 
- local = datetime.datetime(year=2025, month=4, hour=1, day=1, - tzinfo=tz) - encoded = encode({'date': local}, codec_options=options) - self.assertEqual(local.replace(month=3, day=31, hour=23, tzinfo=None), - decode(encoded)['date']) + local = datetime.datetime(year=2025, month=4, hour=1, day=1, tzinfo=tz) + encoded = encode({"date": local}, codec_options=options) + self.assertEqual( + local.replace(month=3, day=31, hour=23, tzinfo=None), decode(encoded)["date"] + ) # Encode UTC, then decode in a different timezone. - encoded = encode({'date': local.replace(tzinfo=utc)}) - decoded = decode(encoded, options)['date'] + encoded = encode({"date": local.replace(tzinfo=utc)}) + decoded = decode(encoded, options)["date"] self.assertEqual(local.replace(hour=3), decoded) self.assertEqual(tz, decoded.tzinfo) # Test round-tripping. self.assertEqual( - local, decode(encode( - {'date': local}, codec_options=options), options)['date']) + local, decode(encode({"date": local}, codec_options=options), options)["date"] + ) # Test around the Unix Epoch. epochs = ( EPOCH_AWARE, - EPOCH_AWARE.astimezone(FixedOffset(120, 'one twenty')), - EPOCH_AWARE.astimezone(FixedOffset(-120, 'minus one twenty')) + EPOCH_AWARE.astimezone(FixedOffset(120, "one twenty")), + EPOCH_AWARE.astimezone(FixedOffset(-120, "minus one twenty")), ) utc_co = CodecOptions(tz_aware=True) for epoch in epochs: - doc = {'epoch': epoch} + doc = {"epoch": epoch} # We always retrieve datetimes in UTC unless told to do otherwise. - self.assertEqual( - EPOCH_AWARE, - decode(encode(doc), codec_options=utc_co)['epoch']) + self.assertEqual(EPOCH_AWARE, decode(encode(doc), codec_options=utc_co)["epoch"]) # Round-trip the epoch. local_co = CodecOptions(tz_aware=True, tzinfo=epoch.tzinfo) - self.assertEqual( - epoch, - decode(encode(doc), codec_options=local_co)['epoch']) + self.assertEqual(epoch, decode(encode(doc), codec_options=local_co)["epoch"]) def test_naive_decode(self): - aware = datetime.datetime(1993, 4, 4, 2, - tzinfo=FixedOffset(555, "SomeZone")) + aware = datetime.datetime(1993, 4, 4, 2, tzinfo=FixedOffset(555, "SomeZone")) offset = aware.utcoffset() assert offset is not None naive_utc = (aware - offset).replace(tzinfo=None) @@ -605,32 +630,30 @@ def test_dst(self): d = {"x": datetime.datetime(1993, 4, 4, 2)} self.assertEqual(d, decode(encode(d))) - @unittest.skip('Disabled due to http://bugs.python.org/issue25222') + @unittest.skip("Disabled due to http://bugs.python.org/issue25222") def test_bad_encode(self): - evil_list: dict = {'a': []} - evil_list['a'].append(evil_list) + evil_list: dict = {"a": []} + evil_list["a"].append(evil_list) evil_dict: dict = {} - evil_dict['a'] = evil_dict + evil_dict["a"] = evil_dict for evil_data in [evil_dict, evil_list]: self.assertRaises(Exception, encode, evil_data) def test_overflow(self): self.assertTrue(encode({"x": 9223372036854775807})) - self.assertRaises(OverflowError, encode, - {"x": 9223372036854775808}) + self.assertRaises(OverflowError, encode, {"x": 9223372036854775808}) self.assertTrue(encode({"x": -9223372036854775808})) - self.assertRaises(OverflowError, encode, - {"x": -9223372036854775809}) + self.assertRaises(OverflowError, encode, {"x": -9223372036854775809}) def test_small_long_encode_decode(self): - encoded1 = encode({'x': 256}) - decoded1 = decode(encoded1)['x'] + encoded1 = encode({"x": 256}) + decoded1 = decode(encoded1)["x"] self.assertEqual(256, decoded1) self.assertEqual(type(256), type(decoded1)) - encoded2 = encode({'x': Int64(256)}) - decoded2 = decode(encoded2)['x'] + 
encoded2 = encode({"x": Int64(256)}) + decoded2 = decode(encoded2)["x"] expected = Int64(256) self.assertEqual(expected, decoded2) self.assertEqual(type(expected), type(decoded2)) @@ -638,18 +661,16 @@ def test_small_long_encode_decode(self): self.assertNotEqual(type(decoded1), type(decoded2)) def test_tuple(self): - self.assertEqual({"tuple": [1, 2]}, - decode(encode({"tuple": (1, 2)}))) + self.assertEqual({"tuple": [1, 2]}, decode(encode({"tuple": (1, 2)}))) def test_uuid(self): id = uuid.uuid4() # The default uuid_representation is UNSPECIFIED - with self.assertRaisesRegex(ValueError, 'cannot encode native uuid'): - bson.decode_all(encode({'uuid': id})) + with self.assertRaisesRegex(ValueError, "cannot encode native uuid"): + bson.decode_all(encode({"uuid": id})) opts = CodecOptions(uuid_representation=UuidRepresentation.STANDARD) - transformed_id = decode(encode({"id": id}, codec_options=opts), - codec_options=opts)["id"] + transformed_id = decode(encode({"id": id}, codec_options=opts), codec_options=opts)["id"] self.assertTrue(isinstance(transformed_id, uuid.UUID)) self.assertEqual(id, transformed_id) self.assertNotEqual(uuid.uuid4(), transformed_id) @@ -666,7 +687,7 @@ def test_uuid_legacy(self): # The C extension was segfaulting on unicode RegExs, so we have this test # that doesn't really test anything but the lack of a segfault. def test_unicode_regex(self): - regex = re.compile('revisi\xf3n') + regex = re.compile("revisi\xf3n") decode(encode({"regex": regex})) def test_non_string_keys(self): @@ -677,12 +698,12 @@ def test_utf8(self): self.assertEqual(w, decode(encode(w))) # b'a\xe9' == "aé".encode("iso-8859-1") - iso8859_bytes = b'a\xe9' + iso8859_bytes = b"a\xe9" y = {"hello": iso8859_bytes} # Stored as BSON binary subtype 0. out = decode(encode(y)) - self.assertTrue(isinstance(out['hello'], bytes)) - self.assertEqual(out['hello'], iso8859_bytes) + self.assertTrue(isinstance(out["hello"], bytes)) + self.assertEqual(out["hello"], iso8859_bytes) def test_null_character(self): doc = {"a": "\x00"} @@ -694,28 +715,27 @@ def test_null_character(self): self.assertRaises(InvalidDocument, encode, {b"\x00": "a"}) self.assertRaises(InvalidDocument, encode, {"\x00": "a"}) - self.assertRaises(InvalidDocument, encode, - {"a": re.compile(b"ab\x00c")}) - self.assertRaises(InvalidDocument, encode, - {"a": re.compile("ab\x00c")}) + self.assertRaises(InvalidDocument, encode, {"a": re.compile(b"ab\x00c")}) + self.assertRaises(InvalidDocument, encode, {"a": re.compile("ab\x00c")}) def test_move_id(self): - self.assertEqual(b"\x19\x00\x00\x00\x02_id\x00\x02\x00\x00\x00a\x00" - b"\x02a\x00\x02\x00\x00\x00a\x00\x00", - encode(SON([("a", "a"), ("_id", "a")]))) - - self.assertEqual(b"\x2c\x00\x00\x00" - b"\x02_id\x00\x02\x00\x00\x00b\x00" - b"\x03b\x00" - b"\x19\x00\x00\x00\x02a\x00\x02\x00\x00\x00a\x00" - b"\x02_id\x00\x02\x00\x00\x00a\x00\x00\x00", - encode(SON([("b", - SON([("a", "a"), ("_id", "a")])), - ("_id", "b")]))) + self.assertEqual( + b"\x19\x00\x00\x00\x02_id\x00\x02\x00\x00\x00a\x00" + b"\x02a\x00\x02\x00\x00\x00a\x00\x00", + encode(SON([("a", "a"), ("_id", "a")])), + ) + + self.assertEqual( + b"\x2c\x00\x00\x00" + b"\x02_id\x00\x02\x00\x00\x00b\x00" + b"\x03b\x00" + b"\x19\x00\x00\x00\x02a\x00\x02\x00\x00\x00a\x00" + b"\x02_id\x00\x02\x00\x00\x00a\x00\x00\x00", + encode(SON([("b", SON([("a", "a"), ("_id", "a")])), ("_id", "b")])), + ) def test_dates(self): - doc = {"early": datetime.datetime(1686, 5, 5), - "late": datetime.datetime(2086, 5, 5)} + doc = {"early": datetime.datetime(1686, 5, 
5), "late": datetime.datetime(2086, 5, 5)} try: self.assertEqual(doc, decode(encode(doc))) except ValueError: @@ -728,15 +748,12 @@ def test_dates(self): def test_custom_class(self): self.assertIsInstance(decode(encode({})), dict) self.assertNotIsInstance(decode(encode({})), SON) - self.assertIsInstance( - decode(encode({}), CodecOptions(document_class=SON)), SON) + self.assertIsInstance(decode(encode({}), CodecOptions(document_class=SON)), SON) - self.assertEqual( - 1, decode(encode({"x": 1}), CodecOptions(document_class=SON))["x"]) + self.assertEqual(1, decode(encode({"x": 1}), CodecOptions(document_class=SON))["x"]) x = encode({"x": [{"y": 1}]}) - self.assertIsInstance( - decode(x, CodecOptions(document_class=SON))["x"][0], SON) + self.assertIsInstance(decode(x, CodecOptions(document_class=SON))["x"][0], SON) def test_subclasses(self): # make sure we can serialize subclasses of native Python types. @@ -749,9 +766,7 @@ class _myfloat(float): class _myunicode(str): pass - d = {'a': _myint(42), 'b': _myfloat(63.9), - 'c': _myunicode('hello world') - } + d = {"a": _myint(42), "b": _myfloat(63.9), "c": _myunicode("hello world")} d2 = decode(encode(d)) for key, value in d2.items(): orig_value = d[key] @@ -761,65 +776,60 @@ class _myunicode(str): def test_ordered_dict(self): d = OrderedDict([("one", 1), ("two", 2), ("three", 3), ("four", 4)]) - self.assertEqual( - d, decode(encode(d), CodecOptions(document_class=OrderedDict))) + self.assertEqual(d, decode(encode(d), CodecOptions(document_class=OrderedDict))) def test_bson_regex(self): # Invalid Python regex, though valid PCRE. - bson_re1 = Regex(r'[\w-\.]') - self.assertEqual(r'[\w-\.]', bson_re1.pattern) + bson_re1 = Regex(r"[\w-\.]") + self.assertEqual(r"[\w-\.]", bson_re1.pattern) self.assertEqual(0, bson_re1.flags) - doc1 = {'r': bson_re1} + doc1 = {"r": bson_re1} doc1_bson = ( - b'\x11\x00\x00\x00' # document length - b'\x0br\x00[\\w-\\.]\x00\x00' # r: regex - b'\x00') # document terminator + b"\x11\x00\x00\x00" b"\x0br\x00[\\w-\\.]\x00\x00" b"\x00" # document length # r: regex + ) # document terminator self.assertEqual(doc1_bson, encode(doc1)) self.assertEqual(doc1, decode(doc1_bson)) # Valid Python regex, with flags. 
- re2 = re.compile('.*', re.I | re.M | re.S | re.U | re.X) - bson_re2 = Regex('.*', re.I | re.M | re.S | re.U | re.X) + re2 = re.compile(".*", re.I | re.M | re.S | re.U | re.X) + bson_re2 = Regex(".*", re.I | re.M | re.S | re.U | re.X) - doc2_with_re = {'r': re2} - doc2_with_bson_re = {'r': bson_re2} + doc2_with_re = {"r": re2} + doc2_with_bson_re = {"r": bson_re2} doc2_bson = ( - b"\x11\x00\x00\x00" # document length - b"\x0br\x00.*\x00imsux\x00" # r: regex - b"\x00") # document terminator + b"\x11\x00\x00\x00" b"\x0br\x00.*\x00imsux\x00" b"\x00" # document length # r: regex + ) # document terminator self.assertEqual(doc2_bson, encode(doc2_with_re)) self.assertEqual(doc2_bson, encode(doc2_with_bson_re)) - self.assertEqual(re2.pattern, decode(doc2_bson)['r'].pattern) - self.assertEqual(re2.flags, decode(doc2_bson)['r'].flags) + self.assertEqual(re2.pattern, decode(doc2_bson)["r"].pattern) + self.assertEqual(re2.flags, decode(doc2_bson)["r"].flags) def test_regex_from_native(self): - self.assertEqual('.*', Regex.from_native(re.compile('.*')).pattern) - self.assertEqual(0, Regex.from_native(re.compile(b'')).flags) + self.assertEqual(".*", Regex.from_native(re.compile(".*")).pattern) + self.assertEqual(0, Regex.from_native(re.compile(b"")).flags) - regex = re.compile(b'', re.I | re.L | re.M | re.S | re.X) - self.assertEqual( - re.I | re.L | re.M | re.S | re.X, - Regex.from_native(regex).flags) + regex = re.compile(b"", re.I | re.L | re.M | re.S | re.X) + self.assertEqual(re.I | re.L | re.M | re.S | re.X, Regex.from_native(regex).flags) - unicode_regex = re.compile('', re.U) + unicode_regex = re.compile("", re.U) self.assertEqual(re.U, Regex.from_native(unicode_regex).flags) def test_regex_hash(self): - self.assertRaises(TypeError, hash, Regex('hello')) + self.assertRaises(TypeError, hash, Regex("hello")) def test_regex_comparison(self): - re1 = Regex('a') - re2 = Regex('b') + re1 = Regex("a") + re2 = Regex("b") self.assertNotEqual(re1, re2) - re1 = Regex('a', re.I) - re2 = Regex('a', re.M) + re1 = Regex("a", re.I) + re2 = Regex("a", re.M) self.assertNotEqual(re1, re2) - re1 = Regex('a', re.I) - re2 = Regex('a', re.I) + re1 = Regex("a", re.I) + re2 = Regex("a", re.I) self.assertEqual(re1, re2) def test_exception_wrapping(self): @@ -827,13 +837,12 @@ def test_exception_wrapping(self): # the final exception always matches InvalidBSON. # {'s': '\xff'}, will throw attempting to decode utf-8. - bad_doc = b'\x0f\x00\x00\x00\x02s\x00\x03\x00\x00\x00\xff\x00\x00\x00' + bad_doc = b"\x0f\x00\x00\x00\x02s\x00\x03\x00\x00\x00\xff\x00\x00\x00" with self.assertRaises(InvalidBSON) as context: decode_all(bad_doc) - self.assertIn("codec can't decode byte 0xff", - str(context.exception)) + self.assertIn("codec can't decode byte 0xff", str(context.exception)) def test_minkey_maxkey_comparison(self): # MinKey's <, <=, >, >=, !=, and ==. 
@@ -907,29 +916,25 @@ def test_timestamp_comparison(self): self.assertFalse(Timestamp(1, 0) > Timestamp(1, 0)) def test_timestamp_highorder_bits(self): - doc = {'a': Timestamp(0xFFFFFFFF, 0xFFFFFFFF)} - doc_bson = (b'\x10\x00\x00\x00' - b'\x11a\x00\xff\xff\xff\xff\xff\xff\xff\xff' - b'\x00') + doc = {"a": Timestamp(0xFFFFFFFF, 0xFFFFFFFF)} + doc_bson = b"\x10\x00\x00\x00" b"\x11a\x00\xff\xff\xff\xff\xff\xff\xff\xff" b"\x00" self.assertEqual(doc_bson, encode(doc)) self.assertEqual(doc, decode(doc_bson)) def test_bad_id_keys(self): - self.assertRaises(InvalidDocument, encode, - {"_id": {"$bad": 123}}, True) - self.assertRaises(InvalidDocument, encode, - {"_id": {'$oid': "52d0b971b3ba219fdeb4170e"}}, True) - encode({"_id": {'$oid': "52d0b971b3ba219fdeb4170e"}}) + self.assertRaises(InvalidDocument, encode, {"_id": {"$bad": 123}}, True) + self.assertRaises( + InvalidDocument, encode, {"_id": {"$oid": "52d0b971b3ba219fdeb4170e"}}, True + ) + encode({"_id": {"$oid": "52d0b971b3ba219fdeb4170e"}}) def test_bson_encode_thread_safe(self): - def target(i): for j in range(1000): - my_int = type('MyInt_%s_%s' % (i, j), (int,), {}) - bson.encode({'my_int': my_int()}) + my_int = type("MyInt_%s_%s" % (i, j), (int,), {}) + bson.encode({"my_int": my_int()}) - threads = [ExceptionCatchingThread(target=target, args=(i,)) - for i in range(3)] + threads = [ExceptionCatchingThread(target=target, args=(i,)) for i in range(3)] for t in threads: t.start() @@ -947,11 +952,11 @@ def __init__(self, val): def __repr__(self): return repr(self.val) - self.assertEqual('1', repr(Wrapper(1))) + self.assertEqual("1", repr(Wrapper(1))) with self.assertRaisesRegex( - InvalidDocument, - "cannot encode object: 1, of type: " + repr(Wrapper)): - encode({'t': Wrapper(1)}) + InvalidDocument, "cannot encode object: 1, of type: " + repr(Wrapper) + ): + encode({"t": Wrapper(1)}) class TestCodecOptions(unittest.TestCase): @@ -969,69 +974,67 @@ def test_uuid_representation(self): self.assertRaises(ValueError, CodecOptions, uuid_representation=2) def test_tzinfo(self): - self.assertRaises(TypeError, CodecOptions, tzinfo='pacific') - tz = FixedOffset(42, 'forty-two') + self.assertRaises(TypeError, CodecOptions, tzinfo="pacific") + tz = FixedOffset(42, "forty-two") self.assertRaises(ValueError, CodecOptions, tzinfo=tz) self.assertEqual(tz, CodecOptions(tz_aware=True, tzinfo=tz).tzinfo) def test_codec_options_repr(self): - r = ("CodecOptions(document_class=dict, tz_aware=False, " - "uuid_representation=UuidRepresentation.UNSPECIFIED, " - "unicode_decode_error_handler='strict', " - "tzinfo=None, type_registry=TypeRegistry(type_codecs=[], " - "fallback_encoder=None))") + r = ( + "CodecOptions(document_class=dict, tz_aware=False, " + "uuid_representation=UuidRepresentation.UNSPECIFIED, " + "unicode_decode_error_handler='strict', " + "tzinfo=None, type_registry=TypeRegistry(type_codecs=[], " + "fallback_encoder=None))" + ) self.assertEqual(r, repr(CodecOptions())) def test_decode_all_defaults(self): # Test decode_all()'s default document_class is dict and tz_aware is # False. 
- doc = {'sub_document': {}, - 'dt': datetime.datetime.utcnow()} + doc = {"sub_document": {}, "dt": datetime.datetime.utcnow()} decoded = bson.decode_all(bson.encode(doc))[0] - self.assertIsInstance(decoded['sub_document'], dict) - self.assertIsNone(decoded['dt'].tzinfo) + self.assertIsInstance(decoded["sub_document"], dict) + self.assertIsNone(decoded["dt"].tzinfo) # The default uuid_representation is UNSPECIFIED - with self.assertRaisesRegex(ValueError, 'cannot encode native uuid'): - bson.decode_all(bson.encode({'uuid': uuid.uuid4()})) + with self.assertRaisesRegex(ValueError, "cannot encode native uuid"): + bson.decode_all(bson.encode({"uuid": uuid.uuid4()})) def test_unicode_decode_error_handler(self): enc = encode({"keystr": "foobar"}) # Test handling of bad key value, bad string value, and both. - invalid_key = enc[:7] + b'\xe9' + enc[8:] - invalid_val = enc[:18] + b'\xe9' + enc[19:] - invalid_both = enc[:7] + b'\xe9' + enc[8:18] + b'\xe9' + enc[19:] + invalid_key = enc[:7] + b"\xe9" + enc[8:] + invalid_val = enc[:18] + b"\xe9" + enc[19:] + invalid_both = enc[:7] + b"\xe9" + enc[8:18] + b"\xe9" + enc[19:] # Ensure that strict mode raises an error. for invalid in [invalid_key, invalid_val, invalid_both]: - self.assertRaises(InvalidBSON, decode, invalid, CodecOptions( - unicode_decode_error_handler="strict")) + self.assertRaises( + InvalidBSON, decode, invalid, CodecOptions(unicode_decode_error_handler="strict") + ) self.assertRaises(InvalidBSON, decode, invalid, CodecOptions()) self.assertRaises(InvalidBSON, decode, invalid) # Test all other error handlers. - for handler in ['replace', 'backslashreplace', 'surrogateescape', - 'ignore']: - expected_key = b'ke\xe9str'.decode('utf-8', handler) - expected_val = b'fo\xe9bar'.decode('utf-8', handler) - doc = decode(invalid_key, - CodecOptions(unicode_decode_error_handler=handler)) + for handler in ["replace", "backslashreplace", "surrogateescape", "ignore"]: + expected_key = b"ke\xe9str".decode("utf-8", handler) + expected_val = b"fo\xe9bar".decode("utf-8", handler) + doc = decode(invalid_key, CodecOptions(unicode_decode_error_handler=handler)) self.assertEqual(doc, {expected_key: "foobar"}) - doc = decode(invalid_val, - CodecOptions(unicode_decode_error_handler=handler)) + doc = decode(invalid_val, CodecOptions(unicode_decode_error_handler=handler)) self.assertEqual(doc, {"keystr": expected_val}) - doc = decode(invalid_both, - CodecOptions(unicode_decode_error_handler=handler)) + doc = decode(invalid_both, CodecOptions(unicode_decode_error_handler=handler)) self.assertEqual(doc, {expected_key: expected_val}) # Test handling bad error mode. 
- dec = decode(enc, - CodecOptions(unicode_decode_error_handler="junk")) + dec = decode(enc, CodecOptions(unicode_decode_error_handler="junk")) self.assertEqual(dec, {"keystr": "foobar"}) - self.assertRaises(InvalidBSON, decode, invalid_both, CodecOptions( - unicode_decode_error_handler="junk")) + self.assertRaises( + InvalidBSON, decode, invalid_both, CodecOptions(unicode_decode_error_handler="junk") + ) def round_trip_pickle(self, obj, pickled_with_older): pickled_with_older_obj = pickle.loads(pickled_with_older) @@ -1043,61 +1046,75 @@ def round_trip_pickle(self, obj, pickled_with_older): def test_regex_pickling(self): reg = Regex(".?") - pickled_with_3 = (b'\x80\x04\x959\x00\x00\x00\x00\x00\x00\x00\x8c\n' - b'bson.regex\x94\x8c\x05Regex\x94\x93\x94)\x81\x94}' - b'\x94(\x8c\x07pattern\x94\x8c\x02.?\x94\x8c\x05flag' - b's\x94K\x00ub.') + pickled_with_3 = ( + b"\x80\x04\x959\x00\x00\x00\x00\x00\x00\x00\x8c\n" + b"bson.regex\x94\x8c\x05Regex\x94\x93\x94)\x81\x94}" + b"\x94(\x8c\x07pattern\x94\x8c\x02.?\x94\x8c\x05flag" + b"s\x94K\x00ub." + ) self.round_trip_pickle(reg, pickled_with_3) def test_timestamp_pickling(self): ts = Timestamp(0, 1) - pickled_with_3 = (b'\x80\x04\x95Q\x00\x00\x00\x00\x00\x00\x00\x8c' - b'\x0ebson.timestamp\x94\x8c\tTimestamp\x94\x93\x94)' - b'\x81\x94}\x94(' - b'\x8c\x10_Timestamp__time\x94K\x00\x8c' - b'\x0f_Timestamp__inc\x94K\x01ub.') + pickled_with_3 = ( + b"\x80\x04\x95Q\x00\x00\x00\x00\x00\x00\x00\x8c" + b"\x0ebson.timestamp\x94\x8c\tTimestamp\x94\x93\x94)" + b"\x81\x94}\x94(" + b"\x8c\x10_Timestamp__time\x94K\x00\x8c" + b"\x0f_Timestamp__inc\x94K\x01ub." + ) self.round_trip_pickle(ts, pickled_with_3) def test_dbref_pickling(self): dbr = DBRef("foo", 5) - pickled_with_3 = (b'\x80\x04\x95q\x00\x00\x00\x00\x00\x00\x00\x8c\n' - b'bson.dbref\x94\x8c\x05DBRef\x94\x93\x94)\x81\x94}' - b'\x94(\x8c\x12_DBRef__collection\x94\x8c\x03foo\x94' - b'\x8c\n_DBRef__id\x94K\x05\x8c\x10_DBRef__database' - b'\x94N\x8c\x0e_DBRef__kwargs\x94}\x94ub.') + pickled_with_3 = ( + b"\x80\x04\x95q\x00\x00\x00\x00\x00\x00\x00\x8c\n" + b"bson.dbref\x94\x8c\x05DBRef\x94\x93\x94)\x81\x94}" + b"\x94(\x8c\x12_DBRef__collection\x94\x8c\x03foo\x94" + b"\x8c\n_DBRef__id\x94K\x05\x8c\x10_DBRef__database" + b"\x94N\x8c\x0e_DBRef__kwargs\x94}\x94ub." + ) self.round_trip_pickle(dbr, pickled_with_3) - dbr = DBRef("foo", 5, database='db', kwargs1=None) - pickled_with_3 = (b'\x80\x04\x95\x81\x00\x00\x00\x00\x00\x00\x00\x8c' - b'\nbson.dbref\x94\x8c\x05DBRef\x94\x93\x94)\x81\x94}' - b'\x94(\x8c\x12_DBRef__collection\x94\x8c\x03foo\x94' - b'\x8c\n_DBRef__id\x94K\x05\x8c\x10_DBRef__database' - b'\x94\x8c\x02db\x94\x8c\x0e_DBRef__kwargs\x94}\x94' - b'\x8c\x07kwargs1\x94Nsub.') + dbr = DBRef("foo", 5, database="db", kwargs1=None) + pickled_with_3 = ( + b"\x80\x04\x95\x81\x00\x00\x00\x00\x00\x00\x00\x8c" + b"\nbson.dbref\x94\x8c\x05DBRef\x94\x93\x94)\x81\x94}" + b"\x94(\x8c\x12_DBRef__collection\x94\x8c\x03foo\x94" + b"\x8c\n_DBRef__id\x94K\x05\x8c\x10_DBRef__database" + b"\x94\x8c\x02db\x94\x8c\x0e_DBRef__kwargs\x94}\x94" + b"\x8c\x07kwargs1\x94Nsub." + ) self.round_trip_pickle(dbr, pickled_with_3) def test_minkey_pickling(self): mink = MinKey() - pickled_with_3 = (b'\x80\x04\x95\x1e\x00\x00\x00\x00\x00\x00\x00\x8c' - b'\x0cbson.min_key\x94\x8c\x06MinKey\x94\x93\x94)' - b'\x81\x94.') + pickled_with_3 = ( + b"\x80\x04\x95\x1e\x00\x00\x00\x00\x00\x00\x00\x8c" + b"\x0cbson.min_key\x94\x8c\x06MinKey\x94\x93\x94)" + b"\x81\x94." 
+ ) self.round_trip_pickle(mink, pickled_with_3) def test_maxkey_pickling(self): maxk = MaxKey() - pickled_with_3 = (b'\x80\x04\x95\x1e\x00\x00\x00\x00\x00\x00\x00\x8c' - b'\x0cbson.max_key\x94\x8c\x06MaxKey\x94\x93\x94)' - b'\x81\x94.') + pickled_with_3 = ( + b"\x80\x04\x95\x1e\x00\x00\x00\x00\x00\x00\x00\x8c" + b"\x0cbson.max_key\x94\x8c\x06MaxKey\x94\x93\x94)" + b"\x81\x94." + ) self.round_trip_pickle(maxk, pickled_with_3) def test_int64_pickling(self): i64 = Int64(9) - pickled_with_3 = (b'\x80\x04\x95\x1e\x00\x00\x00\x00\x00\x00\x00\x8c\n' - b'bson.int64\x94\x8c\x05Int64\x94\x93\x94K\t\x85\x94' - b'\x81\x94.') + pickled_with_3 = ( + b"\x80\x04\x95\x1e\x00\x00\x00\x00\x00\x00\x00\x8c\n" + b"bson.int64\x94\x8c\x05Int64\x94\x93\x94K\t\x85\x94" + b"\x81\x94." + ) self.round_trip_pickle(i64, pickled_with_3) diff --git a/test/test_bson_corpus.py b/test/test_bson_corpus.py index cbb702e405..4a46276573 100644 --- a/test/test_bson_corpus.py +++ b/test/test_bson_corpus.py @@ -21,54 +21,52 @@ import json import os import sys - from decimal import DecimalException sys.path[0:0] = [""] +from test import unittest + from bson import decode, encode, json_util from bson.binary import STANDARD from bson.codec_options import CodecOptions -from bson.decimal128 import Decimal128 from bson.dbref import DBRef +from bson.decimal128 import Decimal128 from bson.errors import InvalidBSON, InvalidDocument, InvalidId from bson.json_util import JSONMode from bson.son import SON -from test import unittest - -_TEST_PATH = os.path.join( - os.path.dirname(os.path.realpath(__file__)), 'bson_corpus') +_TEST_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "bson_corpus") _TESTS_TO_SKIP = { # Python cannot decode dates after year 9999. - 'Y10K', + "Y10K", } _NON_PARSE_ERRORS = { # {"$date": } is our legacy format which we still need to parse. - 'Bad $date (number, not string or hash)', + "Bad $date (number, not string or hash)", # This variant of $numberLong may have been generated by an old version # of mongoexport. - 'Bad $numberLong (number, not string)', + "Bad $numberLong (number, not string)", # Python's UUID constructor is very permissive. - '$uuid invalid value--misplaced hyphens', + "$uuid invalid value--misplaced hyphens", # We parse Regex flags with extra characters, including nulls. - 'Null byte in $regularExpression options', + "Null byte in $regularExpression options", } _IMPLCIT_LOSSY_TESTS = { # JSON decodes top-level $ref+$id as a DBRef but BSON doesn't. - 'Document with key names similar to those of a DBRef' + "Document with key names similar to those of a DBRef" } _DEPRECATED_BSON_TYPES = { # Symbol - '0x0E': str, + "0x0E": str, # Undefined - '0x06': type(None), + "0x06": type(None), # DBPointer - '0x0C': DBRef + "0x0C": DBRef, } @@ -78,27 +76,23 @@ # We normally encode UUID as binary subtype 0x03, # but we'll need to encode to subtype 0x04 for one of the tests. 
codec_options_uuid_04 = codec_options._replace(uuid_representation=STANDARD) -json_options_uuid_04 = json_util.JSONOptions(json_mode=JSONMode.CANONICAL, - uuid_representation=STANDARD) +json_options_uuid_04 = json_util.JSONOptions( + json_mode=JSONMode.CANONICAL, uuid_representation=STANDARD +) json_options_iso8601 = json_util.JSONOptions( - datetime_representation=json_util.DatetimeRepresentation.ISO8601, - json_mode=JSONMode.LEGACY) -to_extjson = functools.partial(json_util.dumps, - json_options=json_util.CANONICAL_JSON_OPTIONS) -to_extjson_uuid_04 = functools.partial(json_util.dumps, - json_options=json_options_uuid_04) -to_extjson_iso8601 = functools.partial(json_util.dumps, - json_options=json_options_iso8601) -to_relaxed_extjson = functools.partial( - json_util.dumps, json_options=json_util.RELAXED_JSON_OPTIONS) -to_bson_uuid_04 = functools.partial(encode, - codec_options=codec_options_uuid_04) + datetime_representation=json_util.DatetimeRepresentation.ISO8601, json_mode=JSONMode.LEGACY +) +to_extjson = functools.partial(json_util.dumps, json_options=json_util.CANONICAL_JSON_OPTIONS) +to_extjson_uuid_04 = functools.partial(json_util.dumps, json_options=json_options_uuid_04) +to_extjson_iso8601 = functools.partial(json_util.dumps, json_options=json_options_iso8601) +to_relaxed_extjson = functools.partial(json_util.dumps, json_options=json_util.RELAXED_JSON_OPTIONS) +to_bson_uuid_04 = functools.partial(encode, codec_options=codec_options_uuid_04) to_bson = functools.partial(encode, codec_options=codec_options) decode_bson = functools.partial(decode, codec_options=codec_options_no_tzaware) decode_extjson = functools.partial( json_util.loads, - json_options=json_util.JSONOptions(json_mode=JSONMode.CANONICAL, - document_class=SON)) + json_options=json_util.JSONOptions(json_mode=JSONMode.CANONICAL, document_class=SON), +) loads = functools.partial(json.loads, object_pairs_hook=SON) @@ -113,65 +107,62 @@ def assertJsonEqual(self, first, second, msg=None): def create_test(case_spec): - bson_type = case_spec['bson_type'] + bson_type = case_spec["bson_type"] # Test key is absent when testing top-level documents. - test_key = case_spec.get('test_key') - deprecated = case_spec.get('deprecated') + test_key = case_spec.get("test_key") + deprecated = case_spec.get("deprecated") def run_test(self): - for valid_case in case_spec.get('valid', []): - description = valid_case['description'] + for valid_case in case_spec.get("valid", []): + description = valid_case["description"] if description in _TESTS_TO_SKIP: continue # Special case for testing encoding UUID as binary subtype 0x04. 
- if description.startswith('subtype 0x04'): + if description.startswith("subtype 0x04"): encode_extjson = to_extjson_uuid_04 encode_bson = to_bson_uuid_04 else: encode_extjson = to_extjson encode_bson = to_bson - cB = binascii.unhexlify(valid_case['canonical_bson'].encode('utf8')) - cEJ = valid_case['canonical_extjson'] - rEJ = valid_case.get('relaxed_extjson') - dEJ = valid_case.get('degenerate_extjson') + cB = binascii.unhexlify(valid_case["canonical_bson"].encode("utf8")) + cEJ = valid_case["canonical_extjson"] + rEJ = valid_case.get("relaxed_extjson") + dEJ = valid_case.get("degenerate_extjson") if description in _IMPLCIT_LOSSY_TESTS: - valid_case.setdefault('lossy', True) - lossy = valid_case.get('lossy') + valid_case.setdefault("lossy", True) + lossy = valid_case.get("lossy") # BSON double, use lowercase 'e+' to match Python's encoding - if bson_type == '0x01': - cEJ = cEJ.replace('E+', 'e+') + if bson_type == "0x01": + cEJ = cEJ.replace("E+", "e+") decoded_bson = decode_bson(cB) if not lossy: # Make sure we can parse the legacy (default) JSON format. legacy_json = json_util.dumps( - decoded_bson, json_options=json_util.LEGACY_JSON_OPTIONS) - self.assertEqual( - decode_extjson(legacy_json), decoded_bson, description) + decoded_bson, json_options=json_util.LEGACY_JSON_OPTIONS + ) + self.assertEqual(decode_extjson(legacy_json), decoded_bson, description) if deprecated: - if 'converted_bson' in valid_case: - converted_bson = binascii.unhexlify( - valid_case['converted_bson'].encode('utf8')) + if "converted_bson" in valid_case: + converted_bson = binascii.unhexlify(valid_case["converted_bson"].encode("utf8")) self.assertEqual(encode_bson(decoded_bson), converted_bson) self.assertJsonEqual( - encode_extjson(decode_bson(converted_bson)), - valid_case['converted_extjson']) + encode_extjson(decode_bson(converted_bson)), valid_case["converted_extjson"] + ) # Make sure we can decode the type. self.assertEqual(decoded_bson, decode_extjson(cEJ)) if test_key is not None: - self.assertIsInstance(decoded_bson[test_key], - _DEPRECATED_BSON_TYPES[bson_type]) + self.assertIsInstance(decoded_bson[test_key], _DEPRECATED_BSON_TYPES[bson_type]) continue # Jython can't handle NaN with a payload from # struct.(un)pack if endianness is specified in the format string. - if not (sys.platform.startswith("java") and - description == 'NaN with payload'): + if not (sys.platform.startswith("java") and description == "NaN with payload"): # Test round-tripping canonical bson. self.assertEqual(encode_bson(decoded_bson), cB, description) self.assertJsonEqual(encode_extjson(decoded_bson), cEJ) @@ -183,8 +174,8 @@ def run_test(self): self.assertEqual(encode_bson(decoded_json), cB) # Test round-tripping degenerate bson. - if 'degenerate_bson' in valid_case: - dB = binascii.unhexlify(valid_case['degenerate_bson'].encode('utf8')) + if "degenerate_bson" in valid_case: + dB = binascii.unhexlify(valid_case["degenerate_bson"].encode("utf8")) self.assertEqual(encode_bson(decode_bson(dB)), cB) # Test round-tripping degenerate extended json. 
@@ -200,53 +191,48 @@ def run_test(self): decoded_json = decode_extjson(rEJ) self.assertJsonEqual(to_relaxed_extjson(decoded_json), rEJ) - for decode_error_case in case_spec.get('decodeErrors', []): + for decode_error_case in case_spec.get("decodeErrors", []): with self.assertRaises(InvalidBSON): - decode_bson( - binascii.unhexlify(decode_error_case['bson'].encode('utf8'))) + decode_bson(binascii.unhexlify(decode_error_case["bson"].encode("utf8"))) - for parse_error_case in case_spec.get('parseErrors', []): - description = parse_error_case['description'] + for parse_error_case in case_spec.get("parseErrors", []): + description = parse_error_case["description"] if description in _NON_PARSE_ERRORS: - decode_extjson(parse_error_case['string']) + decode_extjson(parse_error_case["string"]) continue - if bson_type == '0x13': - self.assertRaises( - DecimalException, Decimal128, parse_error_case['string']) - elif bson_type == '0x00': + if bson_type == "0x13": + self.assertRaises(DecimalException, Decimal128, parse_error_case["string"]) + elif bson_type == "0x00": try: - doc = decode_extjson(parse_error_case['string']) + doc = decode_extjson(parse_error_case["string"]) # Null bytes are validated when encoding to BSON. - if 'Null' in description: + if "Null" in description: to_bson(doc) - raise AssertionError('exception not raised for test ' - 'case: ' + description) - except (ValueError, KeyError, TypeError, InvalidId, - InvalidDocument): + raise AssertionError("exception not raised for test " "case: " + description) + except (ValueError, KeyError, TypeError, InvalidId, InvalidDocument): pass - elif bson_type == '0x05': + elif bson_type == "0x05": try: - decode_extjson(parse_error_case['string']) - raise AssertionError('exception not raised for test ' - 'case: ' + description) + decode_extjson(parse_error_case["string"]) + raise AssertionError("exception not raised for test " "case: " + description) except (TypeError, ValueError): pass else: - raise AssertionError('cannot test parseErrors for type ' + - bson_type) + raise AssertionError("cannot test parseErrors for type " + bson_type) + return run_test def create_tests(): - for filename in glob.glob(os.path.join(_TEST_PATH, '*.json')): + for filename in glob.glob(os.path.join(_TEST_PATH, "*.json")): test_suffix, _ = os.path.splitext(os.path.basename(filename)) - with codecs.open(filename, encoding='utf-8') as bson_test_file: + with codecs.open(filename, encoding="utf-8") as bson_test_file: test_method = create_test(json.load(bson_test_file)) - setattr(TestBSONCorpus, 'test_' + test_suffix, test_method) + setattr(TestBSONCorpus, "test_" + test_suffix, test_method) create_tests() -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() diff --git a/test/test_bulk.py b/test/test_bulk.py index a895dfddc3..fae1c7e201 100644 --- a/test/test_bulk.py +++ b/test/test_bulk.py @@ -21,24 +21,27 @@ sys.path[0:0] = [""] +from test import IntegrationTest, client_context, unittest +from test.utils import ( + remove_all_users, + rs_or_single_client_noauth, + single_client, + wait_until, +) + from bson.binary import Binary, UuidRepresentation from bson.codec_options import CodecOptions from bson.objectid import ObjectId from pymongo.collection import Collection from pymongo.common import partition_node -from pymongo.errors import (BulkWriteError, - ConfigurationError, - InvalidOperation, - OperationFailure) +from pymongo.errors import ( + BulkWriteError, + ConfigurationError, + InvalidOperation, + OperationFailure, +) from pymongo.operations import * 
from pymongo.write_concern import WriteConcern -from test import (client_context, - unittest, - IntegrationTest) -from test.utils import (remove_all_users, - rs_or_single_client_noauth, - single_client, - wait_until) class BulkTestBase(IntegrationTest): @@ -58,87 +61,91 @@ def setUp(self): def assertEqualResponse(self, expected, actual): """Compare response from bulk.execute() to expected response.""" for key, value in expected.items(): - if key == 'nModified': - self.assertEqual(value, actual['nModified']) - elif key == 'upserted': + if key == "nModified": + self.assertEqual(value, actual["nModified"]) + elif key == "upserted": expected_upserts = value - actual_upserts = actual['upserted'] + actual_upserts = actual["upserted"] self.assertEqual( - len(expected_upserts), len(actual_upserts), - 'Expected %d elements in "upserted", got %d' % ( - len(expected_upserts), len(actual_upserts))) + len(expected_upserts), + len(actual_upserts), + 'Expected %d elements in "upserted", got %d' + % (len(expected_upserts), len(actual_upserts)), + ) for e, a in zip(expected_upserts, actual_upserts): self.assertEqualUpsert(e, a) - elif key == 'writeErrors': + elif key == "writeErrors": expected_errors = value - actual_errors = actual['writeErrors'] + actual_errors = actual["writeErrors"] self.assertEqual( - len(expected_errors), len(actual_errors), - 'Expected %d elements in "writeErrors", got %d' % ( - len(expected_errors), len(actual_errors))) + len(expected_errors), + len(actual_errors), + 'Expected %d elements in "writeErrors", got %d' + % (len(expected_errors), len(actual_errors)), + ) for e, a in zip(expected_errors, actual_errors): self.assertEqualWriteError(e, a) else: self.assertEqual( - actual.get(key), value, - '%r value of %r does not match expected %r' % - (key, actual.get(key), value)) + actual.get(key), + value, + "%r value of %r does not match expected %r" % (key, actual.get(key), value), + ) def assertEqualUpsert(self, expected, actual): """Compare bulk.execute()['upserts'] to expected value. Like: {'index': 0, '_id': ObjectId()} """ - self.assertEqual(expected['index'], actual['index']) - if expected['_id'] == '...': + self.assertEqual(expected["index"], actual["index"]) + if expected["_id"] == "...": # Unspecified value. - self.assertTrue('_id' in actual) + self.assertTrue("_id" in actual) else: - self.assertEqual(expected['_id'], actual['_id']) + self.assertEqual(expected["_id"], actual["_id"]) def assertEqualWriteError(self, expected, actual): """Compare bulk.execute()['writeErrors'] to expected value. Like: {'index': 0, 'code': 123, 'errmsg': '...', 'op': { ... }} """ - self.assertEqual(expected['index'], actual['index']) - self.assertEqual(expected['code'], actual['code']) - if expected['errmsg'] == '...': + self.assertEqual(expected["index"], actual["index"]) + self.assertEqual(expected["code"], actual["code"]) + if expected["errmsg"] == "...": # Unspecified value. - self.assertTrue('errmsg' in actual) + self.assertTrue("errmsg" in actual) else: - self.assertEqual(expected['errmsg'], actual['errmsg']) + self.assertEqual(expected["errmsg"], actual["errmsg"]) - expected_op = expected['op'].copy() - actual_op = actual['op'].copy() - if expected_op.get('_id') == '...': + expected_op = expected["op"].copy() + actual_op = actual["op"].copy() + if expected_op.get("_id") == "...": # Unspecified _id. 
- self.assertTrue('_id' in actual_op) - actual_op.pop('_id') - expected_op.pop('_id') + self.assertTrue("_id" in actual_op) + actual_op.pop("_id") + expected_op.pop("_id") self.assertEqual(expected_op, actual_op) class TestBulk(BulkTestBase): - def test_empty(self): self.assertRaises(InvalidOperation, self.coll.bulk_write, []) def test_insert(self): expected = { - 'nMatched': 0, - 'nModified': 0, - 'nUpserted': 0, - 'nInserted': 1, - 'nRemoved': 0, - 'upserted': [], - 'writeErrors': [], - 'writeConcernErrors': [] + "nMatched": 0, + "nModified": 0, + "nUpserted": 0, + "nInserted": 1, + "nRemoved": 0, + "upserted": [], + "writeErrors": [], + "writeConcernErrors": [], } result = self.coll.bulk_write([InsertOne({})]) @@ -149,14 +156,14 @@ def test_insert(self): def _test_update_many(self, update): expected = { - 'nMatched': 2, - 'nModified': 2, - 'nUpserted': 0, - 'nInserted': 0, - 'nRemoved': 0, - 'upserted': [], - 'writeErrors': [], - 'writeConcernErrors': [] + "nMatched": 2, + "nModified": 2, + "nUpserted": 0, + "nInserted": 0, + "nRemoved": 0, + "upserted": [], + "writeErrors": [], + "writeConcernErrors": [], } self.coll.insert_many([{}, {}]) @@ -166,11 +173,11 @@ def _test_update_many(self, update): self.assertTrue(result.modified_count in (2, None)) def test_update_many(self): - self._test_update_many({'$set': {'foo': 'bar'}}) + self._test_update_many({"$set": {"foo": "bar"}}) @client_context.require_version_min(4, 1, 11) def test_update_many_pipeline(self): - self._test_update_many([{'$set': {'foo': 'bar'}}]) + self._test_update_many([{"$set": {"foo": "bar"}}]) def test_array_filters_validation(self): self.assertRaises(TypeError, UpdateMany, {}, {}, array_filters={}) @@ -178,23 +185,21 @@ def test_array_filters_validation(self): def test_array_filters_unacknowledged(self): coll = self.coll_w0 - update_one = UpdateOne( - {}, {'$set': {'y.$[i].b': 5}}, array_filters=[{'i.b': 1}]) - update_many = UpdateMany( - {}, {'$set': {'y.$[i].b': 5}}, array_filters=[{'i.b': 1}]) + update_one = UpdateOne({}, {"$set": {"y.$[i].b": 5}}, array_filters=[{"i.b": 1}]) + update_many = UpdateMany({}, {"$set": {"y.$[i].b": 5}}, array_filters=[{"i.b": 1}]) self.assertRaises(ConfigurationError, coll.bulk_write, [update_one]) self.assertRaises(ConfigurationError, coll.bulk_write, [update_many]) def _test_update_one(self, update): expected = { - 'nMatched': 1, - 'nModified': 1, - 'nUpserted': 0, - 'nInserted': 0, - 'nRemoved': 0, - 'upserted': [], - 'writeErrors': [], - 'writeConcernErrors': [] + "nMatched": 1, + "nModified": 1, + "nUpserted": 0, + "nInserted": 0, + "nRemoved": 0, + "upserted": [], + "writeErrors": [], + "writeConcernErrors": [], } self.coll.insert_many([{}, {}]) @@ -205,28 +210,28 @@ def _test_update_one(self, update): self.assertTrue(result.modified_count in (1, None)) def test_update_one(self): - self._test_update_one({'$set': {'foo': 'bar'}}) + self._test_update_one({"$set": {"foo": "bar"}}) @client_context.require_version_min(4, 1, 11) def test_update_one_pipeline(self): - self._test_update_one([{'$set': {'foo': 'bar'}}]) + self._test_update_one([{"$set": {"foo": "bar"}}]) def test_replace_one(self): expected = { - 'nMatched': 1, - 'nModified': 1, - 'nUpserted': 0, - 'nInserted': 0, - 'nRemoved': 0, - 'upserted': [], - 'writeErrors': [], - 'writeConcernErrors': [] + "nMatched": 1, + "nModified": 1, + "nUpserted": 0, + "nInserted": 0, + "nRemoved": 0, + "upserted": [], + "writeErrors": [], + "writeConcernErrors": [], } self.coll.insert_many([{}, {}]) - result = 
self.coll.bulk_write([ReplaceOne({}, {'foo': 'bar'})]) + result = self.coll.bulk_write([ReplaceOne({}, {"foo": "bar"})]) self.assertEqualResponse(expected, result.bulk_api_result) self.assertEqual(1, result.matched_count) self.assertTrue(result.modified_count in (1, None)) @@ -234,14 +239,14 @@ def test_replace_one(self): def test_remove(self): # Test removing all documents, ordered. expected = { - 'nMatched': 0, - 'nModified': 0, - 'nUpserted': 0, - 'nInserted': 0, - 'nRemoved': 2, - 'upserted': [], - 'writeErrors': [], - 'writeConcernErrors': [] + "nMatched": 0, + "nModified": 0, + "nUpserted": 0, + "nInserted": 0, + "nRemoved": 2, + "upserted": [], + "writeErrors": [], + "writeConcernErrors": [], } self.coll.insert_many([{}, {}]) @@ -253,14 +258,14 @@ def test_remove_one(self): # Test removing one document, empty selector. self.coll.insert_many([{}, {}]) expected = { - 'nMatched': 0, - 'nModified': 0, - 'nUpserted': 0, - 'nInserted': 0, - 'nRemoved': 1, - 'upserted': [], - 'writeErrors': [], - 'writeConcernErrors': [] + "nMatched": 0, + "nModified": 0, + "nUpserted": 0, + "nInserted": 0, + "nRemoved": 1, + "upserted": [], + "writeErrors": [], + "writeConcernErrors": [], } result = self.coll.bulk_write([DeleteOne({})]) @@ -271,24 +276,22 @@ def test_remove_one(self): def test_upsert(self): expected = { - 'nMatched': 0, - 'nModified': 0, - 'nUpserted': 1, - 'nInserted': 0, - 'nRemoved': 0, - 'upserted': [{'index': 0, '_id': '...'}] + "nMatched": 0, + "nModified": 0, + "nUpserted": 1, + "nInserted": 0, + "nRemoved": 0, + "upserted": [{"index": 0, "_id": "..."}], } - result = self.coll.bulk_write([ReplaceOne({}, - {'foo': 'bar'}, - upsert=True)]) + result = self.coll.bulk_write([ReplaceOne({}, {"foo": "bar"}, upsert=True)]) self.assertEqualResponse(expected, result.bulk_api_result) self.assertEqual(1, result.upserted_count) assert result.upserted_ids is not None self.assertEqual(1, len(result.upserted_ids)) self.assertTrue(isinstance(result.upserted_ids.get(0), ObjectId)) - self.assertEqual(self.coll.count_documents({'foo': 'bar'}), 1) + self.assertEqual(self.coll.count_documents({"foo": "bar"}), 1) def test_numerous_inserts(self): # Ensure we don't exceed server's maxWriteBatchSize size limit. @@ -311,23 +314,23 @@ def test_bulk_max_message_size(self): # Generate a list of documents such that the first batched OP_MSG is # as close as possible to the 48MB limit. docs = [ - {'_id': 1, 'l': 's' * _16_MB}, - {'_id': 2, 'l': 's' * _16_MB}, - {'_id': 3, 'l': 's' * (_16_MB - 10000)}, + {"_id": 1, "l": "s" * _16_MB}, + {"_id": 2, "l": "s" * _16_MB}, + {"_id": 3, "l": "s" * (_16_MB - 10000)}, ] # Fill in the remaining ~10000 bytes with small documents. 
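+        # (Rough arithmetic, assuming the server's 48MB OP_MSG limit:
+        # 16MB + 16MB + (16MB - 10000) leaves about 10000 bytes in the
+        # first batch, so only a handful of the small documents appended
+        # below fit there; the rest spill into subsequent batches.)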
for i in range(4, 10000): - docs.append({'_id': i}) + docs.append({"_id": i}) result = self.coll.insert_many(docs) self.assertEqual(len(docs), len(result.inserted_ids)) def test_generator_insert(self): def gen(): - yield {'a': 1, 'b': 1} - yield {'a': 1, 'b': 2} - yield {'a': 2, 'b': 3} - yield {'a': 3, 'b': 5} - yield {'a': 5, 'b': 8} + yield {"a": 1, "b": 1} + yield {"a": 1, "b": 2} + yield {"a": 2, "b": 3} + yield {"a": 3, "b": 5} + yield {"a": 5, "b": 8} result = self.coll.insert_many(gen()) self.assertEqual(5, len(result.inserted_ids)) @@ -353,134 +356,166 @@ def test_bulk_write_invalid_arguments(self): self.coll.bulk_write([{}]) # type: ignore[list-item] def test_upsert_large(self): - big = 'a' * (client_context.max_bson_size - 37) - result = self.coll.bulk_write([ - UpdateOne({'x': 1}, {'$set': {'s': big}}, upsert=True)]) + big = "a" * (client_context.max_bson_size - 37) + result = self.coll.bulk_write([UpdateOne({"x": 1}, {"$set": {"s": big}}, upsert=True)]) self.assertEqualResponse( - {'nMatched': 0, - 'nModified': 0, - 'nUpserted': 1, - 'nInserted': 0, - 'nRemoved': 0, - 'upserted': [{'index': 0, '_id': '...'}]}, - result.bulk_api_result) - - self.assertEqual(1, self.coll.count_documents({'x': 1})) + { + "nMatched": 0, + "nModified": 0, + "nUpserted": 1, + "nInserted": 0, + "nRemoved": 0, + "upserted": [{"index": 0, "_id": "..."}], + }, + result.bulk_api_result, + ) + + self.assertEqual(1, self.coll.count_documents({"x": 1})) def test_client_generated_upsert_id(self): - result = self.coll.bulk_write([ - UpdateOne({'_id': 0}, {'$set': {'a': 0}}, upsert=True), - ReplaceOne({'a': 1}, {'_id': 1}, upsert=True), - # This is just here to make the counts right in all cases. - ReplaceOne({'_id': 2}, {'_id': 2}, upsert=True), - ]) + result = self.coll.bulk_write( + [ + UpdateOne({"_id": 0}, {"$set": {"a": 0}}, upsert=True), + ReplaceOne({"a": 1}, {"_id": 1}, upsert=True), + # This is just here to make the counts right in all cases. + ReplaceOne({"_id": 2}, {"_id": 2}, upsert=True), + ] + ) self.assertEqualResponse( - {'nMatched': 0, - 'nModified': 0, - 'nUpserted': 3, - 'nInserted': 0, - 'nRemoved': 0, - 'upserted': [{'index': 0, '_id': 0}, - {'index': 1, '_id': 1}, - {'index': 2, '_id': 2}]}, - result.bulk_api_result) + { + "nMatched": 0, + "nModified": 0, + "nUpserted": 3, + "nInserted": 0, + "nRemoved": 0, + "upserted": [ + {"index": 0, "_id": 0}, + {"index": 1, "_id": 1}, + {"index": 2, "_id": 2}, + ], + }, + result.bulk_api_result, + ) def test_upsert_uuid_standard(self): options = CodecOptions(uuid_representation=UuidRepresentation.STANDARD) coll = self.coll.with_options(codec_options=options) uuids = [uuid.uuid4() for _ in range(3)] - result = coll.bulk_write([ - UpdateOne({'_id': uuids[0]}, {'$set': {'a': 0}}, upsert=True), - ReplaceOne({'a': 1}, {'_id': uuids[1]}, upsert=True), - # This is just here to make the counts right in all cases. - ReplaceOne({'_id': uuids[2]}, {'_id': uuids[2]}, upsert=True), - ]) + result = coll.bulk_write( + [ + UpdateOne({"_id": uuids[0]}, {"$set": {"a": 0}}, upsert=True), + ReplaceOne({"a": 1}, {"_id": uuids[1]}, upsert=True), + # This is just here to make the counts right in all cases. 
+ ReplaceOne({"_id": uuids[2]}, {"_id": uuids[2]}, upsert=True), + ] + ) self.assertEqualResponse( - {'nMatched': 0, - 'nModified': 0, - 'nUpserted': 3, - 'nInserted': 0, - 'nRemoved': 0, - 'upserted': [{'index': 0, '_id': uuids[0]}, - {'index': 1, '_id': uuids[1]}, - {'index': 2, '_id': uuids[2]}]}, - result.bulk_api_result) + { + "nMatched": 0, + "nModified": 0, + "nUpserted": 3, + "nInserted": 0, + "nRemoved": 0, + "upserted": [ + {"index": 0, "_id": uuids[0]}, + {"index": 1, "_id": uuids[1]}, + {"index": 2, "_id": uuids[2]}, + ], + }, + result.bulk_api_result, + ) def test_upsert_uuid_unspecified(self): options = CodecOptions(uuid_representation=UuidRepresentation.UNSPECIFIED) coll = self.coll.with_options(codec_options=options) uuids = [Binary.from_uuid(uuid.uuid4()) for _ in range(3)] - result = coll.bulk_write([ - UpdateOne({'_id': uuids[0]}, {'$set': {'a': 0}}, upsert=True), - ReplaceOne({'a': 1}, {'_id': uuids[1]}, upsert=True), - # This is just here to make the counts right in all cases. - ReplaceOne({'_id': uuids[2]}, {'_id': uuids[2]}, upsert=True), - ]) + result = coll.bulk_write( + [ + UpdateOne({"_id": uuids[0]}, {"$set": {"a": 0}}, upsert=True), + ReplaceOne({"a": 1}, {"_id": uuids[1]}, upsert=True), + # This is just here to make the counts right in all cases. + ReplaceOne({"_id": uuids[2]}, {"_id": uuids[2]}, upsert=True), + ] + ) self.assertEqualResponse( - {'nMatched': 0, - 'nModified': 0, - 'nUpserted': 3, - 'nInserted': 0, - 'nRemoved': 0, - 'upserted': [{'index': 0, '_id': uuids[0]}, - {'index': 1, '_id': uuids[1]}, - {'index': 2, '_id': uuids[2]}]}, - result.bulk_api_result) + { + "nMatched": 0, + "nModified": 0, + "nUpserted": 3, + "nInserted": 0, + "nRemoved": 0, + "upserted": [ + {"index": 0, "_id": uuids[0]}, + {"index": 1, "_id": uuids[1]}, + {"index": 2, "_id": uuids[2]}, + ], + }, + result.bulk_api_result, + ) def test_upsert_uuid_standard_subdocuments(self): options = CodecOptions(uuid_representation=UuidRepresentation.STANDARD) coll = self.coll.with_options(codec_options=options) - ids: list = [ - {'f': Binary(bytes(i)), 'f2': uuid.uuid4()} - for i in range(3) - ] + ids: list = [{"f": Binary(bytes(i)), "f2": uuid.uuid4()} for i in range(3)] - result = coll.bulk_write([ - UpdateOne({'_id': ids[0]}, {'$set': {'a': 0}}, upsert=True), - ReplaceOne({'a': 1}, {'_id': ids[1]}, upsert=True), - # This is just here to make the counts right in all cases. - ReplaceOne({'_id': ids[2]}, {'_id': ids[2]}, upsert=True), - ]) + result = coll.bulk_write( + [ + UpdateOne({"_id": ids[0]}, {"$set": {"a": 0}}, upsert=True), + ReplaceOne({"a": 1}, {"_id": ids[1]}, upsert=True), + # This is just here to make the counts right in all cases. + ReplaceOne({"_id": ids[2]}, {"_id": ids[2]}, upsert=True), + ] + ) # The `Binary` values are returned as `bytes` objects. 
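+        # (A note on the codec behavior assumed here: with
+        # UuidRepresentation.STANDARD, uuid.UUID values round-trip
+        # natively, while subtype-0 Binary values decode to bytes, hence
+        # the normalization in the loop below.)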
for _id in ids: - _id['f'] = bytes(_id['f']) + _id["f"] = bytes(_id["f"]) self.assertEqualResponse( - {'nMatched': 0, - 'nModified': 0, - 'nUpserted': 3, - 'nInserted': 0, - 'nRemoved': 0, - 'upserted': [{'index': 0, '_id': ids[0]}, - {'index': 1, '_id': ids[1]}, - {'index': 2, '_id': ids[2]}]}, - result.bulk_api_result) + { + "nMatched": 0, + "nModified": 0, + "nUpserted": 3, + "nInserted": 0, + "nRemoved": 0, + "upserted": [ + {"index": 0, "_id": ids[0]}, + {"index": 1, "_id": ids[1]}, + {"index": 2, "_id": ids[2]}, + ], + }, + result.bulk_api_result, + ) def test_single_ordered_batch(self): - result = self.coll.bulk_write([ - InsertOne({'a': 1}), - UpdateOne({'a': 1}, {'$set': {'b': 1}}), - UpdateOne({'a': 2}, {'$set': {'b': 2}}, upsert=True), - InsertOne({'a': 3}), - DeleteOne({'a': 3}), - ]) + result = self.coll.bulk_write( + [ + InsertOne({"a": 1}), + UpdateOne({"a": 1}, {"$set": {"b": 1}}), + UpdateOne({"a": 2}, {"$set": {"b": 2}}, upsert=True), + InsertOne({"a": 3}), + DeleteOne({"a": 3}), + ] + ) self.assertEqualResponse( - {'nMatched': 1, - 'nModified': 1, - 'nUpserted': 1, - 'nInserted': 2, - 'nRemoved': 1, - 'upserted': [{'index': 2, '_id': '...'}]}, - result.bulk_api_result) + { + "nMatched": 1, + "nModified": 1, + "nUpserted": 1, + "nInserted": 2, + "nRemoved": 1, + "upserted": [{"index": 2, "_id": "..."}], + }, + result.bulk_api_result, + ) def test_single_error_ordered_batch(self): - self.coll.create_index('a', unique=True) - self.addCleanup(self.coll.drop_index, [('a', 1)]) + self.coll.create_index("a", unique=True) + self.addCleanup(self.coll.drop_index, [("a", 1)]) requests: list = [ - InsertOne({'b': 1, 'a': 1}), - UpdateOne({'b': 2}, {'$set': {'a': 1}}, upsert=True), - InsertOne({'b': 3, 'a': 2}), + InsertOne({"b": 1, "a": 1}), + UpdateOne({"b": 2}, {"$set": {"a": 1}}, upsert=True), + InsertOne({"b": 3, "a": 2}), ] try: self.coll.bulk_write(requests) @@ -491,33 +526,41 @@ def test_single_error_ordered_batch(self): self.fail("Error not raised") self.assertEqualResponse( - {'nMatched': 0, - 'nModified': 0, - 'nUpserted': 0, - 'nInserted': 1, - 'nRemoved': 0, - 'upserted': [], - 'writeConcernErrors': [], - 'writeErrors': [ - {'index': 1, - 'code': 11000, - 'errmsg': '...', - 'op': {'q': {'b': 2}, - 'u': {'$set': {'a': 1}}, - 'multi': False, - 'upsert': True}}]}, - result) + { + "nMatched": 0, + "nModified": 0, + "nUpserted": 0, + "nInserted": 1, + "nRemoved": 0, + "upserted": [], + "writeConcernErrors": [], + "writeErrors": [ + { + "index": 1, + "code": 11000, + "errmsg": "...", + "op": { + "q": {"b": 2}, + "u": {"$set": {"a": 1}}, + "multi": False, + "upsert": True, + }, + } + ], + }, + result, + ) def test_multiple_error_ordered_batch(self): - self.coll.create_index('a', unique=True) - self.addCleanup(self.coll.drop_index, [('a', 1)]) + self.coll.create_index("a", unique=True) + self.addCleanup(self.coll.drop_index, [("a", 1)]) requests: list = [ - InsertOne({'b': 1, 'a': 1}), - UpdateOne({'b': 2}, {'$set': {'a': 1}}, upsert=True), - UpdateOne({'b': 3}, {'$set': {'a': 2}}, upsert=True), - UpdateOne({'b': 2}, {'$set': {'a': 1}}, upsert=True), - InsertOne({'b': 4, 'a': 3}), - InsertOne({'b': 5, 'a': 1}), + InsertOne({"b": 1, "a": 1}), + UpdateOne({"b": 2}, {"$set": {"a": 1}}, upsert=True), + UpdateOne({"b": 3}, {"$set": {"a": 2}}, upsert=True), + UpdateOne({"b": 2}, {"$set": {"a": 1}}, upsert=True), + InsertOne({"b": 4, "a": 3}), + InsertOne({"b": 5, "a": 1}), ] try: @@ -529,50 +572,61 @@ def test_multiple_error_ordered_batch(self): self.fail("Error not raised") 
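+        # (An ordered bulk write stops at the first error, so only the
+        # insert at index 0 was applied; the failing upsert at index 1
+        # aborts everything after it, as the expected response below
+        # verifies.)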
self.assertEqualResponse( - {'nMatched': 0, - 'nModified': 0, - 'nUpserted': 0, - 'nInserted': 1, - 'nRemoved': 0, - 'upserted': [], - 'writeConcernErrors': [], - 'writeErrors': [ - {'index': 1, - 'code': 11000, - 'errmsg': '...', - 'op': {'q': {'b': 2}, - 'u': {'$set': {'a': 1}}, - 'multi': False, - 'upsert': True}}]}, - result) + { + "nMatched": 0, + "nModified": 0, + "nUpserted": 0, + "nInserted": 1, + "nRemoved": 0, + "upserted": [], + "writeConcernErrors": [], + "writeErrors": [ + { + "index": 1, + "code": 11000, + "errmsg": "...", + "op": { + "q": {"b": 2}, + "u": {"$set": {"a": 1}}, + "multi": False, + "upsert": True, + }, + } + ], + }, + result, + ) def test_single_unordered_batch(self): requests: list = [ - InsertOne({'a': 1}), - UpdateOne({'a': 1}, {'$set': {'b': 1}}), - UpdateOne({'a': 2}, {'$set': {'b': 2}}, upsert=True), - InsertOne({'a': 3}), - DeleteOne({'a': 3}), + InsertOne({"a": 1}), + UpdateOne({"a": 1}, {"$set": {"b": 1}}), + UpdateOne({"a": 2}, {"$set": {"b": 2}}, upsert=True), + InsertOne({"a": 3}), + DeleteOne({"a": 3}), ] result = self.coll.bulk_write(requests, ordered=False) self.assertEqualResponse( - {'nMatched': 1, - 'nModified': 1, - 'nUpserted': 1, - 'nInserted': 2, - 'nRemoved': 1, - 'upserted': [{'index': 2, '_id': '...'}], - 'writeErrors': [], - 'writeConcernErrors': []}, - result.bulk_api_result) + { + "nMatched": 1, + "nModified": 1, + "nUpserted": 1, + "nInserted": 2, + "nRemoved": 1, + "upserted": [{"index": 2, "_id": "..."}], + "writeErrors": [], + "writeConcernErrors": [], + }, + result.bulk_api_result, + ) def test_single_error_unordered_batch(self): - self.coll.create_index('a', unique=True) - self.addCleanup(self.coll.drop_index, [('a', 1)]) + self.coll.create_index("a", unique=True) + self.addCleanup(self.coll.drop_index, [("a", 1)]) requests: list = [ - InsertOne({'b': 1, 'a': 1}), - UpdateOne({'b': 2}, {'$set': {'a': 1}}, upsert=True), - InsertOne({'b': 3, 'a': 2}), + InsertOne({"b": 1, "a": 1}), + UpdateOne({"b": 2}, {"$set": {"a": 1}}, upsert=True), + InsertOne({"b": 3, "a": 2}), ] try: @@ -584,33 +638,41 @@ def test_single_error_unordered_batch(self): self.fail("Error not raised") self.assertEqualResponse( - {'nMatched': 0, - 'nModified': 0, - 'nUpserted': 0, - 'nInserted': 2, - 'nRemoved': 0, - 'upserted': [], - 'writeConcernErrors': [], - 'writeErrors': [ - {'index': 1, - 'code': 11000, - 'errmsg': '...', - 'op': {'q': {'b': 2}, - 'u': {'$set': {'a': 1}}, - 'multi': False, - 'upsert': True}}]}, - result) + { + "nMatched": 0, + "nModified": 0, + "nUpserted": 0, + "nInserted": 2, + "nRemoved": 0, + "upserted": [], + "writeConcernErrors": [], + "writeErrors": [ + { + "index": 1, + "code": 11000, + "errmsg": "...", + "op": { + "q": {"b": 2}, + "u": {"$set": {"a": 1}}, + "multi": False, + "upsert": True, + }, + } + ], + }, + result, + ) def test_multiple_error_unordered_batch(self): - self.coll.create_index('a', unique=True) - self.addCleanup(self.coll.drop_index, [('a', 1)]) + self.coll.create_index("a", unique=True) + self.addCleanup(self.coll.drop_index, [("a", 1)]) requests: list = [ - InsertOne({'b': 1, 'a': 1}), - UpdateOne({'b': 2}, {'$set': {'a': 3}}, upsert=True), - UpdateOne({'b': 3}, {'$set': {'a': 4}}, upsert=True), - UpdateOne({'b': 4}, {'$set': {'a': 3}}, upsert=True), - InsertOne({'b': 5, 'a': 2}), - InsertOne({'b': 6, 'a': 1}), + InsertOne({"b": 1, "a": 1}), + UpdateOne({"b": 2}, {"$set": {"a": 3}}, upsert=True), + UpdateOne({"b": 3}, {"$set": {"a": 4}}, upsert=True), + UpdateOne({"b": 4}, {"$set": {"a": 3}}, upsert=True), + 
InsertOne({"b": 5, "a": 2}), + InsertOne({"b": 6, "a": 1}), ] try: @@ -623,35 +685,43 @@ def test_multiple_error_unordered_batch(self): # Assume the update at index 1 runs before the update at index 3, # although the spec does not require it. Same for inserts. self.assertEqualResponse( - {'nMatched': 0, - 'nModified': 0, - 'nUpserted': 2, - 'nInserted': 2, - 'nRemoved': 0, - 'upserted': [ - {'index': 1, '_id': '...'}, - {'index': 2, '_id': '...'}], - 'writeConcernErrors': [], - 'writeErrors': [ - {'index': 3, - 'code': 11000, - 'errmsg': '...', - 'op': {'q': {'b': 4}, - 'u': {'$set': {'a': 3}}, - 'multi': False, - 'upsert': True}}, - {'index': 5, - 'code': 11000, - 'errmsg': '...', - 'op': {'_id': '...', 'b': 6, 'a': 1}}]}, - result) + { + "nMatched": 0, + "nModified": 0, + "nUpserted": 2, + "nInserted": 2, + "nRemoved": 0, + "upserted": [{"index": 1, "_id": "..."}, {"index": 2, "_id": "..."}], + "writeConcernErrors": [], + "writeErrors": [ + { + "index": 3, + "code": 11000, + "errmsg": "...", + "op": { + "q": {"b": 4}, + "u": {"$set": {"a": 3}}, + "multi": False, + "upsert": True, + }, + }, + { + "index": 5, + "code": 11000, + "errmsg": "...", + "op": {"_id": "...", "b": 6, "a": 1}, + }, + ], + }, + result, + ) def test_large_inserts_ordered(self): - big = 'x' * client_context.max_bson_size + big = "x" * client_context.max_bson_size requests = [ - InsertOne({'b': 1, 'a': 1}), - InsertOne({'big': big}), - InsertOne({'b': 2, 'a': 2}), + InsertOne({"b": 1, "a": 1}), + InsertOne({"big": big}), + InsertOne({"b": 2, "a": 2}), ] try: @@ -662,29 +732,31 @@ def test_large_inserts_ordered(self): else: self.fail("Error not raised") - self.assertEqual(1, result['nInserted']) + self.assertEqual(1, result["nInserted"]) self.coll.delete_many({}) - big = 'x' * (1024 * 1024 * 4) - write_result = self.coll.bulk_write([ - InsertOne({'a': 1, 'big': big}), - InsertOne({'a': 2, 'big': big}), - InsertOne({'a': 3, 'big': big}), - InsertOne({'a': 4, 'big': big}), - InsertOne({'a': 5, 'big': big}), - InsertOne({'a': 6, 'big': big}), - ]) + big = "x" * (1024 * 1024 * 4) + write_result = self.coll.bulk_write( + [ + InsertOne({"a": 1, "big": big}), + InsertOne({"a": 2, "big": big}), + InsertOne({"a": 3, "big": big}), + InsertOne({"a": 4, "big": big}), + InsertOne({"a": 5, "big": big}), + InsertOne({"a": 6, "big": big}), + ] + ) self.assertEqual(6, write_result.inserted_count) self.assertEqual(6, self.coll.count_documents({})) def test_large_inserts_unordered(self): - big = 'x' * client_context.max_bson_size + big = "x" * client_context.max_bson_size requests = [ - InsertOne({'b': 1, 'a': 1}), - InsertOne({'big': big}), - InsertOne({'b': 2, 'a': 2}), + InsertOne({"b": 1, "a": 1}), + InsertOne({"big": big}), + InsertOne({"b": 2, "a": 2}), ] try: @@ -695,26 +767,28 @@ def test_large_inserts_unordered(self): else: self.fail("Error not raised") - self.assertEqual(2, details['nInserted']) + self.assertEqual(2, details["nInserted"]) self.coll.delete_many({}) - big = 'x' * (1024 * 1024 * 4) - result = self.coll.bulk_write([ - InsertOne({'a': 1, 'big': big}), - InsertOne({'a': 2, 'big': big}), - InsertOne({'a': 3, 'big': big}), - InsertOne({'a': 4, 'big': big}), - InsertOne({'a': 5, 'big': big}), - InsertOne({'a': 6, 'big': big}), - ], ordered=False) + big = "x" * (1024 * 1024 * 4) + result = self.coll.bulk_write( + [ + InsertOne({"a": 1, "big": big}), + InsertOne({"a": 2, "big": big}), + InsertOne({"a": 3, "big": big}), + InsertOne({"a": 4, "big": big}), + InsertOne({"a": 5, "big": big}), + InsertOne({"a": 6, "big": big}), + 
], + ordered=False, + ) self.assertEqual(6, result.inserted_count) self.assertEqual(6, self.coll.count_documents({})) class BulkAuthorizationTestBase(BulkTestBase): - @classmethod @client_context.require_auth @client_context.require_no_api_version @@ -723,117 +797,112 @@ def setUpClass(cls): def setUp(self): super(BulkAuthorizationTestBase, self).setUp() - client_context.create_user( - self.db.name, 'readonly', 'pw', ['read']) + client_context.create_user(self.db.name, "readonly", "pw", ["read"]) self.db.command( - 'createRole', 'noremove', - privileges=[{ - 'actions': ['insert', 'update', 'find'], - 'resource': {'db': 'pymongo_test', 'collection': 'test'} - }], - roles=[]) - - client_context.create_user(self.db.name, 'noremove', 'pw', ['noremove']) + "createRole", + "noremove", + privileges=[ + { + "actions": ["insert", "update", "find"], + "resource": {"db": "pymongo_test", "collection": "test"}, + } + ], + roles=[], + ) + + client_context.create_user(self.db.name, "noremove", "pw", ["noremove"]) def tearDown(self): - self.db.command('dropRole', 'noremove') + self.db.command("dropRole", "noremove") remove_all_users(self.db) class TestBulkUnacknowledged(BulkTestBase): - def tearDown(self): self.coll.delete_many({}) def test_no_results_ordered_success(self): requests: list = [ - InsertOne({'a': 1}), - UpdateOne({'a': 3}, {'$set': {'b': 1}}, upsert=True), - InsertOne({'a': 2}), - DeleteOne({'a': 1}), + InsertOne({"a": 1}), + UpdateOne({"a": 3}, {"$set": {"b": 1}}, upsert=True), + InsertOne({"a": 2}), + DeleteOne({"a": 1}), ] result = self.coll_w0.bulk_write(requests) self.assertFalse(result.acknowledged) - wait_until(lambda: 2 == self.coll.count_documents({}), - 'insert 2 documents') - wait_until(lambda: self.coll.find_one({'_id': 1}) is None, - 'removed {"_id": 1}') + wait_until(lambda: 2 == self.coll.count_documents({}), "insert 2 documents") + wait_until(lambda: self.coll.find_one({"_id": 1}) is None, 'removed {"_id": 1}') def test_no_results_ordered_failure(self): requests: list = [ - InsertOne({'_id': 1}), - UpdateOne({'_id': 3}, {'$set': {'b': 1}}, upsert=True), - InsertOne({'_id': 2}), + InsertOne({"_id": 1}), + UpdateOne({"_id": 3}, {"$set": {"b": 1}}, upsert=True), + InsertOne({"_id": 2}), # Fails with duplicate key error. - InsertOne({'_id': 1}), + InsertOne({"_id": 1}), # Should not be executed since the batch is ordered. 
- DeleteOne({'_id': 1}), + DeleteOne({"_id": 1}), ] result = self.coll_w0.bulk_write(requests) self.assertFalse(result.acknowledged) - wait_until(lambda: 3 == self.coll.count_documents({}), - 'insert 3 documents') - self.assertEqual({'_id': 1}, self.coll.find_one({'_id': 1})) + wait_until(lambda: 3 == self.coll.count_documents({}), "insert 3 documents") + self.assertEqual({"_id": 1}, self.coll.find_one({"_id": 1})) def test_no_results_unordered_success(self): requests: list = [ - InsertOne({'a': 1}), - UpdateOne({'a': 3}, {'$set': {'b': 1}}, upsert=True), - InsertOne({'a': 2}), - DeleteOne({'a': 1}), + InsertOne({"a": 1}), + UpdateOne({"a": 3}, {"$set": {"b": 1}}, upsert=True), + InsertOne({"a": 2}), + DeleteOne({"a": 1}), ] result = self.coll_w0.bulk_write(requests, ordered=False) self.assertFalse(result.acknowledged) - wait_until(lambda: 2 == self.coll.count_documents({}), - 'insert 2 documents') - wait_until(lambda: self.coll.find_one({'_id': 1}) is None, - 'removed {"_id": 1}') + wait_until(lambda: 2 == self.coll.count_documents({}), "insert 2 documents") + wait_until(lambda: self.coll.find_one({"_id": 1}) is None, 'removed {"_id": 1}') def test_no_results_unordered_failure(self): requests: list = [ - InsertOne({'_id': 1}), - UpdateOne({'_id': 3}, {'$set': {'b': 1}}, upsert=True), - InsertOne({'_id': 2}), + InsertOne({"_id": 1}), + UpdateOne({"_id": 3}, {"$set": {"b": 1}}, upsert=True), + InsertOne({"_id": 2}), # Fails with duplicate key error. - InsertOne({'_id': 1}), + InsertOne({"_id": 1}), # Should be executed since the batch is unordered. - DeleteOne({'_id': 1}), + DeleteOne({"_id": 1}), ] result = self.coll_w0.bulk_write(requests, ordered=False) self.assertFalse(result.acknowledged) - wait_until(lambda: 2 == self.coll.count_documents({}), - 'insert 2 documents') - wait_until(lambda: self.coll.find_one({'_id': 1}) is None, - 'removed {"_id": 1}') + wait_until(lambda: 2 == self.coll.count_documents({}), "insert 2 documents") + wait_until(lambda: self.coll.find_one({"_id": 1}) is None, 'removed {"_id": 1}') class TestBulkAuthorization(BulkAuthorizationTestBase): - def test_readonly(self): # We test that an authorization failure aborts the batch and is raised # as OperationFailure. - cli = rs_or_single_client_noauth(username='readonly', password='pw', - authSource='pymongo_test') + cli = rs_or_single_client_noauth( + username="readonly", password="pw", authSource="pymongo_test" + ) coll = cli.pymongo_test.test coll.find_one() - self.assertRaises(OperationFailure, coll.bulk_write, - [InsertOne({'x': 1})]) + self.assertRaises(OperationFailure, coll.bulk_write, [InsertOne({"x": 1})]) def test_no_remove(self): # We test that an authorization failure aborts the batch and is raised # as OperationFailure. - cli = rs_or_single_client_noauth(username='noremove', password='pw', - authSource='pymongo_test') + cli = rs_or_single_client_noauth( + username="noremove", password="pw", authSource="pymongo_test" + ) coll = cli.pymongo_test.test coll.find_one() requests = [ - InsertOne({'x': 1}), - ReplaceOne({'x': 2}, {'x': 2}, upsert=True), - DeleteMany({}), # Prohibited. - InsertOne({'x': 3}), # Never attempted. + InsertOne({"x": 1}), + ReplaceOne({"x": 2}, {"x": 2}, upsert=True), + DeleteMany({}), # Prohibited. + InsertOne({"x": 3}), # Never attempted. 
] self.assertRaises(OperationFailure, coll.bulk_write, requests) - self.assertEqual(set([1, 2]), set(self.coll.distinct('x'))) + self.assertEqual(set([1, 2]), set(self.coll.distinct("x"))) class TestBulkWriteConcern(BulkTestBase): @@ -846,8 +915,8 @@ def setUpClass(cls): cls.w = client_context.w cls.secondary = None if cls.w is not None and cls.w > 1: - for member in client_context.hello['hosts']: - if member != client_context.hello['primary']: + for member in client_context.hello["hosts"]: + if member != client_context.hello["primary"]: cls.secondary = single_client(*partition_node(member)) break @@ -862,32 +931,23 @@ def cause_wtimeout(self, requests, ordered): # Use the rsSyncApplyStop failpoint to pause replication on a # secondary which will cause a wtimeout error. - self.secondary.admin.command('configureFailPoint', - 'rsSyncApplyStop', - mode='alwaysOn') + self.secondary.admin.command("configureFailPoint", "rsSyncApplyStop", mode="alwaysOn") try: - coll = self.coll.with_options( - write_concern=WriteConcern(w=self.w, wtimeout=1)) + coll = self.coll.with_options(write_concern=WriteConcern(w=self.w, wtimeout=1)) return coll.bulk_write(requests, ordered=ordered) finally: - self.secondary.admin.command('configureFailPoint', - 'rsSyncApplyStop', - mode='off') + self.secondary.admin.command("configureFailPoint", "rsSyncApplyStop", mode="off") @client_context.require_replica_set @client_context.require_secondaries_count(1) def test_write_concern_failure_ordered(self): # Ensure we don't raise on wnote. coll_ww = self.coll.with_options(write_concern=WriteConcern(w=self.w)) - result = coll_ww.bulk_write([ - DeleteOne({"something": "that does no exist"})]) + result = coll_ww.bulk_write([DeleteOne({"something": "that does no exist"})]) self.assertTrue(result.acknowledged) - requests = [ - InsertOne({'a': 1}), - InsertOne({'a': 2}) - ] + requests = [InsertOne({"a": 1}), InsertOne({"a": 2})] # Replication wtimeout is a 'soft' error. # It shouldn't stop batch processing. try: @@ -899,34 +959,37 @@ def test_write_concern_failure_ordered(self): self.fail("Error not raised") self.assertEqualResponse( - {'nMatched': 0, - 'nModified': 0, - 'nUpserted': 0, - 'nInserted': 2, - 'nRemoved': 0, - 'upserted': [], - 'writeErrors': []}, - details) + { + "nMatched": 0, + "nModified": 0, + "nUpserted": 0, + "nInserted": 2, + "nRemoved": 0, + "upserted": [], + "writeErrors": [], + }, + details, + ) # When talking to legacy servers there will be a # write concern error for each operation. - self.assertTrue(len(details['writeConcernErrors']) > 0) + self.assertTrue(len(details["writeConcernErrors"]) > 0) - failed = details['writeConcernErrors'][0] - self.assertEqual(64, failed['code']) - self.assertTrue(isinstance(failed['errmsg'], str)) + failed = details["writeConcernErrors"][0] + self.assertEqual(64, failed["code"]) + self.assertTrue(isinstance(failed["errmsg"], str)) self.coll.delete_many({}) - self.coll.create_index('a', unique=True) - self.addCleanup(self.coll.drop_index, [('a', 1)]) + self.coll.create_index("a", unique=True) + self.addCleanup(self.coll.drop_index, [("a", 1)]) # Fail due to write concern support as well # as duplicate key error on ordered batch. 
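+        # (Although the duplicate-key error stops this ordered batch,
+        # the operations that did execute still report their wtimeout
+        # failures, so entries are expected in both "writeErrors" and
+        # "writeConcernErrors" below.)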
requests = [ - InsertOne({'a': 1}), - ReplaceOne({'a': 3}, {'b': 1}, upsert=True), - InsertOne({'a': 1}), - InsertOne({'a': 2}), + InsertOne({"a": 1}), + ReplaceOne({"a": 3}, {"b": 1}, upsert=True), + InsertOne({"a": 1}), + InsertOne({"a": 2}), ] try: self.cause_wtimeout(requests, ordered=True) @@ -937,36 +1000,36 @@ def test_write_concern_failure_ordered(self): self.fail("Error not raised") self.assertEqualResponse( - {'nMatched': 0, - 'nModified': 0, - 'nUpserted': 1, - 'nInserted': 1, - 'nRemoved': 0, - 'upserted': [{'index': 1, '_id': '...'}], - 'writeErrors': [ - {'index': 2, - 'code': 11000, - 'errmsg': '...', - 'op': {'_id': '...', 'a': 1}}]}, - details) - - self.assertTrue(len(details['writeConcernErrors']) > 1) - failed = details['writeErrors'][0] - self.assertTrue("duplicate" in failed['errmsg']) + { + "nMatched": 0, + "nModified": 0, + "nUpserted": 1, + "nInserted": 1, + "nRemoved": 0, + "upserted": [{"index": 1, "_id": "..."}], + "writeErrors": [ + {"index": 2, "code": 11000, "errmsg": "...", "op": {"_id": "...", "a": 1}} + ], + }, + details, + ) + + self.assertTrue(len(details["writeConcernErrors"]) > 1) + failed = details["writeErrors"][0] + self.assertTrue("duplicate" in failed["errmsg"]) @client_context.require_replica_set @client_context.require_secondaries_count(1) def test_write_concern_failure_unordered(self): # Ensure we don't raise on wnote. coll_ww = self.coll.with_options(write_concern=WriteConcern(w=self.w)) - result = coll_ww.bulk_write([ - DeleteOne({"something": "that does no exist"})], ordered=False) + result = coll_ww.bulk_write([DeleteOne({"something": "that does no exist"})], ordered=False) self.assertTrue(result.acknowledged) requests = [ - InsertOne({'a': 1}), - UpdateOne({'a': 3}, {'$set': {'a': 3, 'b': 1}}, upsert=True), - InsertOne({'a': 2}), + InsertOne({"a": 1}), + UpdateOne({"a": 3}, {"$set": {"a": 3, "b": 1}}, upsert=True), + InsertOne({"a": 2}), ] # Replication wtimeout is a 'soft' error. # It shouldn't stop batch processing. @@ -978,24 +1041,24 @@ def test_write_concern_failure_unordered(self): else: self.fail("Error not raised") - self.assertEqual(2, details['nInserted']) - self.assertEqual(1, details['nUpserted']) - self.assertEqual(0, len(details['writeErrors'])) + self.assertEqual(2, details["nInserted"]) + self.assertEqual(1, details["nUpserted"]) + self.assertEqual(0, len(details["writeErrors"])) # When talking to legacy servers there will be a # write concern error for each operation. - self.assertTrue(len(details['writeConcernErrors']) > 1) + self.assertTrue(len(details["writeConcernErrors"]) > 1) self.coll.delete_many({}) - self.coll.create_index('a', unique=True) - self.addCleanup(self.coll.drop_index, [('a', 1)]) + self.coll.create_index("a", unique=True) + self.addCleanup(self.coll.drop_index, [("a", 1)]) # Fail due to write concern support as well # as duplicate key error on unordered batch. 
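+        # (In the unordered variant every operation is attempted, so
+        # the duplicate insert is reported in "writeErrors" while the
+        # other three writes still succeed; compare the ordered case
+        # above.)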
requests: list = [ - InsertOne({'a': 1}), - UpdateOne({'a': 3}, {'$set': {'a': 3, 'b': 1}}, upsert=True), - InsertOne({'a': 1}), - InsertOne({'a': 2}), + InsertOne({"a": 1}), + UpdateOne({"a": 3}, {"$set": {"a": 3, "b": 1}}, upsert=True), + InsertOne({"a": 1}), + InsertOne({"a": 2}), ] try: self.cause_wtimeout(requests, ordered=False) @@ -1005,27 +1068,27 @@ def test_write_concern_failure_unordered(self): else: self.fail("Error not raised") - self.assertEqual(2, details['nInserted']) - self.assertEqual(1, details['nUpserted']) - self.assertEqual(1, len(details['writeErrors'])) + self.assertEqual(2, details["nInserted"]) + self.assertEqual(1, details["nUpserted"]) + self.assertEqual(1, len(details["writeErrors"])) # When talking to legacy servers there will be a # write concern error for each operation. - self.assertTrue(len(details['writeConcernErrors']) > 1) + self.assertTrue(len(details["writeConcernErrors"]) > 1) - failed = details['writeErrors'][0] - self.assertEqual(2, failed['index']) - self.assertEqual(11000, failed['code']) - self.assertTrue(isinstance(failed['errmsg'], str)) - self.assertEqual(1, failed['op']['a']) + failed = details["writeErrors"][0] + self.assertEqual(2, failed["index"]) + self.assertEqual(11000, failed["code"]) + self.assertTrue(isinstance(failed["errmsg"], str)) + self.assertEqual(1, failed["op"]["a"]) - failed = details['writeConcernErrors'][0] - self.assertEqual(64, failed['code']) - self.assertTrue(isinstance(failed['errmsg'], str)) + failed = details["writeConcernErrors"][0] + self.assertEqual(64, failed["code"]) + self.assertTrue(isinstance(failed["errmsg"], str)) - upserts = details['upserted'] + upserts = details["upserted"] self.assertEqual(1, len(upserts)) - self.assertEqual(1, upserts[0]['index']) - self.assertTrue(upserts[0].get('_id')) + self.assertEqual(1, upserts[0]["index"]) + self.assertTrue(upserts[0].get("_id")) if __name__ == "__main__": diff --git a/test/test_change_stream.py b/test/test_change_stream.py index 655b99e801..73768fd0f6 100644 --- a/test/test_change_stream.py +++ b/test/test_change_stream.py @@ -14,40 +14,42 @@ """Test the change_stream module.""" -import random import os +import random import re -import sys import string +import sys import threading import time import uuid - from itertools import product from typing import no_type_check -sys.path[0:0] = [''] +sys.path[0:0] = [""] -from bson import ObjectId, SON, Timestamp, encode, json_util -from bson.binary import (ALL_UUID_REPRESENTATIONS, - Binary, - STANDARD, - PYTHON_LEGACY) +from test import IntegrationTest, client_context, unittest +from test.unified_format import generate_test_classes +from test.utils import ( + AllowListEventListener, + EventListener, + rs_or_single_client, + wait_until, +) + +from bson import SON, ObjectId, Timestamp, encode, json_util +from bson.binary import ALL_UUID_REPRESENTATIONS, PYTHON_LEGACY, STANDARD, Binary from bson.raw_bson import DEFAULT_RAW_BSON_OPTIONS, RawBSONDocument - from pymongo import MongoClient from pymongo.command_cursor import CommandCursor -from pymongo.errors import (InvalidOperation, OperationFailure, - ServerSelectionTimeoutError) +from pymongo.errors import ( + InvalidOperation, + OperationFailure, + ServerSelectionTimeoutError, +) from pymongo.message import _CursorAddress from pymongo.read_concern import ReadConcern from pymongo.write_concern import WriteConcern -from test import client_context, unittest, IntegrationTest -from test.unified_format import generate_test_classes -from test.utils import ( - EventListener, 
AllowListEventListener, rs_or_single_client, wait_until) - class TestChangeStreamBase(IntegrationTest): RUN_ON_LOAD_BALANCER = True @@ -70,7 +72,7 @@ def client_with_listener(self, *commands): def watched_collection(self, *args, **kwargs): """Return a collection that is watched by self.change_stream().""" # Construct a unique collection for each test. - collname = '.'.join(self.id().rsplit('.', 2)[1:]) + collname = ".".join(self.id().rsplit(".", 2)[1:]) return self.db.get_collection(collname, *args, **kwargs) def generate_invalidate_event(self, change_stream): @@ -81,27 +83,25 @@ def generate_unique_collnames(self, numcolls): """Generate numcolls collection names unique to a test.""" collnames = [] for idx in range(1, numcolls + 1): - collnames.append(self.id() + '_' + str(idx)) + collnames.append(self.id() + "_" + str(idx)) return collnames def get_resume_token(self, invalidate=False): """Get a resume token to use for starting a change stream.""" # Ensure targeted collection exists before starting. - coll = self.watched_collection(write_concern=WriteConcern('majority')) + coll = self.watched_collection(write_concern=WriteConcern("majority")) coll.insert_one({}) if invalidate: - with self.change_stream( - [{'$match': {'operationType': 'invalidate'}}]) as cs: + with self.change_stream([{"$match": {"operationType": "invalidate"}}]) as cs: if isinstance(cs._target, MongoClient): - self.skipTest( - "cluster-level change streams cannot be invalidated") + self.skipTest("cluster-level change streams cannot be invalidated") self.generate_invalidate_event(cs) - return cs.next()['_id'] + return cs.next()["_id"] else: with self.change_stream() as cs: - coll.insert_one({'data': 1}) - return cs.next()['_id'] + coll.insert_one({"data": 1}) + return cs.next()["_id"] def get_start_at_operation_time(self): """Get an operationTime. Advances the operation clock beyond the most @@ -125,18 +125,18 @@ class APITestsMixin(object): @no_type_check def test_watch(self): with self.change_stream( - [{'$project': {'foo': 0}}], full_document='updateLookup', - max_await_time_ms=1000, batch_size=100) as change_stream: - self.assertEqual([{'$project': {'foo': 0}}], - change_stream._pipeline) - self.assertEqual('updateLookup', change_stream._full_document) + [{"$project": {"foo": 0}}], + full_document="updateLookup", + max_await_time_ms=1000, + batch_size=100, + ) as change_stream: + self.assertEqual([{"$project": {"foo": 0}}], change_stream._pipeline) + self.assertEqual("updateLookup", change_stream._full_document) self.assertEqual(1000, change_stream._max_await_time_ms) self.assertEqual(100, change_stream._batch_size) self.assertIsInstance(change_stream._cursor, CommandCursor) - self.assertEqual( - 1000, change_stream._cursor._CommandCursor__max_await_time_ms) - self.watched_collection( - write_concern=WriteConcern("majority")).insert_one({}) + self.assertEqual(1000, change_stream._cursor._CommandCursor__max_await_time_ms) + self.watched_collection(write_concern=WriteConcern("majority")).insert_one({}) _ = change_stream.next() resume_token = change_stream.resume_token with self.assertRaises(TypeError): @@ -150,37 +150,33 @@ def test_watch(self): @no_type_check def test_try_next(self): # ChangeStreams only read majority committed data so use w:majority. 
- coll = self.watched_collection().with_options( - write_concern=WriteConcern("majority")) + coll = self.watched_collection().with_options(write_concern=WriteConcern("majority")) coll.drop() coll.insert_one({}) self.addCleanup(coll.drop) with self.change_stream(max_await_time_ms=250) as stream: - self.assertIsNone(stream.try_next()) # No changes initially. - coll.insert_one({}) # Generate a change. + self.assertIsNone(stream.try_next()) # No changes initially. + coll.insert_one({}) # Generate a change. # On sharded clusters, even majority-committed changes only show # up once an event that sorts after it shows up on the other # shard. So, we wait on try_next to eventually return changes. - wait_until(lambda: stream.try_next() is not None, - "get change from try_next") + wait_until(lambda: stream.try_next() is not None, "get change from try_next") @no_type_check def test_try_next_runs_one_getmore(self): listener = EventListener() client = rs_or_single_client(event_listeners=[listener]) # Connect to the cluster. - client.admin.command('ping') + client.admin.command("ping") listener.results.clear() # ChangeStreams only read majority committed data so use w:majority. - coll = self.watched_collection().with_options( - write_concern=WriteConcern("majority")) + coll = self.watched_collection().with_options(write_concern=WriteConcern("majority")) coll.drop() # Create the watched collection before starting the change stream to # skip any "create" events. - coll.insert_one({'_id': 1}) + coll.insert_one({"_id": 1}) self.addCleanup(coll.drop) - with self.change_stream_with_client( - client, max_await_time_ms=250) as stream: + with self.change_stream_with_client(client, max_await_time_ms=250) as stream: self.assertEqual(listener.started_command_names(), ["aggregate"]) listener.results.clear() @@ -194,9 +190,8 @@ def test_try_next_runs_one_getmore(self): listener.results.clear() # Get at least one change before resuming. - coll.insert_one({'_id': 2}) - wait_until(lambda: stream.try_next() is not None, - "get change from try_next") + coll.insert_one({"_id": 2}) + wait_until(lambda: stream.try_next() is not None, "get change from try_next") listener.results.clear() # Cause the next request to initiate the resume process. @@ -208,16 +203,13 @@ def test_try_next_runs_one_getmore(self): # - resume with aggregate command # - no results, return immediately without another getMore self.assertIsNone(stream.try_next()) - self.assertEqual( - listener.started_command_names(), ["getMore", "aggregate"]) + self.assertEqual(listener.started_command_names(), ["getMore", "aggregate"]) listener.results.clear() # Stream still works after a resume. - coll.insert_one({'_id': 3}) - wait_until(lambda: stream.try_next() is not None, - "get change from try_next") - self.assertEqual(set(listener.started_command_names()), - set(["getMore"])) + coll.insert_one({"_id": 3}) + wait_until(lambda: stream.try_next() is not None, "get change from try_next") + self.assertEqual(set(listener.started_command_names()), set(["getMore"])) self.assertIsNone(stream.try_next()) @no_type_check @@ -225,27 +217,25 @@ def test_batch_size_is_honored(self): listener = EventListener() client = rs_or_single_client(event_listeners=[listener]) # Connect to the cluster. - client.admin.command('ping') + client.admin.command("ping") listener.results.clear() # ChangeStreams only read majority committed data so use w:majority. 
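+        # (w="majority" acknowledgement implies the write is majority
+        # committed, which is the visibility level change streams read
+        # at, so the stream opened below is guaranteed to eventually
+        # observe the insert.)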
- coll = self.watched_collection().with_options( - write_concern=WriteConcern("majority")) + coll = self.watched_collection().with_options(write_concern=WriteConcern("majority")) coll.drop() # Create the watched collection before starting the change stream to # skip any "create" events. - coll.insert_one({'_id': 1}) + coll.insert_one({"_id": 1}) self.addCleanup(coll.drop) # Expected batchSize. - expected = {'batchSize': 23} - with self.change_stream_with_client( - client, max_await_time_ms=250, batch_size=23) as stream: + expected = {"batchSize": 23} + with self.change_stream_with_client(client, max_await_time_ms=250, batch_size=23) as stream: # Confirm that batchSize is honored for initial batch. - cmd = listener.results['started'][0].command - self.assertEqual(cmd['cursor'], expected) + cmd = listener.results["started"][0].command + self.assertEqual(cmd["cursor"], expected) listener.results.clear() # Confirm that batchSize is honored by getMores. self.assertIsNone(stream.try_next()) - cmd = listener.results['started'][0].command + cmd = listener.results["started"][0].command key = next(iter(expected)) self.assertEqual(expected[key], cmd[key]) @@ -255,8 +245,7 @@ def test_batch_size_is_honored(self): def test_start_at_operation_time(self): optime = self.get_start_at_operation_time() - coll = self.watched_collection( - write_concern=WriteConcern("majority")) + coll = self.watched_collection(write_concern=WriteConcern("majority")) ndocs = 3 coll.insert_many([{"data": i} for i in range(ndocs)]) @@ -268,17 +257,16 @@ def test_start_at_operation_time(self): def _test_full_pipeline(self, expected_cs_stage): client, listener = self.client_with_listener("aggregate") results = listener.results - with self.change_stream_with_client( - client, [{'$project': {'foo': 0}}]) as _: + with self.change_stream_with_client(client, [{"$project": {"foo": 0}}]) as _: pass - self.assertEqual(1, len(results['started'])) - command = results['started'][0] - self.assertEqual('aggregate', command.command_name) - self.assertEqual([ - {'$changeStream': expected_cs_stage}, - {'$project': {'foo': 0}}], - command.command['pipeline']) + self.assertEqual(1, len(results["started"])) + command = results["started"][0] + self.assertEqual("aggregate", command.command_name) + self.assertEqual( + [{"$changeStream": expected_cs_stage}, {"$project": {"foo": 0}}], + command.command["pipeline"], + ) @no_type_check def test_full_pipeline(self): @@ -291,11 +279,10 @@ def test_full_pipeline(self): def test_iteration(self): with self.change_stream(batch_size=2) as change_stream: num_inserted = 10 - self.watched_collection().insert_many( - [{} for _ in range(num_inserted)]) + self.watched_collection().insert_many([{} for _ in range(num_inserted)]) inserts_received = 0 for change in change_stream: - self.assertEqual(change['operationType'], 'insert') + self.assertEqual(change["operationType"], "insert") inserts_received += 1 if inserts_received == num_inserted: break @@ -303,10 +290,9 @@ def test_iteration(self): @no_type_check def _test_next_blocks(self, change_stream): - inserted_doc = {'_id': ObjectId()} + inserted_doc = {"_id": ObjectId()} changes = [] - t = threading.Thread( - target=lambda: changes.append(change_stream.next())) + t = threading.Thread(target=lambda: changes.append(change_stream.next())) t.start() # Sleep for a bit to prove that the call to next() blocks. 
time.sleep(1) @@ -318,8 +304,8 @@ def _test_next_blocks(self, change_stream): t.join(30) self.assertFalse(t.is_alive()) self.assertEqual(1, len(changes)) - self.assertEqual(changes[0]['operationType'], 'insert') - self.assertEqual(changes[0]['fullDocument'], inserted_doc) + self.assertEqual(changes[0]["operationType"], "insert") + self.assertEqual(changes[0]["fullDocument"], inserted_doc) @no_type_check def test_next_blocks(self): @@ -332,7 +318,8 @@ def test_next_blocks(self): def test_aggregate_cursor_blocks(self): """Test that an aggregate cursor blocks until a change is readable.""" with self.watched_collection().aggregate( - [{'$changeStream': {}}], maxAwaitTimeMS=250) as change_stream: + [{"$changeStream": {}}], maxAwaitTimeMS=250 + ) as change_stream: self._test_next_blocks(change_stream) @no_type_check @@ -340,9 +327,11 @@ def test_concurrent_close(self): """Ensure a ChangeStream can be closed from another thread.""" # Use a short await time to speed up the test. with self.change_stream(max_await_time_ms=250) as change_stream: + def iterate_cursor(): for _ in change_stream: pass + t = threading.Thread(target=iterate_cursor) t.start() self.watched_collection().insert_one({}) @@ -353,10 +342,9 @@ def iterate_cursor(): @no_type_check def test_unknown_full_document(self): - """Must rely on the server to raise an error on unknown fullDocument. - """ + """Must rely on the server to raise an error on unknown fullDocument.""" try: - with self.change_stream(full_document='notValidatedByPyMongo'): + with self.change_stream(full_document="notValidatedByPyMongo"): pass except OperationFailure: pass @@ -364,47 +352,46 @@ def test_unknown_full_document(self): @no_type_check def test_change_operations(self): """Test each operation type.""" - expected_ns = {'db': self.watched_collection().database.name, - 'coll': self.watched_collection().name} + expected_ns = { + "db": self.watched_collection().database.name, + "coll": self.watched_collection().name, + } with self.change_stream() as change_stream: # Insert. - inserted_doc = {'_id': ObjectId(), 'foo': 'bar'} + inserted_doc = {"_id": ObjectId(), "foo": "bar"} self.watched_collection().insert_one(inserted_doc) change = change_stream.next() - self.assertTrue(change['_id']) - self.assertEqual(change['operationType'], 'insert') - self.assertEqual(change['ns'], expected_ns) - self.assertEqual(change['fullDocument'], inserted_doc) + self.assertTrue(change["_id"]) + self.assertEqual(change["operationType"], "insert") + self.assertEqual(change["ns"], expected_ns) + self.assertEqual(change["fullDocument"], inserted_doc) # Update. 
- update_spec = {'$set': {'new': 1}, '$unset': {'foo': 1}} + update_spec = {"$set": {"new": 1}, "$unset": {"foo": 1}} self.watched_collection().update_one(inserted_doc, update_spec) change = change_stream.next() - self.assertTrue(change['_id']) - self.assertEqual(change['operationType'], 'update') - self.assertEqual(change['ns'], expected_ns) - self.assertNotIn('fullDocument', change) - - expected_update_description = { - 'updatedFields': {'new': 1}, - 'removedFields': ['foo']} + self.assertTrue(change["_id"]) + self.assertEqual(change["operationType"], "update") + self.assertEqual(change["ns"], expected_ns) + self.assertNotIn("fullDocument", change) + + expected_update_description = {"updatedFields": {"new": 1}, "removedFields": ["foo"]} if client_context.version.at_least(4, 5, 0): - expected_update_description['truncatedArrays'] = [] - self.assertEqual(expected_update_description, - change['updateDescription']) + expected_update_description["truncatedArrays"] = [] + self.assertEqual(expected_update_description, change["updateDescription"]) # Replace. - self.watched_collection().replace_one({'new': 1}, {'foo': 'bar'}) + self.watched_collection().replace_one({"new": 1}, {"foo": "bar"}) change = change_stream.next() - self.assertTrue(change['_id']) - self.assertEqual(change['operationType'], 'replace') - self.assertEqual(change['ns'], expected_ns) - self.assertEqual(change['fullDocument'], inserted_doc) + self.assertTrue(change["_id"]) + self.assertEqual(change["operationType"], "replace") + self.assertEqual(change["ns"], expected_ns) + self.assertEqual(change["fullDocument"], inserted_doc) # Delete. - self.watched_collection().delete_one({'foo': 'bar'}) + self.watched_collection().delete_one({"foo": "bar"}) change = change_stream.next() - self.assertTrue(change['_id']) - self.assertEqual(change['operationType'], 'delete') - self.assertEqual(change['ns'], expected_ns) - self.assertNotIn('fullDocument', change) + self.assertTrue(change["_id"]) + self.assertEqual(change["operationType"], "delete") + self.assertEqual(change["ns"], expected_ns) + self.assertNotIn("fullDocument", change) # Invalidate. self._test_get_invalidate_event(change_stream) @@ -419,30 +406,29 @@ def test_start_after(self): # start_after can resume after invalidate. 
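+        # (Unlike resume_after, start_after accepts the token of an
+        # "invalidate" event, e.g. from a collection drop; the token is
+        # taken from change["_id"] or change_stream.resume_token.)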
with self.change_stream(start_after=resume_token) as change_stream: - self.watched_collection().insert_one({'_id': 2}) + self.watched_collection().insert_one({"_id": 2}) change = change_stream.next() - self.assertEqual(change['operationType'], 'insert') - self.assertEqual(change['fullDocument'], {'_id': 2}) + self.assertEqual(change["operationType"], "insert") + self.assertEqual(change["fullDocument"], {"_id": 2}) @no_type_check @client_context.require_version_min(4, 1, 1) def test_start_after_resume_process_with_changes(self): resume_token = self.get_resume_token(invalidate=True) - with self.change_stream(start_after=resume_token, - max_await_time_ms=250) as change_stream: - self.watched_collection().insert_one({'_id': 2}) + with self.change_stream(start_after=resume_token, max_await_time_ms=250) as change_stream: + self.watched_collection().insert_one({"_id": 2}) change = change_stream.next() - self.assertEqual(change['operationType'], 'insert') - self.assertEqual(change['fullDocument'], {'_id': 2}) + self.assertEqual(change["operationType"], "insert") + self.assertEqual(change["fullDocument"], {"_id": 2}) self.assertIsNone(change_stream.try_next()) self.kill_change_stream_cursor(change_stream) - self.watched_collection().insert_one({'_id': 3}) + self.watched_collection().insert_one({"_id": 3}) change = change_stream.next() - self.assertEqual(change['operationType'], 'insert') - self.assertEqual(change['fullDocument'], {'_id': 3}) + self.assertEqual(change["operationType"], "insert") + self.assertEqual(change["fullDocument"], {"_id": 3}) @no_type_check @client_context.require_no_mongos # Remove after SERVER-41196 @@ -450,15 +436,14 @@ def test_start_after_resume_process_with_changes(self): def test_start_after_resume_process_without_changes(self): resume_token = self.get_resume_token(invalidate=True) - with self.change_stream(start_after=resume_token, - max_await_time_ms=250) as change_stream: + with self.change_stream(start_after=resume_token, max_await_time_ms=250) as change_stream: self.assertIsNone(change_stream.try_next()) self.kill_change_stream_cursor(change_stream) - self.watched_collection().insert_one({'_id': 2}) + self.watched_collection().insert_one({"_id": 2}) change = change_stream.next() - self.assertEqual(change['operationType'], 'insert') - self.assertEqual(change['fullDocument'], {'_id': 2}) + self.assertEqual(change["operationType"], "insert") + self.assertEqual(change["fullDocument"], {"_id": 2}) class ProseSpecTestsMixin(object): @@ -471,46 +456,42 @@ def _client_with_listener(self, *commands): @no_type_check def _populate_and_exhaust_change_stream(self, change_stream, batch_size=3): - self.watched_collection().insert_many( - [{"data": k} for k in range(batch_size)]) + self.watched_collection().insert_many([{"data": k} for k in range(batch_size)]) for _ in range(batch_size): change = next(change_stream) return change - def _get_expected_resume_token_legacy(self, stream, - listener, previous_change=None): + def _get_expected_resume_token_legacy(self, stream, listener, previous_change=None): """Predicts what the resume token should currently be for server versions that don't support postBatchResumeToken. 
Assumes the stream has never returned any changes if previous_change is None.""" if previous_change is None: - agg_cmd = listener.results['started'][0] + agg_cmd = listener.results["started"][0] stage = agg_cmd.command["pipeline"][0]["$changeStream"] return stage.get("resumeAfter") or stage.get("startAfter") - return previous_change['_id'] + return previous_change["_id"] - def _get_expected_resume_token(self, stream, listener, - previous_change=None): + def _get_expected_resume_token(self, stream, listener, previous_change=None): """Predicts what the resume token should currently be for server versions that support postBatchResumeToken. Assumes the stream has never returned any changes if previous_change is None. Assumes listener is a AllowListEventListener that listens for aggregate and getMore commands.""" if previous_change is None or stream._cursor._has_next(): - token = self._get_expected_resume_token_legacy( - stream, listener, previous_change) + token = self._get_expected_resume_token_legacy(stream, listener, previous_change) if token is not None: return token - response = listener.results['succeeded'][-1].reply - return response['cursor']['postBatchResumeToken'] + response = listener.results["succeeded"][-1].reply + return response["cursor"]["postBatchResumeToken"] @no_type_check def _test_raises_error_on_missing_id(self, expected_exception): """ChangeStream will raise an exception if the server response is missing the resume token. """ - with self.change_stream([{'$project': {'_id': 0}}]) as change_stream: + with self.change_stream([{"$project": {"_id": 0}}]) as change_stream: self.watched_collection().insert_one({}) with self.assertRaises(expected_exception): next(change_stream) @@ -522,17 +503,17 @@ def _test_raises_error_on_missing_id(self, expected_exception): def _test_update_resume_token(self, expected_rt_getter): """ChangeStream must continuously track the last seen resumeToken.""" client, listener = self._client_with_listener("aggregate", "getMore") - coll = self.watched_collection(write_concern=WriteConcern('majority')) + coll = self.watched_collection(write_concern=WriteConcern("majority")) with self.change_stream_with_client(client) as change_stream: self.assertEqual( - change_stream.resume_token, - expected_rt_getter(change_stream, listener)) + change_stream.resume_token, expected_rt_getter(change_stream, listener) + ) for _ in range(3): coll.insert_one({}) change = next(change_stream) self.assertEqual( - change_stream.resume_token, - expected_rt_getter(change_stream, listener, change)) + change_stream.resume_token, expected_rt_getter(change_stream, listener, change) + ) # Prose test no. 1 @client_context.require_version_min(4, 0, 7) @@ -561,18 +542,17 @@ def test_raises_error_on_missing_id_418minus(self): @no_type_check def test_resume_on_error(self): with self.change_stream() as change_stream: - self.insert_one_and_check(change_stream, {'_id': 1}) + self.insert_one_and_check(change_stream, {"_id": 1}) # Cause a cursor not found error on the next getMore. self.kill_change_stream_cursor(change_stream) - self.insert_one_and_check(change_stream, {'_id': 2}) + self.insert_one_and_check(change_stream, {"_id": 2}) # Prose test no. 4 @no_type_check @client_context.require_failCommand_fail_point def test_no_resume_attempt_if_aggregate_command_fails(self): # Set non-retryable error on aggregate command. 
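+        # (Resume logic applies only to getMore failures; an error on
+        # the initial aggregate, injected once here via the failCommand
+        # fail point, must surface directly, so the listener should
+        # record exactly one aggregate attempt and no retry.)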
- fail_point = {'mode': {'times': 1}, - 'data': {'errorCode': 2, 'failCommands': ['aggregate']}} + fail_point = {"mode": {"times": 1}, "data": {"errorCode": 2, "failCommands": ["aggregate"]}} client, listener = self._client_with_listener("aggregate", "getMore") with self.fail_point(fail_point): try: @@ -581,9 +561,8 @@ def test_no_resume_attempt_if_aggregate_command_fails(self): pass # Driver should have attempted aggregate command only once. - self.assertEqual(len(listener.results['started']), 1) - self.assertEqual(listener.results['started'][0].command_name, - 'aggregate') + self.assertEqual(len(listener.results["started"]), 1) + self.assertEqual(listener.results["started"][0].command_name, "aggregate") # Prose test no. 5 - REMOVED # Prose test no. 6 - SKIPPED @@ -607,14 +586,15 @@ def test_initial_empty_batch(self): @no_type_check def test_kill_cursors(self): def raise_error(): - raise ServerSelectionTimeoutError('mock error') + raise ServerSelectionTimeoutError("mock error") + with self.change_stream() as change_stream: - self.insert_one_and_check(change_stream, {'_id': 1}) + self.insert_one_and_check(change_stream, {"_id": 1}) # Cause a cursor not found error on the next getMore. cursor = change_stream._cursor self.kill_change_stream_cursor(change_stream) cursor.close = raise_error - self.insert_one_and_check(change_stream, {'_id': 2}) + self.insert_one_and_check(change_stream, {"_id": 2}) # Prose test no. 9 @no_type_check @@ -626,21 +606,21 @@ def test_start_at_operation_time_caching(self): with self.change_stream_with_client(client) as cs: self.kill_change_stream_cursor(cs) cs.try_next() - cmd = listener.results['started'][-1].command - self.assertIsNotNone(cmd["pipeline"][0]["$changeStream"].get( - "startAtOperationTime")) + cmd = listener.results["started"][-1].command + self.assertIsNotNone(cmd["pipeline"][0]["$changeStream"].get("startAtOperationTime")) # Case 2: change stream started with startAtOperationTime listener.results.clear() optime = self.get_start_at_operation_time() - with self.change_stream_with_client( - client, start_at_operation_time=optime) as cs: + with self.change_stream_with_client(client, start_at_operation_time=optime) as cs: self.kill_change_stream_cursor(cs) cs.try_next() - cmd = listener.results['started'][-1].command - self.assertEqual(cmd["pipeline"][0]["$changeStream"].get( - "startAtOperationTime"), optime, str([k.command for k in - listener.results['started']])) + cmd = listener.results["started"][-1].command + self.assertEqual( + cmd["pipeline"][0]["$changeStream"].get("startAtOperationTime"), + optime, + str([k.command for k in listener.results["started"]]), + ) # Prose test no. 10 - SKIPPED # This test is identical to prose test no. 3. @@ -654,9 +634,8 @@ def test_resumetoken_empty_batch(self): self.assertIsNone(change_stream.try_next()) resume_token = change_stream.resume_token - response = listener.results['succeeded'][0].reply - self.assertEqual(resume_token, - response["cursor"]["postBatchResumeToken"]) + response = listener.results["succeeded"][0].reply + self.assertEqual(resume_token, response["cursor"]["postBatchResumeToken"]) # Prose test no. 
11 @no_type_check @@ -667,9 +646,8 @@ def test_resumetoken_exhausted_batch(self): self._populate_and_exhaust_change_stream(change_stream) resume_token = change_stream.resume_token - response = listener.results['succeeded'][-1].reply - self.assertEqual(resume_token, - response["cursor"]["postBatchResumeToken"]) + response = listener.results["succeeded"][-1].reply + self.assertEqual(resume_token, response["cursor"]["postBatchResumeToken"]) # Prose test no. 12 @no_type_check @@ -696,7 +674,7 @@ def test_resumetoken_exhausted_batch_legacy(self): with self.change_stream() as change_stream: change = self._populate_and_exhaust_change_stream(change_stream) self.assertEqual(change_stream.resume_token, change["_id"]) - resume_point = change['_id'] + resume_point = change["_id"] # Resume token is _id of last change even if resumeAfter is specified. with self.change_stream(resume_after=resume_point) as change_stream: @@ -709,9 +687,9 @@ def test_resumetoken_partially_iterated_batch(self): # When batch has been iterated up to but not including the last element. # Resume token should be _id of previous change document. with self.change_stream() as change_stream: - self.watched_collection( - write_concern=WriteConcern('majority')).insert_many( - [{"data": k} for k in range(3)]) + self.watched_collection(write_concern=WriteConcern("majority")).insert_many( + [{"data": k} for k in range(3)] + ) for _ in range(2): change = next(change_stream) resume_token = change_stream.resume_token @@ -725,13 +703,12 @@ def _test_resumetoken_uniterated_nonempty_batch(self, resume_option): resume_point = self.get_resume_token() # Insert some documents so that firstBatch isn't empty. - self.watched_collection( - write_concern=WriteConcern("majority")).insert_many( - [{'a': 1}, {'b': 2}, {'c': 3}]) + self.watched_collection(write_concern=WriteConcern("majority")).insert_many( + [{"a": 1}, {"b": 2}, {"c": 3}] + ) # Resume token should be same as the resume option. - with self.change_stream( - **{resume_option: resume_point}) as change_stream: + with self.change_stream(**{resume_option: resume_point}) as change_stream: self.assertTrue(change_stream._cursor._has_next()) resume_token = change_stream.resume_token self.assertEqual(resume_token, resume_point) @@ -757,18 +734,15 @@ def test_startafter_resume_uses_startafter_after_empty_getMore(self): resume_point = self.get_resume_token() client, listener = self._client_with_listener("aggregate") - with self.change_stream_with_client( - client, start_after=resume_point) as change_stream: + with self.change_stream_with_client(client, start_after=resume_point) as change_stream: self.assertFalse(change_stream._cursor._has_next()) # No changes - change_stream.try_next() # No changes + change_stream.try_next() # No changes self.kill_change_stream_cursor(change_stream) - change_stream.try_next() # Resume attempt + change_stream.try_next() # Resume attempt - response = listener.results['started'][-1] - self.assertIsNone( - response.command["pipeline"][0]["$changeStream"].get("resumeAfter")) - self.assertIsNotNone( - response.command["pipeline"][0]["$changeStream"].get("startAfter")) + response = listener.results["started"][-1] + self.assertIsNone(response.command["pipeline"][0]["$changeStream"].get("resumeAfter")) + self.assertIsNotNone(response.command["pipeline"][0]["$changeStream"].get("startAfter")) # Prose test no. 
18 @no_type_check @@ -778,19 +752,16 @@ def test_startafter_resume_uses_resumeafter_after_nonempty_getMore(self): resume_point = self.get_resume_token() client, listener = self._client_with_listener("aggregate") - with self.change_stream_with_client( - client, start_after=resume_point) as change_stream: + with self.change_stream_with_client(client, start_after=resume_point) as change_stream: self.assertFalse(change_stream._cursor._has_next()) # No changes self.watched_collection().insert_one({}) - next(change_stream) # Changes + next(change_stream) # Changes self.kill_change_stream_cursor(change_stream) - change_stream.try_next() # Resume attempt + change_stream.try_next() # Resume attempt - response = listener.results['started'][-1] - self.assertIsNotNone( - response.command["pipeline"][0]["$changeStream"].get("resumeAfter")) - self.assertIsNone( - response.command["pipeline"][0]["$changeStream"].get("startAfter")) + response = listener.results["started"][-1] + self.assertIsNotNone(response.command["pipeline"][0]["$changeStream"].get("resumeAfter")) + self.assertIsNone(response.command["pipeline"][0]["$changeStream"].get("startAfter")) class TestClusterChangeStream(TestChangeStreamBase, APITestsMixin): @@ -828,10 +799,9 @@ def _insert_and_check(self, change_stream, db, collname, doc): coll = db[collname] coll.insert_one(doc) change = next(change_stream) - self.assertEqual(change['operationType'], 'insert') - self.assertEqual(change['ns'], {'db': db.name, - 'coll': collname}) - self.assertEqual(change['fullDocument'], doc) + self.assertEqual(change["operationType"], "insert") + self.assertEqual(change["ns"], {"db": db.name, "coll": collname}) + self.assertEqual(change["fullDocument"], doc) def insert_one_and_check(self, change_stream, doc): db = random.choice(self.dbs) @@ -842,22 +812,20 @@ def test_simple(self): collnames = self.generate_unique_collnames(3) with self.change_stream() as change_stream: for db, collname in product(self.dbs, collnames): - self._insert_and_check( - change_stream, db, collname, {'_id': collname} - ) + self._insert_and_check(change_stream, db, collname, {"_id": collname}) def test_aggregate_cursor_blocks(self): """Test that an aggregate cursor blocks until a change is readable.""" with self.client.admin.aggregate( - [{'$changeStream': {'allChangesForCluster': True}}], - maxAwaitTimeMS=250) as change_stream: + [{"$changeStream": {"allChangesForCluster": True}}], maxAwaitTimeMS=250 + ) as change_stream: self._test_next_blocks(change_stream) def test_full_pipeline(self): """$changeStream must be the first stage in a change stream pipeline sent to the server. """ - self._test_full_pipeline({'allChangesForCluster': True}) + self._test_full_pipeline({"allChangesForCluster": True}) class TestDatabaseChangeStream(TestChangeStreamBase, APITestsMixin): @@ -883,22 +851,22 @@ def _test_get_invalidate_event(self, change_stream): change = change_stream.next() # 4.1+ returns "drop" events for each collection in dropped database # and a "dropDatabase" event for the database itself. 
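# Illustrative shape of that event sequence when watching a whole database
# (a sketch; assumes a replica set and a connected MongoClient `client`):
#
#   with client["some_db"].watch() as stream:
#       client.drop_database("some_db")
#       ops = [c["operationType"] for c in stream]
#   # ops ends with one "drop" per collection, then "dropDatabase",
#   # then "invalidate", after which iteration stops.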
- if change['operationType'] == 'drop': - self.assertTrue(change['_id']) + if change["operationType"] == "drop": + self.assertTrue(change["_id"]) for _ in range(len(dropped_colls)): - ns = change['ns'] - self.assertEqual(ns['db'], change_stream._target.name) - self.assertIn(ns['coll'], dropped_colls) + ns = change["ns"] + self.assertEqual(ns["db"], change_stream._target.name) + self.assertIn(ns["coll"], dropped_colls) change = change_stream.next() - self.assertEqual(change['operationType'], 'dropDatabase') - self.assertTrue(change['_id']) - self.assertEqual(change['ns'], {'db': change_stream._target.name}) + self.assertEqual(change["operationType"], "dropDatabase") + self.assertTrue(change["_id"]) + self.assertEqual(change["ns"], {"db": change_stream._target.name}) # Get next change. change = change_stream.next() - self.assertTrue(change['_id']) - self.assertEqual(change['operationType'], 'invalidate') - self.assertNotIn('ns', change) - self.assertNotIn('fullDocument', change) + self.assertTrue(change["_id"]) + self.assertEqual(change["operationType"], "invalidate") + self.assertNotIn("ns", change) + self.assertNotIn("fullDocument", change) # The ChangeStream should be dead. with self.assertRaises(StopIteration): change_stream.next() @@ -908,10 +876,9 @@ def _test_invalidate_stops_iteration(self, change_stream): change_stream._client.drop_database(self.db.name) # Check drop and dropDatabase events. for change in change_stream: - self.assertIn(change['operationType'], ( - 'drop', 'dropDatabase', 'invalidate')) + self.assertIn(change["operationType"], ("drop", "dropDatabase", "invalidate")) # Last change must be invalidate. - self.assertEqual(change['operationType'], 'invalidate') + self.assertEqual(change["operationType"], "invalidate") # Change stream must not allow further iteration. with self.assertRaises(StopIteration): change_stream.next() @@ -922,10 +889,9 @@ def _insert_and_check(self, change_stream, collname, doc): coll = self.db[collname] coll.insert_one(doc) change = next(change_stream) - self.assertEqual(change['operationType'], 'insert') - self.assertEqual(change['ns'], {'db': self.db.name, - 'coll': collname}) - self.assertEqual(change['fullDocument'], doc) + self.assertEqual(change["operationType"], "insert") + self.assertEqual(change["ns"], {"db": self.db.name, "coll": collname}) + self.assertEqual(change["fullDocument"], doc) def insert_one_and_check(self, change_stream, doc): self._insert_and_check(change_stream, self.id(), doc) @@ -935,26 +901,21 @@ def test_simple(self): with self.change_stream() as change_stream: for collname in collnames: self._insert_and_check( - change_stream, collname, - {'_id': Binary.from_uuid(uuid.uuid4())}) + change_stream, collname, {"_id": Binary.from_uuid(uuid.uuid4())} + ) def test_isolation(self): # Ensure inserts to other dbs don't show up in our ChangeStream. 
other_db = self.client.pymongo_test_temp - self.assertNotEqual( - other_db, self.db, msg="Isolation must be tested on separate DBs") + self.assertNotEqual(other_db, self.db, msg="Isolation must be tested on separate DBs") collname = self.id() with self.change_stream() as change_stream: - other_db[collname].insert_one( - {'_id': Binary.from_uuid(uuid.uuid4())}) + other_db[collname].insert_one({"_id": Binary.from_uuid(uuid.uuid4())}) - self._insert_and_check( - change_stream, collname, - {'_id': Binary.from_uuid(uuid.uuid4())}) + self._insert_and_check(change_stream, collname, {"_id": Binary.from_uuid(uuid.uuid4())}) self.client.drop_database(other_db) -class TestCollectionChangeStream(TestChangeStreamBase, APITestsMixin, - ProseSpecTestsMixin): +class TestCollectionChangeStream(TestChangeStreamBase, APITestsMixin, ProseSpecTestsMixin): @classmethod @client_context.require_version_min(3, 5, 11) @client_context.require_no_mmap @@ -968,8 +929,11 @@ def setUp(self): self.watched_collection().insert_one({}) def change_stream_with_client(self, client, *args, **kwargs): - return client[self.db.name].get_collection( - self.watched_collection().name).watch(*args, **kwargs) + return ( + client[self.db.name] + .get_collection(self.watched_collection().name) + .watch(*args, **kwargs) + ) def generate_invalidate_event(self, change_stream): # Dropping the collection invalidates the change stream. def _test_invalidate_stops_iteration(self, change_stream): self.generate_invalidate_event(change_stream) # Check drop and invalidate events. for change in change_stream: - self.assertIn(change['operationType'], ('drop', 'invalidate')) + self.assertIn(change["operationType"], ("drop", "invalidate")) # Last change must be invalidate. - self.assertEqual(change['operationType'], 'invalidate') + self.assertEqual(change["operationType"], "invalidate") # Change stream must not allow further iteration. with self.assertRaises(StopIteration): change_stream.next() @@ -993,17 +957,18 @@ def _test_get_invalidate_event(self, change_stream): change_stream._target.drop() change = change_stream.next() # 4.1+ returns a "drop" change document. - if change['operationType'] == 'drop': - self.assertTrue(change['_id']) - self.assertEqual(change['ns'], { - 'db': change_stream._target.database.name, - 'coll': change_stream._target.name}) + if change["operationType"] == "drop": + self.assertTrue(change["_id"]) + self.assertEqual( + change["ns"], + {"db": change_stream._target.database.name, "coll": change_stream._target.name}, + ) # Last change should be invalidate. change = change_stream.next() - self.assertTrue(change['_id']) - self.assertEqual(change['operationType'], 'invalidate') - self.assertNotIn('ns', change) - self.assertNotIn('fullDocument', change) + self.assertTrue(change["_id"]) + self.assertEqual(change["operationType"], "invalidate") + self.assertNotIn("ns", change) + self.assertNotIn("fullDocument", change) # The ChangeStream should be dead.
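# (For a collection-level stream the sequence is shorter: "drop" followed by
# "invalidate"; "dropDatabase" only appears on database- and cluster-level
# streams. Afterwards next() raises StopIteration, as asserted below.)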
with self.assertRaises(StopIteration): change_stream.next() @@ -1011,38 +976,36 @@ def _test_get_invalidate_event(self, change_stream): def insert_one_and_check(self, change_stream, doc): self.watched_collection().insert_one(doc) change = next(change_stream) - self.assertEqual(change['operationType'], 'insert') + self.assertEqual(change["operationType"], "insert") self.assertEqual( - change['ns'], {'db': self.watched_collection().database.name, - 'coll': self.watched_collection().name}) - self.assertEqual(change['fullDocument'], doc) + change["ns"], + {"db": self.watched_collection().database.name, "coll": self.watched_collection().name}, + ) + self.assertEqual(change["fullDocument"], doc) def test_raw(self): """Test with RawBSONDocument.""" - raw_coll = self.watched_collection( - codec_options=DEFAULT_RAW_BSON_OPTIONS) + raw_coll = self.watched_collection(codec_options=DEFAULT_RAW_BSON_OPTIONS) with raw_coll.watch() as change_stream: - raw_doc = RawBSONDocument(encode({'_id': 1})) + raw_doc = RawBSONDocument(encode({"_id": 1})) self.watched_collection().insert_one(raw_doc) change = next(change_stream) self.assertIsInstance(change, RawBSONDocument) - self.assertEqual(change['operationType'], 'insert') - self.assertEqual( - change['ns']['db'], self.watched_collection().database.name) - self.assertEqual( - change['ns']['coll'], self.watched_collection().name) - self.assertEqual(change['fullDocument'], raw_doc) + self.assertEqual(change["operationType"], "insert") + self.assertEqual(change["ns"]["db"], self.watched_collection().database.name) + self.assertEqual(change["ns"]["coll"], self.watched_collection().name) + self.assertEqual(change["fullDocument"], raw_doc) def test_uuid_representations(self): """Test with uuid document _ids and different uuid_representation.""" for uuid_representation in ALL_UUID_REPRESENTATIONS: for id_subtype in (STANDARD, PYTHON_LEGACY): options = self.watched_collection().codec_options.with_options( - uuid_representation=uuid_representation) + uuid_representation=uuid_representation + ) coll = self.watched_collection(codec_options=options) with coll.watch() as change_stream: - coll.insert_one( - {'_id': Binary(uuid.uuid4().bytes, id_subtype)}) + coll.insert_one({"_id": Binary(uuid.uuid4().bytes, id_subtype)}) _ = change_stream.next() resume_token = change_stream.resume_token @@ -1051,12 +1014,12 @@ def test_uuid_representations(self): def test_document_id_order(self): """Test with document _ids that need their order preserved.""" - random_keys = random.sample(string.ascii_letters, - len(string.ascii_letters)) - random_doc = {'_id': SON([(key, key) for key in random_keys])} + random_keys = random.sample(string.ascii_letters, len(string.ascii_letters)) + random_doc = {"_id": SON([(key, key) for key in random_keys])} for document_class in (dict, SON, RawBSONDocument): options = self.watched_collection().codec_options.with_options( - document_class=document_class) + document_class=document_class + ) coll = self.watched_collection(codec_options=options) with coll.watch() as change_stream: coll.insert_one(random_doc) @@ -1072,12 +1035,12 @@ def test_document_id_order(self): def test_read_concern(self): """Test readConcern is not validated by the driver.""" # Read concern 'local' is not allowed for $changeStream. - coll = self.watched_collection(read_concern=ReadConcern('local')) + coll = self.watched_collection(read_concern=ReadConcern("local")) with self.assertRaises(OperationFailure): coll.watch() # Does not error. 
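# The driver forwards readConcern to the server unvalidated; it is the
# server that rejects unsupported levels for $changeStream. A sketch
# (assumes `coll` is any collection handle on a supported deployment):
#
#   coll.with_options(read_concern=ReadConcern("local")).watch()     # server error
#   coll.with_options(read_concern=ReadConcern("majority")).watch()  # accepted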
- coll = self.watched_collection(read_concern=ReadConcern('majority')) + coll = self.watched_collection(read_concern=ReadConcern("majority")) with coll.watch(): pass @@ -1103,10 +1066,13 @@ def setUp(self): self.listener.results.clear() def setUpCluster(self, scenario_dict): - assets = [(scenario_dict["database_name"], - scenario_dict["collection_name"]), - (scenario_dict.get("database2_name", "db2"), - scenario_dict.get("collection2_name", "coll2"))] + assets = [ + (scenario_dict["database_name"], scenario_dict["collection_name"]), + ( + scenario_dict.get("database2_name", "db2"), + scenario_dict.get("collection2_name", "coll2"), + ), + ] for db, coll in assets: self.client.drop_database(db) self.client[db].create_collection(coll) @@ -1118,12 +1084,15 @@ def setFailPoint(self, scenario_dict): elif not client_context.test_commands_enabled: self.skipTest("Test commands must be enabled") - fail_cmd = SON([('configureFailPoint', 'failCommand')]) + fail_cmd = SON([("configureFailPoint", "failCommand")]) fail_cmd.update(fail_point) client_context.client.admin.command(fail_cmd) self.addCleanup( client_context.client.admin.command, - 'configureFailPoint', fail_cmd['configureFailPoint'], mode='off') + "configureFailPoint", + fail_cmd["configureFailPoint"], + mode="off", + ) def assert_list_contents_are_subset(self, superlist, sublist): """Check that each element in sublist is a subset of the corresponding @@ -1143,7 +1112,7 @@ def assert_dict_is_subset(self, superdict, subdict): exempt_fields = ["documentKey", "_id", "getMore"] for key, value in subdict.items(): if key not in superdict: - self.fail('Key %s not found in %s' % (key, superdict)) + self.fail("Key %s not found in %s" % (key, superdict)) if isinstance(value, dict): self.assert_dict_is_subset(superdict[key], value) continue @@ -1169,14 +1138,13 @@ def tearDown(self): self.listener.results.clear() -_TEST_PATH = os.path.join( - os.path.dirname(os.path.realpath(__file__)), 'change_streams') +_TEST_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "change_streams") def camel_to_snake(camel): # Regex to convert CamelCase to snake_case. - snake = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', camel) - return re.sub('([a-z0-9])([A-Z])', r'\1_\2', snake).lower() + snake = re.sub("(.)([A-Z][a-z]+)", r"\1_\2", camel) + return re.sub("([a-z0-9])([A-Z])", r"\1_\2", snake).lower() def get_change_stream(client, scenario_def, test): @@ -1207,12 +1175,11 @@ def run_operation(client, operation): # Apply specified operations opname = camel_to_snake(operation["name"]) arguments = operation.get("arguments", {}) - if opname == 'rename': + if opname == "rename": # Special case for rename operation. 
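# camel_to_snake above maps spec operation names onto pymongo method names,
# e.g. "insertOne" -> "insert_one" and "replaceOne" -> "replace_one".
# rename needs extra handling because the spec names the target collection
# "to" while pymongo's Collection.rename() expects new_name.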
- arguments = {'new_name': arguments["to"]} - cmd = getattr(client.get_database( - operation["database"]).get_collection( - operation["collection"]), opname + arguments = {"new_name": arguments["to"]} + cmd = getattr( + client.get_database(operation["database"]).get_collection(operation["collection"]), opname ) return cmd(**arguments) @@ -1224,15 +1191,12 @@ def run_scenario(self): self.setFailPoint(test) is_error = test["result"].get("error", False) try: - with get_change_stream( - self.client, scenario_def, test - ) as change_stream: + with get_change_stream(self.client, scenario_def, test) as change_stream: for operation in test["operations"]: # Run specified operations run_operation(self.client, operation) num_expected_changes = len(test["result"].get("success", [])) - changes = [ - change_stream.next() for _ in range(num_expected_changes)] + changes = [change_stream.next() for _ in range(num_expected_changes)] # Run a next() to induce an error if one is expected and # there are no changes. if is_error and not changes: @@ -1266,7 +1230,7 @@ def run_scenario(self): def create_tests(): - for dirpath, _, filenames in os.walk(os.path.join(_TEST_PATH, 'legacy')): + for dirpath, _, filenames in os.walk(os.path.join(_TEST_PATH, "legacy")): dirname = os.path.split(dirpath)[-1] for filename in filenames: @@ -1275,31 +1239,25 @@ def create_tests(): test_type = os.path.splitext(filename)[0] - for test in scenario_def['tests']: + for test in scenario_def["tests"]: new_test = create_test(scenario_def, test) new_test = client_context.require_no_mmap(new_test) - if 'minServerVersion' in test: - min_ver = tuple( - int(elt) for - elt in test['minServerVersion'].split('.')) - new_test = client_context.require_version_min(*min_ver)( - new_test) - if 'maxServerVersion' in test: - max_ver = tuple( - int(elt) for - elt in test['maxServerVersion'].split('.')) - new_test = client_context.require_version_max(*max_ver)( - new_test) - - topologies = test['topology'] - new_test = client_context.require_cluster_type(topologies)( - new_test) - - test_name = 'test_%s_%s_%s' % ( + if "minServerVersion" in test: + min_ver = tuple(int(elt) for elt in test["minServerVersion"].split(".")) + new_test = client_context.require_version_min(*min_ver)(new_test) + if "maxServerVersion" in test: + max_ver = tuple(int(elt) for elt in test["maxServerVersion"].split(".")) + new_test = client_context.require_version_max(*max_ver)(new_test) + + topologies = test["topology"] + new_test = client_context.require_cluster_type(topologies)(new_test) + + test_name = "test_%s_%s_%s" % ( dirname, test_type.replace("-", "_"), - str(test['description'].replace(" ", "_"))) + str(test["description"].replace(" ", "_")), + ) new_test.__name__ = test_name setattr(TestAllLegacyScenarios, new_test.__name__, new_test) @@ -1308,10 +1266,13 @@ def create_tests(): create_tests() -globals().update(generate_test_classes( - os.path.join(_TEST_PATH, 'unified'), - module=__name__,)) +globals().update( + generate_test_classes( + os.path.join(_TEST_PATH, "unified"), + module=__name__, + ) +) -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() diff --git a/test/test_client.py b/test/test_client.py index 9ca9989052..0487161b1e 100644 --- a/test/test_client.py +++ b/test/test_client.py @@ -14,6 +14,7 @@ """Test the mongo_client module.""" +import _thread as thread import contextlib import copy import datetime @@ -23,108 +24,116 @@ import socket import struct import sys -import time -import _thread as thread import threading +import time import 
warnings - -from typing import no_type_check, Type +from typing import Type, no_type_check sys.path[0:0] = [""] +from test import ( + HAVE_IPADDRESS, + IntegrationTest, + MockClientTest, + SkipTest, + client_context, + client_knobs, + db_pwd, + db_user, + unittest, +) +from test.pymongo_mocks import MockClient +from test.utils import ( + NTHREADS, + CMAPListener, + FunctionCallRecorder, + assertRaisesExactly, + connected, + delay, + get_pool, + gevent_monkey_patched, + is_greenthread_patched, + lazy_client_trial, + one, + remove_all_users, + rs_client, + rs_or_single_client, + rs_or_single_client_noauth, + single_client, + wait_until, +) + +import pymongo from bson import encode from bson.codec_options import CodecOptions, TypeEncoder, TypeRegistry from bson.son import SON from bson.tz_util import utc -import pymongo from pymongo import event_loggers, message, monitoring from pymongo.client_options import ClientOptions from pymongo.command_cursor import CommandCursor -from pymongo.common import CONNECT_TIMEOUT, _UUID_REPRESENTATIONS +from pymongo.common import _UUID_REPRESENTATIONS, CONNECT_TIMEOUT from pymongo.compression_support import _HAVE_SNAPPY, _HAVE_ZSTD from pymongo.cursor import Cursor, CursorType from pymongo.database import Database from pymongo.driver_info import DriverInfo -from pymongo.errors import (AutoReconnect, - ConfigurationError, - ConnectionFailure, - InvalidName, - InvalidURI, - NetworkTimeout, - OperationFailure, - ServerSelectionTimeoutError, - WriteConcernError, - InvalidOperation) +from pymongo.errors import ( + AutoReconnect, + ConfigurationError, + ConnectionFailure, + InvalidName, + InvalidOperation, + InvalidURI, + NetworkTimeout, + OperationFailure, + ServerSelectionTimeoutError, + WriteConcernError, +) from pymongo.hello import HelloCompat from pymongo.mongo_client import MongoClient -from pymongo.monitoring import (ServerHeartbeatListener, - ServerHeartbeatStartedEvent) -from pymongo.pool import SocketInfo, _METADATA, PoolOptions +from pymongo.monitoring import ServerHeartbeatListener, ServerHeartbeatStartedEvent +from pymongo.pool import _METADATA, PoolOptions, SocketInfo from pymongo.read_preferences import ReadPreference from pymongo.server_description import ServerDescription -from pymongo.server_selectors import (readable_server_selector, - writable_server_selector) +from pymongo.server_selectors import readable_server_selector, writable_server_selector from pymongo.server_type import SERVER_TYPE from pymongo.settings import TOPOLOGY_TYPE from pymongo.srv_resolver import _HAVE_DNSPYTHON from pymongo.topology import _ErrorContext -from pymongo.topology_description import TopologyDescription, _updated_topology_description_srv_polling +from pymongo.topology_description import ( + TopologyDescription, + _updated_topology_description_srv_polling, +) from pymongo.write_concern import WriteConcern -from test import (client_context, - client_knobs, - SkipTest, - unittest, - IntegrationTest, - db_pwd, - db_user, - MockClientTest, - HAVE_IPADDRESS) -from test.pymongo_mocks import MockClient -from test.utils import (assertRaisesExactly, - connected, - CMAPListener, - delay, - FunctionCallRecorder, - get_pool, - gevent_monkey_patched, - is_greenthread_patched, - lazy_client_trial, - NTHREADS, - one, - remove_all_users, - rs_client, - rs_or_single_client, - rs_or_single_client_noauth, - single_client, - wait_until) class ClientUnitTest(unittest.TestCase): """MongoClient tests that don't require a server.""" + client: MongoClient @classmethod 
@client_context.require_connection def setUpClass(cls): - cls.client = rs_or_single_client(connect=False, - serverSelectionTimeoutMS=100) + cls.client = rs_or_single_client(connect=False, serverSelectionTimeoutMS=100) @classmethod def tearDownClass(cls): cls.client.close() def test_keyword_arg_defaults(self): - client = MongoClient(socketTimeoutMS=None, - connectTimeoutMS=20000, - waitQueueTimeoutMS=None, - replicaSet=None, - read_preference=ReadPreference.PRIMARY, - ssl=False, - tlsCertificateKeyFile=None, - tlsAllowInvalidCertificates=True, - tlsCAFile=None, - connect=False, - serverSelectionTimeoutMS=12000) + client = MongoClient( + socketTimeoutMS=None, + connectTimeoutMS=20000, + waitQueueTimeoutMS=None, + replicaSet=None, + read_preference=ReadPreference.PRIMARY, + ssl=False, + tlsCertificateKeyFile=None, + tlsAllowInvalidCertificates=True, + tlsCAFile=None, + connect=False, + serverSelectionTimeoutMS=12000, + ) options = client._MongoClient__options pool_opts = options.pool_options @@ -138,19 +147,17 @@ def test_keyword_arg_defaults(self): self.assertAlmostEqual(12, client.options.server_selection_timeout) def test_connect_timeout(self): - client = MongoClient(connect=False, connectTimeoutMS=None, - socketTimeoutMS=None) + client = MongoClient(connect=False, connectTimeoutMS=None, socketTimeoutMS=None) pool_opts = client._MongoClient__options.pool_options self.assertEqual(None, pool_opts.socket_timeout) self.assertEqual(None, pool_opts.connect_timeout) - client = MongoClient(connect=False, connectTimeoutMS=0, - socketTimeoutMS=0) + client = MongoClient(connect=False, connectTimeoutMS=0, socketTimeoutMS=0) pool_opts = client._MongoClient__options.pool_options self.assertEqual(None, pool_opts.socket_timeout) self.assertEqual(None, pool_opts.connect_timeout) client = MongoClient( - 'mongodb://localhost/?connectTimeoutMS=0&socketTimeoutMS=0', - connect=False) + "mongodb://localhost/?connectTimeoutMS=0&socketTimeoutMS=0", connect=False + ) pool_opts = client._MongoClient__options.pool_options self.assertEqual(None, pool_opts.socket_timeout) self.assertEqual(None, pool_opts.connect_timeout) @@ -168,18 +175,9 @@ def test_max_pool_size_zero(self): MongoClient(maxPoolSize=0) def test_uri_detection(self): - self.assertRaises( - ConfigurationError, - MongoClient, - "/foo") - self.assertRaises( - ConfigurationError, - MongoClient, - "://") - self.assertRaises( - ConfigurationError, - MongoClient, - "foo/") + self.assertRaises(ConfigurationError, MongoClient, "/foo") + self.assertRaises(ConfigurationError, MongoClient, "://") + self.assertRaises(ConfigurationError, MongoClient, "foo/") def test_get_db(self): def make_db(base, name): @@ -199,15 +197,14 @@ def make_db(base, name): def test_get_database(self): codec_options = CodecOptions(tz_aware=True) write_concern = WriteConcern(w=2, j=True) - db = self.client.get_database( - 'foo', codec_options, ReadPreference.SECONDARY, write_concern) - self.assertEqual('foo', db.name) + db = self.client.get_database("foo", codec_options, ReadPreference.SECONDARY, write_concern) + self.assertEqual("foo", db.name) self.assertEqual(codec_options, db.codec_options) self.assertEqual(ReadPreference.SECONDARY, db.read_preference) self.assertEqual(write_concern, db.write_concern) def test_getattr(self): - self.assertTrue(isinstance(self.client['_does_not_exist'], Database)) + self.assertTrue(isinstance(self.client["_does_not_exist"], Database)) with self.assertRaises(AttributeError) as context: self.client._does_not_exist @@ -215,8 +212,7 @@ def test_getattr(self): 
# Message should be: # "AttributeError: MongoClient has no attribute '_does_not_exist'. To # access the _does_not_exist database, use client['_does_not_exist']". - self.assertIn("has no attribute '_does_not_exist'", - str(context.exception)) + self.assertIn("has no attribute '_does_not_exist'", str(context.exception)) def test_iteration(self): def iterate(): @@ -225,108 +221,111 @@ def iterate(): self.assertRaises(TypeError, iterate) def test_get_default_database(self): - c = rs_or_single_client("mongodb://%s:%d/foo" % (client_context.host, - client_context.port), - connect=False) - self.assertEqual(Database(c, 'foo'), c.get_default_database()) + c = rs_or_single_client( + "mongodb://%s:%d/foo" % (client_context.host, client_context.port), connect=False + ) + self.assertEqual(Database(c, "foo"), c.get_default_database()) # Test that default doesn't override the URI value. - self.assertEqual(Database(c, 'foo'), c.get_default_database('bar')) + self.assertEqual(Database(c, "foo"), c.get_default_database("bar")) codec_options = CodecOptions(tz_aware=True) write_concern = WriteConcern(w=2, j=True) - db = c.get_default_database( - None, codec_options, ReadPreference.SECONDARY, write_concern) - self.assertEqual('foo', db.name) + db = c.get_default_database(None, codec_options, ReadPreference.SECONDARY, write_concern) + self.assertEqual("foo", db.name) self.assertEqual(codec_options, db.codec_options) self.assertEqual(ReadPreference.SECONDARY, db.read_preference) self.assertEqual(write_concern, db.write_concern) - c = rs_or_single_client("mongodb://%s:%d/" % (client_context.host, - client_context.port), - connect=False) - self.assertEqual(Database(c, 'foo'), c.get_default_database('foo')) + c = rs_or_single_client( + "mongodb://%s:%d/" % (client_context.host, client_context.port), connect=False + ) + self.assertEqual(Database(c, "foo"), c.get_default_database("foo")) def test_get_default_database_error(self): # URI with no database. - c = rs_or_single_client("mongodb://%s:%d/" % (client_context.host, - client_context.port), - connect=False) + c = rs_or_single_client( + "mongodb://%s:%d/" % (client_context.host, client_context.port), connect=False + ) self.assertRaises(ConfigurationError, c.get_default_database) def test_get_default_database_with_authsource(self): # Ensure we distinguish database name from authSource. - uri = "mongodb://%s:%d/foo?authSource=src" % ( - client_context.host, client_context.port) + uri = "mongodb://%s:%d/foo?authSource=src" % (client_context.host, client_context.port) c = rs_or_single_client(uri, connect=False) - self.assertEqual(Database(c, 'foo'), c.get_default_database()) + self.assertEqual(Database(c, "foo"), c.get_default_database()) def test_get_database_default(self): - c = rs_or_single_client("mongodb://%s:%d/foo" % (client_context.host, - client_context.port), - connect=False) - self.assertEqual(Database(c, 'foo'), c.get_database()) + c = rs_or_single_client( + "mongodb://%s:%d/foo" % (client_context.host, client_context.port), connect=False + ) + self.assertEqual(Database(c, "foo"), c.get_database()) def test_get_database_default_error(self): # URI with no database. - c = rs_or_single_client("mongodb://%s:%d/" % (client_context.host, - client_context.port), - connect=False) + c = rs_or_single_client( + "mongodb://%s:%d/" % (client_context.host, client_context.port), connect=False + ) self.assertRaises(ConfigurationError, c.get_database) def test_get_database_default_with_authsource(self): # Ensure we distinguish database name from authSource. 
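# Background: authSource only selects where the credentials are verified;
# the "/foo" path segment still names the default database. A sketch with
# hypothetical credentials:
#
#   c = MongoClient("mongodb://u:p@host/foo?authSource=admin", connect=False)
#   assert c.get_default_database().name == "foo"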
- uri = "mongodb://%s:%d/foo?authSource=src" % ( - client_context.host, client_context.port) + uri = "mongodb://%s:%d/foo?authSource=src" % (client_context.host, client_context.port) c = rs_or_single_client(uri, connect=False) - self.assertEqual(Database(c, 'foo'), c.get_database()) + self.assertEqual(Database(c, "foo"), c.get_database()) def test_primary_read_pref_with_tags(self): # No tags allowed with "primary". with self.assertRaises(ConfigurationError): - MongoClient('mongodb://host/?readpreferencetags=dc:east') + MongoClient("mongodb://host/?readpreferencetags=dc:east") with self.assertRaises(ConfigurationError): - MongoClient('mongodb://host/?' - 'readpreference=primary&readpreferencetags=dc:east') + MongoClient("mongodb://host/?" "readpreference=primary&readpreferencetags=dc:east") def test_read_preference(self): c = rs_or_single_client( - "mongodb://host", connect=False, - readpreference=ReadPreference.NEAREST.mongos_mode) + "mongodb://host", connect=False, readpreference=ReadPreference.NEAREST.mongos_mode + ) self.assertEqual(c.read_preference, ReadPreference.NEAREST) def test_metadata(self): metadata = copy.deepcopy(_METADATA) - metadata['application'] = {'name': 'foobar'} - client = MongoClient( - "mongodb://foo:27017/?appname=foobar&connect=false") + metadata["application"] = {"name": "foobar"} + client = MongoClient("mongodb://foo:27017/?appname=foobar&connect=false") options = client._MongoClient__options self.assertEqual(options.pool_options.metadata, metadata) - client = MongoClient('foo', 27017, appname='foobar', connect=False) + client = MongoClient("foo", 27017, appname="foobar", connect=False) options = client._MongoClient__options self.assertEqual(options.pool_options.metadata, metadata) # No error - MongoClient(appname='x' * 128) - self.assertRaises(ValueError, MongoClient, appname='x' * 129) + MongoClient(appname="x" * 128) + self.assertRaises(ValueError, MongoClient, appname="x" * 129) # Bad "driver" options. - self.assertRaises(TypeError, DriverInfo, 'Foo', 1, 'a') - self.assertRaises(TypeError, DriverInfo, version="1", platform='a') + self.assertRaises(TypeError, DriverInfo, "Foo", 1, "a") + self.assertRaises(TypeError, DriverInfo, version="1", platform="a") self.assertRaises(TypeError, DriverInfo) self.assertRaises(TypeError, MongoClient, driver=1) - self.assertRaises(TypeError, MongoClient, driver='abc') - self.assertRaises(TypeError, MongoClient, driver=('Foo', '1', 'a')) + self.assertRaises(TypeError, MongoClient, driver="abc") + self.assertRaises(TypeError, MongoClient, driver=("Foo", "1", "a")) # Test appending to driver info. 
- metadata['driver']['name'] = 'PyMongo|FooDriver' - metadata['driver']['version'] = '%s|1.2.3' % ( - _METADATA['driver']['version'],) - client = MongoClient('foo', 27017, appname='foobar', - driver=DriverInfo('FooDriver', '1.2.3', None), connect=False) + metadata["driver"]["name"] = "PyMongo|FooDriver" + metadata["driver"]["version"] = "%s|1.2.3" % (_METADATA["driver"]["version"],) + client = MongoClient( + "foo", + 27017, + appname="foobar", + driver=DriverInfo("FooDriver", "1.2.3", None), + connect=False, + ) options = client._MongoClient__options self.assertEqual(options.pool_options.metadata, metadata) - metadata['platform'] = '%s|FooPlatform' % ( - _METADATA['platform'],) - client = MongoClient('foo', 27017, appname='foobar', - driver=DriverInfo('FooDriver', '1.2.3', 'FooPlatform'), connect=False) + metadata["platform"] = "%s|FooPlatform" % (_METADATA["platform"],) + client = MongoClient( + "foo", + 27017, + appname="foobar", + driver=DriverInfo("FooDriver", "1.2.3", "FooPlatform"), + connect=False, + ) options = client._MongoClient__options self.assertEqual(options.pool_options.metadata, metadata) @@ -334,12 +333,14 @@ def test_kwargs_codec_options(self): class MyFloatType(object): def __init__(self, x): self.__x = x + @property def x(self): return self.__x class MyFloatAsIntEncoder(TypeEncoder): python_type = MyFloatType + def transform_python(self, value): return int(value) @@ -347,8 +348,8 @@ def transform_python(self, value): document_class: Type[SON] = SON type_registry = TypeRegistry([MyFloatAsIntEncoder()]) tz_aware = True - uuid_representation_label = 'javaLegacy' - unicode_decode_error_handler = 'ignore' + uuid_representation_label = "javaLegacy" + unicode_decode_error_handler = "ignore" tzinfo = utc c = MongoClient( document_class=document_class, @@ -357,63 +358,62 @@ def transform_python(self, value): uuidrepresentation=uuid_representation_label, unicode_decode_error_handler=unicode_decode_error_handler, tzinfo=tzinfo, - connect=False + connect=False, ) self.assertEqual(c.codec_options.document_class, document_class) self.assertEqual(c.codec_options.type_registry, type_registry) self.assertEqual(c.codec_options.tz_aware, tz_aware) self.assertEqual( - c.codec_options.uuid_representation, - _UUID_REPRESENTATIONS[uuid_representation_label]) - self.assertEqual( - c.codec_options.unicode_decode_error_handler, - unicode_decode_error_handler) + c.codec_options.uuid_representation, _UUID_REPRESENTATIONS[uuid_representation_label] + ) + self.assertEqual(c.codec_options.unicode_decode_error_handler, unicode_decode_error_handler) self.assertEqual(c.codec_options.tzinfo, tzinfo) def test_uri_codec_options(self): # Ensure codec options are passed in correctly - uuid_representation_label = 'javaLegacy' - unicode_decode_error_handler = 'ignore' - uri = ("mongodb://%s:%d/foo?tz_aware=true&uuidrepresentation=" - "%s&unicode_decode_error_handler=%s" % ( - client_context.host, - client_context.port, - uuid_representation_label, - unicode_decode_error_handler)) + uuid_representation_label = "javaLegacy" + unicode_decode_error_handler = "ignore" + uri = ( + "mongodb://%s:%d/foo?tz_aware=true&uuidrepresentation=" + "%s&unicode_decode_error_handler=%s" + % ( + client_context.host, + client_context.port, + uuid_representation_label, + unicode_decode_error_handler, + ) + ) c = MongoClient(uri, connect=False) self.assertEqual(c.codec_options.tz_aware, True) self.assertEqual( - c.codec_options.uuid_representation, - _UUID_REPRESENTATIONS[uuid_representation_label]) - self.assertEqual( - 
c.codec_options.unicode_decode_error_handler, - unicode_decode_error_handler) + c.codec_options.uuid_representation, _UUID_REPRESENTATIONS[uuid_representation_label] + ) + self.assertEqual(c.codec_options.unicode_decode_error_handler, unicode_decode_error_handler) def test_uri_option_precedence(self): # Ensure kwarg options override connection string options. - uri = ("mongodb://localhost/?ssl=true&replicaSet=name" - "&readPreference=primary") - c = MongoClient(uri, ssl=False, replicaSet="newname", - readPreference="secondaryPreferred") + uri = "mongodb://localhost/?ssl=true&replicaSet=name" "&readPreference=primary" + c = MongoClient(uri, ssl=False, replicaSet="newname", readPreference="secondaryPreferred") clopts = c._MongoClient__options opts = clopts._options - self.assertEqual(opts['tls'], False) + self.assertEqual(opts["tls"], False) self.assertEqual(clopts.replica_set_name, "newname") - self.assertEqual( - clopts.read_preference, ReadPreference.SECONDARY_PREFERRED) + self.assertEqual(clopts.read_preference, ReadPreference.SECONDARY_PREFERRED) - @unittest.skipUnless( - _HAVE_DNSPYTHON, "DNS-related tests need dnspython to be installed") + @unittest.skipUnless(_HAVE_DNSPYTHON, "DNS-related tests need dnspython to be installed") def test_connection_timeout_ms_propagates_to_DNS_resolver(self): # Patch the resolver. from pymongo.srv_resolver import _resolve + patched_resolver = FunctionCallRecorder(_resolve) pymongo.srv_resolver._resolve = patched_resolver + def reset_resolver(): pymongo.srv_resolver._resolve = _resolve + self.addCleanup(reset_resolver) # Setup. @@ -427,7 +427,7 @@ def test_scenario(args, kwargs, expected_value): patched_resolver.reset() MongoClient(*args, **kwargs) for _, kw in patched_resolver.call_list(): - self.assertAlmostEqual(kw['lifetime'], expected_value) + self.assertAlmostEqual(kw["lifetime"], expected_value) # No timeout specified. test_scenario((base_uri,), {}, CONNECT_TIMEOUT) @@ -436,7 +436,7 @@ def test_scenario(args, kwargs, expected_value): test_scenario((uri_with_timeout,), {}, expected_uri_value) # Timeout only specified in keyword arguments. - kwarg = {'connectTimeoutMS': connectTimeoutMS} + kwarg = {"connectTimeoutMS": connectTimeoutMS} test_scenario((base_uri,), kwarg, expected_kw_value) # Timeout specified in both kwargs and connection string. @@ -445,23 +445,27 @@ def test_scenario(args, kwargs, expected_value): def test_uri_security_options(self): # Ensure that we don't silently override security-related options. with self.assertRaises(InvalidURI): - MongoClient('mongodb://localhost/?ssl=true', tls=False, - connect=False) + MongoClient("mongodb://localhost/?ssl=true", tls=False, connect=False) # Matching SSL and TLS options should not cause errors. - c = MongoClient('mongodb://localhost/?ssl=false', tls=False, - connect=False) - self.assertEqual(c._MongoClient__options._options['tls'], False) + c = MongoClient("mongodb://localhost/?ssl=false", tls=False, connect=False) + self.assertEqual(c._MongoClient__options._options["tls"], False) # Conflicting tlsInsecure options should raise an error. with self.assertRaises(InvalidURI): - MongoClient('mongodb://localhost/?tlsInsecure=true', - connect=False, tlsAllowInvalidHostnames=True) + MongoClient( + "mongodb://localhost/?tlsInsecure=true", + connect=False, + tlsAllowInvalidHostnames=True, + ) # Conflicting legacy tlsInsecure options should also raise an error. 
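# Background: tlsInsecure=true implies tlsAllowInvalidCertificates=true and
# tlsAllowInvalidHostnames=true, so combining it with an explicit
# contradictory value (as below) raises InvalidURI rather than silently
# overriding one setting with the other.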
with self.assertRaises(InvalidURI): - MongoClient('mongodb://localhost/?tlsInsecure=true', - connect=False, tlsAllowInvalidCertificates=False) + MongoClient( + "mongodb://localhost/?tlsInsecure=true", + connect=False, + tlsAllowInvalidCertificates=False, + ) # Conflicting kwargs should raise InvalidURI with self.assertRaises(InvalidURI): @@ -470,11 +474,13 @@ def test_uri_security_options(self): def test_event_listeners(self): c = MongoClient(event_listeners=[], connect=False) self.assertEqual(c.options.event_listeners, []) - listeners = [event_loggers.CommandLogger(), - event_loggers.HeartbeatLogger(), - event_loggers.ServerLogger(), - event_loggers.TopologyLogger(), - event_loggers.ConnectionPoolLogger()] + listeners = [ + event_loggers.CommandLogger(), + event_loggers.HeartbeatLogger(), + event_loggers.ServerLogger(), + event_loggers.TopologyLogger(), + event_loggers.ConnectionPoolLogger(), + ] c = MongoClient(event_listeners=listeners, connect=False) self.assertEqual(c.options.event_listeners, listeners) @@ -491,16 +497,19 @@ def test_client_options(self): class TestClient(IntegrationTest): def test_multiple_uris(self): with self.assertRaises(ConfigurationError): - MongoClient(host=['mongodb+srv://cluster-a.abc12.mongodb.net', - 'mongodb+srv://cluster-b.abc12.mongodb.net', - 'mongodb+srv://cluster-c.abc12.mongodb.net']) + MongoClient( + host=[ + "mongodb+srv://cluster-a.abc12.mongodb.net", + "mongodb+srv://cluster-b.abc12.mongodb.net", + "mongodb+srv://cluster-c.abc12.mongodb.net", + ] + ) def test_max_idle_time_reaper_default(self): with client_knobs(kill_cursor_frequency=0.1): # Assert reaper doesn't remove sockets when maxIdleTimeMS not set client = rs_or_single_client() - server = client._get_topology().select_server( - readable_server_selector) + server = client._get_topology().select_server(readable_server_selector) with server._pool.get_socket() as sock_info: pass self.assertEqual(1, len(server._pool.sockets)) @@ -510,89 +519,78 @@ def test_max_idle_time_reaper_default(self): def test_max_idle_time_reaper_removes_stale_minPoolSize(self): with client_knobs(kill_cursor_frequency=0.1): # Assert reaper removes idle socket and replaces it with a new one - client = rs_or_single_client(maxIdleTimeMS=500, - minPoolSize=1) - server = client._get_topology().select_server( - readable_server_selector) + client = rs_or_single_client(maxIdleTimeMS=500, minPoolSize=1) + server = client._get_topology().select_server(readable_server_selector) with server._pool.get_socket() as sock_info: pass # When the reaper runs at the same time as the get_socket, two # sockets could be created and checked into the pool. self.assertGreaterEqual(len(server._pool.sockets), 1) - wait_until(lambda: sock_info not in server._pool.sockets, - "remove stale socket") - wait_until(lambda: 1 <= len(server._pool.sockets), - "replace stale socket") + wait_until(lambda: sock_info not in server._pool.sockets, "remove stale socket") + wait_until(lambda: 1 <= len(server._pool.sockets), "replace stale socket") client.close() def test_max_idle_time_reaper_does_not_exceed_maxPoolSize(self): with client_knobs(kill_cursor_frequency=0.1): # Assert reaper respects maxPoolSize when adding new sockets. 
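# Note: client_knobs(kill_cursor_frequency=0.1) speeds up the client's
# periodic background executor, which also performs pool maintenance, so
# stale idle sockets are reaped quickly enough to observe within the test.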
- client = rs_or_single_client(maxIdleTimeMS=500, - minPoolSize=1, - maxPoolSize=1) - server = client._get_topology().select_server( - readable_server_selector) + client = rs_or_single_client(maxIdleTimeMS=500, minPoolSize=1, maxPoolSize=1) + server = client._get_topology().select_server(readable_server_selector) with server._pool.get_socket() as sock_info: pass # When the reaper runs at the same time as the get_socket, # maxPoolSize=1 should prevent two sockets from being created. self.assertEqual(1, len(server._pool.sockets)) - wait_until(lambda: sock_info not in server._pool.sockets, - "remove stale socket") - wait_until(lambda: 1 == len(server._pool.sockets), - "replace stale socket") + wait_until(lambda: sock_info not in server._pool.sockets, "remove stale socket") + wait_until(lambda: 1 == len(server._pool.sockets), "replace stale socket") client.close() def test_max_idle_time_reaper_removes_stale(self): with client_knobs(kill_cursor_frequency=0.1): # Assert reaper has removed idle socket and NOT replaced it client = rs_or_single_client(maxIdleTimeMS=500) - server = client._get_topology().select_server( - readable_server_selector) + server = client._get_topology().select_server(readable_server_selector) with server._pool.get_socket() as sock_info_one: pass # Assert that the pool does not close sockets prematurely. - time.sleep(.300) + time.sleep(0.300) with server._pool.get_socket() as sock_info_two: pass self.assertIs(sock_info_one, sock_info_two) wait_until( lambda: 0 == len(server._pool.sockets), - "stale socket reaped and new one NOT added to the pool") + "stale socket reaped and new one NOT added to the pool", + ) client.close() def test_min_pool_size(self): - with client_knobs(kill_cursor_frequency=.1): + with client_knobs(kill_cursor_frequency=0.1): client = rs_or_single_client() - server = client._get_topology().select_server( - readable_server_selector) + server = client._get_topology().select_server(readable_server_selector) self.assertEqual(0, len(server._pool.sockets)) # Assert that pool started up at minPoolSize client = rs_or_single_client(minPoolSize=10) - server = client._get_topology().select_server( - readable_server_selector) - wait_until(lambda: 10 == len(server._pool.sockets), - "pool initialized with 10 sockets") + server = client._get_topology().select_server(readable_server_selector) + wait_until(lambda: 10 == len(server._pool.sockets), "pool initialized with 10 sockets") # Assert that if a socket is closed, a new one takes its place with server._pool.get_socket() as sock_info: sock_info.close_socket(None) - wait_until(lambda: 10 == len(server._pool.sockets), - "a closed socket gets replaced from the pool") + wait_until( + lambda: 10 == len(server._pool.sockets), + "a closed socket gets replaced from the pool", + ) self.assertFalse(sock_info in server._pool.sockets) def test_max_idle_time_checkout(self): # Use high frequency to test _get_socket_no_auth. with client_knobs(kill_cursor_frequency=99999999): client = rs_or_single_client(maxIdleTimeMS=500) - server = client._get_topology().select_server( - readable_server_selector) + server = client._get_topology().select_server(readable_server_selector) with server._pool.get_socket() as sock_info: pass self.assertEqual(1, len(server._pool.sockets)) - time.sleep(1) # Sleep so that the socket becomes stale. + time.sleep(1) # Sleep so that the socket becomes stale. 
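# With maxIdleTimeMS=500, a socket idle for ~1 second counts as stale on the
# next checkout: the pool discards it and hands back a fresh connection,
# which is why the two sockets compared below must differ.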
with server._pool.get_socket() as new_sock_info: self.assertNotEqual(sock_info, new_sock_info) @@ -602,8 +600,7 @@ def test_max_idle_time_checkout(self): # Test that sockets are reused if maxIdleTimeMS is not set. client = rs_or_single_client() - server = client._get_topology().select_server( - readable_server_selector) + server = client._get_topology().select_server(readable_server_selector) with server._pool.get_socket() as sock_info: pass self.assertEqual(1, len(server._pool.sockets)) @@ -619,15 +616,14 @@ def test_constants(self): host, port = client_context.host, client_context.port kwargs: dict = client_context.default_client_options.copy() if client_context.auth_enabled: - kwargs['username'] = db_user - kwargs['password'] = db_pwd + kwargs["username"] = db_user + kwargs["password"] = db_pwd # Set bad defaults. MongoClient.HOST = "somedomainthatdoesntexist.org" MongoClient.PORT = 123456789 with self.assertRaises(AutoReconnect): - connected(MongoClient(serverSelectionTimeoutMS=10, - **kwargs)) + connected(MongoClient(serverSelectionTimeoutMS=10, **kwargs)) # Override the defaults. No error. connected(MongoClient(host, port, **kwargs)) @@ -660,7 +656,7 @@ def test_init_disconnected(self): self.assertIsInstance(c.topology_description, TopologyDescription) self.assertEqual(c.topology_description, c._topology._description) self.assertIsNone(c.address) # PYTHON-2981 - c.admin.command('ping') # connect + c.admin.command("ping") # connect if client_context.is_rs: # The primary's host and port are from the replica set config. self.assertIsNotNone(c.address) @@ -668,66 +664,68 @@ def test_init_disconnected(self): self.assertEqual(c.address, (host, port)) bad_host = "somedomainthatdoesntexist.org" - c = MongoClient(bad_host, port, connectTimeoutMS=1, - serverSelectionTimeoutMS=10) + c = MongoClient(bad_host, port, connectTimeoutMS=1, serverSelectionTimeoutMS=10) self.assertRaises(ConnectionFailure, c.pymongo_test.test.find_one) def test_init_disconnected_with_auth(self): uri = "mongodb://user:pass@somedomainthatdoesntexist" - c = MongoClient(uri, connectTimeoutMS=1, - serverSelectionTimeoutMS=10) + c = MongoClient(uri, connectTimeoutMS=1, serverSelectionTimeoutMS=10) self.assertRaises(ConnectionFailure, c.pymongo_test.test.find_one) def test_equality(self): - seed = '%s:%s' % list(self.client._topology_settings.seeds)[0] + seed = "%s:%s" % list(self.client._topology_settings.seeds)[0] c = rs_or_single_client(seed, connect=False) self.addCleanup(c.close) self.assertEqual(client_context.client, c) # Explicitly test inequality self.assertFalse(client_context.client != c) - c = rs_or_single_client('invalid.com', connect=False) + c = rs_or_single_client("invalid.com", connect=False) self.addCleanup(c.close) self.assertNotEqual(client_context.client, c) self.assertTrue(client_context.client != c) # Seeds differ: - self.assertNotEqual(MongoClient('a', connect=False), - MongoClient('b', connect=False)) + self.assertNotEqual(MongoClient("a", connect=False), MongoClient("b", connect=False)) # Same seeds but out of order still compares equal: - self.assertEqual(MongoClient(['a', 'b', 'c'], connect=False), - MongoClient(['c', 'a', 'b'], connect=False)) + self.assertEqual( + MongoClient(["a", "b", "c"], connect=False), MongoClient(["c", "a", "b"], connect=False) + ) def test_hashable(self): - seed = '%s:%s' % list(self.client._topology_settings.seeds)[0] + seed = "%s:%s" % list(self.client._topology_settings.seeds)[0] c = rs_or_single_client(seed, connect=False) self.addCleanup(c.close) self.assertIn(c, 
{client_context.client}) - c = rs_or_single_client('invalid.com', connect=False) + c = rs_or_single_client("invalid.com", connect=False) self.addCleanup(c.close) self.assertNotIn(c, {client_context.client}) def test_host_w_port(self): with self.assertRaises(ValueError): - connected(MongoClient("%s:1234567" % (client_context.host,), - connectTimeoutMS=1, - serverSelectionTimeoutMS=10)) + connected( + MongoClient( + "%s:1234567" % (client_context.host,), + connectTimeoutMS=1, + serverSelectionTimeoutMS=10, + ) + ) def test_repr(self): # Used to test 'eval' below. import bson client = MongoClient( - 'mongodb://localhost:27017,localhost:27018/?replicaSet=replset' - '&connectTimeoutMS=12345&w=1&wtimeoutms=100', - connect=False, document_class=SON) + "mongodb://localhost:27017,localhost:27018/?replicaSet=replset" + "&connectTimeoutMS=12345&w=1&wtimeoutms=100", + connect=False, + document_class=SON, + ) the_repr = repr(client) - self.assertIn('MongoClient(host=', the_repr) + self.assertIn("MongoClient(host=", the_repr) self.assertIn( - "document_class=bson.son.SON, " - "tz_aware=False, " - "connect=False, ", - the_repr) + "document_class=bson.son.SON, " "tz_aware=False, " "connect=False, ", the_repr + ) self.assertIn("connecttimeoutms=12345", the_repr) self.assertIn("replicaset='replset'", the_repr) self.assertIn("w=1", the_repr) @@ -735,20 +733,18 @@ def test_repr(self): self.assertEqual(eval(the_repr), client) - client = MongoClient("localhost:27017,localhost:27018", - replicaSet='replset', - connectTimeoutMS=12345, - socketTimeoutMS=None, - w=1, - wtimeoutms=100, - connect=False) + client = MongoClient( + "localhost:27017,localhost:27018", + replicaSet="replset", + connectTimeoutMS=12345, + socketTimeoutMS=None, + w=1, + wtimeoutms=100, + connect=False, + ) the_repr = repr(client) - self.assertIn('MongoClient(host=', the_repr) - self.assertIn( - "document_class=dict, " - "tz_aware=False, " - "connect=False, ", - the_repr) + self.assertIn("MongoClient(host=", the_repr) + self.assertIn("document_class=dict, " "tz_aware=False, " "connect=False, ", the_repr) self.assertIn("connecttimeoutms=12345", the_repr) self.assertIn("replicaset='replset'", the_repr) self.assertIn("sockettimeoutms=None", the_repr) @@ -758,11 +754,10 @@ def test_repr(self): self.assertEqual(eval(the_repr), client) def test_getters(self): - wait_until(lambda: client_context.nodes == self.client.nodes, - "find all nodes") + wait_until(lambda: client_context.nodes == self.client.nodes, "find all nodes") def test_list_databases(self): - cmd_docs = self.client.admin.command('listDatabases')['databases'] + cmd_docs = self.client.admin.command("listDatabases")["databases"] cursor = self.client.list_databases() self.assertIsInstance(cursor, CommandCursor) helper_docs = list(cursor) @@ -809,7 +804,7 @@ def test_drop_database(self): if client_context.is_rs: wc_client = rs_or_single_client(w=len(client_context.nodes) + 1) with self.assertRaises(WriteConcernError): - wc_client.drop_database('pymongo_test2') + wc_client.drop_database("pymongo_test2") self.client.drop_database(self.client.pymongo_test2) dbs = self.client.list_database_names() @@ -823,7 +818,7 @@ def test_close(self): self.assertRaises(InvalidOperation, coll.count_documents, {}) def test_close_kills_cursors(self): - if sys.platform.startswith('java'): + if sys.platform.startswith("java"): # We can't figure out how to make this test reliable with Jython. 
raise SkipTest("Can't test with Jython") test_client = rs_or_single_client() @@ -868,7 +863,7 @@ def test_close_stops_kill_cursors_thread(self): self.assertTrue(client._kill_cursors_executor._stopped) # Reusing the closed client should raise an InvalidOperation error. - self.assertRaises(InvalidOperation, client.admin.command, 'ping') + self.assertRaises(InvalidOperation, client.admin.command, "ping") # Thread is still stopped. self.assertTrue(client._kill_cursors_executor._stopped) @@ -882,7 +877,7 @@ def test_uri_connect_option(self): self.assertFalse(kc_thread and kc_thread.is_alive()) # Using the client should open topology and start the thread. - client.admin.command('ping') + client.admin.command("ping") self.assertTrue(client._topology._opened) kc_thread = client._kill_cursors_executor._thread self.assertTrue(kc_thread and kc_thread.is_alive()) @@ -921,16 +916,13 @@ def test_auth_from_uri(self): self.addCleanup(client_context.drop_user, "admin", "admin") self.addCleanup(remove_all_users, self.client.pymongo_test) - client_context.create_user( - "pymongo_test", "user", "pass", roles=['userAdmin', 'readWrite']) + client_context.create_user("pymongo_test", "user", "pass", roles=["userAdmin", "readWrite"]) with self.assertRaises(OperationFailure): - connected(rs_or_single_client_noauth( - "mongodb://a:b@%s:%d" % (host, port))) + connected(rs_or_single_client_noauth("mongodb://a:b@%s:%d" % (host, port))) # No error. - connected(rs_or_single_client_noauth( - "mongodb://admin:pass@%s:%d" % (host, port))) + connected(rs_or_single_client_noauth("mongodb://admin:pass@%s:%d" % (host, port))) # Wrong database. uri = "mongodb://admin:pass@%s:%d/pymongo_test" % (host, port) @@ -938,21 +930,21 @@ def test_auth_from_uri(self): connected(rs_or_single_client_noauth(uri)) # No error. - connected(rs_or_single_client_noauth( - "mongodb://user:pass@%s:%d/pymongo_test" % (host, port))) + connected( + rs_or_single_client_noauth("mongodb://user:pass@%s:%d/pymongo_test" % (host, port)) + ) # Auth with lazy connection. rs_or_single_client_noauth( - "mongodb://user:pass@%s:%d/pymongo_test" % (host, port), - connect=False).pymongo_test.test.find_one() + "mongodb://user:pass@%s:%d/pymongo_test" % (host, port), connect=False + ).pymongo_test.test.find_one() # Wrong password. 
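# Note: with connect=False nothing is verified up front; the bad credentials
# only surface as an OperationFailure on the first real operation, as the
# assertion below checks.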
bad_client = rs_or_single_client_noauth( - "mongodb://user:wrong@%s:%d/pymongo_test" % (host, port), - connect=False) + "mongodb://user:wrong@%s:%d/pymongo_test" % (host, port), connect=False + ) - self.assertRaises(OperationFailure, - bad_client.pymongo_test.test.find_one) + self.assertRaises(OperationFailure, bad_client.pymongo_test.test.find_one) @client_context.require_auth def test_username_and_password(self): @@ -971,26 +963,23 @@ def test_username_and_password(self): c.server_info() with self.assertRaises(OperationFailure): - rs_or_single_client_noauth( - username="ad min", password="foo").server_info() + rs_or_single_client_noauth(username="ad min", password="foo").server_info() @client_context.require_auth def test_lazy_auth_raises_operation_failure(self): lazy_client = rs_or_single_client_noauth( - "mongodb://user:wrong@%s/pymongo_test" % (client_context.host,), - connect=False) + "mongodb://user:wrong@%s/pymongo_test" % (client_context.host,), connect=False + ) - assertRaisesExactly( - OperationFailure, lazy_client.test.collection.find_one) + assertRaisesExactly(OperationFailure, lazy_client.test.collection.find_one) @client_context.require_no_tls def test_unix_socket(self): if not hasattr(socket, "AF_UNIX"): raise SkipTest("UNIX-sockets are not supported on this system") - mongodb_socket = '/tmp/mongodb-%d.sock' % (client_context.port,) - encoded_socket = ( - '%2Ftmp%2F' + 'mongodb-%d.sock' % (client_context.port,)) + mongodb_socket = "/tmp/mongodb-%d.sock" % (client_context.port,) + encoded_socket = "%2Ftmp%2F" + "mongodb-%d.sock" % (client_context.port,) if not os.access(mongodb_socket, os.R_OK): raise SkipTest("Socket file is not accessible") @@ -1006,8 +995,9 @@ def test_unix_socket(self): # Confirm it fails with a missing socket. self.assertRaises( ConnectionFailure, - connected, MongoClient("mongodb://%2Ftmp%2Fnon-existent.sock", - serverSelectionTimeoutMS=100)) + connected, + MongoClient("mongodb://%2Ftmp%2Fnon-existent.sock", serverSelectionTimeoutMS=100), + ) def test_document_class(self): c = self.client @@ -1029,7 +1019,8 @@ def test_timeouts(self): connectTimeoutMS=10500, socketTimeoutMS=10500, maxIdleTimeMS=10500, - serverSelectionTimeoutMS=10500) + serverSelectionTimeoutMS=10500, + ) self.assertEqual(10.5, get_pool(client).opts.connect_timeout) self.assertEqual(10.5, get_pool(client).opts.socket_timeout) self.assertEqual(10.5, get_pool(client).opts.max_idle_time_seconds) @@ -1046,14 +1037,11 @@ def test_socket_timeout_ms_validation(self): c = connected(rs_or_single_client(socketTimeoutMS=0)) self.assertEqual(None, get_pool(c).opts.socket_timeout) - self.assertRaises(ValueError, - rs_or_single_client, socketTimeoutMS=-1) + self.assertRaises(ValueError, rs_or_single_client, socketTimeoutMS=-1) - self.assertRaises(ValueError, - rs_or_single_client, socketTimeoutMS=1e10) + self.assertRaises(ValueError, rs_or_single_client, socketTimeoutMS=1e10) - self.assertRaises(ValueError, - rs_or_single_client, socketTimeoutMS='foo') + self.assertRaises(ValueError, rs_or_single_client, socketTimeoutMS="foo") def test_socket_timeout(self): no_timeout = self.client @@ -1069,6 +1057,7 @@ def test_socket_timeout(self): def get_x(db): doc = next(db.test.find().where(where_func)) return doc["x"] + self.assertEqual(1, get_x(no_timeout.pymongo_test)) self.assertRaises(NetworkTimeout, get_x, timeout.pymongo_test) @@ -1079,28 +1068,23 @@ def test_server_selection_timeout(self): client = MongoClient(serverSelectionTimeoutMS=0, connect=False) self.assertAlmostEqual(0, 
client.options.server_selection_timeout) - self.assertRaises(ValueError, MongoClient, - serverSelectionTimeoutMS="foo", connect=False) - self.assertRaises(ValueError, MongoClient, - serverSelectionTimeoutMS=-1, connect=False) - self.assertRaises(ConfigurationError, MongoClient, - serverSelectionTimeoutMS=None, connect=False) + self.assertRaises(ValueError, MongoClient, serverSelectionTimeoutMS="foo", connect=False) + self.assertRaises(ValueError, MongoClient, serverSelectionTimeoutMS=-1, connect=False) + self.assertRaises( + ConfigurationError, MongoClient, serverSelectionTimeoutMS=None, connect=False + ) - client = MongoClient( - 'mongodb://localhost/?serverSelectionTimeoutMS=100', connect=False) + client = MongoClient("mongodb://localhost/?serverSelectionTimeoutMS=100", connect=False) self.assertAlmostEqual(0.1, client.options.server_selection_timeout) - client = MongoClient( - 'mongodb://localhost/?serverSelectionTimeoutMS=0', connect=False) + client = MongoClient("mongodb://localhost/?serverSelectionTimeoutMS=0", connect=False) self.assertAlmostEqual(0, client.options.server_selection_timeout) # Test invalid timeout in URI ignored and set to default. - client = MongoClient( - 'mongodb://localhost/?serverSelectionTimeoutMS=-1', connect=False) + client = MongoClient("mongodb://localhost/?serverSelectionTimeoutMS=-1", connect=False) self.assertAlmostEqual(30, client.options.server_selection_timeout) - client = MongoClient( - 'mongodb://localhost/?serverSelectionTimeoutMS=', connect=False) + client = MongoClient("mongodb://localhost/?serverSelectionTimeoutMS=", connect=False) self.assertAlmostEqual(30, client.options.server_selection_timeout) def test_waitQueueTimeoutMS(self): @@ -1110,13 +1094,12 @@ def test_waitQueueTimeoutMS(self): def test_socketKeepAlive(self): pool = get_pool(self.client) with pool.get_socket() as sock_info: - keepalive = sock_info.sock.getsockopt(socket.SOL_SOCKET, - socket.SO_KEEPALIVE) + keepalive = sock_info.sock.getsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE) self.assertTrue(keepalive) @no_type_check def test_tz_aware(self): - self.assertRaises(ValueError, MongoClient, tz_aware='foo') + self.assertRaises(ValueError, MongoClient, tz_aware="foo") aware = rs_or_single_client(tz_aware=True) naive = self.client @@ -1129,7 +1112,8 @@ def test_tz_aware(self): self.assertEqual(utc, aware.pymongo_test.test.find_one()["x"].tzinfo) self.assertEqual( aware.pymongo_test.test.find_one()["x"].replace(tzinfo=None), - naive.pymongo_test.test.find_one()["x"]) + naive.pymongo_test.test.find_one()["x"], + ) @client_context.require_ipv6 def test_ipv6(self): @@ -1144,7 +1128,7 @@ def test_ipv6(self): uri = "mongodb://%s[::1]:%d" % (auth_str, client_context.port) if client_context.is_rs: - uri += '/?replicaSet=' + (client_context.replica_set_name or "") + uri += "/?replicaSet=" + (client_context.replica_set_name or "") client = rs_or_single_client_noauth(uri) client.pymongo_test.test.insert_one({"dummy": "object"}) @@ -1174,7 +1158,7 @@ def test_contextlib(self): client.pymongo_test.test.find_one() def test_interrupt_signal(self): - if sys.platform.startswith('java'): + if sys.platform.startswith("java"): # We can't figure out how to raise an exception on a thread that's # blocked on a socket, whether that's the main thread or a worker, # without simply killing the whole thread in Jython. 
This suggests @@ -1191,8 +1175,8 @@ def test_interrupt_signal(self): where = delay(1.5) # Need exactly 1 document so find() will execute its $where clause once - db.drop_collection('foo') - db.foo.insert_one({'_id': 1}) + db.drop_collection("foo") + db.foo.insert_one({"_id": 1}) old_signal_handler = None try: @@ -1203,7 +1187,8 @@ def test_interrupt_signal(self): # sock.recv(): TypeError: 'int' object is not callable # We don't know what causes this, so we hack around it. - if sys.platform == 'win32': + if sys.platform == "win32": + def interrupter(): # Raises KeyboardInterrupt in the main thread time.sleep(0.25) @@ -1222,7 +1207,7 @@ def sigalarm(num, frame): raised = False try: # Will be interrupted by a KeyboardInterrupt. - next(db.foo.find({'$where': where})) + next(db.foo.find({"$where": where})) except KeyboardInterrupt: raised = True @@ -1233,10 +1218,7 @@ def sigalarm(num, frame): # Raises AssertionError due to PYTHON-294 -- Mongo's response to # the previous find() is still waiting to be read on the socket, # so the request id's don't match. - self.assertEqual( - {'_id': 1}, - next(db.foo.find()) - ) + self.assertEqual({"_id": 1}, next(db.foo.find())) finally: if old_signal_handler: signal.signal(signal.SIGALRM, old_signal_handler) @@ -1253,10 +1235,8 @@ def test_operation_failure(self): self.assertGreaterEqual(socket_count, 1) old_sock_info = next(iter(pool.sockets)) client.pymongo_test.test.drop() - client.pymongo_test.test.insert_one({'_id': 'foo'}) - self.assertRaises( - OperationFailure, - client.pymongo_test.test.insert_one, {'_id': 'foo'}) + client.pymongo_test.test.insert_one({"_id": "foo"}) + self.assertRaises(OperationFailure, client.pymongo_test.test.insert_one, {"_id": "foo"}) self.assertEqual(socket_count, len(pool.sockets)) new_sock_info = next(iter(pool.sockets)) @@ -1268,27 +1248,26 @@ def test_lazy_connect_w0(self): # Use a separate collection to avoid races where we're still # completing an operation on a collection while the next test begins. - client_context.client.drop_database('test_lazy_connect_w0') - self.addCleanup( - client_context.client.drop_database, 'test_lazy_connect_w0') + client_context.client.drop_database("test_lazy_connect_w0") + self.addCleanup(client_context.client.drop_database, "test_lazy_connect_w0") client = rs_or_single_client(connect=False, w=0) client.test_lazy_connect_w0.test.insert_one({}) wait_until( - lambda: client.test_lazy_connect_w0.test.count_documents({}) == 1, - "find one document") + lambda: client.test_lazy_connect_w0.test.count_documents({}) == 1, "find one document" + ) client = rs_or_single_client(connect=False, w=0) - client.test_lazy_connect_w0.test.update_one({}, {'$set': {'x': 1}}) + client.test_lazy_connect_w0.test.update_one({}, {"$set": {"x": 1}}) wait_until( - lambda: client.test_lazy_connect_w0.test.find_one().get('x') == 1, - "update one document") + lambda: client.test_lazy_connect_w0.test.find_one().get("x") == 1, "update one document" + ) client = rs_or_single_client(connect=False, w=0) client.test_lazy_connect_w0.test.delete_one({}) wait_until( - lambda: client.test_lazy_connect_w0.test.count_documents({}) == 0, - "delete one document") + lambda: client.test_lazy_connect_w0.test.count_documents({}) == 0, "delete one document" + ) @client_context.require_no_mongos def test_exhaust_network_error(self): @@ -1320,9 +1299,7 @@ def test_auth_network_error(self): # when authenticating a new socket with cached credentials. # Get a client with one socket so we detect if it's leaked. 
- c = connected(rs_or_single_client(maxPoolSize=1, - waitQueueTimeoutMS=1, - retryReads=False)) + c = connected(rs_or_single_client(maxPoolSize=1, waitQueueTimeoutMS=1, retryReads=False)) # Cause a network error on the actual socket. pool = get_pool(c) @@ -1338,8 +1315,7 @@ def test_auth_network_error(self): @client_context.require_no_replica_set def test_connect_to_standalone_using_replica_set_name(self): - client = single_client(replicaSet='anything', - serverSelectionTimeoutMS=100) + client = single_client(replicaSet="anything", serverSelectionTimeoutMS=100) with self.assertRaises(AutoReconnect): client.test.test.find_one() @@ -1350,16 +1326,24 @@ def test_stale_getmore(self): # the topology before the getMore message is sent. Test that # MongoClient._run_operation_with_response handles the error. with self.assertRaises(AutoReconnect): - client = rs_client(connect=False, - serverSelectionTimeoutMS=100) + client = rs_client(connect=False, serverSelectionTimeoutMS=100) client._run_operation( - operation=message._GetMore('pymongo_test', 'collection', - 101, 1234, client.codec_options, - ReadPreference.PRIMARY, - None, client, None, None, False), - unpack_res=Cursor( - client.pymongo_test.collection)._unpack_response, - address=('not-a-member', 27017)) + operation=message._GetMore( + "pymongo_test", + "collection", + 101, + 1234, + client.codec_options, + ReadPreference.PRIMARY, + None, + client, + None, + None, + False, + ), + unpack_res=Cursor(client.pymongo_test.collection)._unpack_response, + address=("not-a-member", 27017), + ) def test_heartbeat_frequency_ms(self): class HeartbeatStartedListener(ServerHeartbeatListener): @@ -1386,15 +1370,17 @@ def init(self, *args): ServerHeartbeatStartedEvent.__init__ = init # type: ignore listener = HeartbeatStartedListener() uri = "mongodb://%s:%d/?heartbeatFrequencyMS=500" % ( - client_context.host, client_context.port) + client_context.host, + client_context.port, + ) client = single_client(uri, event_listeners=[listener]) - wait_until(lambda: len(listener.results) >= 2, - "record two ServerHeartbeatStartedEvents") + wait_until( + lambda: len(listener.results) >= 2, "record two ServerHeartbeatStartedEvents" + ) # Default heartbeatFrequencyMS is 10 sec. Check the interval was # closer to 0.5 sec with heartbeatFrequencyMS configured. 
- self.assertAlmostEqual( - heartbeat_times[1] - heartbeat_times[0], 0.5, delta=2) + self.assertAlmostEqual(heartbeat_times[1] - heartbeat_times[0], 0.5, delta=2) client.close() finally: @@ -1405,7 +1391,7 @@ def test_small_heartbeat_frequency_ms(self): with self.assertRaises(ConfigurationError) as context: MongoClient(uri) - self.assertIn('heartbeatFrequencyMS', str(context.exception)) + self.assertIn("heartbeatFrequencyMS", str(context.exception)) def test_compression(self): def compression_settings(client): @@ -1415,16 +1401,16 @@ def compression_settings(client): uri = "mongodb://localhost:27017/?compressors=zlib" client = MongoClient(uri, connect=False) opts = compression_settings(client) - self.assertEqual(opts.compressors, ['zlib']) + self.assertEqual(opts.compressors, ["zlib"]) uri = "mongodb://localhost:27017/?compressors=zlib&zlibCompressionLevel=4" client = MongoClient(uri, connect=False) opts = compression_settings(client) - self.assertEqual(opts.compressors, ['zlib']) + self.assertEqual(opts.compressors, ["zlib"]) self.assertEqual(opts.zlib_compression_level, 4) uri = "mongodb://localhost:27017/?compressors=zlib&zlibCompressionLevel=-1" client = MongoClient(uri, connect=False) opts = compression_settings(client) - self.assertEqual(opts.compressors, ['zlib']) + self.assertEqual(opts.compressors, ["zlib"]) self.assertEqual(opts.zlib_compression_level, -1) uri = "mongodb://localhost:27017" client = MongoClient(uri, connect=False) @@ -1439,7 +1425,7 @@ def compression_settings(client): uri = "mongodb://localhost:27017/?compressors=foobar,zlib" client = MongoClient(uri, connect=False) opts = compression_settings(client) - self.assertEqual(opts.compressors, ['zlib']) + self.assertEqual(opts.compressors, ["zlib"]) self.assertEqual(opts.zlib_compression_level, -1) # According to the connection string spec, unsupported values @@ -1447,12 +1433,12 @@ def compression_settings(client): uri = "mongodb://localhost:27017/?compressors=zlib&zlibCompressionLevel=10" client = MongoClient(uri, connect=False) opts = compression_settings(client) - self.assertEqual(opts.compressors, ['zlib']) + self.assertEqual(opts.compressors, ["zlib"]) self.assertEqual(opts.zlib_compression_level, -1) uri = "mongodb://localhost:27017/?compressors=zlib&zlibCompressionLevel=-2" client = MongoClient(uri, connect=False) opts = compression_settings(client) - self.assertEqual(opts.compressors, ['zlib']) + self.assertEqual(opts.compressors, ["zlib"]) self.assertEqual(opts.zlib_compression_level, -1) if not _HAVE_SNAPPY: @@ -1464,11 +1450,11 @@ def compression_settings(client): uri = "mongodb://localhost:27017/?compressors=snappy" client = MongoClient(uri, connect=False) opts = compression_settings(client) - self.assertEqual(opts.compressors, ['snappy']) + self.assertEqual(opts.compressors, ["snappy"]) uri = "mongodb://localhost:27017/?compressors=snappy,zlib" client = MongoClient(uri, connect=False) opts = compression_settings(client) - self.assertEqual(opts.compressors, ['snappy', 'zlib']) + self.assertEqual(opts.compressors, ["snappy", "zlib"]) if not _HAVE_ZSTD: uri = "mongodb://localhost:27017/?compressors=zstd" @@ -1479,11 +1465,11 @@ def compression_settings(client): uri = "mongodb://localhost:27017/?compressors=zstd" client = MongoClient(uri, connect=False) opts = compression_settings(client) - self.assertEqual(opts.compressors, ['zstd']) + self.assertEqual(opts.compressors, ["zstd"]) uri = "mongodb://localhost:27017/?compressors=zstd,zlib" client = MongoClient(uri, connect=False) opts = 
compression_settings(client) - self.assertEqual(opts.compressors, ['zstd', 'zlib']) + self.assertEqual(opts.compressors, ["zstd", "zlib"]) options = client_context.default_client_options if "compressors" in options and "zlib" in options["compressors"]: @@ -1495,7 +1481,7 @@ def compression_settings(client): def test_reset_during_update_pool(self): client = rs_or_single_client(minPoolSize=10) self.addCleanup(client.close) - client.admin.command('ping') + client.admin.command("ping") pool = get_pool(client) generation = pool.gen.get_overall() @@ -1511,9 +1497,8 @@ def stop(self): def run(self): while self.running: - exc = AutoReconnect('mock pool error') - ctx = _ErrorContext( - exc, 0, pool.gen.get_overall(), False, None) + exc = AutoReconnect("mock pool error") + ctx = _ErrorContext(exc, 0, pool.gen.get_overall(), False, None) client._topology.handle_error(pool.address, ctx) time.sleep(0.001) @@ -1531,17 +1516,17 @@ def run(self): finally: t.stop() t.join() - client.admin.command('ping') + client.admin.command("ping") def test_background_connections_do_not_hold_locks(self): min_pool_size = 10 client = rs_or_single_client( - serverSelectionTimeoutMS=3000, minPoolSize=min_pool_size, - connect=False) + serverSelectionTimeoutMS=3000, minPoolSize=min_pool_size, connect=False + ) self.addCleanup(client.close) # Create a single connection in the pool. - client.admin.command('ping') + client.admin.command("ping") # Cause new connections stall for a few seconds. pool = get_pool(client) @@ -1553,15 +1538,15 @@ def stall_connect(*args, **kwargs): pool.connect = stall_connect # Un-patch Pool.connect to break the cyclic reference. - self.addCleanup(delattr, pool, 'connect') + self.addCleanup(delattr, pool, "connect") # Wait for the background thread to start creating connections - wait_until(lambda: len(pool.sockets) > 1, 'start creating connections') + wait_until(lambda: len(pool.sockets) > 1, "start creating connections") # Assert that application operations do not block. for _ in range(10): start = time.monotonic() - client.admin.command('ping') + client.admin.command("ping") total = time.monotonic() - start # Each ping command should not take more than 2 seconds self.assertLess(total, 2) @@ -1570,28 +1555,27 @@ def stall_connect(*args, **kwargs): def test_direct_connection(self): # direct_connection=True should result in Single topology. client = rs_or_single_client(directConnection=True) - client.admin.command('ping') + client.admin.command("ping") self.assertEqual(len(client.nodes), 1) - self.assertEqual(client._topology_settings.get_topology_type(), - TOPOLOGY_TYPE.Single) + self.assertEqual(client._topology_settings.get_topology_type(), TOPOLOGY_TYPE.Single) client.close() # direct_connection=False should result in RS topology. client = rs_or_single_client(directConnection=False) - client.admin.command('ping') + client.admin.command("ping") self.assertGreaterEqual(len(client.nodes), 1) - self.assertIn(client._topology_settings.get_topology_type(), - [TOPOLOGY_TYPE.ReplicaSetNoPrimary, - TOPOLOGY_TYPE.ReplicaSetWithPrimary]) + self.assertIn( + client._topology_settings.get_topology_type(), + [TOPOLOGY_TYPE.ReplicaSetNoPrimary, TOPOLOGY_TYPE.ReplicaSetWithPrimary], + ) client.close() # directConnection=True, should error with multiple hosts as a list. 
with self.assertRaises(ConfigurationError): - MongoClient(['host1', 'host2'], directConnection=True) + MongoClient(["host1", "host2"], directConnection=True) - @unittest.skipIf(sys.platform.startswith('java'), - 'Jython does not support gc.get_objects') - @unittest.skipIf('PyPy' in sys.version, 'PYTHON-2927 fails often on PyPy') + @unittest.skipIf(sys.platform.startswith("java"), "Jython does not support gc.get_objects") + @unittest.skipIf("PyPy" in sys.version, "PYTHON-2927 fails often on PyPy") def test_continuous_network_errors(self): def server_description_count(): i = 0 @@ -1602,12 +1586,12 @@ def server_description_count(): except ReferenceError: pass return i + gc.collect() with client_knobs(min_heartbeat_interval=0.003): client = MongoClient( - 'invalid:27017', - heartbeatFrequencyMS=3, - serverSelectionTimeoutMS=100) + "invalid:27017", heartbeatFrequencyMS=3, serverSelectionTimeoutMS=100 + ) initial_count = server_description_count() self.addCleanup(client.close) with self.assertRaises(ServerSelectionTimeoutError): @@ -1623,15 +1607,15 @@ def server_description_count(): def test_network_error_message(self): client = single_client(retryReads=False) self.addCleanup(client.close) - client.admin.command('ping') # connect - with self.fail_point({'mode': {'times': 1}, - 'data': {'closeConnection': True, - 'failCommands': ['find']}}): - expected = '%s:%s: ' % client.address + client.admin.command("ping") # connect + with self.fail_point( + {"mode": {"times": 1}, "data": {"closeConnection": True, "failCommands": ["find"]}} + ): + expected = "%s:%s: " % client.address with self.assertRaisesRegex(AutoReconnect, expected): client.pymongo_test.test.find_one({}) - @unittest.skipIf('PyPy' in sys.version, 'PYTHON-2938 could fail on PyPy') + @unittest.skipIf("PyPy" in sys.version, "PYTHON-2938 could fail on PyPy") def test_process_periodic_tasks(self): client = rs_or_single_client() coll = client.db.collection @@ -1643,49 +1627,45 @@ def test_process_periodic_tasks(self): client.close() # Add cursor to kill cursors queue del cursor - wait_until(lambda: client._MongoClient__kill_cursors_queue, - "waited for cursor to be added to queue") + wait_until( + lambda: client._MongoClient__kill_cursors_queue, + "waited for cursor to be added to queue", + ) client._process_periodic_tasks() # This must not raise or print any exceptions with self.assertRaises(InvalidOperation): coll.insert_many([{} for _ in range(5)]) - @unittest.skipUnless( - _HAVE_DNSPYTHON, "DNS-related tests need dnspython to be installed") + @unittest.skipUnless(_HAVE_DNSPYTHON, "DNS-related tests need dnspython to be installed") def test_service_name_from_kwargs(self): client = MongoClient( - 'mongodb+srv://user:password@test22.test.build.10gen.cc', - srvServiceName='customname', connect=False) - self.assertEqual(client._topology_settings.srv_service_name, - 'customname') + "mongodb+srv://user:password@test22.test.build.10gen.cc", + srvServiceName="customname", + connect=False, + ) + self.assertEqual(client._topology_settings.srv_service_name, "customname") client = MongoClient( - 'mongodb+srv://user:password@test22.test.build.10gen.cc' - '/?srvServiceName=shouldbeoverriden', - srvServiceName='customname', connect=False) - self.assertEqual(client._topology_settings.srv_service_name, - 'customname') + "mongodb+srv://user:password@test22.test.build.10gen.cc" + "/?srvServiceName=shouldbeoverriden", + srvServiceName="customname", + connect=False, + ) + self.assertEqual(client._topology_settings.srv_service_name, "customname") client = 
MongoClient(
-            'mongodb+srv://user:password@test22.test.build.10gen.cc'
-            '/?srvServiceName=customname',
-            connect=False)
-        self.assertEqual(client._topology_settings.srv_service_name,
-                         'customname')
-
-    @unittest.skipUnless(
-        _HAVE_DNSPYTHON, "DNS-related tests need dnspython to be installed")
+            "mongodb+srv://user:password@test22.test.build.10gen.cc" "/?srvServiceName=customname",
+            connect=False,
+        )
+        self.assertEqual(client._topology_settings.srv_service_name, "customname")
+
+    @unittest.skipUnless(_HAVE_DNSPYTHON, "DNS-related tests need dnspython to be installed")
     def test_srv_max_hosts_kwarg(self):
+        client = MongoClient("mongodb+srv://test1.test.build.10gen.cc/")
+        self.assertGreater(len(client.topology_description.server_descriptions()), 1)
+        client = MongoClient("mongodb+srv://test1.test.build.10gen.cc/", srvmaxhosts=1)
+        self.assertEqual(len(client.topology_description.server_descriptions()), 1)
         client = MongoClient(
-            'mongodb+srv://test1.test.build.10gen.cc/')
-        self.assertGreater(
-            len(client.topology_description.server_descriptions()), 1)
-        client = MongoClient(
-            'mongodb+srv://test1.test.build.10gen.cc/', srvmaxhosts=1)
-        self.assertEqual(
-            len(client.topology_description.server_descriptions()), 1)
-        client = MongoClient(
-            'mongodb+srv://test1.test.build.10gen.cc/?srvMaxHosts=1',
-            srvmaxhosts=2)
-        self.assertEqual(
-            len(client.topology_description.server_descriptions()), 2)
+            "mongodb+srv://test1.test.build.10gen.cc/?srvMaxHosts=1", srvmaxhosts=2
+        )
+        self.assertEqual(len(client.topology_description.server_descriptions()), 2)
 
 
 class TestExhaustCursor(IntegrationTest):
@@ -1708,8 +1688,8 @@ def test_exhaust_query_server_error(self):
         # This will cause OperationFailure in all mongo versions since
         # the value for $orderby must be a document.
         cursor = collection.find(
-            SON([('$query', {}), ('$orderby', True)]),
-            cursor_type=CursorType.EXHAUST)
+            SON([("$query", {}), ("$orderby", True)]), cursor_type=CursorType.EXHAUST
+        )
         self.assertRaises(OperationFailure, cursor.next)
         self.assertFalse(sock_info.closed)
 
@@ -1743,8 +1723,8 @@ def receive_message(request_id):
             SocketInfo.receive_message(sock_info, request_id)
 
         # responseFlags bit 1 is QueryFailure.
-        msg = struct.pack('
-        event = OBJECT_TYPES[op['event']]
-        count = op['count']
-        timeout = op.get('timeout', 10000) / 1000.0
-        wait_until(lambda: self.listener.event_count(event) >= count,
-            'find %s %s event(s)' % (count, event), timeout=timeout)
+        event = OBJECT_TYPES[op["event"]]
+        count = op["count"]
+        timeout = op.get("timeout", 10000) / 1000.0
+        wait_until(
+            lambda: self.listener.event_count(event) >= count,
+            "find %s %s event(s)" % (count, event),
+            timeout=timeout,
+        )
 
     def check_out(self, op):
         """Run the 'checkOut' operation."""
-        label = op['label']
+        label = op["label"]
         with self.pool.get_socket() as sock_info:
             # Call 'pin_cursor' so we can hold the socket.
sock_info.pin_cursor() @@ -130,7 +130,7 @@ def check_out(self, op): def check_in(self, op): """Run the 'checkIn' operation.""" - label = op['connection'] + label = op["connection"] sock_info = self.labels[label] self.pool.return_socket(sock_info) @@ -148,8 +148,8 @@ def close(self, op): def run_operation(self, op): """Run a single operation in a test.""" - op_name = camel_to_snake(op['name']) - thread = op['thread'] + op_name = camel_to_snake(op["name"]) + thread = op["thread"] meth = getattr(self, op_name) if thread: self.targets[thread].schedule(lambda: meth(op)) @@ -164,9 +164,9 @@ def run_operations(self, ops): def check_object(self, actual, expected): """Assert that the actual object matches the expected object.""" - self.assertEqual(type(actual), OBJECT_TYPES[expected['type']]) + self.assertEqual(type(actual), OBJECT_TYPES[expected["type"]]) for attr, expected_val in expected.items(): - if attr == 'type': + if attr == "type": continue c2s = camel_to_snake(attr) actual_val = getattr(actual, c2s) @@ -182,62 +182,60 @@ def check_event(self, actual, expected): def actual_events(self, ignore): """Return all the non-ignored events.""" ignore = tuple(OBJECT_TYPES[name] for name in ignore) - return [event for event in self.listener.events - if not isinstance(event, ignore)] + return [event for event in self.listener.events if not isinstance(event, ignore)] def check_events(self, events, ignore): """Check the events of a test.""" actual_events = self.actual_events(ignore) for actual, expected in zip(actual_events, events): - self.logs.append('Checking event actual: %r vs expected: %r' % ( - actual, expected)) + self.logs.append("Checking event actual: %r vs expected: %r" % (actual, expected)) self.check_event(actual, expected) if len(events) > len(actual_events): - self.fail('missing events: %r' % (events[len(actual_events):],)) + self.fail("missing events: %r" % (events[len(actual_events) :],)) def check_error(self, actual, expected): - message = expected.pop('message') + message = expected.pop("message") self.check_object(actual, expected) self.assertIn(message, str(actual)) def _set_fail_point(self, client, command_args): - cmd = SON([('configureFailPoint', 'failCommand')]) + cmd = SON([("configureFailPoint", "failCommand")]) cmd.update(command_args) client.admin.command(cmd) def set_fail_point(self, command_args): if not client_context.supports_failCommand_fail_point: - self.skipTest('failCommand fail point must be supported') + self.skipTest("failCommand fail point must be supported") self._set_fail_point(self.client, command_args) def run_scenario(self, scenario_def, test): """Run a CMAP spec test.""" self.logs: list = [] - self.assertEqual(scenario_def['version'], 1) - self.assertIn(scenario_def['style'], ['unit', 'integration']) + self.assertEqual(scenario_def["version"], 1) + self.assertIn(scenario_def["style"], ["unit", "integration"]) self.listener = CMAPListener() self._ops: list = [] # Configure the fail point before creating the client. 
- if 'failPoint' in test: - fp = test['failPoint'] + if "failPoint" in test: + fp = test["failPoint"] self.set_fail_point(fp) - self.addCleanup(self.set_fail_point, { - 'configureFailPoint': fp['configureFailPoint'], 'mode': 'off'}) - - opts = test['poolOptions'].copy() - opts['event_listeners'] = [self.listener] - opts['_monitor_class'] = DummyMonitor - opts['connect'] = False + self.addCleanup( + self.set_fail_point, {"configureFailPoint": fp["configureFailPoint"], "mode": "off"} + ) + + opts = test["poolOptions"].copy() + opts["event_listeners"] = [self.listener] + opts["_monitor_class"] = DummyMonitor + opts["connect"] = False # Support backgroundThreadIntervalMS, default to 50ms. - interval = opts.pop('backgroundThreadIntervalMS', 50) + interval = opts.pop("backgroundThreadIntervalMS", 50) if interval < 0: kill_cursor_frequency = 99999999 else: - kill_cursor_frequency = interval/1000.0 - with client_knobs(kill_cursor_frequency=kill_cursor_frequency, - min_heartbeat_interval=.05): + kill_cursor_frequency = interval / 1000.0 + with client_knobs(kill_cursor_frequency=kill_cursor_frequency, min_heartbeat_interval=0.05): client = single_client(**opts) # Update the SD to a known type because the DummyMonitor will not. # Note we cannot simply call topology.on_change because that would @@ -245,10 +243,10 @@ def run_scenario(self, scenario_def, test): # PoolReadyEvents. Instead, update the initial state before # opening the Topology. td = client_context.client._topology.description - sd = td.server_descriptions()[(client_context.host, - client_context.port)] + sd = td.server_descriptions()[(client_context.host, client_context.port)] client._topology._description = updated_topology_description( - client._topology._description, sd) + client._topology._description, sd + ) # When backgroundThreadIntervalMS is negative we do not start the # background thread to ensure it never runs. if interval < 0: @@ -274,37 +272,37 @@ def cleanup(): self.addCleanup(cleanup) try: - if test['error']: + if test["error"]: with self.assertRaises(PyMongoError) as ctx: - self.run_operations(test['operations']) - self.check_error(ctx.exception, test['error']) + self.run_operations(test["operations"]) + self.check_error(ctx.exception, test["error"]) else: - self.run_operations(test['operations']) + self.run_operations(test["operations"]) - self.check_events(test['events'], test['ignore']) + self.check_events(test["events"], test["ignore"]) except Exception: # Print the events after a test failure. - print('\nFailed test: %r' % (test['description'],)) - print('Operations:') + print("\nFailed test: %r" % (test["description"],)) + print("Operations:") for op in self._ops: print(op) - print('Threads:') + print("Threads:") print(self.targets) - print('Connections:') + print("Connections:") print(self.labels) - print('Events:') + print("Events:") for event in self.listener.events: print(event) - print('Log:') + print("Log:") for log in self.logs: print(log) raise POOL_OPTIONS = { - 'maxPoolSize': 50, - 'minPoolSize': 1, - 'maxIdleTimeMS': 10000, - 'waitQueueTimeoutMS': 10000 + "maxPoolSize": 50, + "minPoolSize": 1, + "maxIdleTimeMS": 10000, + "waitQueueTimeoutMS": 10000, } # @@ -319,11 +317,10 @@ def test_1_client_connection_pool_options(self): def test_2_all_client_pools_have_same_options(self): client = rs_or_single_client(**self.POOL_OPTIONS) self.addCleanup(client.close) - client.admin.command('ping') + client.admin.command("ping") # Discover at least one secondary. 
if client_context.has_secondaries: - client.admin.command( - 'ping', read_preference=ReadPreference.SECONDARY) + client.admin.command("ping", read_preference=ReadPreference.SECONDARY) pools = get_pools(client) pool_opts = pools[0].opts @@ -332,9 +329,8 @@ def test_2_all_client_pools_have_same_options(self): self.assertEqual(pool.opts, pool_opts) def test_3_uri_connection_pool_options(self): - opts = '&'.join(['%s=%s' % (k, v) - for k, v in self.POOL_OPTIONS.items()]) - uri = 'mongodb://%s/?%s' % (client_context.pair, opts) + opts = "&".join(["%s=%s" % (k, v) for k, v in self.POOL_OPTIONS.items()]) + uri = "mongodb://%s/?%s" % (client_context.pair, opts) client = rs_or_single_client(uri) self.addCleanup(client.close) pool_opts = get_pool(client).opts @@ -347,18 +343,16 @@ def test_4_subscribe_to_events(self): self.assertEqual(listener.event_count(PoolCreatedEvent), 1) # Creates a new connection. - client.admin.command('ping') - self.assertEqual( - listener.event_count(ConnectionCheckOutStartedEvent), 1) + client.admin.command("ping") + self.assertEqual(listener.event_count(ConnectionCheckOutStartedEvent), 1) self.assertEqual(listener.event_count(ConnectionCreatedEvent), 1) self.assertEqual(listener.event_count(ConnectionReadyEvent), 1) self.assertEqual(listener.event_count(ConnectionCheckedOutEvent), 1) self.assertEqual(listener.event_count(ConnectionCheckedInEvent), 1) # Uses the existing connection. - client.admin.command('ping') - self.assertEqual( - listener.event_count(ConnectionCheckOutStartedEvent), 2) + client.admin.command("ping") + self.assertEqual(listener.event_count(ConnectionCheckOutStartedEvent), 2) self.assertEqual(listener.event_count(ConnectionCheckedOutEvent), 2) self.assertEqual(listener.event_count(ConnectionCheckedInEvent), 2) @@ -373,49 +367,44 @@ def test_5_check_out_fails_connection_error(self): pool = get_pool(client) def mock_connect(*args, **kwargs): - raise ConnectionFailure('connect failed') + raise ConnectionFailure("connect failed") + pool.connect = mock_connect # Un-patch Pool.connect to break the cyclic reference. - self.addCleanup(delattr, pool, 'connect') + self.addCleanup(delattr, pool, "connect") # Attempt to create a new connection. - with self.assertRaisesRegex(ConnectionFailure, 'connect failed'): - client.admin.command('ping') + with self.assertRaisesRegex(ConnectionFailure, "connect failed"): + client.admin.command("ping") self.assertIsInstance(listener.events[0], PoolCreatedEvent) self.assertIsInstance(listener.events[1], PoolReadyEvent) - self.assertIsInstance(listener.events[2], - ConnectionCheckOutStartedEvent) - self.assertIsInstance(listener.events[3], - ConnectionCheckOutFailedEvent) + self.assertIsInstance(listener.events[2], ConnectionCheckOutStartedEvent) + self.assertIsInstance(listener.events[3], ConnectionCheckOutFailedEvent) self.assertIsInstance(listener.events[4], PoolClearedEvent) failed_event = listener.events[3] - self.assertEqual( - failed_event.reason, ConnectionCheckOutFailedReason.CONN_ERROR) + self.assertEqual(failed_event.reason, ConnectionCheckOutFailedReason.CONN_ERROR) def test_5_check_out_fails_auth_error(self): listener = CMAPListener() client = single_client_noauth( - username="notauser", password="fail", - event_listeners=[listener]) + username="notauser", password="fail", event_listeners=[listener] + ) self.addCleanup(client.close) # Attempt to create a new connection. 
- with self.assertRaisesRegex(OperationFailure, 'failed'): - client.admin.command('ping') + with self.assertRaisesRegex(OperationFailure, "failed"): + client.admin.command("ping") self.assertIsInstance(listener.events[0], PoolCreatedEvent) self.assertIsInstance(listener.events[1], PoolReadyEvent) - self.assertIsInstance(listener.events[2], - ConnectionCheckOutStartedEvent) + self.assertIsInstance(listener.events[2], ConnectionCheckOutStartedEvent) self.assertIsInstance(listener.events[3], ConnectionCreatedEvent) # Error happens here. self.assertIsInstance(listener.events[4], ConnectionClosedEvent) - self.assertIsInstance(listener.events[5], - ConnectionCheckOutFailedEvent) - self.assertEqual(listener.events[5].reason, - ConnectionCheckOutFailedReason.CONN_ERROR) + self.assertIsInstance(listener.events[5], ConnectionCheckOutFailedEvent) + self.assertEqual(listener.events[5].reason, ConnectionCheckOutFailedReason.CONN_ERROR) # # Extra non-spec tests @@ -426,13 +415,13 @@ def assertRepr(self, obj): self.assertEqual(repr(new_obj), repr(obj)) def test_events_repr(self): - host = ('localhost', 27017) + host = ("localhost", 27017) self.assertRepr(ConnectionCheckedInEvent(host, 1)) self.assertRepr(ConnectionCheckedOutEvent(host, 1)) - self.assertRepr(ConnectionCheckOutFailedEvent( - host, ConnectionCheckOutFailedReason.POOL_CLOSED)) - self.assertRepr(ConnectionClosedEvent( - host, 1, ConnectionClosedReason.POOL_CLOSED)) + self.assertRepr( + ConnectionCheckOutFailedEvent(host, ConnectionCheckOutFailedReason.POOL_CLOSED) + ) + self.assertRepr(ConnectionClosedEvent(host, 1, ConnectionClosedReason.POOL_CLOSED)) self.assertRepr(ConnectionCreatedEvent(host, 1)) self.assertRepr(ConnectionReadyEvent(host, 1)) self.assertRepr(ConnectionCheckOutStartedEvent(host)) @@ -446,7 +435,7 @@ def test_close_leaves_pool_unpaused(self): # test_threads.TestThreads.test_client_disconnect listener = CMAPListener() client = single_client(event_listeners=[listener]) - client.admin.command('ping') + client.admin.command("ping") pool = get_pool(client) client.close() self.assertEqual(1, listener.event_count(PoolClearedEvent)) @@ -464,7 +453,6 @@ def run_scenario(self): class CMAPTestCreator(TestCreator): - def tests(self, scenario_def): """Extract the tests from a spec file. diff --git a/test/test_code.py b/test/test_code.py index 1c4b5be1fe..9ff305e39a 100644 --- a/test/test_code.py +++ b/test/test_code.py @@ -20,9 +20,10 @@ sys.path[0:0] = [""] -from bson.code import Code from test import unittest +from bson.code import Code + class TestCode(unittest.TestCase): def test_types(self): @@ -37,6 +38,7 @@ def test_read_only(self): def set_c(): c.scope = 5 # type: ignore + self.assertRaises(AttributeError, set_c) def test_code(self): @@ -47,15 +49,15 @@ def test_code(self): self.assertTrue(isinstance(a_code, Code)) self.assertFalse(isinstance(a_string, Code)) self.assertIsNone(a_code.scope) - with_scope = Code('hello world', {'my_var': 5}) - self.assertEqual({'my_var': 5}, with_scope.scope) - empty_scope = Code('hello world', {}) + with_scope = Code("hello world", {"my_var": 5}) + self.assertEqual({"my_var": 5}, with_scope.scope) + empty_scope = Code("hello world", {}) self.assertEqual({}, empty_scope.scope) - another_scope = Code(with_scope, {'new_var': 42}) + another_scope = Code(with_scope, {"new_var": 42}) self.assertEqual(str(with_scope), str(another_scope)) - self.assertEqual({'new_var': 42, 'my_var': 5}, another_scope.scope) + self.assertEqual({"new_var": 42, "my_var": 5}, another_scope.scope) # No error. 
- Code('héllø world¡') + Code("héllø world¡") def test_repr(self): c = Code("hello world", {}) @@ -98,8 +100,7 @@ def test_scope_preserved(self): def test_scope_kwargs(self): self.assertEqual({"a": 1}, Code("", a=1).scope) self.assertEqual({"a": 1}, Code("", {"a": 2}, a=1).scope) - self.assertEqual({"a": 1, "b": 2, "c": 3}, - Code("", {"b": 2}, a=1, c=3).scope) + self.assertEqual({"a": 1, "b": 2, "c": 3}, Code("", {"b": 2}, a=1, c=3).scope) if __name__ == "__main__": diff --git a/test/test_collation.py b/test/test_collation.py index 9c4f4f6576..d8410a9de4 100644 --- a/test/test_collation.py +++ b/test/test_collation.py @@ -16,42 +16,48 @@ import functools import warnings - +from test import IntegrationTest, client_context, unittest +from test.utils import EventListener, rs_or_single_client from typing import Any from pymongo.collation import ( Collation, - CollationCaseFirst, CollationStrength, CollationAlternate, - CollationMaxVariable) + CollationAlternate, + CollationCaseFirst, + CollationMaxVariable, + CollationStrength, +) from pymongo.errors import ConfigurationError -from pymongo.operations import (DeleteMany, DeleteOne, IndexModel, ReplaceOne, - UpdateMany, UpdateOne) +from pymongo.operations import ( + DeleteMany, + DeleteOne, + IndexModel, + ReplaceOne, + UpdateMany, + UpdateOne, +) from pymongo.write_concern import WriteConcern -from test import client_context, IntegrationTest, unittest -from test.utils import EventListener, rs_or_single_client class TestCollationObject(unittest.TestCase): - def test_constructor(self): self.assertRaises(TypeError, Collation, locale=42) # Fill in a locale to test the other options. - _Collation = functools.partial(Collation, 'en_US') + _Collation = functools.partial(Collation, "en_US") # No error. _Collation(caseFirst=CollationCaseFirst.UPPER) - self.assertRaises(TypeError, _Collation, caseLevel='true') - self.assertRaises(ValueError, _Collation, strength='six') - self.assertRaises(TypeError, _Collation, - numericOrdering='true') + self.assertRaises(TypeError, _Collation, caseLevel="true") + self.assertRaises(ValueError, _Collation, strength="six") + self.assertRaises(TypeError, _Collation, numericOrdering="true") self.assertRaises(TypeError, _Collation, alternate=5) self.assertRaises(TypeError, _Collation, maxVariable=2) - self.assertRaises(TypeError, _Collation, normalization='false') - self.assertRaises(TypeError, _Collation, backwards='true') + self.assertRaises(TypeError, _Collation, normalization="false") + self.assertRaises(TypeError, _Collation, backwards="true") # No errors. 
- Collation('en_US', future_option='bar', another_option=42) + Collation("en_US", future_option="bar", another_option=42) collation = Collation( - 'en_US', + "en_US", caseLevel=True, caseFirst=CollationCaseFirst.UPPER, strength=CollationStrength.QUATERNARY, @@ -59,24 +65,27 @@ def test_constructor(self): alternate=CollationAlternate.SHIFTED, maxVariable=CollationMaxVariable.SPACE, normalization=True, - backwards=True) - - self.assertEqual({ - 'locale': 'en_US', - 'caseLevel': True, - 'caseFirst': 'upper', - 'strength': 4, - 'numericOrdering': True, - 'alternate': 'shifted', - 'maxVariable': 'space', - 'normalization': True, - 'backwards': True - }, collation.document) - - self.assertEqual({ - 'locale': 'en_US', - 'backwards': True - }, Collation('en_US', backwards=True).document) + backwards=True, + ) + + self.assertEqual( + { + "locale": "en_US", + "caseLevel": True, + "caseFirst": "upper", + "strength": 4, + "numericOrdering": True, + "alternate": "shifted", + "maxVariable": "space", + "normalization": True, + "backwards": True, + }, + collation.document, + ) + + self.assertEqual( + {"locale": "en_US", "backwards": True}, Collation("en_US", backwards=True).document + ) class TestCollation(IntegrationTest): @@ -91,7 +100,7 @@ def setUpClass(cls): cls.listener = EventListener() cls.client = rs_or_single_client(event_listeners=[cls.listener]) cls.db = cls.client.pymongo_test - cls.collation = Collation('en_US') + cls.collation = Collation("en_US") cls.warn_context = warnings.catch_warnings() cls.warn_context.__enter__() warnings.simplefilter("ignore", DeprecationWarning) @@ -108,38 +117,33 @@ def tearDown(self): super(TestCollation, self).tearDown() def last_command_started(self): - return self.listener.results['started'][-1].command + return self.listener.results["started"][-1].command def assertCollationInLastCommand(self): - self.assertEqual( - self.collation.document, - self.last_command_started()['collation']) + self.assertEqual(self.collation.document, self.last_command_started()["collation"]) def test_create_collection(self): self.db.test.drop() - self.db.create_collection('test', collation=self.collation) + self.db.create_collection("test", collation=self.collation) self.assertCollationInLastCommand() # Test passing collation as a dict as well. 
self.db.test.drop() self.listener.results.clear() - self.db.create_collection('test', collation=self.collation.document) + self.db.create_collection("test", collation=self.collation.document) self.assertCollationInLastCommand() def test_index_model(self): - model = IndexModel([('a', 1), ('b', -1)], collation=self.collation) - self.assertEqual(self.collation.document, model.document['collation']) + model = IndexModel([("a", 1), ("b", -1)], collation=self.collation) + self.assertEqual(self.collation.document, model.document["collation"]) def test_create_index(self): - self.db.test.create_index('foo', collation=self.collation) - ci_cmd = self.listener.results['started'][0].command - self.assertEqual( - self.collation.document, - ci_cmd['indexes'][0]['collation']) + self.db.test.create_index("foo", collation=self.collation) + ci_cmd = self.listener.results["started"][0].command + self.assertEqual(self.collation.document, ci_cmd["indexes"][0]["collation"]) def test_aggregate(self): - self.db.test.aggregate([{'$group': {'_id': 42}}], - collation=self.collation) + self.db.test.aggregate([{"$group": {"_id": 42}}], collation=self.collation) self.assertCollationInLastCommand() def test_count_documents(self): @@ -147,15 +151,15 @@ def test_count_documents(self): self.assertCollationInLastCommand() def test_distinct(self): - self.db.test.distinct('foo', collation=self.collation) + self.db.test.distinct("foo", collation=self.collation) self.assertCollationInLastCommand() self.listener.results.clear() - self.db.test.find(collation=self.collation).distinct('foo') + self.db.test.find(collation=self.collation).distinct("foo") self.assertCollationInLastCommand() def test_find_command(self): - self.db.test.insert_one({'is this thing on?': True}) + self.db.test.insert_one({"is this thing on?": True}) self.listener.results.clear() next(self.db.test.find(collation=self.collation)) self.assertCollationInLastCommand() @@ -165,127 +169,118 @@ def test_explain_command(self): self.db.test.find(collation=self.collation).explain() # The collation should be part of the explained command. 
self.assertEqual( - self.collation.document, - self.last_command_started()['explain']['collation']) + self.collation.document, self.last_command_started()["explain"]["collation"] + ) def test_delete(self): - self.db.test.delete_one({'foo': 42}, collation=self.collation) - command = self.listener.results['started'][0].command - self.assertEqual( - self.collation.document, - command['deletes'][0]['collation']) + self.db.test.delete_one({"foo": 42}, collation=self.collation) + command = self.listener.results["started"][0].command + self.assertEqual(self.collation.document, command["deletes"][0]["collation"]) self.listener.results.clear() - self.db.test.delete_many({'foo': 42}, collation=self.collation) - command = self.listener.results['started'][0].command - self.assertEqual( - self.collation.document, - command['deletes'][0]['collation']) + self.db.test.delete_many({"foo": 42}, collation=self.collation) + command = self.listener.results["started"][0].command + self.assertEqual(self.collation.document, command["deletes"][0]["collation"]) def test_update(self): - self.db.test.replace_one({'foo': 42}, {'foo': 43}, - collation=self.collation) - command = self.listener.results['started'][0].command - self.assertEqual( - self.collation.document, - command['updates'][0]['collation']) + self.db.test.replace_one({"foo": 42}, {"foo": 43}, collation=self.collation) + command = self.listener.results["started"][0].command + self.assertEqual(self.collation.document, command["updates"][0]["collation"]) self.listener.results.clear() - self.db.test.update_one({'foo': 42}, {'$set': {'foo': 43}}, - collation=self.collation) - command = self.listener.results['started'][0].command - self.assertEqual( - self.collation.document, - command['updates'][0]['collation']) + self.db.test.update_one({"foo": 42}, {"$set": {"foo": 43}}, collation=self.collation) + command = self.listener.results["started"][0].command + self.assertEqual(self.collation.document, command["updates"][0]["collation"]) self.listener.results.clear() - self.db.test.update_many({'foo': 42}, {'$set': {'foo': 43}}, - collation=self.collation) - command = self.listener.results['started'][0].command - self.assertEqual( - self.collation.document, - command['updates'][0]['collation']) + self.db.test.update_many({"foo": 42}, {"$set": {"foo": 43}}, collation=self.collation) + command = self.listener.results["started"][0].command + self.assertEqual(self.collation.document, command["updates"][0]["collation"]) def test_find_and(self): - self.db.test.find_one_and_delete({'foo': 42}, collation=self.collation) + self.db.test.find_one_and_delete({"foo": 42}, collation=self.collation) self.assertCollationInLastCommand() self.listener.results.clear() - self.db.test.find_one_and_update({'foo': 42}, {'$set': {'foo': 43}}, - collation=self.collation) + self.db.test.find_one_and_update( + {"foo": 42}, {"$set": {"foo": 43}}, collation=self.collation + ) self.assertCollationInLastCommand() self.listener.results.clear() - self.db.test.find_one_and_replace({'foo': 42}, {'foo': 43}, - collation=self.collation) + self.db.test.find_one_and_replace({"foo": 42}, {"foo": 43}, collation=self.collation) self.assertCollationInLastCommand() def test_bulk_write(self): - self.db.test.collection.bulk_write([ - DeleteOne({'noCollation': 42}), - DeleteMany({'noCollation': 42}), - DeleteOne({'foo': 42}, collation=self.collation), - DeleteMany({'foo': 42}, collation=self.collation), - ReplaceOne({'noCollation': 24}, {'bar': 42}), - UpdateOne({'noCollation': 84}, {'$set': {'bar': 10}}, 
upsert=True), - UpdateMany({'noCollation': 45}, {'$set': {'bar': 42}}), - ReplaceOne({'foo': 24}, {'foo': 42}, collation=self.collation), - UpdateOne({'foo': 84}, {'$set': {'foo': 10}}, upsert=True, - collation=self.collation), - UpdateMany({'foo': 45}, {'$set': {'foo': 42}}, - collation=self.collation) - ]) - - delete_cmd = self.listener.results['started'][0].command - update_cmd = self.listener.results['started'][1].command + self.db.test.collection.bulk_write( + [ + DeleteOne({"noCollation": 42}), + DeleteMany({"noCollation": 42}), + DeleteOne({"foo": 42}, collation=self.collation), + DeleteMany({"foo": 42}, collation=self.collation), + ReplaceOne({"noCollation": 24}, {"bar": 42}), + UpdateOne({"noCollation": 84}, {"$set": {"bar": 10}}, upsert=True), + UpdateMany({"noCollation": 45}, {"$set": {"bar": 42}}), + ReplaceOne({"foo": 24}, {"foo": 42}, collation=self.collation), + UpdateOne( + {"foo": 84}, {"$set": {"foo": 10}}, upsert=True, collation=self.collation + ), + UpdateMany({"foo": 45}, {"$set": {"foo": 42}}, collation=self.collation), + ] + ) + + delete_cmd = self.listener.results["started"][0].command + update_cmd = self.listener.results["started"][1].command def check_ops(ops): for op in ops: - if 'noCollation' in op['q']: - self.assertNotIn('collation', op) + if "noCollation" in op["q"]: + self.assertNotIn("collation", op) else: - self.assertEqual(self.collation.document, - op['collation']) + self.assertEqual(self.collation.document, op["collation"]) - check_ops(delete_cmd['deletes']) - check_ops(update_cmd['updates']) + check_ops(delete_cmd["deletes"]) + check_ops(update_cmd["updates"]) def test_indexes_same_keys_different_collations(self): self.db.test.drop() - usa_collation = Collation('en_US') - ja_collation = Collation('ja') - self.db.test.create_indexes([ - IndexModel('fieldname', collation=usa_collation), - IndexModel('fieldname', name='japanese_version', - collation=ja_collation), - IndexModel('fieldname', name='simple') - ]) + usa_collation = Collation("en_US") + ja_collation = Collation("ja") + self.db.test.create_indexes( + [ + IndexModel("fieldname", collation=usa_collation), + IndexModel("fieldname", name="japanese_version", collation=ja_collation), + IndexModel("fieldname", name="simple"), + ] + ) indexes = self.db.test.index_information() - self.assertEqual(usa_collation.document['locale'], - indexes['fieldname_1']['collation']['locale']) - self.assertEqual(ja_collation.document['locale'], - indexes['japanese_version']['collation']['locale']) - self.assertNotIn('collation', indexes['simple']) - self.db.test.drop_index('fieldname_1') + self.assertEqual( + usa_collation.document["locale"], indexes["fieldname_1"]["collation"]["locale"] + ) + self.assertEqual( + ja_collation.document["locale"], indexes["japanese_version"]["collation"]["locale"] + ) + self.assertNotIn("collation", indexes["simple"]) + self.db.test.drop_index("fieldname_1") indexes = self.db.test.index_information() - self.assertIn('japanese_version', indexes) - self.assertIn('simple', indexes) - self.assertNotIn('fieldname', indexes) + self.assertIn("japanese_version", indexes) + self.assertIn("simple", indexes) + self.assertNotIn("fieldname", indexes) def test_unacknowledged_write(self): unacknowledged = WriteConcern(w=0) - collection = self.db.get_collection( - 'test', write_concern=unacknowledged) + collection = self.db.get_collection("test", write_concern=unacknowledged) with self.assertRaises(ConfigurationError): collection.update_one( - {'hello': 'world'}, {'$set': {'hello': 'moon'}}, - 
collation=self.collation) - update_one = UpdateOne({'hello': 'world'}, {'$set': {'hello': 'moon'}}, - collation=self.collation) + {"hello": "world"}, {"$set": {"hello": "moon"}}, collation=self.collation + ) + update_one = UpdateOne( + {"hello": "world"}, {"$set": {"hello": "moon"}}, collation=self.collation + ) with self.assertRaises(ConfigurationError): collection.bulk_write([update_one]) def test_cursor_collation(self): - self.db.test.insert_one({'hello': 'world'}) + self.db.test.insert_one({"hello": "world"}) next(self.db.test.find().collation(self.collation)) self.assertCollationInLastCommand() diff --git a/test/test_collection.py b/test/test_collection.py index 3d4a107aa9..f81c2c2645 100644 --- a/test/test_collection.py +++ b/test/test_collection.py @@ -19,7 +19,6 @@ import contextlib import re import sys - from codecs import utf_8_decode # type: ignore from collections import defaultdict from typing import no_type_check @@ -28,47 +27,57 @@ sys.path[0:0] = [""] +from test import client_context, unittest +from test.test_client import IntegrationTest +from test.utils import ( + IMPOSSIBLE_WRITE_CONCERN, + EventListener, + get_pool, + is_mongos, + rs_or_single_client, + single_client, + wait_until, +) + from bson import encode -from bson.raw_bson import RawBSONDocument -from bson.regex import Regex from bson.codec_options import CodecOptions from bson.objectid import ObjectId +from bson.raw_bson import RawBSONDocument +from bson.regex import Regex from bson.son import SON from pymongo import ASCENDING, DESCENDING, GEO2D, GEOSPHERE, HASHED, TEXT from pymongo.bulk import BulkWriteError from pymongo.collection import Collection, ReturnDocument from pymongo.command_cursor import CommandCursor from pymongo.cursor import CursorType -from pymongo.errors import (ConfigurationError, - DocumentTooLarge, - DuplicateKeyError, - ExecutionTimeout, - InvalidDocument, - InvalidName, - InvalidOperation, - OperationFailure, - WriteConcernError) +from pymongo.errors import ( + ConfigurationError, + DocumentTooLarge, + DuplicateKeyError, + ExecutionTimeout, + InvalidDocument, + InvalidName, + InvalidOperation, + OperationFailure, + WriteConcernError, +) from pymongo.message import _COMMAND_OVERHEAD, _gen_find_command from pymongo.mongo_client import MongoClient from pymongo.operations import * from pymongo.read_concern import DEFAULT_READ_CONCERN from pymongo.read_preferences import ReadPreference -from pymongo.results import (InsertOneResult, - InsertManyResult, - UpdateResult, - DeleteResult) +from pymongo.results import ( + DeleteResult, + InsertManyResult, + InsertOneResult, + UpdateResult, +) from pymongo.write_concern import WriteConcern -from test import client_context, unittest -from test.test_client import IntegrationTest -from test.utils import (get_pool, is_mongos, - rs_or_single_client, single_client, - wait_until, EventListener, - IMPOSSIBLE_WRITE_CONCERN) class TestCollectionNoConnect(unittest.TestCase): - """Test Collection features on a client that does not connect. - """ + """Test Collection features on a client that does not connect.""" + db: Database @classmethod @@ -95,7 +104,7 @@ def make_col(base, name): def test_getattr(self): coll = self.db.test - self.assertTrue(isinstance(coll['_does_not_exist'], Collection)) + self.assertTrue(isinstance(coll["_does_not_exist"], Collection)) with self.assertRaises(AttributeError) as context: coll._does_not_exist @@ -104,8 +113,7 @@ def test_getattr(self): # "AttributeError: Collection has no attribute '_does_not_exist'. 
To # access the test._does_not_exist collection, use # database['test._does_not_exist']." - self.assertIn("has no attribute '_does_not_exist'", - str(context.exception)) + self.assertIn("has no attribute '_does_not_exist'", str(context.exception)) coll2 = coll.with_options(write_concern=WriteConcern(w=0)) self.assertEqual(coll2.write_concern, WriteConcern(w=0)) @@ -143,8 +151,8 @@ def write_concern_collection(self): with self.assertRaises(WriteConcernError): # Unsatisfiable write concern. yield Collection( - self.db, 'test', - write_concern=WriteConcern(w=len(client_context.nodes) + 1)) + self.db, "test", write_concern=WriteConcern(w=len(client_context.nodes) + 1) + ) else: yield self.db.test @@ -163,33 +171,33 @@ def test_create(self): db = client_context.client.pymongo_test db.create_test_no_wc.drop() wait_until( - lambda: 'create_test_no_wc' not in db.list_collection_names(), - 'drop create_test_no_wc collection') - Collection(db, name='create_test_no_wc', create=True) + lambda: "create_test_no_wc" not in db.list_collection_names(), + "drop create_test_no_wc collection", + ) + Collection(db, name="create_test_no_wc", create=True) wait_until( - lambda: 'create_test_no_wc' in db.list_collection_names(), - 'create create_test_no_wc collection') + lambda: "create_test_no_wc" in db.list_collection_names(), + "create create_test_no_wc collection", + ) # SERVER-33317 - if (not client_context.is_mongos or not - client_context.version.at_least(3, 7, 0)): + if not client_context.is_mongos or not client_context.version.at_least(3, 7, 0): with self.assertRaises(OperationFailure): Collection( - db, name='create-test-wc', - write_concern=IMPOSSIBLE_WRITE_CONCERN, - create=True) + db, name="create-test-wc", write_concern=IMPOSSIBLE_WRITE_CONCERN, create=True + ) def test_drop_nonexistent_collection(self): - self.db.drop_collection('test') - self.assertFalse('test' in self.db.list_collection_names()) + self.db.drop_collection("test") + self.assertFalse("test" in self.db.list_collection_names()) # No exception - self.db.drop_collection('test') + self.db.drop_collection("test") def test_create_indexes(self): db = self.db - self.assertRaises(TypeError, db.test.create_indexes, 'foo') - self.assertRaises(TypeError, db.test.create_indexes, ['foo']) + self.assertRaises(TypeError, db.test.create_indexes, "foo") + self.assertRaises(TypeError, db.test.create_indexes, ["foo"]) self.assertRaises(TypeError, IndexModel, 5) self.assertRaises(ValueError, IndexModel, []) @@ -198,8 +206,7 @@ def test_create_indexes(self): self.assertEqual(len(db.test.index_information()), 1) db.test.create_indexes([IndexModel("hello")]) - db.test.create_indexes([IndexModel([("hello", DESCENDING), - ("world", ASCENDING)])]) + db.test.create_indexes([IndexModel([("hello", DESCENDING), ("world", ASCENDING)])]) # Tuple instead of list. 
db.test.create_indexes([IndexModel((("world", ASCENDING),))]) @@ -207,9 +214,9 @@ def test_create_indexes(self): self.assertEqual(len(db.test.index_information()), 4) db.test.drop_indexes() - names = db.test.create_indexes([IndexModel([("hello", DESCENDING), - ("world", ASCENDING)], - name="hello_world")]) + names = db.test.create_indexes( + [IndexModel([("hello", DESCENDING), ("world", ASCENDING)], name="hello_world")] + ) self.assertEqual(names, ["hello_world"]) db.test.drop_indexes() @@ -219,37 +226,35 @@ def test_create_indexes(self): db.test.drop_indexes() self.assertEqual(len(db.test.index_information()), 1) - names = db.test.create_indexes([IndexModel([("hello", DESCENDING), - ("world", ASCENDING)]), - IndexModel("hello")]) + names = db.test.create_indexes( + [IndexModel([("hello", DESCENDING), ("world", ASCENDING)]), IndexModel("hello")] + ) info = db.test.index_information() for name in names: self.assertTrue(name in info) db.test.drop() - db.test.insert_one({'a': 1}) - db.test.insert_one({'a': 1}) - self.assertRaises( - DuplicateKeyError, - db.test.create_indexes, - [IndexModel('a', unique=True)]) + db.test.insert_one({"a": 1}) + db.test.insert_one({"a": 1}) + self.assertRaises(DuplicateKeyError, db.test.create_indexes, [IndexModel("a", unique=True)]) with self.write_concern_collection() as coll: - coll.create_indexes([IndexModel('hello')]) + coll.create_indexes([IndexModel("hello")]) @client_context.require_version_max(4, 3, -1) def test_create_indexes_commitQuorum_requires_44(self): db = self.db with self.assertRaisesRegex( - ConfigurationError, - 'Must be connected to MongoDB 4\.4\+ to use the commitQuorum ' - 'option for createIndexes'): - db.coll.create_indexes([IndexModel('a')], commitQuorum="majority") + ConfigurationError, + "Must be connected to MongoDB 4\.4\+ to use the commitQuorum " + "option for createIndexes", + ): + db.coll.create_indexes([IndexModel("a")], commitQuorum="majority") @client_context.require_no_standalone @client_context.require_version_min(4, 4, -1) def test_create_indexes_commitQuorum(self): - self.db.coll.create_indexes([IndexModel('a')], commitQuorum="majority") + self.db.coll.create_indexes([IndexModel("a")], commitQuorum="majority") def test_create_index(self): db = self.db @@ -271,8 +276,7 @@ def test_create_index(self): self.assertEqual(len(db.test.index_information()), 4) db.test.drop_indexes() - ix = db.test.create_index([("hello", DESCENDING), - ("world", ASCENDING)], name="hello_world") + ix = db.test.create_index([("hello", DESCENDING), ("world", ASCENDING)], name="hello_world") self.assertEqual(ix, "hello_world") db.test.drop_indexes() @@ -286,13 +290,12 @@ def test_create_index(self): self.assertTrue("hello_-1_world_1" in db.test.index_information()) db.test.drop() - db.test.insert_one({'a': 1}) - db.test.insert_one({'a': 1}) - self.assertRaises( - DuplicateKeyError, db.test.create_index, 'a', unique=True) + db.test.insert_one({"a": 1}) + db.test.insert_one({"a": 1}) + self.assertRaises(DuplicateKeyError, db.test.create_index, "a", unique=True) with self.write_concern_collection() as coll: - coll.create_index([('hello', DESCENDING)]) + coll.create_index([("hello", DESCENDING)]) def test_drop_index(self): db = self.db @@ -321,31 +324,22 @@ def test_drop_index(self): self.assertTrue("hello_1" in db.test.index_information()) with self.write_concern_collection() as coll: - coll.drop_index('hello_1') + coll.drop_index("hello_1") @client_context.require_no_mongos @client_context.require_test_commands def 
test_index_management_max_time_ms(self): coll = self.db.test - self.client.admin.command("configureFailPoint", - "maxTimeAlwaysTimeOut", - mode="alwaysOn") + self.client.admin.command("configureFailPoint", "maxTimeAlwaysTimeOut", mode="alwaysOn") try: + self.assertRaises(ExecutionTimeout, coll.create_index, "foo", maxTimeMS=1) self.assertRaises( - ExecutionTimeout, coll.create_index, "foo", maxTimeMS=1) - self.assertRaises( - ExecutionTimeout, - coll.create_indexes, - [IndexModel("foo")], - maxTimeMS=1) - self.assertRaises( - ExecutionTimeout, coll.drop_index, "foo", maxTimeMS=1) - self.assertRaises( - ExecutionTimeout, coll.drop_indexes, maxTimeMS=1) + ExecutionTimeout, coll.create_indexes, [IndexModel("foo")], maxTimeMS=1 + ) + self.assertRaises(ExecutionTimeout, coll.drop_index, "foo", maxTimeMS=1) + self.assertRaises(ExecutionTimeout, coll.drop_indexes, maxTimeMS=1) finally: - self.client.admin.command("configureFailPoint", - "maxTimeAlwaysTimeOut", - mode="off") + self.client.admin.command("configureFailPoint", "maxTimeAlwaysTimeOut", mode="off") def test_list_indexes(self): db = self.db @@ -362,16 +356,15 @@ def map_indexes(indexes): db.test.create_index("hello") indexes = list(db.test.list_indexes()) self.assertEqual(len(indexes), 2) - self.assertEqual(map_indexes(indexes)["hello_1"]["key"], - SON([("hello", ASCENDING)])) + self.assertEqual(map_indexes(indexes)["hello_1"]["key"], SON([("hello", ASCENDING)])) - db.test.create_index([("hello", DESCENDING), ("world", ASCENDING)], - unique=True) + db.test.create_index([("hello", DESCENDING), ("world", ASCENDING)], unique=True) indexes = list(db.test.list_indexes()) self.assertEqual(len(indexes), 3) index_map = map_indexes(indexes) - self.assertEqual(index_map["hello_-1_world_1"]["key"], - SON([("hello", DESCENDING), ("world", ASCENDING)])) + self.assertEqual( + index_map["hello_-1_world_1"]["key"], SON([("hello", DESCENDING), ("world", ASCENDING)]) + ) self.assertEqual(True, index_map["hello_-1_world_1"]["unique"]) # List indexes on a collection that does not exist. 
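
Both this hunk and the next exercise PyMongo's two index-introspection APIs: list_indexes() returns a cursor of index documents as the server stores them, while index_information() repackages the same data into a dict keyed by index name. A minimal sketch of the difference (assuming only a locally running mongod on the default port; the demo.things namespace is a hypothetical scratch collection):

    from pymongo import ASCENDING, DESCENDING, MongoClient

    client = MongoClient()  # assumption: mongod listening on localhost:27017
    coll = client.demo.things  # hypothetical scratch collection
    coll.drop()
    coll.create_index([("hello", DESCENDING), ("world", ASCENDING)], unique=True)

    # list_indexes(): cursor of index documents, one per index,
    # including the implicit _id index.
    for index_doc in coll.list_indexes():
        print(index_doc["name"], list(index_doc["key"].items()))

    # index_information(): the same data keyed by index name; "key" is a
    # list of (field, direction) pairs, which is what these tests assert on.
    info = coll.index_information()
    assert info["hello_-1_world_1"]["key"] == [("hello", -1), ("world", 1)]
    assert info["hello_-1_world_1"]["unique"]
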
@@ -391,26 +384,23 @@ def test_index_info(self): db.test.create_index("hello") self.assertEqual(len(db.test.index_information()), 2) - self.assertEqual(db.test.index_information()["hello_1"]["key"], - [("hello", ASCENDING)]) + self.assertEqual(db.test.index_information()["hello_1"]["key"], [("hello", ASCENDING)]) - db.test.create_index([("hello", DESCENDING), ("world", ASCENDING)], - unique=True) - self.assertEqual(db.test.index_information()["hello_1"]["key"], - [("hello", ASCENDING)]) + db.test.create_index([("hello", DESCENDING), ("world", ASCENDING)], unique=True) + self.assertEqual(db.test.index_information()["hello_1"]["key"], [("hello", ASCENDING)]) self.assertEqual(len(db.test.index_information()), 3) - self.assertEqual([("hello", DESCENDING), ("world", ASCENDING)], - db.test.index_information()["hello_-1_world_1"]["key"] - ) self.assertEqual( - True, db.test.index_information()["hello_-1_world_1"]["unique"]) + [("hello", DESCENDING), ("world", ASCENDING)], + db.test.index_information()["hello_-1_world_1"]["key"], + ) + self.assertEqual(True, db.test.index_information()["hello_-1_world_1"]["unique"]) def test_index_geo2d(self): db = self.db db.test.drop_indexes() - self.assertEqual('loc_2d', db.test.create_index([("loc", GEO2D)])) - index_info = db.test.index_information()['loc_2d'] - self.assertEqual([('loc', '2d')], index_info['key']) + self.assertEqual("loc_2d", db.test.create_index([("loc", GEO2D)])) + index_info = db.test.index_information()["loc_2d"] + self.assertEqual([("loc", "2d")], index_info["key"]) # geoSearch was deprecated in 4.4 and removed in 5.0 @client_context.require_version_max(4, 5) @@ -418,35 +408,29 @@ def test_index_geo2d(self): def test_index_haystack(self): db = self.db db.test.drop() - _id = db.test.insert_one({ - "pos": {"long": 34.2, "lat": 33.3}, - "type": "restaurant" - }).inserted_id - db.test.insert_one({ - "pos": {"long": 34.2, "lat": 37.3}, "type": "restaurant" - }) - db.test.insert_one({ - "pos": {"long": 59.1, "lat": 87.2}, "type": "office" - }) - db.test.create_index( - [("pos", "geoHaystack"), ("type", ASCENDING)], - bucketSize=1 - ) - - results = db.command(SON([ - ("geoSearch", "test"), - ("near", [33, 33]), - ("maxDistance", 6), - ("search", {"type": "restaurant"}), - ("limit", 30), - ]))['results'] + _id = db.test.insert_one( + {"pos": {"long": 34.2, "lat": 33.3}, "type": "restaurant"} + ).inserted_id + db.test.insert_one({"pos": {"long": 34.2, "lat": 37.3}, "type": "restaurant"}) + db.test.insert_one({"pos": {"long": 59.1, "lat": 87.2}, "type": "office"}) + db.test.create_index([("pos", "geoHaystack"), ("type", ASCENDING)], bucketSize=1) + + results = db.command( + SON( + [ + ("geoSearch", "test"), + ("near", [33, 33]), + ("maxDistance", 6), + ("search", {"type": "restaurant"}), + ("limit", 30), + ] + ) + )["results"] self.assertEqual(2, len(results)) - self.assertEqual({ - "_id": _id, - "pos": {"long": 34.2, "lat": 33.3}, - "type": "restaurant" - }, results[0]) + self.assertEqual( + {"_id": _id, "pos": {"long": 34.2, "lat": 33.3}, "type": "restaurant"}, results[0] + ) @client_context.require_no_mongos def test_index_text(self): @@ -456,38 +440,33 @@ def test_index_text(self): index_info = db.test.index_information()["t_text"] self.assertTrue("weights" in index_info) - db.test.insert_many([ - {'t': 'spam eggs and spam'}, - {'t': 'spam'}, - {'t': 'egg sausage and bacon'}]) + db.test.insert_many( + [{"t": "spam eggs and spam"}, {"t": "spam"}, {"t": "egg sausage and bacon"}] + ) # MongoDB 2.6 text search. Create 'score' field in projection. 
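        # The {"$meta": "textScore"} projection materializes the relevance
        # score computed by the $text match into each returned document; the
        # matching sort specification below then orders results by that score.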
- cursor = db.test.find( - {'$text': {'$search': 'spam'}}, - {'score': {'$meta': 'textScore'}}) + cursor = db.test.find({"$text": {"$search": "spam"}}, {"score": {"$meta": "textScore"}}) # Sort by 'score' field. - cursor.sort([('score', {'$meta': 'textScore'})]) + cursor.sort([("score", {"$meta": "textScore"})]) results = list(cursor) - self.assertTrue(results[0]['score'] >= results[1]['score']) + self.assertTrue(results[0]["score"] >= results[1]["score"]) db.test.drop_indexes() def test_index_2dsphere(self): db = self.db db.test.drop_indexes() - self.assertEqual("geo_2dsphere", - db.test.create_index([("geo", GEOSPHERE)])) + self.assertEqual("geo_2dsphere", db.test.create_index([("geo", GEOSPHERE)])) for dummy, info in db.test.index_information().items(): - field, idx_type = info['key'][0] - if field == 'geo' and idx_type == '2dsphere': + field, idx_type = info["key"][0] + if field == "geo" and idx_type == "2dsphere": break else: self.fail("2dsphere index not found.") - poly = {"type": "Polygon", - "coordinates": [[[40, 5], [40, 6], [41, 6], [41, 5], [40, 5]]]} + poly = {"type": "Polygon", "coordinates": [[[40, 5], [40, 6], [41, 6], [41, 5], [40, 5]]]} query = {"geo": {"$within": {"$geometry": poly}}} # This query will error without a 2dsphere index. @@ -497,12 +476,11 @@ def test_index_2dsphere(self): def test_index_hashed(self): db = self.db db.test.drop_indexes() - self.assertEqual("a_hashed", - db.test.create_index([("a", HASHED)])) + self.assertEqual("a_hashed", db.test.create_index([("a", HASHED)])) for dummy, info in db.test.index_information().items(): - field, idx_type = info['key'][0] - if field == 'a' and idx_type == 'hashed': + field, idx_type = info["key"][0] + if field == "a" and idx_type == "hashed": break else: self.fail("hashed index not found.") @@ -512,25 +490,25 @@ def test_index_hashed(self): def test_index_sparse(self): db = self.db db.test.drop_indexes() - db.test.create_index([('key', ASCENDING)], sparse=True) - self.assertTrue(db.test.index_information()['key_1']['sparse']) + db.test.create_index([("key", ASCENDING)], sparse=True) + self.assertTrue(db.test.index_information()["key_1"]["sparse"]) def test_index_background(self): db = self.db db.test.drop_indexes() - db.test.create_index([('keya', ASCENDING)]) - db.test.create_index([('keyb', ASCENDING)], background=False) - db.test.create_index([('keyc', ASCENDING)], background=True) - self.assertFalse('background' in db.test.index_information()['keya_1']) - self.assertFalse(db.test.index_information()['keyb_1']['background']) - self.assertTrue(db.test.index_information()['keyc_1']['background']) + db.test.create_index([("keya", ASCENDING)]) + db.test.create_index([("keyb", ASCENDING)], background=False) + db.test.create_index([("keyc", ASCENDING)], background=True) + self.assertFalse("background" in db.test.index_information()["keya_1"]) + self.assertFalse(db.test.index_information()["keyb_1"]["background"]) + self.assertTrue(db.test.index_information()["keyc_1"]["background"]) def _drop_dups_setup(self, db): - db.drop_collection('test') - db.test.insert_one({'i': 1}) - db.test.insert_one({'i': 2}) - db.test.insert_one({'i': 2}) # duplicate - db.test.insert_one({'i': 3}) + db.drop_collection("test") + db.test.insert_one({"i": 1}) + db.test.insert_one({"i": 2}) + db.test.insert_one({"i": 2}) # duplicate + db.test.insert_one({"i": 3}) def test_index_dont_drop_dups(self): # Try *not* dropping duplicates @@ -539,11 +517,8 @@ def test_index_dont_drop_dups(self): # There's a duplicate def test_create(): - 
db.test.create_index( - [('i', ASCENDING)], - unique=True, - dropDups=False - ) + db.test.create_index([("i", ASCENDING)], unique=True, dropDups=False) + self.assertRaises(DuplicateKeyError, test_create) # Duplicate wasn't dropped @@ -554,12 +529,12 @@ def test_create(): # Get the plan dynamically because the explain format will change. def get_plan_stage(self, root, stage): - if root.get('stage') == stage: + if root.get("stage") == stage: return root elif "inputStage" in root: - return self.get_plan_stage(root['inputStage'], stage) + return self.get_plan_stage(root["inputStage"], stage) elif "inputStages" in root: - for i in root['inputStages']: + for i in root["inputStages"]: stage = self.get_plan_stage(i, stage) if stage: return stage @@ -567,8 +542,8 @@ def get_plan_stage(self, root, stage): # queryPlan (and slotBasedPlan) are new in 5.0. return self.get_plan_stage(root["queryPlan"], stage) elif "shards" in root: - for i in root['shards']: - stage = self.get_plan_stage(i['winningPlan'], stage) + for i in root["shards"]: + stage = self.get_plan_stage(i["winningPlan"], stage) if stage: return stage return {} @@ -578,52 +553,52 @@ def test_index_filter(self): db.drop_collection("test") # Test bad filter spec on create. - self.assertRaises(OperationFailure, db.test.create_index, "x", - partialFilterExpression=5) - self.assertRaises(OperationFailure, db.test.create_index, "x", - partialFilterExpression={"x": {"$asdasd": 3}}) - self.assertRaises(OperationFailure, db.test.create_index, "x", - partialFilterExpression={"$and": 5}) - - self.assertEqual("x_1", db.test.create_index( - [('x', ASCENDING)], partialFilterExpression={"a": {"$lte": 1.5}})) + self.assertRaises(OperationFailure, db.test.create_index, "x", partialFilterExpression=5) + self.assertRaises( + OperationFailure, + db.test.create_index, + "x", + partialFilterExpression={"x": {"$asdasd": 3}}, + ) + self.assertRaises( + OperationFailure, db.test.create_index, "x", partialFilterExpression={"$and": 5} + ) + + self.assertEqual( + "x_1", + db.test.create_index([("x", ASCENDING)], partialFilterExpression={"a": {"$lte": 1.5}}), + ) db.test.insert_one({"x": 5, "a": 2}) db.test.insert_one({"x": 6, "a": 1}) # Operations that use the partial index. explain = db.test.find({"x": 6, "a": 1}).explain() - stage = self.get_plan_stage(explain['queryPlanner']['winningPlan'], - 'IXSCAN') - self.assertEqual("x_1", stage.get('indexName')) - self.assertTrue(stage.get('isPartial')) + stage = self.get_plan_stage(explain["queryPlanner"]["winningPlan"], "IXSCAN") + self.assertEqual("x_1", stage.get("indexName")) + self.assertTrue(stage.get("isPartial")) explain = db.test.find({"x": {"$gt": 1}, "a": 1}).explain() - stage = self.get_plan_stage(explain['queryPlanner']['winningPlan'], - 'IXSCAN') - self.assertEqual("x_1", stage.get('indexName')) - self.assertTrue(stage.get('isPartial')) + stage = self.get_plan_stage(explain["queryPlanner"]["winningPlan"], "IXSCAN") + self.assertEqual("x_1", stage.get("indexName")) + self.assertTrue(stage.get("isPartial")) explain = db.test.find({"x": 6, "a": {"$lte": 1}}).explain() - stage = self.get_plan_stage(explain['queryPlanner']['winningPlan'], - 'IXSCAN') - self.assertEqual("x_1", stage.get('indexName')) - self.assertTrue(stage.get('isPartial')) + stage = self.get_plan_stage(explain["queryPlanner"]["winningPlan"], "IXSCAN") + self.assertEqual("x_1", stage.get("indexName")) + self.assertTrue(stage.get("isPartial")) # Operations that do not use the partial index. 
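        # Neither query below implies the index's partialFilterExpression
        # ({"a": {"$lte": 1.5}}), so the planner cannot answer them from the
        # partial index and falls back to a collection scan.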
explain = db.test.find({"x": 6, "a": {"$lte": 1.6}}).explain() - stage = self.get_plan_stage(explain['queryPlanner']['winningPlan'], - 'COLLSCAN') + stage = self.get_plan_stage(explain["queryPlanner"]["winningPlan"], "COLLSCAN") self.assertNotEqual({}, stage) explain = db.test.find({"x": 6}).explain() - stage = self.get_plan_stage(explain['queryPlanner']['winningPlan'], - 'COLLSCAN') + stage = self.get_plan_stage(explain["queryPlanner"]["winningPlan"], "COLLSCAN") self.assertNotEqual({}, stage) # Test drop_indexes. db.test.drop_index("x_1") explain = db.test.find({"x": 6, "a": 1}).explain() - stage = self.get_plan_stage(explain['queryPlanner']['winningPlan'], - 'COLLSCAN') + stage = self.get_plan_stage(explain["queryPlanner"]["winningPlan"], "COLLSCAN") self.assertNotEqual({}, stage) def test_field_selection(self): @@ -685,7 +660,7 @@ def test_options(self): db.drop_collection("test") db.create_collection("test", capped=True, size=4096) result = db.test.options() - self.assertEqual(result, {"capped": True, 'size': 4096}) + self.assertEqual(result, {"capped": True, "size": 4096}) db.drop_collection("test") def test_insert_one(self): @@ -710,19 +685,16 @@ def test_insert_one(self): self.assertIsNotNone(db.test.find_one({"_id": document["_id"]})) self.assertEqual(2, db.test.count_documents({})) - db = db.client.get_database(db.name, - write_concern=WriteConcern(w=0)) + db = db.client.get_database(db.name, write_concern=WriteConcern(w=0)) result = db.test.insert_one(document) self.assertTrue(isinstance(result, InsertOneResult)) self.assertTrue(isinstance(result.inserted_id, ObjectId)) self.assertEqual(document["_id"], result.inserted_id) self.assertFalse(result.acknowledged) # The insert failed duplicate key... - wait_until(lambda: 2 == db.test.count_documents({}), - 'forcing duplicate key error') + wait_until(lambda: 2 == db.test.count_documents({}), "forcing duplicate key error") - document = RawBSONDocument( - encode({'_id': ObjectId(), 'foo': 'bar'})) + document = RawBSONDocument(encode({"_id": ObjectId(), "foo": "bar"})) result = db.test.insert_one(document) self.assertTrue(isinstance(result, InsertOneResult)) self.assertEqual(result.inserted_id, None) @@ -740,7 +712,7 @@ def test_insert_many(self): _id = doc["_id"] self.assertTrue(isinstance(_id, ObjectId)) self.assertTrue(_id in result.inserted_ids) - self.assertEqual(1, db.test.count_documents({'_id': _id})) + self.assertEqual(1, db.test.count_documents({"_id": _id})) self.assertTrue(result.acknowledged) docs = [{"_id": i} for i in range(5)] @@ -755,15 +727,13 @@ def test_insert_many(self): self.assertEqual(1, db.test.count_documents({"_id": _id})) self.assertTrue(result.acknowledged) - docs = [RawBSONDocument(encode({"_id": i + 5})) - for i in range(5)] + docs = [RawBSONDocument(encode({"_id": i + 5})) for i in range(5)] result = db.test.insert_many(docs) self.assertTrue(isinstance(result, InsertManyResult)) self.assertTrue(isinstance(result.inserted_ids, list)) self.assertEqual([], result.inserted_ids) - db = db.client.get_database(db.name, - write_concern=WriteConcern(w=0)) + db = db.client.get_database(db.name, write_concern=WriteConcern(w=0)) docs: list = [{} for _ in range(5)] result = db.test.insert_many(docs) self.assertTrue(isinstance(result, InsertManyResult)) @@ -775,11 +745,11 @@ def test_insert_many_generator(self): coll.delete_many({}) def gen(): - yield {'a': 1, 'b': 1} - yield {'a': 1, 'b': 2} - yield {'a': 2, 'b': 3} - yield {'a': 3, 'b': 5} - yield {'a': 5, 'b': 8} + yield {"a": 1, "b": 1} + yield {"a": 1, "b": 2} + 
yield {"a": 2, "b": 3} + yield {"a": 3, "b": 5} + yield {"a": 5, "b": 8} result = coll.insert_many(gen()) self.assertEqual(5, len(result.inserted_ids)) @@ -787,21 +757,17 @@ def gen(): def test_insert_many_invalid(self): db = self.db - with self.assertRaisesRegex( - TypeError, "documents must be a non-empty list"): + with self.assertRaisesRegex(TypeError, "documents must be a non-empty list"): db.test.insert_many({}) - with self.assertRaisesRegex( - TypeError, "documents must be a non-empty list"): + with self.assertRaisesRegex(TypeError, "documents must be a non-empty list"): db.test.insert_many([]) - with self.assertRaisesRegex( - TypeError, "documents must be a non-empty list"): + with self.assertRaisesRegex(TypeError, "documents must be a non-empty list"): db.test.insert_many(1) # type: ignore[arg-type] - with self.assertRaisesRegex( - TypeError, "documents must be a non-empty list"): - db.test.insert_many(RawBSONDocument(encode({'_id': 2}))) # type: ignore[arg-type] + with self.assertRaisesRegex(TypeError, "documents must be a non-empty list"): + db.test.insert_many(RawBSONDocument(encode({"_id": 2}))) # type: ignore[arg-type] def test_delete_one(self): self.db.test.drop() @@ -822,13 +788,12 @@ def test_delete_one(self): self.assertTrue(result.acknowledged) self.assertEqual(1, self.db.test.count_documents({})) - db = self.db.client.get_database(self.db.name, - write_concern=WriteConcern(w=0)) + db = self.db.client.get_database(self.db.name, write_concern=WriteConcern(w=0)) result = db.test.delete_one({"z": 1}) self.assertTrue(isinstance(result, DeleteResult)) self.assertRaises(InvalidOperation, lambda: result.deleted_count) self.assertFalse(result.acknowledged) - wait_until(lambda: 0 == db.test.count_documents({}), 'delete 1 documents') + wait_until(lambda: 0 == db.test.count_documents({}), "delete 1 documents") def test_delete_many(self): self.db.test.drop() @@ -844,25 +809,20 @@ def test_delete_many(self): self.assertTrue(result.acknowledged) self.assertEqual(0, self.db.test.count_documents({"x": 1})) - db = self.db.client.get_database(self.db.name, - write_concern=WriteConcern(w=0)) + db = self.db.client.get_database(self.db.name, write_concern=WriteConcern(w=0)) result = db.test.delete_many({"y": 1}) self.assertTrue(isinstance(result, DeleteResult)) self.assertRaises(InvalidOperation, lambda: result.deleted_count) self.assertFalse(result.acknowledged) - wait_until( - lambda: 0 == db.test.count_documents({}), 'delete 2 documents') + wait_until(lambda: 0 == db.test.count_documents({}), "delete 2 documents") def test_command_document_too_large(self): - large = '*' * (client_context.max_bson_size + _COMMAND_OVERHEAD) + large = "*" * (client_context.max_bson_size + _COMMAND_OVERHEAD) coll = self.db.test - self.assertRaises( - DocumentTooLarge, coll.insert_one, {'data': large}) + self.assertRaises(DocumentTooLarge, coll.insert_one, {"data": large}) # update_one and update_many are the same - self.assertRaises( - DocumentTooLarge, coll.replace_one, {}, {'data': large}) - self.assertRaises( - DocumentTooLarge, coll.delete_one, {'data': large}) + self.assertRaises(DocumentTooLarge, coll.replace_one, {}, {"data": large}) + self.assertRaises(DocumentTooLarge, coll.delete_one, {"data": large}) def test_write_large_document(self): max_size = client_context.max_bson_size @@ -871,42 +831,38 @@ def test_write_large_document(self): half_str = "x" * half_size self.assertEqual(max_size, 16777216) - self.assertRaises(OperationFailure, self.db.test.insert_one, - {"foo": max_str}) - 
self.assertRaises(OperationFailure, self.db.test.replace_one, - {}, {"foo": max_str}, upsert=True) - self.assertRaises(OperationFailure, self.db.test.insert_many, - [{"x": 1}, {"foo": max_str}]) + self.assertRaises(OperationFailure, self.db.test.insert_one, {"foo": max_str}) + self.assertRaises( + OperationFailure, self.db.test.replace_one, {}, {"foo": max_str}, upsert=True + ) + self.assertRaises(OperationFailure, self.db.test.insert_many, [{"x": 1}, {"foo": max_str}]) self.db.test.insert_many([{"foo": half_str}, {"foo": half_str}]) self.db.test.insert_one({"bar": "x"}) # Use w=0 here to test legacy doc size checking in all server versions unack_coll = self.db.test.with_options(write_concern=WriteConcern(w=0)) - self.assertRaises(DocumentTooLarge, unack_coll.replace_one, - {"bar": "x"}, {"bar": "x" * (max_size - 14)}) + self.assertRaises( + DocumentTooLarge, unack_coll.replace_one, {"bar": "x"}, {"bar": "x" * (max_size - 14)} + ) self.db.test.replace_one({"bar": "x"}, {"bar": "x" * (max_size - 32)}) def test_insert_bypass_document_validation(self): db = self.db db.test.drop() db.create_collection("test", validator={"a": {"$exists": True}}) - db_w0 = self.db.client.get_database( - self.db.name, write_concern=WriteConcern(w=0)) + db_w0 = self.db.client.get_database(self.db.name, write_concern=WriteConcern(w=0)) # Test insert_one - self.assertRaises(OperationFailure, db.test.insert_one, - {"_id": 1, "x": 100}) - result = db.test.insert_one({"_id": 1, "x": 100}, - bypass_document_validation=True) + self.assertRaises(OperationFailure, db.test.insert_one, {"_id": 1, "x": 100}) + result = db.test.insert_one({"_id": 1, "x": 100}, bypass_document_validation=True) self.assertTrue(isinstance(result, InsertOneResult)) self.assertEqual(1, result.inserted_id) - result = db.test.insert_one({"_id":2, "a":0}) + result = db.test.insert_one({"_id": 2, "a": 0}) self.assertTrue(isinstance(result, InsertOneResult)) self.assertEqual(2, result.inserted_id) db_w0.test.insert_one({"y": 1}, bypass_document_validation=True) - wait_until(lambda: db_w0.test.find_one({"y": 1}), - "find w:0 inserted document") + wait_until(lambda: db_w0.test.find_one({"y": 1}), "find w:0 inserted document") # Test insert_many docs = [{"_id": i, "x": 100 - i} for i in range(3, 100)] @@ -931,25 +887,25 @@ def test_insert_bypass_document_validation(self): self.assertEqual(1, db.test.count_documents({"a": doc["a"]})) self.assertTrue(result.acknowledged) - self.assertRaises(OperationFailure, db_w0.test.insert_many, - [{"x": 1}, {"x": 2}], - bypass_document_validation=True) + self.assertRaises( + OperationFailure, + db_w0.test.insert_many, + [{"x": 1}, {"x": 2}], + bypass_document_validation=True, + ) def test_replace_bypass_document_validation(self): db = self.db db.test.drop() db.create_collection("test", validator={"a": {"$exists": True}}) - db_w0 = self.db.client.get_database( - self.db.name, write_concern=WriteConcern(w=0)) + db_w0 = self.db.client.get_database(self.db.name, write_concern=WriteConcern(w=0)) # Test replace_one db.test.insert_one({"a": 101}) - self.assertRaises(OperationFailure, db.test.replace_one, - {"a": 101}, {"y": 1}) + self.assertRaises(OperationFailure, db.test.replace_one, {"a": 101}, {"y": 1}) self.assertEqual(0, db.test.count_documents({"y": 1})) self.assertEqual(1, db.test.count_documents({"a": 101})) - db.test.replace_one({"a": 101}, {"y": 1}, - bypass_document_validation=True) + db.test.replace_one({"a": 101}, {"y": 1}, bypass_document_validation=True) self.assertEqual(0, db.test.count_documents({"a": 101})) 
self.assertEqual(1, db.test.count_documents({"y": 1})) db.test.replace_one({"y": 1}, {"a": 102}) @@ -958,123 +914,107 @@ def test_replace_bypass_document_validation(self): self.assertEqual(1, db.test.count_documents({"a": 102})) db.test.insert_one({"y": 1}, bypass_document_validation=True) - self.assertRaises(OperationFailure, db.test.replace_one, - {"y": 1}, {"x": 101}) + self.assertRaises(OperationFailure, db.test.replace_one, {"y": 1}, {"x": 101}) self.assertEqual(0, db.test.count_documents({"x": 101})) self.assertEqual(1, db.test.count_documents({"y": 1})) - db.test.replace_one({"y": 1}, {"x": 101}, - bypass_document_validation=True) + db.test.replace_one({"y": 1}, {"x": 101}, bypass_document_validation=True) self.assertEqual(0, db.test.count_documents({"y": 1})) self.assertEqual(1, db.test.count_documents({"x": 101})) - db.test.replace_one({"x": 101}, {"a": 103}, - bypass_document_validation=False) + db.test.replace_one({"x": 101}, {"a": 103}, bypass_document_validation=False) self.assertEqual(0, db.test.count_documents({"x": 101})) self.assertEqual(1, db.test.count_documents({"a": 103})) db.test.insert_one({"y": 1}, bypass_document_validation=True) - db_w0.test.replace_one({"y": 1}, {"x": 1}, - bypass_document_validation=True) - wait_until(lambda: db_w0.test.find_one({"x": 1}), - "find w:0 replaced document") + db_w0.test.replace_one({"y": 1}, {"x": 1}, bypass_document_validation=True) + wait_until(lambda: db_w0.test.find_one({"x": 1}), "find w:0 replaced document") def test_update_bypass_document_validation(self): db = self.db db.test.drop() db.test.insert_one({"z": 5}) - db.command(SON([("collMod", "test"), - ("validator", {"z": {"$gte": 0}})])) - db_w0 = self.db.client.get_database( - self.db.name, write_concern=WriteConcern(w=0)) + db.command(SON([("collMod", "test"), ("validator", {"z": {"$gte": 0}})])) + db_w0 = self.db.client.get_database(self.db.name, write_concern=WriteConcern(w=0)) # Test update_one - self.assertRaises(OperationFailure, db.test.update_one, - {"z": 5}, {"$inc": {"z": -10}}) + self.assertRaises(OperationFailure, db.test.update_one, {"z": 5}, {"$inc": {"z": -10}}) self.assertEqual(0, db.test.count_documents({"z": -5})) self.assertEqual(1, db.test.count_documents({"z": 5})) - db.test.update_one({"z": 5}, {"$inc": {"z": -10}}, - bypass_document_validation=True) + db.test.update_one({"z": 5}, {"$inc": {"z": -10}}, bypass_document_validation=True) self.assertEqual(0, db.test.count_documents({"z": 5})) self.assertEqual(1, db.test.count_documents({"z": -5})) - db.test.update_one({"z": -5}, {"$inc": {"z": 6}}, - bypass_document_validation=False) + db.test.update_one({"z": -5}, {"$inc": {"z": 6}}, bypass_document_validation=False) self.assertEqual(1, db.test.count_documents({"z": 1})) self.assertEqual(0, db.test.count_documents({"z": -5})) - db.test.insert_one({"z": -10}, - bypass_document_validation=True) - self.assertRaises(OperationFailure, db.test.update_one, - {"z": -10}, {"$inc": {"z": 1}}) + db.test.insert_one({"z": -10}, bypass_document_validation=True) + self.assertRaises(OperationFailure, db.test.update_one, {"z": -10}, {"$inc": {"z": 1}}) self.assertEqual(0, db.test.count_documents({"z": -9})) self.assertEqual(1, db.test.count_documents({"z": -10})) - db.test.update_one({"z": -10}, {"$inc": {"z": 1}}, - bypass_document_validation=True) + db.test.update_one({"z": -10}, {"$inc": {"z": 1}}, bypass_document_validation=True) self.assertEqual(1, db.test.count_documents({"z": -9})) self.assertEqual(0, db.test.count_documents({"z": -10})) - db.test.update_one({"z": 
-9}, {"$inc": {"z": 9}}, - bypass_document_validation=False) + db.test.update_one({"z": -9}, {"$inc": {"z": 9}}, bypass_document_validation=False) self.assertEqual(0, db.test.count_documents({"z": -9})) self.assertEqual(1, db.test.count_documents({"z": 0})) db.test.insert_one({"y": 1, "x": 0}, bypass_document_validation=True) - db_w0.test.update_one({"y": 1}, {"$inc": {"x": 1}}, - bypass_document_validation=True) - wait_until(lambda: db_w0.test.find_one({"y": 1, "x": 1}), - "find w:0 updated document") + db_w0.test.update_one({"y": 1}, {"$inc": {"x": 1}}, bypass_document_validation=True) + wait_until(lambda: db_w0.test.find_one({"y": 1, "x": 1}), "find w:0 updated document") # Test update_many db.test.insert_many([{"z": i} for i in range(3, 101)]) - db.test.insert_one({"y": 0}, - bypass_document_validation=True) - self.assertRaises(OperationFailure, db.test.update_many, {}, - {"$inc": {"z": -100}}) + db.test.insert_one({"y": 0}, bypass_document_validation=True) + self.assertRaises(OperationFailure, db.test.update_many, {}, {"$inc": {"z": -100}}) self.assertEqual(100, db.test.count_documents({"z": {"$gte": 0}})) self.assertEqual(0, db.test.count_documents({"z": {"$lt": 0}})) self.assertEqual(0, db.test.count_documents({"y": 0, "z": -100})) - db.test.update_many({"z": {"$gte": 0}}, {"$inc": {"z": -100}}, - bypass_document_validation=True) + db.test.update_many( + {"z": {"$gte": 0}}, {"$inc": {"z": -100}}, bypass_document_validation=True + ) self.assertEqual(0, db.test.count_documents({"z": {"$gt": 0}})) self.assertEqual(100, db.test.count_documents({"z": {"$lte": 0}})) - db.test.update_many({"z": {"$gt": -50}}, {"$inc": {"z": 100}}, - bypass_document_validation=False) + db.test.update_many( + {"z": {"$gt": -50}}, {"$inc": {"z": 100}}, bypass_document_validation=False + ) self.assertEqual(50, db.test.count_documents({"z": {"$gt": 0}})) self.assertEqual(50, db.test.count_documents({"z": {"$lt": 0}})) - db.test.insert_many([{"z": -i} for i in range(50)], - bypass_document_validation=True) - self.assertRaises(OperationFailure, db.test.update_many, - {}, {"$inc": {"z": 1}}) + db.test.insert_many([{"z": -i} for i in range(50)], bypass_document_validation=True) + self.assertRaises(OperationFailure, db.test.update_many, {}, {"$inc": {"z": 1}}) self.assertEqual(100, db.test.count_documents({"z": {"$lte": 0}})) self.assertEqual(50, db.test.count_documents({"z": {"$gt": 1}})) - db.test.update_many({"z": {"$gte": 0}}, {"$inc": {"z": -100}}, - bypass_document_validation=True) + db.test.update_many( + {"z": {"$gte": 0}}, {"$inc": {"z": -100}}, bypass_document_validation=True + ) self.assertEqual(0, db.test.count_documents({"z": {"$gt": 0}})) self.assertEqual(150, db.test.count_documents({"z": {"$lte": 0}})) - db.test.update_many({"z": {"$lte": 0}}, {"$inc": {"z": 100}}, - bypass_document_validation=False) + db.test.update_many( + {"z": {"$lte": 0}}, {"$inc": {"z": 100}}, bypass_document_validation=False + ) self.assertEqual(150, db.test.count_documents({"z": {"$gte": 0}})) self.assertEqual(0, db.test.count_documents({"z": {"$lt": 0}})) db.test.insert_one({"m": 1, "x": 0}, bypass_document_validation=True) db.test.insert_one({"m": 1, "x": 0}, bypass_document_validation=True) - db_w0.test.update_many({"m": 1}, {"$inc": {"x": 1}}, - bypass_document_validation=True) + db_w0.test.update_many({"m": 1}, {"$inc": {"x": 1}}, bypass_document_validation=True) wait_until( - lambda: db_w0.test.count_documents({"m": 1, "x": 1}) == 2, - "find w:0 updated documents") + lambda: db_w0.test.count_documents({"m": 1, "x": 1}) 
== 2, "find w:0 updated documents" + ) def test_bypass_document_validation_bulk_write(self): db = self.db db.test.drop() db.create_collection("test", validator={"a": {"$gte": 0}}) - db_w0 = self.db.client.get_database( - self.db.name, write_concern=WriteConcern(w=0)) - - ops: list = [InsertOne({"a": -10}), - InsertOne({"a": -11}), - InsertOne({"a": -12}), - UpdateOne({"a": {"$lte": -10}}, {"$inc": {"a": 1}}), - UpdateMany({"a": {"$lte": -10}}, {"$inc": {"a": 1}}), - ReplaceOne({"a": {"$lte": -10}}, {"a": -1})] + db_w0 = self.db.client.get_database(self.db.name, write_concern=WriteConcern(w=0)) + + ops: list = [ + InsertOne({"a": -10}), + InsertOne({"a": -11}), + InsertOne({"a": -12}), + UpdateOne({"a": {"$lte": -10}}, {"$inc": {"a": 1}}), + UpdateMany({"a": {"$lte": -10}}, {"$inc": {"a": 1}}), + ReplaceOne({"a": {"$lte": -10}}, {"a": -1}), + ] db.test.bulk_write(ops, bypass_document_validation=True) self.assertEqual(3, db.test.count_documents({})) @@ -1086,22 +1026,22 @@ def test_bypass_document_validation_bulk_write(self): for op in ops: self.assertRaises(BulkWriteError, db.test.bulk_write, [op]) - self.assertRaises(OperationFailure, db_w0.test.bulk_write, ops, - bypass_document_validation=True) + self.assertRaises( + OperationFailure, db_w0.test.bulk_write, ops, bypass_document_validation=True + ) def test_find_by_default_dct(self): db = self.db - db.test.insert_one({'foo': 'bar'}) - dct = defaultdict(dict, [('foo', 'bar')]) # type: ignore[arg-type] + db.test.insert_one({"foo": "bar"}) + dct = defaultdict(dict, [("foo", "bar")]) # type: ignore[arg-type] self.assertIsNotNone(db.test.find_one(dct)) - self.assertEqual(dct, defaultdict(dict, [('foo', 'bar')])) + self.assertEqual(dct, defaultdict(dict, [("foo", "bar")])) def test_find_w_fields(self): db = self.db db.test.delete_many({}) - db.test.insert_one({"x": 1, "mike": "awesome", - "extra thing": "abcdefghijklmnopqrstuvwxyz"}) + db.test.insert_one({"x": 1, "mike": "awesome", "extra thing": "abcdefghijklmnopqrstuvwxyz"}) self.assertEqual(1, db.test.count_documents({})) doc = next(db.test.find({})) self.assertTrue("x" in doc) @@ -1130,9 +1070,7 @@ def test_fields_specifier_as_dict(self): db.test.insert_one({"x": [1, 2, 3], "mike": "awesome"}) self.assertEqual([1, 2, 3], db.test.find_one()["x"]) - self.assertEqual([2, 3], - db.test.find_one( - projection={"x": {"$slice": -2}})["x"]) + self.assertEqual([2, 3], db.test.find_one(projection={"x": {"$slice": -2}})["x"]) self.assertTrue("x" not in db.test.find_one(projection={"x": 0})) self.assertTrue("mike" in db.test.find_one(projection={"x": 0})) @@ -1146,14 +1084,10 @@ def test_find_w_regex(self): db.test.insert_one({"x": "hello_test"}) self.assertEqual(len(list(db.test.find())), 4) - self.assertEqual(len(list(db.test.find({"x": - re.compile("^hello.*")}))), 4) - self.assertEqual(len(list(db.test.find({"x": - re.compile("ello")}))), 4) - self.assertEqual(len(list(db.test.find({"x": - re.compile("^hello$")}))), 0) - self.assertEqual(len(list(db.test.find({"x": - re.compile("^hello_mi.*$")}))), 2) + self.assertEqual(len(list(db.test.find({"x": re.compile("^hello.*")}))), 4) + self.assertEqual(len(list(db.test.find({"x": re.compile("ello")}))), 4) + self.assertEqual(len(list(db.test.find({"x": re.compile("^hello$")}))), 0) + self.assertEqual(len(list(db.test.find({"x": re.compile("^hello_mi.*$")}))), 2) def test_id_can_be_anything(self): db = self.db @@ -1217,83 +1151,74 @@ def test_write_error_text_handling(self): db.test.create_index("text", unique=True) # Test workaround for SERVER-24007 - 
data = (b'a\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83' - b'\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83' - b'\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83' - b'\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83' - b'\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83' - b'\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83' - b'\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83' - b'\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83' - b'\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83' - b'\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83' - b'\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83' - b'\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83' - b'\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83' - b'\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83' - b'\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83' - b'\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83' - b'\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83' - b'\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83' - b'\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83' - b'\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83' - b'\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83' - b'\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83' - b'\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83' - b'\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83' - b'\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83' - b'\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83' - b'\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83' - b'\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83') + data = ( + b"a\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83" + b"\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83" + b"\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83" + b"\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83" + b"\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83" + b"\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83" + b"\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83" + b"\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83" + b"\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83" + b"\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83" + b"\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83" + b"\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83" + b"\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83" + b"\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83" + b"\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83" + b"\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83" + b"\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83" + b"\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83" + b"\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83" + b"\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83" + b"\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83" + b"\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83" + b"\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83" + b"\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83" + b"\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83" + b"\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83" + b"\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83" + b"\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83" + ) text = utf_8_decode(data, None, True) db.test.insert_one({"text": text}) # Should raise DuplicateKeyError, not InvalidBSON - self.assertRaises(DuplicateKeyError, - db.test.insert_one, - {"text": text}) + self.assertRaises(DuplicateKeyError, db.test.insert_one, {"text": text}) - self.assertRaises(DuplicateKeyError, - db.test.replace_one, - {"_id": ObjectId()}, - {"text": text}, - upsert=True) + self.assertRaises( + DuplicateKeyError, db.test.replace_one, {"_id": ObjectId()}, {"text": text}, upsert=True 
+ ) # Should raise BulkWriteError, not InvalidBSON - self.assertRaises(BulkWriteError, - db.test.insert_many, - [{"text": text}]) + self.assertRaises(BulkWriteError, db.test.insert_many, [{"text": text}]) def test_write_error_unicode(self): coll = self.db.test self.addCleanup(coll.drop) - coll.create_index('a', unique=True) - coll.insert_one({'a': 'unicode \U0001f40d'}) - with self.assertRaisesRegex( - DuplicateKeyError, - 'E11000 duplicate key error') as ctx: - coll.insert_one({'a': 'unicode \U0001f40d'}) + coll.create_index("a", unique=True) + coll.insert_one({"a": "unicode \U0001f40d"}) + with self.assertRaisesRegex(DuplicateKeyError, "E11000 duplicate key error") as ctx: + coll.insert_one({"a": "unicode \U0001f40d"}) # Once more for good measure. - self.assertIn('E11000 duplicate key error', - str(ctx.exception)) + self.assertIn("E11000 duplicate key error", str(ctx.exception)) def test_wtimeout(self): # Ensure setting wtimeout doesn't disable write concern altogether. # See SERVER-12596. collection = self.db.test collection.drop() - collection.insert_one({'_id': 1}) + collection.insert_one({"_id": 1}) - coll = collection.with_options( - write_concern=WriteConcern(w=1, wtimeout=1000)) - self.assertRaises(DuplicateKeyError, coll.insert_one, {'_id': 1}) + coll = collection.with_options(write_concern=WriteConcern(w=1, wtimeout=1000)) + self.assertRaises(DuplicateKeyError, coll.insert_one, {"_id": 1}) - coll = collection.with_options( - write_concern=WriteConcern(wtimeout=1000)) - self.assertRaises(DuplicateKeyError, coll.insert_one, {'_id': 1}) + coll = collection.with_options(write_concern=WriteConcern(wtimeout=1000)) + self.assertRaises(DuplicateKeyError, coll.insert_one, {"_id": 1}) def test_error_code(self): try: @@ -1319,16 +1244,13 @@ def test_index_on_subfield(self): db.test.insert_one({"hello": {"a": 4, "b": 5}}) db.test.insert_one({"hello": {"a": 7, "b": 2}}) - self.assertRaises(DuplicateKeyError, - db.test.insert_one, - {"hello": {"a": 4, "b": 10}}) + self.assertRaises(DuplicateKeyError, db.test.insert_one, {"hello": {"a": 4, "b": 10}}) def test_replace_one(self): db = self.db db.drop_collection("test") - self.assertRaises(ValueError, - lambda: db.test.replace_one({}, {"$set": {"x": 1}})) + self.assertRaises(ValueError, lambda: db.test.replace_one({}, {"$set": {"x": 1}})) id1 = db.test.insert_one({"x": 1}).inserted_id result = db.test.replace_one({"x": 1}, {"y": 1}) @@ -1360,8 +1282,7 @@ def test_replace_one(self): self.assertTrue(result.acknowledged) self.assertEqual(1, db.test.count_documents({"y": 2})) - db = db.client.get_database(db.name, - write_concern=WriteConcern(w=0)) + db = db.client.get_database(db.name, write_concern=WriteConcern(w=0)) result = db.test.replace_one({"x": 0}, {"y": 0}) self.assertTrue(isinstance(result, UpdateResult)) self.assertRaises(InvalidOperation, lambda: result.matched_count) @@ -1373,8 +1294,7 @@ def test_update_one(self): db = self.db db.drop_collection("test") - self.assertRaises(ValueError, - lambda: db.test.update_one({}, {"x": 1})) + self.assertRaises(ValueError, lambda: db.test.update_one({}, {"x": 1})) id1 = db.test.insert_one({"x": 5}).inserted_id result = db.test.update_one({}, {"$inc": {"x": 1}}) @@ -1402,8 +1322,7 @@ def test_update_one(self): self.assertTrue(isinstance(result.upserted_id, ObjectId)) self.assertTrue(result.acknowledged) - db = db.client.get_database(db.name, - write_concern=WriteConcern(w=0)) + db = db.client.get_database(db.name, write_concern=WriteConcern(w=0)) result = db.test.update_one({"x": 0}, {"$inc": {"x": 
1}}) self.assertTrue(isinstance(result, UpdateResult)) self.assertRaises(InvalidOperation, lambda: result.matched_count) @@ -1415,8 +1334,7 @@ def test_update_many(self): db = self.db db.drop_collection("test") - self.assertRaises(ValueError, - lambda: db.test.update_many({}, {"x": 1})) + self.assertRaises(ValueError, lambda: db.test.update_many({}, {"x": 1})) db.test.insert_one({"x": 4, "y": 3}) db.test.insert_one({"x": 5, "y": 5}) @@ -1445,8 +1363,7 @@ def test_update_many(self): self.assertTrue(isinstance(result.upserted_id, ObjectId)) self.assertTrue(result.acknowledged) - db = db.client.get_database(db.name, - write_concern=WriteConcern(w=0)) + db = db.client.get_database(db.name, write_concern=WriteConcern(w=0)) result = db.test.update_many({"x": 0}, {"$inc": {"x": 1}}) self.assertTrue(isinstance(result, UpdateResult)) self.assertRaises(InvalidOperation, lambda: result.matched_count) @@ -1459,28 +1376,28 @@ def test_update_check_keys(self): self.assertTrue(self.db.test.insert_one({"hello": "world"})) # Modify shouldn't check keys... - self.assertTrue(self.db.test.update_one({"hello": "world"}, - {"$set": {"foo.bar": "baz"}}, - upsert=True)) + self.assertTrue( + self.db.test.update_one({"hello": "world"}, {"$set": {"foo.bar": "baz"}}, upsert=True) + ) # I know this seems like testing the server but I'd like to be notified # by CI if the server's behavior changes here. doc = SON([("$set", {"foo.bar": "bim"}), ("hello", "world")]) - self.assertRaises(OperationFailure, self.db.test.update_one, - {"hello": "world"}, doc, upsert=True) + self.assertRaises( + OperationFailure, self.db.test.update_one, {"hello": "world"}, doc, upsert=True + ) # This is going to cause keys to be checked and raise InvalidDocument. # That's OK assuming the server's behavior in the previous assert # doesn't change. If the behavior changes checking the first key for # '$' in update won't be good enough anymore. 
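        # (PyMongo infers which validation to apply from the first top-level
        # key: update documents must use $-operators throughout, replacement
        # documents must use none, so a document mixing operator and plain
        # keys probes exactly that boundary.)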
doc = SON([("hello", "world"), ("$set", {"foo.bar": "bim"})]) - self.assertRaises(OperationFailure, self.db.test.replace_one, - {"hello": "world"}, doc, upsert=True) + self.assertRaises( + OperationFailure, self.db.test.replace_one, {"hello": "world"}, doc, upsert=True + ) # Replace with empty document - self.assertNotEqual(0, - self.db.test.replace_one( - {"hello": "world"}, {}).matched_count) + self.assertNotEqual(0, self.db.test.replace_one({"hello": "world"}, {}).matched_count) def test_acknowledged_delete(self): db = self.db @@ -1514,10 +1431,9 @@ def test_count_documents(self): self.assertEqual(db.test.count_documents({}), 0) db.test.insert_many([{}, {}]) self.assertEqual(db.test.count_documents({}), 2) - db.test.insert_many([{'foo': 'bar'}, {'foo': 'baz'}]) - self.assertEqual(db.test.count_documents({'foo': 'bar'}), 1) - self.assertEqual( - db.test.count_documents({'foo': re.compile(r'ba.*')}), 2) + db.test.insert_many([{"foo": "bar"}, {"foo": "baz"}]) + self.assertEqual(db.test.count_documents({"foo": "bar"}), 1) + self.assertEqual(db.test.count_documents({"foo": re.compile(r"ba.*")}), 2) def test_estimated_document_count(self): db = self.db @@ -1533,39 +1449,37 @@ def test_estimated_document_count(self): def test_aggregate(self): db = self.db db.drop_collection("test") - db.test.insert_one({'foo': [1, 2]}) + db.test.insert_one({"foo": [1, 2]}) self.assertRaises(TypeError, db.test.aggregate, "wow") pipeline = {"$project": {"_id": False, "foo": True}} result = db.test.aggregate([pipeline]) self.assertTrue(isinstance(result, CommandCursor)) - self.assertEqual([{'foo': [1, 2]}], list(result)) + self.assertEqual([{"foo": [1, 2]}], list(result)) # Test write concern. with self.write_concern_collection() as coll: - coll.aggregate([{'$out': 'output-collection'}]) + coll.aggregate([{"$out": "output-collection"}]) def test_aggregate_raw_bson(self): db = self.db db.drop_collection("test") - db.test.insert_one({'foo': [1, 2]}) + db.test.insert_one({"foo": [1, 2]}) self.assertRaises(TypeError, db.test.aggregate, "wow") pipeline = {"$project": {"_id": False, "foo": True}} - coll = db.get_collection( - 'test', - codec_options=CodecOptions(document_class=RawBSONDocument)) + coll = db.get_collection("test", codec_options=CodecOptions(document_class=RawBSONDocument)) result = coll.aggregate([pipeline]) self.assertTrue(isinstance(result, CommandCursor)) first_result = next(result) self.assertIsInstance(first_result, RawBSONDocument) - self.assertEqual([1, 2], list(first_result['foo'])) + self.assertEqual([1, 2], list(first_result["foo"])) def test_aggregation_cursor_validation(self): db = self.db - projection = {'$project': {'_id': '$_id'}} + projection = {"$project": {"_id": "$_id"}} cursor = db.test.aggregate([projection], cursor={}) self.assertTrue(isinstance(cursor, CommandCursor)) @@ -1576,20 +1490,17 @@ def test_aggregation_cursor(self): db = self.client.get_database( db.name, read_preference=ReadPreference.SECONDARY, - write_concern=WriteConcern(w=self.w)) + write_concern=WriteConcern(w=self.w), + ) for collection_size in (10, 1000): db.drop_collection("test") - db.test.insert_many([{'_id': i} for i in range(collection_size)]) + db.test.insert_many([{"_id": i} for i in range(collection_size)]) expected_sum = sum(range(collection_size)) # Use batchSize to ensure multiple getMore messages - cursor = db.test.aggregate( - [{'$project': {'_id': '$_id'}}], - batchSize=5) + cursor = db.test.aggregate([{"$project": {"_id": "$_id"}}], batchSize=5) - self.assertEqual( - expected_sum, - sum(doc['_id'] 
for doc in cursor)) + self.assertEqual(expected_sum, sum(doc["_id"] for doc in cursor)) # Test that batchSize is handled properly. cursor = db.test.aggregate([], batchSize=5) @@ -1607,7 +1518,7 @@ def test_aggregation_cursor_alive(self): self.db.test.delete_many({}) self.db.test.insert_many([{} for _ in range(3)]) self.addCleanup(self.db.test.delete_many, {}) - cursor = self.db.test.aggregate(pipeline=[], cursor={'batchSize': 2}) + cursor = self.db.test.aggregate(pipeline=[], cursor={"batchSize": 2}) n = 0 while True: cursor.next() @@ -1621,15 +1532,14 @@ def test_aggregation_cursor_alive(self): def test_large_limit(self): db = self.db db.drop_collection("test_large_limit") - db.test_large_limit.create_index([('x', 1)]) + db.test_large_limit.create_index([("x", 1)]) my_str = "mongomongo" * 1000 - db.test_large_limit.insert_many( - {"x": i, "y": my_str} for i in range(2000)) + db.test_large_limit.insert_many({"x": i, "y": my_str} for i in range(2000)) i = 0 y = 0 - for doc in db.test_large_limit.find(limit=1900).sort([('x', 1)]): + for doc in db.test_large_limit.find(limit=1900).sort([("x", 1)]): i += 1 y += doc["x"] @@ -1683,7 +1593,7 @@ def test_rename(self): db.foo.rename("test", dropTarget=True) with self.write_concern_collection() as coll: - coll.rename('foo') + coll.rename("foo") @no_type_check def test_find_one(self): @@ -1696,8 +1606,7 @@ def test_find_one(self): self.assertEqual(db.test.find_one(_id), db.test.find_one()) self.assertEqual(db.test.find_one(None), db.test.find_one()) self.assertEqual(db.test.find_one({}), db.test.find_one()) - self.assertEqual(db.test.find_one({"hello": "world"}), - db.test.find_one()) + self.assertEqual(db.test.find_one({"hello": "world"}), db.test.find_one()) self.assertTrue("hello" in db.test.find_one(projection=["hello"])) self.assertTrue("hello" not in db.test.find_one(projection=["foo"])) @@ -1711,8 +1620,7 @@ def test_find_one(self): self.assertTrue("hello" in db.test.find_one(projection=frozenset(["hello"]))) self.assertTrue("hello" not in db.test.find_one(projection=frozenset(["foo"]))) - self.assertEqual(["_id"], list(db.test.find_one(projection={'_id': - True}))) + self.assertEqual(["_id"], list(db.test.find_one(projection={"_id": True}))) self.assertTrue("hello" in list(db.test.find_one(projection={}))) self.assertTrue("hello" in list(db.test.find_one(projection=[]))) @@ -1765,16 +1673,13 @@ def test_cursor_timeout(self): def test_exhaust(self): if is_mongos(self.db.client): - self.assertRaises(InvalidOperation, - self.db.test.find, - cursor_type=CursorType.EXHAUST) + self.assertRaises(InvalidOperation, self.db.test.find, cursor_type=CursorType.EXHAUST) return # Limit is incompatible with exhaust. 
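        # An exhaust cursor asks the server to stream all remaining batches
        # without per-batch getMore round trips, which cannot honor a
        # client-side limit, so the driver rejects the combination up front.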
- self.assertRaises(InvalidOperation, - self.db.test.find, - cursor_type=CursorType.EXHAUST, - limit=5) + self.assertRaises( + InvalidOperation, self.db.test.find, cursor_type=CursorType.EXHAUST, limit=5 + ) cur = self.db.test.find(cursor_type=CursorType.EXHAUST) self.assertRaises(InvalidOperation, cur.limit, 5) cur = self.db.test.find(limit=5) @@ -1785,7 +1690,7 @@ def test_exhaust(self): self.db.drop_collection("test") # Insert enough documents to require more than one batch - self.db.test.insert_many([{'i': i} for i in range(150)]) + self.db.test.insert_many([{"i": i} for i in range(150)]) client = rs_or_single_client(maxPoolSize=1) self.addCleanup(client.close) @@ -1807,8 +1712,7 @@ def test_exhaust(self): # If the Cursor instance is discarded before being completely iterated # and the socket has pending data (more_to_come=True) we have to close # and discard the socket. - cur = client[self.db.name].test.find(cursor_type=CursorType.EXHAUST, - batch_size=2) + cur = client[self.db.name].test.find(cursor_type=CursorType.EXHAUST, batch_size=2) if client_context.version.at_least(4, 2): # On 4.2+ we use OP_MSG which only sets more_to_come=True after the # first getMore. @@ -1817,12 +1721,12 @@ def test_exhaust(self): else: next(cur) self.assertEqual(0, len(pool.sockets)) - if sys.platform.startswith('java') or 'PyPy' in sys.version: + if sys.platform.startswith("java") or "PyPy" in sys.version: # Don't wait for GC or use gc.collect(), it's unreliable. cur.close() cur = None # Wait until the background thread returns the socket. - wait_until(lambda: pool.active_sockets == 0, 'return socket') + wait_until(lambda: pool.active_sockets == 0, "return socket") # The socket should be discarded. self.assertEqual(0, len(pool.sockets)) @@ -1837,11 +1741,11 @@ def test_distinct(self): self.assertEqual([1, 2, 3], distinct) - distinct = test.find({'a': {'$gt': 1}}).distinct("a") + distinct = test.find({"a": {"$gt": 1}}).distinct("a") distinct.sort() self.assertEqual([2, 3], distinct) - distinct = test.distinct('a', {'a': {'$gt': 1}}) + distinct = test.distinct("a", {"a": {"$gt": 1}}) distinct.sort() self.assertEqual([2, 3], distinct) @@ -1862,19 +1766,15 @@ def test_query_on_query_field(self): self.db.test.insert_one({"query": "foo"}) self.db.test.insert_one({"bar": "foo"}) - self.assertEqual(1, - self.db.test.count_documents({"query": {"$ne": None}})) - self.assertEqual(1, - len(list(self.db.test.find({"query": {"$ne": None}}))) - ) + self.assertEqual(1, self.db.test.count_documents({"query": {"$ne": None}})) + self.assertEqual(1, len(list(self.db.test.find({"query": {"$ne": None}})))) def test_min_query(self): self.db.drop_collection("test") self.db.test.insert_many([{"x": 1}, {"x": 2}]) self.db.test.create_index("x") - cursor = self.db.test.find({"$min": {"x": 2}, "$query": {}}, - hint="x_1") + cursor = self.db.test.find({"$min": {"x": 2}, "$query": {}}, hint="x_1") docs = list(cursor) self.assertEqual(1, len(docs)) @@ -1891,24 +1791,30 @@ def test_numerous_inserts(self): def test_insert_many_large_batch(self): # Tests legacy insert. db = self.client.test_insert_large_batch - self.addCleanup(self.client.drop_database, 'test_insert_large_batch') + self.addCleanup(self.client.drop_database, "test_insert_large_batch") max_bson_size = client_context.max_bson_size # Write commands are limited to 16MB + 16k per batch - big_string = 'x' * int(max_bson_size / 2) + big_string = "x" * int(max_bson_size / 2) # Batch insert that requires 2 batches. 
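        # Four documents of roughly max_bson_size / 2 (~8 MiB each) total
        # about 32 MiB, exceeding the per-batch limit noted above, so the
        # driver must split this insert_many into at least two batches.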
- successful_insert = [{'x': big_string}, {'x': big_string}, - {'x': big_string}, {'x': big_string}] + successful_insert = [ + {"x": big_string}, + {"x": big_string}, + {"x": big_string}, + {"x": big_string}, + ] db.collection_0.insert_many(successful_insert) self.assertEqual(4, db.collection_0.count_documents({})) db.collection_0.drop() # Test that inserts fail after first error. - insert_second_fails = [{'_id': 'id0', 'x': big_string}, - {'_id': 'id0', 'x': big_string}, - {'_id': 'id1', 'x': big_string}, - {'_id': 'id2', 'x': big_string}] + insert_second_fails = [ + {"_id": "id0", "x": big_string}, + {"_id": "id0", "x": big_string}, + {"_id": "id1", "x": big_string}, + {"_id": "id2", "x": big_string}, + ] with self.assertRaises(BulkWriteError): db.collection_1.insert_many(insert_second_fails) @@ -1918,25 +1824,27 @@ def test_insert_many_large_batch(self): db.collection_1.drop() # 2 batches, 2nd insert fails, unacknowledged, ordered. - unack_coll = db.collection_2.with_options( - write_concern=WriteConcern(w=0)) + unack_coll = db.collection_2.with_options(write_concern=WriteConcern(w=0)) unack_coll.insert_many(insert_second_fails) - wait_until(lambda: 1 == db.collection_2.count_documents({}), - 'insert 1 document', timeout=60) + wait_until( + lambda: 1 == db.collection_2.count_documents({}), "insert 1 document", timeout=60 + ) db.collection_2.drop() # 2 batches, ids of docs 0 and 1 are dupes, ids of docs 2 and 3 are # dupes. Acknowledged, unordered. - insert_two_failures = [{'_id': 'id0', 'x': big_string}, - {'_id': 'id0', 'x': big_string}, - {'_id': 'id1', 'x': big_string}, - {'_id': 'id1', 'x': big_string}] + insert_two_failures = [ + {"_id": "id0", "x": big_string}, + {"_id": "id0", "x": big_string}, + {"_id": "id1", "x": big_string}, + {"_id": "id1", "x": big_string}, + ] with self.assertRaises(OperationFailure) as context: db.collection_3.insert_many(insert_two_failures, ordered=False) - self.assertIn('id1', str(context.exception)) + self.assertIn("id1", str(context.exception)) # Only the first and third documents should be inserted. self.assertEqual(2, db.collection_3.count_documents({})) @@ -1944,13 +1852,13 @@ def test_insert_many_large_batch(self): db.collection_3.drop() # 2 batches, 2 errors, unacknowledged, unordered. - unack_coll = db.collection_4.with_options( - write_concern=WriteConcern(w=0)) + unack_coll = db.collection_4.with_options(write_concern=WriteConcern(w=0)) unack_coll.insert_many(insert_two_failures, ordered=False) # Only the first and third documents are inserted. - wait_until(lambda: 2 == db.collection_4.count_documents({}), - 'insert 2 documents', timeout=60) + wait_until( + lambda: 2 == db.collection_4.count_documents({}), "insert 2 documents", timeout=60 + ) db.collection_4.drop() @@ -1978,224 +1886,246 @@ class BadGetAttr(dict): def __getattr__(self, name): pass - bad = BadGetAttr([('foo', 'bar')]) - c.insert_one({'bad': bad}) - self.assertEqual('bar', c.find_one()['bad']['foo']) # type: ignore + bad = BadGetAttr([("foo", "bar")]) + c.insert_one({"bad": bad}) + self.assertEqual("bar", c.find_one()["bad"]["foo"]) # type: ignore def test_array_filters_validation(self): # array_filters must be a list. 
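        # For contrast, a well-formed call passes a list of filter documents,
        # e.g.: c.update_one({}, {"$set": {"y.$[i].b": 5}}, array_filters=[{"i.b": 1}])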
c = self.db.test with self.assertRaises(TypeError): - c.update_one({}, {'$set': {'a': 1}}, array_filters={}) # type: ignore[arg-type] + c.update_one({}, {"$set": {"a": 1}}, array_filters={}) # type: ignore[arg-type] with self.assertRaises(TypeError): - c.update_many({}, {'$set': {'a': 1}}, array_filters={} ) # type: ignore[arg-type] + c.update_many({}, {"$set": {"a": 1}}, array_filters={}) # type: ignore[arg-type] with self.assertRaises(TypeError): - c.find_one_and_update({}, {'$set': {'a': 1}}, array_filters={}) # type: ignore[arg-type] + c.find_one_and_update({}, {"$set": {"a": 1}}, array_filters={}) # type: ignore[arg-type] def test_array_filters_unacknowledged(self): c_w0 = self.db.test.with_options(write_concern=WriteConcern(w=0)) with self.assertRaises(ConfigurationError): - c_w0.update_one({}, {'$set': {'y.$[i].b': 5}}, - array_filters=[{'i.b': 1}]) + c_w0.update_one({}, {"$set": {"y.$[i].b": 5}}, array_filters=[{"i.b": 1}]) with self.assertRaises(ConfigurationError): - c_w0.update_many({}, {'$set': {'y.$[i].b': 5}}, - array_filters=[{'i.b': 1}]) + c_w0.update_many({}, {"$set": {"y.$[i].b": 5}}, array_filters=[{"i.b": 1}]) with self.assertRaises(ConfigurationError): - c_w0.find_one_and_update({}, {'$set': {'y.$[i].b': 5}}, - array_filters=[{'i.b': 1}]) + c_w0.find_one_and_update({}, {"$set": {"y.$[i].b": 5}}, array_filters=[{"i.b": 1}]) def test_find_one_and(self): c = self.db.test c.drop() - c.insert_one({'_id': 1, 'i': 1}) - - self.assertEqual({'_id': 1, 'i': 1}, - c.find_one_and_update({'_id': 1}, {'$inc': {'i': 1}})) - self.assertEqual({'_id': 1, 'i': 3}, - c.find_one_and_update( - {'_id': 1}, {'$inc': {'i': 1}}, - return_document=ReturnDocument.AFTER)) - - self.assertEqual({'_id': 1, 'i': 3}, - c.find_one_and_delete({'_id': 1})) - self.assertEqual(None, c.find_one({'_id': 1})) - - self.assertEqual(None, - c.find_one_and_update({'_id': 1}, {'$inc': {'i': 1}})) - self.assertEqual({'_id': 1, 'i': 1}, - c.find_one_and_update( - {'_id': 1}, {'$inc': {'i': 1}}, - return_document=ReturnDocument.AFTER, - upsert=True)) - self.assertEqual({'_id': 1, 'i': 2}, - c.find_one_and_update( - {'_id': 1}, {'$inc': {'i': 1}}, - return_document=ReturnDocument.AFTER)) - - self.assertEqual({'_id': 1, 'i': 3}, - c.find_one_and_replace( - {'_id': 1}, {'i': 3, 'j': 1}, - projection=['i'], - return_document=ReturnDocument.AFTER)) - self.assertEqual({'i': 4}, - c.find_one_and_update( - {'_id': 1}, {'$inc': {'i': 1}}, - projection={'i': 1, '_id': 0}, - return_document=ReturnDocument.AFTER)) + c.insert_one({"_id": 1, "i": 1}) + + self.assertEqual({"_id": 1, "i": 1}, c.find_one_and_update({"_id": 1}, {"$inc": {"i": 1}})) + self.assertEqual( + {"_id": 1, "i": 3}, + c.find_one_and_update( + {"_id": 1}, {"$inc": {"i": 1}}, return_document=ReturnDocument.AFTER + ), + ) + + self.assertEqual({"_id": 1, "i": 3}, c.find_one_and_delete({"_id": 1})) + self.assertEqual(None, c.find_one({"_id": 1})) + + self.assertEqual(None, c.find_one_and_update({"_id": 1}, {"$inc": {"i": 1}})) + self.assertEqual( + {"_id": 1, "i": 1}, + c.find_one_and_update( + {"_id": 1}, {"$inc": {"i": 1}}, return_document=ReturnDocument.AFTER, upsert=True + ), + ) + self.assertEqual( + {"_id": 1, "i": 2}, + c.find_one_and_update( + {"_id": 1}, {"$inc": {"i": 1}}, return_document=ReturnDocument.AFTER + ), + ) + + self.assertEqual( + {"_id": 1, "i": 3}, + c.find_one_and_replace( + {"_id": 1}, {"i": 3, "j": 1}, projection=["i"], return_document=ReturnDocument.AFTER + ), + ) + self.assertEqual( + {"i": 4}, + c.find_one_and_update( + {"_id": 1}, + 
{"$inc": {"i": 1}}, + projection={"i": 1, "_id": 0}, + return_document=ReturnDocument.AFTER, + ), + ) c.drop() for j in range(5): - c.insert_one({'j': j, 'i': 0}) + c.insert_one({"j": j, "i": 0}) - sort = [('j', DESCENDING)] - self.assertEqual(4, c.find_one_and_update({}, - {'$inc': {'i': 1}}, - sort=sort)['j']) + sort = [("j", DESCENDING)] + self.assertEqual(4, c.find_one_and_update({}, {"$inc": {"i": 1}}, sort=sort)["j"]) def test_find_one_and_write_concern(self): listener = EventListener() db = single_client(event_listeners=[listener])[self.db.name] # non-default WriteConcern. - c_w0 = db.get_collection( - 'test', write_concern=WriteConcern(w=0)) + c_w0 = db.get_collection("test", write_concern=WriteConcern(w=0)) # default WriteConcern. - c_default = db.get_collection('test', write_concern=WriteConcern()) + c_default = db.get_collection("test", write_concern=WriteConcern()) results = listener.results # Authenticate the client and throw out auth commands from the listener. - db.command('ping') + db.command("ping") results.clear() - c_w0.find_one_and_update( - {'_id': 1}, {'$set': {'foo': 'bar'}}) - self.assertEqual( - {'w': 0}, results['started'][0].command['writeConcern']) + c_w0.find_one_and_update({"_id": 1}, {"$set": {"foo": "bar"}}) + self.assertEqual({"w": 0}, results["started"][0].command["writeConcern"]) results.clear() - c_w0.find_one_and_replace({'_id': 1}, {'foo': 'bar'}) - self.assertEqual( - {'w': 0}, results['started'][0].command['writeConcern']) + c_w0.find_one_and_replace({"_id": 1}, {"foo": "bar"}) + self.assertEqual({"w": 0}, results["started"][0].command["writeConcern"]) results.clear() - c_w0.find_one_and_delete({'_id': 1}) - self.assertEqual( - {'w': 0}, results['started'][0].command['writeConcern']) + c_w0.find_one_and_delete({"_id": 1}) + self.assertEqual({"w": 0}, results["started"][0].command["writeConcern"]) results.clear() # Test write concern errors. 
if client_context.is_rs: c_wc_error = db.get_collection( - 'test', - write_concern=WriteConcern( - w=len(client_context.nodes) + 1)) + "test", write_concern=WriteConcern(w=len(client_context.nodes) + 1) + ) self.assertRaises( WriteConcernError, c_wc_error.find_one_and_update, - {'_id': 1}, {'$set': {'foo': 'bar'}}) + {"_id": 1}, + {"$set": {"foo": "bar"}}, + ) self.assertRaises( WriteConcernError, c_wc_error.find_one_and_replace, - {'w': 0}, results['started'][0].command['writeConcern']) + {"w": 0}, + results["started"][0].command["writeConcern"], + ) self.assertRaises( WriteConcernError, c_wc_error.find_one_and_delete, - {'w': 0}, results['started'][0].command['writeConcern']) + {"w": 0}, + results["started"][0].command["writeConcern"], + ) results.clear() - c_default.find_one_and_update({'_id': 1}, {'$set': {'foo': 'bar'}}) - self.assertNotIn('writeConcern', results['started'][0].command) + c_default.find_one_and_update({"_id": 1}, {"$set": {"foo": "bar"}}) + self.assertNotIn("writeConcern", results["started"][0].command) results.clear() - c_default.find_one_and_replace({'_id': 1}, {'foo': 'bar'}) - self.assertNotIn('writeConcern', results['started'][0].command) + c_default.find_one_and_replace({"_id": 1}, {"foo": "bar"}) + self.assertNotIn("writeConcern", results["started"][0].command) results.clear() - c_default.find_one_and_delete({'_id': 1}) - self.assertNotIn('writeConcern', results['started'][0].command) + c_default.find_one_and_delete({"_id": 1}) + self.assertNotIn("writeConcern", results["started"][0].command) results.clear() def test_find_with_nested(self): c = self.db.test c.drop() - c.insert_many([{'i': i} for i in range(5)]) # [0, 1, 2, 3, 4] + c.insert_many([{"i": i} for i in range(5)]) # [0, 1, 2, 3, 4] self.assertEqual( [2], - [i['i'] for i in c.find({ - '$and': [ + [ + i["i"] + for i in c.find( { - # This clause gives us [1,2,4] - '$or': [ - {'i': {'$lte': 2}}, - {'i': {'$gt': 3}}, - ], - }, - { - # This clause gives us [2,3] - '$or': [ - {'i': 2}, - {'i': 3}, + "$and": [ + { + # This clause gives us [1,2,4] + "$or": [ + {"i": {"$lte": 2}}, + {"i": {"$gt": 3}}, + ], + }, + { + # This clause gives us [2,3] + "$or": [ + {"i": 2}, + {"i": 3}, + ] + }, ] - }, - ] - })] + } + ) + ], ) self.assertEqual( [0, 1, 2], - [i['i'] for i in c.find({ - '$or': [ - { - # This clause gives us [2] - '$and': [ - {'i': {'$gte': 2}}, - {'i': {'$lt': 3}}, - ], - }, + [ + i["i"] + for i in c.find( { - # This clause gives us [0,1] - '$and': [ - {'i': {'$gt': -100}}, - {'i': {'$lt': 2}}, + "$or": [ + { + # This clause gives us [2] + "$and": [ + {"i": {"$gte": 2}}, + {"i": {"$lt": 3}}, + ], + }, + { + # This clause gives us [0,1] + "$and": [ + {"i": {"$gt": -100}}, + {"i": {"$lt": 2}}, + ] + }, ] - }, - ] - })] + } + ) + ], ) def test_find_regex(self): c = self.db.test c.drop() - c.insert_one({'r': re.compile('.*')}) + c.insert_one({"r": re.compile(".*")}) - self.assertTrue(isinstance(c.find_one()['r'], Regex)) # type: ignore + self.assertTrue(isinstance(c.find_one()["r"], Regex)) # type: ignore for doc in c.find(): - self.assertTrue(isinstance(doc['r'], Regex)) + self.assertTrue(isinstance(doc["r"], Regex)) def test_find_command_generation(self): - cmd = _gen_find_command('coll', {'$query': {'foo': 1}, '$dumb': 2}, - None, 0, 0, 0, None, DEFAULT_READ_CONCERN, - None, None) + cmd = _gen_find_command( + "coll", + {"$query": {"foo": 1}, "$dumb": 2}, + None, + 0, + 0, + 0, + None, + DEFAULT_READ_CONCERN, + None, + None, + ) self.assertEqual( - cmd.to_dict(), - SON([('find', 'coll'), - ('$dumb', 2), - 
('filter', {'foo': 1})]).to_dict()) + cmd.to_dict(), SON([("find", "coll"), ("$dumb", 2), ("filter", {"foo": 1})]).to_dict() + ) def test_bool(self): with self.assertRaises(NotImplementedError): - bool(Collection(self.db, 'test')) + bool(Collection(self.db, "test")) @client_context.require_version_min(5, 0, 0) def test_helpers_with_let(self): c = self.db.test - helpers = [(c.delete_many, ({}, {})), (c.delete_one, ({}, {})), - (c.find, ({})), (c.update_many, ({}, {'$inc': {'x': 3}})), - (c.update_one, ({}, {'$inc': {'x': 3}})), - (c.find_one_and_delete, ({}, {})), - (c.find_one_and_replace, ({}, {})), - (c.aggregate, ([], {}))] + helpers = [ + (c.delete_many, ({}, {})), + (c.delete_one, ({}, {})), + (c.find, ({})), + (c.update_many, ({}, {"$inc": {"x": 3}})), + (c.update_one, ({}, {"$inc": {"x": 3}})), + (c.find_one_and_delete, ({}, {})), + (c.find_one_and_replace, ({}, {})), + (c.aggregate, ([], {})), + ] for let in [10, "str"]: for helper, args in helpers: - with self.assertRaisesRegex(TypeError, - "let must be an instance of dict"): + with self.assertRaisesRegex(TypeError, "let must be an instance of dict"): helper(*args, let=let) # type: ignore for helper, args in helpers: helper(*args, let={}) # type: ignore diff --git a/test/test_collection_management.py b/test/test_collection_management.py index 342e612583..c5e29eda8a 100644 --- a/test/test_collection_management.py +++ b/test/test_collection_management.py @@ -20,12 +20,10 @@ sys.path[0:0] = [""] from test import unittest - from test.unified_format import generate_test_classes # Location of JSON test specifications. -TEST_PATH = os.path.join( - os.path.dirname(os.path.realpath(__file__)), 'collection_management') +TEST_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "collection_management") # Generate unified tests. globals().update(generate_test_classes(TEST_PATH, module=__name__)) diff --git a/test/test_command_monitoring_legacy.py b/test/test_command_monitoring_legacy.py index a05dbd9668..ed3d516f97 100644 --- a/test/test_command_monitoring_legacy.py +++ b/test/test_command_monitoring_legacy.py @@ -20,26 +20,28 @@ sys.path[0:0] = [""] -import pymongo +from test import client_context, unittest +from test.utils import ( + EventListener, + parse_read_preference, + rs_or_single_client, + wait_until, +) -from pymongo import MongoClient +import pymongo from bson import json_util +from pymongo import MongoClient from pymongo.errors import OperationFailure from pymongo.write_concern import WriteConcern -from test import unittest, client_context -from test.utils import (rs_or_single_client, wait_until, EventListener, - parse_read_preference) # Location of JSON test specifications. -_TEST_PATH = os.path.join( - os.path.dirname(os.path.realpath(__file__)), - 'command_monitoring') +_TEST_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "command_monitoring") def camel_to_snake(camel): # Regex to convert CamelCase to snake_case. 
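# Worked examples of the two substitutions (illustrative):
#     "bulkWrite"        -> "bulk_Write"          -> "bulk_write"
#     "findOneAndDelete" -> "find_One_And_Delete" -> "find_one_and_delete"
# The first pattern splits before an upper-case letter followed by a
# lower-case run; the second splits a lower-case/digit-to-upper-case
# boundary; .lower() finishes the conversion.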
- snake = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', camel) - return re.sub('([a-z0-9])([A-Z])', r'\1_\2', snake).lower() + snake = re.sub("(.)([A-Z][a-z]+)", r"\1_\2", camel) + return re.sub("([a-z0-9])([A-Z])", r"\1_\2", snake).lower() class TestAllScenarios(unittest.TestCase): @@ -61,9 +63,9 @@ def tearDown(self): def format_actual_results(results): - started = results['started'] - succeeded = results['succeeded'] - failed = results['failed'] + started = results["started"] + succeeded = results["succeeded"] + failed = results["failed"] msg = "\nStarted: %r" % (started[0].command if len(started) else None,) msg += "\nSucceeded: %r" % (succeeded[0].reply if len(succeeded) else None,) msg += "\nFailed: %r" % (failed[0].failure if len(failed) else None,) @@ -72,51 +74,51 @@ def format_actual_results(results): def create_test(scenario_def, test): def run_scenario(self): - dbname = scenario_def['database_name'] - collname = scenario_def['collection_name'] + dbname = scenario_def["database_name"] + collname = scenario_def["collection_name"] coll = self.client[dbname][collname] coll.drop() - coll.insert_many(scenario_def['data']) + coll.insert_many(scenario_def["data"]) self.listener.results.clear() - name = camel_to_snake(test['operation']['name']) - if 'read_preference' in test['operation']: - coll = coll.with_options(read_preference=parse_read_preference( - test['operation']['read_preference'])) - if 'collectionOptions' in test['operation']: - colloptions = test['operation']['collectionOptions'] - if 'writeConcern' in colloptions: - concern = colloptions['writeConcern'] - coll = coll.with_options( - write_concern=WriteConcern(**concern)) - - test_args = test['operation']['arguments'] - if 'options' in test_args: - options = test_args.pop('options') + name = camel_to_snake(test["operation"]["name"]) + if "read_preference" in test["operation"]: + coll = coll.with_options( + read_preference=parse_read_preference(test["operation"]["read_preference"]) + ) + if "collectionOptions" in test["operation"]: + colloptions = test["operation"]["collectionOptions"] + if "writeConcern" in colloptions: + concern = colloptions["writeConcern"] + coll = coll.with_options(write_concern=WriteConcern(**concern)) + + test_args = test["operation"]["arguments"] + if "options" in test_args: + options = test_args.pop("options") test_args.update(options) args = {} for arg in test_args: args[camel_to_snake(arg)] = test_args[arg] - if name == 'count': - self.skipTest('PyMongo does not support count') - elif name == 'bulk_write': + if name == "count": + self.skipTest("PyMongo does not support count") + elif name == "bulk_write": bulk_args = [] - for request in args['requests']: - opname = request['name'] + for request in args["requests"]: + opname = request["name"] klass = opname[0:1].upper() + opname[1:] - arg = getattr(pymongo, klass)(**request['arguments']) + arg = getattr(pymongo, klass)(**request["arguments"]) bulk_args.append(arg) try: - coll.bulk_write(bulk_args, args.get('ordered', True)) + coll.bulk_write(bulk_args, args.get("ordered", True)) except OperationFailure: pass - elif name == 'find': - if 'sort' in args: - args['sort'] = list(args['sort'].items()) - if 'hint' in args: - args['hint'] = list(args['hint'].items()) - for arg in 'skip', 'limit': + elif name == "find": + if "sort" in args: + args["sort"] = list(args["sort"].items()) + if "hint" in args: + args["hint"] = list(args["hint"].items()) + for arg in "skip", "limit": if arg in args: args[arg] = int(args[arg]) try: @@ -131,73 +133,73 @@ def 
run_scenario(self): pass res = self.listener.results - for expectation in test['expectations']: + for expectation in test["expectations"]: event_type = next(iter(expectation)) if event_type == "command_started_event": - event = res['started'][0] if len(res['started']) else None + event = res["started"][0] if len(res["started"]) else None if event is not None: # The tests substitute 42 for any number other than 0. - if (event.command_name == 'getMore' - and event.command['getMore']): - event.command['getMore'] = 42 - elif event.command_name == 'killCursors': - event.command['cursors'] = [42] - elif event.command_name == 'update': + if event.command_name == "getMore" and event.command["getMore"]: + event.command["getMore"] = 42 + elif event.command_name == "killCursors": + event.command["cursors"] = [42] + elif event.command_name == "update": # TODO: remove this once PYTHON-1744 is done. # Add upsert and multi fields back into # expectations. - updates = expectation[event_type]['command'][ - 'updates'] + updates = expectation[event_type]["command"]["updates"] for update in updates: - update.setdefault('upsert', False) - update.setdefault('multi', False) + update.setdefault("upsert", False) + update.setdefault("multi", False) elif event_type == "command_succeeded_event": - event = ( - res['succeeded'].pop(0) if len(res['succeeded']) else None) + event = res["succeeded"].pop(0) if len(res["succeeded"]) else None if event is not None: reply = event.reply # The tests substitute 42 for any number other than 0, # and "" for any error message. - if 'writeErrors' in reply: - for doc in reply['writeErrors']: + if "writeErrors" in reply: + for doc in reply["writeErrors"]: # Remove any new fields the server adds. The tests # only have index, code, and errmsg. - diff = set(doc) - set(['index', 'code', 'errmsg']) + diff = set(doc) - set(["index", "code", "errmsg"]) for field in diff: doc.pop(field) - doc['code'] = 42 - doc['errmsg'] = "" - elif 'cursor' in reply: - if reply['cursor']['id']: - reply['cursor']['id'] = 42 - elif event.command_name == 'killCursors': + doc["code"] = 42 + doc["errmsg"] = "" + elif "cursor" in reply: + if reply["cursor"]["id"]: + reply["cursor"]["id"] = 42 + elif event.command_name == "killCursors": # Make the tests continue to pass when the killCursors # command is actually in use. - if 'cursorsKilled' in reply: - reply.pop('cursorsKilled') - reply['cursorsUnknown'] = [42] + if "cursorsKilled" in reply: + reply.pop("cursorsKilled") + reply["cursorsUnknown"] = [42] # Found succeeded event. Pop related started event. - res['started'].pop(0) + res["started"].pop(0) elif event_type == "command_failed_event": - event = res['failed'].pop(0) if len(res['failed']) else None + event = res["failed"].pop(0) if len(res["failed"]) else None if event is not None: # Found failed event. Pop related started event. - res['started'].pop(0) + res["started"].pop(0) else: self.fail("Unknown event type") if event is None: - event_name = event_type.split('_')[1] + event_name = event_type.split("_")[1] self.fail( "Expected %s event for %s command. 
Actual " - "results:%s" % ( + "results:%s" + % ( event_name, - expectation[event_type]['command_name'], - format_actual_results(res))) + expectation[event_type]["command_name"], + format_actual_results(res), + ) + ) for attr, expected in expectation[event_type].items(): - if 'options' in expected: - options = expected.pop('options') + if "options" in expected: + options = expected.pop("options") expected.update(options) actual = getattr(event, attr) if isinstance(expected, dict): @@ -210,35 +212,33 @@ def run_scenario(self): def create_tests(): - for dirpath, _, filenames in os.walk(os.path.join(_TEST_PATH, 'legacy')): + for dirpath, _, filenames in os.walk(os.path.join(_TEST_PATH, "legacy")): dirname = os.path.split(dirpath)[-1] for filename in filenames: with open(os.path.join(dirpath, filename)) as scenario_stream: scenario_def = json_util.loads(scenario_stream.read()) - assert bool(scenario_def.get('tests')), "tests cannot be empty" + assert bool(scenario_def.get("tests")), "tests cannot be empty" # Construct test from scenario. - for test in scenario_def['tests']: + for test in scenario_def["tests"]: new_test = create_test(scenario_def, test) if "ignore_if_server_version_greater_than" in test: version = test["ignore_if_server_version_greater_than"] - ver = tuple(int(elt) for elt in version.split('.')) - new_test = client_context.require_version_max(*ver)( - new_test) + ver = tuple(int(elt) for elt in version.split(".")) + new_test = client_context.require_version_max(*ver)(new_test) if "ignore_if_server_version_less_than" in test: version = test["ignore_if_server_version_less_than"] - ver = tuple(int(elt) for elt in version.split('.')) - new_test = client_context.require_version_min(*ver)( - new_test) + ver = tuple(int(elt) for elt in version.split(".")) + new_test = client_context.require_version_min(*ver)(new_test) if "ignore_if_topology_type" in test: types = set(test["ignore_if_topology_type"]) if "sharded" in types: - new_test = client_context.require_no_mongos(None)( - new_test) + new_test = client_context.require_no_mongos(None)(new_test) - test_name = 'test_%s_%s_%s' % ( + test_name = "test_%s_%s_%s" % ( dirname, os.path.splitext(filename)[0], - str(test['description'].replace(" ", "_"))) + str(test["description"].replace(" ", "_")), + ) new_test.__name__ = test_name setattr(TestAllScenarios, new_test.__name__, new_test) diff --git a/test/test_command_monitoring_unified.py b/test/test_command_monitoring_unified.py index 9390c9fec6..46e1e4724c 100644 --- a/test/test_command_monitoring_unified.py +++ b/test/test_command_monitoring_unified.py @@ -22,16 +22,16 @@ from test import unittest from test.unified_format import generate_test_classes - # Location of JSON test specifications. 
-_TEST_PATH = os.path.join( - os.path.dirname(os.path.realpath(__file__)), - 'command_monitoring') +_TEST_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "command_monitoring") -globals().update(generate_test_classes( - os.path.join(_TEST_PATH, 'unified'), - module=__name__,)) +globals().update( + generate_test_classes( + os.path.join(_TEST_PATH, "unified"), + module=__name__, + ) +) if __name__ == "__main__": diff --git a/test/test_common.py b/test/test_common.py index 7d7a26c278..ff50878ea1 100644 --- a/test/test_common.py +++ b/test/test_common.py @@ -19,13 +19,14 @@ sys.path[0:0] = [""] -from bson.binary import Binary, PYTHON_LEGACY, STANDARD, UuidRepresentation +from test import IntegrationTest, client_context, unittest +from test.utils import connected, rs_or_single_client, single_client + +from bson.binary import PYTHON_LEGACY, STANDARD, Binary, UuidRepresentation from bson.codec_options import CodecOptions from bson.objectid import ObjectId from pymongo.errors import OperationFailure from pymongo.write_concern import WriteConcern -from test import client_context, unittest, IntegrationTest -from test.utils import connected, rs_or_single_client, single_client @client_context.require_connection @@ -34,81 +35,79 @@ def setUpModule(): class TestCommon(IntegrationTest): - def test_uuid_representation(self): coll = self.db.uuid coll.drop() # Test property - self.assertEqual(UuidRepresentation.UNSPECIFIED, - coll.codec_options.uuid_representation) + self.assertEqual(UuidRepresentation.UNSPECIFIED, coll.codec_options.uuid_representation) # Test basic query uu = uuid.uuid4() # Insert as binary subtype 3 - coll = self.db.get_collection( - "uuid", CodecOptions(uuid_representation=PYTHON_LEGACY)) + coll = self.db.get_collection("uuid", CodecOptions(uuid_representation=PYTHON_LEGACY)) legacy_opts = coll.codec_options - coll.insert_one({'uu': uu}) - self.assertEqual(uu, coll.find_one({'uu': uu})['uu']) # type: ignore - coll = self.db.get_collection( - "uuid", CodecOptions(uuid_representation=STANDARD)) + coll.insert_one({"uu": uu}) + self.assertEqual(uu, coll.find_one({"uu": uu})["uu"]) # type: ignore + coll = self.db.get_collection("uuid", CodecOptions(uuid_representation=STANDARD)) self.assertEqual(STANDARD, coll.codec_options.uuid_representation) - self.assertEqual(None, coll.find_one({'uu': uu})) + self.assertEqual(None, coll.find_one({"uu": uu})) uul = Binary.from_uuid(uu, PYTHON_LEGACY) - self.assertEqual(uul, coll.find_one({'uu': uul})['uu']) # type: ignore + self.assertEqual(uul, coll.find_one({"uu": uul})["uu"]) # type: ignore # Test count_documents - self.assertEqual(0, coll.count_documents({'uu': uu})) - coll = self.db.get_collection( - "uuid", CodecOptions(uuid_representation=PYTHON_LEGACY)) - self.assertEqual(1, coll.count_documents({'uu': uu})) + self.assertEqual(0, coll.count_documents({"uu": uu})) + coll = self.db.get_collection("uuid", CodecOptions(uuid_representation=PYTHON_LEGACY)) + self.assertEqual(1, coll.count_documents({"uu": uu})) # Test delete - coll = self.db.get_collection( - "uuid", CodecOptions(uuid_representation=STANDARD)) - coll.delete_one({'uu': uu}) + coll = self.db.get_collection("uuid", CodecOptions(uuid_representation=STANDARD)) + coll.delete_one({"uu": uu}) self.assertEqual(1, coll.count_documents({})) - coll = self.db.get_collection( - "uuid", CodecOptions(uuid_representation=PYTHON_LEGACY)) - coll.delete_one({'uu': uu}) + coll = self.db.get_collection("uuid", CodecOptions(uuid_representation=PYTHON_LEGACY)) + coll.delete_one({"uu": uu}) 
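# Why the STANDARD and PYTHON_LEGACY handles see different documents
# (illustrative sketch): the same UUID encodes to different BSON binary
# subtypes, so query values never match across representations.
uu2 = uuid.uuid4()
legacy = Binary.from_uuid(uu2, PYTHON_LEGACY)  # binary subtype 3
standard = Binary.from_uuid(uu2, STANDARD)  # binary subtype 4
assert (legacy.subtype, standard.subtype) == (3, 4)
assert legacy != standard  # distinct values on the wire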
self.assertEqual(0, coll.count_documents({})) # Test update_one - coll.insert_one({'_id': uu, 'i': 1}) - coll = self.db.get_collection( - "uuid", CodecOptions(uuid_representation=STANDARD)) - coll.update_one({'_id': uu}, {'$set': {'i': 2}}) - coll = self.db.get_collection( - "uuid", CodecOptions(uuid_representation=PYTHON_LEGACY)) - self.assertEqual(1, coll.find_one({'_id': uu})['i']) # type: ignore - coll.update_one({'_id': uu}, {'$set': {'i': 2}}) - self.assertEqual(2, coll.find_one({'_id': uu})['i']) # type: ignore + coll.insert_one({"_id": uu, "i": 1}) + coll = self.db.get_collection("uuid", CodecOptions(uuid_representation=STANDARD)) + coll.update_one({"_id": uu}, {"$set": {"i": 2}}) + coll = self.db.get_collection("uuid", CodecOptions(uuid_representation=PYTHON_LEGACY)) + self.assertEqual(1, coll.find_one({"_id": uu})["i"]) # type: ignore + coll.update_one({"_id": uu}, {"$set": {"i": 2}}) + self.assertEqual(2, coll.find_one({"_id": uu})["i"]) # type: ignore # Test Cursor.distinct - self.assertEqual([2], coll.find({'_id': uu}).distinct('i')) - coll = self.db.get_collection( - "uuid", CodecOptions(uuid_representation=STANDARD)) - self.assertEqual([], coll.find({'_id': uu}).distinct('i')) + self.assertEqual([2], coll.find({"_id": uu}).distinct("i")) + coll = self.db.get_collection("uuid", CodecOptions(uuid_representation=STANDARD)) + self.assertEqual([], coll.find({"_id": uu}).distinct("i")) # Test findAndModify - self.assertEqual(None, coll.find_one_and_update({'_id': uu}, - {'$set': {'i': 5}})) - coll = self.db.get_collection( - "uuid", CodecOptions(uuid_representation=PYTHON_LEGACY)) - self.assertEqual(2, coll.find_one_and_update({'_id': uu}, - {'$set': {'i': 5}})['i']) - self.assertEqual(5, coll.find_one({'_id': uu})['i']) # type: ignore + self.assertEqual(None, coll.find_one_and_update({"_id": uu}, {"$set": {"i": 5}})) + coll = self.db.get_collection("uuid", CodecOptions(uuid_representation=PYTHON_LEGACY)) + self.assertEqual(2, coll.find_one_and_update({"_id": uu}, {"$set": {"i": 5}})["i"]) + self.assertEqual(5, coll.find_one({"_id": uu})["i"]) # type: ignore # Test command - self.assertEqual(5, self.db.command( - 'findAndModify', 'uuid', - update={'$set': {'i': 6}}, - query={'_id': uu}, codec_options=legacy_opts)['value']['i']) - self.assertEqual(6, self.db.command( - 'findAndModify', 'uuid', - update={'$set': {'i': 7}}, - query={'_id': Binary.from_uuid(uu, PYTHON_LEGACY)})['value']['i']) + self.assertEqual( + 5, + self.db.command( + "findAndModify", + "uuid", + update={"$set": {"i": 6}}, + query={"_id": uu}, + codec_options=legacy_opts, + )["value"]["i"], + ) + self.assertEqual( + 6, + self.db.command( + "findAndModify", + "uuid", + update={"$set": {"i": 7}}, + query={"_id": Binary.from_uuid(uu, PYTHON_LEGACY)}, + )["value"]["i"], + ) def test_write_concern(self): c = rs_or_single_client(connect=False) @@ -119,7 +118,7 @@ def test_write_concern(self): self.assertEqual(wc, c.write_concern) # Can we override back to the server default? 
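# The inheritance being exercised here (sketch): write concern flows
# client -> database -> collection unless a level overrides it, and
# passing an empty WriteConcern() restores the server default.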
- db = c.get_database('pymongo_test', write_concern=WriteConcern()) + db = c.get_database("pymongo_test", write_concern=WriteConcern()) self.assertEqual(db.write_concern, WriteConcern()) db = c.pymongo_test @@ -128,7 +127,7 @@ def test_write_concern(self): self.assertEqual(wc, coll.write_concern) cwc = WriteConcern(j=True) - coll = db.get_collection('test', write_concern=cwc) + coll = db.get_collection("test", write_concern=cwc) self.assertEqual(cwc, coll.write_concern) self.assertEqual(wc, db.write_concern) @@ -149,21 +148,22 @@ def test_mongo_client(self): self.assertTrue(new_coll.insert_one(doc)) self.assertRaises(OperationFailure, coll.insert_one, doc) - m = rs_or_single_client("mongodb://%s/" % (pair,), - replicaSet=client_context.replica_set_name) + m = rs_or_single_client( + "mongodb://%s/" % (pair,), replicaSet=client_context.replica_set_name + ) coll = m.pymongo_test.write_concern_test self.assertRaises(OperationFailure, coll.insert_one, doc) - m = rs_or_single_client("mongodb://%s/?w=0" % (pair,), - replicaSet=client_context.replica_set_name) + m = rs_or_single_client( + "mongodb://%s/?w=0" % (pair,), replicaSet=client_context.replica_set_name + ) coll = m.pymongo_test.write_concern_test coll.insert_one(doc) # Equality tests direct = connected(single_client(w=0)) - direct2 = connected(single_client("mongodb://%s/?w=0" % (pair,), - **self.credentials)) + direct2 = connected(single_client("mongodb://%s/?w=0" % (pair,), **self.credentials)) self.assertEqual(direct, direct2) self.assertFalse(direct != direct2) diff --git a/test/test_connections_survive_primary_stepdown_spec.py b/test/test_connections_survive_primary_stepdown_spec.py index e683974b03..fd9f126551 100644 --- a/test/test_connections_survive_primary_stepdown_spec.py +++ b/test/test_connections_survive_primary_stepdown_spec.py @@ -18,20 +18,20 @@ sys.path[0:0] = [""] +from test import IntegrationTest, client_context, unittest +from test.utils import ( + CMAPListener, + ensure_all_connected, + repl_set_step_down, + rs_or_single_client, +) + from bson import SON from pymongo import monitoring from pymongo.collection import Collection from pymongo.errors import NotPrimaryError from pymongo.write_concern import WriteConcern -from test import (client_context, - unittest, - IntegrationTest) -from test.utils import (CMAPListener, - ensure_all_connected, - repl_set_step_down, - rs_or_single_client) - class TestConnectionsSurvivePrimaryStepDown(IntegrationTest): listener: CMAPListener @@ -42,9 +42,9 @@ class TestConnectionsSurvivePrimaryStepDown(IntegrationTest): def setUpClass(cls): super(TestConnectionsSurvivePrimaryStepDown, cls).setUpClass() cls.listener = CMAPListener() - cls.client = rs_or_single_client(event_listeners=[cls.listener], - retryWrites=False, - heartbeatFrequencyMS=500) + cls.client = rs_or_single_client( + event_listeners=[cls.listener], retryWrites=False, heartbeatFrequencyMS=500 + ) # Ensure connections to all servers in replica set. 
This is to test # that the is_writable flag is properly updated for sockets that @@ -52,10 +52,8 @@ def setUpClass(cls): ensure_all_connected(cls.client) cls.listener.reset() - cls.db = cls.client.get_database( - "step-down", write_concern=WriteConcern("majority")) - cls.coll = cls.db.get_collection( - "step-down", write_concern=WriteConcern("majority")) + cls.db = cls.client.get_database("step-down", write_concern=WriteConcern("majority")) + cls.coll = cls.db.get_collection("step-down", write_concern=WriteConcern("majority")) @classmethod def tearDownClass(cls): @@ -73,17 +71,15 @@ def set_fail_point(self, command_args): self.client.admin.command(cmd) def verify_pool_cleared(self): - self.assertEqual( - self.listener.event_count(monitoring.PoolClearedEvent), 1) + self.assertEqual(self.listener.event_count(monitoring.PoolClearedEvent), 1) def verify_pool_not_cleared(self): - self.assertEqual( - self.listener.event_count(monitoring.PoolClearedEvent), 0) + self.assertEqual(self.listener.event_count(monitoring.PoolClearedEvent), 0) @client_context.require_version_min(4, 2, -1) def test_get_more_iteration(self): # Insert 5 documents with WC majority. - self.coll.insert_many([{'data': k} for k in range(5)]) + self.coll.insert_many([{"data": k} for k in range(5)]) # Start a find operation and retrieve first batch of results. batch_size = 2 cursor = self.coll.find(batch_size=batch_size) @@ -108,14 +104,14 @@ def test_get_more_iteration(self): def run_scenario(self, error_code, retry, pool_status_checker): # Set fail point. - self.set_fail_point({"mode": {"times": 1}, - "data": {"failCommands": ["insert"], - "errorCode": error_code}}) + self.set_fail_point( + {"mode": {"times": 1}, "data": {"failCommands": ["insert"], "errorCode": error_code}} + ) self.addCleanup(self.set_fail_point, {"mode": "off"}) # Insert record and verify failure. with self.assertRaises(NotPrimaryError) as exc: self.coll.insert_one({"test": 1}) - self.assertEqual(exc.exception.details['code'], error_code) # type: ignore + self.assertEqual(exc.exception.details["code"], error_code) # type: ignore # Retry before CMAPListener assertion if retry_before=True. if retry: self.coll.insert_one({"test": 1}) diff --git a/test/test_create_entities.py b/test/test_create_entities.py index b82b730aef..ad0ac9347e 100644 --- a/test/test_create_entities.py +++ b/test/test_create_entities.py @@ -12,7 +12,6 @@ # See the License for the specific language governing permissions and # limitations under the License. 
import unittest - from test.unified_format import UnifiedSpecTestMixinV1 @@ -26,23 +25,18 @@ def test_store_events_as_entities(self): { "client": { "id": "client0", - "storeEventsAsEntities": [ - { - "id": "events1", - "events": [ - "PoolCreatedEvent", - ] - } - ] + "storeEventsAsEntities": [ + { + "id": "events1", + "events": [ + "PoolCreatedEvent", + ], + } + ], } }, ], - "tests": [ - { - "description": "foo", - "operations": [] - } - ] + "tests": [{"description": "foo", "operations": []}], } self.scenario_runner.TEST_SPEC = spec self.scenario_runner.setUp() @@ -63,27 +57,18 @@ def test_store_all_others_as_entities(self): { "client": { "id": "client0", - "uriOptions": { - "retryReads": True - }, - } - }, - { - "database": { - "id": "database0", - "client": "client0", - "databaseName": "dat" + "uriOptions": {"retryReads": True}, } }, + {"database": {"id": "database0", "client": "client0", "databaseName": "dat"}}, { "collection": { "id": "collection0", "database": "database0", - "collectionName": "dat" + "collectionName": "dat", } - } + }, ], - "tests": [ { "description": "test loops", @@ -99,33 +84,21 @@ def test_store_all_others_as_entities(self): "numIterations": 5, "operations": [ { - "name": "insertOne", - "object": "collection0", - "arguments": { - "document": { - "_id": 1, - "x": 44 - } - } - + "name": "insertOne", + "object": "collection0", + "arguments": {"document": {"_id": 1, "x": 44}}, }, { "name": "insertOne", "object": "collection0", - "arguments": { - "document": { - "_id": 1, - "x": 44 - } - } - - } - ] - } + "arguments": {"document": {"_id": 1, "x": 44}}, + }, + ], + }, } - ] + ], } - ] + ], } self.scenario_runner.TEST_SPEC = spec diff --git a/test/test_crud_unified.py b/test/test_crud_unified.py index a435c1caa1..cc9a521b3b 100644 --- a/test/test_crud_unified.py +++ b/test/test_crud_unified.py @@ -20,16 +20,13 @@ sys.path[0:0] = [""] from test import unittest - from test.unified_format import generate_test_classes # Location of JSON test specifications. -TEST_PATH = os.path.join( - os.path.dirname(os.path.realpath(__file__)), 'crud', 'unified') +TEST_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "crud", "unified") # Generate unified tests. 
-globals().update(generate_test_classes( - TEST_PATH, module=__name__, RUN_ON_SERVERLESS=True)) +globals().update(generate_test_classes(TEST_PATH, module=__name__, RUN_ON_SERVERLESS=True)) if __name__ == "__main__": unittest.main() diff --git a/test/test_crud_v1.py b/test/test_crud_v1.py index 4399d9f223..c23ce28061 100644 --- a/test/test_crud_v1.py +++ b/test/test_crud_v1.py @@ -19,26 +19,32 @@ sys.path[0:0] = [""] -from pymongo import operations, WriteConcern +from test import IntegrationTest, client_context, unittest +from test.utils import ( + TestCreator, + camel_to_snake, + camel_to_snake_args, + camel_to_upper_camel, + drop_collections, +) + +from pymongo import WriteConcern, operations from pymongo.command_cursor import CommandCursor from pymongo.cursor import Cursor from pymongo.errors import PyMongoError +from pymongo.operations import ( + DeleteMany, + DeleteOne, + InsertOne, + ReplaceOne, + UpdateMany, + UpdateOne, +) from pymongo.read_concern import ReadConcern -from pymongo.results import _WriteResult, BulkWriteResult -from pymongo.operations import (InsertOne, - DeleteOne, - DeleteMany, - ReplaceOne, - UpdateOne, - UpdateMany) - -from test import client_context, unittest, IntegrationTest -from test.utils import (camel_to_snake, camel_to_upper_camel, - camel_to_snake_args, drop_collections, TestCreator) +from pymongo.results import BulkWriteResult, _WriteResult # Location of JSON test specifications. -_TEST_PATH = os.path.join( - os.path.dirname(os.path.realpath(__file__)), 'crud', 'v1') +_TEST_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "crud", "v1") class TestAllScenarios(IntegrationTest): @@ -51,8 +57,7 @@ def check_result(self, expected_result, result): prop = camel_to_snake(res) msg = "%s : %r != %r" % (prop, expected_result, result) # SPEC-869: Only BulkWriteResult has upserted_count. - if (prop == "upserted_count" - and not isinstance(result, BulkWriteResult)): + if prop == "upserted_count" and not isinstance(result, BulkWriteResult): if result.upserted_id is not None: # type: ignore upserted_count = 1 else: @@ -61,8 +66,7 @@ def check_result(self, expected_result, result): elif prop == "inserted_ids": # BulkWriteResult does not have inserted_ids. if isinstance(result, BulkWriteResult): - self.assertEqual(len(expected_result[res]), - result.inserted_count) + self.assertEqual(len(expected_result[res]), result.inserted_count) else: # InsertManyResult may be compared to [id1] from the # crud spec or {"0": id1} from the retryable write spec. @@ -78,8 +82,7 @@ def check_result(self, expected_result, result): expected_ids[int(str_index)] = ids[str_index] self.assertEqual(expected_ids, result.upserted_ids, msg) # type: ignore else: - self.assertEqual( - getattr(result, prop), expected_result[res], msg) + self.assertEqual(getattr(result, prop), expected_result[res], msg) else: self.assertEqual(result, expected_result) @@ -87,16 +90,16 @@ def check_result(self, expected_result, result): def run_operation(collection, test): # Convert command from CamelCase to pymongo.collection method. - operation = camel_to_snake(test['operation']['name']) + operation = camel_to_snake(test["operation"]["name"]) cmd = getattr(collection, operation) # Convert arguments to snake_case and handle special cases. 
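# E.g. a spec's {"options": {"returnDocument": "After"}} is folded into
# the call as the keyword return_document="After" (illustrative sketch):
#     arguments["options"] -> {"returnDocument": "After"}
#     camel_to_snake("returnDocument") -> "return_document"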
- arguments = test['operation']['arguments'] + arguments = test["operation"]["arguments"] options = arguments.pop("options", {}) for option_name in options: arguments[camel_to_snake(option_name)] = options[option_name] - if operation == 'count': - raise unittest.SkipTest('PyMongo does not support count') + if operation == "count": + raise unittest.SkipTest("PyMongo does not support count") if operation == "bulk_write": # Parse each request into a bulk write model. requests = [] @@ -137,15 +140,15 @@ def create_test(scenario_def, test, name): def run_scenario(self): # Cleanup state and load data (if provided). drop_collections(self.db) - data = scenario_def.get('data') + data = scenario_def.get("data") if data: - self.db.test.with_options( - write_concern=WriteConcern(w="majority")).insert_many( - scenario_def['data']) + self.db.test.with_options(write_concern=WriteConcern(w="majority")).insert_many( + scenario_def["data"] + ) # Run operations and check results or errors. - expected_result = test.get('outcome', {}).get('result') - expected_error = test.get('outcome', {}).get('error') + expected_result = test.get("outcome", {}).get("result") + expected_error = test.get("outcome", {}).get("error") if expected_error is True: with self.assertRaises(PyMongoError): run_operation(self.db.test, test) @@ -155,16 +158,15 @@ def run_scenario(self): check_result(self, expected_result, result) # Assert final state is expected. - expected_c = test['outcome'].get('collection') + expected_c = test["outcome"].get("collection") if expected_c is not None: - expected_name = expected_c.get('name') + expected_name = expected_c.get("name") if expected_name is not None: db_coll = self.db[expected_name] else: db_coll = self.db.test - db_coll = db_coll.with_options( - read_concern=ReadConcern(level="local")) - self.assertEqual(list(db_coll.find()), expected_c['data']) + db_coll = db_coll.with_options(read_concern=ReadConcern(level="local")) + self.assertEqual(list(db_coll.find()), expected_c["data"]) return run_scenario @@ -175,53 +177,68 @@ def run_scenario(self): class TestWriteOpsComparison(unittest.TestCase): def test_InsertOneEquals(self): - self.assertEqual(InsertOne({'foo': 42}), InsertOne({'foo': 42})) + self.assertEqual(InsertOne({"foo": 42}), InsertOne({"foo": 42})) def test_InsertOneNotEquals(self): - self.assertNotEqual(InsertOne({'foo': 42}), InsertOne({'foo': 23})) + self.assertNotEqual(InsertOne({"foo": 42}), InsertOne({"foo": 23})) def test_DeleteOneEquals(self): - self.assertEqual(DeleteOne({'foo': 42}), DeleteOne({'foo': 42})) + self.assertEqual(DeleteOne({"foo": 42}), DeleteOne({"foo": 42})) def test_DeleteOneNotEquals(self): - self.assertNotEqual(DeleteOne({'foo': 42}), DeleteOne({'foo': 23})) + self.assertNotEqual(DeleteOne({"foo": 42}), DeleteOne({"foo": 23})) def test_DeleteManyEquals(self): - self.assertEqual(DeleteMany({'foo': 42}), DeleteMany({'foo': 42})) + self.assertEqual(DeleteMany({"foo": 42}), DeleteMany({"foo": 42})) def test_DeleteManyNotEquals(self): - self.assertNotEqual(DeleteMany({'foo': 42}), DeleteMany({'foo': 23})) + self.assertNotEqual(DeleteMany({"foo": 42}), DeleteMany({"foo": 23})) def test_DeleteOneNotEqualsDeleteMany(self): - self.assertNotEqual(DeleteOne({'foo': 42}), DeleteMany({'foo': 42})) + self.assertNotEqual(DeleteOne({"foo": 42}), DeleteMany({"foo": 42})) def test_ReplaceOneEquals(self): - self.assertEqual(ReplaceOne({'foo': 42}, {'bar': 42}, upsert=False), - ReplaceOne({'foo': 42}, {'bar': 42}, upsert=False)) + self.assertEqual( + ReplaceOne({"foo": 42}, 
{"bar": 42}, upsert=False), + ReplaceOne({"foo": 42}, {"bar": 42}, upsert=False), + ) def test_ReplaceOneNotEquals(self): - self.assertNotEqual(ReplaceOne({'foo': 42}, {'bar': 42}, upsert=False), - ReplaceOne({'foo': 42}, {'bar': 42}, upsert=True)) + self.assertNotEqual( + ReplaceOne({"foo": 42}, {"bar": 42}, upsert=False), + ReplaceOne({"foo": 42}, {"bar": 42}, upsert=True), + ) def test_UpdateOneEquals(self): - self.assertEqual(UpdateOne({'foo': 42}, {'$set': {'bar': 42}}), - UpdateOne({'foo': 42}, {'$set': {'bar': 42}})) + self.assertEqual( + UpdateOne({"foo": 42}, {"$set": {"bar": 42}}), + UpdateOne({"foo": 42}, {"$set": {"bar": 42}}), + ) def test_UpdateOneNotEquals(self): - self.assertNotEqual(UpdateOne({'foo': 42}, {'$set': {'bar': 42}}), - UpdateOne({'foo': 42}, {'$set': {'bar': 23}})) + self.assertNotEqual( + UpdateOne({"foo": 42}, {"$set": {"bar": 42}}), + UpdateOne({"foo": 42}, {"$set": {"bar": 23}}), + ) def test_UpdateManyEquals(self): - self.assertEqual(UpdateMany({'foo': 42}, {'$set': {'bar': 42}}), - UpdateMany({'foo': 42}, {'$set': {'bar': 42}})) + self.assertEqual( + UpdateMany({"foo": 42}, {"$set": {"bar": 42}}), + UpdateMany({"foo": 42}, {"$set": {"bar": 42}}), + ) def test_UpdateManyNotEquals(self): - self.assertNotEqual(UpdateMany({'foo': 42}, {'$set': {'bar': 42}}), - UpdateMany({'foo': 42}, {'$set': {'bar': 23}})) + self.assertNotEqual( + UpdateMany({"foo": 42}, {"$set": {"bar": 42}}), + UpdateMany({"foo": 42}, {"$set": {"bar": 23}}), + ) def test_UpdateOneNotEqualsUpdateMany(self): - self.assertNotEqual(UpdateOne({'foo': 42}, {'$set': {'bar': 42}}), - UpdateMany({'foo': 42}, {'$set': {'bar': 42}})) + self.assertNotEqual( + UpdateOne({"foo": 42}, {"$set": {"bar": 42}}), + UpdateMany({"foo": 42}, {"$set": {"bar": 42}}), + ) + if __name__ == "__main__": unittest.main() diff --git a/test/test_cursor.py b/test/test_cursor.py index f741b8b0cc..7a80b003df 100644 --- a/test/test_cursor.py +++ b/test/test_cursor.py @@ -19,42 +19,47 @@ import random import re import sys -import time import threading +import time sys.path[0:0] = [""] +from test import IntegrationTest, client_context, unittest +from test.utils import ( + AllowListEventListener, + EventListener, + OvertCommandListener, + ignore_deprecations, + rs_or_single_client, +) + from bson import decode_all from bson.code import Code from bson.son import SON -from pymongo import (ASCENDING, - DESCENDING) +from pymongo import ASCENDING, DESCENDING from pymongo.collation import Collation from pymongo.cursor import Cursor, CursorType -from pymongo.errors import (ConfigurationError, - ExecutionTimeout, - InvalidOperation, - OperationFailure) +from pymongo.errors import ( + ConfigurationError, + ExecutionTimeout, + InvalidOperation, + OperationFailure, +) from pymongo.read_concern import ReadConcern from pymongo.read_preferences import ReadPreference from pymongo.write_concern import WriteConcern -from test import (client_context, - unittest, - IntegrationTest) -from test.utils import (EventListener, - OvertCommandListener, - ignore_deprecations, - rs_or_single_client, - AllowListEventListener) class TestCursor(IntegrationTest): def test_deepcopy_cursor_littered_with_regexes(self): - cursor = self.db.test.find({ - "x": re.compile("^hmmm.*"), - "y": [re.compile("^hmm.*")], - "z": {"a": [re.compile("^hm.*")]}, - re.compile("^key.*"): {"a": [re.compile("^hm.*")]}}) + cursor = self.db.test.find( + { + "x": re.compile("^hmmm.*"), + "y": [re.compile("^hmm.*")], + "z": {"a": [re.compile("^hm.*")]}, + re.compile("^key.*"): {"a": 
[re.compile("^hm.*")]}, + } + ) cursor2 = copy.deepcopy(cursor) self.assertEqual(cursor._Cursor__spec, cursor2._Cursor__spec) # type: ignore @@ -65,19 +70,15 @@ def test_add_remove_option(self): cursor.add_option(2) cursor2 = self.db.test.find(cursor_type=CursorType.TAILABLE) self.assertEqual(2, cursor2._Cursor__query_flags) - self.assertEqual(cursor._Cursor__query_flags, - cursor2._Cursor__query_flags) + self.assertEqual(cursor._Cursor__query_flags, cursor2._Cursor__query_flags) cursor.add_option(32) cursor2 = self.db.test.find(cursor_type=CursorType.TAILABLE_AWAIT) self.assertEqual(34, cursor2._Cursor__query_flags) - self.assertEqual(cursor._Cursor__query_flags, - cursor2._Cursor__query_flags) + self.assertEqual(cursor._Cursor__query_flags, cursor2._Cursor__query_flags) cursor.add_option(128) - cursor2 = self.db.test.find( - cursor_type=CursorType.TAILABLE_AWAIT).add_option(128) + cursor2 = self.db.test.find(cursor_type=CursorType.TAILABLE_AWAIT).add_option(128) self.assertEqual(162, cursor2._Cursor__query_flags) - self.assertEqual(cursor._Cursor__query_flags, - cursor2._Cursor__query_flags) + self.assertEqual(cursor._Cursor__query_flags, cursor2._Cursor__query_flags) self.assertEqual(162, cursor._Cursor__query_flags) cursor.add_option(128) @@ -86,13 +87,11 @@ def test_add_remove_option(self): cursor.remove_option(128) cursor2 = self.db.test.find(cursor_type=CursorType.TAILABLE_AWAIT) self.assertEqual(34, cursor2._Cursor__query_flags) - self.assertEqual(cursor._Cursor__query_flags, - cursor2._Cursor__query_flags) + self.assertEqual(cursor._Cursor__query_flags, cursor2._Cursor__query_flags) cursor.remove_option(32) cursor2 = self.db.test.find(cursor_type=CursorType.TAILABLE) self.assertEqual(2, cursor2._Cursor__query_flags) - self.assertEqual(cursor._Cursor__query_flags, - cursor2._Cursor__query_flags) + self.assertEqual(cursor._Cursor__query_flags, cursor2._Cursor__query_flags) self.assertEqual(2, cursor._Cursor__query_flags) cursor.remove_option(32) @@ -102,8 +101,7 @@ def test_add_remove_option(self): cursor = self.db.test.find(no_cursor_timeout=True) self.assertEqual(16, cursor._Cursor__query_flags) cursor2 = self.db.test.find().add_option(16) - self.assertEqual(cursor._Cursor__query_flags, - cursor2._Cursor__query_flags) + self.assertEqual(cursor._Cursor__query_flags, cursor2._Cursor__query_flags) cursor.remove_option(16) self.assertEqual(0, cursor._Cursor__query_flags) @@ -111,8 +109,7 @@ def test_add_remove_option(self): cursor = self.db.test.find(cursor_type=CursorType.TAILABLE_AWAIT) self.assertEqual(34, cursor._Cursor__query_flags) cursor2 = self.db.test.find().add_option(34) - self.assertEqual(cursor._Cursor__query_flags, - cursor2._Cursor__query_flags) + self.assertEqual(cursor._Cursor__query_flags, cursor2._Cursor__query_flags) cursor.remove_option(32) self.assertEqual(2, cursor._Cursor__query_flags) @@ -120,8 +117,7 @@ def test_add_remove_option(self): cursor = self.db.test.find(allow_partial_results=True) self.assertEqual(128, cursor._Cursor__query_flags) cursor2 = self.db.test.find().add_option(128) - self.assertEqual(cursor._Cursor__query_flags, - cursor2._Cursor__query_flags) + self.assertEqual(cursor._Cursor__query_flags, cursor2._Cursor__query_flags) cursor.remove_option(128) self.assertEqual(0, cursor._Cursor__query_flags) @@ -134,8 +130,7 @@ def test_add_remove_option_exhaust(self): cursor = self.db.test.find(cursor_type=CursorType.EXHAUST) self.assertEqual(64, cursor._Cursor__query_flags) cursor2 = self.db.test.find().add_option(64) - 
self.assertEqual(cursor._Cursor__query_flags, - cursor2._Cursor__query_flags) + self.assertEqual(cursor._Cursor__query_flags, cursor2._Cursor__query_flags) self.assertTrue(cursor._Cursor__exhaust) cursor.remove_option(64) self.assertEqual(0, cursor._Cursor__query_flags) @@ -146,7 +141,7 @@ def test_allow_disk_use(self): db.pymongo_test.drop() coll = db.pymongo_test - self.assertRaises(TypeError, coll.find().allow_disk_use, 'baz') + self.assertRaises(TypeError, coll.find().allow_disk_use, "baz") cursor = coll.find().allow_disk_use(True) self.assertEqual(True, cursor._Cursor__allow_disk_use) # type: ignore @@ -157,7 +152,7 @@ def test_max_time_ms(self): db = self.db db.pymongo_test.drop() coll = db.pymongo_test - self.assertRaises(TypeError, coll.find().max_time_ms, 'foo') + self.assertRaises(TypeError, coll.find().max_time_ms, "foo") coll.insert_one({"amalia": 1}) coll.insert_one({"amalia": 2}) @@ -178,12 +173,9 @@ def test_max_time_ms(self): self.assertTrue(coll.find_one(max_time_ms=1000)) client = self.client - if (not client_context.is_mongos - and client_context.test_commands_enabled): + if not client_context.is_mongos and client_context.test_commands_enabled: # Cursor parses server timeout error in response to initial query. - client.admin.command("configureFailPoint", - "maxTimeAlwaysTimeOut", - mode="alwaysOn") + client.admin.command("configureFailPoint", "maxTimeAlwaysTimeOut", mode="alwaysOn") try: cursor = coll.find().max_time_ms(1) try: @@ -192,19 +184,16 @@ def test_max_time_ms(self): pass else: self.fail("ExecutionTimeout not raised") - self.assertRaises(ExecutionTimeout, - coll.find_one, max_time_ms=1) + self.assertRaises(ExecutionTimeout, coll.find_one, max_time_ms=1) finally: - client.admin.command("configureFailPoint", - "maxTimeAlwaysTimeOut", - mode="off") + client.admin.command("configureFailPoint", "maxTimeAlwaysTimeOut", mode="off") def test_max_await_time_ms(self): db = self.db db.pymongo_test.drop() coll = db.create_collection("pymongo_test", capped=True, size=4096) - self.assertRaises(TypeError, coll.find().max_await_time_ms, 'foo') + self.assertRaises(TypeError, coll.find().max_await_time_ms, "foo") coll.insert_one({"amalia": 1}) coll.insert_one({"amalia": 2}) @@ -222,95 +211,91 @@ def test_max_await_time_ms(self): self.assertEqual(None, cursor._Cursor__max_await_time_ms) # If cursor is tailable_await and timeout is set - cursor = coll.find( - cursor_type=CursorType.TAILABLE_AWAIT).max_await_time_ms(99) + cursor = coll.find(cursor_type=CursorType.TAILABLE_AWAIT).max_await_time_ms(99) self.assertEqual(99, cursor._Cursor__max_await_time_ms) - cursor = coll.find( - cursor_type=CursorType.TAILABLE_AWAIT).max_await_time_ms( - 10).max_await_time_ms(90) + cursor = ( + coll.find(cursor_type=CursorType.TAILABLE_AWAIT) + .max_await_time_ms(10) + .max_await_time_ms(90) + ) self.assertEqual(90, cursor._Cursor__max_await_time_ms) - listener = AllowListEventListener('find', 'getMore') - coll = rs_or_single_client( - event_listeners=[listener])[self.db.name].pymongo_test + listener = AllowListEventListener("find", "getMore") + coll = rs_or_single_client(event_listeners=[listener])[self.db.name].pymongo_test results = listener.results # Tailable_await defaults. 
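# The division of labor checked below (sketch): max_time_ms becomes
# maxTimeMS on the initial find command, while max_await_time_ms
# becomes maxTimeMS on each getMore, and only for TAILABLE_AWAIT
# cursors; for example:
#     coll.find(cursor_type=CursorType.TAILABLE_AWAIT)
#         .max_time_ms(99)        # applies to the find
#         .max_await_time_ms(99)  # applies to the getMores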
list(coll.find(cursor_type=CursorType.TAILABLE_AWAIT)) # find - self.assertFalse('maxTimeMS' in results['started'][0].command) + self.assertFalse("maxTimeMS" in results["started"][0].command) # getMore - self.assertFalse('maxTimeMS' in results['started'][1].command) + self.assertFalse("maxTimeMS" in results["started"][1].command) results.clear() # Tailable_await with max_await_time_ms set. - list(coll.find( - cursor_type=CursorType.TAILABLE_AWAIT).max_await_time_ms(99)) + list(coll.find(cursor_type=CursorType.TAILABLE_AWAIT).max_await_time_ms(99)) # find - self.assertEqual('find', results['started'][0].command_name) - self.assertFalse('maxTimeMS' in results['started'][0].command) + self.assertEqual("find", results["started"][0].command_name) + self.assertFalse("maxTimeMS" in results["started"][0].command) # getMore - self.assertEqual('getMore', results['started'][1].command_name) - self.assertTrue('maxTimeMS' in results['started'][1].command) - self.assertEqual(99, results['started'][1].command['maxTimeMS']) + self.assertEqual("getMore", results["started"][1].command_name) + self.assertTrue("maxTimeMS" in results["started"][1].command) + self.assertEqual(99, results["started"][1].command["maxTimeMS"]) results.clear() # Tailable_await with max_time_ms - list(coll.find( - cursor_type=CursorType.TAILABLE_AWAIT).max_time_ms(99)) + list(coll.find(cursor_type=CursorType.TAILABLE_AWAIT).max_time_ms(99)) # find - self.assertEqual('find', results['started'][0].command_name) - self.assertTrue('maxTimeMS' in results['started'][0].command) - self.assertEqual(99, results['started'][0].command['maxTimeMS']) + self.assertEqual("find", results["started"][0].command_name) + self.assertTrue("maxTimeMS" in results["started"][0].command) + self.assertEqual(99, results["started"][0].command["maxTimeMS"]) # getMore - self.assertEqual('getMore', results['started'][1].command_name) - self.assertFalse('maxTimeMS' in results['started'][1].command) + self.assertEqual("getMore", results["started"][1].command_name) + self.assertFalse("maxTimeMS" in results["started"][1].command) results.clear() # Tailable_await with both max_time_ms and max_await_time_ms - list(coll.find( - cursor_type=CursorType.TAILABLE_AWAIT).max_time_ms( - 99).max_await_time_ms(99)) + list(coll.find(cursor_type=CursorType.TAILABLE_AWAIT).max_time_ms(99).max_await_time_ms(99)) # find - self.assertEqual('find', results['started'][0].command_name) - self.assertTrue('maxTimeMS' in results['started'][0].command) - self.assertEqual(99, results['started'][0].command['maxTimeMS']) + self.assertEqual("find", results["started"][0].command_name) + self.assertTrue("maxTimeMS" in results["started"][0].command) + self.assertEqual(99, results["started"][0].command["maxTimeMS"]) # getMore - self.assertEqual('getMore', results['started'][1].command_name) - self.assertTrue('maxTimeMS' in results['started'][1].command) - self.assertEqual(99, results['started'][1].command['maxTimeMS']) + self.assertEqual("getMore", results["started"][1].command_name) + self.assertTrue("maxTimeMS" in results["started"][1].command) + self.assertEqual(99, results["started"][1].command["maxTimeMS"]) results.clear() # Non tailable_await with max_await_time_ms list(coll.find(batch_size=1).max_await_time_ms(99)) # find - self.assertEqual('find', results['started'][0].command_name) - self.assertFalse('maxTimeMS' in results['started'][0].command) + self.assertEqual("find", results["started"][0].command_name) + self.assertFalse("maxTimeMS" in results["started"][0].command) # getMore - 
self.assertEqual('getMore', results['started'][1].command_name) - self.assertFalse('maxTimeMS' in results['started'][1].command) + self.assertEqual("getMore", results["started"][1].command_name) + self.assertFalse("maxTimeMS" in results["started"][1].command) results.clear() # Non tailable_await with max_time_ms list(coll.find(batch_size=1).max_time_ms(99)) # find - self.assertEqual('find', results['started'][0].command_name) - self.assertTrue('maxTimeMS' in results['started'][0].command) - self.assertEqual(99, results['started'][0].command['maxTimeMS']) + self.assertEqual("find", results["started"][0].command_name) + self.assertTrue("maxTimeMS" in results["started"][0].command) + self.assertEqual(99, results["started"][0].command["maxTimeMS"]) # getMore - self.assertEqual('getMore', results['started'][1].command_name) - self.assertFalse('maxTimeMS' in results['started'][1].command) + self.assertEqual("getMore", results["started"][1].command_name) + self.assertFalse("maxTimeMS" in results["started"][1].command) # Non tailable_await with both max_time_ms and max_await_time_ms list(coll.find(batch_size=1).max_time_ms(99).max_await_time_ms(88)) # find - self.assertEqual('find', results['started'][0].command_name) - self.assertTrue('maxTimeMS' in results['started'][0].command) - self.assertEqual(99, results['started'][0].command['maxTimeMS']) + self.assertEqual("find", results["started"][0].command_name) + self.assertTrue("maxTimeMS" in results["started"][0].command) + self.assertEqual(99, results["started"][0].command["maxTimeMS"]) # getMore - self.assertEqual('getMore', results['started'][1].command_name) - self.assertFalse('maxTimeMS' in results['started'][1].command) + self.assertEqual("getMore", results["started"][1].command_name) + self.assertFalse("maxTimeMS" in results["started"][1].command) @client_context.require_test_commands @client_context.require_no_mongos @@ -322,9 +307,7 @@ def test_max_time_ms_getmore(self): # Send initial query before turning on failpoint. next(cursor) - self.client.admin.command("configureFailPoint", - "maxTimeAlwaysTimeOut", - mode="alwaysOn") + self.client.admin.command("configureFailPoint", "maxTimeAlwaysTimeOut", mode="alwaysOn") try: try: # Iterate up to first getmore. 
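# maxTimeAlwaysTimeOut (available only with test commands enabled)
# makes any operation subject to a maxTimeMS limit fail as if the
# limit were exceeded, so sending the initial query first ensures the
# induced timeout surfaces on the getMore (sketch of the toggle):
#     client.admin.command(
#         "configureFailPoint", "maxTimeAlwaysTimeOut", mode="alwaysOn")
#     ...
#     client.admin.command(
#         "configureFailPoint", "maxTimeAlwaysTimeOut", mode="off")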
@@ -334,9 +317,7 @@ def test_max_time_ms_getmore(self): else: self.fail("ExecutionTimeout not raised") finally: - self.client.admin.command("configureFailPoint", - "maxTimeAlwaysTimeOut", - mode="off") + self.client.admin.command("configureFailPoint", "maxTimeAlwaysTimeOut", mode="off") def test_explain(self): a = self.db.test.find() @@ -351,10 +332,9 @@ def test_explain_with_read_concern(self): listener = AllowListEventListener("explain") client = rs_or_single_client(event_listeners=[listener]) self.addCleanup(client.close) - coll = client.pymongo_test.test.with_options( - read_concern=ReadConcern(level="local")) + coll = client.pymongo_test.test.with_options(read_concern=ReadConcern(level="local")) self.assertTrue(coll.find().explain()) - started = listener.results['started'] + started = listener.results["started"] self.assertEqual(len(started), 1) self.assertNotIn("readConcern", started[0].command) @@ -365,23 +345,26 @@ def test_hint(self): db.test.insert_many([{"num": i, "foo": i} for i in range(100)]) - self.assertRaises(OperationFailure, - db.test.find({"num": 17, "foo": 17}) - .hint([("num", ASCENDING)]).explain) - self.assertRaises(OperationFailure, - db.test.find({"num": 17, "foo": 17}) - .hint([("foo", ASCENDING)]).explain) + self.assertRaises( + OperationFailure, + db.test.find({"num": 17, "foo": 17}).hint([("num", ASCENDING)]).explain, + ) + self.assertRaises( + OperationFailure, + db.test.find({"num": 17, "foo": 17}).hint([("foo", ASCENDING)]).explain, + ) spec = [("num", DESCENDING)] index = db.test.create_index(spec) first = next(db.test.find()) - self.assertEqual(0, first.get('num')) + self.assertEqual(0, first.get("num")) first = next(db.test.find().hint(spec)) - self.assertEqual(99, first.get('num')) - self.assertRaises(OperationFailure, - db.test.find({"num": 17, "foo": 17}) - .hint([("foo", ASCENDING)]).explain) + self.assertEqual(99, first.get("num")) + self.assertRaises( + OperationFailure, + db.test.find({"num": 17, "foo": 17}).hint([("foo", ASCENDING)]).explain, + ) a = db.test.find({"num": 17}) a.hint(spec) @@ -395,11 +378,11 @@ def test_hint_by_name(self): db.test.insert_many([{"i": i} for i in range(100)]) - db.test.create_index([('i', DESCENDING)], name='fooindex') + db.test.create_index([("i", DESCENDING)], name="fooindex") first = next(db.test.find()) - self.assertEqual(0, first.get('i')) - first = next(db.test.find().hint('fooindex')) - self.assertEqual(99, first.get('i')) + self.assertEqual(0, first.get("i")) + first = next(db.test.find().hint("fooindex")) + self.assertEqual(99, first.get("i")) def test_limit(self): db = self.db @@ -702,8 +685,7 @@ def test_sort(self): self.assertRaises(TypeError, db.test.find().sort, 5) self.assertRaises(ValueError, db.test.find().sort, []) self.assertRaises(TypeError, db.test.find().sort, [], ASCENDING) - self.assertRaises(TypeError, db.test.find().sort, - [("hello", DESCENDING)], DESCENDING) + self.assertRaises(TypeError, db.test.find().sort, [("hello", DESCENDING)], DESCENDING) db.test.drop() @@ -724,8 +706,7 @@ def test_sort(self): self.assertEqual(desc, expect) desc = [i["x"] for i in db.test.find().sort([("x", DESCENDING)])] self.assertEqual(desc, expect) - desc = [i["x"] for i in - db.test.find().sort("x", ASCENDING).sort("x", DESCENDING)] + desc = [i["x"] for i in db.test.find().sort("x", ASCENDING).sort("x", DESCENDING)] self.assertEqual(desc, expect) expected = [(1, 5), (2, 5), (0, 3), (7, 3), (9, 2), (2, 1), (3, 1)] @@ -736,9 +717,9 @@ def test_sort(self): for (a, b) in shuffled: db.test.insert_one({"a": a, "b": 
b}) - result = [(i["a"], i["b"]) for i in - db.test.find().sort([("b", DESCENDING), - ("a", ASCENDING)])] + result = [ + (i["a"], i["b"]) for i in db.test.find().sort([("b", DESCENDING), ("a", ASCENDING)]) + ] self.assertEqual(result, expected) a = db.test.find() @@ -758,42 +739,34 @@ def test_where(self): db.test.insert_many([{"x": i} for i in range(10)]) - self.assertEqual(3, len(list(db.test.find().where('this.x < 3')))) - self.assertEqual(3, - len(list(db.test.find().where(Code('this.x < 3'))))) + self.assertEqual(3, len(list(db.test.find().where("this.x < 3")))) + self.assertEqual(3, len(list(db.test.find().where(Code("this.x < 3"))))) - code_with_scope = Code('this.x < i', {"i": 3}) + code_with_scope = Code("this.x < i", {"i": 3}) if client_context.version.at_least(4, 3, 3): # MongoDB 4.4 removed support for Code with scope. with self.assertRaises(OperationFailure): list(db.test.find().where(code_with_scope)) - code_with_empty_scope = Code('this.x < 3', {}) + code_with_empty_scope = Code("this.x < 3", {}) with self.assertRaises(OperationFailure): list(db.test.find().where(code_with_empty_scope)) else: - self.assertEqual( - 3, len(list(db.test.find().where(code_with_scope)))) + self.assertEqual(3, len(list(db.test.find().where(code_with_scope)))) self.assertEqual(10, len(list(db.test.find()))) - self.assertEqual([0, 1, 2], - [a["x"] for a in - db.test.find().where('this.x < 3')]) - self.assertEqual([], - [a["x"] for a in - db.test.find({"x": 5}).where('this.x < 3')]) - self.assertEqual([5], - [a["x"] for a in - db.test.find({"x": 5}).where('this.x > 3')]) - - cursor = db.test.find().where('this.x < 3').where('this.x > 7') + self.assertEqual([0, 1, 2], [a["x"] for a in db.test.find().where("this.x < 3")]) + self.assertEqual([], [a["x"] for a in db.test.find({"x": 5}).where("this.x < 3")]) + self.assertEqual([5], [a["x"] for a in db.test.find({"x": 5}).where("this.x > 3")]) + + cursor = db.test.find().where("this.x < 3").where("this.x > 7") self.assertEqual([8, 9], [a["x"] for a in cursor]) a = db.test.find() - b = a.where('this.x > 3') + b = a.where("this.x > 3") for _ in a: break - self.assertRaises(InvalidOperation, a.where, 'this.x < 3') + self.assertRaises(InvalidOperation, a.where, "this.x < 3") def test_rewind(self): self.db.test.insert_many([{"x": i} for i in range(1, 4)]) @@ -866,26 +839,28 @@ def test_clone(self): self.assertNotEqual(cursor, cursor.clone()) # Just test attributes - cursor = self.db.test.find({"x": re.compile("^hello.*")}, - projection={'_id': False}, - skip=1, - no_cursor_timeout=True, - cursor_type=CursorType.TAILABLE_AWAIT, - sort=[("x", 1)], - allow_partial_results=True, - oplog_replay=True, - batch_size=123, - collation={'locale': 'en_US'}, - hint=[("_id", 1)], - max_scan=100, - max_time_ms=1000, - return_key=True, - show_record_id=True, - snapshot=True, - allow_disk_use=True).limit(2) - cursor.min([('a', 1)]).max([('b', 3)]) + cursor = self.db.test.find( + {"x": re.compile("^hello.*")}, + projection={"_id": False}, + skip=1, + no_cursor_timeout=True, + cursor_type=CursorType.TAILABLE_AWAIT, + sort=[("x", 1)], + allow_partial_results=True, + oplog_replay=True, + batch_size=123, + collation={"locale": "en_US"}, + hint=[("_id", 1)], + max_scan=100, + max_time_ms=1000, + return_key=True, + show_record_id=True, + snapshot=True, + allow_disk_use=True, + ).limit(2) + cursor.min([("a", 1)]).max([("b", 3)]) cursor.add_option(128) - cursor.comment('hi!') + cursor.comment("hi!") # Every attribute should be the same. 
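# cursor.clone() and copy.deepcopy() both yield an independent cursor
# with identical options, while copy.copy() shares mutable state such as
# the projection dict; the assertions below pin down that contract. A
# minimal illustrative sketch (not part of this patch):
#
#   original = coll.find({}, projection={"_id": False})
#   independent = original.clone()  # later mutations don't leak back
#   aliased = copy.copy(original)   # shares the projection dict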
cursor2 = cursor.clone() @@ -893,17 +868,17 @@ def test_clone(self): # Shallow copies share state, so they can mutate cursor2 = copy.copy(cursor) - cursor2._Cursor__projection['cursor2'] = False - self.assertTrue('cursor2' in cursor._Cursor__projection) + cursor2._Cursor__projection["cursor2"] = False + self.assertTrue("cursor2" in cursor._Cursor__projection) # Deep copies are independent and shouldn't mutate cursor3 = copy.deepcopy(cursor) - cursor3._Cursor__projection['cursor3'] = False - self.assertFalse('cursor3' in cursor._Cursor__projection) + cursor3._Cursor__projection["cursor3"] = False + self.assertFalse("cursor3" in cursor._Cursor__projection) cursor4 = cursor.clone() - cursor4._Cursor__projection['cursor4'] = False - self.assertFalse('cursor4' in cursor._Cursor__projection) + cursor4._Cursor__projection["cursor4"] = False + self.assertFalse("cursor4" in cursor._Cursor__projection) # Test memo when deepcopying queries query = {"hello": "world"} @@ -912,14 +887,12 @@ def test_clone(self): cursor2 = copy.deepcopy(cursor) - self.assertNotEqual(id(cursor._Cursor__spec), - id(cursor2._Cursor__spec)) - self.assertEqual(id(cursor2._Cursor__spec['reflexive']), - id(cursor2._Cursor__spec)) + self.assertNotEqual(id(cursor._Cursor__spec), id(cursor2._Cursor__spec)) + self.assertEqual(id(cursor2._Cursor__spec["reflexive"]), id(cursor2._Cursor__spec)) self.assertEqual(len(cursor2._Cursor__spec), 2) # Ensure hints are cloned as the correct type - cursor = self.db.test.find().hint([('z', 1), ("a", 1)]) + cursor = self.db.test.find().hint([("z", 1), ("a", 1)]) cursor2 = copy.deepcopy(cursor) self.assertTrue(isinstance(cursor2._Cursor__hint, SON)) self.assertEqual(cursor._Cursor__hint, cursor2._Cursor__hint) @@ -947,46 +920,38 @@ def test_getitem_slice_index(self): self.assertRaises(IndexError, lambda: self.db.test.find()[1:2:2]) for a, b in zip(count(0), self.db.test.find()): - self.assertEqual(a, b['i']) + self.assertEqual(a, b["i"]) self.assertEqual(100, len(list(self.db.test.find()[0:]))) for a, b in zip(count(0), self.db.test.find()[0:]): - self.assertEqual(a, b['i']) + self.assertEqual(a, b["i"]) self.assertEqual(80, len(list(self.db.test.find()[20:]))) for a, b in zip(count(20), self.db.test.find()[20:]): - self.assertEqual(a, b['i']) + self.assertEqual(a, b["i"]) for a, b in zip(count(99), self.db.test.find()[99:]): - self.assertEqual(a, b['i']) + self.assertEqual(a, b["i"]) for i in self.db.test.find()[1000:]: self.fail() self.assertEqual(5, len(list(self.db.test.find()[20:25]))) - self.assertEqual(5, len(list( - self.db.test.find()[20:25]))) + self.assertEqual(5, len(list(self.db.test.find()[20:25]))) for a, b in zip(count(20), self.db.test.find()[20:25]): - self.assertEqual(a, b['i']) + self.assertEqual(a, b["i"]) self.assertEqual(80, len(list(self.db.test.find()[40:45][20:]))) for a, b in zip(count(20), self.db.test.find()[40:45][20:]): - self.assertEqual(a, b['i']) - - self.assertEqual(80, - len(list(self.db.test.find()[40:45].limit(0).skip(20)) - ) - ) - for a, b in zip(count(20), - self.db.test.find()[40:45].limit(0).skip(20)): - self.assertEqual(a, b['i']) - - self.assertEqual(80, - len(list(self.db.test.find().limit(10).skip(40)[20:])) - ) - for a, b in zip(count(20), - self.db.test.find().limit(10).skip(40)[20:]): - self.assertEqual(a, b['i']) + self.assertEqual(a, b["i"]) + + self.assertEqual(80, len(list(self.db.test.find()[40:45].limit(0).skip(20)))) + for a, b in zip(count(20), self.db.test.find()[40:45].limit(0).skip(20)): + self.assertEqual(a, b["i"]) + + self.assertEqual(80, 
len(list(self.db.test.find().limit(10).skip(40)[20:]))) + for a, b in zip(count(20), self.db.test.find().limit(10).skip(40)[20:]): + self.assertEqual(a, b["i"]) self.assertEqual(1, len(list(self.db.test.find()[:1]))) self.assertEqual(5, len(list(self.db.test.find()[:5]))) @@ -995,10 +960,7 @@ def test_getitem_slice_index(self): self.assertEqual(1, len(list(self.db.test.find()[99:1000]))) self.assertEqual(0, len(list(self.db.test.find()[10:10]))) self.assertEqual(0, len(list(self.db.test.find()[:0]))) - self.assertEqual(80, - len(list(self.db.test.find()[10:10].limit(0).skip(20)) - ) - ) + self.assertEqual(80, len(list(self.db.test.find()[10:10].limit(0).skip(20)))) self.assertRaises(IndexError, lambda: self.db.test.find()[10:8]) @@ -1006,17 +968,16 @@ def test_getitem_numeric_index(self): self.db.drop_collection("test") self.db.test.insert_many([{"i": i} for i in range(100)]) - self.assertEqual(0, self.db.test.find()[0]['i']) - self.assertEqual(50, self.db.test.find()[50]['i']) - self.assertEqual(50, self.db.test.find().skip(50)[0]['i']) - self.assertEqual(50, self.db.test.find().skip(49)[1]['i']) - self.assertEqual(50, self.db.test.find()[50]['i']) - self.assertEqual(99, self.db.test.find()[99]['i']) + self.assertEqual(0, self.db.test.find()[0]["i"]) + self.assertEqual(50, self.db.test.find()[50]["i"]) + self.assertEqual(50, self.db.test.find().skip(50)[0]["i"]) + self.assertEqual(50, self.db.test.find().skip(49)[1]["i"]) + self.assertEqual(50, self.db.test.find()[50]["i"]) + self.assertEqual(99, self.db.test.find()[99]["i"]) self.assertRaises(IndexError, lambda x: self.db.test.find()[x], -1) self.assertRaises(IndexError, lambda x: self.db.test.find()[x], 100) - self.assertRaises(IndexError, - lambda x: self.db.test.find().skip(50)[x], 50) + self.assertRaises(IndexError, lambda x: self.db.test.find().skip(50)[x], 50) def test_len(self): self.assertRaises(TypeError, len, self.db.test.find()) @@ -1032,7 +993,7 @@ def set_coll(): def test_get_more(self): db = self.db db.drop_collection("test") - db.test.insert_many([{'i': i} for i in range(10)]) + db.test.insert_many([{"i": i} for i in range(10)]) self.assertEqual(10, len(list(db.test.find().batch_size(5)))) def test_tailable(self): @@ -1075,8 +1036,10 @@ def test_tailable(self): self.assertEqual(3, db.test.count_documents({})) # __getitem__(index) - for cursor in (db.test.find(cursor_type=CursorType.TAILABLE), - db.test.find(cursor_type=CursorType.TAILABLE_AWAIT)): + for cursor in ( + db.test.find(cursor_type=CursorType.TAILABLE), + db.test.find(cursor_type=CursorType.TAILABLE_AWAIT), + ): self.assertEqual(4, cursor[0]["x"]) self.assertEqual(5, cursor[1]["x"]) self.assertEqual(6, cursor[2]["x"]) @@ -1106,6 +1069,7 @@ def iterate_cursor(): while cursor.alive: for doc in cursor: pass + t = threading.Thread(target=iterate_cursor) t.start() time.sleep(1) @@ -1114,12 +1078,10 @@ def iterate_cursor(): t.join(3) self.assertFalse(t.is_alive()) - def test_distinct(self): self.db.drop_collection("test") - self.db.test.insert_many( - [{"a": 1}, {"a": 2}, {"a": 2}, {"a": 2}, {"a": 3}]) + self.db.test.insert_many([{"a": 1}, {"a": 2}, {"a": 2}, {"a": 2}, {"a": 3}]) distinct = self.db.test.find({"a": {"$lt": 3}}).distinct("a") distinct.sort() @@ -1145,8 +1107,7 @@ def test_max_scan(self): self.assertEqual(100, len(list(self.db.test.find()))) self.assertEqual(50, len(list(self.db.test.find().max_scan(50)))) - self.assertEqual(50, len(list(self.db.test.find() - .max_scan(90).max_scan(50)))) + self.assertEqual(50, 
len(list(self.db.test.find().max_scan(90).max_scan(50)))) def test_with_statement(self): self.db.drop_collection("test") @@ -1165,28 +1126,32 @@ def test_with_statement(self): @client_context.require_no_mongos def test_comment(self): self.client.drop_database(self.db) - self.db.command('profile', 2) # Profile ALL commands. + self.db.command("profile", 2) # Profile ALL commands. try: - list(self.db.test.find().comment('foo')) + list(self.db.test.find().comment("foo")) count = self.db.system.profile.count_documents( - {'ns': 'pymongo_test.test', 'op': 'query', - 'command.comment': 'foo'}) + {"ns": "pymongo_test.test", "op": "query", "command.comment": "foo"} + ) self.assertEqual(count, 1) - self.db.test.find().comment('foo').distinct('type') + self.db.test.find().comment("foo").distinct("type") count = self.db.system.profile.count_documents( - {'ns': 'pymongo_test.test', 'op': 'command', - 'command.distinct': 'test', - 'command.comment': 'foo'}) + { + "ns": "pymongo_test.test", + "op": "command", + "command.distinct": "test", + "command.comment": "foo", + } + ) self.assertEqual(count, 1) finally: - self.db.command('profile', 0) # Turn off profiling. + self.db.command("profile", 0) # Turn off profiling. self.db.system.profile.drop() self.db.test.insert_many([{}, {}]) cursor = self.db.test.find() next(cursor) - self.assertRaises(InvalidOperation, cursor.comment, 'hello') + self.assertRaises(InvalidOperation, cursor.comment, "hello") def test_alive(self): self.db.test.delete_many({}) @@ -1230,8 +1195,7 @@ def assertCursorKilled(): self.assertEqual(1, len(results["started"])) self.assertEqual("killCursors", results["started"][0].command_name) self.assertEqual(1, len(results["succeeded"])) - self.assertEqual("killCursors", - results["succeeded"][0].command_name) + self.assertEqual("killCursors", results["succeeded"][0].command_name) assertCursorKilled() results.clear() @@ -1254,37 +1218,37 @@ def test_delete_not_initialized(self): cursor.__del__() # no error def test_getMore_does_not_send_readPreference(self): - listener = AllowListEventListener('find', 'getMore') - client = rs_or_single_client( - event_listeners=[listener]) + listener = AllowListEventListener("find", "getMore") + client = rs_or_single_client(event_listeners=[listener]) self.addCleanup(client.close) # We never send primary read preference so override the default. 
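# Background (summary, not part of this patch): the driver attaches
# $readPreference to the initial find on replica sets and mongos, but a
# getMore is pinned to the server that created the cursor and never
# carries one. Sketch (names illustrative):
#
#   coll = some_db.get_collection(
#       "test", read_preference=ReadPreference.PRIMARY_PREFERRED)
#   list(coll.find(batch_size=2))  # find may carry $readPreference;
#                                  # the following getMore does not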
coll = client[self.db.name].get_collection( - 'test', read_preference=ReadPreference.PRIMARY_PREFERRED) + "test", read_preference=ReadPreference.PRIMARY_PREFERRED + ) coll.delete_many({}) coll.insert_many([{} for _ in range(5)]) self.addCleanup(coll.drop) list(coll.find(batch_size=3)) - started = listener.results['started'] + started = listener.results["started"] self.assertEqual(2, len(started)) - self.assertEqual('find', started[0].command_name) + self.assertEqual("find", started[0].command_name) if client_context.is_rs or client_context.is_mongos: - self.assertIn('$readPreference', started[0].command) + self.assertIn("$readPreference", started[0].command) else: - self.assertNotIn('$readPreference', started[0].command) - self.assertEqual('getMore', started[1].command_name) - self.assertNotIn('$readPreference', started[1].command) + self.assertNotIn("$readPreference", started[0].command) + self.assertEqual("getMore", started[1].command_name) + self.assertNotIn("$readPreference", started[1].command) class TestRawBatchCursor(IntegrationTest): def test_find_raw(self): c = self.db.test c.drop() - docs = [{'_id': i, 'x': 3.0 * i} for i in range(10)] + docs = [{"_id": i, "x": 3.0 * i} for i in range(10)] c.insert_many(docs) - batches = list(c.find_raw_batches().sort('_id')) + batches = list(c.find_raw_batches().sort("_id")) self.assertEqual(1, len(batches)) self.assertEqual(docs, decode_all(batches[0])) @@ -1292,24 +1256,27 @@ def test_find_raw(self): def test_find_raw_transaction(self): c = self.db.test c.drop() - docs = [{'_id': i, 'x': 3.0 * i} for i in range(10)] + docs = [{"_id": i, "x": 3.0 * i} for i in range(10)] c.insert_many(docs) listener = OvertCommandListener() client = rs_or_single_client(event_listeners=[listener]) with client.start_session() as session: with session.start_transaction(): - batches = list(client[self.db.name].test.find_raw_batches( - session=session).sort('_id')) - cmd = listener.results['started'][0] - self.assertEqual(cmd.command_name, 'find') - self.assertIn('$clusterTime', cmd.command) - self.assertEqual(cmd.command['startTransaction'], True) - self.assertEqual(cmd.command['txnNumber'], 1) + batches = list( + client[self.db.name].test.find_raw_batches(session=session).sort("_id") + ) + cmd = listener.results["started"][0] + self.assertEqual(cmd.command_name, "find") + self.assertIn("$clusterTime", cmd.command) + self.assertEqual(cmd.command["startTransaction"], True) + self.assertEqual(cmd.command["txnNumber"], 1) # Ensure we update $clusterTime from the command response. 
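# Background (not part of this patch): drivers gossip cluster time; the
# $clusterTime observed in each reply is cached on the client/session
# and attached to subsequent commands, which is exactly what the next
# assertion verifies. Hedged sketch of the session-level API:
#
#   with client.start_session() as s:
#       client.admin.command("ping", session=s)
#       assert s.cluster_time is not None  # updated from the reply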
- last_cmd = listener.results['succeeded'][-1] - self.assertEqual(last_cmd.reply['$clusterTime']['clusterTime'], - session.cluster_time['clusterTime']) + last_cmd = listener.results["succeeded"][-1] + self.assertEqual( + last_cmd.reply["$clusterTime"]["clusterTime"], + session.cluster_time["clusterTime"], + ) self.assertEqual(1, len(batches)) self.assertEqual(docs, decode_all(batches[0])) @@ -1319,47 +1286,42 @@ def test_find_raw_transaction(self): def test_find_raw_retryable_reads(self): c = self.db.test c.drop() - docs = [{'_id': i, 'x': 3.0 * i} for i in range(10)] + docs = [{"_id": i, "x": 3.0 * i} for i in range(10)] c.insert_many(docs) listener = OvertCommandListener() - client = rs_or_single_client(event_listeners=[listener], - retryReads=True) - with self.fail_point({ - 'mode': {'times': 1}, 'data': {'failCommands': ['find'], - 'closeConnection': True}}): - batches = list( - client[self.db.name].test.find_raw_batches().sort('_id')) + client = rs_or_single_client(event_listeners=[listener], retryReads=True) + with self.fail_point( + {"mode": {"times": 1}, "data": {"failCommands": ["find"], "closeConnection": True}} + ): + batches = list(client[self.db.name].test.find_raw_batches().sort("_id")) self.assertEqual(1, len(batches)) self.assertEqual(docs, decode_all(batches[0])) - self.assertEqual(len(listener.results['started']), 2) - for cmd in listener.results['started']: - self.assertEqual(cmd.command_name, 'find') + self.assertEqual(len(listener.results["started"]), 2) + for cmd in listener.results["started"]: + self.assertEqual(cmd.command_name, "find") @client_context.require_version_min(5, 0, 0) @client_context.require_no_standalone def test_find_raw_snapshot_reads(self): - c = self.db.get_collection( - "test", write_concern=WriteConcern(w="majority")) + c = self.db.get_collection("test", write_concern=WriteConcern(w="majority")) c.drop() - docs = [{'_id': i, 'x': 3.0 * i} for i in range(10)] + docs = [{"_id": i, "x": 3.0 * i} for i in range(10)] c.insert_many(docs) listener = OvertCommandListener() - client = rs_or_single_client(event_listeners=[listener], - retryReads=True) + client = rs_or_single_client(event_listeners=[listener], retryReads=True) db = client[self.db.name] with client.start_session(snapshot=True) as session: - db.test.distinct('x', {}, session=session) - batches = list(db.test.find_raw_batches( - session=session).sort('_id')) + db.test.distinct("x", {}, session=session) + batches = list(db.test.find_raw_batches(session=session).sort("_id")) self.assertEqual(1, len(batches)) self.assertEqual(docs, decode_all(batches[0])) - find_cmd = listener.results['started'][1].command - self.assertEqual(find_cmd['readConcern']['level'], 'snapshot') - self.assertIsNotNone(find_cmd['readConcern']['atClusterTime']) + find_cmd = listener.results["started"][1].command + self.assertEqual(find_cmd["readConcern"]["level"], "snapshot") + self.assertIsNotNone(find_cmd["readConcern"]["atClusterTime"]) def test_explain(self): c = self.db.test @@ -1384,13 +1346,13 @@ def test_clone(self): def test_exhaust(self): c = self.db.test c.drop() - c.insert_many({'_id': i} for i in range(200)) - result = b''.join(c.find_raw_batches(cursor_type=CursorType.EXHAUST)) - self.assertEqual([{'_id': i} for i in range(200)], decode_all(result)) + c.insert_many({"_id": i} for i in range(200)) + result = b"".join(c.find_raw_batches(cursor_type=CursorType.EXHAUST)) + self.assertEqual([{"_id": i} for i in range(200)], decode_all(result)) def test_server_error(self): with self.assertRaises(OperationFailure) as 
exc: - next(self.db.test.find_raw_batches({'x': {'$bad': 1}})) + next(self.db.test.find_raw_batches({"x": {"$bad": 1}})) # The server response was decoded, not left raw. self.assertIsInstance(exc.exception.details, dict) @@ -1400,12 +1362,11 @@ def test_get_item(self): self.db.test.find_raw_batches()[0] def test_collation(self): - next(self.db.test.find_raw_batches(collation=Collation('en_US'))) + next(self.db.test.find_raw_batches(collation=Collation("en_US"))) - @client_context.require_no_mmap # MMAPv1 does not support read concern + @client_context.require_no_mmap # MMAPv1 does not support read concern def test_read_concern(self): - self.db.get_collection( - "test", write_concern=WriteConcern(w="majority")).insert_one({}) + self.db.get_collection("test", write_concern=WriteConcern(w="majority")).insert_one({}) c = self.db.get_collection("test", read_concern=ReadConcern("majority")) next(c.find_raw_batches()) @@ -1414,7 +1375,7 @@ def test_monitoring(self): client = rs_or_single_client(event_listeners=[listener]) c = client.pymongo_test.test c.drop() - c.insert_many([{'_id': i} for i in range(10)]) + c.insert_many([{"_id": i} for i in range(10)]) listener.results.clear() cursor = c.find_raw_batches(batch_size=4) @@ -1422,19 +1383,18 @@ def test_monitoring(self): # First raw batch of 4 documents. next(cursor) - started = listener.results['started'][0] - succeeded = listener.results['succeeded'][0] - self.assertEqual(0, len(listener.results['failed'])) - self.assertEqual('find', started.command_name) - self.assertEqual('pymongo_test', started.database_name) - self.assertEqual('find', succeeded.command_name) + started = listener.results["started"][0] + succeeded = listener.results["succeeded"][0] + self.assertEqual(0, len(listener.results["failed"])) + self.assertEqual("find", started.command_name) + self.assertEqual("pymongo_test", started.database_name) + self.assertEqual("find", succeeded.command_name) csr = succeeded.reply["cursor"] self.assertEqual(csr["ns"], "pymongo_test.test") # The batch is a list of one raw bytes object. self.assertEqual(len(csr["firstBatch"]), 1) - self.assertEqual(decode_all(csr["firstBatch"][0]), - [{'_id': i} for i in range(0, 4)]) + self.assertEqual(decode_all(csr["firstBatch"][0]), [{"_id": i} for i in range(0, 4)]) listener.results.clear() @@ -1442,17 +1402,16 @@ def test_monitoring(self): next(cursor) try: results = listener.results - started = results['started'][0] - succeeded = results['succeeded'][0] - self.assertEqual(0, len(results['failed'])) - self.assertEqual('getMore', started.command_name) - self.assertEqual('pymongo_test', started.database_name) - self.assertEqual('getMore', succeeded.command_name) + started = results["started"][0] + succeeded = results["succeeded"][0] + self.assertEqual(0, len(results["failed"])) + self.assertEqual("getMore", started.command_name) + self.assertEqual("pymongo_test", started.database_name) + self.assertEqual("getMore", succeeded.command_name) csr = succeeded.reply["cursor"] self.assertEqual(csr["ns"], "pymongo_test.test") self.assertEqual(len(csr["nextBatch"]), 1) - self.assertEqual(decode_all(csr["nextBatch"][0]), - [{'_id': i} for i in range(4, 8)]) + self.assertEqual(decode_all(csr["nextBatch"][0]), [{"_id": i} for i in range(4, 8)]) finally: # Finish the cursor. 
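# A raw-batches cursor holds a server-side cursor like any other, so the
# tuple(cursor) below drains the remaining batches and lets the server
# close it. Each yielded batch is raw BSON that bson.decode_all() can
# turn back into documents; sketch (not part of this patch):
#
#   for batch in coll.find_raw_batches(batch_size=4):
#       docs = decode_all(batch)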
tuple(cursor) @@ -1466,9 +1425,9 @@ def setUpClass(cls): def test_aggregate_raw(self): c = self.db.test c.drop() - docs = [{'_id': i, 'x': 3.0 * i} for i in range(10)] + docs = [{"_id": i, "x": 3.0 * i} for i in range(10)] c.insert_many(docs) - batches = list(c.aggregate_raw_batches([{'$sort': {'_id': 1}}])) + batches = list(c.aggregate_raw_batches([{"$sort": {"_id": 1}}])) self.assertEqual(1, len(batches)) self.assertEqual(docs, decode_all(batches[0])) @@ -1476,24 +1435,29 @@ def test_aggregate_raw(self): def test_aggregate_raw_transaction(self): c = self.db.test c.drop() - docs = [{'_id': i, 'x': 3.0 * i} for i in range(10)] + docs = [{"_id": i, "x": 3.0 * i} for i in range(10)] c.insert_many(docs) listener = OvertCommandListener() client = rs_or_single_client(event_listeners=[listener]) with client.start_session() as session: with session.start_transaction(): - batches = list(client[self.db.name].test.aggregate_raw_batches( - [{'$sort': {'_id': 1}}], session=session)) - cmd = listener.results['started'][0] - self.assertEqual(cmd.command_name, 'aggregate') - self.assertIn('$clusterTime', cmd.command) - self.assertEqual(cmd.command['startTransaction'], True) - self.assertEqual(cmd.command['txnNumber'], 1) + batches = list( + client[self.db.name].test.aggregate_raw_batches( + [{"$sort": {"_id": 1}}], session=session + ) + ) + cmd = listener.results["started"][0] + self.assertEqual(cmd.command_name, "aggregate") + self.assertIn("$clusterTime", cmd.command) + self.assertEqual(cmd.command["startTransaction"], True) + self.assertEqual(cmd.command["txnNumber"], 1) # Ensure we update $clusterTime from the command response. - last_cmd = listener.results['succeeded'][-1] - self.assertEqual(last_cmd.reply['$clusterTime']['clusterTime'], - session.cluster_time['clusterTime']) + last_cmd = listener.results["succeeded"][-1] + self.assertEqual( + last_cmd.reply["$clusterTime"]["clusterTime"], + session.cluster_time["clusterTime"], + ) self.assertEqual(1, len(batches)) self.assertEqual(docs, decode_all(batches[0])) @@ -1502,62 +1466,63 @@ def test_aggregate_raw_transaction(self): def test_aggregate_raw_retryable_reads(self): c = self.db.test c.drop() - docs = [{'_id': i, 'x': 3.0 * i} for i in range(10)] + docs = [{"_id": i, "x": 3.0 * i} for i in range(10)] c.insert_many(docs) listener = OvertCommandListener() - client = rs_or_single_client(event_listeners=[listener], - retryReads=True) - with self.fail_point({ - 'mode': {'times': 1}, 'data': {'failCommands': ['aggregate'], - 'closeConnection': True}}): - batches = list(client[self.db.name].test.aggregate_raw_batches( - [{'$sort': {'_id': 1}}])) + client = rs_or_single_client(event_listeners=[listener], retryReads=True) + with self.fail_point( + {"mode": {"times": 1}, "data": {"failCommands": ["aggregate"], "closeConnection": True}} + ): + batches = list(client[self.db.name].test.aggregate_raw_batches([{"$sort": {"_id": 1}}])) self.assertEqual(1, len(batches)) self.assertEqual(docs, decode_all(batches[0])) - self.assertEqual(len(listener.results['started']), 3) - cmds = listener.results['started'] - self.assertEqual(cmds[0].command_name, 'aggregate') - self.assertEqual(cmds[1].command_name, 'aggregate') + self.assertEqual(len(listener.results["started"]), 3) + cmds = listener.results["started"] + self.assertEqual(cmds[0].command_name, "aggregate") + self.assertEqual(cmds[1].command_name, "aggregate") @client_context.require_version_min(5, 0, -1) @client_context.require_no_standalone def test_aggregate_raw_snapshot_reads(self): - c = 
self.db.get_collection( - "test", write_concern=WriteConcern(w="majority")) + c = self.db.get_collection("test", write_concern=WriteConcern(w="majority")) c.drop() - docs = [{'_id': i, 'x': 3.0 * i} for i in range(10)] + docs = [{"_id": i, "x": 3.0 * i} for i in range(10)] c.insert_many(docs) listener = OvertCommandListener() - client = rs_or_single_client(event_listeners=[listener], - retryReads=True) + client = rs_or_single_client(event_listeners=[listener], retryReads=True) db = client[self.db.name] with client.start_session(snapshot=True) as session: - db.test.distinct('x', {}, session=session) - batches = list(db.test.aggregate_raw_batches( - [{'$sort': {'_id': 1}}], session=session)) + db.test.distinct("x", {}, session=session) + batches = list(db.test.aggregate_raw_batches([{"$sort": {"_id": 1}}], session=session)) self.assertEqual(1, len(batches)) self.assertEqual(docs, decode_all(batches[0])) - find_cmd = listener.results['started'][1].command - self.assertEqual(find_cmd['readConcern']['level'], 'snapshot') - self.assertIsNotNone(find_cmd['readConcern']['atClusterTime']) + find_cmd = listener.results["started"][1].command + self.assertEqual(find_cmd["readConcern"]["level"], "snapshot") + self.assertIsNotNone(find_cmd["readConcern"]["atClusterTime"]) def test_server_error(self): c = self.db.test c.drop() - docs = [{'_id': i, 'x': 3.0 * i} for i in range(10)] + docs = [{"_id": i, "x": 3.0 * i} for i in range(10)] c.insert_many(docs) - c.insert_one({'_id': 10, 'x': 'not a number'}) + c.insert_one({"_id": 10, "x": "not a number"}) with self.assertRaises(OperationFailure) as exc: - list(self.db.test.aggregate_raw_batches([{ - '$sort': {'_id': 1}, - }, { - '$project': {'x': {'$multiply': [2, '$x']}} - }], batchSize=4)) + list( + self.db.test.aggregate_raw_batches( + [ + { + "$sort": {"_id": 1}, + }, + {"$project": {"x": {"$multiply": [2, "$x"]}}}, + ], + batchSize=4, + ) + ) # The server response was decoded, not left raw. self.assertIsInstance(exc.exception.details, dict) @@ -1567,25 +1532,25 @@ def test_get_item(self): self.db.test.aggregate_raw_batches([])[0] def test_collation(self): - next(self.db.test.aggregate_raw_batches([], collation=Collation('en_US'))) + next(self.db.test.aggregate_raw_batches([], collation=Collation("en_US"))) def test_monitoring(self): listener = EventListener() client = rs_or_single_client(event_listeners=[listener]) c = client.pymongo_test.test c.drop() - c.insert_many([{'_id': i} for i in range(10)]) + c.insert_many([{"_id": i} for i in range(10)]) listener.results.clear() - cursor = c.aggregate_raw_batches([{'$sort': {'_id': 1}}], batchSize=4) + cursor = c.aggregate_raw_batches([{"$sort": {"_id": 1}}], batchSize=4) # Start cursor, no initial batch. 
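# aggregate_raw_batches() starts the server-side cursor without asking
# for an initial batch, so the aggregate reply's firstBatch is empty and
# every document arrives via getMore, as the assertions below expect.
# Usage sketch (not part of this patch):
#
#   for batch in coll.aggregate_raw_batches(
#           [{"$sort": {"_id": 1}}], batchSize=4):
#       docs = decode_all(batch)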
- started = listener.results['started'][0] - succeeded = listener.results['succeeded'][0] - self.assertEqual(0, len(listener.results['failed'])) - self.assertEqual('aggregate', started.command_name) - self.assertEqual('pymongo_test', started.database_name) - self.assertEqual('aggregate', succeeded.command_name) + started = listener.results["started"][0] + succeeded = listener.results["succeeded"][0] + self.assertEqual(0, len(listener.results["failed"])) + self.assertEqual("aggregate", started.command_name) + self.assertEqual("pymongo_test", started.database_name) + self.assertEqual("aggregate", succeeded.command_name) csr = succeeded.reply["cursor"] self.assertEqual(csr["ns"], "pymongo_test.test") @@ -1597,18 +1562,17 @@ def test_monitoring(self): n = 0 for batch in cursor: results = listener.results - started = results['started'][0] - succeeded = results['succeeded'][0] - self.assertEqual(0, len(results['failed'])) - self.assertEqual('getMore', started.command_name) - self.assertEqual('pymongo_test', started.database_name) - self.assertEqual('getMore', succeeded.command_name) + started = results["started"][0] + succeeded = results["succeeded"][0] + self.assertEqual(0, len(results["failed"])) + self.assertEqual("getMore", started.command_name) + self.assertEqual("pymongo_test", started.database_name) + self.assertEqual("getMore", succeeded.command_name) csr = succeeded.reply["cursor"] self.assertEqual(csr["ns"], "pymongo_test.test") self.assertEqual(len(csr["nextBatch"]), 1) self.assertEqual(csr["nextBatch"][0], batch) - self.assertEqual(decode_all(batch), - [{'_id': i} for i in range(n, min(n + 4, 10))]) + self.assertEqual(decode_all(batch), [{"_id": i} for i in range(n, min(n + 4, 10))]) n += 4 listener.results.clear() diff --git a/test/test_custom_types.py b/test/test_custom_types.py index eee47b9d2b..4659a62e62 100644 --- a/test/test_custom_types.py +++ b/test/test_custom_types.py @@ -17,7 +17,6 @@ import datetime import sys import tempfile - from collections import OrderedDict from decimal import Decimal from random import random @@ -25,32 +24,37 @@ sys.path[0:0] = [""] -from bson import (Decimal128, - decode, - decode_all, - decode_file_iter, - decode_iter, - encode, - RE_TYPE, - _BUILT_IN_TYPES, - _dict_to_bson, - _bson_to_dict) -from bson.codec_options import (CodecOptions, TypeCodec, TypeDecoder, - TypeEncoder, TypeRegistry) +from test import client_context, unittest +from test.test_client import IntegrationTest +from test.utils import rs_client + +from bson import ( + _BUILT_IN_TYPES, + RE_TYPE, + Decimal128, + _bson_to_dict, + _dict_to_bson, + decode, + decode_all, + decode_file_iter, + decode_iter, + encode, +) +from bson.codec_options import ( + CodecOptions, + TypeCodec, + TypeDecoder, + TypeEncoder, + TypeRegistry, +) from bson.errors import InvalidDocument from bson.int64 import Int64 from bson.raw_bson import RawBSONDocument - from gridfs import GridIn, GridOut - from pymongo.collection import ReturnDocument from pymongo.errors import DuplicateKeyError from pymongo.message import _CursorAddress -from test import client_context, unittest -from test.test_client import IntegrationTest -from test.utils import rs_client - class DecimalEncoder(TypeEncoder): @property @@ -74,8 +78,7 @@ class DecimalCodec(DecimalDecoder, DecimalEncoder): pass -DECIMAL_CODECOPTS = CodecOptions( - type_registry=TypeRegistry([DecimalCodec()])) +DECIMAL_CODECOPTS = CodecOptions(type_registry=TypeRegistry([DecimalCodec()])) class UndecipherableInt64Type(object): @@ -91,39 +94,55 @@ def __eq__(self, 
other): class UndecipherableIntDecoder(TypeDecoder): bson_type = Int64 + def transform_bson(self, value): return UndecipherableInt64Type(value) class UndecipherableIntEncoder(TypeEncoder): python_type = UndecipherableInt64Type + def transform_python(self, value): return Int64(value.value) UNINT_DECODER_CODECOPTS = CodecOptions( - type_registry=TypeRegistry([UndecipherableIntDecoder(), ])) + type_registry=TypeRegistry( + [ + UndecipherableIntDecoder(), + ] + ) +) -UNINT_CODECOPTS = CodecOptions(type_registry=TypeRegistry( - [UndecipherableIntDecoder(), UndecipherableIntEncoder()])) +UNINT_CODECOPTS = CodecOptions( + type_registry=TypeRegistry([UndecipherableIntDecoder(), UndecipherableIntEncoder()]) +) class UppercaseTextDecoder(TypeDecoder): bson_type = str + def transform_bson(self, value): return value.upper() -UPPERSTR_DECODER_CODECOPTS = CodecOptions(type_registry=TypeRegistry( - [UppercaseTextDecoder(),])) +UPPERSTR_DECODER_CODECOPTS = CodecOptions( + type_registry=TypeRegistry( + [ + UppercaseTextDecoder(), + ] + ) +) def type_obfuscating_decoder_factory(rt_type): class ResumeTokenToNanDecoder(TypeDecoder): bson_type = rt_type + def transform_bson(self, value): return "NaN" + return ResumeTokenToNanDecoder @@ -135,43 +154,42 @@ def roundtrip(self, doc): self.assertEqual(doc, rt_document) def test_encode_decode_roundtrip(self): - self.roundtrip({'average': Decimal('56.47')}) - self.roundtrip({'average': {'b': Decimal('56.47')}}) - self.roundtrip({'average': [Decimal('56.47')]}) - self.roundtrip({'average': [[Decimal('56.47')]]}) - self.roundtrip({'average': [{'b': Decimal('56.47')}]}) + self.roundtrip({"average": Decimal("56.47")}) + self.roundtrip({"average": {"b": Decimal("56.47")}}) + self.roundtrip({"average": [Decimal("56.47")]}) + self.roundtrip({"average": [[Decimal("56.47")]]}) + self.roundtrip({"average": [{"b": Decimal("56.47")}]}) @no_type_check def test_decode_all(self): documents = [] for dec in range(3): - documents.append({'average': Decimal('56.4%s' % (dec,))}) + documents.append({"average": Decimal("56.4%s" % (dec,))}) bsonstream = bytes() for doc in documents: bsonstream += encode(doc, codec_options=self.codecopts) - self.assertEqual( - decode_all(bsonstream, self.codecopts), documents) + self.assertEqual(decode_all(bsonstream, self.codecopts), documents) @no_type_check def test__bson_to_dict(self): - document = {'average': Decimal('56.47')} + document = {"average": Decimal("56.47")} rawbytes = encode(document, codec_options=self.codecopts) decoded_document = _bson_to_dict(rawbytes, self.codecopts) self.assertEqual(document, decoded_document) @no_type_check def test__dict_to_bson(self): - document = {'average': Decimal('56.47')} + document = {"average": Decimal("56.47")} rawbytes = encode(document, codec_options=self.codecopts) encoded_document = _dict_to_bson(document, False, self.codecopts) self.assertEqual(encoded_document, rawbytes) def _generate_multidocument_bson_stream(self): inp_num = [str(random() * 100)[:4] for _ in range(10)] - docs = [{'n': Decimal128(dec)} for dec in inp_num] - edocs = [{'n': Decimal(dec)} for dec in inp_num] + docs = [{"n": Decimal128(dec)} for dec in inp_num] + edocs = [{"n": Decimal(dec)} for dec in inp_num] bsonstream = b"" for doc in docs: bsonstream += encode(doc) @@ -180,8 +198,7 @@ def _generate_multidocument_bson_stream(self): @no_type_check def test_decode_iter(self): expected, bson_data = self._generate_multidocument_bson_stream() - for expected_doc, decoded_doc in zip( - expected, decode_iter(bson_data, self.codecopts)): + 
for expected_doc, decoded_doc in zip(expected, decode_iter(bson_data, self.codecopts)): self.assertEqual(expected_doc, decoded_doc) @no_type_check @@ -191,26 +208,24 @@ def test_decode_file_iter(self): fileobj.write(bson_data) fileobj.seek(0) - for expected_doc, decoded_doc in zip( - expected, decode_file_iter(fileobj, self.codecopts)): + for expected_doc, decoded_doc in zip(expected, decode_file_iter(fileobj, self.codecopts)): self.assertEqual(expected_doc, decoded_doc) fileobj.close() -class TestCustomPythonBSONTypeToBSONMonolithicCodec(CustomBSONTypeTests, - unittest.TestCase): +class TestCustomPythonBSONTypeToBSONMonolithicCodec(CustomBSONTypeTests, unittest.TestCase): @classmethod def setUpClass(cls): cls.codecopts = DECIMAL_CODECOPTS -class TestCustomPythonBSONTypeToBSONMultiplexedCodec(CustomBSONTypeTests, - unittest.TestCase): +class TestCustomPythonBSONTypeToBSONMultiplexedCodec(CustomBSONTypeTests, unittest.TestCase): @classmethod def setUpClass(cls): codec_options = CodecOptions( - type_registry=TypeRegistry((DecimalEncoder(), DecimalDecoder()))) + type_registry=TypeRegistry((DecimalEncoder(), DecimalDecoder())) + ) cls.codecopts = codec_options @@ -221,29 +236,29 @@ def _get_codec_options(self, fallback_encoder): def test_simple(self): codecopts = self._get_codec_options(lambda x: Decimal128(x)) - document = {'average': Decimal('56.47')} + document = {"average": Decimal("56.47")} bsonbytes = encode(document, codec_options=codecopts) - exp_document = {'average': Decimal128('56.47')} + exp_document = {"average": Decimal128("56.47")} exp_bsonbytes = encode(exp_document) self.assertEqual(bsonbytes, exp_bsonbytes) def test_erroring_fallback_encoder(self): - codecopts = self._get_codec_options(lambda _: 1/0) + codecopts = self._get_codec_options(lambda _: 1 / 0) # fallback converter should not be invoked when encoding known types. encode( - {'a': 1, 'b': Decimal128('1.01'), 'c': {'arr': ['abc', 3.678]}}, - codec_options=codecopts) + {"a": 1, "b": Decimal128("1.01"), "c": {"arr": ["abc", 3.678]}}, codec_options=codecopts + ) # expect an error when encoding a custom type. 
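# A fallback_encoder is consulted only for types that neither the
# registry nor the built-in encoder can handle, which is why the
# known-types document above encodes without ever invoking it. A working
# fallback, mirroring test_simple above (sketch):
#
#   codecopts = self._get_codec_options(lambda value: Decimal128(value))
#   encode({"average": Decimal("56.47")}, codec_options=codecopts)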
- document = {'average': Decimal('56.47')} + document = {"average": Decimal("56.47")} with self.assertRaises(ZeroDivisionError): encode(document, codec_options=codecopts) def test_noop_fallback_encoder(self): codecopts = self._get_codec_options(lambda x: x) - document = {'average': Decimal('56.47')} + document = {"average": Decimal("56.47")} with self.assertRaises(InvalidDocument): encode(document, codec_options=codecopts) @@ -253,8 +268,9 @@ def fallback_encoder(value): return Decimal128(value) except: raise TypeError("cannot encode type %s" % (type(value))) + codecopts = self._get_codec_options(fallback_encoder) - document = {'average': Decimal} + document = {"average": Decimal} with self.assertRaises(TypeError): encode(document, codec_options=codecopts) @@ -262,8 +278,9 @@ def fallback_encoder(value): class TestBSONTypeEnDeCodecs(unittest.TestCase): def test_instantiation(self): msg = "Can't instantiate abstract class" + def run_test(base, attrs, fail): - codec = type('testcodec', (base,), attrs) + codec = type("testcodec", (base,), attrs) if fail: with self.assertRaisesRegex(TypeError, msg): codec() @@ -273,24 +290,46 @@ def run_test(base, attrs, fail): class MyType(object): pass - run_test(TypeEncoder, {'python_type': MyType,}, fail=True) - run_test(TypeEncoder, {'transform_python': lambda s, x: x}, fail=True) - run_test(TypeEncoder, {'transform_python': lambda s, x: x, - 'python_type': MyType}, fail=False) - - run_test(TypeDecoder, {'bson_type': Decimal128, }, fail=True) - run_test(TypeDecoder, {'transform_bson': lambda s, x: x}, fail=True) - run_test(TypeDecoder, {'transform_bson': lambda s, x: x, - 'bson_type': Decimal128}, fail=False) - - run_test(TypeCodec, {'bson_type': Decimal128, - 'python_type': MyType}, fail=True) - run_test(TypeCodec, {'transform_bson': lambda s, x: x, - 'transform_python': lambda s, x: x}, fail=True) - run_test(TypeCodec, {'python_type': MyType, - 'transform_python': lambda s, x: x, - 'transform_bson': lambda s, x: x, - 'bson_type': Decimal128}, fail=False) + run_test( + TypeEncoder, + { + "python_type": MyType, + }, + fail=True, + ) + run_test(TypeEncoder, {"transform_python": lambda s, x: x}, fail=True) + run_test( + TypeEncoder, {"transform_python": lambda s, x: x, "python_type": MyType}, fail=False + ) + + run_test( + TypeDecoder, + { + "bson_type": Decimal128, + }, + fail=True, + ) + run_test(TypeDecoder, {"transform_bson": lambda s, x: x}, fail=True) + run_test( + TypeDecoder, {"transform_bson": lambda s, x: x, "bson_type": Decimal128}, fail=False + ) + + run_test(TypeCodec, {"bson_type": Decimal128, "python_type": MyType}, fail=True) + run_test( + TypeCodec, + {"transform_bson": lambda s, x: x, "transform_python": lambda s, x: x}, + fail=True, + ) + run_test( + TypeCodec, + { + "python_type": MyType, + "transform_python": lambda s, x: x, + "transform_bson": lambda s, x: x, + "bson_type": Decimal128, + }, + fail=False, + ) def test_type_checks(self): self.assertTrue(issubclass(TypeCodec, TypeEncoder)) @@ -332,6 +371,7 @@ def fallback_encoder_A2BSON(value): # transforms B into something encodable class B2BSON(TypeEncoder): python_type = TypeB + def transform_python(self, value): return value.value @@ -340,6 +380,7 @@ def transform_python(self, value): # BSON-encodable. class A2B(TypeEncoder): python_type = TypeA + def transform_python(self, value): return TypeB(value.value) @@ -348,6 +389,7 @@ def transform_python(self, value): # BSON-encodable. 
class B2A(TypeEncoder): python_type = TypeB + def transform_python(self, value): return TypeA(value.value) @@ -360,37 +402,37 @@ def transform_python(self, value): cls.A2B = A2B def test_encode_fallback_then_custom(self): - codecopts = CodecOptions(type_registry=TypeRegistry( - [self.B2BSON()], fallback_encoder=self.fallback_encoder_A2B)) - testdoc = {'x': self.TypeA(123)} - expected_bytes = encode({'x': 123}) + codecopts = CodecOptions( + type_registry=TypeRegistry([self.B2BSON()], fallback_encoder=self.fallback_encoder_A2B) + ) + testdoc = {"x": self.TypeA(123)} + expected_bytes = encode({"x": 123}) - self.assertEqual(encode(testdoc, codec_options=codecopts), - expected_bytes) + self.assertEqual(encode(testdoc, codec_options=codecopts), expected_bytes) def test_encode_custom_then_fallback(self): - codecopts = CodecOptions(type_registry=TypeRegistry( - [self.B2A()], fallback_encoder=self.fallback_encoder_A2BSON)) - testdoc = {'x': self.TypeB(123)} - expected_bytes = encode({'x': 123}) + codecopts = CodecOptions( + type_registry=TypeRegistry([self.B2A()], fallback_encoder=self.fallback_encoder_A2BSON) + ) + testdoc = {"x": self.TypeB(123)} + expected_bytes = encode({"x": 123}) - self.assertEqual(encode(testdoc, codec_options=codecopts), - expected_bytes) + self.assertEqual(encode(testdoc, codec_options=codecopts), expected_bytes) def test_chaining_encoders_fails(self): - codecopts = CodecOptions(type_registry=TypeRegistry( - [self.A2B(), self.B2BSON()])) + codecopts = CodecOptions(type_registry=TypeRegistry([self.A2B(), self.B2BSON()])) with self.assertRaises(InvalidDocument): - encode({'x': self.TypeA(123)}, codec_options=codecopts) + encode({"x": self.TypeA(123)}, codec_options=codecopts) def test_infinite_loop_exceeds_max_recursion_depth(self): - codecopts = CodecOptions(type_registry=TypeRegistry( - [self.B2A()], fallback_encoder=self.fallback_encoder_A2B)) + codecopts = CodecOptions( + type_registry=TypeRegistry([self.B2A()], fallback_encoder=self.fallback_encoder_A2B) + ) # Raises max recursion depth exceeded error with self.assertRaises(RuntimeError): - encode({'x': self.TypeA(100)}, codec_options=codecopts) + encode({"x": self.TypeA(100)}, codec_options=codecopts) class TestTypeRegistry(unittest.TestCase): @@ -449,29 +491,34 @@ def fallback_encoder(value): def test_simple(self): codec_instances = [codec() for codec in self.codecs] + def assert_proper_initialization(type_registry, codec_instances): - self.assertEqual(type_registry._encoder_map, { - self.types[0]: codec_instances[0].transform_python, - self.types[1]: codec_instances[1].transform_python}) - self.assertEqual(type_registry._decoder_map, { - int: codec_instances[0].transform_bson, - str: codec_instances[1].transform_bson}) self.assertEqual( - type_registry._fallback_encoder, self.fallback_encoder) + type_registry._encoder_map, + { + self.types[0]: codec_instances[0].transform_python, + self.types[1]: codec_instances[1].transform_python, + }, + ) + self.assertEqual( + type_registry._decoder_map, + {int: codec_instances[0].transform_bson, str: codec_instances[1].transform_bson}, + ) + self.assertEqual(type_registry._fallback_encoder, self.fallback_encoder) type_registry = TypeRegistry(codec_instances, self.fallback_encoder) assert_proper_initialization(type_registry, codec_instances) type_registry = TypeRegistry( - fallback_encoder=self.fallback_encoder, type_codecs=codec_instances) + fallback_encoder=self.fallback_encoder, type_codecs=codec_instances + ) assert_proper_initialization(type_registry, codec_instances) # 
Ensure codec list held by the type registry doesn't change if we # mutate the initial list. codec_instances_copy = list(codec_instances) codec_instances.pop(0) - self.assertListEqual( - type_registry._TypeRegistry__type_codecs, codec_instances_copy) + self.assertListEqual(type_registry._TypeRegistry__type_codecs, codec_instances_copy) def test_simple_separate_codecs(self): class MyIntEncoder(TypeEncoder): @@ -491,72 +538,83 @@ def transform_bson(self, value): self.assertEqual( type_registry._encoder_map, - {MyIntEncoder.python_type: codec_instances[1].transform_python}) # type: ignore + {MyIntEncoder.python_type: codec_instances[1].transform_python}, # type: ignore[has-type] + ) self.assertEqual( - type_registry._decoder_map, - {MyIntDecoder.bson_type: codec_instances[0].transform_bson}) # type: ignore + type_registry._decoder_map, {MyIntDecoder.bson_type: codec_instances[0].transform_bson} # type: ignore[has-type] + ) def test_initialize_fail(self): - err_msg = ("Expected an instance of TypeEncoder, TypeDecoder, " - "or TypeCodec, got .* instead") + err_msg = ( + "Expected an instance of TypeEncoder, TypeDecoder, " "or TypeCodec, got .* instead" + ) with self.assertRaisesRegex(TypeError, err_msg): TypeRegistry(self.codecs) # type: ignore[arg-type] with self.assertRaisesRegex(TypeError, err_msg): - TypeRegistry([type('AnyType', (object,), {})()]) + TypeRegistry([type("AnyType", (object,), {})()]) err_msg = "fallback_encoder %r is not a callable" % (True,) with self.assertRaisesRegex(TypeError, err_msg): - TypeRegistry([], True) # type: ignore[arg-type] + TypeRegistry([], True) # type: ignore[arg-type] - err_msg = "fallback_encoder %r is not a callable" % ('hello',) + err_msg = "fallback_encoder %r is not a callable" % ("hello",) with self.assertRaisesRegex(TypeError, err_msg): - TypeRegistry(fallback_encoder='hello') # type: ignore[arg-type] + TypeRegistry(fallback_encoder="hello") # type: ignore[arg-type] def test_type_registry_repr(self): codec_instances = [codec() for codec in self.codecs] type_registry = TypeRegistry(codec_instances) - r = ("TypeRegistry(type_codecs=%r, fallback_encoder=%r)" % ( - codec_instances, None)) + r = "TypeRegistry(type_codecs=%r, fallback_encoder=%r)" % (codec_instances, None) self.assertEqual(r, repr(type_registry)) def test_type_registry_eq(self): codec_instances = [codec() for codec in self.codecs] - self.assertEqual( - TypeRegistry(codec_instances), TypeRegistry(codec_instances)) + self.assertEqual(TypeRegistry(codec_instances), TypeRegistry(codec_instances)) codec_instances_2 = [codec() for codec in self.codecs] - self.assertNotEqual( - TypeRegistry(codec_instances), TypeRegistry(codec_instances_2)) + self.assertNotEqual(TypeRegistry(codec_instances), TypeRegistry(codec_instances_2)) def test_builtin_types_override_fails(self): def run_test(base, attrs): - msg = (r"TypeEncoders cannot change how built-in types " - r"are encoded \(encoder .* transforms type .*\)") + msg = ( + r"TypeEncoders cannot change how built-in types " + r"are encoded \(encoder .* transforms type .*\)" + ) for pytype in _BUILT_IN_TYPES: - attrs.update({'python_type': pytype, - 'transform_python': lambda x: x}) - codec = type('testcodec', (base, ), attrs) + attrs.update({"python_type": pytype, "transform_python": lambda x: x}) + codec = type("testcodec", (base,), attrs) codec_instance = codec() with self.assertRaisesRegex(TypeError, msg): - TypeRegistry([codec_instance,]) + TypeRegistry( + [ + codec_instance, + ] + ) # Test only some subtypes as not all can be subclassed. 
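# Subclasses of built-ins are rejected just like the built-ins
# themselves; the skip below only covers bool, NoneType and compiled
# regex, which cannot be subclassed at all. Sketch of a codec this check
# rejects (illustrative, not part of this patch):
#
#   class MyFloat(float): pass
#   class BadEncoder(TypeEncoder):
#       python_type = MyFloat
#       def transform_python(self, value): return value
#   TypeRegistry([BadEncoder()])  # raises TypeError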
- if pytype in [bool, type(None), RE_TYPE,]: + if pytype in [ + bool, + type(None), + RE_TYPE, + ]: continue class MyType(pytype): # type: ignore pass - attrs.update({'python_type': MyType, - 'transform_python': lambda x: x}) - codec = type('testcodec', (base, ), attrs) + + attrs.update({"python_type": MyType, "transform_python": lambda x: x}) + codec = type("testcodec", (base,), attrs) codec_instance = codec() with self.assertRaisesRegex(TypeError, msg): - TypeRegistry([codec_instance,]) + TypeRegistry( + [ + codec_instance, + ] + ) run_test(TypeEncoder, {}) - run_test(TypeCodec, {'bson_type': Decimal128, - 'transform_bson': lambda x: x}) + run_test(TypeCodec, {"bson_type": Decimal128, "transform_bson": lambda x: x}) class TestCollectionWCustomType(IntegrationTest): @@ -568,115 +626,127 @@ def tearDown(self): def test_command_errors_w_custom_type_decoder(self): db = self.db - test_doc = {'_id': 1, 'data': 'a'} - test = db.get_collection('test', - codec_options=UNINT_DECODER_CODECOPTS) + test_doc = {"_id": 1, "data": "a"} + test = db.get_collection("test", codec_options=UNINT_DECODER_CODECOPTS) result = test.insert_one(test_doc) - self.assertEqual(result.inserted_id, test_doc['_id']) + self.assertEqual(result.inserted_id, test_doc["_id"]) with self.assertRaises(DuplicateKeyError): test.insert_one(test_doc) def test_find_w_custom_type_decoder(self): db = self.db - input_docs = [ - {'x': Int64(k)} for k in [1, 2, 3]] + input_docs = [{"x": Int64(k)} for k in [1, 2, 3]] for doc in input_docs: db.test.insert_one(doc) - test = db.get_collection( - 'test', codec_options=UNINT_DECODER_CODECOPTS) + test = db.get_collection("test", codec_options=UNINT_DECODER_CODECOPTS) for doc in test.find({}, batch_size=1): - self.assertIsInstance(doc['x'], UndecipherableInt64Type) + self.assertIsInstance(doc["x"], UndecipherableInt64Type) def test_find_w_custom_type_decoder_and_document_class(self): def run_test(doc_cls): db = self.db - input_docs = [ - {'x': Int64(k)} for k in [1, 2, 3]] + input_docs = [{"x": Int64(k)} for k in [1, 2, 3]] for doc in input_docs: db.test.insert_one(doc) - test = db.get_collection('test', codec_options=CodecOptions( - type_registry=TypeRegistry([UndecipherableIntDecoder()]), - document_class=doc_cls)) + test = db.get_collection( + "test", + codec_options=CodecOptions( + type_registry=TypeRegistry([UndecipherableIntDecoder()]), document_class=doc_cls + ), + ) for doc in test.find({}, batch_size=1): self.assertIsInstance(doc, doc_cls) - self.assertIsInstance(doc['x'], UndecipherableInt64Type) + self.assertIsInstance(doc["x"], UndecipherableInt64Type) for doc_cls in [RawBSONDocument, OrderedDict]: run_test(doc_cls) def test_aggregate_w_custom_type_decoder(self): db = self.db - db.test.insert_many([ - {'status': 'in progress', 'qty': Int64(1)}, - {'status': 'complete', 'qty': Int64(10)}, - {'status': 'in progress', 'qty': Int64(1)}, - {'status': 'complete', 'qty': Int64(10)}, - {'status': 'in progress', 'qty': Int64(1)},]) - test = db.get_collection( - 'test', codec_options=UNINT_DECODER_CODECOPTS) + db.test.insert_many( + [ + {"status": "in progress", "qty": Int64(1)}, + {"status": "complete", "qty": Int64(10)}, + {"status": "in progress", "qty": Int64(1)}, + {"status": "complete", "qty": Int64(10)}, + {"status": "in progress", "qty": Int64(1)}, + ] + ) + test = db.get_collection("test", codec_options=UNINT_DECODER_CODECOPTS) pipeline: list = [ - {'$match': {'status': 'complete'}}, - {'$group': {'_id': "$status", 'total_qty': {"$sum": "$qty"}}},] + {"$match": {"status": "complete"}}, 
+ {"$group": {"_id": "$status", "total_qty": {"$sum": "$qty"}}}, + ] result = test.aggregate(pipeline) res = list(result)[0] - self.assertEqual(res['_id'], 'complete') - self.assertIsInstance(res['total_qty'], UndecipherableInt64Type) - self.assertEqual(res['total_qty'].value, 20) + self.assertEqual(res["_id"], "complete") + self.assertIsInstance(res["total_qty"], UndecipherableInt64Type) + self.assertEqual(res["total_qty"].value, 20) def test_distinct_w_custom_type(self): self.db.drop_collection("test") - test = self.db.get_collection('test', codec_options=UNINT_CODECOPTS) + test = self.db.get_collection("test", codec_options=UNINT_CODECOPTS) values = [ UndecipherableInt64Type(1), UndecipherableInt64Type(2), UndecipherableInt64Type(3), - {"b": UndecipherableInt64Type(3)}] + {"b": UndecipherableInt64Type(3)}, + ] test.insert_many({"a": val} for val in values) self.assertEqual(values, test.distinct("a")) def test_find_one_and__w_custom_type_decoder(self): db = self.db - c = db.get_collection('test', codec_options=UNINT_DECODER_CODECOPTS) - c.insert_one({'_id': 1, 'x': Int64(1)}) - - doc = c.find_one_and_update({'_id': 1}, {'$inc': {'x': 1}}, - return_document=ReturnDocument.AFTER) - self.assertEqual(doc['_id'], 1) - self.assertIsInstance(doc['x'], UndecipherableInt64Type) - self.assertEqual(doc['x'].value, 2) - - doc = c.find_one_and_replace({'_id': 1}, {'x': Int64(3), 'y': True}, - return_document=ReturnDocument.AFTER) - self.assertEqual(doc['_id'], 1) - self.assertIsInstance(doc['x'], UndecipherableInt64Type) - self.assertEqual(doc['x'].value, 3) - self.assertEqual(doc['y'], True) - - doc = c.find_one_and_delete({'y': True}) - self.assertEqual(doc['_id'], 1) - self.assertIsInstance(doc['x'], UndecipherableInt64Type) - self.assertEqual(doc['x'].value, 3) + c = db.get_collection("test", codec_options=UNINT_DECODER_CODECOPTS) + c.insert_one({"_id": 1, "x": Int64(1)}) + + doc = c.find_one_and_update( + {"_id": 1}, {"$inc": {"x": 1}}, return_document=ReturnDocument.AFTER + ) + self.assertEqual(doc["_id"], 1) + self.assertIsInstance(doc["x"], UndecipherableInt64Type) + self.assertEqual(doc["x"].value, 2) + + doc = c.find_one_and_replace( + {"_id": 1}, {"x": Int64(3), "y": True}, return_document=ReturnDocument.AFTER + ) + self.assertEqual(doc["_id"], 1) + self.assertIsInstance(doc["x"], UndecipherableInt64Type) + self.assertEqual(doc["x"].value, 3) + self.assertEqual(doc["y"], True) + + doc = c.find_one_and_delete({"y": True}) + self.assertEqual(doc["_id"], 1) + self.assertIsInstance(doc["x"], UndecipherableInt64Type) + self.assertEqual(doc["x"].value, 3) self.assertIsNone(c.find_one()) class TestGridFileCustomType(IntegrationTest): def setUp(self): - self.db.drop_collection('fs.files') - self.db.drop_collection('fs.chunks') + self.db.drop_collection("fs.files") + self.db.drop_collection("fs.chunks") def test_grid_out_custom_opts(self): db = self.db.with_options(codec_options=UPPERSTR_DECODER_CODECOPTS) - one = GridIn(db.fs, _id=5, filename="my_file", - contentType="text/html", chunkSize=1000, aliases=["foo"], - metadata={"foo": 'red', "bar": 'blue'}, bar=3, - baz="hello") + one = GridIn( + db.fs, + _id=5, + filename="my_file", + contentType="text/html", + chunkSize=1000, + aliases=["foo"], + metadata={"foo": "red", "bar": "blue"}, + bar=3, + baz="hello", + ) one.write(b"hello world") one.close() @@ -690,12 +760,21 @@ def test_grid_out_custom_opts(self): self.assertEqual(1000, two.chunk_size) self.assertTrue(isinstance(two.upload_date, datetime.datetime)) self.assertEqual(["foo"], two.aliases) - 
self.assertEqual({"foo": 'red', "bar": 'blue'}, two.metadata) + self.assertEqual({"foo": "red", "bar": "blue"}, two.metadata) self.assertEqual(3, two.bar) self.assertEqual(None, two.md5) - for attr in ["_id", "name", "content_type", "length", "chunk_size", - "upload_date", "aliases", "metadata", "md5"]: + for attr in [ + "_id", + "name", + "content_type", + "length", + "chunk_size", + "upload_date", + "aliases", + "metadata", + "md5", + ]: self.assertRaises(AttributeError, setattr, two, attr, 5) @@ -705,11 +784,10 @@ def change_stream(self, *args, **kwargs): return self.watched_target.watch(*args, **kwargs) @no_type_check - def insert_and_check(self, change_stream, insert_doc, - expected_doc): + def insert_and_check(self, change_stream, insert_doc, expected_doc): self.input_target.insert_one(insert_doc) change = next(change_stream) - self.assertEqual(change['fullDocument'], expected_doc) + self.assertEqual(change["fullDocument"], expected_doc) @no_type_check def kill_change_stream_cursor(self, change_stream): @@ -721,18 +799,21 @@ def kill_change_stream_cursor(self, change_stream): @no_type_check def test_simple(self): - codecopts = CodecOptions(type_registry=TypeRegistry([ - UndecipherableIntEncoder(), UppercaseTextDecoder()])) + codecopts = CodecOptions( + type_registry=TypeRegistry([UndecipherableIntEncoder(), UppercaseTextDecoder()]) + ) self.create_targets(codec_options=codecopts) input_docs = [ - {'_id': UndecipherableInt64Type(1), 'data': 'hello'}, - {'_id': 2, 'data': 'world'}, - {'_id': UndecipherableInt64Type(3), 'data': '!'},] + {"_id": UndecipherableInt64Type(1), "data": "hello"}, + {"_id": 2, "data": "world"}, + {"_id": UndecipherableInt64Type(3), "data": "!"}, + ] expected_docs = [ - {'_id': 1, 'data': 'HELLO'}, - {'_id': 2, 'data': 'WORLD'}, - {'_id': 3, 'data': '!'},] + {"_id": 1, "data": "HELLO"}, + {"_id": 2, "data": "WORLD"}, + {"_id": 3, "data": "!"}, + ] change_stream = self.change_stream() @@ -744,22 +825,22 @@ def test_simple(self): @no_type_check def test_custom_type_in_pipeline(self): - codecopts = CodecOptions(type_registry=TypeRegistry([ - UndecipherableIntEncoder(), UppercaseTextDecoder()])) + codecopts = CodecOptions( + type_registry=TypeRegistry([UndecipherableIntEncoder(), UppercaseTextDecoder()]) + ) self.create_targets(codec_options=codecopts) input_docs = [ - {'_id': UndecipherableInt64Type(1), 'data': 'hello'}, - {'_id': 2, 'data': 'world'}, - {'_id': UndecipherableInt64Type(3), 'data': '!'}] - expected_docs = [ - {'_id': 2, 'data': 'WORLD'}, - {'_id': 3, 'data': '!'}] + {"_id": UndecipherableInt64Type(1), "data": "hello"}, + {"_id": 2, "data": "world"}, + {"_id": UndecipherableInt64Type(3), "data": "!"}, + ] + expected_docs = [{"_id": 2, "data": "WORLD"}, {"_id": 3, "data": "!"}] # UndecipherableInt64Type should be encoded with the TypeRegistry. change_stream = self.change_stream( - [{'$match': {'documentKey._id': { - '$gte': UndecipherableInt64Type(2)}}}]) + [{"$match": {"documentKey._id": {"$gte": UndecipherableInt64Type(2)}}}] + ) self.input_target.insert_one(input_docs[0]) self.insert_and_check(change_stream, input_docs[1], expected_docs[0]) @@ -773,17 +854,17 @@ def test_break_resume_token(self): change_stream = self.change_stream() self.input_target.insert_one({"data": "test"}) change = next(change_stream) - resume_token_decoder = type_obfuscating_decoder_factory( - type(change['_id']['_data'])) + resume_token_decoder = type_obfuscating_decoder_factory(type(change["_id"]["_data"])) # Custom-decoding the resumeToken type breaks resume tokens. 
- codecopts = CodecOptions(type_registry=TypeRegistry([ - resume_token_decoder(), UndecipherableIntEncoder()])) + codecopts = CodecOptions( + type_registry=TypeRegistry([resume_token_decoder(), UndecipherableIntEncoder()]) + ) # Re-create targets, change stream and proceed. self.create_targets(codec_options=codecopts) - docs = [{'_id': 1}, {'_id': 2}, {'_id': 3}] + docs = [{"_id": 1}, {"_id": 2}, {"_id": 3}] change_stream = self.change_stream() self.insert_and_check(change_stream, docs[0], docs[0]) @@ -795,27 +876,27 @@ def test_break_resume_token(self): @no_type_check def test_document_class(self): def run_test(doc_cls): - codecopts = CodecOptions(type_registry=TypeRegistry([ - UppercaseTextDecoder(), UndecipherableIntEncoder()]), - document_class=doc_cls) + codecopts = CodecOptions( + type_registry=TypeRegistry([UppercaseTextDecoder(), UndecipherableIntEncoder()]), + document_class=doc_cls, + ) self.create_targets(codec_options=codecopts) change_stream = self.change_stream() - doc = {'a': UndecipherableInt64Type(101), 'b': 'xyz'} + doc = {"a": UndecipherableInt64Type(101), "b": "xyz"} self.input_target.insert_one(doc) change = next(change_stream) self.assertIsInstance(change, doc_cls) - self.assertEqual(change['fullDocument']['a'], 101) - self.assertEqual(change['fullDocument']['b'], 'XYZ') + self.assertEqual(change["fullDocument"]["a"], 101) + self.assertEqual(change["fullDocument"]["b"], "XYZ") for doc_cls in [OrderedDict, RawBSONDocument]: run_test(doc_cls) -class TestCollectionChangeStreamsWCustomTypes( - IntegrationTest, ChangeStreamsWCustomTypesTestMixin): +class TestCollectionChangeStreamsWCustomTypes(IntegrationTest, ChangeStreamsWCustomTypesTestMixin): @classmethod @client_context.require_no_mmap @client_context.require_no_standalone @@ -827,16 +908,14 @@ def tearDown(self): self.input_target.drop() def create_targets(self, *args, **kwargs): - self.watched_target = self.db.get_collection( - 'test', *args, **kwargs) + self.watched_target = self.db.get_collection("test", *args, **kwargs) self.input_target = self.watched_target # Ensure the collection exists and is empty. self.input_target.insert_one({}) self.input_target.delete_many({}) -class TestDatabaseChangeStreamsWCustomTypes( - IntegrationTest, ChangeStreamsWCustomTypesTestMixin): +class TestDatabaseChangeStreamsWCustomTypes(IntegrationTest, ChangeStreamsWCustomTypesTestMixin): @classmethod @client_context.require_version_min(4, 0, 0) @client_context.require_no_mmap @@ -850,15 +929,13 @@ def tearDown(self): self.client.drop_database(self.watched_target) def create_targets(self, *args, **kwargs): - self.watched_target = self.client.get_database( - self.db.name, *args, **kwargs) + self.watched_target = self.client.get_database(self.db.name, *args, **kwargs) self.input_target = self.watched_target.test # Insert a record to ensure db, coll are created. 
- self.input_target.insert_one({'data': 'dummy'}) + self.input_target.insert_one({"data": "dummy"}) -class TestClusterChangeStreamsWCustomTypes( - IntegrationTest, ChangeStreamsWCustomTypesTestMixin): +class TestClusterChangeStreamsWCustomTypes(IntegrationTest, ChangeStreamsWCustomTypesTestMixin): @classmethod @client_context.require_version_min(4, 0, 0) @client_context.require_no_mmap @@ -872,15 +949,15 @@ def tearDown(self): self.client.drop_database(self.db) def create_targets(self, *args, **kwargs): - codec_options = kwargs.pop('codec_options', None) + codec_options = kwargs.pop("codec_options", None) if codec_options: - kwargs['type_registry'] = codec_options.type_registry - kwargs['document_class'] = codec_options.document_class + kwargs["type_registry"] = codec_options.type_registry + kwargs["document_class"] = codec_options.document_class self.watched_target = rs_client(*args, **kwargs) self.addCleanup(self.watched_target.close) self.input_target = self.watched_target[self.db.name].test # Insert a record to ensure db, coll are created. - self.input_target.insert_one({'data': 'dummy'}) + self.input_target.insert_one({"data": "dummy"}) if __name__ == "__main__": diff --git a/test/test_data_lake.py b/test/test_data_lake.py index 2954efe651..863b3a4f59 100644 --- a/test/test_data_lake.py +++ b/test/test_data_lake.py @@ -19,33 +19,37 @@ sys.path[0:0] = [""] -from pymongo.auth import MECHANISMS -from test import client_context, unittest, IntegrationTest +from test import IntegrationTest, client_context, unittest from test.crud_v2_format import TestCrudV2 from test.utils import ( - rs_client_noauth, rs_or_single_client, OvertCommandListener, TestCreator) + OvertCommandListener, + TestCreator, + rs_client_noauth, + rs_or_single_client, +) +from pymongo.auth import MECHANISMS # Location of JSON test specifications. -_TEST_PATH = os.path.join( - os.path.dirname(os.path.realpath(__file__)), "data_lake") +_TEST_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "data_lake") class TestDataLakeMustConnect(IntegrationTest): def test_connected_to_data_lake(self): - data_lake = os.environ.get('DATA_LAKE') + data_lake = os.environ.get("DATA_LAKE") if not data_lake: - self.skipTest('DATA_LAKE is not set') + self.skipTest("DATA_LAKE is not set") - self.assertTrue(client_context.is_data_lake, - 'client context.is_data_lake must be True when ' - 'DATA_LAKE is set') + self.assertTrue( + client_context.is_data_lake, + "client context.is_data_lake must be True when " "DATA_LAKE is set", + ) class TestDataLakeProse(IntegrationTest): # Default test database and collection names. 
- TEST_DB = 'test' - TEST_COLLECTION = 'driverdata' + TEST_DB = "test" + TEST_COLLECTION = "driverdata" @classmethod @client_context.require_data_lake @@ -56,8 +60,7 @@ def setUpClass(cls): def test_1(self): listener = OvertCommandListener() client = rs_or_single_client(event_listeners=[listener]) - cursor = client[self.TEST_DB][self.TEST_COLLECTION].find( - {}, batch_size=2) + cursor = client[self.TEST_DB][self.TEST_COLLECTION].find({}, batch_size=2) next(cursor) # find command assertions @@ -69,13 +72,12 @@ def test_1(self): # killCursors command assertions cursor.close() started = listener.results["started"][-1] - self.assertEqual(started.command_name, 'killCursors') + self.assertEqual(started.command_name, "killCursors") succeeded = listener.results["succeeded"][-1] - self.assertEqual(succeeded.command_name, 'killCursors') + self.assertEqual(succeeded.command_name, "killCursors") self.assertIn(cursor_id, started.command["cursors"]) - target_ns = ".".join([started.command['$db'], - started.command['killCursors']]) + target_ns = ".".join([started.command["$db"], started.command["killCursors"]]) self.assertEqual(cursor_ns, target_ns) self.assertIn(cursor_id, succeeded.reply["cursorsKilled"]) @@ -83,19 +85,19 @@ def test_1(self): # Test no auth def test_2(self): client = rs_client_noauth() - client.admin.command('ping') + client.admin.command("ping") # Test with auth def test_3(self): - for mechanism in ['SCRAM-SHA-1', 'SCRAM-SHA-256']: + for mechanism in ["SCRAM-SHA-1", "SCRAM-SHA-256"]: client = rs_or_single_client(authMechanism=mechanism) client[self.TEST_DB][self.TEST_COLLECTION].find_one() class DataLakeTestSpec(TestCrudV2): # Default test database and collection names. - TEST_DB = 'test' - TEST_COLLECTION = 'driverdata' + TEST_DB = "test" + TEST_COLLECTION = "driverdata" @classmethod @client_context.require_data_lake diff --git a/test/test_database.py b/test/test_database.py index 096eb5b979..9a08d971db 100644 --- a/test/test_database.py +++ b/test/test_database.py @@ -21,43 +21,44 @@ sys.path[0:0] = [""] +from test import IntegrationTest, SkipTest, client_context, unittest +from test.test_custom_types import DECIMAL_CODECOPTS +from test.utils import ( + IMPOSSIBLE_WRITE_CONCERN, + DeprecationFilter, + OvertCommandListener, + ignore_deprecations, + rs_or_single_client, + server_started_with_auth, + wait_until, +) + from bson.codec_options import CodecOptions -from bson.int64 import Int64 -from bson.regex import Regex from bson.dbref import DBRef +from bson.int64 import Int64 from bson.objectid import ObjectId +from bson.regex import Regex from bson.son import SON -from pymongo import (auth, - helpers) +from pymongo import auth, helpers from pymongo.collection import Collection from pymongo.database import Database -from pymongo.errors import (CollectionInvalid, - ConfigurationError, - ExecutionTimeout, - InvalidName, - OperationFailure, - WriteConcernError) +from pymongo.errors import ( + CollectionInvalid, + ConfigurationError, + ExecutionTimeout, + InvalidName, + OperationFailure, + WriteConcernError, +) from pymongo.mongo_client import MongoClient from pymongo.read_concern import ReadConcern from pymongo.read_preferences import ReadPreference from pymongo.write_concern import WriteConcern -from test import (client_context, - SkipTest, - unittest, - IntegrationTest) -from test.utils import (ignore_deprecations, - rs_or_single_client, - server_started_with_auth, - wait_until, - DeprecationFilter, - IMPOSSIBLE_WRITE_CONCERN, - OvertCommandListener) -from test.test_custom_types import 
DECIMAL_CODECOPTS class TestDatabaseNoConnect(unittest.TestCase): - """Test Database features on a client that does not connect. - """ + """Test Database features on a client that does not connect.""" + client: MongoClient @classmethod @@ -69,18 +70,17 @@ def test_name(self): self.assertRaises(InvalidName, Database, self.client, "my db") self.assertRaises(InvalidName, Database, self.client, 'my"db') self.assertRaises(InvalidName, Database, self.client, "my\x00db") - self.assertRaises(InvalidName, Database, - self.client, "my\u0000db") + self.assertRaises(InvalidName, Database, self.client, "my\u0000db") self.assertEqual("name", Database(self.client, "name").name) def test_get_collection(self): codec_options = CodecOptions(tz_aware=True) write_concern = WriteConcern(w=2, j=True) - read_concern = ReadConcern('majority') + read_concern = ReadConcern("majority") coll = self.client.pymongo_test.get_collection( - 'foo', codec_options, ReadPreference.SECONDARY, write_concern, - read_concern) - self.assertEqual('foo', coll.name) + "foo", codec_options, ReadPreference.SECONDARY, write_concern, read_concern + ) + self.assertEqual("foo", coll.name) self.assertEqual(codec_options, coll.codec_options) self.assertEqual(ReadPreference.SECONDARY, coll.read_preference) self.assertEqual(write_concern, coll.write_concern) @@ -88,7 +88,7 @@ def test_get_collection(self): def test_getattr(self): db = self.client.pymongo_test - self.assertTrue(isinstance(db['_does_not_exist'], Collection)) + self.assertTrue(isinstance(db["_does_not_exist"], Collection)) with self.assertRaises(AttributeError) as context: db._does_not_exist @@ -96,24 +96,19 @@ def test_getattr(self): # Message should be: "AttributeError: Database has no attribute # '_does_not_exist'. To access the _does_not_exist collection, # use database['_does_not_exist']". 
- self.assertIn("has no attribute '_does_not_exist'", - str(context.exception)) + self.assertIn("has no attribute '_does_not_exist'", str(context.exception)) def test_iteration(self): self.assertRaises(TypeError, next, self.client.pymongo_test) class TestDatabase(IntegrationTest): - def test_equality(self): - self.assertNotEqual(Database(self.client, "test"), - Database(self.client, "mike")) - self.assertEqual(Database(self.client, "test"), - Database(self.client, "test")) + self.assertNotEqual(Database(self.client, "test"), Database(self.client, "mike")) + self.assertEqual(Database(self.client, "test"), Database(self.client, "test")) # Explicitly test inequality - self.assertFalse(Database(self.client, "test") != - Database(self.client, "test")) + self.assertFalse(Database(self.client, "test") != Database(self.client, "test")) def test_hashable(self): self.assertIn(self.client.test, {Database(self.client, "test")}) @@ -126,9 +121,10 @@ def test_get_coll(self): self.assertEqual(db.test.mike, db["test.mike"]) def test_repr(self): - self.assertEqual(repr(Database(self.client, "pymongo_test")), - "Database(%r, %s)" % (self.client, - repr("pymongo_test"))) + self.assertEqual( + repr(Database(self.client, "pymongo_test")), + "Database(%r, %s)" % (self.client, repr("pymongo_test")), + ) def test_create_collection(self): db = Database(self.client, "pymongo_test") @@ -165,7 +161,8 @@ def test_list_collection_names(self): db.systemcoll.test.insert_one({}) no_system_collections = db.list_collection_names( - filter={"name": {"$regex": r"^(?!system\.)"}}) + filter={"name": {"$regex": r"^(?!system\.)"}} + ) for coll in no_system_collections: self.assertTrue(not coll.startswith("system.")) self.assertIn("systemcoll.test", no_system_collections) @@ -192,8 +189,7 @@ def test_list_collection_names_filter(self): self.addCleanup(client.drop_database, db.name) # Should not send nameOnly. - for filter in ({'options.capped': True}, - {'options.capped': True, 'name': 'capped'}): + for filter in ({"options.capped": True}, {"options.capped": True, "name": "capped"}): results.clear() names = db.list_collection_names(filter=filter) self.assertEqual(names, ["capped"]) @@ -201,7 +197,7 @@ def test_list_collection_names_filter(self): # Should send nameOnly (except on 2.6). filter: Any - for filter in (None, {}, {'name': {'$in': ['capped', 'non_capped']}}): + for filter in (None, {}, {"name": {"$in": ["capped", "non_capped"]}}): results.clear() names = db.list_collection_names(filter=filter) self.assertIn("capped", names) @@ -239,8 +235,10 @@ def test_list_collections(self): coll_cnt: dict = {} # Checking if is there any collection which don't exists. - if (len(set(colls) - set(["test","test.mike"])) == 0 or - len(set(colls) - set(["test","test.mike","system.indexes"])) == 0): + if ( + len(set(colls) - set(["test", "test.mike"])) == 0 + or len(set(colls) - set(["test", "test.mike", "system.indexes"])) == 0 + ): self.assertTrue(True) else: self.assertTrue(False) @@ -254,7 +252,7 @@ def test_list_collections(self): db.drop_collection("test") db.create_collection("test", capped=True, size=4096) - results = db.list_collections(filter={'options.capped': True}) + results = db.list_collections(filter={"options.capped": True}) colls = [result["name"] for result in results] # Checking only capped collections are present @@ -277,8 +275,10 @@ def test_list_collections(self): coll_cnt = {} # Checking if is there any collection which don't exists. 
- if (len(set(colls) - set(["test"])) == 0 or - len(set(colls) - set(["test","system.indexes"])) == 0): + if ( + len(set(colls) - set(["test"])) == 0 + or len(set(colls) - set(["test", "system.indexes"])) == 0 + ): self.assertTrue(True) else: self.assertTrue(False) @@ -287,13 +287,13 @@ def test_list_collections(self): def test_list_collection_names_single_socket(self): client = rs_or_single_client(maxPoolSize=1) - client.drop_database('test_collection_names_single_socket') + client.drop_database("test_collection_names_single_socket") db = client.test_collection_names_single_socket for i in range(200): db.create_collection(str(i)) db.list_collection_names() # Must not hang. - client.drop_database('test_collection_names_single_socket') + client.drop_database("test_collection_names_single_socket") def test_drop_collection(self): db = Database(self.client, "pymongo_test") @@ -325,10 +325,9 @@ def test_drop_collection(self): db.drop_collection(db.test.doesnotexist) if client_context.is_rs: - db_wc = Database(self.client, 'pymongo_test', - write_concern=IMPOSSIBLE_WRITE_CONCERN) + db_wc = Database(self.client, "pymongo_test", write_concern=IMPOSSIBLE_WRITE_CONCERN) with self.assertRaises(WriteConcernError): - db_wc.drop_collection('test') + db_wc.drop_collection("test") def test_validate_collection(self): db = self.client.pymongo_test @@ -338,10 +337,8 @@ def test_validate_collection(self): db.test.insert_one({"dummy": "object"}) - self.assertRaises(OperationFailure, db.validate_collection, - "test.doesnotexist") - self.assertRaises(OperationFailure, db.validate_collection, - db.test.doesnotexist) + self.assertRaises(OperationFailure, db.validate_collection, "test.doesnotexist") + self.assertRaises(OperationFailure, db.validate_collection, db.test.doesnotexist) self.assertTrue(db.validate_collection("test")) self.assertTrue(db.validate_collection(db.test)) @@ -357,10 +354,9 @@ def test_validate_collection_background(self): coll = db.test self.assertTrue(db.validate_collection(coll, background=False)) # The inMemory storage engine does not support background=True. - if client_context.storage_engine != 'inMemory': + if client_context.storage_engine != "inMemory": self.assertTrue(db.validate_collection(coll, background=True)) - self.assertTrue( - db.validate_collection(coll, scandata=True, background=True)) + self.assertTrue(db.validate_collection(coll, scandata=True, background=True)) # The server does not support background=True with full=True. # Assert that we actually send the background option by checking # that this combination fails. 
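A minimal usage sketch of the ``validate_collection`` options exercised in the hunk above, assuming a reachable server whose storage engine is not inMemory (the ``client`` and ``db`` names are illustrative)::

    from pymongo import MongoClient

    client = MongoClient()
    db = client.pymongo_test
    db.test.insert_one({"dummy": "object"})

    # Foreground validation: blocks, optionally scanning record data too.
    db.validate_collection("test")
    db.validate_collection("test", scandata=True)

    # Non-blocking validation; the inMemory engine rejects background=True,
    # and the server rejects combining background=True with full=True.
    db.validate_collection("test", background=True)
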
@@ -381,24 +377,25 @@ def test_command(self): def test_command_with_regex(self): db = self.client.pymongo_test db.test.drop() - db.test.insert_one({'r': re.compile('.*')}) - db.test.insert_one({'r': Regex('.*')}) + db.test.insert_one({"r": re.compile(".*")}) + db.test.insert_one({"r": Regex(".*")}) - result = db.command('aggregate', 'test', pipeline=[], cursor={}) - for doc in result['cursor']['firstBatch']: - self.assertTrue(isinstance(doc['r'], Regex)) + result = db.command("aggregate", "test", pipeline=[], cursor={}) + for doc in result["cursor"]["firstBatch"]: + self.assertTrue(isinstance(doc["r"], Regex)) def test_password_digest(self): self.assertRaises(TypeError, auth._password_digest, 5) self.assertRaises(TypeError, auth._password_digest, True) self.assertRaises(TypeError, auth._password_digest, None) - self.assertTrue(isinstance(auth._password_digest("mike", "password"), - str)) - self.assertEqual(auth._password_digest("mike", "password"), - "cd7e45b3b2767dc2fa9b6b548457ed00") - self.assertEqual(auth._password_digest("Gustave", "Dor\xe9"), - "81e0e2364499209f466e75926a162d73") + self.assertTrue(isinstance(auth._password_digest("mike", "password"), str)) + self.assertEqual( + auth._password_digest("mike", "password"), "cd7e45b3b2767dc2fa9b6b548457ed00" + ) + self.assertEqual( + auth._password_digest("Gustave", "Dor\xe9"), "81e0e2364499209f466e75926a162d73" + ) def test_id_ordering(self): # PyMongo attempts to have _id show up first @@ -409,11 +406,11 @@ def test_id_ordering(self): # with hash randomization enabled (e.g. tox). db = self.client.pymongo_test db.test.drop() - db.test.insert_one(SON([("hello", "world"), - ("_id", 5)])) + db.test.insert_one(SON([("hello", "world"), ("_id", 5)])) db = self.client.get_database( - "pymongo_test", codec_options=CodecOptions(document_class=SON)) + "pymongo_test", codec_options=CodecOptions(document_class=SON) + ) cursor = db.test.find() for x in cursor: for (k, v) in x.items(): @@ -432,10 +429,8 @@ def test_deref(self): obj = {"x": True} key = db.test.insert_one(obj).inserted_id self.assertEqual(obj, db.dereference(DBRef("test", key))) - self.assertEqual(obj, - db.dereference(DBRef("test", key, "pymongo_test"))) - self.assertRaises(ValueError, - db.dereference, DBRef("test", key, "foo")) + self.assertEqual(obj, db.dereference(DBRef("test", key, "pymongo_test"))) + self.assertRaises(ValueError, db.dereference, DBRef("test", key, "foo")) self.assertEqual(None, db.dereference(DBRef("test", 4))) obj = {"_id": 4} @@ -448,10 +443,11 @@ def test_deref_kwargs(self): db.test.insert_one({"_id": 4, "foo": "bar"}) db = self.client.get_database( - "pymongo_test", codec_options=CodecOptions(document_class=SON)) - self.assertEqual(SON([("foo", "bar")]), - db.dereference(DBRef("test", 4), - projection={"_id": False})) + "pymongo_test", codec_options=CodecOptions(document_class=SON) + ) + self.assertEqual( + SON([("foo", "bar")]), db.dereference(DBRef("test", 4), projection={"_id": False}) + ) # TODO some of these tests belong in the collection level testing. 
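For orientation, a short sketch of the ``Database.dereference`` behavior the tests above rely on, again assuming a reachable server (``MongoClient()`` with default settings is illustrative)::

    from bson.dbref import DBRef
    from pymongo import MongoClient

    db = MongoClient().pymongo_test
    key = db.test.insert_one({"x": True}).inserted_id

    # Resolves the reference against this database's 'test' collection.
    assert db.dereference(DBRef("test", key)) == {"_id": key, "x": True}
    # Unknown ids resolve to None; a mismatched $db raises ValueError.
    assert db.dereference(DBRef("test", 4)) is None
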
def test_insert_find_one(self): @@ -486,12 +482,12 @@ def test_long(self): db = self.client.pymongo_test db.test.drop() db.test.insert_one({"x": 9223372036854775807}) - retrieved = db.test.find_one()['x'] # type: ignore + retrieved = db.test.find_one()["x"] # type: ignore self.assertEqual(Int64(9223372036854775807), retrieved) self.assertIsInstance(retrieved, Int64) db.test.delete_many({}) db.test.insert_one({"x": Int64(1)}) - retrieved = db.test.find_one()['x'] # type: ignore + retrieved = db.test.find_one()["x"] # type: ignore self.assertEqual(Int64(1), retrieved) self.assertIsInstance(retrieved, Int64) @@ -533,11 +529,10 @@ def test_command_response_without_ok(self): # Sometimes (SERVER-10891) the server's response to a badly-formatted # command document will have no 'ok' field. We should raise # OperationFailure instead of KeyError. - self.assertRaises(OperationFailure, - helpers._check_command_response, {}, None) + self.assertRaises(OperationFailure, helpers._check_command_response, {}, None) try: - helpers._check_command_response({'$err': 'foo'}, None) + helpers._check_command_response({"$err": "foo"}, None) except OperationFailure as e: self.assertEqual(e.args[0], "foo, full error: {'$err': 'foo'}") else: @@ -545,64 +540,59 @@ def test_command_response_without_ok(self): def test_mongos_response(self): error_document = { - 'ok': 0, - 'errmsg': 'outer', - 'raw': {'shard0/host0,host1': {'ok': 0, 'errmsg': 'inner'}}} + "ok": 0, + "errmsg": "outer", + "raw": {"shard0/host0,host1": {"ok": 0, "errmsg": "inner"}}, + } with self.assertRaises(OperationFailure) as context: helpers._check_command_response(error_document, None) - self.assertIn('inner', str(context.exception)) + self.assertIn("inner", str(context.exception)) # If a shard has no primary and you run a command like dbstats, which # cannot be run on a secondary, mongos's response includes empty "raw" # errors. See SERVER-15428. - error_document = { - 'ok': 0, - 'errmsg': 'outer', - 'raw': {'shard0/host0,host1': {}}} + error_document = {"ok": 0, "errmsg": "outer", "raw": {"shard0/host0,host1": {}}} with self.assertRaises(OperationFailure) as context: helpers._check_command_response(error_document, None) - self.assertIn('outer', str(context.exception)) + self.assertIn("outer", str(context.exception)) # Raw error has ok: 0 but no errmsg. Not a known case, but test it. - error_document = { - 'ok': 0, - 'errmsg': 'outer', - 'raw': {'shard0/host0,host1': {'ok': 0}}} + error_document = {"ok": 0, "errmsg": "outer", "raw": {"shard0/host0,host1": {"ok": 0}}} with self.assertRaises(OperationFailure) as context: helpers._check_command_response(error_document, None) - self.assertIn('outer', str(context.exception)) + self.assertIn("outer", str(context.exception)) @client_context.require_test_commands @client_context.require_no_mongos def test_command_max_time_ms(self): - self.client.admin.command("configureFailPoint", - "maxTimeAlwaysTimeOut", - mode="alwaysOn") + self.client.admin.command("configureFailPoint", "maxTimeAlwaysTimeOut", mode="alwaysOn") try: db = self.client.pymongo_test - db.command('count', 'test') - self.assertRaises(ExecutionTimeout, db.command, - 'count', 'test', maxTimeMS=1) - pipeline = [{'$project': {'name': 1, 'count': 1}}] + db.command("count", "test") + self.assertRaises(ExecutionTimeout, db.command, "count", "test", maxTimeMS=1) + pipeline = [{"$project": {"name": 1, "count": 1}}] # Database command helper. 
- db.command('aggregate', 'test', pipeline=pipeline, cursor={}) - self.assertRaises(ExecutionTimeout, db.command, - 'aggregate', 'test', - pipeline=pipeline, cursor={}, maxTimeMS=1) + db.command("aggregate", "test", pipeline=pipeline, cursor={}) + self.assertRaises( + ExecutionTimeout, + db.command, + "aggregate", + "test", + pipeline=pipeline, + cursor={}, + maxTimeMS=1, + ) # Collection helper. db.test.aggregate(pipeline=pipeline) - self.assertRaises(ExecutionTimeout, - db.test.aggregate, pipeline, maxTimeMS=1) + self.assertRaises(ExecutionTimeout, db.test.aggregate, pipeline, maxTimeMS=1) finally: - self.client.admin.command("configureFailPoint", - "maxTimeAlwaysTimeOut", - mode="off") + self.client.admin.command("configureFailPoint", "maxTimeAlwaysTimeOut", mode="off") def test_with_options(self): codec_options = DECIMAL_CODECOPTS @@ -611,13 +601,22 @@ def test_with_options(self): read_concern = ReadConcern(level="majority") # List of all options to compare. - allopts = ['name', 'client', 'codec_options', - 'read_preference', 'write_concern', 'read_concern'] + allopts = [ + "name", + "client", + "codec_options", + "read_preference", + "write_concern", + "read_concern", + ] db1 = self.client.get_database( - 'with_options_test', codec_options=codec_options, - read_preference=read_preference, write_concern=write_concern, - read_concern=read_concern) + "with_options_test", + codec_options=codec_options, + read_preference=read_preference, + write_concern=write_concern, + read_concern=read_concern, + ) # Case 1: swap no options db2 = db1.with_options() @@ -625,22 +624,25 @@ def test_with_options(self): self.assertEqual(getattr(db1, opt), getattr(db2, opt)) # Case 2: swap all options - newopts = {'codec_options': CodecOptions(), - 'read_preference': ReadPreference.PRIMARY, - 'write_concern': WriteConcern(w=1), - 'read_concern': ReadConcern(level="local")} + newopts = { + "codec_options": CodecOptions(), + "read_preference": ReadPreference.PRIMARY, + "write_concern": WriteConcern(w=1), + "read_concern": ReadConcern(level="local"), + } db2 = db1.with_options(**newopts) # type: ignore[arg-type] for opt in newopts: - self.assertEqual( - getattr(db2, opt), newopts.get(opt, getattr(db1, opt))) + self.assertEqual(getattr(db2, opt), newopts.get(opt, getattr(db1, opt))) class TestDatabaseAggregation(IntegrationTest): def setUp(self): - self.pipeline: List[Mapping[str, Any]] = [{"$listLocalSessions": {}}, - {"$limit": 1}, - {"$addFields": {"dummy": "dummy field"}}, - {"$project": {"_id": 0, "dummy": 1}}] + self.pipeline: List[Mapping[str, Any]] = [ + {"$listLocalSessions": {}}, + {"$limit": 1}, + {"$addFields": {"dummy": "dummy field"}}, + {"$project": {"_id": 0, "dummy": 1}}, + ] self.result = {"dummy": "dummy field"} self.admin = self.client.admin @@ -660,8 +662,7 @@ def test_database_aggregation_fake_cursor(self): # SERVER-43287 disallows writing with $out to the admin db, use # $merge instead. 
db_name = "pymongo_test" - write_stage = { - "$merge": {"into": {"db": db_name, "coll": coll_name}}} + write_stage = {"$merge": {"into": {"db": db_name, "coll": coll_name}}} output_coll = self.client[db_name][coll_name] output_coll.drop() self.addCleanup(output_coll.drop) diff --git a/test/test_dbref.py b/test/test_dbref.py index 348b1d14de..8e98bd8ce5 100644 --- a/test/test_dbref.py +++ b/test/test_dbref.py @@ -17,14 +17,15 @@ import pickle import sys from typing import Any + sys.path[0:0] = [""] -from bson import encode, decode -from bson.dbref import DBRef -from bson.objectid import ObjectId +from copy import deepcopy from test import unittest -from copy import deepcopy +from bson import decode, encode +from bson.dbref import DBRef +from bson.objectid import ObjectId class TestDBRef(unittest.TestCase): @@ -57,53 +58,45 @@ def bar(): self.assertRaises(AttributeError, bar) def test_repr(self): - self.assertEqual(repr(DBRef("coll", - ObjectId("1234567890abcdef12345678"))), - "DBRef('coll', ObjectId('1234567890abcdef12345678'))") - self.assertEqual(repr(DBRef("coll", - ObjectId("1234567890abcdef12345678"))), - "DBRef(%s, ObjectId('1234567890abcdef12345678'))" - % (repr('coll'),) - ) - self.assertEqual(repr(DBRef("coll", 5, foo="bar")), - "DBRef('coll', 5, foo='bar')") - self.assertEqual(repr(DBRef("coll", - ObjectId("1234567890abcdef12345678"), "foo")), - "DBRef('coll', ObjectId('1234567890abcdef12345678'), " - "'foo')") + self.assertEqual( + repr(DBRef("coll", ObjectId("1234567890abcdef12345678"))), + "DBRef('coll', ObjectId('1234567890abcdef12345678'))", + ) + self.assertEqual( + repr(DBRef("coll", ObjectId("1234567890abcdef12345678"))), + "DBRef(%s, ObjectId('1234567890abcdef12345678'))" % (repr("coll"),), + ) + self.assertEqual(repr(DBRef("coll", 5, foo="bar")), "DBRef('coll', 5, foo='bar')") + self.assertEqual( + repr(DBRef("coll", ObjectId("1234567890abcdef12345678"), "foo")), + "DBRef('coll', ObjectId('1234567890abcdef12345678'), " "'foo')", + ) def test_equality(self): obj_id = ObjectId("1234567890abcdef12345678") - self.assertEqual(DBRef('foo', 5), DBRef('foo', 5)) + self.assertEqual(DBRef("foo", 5), DBRef("foo", 5)) self.assertEqual(DBRef("coll", obj_id), DBRef("coll", obj_id)) - self.assertNotEqual(DBRef("coll", obj_id), - DBRef("coll", obj_id, "foo")) + self.assertNotEqual(DBRef("coll", obj_id), DBRef("coll", obj_id, "foo")) self.assertNotEqual(DBRef("coll", obj_id), DBRef("col", obj_id)) - self.assertNotEqual(DBRef("coll", obj_id), - DBRef("coll", ObjectId(b"123456789011"))) + self.assertNotEqual(DBRef("coll", obj_id), DBRef("coll", ObjectId(b"123456789011"))) self.assertNotEqual(DBRef("coll", obj_id), 4) - self.assertNotEqual(DBRef("coll", obj_id, "foo"), - DBRef("coll", obj_id, "bar")) + self.assertNotEqual(DBRef("coll", obj_id, "foo"), DBRef("coll", obj_id, "bar")) # Explicitly test inequality - self.assertFalse(DBRef('foo', 5) != DBRef('foo', 5)) + self.assertFalse(DBRef("foo", 5) != DBRef("foo", 5)) self.assertFalse(DBRef("coll", obj_id) != DBRef("coll", obj_id)) - self.assertFalse(DBRef("coll", obj_id, "foo") != - DBRef("coll", obj_id, "foo")) + self.assertFalse(DBRef("coll", obj_id, "foo") != DBRef("coll", obj_id, "foo")) def test_kwargs(self): - self.assertEqual(DBRef("coll", 5, foo="bar"), - DBRef("coll", 5, foo="bar")) + self.assertEqual(DBRef("coll", 5, foo="bar"), DBRef("coll", 5, foo="bar")) self.assertNotEqual(DBRef("coll", 5, foo="bar"), DBRef("coll", 5)) - self.assertNotEqual(DBRef("coll", 5, foo="bar"), - DBRef("coll", 5, foo="baz")) + 
self.assertNotEqual(DBRef("coll", 5, foo="bar"), DBRef("coll", 5, foo="baz")) self.assertEqual("bar", DBRef("coll", 5, foo="bar").foo) - self.assertRaises(AttributeError, getattr, - DBRef("coll", 5, foo="bar"), "bar") + self.assertRaises(AttributeError, getattr, DBRef("coll", 5, foo="bar"), "bar") def test_deepcopy(self): - a = DBRef('coll', 'asdf', 'db', x=[1]) + a = DBRef("coll", "asdf", "db", x=[1]) b = deepcopy(a) self.assertEqual(a, b) @@ -116,19 +109,19 @@ def test_deepcopy(self): self.assertEqual(b.x, [2]) def test_pickling(self): - dbr = DBRef('coll', 5, foo='bar') + dbr = DBRef("coll", 5, foo="bar") for protocol in [0, 1, 2, -1]: pkl = pickle.dumps(dbr, protocol=protocol) dbr2 = pickle.loads(pkl) self.assertEqual(dbr, dbr2) def test_dbref_hash(self): - dbref_1a = DBRef('collection', 'id', 'database') - dbref_1b = DBRef('collection', 'id', 'database') + dbref_1a = DBRef("collection", "id", "database") + dbref_1b = DBRef("collection", "id", "database") self.assertEqual(hash(dbref_1a), hash(dbref_1b)) - dbref_2a = DBRef('collection', 'id', 'database', custom='custom') - dbref_2b = DBRef('collection', 'id', 'database', custom='custom') + dbref_2a = DBRef("collection", "id", "database", custom="custom") + dbref_2b = DBRef("collection", "id", "database", custom="custom") self.assertEqual(hash(dbref_2a), hash(dbref_2b)) self.assertNotEqual(hash(dbref_1a), hash(dbref_2a)) @@ -158,12 +151,12 @@ def test_decoding_1_2_3(self): {"foo": 1, "$ref": "coll0", "$id": 1, "$db": "db0", "bar": 1}, ]: with self.subTest(doc=doc): - decoded = decode(encode({'dbref': doc})) - dbref = decoded['dbref'] + decoded = decode(encode({"dbref": doc})) + dbref = decoded["dbref"] self.assertIsInstance(dbref, DBRef) - self.assertEqual(dbref.collection, doc['$ref']) - self.assertEqual(dbref.id, doc['$id']) - self.assertEqual(dbref.database, doc.get('$db')) + self.assertEqual(dbref.collection, doc["$ref"]) + self.assertEqual(dbref.id, doc["$id"]) + self.assertEqual(dbref.database, doc.get("$db")) for extra in set(doc.keys()) - {"$ref", "$id", "$db"}: self.assertEqual(getattr(dbref, extra), doc[extra]) @@ -180,8 +173,8 @@ def test_decoding_4_5(self): {"$ref": "coll0", "$id": 1, "$db": 1}, ]: with self.subTest(doc=doc): - decoded = decode(encode({'dbref': doc})) - dbref = decoded['dbref'] + decoded = decode(encode({"dbref": doc})) + dbref = decoded["dbref"] self.assertIsInstance(dbref, dict) def test_encoding_1_2(self): @@ -201,9 +194,9 @@ def test_encoding_1_2(self): ]: with self.subTest(doc=doc): # Decode the test input to a DBRef via a BSON roundtrip. - encoded_doc = encode({'dbref': doc}) + encoded_doc = encode({"dbref": doc}) decoded = decode(encoded_doc) - dbref = decoded['dbref'] + dbref = decoded["dbref"] self.assertIsInstance(dbref, DBRef) # Encode the DBRef. encoded_dbref = encode(decoded) @@ -224,9 +217,9 @@ def test_encoding_3(self): ]: with self.subTest(doc=doc): # Decode the test input to a DBRef via a BSON roundtrip. - encoded_doc = encode({'dbref': doc}) + encoded_doc = encode({"dbref": doc}) decoded = decode(encoded_doc) - dbref = decoded['dbref'] + dbref = decoded["dbref"] self.assertIsInstance(dbref, DBRef) # Encode the DBRef. 
encoded_dbref = encode(decoded) diff --git a/test/test_decimal128.py b/test/test_decimal128.py index 3988a4559a..b46f94f594 100644 --- a/test/test_decimal128.py +++ b/test/test_decimal128.py @@ -16,41 +16,39 @@ import pickle import sys - from decimal import Decimal sys.path[0:0] = [""] -from bson.decimal128 import Decimal128, create_decimal128_context from test import client_context, unittest -class TestDecimal128(unittest.TestCase): +from bson.decimal128 import Decimal128, create_decimal128_context + +class TestDecimal128(unittest.TestCase): @client_context.require_connection def test_round_trip(self): coll = client_context.client.pymongo_test.test coll.drop() - dec128 = Decimal128.from_bid( - b'\x00@cR\xbf\xc6\x01\x00\x00\x00\x00\x00\x00\x00\x1c0') - coll.insert_one({'dec128': dec128}) - doc = coll.find_one({'dec128': dec128}) + dec128 = Decimal128.from_bid(b"\x00@cR\xbf\xc6\x01\x00\x00\x00\x00\x00\x00\x00\x1c0") + coll.insert_one({"dec128": dec128}) + doc = coll.find_one({"dec128": dec128}) assert doc is not None self.assertIsNotNone(doc) - self.assertEqual(doc['dec128'], dec128) + self.assertEqual(doc["dec128"], dec128) def test_pickle(self): - dec128 = Decimal128.from_bid( - b'\x00@cR\xbf\xc6\x01\x00\x00\x00\x00\x00\x00\x00\x1c0') + dec128 = Decimal128.from_bid(b"\x00@cR\xbf\xc6\x01\x00\x00\x00\x00\x00\x00\x00\x1c0") for protocol in range(pickle.HIGHEST_PROTOCOL + 1): pkl = pickle.dumps(dec128, protocol=protocol) self.assertEqual(dec128, pickle.loads(pkl)) def test_special(self): - dnan = Decimal('NaN') - dnnan = Decimal('-NaN') - dsnan = Decimal('sNaN') - dnsnan = Decimal('-sNaN') + dnan = Decimal("NaN") + dnnan = Decimal("-NaN") + dsnan = Decimal("sNaN") + dnsnan = Decimal("-sNaN") dnan128 = Decimal128(dnan) dnnan128 = Decimal128(dnnan) dsnan128 = Decimal128(dsnan) @@ -70,5 +68,5 @@ def test_decimal128_context(self): self.assertEqual("0E-6176", str(ctx.copy().create_decimal("1E-6177"))) -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() diff --git a/test/test_discovery_and_monitoring.py b/test/test_discovery_and_monitoring.py index c3a50709ac..51b168b0a0 100644 --- a/test/test_discovery_and_monitoring.py +++ b/test/test_discovery_and_monitoring.py @@ -21,40 +21,41 @@ sys.path[0:0] = [""] -from bson import json_util, Timestamp -from pymongo import (common, - monitoring) -from pymongo.errors import (AutoReconnect, - ConfigurationError, - NetworkTimeout, - NotPrimaryError, - OperationFailure) -from pymongo.helpers import (_check_command_response, - _check_write_command_response) +from test import IntegrationTest, unittest +from test.pymongo_mocks import DummyMonitor +from test.utils import ( + CMAPListener, + HeartbeatEventListener, + TestCreator, + assertion_context, + client_context, + get_pool, + rs_or_single_client, + server_name_to_type, + single_client, + wait_until, +) +from test.utils_spec_runner import SpecRunner, SpecRunnerThread + +from bson import Timestamp, json_util +from pymongo import common, monitoring +from pymongo.errors import ( + AutoReconnect, + ConfigurationError, + NetworkTimeout, + NotPrimaryError, + OperationFailure, +) from pymongo.hello import Hello, HelloCompat -from pymongo.server_description import ServerDescription, SERVER_TYPE +from pymongo.helpers import _check_command_response, _check_write_command_response +from pymongo.server_description import SERVER_TYPE, ServerDescription from pymongo.settings import TopologySettings from pymongo.topology import Topology, _ErrorContext from pymongo.topology_description import TOPOLOGY_TYPE 
from pymongo.uri_parser import parse_uri -from test import unittest, IntegrationTest -from test.utils import (assertion_context, - CMAPListener, - client_context, - get_pool, - HeartbeatEventListener, - server_name_to_type, - rs_or_single_client, - single_client, - TestCreator, - wait_until) -from test.utils_spec_runner import SpecRunner, SpecRunnerThread -from test.pymongo_mocks import DummyMonitor - # Location of JSON test specifications. -_TEST_PATH = os.path.join( - os.path.dirname(os.path.realpath(__file__)), 'discovery_and_monitoring') +_TEST_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "discovery_and_monitoring") def create_mock_topology(uri, monitor_class=DummyMonitor): @@ -62,19 +63,20 @@ def create_mock_topology(uri, monitor_class=DummyMonitor): replica_set_name = None direct_connection = None load_balanced = None - if 'replicaset' in parsed_uri['options']: - replica_set_name = parsed_uri['options']['replicaset'] - if 'directConnection' in parsed_uri['options']: - direct_connection = parsed_uri['options']['directConnection'] - if 'loadBalanced' in parsed_uri['options']: - load_balanced = parsed_uri['options']['loadBalanced'] + if "replicaset" in parsed_uri["options"]: + replica_set_name = parsed_uri["options"]["replicaset"] + if "directConnection" in parsed_uri["options"]: + direct_connection = parsed_uri["options"]["directConnection"] + if "loadBalanced" in parsed_uri["options"]: + load_balanced = parsed_uri["options"]["loadBalanced"] topology_settings = TopologySettings( - parsed_uri['nodelist'], + parsed_uri["nodelist"], replica_set_name=replica_set_name, monitor_class=monitor_class, direct_connection=direct_connection, - load_balanced=load_balanced) + load_balanced=load_balanced, + ) c = Topology(topology_settings) c.open() @@ -82,43 +84,42 @@ def create_mock_topology(uri, monitor_class=DummyMonitor): def got_hello(topology, server_address, hello_response): - server_description = ServerDescription( - server_address, Hello(hello_response), 0) + server_description = ServerDescription(server_address, Hello(hello_response), 0) topology.on_change(server_description) def got_app_error(topology, app_error): - server_address = common.partition_node(app_error['address']) + server_address = common.partition_node(app_error["address"]) server = topology.get_server_by_address(server_address) - error_type = app_error['type'] - generation = app_error.get( - 'generation', server.pool.gen.get_overall()) - when = app_error['when'] - max_wire_version = app_error['maxWireVersion'] + error_type = app_error["type"] + generation = app_error.get("generation", server.pool.gen.get_overall()) + when = app_error["when"] + max_wire_version = app_error["maxWireVersion"] # XXX: We could get better test coverage by mocking the errors on the # Pool/SocketInfo. 
try: - if error_type == 'command': - _check_command_response(app_error['response'], max_wire_version) - _check_write_command_response(app_error['response']) - elif error_type == 'network': - raise AutoReconnect('mock non-timeout network error') - elif error_type == 'timeout': - raise NetworkTimeout('mock network timeout error') + if error_type == "command": + _check_command_response(app_error["response"], max_wire_version) + _check_write_command_response(app_error["response"]) + elif error_type == "network": + raise AutoReconnect("mock non-timeout network error") + elif error_type == "timeout": + raise NetworkTimeout("mock network timeout error") else: - raise AssertionError('unknown error type: %s' % (error_type,)) + raise AssertionError("unknown error type: %s" % (error_type,)) assert False except (AutoReconnect, NotPrimaryError, OperationFailure) as e: - if when == 'beforeHandshakeCompletes': + if when == "beforeHandshakeCompletes": completed_handshake = False - elif when == 'afterHandshakeCompletes': + elif when == "afterHandshakeCompletes": completed_handshake = True else: - assert False, 'Unknown when field %s' % (when,) + assert False, "Unknown when field %s" % (when,) topology.handle_error( - server_address, _ErrorContext(e, max_wire_version, generation, - completed_handshake, None)) + server_address, + _ErrorContext(e, max_wire_version, generation, completed_handshake, None), + ) def get_type(topology, hostname): @@ -139,14 +140,12 @@ def server_type_name(server_type): def check_outcome(self, topology, outcome): - expected_servers = outcome['servers'] + expected_servers = outcome["servers"] # Check weak equality before proceeding. - self.assertEqual( - len(topology.description.server_descriptions()), - len(expected_servers)) + self.assertEqual(len(topology.description.server_descriptions()), len(expected_servers)) - if outcome.get('compatible') is False: + if outcome.get("compatible") is False: with self.assertRaises(ConfigurationError): topology.description.check_compatible() else: @@ -160,64 +159,58 @@ def check_outcome(self, topology, outcome): self.assertTrue(topology.has_server(node)) actual_server = topology.get_server_by_address(node) actual_server_description = actual_server.description - expected_server_type = server_name_to_type(expected_server['type']) + expected_server_type = server_name_to_type(expected_server["type"]) self.assertEqual( server_type_name(expected_server_type), - server_type_name(actual_server_description.server_type)) + server_type_name(actual_server_description.server_type), + ) - self.assertEqual( - expected_server.get('setName'), - actual_server_description.replica_set_name) + self.assertEqual(expected_server.get("setName"), actual_server_description.replica_set_name) - self.assertEqual( - expected_server.get('setVersion'), - actual_server_description.set_version) + self.assertEqual(expected_server.get("setVersion"), actual_server_description.set_version) - self.assertEqual( - expected_server.get('electionId'), - actual_server_description.election_id) + self.assertEqual(expected_server.get("electionId"), actual_server_description.election_id) self.assertEqual( - expected_server.get('topologyVersion'), - actual_server_description.topology_version) + expected_server.get("topologyVersion"), actual_server_description.topology_version + ) - expected_pool = expected_server.get('pool') + expected_pool = expected_server.get("pool") if expected_pool: - self.assertEqual( - expected_pool.get('generation'), - actual_server.pool.gen.get_overall()) + 
self.assertEqual(expected_pool.get("generation"), actual_server.pool.gen.get_overall()) - self.assertEqual(outcome['setName'], topology.description.replica_set_name) - self.assertEqual(outcome.get('logicalSessionTimeoutMinutes'), - topology.description.logical_session_timeout_minutes) + self.assertEqual(outcome["setName"], topology.description.replica_set_name) + self.assertEqual( + outcome.get("logicalSessionTimeoutMinutes"), + topology.description.logical_session_timeout_minutes, + ) - expected_topology_type = getattr(TOPOLOGY_TYPE, outcome['topologyType']) - self.assertEqual(topology_type_name(expected_topology_type), - topology_type_name(topology.description.topology_type)) + expected_topology_type = getattr(TOPOLOGY_TYPE, outcome["topologyType"]) + self.assertEqual( + topology_type_name(expected_topology_type), + topology_type_name(topology.description.topology_type), + ) - self.assertEqual(outcome.get('maxSetVersion'), - topology.description.max_set_version) - self.assertEqual(outcome.get('maxElectionId'), - topology.description.max_election_id) + self.assertEqual(outcome.get("maxSetVersion"), topology.description.max_set_version) + self.assertEqual(outcome.get("maxElectionId"), topology.description.max_election_id) def create_test(scenario_def): def run_scenario(self): - c = create_mock_topology(scenario_def['uri']) + c = create_mock_topology(scenario_def["uri"]) - for i, phase in enumerate(scenario_def['phases']): + for i, phase in enumerate(scenario_def["phases"]): # Including the phase description makes failures easier to debug. - description = phase.get('description', str(i)) - with assertion_context('phase: %s' % (description,)): - for response in phase.get('responses', []): - got_hello( - c, common.partition_node(response[0]), response[1]) + description = phase.get("description", str(i)) + with assertion_context("phase: %s" % (description,)): + for response in phase.get("responses", []): + got_hello(c, common.partition_node(response[0]), response[1]) - for app_error in phase.get('applicationErrors', []): + for app_error in phase.get("applicationErrors", []): got_app_error(c, app_error) - check_outcome(self, c, phase['outcome']) + check_outcome(self, c, phase["outcome"]) return run_scenario @@ -232,8 +225,7 @@ def create_tests(): # Construct test from scenario. 
new_test = create_test(scenario_def) - test_name = 'test_%s_%s' % ( - dirname, os.path.splitext(filename)[0]) + test_name = "test_%s_%s" % (dirname, os.path.splitext(filename)[0]) new_test.__name__ = test_name setattr(TestAllScenarios, new_test.__name__, new_test) @@ -244,17 +236,16 @@ def create_tests(): class TestClusterTimeComparison(unittest.TestCase): def test_cluster_time_comparison(self): - t = create_mock_topology('mongodb://host') + t = create_mock_topology("mongodb://host") def send_cluster_time(time, inc, should_update): old = t.max_cluster_time() - new = {'clusterTime': Timestamp(time, inc)} - got_hello(t, - ('host', 27017), - {'ok': 1, - 'minWireVersion': 0, - 'maxWireVersion': 6, - '$clusterTime': new}) + new = {"clusterTime": Timestamp(time, inc)} + got_hello( + t, + ("host", 27017), + {"ok": 1, "minWireVersion": 0, "maxWireVersion": 6, "$clusterTime": new}, + ) actual = t.max_cluster_time() if should_update: @@ -270,7 +261,6 @@ def send_cluster_time(time, inc, should_update): class TestIgnoreStaleErrors(IntegrationTest): - def test_ignore_stale_connection_errors(self): N_THREADS = 5 barrier = threading.Barrier(N_THREADS, timeout=30) @@ -278,22 +268,22 @@ def test_ignore_stale_connection_errors(self): self.addCleanup(client.close) # Wait for initial discovery. - client.admin.command('ping') + client.admin.command("ping") pool = get_pool(client) starting_generation = pool.gen.get_overall() - wait_until(lambda: len(pool.sockets) == N_THREADS, 'created sockets') + wait_until(lambda: len(pool.sockets) == N_THREADS, "created sockets") def mock_command(*args, **kwargs): # Synchronize all threads to ensure they use the same generation. barrier.wait() - raise AutoReconnect('mock SocketInfo.command error') + raise AutoReconnect("mock SocketInfo.command error") for sock in pool.sockets: sock.command = mock_command def insert_command(i): try: - client.test.command('insert', 'test', documents=[{'i': i}]) + client.test.command("insert", "test", documents=[{"i": i}]) except AutoReconnect as exc: pass @@ -306,11 +296,10 @@ def insert_command(i): t.join() # Expect a single pool reset for the network error - self.assertEqual( - starting_generation+1, pool.gen.get_overall()) + self.assertEqual(starting_generation + 1, pool.gen.get_overall()) # Server should be selectable. - client.admin.command('ping') + client.admin.command("ping") class CMAPHeartbeatListener(HeartbeatEventListener, CMAPListener): @@ -322,52 +311,52 @@ class TestPoolManagement(IntegrationTest): def test_pool_unpause(self): # This test implements the prose test "Connection Pool Management" listener = CMAPHeartbeatListener() - client = single_client(appName="SDAMPoolManagementTest", - heartbeatFrequencyMS=500, - event_listeners=[listener]) + client = single_client( + appName="SDAMPoolManagementTest", heartbeatFrequencyMS=500, event_listeners=[listener] + ) self.addCleanup(client.close) # Assert that ConnectionPoolReadyEvent occurs after the first # ServerHeartbeatSucceededEvent. 
listener.wait_for_event(monitoring.PoolReadyEvent, 1) pool_ready = listener.events_by_type(monitoring.PoolReadyEvent)[0] - hb_succeeded = listener.events_by_type( - monitoring.ServerHeartbeatSucceededEvent)[0] - self.assertGreater( - listener.events.index(pool_ready), - listener.events.index(hb_succeeded)) + hb_succeeded = listener.events_by_type(monitoring.ServerHeartbeatSucceededEvent)[0] + self.assertGreater(listener.events.index(pool_ready), listener.events.index(hb_succeeded)) listener.reset() fail_hello = { - 'mode': {'times': 2}, - 'data': { - 'failCommands': [HelloCompat.LEGACY_CMD, 'hello'], - 'errorCode': 1234, - 'appName': 'SDAMPoolManagementTest', + "mode": {"times": 2}, + "data": { + "failCommands": [HelloCompat.LEGACY_CMD, "hello"], + "errorCode": 1234, + "appName": "SDAMPoolManagementTest", }, } with self.fail_point(fail_hello): listener.wait_for_event(monitoring.ServerHeartbeatFailedEvent, 1) listener.wait_for_event(monitoring.PoolClearedEvent, 1) - listener.wait_for_event( - monitoring.ServerHeartbeatSucceededEvent, 1) + listener.wait_for_event(monitoring.ServerHeartbeatSucceededEvent, 1) listener.wait_for_event(monitoring.PoolReadyEvent, 1) class TestIntegration(SpecRunner): # Location of JSON test specifications. TEST_PATH = os.path.join( - os.path.dirname(os.path.realpath(__file__)), - 'discovery_and_monitoring_integration') + os.path.dirname(os.path.realpath(__file__)), "discovery_and_monitoring_integration" + ) def _event_count(self, event): - if event == 'ServerMarkedUnknownEvent': + if event == "ServerMarkedUnknownEvent": + def marked_unknown(e): - return (isinstance(e, monitoring.ServerDescriptionChangedEvent) - and not e.new_description.is_server_type_known) + return ( + isinstance(e, monitoring.ServerDescriptionChangedEvent) + and not e.new_description.is_server_type_known + ) + assert self.server_listener is not None return len(self.server_listener.matching(marked_unknown)) # Only support CMAP events for now. - self.assertTrue(event.startswith('Pool') or event.startswith('Conn')) + self.assertTrue(event.startswith("Pool") or event.startswith("Conn")) event_type = getattr(monitoring, event) assert self.pool_listener is not None return self.pool_listener.event_count(event_type) @@ -377,50 +366,48 @@ def assert_event_count(self, event, count): Assert the given event was published exactly `count` times. """ - self.assertEqual(self._event_count(event), count, - 'expected %s not %r' % (count, event)) + self.assertEqual(self._event_count(event), count, "expected %s not %r" % (count, event)) def wait_for_event(self, event, count): """Run the waitForEvent test operation. Wait for a number of events to be published, or fail. """ - wait_until(lambda: self._event_count(event) >= count, - 'find %s %s event(s)' % (count, event)) + wait_until( + lambda: self._event_count(event) >= count, "find %s %s event(s)" % (count, event) + ) def configure_fail_point(self, fail_point): - """Run the configureFailPoint test operation. - """ + """Run the configureFailPoint test operation.""" self.set_fail_point(fail_point) - self.addCleanup(self.set_fail_point, { - 'configureFailPoint': fail_point['configureFailPoint'], - 'mode': 'off'}) + self.addCleanup( + self.set_fail_point, + {"configureFailPoint": fail_point["configureFailPoint"], "mode": "off"}, + ) def run_admin_command(self, command, **kwargs): - """Run the runAdminCommand test operation. 
- """ + """Run the runAdminCommand test operation.""" self.client.admin.command(command, **kwargs) def record_primary(self): - """Run the recordPrimary test operation. - """ + """Run the recordPrimary test operation.""" self._previous_primary = self.scenario_client.primary def wait_for_primary_change(self, timeout_ms): - """Run the waitForPrimaryChange test operation. - """ + """Run the waitForPrimaryChange test operation.""" + def primary_changed(): primary = self.scenario_client.primary if primary is None: return False return primary != self._previous_primary - timeout = timeout_ms/1000.0 - wait_until(primary_changed, 'change primary', timeout=timeout) + + timeout = timeout_ms / 1000.0 + wait_until(primary_changed, "change primary", timeout=timeout) def wait(self, ms): - """Run the "wait" test operation. - """ - time.sleep(ms/1000.0) + """Run the "wait" test operation.""" + time.sleep(ms / 1000.0) def start_thread(self, name): """Run the 'startThread' thread operation.""" @@ -431,8 +418,7 @@ def start_thread(self, name): def run_on_thread(self, sessions, collection, name, operation): """Run the 'runOnThread' operation.""" thread = self.targets[name] - thread.schedule(lambda: self._run_op( - sessions, collection, operation, False)) + thread.schedule(lambda: self._run_op(sessions, collection, operation, False)) def wait_for_thread(self, name): """Run the 'waitForThread' operation.""" @@ -441,8 +427,7 @@ def wait_for_thread(self, name): thread.join(60) if thread.exc: raise thread.exc - self.assertFalse( - thread.is_alive(), 'Thread %s is still running' % (name,)) + self.assertFalse(thread.is_alive(), "Thread %s is still running" % (name,)) def create_spec_test(scenario_def, test, name): diff --git a/test/test_dns.py b/test/test_dns.py index 8404c2aa69..d47e115f41 100644 --- a/test/test_dns.py +++ b/test/test_dns.py @@ -21,18 +21,20 @@ sys.path[0:0] = [""] +from test import client_context, unittest +from test.utils import wait_until + from pymongo.common import validate_read_preference_tags -from pymongo.srv_resolver import _HAVE_DNSPYTHON from pymongo.errors import ConfigurationError from pymongo.mongo_client import MongoClient +from pymongo.srv_resolver import _HAVE_DNSPYTHON from pymongo.uri_parser import parse_uri, split_hosts -from test import client_context, unittest -from test.utils import wait_until class TestDNSRepl(unittest.TestCase): - TEST_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), - 'srv_seedlist', 'replica-set') + TEST_PATH = os.path.join( + os.path.dirname(os.path.realpath(__file__)), "srv_seedlist", "replica-set" + ) load_balanced = False @client_context.require_replica_set @@ -41,8 +43,9 @@ def setUp(self): class TestDNSLoadBalanced(unittest.TestCase): - TEST_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), - 'srv_seedlist', 'load-balanced') + TEST_PATH = os.path.join( + os.path.dirname(os.path.realpath(__file__)), "srv_seedlist", "load-balanced" + ) load_balanced = True @client_context.require_load_balancer @@ -51,8 +54,7 @@ def setUp(self): class TestDNSSharded(unittest.TestCase): - TEST_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), - 'srv_seedlist', 'sharded') + TEST_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "srv_seedlist", "sharded") load_balanced = False @client_context.require_mongos @@ -61,77 +63,74 @@ def setUp(self): def create_test(test_case): - def run_test(self): if not _HAVE_DNSPYTHON: raise unittest.SkipTest("DNS tests require the dnspython module") - uri = test_case['uri'] - seeds 
= test_case.get('seeds') - num_seeds = test_case.get('numSeeds', len(seeds or [])) - hosts = test_case.get('hosts') + uri = test_case["uri"] + seeds = test_case.get("seeds") + num_seeds = test_case.get("numSeeds", len(seeds or [])) + hosts = test_case.get("hosts") num_hosts = test_case.get("numHosts", len(hosts or [])) - options = test_case.get('options', {}) - if 'ssl' in options: - options['tls'] = options.pop('ssl') - parsed_options = test_case.get('parsed_options') + options = test_case.get("options", {}) + if "ssl" in options: + options["tls"] = options.pop("ssl") + parsed_options = test_case.get("parsed_options") # See DRIVERS-1324, unless tls is explicitly set to False we need TLS. - needs_tls = not (options and (options.get('ssl') == False or - options.get('tls') == False)) + needs_tls = not (options and (options.get("ssl") == False or options.get("tls") == False)) if needs_tls and not client_context.tls: - self.skipTest('this test requires a TLS cluster') + self.skipTest("this test requires a TLS cluster") if not needs_tls and client_context.tls: - self.skipTest('this test requires a non-TLS cluster') + self.skipTest("this test requires a non-TLS cluster") if seeds: - seeds = split_hosts(','.join(seeds)) + seeds = split_hosts(",".join(seeds)) if hosts: - hosts = frozenset(split_hosts(','.join(hosts))) + hosts = frozenset(split_hosts(",".join(hosts))) if seeds or num_seeds: result = parse_uri(uri, validate=True) if seeds is not None: - self.assertEqual(sorted(result['nodelist']), sorted(seeds)) + self.assertEqual(sorted(result["nodelist"]), sorted(seeds)) if num_seeds is not None: - self.assertEqual(len(result['nodelist']), num_seeds) + self.assertEqual(len(result["nodelist"]), num_seeds) if options: - opts = result['options'] - if 'readpreferencetags' in opts: + opts = result["options"] + if "readpreferencetags" in opts: rpts = validate_read_preference_tags( - 'readPreferenceTags', opts.pop('readpreferencetags')) - opts['readPreferenceTags'] = rpts - self.assertEqual(result['options'], options) + "readPreferenceTags", opts.pop("readpreferencetags") + ) + opts["readPreferenceTags"] = rpts + self.assertEqual(result["options"], options) if parsed_options: for opt, expected in parsed_options.items(): - if opt == 'user': - self.assertEqual(result['username'], expected) - elif opt == 'password': - self.assertEqual(result['password'], expected) - elif opt == 'auth_database' or opt == 'db': - self.assertEqual(result['database'], expected) + if opt == "user": + self.assertEqual(result["username"], expected) + elif opt == "password": + self.assertEqual(result["password"], expected) + elif opt == "auth_database" or opt == "db": + self.assertEqual(result["database"], expected) hostname = next(iter(client_context.client.nodes))[0] # The replica set members must be configured as 'localhost'. - if hostname == 'localhost': + if hostname == "localhost": copts = client_context.default_client_options.copy() # Remove tls since SRV parsing should add it automatically. - copts.pop('tls', None) + copts.pop("tls", None) if client_context.tls: # Our test certs don't support the SRV hosts used in these # tests. 
-                copts['tlsAllowInvalidHostnames'] = True
+                copts["tlsAllowInvalidHostnames"] = True

             client = MongoClient(uri, **copts)
             if num_seeds is not None:
-                self.assertEqual(len(client._topology_settings.seeds),
-                                 num_seeds)
+                self.assertEqual(len(client._topology_settings.seeds), num_seeds)
             if hosts is not None:
-                wait_until(
-                    lambda: hosts == client.nodes,
-                    'match test hosts to client nodes')
+                wait_until(lambda: hosts == client.nodes, "match test hosts to client nodes")
             if num_hosts is not None:
-                wait_until(lambda: num_hosts == len(client.nodes),
-                           "wait to connect to num_hosts")
+                wait_until(
+                    lambda: num_hosts == len(client.nodes), "wait to connect to num_hosts"
+                )
             # XXX: we should block until SRV poller runs at least once
             # and re-run these assertions.
         else:
@@ -146,11 +145,11 @@ def run_test(self):


 def create_tests(cls):
-    for filename in glob.glob(os.path.join(cls.TEST_PATH, '*.json')):
+    for filename in glob.glob(os.path.join(cls.TEST_PATH, "*.json")):
         test_suffix, _ = os.path.splitext(os.path.basename(filename))
         with open(filename) as dns_test_file:
             test_method = create_test(json.load(dns_test_file))
-        setattr(cls, 'test_' + test_suffix, test_method)
+        setattr(cls, "test_" + test_suffix, test_method)


 create_tests(TestDNSRepl)
@@ -159,26 +158,33 @@ def create_tests(cls):


 class TestParsingErrors(unittest.TestCase):
-
     @unittest.skipUnless(_HAVE_DNSPYTHON, "DNS tests require the dnspython module")
     def test_invalid_host(self):
         self.assertRaisesRegex(
             ConfigurationError,
             "Invalid URI host: mongodb is not",
-            MongoClient, "mongodb+srv://mongodb")
+            MongoClient,
+            "mongodb+srv://mongodb",
+        )
         self.assertRaisesRegex(
             ConfigurationError,
             "Invalid URI host: mongodb.com is not",
-            MongoClient, "mongodb+srv://mongodb.com")
+            MongoClient,
+            "mongodb+srv://mongodb.com",
+        )
         self.assertRaisesRegex(
             ConfigurationError,
             "Invalid URI host: an IP address is not",
-            MongoClient, "mongodb+srv://127.0.0.1")
+            MongoClient,
+            "mongodb+srv://127.0.0.1",
+        )
         self.assertRaisesRegex(
             ConfigurationError,
             "Invalid URI host: an IP address is not",
-            MongoClient, "mongodb+srv://[::1]")
+            MongoClient,
+            "mongodb+srv://[::1]",
+        )


-if __name__ == '__main__':
+if __name__ == "__main__":
     unittest.main()
diff --git a/test/test_encryption.py b/test/test_encryption.py
index 966d9b5815..31c3dd2bcd 100644
--- a/test/test_encryption.py
+++ b/test/test_encryption.py
@@ -18,150 +18,147 @@
 import copy
 import os
 import re
-import ssl
 import socket
+import ssl
 import sys
 import textwrap
 import traceback
 import uuid
-
 from typing import Any

 from pymongo.collection import Collection

 sys.path[0:0] = [""]

+from test import (
+    CA_PEM,
+    CLIENT_PEM,
+    IntegrationTest,
+    PyMongoTestCase,
+    client_context,
+    unittest,
+)
+from test.test_bulk import BulkTestBase
+from test.utils import (
+    AllowListEventListener,
+    OvertCommandListener,
+    TestCreator,
+    TopologyEventListener,
+    camel_to_snake_args,
+    rs_or_single_client,
+    wait_until,
+)
+from test.utils_spec_runner import SpecRunner
+
 from bson import encode, json_util
-from bson.binary import (Binary,
-                         UuidRepresentation,
-                         JAVA_LEGACY,
-                         STANDARD,
-                         UUID_SUBTYPE)
+from bson.binary import JAVA_LEGACY, STANDARD, UUID_SUBTYPE, Binary, UuidRepresentation
 from bson.codec_options import CodecOptions
 from bson.errors import BSONError
 from bson.json_util import JSONOptions
 from bson.son import SON
-
 from pymongo import encryption
 from pymongo.cursor import CursorType
-from pymongo.encryption import (Algorithm,
-                                ClientEncryption)
-from pymongo.encryption_options import AutoEncryptionOpts, _HAVE_PYMONGOCRYPT
-from pymongo.errors import (BulkWriteError,
-                            ConfigurationError,
-                            EncryptionError,
-                            InvalidOperation,
-                            OperationFailure,
-                            ServerSelectionTimeoutError,
-                            WriteError)
+from pymongo.encryption import Algorithm, ClientEncryption
+from pymongo.encryption_options import _HAVE_PYMONGOCRYPT, AutoEncryptionOpts
+from pymongo.errors import (
+    BulkWriteError,
+    ConfigurationError,
+    EncryptionError,
+    InvalidOperation,
+    OperationFailure,
+    ServerSelectionTimeoutError,
+    WriteError,
+)
 from pymongo.mongo_client import MongoClient
 from pymongo.operations import InsertOne, ReplaceOne, UpdateOne
 from pymongo.write_concern import WriteConcern
-from test import (unittest, CA_PEM, CLIENT_PEM,
-                  client_context,
-                  IntegrationTest,
-                  PyMongoTestCase)
-from test.test_bulk import BulkTestBase
-from test.utils import (TestCreator,
-                        camel_to_snake_args,
-                        OvertCommandListener,
-                        TopologyEventListener,
-                        AllowListEventListener,
-                        rs_or_single_client,
-                        wait_until)
-from test.utils_spec_runner import SpecRunner
-

 def get_client_opts(client):
     return client._MongoClient__options


-KMS_PROVIDERS = {'local': {'key': b'\x00'*96}}
+KMS_PROVIDERS = {"local": {"key": b"\x00" * 96}}


 class TestAutoEncryptionOpts(PyMongoTestCase):
-    @unittest.skipIf(_HAVE_PYMONGOCRYPT, 'pymongocrypt is installed')
+    @unittest.skipIf(_HAVE_PYMONGOCRYPT, "pymongocrypt is installed")
     def test_init_requires_pymongocrypt(self):
         with self.assertRaises(ConfigurationError):
-            AutoEncryptionOpts({}, 'keyvault.datakeys')
+            AutoEncryptionOpts({}, "keyvault.datakeys")

-    @unittest.skipUnless(_HAVE_PYMONGOCRYPT, 'pymongocrypt is not installed')
+    @unittest.skipUnless(_HAVE_PYMONGOCRYPT, "pymongocrypt is not installed")
     def test_init(self):
-        opts = AutoEncryptionOpts({}, 'keyvault.datakeys')
+        opts = AutoEncryptionOpts({}, "keyvault.datakeys")
         self.assertEqual(opts._kms_providers, {})
-        self.assertEqual(opts._key_vault_namespace, 'keyvault.datakeys')
+        self.assertEqual(opts._key_vault_namespace, "keyvault.datakeys")
         self.assertEqual(opts._key_vault_client, None)
         self.assertEqual(opts._schema_map, None)
         self.assertEqual(opts._bypass_auto_encryption, False)
-        self.assertEqual(opts._mongocryptd_uri, 'mongodb://localhost:27020')
+        self.assertEqual(opts._mongocryptd_uri, "mongodb://localhost:27020")
         self.assertEqual(opts._mongocryptd_bypass_spawn, False)
-        self.assertEqual(opts._mongocryptd_spawn_path, 'mongocryptd')
-        self.assertEqual(
-            opts._mongocryptd_spawn_args, ['--idleShutdownTimeoutSecs=60'])
+        self.assertEqual(opts._mongocryptd_spawn_path, "mongocryptd")
+        self.assertEqual(opts._mongocryptd_spawn_args, ["--idleShutdownTimeoutSecs=60"])
         self.assertEqual(opts._kms_ssl_contexts, {})

-    @unittest.skipUnless(_HAVE_PYMONGOCRYPT, 'pymongocrypt is not installed')
+    @unittest.skipUnless(_HAVE_PYMONGOCRYPT, "pymongocrypt is not installed")
     def test_init_spawn_args(self):
         # User can override idleShutdownTimeoutSecs
         opts = AutoEncryptionOpts(
-            {}, 'keyvault.datakeys',
-            mongocryptd_spawn_args=['--idleShutdownTimeoutSecs=88'])
-        self.assertEqual(
-            opts._mongocryptd_spawn_args, ['--idleShutdownTimeoutSecs=88'])
+            {}, "keyvault.datakeys", mongocryptd_spawn_args=["--idleShutdownTimeoutSecs=88"]
+        )
+        self.assertEqual(opts._mongocryptd_spawn_args, ["--idleShutdownTimeoutSecs=88"])

         # idleShutdownTimeoutSecs is added by default
-        opts = AutoEncryptionOpts(
-            {}, 'keyvault.datakeys', mongocryptd_spawn_args=[])
-        self.assertEqual(
-            opts._mongocryptd_spawn_args, ['--idleShutdownTimeoutSecs=60'])
+        opts = AutoEncryptionOpts({}, "keyvault.datakeys", mongocryptd_spawn_args=[])
+        self.assertEqual(opts._mongocryptd_spawn_args, ["--idleShutdownTimeoutSecs=60"])

         # Also added when other options are given
         opts = AutoEncryptionOpts(
-            {}, 'keyvault.datakeys',
-            mongocryptd_spawn_args=['--quiet', '--port=27020'])
+            {}, "keyvault.datakeys", mongocryptd_spawn_args=["--quiet", "--port=27020"]
+        )
         self.assertEqual(
             opts._mongocryptd_spawn_args,
-            ['--quiet', '--port=27020', '--idleShutdownTimeoutSecs=60'])
+            ["--quiet", "--port=27020", "--idleShutdownTimeoutSecs=60"],
+        )

-    @unittest.skipUnless(_HAVE_PYMONGOCRYPT, 'pymongocrypt is not installed')
+    @unittest.skipUnless(_HAVE_PYMONGOCRYPT, "pymongocrypt is not installed")
     def test_init_kms_tls_options(self):
         # Error cases:
-        with self.assertRaisesRegex(
-                TypeError, r'kms_tls_options\["kmip"\] must be a dict'):
-            AutoEncryptionOpts({}, 'k.d', kms_tls_options={'kmip': 1})
+        with self.assertRaisesRegex(TypeError, r'kms_tls_options\["kmip"\] must be a dict'):
+            AutoEncryptionOpts({}, "k.d", kms_tls_options={"kmip": 1})

         tls_opts: Any
         for tls_opts in [
-                {'kmip': {'tls': True, 'tlsInsecure': True}},
-                {'kmip': {'tls': True, 'tlsAllowInvalidCertificates': True}},
-                {'kmip': {'tls': True, 'tlsAllowInvalidHostnames': True}},
-                {'kmip': {'tls': True, 'tlsDisableOCSPEndpointCheck': True}}]:
-            with self.assertRaisesRegex(
-                    ConfigurationError, 'Insecure TLS options prohibited'):
-                opts = AutoEncryptionOpts({}, 'k.d', kms_tls_options=tls_opts)
+            {"kmip": {"tls": True, "tlsInsecure": True}},
+            {"kmip": {"tls": True, "tlsAllowInvalidCertificates": True}},
+            {"kmip": {"tls": True, "tlsAllowInvalidHostnames": True}},
+            {"kmip": {"tls": True, "tlsDisableOCSPEndpointCheck": True}},
+        ]:
+            with self.assertRaisesRegex(ConfigurationError, "Insecure TLS options prohibited"):
+                opts = AutoEncryptionOpts({}, "k.d", kms_tls_options=tls_opts)
         with self.assertRaises(FileNotFoundError):
-            AutoEncryptionOpts({}, 'k.d', kms_tls_options={
-                'kmip': {'tlsCAFile': 'does-not-exist'}})
+            AutoEncryptionOpts({}, "k.d", kms_tls_options={"kmip": {"tlsCAFile": "does-not-exist"}})
         # Success cases:
         tls_opts: Any
         for tls_opts in [None, {}]:
-            opts = AutoEncryptionOpts({}, 'k.d', kms_tls_options=tls_opts)
+            opts = AutoEncryptionOpts({}, "k.d", kms_tls_options=tls_opts)
             self.assertEqual(opts._kms_ssl_contexts, {})
-        opts = AutoEncryptionOpts(
-            {}, 'k.d', kms_tls_options={'kmip': {'tls': True}, 'aws': {}})
-        ctx = opts._kms_ssl_contexts['kmip']
+        opts = AutoEncryptionOpts({}, "k.d", kms_tls_options={"kmip": {"tls": True}, "aws": {}})
+        ctx = opts._kms_ssl_contexts["kmip"]
         # On < 3.7 we check hostnames manually.
         if sys.version_info[:2] >= (3, 7):
             self.assertEqual(ctx.check_hostname, True)
             self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
-        ctx = opts._kms_ssl_contexts['aws']
+        ctx = opts._kms_ssl_contexts["aws"]
         if sys.version_info[:2] >= (3, 7):
             self.assertEqual(ctx.check_hostname, True)
             self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
         opts = AutoEncryptionOpts(
-            {}, 'k.d', kms_tls_options={'kmip': {
-                'tlsCAFile': CA_PEM, 'tlsCertificateKeyFile': CLIENT_PEM}})
-        ctx = opts._kms_ssl_contexts['kmip']
+            {},
+            "k.d",
+            kms_tls_options={"kmip": {"tlsCAFile": CA_PEM, "tlsCertificateKeyFile": CLIENT_PEM}},
+        )
+        ctx = opts._kms_ssl_contexts["kmip"]
         if sys.version_info[:2] >= (3, 7):
             self.assertEqual(ctx.check_hostname, True)
             self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
@@ -177,9 +174,9 @@ def test_default(self):
         self.addCleanup(client.close)
         self.assertEqual(get_client_opts(client).auto_encryption_opts, None)

-    @unittest.skipUnless(_HAVE_PYMONGOCRYPT, 'pymongocrypt is not installed')
+    @unittest.skipUnless(_HAVE_PYMONGOCRYPT, "pymongocrypt is not installed")
     def test_kwargs(self):
-        opts = AutoEncryptionOpts(KMS_PROVIDERS, 'keyvault.datakeys')
+        opts = AutoEncryptionOpts(KMS_PROVIDERS, "keyvault.datakeys")
         client = MongoClient(auto_encryption_opts=opts, connect=False)
         self.addCleanup(client.close)
         self.assertEqual(get_client_opts(client).auto_encryption_opts, opts)
@@ -189,7 +186,7 @@ class EncryptionIntegrationTest(IntegrationTest):
     """Base class for encryption integration tests."""

     @classmethod
-    @unittest.skipUnless(_HAVE_PYMONGOCRYPT, 'pymongocrypt is not installed')
+    @unittest.skipUnless(_HAVE_PYMONGOCRYPT, "pymongocrypt is not installed")
     @client_context.require_version_min(4, 2, -1)
     def setUpClass(cls):
         super(EncryptionIntegrationTest, cls).setUpClass()
@@ -204,16 +201,14 @@ def assertBinaryUUID(self, val):


 # Location of JSON test files.
-BASE = os.path.join(
-    os.path.dirname(os.path.realpath(__file__)), 'client-side-encryption')
-SPEC_PATH = os.path.join(BASE, 'spec')
+BASE = os.path.join(os.path.dirname(os.path.realpath(__file__)), "client-side-encryption")
+SPEC_PATH = os.path.join(BASE, "spec")

 OPTS = CodecOptions(uuid_representation=STANDARD)

 # Use SON to preserve the order of fields while parsing json. Use tz_aware
 # =False to match how CodecOptions decodes dates.
-JSON_OPTS = JSONOptions(document_class=SON, uuid_representation=STANDARD,
-                        tz_aware=False)
+JSON_OPTS = JSONOptions(document_class=SON, uuid_representation=STANDARD, tz_aware=False)


 def read(*paths):
@@ -230,38 +225,39 @@ def bson_data(*paths):


 class TestClientSimple(EncryptionIntegrationTest):
-
     def _test_auto_encrypt(self, opts):
         client = rs_or_single_client(auto_encryption_opts=opts)
         self.addCleanup(client.close)

         # Create the encrypted field's data key.
         key_vault = create_key_vault(
-            self.client.keyvault.datakeys,
-            json_data('custom', 'key-document-local.json'))
+            self.client.keyvault.datakeys, json_data("custom", "key-document-local.json")
+        )
         self.addCleanup(key_vault.drop)

         # Collection.insert_one/insert_many auto encrypts.
-        docs = [{'_id': 0, 'ssn': '000'},
-                {'_id': 1, 'ssn': '111'},
-                {'_id': 2, 'ssn': '222'},
-                {'_id': 3, 'ssn': '333'},
-                {'_id': 4, 'ssn': '444'},
-                {'_id': 5, 'ssn': '555'}]
+        docs = [
+            {"_id": 0, "ssn": "000"},
+            {"_id": 1, "ssn": "111"},
+            {"_id": 2, "ssn": "222"},
+            {"_id": 3, "ssn": "333"},
+            {"_id": 4, "ssn": "444"},
+            {"_id": 5, "ssn": "555"},
+        ]
         encrypted_coll = client.pymongo_test.test
         encrypted_coll.insert_one(docs[0])
         encrypted_coll.insert_many(docs[1:3])
         unack = encrypted_coll.with_options(write_concern=WriteConcern(w=0))
         unack.insert_one(docs[3])
         unack.insert_many(docs[4:], ordered=False)
-        wait_until(lambda: self.db.test.count_documents({}) == len(docs),
-                   'insert documents with w=0')
+        wait_until(
+            lambda: self.db.test.count_documents({}) == len(docs), "insert documents with w=0"
+        )

         # Database.command auto decrypts.
-        res = client.pymongo_test.command(
-            'find', 'test', filter={'ssn': '000'})
-        decrypted_docs = res['cursor']['firstBatch']
-        self.assertEqual(decrypted_docs, [{'_id': 0, 'ssn': '000'}])
+        res = client.pymongo_test.command("find", "test", filter={"ssn": "000"})
+        decrypted_docs = res["cursor"]["firstBatch"]
+        self.assertEqual(decrypted_docs, [{"_id": 0, "ssn": "000"}])

         # Collection.find auto decrypts.
         decrypted_docs = list(encrypted_coll.find())
@@ -280,51 +276,48 @@ def _test_auto_encrypt(self, opts):
         self.assertEqual(decrypted_docs, docs)

         # Collection.distinct auto decrypts.
-        decrypted_ssns = encrypted_coll.distinct('ssn')
-        self.assertEqual(set(decrypted_ssns), set(d['ssn'] for d in docs))
+        decrypted_ssns = encrypted_coll.distinct("ssn")
+        self.assertEqual(set(decrypted_ssns), set(d["ssn"] for d in docs))

         # Make sure the field is actually encrypted.
         for encrypted_doc in self.db.test.find():
-            self.assertIsInstance(encrypted_doc['_id'], int)
-            self.assertEncrypted(encrypted_doc['ssn'])
+            self.assertIsInstance(encrypted_doc["_id"], int)
+            self.assertEncrypted(encrypted_doc["ssn"])

         # Attempt to encrypt an unencodable object.
         with self.assertRaises(BSONError):
-            encrypted_coll.insert_one({'unencodeable': object()})
+            encrypted_coll.insert_one({"unencodeable": object()})

     def test_auto_encrypt(self):
         # Configure the encrypted field via jsonSchema.
-        json_schema = json_data('custom', 'schema.json')
+        json_schema = json_data("custom", "schema.json")
         create_with_schema(self.db.test, json_schema)
         self.addCleanup(self.db.test.drop)

-        opts = AutoEncryptionOpts(KMS_PROVIDERS, 'keyvault.datakeys')
+        opts = AutoEncryptionOpts(KMS_PROVIDERS, "keyvault.datakeys")
         self._test_auto_encrypt(opts)

     def test_auto_encrypt_local_schema_map(self):
         # Configure the encrypted field via the local schema_map option.
-        schemas = {'pymongo_test.test': json_data('custom', 'schema.json')}
-        opts = AutoEncryptionOpts(
-            KMS_PROVIDERS, 'keyvault.datakeys', schema_map=schemas)
+        schemas = {"pymongo_test.test": json_data("custom", "schema.json")}
+        opts = AutoEncryptionOpts(KMS_PROVIDERS, "keyvault.datakeys", schema_map=schemas)
         self._test_auto_encrypt(opts)

     def test_use_after_close(self):
-        opts = AutoEncryptionOpts(KMS_PROVIDERS, 'keyvault.datakeys')
+        opts = AutoEncryptionOpts(KMS_PROVIDERS, "keyvault.datakeys")
         client = rs_or_single_client(auto_encryption_opts=opts)
         self.addCleanup(client.close)

-        client.admin.command('ping')
+        client.admin.command("ping")
         client.close()
-        with self.assertRaisesRegex(InvalidOperation,
-                                    'Cannot use MongoClient after close'):
-            client.admin.command('ping')
+        with self.assertRaisesRegex(InvalidOperation, "Cannot use MongoClient after close"):
+            client.admin.command("ping")


 class TestEncryptedBulkWrite(BulkTestBase, EncryptionIntegrationTest):
-
     def test_upsert_uuid_standard_encrypte(self):
-        opts = AutoEncryptionOpts(KMS_PROVIDERS, 'keyvault.datakeys')
+        opts = AutoEncryptionOpts(KMS_PROVIDERS, "keyvault.datakeys")
         client = rs_or_single_client(auto_encryption_opts=opts)
         self.addCleanup(client.close)
@@ -332,126 +325,131 @@ def test_upsert_uuid_standard_encrypte(self):
         encrypted_coll = client.pymongo_test.test
         coll = encrypted_coll.with_options(codec_options=options)
         uuids = [uuid.uuid4() for _ in range(3)]
-        result = coll.bulk_write([
-            UpdateOne({'_id': uuids[0]}, {'$set': {'a': 0}}, upsert=True),
-            ReplaceOne({'a': 1}, {'_id': uuids[1]}, upsert=True),
-            # This is just here to make the counts right in all cases.
-            ReplaceOne({'_id': uuids[2]}, {'_id': uuids[2]}, upsert=True),
-        ])
+        result = coll.bulk_write(
+            [
+                UpdateOne({"_id": uuids[0]}, {"$set": {"a": 0}}, upsert=True),
+                ReplaceOne({"a": 1}, {"_id": uuids[1]}, upsert=True),
+                # This is just here to make the counts right in all cases.
+ ReplaceOne({"_id": uuids[2]}, {"_id": uuids[2]}, upsert=True), + ] + ) self.assertEqualResponse( - {'nMatched': 0, - 'nModified': 0, - 'nUpserted': 3, - 'nInserted': 0, - 'nRemoved': 0, - 'upserted': [{'index': 0, '_id': uuids[0]}, - {'index': 1, '_id': uuids[1]}, - {'index': 2, '_id': uuids[2]}]}, - result.bulk_api_result) + { + "nMatched": 0, + "nModified": 0, + "nUpserted": 3, + "nInserted": 0, + "nRemoved": 0, + "upserted": [ + {"index": 0, "_id": uuids[0]}, + {"index": 1, "_id": uuids[1]}, + {"index": 2, "_id": uuids[2]}, + ], + }, + result.bulk_api_result, + ) class TestClientMaxWireVersion(IntegrationTest): - @classmethod - @unittest.skipUnless(_HAVE_PYMONGOCRYPT, 'pymongocrypt is not installed') + @unittest.skipUnless(_HAVE_PYMONGOCRYPT, "pymongocrypt is not installed") def setUpClass(cls): super(TestClientMaxWireVersion, cls).setUpClass() @client_context.require_version_max(4, 0, 99) def test_raise_max_wire_version_error(self): - opts = AutoEncryptionOpts(KMS_PROVIDERS, 'keyvault.datakeys') + opts = AutoEncryptionOpts(KMS_PROVIDERS, "keyvault.datakeys") client = rs_or_single_client(auto_encryption_opts=opts) self.addCleanup(client.close) - msg = 'Auto-encryption requires a minimum MongoDB version of 4.2' + msg = "Auto-encryption requires a minimum MongoDB version of 4.2" with self.assertRaisesRegex(ConfigurationError, msg): client.test.test.insert_one({}) with self.assertRaisesRegex(ConfigurationError, msg): - client.admin.command('ping') + client.admin.command("ping") with self.assertRaisesRegex(ConfigurationError, msg): client.test.test.find_one({}) with self.assertRaisesRegex(ConfigurationError, msg): client.test.test.bulk_write([InsertOne({})]) def test_raise_unsupported_error(self): - opts = AutoEncryptionOpts(KMS_PROVIDERS, 'keyvault.datakeys') + opts = AutoEncryptionOpts(KMS_PROVIDERS, "keyvault.datakeys") client = rs_or_single_client(auto_encryption_opts=opts) self.addCleanup(client.close) - msg = 'find_raw_batches does not support auto encryption' + msg = "find_raw_batches does not support auto encryption" with self.assertRaisesRegex(InvalidOperation, msg): client.test.test.find_raw_batches({}) - msg = 'aggregate_raw_batches does not support auto encryption' + msg = "aggregate_raw_batches does not support auto encryption" with self.assertRaisesRegex(InvalidOperation, msg): client.test.test.aggregate_raw_batches([]) if client_context.is_mongos: - msg = 'Exhaust cursors are not supported by mongos' + msg = "Exhaust cursors are not supported by mongos" else: - msg = 'exhaust cursors do not support auto encryption' + msg = "exhaust cursors do not support auto encryption" with self.assertRaisesRegex(InvalidOperation, msg): next(client.test.test.find(cursor_type=CursorType.EXHAUST)) class TestExplicitSimple(EncryptionIntegrationTest): - def test_encrypt_decrypt(self): client_encryption = ClientEncryption( - KMS_PROVIDERS, 'keyvault.datakeys', client_context.client, OPTS) + KMS_PROVIDERS, "keyvault.datakeys", client_context.client, OPTS + ) self.addCleanup(client_encryption.close) # Use standard UUID representation. - key_vault = client_context.client.keyvault.get_collection( - 'datakeys', codec_options=OPTS) + key_vault = client_context.client.keyvault.get_collection("datakeys", codec_options=OPTS) self.addCleanup(key_vault.drop) # Create the encrypted field's data key. 
-        key_id = client_encryption.create_data_key(
-            'local', key_alt_names=['name'])
+        key_id = client_encryption.create_data_key("local", key_alt_names=["name"])
         self.assertBinaryUUID(key_id)
-        self.assertTrue(key_vault.find_one({'_id': key_id}))
+        self.assertTrue(key_vault.find_one({"_id": key_id}))

         # Create an unused data key to make sure filtering works.
-        unused_key_id = client_encryption.create_data_key(
-            'local', key_alt_names=['unused'])
+        unused_key_id = client_encryption.create_data_key("local", key_alt_names=["unused"])
         self.assertBinaryUUID(unused_key_id)
-        self.assertTrue(key_vault.find_one({'_id': unused_key_id}))
+        self.assertTrue(key_vault.find_one({"_id": unused_key_id}))

-        doc = {'_id': 0, 'ssn': '000'}
+        doc = {"_id": 0, "ssn": "000"}
         encrypted_ssn = client_encryption.encrypt(
-            doc['ssn'], Algorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic,
-            key_id=key_id)
+            doc["ssn"], Algorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic, key_id=key_id
+        )

         # Ensure encryption via key_alt_name for the same key produces the
         # same output.
         encrypted_ssn2 = client_encryption.encrypt(
-            doc['ssn'], Algorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic,
-            key_alt_name='name')
+            doc["ssn"], Algorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic, key_alt_name="name"
+        )
         self.assertEqual(encrypted_ssn, encrypted_ssn2)

         # Test decryption.
         decrypted_ssn = client_encryption.decrypt(encrypted_ssn)
-        self.assertEqual(decrypted_ssn, doc['ssn'])
+        self.assertEqual(decrypted_ssn, doc["ssn"])

     def test_validation(self):
         client_encryption = ClientEncryption(
-            KMS_PROVIDERS, 'keyvault.datakeys', client_context.client, OPTS)
+            KMS_PROVIDERS, "keyvault.datakeys", client_context.client, OPTS
+        )
         self.addCleanup(client_encryption.close)

-        msg = 'value to decrypt must be a bson.binary.Binary with subtype 6'
+        msg = "value to decrypt must be a bson.binary.Binary with subtype 6"
         with self.assertRaisesRegex(TypeError, msg):
-            client_encryption.decrypt('str')  # type: ignore[arg-type]
+            client_encryption.decrypt("str")  # type: ignore[arg-type]
         with self.assertRaisesRegex(TypeError, msg):
-            client_encryption.decrypt(Binary(b'123'))
+            client_encryption.decrypt(Binary(b"123"))

-        msg = 'key_id must be a bson.binary.Binary with subtype 4'
+        msg = "key_id must be a bson.binary.Binary with subtype 4"
         algo = Algorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic
         with self.assertRaisesRegex(TypeError, msg):
-            client_encryption.encrypt('str', algo, key_id=uuid.uuid4())  # type: ignore[arg-type]
+            client_encryption.encrypt("str", algo, key_id=uuid.uuid4())  # type: ignore[arg-type]
         with self.assertRaisesRegex(TypeError, msg):
-            client_encryption.encrypt('str', algo, key_id=Binary(b'123'))
+            client_encryption.encrypt("str", algo, key_id=Binary(b"123"))

     def test_bson_errors(self):
         client_encryption = ClientEncryption(
-            KMS_PROVIDERS, 'keyvault.datakeys', client_context.client, OPTS)
+            KMS_PROVIDERS, "keyvault.datakeys", client_context.client, OPTS
+        )
         self.addCleanup(client_encryption.close)

         # Attempt to encrypt an unencodable object.
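# An illustrative aside, not part of the patch: a minimal sketch of the
# explicit encrypt/decrypt round trip that TestExplicitSimple exercises,
# assuming a reachable mongod and a throwaway 96-byte "local" KMS master key.
# The key_alt_name "demo" and the SSN-style plaintext are placeholders.
import os

from bson.binary import STANDARD
from bson.codec_options import CodecOptions
from pymongo import MongoClient
from pymongo.encryption import Algorithm, ClientEncryption

client = MongoClient()
codec_opts = CodecOptions(uuid_representation=STANDARD)
kms_providers = {"local": {"key": os.urandom(96)}}  # local KMS master key
with ClientEncryption(kms_providers, "keyvault.datakeys", client, codec_opts) as ce:
    # Data keys live in the key vault collection; key_alt_names allows later
    # lookup by name instead of by key_id.
    key_id = ce.create_data_key("local", key_alt_names=["demo"])
    ciphertext = ce.encrypt(
        "000-00-0000",
        Algorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic,
        key_id=key_id,
    )
    assert ce.decrypt(ciphertext) == "000-00-0000"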
@@ -460,37 +458,40 @@ def test_bson_errors(self):
             client_encryption.encrypt(
                 unencodable_value,
                 Algorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic,
-                key_id=Binary(uuid.uuid4().bytes, UUID_SUBTYPE))
+                key_id=Binary(uuid.uuid4().bytes, UUID_SUBTYPE),
+            )

     def test_codec_options(self):
-        with self.assertRaisesRegex(TypeError, 'codec_options must be'):
+        with self.assertRaisesRegex(TypeError, "codec_options must be"):
             ClientEncryption(
-                KMS_PROVIDERS, 'keyvault.datakeys', client_context.client, None)  # type: ignore[arg-type]
+                KMS_PROVIDERS, "keyvault.datakeys", client_context.client, None  # type: ignore[arg-type]
+            )

         opts = CodecOptions(uuid_representation=JAVA_LEGACY)
         client_encryption_legacy = ClientEncryption(
-            KMS_PROVIDERS, 'keyvault.datakeys', client_context.client, opts)
+            KMS_PROVIDERS, "keyvault.datakeys", client_context.client, opts
+        )
         self.addCleanup(client_encryption_legacy.close)

         # Create the encrypted field's data key.
-        key_id = client_encryption_legacy.create_data_key('local')
+        key_id = client_encryption_legacy.create_data_key("local")

         # Encrypt a UUID with JAVA_LEGACY codec options.
         value = uuid.uuid4()
         encrypted_legacy = client_encryption_legacy.encrypt(
-            value, Algorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic,
-            key_id=key_id)
-        decrypted_value_legacy = client_encryption_legacy.decrypt(
-            encrypted_legacy)
+            value, Algorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic, key_id=key_id
+        )
+        decrypted_value_legacy = client_encryption_legacy.decrypt(encrypted_legacy)
         self.assertEqual(decrypted_value_legacy, value)

         # Encrypt the same UUID with STANDARD codec options.
         client_encryption = ClientEncryption(
-            KMS_PROVIDERS, 'keyvault.datakeys', client_context.client, OPTS)
+            KMS_PROVIDERS, "keyvault.datakeys", client_context.client, OPTS
+        )
         self.addCleanup(client_encryption.close)
         encrypted_standard = client_encryption.encrypt(
-            value, Algorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic,
-            key_id=key_id)
+            value, Algorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic, key_id=key_id
+        )
         decrypted_standard = client_encryption.decrypt(encrypted_standard)
         self.assertEqual(decrypted_standard, value)

@@ -498,163 +499,160 @@ def test_codec_options(self):
         self.assertNotEqual(encrypted_standard, encrypted_legacy)
         # Test that codec_options is applied during decryption.
         self.assertEqual(
-            client_encryption_legacy.decrypt(encrypted_standard),
-            Binary.from_uuid(value))
-        self.assertNotEqual(
-            client_encryption.decrypt(encrypted_legacy), value)
+            client_encryption_legacy.decrypt(encrypted_standard), Binary.from_uuid(value)
+        )
+        self.assertNotEqual(client_encryption.decrypt(encrypted_legacy), value)

     def test_close(self):
         client_encryption = ClientEncryption(
-            KMS_PROVIDERS, 'keyvault.datakeys', client_context.client, OPTS)
+            KMS_PROVIDERS, "keyvault.datakeys", client_context.client, OPTS
+        )
         client_encryption.close()
         # Close can be called multiple times.
         client_encryption.close()
         algo = Algorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic
-        msg = 'Cannot use closed ClientEncryption'
+        msg = "Cannot use closed ClientEncryption"
         with self.assertRaisesRegex(InvalidOperation, msg):
-            client_encryption.create_data_key('local')
+            client_encryption.create_data_key("local")
         with self.assertRaisesRegex(InvalidOperation, msg):
-            client_encryption.encrypt('val', algo, key_alt_name='name')
+            client_encryption.encrypt("val", algo, key_alt_name="name")
         with self.assertRaisesRegex(InvalidOperation, msg):
-            client_encryption.decrypt(Binary(b'', 6))
+            client_encryption.decrypt(Binary(b"", 6))

     def test_with_statement(self):
         with ClientEncryption(
-                KMS_PROVIDERS, 'keyvault.datakeys',
-                client_context.client, OPTS) as client_encryption:
+            KMS_PROVIDERS, "keyvault.datakeys", client_context.client, OPTS
+        ) as client_encryption:
             pass
-        with self.assertRaisesRegex(
-                InvalidOperation, 'Cannot use closed ClientEncryption'):
-            client_encryption.create_data_key('local')
+        with self.assertRaisesRegex(InvalidOperation, "Cannot use closed ClientEncryption"):
+            client_encryption.create_data_key("local")


 # Spec tests
 AWS_CREDS = {
-    'accessKeyId': os.environ.get('FLE_AWS_KEY', ''),
-    'secretAccessKey': os.environ.get('FLE_AWS_SECRET', '')
+    "accessKeyId": os.environ.get("FLE_AWS_KEY", ""),
+    "secretAccessKey": os.environ.get("FLE_AWS_SECRET", ""),
 }

 AWS_TEMP_CREDS = {
-    'accessKeyId': os.environ.get('CSFLE_AWS_TEMP_ACCESS_KEY_ID', ''),
-    'secretAccessKey': os.environ.get('CSFLE_AWS_TEMP_SECRET_ACCESS_KEY', ''),
-    'sessionToken': os.environ.get('CSFLE_AWS_TEMP_SESSION_TOKEN', '')
+    "accessKeyId": os.environ.get("CSFLE_AWS_TEMP_ACCESS_KEY_ID", ""),
+    "secretAccessKey": os.environ.get("CSFLE_AWS_TEMP_SECRET_ACCESS_KEY", ""),
+    "sessionToken": os.environ.get("CSFLE_AWS_TEMP_SESSION_TOKEN", ""),
 }

 AWS_TEMP_NO_SESSION_CREDS = {
-    'accessKeyId': os.environ.get('CSFLE_AWS_TEMP_ACCESS_KEY_ID', ''),
-    'secretAccessKey': os.environ.get('CSFLE_AWS_TEMP_SECRET_ACCESS_KEY', '')
+    "accessKeyId": os.environ.get("CSFLE_AWS_TEMP_ACCESS_KEY_ID", ""),
+    "secretAccessKey": os.environ.get("CSFLE_AWS_TEMP_SECRET_ACCESS_KEY", ""),
 }

 AZURE_CREDS = {
-    'tenantId': os.environ.get('FLE_AZURE_TENANTID', ''),
-    'clientId': os.environ.get('FLE_AZURE_CLIENTID', ''),
-    'clientSecret': os.environ.get('FLE_AZURE_CLIENTSECRET', '')}
+    "tenantId": os.environ.get("FLE_AZURE_TENANTID", ""),
+    "clientId": os.environ.get("FLE_AZURE_CLIENTID", ""),
+    "clientSecret": os.environ.get("FLE_AZURE_CLIENTSECRET", ""),
+}

 GCP_CREDS = {
-    'email': os.environ.get('FLE_GCP_EMAIL', ''),
-    'privateKey': os.environ.get('FLE_GCP_PRIVATEKEY', '')}
+    "email": os.environ.get("FLE_GCP_EMAIL", ""),
+    "privateKey": os.environ.get("FLE_GCP_PRIVATEKEY", ""),
+}

-KMIP = {'endpoint': os.environ.get('FLE_KMIP_ENDPOINT', 'localhost:5698')}
-KMS_TLS_OPTS = {'kmip': {'tlsCAFile': CA_PEM,
-                         'tlsCertificateKeyFile': CLIENT_PEM}}
+KMIP = {"endpoint": os.environ.get("FLE_KMIP_ENDPOINT", "localhost:5698")}
+KMS_TLS_OPTS = {"kmip": {"tlsCAFile": CA_PEM, "tlsCertificateKeyFile": CLIENT_PEM}}


 class TestSpec(SpecRunner):
-
     @classmethod
-    @unittest.skipUnless(_HAVE_PYMONGOCRYPT, 'pymongocrypt is not installed')
+    @unittest.skipUnless(_HAVE_PYMONGOCRYPT, "pymongocrypt is not installed")
     def setUpClass(cls):
         super(TestSpec, cls).setUpClass()

     def parse_auto_encrypt_opts(self, opts):
         """Parse clientOptions.autoEncryptOpts."""
         opts = camel_to_snake_args(opts)
-        kms_providers = opts['kms_providers']
-        if 'aws' in kms_providers:
-            kms_providers['aws'] = AWS_CREDS
+        kms_providers = opts["kms_providers"]
+        if "aws" in kms_providers:
+            kms_providers["aws"] = AWS_CREDS
             if not any(AWS_CREDS.values()):
-                self.skipTest('AWS environment credentials are not set')
-        if 'awsTemporary' in kms_providers:
-            kms_providers['aws'] = AWS_TEMP_CREDS
-            del kms_providers['awsTemporary']
+                self.skipTest("AWS environment credentials are not set")
+        if "awsTemporary" in kms_providers:
+            kms_providers["aws"] = AWS_TEMP_CREDS
+            del kms_providers["awsTemporary"]
             if not any(AWS_TEMP_CREDS.values()):
-                self.skipTest('AWS Temp environment credentials are not set')
-        if 'awsTemporaryNoSessionToken' in kms_providers:
-            kms_providers['aws'] = AWS_TEMP_NO_SESSION_CREDS
-            del kms_providers['awsTemporaryNoSessionToken']
+                self.skipTest("AWS Temp environment credentials are not set")
+        if "awsTemporaryNoSessionToken" in kms_providers:
+            kms_providers["aws"] = AWS_TEMP_NO_SESSION_CREDS
+            del kms_providers["awsTemporaryNoSessionToken"]
             if not any(AWS_TEMP_NO_SESSION_CREDS.values()):
-                self.skipTest('AWS Temp environment credentials are not set')
-        if 'azure' in kms_providers:
-            kms_providers['azure'] = AZURE_CREDS
+                self.skipTest("AWS Temp environment credentials are not set")
+        if "azure" in kms_providers:
+            kms_providers["azure"] = AZURE_CREDS
             if not any(AZURE_CREDS.values()):
-                self.skipTest('Azure environment credentials are not set')
-        if 'gcp' in kms_providers:
-            kms_providers['gcp'] = GCP_CREDS
+                self.skipTest("Azure environment credentials are not set")
+        if "gcp" in kms_providers:
+            kms_providers["gcp"] = GCP_CREDS
             if not any(AZURE_CREDS.values()):
-                self.skipTest('GCP environment credentials are not set')
-        if 'kmip' in kms_providers:
-            kms_providers['kmip'] = KMIP
-            opts['kms_tls_options'] = KMS_TLS_OPTS
-        if 'key_vault_namespace' not in opts:
-            opts['key_vault_namespace'] = 'keyvault.datakeys'
+                self.skipTest("GCP environment credentials are not set")
+        if "kmip" in kms_providers:
+            kms_providers["kmip"] = KMIP
+            opts["kms_tls_options"] = KMS_TLS_OPTS
+        if "key_vault_namespace" not in opts:
+            opts["key_vault_namespace"] = "keyvault.datakeys"
         opts = dict(opts)
         return AutoEncryptionOpts(**opts)

     def parse_client_options(self, opts):
         """Override clientOptions parsing to support autoEncryptOpts."""
-        encrypt_opts = opts.pop('autoEncryptOpts')
+        encrypt_opts = opts.pop("autoEncryptOpts")
         if encrypt_opts:
-            opts['auto_encryption_opts'] = self.parse_auto_encrypt_opts(
-                encrypt_opts)
+            opts["auto_encryption_opts"] = self.parse_auto_encrypt_opts(encrypt_opts)

         return super(TestSpec, self).parse_client_options(opts)

     def get_object_name(self, op):
         """Default object is collection."""
-        return op.get('object', 'collection')
+        return op.get("object", "collection")

     def maybe_skip_scenario(self, test):
         super(TestSpec, self).maybe_skip_scenario(test)
-        desc = test['description'].lower()
-        if 'type=symbol' in desc:
-            self.skipTest('PyMongo does not support the symbol type')
+        desc = test["description"].lower()
+        if "type=symbol" in desc:
+            self.skipTest("PyMongo does not support the symbol type")

     def setup_scenario(self, scenario_def):
         """Override a test's setup."""
-        key_vault_data = scenario_def['key_vault_data']
+        key_vault_data = scenario_def["key_vault_data"]
         if key_vault_data:
             coll = client_context.client.get_database(
-                'keyvault',
-                write_concern=WriteConcern(w='majority'),
-                codec_options=OPTS)['datakeys']
+                "keyvault", write_concern=WriteConcern(w="majority"), codec_options=OPTS
+            )["datakeys"]
             coll.drop()
             coll.insert_many(key_vault_data)

         db_name = self.get_scenario_db_name(scenario_def)
         coll_name = self.get_scenario_coll_name(scenario_def)
         db = client_context.client.get_database(
-            db_name, write_concern=WriteConcern(w='majority'),
-            codec_options=OPTS)
+            db_name, write_concern=WriteConcern(w="majority"), codec_options=OPTS
+        )
         coll = db[coll_name]
         coll.drop()
-        json_schema = scenario_def['json_schema']
+        json_schema = scenario_def["json_schema"]
         if json_schema:
             db.create_collection(
-                coll_name,
-                validator={'$jsonSchema': json_schema}, codec_options=OPTS)
+                coll_name, validator={"$jsonSchema": json_schema}, codec_options=OPTS
+            )
         else:
             db.create_collection(coll_name)

-        if scenario_def['data']:
+        if scenario_def["data"]:
             # Load data.
-            coll.insert_many(scenario_def['data'])
+            coll.insert_many(scenario_def["data"])

     def allowable_errors(self, op):
         """Override expected error classes."""
         errors = super(TestSpec, self).allowable_errors(op)
         # An updateOne test expects encryption to error when no $ operator
         # appears but pymongo raises a client side ValueError in this case.
-        if op['name'] == 'updateOne':
+        if op["name"] == "updateOne":
             errors += (ValueError,)
         return errors

@@ -673,40 +671,36 @@ def run_scenario(self):

 # Prose Tests
 LOCAL_MASTER_KEY = base64.b64decode(
-    b'Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ'
-    b'5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk')
+    b"Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ"
+    b"5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk"
+)

 ALL_KMS_PROVIDERS = {
-    'aws': AWS_CREDS,
-    'azure': AZURE_CREDS,
-    'gcp': GCP_CREDS,
-    'kmip': KMIP,
-    'local': {'key': LOCAL_MASTER_KEY}}
-
-LOCAL_KEY_ID = Binary(
-    base64.b64decode(b'LOCALAAAAAAAAAAAAAAAAA=='), UUID_SUBTYPE)
-AWS_KEY_ID = Binary(
-    base64.b64decode(b'AWSAAAAAAAAAAAAAAAAAAA=='), UUID_SUBTYPE)
-AZURE_KEY_ID = Binary(
-    base64.b64decode(b'AZUREAAAAAAAAAAAAAAAAA=='), UUID_SUBTYPE)
-GCP_KEY_ID = Binary(
-    base64.b64decode(b'GCPAAAAAAAAAAAAAAAAAAA=='), UUID_SUBTYPE)
-KMIP_KEY_ID = Binary(
-    base64.b64decode(b'KMIPAAAAAAAAAAAAAAAAAA=='), UUID_SUBTYPE)
+    "aws": AWS_CREDS,
+    "azure": AZURE_CREDS,
+    "gcp": GCP_CREDS,
+    "kmip": KMIP,
+    "local": {"key": LOCAL_MASTER_KEY},
+}
+
+LOCAL_KEY_ID = Binary(base64.b64decode(b"LOCALAAAAAAAAAAAAAAAAA=="), UUID_SUBTYPE)
+AWS_KEY_ID = Binary(base64.b64decode(b"AWSAAAAAAAAAAAAAAAAAAA=="), UUID_SUBTYPE)
+AZURE_KEY_ID = Binary(base64.b64decode(b"AZUREAAAAAAAAAAAAAAAAA=="), UUID_SUBTYPE)
+GCP_KEY_ID = Binary(base64.b64decode(b"GCPAAAAAAAAAAAAAAAAAAA=="), UUID_SUBTYPE)
+KMIP_KEY_ID = Binary(base64.b64decode(b"KMIPAAAAAAAAAAAAAAAAAA=="), UUID_SUBTYPE)


 def create_with_schema(coll, json_schema):
     """Create and return a Collection with a jsonSchema."""
-    coll.with_options(write_concern=WriteConcern(w='majority')).drop()
+    coll.with_options(write_concern=WriteConcern(w="majority")).drop()
     return coll.database.create_collection(
-        coll.name, validator={'$jsonSchema': json_schema}, codec_options=OPTS)
+        coll.name, validator={"$jsonSchema": json_schema}, codec_options=OPTS
+    )


 def create_key_vault(vault, *data_keys):
     """Create the key vault collection with optional data keys."""
-    vault = vault.with_options(
-        write_concern=WriteConcern(w='majority'),
-        codec_options=OPTS)
+    vault = vault.with_options(write_concern=WriteConcern(w="majority"), codec_options=OPTS)
     vault.drop()
     if data_keys:
         vault.insert_many(data_keys)
@@ -722,27 +716,29 @@ class TestDataKeyDoubleEncryption(EncryptionIntegrationTest):
     KMS_PROVIDERS = ALL_KMS_PROVIDERS

     MASTER_KEYS = {
-        'aws': {
-            'region': 'us-east-1',
-            'key': 'arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-'
-                   '4bd9-9f25-e30687b580d0'},
-        'azure': {
-            'keyVaultEndpoint': 'key-vault-csfle.vault.azure.net',
-            'keyName': 'key-name-csfle'},
-        'gcp': {
-            'projectId': 'devprod-drivers',
-            'location': 'global',
-            'keyRing': 'key-ring-csfle',
-            'keyName': 'key-name-csfle'},
-        'kmip': {},
-        'local': None
+        "aws": {
+            "region": "us-east-1",
+            "key": "arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-" "4bd9-9f25-e30687b580d0",
+        },
+        "azure": {
+            "keyVaultEndpoint": "key-vault-csfle.vault.azure.net",
+            "keyName": "key-name-csfle",
+        },
+        "gcp": {
+            "projectId": "devprod-drivers",
+            "location": "global",
+            "keyRing": "key-ring-csfle",
+            "keyName": "key-name-csfle",
+        },
+        "kmip": {},
+        "local": None,
     }

     @classmethod
-    @unittest.skipUnless(any([all(AWS_CREDS.values()),
-                              all(AZURE_CREDS.values()),
-                              all(GCP_CREDS.values())]),
-                         'No environment credentials are set')
+    @unittest.skipUnless(
+        any([all(AWS_CREDS.values()), all(AZURE_CREDS.values()), all(GCP_CREDS.values())]),
+        "No environment credentials are set",
+    )
     def setUpClass(cls):
         super(TestDataKeyDoubleEncryption, cls).setUpClass()
         cls.listener = OvertCommandListener()
@@ -759,20 +755,21 @@ def setUpClass(cls):
                         "encrypt": {
                             "keyId": "/placeholder",
                             "bsonType": "string",
-                            "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random"
+                            "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random",
                         }
                     }
-                }
+                },
             }
         }
         opts = AutoEncryptionOpts(
-            cls.KMS_PROVIDERS, 'keyvault.datakeys', schema_map=schemas,
-            kms_tls_options=KMS_TLS_OPTS)
+            cls.KMS_PROVIDERS, "keyvault.datakeys", schema_map=schemas, kms_tls_options=KMS_TLS_OPTS
+        )
         cls.client_encrypted = rs_or_single_client(
-            auto_encryption_opts=opts, uuidRepresentation='standard')
+            auto_encryption_opts=opts, uuidRepresentation="standard"
+        )
         cls.client_encryption = ClientEncryption(
-            cls.KMS_PROVIDERS, 'keyvault.datakeys', cls.client, OPTS,
-            kms_tls_options=KMS_TLS_OPTS)
+            cls.KMS_PROVIDERS, "keyvault.datakeys", cls.client, OPTS, kms_tls_options=KMS_TLS_OPTS
+        )

     @classmethod
     def tearDownClass(cls):
@@ -788,96 +785,98 @@ def run_test(self, provider_name):
         # Create data key.
         master_key: Any = self.MASTER_KEYS[provider_name]
         datakey_id = self.client_encryption.create_data_key(
-            provider_name, master_key=master_key,
-            key_alt_names=['%s_altname' % (provider_name,)])
+            provider_name, master_key=master_key, key_alt_names=["%s_altname" % (provider_name,)]
+        )
         self.assertBinaryUUID(datakey_id)
-        cmd = self.listener.results['started'][-1]
-        self.assertEqual('insert', cmd.command_name)
-        self.assertEqual({'w': 'majority'}, cmd.command.get('writeConcern'))
-        docs = list(self.vault.find({'_id': datakey_id}))
+        cmd = self.listener.results["started"][-1]
+        self.assertEqual("insert", cmd.command_name)
+        self.assertEqual({"w": "majority"}, cmd.command.get("writeConcern"))
+        docs = list(self.vault.find({"_id": datakey_id}))
         self.assertEqual(len(docs), 1)
-        self.assertEqual(docs[0]['masterKey']['provider'], provider_name)
+        self.assertEqual(docs[0]["masterKey"]["provider"], provider_name)

         # Encrypt by key_id.
         encrypted = self.client_encryption.encrypt(
-            'hello %s' % (provider_name,),
+            "hello %s" % (provider_name,),
             Algorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic,
-            key_id=datakey_id)
+            key_id=datakey_id,
+        )
         self.assertEncrypted(encrypted)
-        self.client_encrypted.db.coll.insert_one(
-            {'_id': provider_name, 'value': encrypted})
-        doc_decrypted = self.client_encrypted.db.coll.find_one(
-            {'_id': provider_name})
-        self.assertEqual(doc_decrypted['value'], 'hello %s' % (provider_name,))  # type: ignore
+        self.client_encrypted.db.coll.insert_one({"_id": provider_name, "value": encrypted})
+        doc_decrypted = self.client_encrypted.db.coll.find_one({"_id": provider_name})
+        self.assertEqual(doc_decrypted["value"], "hello %s" % (provider_name,))  # type: ignore

         # Encrypt by key_alt_name.
         encrypted_altname = self.client_encryption.encrypt(
-            'hello %s' % (provider_name,),
+            "hello %s" % (provider_name,),
             Algorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic,
-            key_alt_name='%s_altname' % (provider_name,))
+            key_alt_name="%s_altname" % (provider_name,),
+        )
         self.assertEqual(encrypted_altname, encrypted)

         # Explicitly encrypting an auto encrypted field.
-        msg = (r'Cannot encrypt element of type binData because schema '
-               r'requires that type is one of: \[ string \]')
+        msg = (
+            r"Cannot encrypt element of type binData because schema "
+            r"requires that type is one of: \[ string \]"
+        )
         with self.assertRaisesRegex(EncryptionError, msg):
-            self.client_encrypted.db.coll.insert_one(
-                {'encrypted_placeholder': encrypted})
+            self.client_encrypted.db.coll.insert_one({"encrypted_placeholder": encrypted})

     def test_data_key_local(self):
-        self.run_test('local')
+        self.run_test("local")

-    @unittest.skipUnless(any(AWS_CREDS.values()),
-                         'AWS environment credentials are not set')
+    @unittest.skipUnless(any(AWS_CREDS.values()), "AWS environment credentials are not set")
     def test_data_key_aws(self):
-        self.run_test('aws')
+        self.run_test("aws")

-    @unittest.skipUnless(any(AZURE_CREDS.values()),
-                         'Azure environment credentials are not set')
+    @unittest.skipUnless(any(AZURE_CREDS.values()), "Azure environment credentials are not set")
     def test_data_key_azure(self):
-        self.run_test('azure')
+        self.run_test("azure")

-    @unittest.skipUnless(any(GCP_CREDS.values()),
-                         'GCP environment credentials are not set')
+    @unittest.skipUnless(any(GCP_CREDS.values()), "GCP environment credentials are not set")
     def test_data_key_gcp(self):
-        self.run_test('gcp')
+        self.run_test("gcp")

     def test_data_key_kmip(self):
-        self.run_test('kmip')
+        self.run_test("kmip")


 class TestExternalKeyVault(EncryptionIntegrationTest):
-
     @staticmethod
     def kms_providers():
-        return {'local': {'key': LOCAL_MASTER_KEY}}
+        return {"local": {"key": LOCAL_MASTER_KEY}}

     def _test_external_key_vault(self, with_external_key_vault):
         self.client.db.coll.drop()
         vault = create_key_vault(
             self.client.keyvault.datakeys,
-            json_data('corpus', 'corpus-key-local.json'),
-            json_data('corpus', 'corpus-key-aws.json'))
+            json_data("corpus", "corpus-key-local.json"),
+            json_data("corpus", "corpus-key-aws.json"),
+        )
         self.addCleanup(vault.drop)

         # Configure the encrypted field via the local schema_map option.
-        schemas = {'db.coll': json_data('external', 'external-schema.json')}
+        schemas = {"db.coll": json_data("external", "external-schema.json")}
         if with_external_key_vault:
-            key_vault_client = rs_or_single_client(
-                username='fake-user', password='fake-pwd')
+            key_vault_client = rs_or_single_client(username="fake-user", password="fake-pwd")
             self.addCleanup(key_vault_client.close)
         else:
             key_vault_client = client_context.client
         opts = AutoEncryptionOpts(
-            self.kms_providers(), 'keyvault.datakeys', schema_map=schemas,
-            key_vault_client=key_vault_client)
+            self.kms_providers(),
+            "keyvault.datakeys",
+            schema_map=schemas,
+            key_vault_client=key_vault_client,
+        )

         client_encrypted = rs_or_single_client(
-            auto_encryption_opts=opts, uuidRepresentation='standard')
+            auto_encryption_opts=opts, uuidRepresentation="standard"
+        )
         self.addCleanup(client_encrypted.close)

         client_encryption = ClientEncryption(
-            self.kms_providers(), 'keyvault.datakeys', key_vault_client, OPTS)
+            self.kms_providers(), "keyvault.datakeys", key_vault_client, OPTS
+        )
         self.addCleanup(client_encryption.close)

         if with_external_key_vault:
@@ -896,14 +895,15 @@ def _test_external_key_vault(self, with_external_key_vault):
                 client_encryption.encrypt(
                     "test",
                     Algorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic,
-                    key_id=LOCAL_KEY_ID)
+                    key_id=LOCAL_KEY_ID,
+                )
             # AuthenticationFailed error.
             self.assertIsInstance(ctx.exception.cause, OperationFailure)
             self.assertEqual(ctx.exception.cause.code, 18)
         else:
             client_encryption.encrypt(
-                "test", Algorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic,
-                key_id=LOCAL_KEY_ID)
+                "test", Algorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic, key_id=LOCAL_KEY_ID
+            )

     def test_external_key_vault_1(self):
         self._test_external_key_vault(True)
@@ -913,31 +913,28 @@ def test_external_key_vault_2(self):


 class TestViews(EncryptionIntegrationTest):
-
     @staticmethod
     def kms_providers():
-        return {'local': {'key': LOCAL_MASTER_KEY}}
+        return {"local": {"key": LOCAL_MASTER_KEY}}

     def test_views_are_prohibited(self):
         self.client.db.view.drop()
-        self.client.db.create_collection('view', viewOn='coll')
+        self.client.db.create_collection("view", viewOn="coll")
         self.addCleanup(self.client.db.view.drop)

-        opts = AutoEncryptionOpts(self.kms_providers(), 'keyvault.datakeys')
+        opts = AutoEncryptionOpts(self.kms_providers(), "keyvault.datakeys")
         client_encrypted = rs_or_single_client(
-            auto_encryption_opts=opts, uuidRepresentation='standard')
+            auto_encryption_opts=opts, uuidRepresentation="standard"
+        )
         self.addCleanup(client_encrypted.close)
-        with self.assertRaisesRegex(
-                EncryptionError, 'cannot auto encrypt a view'):
+        with self.assertRaisesRegex(EncryptionError, "cannot auto encrypt a view"):
             client_encrypted.db.view.insert_one({})


 class TestCorpus(EncryptionIntegrationTest):
-
     @classmethod
-    @unittest.skipUnless(any(AWS_CREDS.values()),
-                         'AWS environment credentials are not set')
+    @unittest.skipUnless(any(AWS_CREDS.values()), "AWS environment credentials are not set")
     def setUpClass(cls):
         super(TestCorpus, cls).setUpClass()

@@ -948,141 +945,158 @@ def kms_providers():
     @staticmethod
     def fix_up_schema(json_schema):
         """Remove deprecated symbol/dbPointer types from json schema."""
-        for key in list(json_schema['properties']):
-            if '_symbol_' in key or '_dbPointer_' in key:
-                del json_schema['properties'][key]
+        for key in list(json_schema["properties"]):
+            if "_symbol_" in key or "_dbPointer_" in key:
+                del json_schema["properties"][key]
         return json_schema

     @staticmethod
     def fix_up_curpus(corpus):
         """Disallow deprecated symbol/dbPointer types from corpus test."""
         for key in corpus:
-            if '_symbol_' in key or '_dbPointer_' in key:
-                corpus[key]['allowed'] = False
+            if "_symbol_" in key or "_dbPointer_" in key:
+                corpus[key]["allowed"] = False
         return corpus

     @staticmethod
     def fix_up_curpus_encrypted(corpus_encrypted, corpus):
         """Fix the expected values for deprecated symbol/dbPointer types."""
         for key in corpus_encrypted:
-            if '_symbol_' in key or '_dbPointer_' in key:
+            if "_symbol_" in key or "_dbPointer_" in key:
                 corpus_encrypted[key] = copy.deepcopy(corpus[key])
         return corpus_encrypted

     def _test_corpus(self, opts):
         # Drop and create the collection 'db.coll' with jsonSchema.
         coll = create_with_schema(
-            self.client.db.coll,
-            self.fix_up_schema(json_data('corpus', 'corpus-schema.json')))
+            self.client.db.coll, self.fix_up_schema(json_data("corpus", "corpus-schema.json"))
+        )
         self.addCleanup(coll.drop)

         vault = create_key_vault(
             self.client.keyvault.datakeys,
-            json_data('corpus', 'corpus-key-local.json'),
-            json_data('corpus', 'corpus-key-aws.json'),
-            json_data('corpus', 'corpus-key-azure.json'),
-            json_data('corpus', 'corpus-key-gcp.json'),
-            json_data('corpus', 'corpus-key-kmip.json'))
+            json_data("corpus", "corpus-key-local.json"),
+            json_data("corpus", "corpus-key-aws.json"),
+            json_data("corpus", "corpus-key-azure.json"),
+            json_data("corpus", "corpus-key-gcp.json"),
+            json_data("corpus", "corpus-key-kmip.json"),
+        )
         self.addCleanup(vault.drop)

         client_encrypted = rs_or_single_client(
-            auto_encryption_opts=opts, uuidRepresentation='standard')
+            auto_encryption_opts=opts, uuidRepresentation="standard"
+        )
         self.addCleanup(client_encrypted.close)

         client_encryption = ClientEncryption(
-            self.kms_providers(), 'keyvault.datakeys', client_context.client,
-            OPTS, kms_tls_options=KMS_TLS_OPTS)
+            self.kms_providers(),
+            "keyvault.datakeys",
+            client_context.client,
+            OPTS,
+            kms_tls_options=KMS_TLS_OPTS,
+        )
         self.addCleanup(client_encryption.close)

-        corpus = self.fix_up_curpus(json_data('corpus', 'corpus.json'))
+        corpus = self.fix_up_curpus(json_data("corpus", "corpus.json"))
         corpus_copied: SON = SON()
         for key, value in corpus.items():
             corpus_copied[key] = copy.deepcopy(value)
-            if key in ('_id', 'altname_aws', 'altname_azure', 'altname_gcp',
-                       'altname_local', 'altname_kmip'):
+            if key in (
+                "_id",
+                "altname_aws",
+                "altname_azure",
+                "altname_gcp",
+                "altname_local",
+                "altname_kmip",
+            ):
                 continue
-            if value['method'] == 'auto':
+            if value["method"] == "auto":
                 continue
-            if value['method'] == 'explicit':
-                identifier = value['identifier']
-                self.assertIn(identifier, ('id', 'altname'))
-                kms = value['kms']
-                self.assertIn(kms, ('local', 'aws', 'azure', 'gcp', 'kmip'))
-                if identifier == 'id':
-                    if kms == 'local':
+            if value["method"] == "explicit":
+                identifier = value["identifier"]
+                self.assertIn(identifier, ("id", "altname"))
+                kms = value["kms"]
+                self.assertIn(kms, ("local", "aws", "azure", "gcp", "kmip"))
+                if identifier == "id":
+                    if kms == "local":
                         kwargs = dict(key_id=LOCAL_KEY_ID)
-                    elif kms == 'aws':
+                    elif kms == "aws":
                         kwargs = dict(key_id=AWS_KEY_ID)
-                    elif kms == 'azure':
+                    elif kms == "azure":
                         kwargs = dict(key_id=AZURE_KEY_ID)
-                    elif kms == 'gcp':
+                    elif kms == "gcp":
                         kwargs = dict(key_id=GCP_KEY_ID)
                     else:
                         kwargs = dict(key_id=KMIP_KEY_ID)
                 else:
                     kwargs = dict(key_alt_name=kms)

-                self.assertIn(value['algo'], ('det', 'rand'))
-                if value['algo'] == 'det':
-                    algo = (Algorithm.
-                            AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic)
+                self.assertIn(value["algo"], ("det", "rand"))
+                if value["algo"] == "det":
+                    algo = Algorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic
                 else:
                     algo = Algorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Random

                 try:
                     encrypted_val = client_encryption.encrypt(
-                        value['value'], algo, **kwargs)  # type: ignore[arg-type]
-                    if not value['allowed']:
-                        self.fail('encrypt should have failed: %r: %r' % (
-                            key, value))
-                    corpus_copied[key]['value'] = encrypted_val
+                        value["value"], algo, **kwargs  # type: ignore[arg-type]
+                    )
+                    if not value["allowed"]:
+                        self.fail("encrypt should have failed: %r: %r" % (key, value))
+                    corpus_copied[key]["value"] = encrypted_val
                 except Exception:
-                    if value['allowed']:
+                    if value["allowed"]:
                         tb = traceback.format_exc()
-                        self.fail('encrypt failed: %r: %r, traceback: %s' % (
-                            key, value, tb))
+                        self.fail("encrypt failed: %r: %r, traceback: %s" % (key, value, tb))

         client_encrypted.db.coll.insert_one(corpus_copied)
         corpus_decrypted = client_encrypted.db.coll.find_one()
         self.assertEqual(corpus_decrypted, corpus)

-        corpus_encrypted_expected = self.fix_up_curpus_encrypted(json_data(
-            'corpus', 'corpus-encrypted.json'), corpus)
+        corpus_encrypted_expected = self.fix_up_curpus_encrypted(
+            json_data("corpus", "corpus-encrypted.json"), corpus
+        )
         corpus_encrypted_actual = coll.find_one()
         for key, value in corpus_encrypted_actual.items():
-            if key in ('_id', 'altname_aws', 'altname_azure',
-                       'altname_gcp', 'altname_local', 'altname_kmip'):
+            if key in (
+                "_id",
+                "altname_aws",
+                "altname_azure",
+                "altname_gcp",
+                "altname_local",
+                "altname_kmip",
+            ):
                 continue

-            if value['algo'] == 'det':
-                self.assertEqual(
-                    value['value'], corpus_encrypted_expected[key]['value'],
-                    key)
-            elif value['algo'] == 'rand' and value['allowed']:
-                self.assertNotEqual(
-                    value['value'], corpus_encrypted_expected[key]['value'],
-                    key)
-
-            if value['allowed']:
-                decrypt_actual = client_encryption.decrypt(value['value'])
+            if value["algo"] == "det":
+                self.assertEqual(value["value"], corpus_encrypted_expected[key]["value"], key)
+            elif value["algo"] == "rand" and value["allowed"]:
+                self.assertNotEqual(value["value"], corpus_encrypted_expected[key]["value"], key)
+
+            if value["allowed"]:
+                decrypt_actual = client_encryption.decrypt(value["value"])
                 decrypt_expected = client_encryption.decrypt(
-                    corpus_encrypted_expected[key]['value'])
+                    corpus_encrypted_expected[key]["value"]
+                )
                 self.assertEqual(decrypt_actual, decrypt_expected, key)
             else:
-                self.assertEqual(value['value'], corpus[key]['value'], key)
+                self.assertEqual(value["value"], corpus[key]["value"], key)

     def test_corpus(self):
-        opts = AutoEncryptionOpts(self.kms_providers(), 'keyvault.datakeys',
-                                  kms_tls_options=KMS_TLS_OPTS)
+        opts = AutoEncryptionOpts(
+            self.kms_providers(), "keyvault.datakeys", kms_tls_options=KMS_TLS_OPTS
+        )
         self._test_corpus(opts)

     def test_corpus_local_schema(self):
         # Configure the encrypted field via the local schema_map option.
-        schemas = {'db.coll': self.fix_up_schema(
-            json_data('corpus', 'corpus-schema.json'))}
+        schemas = {"db.coll": self.fix_up_schema(json_data("corpus", "corpus-schema.json"))}
         opts = AutoEncryptionOpts(
-            self.kms_providers(), 'keyvault.datakeys', schema_map=schemas,
-            kms_tls_options=KMS_TLS_OPTS)
+            self.kms_providers(),
+            "keyvault.datakeys",
+            schema_map=schemas,
+            kms_tls_options=KMS_TLS_OPTS,
+        )
         self._test_corpus(opts)


@@ -1092,6 +1106,7 @@ def test_corpus_local_schema(self):

 class TestBsonSizeBatches(EncryptionIntegrationTest):
     """Prose tests for BSON size limits and batch splitting."""
+
     coll: Collection
     coll_encrypted: Collection
     client_encrypted: MongoClient
@@ -1104,24 +1119,26 @@ def setUpClass(cls):
         cls.coll = db.coll
         cls.coll.drop()
         # Configure the encrypted 'db.coll' collection via jsonSchema.
-        json_schema = json_data('limits', 'limits-schema.json')
+        json_schema = json_data("limits", "limits-schema.json")
         db.create_collection(
-            'coll', validator={'$jsonSchema': json_schema}, codec_options=OPTS,
-            write_concern=WriteConcern(w='majority'))
+            "coll",
+            validator={"$jsonSchema": json_schema},
+            codec_options=OPTS,
+            write_concern=WriteConcern(w="majority"),
+        )

         # Create the key vault.
         coll = client_context.client.get_database(
-            'keyvault',
-            write_concern=WriteConcern(w='majority'),
-            codec_options=OPTS)['datakeys']
+            "keyvault", write_concern=WriteConcern(w="majority"), codec_options=OPTS
+        )["datakeys"]
         coll.drop()
-        coll.insert_one(json_data('limits', 'limits-key.json'))
+        coll.insert_one(json_data("limits", "limits-key.json"))

-        opts = AutoEncryptionOpts(
-            {'local': {'key': LOCAL_MASTER_KEY}}, 'keyvault.datakeys')
+        opts = AutoEncryptionOpts({"local": {"key": LOCAL_MASTER_KEY}}, "keyvault.datakeys")
         cls.listener = OvertCommandListener()
         cls.client_encrypted = rs_or_single_client(
-            auto_encryption_opts=opts, event_listeners=[cls.listener])
+            auto_encryption_opts=opts, event_listeners=[cls.listener]
+        )
         cls.coll_encrypted = cls.client_encrypted.db.coll

     @classmethod
@@ -1131,103 +1148,96 @@ def tearDownClass(cls):
         super(TestBsonSizeBatches, cls).tearDownClass()

     def test_01_insert_succeeds_under_2MiB(self):
-        doc = {'_id': 'over_2mib_under_16mib', 'unencrypted': 'a' * _2_MiB}
+        doc = {"_id": "over_2mib_under_16mib", "unencrypted": "a" * _2_MiB}
         self.coll_encrypted.insert_one(doc)

         # Same with bulk_write.
-        doc['_id'] = 'over_2mib_under_16mib_bulk'
+        doc["_id"] = "over_2mib_under_16mib_bulk"
         self.coll_encrypted.bulk_write([InsertOne(doc)])

     def test_02_insert_succeeds_over_2MiB_post_encryption(self):
-        doc = {'_id': 'encryption_exceeds_2mib',
-               'unencrypted': 'a' * ((2**21) - 2000)}
-        doc.update(json_data('limits', 'limits-doc.json'))
+        doc = {"_id": "encryption_exceeds_2mib", "unencrypted": "a" * ((2**21) - 2000)}
+        doc.update(json_data("limits", "limits-doc.json"))
         self.coll_encrypted.insert_one(doc)

         # Same with bulk_write.
- doc['_id'] = 'encryption_exceeds_2mib_bulk' + doc["_id"] = "encryption_exceeds_2mib_bulk" self.coll_encrypted.bulk_write([InsertOne(doc)]) def test_03_bulk_batch_split(self): - doc1 = {'_id': 'over_2mib_1', 'unencrypted': 'a' * _2_MiB} - doc2 = {'_id': 'over_2mib_2', 'unencrypted': 'a' * _2_MiB} + doc1 = {"_id": "over_2mib_1", "unencrypted": "a" * _2_MiB} + doc2 = {"_id": "over_2mib_2", "unencrypted": "a" * _2_MiB} self.listener.reset() self.coll_encrypted.bulk_write([InsertOne(doc1), InsertOne(doc2)]) - self.assertEqual( - self.listener.started_command_names(), ['insert', 'insert']) + self.assertEqual(self.listener.started_command_names(), ["insert", "insert"]) def test_04_bulk_batch_split(self): - limits_doc = json_data('limits', 'limits-doc.json') - doc1 = {'_id': 'encryption_exceeds_2mib_1', - 'unencrypted': 'a' * (_2_MiB - 2000)} + limits_doc = json_data("limits", "limits-doc.json") + doc1 = {"_id": "encryption_exceeds_2mib_1", "unencrypted": "a" * (_2_MiB - 2000)} doc1.update(limits_doc) - doc2 = {'_id': 'encryption_exceeds_2mib_2', - 'unencrypted': 'a' * (_2_MiB - 2000)} + doc2 = {"_id": "encryption_exceeds_2mib_2", "unencrypted": "a" * (_2_MiB - 2000)} doc2.update(limits_doc) self.listener.reset() self.coll_encrypted.bulk_write([InsertOne(doc1), InsertOne(doc2)]) - self.assertEqual( - self.listener.started_command_names(), ['insert', 'insert']) + self.assertEqual(self.listener.started_command_names(), ["insert", "insert"]) def test_05_insert_succeeds_just_under_16MiB(self): - doc = {'_id': 'under_16mib', 'unencrypted': 'a' * (_16_MiB - 2000)} + doc = {"_id": "under_16mib", "unencrypted": "a" * (_16_MiB - 2000)} self.coll_encrypted.insert_one(doc) # Same with bulk_write. - doc['_id'] = 'under_16mib_bulk' + doc["_id"] = "under_16mib_bulk" self.coll_encrypted.bulk_write([InsertOne(doc)]) def test_06_insert_fails_over_16MiB(self): - limits_doc = json_data('limits', 'limits-doc.json') - doc = {'_id': 'encryption_exceeds_16mib', - 'unencrypted': 'a' * (_16_MiB - 2000)} + limits_doc = json_data("limits", "limits-doc.json") + doc = {"_id": "encryption_exceeds_16mib", "unencrypted": "a" * (_16_MiB - 2000)} doc.update(limits_doc) - with self.assertRaisesRegex(WriteError, 'object to insert too large'): + with self.assertRaisesRegex(WriteError, "object to insert too large"): self.coll_encrypted.insert_one(doc) # Same with bulk_write. 
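The batch splitting asserted in tests 03 and 04 can be reproduced outside the suite; a rough sketch, assuming a local mongod with mongocryptd/pymongocrypt available, a throwaway 96-byte local master key, and a db.coll configured for encryption as in this class::

    import os

    from pymongo import InsertOne, MongoClient
    from pymongo.encryption_options import AutoEncryptionOpts

    # With auto-encryption enabled the driver caps each write command at
    # 2MiB instead of 16MiB, so two ~2MiB documents are sent as two
    # separate insert commands.
    opts = AutoEncryptionOpts(
        {"local": {"key": os.urandom(96)}}, "keyvault.datakeys"
    )
    client = MongoClient(auto_encryption_opts=opts)
    requests = [
        InsertOne({"_id": i, "unencrypted": "a" * 2**21}) for i in range(2)
    ]
    result = client.db.coll.bulk_write(requests)
    assert result.inserted_count == 2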
- doc['_id'] = 'encryption_exceeds_16mib_bulk' + doc["_id"] = "encryption_exceeds_16mib_bulk" with self.assertRaises(BulkWriteError) as ctx: self.coll_encrypted.bulk_write([InsertOne(doc)]) - err = ctx.exception.details['writeErrors'][0] - self.assertEqual(2, err['code']) - self.assertIn('object to insert too large', err['errmsg']) + err = ctx.exception.details["writeErrors"][0] + self.assertEqual(2, err["code"]) + self.assertIn("object to insert too large", err["errmsg"]) class TestCustomEndpoint(EncryptionIntegrationTest): """Prose tests for creating data keys with a custom endpoint.""" @classmethod - @unittest.skipUnless(any([all(AWS_CREDS.values()), - all(AZURE_CREDS.values()), - all(GCP_CREDS.values())]), - 'No environment credentials are set') + @unittest.skipUnless( + any([all(AWS_CREDS.values()), all(AZURE_CREDS.values()), all(GCP_CREDS.values())]), + "No environment credentials are set", + ) def setUpClass(cls): super(TestCustomEndpoint, cls).setUpClass() def setUp(self): - kms_providers = {'aws': AWS_CREDS, - 'azure': AZURE_CREDS, - 'gcp': GCP_CREDS, - 'kmip': KMIP} + kms_providers = {"aws": AWS_CREDS, "azure": AZURE_CREDS, "gcp": GCP_CREDS, "kmip": KMIP} self.client_encryption = ClientEncryption( kms_providers=kms_providers, - key_vault_namespace='keyvault.datakeys', + key_vault_namespace="keyvault.datakeys", key_vault_client=client_context.client, codec_options=OPTS, - kms_tls_options=KMS_TLS_OPTS) + kms_tls_options=KMS_TLS_OPTS, + ) kms_providers_invalid = copy.deepcopy(kms_providers) - kms_providers_invalid['azure']['identityPlatformEndpoint'] = 'doesnotexist.invalid:443' - kms_providers_invalid['gcp']['endpoint'] = 'doesnotexist.invalid:443' - kms_providers_invalid['kmip']['endpoint'] = 'doesnotexist.local:5698' + kms_providers_invalid["azure"]["identityPlatformEndpoint"] = "doesnotexist.invalid:443" + kms_providers_invalid["gcp"]["endpoint"] = "doesnotexist.invalid:443" + kms_providers_invalid["kmip"]["endpoint"] = "doesnotexist.local:5698" self.client_encryption_invalid = ClientEncryption( kms_providers=kms_providers_invalid, - key_vault_namespace='keyvault.datakeys', + key_vault_namespace="keyvault.datakeys", key_vault_client=client_context.client, codec_options=OPTS, - kms_tls_options=KMS_TLS_OPTS) + kms_tls_options=KMS_TLS_OPTS, + ) self._kmip_host_error = None self._invalid_host_error = None @@ -1236,131 +1246,134 @@ def tearDown(self): self.client_encryption_invalid.close() def run_test_expected_success(self, provider_name, master_key): - data_key_id = self.client_encryption.create_data_key( - provider_name, master_key=master_key) + data_key_id = self.client_encryption.create_data_key(provider_name, master_key=master_key) encrypted = self.client_encryption.encrypt( - 'test', Algorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic, - key_id=data_key_id) - self.assertEqual('test', self.client_encryption.decrypt(encrypted)) + "test", Algorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic, key_id=data_key_id + ) + self.assertEqual("test", self.client_encryption.decrypt(encrypted)) - @unittest.skipUnless(any(AWS_CREDS.values()), - 'AWS environment credentials are not set') + @unittest.skipUnless(any(AWS_CREDS.values()), "AWS environment credentials are not set") def test_01_aws_region_key(self): self.run_test_expected_success( - 'aws', - {"region": "us-east-1", - "key": ("arn:aws:kms:us-east-1:579766882180:key/" - "89fcc2c4-08b0-4bd9-9f25-e30687b580d0")}) + "aws", + { + "region": "us-east-1", + "key": ( + "arn:aws:kms:us-east-1:579766882180:key/" 
"89fcc2c4-08b0-4bd9-9f25-e30687b580d0" + ), + }, + ) - @unittest.skipUnless(any(AWS_CREDS.values()), - 'AWS environment credentials are not set') + @unittest.skipUnless(any(AWS_CREDS.values()), "AWS environment credentials are not set") def test_02_aws_region_key_endpoint(self): self.run_test_expected_success( - 'aws', - {"region": "us-east-1", - "key": ("arn:aws:kms:us-east-1:579766882180:key/" - "89fcc2c4-08b0-4bd9-9f25-e30687b580d0"), - "endpoint": "kms.us-east-1.amazonaws.com"}) - - @unittest.skipUnless(any(AWS_CREDS.values()), - 'AWS environment credentials are not set') + "aws", + { + "region": "us-east-1", + "key": ( + "arn:aws:kms:us-east-1:579766882180:key/" "89fcc2c4-08b0-4bd9-9f25-e30687b580d0" + ), + "endpoint": "kms.us-east-1.amazonaws.com", + }, + ) + + @unittest.skipUnless(any(AWS_CREDS.values()), "AWS environment credentials are not set") def test_03_aws_region_key_endpoint_port(self): self.run_test_expected_success( - 'aws', - {"region": "us-east-1", - "key": ("arn:aws:kms:us-east-1:579766882180:key/" - "89fcc2c4-08b0-4bd9-9f25-e30687b580d0"), - "endpoint": "kms.us-east-1.amazonaws.com:443"}) - - @unittest.skipUnless(any(AWS_CREDS.values()), - 'AWS environment credentials are not set') + "aws", + { + "region": "us-east-1", + "key": ( + "arn:aws:kms:us-east-1:579766882180:key/" "89fcc2c4-08b0-4bd9-9f25-e30687b580d0" + ), + "endpoint": "kms.us-east-1.amazonaws.com:443", + }, + ) + + @unittest.skipUnless(any(AWS_CREDS.values()), "AWS environment credentials are not set") def test_04_aws_endpoint_invalid_port(self): master_key = { "region": "us-east-1", - "key": ("arn:aws:kms:us-east-1:579766882180:key/" - "89fcc2c4-08b0-4bd9-9f25-e30687b580d0"), - "endpoint": "kms.us-east-1.amazonaws.com:12345" + "key": ( + "arn:aws:kms:us-east-1:579766882180:key/" "89fcc2c4-08b0-4bd9-9f25-e30687b580d0" + ), + "endpoint": "kms.us-east-1.amazonaws.com:12345", } with self.assertRaises(EncryptionError) as ctx: - self.client_encryption.create_data_key( - 'aws', master_key=master_key) + self.client_encryption.create_data_key("aws", master_key=master_key) self.assertIsInstance(ctx.exception.cause, socket.error) - @unittest.skipUnless(any(AWS_CREDS.values()), - 'AWS environment credentials are not set') + @unittest.skipUnless(any(AWS_CREDS.values()), "AWS environment credentials are not set") def test_05_aws_endpoint_wrong_region(self): master_key = { "region": "us-east-1", - "key": ("arn:aws:kms:us-east-1:579766882180:key/" - "89fcc2c4-08b0-4bd9-9f25-e30687b580d0"), - "endpoint": "kms.us-east-2.amazonaws.com" + "key": ( + "arn:aws:kms:us-east-1:579766882180:key/" "89fcc2c4-08b0-4bd9-9f25-e30687b580d0" + ), + "endpoint": "kms.us-east-2.amazonaws.com", } # The full error should be something like: # "Credential should be scoped to a valid region, not 'us-east-1'" # but we only check for "us-east-1" to avoid breaking on slight # changes to AWS' error message. 
- with self.assertRaisesRegex(EncryptionError, 'us-east-1'): - self.client_encryption.create_data_key( - 'aws', master_key=master_key) + with self.assertRaisesRegex(EncryptionError, "us-east-1"): + self.client_encryption.create_data_key("aws", master_key=master_key) - @unittest.skipUnless(any(AWS_CREDS.values()), - 'AWS environment credentials are not set') + @unittest.skipUnless(any(AWS_CREDS.values()), "AWS environment credentials are not set") def test_06_aws_endpoint_invalid_host(self): master_key = { "region": "us-east-1", - "key": ("arn:aws:kms:us-east-1:579766882180:key/" - "89fcc2c4-08b0-4bd9-9f25-e30687b580d0"), - "endpoint": "doesnotexist.invalid" + "key": ( + "arn:aws:kms:us-east-1:579766882180:key/" "89fcc2c4-08b0-4bd9-9f25-e30687b580d0" + ), + "endpoint": "doesnotexist.invalid", } with self.assertRaisesRegex(EncryptionError, self.invalid_host_error): - self.client_encryption.create_data_key( - 'aws', master_key=master_key) + self.client_encryption.create_data_key("aws", master_key=master_key) - @unittest.skipUnless(any(AZURE_CREDS.values()), - 'Azure environment credentials are not set') + @unittest.skipUnless(any(AZURE_CREDS.values()), "Azure environment credentials are not set") def test_07_azure(self): - master_key = {'keyVaultEndpoint': 'key-vault-csfle.vault.azure.net', - 'keyName': 'key-name-csfle'} - self.run_test_expected_success('azure', master_key) + master_key = { + "keyVaultEndpoint": "key-vault-csfle.vault.azure.net", + "keyName": "key-name-csfle", + } + self.run_test_expected_success("azure", master_key) # The full error should be something like: # "[Errno 8] nodename nor servname provided, or not known" with self.assertRaisesRegex(EncryptionError, self.invalid_host_error): - self.client_encryption_invalid.create_data_key( - 'azure', master_key=master_key) + self.client_encryption_invalid.create_data_key("azure", master_key=master_key) - @unittest.skipUnless(any(GCP_CREDS.values()), - 'GCP environment credentials are not set') + @unittest.skipUnless(any(GCP_CREDS.values()), "GCP environment credentials are not set") def test_08_gcp_valid_endpoint(self): master_key = { "projectId": "devprod-drivers", "location": "global", "keyRing": "key-ring-csfle", "keyName": "key-name-csfle", - "endpoint": "cloudkms.googleapis.com:443"} - self.run_test_expected_success('gcp', master_key) + "endpoint": "cloudkms.googleapis.com:443", + } + self.run_test_expected_success("gcp", master_key) # The full error should be something like: # "[Errno 8] nodename nor servname provided, or not known" with self.assertRaisesRegex(EncryptionError, self.invalid_host_error): - self.client_encryption_invalid.create_data_key( - 'gcp', master_key=master_key) + self.client_encryption_invalid.create_data_key("gcp", master_key=master_key) - @unittest.skipUnless(any(GCP_CREDS.values()), - 'GCP environment credentials are not set') + @unittest.skipUnless(any(GCP_CREDS.values()), "GCP environment credentials are not set") def test_09_gcp_invalid_endpoint(self): master_key = { "projectId": "devprod-drivers", "location": "global", "keyRing": "key-ring-csfle", "keyName": "key-name-csfle", - "endpoint": "doesnotexist.invalid:443"} + "endpoint": "doesnotexist.invalid:443", + } # The full error should be something like: # "Invalid KMS response, no access_token returned. 
HTTP status=200" with self.assertRaisesRegex(EncryptionError, "Invalid KMS response"): - self.client_encryption.create_data_key( - 'gcp', master_key=master_key) + self.client_encryption.create_data_key("gcp", master_key=master_key) def dns_error(self, host, port): # The full error should be something like: @@ -1372,96 +1385,93 @@ def dns_error(self, host, port): @property def invalid_host_error(self): if self._invalid_host_error is None: - self._invalid_host_error = self.dns_error( - 'doesnotexist.invalid', 443) + self._invalid_host_error = self.dns_error("doesnotexist.invalid", 443) return self._invalid_host_error @property def kmip_host_error(self): if self._kmip_host_error is None: - self._kmip_host_error = self.dns_error('doesnotexist.local', 5698) + self._kmip_host_error = self.dns_error("doesnotexist.local", 5698) return self._kmip_host_error def test_10_kmip_invalid_endpoint(self): - key = {'keyId': '1'} - self.run_test_expected_success('kmip', key) + key = {"keyId": "1"} + self.run_test_expected_success("kmip", key) with self.assertRaisesRegex(EncryptionError, self.kmip_host_error): - self.client_encryption_invalid.create_data_key('kmip', key) + self.client_encryption_invalid.create_data_key("kmip", key) def test_11_kmip_master_key_endpoint(self): - key = {'keyId': '1', 'endpoint': KMIP['endpoint']} - self.run_test_expected_success('kmip', key) + key = {"keyId": "1", "endpoint": KMIP["endpoint"]} + self.run_test_expected_success("kmip", key) # Override invalid endpoint: - data_key_id = self.client_encryption_invalid.create_data_key( - 'kmip', master_key=key) + data_key_id = self.client_encryption_invalid.create_data_key("kmip", master_key=key) encrypted = self.client_encryption_invalid.encrypt( - 'test', Algorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic, - key_id=data_key_id) - self.assertEqual( - 'test', self.client_encryption_invalid.decrypt(encrypted)) + "test", Algorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic, key_id=data_key_id + ) + self.assertEqual("test", self.client_encryption_invalid.decrypt(encrypted)) def test_12_kmip_master_key_invalid_endpoint(self): - key = {'keyId': '1', 'endpoint': 'doesnotexist.local:5698'} + key = {"keyId": "1", "endpoint": "doesnotexist.local:5698"} with self.assertRaisesRegex(EncryptionError, self.kmip_host_error): - self.client_encryption.create_data_key('kmip', key) + self.client_encryption.create_data_key("kmip", key) class AzureGCPEncryptionTestMixin(object): DEK = None KMS_PROVIDER_MAP = None - KEYVAULT_DB = 'keyvault' - KEYVAULT_COLL = 'datakeys' + KEYVAULT_DB = "keyvault" + KEYVAULT_COLL = "datakeys" client: MongoClient def setUp(self): - keyvault = self.client.get_database( - self.KEYVAULT_DB).get_collection( - self.KEYVAULT_COLL) + keyvault = self.client.get_database(self.KEYVAULT_DB).get_collection(self.KEYVAULT_COLL) create_key_vault(keyvault, self.DEK) def _test_explicit(self, expectation): client_encryption = ClientEncryption( self.KMS_PROVIDER_MAP, # type: ignore[arg-type] - '.'.join([self.KEYVAULT_DB, self.KEYVAULT_COLL]), + ".".join([self.KEYVAULT_DB, self.KEYVAULT_COLL]), client_context.client, - OPTS) + OPTS, + ) self.addCleanup(client_encryption.close) ciphertext = client_encryption.encrypt( - 'string0', + "string0", algorithm=Algorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic, - key_id=Binary.from_uuid(self.DEK['_id'], STANDARD)) + key_id=Binary.from_uuid(self.DEK["_id"], STANDARD), + ) self.assertEqual(bytes(ciphertext), base64.b64decode(expectation)) - self.assertEqual(client_encryption.decrypt(ciphertext), 
'string0') + self.assertEqual(client_encryption.decrypt(ciphertext), "string0") def _test_automatic(self, expectation_extjson, payload): encrypted_db = "db" encrypted_coll = "coll" - keyvault_namespace = '.'.join([self.KEYVAULT_DB, self.KEYVAULT_COLL]) + keyvault_namespace = ".".join([self.KEYVAULT_DB, self.KEYVAULT_COLL]) encryption_opts = AutoEncryptionOpts( self.KMS_PROVIDER_MAP, # type: ignore[arg-type] keyvault_namespace, - schema_map=self.SCHEMA_MAP) + schema_map=self.SCHEMA_MAP, + ) - insert_listener = AllowListEventListener('insert') + insert_listener = AllowListEventListener("insert") client = rs_or_single_client( - auto_encryption_opts=encryption_opts, - event_listeners=[insert_listener]) + auto_encryption_opts=encryption_opts, event_listeners=[insert_listener] + ) self.addCleanup(client.close) coll = client.get_database(encrypted_db).get_collection( - encrypted_coll, codec_options=OPTS, - write_concern=WriteConcern("majority")) + encrypted_coll, codec_options=OPTS, write_concern=WriteConcern("majority") + ) coll.drop() - expected_document = json_util.loads( - expectation_extjson, json_options=JSON_OPTS) + expected_document = json_util.loads(expectation_extjson, json_options=JSON_OPTS) coll.insert_one(payload) - event = insert_listener.results['started'][0] - inserted_doc = event.command['documents'][0] + event = insert_listener.results["started"][0] + inserted_doc = event.command["documents"][0] for key, value in expected_document.items(): self.assertEqual(value, inserted_doc[key]) @@ -1471,108 +1481,112 @@ def _test_automatic(self, expectation_extjson, payload): self.assertEqual(output_doc[key], value) -class TestAzureEncryption(AzureGCPEncryptionTestMixin, - EncryptionIntegrationTest): +class TestAzureEncryption(AzureGCPEncryptionTestMixin, EncryptionIntegrationTest): @classmethod - @unittest.skipUnless(any(AZURE_CREDS.values()), - 'Azure environment credentials are not set') + @unittest.skipUnless(any(AZURE_CREDS.values()), "Azure environment credentials are not set") def setUpClass(cls): - cls.KMS_PROVIDER_MAP = {'azure': AZURE_CREDS} - cls.DEK = json_data(BASE, 'custom', 'azure-dek.json') - cls.SCHEMA_MAP = json_data(BASE, 'custom', 'azure-gcp-schema.json') + cls.KMS_PROVIDER_MAP = {"azure": AZURE_CREDS} + cls.DEK = json_data(BASE, "custom", "azure-dek.json") + cls.SCHEMA_MAP = json_data(BASE, "custom", "azure-gcp-schema.json") super(TestAzureEncryption, cls).setUpClass() def test_explicit(self): return self._test_explicit( - 'AQGVERPgAAAAAAAAAAAAAAAC5DbBSwPwfSlBrDtRuglvNvCXD1KzDuCKY2P+4bRFtHDjpTOE2XuytPAUaAbXf1orsPq59PVZmsbTZbt2CB8qaQ==') + "AQGVERPgAAAAAAAAAAAAAAAC5DbBSwPwfSlBrDtRuglvNvCXD1KzDuCKY2P+4bRFtHDjpTOE2XuytPAUaAbXf1orsPq59PVZmsbTZbt2CB8qaQ==" + ) def test_automatic(self): - expected_document_extjson = textwrap.dedent(""" + expected_document_extjson = textwrap.dedent( + """ {"secret_azure": { "$binary": { "base64": "AQGVERPgAAAAAAAAAAAAAAAC5DbBSwPwfSlBrDtRuglvNvCXD1KzDuCKY2P+4bRFtHDjpTOE2XuytPAUaAbXf1orsPq59PVZmsbTZbt2CB8qaQ==", "subType": "06"} - }}""") - return self._test_automatic( - expected_document_extjson, {"secret_azure": "string0"}) + }}""" + ) + return self._test_automatic(expected_document_extjson, {"secret_azure": "string0"}) -class TestGCPEncryption(AzureGCPEncryptionTestMixin, - EncryptionIntegrationTest): +class TestGCPEncryption(AzureGCPEncryptionTestMixin, EncryptionIntegrationTest): @classmethod - @unittest.skipUnless(any(GCP_CREDS.values()), - 'GCP environment credentials are not set') + @unittest.skipUnless(any(GCP_CREDS.values()), "GCP 
environment credentials are not set") def setUpClass(cls): - cls.KMS_PROVIDER_MAP = {'gcp': GCP_CREDS} - cls.DEK = json_data(BASE, 'custom', 'gcp-dek.json') - cls.SCHEMA_MAP = json_data(BASE, 'custom', 'azure-gcp-schema.json') + cls.KMS_PROVIDER_MAP = {"gcp": GCP_CREDS} + cls.DEK = json_data(BASE, "custom", "gcp-dek.json") + cls.SCHEMA_MAP = json_data(BASE, "custom", "azure-gcp-schema.json") super(TestGCPEncryption, cls).setUpClass() def test_explicit(self): return self._test_explicit( - 'ARgj/gAAAAAAAAAAAAAAAAACwFd+Y5Ojw45GUXNvbcIpN9YkRdoHDHkR4kssdn0tIMKlDQOLFkWFY9X07IRlXsxPD8DcTiKnl6XINK28vhcGlg==') + "ARgj/gAAAAAAAAAAAAAAAAACwFd+Y5Ojw45GUXNvbcIpN9YkRdoHDHkR4kssdn0tIMKlDQOLFkWFY9X07IRlXsxPD8DcTiKnl6XINK28vhcGlg==" + ) def test_automatic(self): - expected_document_extjson = textwrap.dedent(""" + expected_document_extjson = textwrap.dedent( + """ {"secret_gcp": { "$binary": { "base64": "ARgj/gAAAAAAAAAAAAAAAAACwFd+Y5Ojw45GUXNvbcIpN9YkRdoHDHkR4kssdn0tIMKlDQOLFkWFY9X07IRlXsxPD8DcTiKnl6XINK28vhcGlg==", "subType": "06"} - }}""") - return self._test_automatic( - expected_document_extjson, {"secret_gcp": "string0"}) + }}""" + ) + return self._test_automatic(expected_document_extjson, {"secret_gcp": "string0"}) # https://github.com/mongodb/specifications/blob/master/source/client-side-encryption/tests/README.rst#deadlock-tests class TestDeadlockProse(EncryptionIntegrationTest): def setUp(self): self.client_test = rs_or_single_client( - maxPoolSize=1, readConcernLevel='majority', w='majority', - uuidRepresentation='standard') + maxPoolSize=1, readConcernLevel="majority", w="majority", uuidRepresentation="standard" + ) self.addCleanup(self.client_test.close) self.client_keyvault_listener = OvertCommandListener() self.client_keyvault = rs_or_single_client( - maxPoolSize=1, readConcernLevel='majority', w='majority', - event_listeners=[self.client_keyvault_listener]) + maxPoolSize=1, + readConcernLevel="majority", + w="majority", + event_listeners=[self.client_keyvault_listener], + ) self.addCleanup(self.client_keyvault.close) self.client_test.keyvault.datakeys.drop() self.client_test.db.coll.drop() - self.client_test.keyvault.datakeys.insert_one( - json_data('external', 'external-key.json')) + self.client_test.keyvault.datakeys.insert_one(json_data("external", "external-key.json")) _ = self.client_test.db.create_collection( - 'coll', validator={'$jsonSchema': json_data( - 'external', 'external-schema.json')}, - codec_options=OPTS) + "coll", + validator={"$jsonSchema": json_data("external", "external-schema.json")}, + codec_options=OPTS, + ) client_encryption = ClientEncryption( - kms_providers={'local': {'key': LOCAL_MASTER_KEY}}, - key_vault_namespace='keyvault.datakeys', - key_vault_client=self.client_test, codec_options=OPTS) + kms_providers={"local": {"key": LOCAL_MASTER_KEY}}, + key_vault_namespace="keyvault.datakeys", + key_vault_client=self.client_test, + codec_options=OPTS, + ) self.ciphertext = client_encryption.encrypt( - 'string0', Algorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic, - key_alt_name='local') + "string0", Algorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic, key_alt_name="local" + ) client_encryption.close() self.client_listener = OvertCommandListener() self.topology_listener = TopologyEventListener() - self.optargs = ({'local': {'key': LOCAL_MASTER_KEY}}, 'keyvault.datakeys') + self.optargs = ({"local": {"key": LOCAL_MASTER_KEY}}, "keyvault.datakeys") def _run_test(self, max_pool_size, auto_encryption_opts): client_encrypted = rs_or_single_client( - 
readConcernLevel='majority', - w='majority', + readConcernLevel="majority", + w="majority", maxPoolSize=max_pool_size, auto_encryption_opts=auto_encryption_opts, - event_listeners=[self.client_listener, self.topology_listener]) + event_listeners=[self.client_listener, self.topology_listener], + ) if auto_encryption_opts._bypass_auto_encryption == True: - self.client_test.db.coll.insert_one( - {"_id": 0, "encrypted": self.ciphertext}) + self.client_test.db.coll.insert_one({"_id": 0, "encrypted": self.ciphertext}) elif auto_encryption_opts._bypass_auto_encryption == False: - client_encrypted.db.coll.insert_one( - {"_id": 0, "encrypted": "string0"}) + client_encrypted.db.coll.insert_one({"_id": 0, "encrypted": "string0"}) else: raise RuntimeError("bypass_auto_encryption must be a bool") @@ -1582,162 +1596,170 @@ def _run_test(self, max_pool_size, auto_encryption_opts): self.addCleanup(client_encrypted.close) def test_case_1(self): - self._run_test(max_pool_size=1, - auto_encryption_opts=AutoEncryptionOpts( - *self.optargs, - bypass_auto_encryption=False, - key_vault_client=None)) + self._run_test( + max_pool_size=1, + auto_encryption_opts=AutoEncryptionOpts( + *self.optargs, bypass_auto_encryption=False, key_vault_client=None + ), + ) - cev = self.client_listener.results['started'] + cev = self.client_listener.results["started"] self.assertEqual(len(cev), 4) - self.assertEqual(cev[0].command_name, 'listCollections') - self.assertEqual(cev[0].database_name, 'db') - self.assertEqual(cev[1].command_name, 'find') - self.assertEqual(cev[1].database_name, 'keyvault') - self.assertEqual(cev[2].command_name, 'insert') - self.assertEqual(cev[2].database_name, 'db') - self.assertEqual(cev[3].command_name, 'find') - self.assertEqual(cev[3].database_name, 'db') + self.assertEqual(cev[0].command_name, "listCollections") + self.assertEqual(cev[0].database_name, "db") + self.assertEqual(cev[1].command_name, "find") + self.assertEqual(cev[1].database_name, "keyvault") + self.assertEqual(cev[2].command_name, "insert") + self.assertEqual(cev[2].database_name, "db") + self.assertEqual(cev[3].command_name, "find") + self.assertEqual(cev[3].database_name, "db") - self.assertEqual(len(self.topology_listener.results['opened']), 2) + self.assertEqual(len(self.topology_listener.results["opened"]), 2) def test_case_2(self): - self._run_test(max_pool_size=1, - auto_encryption_opts=AutoEncryptionOpts( - *self.optargs, - bypass_auto_encryption=False, - key_vault_client=self.client_keyvault)) + self._run_test( + max_pool_size=1, + auto_encryption_opts=AutoEncryptionOpts( + *self.optargs, bypass_auto_encryption=False, key_vault_client=self.client_keyvault + ), + ) - cev = self.client_listener.results['started'] + cev = self.client_listener.results["started"] self.assertEqual(len(cev), 3) - self.assertEqual(cev[0].command_name, 'listCollections') - self.assertEqual(cev[0].database_name, 'db') - self.assertEqual(cev[1].command_name, 'insert') - self.assertEqual(cev[1].database_name, 'db') - self.assertEqual(cev[2].command_name, 'find') - self.assertEqual(cev[2].database_name, 'db') - - cev = self.client_keyvault_listener.results['started'] + self.assertEqual(cev[0].command_name, "listCollections") + self.assertEqual(cev[0].database_name, "db") + self.assertEqual(cev[1].command_name, "insert") + self.assertEqual(cev[1].database_name, "db") + self.assertEqual(cev[2].command_name, "find") + self.assertEqual(cev[2].database_name, "db") + + cev = self.client_keyvault_listener.results["started"] self.assertEqual(len(cev), 1) - 
self.assertEqual(cev[0].command_name, 'find') - self.assertEqual(cev[0].database_name, 'keyvault') + self.assertEqual(cev[0].command_name, "find") + self.assertEqual(cev[0].database_name, "keyvault") - self.assertEqual(len(self.topology_listener.results['opened']), 2) + self.assertEqual(len(self.topology_listener.results["opened"]), 2) def test_case_3(self): - self._run_test(max_pool_size=1, - auto_encryption_opts=AutoEncryptionOpts( - *self.optargs, - bypass_auto_encryption=True, - key_vault_client=None)) + self._run_test( + max_pool_size=1, + auto_encryption_opts=AutoEncryptionOpts( + *self.optargs, bypass_auto_encryption=True, key_vault_client=None + ), + ) - cev = self.client_listener.results['started'] + cev = self.client_listener.results["started"] self.assertEqual(len(cev), 2) - self.assertEqual(cev[0].command_name, 'find') - self.assertEqual(cev[0].database_name, 'db') - self.assertEqual(cev[1].command_name, 'find') - self.assertEqual(cev[1].database_name, 'keyvault') + self.assertEqual(cev[0].command_name, "find") + self.assertEqual(cev[0].database_name, "db") + self.assertEqual(cev[1].command_name, "find") + self.assertEqual(cev[1].database_name, "keyvault") - self.assertEqual(len(self.topology_listener.results['opened']), 2) + self.assertEqual(len(self.topology_listener.results["opened"]), 2) def test_case_4(self): - self._run_test(max_pool_size=1, - auto_encryption_opts=AutoEncryptionOpts( - *self.optargs, - bypass_auto_encryption=True, - key_vault_client=self.client_keyvault)) + self._run_test( + max_pool_size=1, + auto_encryption_opts=AutoEncryptionOpts( + *self.optargs, bypass_auto_encryption=True, key_vault_client=self.client_keyvault + ), + ) - cev = self.client_listener.results['started'] + cev = self.client_listener.results["started"] self.assertEqual(len(cev), 1) - self.assertEqual(cev[0].command_name, 'find') - self.assertEqual(cev[0].database_name, 'db') + self.assertEqual(cev[0].command_name, "find") + self.assertEqual(cev[0].database_name, "db") - cev = self.client_keyvault_listener.results['started'] + cev = self.client_keyvault_listener.results["started"] self.assertEqual(len(cev), 1) - self.assertEqual(cev[0].command_name, 'find') - self.assertEqual(cev[0].database_name, 'keyvault') + self.assertEqual(cev[0].command_name, "find") + self.assertEqual(cev[0].database_name, "keyvault") - self.assertEqual(len(self.topology_listener.results['opened']), 1) + self.assertEqual(len(self.topology_listener.results["opened"]), 1) def test_case_5(self): - self._run_test(max_pool_size=None, - auto_encryption_opts=AutoEncryptionOpts( - *self.optargs, - bypass_auto_encryption=False, - key_vault_client=None)) + self._run_test( + max_pool_size=None, + auto_encryption_opts=AutoEncryptionOpts( + *self.optargs, bypass_auto_encryption=False, key_vault_client=None + ), + ) - cev = self.client_listener.results['started'] + cev = self.client_listener.results["started"] self.assertEqual(len(cev), 5) - self.assertEqual(cev[0].command_name, 'listCollections') - self.assertEqual(cev[0].database_name, 'db') - self.assertEqual(cev[1].command_name, 'listCollections') - self.assertEqual(cev[1].database_name, 'keyvault') - self.assertEqual(cev[2].command_name, 'find') - self.assertEqual(cev[2].database_name, 'keyvault') - self.assertEqual(cev[3].command_name, 'insert') - self.assertEqual(cev[3].database_name, 'db') - self.assertEqual(cev[4].command_name, 'find') - self.assertEqual(cev[4].database_name, 'db') - - self.assertEqual(len(self.topology_listener.results['opened']), 1) + 
self.assertEqual(cev[0].command_name, "listCollections") + self.assertEqual(cev[0].database_name, "db") + self.assertEqual(cev[1].command_name, "listCollections") + self.assertEqual(cev[1].database_name, "keyvault") + self.assertEqual(cev[2].command_name, "find") + self.assertEqual(cev[2].database_name, "keyvault") + self.assertEqual(cev[3].command_name, "insert") + self.assertEqual(cev[3].database_name, "db") + self.assertEqual(cev[4].command_name, "find") + self.assertEqual(cev[4].database_name, "db") + + self.assertEqual(len(self.topology_listener.results["opened"]), 1) def test_case_6(self): - self._run_test(max_pool_size=None, - auto_encryption_opts=AutoEncryptionOpts( - *self.optargs, - bypass_auto_encryption=False, - key_vault_client=self.client_keyvault)) + self._run_test( + max_pool_size=None, + auto_encryption_opts=AutoEncryptionOpts( + *self.optargs, bypass_auto_encryption=False, key_vault_client=self.client_keyvault + ), + ) - cev = self.client_listener.results['started'] + cev = self.client_listener.results["started"] self.assertEqual(len(cev), 3) - self.assertEqual(cev[0].command_name, 'listCollections') - self.assertEqual(cev[0].database_name, 'db') - self.assertEqual(cev[1].command_name, 'insert') - self.assertEqual(cev[1].database_name, 'db') - self.assertEqual(cev[2].command_name, 'find') - self.assertEqual(cev[2].database_name, 'db') - - cev = self.client_keyvault_listener.results['started'] + self.assertEqual(cev[0].command_name, "listCollections") + self.assertEqual(cev[0].database_name, "db") + self.assertEqual(cev[1].command_name, "insert") + self.assertEqual(cev[1].database_name, "db") + self.assertEqual(cev[2].command_name, "find") + self.assertEqual(cev[2].database_name, "db") + + cev = self.client_keyvault_listener.results["started"] self.assertEqual(len(cev), 1) - self.assertEqual(cev[0].command_name, 'find') - self.assertEqual(cev[0].database_name, 'keyvault') + self.assertEqual(cev[0].command_name, "find") + self.assertEqual(cev[0].database_name, "keyvault") - self.assertEqual(len(self.topology_listener.results['opened']), 1) + self.assertEqual(len(self.topology_listener.results["opened"]), 1) def test_case_7(self): - self._run_test(max_pool_size=None, - auto_encryption_opts=AutoEncryptionOpts( - *self.optargs, - bypass_auto_encryption=True, - key_vault_client=None)) + self._run_test( + max_pool_size=None, + auto_encryption_opts=AutoEncryptionOpts( + *self.optargs, bypass_auto_encryption=True, key_vault_client=None + ), + ) - cev = self.client_listener.results['started'] + cev = self.client_listener.results["started"] self.assertEqual(len(cev), 2) - self.assertEqual(cev[0].command_name, 'find') - self.assertEqual(cev[0].database_name, 'db') - self.assertEqual(cev[1].command_name, 'find') - self.assertEqual(cev[1].database_name, 'keyvault') + self.assertEqual(cev[0].command_name, "find") + self.assertEqual(cev[0].database_name, "db") + self.assertEqual(cev[1].command_name, "find") + self.assertEqual(cev[1].database_name, "keyvault") - self.assertEqual(len(self.topology_listener.results['opened']), 1) + self.assertEqual(len(self.topology_listener.results["opened"]), 1) def test_case_8(self): - self._run_test(max_pool_size=None, - auto_encryption_opts=AutoEncryptionOpts( - *self.optargs, - bypass_auto_encryption=True, - key_vault_client=self.client_keyvault)) + self._run_test( + max_pool_size=None, + auto_encryption_opts=AutoEncryptionOpts( + *self.optargs, bypass_auto_encryption=True, key_vault_client=self.client_keyvault + ), + ) - cev = 
self.client_listener.results['started'] + cev = self.client_listener.results["started"] self.assertEqual(len(cev), 1) - self.assertEqual(cev[0].command_name, 'find') - self.assertEqual(cev[0].database_name, 'db') + self.assertEqual(cev[0].command_name, "find") + self.assertEqual(cev[0].database_name, "db") - cev = self.client_keyvault_listener.results['started'] + cev = self.client_keyvault_listener.results["started"] self.assertEqual(len(cev), 1) - self.assertEqual(cev[0].command_name, 'find') - self.assertEqual(cev[0].database_name, 'keyvault') + self.assertEqual(cev[0].command_name, "find") + self.assertEqual(cev[0].database_name, "keyvault") - self.assertEqual(len(self.topology_listener.results['opened']), 1) + self.assertEqual(len(self.topology_listener.results["opened"]), 1) # https://github.com/mongodb/specifications/blob/master/source/client-side-encryption/tests/README.rst#bypass-spawning-mongocryptd @@ -1746,220 +1768,210 @@ def test_mongocryptd_bypass_spawn(self): # Lower the mongocryptd timeout to reduce the test run time. self._original_timeout = encryption._MONGOCRYPTD_TIMEOUT_MS encryption._MONGOCRYPTD_TIMEOUT_MS = 500 + def reset_timeout(): encryption._MONGOCRYPTD_TIMEOUT_MS = self._original_timeout + self.addCleanup(reset_timeout) # Configure the encrypted field via the local schema_map option. - schemas = {'db.coll': json_data('external', 'external-schema.json')} + schemas = {"db.coll": json_data("external", "external-schema.json")} opts = AutoEncryptionOpts( - {'local': {'key': LOCAL_MASTER_KEY}}, - 'keyvault.datakeys', + {"local": {"key": LOCAL_MASTER_KEY}}, + "keyvault.datakeys", schema_map=schemas, mongocryptd_bypass_spawn=True, - mongocryptd_uri='mongodb://localhost:27027/', + mongocryptd_uri="mongodb://localhost:27027/", mongocryptd_spawn_args=[ - '--pidfilepath=bypass-spawning-mongocryptd.pid', - '--port=27027'] + "--pidfilepath=bypass-spawning-mongocryptd.pid", + "--port=27027", + ], ) client_encrypted = rs_or_single_client(auto_encryption_opts=opts) self.addCleanup(client_encrypted.close) - with self.assertRaisesRegex(EncryptionError, 'Timeout'): - client_encrypted.db.coll.insert_one({'encrypted': 'test'}) + with self.assertRaisesRegex(EncryptionError, "Timeout"): + client_encrypted.db.coll.insert_one({"encrypted": "test"}) def test_bypassAutoEncryption(self): opts = AutoEncryptionOpts( - {'local': {'key': LOCAL_MASTER_KEY}}, - 'keyvault.datakeys', + {"local": {"key": LOCAL_MASTER_KEY}}, + "keyvault.datakeys", bypass_auto_encryption=True, mongocryptd_spawn_args=[ - '--pidfilepath=bypass-spawning-mongocryptd.pid', - '--port=27027'] + "--pidfilepath=bypass-spawning-mongocryptd.pid", + "--port=27027", + ], ) client_encrypted = rs_or_single_client(auto_encryption_opts=opts) self.addCleanup(client_encrypted.close) client_encrypted.db.coll.insert_one({"unencrypted": "test"}) # Validate that mongocryptd was not spawned: - mongocryptd_client = MongoClient( - 'mongodb://localhost:27027/?serverSelectionTimeoutMS=500') + mongocryptd_client = MongoClient("mongodb://localhost:27027/?serverSelectionTimeoutMS=500") with self.assertRaises(ServerSelectionTimeoutError): - mongocryptd_client.admin.command('ping') + mongocryptd_client.admin.command("ping") # https://github.com/mongodb/specifications/tree/master/source/client-side-encryption/tests#kms-tls-tests class TestKmsTLSProse(EncryptionIntegrationTest): - @unittest.skipUnless(any(AWS_CREDS.values()), - 'AWS environment credentials are not set') + @unittest.skipUnless(any(AWS_CREDS.values()), "AWS 
environment credentials are not set") def setUp(self): super(TestKmsTLSProse, self).setUp() self.patch_system_certs(CA_PEM) self.client_encrypted = ClientEncryption( - {'aws': AWS_CREDS}, 'keyvault.datakeys', self.client, OPTS) + {"aws": AWS_CREDS}, "keyvault.datakeys", self.client, OPTS + ) self.addCleanup(self.client_encrypted.close) def test_invalid_kms_certificate_expired(self): key = { - "region": "us-east-1", - "key": "arn:aws:kms:us-east-1:579766882180:key/" - "89fcc2c4-08b0-4bd9-9f25-e30687b580d0", - "endpoint": "mongodb://127.0.0.1:8000", + "region": "us-east-1", + "key": "arn:aws:kms:us-east-1:579766882180:key/" "89fcc2c4-08b0-4bd9-9f25-e30687b580d0", + "endpoint": "mongodb://127.0.0.1:8000", } # Some examples: # certificate verify failed: certificate has expired (_ssl.c:1129) # amazon1-2018 Python 3.6: certificate verify failed (_ssl.c:852) - with self.assertRaisesRegex( - EncryptionError, 'expired|certificate verify failed'): - self.client_encrypted.create_data_key('aws', master_key=key) + with self.assertRaisesRegex(EncryptionError, "expired|certificate verify failed"): + self.client_encrypted.create_data_key("aws", master_key=key) def test_invalid_hostname_in_kms_certificate(self): key = { - "region": "us-east-1", - "key": "arn:aws:kms:us-east-1:579766882180:key/" - "89fcc2c4-08b0-4bd9-9f25-e30687b580d0", - "endpoint": "mongodb://127.0.0.1:8001", + "region": "us-east-1", + "key": "arn:aws:kms:us-east-1:579766882180:key/" "89fcc2c4-08b0-4bd9-9f25-e30687b580d0", + "endpoint": "mongodb://127.0.0.1:8001", } # Some examples: # certificate verify failed: IP address mismatch, certificate is not valid for '127.0.0.1'. (_ssl.c:1129)" # hostname '127.0.0.1' doesn't match 'wronghost.com' - with self.assertRaisesRegex( - EncryptionError, 'IP address mismatch|wronghost'): - self.client_encrypted.create_data_key('aws', master_key=key) + with self.assertRaisesRegex(EncryptionError, "IP address mismatch|wronghost"): + self.client_encrypted.create_data_key("aws", master_key=key) # https://github.com/mongodb/specifications/blob/master/source/client-side-encryption/tests/README.rst#kms-tls-options-tests class TestKmsTLSOptions(EncryptionIntegrationTest): - @unittest.skipUnless(any(AWS_CREDS.values()), - 'AWS environment credentials are not set') + @unittest.skipUnless(any(AWS_CREDS.values()), "AWS environment credentials are not set") def setUp(self): super(TestKmsTLSOptions, self).setUp() # 1, create client with only tlsCAFile. providers: dict = copy.deepcopy(ALL_KMS_PROVIDERS) - providers['azure']['identityPlatformEndpoint'] = '127.0.0.1:8002' - providers['gcp']['endpoint'] = '127.0.0.1:8002' + providers["azure"]["identityPlatformEndpoint"] = "127.0.0.1:8002" + providers["gcp"]["endpoint"] = "127.0.0.1:8002" kms_tls_opts_ca_only = { - 'aws': {'tlsCAFile': CA_PEM}, - 'azure': {'tlsCAFile': CA_PEM}, - 'gcp': {'tlsCAFile': CA_PEM}, - 'kmip': {'tlsCAFile': CA_PEM}, + "aws": {"tlsCAFile": CA_PEM}, + "azure": {"tlsCAFile": CA_PEM}, + "gcp": {"tlsCAFile": CA_PEM}, + "kmip": {"tlsCAFile": CA_PEM}, } self.client_encryption_no_client_cert = ClientEncryption( - providers, 'keyvault.datakeys', self.client, OPTS, - kms_tls_options=kms_tls_opts_ca_only) + providers, "keyvault.datakeys", self.client, OPTS, kms_tls_options=kms_tls_opts_ca_only + ) self.addCleanup(self.client_encryption_no_client_cert.close) # 2, same providers as above but with tlsCertificateKeyFile. 
kms_tls_opts = copy.deepcopy(kms_tls_opts_ca_only) for p in kms_tls_opts: - kms_tls_opts[p]['tlsCertificateKeyFile'] = CLIENT_PEM + kms_tls_opts[p]["tlsCertificateKeyFile"] = CLIENT_PEM self.client_encryption_with_tls = ClientEncryption( - providers, 'keyvault.datakeys', self.client, OPTS, - kms_tls_options=kms_tls_opts) + providers, "keyvault.datakeys", self.client, OPTS, kms_tls_options=kms_tls_opts + ) self.addCleanup(self.client_encryption_with_tls.close) # 3, update endpoints to expired host. providers: dict = copy.deepcopy(providers) - providers['azure']['identityPlatformEndpoint'] = '127.0.0.1:8000' - providers['gcp']['endpoint'] = '127.0.0.1:8000' - providers['kmip']['endpoint'] = '127.0.0.1:8000' + providers["azure"]["identityPlatformEndpoint"] = "127.0.0.1:8000" + providers["gcp"]["endpoint"] = "127.0.0.1:8000" + providers["kmip"]["endpoint"] = "127.0.0.1:8000" self.client_encryption_expired = ClientEncryption( - providers, 'keyvault.datakeys', self.client, OPTS, - kms_tls_options=kms_tls_opts_ca_only) + providers, "keyvault.datakeys", self.client, OPTS, kms_tls_options=kms_tls_opts_ca_only + ) self.addCleanup(self.client_encryption_expired.close) # 4, update endpoints to invalid host. providers: dict = copy.deepcopy(providers) - providers['azure']['identityPlatformEndpoint'] = '127.0.0.1:8001' - providers['gcp']['endpoint'] = '127.0.0.1:8001' - providers['kmip']['endpoint'] = '127.0.0.1:8001' + providers["azure"]["identityPlatformEndpoint"] = "127.0.0.1:8001" + providers["gcp"]["endpoint"] = "127.0.0.1:8001" + providers["kmip"]["endpoint"] = "127.0.0.1:8001" self.client_encryption_invalid_hostname = ClientEncryption( - providers, 'keyvault.datakeys', self.client, OPTS, - kms_tls_options=kms_tls_opts_ca_only) + providers, "keyvault.datakeys", self.client, OPTS, kms_tls_options=kms_tls_opts_ca_only + ) self.addCleanup(self.client_encryption_invalid_hostname.close) # Errors when client has no cert, some examples: # [SSL: TLSV13_ALERT_CERTIFICATE_REQUIRED] tlsv13 alert certificate required (_ssl.c:2623) - self.cert_error = ('certificate required|SSL handshake failed|' - 'KMS connection closed|Connection reset by peer') + self.cert_error = ( + "certificate required|SSL handshake failed|" + "KMS connection closed|Connection reset by peer" + ) # On Python 3.10+ this error might be: # EOF occurred in violation of protocol (_ssl.c:2384) if sys.version_info[:2] >= (3, 10): - self.cert_error += '|EOF' + self.cert_error += "|EOF" # On Windows this error might be: # [WinError 10054] An existing connection was forcibly closed by the remote host - if sys.platform == 'win32': - self.cert_error += '|forcibly closed' + if sys.platform == "win32": + self.cert_error += "|forcibly closed" def test_01_aws(self): key = { - 'region': 'us-east-1', - 'key': 'arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0', - 'endpoint': '127.0.0.1:8002', + "region": "us-east-1", + "key": "arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0", + "endpoint": "127.0.0.1:8002", } with self.assertRaisesRegex(EncryptionError, self.cert_error): - self.client_encryption_no_client_cert.create_data_key('aws', key) + self.client_encryption_no_client_cert.create_data_key("aws", key) # "parse error" here means that the TLS handshake succeeded. 
- with self.assertRaisesRegex(EncryptionError, 'parse error'): - self.client_encryption_with_tls.create_data_key('aws', key) + with self.assertRaisesRegex(EncryptionError, "parse error"): + self.client_encryption_with_tls.create_data_key("aws", key) # Some examples: # certificate verify failed: certificate has expired (_ssl.c:1129) # amazon1-2018 Python 3.6: certificate verify failed (_ssl.c:852) - key['endpoint'] = '127.0.0.1:8000' - with self.assertRaisesRegex( - EncryptionError, 'expired|certificate verify failed'): - self.client_encryption_expired.create_data_key('aws', key) + key["endpoint"] = "127.0.0.1:8000" + with self.assertRaisesRegex(EncryptionError, "expired|certificate verify failed"): + self.client_encryption_expired.create_data_key("aws", key) # Some examples: # certificate verify failed: IP address mismatch, certificate is not valid for '127.0.0.1'. (_ssl.c:1129)" # hostname '127.0.0.1' doesn't match 'wronghost.com' - key['endpoint'] = '127.0.0.1:8001' - with self.assertRaisesRegex( - EncryptionError, 'IP address mismatch|wronghost'): - self.client_encryption_invalid_hostname.create_data_key('aws', key) + key["endpoint"] = "127.0.0.1:8001" + with self.assertRaisesRegex(EncryptionError, "IP address mismatch|wronghost"): + self.client_encryption_invalid_hostname.create_data_key("aws", key) def test_02_azure(self): - key = {'keyVaultEndpoint': 'doesnotexist.local', 'keyName': 'foo'} + key = {"keyVaultEndpoint": "doesnotexist.local", "keyName": "foo"} # Missing client cert error. with self.assertRaisesRegex(EncryptionError, self.cert_error): - self.client_encryption_no_client_cert.create_data_key('azure', key) + self.client_encryption_no_client_cert.create_data_key("azure", key) # "HTTP status=404" here means that the TLS handshake succeeded. - with self.assertRaisesRegex(EncryptionError, 'HTTP status=404'): - self.client_encryption_with_tls.create_data_key('azure', key) + with self.assertRaisesRegex(EncryptionError, "HTTP status=404"): + self.client_encryption_with_tls.create_data_key("azure", key) # Expired cert error. - with self.assertRaisesRegex( - EncryptionError, 'expired|certificate verify failed'): - self.client_encryption_expired.create_data_key('azure', key) + with self.assertRaisesRegex(EncryptionError, "expired|certificate verify failed"): + self.client_encryption_expired.create_data_key("azure", key) # Invalid cert hostname error. - with self.assertRaisesRegex( - EncryptionError, 'IP address mismatch|wronghost'): - self.client_encryption_invalid_hostname.create_data_key( - 'azure', key) + with self.assertRaisesRegex(EncryptionError, "IP address mismatch|wronghost"): + self.client_encryption_invalid_hostname.create_data_key("azure", key) def test_03_gcp(self): - key = {'projectId': 'foo', 'location': 'bar', 'keyRing': 'baz', - 'keyName': 'foo'} + key = {"projectId": "foo", "location": "bar", "keyRing": "baz", "keyName": "foo"} # Missing client cert error. with self.assertRaisesRegex(EncryptionError, self.cert_error): - self.client_encryption_no_client_cert.create_data_key('gcp', key) + self.client_encryption_no_client_cert.create_data_key("gcp", key) # "HTTP status=404" here means that the TLS handshake succeeded. - with self.assertRaisesRegex(EncryptionError, 'HTTP status=404'): - self.client_encryption_with_tls.create_data_key('gcp', key) + with self.assertRaisesRegex(EncryptionError, "HTTP status=404"): + self.client_encryption_with_tls.create_data_key("gcp", key) # Expired cert error. 
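When a stronger check than a message regex is wanted, the original TLS failure is preserved on the error's cause attribute (as test 04 of TestCustomEndpoint relies on); a minimal sketch reusing this setUp's fixture names, hypothetically outside the class::

    from pymongo.errors import EncryptionError

    try:
        client_encryption_expired.create_data_key("gcp", key)
    except EncryptionError as exc:
        # The underlying exception (here typically an
        # ssl.SSLCertVerificationError) is preserved unmodified, so
        # callers can classify TLS failures without string matching.
        print(type(exc.cause), exc.cause)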
- with self.assertRaisesRegex( - EncryptionError, 'expired|certificate verify failed'): - self.client_encryption_expired.create_data_key('gcp', key) + with self.assertRaisesRegex(EncryptionError, "expired|certificate verify failed"): + self.client_encryption_expired.create_data_key("gcp", key) # Invalid cert hostname error. - with self.assertRaisesRegex( - EncryptionError, 'IP address mismatch|wronghost'): - self.client_encryption_invalid_hostname.create_data_key('gcp', key) + with self.assertRaisesRegex(EncryptionError, "IP address mismatch|wronghost"): + self.client_encryption_invalid_hostname.create_data_key("gcp", key) def test_04_kmip(self): # Missing client cert error. with self.assertRaisesRegex(EncryptionError, self.cert_error): - self.client_encryption_no_client_cert.create_data_key('kmip') - self.client_encryption_with_tls.create_data_key('kmip') + self.client_encryption_no_client_cert.create_data_key("kmip") + self.client_encryption_with_tls.create_data_key("kmip") # Expired cert error. - with self.assertRaisesRegex( - EncryptionError, 'expired|certificate verify failed'): - self.client_encryption_expired.create_data_key('kmip') + with self.assertRaisesRegex(EncryptionError, "expired|certificate verify failed"): + self.client_encryption_expired.create_data_key("kmip") # Invalid cert hostname error. - with self.assertRaisesRegex( - EncryptionError, 'IP address mismatch|wronghost'): - self.client_encryption_invalid_hostname.create_data_key('kmip') + with self.assertRaisesRegex(EncryptionError, "IP address mismatch|wronghost"): + self.client_encryption_invalid_hostname.create_data_key("kmip") if __name__ == "__main__": diff --git a/test/test_errors.py b/test/test_errors.py index 53c55f8167..8a225b6548 100644 --- a/test/test_errors.py +++ b/test/test_errors.py @@ -18,12 +18,14 @@ sys.path[0:0] = [""] -from pymongo.errors import (BulkWriteError, - EncryptionError, - NotPrimaryError, - OperationFailure) -from test import (PyMongoTestCase, - unittest) +from test import PyMongoTestCase, unittest + +from pymongo.errors import ( + BulkWriteError, + EncryptionError, + NotPrimaryError, + OperationFailure, +) class TestErrors(PyMongoTestCase): @@ -36,8 +38,7 @@ def test_not_primary_error(self): self.assertIn("full error", traceback.format_exc()) def test_operation_failure(self): - exc = OperationFailure("operation failure test", 10, - {"errmsg": "error"}) + exc = OperationFailure("operation failure test", 10, {"errmsg": "error"}) self.assertIn("full error", str(exc)) try: raise exc @@ -45,26 +46,26 @@ def test_operation_failure(self): self.assertIn("full error", traceback.format_exc()) def _test_unicode_strs(self, exc): - if sys.implementation.name == 'pypy' and sys.implementation.version < (7, 3, 7): + if sys.implementation.name == "pypy" and sys.implementation.version < (7, 3, 7): # PyPy used to display unicode in repr differently. 
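These assertions pin down the str() format of OperationFailure: the message, then ", full error:" and the raw server document; a minimal self-contained sketch mirroring them::

    from pymongo.errors import OperationFailure

    exc = OperationFailure("op failed", 10, {"errmsg": "error"})
    assert "full error" in str(exc)       # raw server response is appended
    assert exc.code == 10                 # server error code
    assert exc.details == {"errmsg": "error"}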
- self.assertEqual("unicode \U0001f40d, full error: {" - "'errmsg': 'unicode \\U0001f40d'}", str(exc)) + self.assertEqual( + "unicode \U0001f40d, full error: {" "'errmsg': 'unicode \\U0001f40d'}", str(exc) + ) else: - self.assertEqual("unicode \U0001f40d, full error: {" - "'errmsg': 'unicode \U0001f40d'}", str(exc)) + self.assertEqual( + "unicode \U0001f40d, full error: {" "'errmsg': 'unicode \U0001f40d'}", str(exc) + ) try: raise exc except Exception: self.assertIn("full error", traceback.format_exc()) def test_unicode_strs_operation_failure(self): - exc = OperationFailure('unicode \U0001f40d', 10, - {"errmsg": 'unicode \U0001f40d'}) + exc = OperationFailure("unicode \U0001f40d", 10, {"errmsg": "unicode \U0001f40d"}) self._test_unicode_strs(exc) def test_unicode_strs_not_primary_error(self): - exc = NotPrimaryError('unicode \U0001f40d', - {"errmsg": 'unicode \U0001f40d'}) + exc = NotPrimaryError("unicode \U0001f40d", {"errmsg": "unicode \U0001f40d"}) self._test_unicode_strs(exc) def assertPyMongoErrorEqual(self, exc1, exc2): @@ -84,7 +85,7 @@ def test_pickle_NotPrimaryError(self): self.assertPyMongoErrorEqual(exc, pickle.loads(pickle.dumps(exc))) def test_pickle_OperationFailure(self): - exc = OperationFailure('error', code=5, details={}, max_wire_version=7) + exc = OperationFailure("error", code=5, details={}, max_wire_version=7) self.assertOperationFailureEqual(exc, pickle.loads(pickle.dumps(exc))) def test_pickle_BulkWriteError(self): @@ -93,8 +94,7 @@ def test_pickle_BulkWriteError(self): self.assertIn("batch op errors occurred", str(exc)) def test_pickle_EncryptionError(self): - cause = OperationFailure('error', code=5, details={}, - max_wire_version=7) + cause = OperationFailure("error", code=5, details={}, max_wire_version=7) exc = EncryptionError(cause) exc2 = pickle.loads(pickle.dumps(exc)) self.assertPyMongoErrorEqual(exc, exc2) diff --git a/test/test_examples.py b/test/test_examples.py index ed12c8bcc1..7354ac5be2 100644 --- a/test/test_examples.py +++ b/test/test_examples.py @@ -20,6 +20,9 @@ sys.path[0:0] = [""] +from test import IntegrationTest, client_context, unittest +from test.utils import rs_client + import pymongo from pymongo.errors import ConnectionFailure, OperationFailure from pymongo.read_concern import ReadConcern @@ -27,9 +30,6 @@ from pymongo.server_api import ServerApi from pymongo.write_concern import WriteConcern -from test import client_context, unittest, IntegrationTest -from test.utils import rs_client - class TestSampleShellCommands(IntegrationTest): @classmethod @@ -51,10 +51,13 @@ def test_first_three_examples(self): # Start Example 1 db.inventory.insert_one( - {"item": "canvas", - "qty": 100, - "tags": ["cotton"], - "size": {"h": 28, "w": 35.5, "uom": "cm"}}) + { + "item": "canvas", + "qty": 100, + "tags": ["cotton"], + "size": {"h": 28, "w": 35.5, "uom": "cm"}, + } + ) # End Example 1 self.assertEqual(db.inventory.count_documents({}), 1) @@ -66,19 +69,28 @@ def test_first_three_examples(self): self.assertEqual(len(list(cursor)), 1) # Start Example 3 - db.inventory.insert_many([ - {"item": "journal", - "qty": 25, - "tags": ["blank", "red"], - "size": {"h": 14, "w": 21, "uom": "cm"}}, - {"item": "mat", - "qty": 85, - "tags": ["gray"], - "size": {"h": 27.9, "w": 35.5, "uom": "cm"}}, - {"item": "mousepad", - "qty": 25, - "tags": ["gel", "blue"], - "size": {"h": 19, "w": 22.85, "uom": "cm"}}]) + db.inventory.insert_many( + [ + { + "item": "journal", + "qty": 25, + "tags": ["blank", "red"], + "size": {"h": 14, "w": 21, "uom": "cm"}, + }, + { + "item": 
"mat", + "qty": 85, + "tags": ["gray"], + "size": {"h": 27.9, "w": 35.5, "uom": "cm"}, + }, + { + "item": "mousepad", + "qty": 25, + "tags": ["gel", "blue"], + "size": {"h": 19, "w": 22.85, "uom": "cm"}, + }, + ] + ) # End Example 3 self.assertEqual(db.inventory.count_documents({}), 4) @@ -87,26 +99,40 @@ def test_query_top_level_fields(self): db = self.db # Start Example 6 - db.inventory.insert_many([ - {"item": "journal", - "qty": 25, - "size": {"h": 14, "w": 21, "uom": "cm"}, - "status": "A"}, - {"item": "notebook", - "qty": 50, - "size": {"h": 8.5, "w": 11, "uom": "in"}, - "status": "A"}, - {"item": "paper", - "qty": 100, - "size": {"h": 8.5, "w": 11, "uom": "in"}, - "status": "D"}, - {"item": "planner", - "qty": 75, "size": {"h": 22.85, "w": 30, "uom": "cm"}, - "status": "D"}, - {"item": "postcard", - "qty": 45, - "size": {"h": 10, "w": 15.25, "uom": "cm"}, - "status": "A"}]) + db.inventory.insert_many( + [ + { + "item": "journal", + "qty": 25, + "size": {"h": 14, "w": 21, "uom": "cm"}, + "status": "A", + }, + { + "item": "notebook", + "qty": 50, + "size": {"h": 8.5, "w": 11, "uom": "in"}, + "status": "A", + }, + { + "item": "paper", + "qty": 100, + "size": {"h": 8.5, "w": 11, "uom": "in"}, + "status": "D", + }, + { + "item": "planner", + "qty": 75, + "size": {"h": 22.85, "w": 30, "uom": "cm"}, + "status": "D", + }, + { + "item": "postcard", + "qty": 45, + "size": {"h": 10, "w": 15.25, "uom": "cm"}, + "status": "A", + }, + ] + ) # End Example 6 self.assertEqual(db.inventory.count_documents({}), 5) @@ -136,16 +162,15 @@ def test_query_top_level_fields(self): self.assertEqual(len(list(cursor)), 1) # Start Example 12 - cursor = db.inventory.find( - {"$or": [{"status": "A"}, {"qty": {"$lt": 30}}]}) + cursor = db.inventory.find({"$or": [{"status": "A"}, {"qty": {"$lt": 30}}]}) # End Example 12 self.assertEqual(len(list(cursor)), 3) # Start Example 13 - cursor = db.inventory.find({ - "status": "A", - "$or": [{"qty": {"$lt": 30}}, {"item": {"$regex": "^p"}}]}) + cursor = db.inventory.find( + {"status": "A", "$or": [{"qty": {"$lt": 30}}, {"item": {"$regex": "^p"}}]} + ) # End Example 13 self.assertEqual(len(list(cursor)), 2) @@ -157,39 +182,51 @@ def test_query_embedded_documents(self): # Subdocument key order matters in a few of these examples so we have # to use bson.son.SON instead of a Python dict. 
from bson.son import SON - db.inventory.insert_many([ - {"item": "journal", - "qty": 25, - "size": SON([("h", 14), ("w", 21), ("uom", "cm")]), - "status": "A"}, - {"item": "notebook", - "qty": 50, - "size": SON([("h", 8.5), ("w", 11), ("uom", "in")]), - "status": "A"}, - {"item": "paper", - "qty": 100, - "size": SON([("h", 8.5), ("w", 11), ("uom", "in")]), - "status": "D"}, - {"item": "planner", - "qty": 75, - "size": SON([("h", 22.85), ("w", 30), ("uom", "cm")]), - "status": "D"}, - {"item": "postcard", - "qty": 45, - "size": SON([("h", 10), ("w", 15.25), ("uom", "cm")]), - "status": "A"}]) + + db.inventory.insert_many( + [ + { + "item": "journal", + "qty": 25, + "size": SON([("h", 14), ("w", 21), ("uom", "cm")]), + "status": "A", + }, + { + "item": "notebook", + "qty": 50, + "size": SON([("h", 8.5), ("w", 11), ("uom", "in")]), + "status": "A", + }, + { + "item": "paper", + "qty": 100, + "size": SON([("h", 8.5), ("w", 11), ("uom", "in")]), + "status": "D", + }, + { + "item": "planner", + "qty": 75, + "size": SON([("h", 22.85), ("w", 30), ("uom", "cm")]), + "status": "D", + }, + { + "item": "postcard", + "qty": 45, + "size": SON([("h", 10), ("w", 15.25), ("uom", "cm")]), + "status": "A", + }, + ] + ) # End Example 14 # Start Example 15 - cursor = db.inventory.find( - {"size": SON([("h", 14), ("w", 21), ("uom", "cm")])}) + cursor = db.inventory.find({"size": SON([("h", 14), ("w", 21), ("uom", "cm")])}) # End Example 15 self.assertEqual(len(list(cursor)), 1) # Start Example 16 - cursor = db.inventory.find( - {"size": SON([("w", 21), ("h", 14), ("uom", "cm")])}) + cursor = db.inventory.find({"size": SON([("w", 21), ("h", 14), ("uom", "cm")])}) # End Example 16 self.assertEqual(len(list(cursor)), 0) @@ -207,8 +244,7 @@ def test_query_embedded_documents(self): self.assertEqual(len(list(cursor)), 4) # Start Example 19 - cursor = db.inventory.find( - {"size.h": {"$lt": 15}, "size.uom": "in", "status": "D"}) + cursor = db.inventory.find({"size.h": {"$lt": 15}, "size.uom": "in", "status": "D"}) # End Example 19 self.assertEqual(len(list(cursor)), 1) @@ -217,27 +253,20 @@ def test_query_arrays(self): db = self.db # Start Example 20 - db.inventory.insert_many([ - {"item": "journal", - "qty": 25, - "tags": ["blank", "red"], - "dim_cm": [14, 21]}, - {"item": "notebook", - "qty": 50, - "tags": ["red", "blank"], - "dim_cm": [14, 21]}, - {"item": "paper", - "qty": 100, - "tags": ["red", "blank", "plain"], - "dim_cm": [14, 21]}, - {"item": "planner", - "qty": 75, - "tags": ["blank", "red"], - "dim_cm": [22.85, 30]}, - {"item": "postcard", - "qty": 45, - "tags": ["blue"], - "dim_cm": [10, 15.25]}]) + db.inventory.insert_many( + [ + {"item": "journal", "qty": 25, "tags": ["blank", "red"], "dim_cm": [14, 21]}, + {"item": "notebook", "qty": 50, "tags": ["red", "blank"], "dim_cm": [14, 21]}, + { + "item": "paper", + "qty": 100, + "tags": ["red", "blank", "plain"], + "dim_cm": [14, 21], + }, + {"item": "planner", "qty": 75, "tags": ["blank", "red"], "dim_cm": [22.85, 30]}, + {"item": "postcard", "qty": 45, "tags": ["blue"], "dim_cm": [10, 15.25]}, + ] + ) # End Example 20 # Start Example 21 @@ -271,8 +300,7 @@ def test_query_arrays(self): self.assertEqual(len(list(cursor)), 4) # Start Example 26 - cursor = db.inventory.find( - {"dim_cm": {"$elemMatch": {"$gt": 22, "$lt": 30}}}) + cursor = db.inventory.find({"dim_cm": {"$elemMatch": {"$gt": 22, "$lt": 30}}}) # End Example 26 self.assertEqual(len(list(cursor)), 1) @@ -296,64 +324,74 @@ def test_query_array_of_documents(self): # Subdocument key order matters in a 
few of these examples so we have # to use bson.son.SON instead of a Python dict. from bson.son import SON - db.inventory.insert_many([ - {"item": "journal", - "instock": [ - SON([("warehouse", "A"), ("qty", 5)]), - SON([("warehouse", "C"), ("qty", 15)])]}, - {"item": "notebook", - "instock": [ - SON([("warehouse", "C"), ("qty", 5)])]}, - {"item": "paper", - "instock": [ - SON([("warehouse", "A"), ("qty", 60)]), - SON([("warehouse", "B"), ("qty", 15)])]}, - {"item": "planner", - "instock": [ - SON([("warehouse", "A"), ("qty", 40)]), - SON([("warehouse", "B"), ("qty", 5)])]}, - {"item": "postcard", - "instock": [ - SON([("warehouse", "B"), ("qty", 15)]), - SON([("warehouse", "C"), ("qty", 35)])]}]) + + db.inventory.insert_many( + [ + { + "item": "journal", + "instock": [ + SON([("warehouse", "A"), ("qty", 5)]), + SON([("warehouse", "C"), ("qty", 15)]), + ], + }, + {"item": "notebook", "instock": [SON([("warehouse", "C"), ("qty", 5)])]}, + { + "item": "paper", + "instock": [ + SON([("warehouse", "A"), ("qty", 60)]), + SON([("warehouse", "B"), ("qty", 15)]), + ], + }, + { + "item": "planner", + "instock": [ + SON([("warehouse", "A"), ("qty", 40)]), + SON([("warehouse", "B"), ("qty", 5)]), + ], + }, + { + "item": "postcard", + "instock": [ + SON([("warehouse", "B"), ("qty", 15)]), + SON([("warehouse", "C"), ("qty", 35)]), + ], + }, + ] + ) # End Example 29 # Start Example 30 - cursor = db.inventory.find( - {"instock": SON([("warehouse", "A"), ("qty", 5)])}) + cursor = db.inventory.find({"instock": SON([("warehouse", "A"), ("qty", 5)])}) # End Example 30 self.assertEqual(len(list(cursor)), 1) # Start Example 31 - cursor = db.inventory.find( - {"instock": SON([("qty", 5), ("warehouse", "A")])}) + cursor = db.inventory.find({"instock": SON([("qty", 5), ("warehouse", "A")])}) # End Example 31 self.assertEqual(len(list(cursor)), 0) # Start Example 32 - cursor = db.inventory.find({'instock.0.qty': {"$lte": 20}}) + cursor = db.inventory.find({"instock.0.qty": {"$lte": 20}}) # End Example 32 self.assertEqual(len(list(cursor)), 3) # Start Example 33 - cursor = db.inventory.find({'instock.qty': {"$lte": 20}}) + cursor = db.inventory.find({"instock.qty": {"$lte": 20}}) # End Example 33 self.assertEqual(len(list(cursor)), 5) # Start Example 34 - cursor = db.inventory.find( - {"instock": {"$elemMatch": {"qty": 5, "warehouse": "A"}}}) + cursor = db.inventory.find({"instock": {"$elemMatch": {"qty": 5, "warehouse": "A"}}}) # End Example 34 self.assertEqual(len(list(cursor)), 1) # Start Example 35 - cursor = db.inventory.find( - {"instock": {"$elemMatch": {"qty": {"$gt": 10, "$lte": 20}}}}) + cursor = db.inventory.find({"instock": {"$elemMatch": {"qty": {"$gt": 10, "$lte": 20}}}}) # End Example 35 self.assertEqual(len(list(cursor)), 3) @@ -365,8 +403,7 @@ def test_query_array_of_documents(self): self.assertEqual(len(list(cursor)), 4) # Start Example 37 - cursor = db.inventory.find( - {"instock.qty": 5, "instock.warehouse": "A"}) + cursor = db.inventory.find({"instock.qty": 5, "instock.warehouse": "A"}) # End Example 37 self.assertEqual(len(list(cursor)), 2) @@ -400,29 +437,40 @@ def test_projection(self): db = self.db # Start Example 42 - db.inventory.insert_many([ - {"item": "journal", - "status": "A", - "size": {"h": 14, "w": 21, "uom": "cm"}, - "instock": [{"warehouse": "A", "qty": 5}]}, - {"item": "notebook", - "status": "A", - "size": {"h": 8.5, "w": 11, "uom": "in"}, - "instock": [{"warehouse": "C", "qty": 5}]}, - {"item": "paper", - "status": "D", - "size": {"h": 8.5, "w": 11, "uom": "in"}, - "instock": 
[{"warehouse": "A", "qty": 60}]}, - {"item": "planner", - "status": "D", - "size": {"h": 22.85, "w": 30, "uom": "cm"}, - "instock": [{"warehouse": "A", "qty": 40}]}, - {"item": "postcard", - "status": "A", - "size": {"h": 10, "w": 15.25, "uom": "cm"}, - "instock": [ - {"warehouse": "B", "qty": 15}, - {"warehouse": "C", "qty": 35}]}]) + db.inventory.insert_many( + [ + { + "item": "journal", + "status": "A", + "size": {"h": 14, "w": 21, "uom": "cm"}, + "instock": [{"warehouse": "A", "qty": 5}], + }, + { + "item": "notebook", + "status": "A", + "size": {"h": 8.5, "w": 11, "uom": "in"}, + "instock": [{"warehouse": "C", "qty": 5}], + }, + { + "item": "paper", + "status": "D", + "size": {"h": 8.5, "w": 11, "uom": "in"}, + "instock": [{"warehouse": "A", "qty": 60}], + }, + { + "item": "planner", + "status": "D", + "size": {"h": 22.85, "w": 30, "uom": "cm"}, + "instock": [{"warehouse": "A", "qty": 40}], + }, + { + "item": "postcard", + "status": "A", + "size": {"h": 10, "w": 15.25, "uom": "cm"}, + "instock": [{"warehouse": "B", "qty": 15}, {"warehouse": "C", "qty": 35}], + }, + ] + ) # End Example 42 # Start Example 43 @@ -432,8 +480,7 @@ def test_projection(self): self.assertEqual(len(list(cursor)), 3) # Start Example 44 - cursor = db.inventory.find( - {"status": "A"}, {"item": 1, "status": 1}) + cursor = db.inventory.find({"status": "A"}, {"item": 1, "status": 1}) # End Example 44 for doc in cursor: @@ -444,8 +491,7 @@ def test_projection(self): self.assertFalse("instock" in doc) # Start Example 45 - cursor = db.inventory.find( - {"status": "A"}, {"item": 1, "status": 1, "_id": 0}) + cursor = db.inventory.find({"status": "A"}, {"item": 1, "status": 1, "_id": 0}) # End Example 45 for doc in cursor: @@ -456,8 +502,7 @@ def test_projection(self): self.assertFalse("instock" in doc) # Start Example 46 - cursor = db.inventory.find( - {"status": "A"}, {"status": 0, "instock": 0}) + cursor = db.inventory.find({"status": "A"}, {"status": 0, "instock": 0}) # End Example 46 for doc in cursor: @@ -468,8 +513,7 @@ def test_projection(self): self.assertFalse("instock" in doc) # Start Example 47 - cursor = db.inventory.find( - {"status": "A"}, {"item": 1, "status": 1, "size.uom": 1}) + cursor = db.inventory.find({"status": "A"}, {"item": 1, "status": 1, "size.uom": 1}) # End Example 47 for doc in cursor: @@ -478,10 +522,10 @@ def test_projection(self): self.assertTrue("status" in doc) self.assertTrue("size" in doc) self.assertFalse("instock" in doc) - size = doc['size'] - self.assertTrue('uom' in size) - self.assertFalse('h' in size) - self.assertFalse('w' in size) + size = doc["size"] + self.assertTrue("uom" in size) + self.assertFalse("h" in size) + self.assertFalse("w" in size) # Start Example 48 cursor = db.inventory.find({"status": "A"}, {"size.uom": 0}) @@ -493,14 +537,13 @@ def test_projection(self): self.assertTrue("status" in doc) self.assertTrue("size" in doc) self.assertTrue("instock" in doc) - size = doc['size'] - self.assertFalse('uom' in size) - self.assertTrue('h' in size) - self.assertTrue('w' in size) + size = doc["size"] + self.assertFalse("uom" in size) + self.assertTrue("h" in size) + self.assertTrue("w" in size) # Start Example 49 - cursor = db.inventory.find( - {"status": "A"}, {"item": 1, "status": 1, "instock.qty": 1}) + cursor = db.inventory.find({"status": "A"}, {"item": 1, "status": 1, "instock.qty": 1}) # End Example 49 for doc in cursor: @@ -509,14 +552,14 @@ def test_projection(self): self.assertTrue("status" in doc) self.assertFalse("size" in doc) self.assertTrue("instock" in 
doc) - for subdoc in doc['instock']: - self.assertFalse('warehouse' in subdoc) - self.assertTrue('qty' in subdoc) + for subdoc in doc["instock"]: + self.assertFalse("warehouse" in subdoc) + self.assertTrue("qty" in subdoc) # Start Example 50 cursor = db.inventory.find( - {"status": "A"}, - {"item": 1, "status": 1, "instock": {"$slice": -1}}) + {"status": "A"}, {"item": 1, "status": 1, "instock": {"$slice": -1}} + ) # End Example 50 for doc in cursor: @@ -531,54 +574,77 @@ def test_update_and_replace(self): db = self.db # Start Example 51 - db.inventory.insert_many([ - {"item": "canvas", - "qty": 100, - "size": {"h": 28, "w": 35.5, "uom": "cm"}, - "status": "A"}, - {"item": "journal", - "qty": 25, - "size": {"h": 14, "w": 21, "uom": "cm"}, - "status": "A"}, - {"item": "mat", - "qty": 85, - "size": {"h": 27.9, "w": 35.5, "uom": "cm"}, - "status": "A"}, - {"item": "mousepad", - "qty": 25, - "size": {"h": 19, "w": 22.85, "uom": "cm"}, - "status": "P"}, - {"item": "notebook", - "qty": 50, - "size": {"h": 8.5, "w": 11, "uom": "in"}, - "status": "P"}, - {"item": "paper", - "qty": 100, - "size": {"h": 8.5, "w": 11, "uom": "in"}, - "status": "D"}, - {"item": "planner", - "qty": 75, - "size": {"h": 22.85, "w": 30, "uom": "cm"}, - "status": "D"}, - {"item": "postcard", - "qty": 45, - "size": {"h": 10, "w": 15.25, "uom": "cm"}, - "status": "A"}, - {"item": "sketchbook", - "qty": 80, - "size": {"h": 14, "w": 21, "uom": "cm"}, - "status": "A"}, - {"item": "sketch pad", - "qty": 95, - "size": {"h": 22.85, "w": 30.5, "uom": "cm"}, - "status": "A"}]) + db.inventory.insert_many( + [ + { + "item": "canvas", + "qty": 100, + "size": {"h": 28, "w": 35.5, "uom": "cm"}, + "status": "A", + }, + { + "item": "journal", + "qty": 25, + "size": {"h": 14, "w": 21, "uom": "cm"}, + "status": "A", + }, + { + "item": "mat", + "qty": 85, + "size": {"h": 27.9, "w": 35.5, "uom": "cm"}, + "status": "A", + }, + { + "item": "mousepad", + "qty": 25, + "size": {"h": 19, "w": 22.85, "uom": "cm"}, + "status": "P", + }, + { + "item": "notebook", + "qty": 50, + "size": {"h": 8.5, "w": 11, "uom": "in"}, + "status": "P", + }, + { + "item": "paper", + "qty": 100, + "size": {"h": 8.5, "w": 11, "uom": "in"}, + "status": "D", + }, + { + "item": "planner", + "qty": 75, + "size": {"h": 22.85, "w": 30, "uom": "cm"}, + "status": "D", + }, + { + "item": "postcard", + "qty": 45, + "size": {"h": 10, "w": 15.25, "uom": "cm"}, + "status": "A", + }, + { + "item": "sketchbook", + "qty": 80, + "size": {"h": 14, "w": 21, "uom": "cm"}, + "status": "A", + }, + { + "item": "sketch pad", + "qty": 95, + "size": {"h": 22.85, "w": 30.5, "uom": "cm"}, + "status": "A", + }, + ] + ) # End Example 51 # Start Example 52 db.inventory.update_one( {"item": "paper"}, - {"$set": {"size.uom": "cm", "status": "P"}, - "$currentDate": {"lastModified": True}}) + {"$set": {"size.uom": "cm", "status": "P"}, "$currentDate": {"lastModified": True}}, + ) # End Example 52 for doc in db.inventory.find({"item": "paper"}): @@ -589,8 +655,8 @@ def test_update_and_replace(self): # Start Example 53 db.inventory.update_many( {"qty": {"$lt": 50}}, - {"$set": {"size.uom": "in", "status": "P"}, - "$currentDate": {"lastModified": True}}) + {"$set": {"size.uom": "in", "status": "P"}, "$currentDate": {"lastModified": True}}, + ) # End Example 53 for doc in db.inventory.find({"qty": {"$lt": 50}}): @@ -601,10 +667,11 @@ def test_update_and_replace(self): # Start Example 54 db.inventory.replace_one( {"item": "paper"}, - {"item": "paper", - "instock": [ - {"warehouse": "A", "qty": 60}, - 
{"warehouse": "B", "qty": 40}]}) + { + "item": "paper", + "instock": [{"warehouse": "A", "qty": 60}, {"warehouse": "B", "qty": 40}], + }, + ) # End Example 54 for doc in db.inventory.find({"item": "paper"}, {"_id": 0}): @@ -617,27 +684,40 @@ def test_delete(self): db = self.db # Start Example 55 - db.inventory.insert_many([ - {"item": "journal", - "qty": 25, - "size": {"h": 14, "w": 21, "uom": "cm"}, - "status": "A"}, - {"item": "notebook", - "qty": 50, - "size": {"h": 8.5, "w": 11, "uom": "in"}, - "status": "P"}, - {"item": "paper", - "qty": 100, - "size": {"h": 8.5, "w": 11, "uom": "in"}, - "status": "D"}, - {"item": "planner", - "qty": 75, - "size": {"h": 22.85, "w": 30, "uom": "cm"}, - "status": "D"}, - {"item": "postcard", - "qty": 45, - "size": {"h": 10, "w": 15.25, "uom": "cm"}, - "status": "A"}]) + db.inventory.insert_many( + [ + { + "item": "journal", + "qty": 25, + "size": {"h": 14, "w": 21, "uom": "cm"}, + "status": "A", + }, + { + "item": "notebook", + "qty": 50, + "size": {"h": 8.5, "w": 11, "uom": "in"}, + "status": "P", + }, + { + "item": "paper", + "qty": 100, + "size": {"h": 8.5, "w": 11, "uom": "in"}, + "status": "D", + }, + { + "item": "planner", + "qty": 75, + "size": {"h": 22.85, "w": 30, "uom": "cm"}, + "status": "D", + }, + { + "item": "postcard", + "qty": 45, + "size": {"h": 10, "w": 15.25, "uom": "cm"}, + "status": "A", + }, + ] + ) # End Example 55 self.assertEqual(db.inventory.count_documents({}), 5) @@ -682,7 +762,7 @@ def insert_docs(): # End Changestream Example 1 # Start Changestream Example 2 - cursor = db.inventory.watch(full_document='updateLookup') + cursor = db.inventory.watch(full_document="updateLookup") document = next(cursor) # End Changestream Example 2 @@ -694,8 +774,8 @@ def insert_docs(): # Start Changestream Example 4 pipeline = [ - {'$match': {'fullDocument.username': 'alice'}}, - {'$addFields': {'newField': 'this is an added field!'}} + {"$match": {"fullDocument.username": "alice"}}, + {"$addFields": {"newField": "this is an added field!"}}, ] cursor = db.inventory.watch(pipeline=pipeline) document = next(cursor) @@ -708,83 +788,77 @@ def test_aggregate_examples(self): db = self.db # Start Aggregation Example 1 - db.sales.aggregate([ - {"$match": {"items.fruit": "banana"}}, - {"$sort": {"date": 1}} - ]) + db.sales.aggregate([{"$match": {"items.fruit": "banana"}}, {"$sort": {"date": 1}}]) # End Aggregation Example 1 # Start Aggregation Example 2 - db.sales.aggregate([ - {"$unwind": "$items"}, - {"$match": {"items.fruit": "banana"}}, - {"$group": { - "_id": {"day": {"$dayOfWeek": "$date"}}, - "count": {"$sum": "$items.quantity"}} - }, - {"$project": { - "dayOfWeek": "$_id.day", - "numberSold": "$count", - "_id": 0} - }, - {"$sort": {"numberSold": 1}} - ]) + db.sales.aggregate( + [ + {"$unwind": "$items"}, + {"$match": {"items.fruit": "banana"}}, + { + "$group": { + "_id": {"day": {"$dayOfWeek": "$date"}}, + "count": {"$sum": "$items.quantity"}, + } + }, + {"$project": {"dayOfWeek": "$_id.day", "numberSold": "$count", "_id": 0}}, + {"$sort": {"numberSold": 1}}, + ] + ) # End Aggregation Example 2 # Start Aggregation Example 3 - db.sales.aggregate([ - {"$unwind": "$items"}, - {"$group": { - "_id": {"day": {"$dayOfWeek": "$date"}}, - "items_sold": {"$sum": "$items.quantity"}, - "revenue": { - "$sum": { - "$multiply": [ - "$items.quantity", "$items.price"] - } + db.sales.aggregate( + [ + {"$unwind": "$items"}, + { + "$group": { + "_id": {"day": {"$dayOfWeek": "$date"}}, + "items_sold": {"$sum": "$items.quantity"}, + "revenue": {"$sum": {"$multiply": 
["$items.quantity", "$items.price"]}}, } - } - }, - {"$project": { - "day": "$_id.day", - "revenue": 1, - "items_sold": 1, - "discount": { - "$cond": { - "if": {"$lte": ["$revenue", 250]}, - "then": 25, - "else": 0 - } + }, + { + "$project": { + "day": "$_id.day", + "revenue": 1, + "items_sold": 1, + "discount": { + "$cond": {"if": {"$lte": ["$revenue", 250]}, "then": 25, "else": 0} + }, } - } - } - ]) + }, + ] + ) # End Aggregation Example 3 # Start Aggregation Example 4 - db.air_alliances.aggregate([ - {"$lookup": { - "from": "air_airlines", - "let": {"constituents": "$airlines"}, - "pipeline": [ - {"$match": {"$expr": {"$in": ["$name", "$$constituents"]}}} - ], - "as": "airlines" - } - }, - {"$project": { - "_id": 0, - "name": 1, - "airlines": { - "$filter": { - "input": "$airlines", - "as": "airline", - "cond": {"$eq": ["$$airline.country", "Canada"]} - } + db.air_alliances.aggregate( + [ + { + "$lookup": { + "from": "air_airlines", + "let": {"constituents": "$airlines"}, + "pipeline": [{"$match": {"$expr": {"$in": ["$name", "$$constituents"]}}}], + "as": "airlines", } - } - } - ]) + }, + { + "$project": { + "_id": 0, + "name": 1, + "airlines": { + "$filter": { + "input": "$airlines", + "as": "airline", + "cond": {"$eq": ["$$airline.country", "Canada"]}, + } + }, + } + }, + ] + ) # End Aggregation Example 4 def test_commands(self): @@ -809,7 +883,7 @@ def test_index_management(self): # Start Index Example 1 db.restaurants.create_index( [("cuisine", pymongo.ASCENDING), ("name", pymongo.ASCENDING)], - partialFilterExpression={"rating": {"$gt": 5}} + partialFilterExpression={"rating": {"$gt": 5}}, ) # End Index Example 1 @@ -823,18 +897,14 @@ def test_misc(self): # 2. Tunable consistency controls collection = client.my_database.my_collection with client.start_session() as session: - collection.insert_one({'_id': 1}, session=session) - collection.update_one( - {'_id': 1}, {"$set": {"a": 1}}, session=session) + collection.insert_one({"_id": 1}, session=session) + collection.update_one({"_id": 1}, {"$set": {"a": 1}}, session=session) for doc in collection.find({}, session=session): pass # 3. 
Exploiting the power of arrays collection = client.test.array_updates_test - collection.update_one( - {'_id': 1}, - {"$set": {"a.$[i].b": 2}}, - array_filters=[{"i.b": 0}]) + collection.update_one({"_id": 1}, {"$set": {"a.$[i].b": 2}}, array_filters=[{"i.b": 0}]) class TestTransactionExamples(IntegrationTest): @@ -848,8 +918,7 @@ def test_transactions(self): employees = client.hr.employees events = client.reporting.events employees.insert_one({"employee": 3, "status": "Active"}) - events.insert_one( - {"employee": 3, "status": {"new": "Active", "old": None}}) + events.insert_one({"employee": 3, "status": {"new": "Active", "old": None}}) # Start Transactions Intro Example 1 @@ -858,15 +927,14 @@ def update_employee_info(session): events_coll = session.client.reporting.events with session.start_transaction( - read_concern=ReadConcern("snapshot"), - write_concern=WriteConcern(w="majority")): + read_concern=ReadConcern("snapshot"), write_concern=WriteConcern(w="majority") + ): employees_coll.update_one( - {"employee": 3}, {"$set": {"status": "Inactive"}}, - session=session) + {"employee": 3}, {"$set": {"status": "Inactive"}}, session=session + ) events_coll.insert_one( - {"employee": 3, "status": { - "new": "Inactive", "old": "Active"}}, - session=session) + {"employee": 3, "status": {"new": "Inactive", "old": "Active"}}, session=session + ) while True: try: @@ -876,14 +944,15 @@ def update_employee_info(session): break except (ConnectionFailure, OperationFailure) as exc: # Can retry commit - if exc.has_error_label( - "UnknownTransactionCommitResult"): - print("UnknownTransactionCommitResult, retrying " - "commit operation ...") + if exc.has_error_label("UnknownTransactionCommitResult"): + print( + "UnknownTransactionCommitResult, retrying " "commit operation ..." + ) continue else: print("Error during commit ...") raise + # End Transactions Intro Example 1 with client.start_session() as session: @@ -892,7 +961,7 @@ def update_employee_info(session): employee = employees.find_one({"employee": 3}) assert employee is not None self.assertIsNotNone(employee) - self.assertEqual(employee['status'], 'Inactive') + self.assertEqual(employee["status"], "Inactive") # Start Transactions Retry Example 1 def run_transaction_with_retry(txn_func, session): @@ -901,16 +970,15 @@ def run_transaction_with_retry(txn_func, session): txn_func(session) # performs transaction break except (ConnectionFailure, OperationFailure) as exc: - print("Transaction aborted. Caught exception during " - "transaction.") + print("Transaction aborted. 
Caught exception during " "transaction.")

                    # If transient error, retry the whole transaction
                    if exc.has_error_label("TransientTransactionError"):
-                        print("TransientTransactionError, retrying"
-                              "transaction ...")
+                        print("TransientTransactionError, retrying " "transaction ...")
                        continue
                    else:
                        raise
+
        # End Transactions Retry Example 1

        with client.start_session() as session:
@@ -919,7 +987,7 @@ def run_transaction_with_retry(txn_func, session):
         employee = employees.find_one({"employee": 3})
         assert employee is not None
         self.assertIsNotNone(employee)
-        self.assertEqual(employee['status'], 'Inactive')
+        self.assertEqual(employee["status"], "Inactive")

        # Start Transactions Retry Example 2
        def commit_with_retry(session):
@@ -932,23 +1000,21 @@ def commit_with_retry(session):
             except (ConnectionFailure, OperationFailure) as exc:
                 # Can retry commit
                 if exc.has_error_label("UnknownTransactionCommitResult"):
-                    print("UnknownTransactionCommitResult, retrying "
-                          "commit operation ...")
+                    print("UnknownTransactionCommitResult, retrying " "commit operation ...")
                     continue
                 else:
                     print("Error during commit ...")
                     raise
+
         # End Transactions Retry Example 2

         # Test commit_with_retry from the previous examples
         def _insert_employee_retry_commit(session):
             with session.start_transaction():
-                employees.insert_one(
-                    {"employee": 4, "status": "Active"},
-                    session=session)
+                employees.insert_one({"employee": 4, "status": "Active"}, session=session)
                 events.insert_one(
-                    {"employee": 4, "status": {"new": "Active", "old": None}},
-                    session=session)
+                    {"employee": 4, "status": {"new": "Active", "old": None}}, session=session
+                )
                 commit_with_retry(session)

@@ -958,7 +1024,7 @@ def _insert_employee_retry_commit(session):
         employee = employees.find_one({"employee": 4})
         assert employee is not None
         self.assertIsNotNone(employee)
-        self.assertEqual(employee['status'], 'Active')
+        self.assertEqual(employee["status"], "Active")

         # Start Transactions Retry Example 3
@@ -970,8 +1036,7 @@ def run_transaction_with_retry(txn_func, session):
             except (ConnectionFailure, OperationFailure) as exc:
                 # If transient error, retry the whole transaction
                 if exc.has_error_label("TransientTransactionError"):
-                    print("TransientTransactionError, retrying "
-                          "transaction ...")
+                    print("TransientTransactionError, retrying " "transaction ...")
                     continue
                 else:
                     raise
@@ -986,8 +1051,7 @@ def commit_with_retry(session):
             except (ConnectionFailure, OperationFailure) as exc:
                 # Can retry commit
                 if exc.has_error_label("UnknownTransactionCommitResult"):
-                    print("UnknownTransactionCommitResult, retrying "
-                          "commit operation ...")
+                    print("UnknownTransactionCommitResult, retrying " "commit operation ...")
                     continue
                 else:
                     print("Error during commit ...")
@@ -1000,16 +1064,16 @@ def update_employee_info(session):
             events_coll = session.client.reporting.events

             with session.start_transaction(
-                read_concern=ReadConcern("snapshot"),
-                write_concern=WriteConcern(w="majority"),
-                read_preference=ReadPreference.PRIMARY):
+                read_concern=ReadConcern("snapshot"),
+                write_concern=WriteConcern(w="majority"),
+                read_preference=ReadPreference.PRIMARY,
+            ):
                 employees_coll.update_one(
-                    {"employee": 3}, {"$set": {"status": "Inactive"}},
-                    session=session)
+                    {"employee": 3}, {"$set": {"status": "Inactive"}}, session=session
+                )
                 events_coll.insert_one(
-                    {"employee": 3, "status": {
-                        "new": "Inactive", "old": "Active"}},
-                    session=session)
+                    {"employee": 3, "status": {"new": "Inactive", "old": "Active"}}, session=session
+                )

                 commit_with_retry(session)
@@ -1026,7 +1090,7 @@ def 
update_employee_info(session):
         employee = employees.find_one({"employee": 3})
         assert employee is not None
         self.assertIsNotNone(employee)
-        self.assertEqual(employee['status'], 'Inactive')
+        self.assertEqual(employee["status"], "Inactive")

         MongoClient = lambda _: rs_client()
         uriString = None
@@ -1042,10 +1106,8 @@ def update_employee_info(session):
         wc_majority = WriteConcern("majority", wtimeout=1000)

         # Prereq: Create collections.
-        client.get_database(
-            "mydb1", write_concern=wc_majority).foo.insert_one({'abc': 0})
-        client.get_database(
-            "mydb2", write_concern=wc_majority).bar.insert_one({'xyz': 0})
+        client.get_database("mydb1", write_concern=wc_majority).foo.insert_one({"abc": 0})
+        client.get_database("mydb2", write_concern=wc_majority).bar.insert_one({"xyz": 0})

         # Step 1: Define the callback that specifies the sequence of operations to perform inside the transaction.
         def callback(session):
             collection_one = session.client.mydb1.foo
             collection_two = session.client.mydb2.bar

             # Important: You must pass the session to the operations.
-            collection_one.insert_one({'abc': 1}, session=session)
-            collection_two.insert_one({'xyz': 999}, session=session)
+            collection_one.insert_one({"abc": 1}, session=session)
+            collection_two.insert_one({"xyz": 999}, session=session)

         # Step 2: Start a client session.
         with client.start_session() as session:
             # Step 3: Use with_transaction to start a transaction, execute the callback, and commit (or abort on error).
             session.with_transaction(
-                callback, read_concern=ReadConcern('local'),
+                callback,
+                read_concern=ReadConcern("local"),
                 write_concern=wc_majority,
-                read_preference=ReadPreference.PRIMARY)
+                read_preference=ReadPreference.PRIMARY,
+            )

         # End Transactions withTxn API Example 1

@@ -1073,24 +1137,26 @@ class TestCausalConsistencyExamples(IntegrationTest):
     def test_causal_consistency(self):
         # Causal consistency examples
         client = self.client
-        self.addCleanup(client.drop_database, 'test')
-        client.test.drop_collection('items')
-        client.test.items.insert_one({
-            'sku': "111", 'name': 'Peanuts',
-            'start':datetime.datetime.today()})
+        self.addCleanup(client.drop_database, "test")
+        client.test.drop_collection("items")
+        client.test.items.insert_one(
+            {"sku": "111", "name": "Peanuts", "start": datetime.datetime.today()}
+        )

         # Start Causal Consistency Example 1
         with client.start_session(causal_consistency=True) as s1:
             current_date = datetime.datetime.today()
             items = client.get_database(
-                'test', read_concern=ReadConcern('majority'),
-                write_concern=WriteConcern('majority', wtimeout=1000)).items
+                "test",
+                read_concern=ReadConcern("majority"),
+                write_concern=WriteConcern("majority", wtimeout=1000),
+            ).items
             items.update_one(
-                {'sku': "111", 'end': None},
-                {'$set': {'end': current_date}}, session=s1)
+                {"sku": "111", "end": None}, {"$set": {"end": current_date}}, session=s1
+            )
             items.insert_one(
-                {'sku': "nuts-111", 'name': "Pecans",
-                 'start': current_date}, session=s1)
+                {"sku": "nuts-111", "name": "Pecans", "start": current_date}, session=s1
+            )
         # End Causal Consistency Example 1

         assert s1.cluster_time is not None
@@ -1102,10 +1168,12 @@ def test_causal_consistency(self):
             s2.advance_operation_time(s1.operation_time)

             items = client.get_database(
-                'test', read_preference=ReadPreference.SECONDARY,
-                read_concern=ReadConcern('majority'),
+                "test",
+                read_preference=ReadPreference.SECONDARY,
+                read_concern=ReadConcern("majority"),
+
write_concern=WriteConcern("majority", wtimeout=1000), + ).items + for item in items.find({"end": None}, session=s2): print(item) # End Causal Consistency Example 2 @@ -1114,35 +1182,33 @@ class TestVersionedApiExamples(IntegrationTest): @client_context.require_version_min(4, 7) def test_versioned_api(self): # Versioned API examples - MongoClient = lambda _, server_api: rs_client( - server_api=server_api, connect=False) + MongoClient = lambda _, server_api: rs_client(server_api=server_api, connect=False) uri = None # Start Versioned API Example 1 from pymongo.server_api import ServerApi + client = MongoClient(uri, server_api=ServerApi("1")) # End Versioned API Example 1 # Start Versioned API Example 2 - client = MongoClient( - uri, server_api=ServerApi("1", strict=True)) + client = MongoClient(uri, server_api=ServerApi("1", strict=True)) # End Versioned API Example 2 # Start Versioned API Example 3 - client = MongoClient( - uri, server_api=ServerApi("1", strict=False)) + client = MongoClient(uri, server_api=ServerApi("1", strict=False)) # End Versioned API Example 3 # Start Versioned API Example 4 - client = MongoClient( - uri, server_api=ServerApi("1", deprecation_errors=True)) + client = MongoClient(uri, server_api=ServerApi("1", deprecation_errors=True)) # End Versioned API Example 4 @client_context.require_version_min(4, 7) def test_versioned_api_migration(self): # SERVER-58785 - if (client_context.is_topology_type(["sharded"]) and - not client_context.version.at_least(5, 0, 2)): + if client_context.is_topology_type(["sharded"]) and not client_context.version.at_least( + 5, 0, 2 + ): self.skipTest("This test needs MongoDB 5.0.2 or newer") client = rs_client(server_api=ServerApi("1", strict=True)) @@ -1151,22 +1217,74 @@ def test_versioned_api_migration(self): # Start Versioned API Example 5 def strptime(s): return datetime.datetime.strptime(s, "%Y-%m-%dT%H:%M:%SZ") - client.db.sales.insert_many([ - {"_id": 1, "item": "abc", "price": 10, "quantity": 2, "date": strptime("2021-01-01T08:00:00Z")}, - {"_id": 2, "item": "jkl", "price": 20, "quantity": 1, "date": strptime("2021-02-03T09:00:00Z")}, - {"_id": 3, "item": "xyz", "price": 5, "quantity": 5, "date": strptime("2021-02-03T09:05:00Z")}, - {"_id": 4, "item": "abc", "price": 10, "quantity": 10, "date": strptime("2021-02-15T08:00:00Z")}, - {"_id": 5, "item": "xyz", "price": 5, "quantity": 10, "date": strptime("2021-02-15T09:05:00Z")}, - {"_id": 6, "item": "xyz", "price": 5, "quantity": 5, "date": strptime("2021-02-15T12:05:10Z")}, - {"_id": 7, "item": "xyz", "price": 5, "quantity": 10, "date": strptime("2021-02-15T14:12:12Z")}, - {"_id": 8, "item": "abc", "price": 10, "quantity": 5, "date": strptime("2021-03-16T20:20:13Z")} - ]) + + client.db.sales.insert_many( + [ + { + "_id": 1, + "item": "abc", + "price": 10, + "quantity": 2, + "date": strptime("2021-01-01T08:00:00Z"), + }, + { + "_id": 2, + "item": "jkl", + "price": 20, + "quantity": 1, + "date": strptime("2021-02-03T09:00:00Z"), + }, + { + "_id": 3, + "item": "xyz", + "price": 5, + "quantity": 5, + "date": strptime("2021-02-03T09:05:00Z"), + }, + { + "_id": 4, + "item": "abc", + "price": 10, + "quantity": 10, + "date": strptime("2021-02-15T08:00:00Z"), + }, + { + "_id": 5, + "item": "xyz", + "price": 5, + "quantity": 10, + "date": strptime("2021-02-15T09:05:00Z"), + }, + { + "_id": 6, + "item": "xyz", + "price": 5, + "quantity": 5, + "date": strptime("2021-02-15T12:05:10Z"), + }, + { + "_id": 7, + "item": "xyz", + "price": 5, + "quantity": 10, + "date": 
strptime("2021-02-15T14:12:12Z"), + }, + { + "_id": 8, + "item": "abc", + "price": 10, + "quantity": 5, + "date": strptime("2021-03-16T20:20:13Z"), + }, + ] + ) # End Versioned API Example 5 with self.assertRaisesRegex( - OperationFailure, "Provided apiStrict:true, but the command " - "count is not in API Version 1"): - client.db.command('count', 'sales', query={}) + OperationFailure, + "Provided apiStrict:true, but the command " "count is not in API Version 1", + ): + client.db.command("count", "sales", query={}) # Start Versioned API Example 6 # pymongo.errors.OperationFailure: Provided apiStrict:true, but the command count is not in API Version 1, full error: {'ok': 0.0, 'errmsg': 'Provided apiStrict:true, but the command count is not in API Version 1', 'code': 323, 'codeName': 'APIStrictError'} # End Versioned API Example 6 diff --git a/test/test_grid_file.py b/test/test_grid_file.py index 2208e97b42..27d82e242b 100644 --- a/test/test_grid_file.py +++ b/test/test_grid_file.py @@ -21,34 +21,34 @@ import io import sys import zipfile - from io import BytesIO from pymongo.database import Database sys.path[0:0] = [""] +from test import IntegrationTest, qcheck, unittest +from test.utils import EventListener, rs_or_single_client + from bson.objectid import ObjectId from gridfs import GridFS -from gridfs.grid_file import (DEFAULT_CHUNK_SIZE, - _SEEK_CUR, - _SEEK_END, - GridIn, - GridOut, - GridOutCursor) from gridfs.errors import NoFile +from gridfs.grid_file import ( + _SEEK_CUR, + _SEEK_END, + DEFAULT_CHUNK_SIZE, + GridIn, + GridOut, + GridOutCursor, +) from pymongo import MongoClient from pymongo.errors import ConfigurationError, ServerSelectionTimeoutError from pymongo.message import _CursorAddress -from test import (IntegrationTest, - unittest, - qcheck) -from test.utils import rs_or_single_client, EventListener class TestGridFileNoConnect(unittest.TestCase): - """Test GridFile features on a client that does not connect. 
- """ + """Test GridFile features on a client that does not connect.""" + db: Database @classmethod @@ -58,9 +58,17 @@ def setUpClass(cls): def test_grid_in_custom_opts(self): self.assertRaises(TypeError, GridIn, "foo") - a = GridIn(self.db.fs, _id=5, filename="my_file", - contentType="text/html", chunkSize=1000, aliases=["foo"], - metadata={"foo": 1, "bar": 2}, bar=3, baz="hello") + a = GridIn( + self.db.fs, + _id=5, + filename="my_file", + contentType="text/html", + chunkSize=1000, + aliases=["foo"], + metadata={"foo": 1, "bar": 2}, + bar=3, + baz="hello", + ) self.assertEqual(5, a._id) self.assertEqual("my_file", a.filename) @@ -73,15 +81,13 @@ def test_grid_in_custom_opts(self): self.assertEqual("hello", a.baz) self.assertRaises(AttributeError, getattr, a, "mike") - b = GridIn(self.db.fs, - content_type="text/html", chunk_size=1000, baz=100) + b = GridIn(self.db.fs, content_type="text/html", chunk_size=1000, baz=100) self.assertEqual("text/html", b.content_type) self.assertEqual(1000, b.chunk_size) self.assertEqual(100, b.baz) class TestGridFile(IntegrationTest): - def setUp(self): self.cleanup_colls(self.db.fs.files, self.db.fs.chunks) @@ -226,30 +232,48 @@ def test_grid_out_default_opts(self): self.assertEqual(None, b.metadata) self.assertEqual(None, b.md5) - for attr in ["_id", "name", "content_type", "length", "chunk_size", - "upload_date", "aliases", "metadata", "md5"]: + for attr in [ + "_id", + "name", + "content_type", + "length", + "chunk_size", + "upload_date", + "aliases", + "metadata", + "md5", + ]: self.assertRaises(AttributeError, setattr, b, attr, 5) def test_grid_out_cursor_options(self): - self.assertRaises(TypeError, GridOutCursor.__init__, self.db.fs, {}, - projection={"filename": 1}) + self.assertRaises( + TypeError, GridOutCursor.__init__, self.db.fs, {}, projection={"filename": 1} + ) cursor = GridOutCursor(self.db.fs, {}) cursor_clone = cursor.clone() cursor_dict = cursor.__dict__.copy() - cursor_dict.pop('_Cursor__session') + cursor_dict.pop("_Cursor__session") cursor_clone_dict = cursor_clone.__dict__.copy() - cursor_clone_dict.pop('_Cursor__session') + cursor_clone_dict.pop("_Cursor__session") self.assertDictEqual(cursor_dict, cursor_clone_dict) self.assertRaises(NotImplementedError, cursor.add_option, 0) self.assertRaises(NotImplementedError, cursor.remove_option, 0) def test_grid_out_custom_opts(self): - one = GridIn(self.db.fs, _id=5, filename="my_file", - contentType="text/html", chunkSize=1000, aliases=["foo"], - metadata={"foo": 1, "bar": 2}, bar=3, baz="hello") + one = GridIn( + self.db.fs, + _id=5, + filename="my_file", + contentType="text/html", + chunkSize=1000, + aliases=["foo"], + metadata={"foo": 1, "bar": 2}, + bar=3, + baz="hello", + ) one.write(b"hello world") one.close() @@ -267,8 +291,17 @@ def test_grid_out_custom_opts(self): self.assertEqual(3, two.bar) self.assertEqual(None, two.md5) - for attr in ["_id", "name", "content_type", "length", "chunk_size", - "upload_date", "aliases", "metadata", "md5"]: + for attr in [ + "_id", + "name", + "content_type", + "length", + "chunk_size", + "upload_date", + "aliases", + "metadata", + "md5", + ]: self.assertRaises(AttributeError, setattr, two, attr, 5) def test_grid_out_file_document(self): @@ -279,8 +312,7 @@ def test_grid_out_file_document(self): two = GridOut(self.db.fs, file_document=self.db.fs.files.find_one()) self.assertEqual(b"foo bar", two.read()) - three = GridOut(self.db.fs, 5, - file_document=self.db.fs.files.find_one()) + three = GridOut(self.db.fs, 5, 
file_document=self.db.fs.files.find_one()) self.assertEqual(b"foo bar", three.read()) four = GridOut(self.db.fs, file_document={}) @@ -307,8 +339,7 @@ def test_write_file_like(self): five.write(buffer) five.write(b" and mongodb") five.close() - self.assertEqual(b"hello world and mongodb", - GridOut(self.db.fs, five._id).read()) + self.assertEqual(b"hello world and mongodb", GridOut(self.db.fs, five._id).read()) def test_write_lines(self): a = GridIn(self.db.fs) @@ -338,7 +369,7 @@ def test_closed(self): self.assertTrue(g.closed) def test_multi_chunk_file(self): - random_string = b'a' * (DEFAULT_CHUNK_SIZE + 1000) + random_string = b"a" * (DEFAULT_CHUNK_SIZE + 1000) f = GridIn(self.db.fs) f.write(random_string) @@ -372,8 +403,7 @@ def helper(data): self.assertEqual(data, g.read(10) + g.read(10)) return True - qcheck.check_unittest(self, helper, - qcheck.gen_string(qcheck.gen_range(0, 20))) + qcheck.check_unittest(self, helper, qcheck.gen_string(qcheck.gen_range(0, 20))) def test_seek(self): f = GridIn(self.db.fs, chunkSize=3) @@ -431,10 +461,14 @@ def test_multiple_reads(self): def test_readline(self): f = GridIn(self.db.fs, chunkSize=5) - f.write((b"""Hello world, + f.write( + ( + b"""Hello world, How are you? Hope all is well. -Bye""")) +Bye""" + ) + ) f.close() # Try read(), then readline(). @@ -463,10 +497,14 @@ def test_readline(self): def test_readlines(self): f = GridIn(self.db.fs, chunkSize=5) - f.write((b"""Hello world, + f.write( + ( + b"""Hello world, How are you? Hope all is well. -Bye""")) +Bye""" + ) + ) f.close() # Try read(), then readlines(). @@ -486,13 +524,13 @@ def test_readlines(self): # Only readlines(). g = GridOut(self.db.fs, f._id) self.assertEqual( - [b"Hello world,\n", b"How are you?\n", b"Hope all is well.\n", b"Bye"], - g.readlines()) + [b"Hello world,\n", b"How are you?\n", b"Hope all is well.\n", b"Bye"], g.readlines() + ) g = GridOut(self.db.fs, f._id) self.assertEqual( - [b"Hello world,\n", b"How are you?\n", b"Hope all is well.\n", b"Bye"], - g.readlines(0)) + [b"Hello world,\n", b"How are you?\n", b"Hope all is well.\n", b"Bye"], g.readlines(0) + ) g = GridOut(self.db.fs, f._id) self.assertEqual([b"Hello world,\n"], g.readlines(1)) @@ -542,14 +580,13 @@ def test_iterator(self): self.assertEqual([b"hello world"], list(g)) def test_read_unaligned_buffer_size(self): - in_data = (b"This is a text that doesn't " - b"quite fit in a single 16-byte chunk.") + in_data = b"This is a text that doesn't " b"quite fit in a single 16-byte chunk." 
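        # (The reads below use a 13-byte buffer that is deliberately
        # misaligned with the 16-byte chunk size, so individual reads
        # straddle chunk boundaries.)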
f = GridIn(self.db.fs, chunkSize=16)
         f.write(in_data)
         f.close()

         g = GridOut(self.db.fs, f._id)
-        out_data = b''
+        out_data = b""
         while 1:
             s = g.read(13)
             if not s:
@@ -559,7 +596,7 @@ def test_read_unaligned_buffer_size(self):
         self.assertEqual(in_data, out_data)

     def test_readchunk(self):
-        in_data = b'a' * 10
+        in_data = b"a" * 10
         f = GridIn(self.db.fs, chunkSize=3)
         f.write(in_data)
         f.close()
@@ -639,13 +676,12 @@ def test_context_manager(self):
         self.assertEqual(contents, outfile.read())

     def test_prechunked_string(self):
-
         def write_me(s, chunk_size):
             buf = BytesIO(s)
             infile = GridIn(self.db.fs)
             while True:
                 to_write = buf.read(chunk_size)
-                if to_write == b'':
+                if to_write == b"":
                     break
                 infile.write(to_write)
             infile.close()
@@ -655,7 +691,7 @@ def write_me(s, chunk_size):
             data = outfile.read()
             self.assertEqual(s, data)

-        s = b'x' * DEFAULT_CHUNK_SIZE * 4
+        s = b"x" * DEFAULT_CHUNK_SIZE * 4
         # Test with default chunk size
         write_me(s, DEFAULT_CHUNK_SIZE)
         # Multiple
@@ -667,7 +703,7 @@ def test_grid_out_lazy_connect(self):
         fs = self.db.fs
         outfile = GridOut(fs, file_id=-1)
         self.assertRaises(NoFile, outfile.read)
-        self.assertRaises(NoFile, getattr, outfile, 'filename')
+        self.assertRaises(NoFile, getattr, outfile, "filename")

         infile = GridIn(fs, filename=1)
         infile.close()
@@ -680,11 +716,10 @@ def test_grid_out_lazy_connect(self):
         outfile.readchunk()

     def test_grid_in_lazy_connect(self):
-        client = MongoClient('badhost', connect=False,
-                             serverSelectionTimeoutMS=10)
+        client = MongoClient("badhost", connect=False, serverSelectionTimeoutMS=10)
         fs = client.db.fs
         infile = GridIn(fs, file_id=-1, chunk_size=1)
-        self.assertRaises(ServerSelectionTimeoutError, infile.write, b'data')
+        self.assertRaises(ServerSelectionTimeoutError, infile.write, b"data")
         self.assertRaises(ServerSelectionTimeoutError, infile.close)

     def test_unacknowledged(self):
@@ -696,7 +731,7 @@ def test_survive_cursor_not_found(self):
         # By default the find command returns 101 documents in the first batch.
         # Use 102 chunks to cause a single getMore.
         chunk_size = 1024
-        data = b'd' * (102 * chunk_size)
+        data = b"d" * (102 * chunk_size)
         listener = EventListener()
         client = rs_or_single_client(event_listeners=[listener])
         db = client.pymongo_test
@@ -711,7 +746,8 @@ def test_survive_cursor_not_found(self):
         # readchunk().
         client._close_cursor_now(
             outfile._GridOut__chunk_iter._cursor.cursor_id,
-            _CursorAddress(client.address, db.fs.chunks.full_name))
+            _CursorAddress(client.address, db.fs.chunks.full_name),
+        )

         # Read the rest of the file without error.
self.assertEqual(len(outfile.read()), len(data) - chunk_size) diff --git a/test/test_gridfs.py b/test/test_gridfs.py index 3d8a7d8f6b..ec88dcd488 100644 --- a/test/test_gridfs.py +++ b/test/test_gridfs.py @@ -21,33 +21,28 @@ import sys import threading import time - from io import BytesIO sys.path[0:0] = [""] +from test import IntegrationTest, client_context, unittest +from test.utils import joinall, one, rs_client, rs_or_single_client, single_client + +import gridfs from bson.binary import Binary +from gridfs.errors import CorruptGridFile, FileExists, NoFile +from gridfs.grid_file import GridOutCursor from pymongo.database import Database +from pymongo.errors import ( + ConfigurationError, + NotPrimaryError, + ServerSelectionTimeoutError, +) from pymongo.mongo_client import MongoClient -from pymongo.errors import (ConfigurationError, - NotPrimaryError, - ServerSelectionTimeoutError) from pymongo.read_preferences import ReadPreference -import gridfs -from gridfs.errors import CorruptGridFile, FileExists, NoFile -from gridfs.grid_file import GridOutCursor -from test import (client_context, - unittest, - IntegrationTest) -from test.utils import (joinall, - one, - rs_client, - rs_or_single_client, - single_client) class JustWrite(threading.Thread): - def __init__(self, fs, n): threading.Thread.__init__(self) self.fs = fs @@ -62,7 +57,6 @@ def run(self): class JustRead(threading.Thread): - def __init__(self, fs, n, results): threading.Thread.__init__(self) self.fs = fs @@ -101,8 +95,9 @@ def setUpClass(cls): cls.alt = gridfs.GridFS(cls.db, "alt") def setUp(self): - self.cleanup_colls(self.db.fs.files, self.db.fs.chunks, - self.db.alt.files, self.db.alt.chunks) + self.cleanup_colls( + self.db.fs.files, self.db.fs.chunks, self.db.alt.files, self.db.alt.chunks + ) def test_basic(self): oid = self.fs.put(b"hello world") @@ -146,8 +141,7 @@ def test_list(self): self.fs.put(b"foo", filename="test") self.fs.put(b"", filename="hello world") - self.assertEqual(set(["mike", "test", "hello world"]), - set(self.fs.list())) + self.assertEqual(set(["mike", "test", "hello world"]), set(self.fs.list())) def test_empty_file(self): oid = self.fs.put(b"") @@ -164,9 +158,8 @@ def test_empty_file(self): self.assertNotIn("md5", raw) def test_corrupt_chunk(self): - files_id = self.fs.put(b'foobar') - self.db.fs.chunks.update_one({'files_id': files_id}, - {'$set': {'data': Binary(b'foo', 0)}}) + files_id = self.fs.put(b"foobar") + self.db.fs.chunks.update_one({"files_id": files_id}, {"$set": {"data": Binary(b"foo", 0)}}) try: out = self.fs.get(files_id) self.assertRaises(CorruptGridFile, out.read) @@ -184,12 +177,18 @@ def test_put_ensures_index(self): files.drop() self.fs.put(b"junk") - self.assertTrue(any( - info.get('key') == [('files_id', 1), ('n', 1)] - for info in chunks.index_information().values())) - self.assertTrue(any( - info.get('key') == [('filename', 1), ('uploadDate', 1)] - for info in files.index_information().values())) + self.assertTrue( + any( + info.get("key") == [("files_id", 1), ("n", 1)] + for info in chunks.index_information().values() + ) + ) + self.assertTrue( + any( + info.get("key") == [("filename", 1), ("uploadDate", 1)] + for info in files.index_information().values() + ) + ) def test_alt_collection(self): oid = self.alt.put(b"hello world") @@ -211,8 +210,7 @@ def test_alt_collection(self): self.alt.put(b"foo", filename="test") self.alt.put(b"", filename="hello world") - self.assertEqual(set(["mike", "test", "hello world"]), - set(self.alt.list())) + self.assertEqual(set(["mike", "test", 
"hello world"]), set(self.alt.list())) def test_threaded_reads(self): self.fs.put(b"hello", _id="test") @@ -225,10 +223,7 @@ def test_threaded_reads(self): joinall(threads) - self.assertEqual( - 100 * [b'hello'], - results - ) + self.assertEqual(100 * [b"hello"], results) def test_threaded_writes(self): threads = [] @@ -242,10 +237,7 @@ def test_threaded_writes(self): self.assertEqual(f.read(), b"hello") # Should have created 100 versions of 'test' file - self.assertEqual( - 100, - self.db.fs.files.count_documents({'filename': 'test'}) - ) + self.assertEqual(100, self.db.fs.files.count_documents({"filename": "test"})) def test_get_last_version(self): one = self.fs.put(b"foo", filename="test") @@ -316,30 +308,25 @@ def test_get_version_with_metadata(self): three = self.fs.put(b"baz", filename="test", author="author2") self.assertEqual( - b"foo", - self.fs.get_version( - filename="test", author="author1", version=-2).read()) - self.assertEqual( - b"bar", self.fs.get_version( - filename="test", author="author1", version=-1).read()) - self.assertEqual( - b"foo", self.fs.get_version( - filename="test", author="author1", version=0).read()) + b"foo", self.fs.get_version(filename="test", author="author1", version=-2).read() + ) self.assertEqual( - b"bar", self.fs.get_version( - filename="test", author="author1", version=1).read()) + b"bar", self.fs.get_version(filename="test", author="author1", version=-1).read() + ) self.assertEqual( - b"baz", self.fs.get_version( - filename="test", author="author2", version=0).read()) + b"foo", self.fs.get_version(filename="test", author="author1", version=0).read() + ) self.assertEqual( - b"baz", self.fs.get_version(filename="test", version=-1).read()) + b"bar", self.fs.get_version(filename="test", author="author1", version=1).read() + ) self.assertEqual( - b"baz", self.fs.get_version(filename="test", version=2).read()) + b"baz", self.fs.get_version(filename="test", author="author2", version=0).read() + ) + self.assertEqual(b"baz", self.fs.get_version(filename="test", version=-1).read()) + self.assertEqual(b"baz", self.fs.get_version(filename="test", version=2).read()) - self.assertRaises( - NoFile, self.fs.get_version, filename="test", author="author3") - self.assertRaises( - NoFile, self.fs.get_version, filename="test", author="author1", version=2) + self.assertRaises(NoFile, self.fs.get_version, filename="test", author="author3") + self.assertRaises(NoFile, self.fs.get_version, filename="test", author="author1", version=2) self.fs.delete(one) self.fs.delete(two) @@ -359,7 +346,7 @@ def test_file_exists(self): one.close() two = self.fs.new_file(_id=123) - self.assertRaises(FileExists, two.write, b'x' * 262146) + self.assertRaises(FileExists, two.write, b"x" * 262146) def test_exists(self): oid = self.fs.put(b"hello") @@ -414,8 +401,7 @@ def iterate_file(grid_file): self.assertTrue(iterate_file(f)) def test_gridfs_lazy_connect(self): - client = MongoClient('badhost', connect=False, - serverSelectionTimeoutMS=10) + client = MongoClient("badhost", connect=False, serverSelectionTimeoutMS=10) db = client.db gfs = gridfs.GridFS(db) self.assertRaises(ServerSelectionTimeoutError, gfs.list) @@ -435,8 +421,7 @@ def test_gridfs_find(self): files = self.db.fs.files self.assertEqual(3, files.count_documents({"filename": "two"})) self.assertEqual(4, files.count_documents({})) - cursor = self.fs.find( - no_cursor_timeout=False).sort("uploadDate", -1).skip(1).limit(2) + cursor = self.fs.find(no_cursor_timeout=False).sort("uploadDate", -1).skip(1).limit(2) gout = next(cursor) 
self.assertEqual(b"test1", gout.read()) cursor.rewind() @@ -459,35 +444,34 @@ def test_delete_not_initialized(self): def test_gridfs_find_one(self): self.assertEqual(None, self.fs.find_one()) - id1 = self.fs.put(b'test1', filename='file1') + id1 = self.fs.put(b"test1", filename="file1") res = self.fs.find_one() assert res is not None - self.assertEqual(b'test1', res.read()) + self.assertEqual(b"test1", res.read()) - id2 = self.fs.put(b'test2', filename='file2', meta='data') + id2 = self.fs.put(b"test2", filename="file2", meta="data") res1 = self.fs.find_one(id1) assert res1 is not None - self.assertEqual(b'test1', res1.read()) + self.assertEqual(b"test1", res1.read()) res2 = self.fs.find_one(id2) assert res2 is not None - self.assertEqual(b'test2', res2.read()) + self.assertEqual(b"test2", res2.read()) - res3 = self.fs.find_one({'filename': 'file1'}) + res3 = self.fs.find_one({"filename": "file1"}) assert res3 is not None - self.assertEqual(b'test1', res3.read()) + self.assertEqual(b"test1", res3.read()) res4 = self.fs.find_one(id2) assert res4 is not None - self.assertEqual('data', res4.meta) + self.assertEqual("data", res4.meta) def test_grid_in_non_int_chunksize(self): # Lua, and perhaps other buggy GridFS clients, store size as a float. - data = b'data' - self.fs.put(data, filename='f') - self.db.fs.files.update_one({'filename': 'f'}, - {'$set': {'chunkSize': 100.0}}) + data = b"data" + self.fs.put(data, filename="f") + self.db.fs.files.update_one({"filename": "f"}, {"$set": {"chunkSize": 100.0}}) - self.assertEqual(data, self.fs.get_version('f').read()) + self.assertEqual(data, self.fs.get_version("f").read()) def test_unacknowledged(self): # w=0 is prohibited. @@ -509,7 +493,6 @@ def test_md5(self): class TestGridfsReplicaSet(IntegrationTest): - @classmethod @client_context.require_secondaries_count(1) def setUpClass(cls): @@ -517,51 +500,47 @@ def setUpClass(cls): @classmethod def tearDownClass(cls): - client_context.client.drop_database('gfsreplica') + client_context.client.drop_database("gfsreplica") def test_gridfs_replica_set(self): - rsc = rs_client( - w=client_context.w, - read_preference=ReadPreference.SECONDARY) + rsc = rs_client(w=client_context.w, read_preference=ReadPreference.SECONDARY) - fs = gridfs.GridFS(rsc.gfsreplica, 'gfsreplicatest') + fs = gridfs.GridFS(rsc.gfsreplica, "gfsreplicatest") gin = fs.new_file() self.assertEqual(gin._coll.read_preference, ReadPreference.PRIMARY) - oid = fs.put(b'foo') + oid = fs.put(b"foo") content = fs.get(oid).read() - self.assertEqual(b'foo', content) + self.assertEqual(b"foo", content) def test_gridfs_secondary(self): secondary_host, secondary_port = one(self.client.secondaries) secondary_connection = single_client( - secondary_host, secondary_port, - read_preference=ReadPreference.SECONDARY) + secondary_host, secondary_port, read_preference=ReadPreference.SECONDARY + ) # Should detect it's connected to secondary and not attempt to # create index - fs = gridfs.GridFS(secondary_connection.gfsreplica, 'gfssecondarytest') + fs = gridfs.GridFS(secondary_connection.gfsreplica, "gfssecondarytest") # This won't detect secondary, raises error - self.assertRaises(NotPrimaryError, fs.put, b'foo') + self.assertRaises(NotPrimaryError, fs.put, b"foo") def test_gridfs_secondary_lazy(self): # Should detect it's connected to secondary and not attempt to # create index. 
secondary_host, secondary_port = one(self.client.secondaries) client = single_client( - secondary_host, - secondary_port, - read_preference=ReadPreference.SECONDARY, - connect=False) + secondary_host, secondary_port, read_preference=ReadPreference.SECONDARY, connect=False + ) # Still no connection. - fs = gridfs.GridFS(client.gfsreplica, 'gfssecondarylazytest') + fs = gridfs.GridFS(client.gfsreplica, "gfssecondarylazytest") # Connects, doesn't create index. self.assertRaises(NoFile, fs.get_last_version) - self.assertRaises(NotPrimaryError, fs.put, 'data') + self.assertRaises(NotPrimaryError, fs.put, "data") if __name__ == "__main__": diff --git a/test/test_gridfs_bucket.py b/test/test_gridfs_bucket.py index 53f94991d3..8b0a9a3936 100644 --- a/test/test_gridfs_bucket.py +++ b/test/test_gridfs_bucket.py @@ -20,32 +20,26 @@ import itertools import threading import time - from io import BytesIO +from test import IntegrationTest, client_context, unittest +from test.utils import joinall, one, rs_client, rs_or_single_client, single_client +import gridfs from bson.binary import Binary from bson.int64 import Int64 from bson.objectid import ObjectId from bson.son import SON -import gridfs -from gridfs.errors import NoFile, CorruptGridFile -from pymongo.errors import (ConfigurationError, - NotPrimaryError, - ServerSelectionTimeoutError) +from gridfs.errors import CorruptGridFile, NoFile +from pymongo.errors import ( + ConfigurationError, + NotPrimaryError, + ServerSelectionTimeoutError, +) from pymongo.mongo_client import MongoClient from pymongo.read_preferences import ReadPreference -from test import (client_context, - unittest, - IntegrationTest) -from test.utils import (joinall, - one, - rs_client, - rs_or_single_client, - single_client) class JustWrite(threading.Thread): - def __init__(self, gfs, num): threading.Thread.__init__(self) self.gfs = gfs @@ -60,7 +54,6 @@ def run(self): class JustRead(threading.Thread): - def __init__(self, gfs, num, results): threading.Thread.__init__(self) self.gfs = gfs @@ -84,18 +77,16 @@ class TestGridfs(IntegrationTest): def setUpClass(cls): super(TestGridfs, cls).setUpClass() cls.fs = gridfs.GridFSBucket(cls.db) - cls.alt = gridfs.GridFSBucket( - cls.db, bucket_name="alt") + cls.alt = gridfs.GridFSBucket(cls.db, bucket_name="alt") def setUp(self): - self.cleanup_colls(self.db.fs.files, self.db.fs.chunks, - self.db.alt.files, self.db.alt.chunks) + self.cleanup_colls( + self.db.fs.files, self.db.fs.chunks, self.db.alt.files, self.db.alt.chunks + ) def test_basic(self): - oid = self.fs.upload_from_stream("test_filename", - b"hello world") - self.assertEqual(b"hello world", - self.fs.open_download_stream(oid).read()) + oid = self.fs.upload_from_stream("test_filename", b"hello world") + self.assertEqual(b"hello world", self.fs.open_download_stream(oid).read()) self.assertEqual(1, self.db.fs.files.count_documents({})) self.assertEqual(1, self.db.fs.chunks.count_documents({})) @@ -108,9 +99,7 @@ def test_multi_chunk_delete(self): self.assertEqual(0, self.db.fs.files.count_documents({})) self.assertEqual(0, self.db.fs.chunks.count_documents({})) gfs = gridfs.GridFSBucket(self.db) - oid = gfs.upload_from_stream("test_filename", - b"hello", - chunk_size_bytes=1) + oid = gfs.upload_from_stream("test_filename", b"hello", chunk_size_bytes=1) self.assertEqual(1, self.db.fs.files.count_documents({})) self.assertEqual(5, self.db.fs.chunks.count_documents({})) gfs.delete(oid) @@ -118,8 +107,7 @@ def test_multi_chunk_delete(self): self.assertEqual(0, 
self.db.fs.chunks.count_documents({})) def test_empty_file(self): - oid = self.fs.upload_from_stream("test_filename", - b"") + oid = self.fs.upload_from_stream("test_filename", b"") self.assertEqual(b"", self.fs.open_download_stream(oid).read()) self.assertEqual(1, self.db.fs.files.count_documents({})) self.assertEqual(0, self.db.fs.chunks.count_documents({})) @@ -133,10 +121,8 @@ def test_empty_file(self): self.assertNotIn("md5", raw) def test_corrupt_chunk(self): - files_id = self.fs.upload_from_stream("test_filename", - b'foobar') - self.db.fs.chunks.update_one({'files_id': files_id}, - {'$set': {'data': Binary(b'foo', 0)}}) + files_id = self.fs.upload_from_stream("test_filename", b"foobar") + self.db.fs.chunks.update_one({"files_id": files_id}, {"$set": {"data": Binary(b"foo", 0)}}) try: out = self.fs.open_download_stream(files_id) self.assertRaises(CorruptGridFile, out.read) @@ -154,37 +140,45 @@ def test_upload_ensures_index(self): files.drop() self.fs.upload_from_stream("filename", b"junk") - self.assertTrue(any( - info.get('key') == [('files_id', 1), ('n', 1)] - for info in chunks.index_information().values())) - self.assertTrue(any( - info.get('key') == [('filename', 1), ('uploadDate', 1)] - for info in files.index_information().values())) + self.assertTrue( + any( + info.get("key") == [("files_id", 1), ("n", 1)] + for info in chunks.index_information().values() + ) + ) + self.assertTrue( + any( + info.get("key") == [("filename", 1), ("uploadDate", 1)] + for info in files.index_information().values() + ) + ) def test_ensure_index_shell_compat(self): files = self.db.fs.files - for i, j in itertools.combinations_with_replacement( - [1, 1.0, Int64(1)], 2): + for i, j in itertools.combinations_with_replacement([1, 1.0, Int64(1)], 2): # Create the index with different numeric types (as might be done # from the mongo shell). - shell_index = [('filename', i), ('uploadDate', j)] - self.db.command('createIndexes', files.name, - indexes=[{'key': SON(shell_index), - 'name': 'filename_1.0_uploadDate_1.0'}]) + shell_index = [("filename", i), ("uploadDate", j)] + self.db.command( + "createIndexes", + files.name, + indexes=[{"key": SON(shell_index), "name": "filename_1.0_uploadDate_1.0"}], + ) # No error. 
self.fs.upload_from_stream("filename", b"data") - self.assertTrue(any( - info.get('key') == [('filename', 1), ('uploadDate', 1)] - for info in files.index_information().values())) + self.assertTrue( + any( + info.get("key") == [("filename", 1), ("uploadDate", 1)] + for info in files.index_information().values() + ) + ) files.drop() def test_alt_collection(self): - oid = self.alt.upload_from_stream("test_filename", - b"hello world") - self.assertEqual(b"hello world", - self.alt.open_download_stream(oid).read()) + oid = self.alt.upload_from_stream("test_filename", b"hello world") + self.assertEqual(b"hello world", self.alt.open_download_stream(oid).read()) self.assertEqual(1, self.db.alt.files.count_documents({})) self.assertEqual(1, self.db.alt.chunks.count_documents({})) @@ -194,18 +188,17 @@ def test_alt_collection(self): self.assertEqual(0, self.db.alt.chunks.count_documents({})) self.assertRaises(NoFile, self.alt.open_download_stream, "foo") - self.alt.upload_from_stream("foo", - b"hello world") - self.assertEqual(b"hello world", - self.alt.open_download_stream_by_name("foo").read()) + self.alt.upload_from_stream("foo", b"hello world") + self.assertEqual(b"hello world", self.alt.open_download_stream_by_name("foo").read()) self.alt.upload_from_stream("mike", b"") self.alt.upload_from_stream("test", b"foo") self.alt.upload_from_stream("hello world", b"") - self.assertEqual(set(["mike", "test", "hello world", "foo"]), - set(k["filename"] for k in list( - self.db.alt.files.find()))) + self.assertEqual( + set(["mike", "test", "hello world", "foo"]), + set(k["filename"] for k in list(self.db.alt.files.find())), + ) def test_threaded_reads(self): self.fs.upload_from_stream("test", b"hello") @@ -218,10 +211,7 @@ def test_threaded_reads(self): joinall(threads) - self.assertEqual( - 100 * [b'hello'], - results - ) + self.assertEqual(100 * [b"hello"], results) def test_threaded_writes(self): threads = [] @@ -235,10 +225,7 @@ def test_threaded_writes(self): self.assertEqual(fstr.read(), b"hello") # Should have created 100 versions of 'test' file - self.assertEqual( - 100, - self.db.fs.files.count_documents({'filename': 'test'}) - ) + self.assertEqual(100, self.db.fs.files.count_documents({"filename": "test"})) def test_get_last_version(self): one = self.fs.upload_from_stream("test", b"foo") @@ -250,17 +237,13 @@ def test_get_last_version(self): two = two._id three = self.fs.upload_from_stream("test", b"baz") - self.assertEqual(b"baz", - self.fs.open_download_stream_by_name("test").read()) + self.assertEqual(b"baz", self.fs.open_download_stream_by_name("test").read()) self.fs.delete(three) - self.assertEqual(b"bar", - self.fs.open_download_stream_by_name("test").read()) + self.assertEqual(b"bar", self.fs.open_download_stream_by_name("test").read()) self.fs.delete(two) - self.assertEqual(b"foo", - self.fs.open_download_stream_by_name("test").read()) + self.assertEqual(b"foo", self.fs.open_download_stream_by_name("test").read()) self.fs.delete(one) - self.assertRaises(NoFile, - self.fs.open_download_stream_by_name, "test") + self.assertRaises(NoFile, self.fs.open_download_stream_by_name, "test") def test_get_version(self): self.fs.upload_from_stream("test", b"foo") @@ -270,56 +253,41 @@ def test_get_version(self): self.fs.upload_from_stream("test", b"baz") time.sleep(0.01) - self.assertEqual(b"foo", self.fs.open_download_stream_by_name( - "test", revision=0).read()) - self.assertEqual(b"bar", self.fs.open_download_stream_by_name( - "test", revision=1).read()) - self.assertEqual(b"baz", 
self.fs.open_download_stream_by_name( - "test", revision=2).read()) - - self.assertEqual(b"baz", self.fs.open_download_stream_by_name( - "test", revision=-1).read()) - self.assertEqual(b"bar", self.fs.open_download_stream_by_name( - "test", revision=-2).read()) - self.assertEqual(b"foo", self.fs.open_download_stream_by_name( - "test", revision=-3).read()) - - self.assertRaises(NoFile, self.fs.open_download_stream_by_name, - "test", revision=3) - self.assertRaises(NoFile, self.fs.open_download_stream_by_name, - "test", revision=-4) + self.assertEqual(b"foo", self.fs.open_download_stream_by_name("test", revision=0).read()) + self.assertEqual(b"bar", self.fs.open_download_stream_by_name("test", revision=1).read()) + self.assertEqual(b"baz", self.fs.open_download_stream_by_name("test", revision=2).read()) + + self.assertEqual(b"baz", self.fs.open_download_stream_by_name("test", revision=-1).read()) + self.assertEqual(b"bar", self.fs.open_download_stream_by_name("test", revision=-2).read()) + self.assertEqual(b"foo", self.fs.open_download_stream_by_name("test", revision=-3).read()) + + self.assertRaises(NoFile, self.fs.open_download_stream_by_name, "test", revision=3) + self.assertRaises(NoFile, self.fs.open_download_stream_by_name, "test", revision=-4) def test_upload_from_stream(self): - oid = self.fs.upload_from_stream("test_file", - BytesIO(b"hello world"), - chunk_size_bytes=1) + oid = self.fs.upload_from_stream("test_file", BytesIO(b"hello world"), chunk_size_bytes=1) self.assertEqual(11, self.db.fs.chunks.count_documents({})) - self.assertEqual(b"hello world", - self.fs.open_download_stream(oid).read()) + self.assertEqual(b"hello world", self.fs.open_download_stream(oid).read()) def test_upload_from_stream_with_id(self): oid = ObjectId() - self.fs.upload_from_stream_with_id(oid, - "test_file_custom_id", - BytesIO(b"custom id"), - chunk_size_bytes=1) - self.assertEqual(b"custom id", - self.fs.open_download_stream(oid).read()) + self.fs.upload_from_stream_with_id( + oid, "test_file_custom_id", BytesIO(b"custom id"), chunk_size_bytes=1 + ) + self.assertEqual(b"custom id", self.fs.open_download_stream(oid).read()) def test_open_upload_stream(self): gin = self.fs.open_upload_stream("from_stream") gin.write(b"from stream") gin.close() - self.assertEqual(b"from stream", - self.fs.open_download_stream(gin._id).read()) + self.assertEqual(b"from stream", self.fs.open_download_stream(gin._id).read()) def test_open_upload_stream_with_id(self): oid = ObjectId() gin = self.fs.open_upload_stream_with_id(oid, "from_stream_custom_id") gin.write(b"from stream with custom id") gin.close() - self.assertEqual(b"from stream with custom id", - self.fs.open_download_stream(oid).read()) + self.assertEqual(b"from stream with custom id", self.fs.open_download_stream(oid).read()) def test_missing_length_iter(self): # Test fix that guards against PHP-237 @@ -338,16 +306,15 @@ def iterate_file(grid_file): self.assertTrue(iterate_file(fstr)) def test_gridfs_lazy_connect(self): - client = MongoClient('badhost', connect=False, - serverSelectionTimeoutMS=0) + client = MongoClient("badhost", connect=False, serverSelectionTimeoutMS=0) cdb = client.db gfs = gridfs.GridFSBucket(cdb) self.assertRaises(ServerSelectionTimeoutError, gfs.delete, 0) gfs = gridfs.GridFSBucket(cdb) self.assertRaises( - ServerSelectionTimeoutError, - gfs.upload_from_stream, "test", b"") # Still no connection. + ServerSelectionTimeoutError, gfs.upload_from_stream, "test", b"" + ) # Still no connection. 
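For context, the lazy-connect contract that test_gridfs_lazy_connect verifies above can be reproduced standalone. A minimal sketch, assuming only that pymongo and gridfs are importable; no running server is required, because "badhost" (the same deliberately bogus hostname the test uses) never resolves and serverSelectionTimeoutMS=0 makes selection fail immediately:

    import gridfs
    from pymongo import MongoClient
    from pymongo.errors import ServerSelectionTimeoutError

    # connect=False defers all network I/O, and serverSelectionTimeoutMS=0
    # makes the eventual (failing) server selection raise immediately.
    client = MongoClient("badhost", connect=False, serverSelectionTimeoutMS=0)
    bucket = gridfs.GridFSBucket(client.db)  # still no connection
    try:
        bucket.upload_from_stream("example", b"data")  # first I/O happens here
    except ServerSelectionTimeoutError:
        print("connection attempted only on first use")
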
def test_gridfs_find(self): self.fs.upload_from_stream("two", b"test2") @@ -361,8 +328,8 @@ def test_gridfs_find(self): self.assertEqual(3, files.count_documents({"filename": "two"})) self.assertEqual(4, files.count_documents({})) cursor = self.fs.find( - {}, no_cursor_timeout=False, sort=[("uploadDate", -1)], - skip=1, limit=2) + {}, no_cursor_timeout=False, sort=[("uploadDate", -1)], skip=1, limit=2 + ) gout = next(cursor) self.assertEqual(b"test1", gout.read()) cursor.rewind() @@ -376,13 +343,11 @@ def test_gridfs_find(self): def test_grid_in_non_int_chunksize(self): # Lua, and perhaps other buggy GridFS clients, store size as a float. - data = b'data' - self.fs.upload_from_stream('f', data) - self.db.fs.files.update_one({'filename': 'f'}, - {'$set': {'chunkSize': 100.0}}) + data = b"data" + self.fs.upload_from_stream("f", data) + self.db.fs.files.update_one({"filename": "f"}, {"$set": {"chunkSize": 100.0}}) - self.assertEqual(data, - self.fs.open_download_stream_by_name('f').read()) + self.assertEqual(data, self.fs.open_download_stream_by_name("f").read()) def test_unacknowledged(self): # w=0 is prohibited. @@ -390,29 +355,23 @@ def test_unacknowledged(self): gridfs.GridFSBucket(rs_or_single_client(w=0).pymongo_test) def test_rename(self): - _id = self.fs.upload_from_stream("first_name", b'testing') - self.assertEqual(b'testing', self.fs.open_download_stream_by_name( - "first_name").read()) + _id = self.fs.upload_from_stream("first_name", b"testing") + self.assertEqual(b"testing", self.fs.open_download_stream_by_name("first_name").read()) self.fs.rename(_id, "second_name") - self.assertRaises(NoFile, self.fs.open_download_stream_by_name, - "first_name") - self.assertEqual(b"testing", self.fs.open_download_stream_by_name( - "second_name").read()) + self.assertRaises(NoFile, self.fs.open_download_stream_by_name, "first_name") + self.assertEqual(b"testing", self.fs.open_download_stream_by_name("second_name").read()) def test_abort(self): - gin = self.fs.open_upload_stream("test_filename", - chunk_size_bytes=5) + gin = self.fs.open_upload_stream("test_filename", chunk_size_bytes=5) gin.write(b"test1") gin.write(b"test2") gin.write(b"test3") - self.assertEqual(3, self.db.fs.chunks.count_documents( - {"files_id": gin._id})) + self.assertEqual(3, self.db.fs.chunks.count_documents({"files_id": gin._id})) gin.abort() self.assertTrue(gin.closed) self.assertRaises(ValueError, gin.write, b"test4") - self.assertEqual(0, self.db.fs.chunks.count_documents( - {"files_id": gin._id})) + self.assertEqual(0, self.db.fs.chunks.count_documents({"files_id": gin._id})) def test_download_to_stream(self): file1 = BytesIO(b"hello world") @@ -429,9 +388,7 @@ def test_download_to_stream(self): self.db.drop_collection("fs.files") self.db.drop_collection("fs.chunks") file1.seek(0) - oid = self.fs.upload_from_stream("many_chunks", - file1, - chunk_size_bytes=1) + oid = self.fs.upload_from_stream("many_chunks", file1, chunk_size_bytes=1) self.assertEqual(11, self.db.fs.chunks.count_documents({})) file2 = BytesIO() self.fs.download_to_stream(oid, file2) @@ -482,7 +439,6 @@ def test_md5(self): class TestGridfsBucketReplicaSet(IntegrationTest): - @classmethod @client_context.require_secondaries_count(1) def setUpClass(cls): @@ -490,52 +446,43 @@ def setUpClass(cls): @classmethod def tearDownClass(cls): - client_context.client.drop_database('gfsbucketreplica') + client_context.client.drop_database("gfsbucketreplica") def test_gridfs_replica_set(self): - rsc = rs_client( - w=client_context.w, - 
read_preference=ReadPreference.SECONDARY) + rsc = rs_client(w=client_context.w, read_preference=ReadPreference.SECONDARY) - gfs = gridfs.GridFSBucket(rsc.gfsbucketreplica, 'gfsbucketreplicatest') - oid = gfs.upload_from_stream("test_filename", b'foo') + gfs = gridfs.GridFSBucket(rsc.gfsbucketreplica, "gfsbucketreplicatest") + oid = gfs.upload_from_stream("test_filename", b"foo") content = gfs.open_download_stream(oid).read() - self.assertEqual(b'foo', content) + self.assertEqual(b"foo", content) def test_gridfs_secondary(self): secondary_host, secondary_port = one(self.client.secondaries) secondary_connection = single_client( - secondary_host, secondary_port, - read_preference=ReadPreference.SECONDARY) + secondary_host, secondary_port, read_preference=ReadPreference.SECONDARY + ) # Should detect it's connected to secondary and not attempt to # create index - gfs = gridfs.GridFSBucket( - secondary_connection.gfsbucketreplica, 'gfsbucketsecondarytest') + gfs = gridfs.GridFSBucket(secondary_connection.gfsbucketreplica, "gfsbucketsecondarytest") # This won't detect secondary, raises error - self.assertRaises(NotPrimaryError, gfs.upload_from_stream, - "test_filename", b'foo') + self.assertRaises(NotPrimaryError, gfs.upload_from_stream, "test_filename", b"foo") def test_gridfs_secondary_lazy(self): # Should detect it's connected to secondary and not attempt to # create index. secondary_host, secondary_port = one(self.client.secondaries) client = single_client( - secondary_host, - secondary_port, - read_preference=ReadPreference.SECONDARY, - connect=False) + secondary_host, secondary_port, read_preference=ReadPreference.SECONDARY, connect=False + ) # Still no connection. - gfs = gridfs.GridFSBucket( - client.gfsbucketreplica, 'gfsbucketsecondarylazytest') + gfs = gridfs.GridFSBucket(client.gfsbucketreplica, "gfsbucketsecondarylazytest") # Connects, doesn't create index. - self.assertRaises(NoFile, gfs.open_download_stream_by_name, - "test_filename") - self.assertRaises(NotPrimaryError, gfs.upload_from_stream, - "test_filename", b'data') + self.assertRaises(NoFile, gfs.open_download_stream_by_name, "test_filename") + self.assertRaises(NotPrimaryError, gfs.upload_from_stream, "test_filename", b"data") if __name__ == "__main__": diff --git a/test/test_gridfs_spec.py b/test/test_gridfs_spec.py index 057a7b4841..3c6f6b76c4 100644 --- a/test/test_gridfs_spec.py +++ b/test/test_gridfs_spec.py @@ -19,39 +19,35 @@ import os import re import sys - from json import loads sys.path[0:0] = [""] +from test import IntegrationTest, unittest + +import gridfs from bson import Binary from bson.int64 import Int64 from bson.json_util import object_hook -import gridfs -from gridfs.errors import NoFile, CorruptGridFile -from test import (unittest, - IntegrationTest) +from gridfs.errors import CorruptGridFile, NoFile # Commands. -_COMMANDS = {"delete": lambda coll, doc: [coll.delete_many(d["q"]) - for d in doc['deletes']], - "insert": lambda coll, doc: coll.insert_many(doc['documents']), - "update": lambda coll, doc: [coll.update_many(u["q"], u["u"]) - for u in doc['updates']] - } +_COMMANDS = { + "delete": lambda coll, doc: [coll.delete_many(d["q"]) for d in doc["deletes"]], + "insert": lambda coll, doc: coll.insert_many(doc["documents"]), + "update": lambda coll, doc: [coll.update_many(u["q"], u["u"]) for u in doc["updates"]], +} # Location of JSON test specifications. 
-_TEST_PATH = os.path.join( - os.path.dirname(os.path.realpath(__file__)), - 'gridfs') +_TEST_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "gridfs") def camel_to_snake(camel): # Regex to convert CamelCase to snake_case. Special case for _id. if camel == "id": return "file_id" - snake = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', camel) - return re.sub('([a-z0-9])([A-Z])', r'\1_\2', snake).lower() + snake = re.sub("(.)([A-Z][a-z]+)", r"\1_\2", camel) + return re.sub("([a-z0-9])([A-Z])", r"\1_\2", snake).lower() class TestAllScenarios(IntegrationTest): @@ -66,23 +62,25 @@ def setUpClass(cls): "upload": cls.fs.upload_from_stream, "download": cls.fs.open_download_stream, "delete": cls.fs.delete, - "download_by_name": cls.fs.open_download_stream_by_name} + "download_by_name": cls.fs.open_download_stream_by_name, + } def init_db(self, data, test): - self.cleanup_colls(self.db.fs.files, self.db.fs.chunks, - self.db.expected.files, self.db.expected.chunks) + self.cleanup_colls( + self.db.fs.files, self.db.fs.chunks, self.db.expected.files, self.db.expected.chunks + ) # Read in data. - if data['files']: - self.db.fs.files.insert_many(data['files']) - self.db.expected.files.insert_many(data['files']) - if data['chunks']: - self.db.fs.chunks.insert_many(data['chunks']) - self.db.expected.chunks.insert_many(data['chunks']) + if data["files"]: + self.db.fs.files.insert_many(data["files"]) + self.db.expected.files.insert_many(data["files"]) + if data["chunks"]: + self.db.fs.chunks.insert_many(data["chunks"]) + self.db.expected.chunks.insert_many(data["chunks"]) # Make initial modifications. if "arrange" in test: - for cmd in test['arrange'].get('data', []): + for cmd in test["arrange"].get("data", []): for key in cmd.keys(): if key in _COMMANDS: coll = self.db.get_collection(cmd[key]) @@ -90,11 +88,11 @@ def init_db(self, data, test): def init_expected_db(self, test, result): # Modify outcome DB. - for cmd in test['assert'].get('data', []): + for cmd in test["assert"].get("data", []): for key in cmd.keys(): if key in _COMMANDS: # Replace wildcards in inserts. - for doc in cmd.get('documents', []): + for doc in cmd.get("documents", []): keylist = doc.keys() for dockey in copy.deepcopy(list(keylist)): if "result" in str(doc[dockey]): @@ -107,8 +105,8 @@ def init_expected_db(self, test, result): coll = self.db.get_collection(cmd[key]) _COMMANDS[key](coll, cmd) - if test['assert'].get('result') == "&result": - test['assert']['result'] = result + if test["assert"].get("result") == "&result": + test["assert"]["result"] = result def sorted_list(self, coll, ignore_id): to_sort = [] @@ -129,30 +127,28 @@ def create_test(scenario_def): def run_scenario(self): # Run tests. - self.assertTrue(scenario_def['tests'], "tests cannot be empty") - for test in scenario_def['tests']: - self.init_db(scenario_def['data'], test) + self.assertTrue(scenario_def["tests"], "tests cannot be empty") + for test in scenario_def["tests"]: + self.init_db(scenario_def["data"], test) # Run GridFs Operation. 
- operation = self.str_to_cmd[test['act']['operation']] - args = test['act']['arguments'] + operation = self.str_to_cmd[test["act"]["operation"]] + args = test["act"]["arguments"] extra_opts = args.pop("options", {}) if "contentType" in extra_opts: - extra_opts["metadata"] = { - "contentType": extra_opts.pop("contentType")} + extra_opts["metadata"] = {"contentType": extra_opts.pop("contentType")} args.update(extra_opts) - converted_args = dict((camel_to_snake(c), v) - for c, v in args.items()) + converted_args = dict((camel_to_snake(c), v) for c, v in args.items()) - expect_error = test['assert'].get("error", False) + expect_error = test["assert"].get("error", False) result = None error = None try: result = operation(**converted_args) - if 'download' in test['act']['operation']: + if "download" in test["act"]["operation"]: result = Binary(result.read()) except Exception as exc: if not expect_error: @@ -162,47 +158,51 @@ def run_scenario(self): self.init_expected_db(test, result) # Asserts. - errors = {"FileNotFound": NoFile, - "ChunkIsMissing": CorruptGridFile, - "ExtraChunk": CorruptGridFile, - "ChunkIsWrongSize": CorruptGridFile, - "RevisionNotFound": NoFile} + errors = { + "FileNotFound": NoFile, + "ChunkIsMissing": CorruptGridFile, + "ExtraChunk": CorruptGridFile, + "ChunkIsWrongSize": CorruptGridFile, + "RevisionNotFound": NoFile, + } if expect_error: self.assertIsNotNone(error) - self.assertIsInstance(error, errors[test['assert']['error']], - test['description']) + self.assertIsInstance(error, errors[test["assert"]["error"]], test["description"]) else: self.assertIsNone(error) - if 'result' in test['assert']: - if test['assert']['result'] == 'void': - test['assert']['result'] = None - self.assertEqual(result, test['assert'].get('result')) + if "result" in test["assert"]: + if test["assert"]["result"] == "void": + test["assert"]["result"] = None + self.assertEqual(result, test["assert"].get("result")) - if 'data' in test['assert']: + if "data" in test["assert"]: # Create alphabetized list self.assertEqual( set(self.sorted_list(self.db.fs.chunks, True)), - set(self.sorted_list(self.db.expected.chunks, True))) + set(self.sorted_list(self.db.expected.chunks, True)), + ) self.assertEqual( set(self.sorted_list(self.db.fs.files, False)), - set(self.sorted_list(self.db.expected.files, False))) + set(self.sorted_list(self.db.expected.files, False)), + ) return run_scenario + def _object_hook(dct): - if 'length' in dct: - dct['length'] = Int64(dct['length']) + if "length" in dct: + dct["length"] = Int64(dct["length"]) return object_hook(dct) + def create_tests(): for dirpath, _, filenames in os.walk(_TEST_PATH): for filename in filenames: with open(os.path.join(dirpath, filename)) as scenario_stream: - scenario_def = loads( - scenario_stream.read(), object_hook=_object_hook) + scenario_def = loads(scenario_stream.read(), object_hook=_object_hook) # Because object_hook is already defined by bson.json_util, # and everything is named 'data' @@ -210,7 +210,7 @@ def str2hex(jsn): for key, val in jsn.items(): if key in ("data", "source", "result"): if "$hex" in val: - jsn[key] = Binary(bytes.fromhex(val['$hex'])) + jsn[key] = Binary(bytes.fromhex(val["$hex"])) if isinstance(jsn[key], dict): str2hex(jsn[key]) if isinstance(jsn[key], list): @@ -221,8 +221,7 @@ def str2hex(jsn): # Construct test from scenario. 
new_test = create_test(scenario_def) - test_name = 'test_%s' % ( - os.path.splitext(filename)[0]) + test_name = "test_%s" % (os.path.splitext(filename)[0]) new_test.__name__ = test_name setattr(TestAllScenarios, new_test.__name__, new_test) diff --git a/test/test_heartbeat_monitoring.py b/test/test_heartbeat_monitoring.py index 6941e6bd84..cd4a875e9e 100644 --- a/test/test_heartbeat_monitoring.py +++ b/test/test_heartbeat_monitoring.py @@ -19,21 +19,21 @@ sys.path[0:0] = [""] +from test import IntegrationTest, client_knobs, unittest +from test.utils import HeartbeatEventListener, MockPool, single_client, wait_until + from pymongo.errors import ConnectionFailure from pymongo.hello import Hello, HelloCompat from pymongo.monitor import Monitor -from test import unittest, client_knobs, IntegrationTest -from test.utils import (HeartbeatEventListener, MockPool, single_client, - wait_until) class TestHeartbeatMonitoring(IntegrationTest): - def create_mock_monitor(self, responses, uri, expected_results): listener = HeartbeatEventListener() - with client_knobs(heartbeat_frequency=0.1, - min_heartbeat_interval=0.1, - events_queue_frequency=0.1): + with client_knobs( + heartbeat_frequency=0.1, min_heartbeat_interval=0.1, events_queue_frequency=0.1 + ): + class MockMonitor(Monitor): def _check_with_socket(self, *args, **kwargs): if isinstance(responses[1], Exception): @@ -41,27 +41,21 @@ def _check_with_socket(self, *args, **kwargs): return Hello(responses[1]), 99 m = single_client( - h=uri, - event_listeners=(listener,), - _monitor_class=MockMonitor, - _pool_class=MockPool) + h=uri, event_listeners=(listener,), _monitor_class=MockMonitor, _pool_class=MockPool + ) expected_len = len(expected_results) # Wait for *at least* expected_len number of results. The # monitor thread may run multiple times during the execution # of this test. - wait_until( - lambda: len(listener.events) >= expected_len, - "publish all events") + wait_until(lambda: len(listener.events) >= expected_len, "publish all events") try: # zip gives us len(expected_results) pairs. for expected, actual in zip(expected_results, listener.events): - self.assertEqual(expected, - actual.__class__.__name__) - self.assertEqual(actual.connection_id, - responses[0]) - if expected != 'ServerHeartbeatStartedEvent': + self.assertEqual(expected, actual.__class__.__name__) + self.assertEqual(actual.connection_id, responses[0]) + if expected != "ServerHeartbeatStartedEvent": if isinstance(actual.reply, Hello): self.assertEqual(actual.duration, 99) self.assertEqual(actual.reply._doc, responses[1]) @@ -72,28 +66,25 @@ def _check_with_socket(self, *args, **kwargs): m.close() def test_standalone(self): - responses = (('a', 27017), - { - HelloCompat.LEGACY_CMD: True, - "maxWireVersion": 4, - "minWireVersion": 0, - "ok": 1 - }) + responses = ( + ("a", 27017), + {HelloCompat.LEGACY_CMD: True, "maxWireVersion": 4, "minWireVersion": 0, "ok": 1}, + ) uri = "mongodb://a:27017" - expected_results = ['ServerHeartbeatStartedEvent', - 'ServerHeartbeatSucceededEvent'] + expected_results = ["ServerHeartbeatStartedEvent", "ServerHeartbeatSucceededEvent"] self.create_mock_monitor(responses, uri, expected_results) def test_standalone_error(self): - responses = (('a', 27017), - ConnectionFailure("SPECIAL MESSAGE")) + responses = (("a", 27017), ConnectionFailure("SPECIAL MESSAGE")) uri = "mongodb://a:27017" # _check_with_socket failing results in a second attempt. 
- expected_results = ['ServerHeartbeatStartedEvent', - 'ServerHeartbeatFailedEvent', - 'ServerHeartbeatStartedEvent', - 'ServerHeartbeatFailedEvent'] + expected_results = [ + "ServerHeartbeatStartedEvent", + "ServerHeartbeatFailedEvent", + "ServerHeartbeatStartedEvent", + "ServerHeartbeatFailedEvent", + ] self.create_mock_monitor(responses, uri, expected_results) diff --git a/test/test_json_util.py b/test/test_json_util.py index 16c7d96a2f..203542e822 100644 --- a/test/test_json_util.py +++ b/test/test_json_util.py @@ -19,21 +19,30 @@ import re import sys import uuid - from typing import Any, List, MutableMapping sys.path[0:0] = [""] -from bson import json_util, EPOCH_AWARE, EPOCH_NAIVE, SON -from bson.json_util import (DatetimeRepresentation, - JSONMode, - JSONOptions, - LEGACY_JSON_OPTIONS) -from bson.binary import (ALL_UUID_REPRESENTATIONS, Binary, MD5_SUBTYPE, - USER_DEFINED_SUBTYPE, UuidRepresentation, STANDARD) +from test import IntegrationTest, unittest + +from bson import EPOCH_AWARE, EPOCH_NAIVE, SON, json_util +from bson.binary import ( + ALL_UUID_REPRESENTATIONS, + MD5_SUBTYPE, + STANDARD, + USER_DEFINED_SUBTYPE, + Binary, + UuidRepresentation, +) from bson.code import Code from bson.dbref import DBRef from bson.int64 import Int64 +from bson.json_util import ( + LEGACY_JSON_OPTIONS, + DatetimeRepresentation, + JSONMode, + JSONOptions, +) from bson.max_key import MaxKey from bson.min_key import MinKey from bson.objectid import ObjectId @@ -41,14 +50,12 @@ from bson.timestamp import Timestamp from bson.tz_util import FixedOffset, utc -from test import unittest, IntegrationTest - - STRICT_JSON_OPTIONS = JSONOptions( strict_number_long=True, datetime_representation=DatetimeRepresentation.ISO8601, strict_uuid=True, - json_mode=JSONMode.LEGACY) + json_mode=JSONMode.LEGACY, +) class TestJsonUtil(unittest.TestCase): @@ -63,15 +70,13 @@ def test_basic(self): def test_json_options_with_options(self): opts = JSONOptions( - datetime_representation=DatetimeRepresentation.NUMBERLONG, - json_mode=JSONMode.LEGACY) - self.assertEqual( - opts.datetime_representation, DatetimeRepresentation.NUMBERLONG) + datetime_representation=DatetimeRepresentation.NUMBERLONG, json_mode=JSONMode.LEGACY + ) + self.assertEqual(opts.datetime_representation, DatetimeRepresentation.NUMBERLONG) opts2 = opts.with_options( - datetime_representation=DatetimeRepresentation.ISO8601, - json_mode=JSONMode.LEGACY) - self.assertEqual( - opts2.datetime_representation, DatetimeRepresentation.ISO8601) + datetime_representation=DatetimeRepresentation.ISO8601, json_mode=JSONMode.LEGACY + ) + self.assertEqual(opts2.datetime_representation, DatetimeRepresentation.ISO8601) opts = JSONOptions(strict_number_long=True, json_mode=JSONMode.LEGACY) self.assertEqual(opts.strict_number_long, True) @@ -79,16 +84,12 @@ def test_json_options_with_options(self): self.assertEqual(opts2.strict_number_long, False) opts = json_util.CANONICAL_JSON_OPTIONS - self.assertNotEqual( - opts.uuid_representation, UuidRepresentation.JAVA_LEGACY) - opts2 = opts.with_options( - uuid_representation=UuidRepresentation.JAVA_LEGACY) - self.assertEqual( - opts2.uuid_representation, UuidRepresentation.JAVA_LEGACY) + self.assertNotEqual(opts.uuid_representation, UuidRepresentation.JAVA_LEGACY) + opts2 = opts.with_options(uuid_representation=UuidRepresentation.JAVA_LEGACY) + self.assertEqual(opts2.uuid_representation, UuidRepresentation.JAVA_LEGACY) self.assertEqual(opts2.document_class, dict) opts3 = opts2.with_options(document_class=SON) - self.assertEqual( - 
opts3.uuid_representation, UuidRepresentation.JAVA_LEGACY) + self.assertEqual(opts3.uuid_representation, UuidRepresentation.JAVA_LEGACY) self.assertEqual(opts3.document_class, SON) def test_objectid(self): @@ -102,41 +103,42 @@ def test_dbref(self): # Check order. self.assertEqual( '{"$ref": "collection", "$id": 1, "$db": "db"}', - json_util.dumps(DBRef('collection', 1, 'db'))) + json_util.dumps(DBRef("collection", 1, "db")), + ) def test_datetime(self): - tz_aware_opts = json_util.DEFAULT_JSON_OPTIONS.with_options( - tz_aware=True) + tz_aware_opts = json_util.DEFAULT_JSON_OPTIONS.with_options(tz_aware=True) # only millis, not micros - self.round_trip({"date": datetime.datetime(2009, 12, 9, 15, 49, 45, - 191000, utc)}, json_options=tz_aware_opts) - self.round_trip({"date": datetime.datetime(2009, 12, 9, 15, - 49, 45, 191000)}) - - for jsn in ['{"dt": { "$date" : "1970-01-01T00:00:00.000+0000"}}', - '{"dt": { "$date" : "1970-01-01T00:00:00.000000+0000"}}', - '{"dt": { "$date" : "1970-01-01T00:00:00.000+00:00"}}', - '{"dt": { "$date" : "1970-01-01T00:00:00.000000+00:00"}}', - '{"dt": { "$date" : "1970-01-01T00:00:00.000000+00"}}', - '{"dt": { "$date" : "1970-01-01T00:00:00.000Z"}}', - '{"dt": { "$date" : "1970-01-01T00:00:00.000000Z"}}', - '{"dt": { "$date" : "1970-01-01T00:00:00Z"}}', - '{"dt": {"$date": "1970-01-01T00:00:00.000"}}', - '{"dt": { "$date" : "1970-01-01T00:00:00"}}', - '{"dt": { "$date" : "1970-01-01T00:00:00.000000"}}', - '{"dt": { "$date" : "1969-12-31T16:00:00.000-0800"}}', - '{"dt": { "$date" : "1969-12-31T16:00:00.000000-0800"}}', - '{"dt": { "$date" : "1969-12-31T16:00:00.000-08:00"}}', - '{"dt": { "$date" : "1969-12-31T16:00:00.000000-08:00"}}', - '{"dt": { "$date" : "1969-12-31T16:00:00.000000-08"}}', - '{"dt": { "$date" : "1970-01-01T01:00:00.000+0100"}}', - '{"dt": { "$date" : "1970-01-01T01:00:00.000000+0100"}}', - '{"dt": { "$date" : "1970-01-01T01:00:00.000+01:00"}}', - '{"dt": { "$date" : "1970-01-01T01:00:00.000000+01:00"}}', - '{"dt": { "$date" : "1970-01-01T01:00:00.000000+01"}}' - ]: - self.assertEqual(EPOCH_AWARE, json_util.loads( - jsn, json_options=tz_aware_opts)["dt"]) + self.round_trip( + {"date": datetime.datetime(2009, 12, 9, 15, 49, 45, 191000, utc)}, + json_options=tz_aware_opts, + ) + self.round_trip({"date": datetime.datetime(2009, 12, 9, 15, 49, 45, 191000)}) + + for jsn in [ + '{"dt": { "$date" : "1970-01-01T00:00:00.000+0000"}}', + '{"dt": { "$date" : "1970-01-01T00:00:00.000000+0000"}}', + '{"dt": { "$date" : "1970-01-01T00:00:00.000+00:00"}}', + '{"dt": { "$date" : "1970-01-01T00:00:00.000000+00:00"}}', + '{"dt": { "$date" : "1970-01-01T00:00:00.000000+00"}}', + '{"dt": { "$date" : "1970-01-01T00:00:00.000Z"}}', + '{"dt": { "$date" : "1970-01-01T00:00:00.000000Z"}}', + '{"dt": { "$date" : "1970-01-01T00:00:00Z"}}', + '{"dt": {"$date": "1970-01-01T00:00:00.000"}}', + '{"dt": { "$date" : "1970-01-01T00:00:00"}}', + '{"dt": { "$date" : "1970-01-01T00:00:00.000000"}}', + '{"dt": { "$date" : "1969-12-31T16:00:00.000-0800"}}', + '{"dt": { "$date" : "1969-12-31T16:00:00.000000-0800"}}', + '{"dt": { "$date" : "1969-12-31T16:00:00.000-08:00"}}', + '{"dt": { "$date" : "1969-12-31T16:00:00.000000-08:00"}}', + '{"dt": { "$date" : "1969-12-31T16:00:00.000000-08"}}', + '{"dt": { "$date" : "1970-01-01T01:00:00.000+0100"}}', + '{"dt": { "$date" : "1970-01-01T01:00:00.000000+0100"}}', + '{"dt": { "$date" : "1970-01-01T01:00:00.000+01:00"}}', + '{"dt": { "$date" : "1970-01-01T01:00:00.000000+01:00"}}', + '{"dt": { "$date" : "1970-01-01T01:00:00.000000+01"}}', + 
]: + self.assertEqual(EPOCH_AWARE, json_util.loads(jsn, json_options=tz_aware_opts)["dt"]) self.assertEqual(EPOCH_NAIVE, json_util.loads(jsn)["dt"]) dtm = datetime.datetime(1, 1, 1, 1, 1, 1, 0, utc) @@ -149,84 +151,99 @@ def test_datetime(self): pre_epoch = {"dt": datetime.datetime(1, 1, 1, 1, 1, 1, 10000, utc)} post_epoch = {"dt": datetime.datetime(1972, 1, 1, 1, 1, 1, 10000, utc)} self.assertEqual( - '{"dt": {"$date": {"$numberLong": "-62135593138990"}}}', - json_util.dumps(pre_epoch)) + '{"dt": {"$date": {"$numberLong": "-62135593138990"}}}', json_util.dumps(pre_epoch) + ) self.assertEqual( - '{"dt": {"$date": "1972-01-01T01:01:01.010Z"}}', - json_util.dumps(post_epoch)) + '{"dt": {"$date": "1972-01-01T01:01:01.010Z"}}', json_util.dumps(post_epoch) + ) self.assertEqual( '{"dt": {"$date": -62135593138990}}', - json_util.dumps(pre_epoch, json_options=LEGACY_JSON_OPTIONS)) + json_util.dumps(pre_epoch, json_options=LEGACY_JSON_OPTIONS), + ) self.assertEqual( '{"dt": {"$date": 63075661010}}', - json_util.dumps(post_epoch, json_options=LEGACY_JSON_OPTIONS)) + json_util.dumps(post_epoch, json_options=LEGACY_JSON_OPTIONS), + ) self.assertEqual( '{"dt": {"$date": {"$numberLong": "-62135593138990"}}}', - json_util.dumps(pre_epoch, json_options=STRICT_JSON_OPTIONS)) + json_util.dumps(pre_epoch, json_options=STRICT_JSON_OPTIONS), + ) self.assertEqual( '{"dt": {"$date": "1972-01-01T01:01:01.010Z"}}', - json_util.dumps(post_epoch, json_options=STRICT_JSON_OPTIONS)) + json_util.dumps(post_epoch, json_options=STRICT_JSON_OPTIONS), + ) number_long_options = JSONOptions( - datetime_representation=DatetimeRepresentation.NUMBERLONG, - json_mode=JSONMode.LEGACY) + datetime_representation=DatetimeRepresentation.NUMBERLONG, json_mode=JSONMode.LEGACY + ) self.assertEqual( '{"dt": {"$date": {"$numberLong": "63075661010"}}}', - json_util.dumps(post_epoch, json_options=number_long_options)) + json_util.dumps(post_epoch, json_options=number_long_options), + ) self.assertEqual( '{"dt": {"$date": {"$numberLong": "-62135593138990"}}}', - json_util.dumps(pre_epoch, json_options=number_long_options)) + json_util.dumps(pre_epoch, json_options=number_long_options), + ) # ISO8601 mode assumes naive datetimes are UTC pre_epoch_naive = {"dt": datetime.datetime(1, 1, 1, 1, 1, 1, 10000)} - post_epoch_naive = { - "dt": datetime.datetime(1972, 1, 1, 1, 1, 1, 10000)} + post_epoch_naive = {"dt": datetime.datetime(1972, 1, 1, 1, 1, 1, 10000)} self.assertEqual( '{"dt": {"$date": {"$numberLong": "-62135593138990"}}}', - json_util.dumps(pre_epoch_naive, json_options=STRICT_JSON_OPTIONS)) + json_util.dumps(pre_epoch_naive, json_options=STRICT_JSON_OPTIONS), + ) self.assertEqual( '{"dt": {"$date": "1972-01-01T01:01:01.010Z"}}', - json_util.dumps(post_epoch_naive, - json_options=STRICT_JSON_OPTIONS)) + json_util.dumps(post_epoch_naive, json_options=STRICT_JSON_OPTIONS), + ) # Test tz_aware and tzinfo options self.assertEqual( datetime.datetime(1972, 1, 1, 1, 1, 1, 10000, utc), json_util.loads( - '{"dt": {"$date": "1972-01-01T01:01:01.010+0000"}}', - json_options=tz_aware_opts)["dt"]) + '{"dt": {"$date": "1972-01-01T01:01:01.010+0000"}}', json_options=tz_aware_opts + )["dt"], + ) self.assertEqual( datetime.datetime(1972, 1, 1, 1, 1, 1, 10000, utc), json_util.loads( '{"dt": {"$date": "1972-01-01T01:01:01.010+0000"}}', - json_options=JSONOptions(tz_aware=True, - tzinfo=utc))["dt"]) + json_options=JSONOptions(tz_aware=True, tzinfo=utc), + )["dt"], + ) self.assertEqual( datetime.datetime(1972, 1, 1, 1, 1, 1, 10000), json_util.loads( '{"dt": 
{"$date": "1972-01-01T01:01:01.010+0000"}}', - json_options=JSONOptions(tz_aware=False))["dt"]) - self.round_trip(pre_epoch_naive, json_options=JSONOptions( - tz_aware=False)) + json_options=JSONOptions(tz_aware=False), + )["dt"], + ) + self.round_trip(pre_epoch_naive, json_options=JSONOptions(tz_aware=False)) # Test a non-utc timezone - pacific = FixedOffset(-8 * 60, 'US/Pacific') - aware_datetime = {"dt": datetime.datetime(2002, 10, 27, 6, 0, 0, 10000, - pacific)} + pacific = FixedOffset(-8 * 60, "US/Pacific") + aware_datetime = {"dt": datetime.datetime(2002, 10, 27, 6, 0, 0, 10000, pacific)} self.assertEqual( '{"dt": {"$date": "2002-10-27T06:00:00.010-0800"}}', - json_util.dumps(aware_datetime, json_options=STRICT_JSON_OPTIONS)) - self.round_trip(aware_datetime, json_options=JSONOptions( - json_mode=JSONMode.LEGACY, - tz_aware=True, tzinfo=pacific)) - self.round_trip(aware_datetime, json_options=JSONOptions( - datetime_representation=DatetimeRepresentation.ISO8601, - json_mode=JSONMode.LEGACY, - tz_aware=True, tzinfo=pacific)) + json_util.dumps(aware_datetime, json_options=STRICT_JSON_OPTIONS), + ) + self.round_trip( + aware_datetime, + json_options=JSONOptions(json_mode=JSONMode.LEGACY, tz_aware=True, tzinfo=pacific), + ) + self.round_trip( + aware_datetime, + json_options=JSONOptions( + datetime_representation=DatetimeRepresentation.ISO8601, + json_mode=JSONMode.LEGACY, + tz_aware=True, + tzinfo=pacific, + ), + ) def test_regex_object_hook(self): # Extended JSON format regular expression. - pat = 'a*b' + pat = "a*b" json_re = '{"$regex": "%s", "$options": "u"}' % pat loaded = json_util.object_hook(json.loads(json_re)) self.assertTrue(isinstance(loaded, Regex)) @@ -234,9 +251,7 @@ def test_regex_object_hook(self): self.assertEqual(re.U, loaded.flags) def test_regex(self): - for regex_instance in ( - re.compile("a*b", re.IGNORECASE), - Regex("a*b", re.IGNORECASE)): + for regex_instance in (re.compile("a*b", re.IGNORECASE), Regex("a*b", re.IGNORECASE)): res = self.round_tripped({"r": regex_instance})["r"] self.assertEqual("a*b", res.pattern) @@ -244,33 +259,34 @@ def test_regex(self): self.assertEqual("a*b", res.pattern) self.assertEqual(re.IGNORECASE, res.flags) - unicode_options = re.I|re.M|re.S|re.U|re.X + unicode_options = re.I | re.M | re.S | re.U | re.X regex = re.compile("a*b", unicode_options) res = self.round_tripped({"r": regex})["r"] self.assertEqual(unicode_options, res.flags) # Some tools may not add $options if no flags are set. - res = json_util.loads('{"r": {"$regex": "a*b"}}')['r'] + res = json_util.loads('{"r": {"$regex": "a*b"}}')["r"] self.assertEqual(0, res.flags) self.assertEqual( - Regex('.*', 'ilm'), - json_util.loads( - '{"r": {"$regex": ".*", "$options": "ilm"}}')['r']) + Regex(".*", "ilm"), json_util.loads('{"r": {"$regex": ".*", "$options": "ilm"}}')["r"] + ) # Check order. 
self.assertEqual( '{"$regularExpression": {"pattern": ".*", "options": "mx"}}', - json_util.dumps(Regex('.*', re.M | re.X))) + json_util.dumps(Regex(".*", re.M | re.X)), + ) self.assertEqual( '{"$regularExpression": {"pattern": ".*", "options": "mx"}}', - json_util.dumps(re.compile(b'.*', re.M | re.X))) + json_util.dumps(re.compile(b".*", re.M | re.X)), + ) self.assertEqual( '{"$regex": ".*", "$options": "mx"}', - json_util.dumps(Regex('.*', re.M | re.X), - json_options=LEGACY_JSON_OPTIONS)) + json_util.dumps(Regex(".*", re.M | re.X), json_options=LEGACY_JSON_OPTIONS), + ) def test_regex_validation(self): non_str_types = [10, {}, []] @@ -297,87 +313,94 @@ def test_timestamp(self): def test_uuid_default(self): # Cannot directly encode native UUIDs with the default # uuid_representation. - doc = {'uuid': uuid.UUID('f47ac10b-58cc-4372-a567-0e02b2c3d479')} - with self.assertRaisesRegex(ValueError, 'cannot encode native uuid'): + doc = {"uuid": uuid.UUID("f47ac10b-58cc-4372-a567-0e02b2c3d479")} + with self.assertRaisesRegex(ValueError, "cannot encode native uuid"): json_util.dumps(doc) legacy_jsn = '{"uuid": {"$uuid": "f47ac10b58cc4372a5670e02b2c3d479"}}' - expected = {'uuid': Binary( - b'\xf4z\xc1\x0bX\xccCr\xa5g\x0e\x02\xb2\xc3\xd4y', 4)} + expected = {"uuid": Binary(b"\xf4z\xc1\x0bX\xccCr\xa5g\x0e\x02\xb2\xc3\xd4y", 4)} self.assertEqual(json_util.loads(legacy_jsn), expected) def test_uuid(self): - doc = {'uuid': uuid.UUID('f47ac10b-58cc-4372-a567-0e02b2c3d479')} + doc = {"uuid": uuid.UUID("f47ac10b-58cc-4372-a567-0e02b2c3d479")} uuid_legacy_opts = LEGACY_JSON_OPTIONS.with_options( - uuid_representation=UuidRepresentation.PYTHON_LEGACY) + uuid_representation=UuidRepresentation.PYTHON_LEGACY + ) self.round_trip(doc, json_options=uuid_legacy_opts) self.assertEqual( '{"uuid": {"$uuid": "f47ac10b58cc4372a5670e02b2c3d479"}}', - json_util.dumps(doc, json_options=LEGACY_JSON_OPTIONS)) + json_util.dumps(doc, json_options=LEGACY_JSON_OPTIONS), + ) self.assertEqual( - '{"uuid": ' - '{"$binary": "9HrBC1jMQ3KlZw4CssPUeQ==", "$type": "03"}}', + '{"uuid": ' '{"$binary": "9HrBC1jMQ3KlZw4CssPUeQ==", "$type": "03"}}', json_util.dumps( - doc, json_options=STRICT_JSON_OPTIONS.with_options( - uuid_representation=UuidRepresentation.PYTHON_LEGACY))) - self.assertEqual( - '{"uuid": ' - '{"$binary": "9HrBC1jMQ3KlZw4CssPUeQ==", "$type": "04"}}', + doc, + json_options=STRICT_JSON_OPTIONS.with_options( + uuid_representation=UuidRepresentation.PYTHON_LEGACY + ), + ), + ) + self.assertEqual( + '{"uuid": ' '{"$binary": "9HrBC1jMQ3KlZw4CssPUeQ==", "$type": "04"}}', json_util.dumps( - doc, json_options=JSONOptions( - strict_uuid=True, json_mode=JSONMode.LEGACY, - uuid_representation=STANDARD))) - self.assertEqual( - doc, json_util.loads( - '{"uuid": ' - '{"$binary": "9HrBC1jMQ3KlZw4CssPUeQ==", "$type": "03"}}', - json_options=uuid_legacy_opts)) - for uuid_representation in (set(ALL_UUID_REPRESENTATIONS) - - {UuidRepresentation.UNSPECIFIED}): + doc, + json_options=JSONOptions( + strict_uuid=True, json_mode=JSONMode.LEGACY, uuid_representation=STANDARD + ), + ), + ) + self.assertEqual( + doc, + json_util.loads( + '{"uuid": ' '{"$binary": "9HrBC1jMQ3KlZw4CssPUeQ==", "$type": "03"}}', + json_options=uuid_legacy_opts, + ), + ) + for uuid_representation in set(ALL_UUID_REPRESENTATIONS) - {UuidRepresentation.UNSPECIFIED}: options = JSONOptions( - strict_uuid=True, json_mode=JSONMode.LEGACY, - uuid_representation=uuid_representation) + strict_uuid=True, json_mode=JSONMode.LEGACY, uuid_representation=uuid_representation + ) 
self.round_trip(doc, json_options=options) # Ignore UUID representation when decoding BSON binary subtype 4. - self.assertEqual(doc, json_util.loads( - '{"uuid": ' - '{"$binary": "9HrBC1jMQ3KlZw4CssPUeQ==", "$type": "04"}}', - json_options=options)) + self.assertEqual( + doc, + json_util.loads( + '{"uuid": ' '{"$binary": "9HrBC1jMQ3KlZw4CssPUeQ==", "$type": "04"}}', + json_options=options, + ), + ) def test_uuid_uuid_rep_unspecified(self): _uuid = uuid.uuid4() options = JSONOptions( strict_uuid=True, json_mode=JSONMode.LEGACY, - uuid_representation=UuidRepresentation.UNSPECIFIED) + uuid_representation=UuidRepresentation.UNSPECIFIED, + ) # Cannot directly encode native UUIDs with UNSPECIFIED. - doc = {'uuid': _uuid} + doc = {"uuid": _uuid} with self.assertRaises(ValueError): json_util.dumps(doc, json_options=options) # All UUID subtypes are decoded as Binary with UNSPECIFIED. # subtype 3 - doc = {'uuid': Binary(_uuid.bytes, subtype=3)} + doc = {"uuid": Binary(_uuid.bytes, subtype=3)} ext_json_str = json_util.dumps(doc) - self.assertEqual( - doc, json_util.loads(ext_json_str, json_options=options)) + self.assertEqual(doc, json_util.loads(ext_json_str, json_options=options)) # subtype 4 - doc = {'uuid': Binary(_uuid.bytes, subtype=4)} + doc = {"uuid": Binary(_uuid.bytes, subtype=4)} ext_json_str = json_util.dumps(doc) - self.assertEqual( - doc, json_util.loads(ext_json_str, json_options=options)) + self.assertEqual(doc, json_util.loads(ext_json_str, json_options=options)) # $uuid-encoded fields - doc = {'uuid': Binary(_uuid.bytes, subtype=4)} - ext_json_str = json_util.dumps({'uuid': _uuid}, - json_options=LEGACY_JSON_OPTIONS) - self.assertEqual( - doc, json_util.loads(ext_json_str, json_options=options)) + doc = {"uuid": Binary(_uuid.bytes, subtype=4)} + ext_json_str = json_util.dumps({"uuid": _uuid}, json_options=LEGACY_JSON_OPTIONS) + self.assertEqual(doc, json_util.loads(ext_json_str, json_options=options)) def test_binary(self): bin_type_dict = {"bin": b"\x00\x01\x02\x03\x04"} md5_type_dict = { - "md5": Binary(b' n7\x18\xaf\t/\xd1\xd1/\x80\xca\xe7q\xcc\xac', - MD5_SUBTYPE)} + "md5": Binary(b" n7\x18\xaf\t/\xd1\xd1/\x80\xca\xe7q\xcc\xac", MD5_SUBTYPE) + } custom_type_dict = {"custom": Binary(b"hello", USER_DEFINED_SUBTYPE)} self.round_trip(bin_type_dict) @@ -385,43 +408,47 @@ def test_binary(self): self.round_trip(custom_type_dict) # Binary with subtype 0 is decoded into bytes in Python 3. - bin = json_util.loads( - '{"bin": {"$binary": "AAECAwQ=", "$type": "00"}}')['bin'] + bin = json_util.loads('{"bin": {"$binary": "AAECAwQ=", "$type": "00"}}')["bin"] self.assertEqual(type(bin), bytes) # PYTHON-443 ensure old type formats are supported - json_bin_dump = json_util.dumps(bin_type_dict, - json_options=LEGACY_JSON_OPTIONS) + json_bin_dump = json_util.dumps(bin_type_dict, json_options=LEGACY_JSON_OPTIONS) self.assertIn('"$type": "00"', json_bin_dump) - self.assertEqual(bin_type_dict, - json_util.loads('{"bin": {"$type": 0, "$binary": "AAECAwQ="}}')) - json_bin_dump = json_util.dumps(md5_type_dict, - json_options=LEGACY_JSON_OPTIONS) + self.assertEqual( + bin_type_dict, json_util.loads('{"bin": {"$type": 0, "$binary": "AAECAwQ="}}') + ) + json_bin_dump = json_util.dumps(md5_type_dict, json_options=LEGACY_JSON_OPTIONS) # Check order. 
self.assertEqual( - '{"md5": {"$binary": "IG43GK8JL9HRL4DK53HMrA==",' - + ' "$type": "05"}}', - json_bin_dump) + '{"md5": {"$binary": "IG43GK8JL9HRL4DK53HMrA==",' + ' "$type": "05"}}', json_bin_dump + ) - self.assertEqual(md5_type_dict, - json_util.loads('{"md5": {"$type": 5, "$binary":' - ' "IG43GK8JL9HRL4DK53HMrA=="}}')) + self.assertEqual( + md5_type_dict, + json_util.loads('{"md5": {"$type": 5, "$binary":' ' "IG43GK8JL9HRL4DK53HMrA=="}}'), + ) - json_bin_dump = json_util.dumps(custom_type_dict, - json_options=LEGACY_JSON_OPTIONS) + json_bin_dump = json_util.dumps(custom_type_dict, json_options=LEGACY_JSON_OPTIONS) self.assertIn('"$type": "80"', json_bin_dump) - self.assertEqual(custom_type_dict, - json_util.loads('{"custom": {"$type": 128, "$binary":' - ' "aGVsbG8="}}')) + self.assertEqual( + custom_type_dict, + json_util.loads('{"custom": {"$type": 128, "$binary":' ' "aGVsbG8="}}'), + ) # Handle mongoexport where subtype >= 128 - self.assertEqual(128, - json_util.loads('{"custom": {"$type": "ffffff80", "$binary":' - ' "aGVsbG8="}}')['custom'].subtype) + self.assertEqual( + 128, + json_util.loads('{"custom": {"$type": "ffffff80", "$binary":' ' "aGVsbG8="}}')[ + "custom" + ].subtype, + ) - self.assertEqual(255, - json_util.loads('{"custom": {"$type": "ffffffff", "$binary":' - ' "aGVsbG8="}}')['custom'].subtype) + self.assertEqual( + 255, + json_util.loads('{"custom": {"$type": "ffffffff", "$binary":' ' "aGVsbG8="}}')[ + "custom" + ].subtype, + ) def test_code(self): self.round_trip({"code": Code("function x() { return 1; }")}) @@ -433,34 +460,30 @@ def test_code(self): # Check order. self.assertEqual('{"$code": "return z", "$scope": {"z": 2}}', res) - no_scope = Code('function() {}') - self.assertEqual( - '{"$code": "function() {}"}', json_util.dumps(no_scope)) + no_scope = Code("function() {}") + self.assertEqual('{"$code": "function() {}"}', json_util.dumps(no_scope)) def test_undefined(self): jsn = '{"name": {"$undefined": true}}' - self.assertIsNone(json_util.loads(jsn)['name']) + self.assertIsNone(json_util.loads(jsn)["name"]) def test_numberlong(self): jsn = '{"weight": {"$numberLong": "65535"}}' - self.assertEqual(json_util.loads(jsn)['weight'], - Int64(65535)) - self.assertEqual(json_util.dumps({"weight": Int64(65535)}), - '{"weight": 65535}') - json_options = JSONOptions(strict_number_long=True, - json_mode=JSONMode.LEGACY) - self.assertEqual(json_util.dumps({"weight": Int64(65535)}, - json_options=json_options), - jsn) + self.assertEqual(json_util.loads(jsn)["weight"], Int64(65535)) + self.assertEqual(json_util.dumps({"weight": Int64(65535)}), '{"weight": 65535}') + json_options = JSONOptions(strict_number_long=True, json_mode=JSONMode.LEGACY) + self.assertEqual(json_util.dumps({"weight": Int64(65535)}, json_options=json_options), jsn) def test_loads_document_class(self): # document_class dict should always work - self.assertEqual({"foo": "bar"}, json_util.loads( - '{"foo": "bar"}', - json_options=JSONOptions(document_class=dict))) - self.assertEqual(SON([("foo", "bar"), ("b", 1)]), json_util.loads( - '{"foo": "bar", "b": 1}', - json_options=JSONOptions(document_class=SON))) + self.assertEqual( + {"foo": "bar"}, + json_util.loads('{"foo": "bar"}', json_options=JSONOptions(document_class=dict)), + ) + self.assertEqual( + SON([("foo", "bar"), ("b", 1)]), + json_util.loads('{"foo": "bar", "b": 1}', json_options=JSONOptions(document_class=SON)), + ) class TestJsonUtilRoundtrip(IntegrationTest): @@ -469,12 +492,11 @@ def test_cursor(self): db.drop_collection("test") docs: 
List[MutableMapping[str, Any]] = [ - {'foo': [1, 2]}, - {'bar': {'hello': 'world'}}, - {'code': Code("function x() { return 1; }")}, - {'bin': Binary(b"\x00\x01\x02\x03\x04", USER_DEFINED_SUBTYPE)}, - {'dbref': {'_ref': DBRef('simple', - ObjectId('509b8db456c02c5ab7e63c34'))}} + {"foo": [1, 2]}, + {"bar": {"hello": "world"}}, + {"code": Code("function x() { return 1; }")}, + {"bin": Binary(b"\x00\x01\x02\x03\x04", USER_DEFINED_SUBTYPE)}, + {"dbref": {"_ref": DBRef("simple", ObjectId("509b8db456c02c5ab7e63c34"))}}, ] db.test.insert_many(docs) @@ -482,5 +504,6 @@ def test_cursor(self): for doc in docs: self.assertTrue(doc in reloaded_docs) + if __name__ == "__main__": unittest.main() diff --git a/test/test_load_balancer.py b/test/test_load_balancer.py index 247072c7bd..547cf327d3 100644 --- a/test/test_load_balancer.py +++ b/test/test_load_balancer.py @@ -21,16 +21,12 @@ sys.path[0:0] = [""] -from test import unittest, IntegrationTest, client_context -from test.utils import (ExceptionCatchingThread, - get_pool, - rs_client, - wait_until) +from test import IntegrationTest, client_context, unittest from test.unified_format import generate_test_classes +from test.utils import ExceptionCatchingThread, get_pool, rs_client, wait_until # Location of JSON test specifications. -TEST_PATH = os.path.join( - os.path.dirname(os.path.realpath(__file__)), 'load_balancer') +TEST_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "load_balancer") # Generate unified tests. globals().update(generate_test_classes(TEST_PATH, module=__name__)) @@ -45,7 +41,7 @@ def test_connections_are_only_returned_once(self): nconns = len(pool.sockets) self.db.test.find_one({}) self.assertEqual(len(pool.sockets), nconns) - list(self.db.test.aggregate([{'$limit': 1}])) + list(self.db.test.aggregate([{"$limit": 1}])) self.assertEqual(len(pool.sockets), nconns) @client_context.require_load_balancer @@ -68,6 +64,7 @@ def create_resource(coll): cursor = coll.find({}, batch_size=3) next(cursor) return cursor + self._test_no_gc_deadlock(create_resource) @client_context.require_failCommand_fail_point @@ -76,6 +73,7 @@ def create_resource(coll): cursor = coll.aggregate([], batchSize=3) next(cursor) return cursor + self._test_no_gc_deadlock(create_resource) def _test_no_gc_deadlock(self, create_resource): @@ -87,15 +85,11 @@ def _test_no_gc_deadlock(self, create_resource): self.assertEqual(pool.active_sockets, 0) # Cause the initial find attempt to fail to induce a reference cycle. args = { - "mode": { - "times": 1 - }, + "mode": {"times": 1}, "data": { - "failCommands": [ - "find", "aggregate" - ], - "closeConnection": True, - } + "failCommands": ["find", "aggregate"], + "closeConnection": True, + }, } with self.fail_point(args): resource = create_resource(coll) @@ -104,7 +98,7 @@ def _test_no_gc_deadlock(self, create_resource): thread = PoolLocker(pool) thread.start() - self.assertTrue(thread.locked.wait(5), 'timed out') + self.assertTrue(thread.locked.wait(5), "timed out") # Garbage collect the resource while the pool is locked to ensure we # don't deadlock. del resource @@ -116,7 +110,7 @@ def _test_no_gc_deadlock(self, create_resource): self.assertFalse(thread.is_alive()) self.assertIsNone(thread.exc) - wait_until(lambda: pool.active_sockets == 0, 'return socket') + wait_until(lambda: pool.active_sockets == 0, "return socket") # Run another operation to ensure the socket still works. 
coll.delete_many({}) @@ -133,7 +127,7 @@ def test_session_gc(self): thread = PoolLocker(pool) thread.start() - self.assertTrue(thread.locked.wait(5), 'timed out') + self.assertTrue(thread.locked.wait(5), "timed out") # Garbage collect the session while the pool is locked to ensure we # don't deadlock. del session @@ -145,7 +139,7 @@ def test_session_gc(self): self.assertFalse(thread.is_alive()) self.assertIsNone(thread.exc) - wait_until(lambda: pool.active_sockets == 0, 'return socket') + wait_until(lambda: pool.active_sockets == 0, "return socket") # Run another operation to ensure the socket still works. client[self.db.name].test.delete_many({}) @@ -164,8 +158,7 @@ def lock_pool(self): # Wait for the unlock flag. unlock_pool = self.unlock.wait(10) if not unlock_pool: - raise Exception('timed out waiting for unlock signal:' - ' deadlock?') + raise Exception("timed out waiting for unlock signal:" " deadlock?") if __name__ == "__main__": diff --git a/test/test_max_staleness.py b/test/test_max_staleness.py index 5c484fe334..4c17701133 100644 --- a/test/test_max_staleness.py +++ b/test/test_max_staleness.py @@ -21,18 +21,16 @@ sys.path[0:0] = [""] -from pymongo import MongoClient -from pymongo.errors import ConfigurationError -from pymongo.server_selectors import writable_server_selector - from test import client_context, unittest from test.utils import rs_or_single_client from test.utils_selection_tests import create_selection_tests +from pymongo import MongoClient +from pymongo.errors import ConfigurationError +from pymongo.server_selectors import writable_server_selector + # Location of JSON test specifications. -_TEST_PATH = os.path.join( - os.path.dirname(os.path.realpath(__file__)), - 'max_staleness') +_TEST_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "max_staleness") class TestAllScenarios(create_selection_tests(_TEST_PATH)): # type: ignore @@ -54,26 +52,21 @@ def test_max_staleness(self): with self.assertRaises(ConfigurationError): # Read pref "primary" can't be used with max staleness. 
- MongoClient("mongodb://a/?readPreference=primary&" - "maxStalenessSeconds=120") + MongoClient("mongodb://a/?readPreference=primary&" "maxStalenessSeconds=120") client = MongoClient("mongodb://host/?maxStalenessSeconds=-1") self.assertEqual(-1, client.read_preference.max_staleness) - client = MongoClient("mongodb://host/?readPreference=primary&" - "maxStalenessSeconds=-1") + client = MongoClient("mongodb://host/?readPreference=primary&" "maxStalenessSeconds=-1") self.assertEqual(-1, client.read_preference.max_staleness) - client = MongoClient("mongodb://host/?readPreference=secondary&" - "maxStalenessSeconds=120") + client = MongoClient("mongodb://host/?readPreference=secondary&" "maxStalenessSeconds=120") self.assertEqual(120, client.read_preference.max_staleness) - client = MongoClient("mongodb://a/?readPreference=secondary&" - "maxStalenessSeconds=1") + client = MongoClient("mongodb://a/?readPreference=secondary&" "maxStalenessSeconds=1") self.assertEqual(1, client.read_preference.max_staleness) - client = MongoClient("mongodb://a/?readPreference=secondary&" - "maxStalenessSeconds=-1") + client = MongoClient("mongodb://a/?readPreference=secondary&" "maxStalenessSeconds=-1") self.assertEqual(-1, client.read_preference.max_staleness) client = MongoClient(maxStalenessSeconds=-1, readPreference="nearest") @@ -85,15 +78,15 @@ def test_max_staleness(self): def test_max_staleness_float(self): with self.assertRaises(TypeError) as ctx: - rs_or_single_client(maxStalenessSeconds=1.5, - readPreference="nearest") + rs_or_single_client(maxStalenessSeconds=1.5, readPreference="nearest") self.assertIn("must be an integer", str(ctx.exception)) with warnings.catch_warnings(record=True) as ctx: warnings.simplefilter("always") - client = MongoClient("mongodb://host/?maxStalenessSeconds=1.5" - "&readPreference=nearest") + client = MongoClient( + "mongodb://host/?maxStalenessSeconds=1.5" "&readPreference=nearest" + ) # Option was ignored. self.assertEqual(-1, client.read_preference.max_staleness) @@ -102,15 +95,13 @@ def test_max_staleness_float(self): def test_max_staleness_zero(self): # Zero is too small. with self.assertRaises(ValueError) as ctx: - rs_or_single_client(maxStalenessSeconds=0, - readPreference="nearest") + rs_or_single_client(maxStalenessSeconds=0, readPreference="nearest") self.assertIn("must be a positive integer", str(ctx.exception)) with warnings.catch_warnings(record=True) as ctx: warnings.simplefilter("always") - client = MongoClient("mongodb://host/?maxStalenessSeconds=0" - "&readPreference=nearest") + client = MongoClient("mongodb://host/?maxStalenessSeconds=0" "&readPreference=nearest") # Option was ignored. 
self.assertEqual(-1, client.read_preference.max_staleness) diff --git a/test/test_mongos_load_balancing.py b/test/test_mongos_load_balancing.py index c110b8b10c..e39940f56b 100644 --- a/test/test_mongos_load_balancing.py +++ b/test/test_mongos_load_balancing.py @@ -19,12 +19,13 @@ sys.path[0:0] = [""] +from test import MockClientTest, client_context, unittest +from test.pymongo_mocks import MockClient +from test.utils import connected, wait_until + from pymongo.errors import AutoReconnect, InvalidOperation from pymongo.server_selectors import writable_server_selector from pymongo.topology_description import TOPOLOGY_TYPE -from test import unittest, client_context, MockClientTest -from test.pymongo_mocks import MockClient -from test.utils import connected, wait_until @client_context.require_connection @@ -34,14 +35,13 @@ def setUpModule(): class SimpleOp(threading.Thread): - def __init__(self, client): super(SimpleOp, self).__init__() self.client = client self.passed = False def run(self): - self.client.db.command('ping') + self.client.db.command("ping") self.passed = True # No exception raised. @@ -58,26 +58,27 @@ def do_simple_op(client, nthreads): def writable_addresses(topology): - return set(server.description.address for server in - topology.select_servers(writable_server_selector)) + return set( + server.description.address for server in topology.select_servers(writable_server_selector) + ) class TestMongosLoadBalancing(MockClientTest): - def mock_client(self, **kwargs): mock_client = MockClient( standalones=[], members=[], - mongoses=['a:1', 'b:2', 'c:3'], - host='a:1,b:2,c:3', + mongoses=["a:1", "b:2", "c:3"], + host="a:1,b:2,c:3", connect=False, - **kwargs) + **kwargs + ) self.addCleanup(mock_client.close) # Latencies in seconds. - mock_client.mock_rtts['a:1'] = 0.020 - mock_client.mock_rtts['b:2'] = 0.025 - mock_client.mock_rtts['c:3'] = 0.045 + mock_client.mock_rtts["a:1"] = 0.020 + mock_client.mock_rtts["b:2"] = 0.025 + mock_client.mock_rtts["c:3"] = 0.045 return mock_client def test_lazy_connect(self): @@ -90,15 +91,15 @@ def test_lazy_connect(self): # Trigger initial connection. do_simple_op(client, nthreads) - wait_until(lambda: len(client.nodes) == 3, 'connect to all mongoses') + wait_until(lambda: len(client.nodes) == 3, "connect to all mongoses") def test_failover(self): nthreads = 10 client = connected(self.mock_client(localThresholdMS=0.001)) - wait_until(lambda: len(client.nodes) == 3, 'connect to all mongoses') + wait_until(lambda: len(client.nodes) == 3, "connect to all mongoses") # Our chosen mongos goes down. - client.kill_host('a:1') + client.kill_host("a:1") # Trigger failover to higher-latency nodes. AutoReconnect should be # raised at most once in each thread. @@ -106,10 +107,10 @@ def test_failover(self): def f(): try: - client.db.command('ping') + client.db.command("ping") except AutoReconnect: # Second attempt succeeds. - client.db.command('ping') + client.db.command("ping") passed.append(True) @@ -128,34 +129,34 @@ def f(): def test_local_threshold(self): client = connected(self.mock_client(localThresholdMS=30)) self.assertEqual(30, client.options.local_threshold_ms) - wait_until(lambda: len(client.nodes) == 3, 'connect to all mongoses') + wait_until(lambda: len(client.nodes) == 3, "connect to all mongoses") topology = client._topology # All are within a 30-ms latency window, see self.mock_client(). 
- self.assertEqual(set([('a', 1), ('b', 2), ('c', 3)]), - writable_addresses(topology)) + self.assertEqual(set([("a", 1), ("b", 2), ("c", 3)]), writable_addresses(topology)) # No error - client.admin.command('ping') + client.admin.command("ping") client = connected(self.mock_client(localThresholdMS=0)) self.assertEqual(0, client.options.local_threshold_ms) # No error - client.db.command('ping') + client.db.command("ping") # Our chosen mongos goes down. - client.kill_host('%s:%s' % next(iter(client.nodes))) + client.kill_host("%s:%s" % next(iter(client.nodes))) try: - client.db.command('ping') + client.db.command("ping") except: pass # We eventually connect to a new mongos. def connect_to_new_mongos(): try: - return client.db.command('ping') + return client.db.command("ping") except AutoReconnect: pass - wait_until(connect_to_new_mongos, 'connect to a new mongos') + + wait_until(connect_to_new_mongos, "connect to a new mongos") def test_load_balancing(self): # Although the server selection JSON tests already prove that @@ -163,25 +164,25 @@ def test_load_balancing(self): # test of discovering servers' round trip times and configuring # localThresholdMS. client = connected(self.mock_client()) - wait_until(lambda: len(client.nodes) == 3, 'connect to all mongoses') + wait_until(lambda: len(client.nodes) == 3, "connect to all mongoses") # Prohibited for topology type Sharded. with self.assertRaises(InvalidOperation): client.address topology = client._topology - self.assertEqual(TOPOLOGY_TYPE.Sharded, - topology.description.topology_type) + self.assertEqual(TOPOLOGY_TYPE.Sharded, topology.description.topology_type) # a and b are within the 15-ms latency window, see self.mock_client(). - self.assertEqual(set([('a', 1), ('b', 2)]), - writable_addresses(topology)) + self.assertEqual(set([("a", 1), ("b", 2)]), writable_addresses(topology)) - client.mock_rtts['a:1'] = 0.045 + client.mock_rtts["a:1"] = 0.045 # Discover only b is within latency window. - wait_until(lambda: set([('b', 2)]) == writable_addresses(topology), - 'discover server "a" is too far') + wait_until( + lambda: set([("b", 2)]) == writable_addresses(topology), + 'discover server "a" is too far', + ) if __name__ == "__main__": diff --git a/test/test_monitor.py b/test/test_monitor.py index ed0d4543f8..85cfb0bc40 100644 --- a/test/test_monitor.py +++ b/test/test_monitor.py @@ -20,13 +20,15 @@ sys.path[0:0] = [""] -from pymongo.periodic_executor import _EXECUTORS +from test import IntegrationTest, unittest +from test.utils import ( + ServerAndTopologyEventListener, + connected, + single_client, + wait_until, +) -from test import unittest, IntegrationTest -from test.utils import (connected, - ServerAndTopologyEventListener, - single_client, - wait_until) +from pymongo.periodic_executor import _EXECUTORS def unregistered(ref): @@ -58,16 +60,13 @@ def test_cleanup_executors_on_client_del(self): self.assertEqual(len(executors), 4) # Each executor stores a weakref to itself in _EXECUTORS. 
- executor_refs = [ - (r, r()._name) for r in _EXECUTORS.copy() if r() in executors] # type: ignore + executor_refs = [(r, r()._name) for r in _EXECUTORS.copy() if r() in executors] del executors del client for ref, name in executor_refs: - wait_until(partial(unregistered, ref), - 'unregister executor: %s' % (name,), - timeout=5) + wait_until(partial(unregistered, ref), "unregister executor: %s" % (name,), timeout=5) def test_cleanup_executors_on_client_close(self): client = create_client() @@ -77,9 +76,9 @@ def test_cleanup_executors_on_client_close(self): client.close() for executor in executors: - wait_until(lambda: executor._stopped, - 'closed executor: %s' % (executor._name,), - timeout=5) + wait_until( + lambda: executor._stopped, "closed executor: %s" % (executor._name,), timeout=5 + ) if __name__ == "__main__": diff --git a/test/test_monitoring.py b/test/test_monitoring.py index 4e513c5c69..1adb2983e4 100644 --- a/test/test_monitoring.py +++ b/test/test_monitoring.py @@ -16,31 +16,28 @@ import datetime import sys import time -from typing import Any import warnings +from typing import Any sys.path[0:0] = [""] +from test import IntegrationTest, client_context, client_knobs, sanitize_cmd, unittest +from test.utils import ( + EventListener, + get_pool, + rs_or_single_client, + single_client, + wait_until, +) + from bson.int64 import Int64 from bson.objectid import ObjectId from bson.son import SON -from pymongo import CursorType, monitoring, InsertOne, UpdateOne, DeleteOne +from pymongo import CursorType, DeleteOne, InsertOne, UpdateOne, monitoring from pymongo.command_cursor import CommandCursor -from pymongo.errors import (AutoReconnect, - NotPrimaryError, - OperationFailure) +from pymongo.errors import AutoReconnect, NotPrimaryError, OperationFailure from pymongo.read_preferences import ReadPreference from pymongo.write_concern import WriteConcern -from test import (client_context, - client_knobs, - IntegrationTest, - sanitize_cmd, - unittest) -from test.utils import (EventListener, - get_pool, - rs_or_single_client, - single_client, - wait_until) class TestCommandMonitoring(IntegrationTest): @@ -51,9 +48,7 @@ class TestCommandMonitoring(IntegrationTest): def setUpClass(cls): super(TestCommandMonitoring, cls).setUpClass() cls.listener = EventListener() - cls.client = rs_or_single_client( - event_listeners=[cls.listener], - retryWrites=False) + cls.client = rs_or_single_client(event_listeners=[cls.listener], retryWrites=False) @classmethod def tearDownClass(cls): @@ -65,107 +60,93 @@ def tearDown(self): super(TestCommandMonitoring, self).tearDown() def test_started_simple(self): - self.client.pymongo_test.command('ping') + self.client.pymongo_test.command("ping") results = self.listener.results - started = results['started'][0] - succeeded = results['succeeded'][0] - self.assertEqual(0, len(results['failed'])) - self.assertTrue( - isinstance(succeeded, monitoring.CommandSucceededEvent)) - self.assertTrue( - isinstance(started, monitoring.CommandStartedEvent)) - self.assertEqualCommand(SON([('ping', 1)]), started.command) - self.assertEqual('ping', started.command_name) + started = results["started"][0] + succeeded = results["succeeded"][0] + self.assertEqual(0, len(results["failed"])) + self.assertTrue(isinstance(succeeded, monitoring.CommandSucceededEvent)) + self.assertTrue(isinstance(started, monitoring.CommandStartedEvent)) + self.assertEqualCommand(SON([("ping", 1)]), started.command) + self.assertEqual("ping", started.command_name) self.assertEqual(self.client.address, 
started.connection_id) - self.assertEqual('pymongo_test', started.database_name) + self.assertEqual("pymongo_test", started.database_name) self.assertTrue(isinstance(started.request_id, int)) def test_succeeded_simple(self): - self.client.pymongo_test.command('ping') + self.client.pymongo_test.command("ping") results = self.listener.results - started = results['started'][0] - succeeded = results['succeeded'][0] - self.assertEqual(0, len(results['failed'])) - self.assertTrue( - isinstance(started, monitoring.CommandStartedEvent)) - self.assertTrue( - isinstance(succeeded, monitoring.CommandSucceededEvent)) - self.assertEqual('ping', succeeded.command_name) + started = results["started"][0] + succeeded = results["succeeded"][0] + self.assertEqual(0, len(results["failed"])) + self.assertTrue(isinstance(started, monitoring.CommandStartedEvent)) + self.assertTrue(isinstance(succeeded, monitoring.CommandSucceededEvent)) + self.assertEqual("ping", succeeded.command_name) self.assertEqual(self.client.address, succeeded.connection_id) - self.assertEqual(1, succeeded.reply.get('ok')) + self.assertEqual(1, succeeded.reply.get("ok")) self.assertTrue(isinstance(succeeded.request_id, int)) self.assertTrue(isinstance(succeeded.duration_micros, int)) def test_failed_simple(self): try: - self.client.pymongo_test.command('oops!') + self.client.pymongo_test.command("oops!") except OperationFailure: pass results = self.listener.results - started = results['started'][0] - failed = results['failed'][0] - self.assertEqual(0, len(results['succeeded'])) - self.assertTrue( - isinstance(started, monitoring.CommandStartedEvent)) - self.assertTrue( - isinstance(failed, monitoring.CommandFailedEvent)) - self.assertEqual('oops!', failed.command_name) + started = results["started"][0] + failed = results["failed"][0] + self.assertEqual(0, len(results["succeeded"])) + self.assertTrue(isinstance(started, monitoring.CommandStartedEvent)) + self.assertTrue(isinstance(failed, monitoring.CommandFailedEvent)) + self.assertEqual("oops!", failed.command_name) self.assertEqual(self.client.address, failed.connection_id) - self.assertEqual(0, failed.failure.get('ok')) + self.assertEqual(0, failed.failure.get("ok")) self.assertTrue(isinstance(failed.request_id, int)) self.assertTrue(isinstance(failed.duration_micros, int)) def test_find_one(self): self.client.pymongo_test.test.find_one() results = self.listener.results - started = results['started'][0] - succeeded = results['succeeded'][0] - self.assertEqual(0, len(results['failed'])) - self.assertTrue( - isinstance(succeeded, monitoring.CommandSucceededEvent)) - self.assertTrue( - isinstance(started, monitoring.CommandStartedEvent)) + started = results["started"][0] + succeeded = results["succeeded"][0] + self.assertEqual(0, len(results["failed"])) + self.assertTrue(isinstance(succeeded, monitoring.CommandSucceededEvent)) + self.assertTrue(isinstance(started, monitoring.CommandStartedEvent)) self.assertEqualCommand( - SON([('find', 'test'), - ('filter', {}), - ('limit', 1), - ('singleBatch', True)]), - started.command) - self.assertEqual('find', started.command_name) + SON([("find", "test"), ("filter", {}), ("limit", 1), ("singleBatch", True)]), + started.command, + ) + self.assertEqual("find", started.command_name) self.assertEqual(self.client.address, started.connection_id) - self.assertEqual('pymongo_test', started.database_name) + self.assertEqual("pymongo_test", started.database_name) self.assertTrue(isinstance(started.request_id, int)) def test_find_and_get_more(self): 
self.client.pymongo_test.test.drop() self.client.pymongo_test.test.insert_many([{} for _ in range(10)]) self.listener.results.clear() - cursor = self.client.pymongo_test.test.find( - projection={'_id': False}, - batch_size=4) + cursor = self.client.pymongo_test.test.find(projection={"_id": False}, batch_size=4) for _ in range(4): next(cursor) cursor_id = cursor.cursor_id results = self.listener.results - started = results['started'][0] - succeeded = results['succeeded'][0] - self.assertEqual(0, len(results['failed'])) - self.assertTrue( - isinstance(started, monitoring.CommandStartedEvent)) + started = results["started"][0] + succeeded = results["succeeded"][0] + self.assertEqual(0, len(results["failed"])) + self.assertTrue(isinstance(started, monitoring.CommandStartedEvent)) self.assertEqualCommand( - SON([('find', 'test'), - ('filter', {}), - ('projection', {'_id': False}), - ('batchSize', 4)]), - started.command) - self.assertEqual('find', started.command_name) + SON( + [("find", "test"), ("filter", {}), ("projection", {"_id": False}), ("batchSize", 4)] + ), + started.command, + ) + self.assertEqual("find", started.command_name) self.assertEqual(self.client.address, started.connection_id) - self.assertEqual('pymongo_test', started.database_name) + self.assertEqual("pymongo_test", started.database_name) self.assertTrue(isinstance(started.request_id, int)) - self.assertTrue( - isinstance(succeeded, monitoring.CommandSucceededEvent)) + self.assertTrue(isinstance(succeeded, monitoring.CommandSucceededEvent)) self.assertTrue(isinstance(succeeded.duration_micros, int)) - self.assertEqual('find', succeeded.command_name) + self.assertEqual("find", succeeded.command_name) self.assertTrue(isinstance(succeeded.request_id, int)) self.assertEqual(cursor.address, succeeded.connection_id) csr = succeeded.reply["cursor"] @@ -179,24 +160,21 @@ def test_find_and_get_more(self): next(cursor) try: results = self.listener.results - started = results['started'][0] - succeeded = results['succeeded'][0] - self.assertEqual(0, len(results['failed'])) - self.assertTrue( - isinstance(started, monitoring.CommandStartedEvent)) + started = results["started"][0] + succeeded = results["succeeded"][0] + self.assertEqual(0, len(results["failed"])) + self.assertTrue(isinstance(started, monitoring.CommandStartedEvent)) self.assertEqualCommand( - SON([('getMore', cursor_id), - ('collection', 'test'), - ('batchSize', 4)]), - started.command) - self.assertEqual('getMore', started.command_name) + SON([("getMore", cursor_id), ("collection", "test"), ("batchSize", 4)]), + started.command, + ) + self.assertEqual("getMore", started.command_name) self.assertEqual(self.client.address, started.connection_id) - self.assertEqual('pymongo_test', started.database_name) + self.assertEqual("pymongo_test", started.database_name) self.assertTrue(isinstance(started.request_id, int)) - self.assertTrue( - isinstance(succeeded, monitoring.CommandSucceededEvent)) + self.assertTrue(isinstance(succeeded, monitoring.CommandSucceededEvent)) self.assertTrue(isinstance(succeeded.duration_micros, int)) - self.assertEqual('getMore', succeeded.command_name) + self.assertEqual("getMore", succeeded.command_name) self.assertTrue(isinstance(succeeded.request_id, int)) self.assertEqual(cursor.address, succeeded.connection_id) csr = succeeded.reply["cursor"] @@ -208,32 +186,28 @@ def test_find_and_get_more(self): tuple(cursor) def test_find_with_explain(self): - cmd = SON([('explain', SON([('find', 'test'), - ('filter', {})]))]) + cmd = SON([("explain", 
SON([("find", "test"), ("filter", {})]))]) self.client.pymongo_test.test.drop() self.client.pymongo_test.test.insert_one({}) self.listener.results.clear() coll = self.client.pymongo_test.test # Test that we publish the unwrapped command. if self.client.is_mongos: - coll = coll.with_options( - read_preference=ReadPreference.PRIMARY_PREFERRED) + coll = coll.with_options(read_preference=ReadPreference.PRIMARY_PREFERRED) res = coll.find().explain() results = self.listener.results - started = results['started'][0] - succeeded = results['succeeded'][0] - self.assertEqual(0, len(results['failed'])) - self.assertTrue( - isinstance(started, monitoring.CommandStartedEvent)) + started = results["started"][0] + succeeded = results["succeeded"][0] + self.assertEqual(0, len(results["failed"])) + self.assertTrue(isinstance(started, monitoring.CommandStartedEvent)) self.assertEqualCommand(cmd, started.command) - self.assertEqual('explain', started.command_name) + self.assertEqual("explain", started.command_name) self.assertEqual(self.client.address, started.connection_id) - self.assertEqual('pymongo_test', started.database_name) + self.assertEqual("pymongo_test", started.database_name) self.assertTrue(isinstance(started.request_id, int)) - self.assertTrue( - isinstance(succeeded, monitoring.CommandSucceededEvent)) + self.assertTrue(isinstance(succeeded, monitoring.CommandSucceededEvent)) self.assertTrue(isinstance(succeeded.duration_micros, int)) - self.assertEqual('explain', succeeded.command_name) + self.assertEqual("explain", succeeded.command_name) self.assertTrue(isinstance(succeeded.request_id, int)) self.assertEqual(self.client.address, succeeded.connection_id) self.assertEqual(res, succeeded.reply) @@ -241,34 +215,31 @@ def test_find_with_explain(self): def _test_find_options(self, query, expected_cmd): coll = self.client.pymongo_test.test coll.drop() - coll.create_index('x') - coll.insert_many([{'x': i} for i in range(5)]) + coll.create_index("x") + coll.insert_many([{"x": i} for i in range(5)]) # Test that we publish the unwrapped command. 
self.listener.results.clear() if self.client.is_mongos: - coll = coll.with_options( - read_preference=ReadPreference.PRIMARY_PREFERRED) + coll = coll.with_options(read_preference=ReadPreference.PRIMARY_PREFERRED) cursor = coll.find(**query) next(cursor) try: results = self.listener.results - started = results['started'][0] - succeeded = results['succeeded'][0] - self.assertEqual(0, len(results['failed'])) - self.assertTrue( - isinstance(started, monitoring.CommandStartedEvent)) + started = results["started"][0] + succeeded = results["succeeded"][0] + self.assertEqual(0, len(results["failed"])) + self.assertTrue(isinstance(started, monitoring.CommandStartedEvent)) self.assertEqualCommand(expected_cmd, started.command) - self.assertEqual('find', started.command_name) + self.assertEqual("find", started.command_name) self.assertEqual(self.client.address, started.connection_id) - self.assertEqual('pymongo_test', started.database_name) + self.assertEqual("pymongo_test", started.database_name) self.assertTrue(isinstance(started.request_id, int)) - self.assertTrue( - isinstance(succeeded, monitoring.CommandSucceededEvent)) + self.assertTrue(isinstance(succeeded, monitoring.CommandSucceededEvent)) self.assertTrue(isinstance(succeeded.duration_micros, int)) - self.assertEqual('find', succeeded.command_name) + self.assertEqual("find", succeeded.command_name) self.assertTrue(isinstance(succeeded.request_id, int)) self.assertEqual(self.client.address, succeeded.connection_id) finally: @@ -276,125 +247,128 @@ def _test_find_options(self, query, expected_cmd): tuple(cursor) def test_find_options(self): - query = dict(filter={}, - hint=[('x', 1)], - max_time_ms=10000, - max={'x': 10}, - min={'x': -10}, - return_key=True, - show_record_id=True, - projection={'x': False}, - skip=1, - no_cursor_timeout=True, - sort=[('_id', 1)], - allow_partial_results=True, - comment='this is a test', - batch_size=2) - - cmd = dict(find='test', - filter={}, - hint=SON([('x', 1)]), - comment='this is a test', - maxTimeMS=10000, - max={'x': 10}, - min={'x': -10}, - returnKey=True, - showRecordId=True, - sort=SON([('_id', 1)]), - projection={'x': False}, - skip=1, - batchSize=2, - noCursorTimeout=True, - allowPartialResults=True) + query = dict( + filter={}, + hint=[("x", 1)], + max_time_ms=10000, + max={"x": 10}, + min={"x": -10}, + return_key=True, + show_record_id=True, + projection={"x": False}, + skip=1, + no_cursor_timeout=True, + sort=[("_id", 1)], + allow_partial_results=True, + comment="this is a test", + batch_size=2, + ) + + cmd = dict( + find="test", + filter={}, + hint=SON([("x", 1)]), + comment="this is a test", + maxTimeMS=10000, + max={"x": 10}, + min={"x": -10}, + returnKey=True, + showRecordId=True, + sort=SON([("_id", 1)]), + projection={"x": False}, + skip=1, + batchSize=2, + noCursorTimeout=True, + allowPartialResults=True, + ) if client_context.version < (4, 1, 0, -1): - query['max_scan'] = 10 - cmd['maxScan'] = 10 + query["max_scan"] = 10 + cmd["maxScan"] = 10 self._test_find_options(query, cmd) @client_context.require_version_max(3, 7, 2) def test_find_snapshot(self): # Test "snapshot" parameter separately, can't combine with "sort". 
- query = dict(filter={}, - snapshot=True) + query = dict(filter={}, snapshot=True) - cmd = dict(find='test', - filter={}, - snapshot=True) + cmd = dict(find="test", filter={}, snapshot=True) self._test_find_options(query, cmd) def test_command_and_get_more(self): self.client.pymongo_test.test.drop() - self.client.pymongo_test.test.insert_many( - [{'x': 1} for _ in range(10)]) + self.client.pymongo_test.test.insert_many([{"x": 1} for _ in range(10)]) self.listener.results.clear() coll = self.client.pymongo_test.test # Test that we publish the unwrapped command. if self.client.is_mongos: - coll = coll.with_options( - read_preference=ReadPreference.PRIMARY_PREFERRED) - cursor = coll.aggregate( - [{'$project': {'_id': False, 'x': 1}}], batchSize=4) + coll = coll.with_options(read_preference=ReadPreference.PRIMARY_PREFERRED) + cursor = coll.aggregate([{"$project": {"_id": False, "x": 1}}], batchSize=4) for _ in range(4): next(cursor) cursor_id = cursor.cursor_id results = self.listener.results - started = results['started'][0] - succeeded = results['succeeded'][0] - self.assertEqual(0, len(results['failed'])) - self.assertTrue( - isinstance(started, monitoring.CommandStartedEvent)) + started = results["started"][0] + succeeded = results["succeeded"][0] + self.assertEqual(0, len(results["failed"])) + self.assertTrue(isinstance(started, monitoring.CommandStartedEvent)) self.assertEqualCommand( - SON([('aggregate', 'test'), - ('pipeline', [{'$project': {'_id': False, 'x': 1}}]), - ('cursor', {'batchSize': 4})]), - started.command) - self.assertEqual('aggregate', started.command_name) + SON( + [ + ("aggregate", "test"), + ("pipeline", [{"$project": {"_id": False, "x": 1}}]), + ("cursor", {"batchSize": 4}), + ] + ), + started.command, + ) + self.assertEqual("aggregate", started.command_name) self.assertEqual(self.client.address, started.connection_id) - self.assertEqual('pymongo_test', started.database_name) + self.assertEqual("pymongo_test", started.database_name) self.assertTrue(isinstance(started.request_id, int)) - self.assertTrue( - isinstance(succeeded, monitoring.CommandSucceededEvent)) + self.assertTrue(isinstance(succeeded, monitoring.CommandSucceededEvent)) self.assertTrue(isinstance(succeeded.duration_micros, int)) - self.assertEqual('aggregate', succeeded.command_name) + self.assertEqual("aggregate", succeeded.command_name) self.assertTrue(isinstance(succeeded.request_id, int)) self.assertEqual(cursor.address, succeeded.connection_id) - expected_cursor = {'id': cursor_id, - 'ns': 'pymongo_test.test', - 'firstBatch': [{'x': 1} for _ in range(4)]} - self.assertEqualCommand(expected_cursor, succeeded.reply.get('cursor')) + expected_cursor = { + "id": cursor_id, + "ns": "pymongo_test.test", + "firstBatch": [{"x": 1} for _ in range(4)], + } + self.assertEqualCommand(expected_cursor, succeeded.reply.get("cursor")) self.listener.results.clear() next(cursor) try: results = self.listener.results - started = results['started'][0] - succeeded = results['succeeded'][0] - self.assertEqual(0, len(results['failed'])) - self.assertTrue( - isinstance(started, monitoring.CommandStartedEvent)) + started = results["started"][0] + succeeded = results["succeeded"][0] + self.assertEqual(0, len(results["failed"])) + self.assertTrue(isinstance(started, monitoring.CommandStartedEvent)) self.assertEqualCommand( - SON([('getMore', cursor_id), - ('collection', 'test'), - ('batchSize', 4)]), - started.command) - self.assertEqual('getMore', started.command_name) + SON([("getMore", cursor_id), ("collection", "test"), 
("batchSize", 4)]), + started.command, + ) + self.assertEqual("getMore", started.command_name) self.assertEqual(self.client.address, started.connection_id) - self.assertEqual('pymongo_test', started.database_name) + self.assertEqual("pymongo_test", started.database_name) self.assertTrue(isinstance(started.request_id, int)) - self.assertTrue( - isinstance(succeeded, monitoring.CommandSucceededEvent)) + self.assertTrue(isinstance(succeeded, monitoring.CommandSucceededEvent)) self.assertTrue(isinstance(succeeded.duration_micros, int)) - self.assertEqual('getMore', succeeded.command_name) + self.assertEqual("getMore", succeeded.command_name) self.assertTrue(isinstance(succeeded.request_id, int)) self.assertEqual(cursor.address, succeeded.connection_id) expected_result = { - 'cursor': {'id': cursor_id, - 'ns': 'pymongo_test.test', - 'nextBatch': [{'x': 1} for _ in range(4)]}, - 'ok': 1.0} + "cursor": { + "id": cursor_id, + "ns": "pymongo_test.test", + "nextBatch": [{"x": 1} for _ in range(4)], + }, + "ok": 1.0, + } self.assertEqualReply(expected_result, succeeded.reply) finally: # Exhaust the cursor to avoid kill cursors. @@ -411,23 +385,20 @@ def test_get_more_failure(self): except Exception: pass results = self.listener.results - started = results['started'][0] - self.assertEqual(0, len(results['succeeded'])) - failed = results['failed'][0] - self.assertTrue( - isinstance(started, monitoring.CommandStartedEvent)) + started = results["started"][0] + self.assertEqual(0, len(results["succeeded"])) + failed = results["failed"][0] + self.assertTrue(isinstance(started, monitoring.CommandStartedEvent)) self.assertEqualCommand( - SON([('getMore', cursor_id), - ('collection', 'test')]), - started.command) - self.assertEqual('getMore', started.command_name) + SON([("getMore", cursor_id), ("collection", "test")]), started.command + ) + self.assertEqual("getMore", started.command_name) self.assertEqual(self.client.address, started.connection_id) - self.assertEqual('pymongo_test', started.database_name) + self.assertEqual("pymongo_test", started.database_name) self.assertTrue(isinstance(started.request_id, int)) - self.assertTrue( - isinstance(failed, monitoring.CommandFailedEvent)) + self.assertTrue(isinstance(failed, monitoring.CommandFailedEvent)) self.assertTrue(isinstance(failed.duration_micros, int)) - self.assertEqual('getMore', failed.command_name) + self.assertEqual("getMore", failed.command_name) self.assertTrue(isinstance(failed.request_id, int)) self.assertEqual(cursor.address, failed.connection_id) self.assertEqual(0, failed.failure.get("ok")) @@ -438,7 +409,7 @@ def test_not_primary_error(self): address = next(iter(client_context.client.secondaries)) client = single_client(*address, event_listeners=[self.listener]) # Clear authentication command results from the listener. 
- client.admin.command('ping') + client.admin.command("ping") self.listener.results.clear() error = None try: @@ -446,16 +417,14 @@ def test_not_primary_error(self): except NotPrimaryError as exc: error = exc.errors results = self.listener.results - started = results['started'][0] - failed = results['failed'][0] - self.assertEqual(0, len(results['succeeded'])) - self.assertTrue( - isinstance(started, monitoring.CommandStartedEvent)) - self.assertTrue( - isinstance(failed, monitoring.CommandFailedEvent)) - self.assertEqual('findAndModify', failed.command_name) + started = results["started"][0] + failed = results["failed"][0] + self.assertEqual(0, len(results["succeeded"])) + self.assertTrue(isinstance(started, monitoring.CommandStartedEvent)) + self.assertTrue(isinstance(failed, monitoring.CommandFailedEvent)) + self.assertEqual("findAndModify", failed.command_name) self.assertEqual(address, failed.connection_id) - self.assertEqual(0, failed.failure.get('ok')) + self.assertEqual(0, failed.failure.get("ok")) self.assertTrue(isinstance(failed.request_id, int)) self.assertTrue(isinstance(failed.duration_micros, int)) self.assertEqual(error, failed.failure) @@ -466,60 +435,62 @@ def test_exhaust(self): self.client.pymongo_test.test.insert_many([{} for _ in range(11)]) self.listener.results.clear() cursor = self.client.pymongo_test.test.find( - projection={'_id': False}, - batch_size=5, - cursor_type=CursorType.EXHAUST) + projection={"_id": False}, batch_size=5, cursor_type=CursorType.EXHAUST + ) next(cursor) cursor_id = cursor.cursor_id results = self.listener.results - started = results['started'][0] - succeeded = results['succeeded'][0] - self.assertEqual(0, len(results['failed'])) - self.assertTrue( - isinstance(started, monitoring.CommandStartedEvent)) - self.assertEqualCommand(SON([('find', 'test'), - ('filter', {}), - ('projection', {'_id': False}), - ('batchSize', 5)]), started.command) - self.assertEqual('find', started.command_name) + started = results["started"][0] + succeeded = results["succeeded"][0] + self.assertEqual(0, len(results["failed"])) + self.assertTrue(isinstance(started, monitoring.CommandStartedEvent)) + self.assertEqualCommand( + SON( + [("find", "test"), ("filter", {}), ("projection", {"_id": False}), ("batchSize", 5)] + ), + started.command, + ) + self.assertEqual("find", started.command_name) self.assertEqual(cursor.address, started.connection_id) - self.assertEqual('pymongo_test', started.database_name) + self.assertEqual("pymongo_test", started.database_name) self.assertTrue(isinstance(started.request_id, int)) - self.assertTrue( - isinstance(succeeded, monitoring.CommandSucceededEvent)) + self.assertTrue(isinstance(succeeded, monitoring.CommandSucceededEvent)) self.assertTrue(isinstance(succeeded.duration_micros, int)) - self.assertEqual('find', succeeded.command_name) + self.assertEqual("find", succeeded.command_name) self.assertTrue(isinstance(succeeded.request_id, int)) self.assertEqual(cursor.address, succeeded.connection_id) expected_result = { - 'cursor': {'id': cursor_id, - 'ns': 'pymongo_test.test', - 'firstBatch': [{} for _ in range(5)]}, - 'ok': 1} + "cursor": { + "id": cursor_id, + "ns": "pymongo_test.test", + "firstBatch": [{} for _ in range(5)], + }, + "ok": 1, + } self.assertEqualReply(expected_result, succeeded.reply) self.listener.results.clear() tuple(cursor) results = self.listener.results - self.assertEqual(0, len(results['failed'])) - for event in results['started']: + self.assertEqual(0, len(results["failed"])) + for event in 
results["started"]: self.assertTrue(isinstance(event, monitoring.CommandStartedEvent)) - self.assertEqualCommand(SON([('getMore', cursor_id), - ('collection', 'test'), - ('batchSize', 5)]), event.command) - self.assertEqual('getMore', event.command_name) + self.assertEqualCommand( + SON([("getMore", cursor_id), ("collection", "test"), ("batchSize", 5)]), + event.command, + ) + self.assertEqual("getMore", event.command_name) self.assertEqual(cursor.address, event.connection_id) - self.assertEqual('pymongo_test', event.database_name) + self.assertEqual("pymongo_test", event.database_name) self.assertTrue(isinstance(event.request_id, int)) - for event in results['succeeded']: - self.assertTrue( - isinstance(event, monitoring.CommandSucceededEvent)) + for event in results["succeeded"]: + self.assertTrue(isinstance(event, monitoring.CommandSucceededEvent)) self.assertTrue(isinstance(event.duration_micros, int)) - self.assertEqual('getMore', event.command_name) + self.assertEqual("getMore", event.command_name) self.assertTrue(isinstance(event.request_id, int)) self.assertEqual(cursor.address, event.connection_id) # Last getMore receives a response with cursor id 0. - self.assertEqual(0, results['succeeded'][-1].reply['cursor']['id']) + self.assertEqual(0, results["succeeded"][-1].reply["cursor"]["id"]) def test_kill_cursors(self): with client_knobs(kill_cursor_frequency=0.01): @@ -532,30 +503,30 @@ def test_kill_cursors(self): cursor.close() time.sleep(2) results = self.listener.results - started = results['started'][0] - succeeded = results['succeeded'][0] - self.assertEqual(0, len(results['failed'])) - self.assertTrue( - isinstance(started, monitoring.CommandStartedEvent)) + started = results["started"][0] + succeeded = results["succeeded"][0] + self.assertEqual(0, len(results["failed"])) + self.assertTrue(isinstance(started, monitoring.CommandStartedEvent)) # There could be more than one cursor_id here depending on # when the thread last ran. - self.assertIn(cursor_id, started.command['cursors']) - self.assertEqual('killCursors', started.command_name) + self.assertIn(cursor_id, started.command["cursors"]) + self.assertEqual("killCursors", started.command_name) self.assertIs(type(started.connection_id), tuple) self.assertEqual(cursor.address, started.connection_id) - self.assertEqual('pymongo_test', started.database_name) + self.assertEqual("pymongo_test", started.database_name) self.assertTrue(isinstance(started.request_id, int)) - self.assertTrue( - isinstance(succeeded, monitoring.CommandSucceededEvent)) + self.assertTrue(isinstance(succeeded, monitoring.CommandSucceededEvent)) self.assertTrue(isinstance(succeeded.duration_micros, int)) - self.assertEqual('killCursors', succeeded.command_name) + self.assertEqual("killCursors", succeeded.command_name) self.assertTrue(isinstance(succeeded.request_id, int)) self.assertIs(type(succeeded.connection_id), tuple) self.assertEqual(cursor.address, succeeded.connection_id) # There could be more than one cursor_id here depending on # when the thread last ran. 
- self.assertTrue(cursor_id in succeeded.reply['cursorsUnknown'] - or cursor_id in succeeded.reply['cursorsKilled']) + self.assertTrue( + cursor_id in succeeded.reply["cursorsUnknown"] + or cursor_id in succeeded.reply["cursorsKilled"] + ) def test_non_bulk_writes(self): coll = self.client.pymongo_test.test @@ -563,18 +534,22 @@ def test_non_bulk_writes(self): self.listener.results.clear() # Implied write concern insert_one - res = coll.insert_one({'x': 1}) + res = coll.insert_one({"x": 1}) results = self.listener.results - started = results['started'][0] - succeeded = results['succeeded'][0] - self.assertEqual(0, len(results['failed'])) + started = results["started"][0] + succeeded = results["succeeded"][0] + self.assertEqual(0, len(results["failed"])) self.assertIsInstance(started, monitoring.CommandStartedEvent) - expected = SON([('insert', coll.name), - ('ordered', True), - ('documents', [{'_id': res.inserted_id, 'x': 1}])]) + expected = SON( + [ + ("insert", coll.name), + ("ordered", True), + ("documents", [{"_id": res.inserted_id, "x": 1}]), + ] + ) self.assertEqualCommand(expected, started.command) - self.assertEqual('pymongo_test', started.database_name) - self.assertEqual('insert', started.command_name) + self.assertEqual("pymongo_test", started.database_name) + self.assertEqual("insert", started.command_name) self.assertIsInstance(started.request_id, int) self.assertEqual(self.client.address, started.connection_id) self.assertIsInstance(succeeded, monitoring.CommandSucceededEvent) @@ -583,25 +558,29 @@ def test_non_bulk_writes(self): self.assertEqual(started.request_id, succeeded.request_id) self.assertEqual(started.connection_id, succeeded.connection_id) reply = succeeded.reply - self.assertEqual(1, reply.get('ok')) - self.assertEqual(1, reply.get('n')) + self.assertEqual(1, reply.get("ok")) + self.assertEqual(1, reply.get("n")) # Unacknowledged insert_one self.listener.results.clear() coll = coll.with_options(write_concern=WriteConcern(w=0)) - res = coll.insert_one({'x': 1}) + res = coll.insert_one({"x": 1}) results = self.listener.results - started = results['started'][0] - succeeded = results['succeeded'][0] - self.assertEqual(0, len(results['failed'])) + started = results["started"][0] + succeeded = results["succeeded"][0] + self.assertEqual(0, len(results["failed"])) self.assertIsInstance(started, monitoring.CommandStartedEvent) - expected = SON([('insert', coll.name), - ('ordered', True), - ('documents', [{'_id': res.inserted_id, 'x': 1}]), - ('writeConcern', {'w': 0})]) + expected = SON( + [ + ("insert", coll.name), + ("ordered", True), + ("documents", [{"_id": res.inserted_id, "x": 1}]), + ("writeConcern", {"w": 0}), + ] + ) self.assertEqualCommand(expected, started.command) - self.assertEqual('pymongo_test', started.database_name) - self.assertEqual('insert', started.command_name) + self.assertEqual("pymongo_test", started.database_name) + self.assertEqual("insert", started.command_name) self.assertIsInstance(started.request_id, int) self.assertEqual(self.client.address, started.connection_id) self.assertIsInstance(succeeded, monitoring.CommandSucceededEvent) @@ -609,24 +588,28 @@ def test_non_bulk_writes(self): self.assertEqual(started.command_name, succeeded.command_name) self.assertEqual(started.request_id, succeeded.request_id) self.assertEqual(started.connection_id, succeeded.connection_id) - self.assertEqualReply(succeeded.reply, {'ok': 1}) + self.assertEqualReply(succeeded.reply, {"ok": 1}) # Explicit write concern insert_one self.listener.results.clear() coll = 
coll.with_options(write_concern=WriteConcern(w=1)) - res = coll.insert_one({'x': 1}) + res = coll.insert_one({"x": 1}) results = self.listener.results - started = results['started'][0] - succeeded = results['succeeded'][0] - self.assertEqual(0, len(results['failed'])) + started = results["started"][0] + succeeded = results["succeeded"][0] + self.assertEqual(0, len(results["failed"])) self.assertIsInstance(started, monitoring.CommandStartedEvent) - expected = SON([('insert', coll.name), - ('ordered', True), - ('documents', [{'_id': res.inserted_id, 'x': 1}]), - ('writeConcern', {'w': 1})]) + expected = SON( + [ + ("insert", coll.name), + ("ordered", True), + ("documents", [{"_id": res.inserted_id, "x": 1}]), + ("writeConcern", {"w": 1}), + ] + ) self.assertEqualCommand(expected, started.command) - self.assertEqual('pymongo_test', started.database_name) - self.assertEqual('insert', started.command_name) + self.assertEqual("pymongo_test", started.database_name) + self.assertEqual("insert", started.command_name) self.assertIsInstance(started.request_id, int) self.assertEqual(self.client.address, started.connection_id) self.assertIsInstance(succeeded, monitoring.CommandSucceededEvent) @@ -635,25 +618,28 @@ def test_non_bulk_writes(self): self.assertEqual(started.request_id, succeeded.request_id) self.assertEqual(started.connection_id, succeeded.connection_id) reply = succeeded.reply - self.assertEqual(1, reply.get('ok')) - self.assertEqual(1, reply.get('n')) + self.assertEqual(1, reply.get("ok")) + self.assertEqual(1, reply.get("n")) # delete_many self.listener.results.clear() - res = coll.delete_many({'x': 1}) + res = coll.delete_many({"x": 1}) results = self.listener.results - started = results['started'][0] - succeeded = results['succeeded'][0] - self.assertEqual(0, len(results['failed'])) + started = results["started"][0] + succeeded = results["succeeded"][0] + self.assertEqual(0, len(results["failed"])) self.assertIsInstance(started, monitoring.CommandStartedEvent) - expected = SON([('delete', coll.name), - ('ordered', True), - ('deletes', [SON([('q', {'x': 1}), - ('limit', 0)])]), - ('writeConcern', {'w': 1})]) + expected = SON( + [ + ("delete", coll.name), + ("ordered", True), + ("deletes", [SON([("q", {"x": 1}), ("limit", 0)])]), + ("writeConcern", {"w": 1}), + ] + ) self.assertEqualCommand(expected, started.command) - self.assertEqual('pymongo_test', started.database_name) - self.assertEqual('delete', started.command_name) + self.assertEqual("pymongo_test", started.database_name) + self.assertEqual("delete", started.command_name) self.assertIsInstance(started.request_id, int) self.assertEqual(self.client.address, started.connection_id) self.assertIsInstance(succeeded, monitoring.CommandSucceededEvent) @@ -662,28 +648,41 @@ def test_non_bulk_writes(self): self.assertEqual(started.request_id, succeeded.request_id) self.assertEqual(started.connection_id, succeeded.connection_id) reply = succeeded.reply - self.assertEqual(1, reply.get('ok')) - self.assertEqual(res.deleted_count, reply.get('n')) + self.assertEqual(1, reply.get("ok")) + self.assertEqual(res.deleted_count, reply.get("n")) # replace_one self.listener.results.clear() oid = ObjectId() - res = coll.replace_one({'_id': oid}, {'_id': oid, 'x': 1}, upsert=True) + res = coll.replace_one({"_id": oid}, {"_id": oid, "x": 1}, upsert=True) results = self.listener.results - started = results['started'][0] - succeeded = results['succeeded'][0] - self.assertEqual(0, len(results['failed'])) + started = results["started"][0] + succeeded = 
results["succeeded"][0] + self.assertEqual(0, len(results["failed"])) self.assertIsInstance(started, monitoring.CommandStartedEvent) - expected = SON([('update', coll.name), - ('ordered', True), - ('updates', [SON([('q', {'_id': oid}), - ('u', {'_id': oid, 'x': 1}), - ('multi', False), - ('upsert', True)])]), - ('writeConcern', {'w': 1})]) + expected = SON( + [ + ("update", coll.name), + ("ordered", True), + ( + "updates", + [ + SON( + [ + ("q", {"_id": oid}), + ("u", {"_id": oid, "x": 1}), + ("multi", False), + ("upsert", True), + ] + ) + ], + ), + ("writeConcern", {"w": 1}), + ] + ) self.assertEqualCommand(expected, started.command) - self.assertEqual('pymongo_test', started.database_name) - self.assertEqual('update', started.command_name) + self.assertEqual("pymongo_test", started.database_name) + self.assertEqual("update", started.command_name) self.assertIsInstance(started.request_id, int) self.assertEqual(self.client.address, started.connection_id) self.assertIsInstance(succeeded, monitoring.CommandSucceededEvent) @@ -692,28 +691,41 @@ def test_non_bulk_writes(self): self.assertEqual(started.request_id, succeeded.request_id) self.assertEqual(started.connection_id, succeeded.connection_id) reply = succeeded.reply - self.assertEqual(1, reply.get('ok')) - self.assertEqual(1, reply.get('n')) - self.assertEqual([{'index': 0, '_id': oid}], reply.get('upserted')) + self.assertEqual(1, reply.get("ok")) + self.assertEqual(1, reply.get("n")) + self.assertEqual([{"index": 0, "_id": oid}], reply.get("upserted")) # update_one self.listener.results.clear() - res = coll.update_one({'x': 1}, {'$inc': {'x': 1}}) + res = coll.update_one({"x": 1}, {"$inc": {"x": 1}}) results = self.listener.results - started = results['started'][0] - succeeded = results['succeeded'][0] - self.assertEqual(0, len(results['failed'])) + started = results["started"][0] + succeeded = results["succeeded"][0] + self.assertEqual(0, len(results["failed"])) self.assertIsInstance(started, monitoring.CommandStartedEvent) - expected = SON([('update', coll.name), - ('ordered', True), - ('updates', [SON([('q', {'x': 1}), - ('u', {'$inc': {'x': 1}}), - ('multi', False), - ('upsert', False)])]), - ('writeConcern', {'w': 1})]) + expected = SON( + [ + ("update", coll.name), + ("ordered", True), + ( + "updates", + [ + SON( + [ + ("q", {"x": 1}), + ("u", {"$inc": {"x": 1}}), + ("multi", False), + ("upsert", False), + ] + ) + ], + ), + ("writeConcern", {"w": 1}), + ] + ) self.assertEqualCommand(expected, started.command) - self.assertEqual('pymongo_test', started.database_name) - self.assertEqual('update', started.command_name) + self.assertEqual("pymongo_test", started.database_name) + self.assertEqual("update", started.command_name) self.assertIsInstance(started.request_id, int) self.assertEqual(self.client.address, started.connection_id) self.assertIsInstance(succeeded, monitoring.CommandSucceededEvent) @@ -722,27 +734,40 @@ def test_non_bulk_writes(self): self.assertEqual(started.request_id, succeeded.request_id) self.assertEqual(started.connection_id, succeeded.connection_id) reply = succeeded.reply - self.assertEqual(1, reply.get('ok')) - self.assertEqual(1, reply.get('n')) + self.assertEqual(1, reply.get("ok")) + self.assertEqual(1, reply.get("n")) # update_many self.listener.results.clear() - res = coll.update_many({'x': 2}, {'$inc': {'x': 1}}) + res = coll.update_many({"x": 2}, {"$inc": {"x": 1}}) results = self.listener.results - started = results['started'][0] - succeeded = results['succeeded'][0] - self.assertEqual(0, 
len(results['failed'])) + started = results["started"][0] + succeeded = results["succeeded"][0] + self.assertEqual(0, len(results["failed"])) self.assertIsInstance(started, monitoring.CommandStartedEvent) - expected = SON([('update', coll.name), - ('ordered', True), - ('updates', [SON([('q', {'x': 2}), - ('u', {'$inc': {'x': 1}}), - ('multi', True), - ('upsert', False)])]), - ('writeConcern', {'w': 1})]) + expected = SON( + [ + ("update", coll.name), + ("ordered", True), + ( + "updates", + [ + SON( + [ + ("q", {"x": 2}), + ("u", {"$inc": {"x": 1}}), + ("multi", True), + ("upsert", False), + ] + ) + ], + ), + ("writeConcern", {"w": 1}), + ] + ) self.assertEqualCommand(expected, started.command) - self.assertEqual('pymongo_test', started.database_name) - self.assertEqual('update', started.command_name) + self.assertEqual("pymongo_test", started.database_name) + self.assertEqual("update", started.command_name) self.assertIsInstance(started.request_id, int) self.assertEqual(self.client.address, started.connection_id) self.assertIsInstance(succeeded, monitoring.CommandSucceededEvent) @@ -751,25 +776,28 @@ def test_non_bulk_writes(self): self.assertEqual(started.request_id, succeeded.request_id) self.assertEqual(started.connection_id, succeeded.connection_id) reply = succeeded.reply - self.assertEqual(1, reply.get('ok')) - self.assertEqual(1, reply.get('n')) + self.assertEqual(1, reply.get("ok")) + self.assertEqual(1, reply.get("n")) # delete_one self.listener.results.clear() - res2 = coll.delete_one({'x': 3}) + res2 = coll.delete_one({"x": 3}) results = self.listener.results - started = results['started'][0] - succeeded = results['succeeded'][0] - self.assertEqual(0, len(results['failed'])) + started = results["started"][0] + succeeded = results["succeeded"][0] + self.assertEqual(0, len(results["failed"])) self.assertIsInstance(started, monitoring.CommandStartedEvent) - expected = SON([('delete', coll.name), - ('ordered', True), - ('deletes', [SON([('q', {'x': 3}), - ('limit', 1)])]), - ('writeConcern', {'w': 1})]) + expected = SON( + [ + ("delete", coll.name), + ("ordered", True), + ("deletes", [SON([("q", {"x": 3}), ("limit", 1)])]), + ("writeConcern", {"w": 1}), + ] + ) self.assertEqualCommand(expected, started.command) - self.assertEqual('pymongo_test', started.database_name) - self.assertEqual('delete', started.command_name) + self.assertEqual("pymongo_test", started.database_name) + self.assertEqual("delete", started.command_name) self.assertIsInstance(started.request_id, int) self.assertEqual(self.client.address, started.connection_id) self.assertIsInstance(succeeded, monitoring.CommandSucceededEvent) @@ -778,30 +806,34 @@ def test_non_bulk_writes(self): self.assertEqual(started.request_id, succeeded.request_id) self.assertEqual(started.connection_id, succeeded.connection_id) reply = succeeded.reply - self.assertEqual(1, reply.get('ok')) - self.assertEqual(1, reply.get('n')) + self.assertEqual(1, reply.get("ok")) + self.assertEqual(1, reply.get("n")) self.assertEqual(0, coll.count_documents({})) # write errors - coll.insert_one({'_id': 1}) + coll.insert_one({"_id": 1}) try: self.listener.results.clear() - coll.insert_one({'_id': 1}) + coll.insert_one({"_id": 1}) except OperationFailure: pass results = self.listener.results - started = results['started'][0] - succeeded = results['succeeded'][0] - self.assertEqual(0, len(results['failed'])) + started = results["started"][0] + succeeded = results["succeeded"][0] + self.assertEqual(0, len(results["failed"])) self.assertIsInstance(started, 
monitoring.CommandStartedEvent) - expected = SON([('insert', coll.name), - ('ordered', True), - ('documents', [{'_id': 1}]), - ('writeConcern', {'w': 1})]) + expected = SON( + [ + ("insert", coll.name), + ("ordered", True), + ("documents", [{"_id": 1}]), + ("writeConcern", {"w": 1}), + ] + ) self.assertEqualCommand(expected, started.command) - self.assertEqual('pymongo_test', started.database_name) - self.assertEqual('insert', started.command_name) + self.assertEqual("pymongo_test", started.database_name) + self.assertEqual("insert", started.command_name) self.assertIsInstance(started.request_id, int) self.assertEqual(self.client.address, started.connection_id) self.assertIsInstance(succeeded, monitoring.CommandSucceededEvent) @@ -810,14 +842,14 @@ def test_non_bulk_writes(self): self.assertEqual(started.request_id, succeeded.request_id) self.assertEqual(started.connection_id, succeeded.connection_id) reply = succeeded.reply - self.assertEqual(1, reply.get('ok')) - self.assertEqual(0, reply.get('n')) - errors = reply.get('writeErrors') + self.assertEqual(1, reply.get("ok")) + self.assertEqual(0, reply.get("n")) + errors = reply.get("writeErrors") self.assertIsInstance(errors, list) error = errors[0] - self.assertEqual(0, error.get('index')) - self.assertIsInstance(error.get('code'), int) - self.assertIsInstance(error.get('errmsg'), str) + self.assertEqual(0, error.get("index")) + self.assertIsInstance(error.get("code"), int) + self.assertIsInstance(error.get("errmsg"), str) def test_insert_many(self): # This always uses the bulk API. @@ -825,13 +857,13 @@ def test_insert_many(self): coll.drop() self.listener.results.clear() - big = 'x' * (1024 * 1024 * 4) - docs = [{'_id': i, 'big': big} for i in range(6)] + big = "x" * (1024 * 1024 * 4) + docs = [{"_id": i, "big": big} for i in range(6)] coll.insert_many(docs) results = self.listener.results - started = results['started'] - succeeded = results['succeeded'] - self.assertEqual(0, len(results['failed'])) + started = results["started"] + succeeded = results["succeeded"] + self.assertEqual(0, len(results["failed"])) documents = [] count = 0 operation_id = started[0].operation_id @@ -839,13 +871,12 @@ def test_insert_many(self): for start, succeed in zip(started, succeeded): self.assertIsInstance(start, monitoring.CommandStartedEvent) cmd = sanitize_cmd(start.command) - self.assertEqual(['insert', 'ordered', 'documents'], - list(cmd.keys())) - self.assertEqual(coll.name, cmd['insert']) - self.assertIs(True, cmd['ordered']) - documents.extend(cmd['documents']) - self.assertEqual('pymongo_test', start.database_name) - self.assertEqual('insert', start.command_name) + self.assertEqual(["insert", "ordered", "documents"], list(cmd.keys())) + self.assertEqual(coll.name, cmd["insert"]) + self.assertIs(True, cmd["ordered"]) + documents.extend(cmd["documents"]) + self.assertEqual("pymongo_test", start.database_name) + self.assertEqual("insert", start.command_name) self.assertIsInstance(start.request_id, int) self.assertEqual(self.client.address, start.connection_id) self.assertIsInstance(succeed, monitoring.CommandSucceededEvent) @@ -856,8 +887,8 @@ def test_insert_many(self): self.assertEqual(start.operation_id, operation_id) self.assertEqual(succeed.operation_id, operation_id) reply = succeed.reply - self.assertEqual(1, reply.get('ok')) - count += reply.get('n', 0) + self.assertEqual(1, reply.get("ok")) + count += reply.get("n", 0) self.assertEqual(documents, docs) self.assertEqual(6, count) @@ -868,27 +899,26 @@ def 
test_insert_many_unacknowledged(self): self.listener.results.clear() # Force two batches on legacy servers. - big = 'x' * (1024 * 1024 * 12) - docs = [{'_id': i, 'big': big} for i in range(6)] + big = "x" * (1024 * 1024 * 12) + docs = [{"_id": i, "big": big} for i in range(6)] unack_coll.insert_many(docs) results = self.listener.results - started = results['started'] - succeeded = results['succeeded'] - self.assertEqual(0, len(results['failed'])) + started = results["started"] + succeeded = results["succeeded"] + self.assertEqual(0, len(results["failed"])) documents = [] operation_id = started[0].operation_id self.assertIsInstance(operation_id, int) for start, succeed in zip(started, succeeded): self.assertIsInstance(start, monitoring.CommandStartedEvent) cmd = sanitize_cmd(start.command) - cmd.pop('writeConcern', None) - self.assertEqual(['insert', 'ordered', 'documents'], - list(cmd.keys())) - self.assertEqual(coll.name, cmd['insert']) - self.assertIs(True, cmd['ordered']) - documents.extend(cmd['documents']) - self.assertEqual('pymongo_test', start.database_name) - self.assertEqual('insert', start.command_name) + cmd.pop("writeConcern", None) + self.assertEqual(["insert", "ordered", "documents"], list(cmd.keys())) + self.assertEqual(coll.name, cmd["insert"]) + self.assertIs(True, cmd["ordered"]) + documents.extend(cmd["documents"]) + self.assertEqual("pymongo_test", start.database_name) + self.assertEqual("insert", start.command_name) self.assertIsInstance(start.request_id, int) self.assertEqual(self.client.address, start.connection_id) self.assertIsInstance(succeed, monitoring.CommandSucceededEvent) @@ -898,29 +928,32 @@ def test_insert_many_unacknowledged(self): self.assertEqual(start.connection_id, succeed.connection_id) self.assertEqual(start.operation_id, operation_id) self.assertEqual(succeed.operation_id, operation_id) - self.assertEqual(1, succeed.reply.get('ok')) + self.assertEqual(1, succeed.reply.get("ok")) self.assertEqual(documents, docs) - wait_until(lambda: coll.count_documents({}) == 6, - 'insert documents with w=0') + wait_until(lambda: coll.count_documents({}) == 6, "insert documents with w=0") def test_bulk_write(self): coll = self.client.pymongo_test.test coll.drop() self.listener.results.clear() - coll.bulk_write([InsertOne({'_id': 1}), - UpdateOne({'_id': 1}, {'$set': {'x': 1}}), - DeleteOne({'_id': 1})]) + coll.bulk_write( + [ + InsertOne({"_id": 1}), + UpdateOne({"_id": 1}, {"$set": {"x": 1}}), + DeleteOne({"_id": 1}), + ] + ) results = self.listener.results - started = results['started'] - succeeded = results['succeeded'] - self.assertEqual(0, len(results['failed'])) + started = results["started"] + succeeded = results["succeeded"] + self.assertEqual(0, len(results["failed"])) operation_id = started[0].operation_id pairs = list(zip(started, succeeded)) self.assertEqual(3, len(pairs)) for start, succeed in pairs: self.assertIsInstance(start, monitoring.CommandStartedEvent) - self.assertEqual('pymongo_test', start.database_name) + self.assertEqual("pymongo_test", start.database_name) self.assertIsInstance(start.request_id, int) self.assertEqual(self.client.address, start.connection_id) self.assertIsInstance(succeed, monitoring.CommandSucceededEvent) @@ -931,21 +964,35 @@ def test_bulk_write(self): self.assertEqual(start.operation_id, operation_id) self.assertEqual(succeed.operation_id, operation_id) - expected = SON([('insert', coll.name), - ('ordered', True), - ('documents', [{'_id': 1}])]) + expected = SON([("insert", coll.name), ("ordered", True), ("documents", 
[{"_id": 1}])]) self.assertEqualCommand(expected, started[0].command) - expected = SON([('update', coll.name), - ('ordered', True), - ('updates', [SON([('q', {'_id': 1}), - ('u', {'$set': {'x': 1}}), - ('multi', False), - ('upsert', False)])])]) + expected = SON( + [ + ("update", coll.name), + ("ordered", True), + ( + "updates", + [ + SON( + [ + ("q", {"_id": 1}), + ("u", {"$set": {"x": 1}}), + ("multi", False), + ("upsert", False), + ] + ) + ], + ), + ] + ) self.assertEqualCommand(expected, started[1].command) - expected = SON([('delete', coll.name), - ('ordered', True), - ('deletes', [SON([('q', {'_id': 1}), - ('limit', 1)])])]) + expected = SON( + [ + ("delete", coll.name), + ("ordered", True), + ("deletes", [SON([("q", {"_id": 1}), ("limit", 1)])]), + ] + ) self.assertEqualCommand(expected, started[2].command) @client_context.require_failCommand_fail_point @@ -954,23 +1001,23 @@ def test_bulk_write_command_network_error(self): self.listener.results.clear() insert_network_error = { - 'configureFailPoint': 'failCommand', - 'mode': {'times': 1}, - 'data': { - 'failCommands': ['insert'], - 'closeConnection': True, + "configureFailPoint": "failCommand", + "mode": {"times": 1}, + "data": { + "failCommands": ["insert"], + "closeConnection": True, }, } with self.fail_point(insert_network_error): with self.assertRaises(AutoReconnect): - coll.bulk_write([InsertOne({'_id': 1})]) - failed = self.listener.results['failed'] + coll.bulk_write([InsertOne({"_id": 1})]) + failed = self.listener.results["failed"] self.assertEqual(1, len(failed)) event = failed[0] - self.assertEqual(event.command_name, 'insert') + self.assertEqual(event.command_name, "insert") self.assertIsInstance(event.failure, dict) - self.assertEqual(event.failure['errtype'], 'AutoReconnect') - self.assertTrue(event.failure['errmsg']) + self.assertEqual(event.failure["errtype"], "AutoReconnect") + self.assertTrue(event.failure["errmsg"]) @client_context.require_failCommand_fail_point def test_bulk_write_command_error(self): @@ -978,24 +1025,24 @@ def test_bulk_write_command_error(self): self.listener.results.clear() insert_command_error = { - 'configureFailPoint': 'failCommand', - 'mode': {'times': 1}, - 'data': { - 'failCommands': ['insert'], - 'closeConnection': False, - 'errorCode': 10107, # Not primary + "configureFailPoint": "failCommand", + "mode": {"times": 1}, + "data": { + "failCommands": ["insert"], + "closeConnection": False, + "errorCode": 10107, # Not primary }, } with self.fail_point(insert_command_error): with self.assertRaises(NotPrimaryError): - coll.bulk_write([InsertOne({'_id': 1})]) - failed = self.listener.results['failed'] + coll.bulk_write([InsertOne({"_id": 1})]) + failed = self.listener.results["failed"] self.assertEqual(1, len(failed)) event = failed[0] - self.assertEqual(event.command_name, 'insert') + self.assertEqual(event.command_name, "insert") self.assertIsInstance(event.failure, dict) - self.assertEqual(event.failure['code'], 10107) - self.assertTrue(event.failure['errmsg']) + self.assertEqual(event.failure["code"], 10107) + self.assertTrue(event.failure["errmsg"]) def test_write_errors(self): coll = self.client.pymongo_test.test @@ -1003,23 +1050,27 @@ def test_write_errors(self): self.listener.results.clear() try: - coll.bulk_write([InsertOne({'_id': 1}), - InsertOne({'_id': 1}), - InsertOne({'_id': 1}), - DeleteOne({'_id': 1})], - ordered=False) + coll.bulk_write( + [ + InsertOne({"_id": 1}), + InsertOne({"_id": 1}), + InsertOne({"_id": 1}), + DeleteOne({"_id": 1}), + ], + ordered=False, + ) except 
OperationFailure: pass results = self.listener.results - started = results['started'] - succeeded = results['succeeded'] - self.assertEqual(0, len(results['failed'])) + started = results["started"] + succeeded = results["succeeded"] + self.assertEqual(0, len(results["failed"])) operation_id = started[0].operation_id pairs = list(zip(started, succeeded)) errors = [] for start, succeed in pairs: self.assertIsInstance(start, monitoring.CommandStartedEvent) - self.assertEqual('pymongo_test', start.database_name) + self.assertEqual("pymongo_test", start.database_name) self.assertIsInstance(start.request_id, int) self.assertEqual(self.client.address, start.connection_id) self.assertIsInstance(succeed, monitoring.CommandSucceededEvent) @@ -1029,11 +1080,11 @@ def test_write_errors(self): self.assertEqual(start.connection_id, succeed.connection_id) self.assertEqual(start.operation_id, operation_id) self.assertEqual(succeed.operation_id, operation_id) - if 'writeErrors' in succeed.reply: - errors.extend(succeed.reply['writeErrors']) + if "writeErrors" in succeed.reply: + errors.extend(succeed.reply["writeErrors"]) self.assertEqual(2, len(errors)) - fields = set(['index', 'code', 'errmsg']) + fields = set(["index", "code", "errmsg"]) for error in errors: self.assertTrue(fields.issubset(set(error))) @@ -1043,14 +1094,14 @@ def test_first_batch_helper(self): self.listener.results.clear() tuple(self.client.pymongo_test.test.list_indexes()) results = self.listener.results - started = results['started'][0] - succeeded = results['succeeded'][0] - self.assertEqual(0, len(results['failed'])) + started = results["started"][0] + succeeded = results["succeeded"][0] + self.assertEqual(0, len(results["failed"])) self.assertIsInstance(started, monitoring.CommandStartedEvent) - expected = SON([('listIndexes', 'test'), ('cursor', {})]) + expected = SON([("listIndexes", "test"), ("cursor", {})]) self.assertEqualCommand(expected, started.command) - self.assertEqual('pymongo_test', started.database_name) - self.assertEqual('listIndexes', started.command_name) + self.assertEqual("pymongo_test", started.database_name) + self.assertEqual("listIndexes", started.command_name) self.assertIsInstance(started.request_id, int) self.assertEqual(self.client.address, started.connection_id) self.assertIsInstance(succeeded, monitoring.CommandSucceededEvent) @@ -1058,8 +1109,8 @@ def test_first_batch_helper(self): self.assertEqual(started.command_name, succeeded.command_name) self.assertEqual(started.request_id, succeeded.request_id) self.assertEqual(started.connection_id, succeeded.connection_id) - self.assertTrue('cursor' in succeeded.reply) - self.assertTrue('ok' in succeeded.reply) + self.assertTrue("cursor" in succeeded.reply) + self.assertTrue("ok" in succeeded.reply) self.listener.results.clear() @@ -1068,20 +1119,19 @@ def test_sensitive_commands(self): self.listener.results.clear() cmd = SON([("getnonce", 1)]) - listeners.publish_command_start( - cmd, "pymongo_test", 12345, self.client.address) + listeners.publish_command_start(cmd, "pymongo_test", 12345, self.client.address) delta = datetime.timedelta(milliseconds=100) listeners.publish_command_success( - delta, {'nonce': 'e474f4561c5eb40b', 'ok': 1.0}, - "getnonce", 12345, self.client.address) + delta, {"nonce": "e474f4561c5eb40b", "ok": 1.0}, "getnonce", 12345, self.client.address + ) results = self.listener.results - started = results['started'][0] - succeeded = results['succeeded'][0] - self.assertEqual(0, len(results['failed'])) + started = results["started"][0] + 
succeeded = results["succeeded"][0]
+        self.assertEqual(0, len(results["failed"]))
         self.assertIsInstance(started, monitoring.CommandStartedEvent)
         self.assertEqual({}, started.command)
-        self.assertEqual('pymongo_test', started.database_name)
-        self.assertEqual('getnonce', started.command_name)
+        self.assertEqual("pymongo_test", started.database_name)
+        self.assertEqual("getnonce", started.command_name)
         self.assertIsInstance(started.request_id, int)
         self.assertEqual(self.client.address, started.connection_id)
         self.assertIsInstance(succeeded, monitoring.CommandSucceededEvent)
@@ -1106,7 +1156,7 @@ def setUpClass(cls):
         monitoring.register(cls.listener)
         cls.client = single_client()
         # Get one (authenticated) socket in the pool.
-        cls.client.pymongo_test.command('ping')
+        cls.client.pymongo_test.command("ping")

     @classmethod
     def tearDownClass(cls):
@@ -1119,107 +1169,109 @@ def setUp(self):
         self.listener.results.clear()

     def test_simple(self):
-        self.client.pymongo_test.command('ping')
+        self.client.pymongo_test.command("ping")
         results = self.listener.results
-        started = results['started'][0]
-        succeeded = results['succeeded'][0]
-        self.assertEqual(0, len(results['failed']))
-        self.assertTrue(
-            isinstance(succeeded, monitoring.CommandSucceededEvent))
-        self.assertTrue(
-            isinstance(started, monitoring.CommandStartedEvent))
-        self.assertEqualCommand(SON([('ping', 1)]), started.command)
-        self.assertEqual('ping', started.command_name)
+        started = results["started"][0]
+        succeeded = results["succeeded"][0]
+        self.assertEqual(0, len(results["failed"]))
+        self.assertTrue(isinstance(succeeded, monitoring.CommandSucceededEvent))
+        self.assertTrue(isinstance(started, monitoring.CommandStartedEvent))
+        self.assertEqualCommand(SON([("ping", 1)]), started.command)
+        self.assertEqual("ping", started.command_name)
         self.assertEqual(self.client.address, started.connection_id)
-        self.assertEqual('pymongo_test', started.database_name)
+        self.assertEqual("pymongo_test", started.database_name)
         self.assertTrue(isinstance(started.request_id, int))


 class TestEventClasses(unittest.TestCase):
-
     def test_command_event_repr(self):
-        request_id, connection_id, operation_id = 1, ('localhost', 27017), 2
+        request_id, connection_id, operation_id = 1, ("localhost", 27017), 2
         event = monitoring.CommandStartedEvent(
-            {'ping': 1}, 'admin', request_id, connection_id, operation_id)
+            {"ping": 1}, "admin", request_id, connection_id, operation_id
+        )
         self.assertEqual(
             repr(event),
             "<CommandStartedEvent ('localhost', 27017) db: 'admin', "
-            "command: 'ping', operation_id: 2, service_id: None>")
+            "command: 'ping', operation_id: 2, service_id: None>",
+        )
         delta = datetime.timedelta(milliseconds=100)
         event = monitoring.CommandSucceededEvent(
-            delta, {'ok': 1}, 'ping', request_id, connection_id,
-            operation_id)
+            delta, {"ok": 1}, "ping", request_id, connection_id, operation_id
+        )
         self.assertEqual(
             repr(event),
             "<CommandSucceededEvent ('localhost', 27017) "
             "command: 'ping', operation_id: 2, duration_micros: 100000, "
-            "service_id: None>")
+            "service_id: None>",
+        )
         event = monitoring.CommandFailedEvent(
-            delta, {'ok': 0}, 'ping', request_id, connection_id,
-            operation_id)
+            delta, {"ok": 0}, "ping", request_id, connection_id, operation_id
+        )
         self.assertEqual(
             repr(event),
             "<CommandFailedEvent ('localhost', 27017) "
             "command: 'ping', operation_id: 2, duration_micros: 100000, "
-            "failure: {'ok': 0}, service_id: None>")
+            "failure: {'ok': 0}, service_id: None>",
+        )

     def test_server_heartbeat_event_repr(self):
-        connection_id = ('localhost', 27017)
+        connection_id = ("localhost", 27017)
         event = monitoring.ServerHeartbeatStartedEvent(connection_id)
-        self.assertEqual(
-            repr(event),
-            "<ServerHeartbeatStartedEvent ('localhost', 27017)>")
+        self.assertEqual(repr(event), "<ServerHeartbeatStartedEvent ('localhost', 27017)>")
         delta = 0.1
         event = monitoring.ServerHeartbeatSucceededEvent(
-            delta, {'ok': 1}, connection_id)  # type: ignore[arg-type]
+            delta, {"ok": 1}, connection_id  # type: ignore[arg-type]
+        )
self.assertEqual(
             repr(event),
             "<ServerHeartbeatSucceededEvent ('localhost', 27017) "
-            "duration: 0.1, awaited: False, reply: {'ok': 1}>")
+            "duration: 0.1, awaited: False, reply: {'ok': 1}>",
+        )
         event = monitoring.ServerHeartbeatFailedEvent(
-            delta, 'ERROR', connection_id)  # type: ignore[arg-type]
+            delta, "ERROR", connection_id  # type: ignore[arg-type]
+        )
         self.assertEqual(
             repr(event),
             "<ServerHeartbeatFailedEvent ('localhost', 27017) "
-            "duration: 0.1, awaited: False, reply: 'ERROR'>")
+            "duration: 0.1, awaited: False, reply: 'ERROR'>",
+        )

     def test_server_event_repr(self):
-        server_address = ('localhost', 27017)
-        topology_id = ObjectId('000000000000000000000001')
+        server_address = ("localhost", 27017)
+        topology_id = ObjectId("000000000000000000000001")
         event = monitoring.ServerOpeningEvent(server_address, topology_id)
         self.assertEqual(
             repr(event),
-            "<ServerOpeningEvent ('localhost', 27017) topology_id: 000000000000000000000001>")
+            "<ServerOpeningEvent ('localhost', 27017) topology_id: 000000000000000000000001>",
+        )
         event = monitoring.ServerDescriptionChangedEvent(
-            'PREV', 'NEW', server_address, topology_id)  # type: ignore[arg-type]
+            "PREV", "NEW", server_address, topology_id  # type: ignore[arg-type]
+        )
         self.assertEqual(
             repr(event),
-            "<ServerDescriptionChangedEvent ('localhost', 27017) changed from: PREV, to: NEW>")
+            "<ServerDescriptionChangedEvent ('localhost', 27017) changed from: PREV, to: NEW>",
+        )
         event = monitoring.ServerClosedEvent(server_address, topology_id)
         self.assertEqual(
             repr(event),
-            "<ServerClosedEvent ('localhost', 27017) topology_id: 000000000000000000000001>")
+            "<ServerClosedEvent ('localhost', 27017) topology_id: 000000000000000000000001>",
+        )

     def test_topology_event_repr(self):
-        topology_id = ObjectId('000000000000000000000001')
+        topology_id = ObjectId("000000000000000000000001")
         event = monitoring.TopologyOpenedEvent(topology_id)
-        self.assertEqual(
-            repr(event),
-            "<TopologyOpenedEvent topology_id: 000000000000000000000001>")
+        self.assertEqual(repr(event), "<TopologyOpenedEvent topology_id: 000000000000000000000001>")
         event = monitoring.TopologyDescriptionChangedEvent(
-            'PREV', 'NEW', topology_id)  # type: ignore[arg-type]
+            "PREV", "NEW", topology_id  # type: ignore[arg-type]
+        )
         self.assertEqual(
             repr(event),
             "<TopologyDescriptionChangedEvent topology_id: 000000000000000000000001 "
-            "changed from: PREV, to: NEW>")
+            "changed from: PREV, to: NEW>",
+        )
         event = monitoring.TopologyClosedEvent(topology_id)
-        self.assertEqual(
-            repr(event),
-            "<TopologyClosedEvent topology_id: 000000000000000000000001>")
+        self.assertEqual(repr(event), "<TopologyClosedEvent topology_id: 000000000000000000000001>")


 if __name__ == "__main__":
diff --git a/test/test_objectid.py b/test/test_objectid.py
index 26ffe2e22c..bb1af865c0 100644
--- a/test/test_objectid.py
+++ b/test/test_objectid.py
@@ -21,13 +21,13 @@

 sys.path[0:0] = [""]

-from bson.errors import InvalidId
-from bson.objectid import ObjectId, _MAX_COUNTER_VALUE
-from bson.tz_util import (FixedOffset,
-                          utc)
 from test import SkipTest, unittest
 from test.utils import oid_generated_on_process

+from bson.errors import InvalidId
+from bson.objectid import _MAX_COUNTER_VALUE, ObjectId
+from bson.tz_util import FixedOffset, utc
+

 def oid(x):
     return ObjectId()
@@ -57,29 +57,28 @@ def test_from_hex(self):
         self.assertRaises(InvalidId, ObjectId, "123456789012123456789G12")

     def test_repr_str(self):
-        self.assertEqual(repr(ObjectId("1234567890abcdef12345678")),
-                         "ObjectId('1234567890abcdef12345678')")
-        self.assertEqual(str(ObjectId("1234567890abcdef12345678")),
-                         "1234567890abcdef12345678")
-        self.assertEqual(str(ObjectId(b"123456789012")),
-                         "313233343536373839303132")
-        self.assertEqual(ObjectId("1234567890abcdef12345678").binary,
-                         b'\x124Vx\x90\xab\xcd\xef\x124Vx')
-        self.assertEqual(str(ObjectId(b'\x124Vx\x90\xab\xcd\xef\x124Vx')),
-                         "1234567890abcdef12345678")
+        self.assertEqual(
+            repr(ObjectId("1234567890abcdef12345678")), "ObjectId('1234567890abcdef12345678')"
+        )
+        self.assertEqual(str(ObjectId("1234567890abcdef12345678")), "1234567890abcdef12345678")
+        self.assertEqual(str(ObjectId(b"123456789012")), "313233343536373839303132")
+        self.assertEqual(
+            ObjectId("1234567890abcdef12345678").binary, b"\x124Vx\x90\xab\xcd\xef\x124Vx"
+        )
+        self.assertEqual(
+            str(ObjectId(b"\x124Vx\x90\xab\xcd\xef\x124Vx")), "1234567890abcdef12345678"
+        )

     def test_equality(self):
         a = ObjectId()
         self.assertEqual(a, ObjectId(a))
-        self.assertEqual(ObjectId(b"123456789012"),
-                         ObjectId(b"123456789012"))
+ 
self.assertEqual(ObjectId(b"123456789012"), ObjectId(b"123456789012")) self.assertNotEqual(ObjectId(), ObjectId()) self.assertNotEqual(ObjectId(b"123456789012"), b"123456789012") # Explicitly test inequality self.assertFalse(a != ObjectId(a)) - self.assertFalse(ObjectId(b"123456789012") != - ObjectId(b"123456789012")) + self.assertFalse(ObjectId(b"123456789012") != ObjectId(b"123456789012")) def test_binary_str_equivalence(self): a = ObjectId() @@ -95,7 +94,7 @@ def test_generation_time(self): self.assertTrue(d2 - d1 < datetime.timedelta(seconds=2)) def test_from_datetime(self): - if 'PyPy 1.8.0' in sys.version: + if "PyPy 1.8.0" in sys.version: # See https://bugs.pypy.org/issue1092 raise SkipTest("datetime.timedelta is broken in pypy 1.8.0") d = datetime.datetime.utcnow() @@ -104,8 +103,7 @@ def test_from_datetime(self): self.assertEqual(d, oid.generation_time.replace(tzinfo=None)) self.assertEqual("0" * 16, str(oid)[8:]) - aware = datetime.datetime(1993, 4, 4, 2, - tzinfo=FixedOffset(555, "SomeZone")) + aware = datetime.datetime(1993, 4, 4, 2, tzinfo=FixedOffset(555, "SomeZone")) offset = aware.utcoffset() assert offset is not None as_utc = (aware - offset).replace(tzinfo=utc) @@ -126,7 +124,8 @@ def test_pickle_backwards_compatability(self): b"(cbson.objectid\nObjectId\np1\nc__builtin__\n" b"object\np2\nNtp3\nRp4\n" b"(dp5\nS'_ObjectId__id'\np6\n" - b"S'M\\x9afV\\x13v\\xc0\\x0b\\x88\\x00\\x00\\x00'\np7\nsb.") + b"S'M\\x9afV\\x13v\\xc0\\x0b\\x88\\x00\\x00\\x00'\np7\nsb." + ) # We also test against a hardcoded "New" pickle format so that we # make sure we're backward compatible with the current version in @@ -135,11 +134,12 @@ def test_pickle_backwards_compatability(self): b"ccopy_reg\n_reconstructor\np0\n" b"(cbson.objectid\nObjectId\np1\nc__builtin__\n" b"object\np2\nNtp3\nRp4\n" - b"S'M\\x9afV\\x13v\\xc0\\x0b\\x88\\x00\\x00\\x00'\np5\nb.") + b"S'M\\x9afV\\x13v\\xc0\\x0b\\x88\\x00\\x00\\x00'\np5\nb." + ) # Have to load using 'latin-1' since these were pickled in python2.x. - oid_1_9 = pickle.loads(pickled_with_1_9, encoding='latin-1') - oid_1_10 = pickle.loads(pickled_with_1_10, encoding='latin-1') + oid_1_9 = pickle.loads(pickled_with_1_9, encoding="latin-1") + oid_1_10 = pickle.loads(pickled_with_1_10, encoding="latin-1") self.assertEqual(oid_1_9, ObjectId("4d9a66561376c00b88000000")) self.assertEqual(oid_1_9, oid_1_10) @@ -189,9 +189,7 @@ def generate_objectid_with_timestamp(timestamp): oid.generation_time except (OverflowError, ValueError): continue - self.assertEqual( - oid.generation_time, - datetime.datetime(*exp_datetime_args, tzinfo=utc)) + self.assertEqual(oid.generation_time, datetime.datetime(*exp_datetime_args, tzinfo=utc)) def test_random_regenerated_on_pid_change(self): # Test that change of pid triggers new random number generation. 
diff --git a/test/test_ocsp_cache.py b/test/test_ocsp_cache.py index 04fa06dfa1..0e6777a9f9 100644 --- a/test/test_ocsp_cache.py +++ b/test/test_ocsp_cache.py @@ -14,20 +14,20 @@ """Test the pymongo ocsp_support module.""" +import random +import sys from collections import namedtuple from datetime import datetime, timedelta from os import urandom -import random -import sys from time import sleep - from typing import Any sys.path[0:0] = [""] -from pymongo.ocsp_cache import _OCSPCache from test import unittest +from pymongo.ocsp_cache import _OCSPCache + class TestOcspCache(unittest.TestCase): MockHashAlgorithm: Any @@ -36,20 +36,20 @@ class TestOcspCache(unittest.TestCase): @classmethod def setUpClass(cls): - cls.MockHashAlgorithm = namedtuple( # type: ignore - "MockHashAlgorithm", ['name']) + cls.MockHashAlgorithm = namedtuple("MockHashAlgorithm", ["name"]) # type: ignore cls.MockOcspRequest = namedtuple( # type: ignore - "MockOcspRequest", ['hash_algorithm', 'issuer_name_hash', - 'issuer_key_hash', 'serial_number']) + "MockOcspRequest", + ["hash_algorithm", "issuer_name_hash", "issuer_key_hash", "serial_number"], + ) cls.MockOcspResponse = namedtuple( # type: ignore - "MockOcspResponse", ["this_update", "next_update"]) + "MockOcspResponse", ["this_update", "next_update"] + ) def setUp(self): self.cache = _OCSPCache() def _create_mock_request(self): - hash_algorithm = self.MockHashAlgorithm( - random.choice(['sha1', 'md5', 'sha256'])) + hash_algorithm = self.MockHashAlgorithm(random.choice(["sha1", "md5", "sha256"])) issuer_name_hash = urandom(8) issuer_key_hash = urandom(8) serial_number = random.randint(0, 10**10) @@ -57,19 +57,17 @@ def _create_mock_request(self): hash_algorithm=hash_algorithm, issuer_name_hash=issuer_name_hash, issuer_key_hash=issuer_key_hash, - serial_number=serial_number) + serial_number=serial_number, + ) - def _create_mock_response(self, this_update_delta_seconds, - next_update_delta_seconds): + def _create_mock_response(self, this_update_delta_seconds, next_update_delta_seconds): now = datetime.utcnow() this_update = now + timedelta(seconds=this_update_delta_seconds) if next_update_delta_seconds is not None: next_update = now + timedelta(seconds=next_update_delta_seconds) else: next_update = None - return self.MockOcspResponse( - this_update=this_update, - next_update=next_update) + return self.MockOcspResponse(this_update=this_update, next_update=next_update) def _add_mock_cache_entry(self, mock_request, mock_response): key = self.cache._get_cache_key(mock_request) diff --git a/test/test_pooling.py b/test/test_pooling.py index 4f0ac3584f..07dbc3643d 100644 --- a/test/test_pooling.py +++ b/test/test_pooling.py @@ -21,29 +21,25 @@ import threading import time -from bson.son import SON from bson.codec_options import DEFAULT_CODEC_OPTIONS - +from bson.son import SON from pymongo import MongoClient, message -from pymongo.errors import (AutoReconnect, - ConnectionFailure, - DuplicateKeyError) +from pymongo.errors import AutoReconnect, ConnectionFailure, DuplicateKeyError sys.path[0:0] = [""] +from test import IntegrationTest, client_context, unittest +from test.utils import delay, get_pool, joinall, rs_or_single_client + from pymongo.pool import Pool, PoolOptions from pymongo.socket_checker import SocketChecker -from test import client_context, IntegrationTest, unittest -from test.utils import (get_pool, - joinall, - delay, - rs_or_single_client) @client_context.require_connection def setUpModule(): pass + N = 10 DB = "pymongo-pooling-tests" @@ -62,6 +58,7 @@ def 
gc_collect_until_done(threads, timeout=60): class MongoThread(threading.Thread): """A thread that uses a MongoClient.""" + def __init__(self, client): super(MongoThread, self).__init__() self.daemon = True # Don't hang whole test if thread hangs. @@ -108,21 +105,22 @@ class SocketGetter(MongoThread): Checks out a socket and holds it forever. Used in test_no_wait_queue_timeout. """ + def __init__(self, client, pool): super(SocketGetter, self).__init__(client) - self.state = 'init' + self.state = "init" self.pool = pool self.sock = None def run_mongo_thread(self): - self.state = 'get_socket' + self.state = "get_socket" # Call 'pin_cursor' so we can hold the socket. with self.pool.get_socket() as sock: sock.pin_cursor() self.sock = sock - self.state = 'sock' + self.state = "sock" def __del__(self): if self.sock: @@ -162,16 +160,12 @@ def tearDown(self): self.c.close() super(_TestPoolingBase, self).tearDown() - def create_pool( - self, - pair=(client_context.host, client_context.port), - *args, - **kwargs): + def create_pool(self, pair=(client_context.host, client_context.port), *args, **kwargs): # Start the pool with the correct ssl options. pool_options = client_context.client._topology_settings.pool_options - kwargs['ssl_context'] = pool_options._ssl_context - kwargs['tls_allow_invalid_hostnames'] = pool_options.tls_allow_invalid_hostnames - kwargs['server_api'] = pool_options.server_api + kwargs["ssl_context"] = pool_options._ssl_context + kwargs["tls_allow_invalid_hostnames"] = pool_options.tls_allow_invalid_hostnames + kwargs["server_api"] = pool_options.server_api pool = Pool(pair, PoolOptions(*args, **kwargs)) pool.ready() return pool @@ -180,11 +174,9 @@ def create_pool( class TestPooling(_TestPoolingBase): def test_max_pool_size_validation(self): host, port = client_context.host, client_context.port - self.assertRaises( - ValueError, MongoClient, host=host, port=port, maxPoolSize=-1) + self.assertRaises(ValueError, MongoClient, host=host, port=port, maxPoolSize=-1) - self.assertRaises( - ValueError, MongoClient, host=host, port=port, maxPoolSize='foo') + self.assertRaises(ValueError, MongoClient, host=host, port=port, maxPoolSize="foo") c = MongoClient(host=host, port=port, maxPoolSize=100, connect=False) self.assertEqual(c.options.pool_options.max_pool_size, 100) @@ -264,27 +256,27 @@ def test_socket_checker(self): # Socket has nothing to read. self.assertFalse(socket_checker.select(s, read=True)) self.assertFalse(socket_checker.select(s, read=True, timeout=0)) - self.assertFalse(socket_checker.select(s, read=True, timeout=.05)) + self.assertFalse(socket_checker.select(s, read=True, timeout=0.05)) # Socket is writable. self.assertTrue(socket_checker.select(s, write=True, timeout=None)) self.assertTrue(socket_checker.select(s, write=True)) self.assertTrue(socket_checker.select(s, write=True, timeout=0)) - self.assertTrue(socket_checker.select(s, write=True, timeout=.05)) + self.assertTrue(socket_checker.select(s, write=True, timeout=0.05)) # Make the socket readable _, msg, _ = message._query( - 0, 'admin.$cmd', 0, -1, SON([('ping', 1)]), None, - DEFAULT_CODEC_OPTIONS) + 0, "admin.$cmd", 0, -1, SON([("ping", 1)]), None, DEFAULT_CODEC_OPTIONS + ) s.sendall(msg) # Block until the socket is readable. 
self.assertTrue(socket_checker.select(s, read=True, timeout=None)) self.assertTrue(socket_checker.select(s, read=True)) self.assertTrue(socket_checker.select(s, read=True, timeout=0)) - self.assertTrue(socket_checker.select(s, read=True, timeout=.05)) + self.assertTrue(socket_checker.select(s, read=True, timeout=0.05)) # Socket is still writable. self.assertTrue(socket_checker.select(s, write=True, timeout=None)) self.assertTrue(socket_checker.select(s, write=True)) self.assertTrue(socket_checker.select(s, write=True, timeout=0)) - self.assertTrue(socket_checker.select(s, write=True, timeout=.05)) + self.assertTrue(socket_checker.select(s, write=True, timeout=0.05)) s.close() self.assertTrue(socket_checker.socket_closed(s)) @@ -303,9 +295,7 @@ def test_return_socket_after_reset(self): def test_pool_check(self): # Test that Pool recovers from two connection failures in a row. # This exercises code at the end of Pool._check(). - cx_pool = self.create_pool(max_pool_size=1, - connect_timeout=1, - wait_queue_timeout=1) + cx_pool = self.create_pool(max_pool_size=1, connect_timeout=1, wait_queue_timeout=1) cx_pool._check_interval_seconds = 0 # Always check. self.addCleanup(cx_pool.close) @@ -315,7 +305,7 @@ def test_pool_check(self): sock_info.sock.close() # Swap pool's address with a bad one. - address, cx_pool.address = cx_pool.address, ('foo.com', 1234) + address, cx_pool.address = cx_pool.address, ("foo.com", 1234) with self.assertRaises(AutoReconnect): with cx_pool.get_socket(): pass @@ -327,8 +317,7 @@ def test_pool_check(self): def test_wait_queue_timeout(self): wait_queue_timeout = 2 # Seconds - pool = self.create_pool( - max_pool_size=1, wait_queue_timeout=wait_queue_timeout) + pool = self.create_pool(max_pool_size=1, wait_queue_timeout=wait_queue_timeout) self.addCleanup(pool.close) with pool.get_socket() as sock_info: @@ -340,8 +329,8 @@ def test_wait_queue_timeout(self): duration = time.time() - start self.assertTrue( abs(wait_queue_timeout - duration) < 1, - "Waited %.2f seconds for a socket, expected %f" % ( - duration, wait_queue_timeout)) + "Waited %.2f seconds for a socket, expected %f" % (duration, wait_queue_timeout), + ) def test_no_wait_queue_timeout(self): # Verify get_socket() with no wait_queue_timeout blocks forever. 
@@ -352,16 +341,16 @@ def test_no_wait_queue_timeout(self): with pool.get_socket() as s1: t = SocketGetter(self.c, pool) t.start() - while t.state != 'get_socket': + while t.state != "get_socket": time.sleep(0.1) time.sleep(1) - self.assertEqual(t.state, 'get_socket') + self.assertEqual(t.state, "get_socket") - while t.state != 'sock': + while t.state != "sock": time.sleep(0.1) - self.assertEqual(t.state, 'sock') + self.assertEqual(t.state, "sock") self.assertEqual(t.sock, s1) def test_checkout_more_than_max_pool_size(self): @@ -381,7 +370,7 @@ def test_checkout_more_than_max_pool_size(self): threads.append(t) time.sleep(1) for t in threads: - self.assertEqual(t.state, 'get_socket') + self.assertEqual(t.state, "get_socket") for socket_info in socks: socket_info.close_socket(None) @@ -394,7 +383,8 @@ def test_maxConnecting(self): # Run 50 short running operations def find_one(): - docs.append(client.test.test.find_one({'$where': delay(0.001)})) + docs.append(client.test.test.find_one({"$where": delay(0.001)})) + threads = [threading.Thread(target=find_one) for _ in range(50)] for thread in threads: thread.start() @@ -443,7 +433,7 @@ def test_max_pool_size(self): def f(): for _ in range(5): - collection.find_one({'$where': delay(0.1)}) + collection.find_one({"$where": delay(0.1)}) assert len(cx_pool.sockets) <= max_pool_size with lock: @@ -476,7 +466,7 @@ def test_max_pool_size_none(self): def f(): for _ in range(5): - collection.find_one({'$where': delay(0.1)}) + collection.find_one({"$where": delay(0.1)}) with lock: self.n_passed += 1 @@ -489,25 +479,21 @@ def f(): joinall(threads) self.assertEqual(nthreads, self.n_passed) self.assertTrue(len(cx_pool.sockets) > 1) - self.assertEqual(cx_pool.max_pool_size, float('inf')) - + self.assertEqual(cx_pool.max_pool_size, float("inf")) def test_max_pool_size_zero(self): c = rs_or_single_client(maxPoolSize=0) self.addCleanup(c.close) pool = get_pool(c) - self.assertEqual(pool.max_pool_size, float('inf')) + self.assertEqual(pool.max_pool_size, float("inf")) def test_max_pool_size_with_connection_failure(self): # The pool acquires its semaphore before attempting to connect; ensure # it releases the semaphore on connection failure. test_pool = Pool( - ('somedomainthatdoesntexist.org', 27017), - PoolOptions( - max_pool_size=1, - connect_timeout=1, - socket_timeout=1, - wait_queue_timeout=1)) + ("somedomainthatdoesntexist.org", 27017), + PoolOptions(max_pool_size=1, connect_timeout=1, socket_timeout=1, wait_queue_timeout=1), + ) test_pool.ready() # First call to get_socket fails; if pool doesn't release its semaphore @@ -521,8 +507,7 @@ def test_max_pool_size_with_connection_failure(self): # Testing for AutoReconnect instead of ConnectionFailure, above, # is sufficient right *now* to catch a semaphore leak. But that # seems error-prone, so check the message too. 
- self.assertNotIn('waiting for socket from pool', - str(context.exception)) + self.assertNotIn("waiting for socket from pool", str(context.exception)) if __name__ == "__main__": diff --git a/test/test_pymongo.py b/test/test_pymongo.py index 780a4beb8b..7ec32e16a6 100644 --- a/test/test_pymongo.py +++ b/test/test_pymongo.py @@ -15,17 +15,18 @@ """Test the pymongo module itself.""" import sys + sys.path[0:0] = [""] -import pymongo from test import unittest +import pymongo + class TestPyMongo(unittest.TestCase): def test_mongo_client_alias(self): # Testing that pymongo module imports mongo_client.MongoClient - self.assertEqual(pymongo.MongoClient, - pymongo.mongo_client.MongoClient) + self.assertEqual(pymongo.MongoClient, pymongo.mongo_client.MongoClient) if __name__ == "__main__": diff --git a/test/test_raw_bson.py b/test/test_raw_bson.py index 90ada05c6f..a27af6e217 100644 --- a/test/test_raw_bson.py +++ b/test/test_raw_bson.py @@ -18,15 +18,16 @@ sys.path[0:0] = [""] +from test import client_context, unittest +from test.test_client import IntegrationTest +from test.utils import rs_or_single_client + from bson import decode, encode -from bson.binary import Binary, JAVA_LEGACY, UuidRepresentation +from bson.binary import JAVA_LEGACY, Binary, UuidRepresentation from bson.codec_options import CodecOptions from bson.errors import InvalidBSON -from bson.raw_bson import RawBSONDocument, DEFAULT_RAW_BSON_OPTIONS +from bson.raw_bson import DEFAULT_RAW_BSON_OPTIONS, RawBSONDocument from bson.son import SON -from test import client_context, unittest -from test.utils import rs_or_single_client -from test.test_client import IntegrationTest class TestRawBSONDocument(IntegrationTest): @@ -35,9 +36,9 @@ class TestRawBSONDocument(IntegrationTest): # 'name': 'Sherlock', # 'addresses': [{'street': 'Baker Street'}]} bson_string = ( - b'Z\x00\x00\x00\x07_id\x00Um\xf6\x8bn2\xab!\xa9^\x07\x85\x02name\x00\t' - b'\x00\x00\x00Sherlock\x00\x04addresses\x00&\x00\x00\x00\x030\x00\x1e' - b'\x00\x00\x00\x02street\x00\r\x00\x00\x00Baker Street\x00\x00\x00\x00' + b"Z\x00\x00\x00\x07_id\x00Um\xf6\x8bn2\xab!\xa9^\x07\x85\x02name\x00\t" + b"\x00\x00\x00Sherlock\x00\x04addresses\x00&\x00\x00\x00\x030\x00\x1e" + b"\x00\x00\x00\x02street\x00\r\x00\x00\x00Baker Street\x00\x00\x00\x00" ) document = RawBSONDocument(bson_string) @@ -52,10 +53,10 @@ def tearDown(self): self.client.pymongo_test.test_raw.drop() def test_decode(self): - self.assertEqual('Sherlock', self.document['name']) - first_address = self.document['addresses'][0] + self.assertEqual("Sherlock", self.document["name"]) + first_address = self.document["addresses"][0] self.assertIsInstance(first_address, RawBSONDocument) - self.assertEqual('Baker Street', first_address['street']) + self.assertEqual("Baker Street", first_address["street"]) def test_raw(self): self.assertEqual(self.bson_string, self.document.raw) @@ -63,44 +64,45 @@ def test_raw(self): def test_empty_doc(self): doc = RawBSONDocument(encode({})) with self.assertRaises(KeyError): - doc['does-not-exist'] + doc["does-not-exist"] def test_invalid_bson_sequence(self): - bson_byte_sequence = encode({'a': 1})+encode({}) - with self.assertRaisesRegex(InvalidBSON, 'invalid object length'): + bson_byte_sequence = encode({"a": 1}) + encode({}) + with self.assertRaisesRegex(InvalidBSON, "invalid object length"): RawBSONDocument(bson_byte_sequence) def test_invalid_bson_eoo(self): - invalid_bson_eoo = encode({'a': 1})[:-1] + b'\x01' - with self.assertRaisesRegex(InvalidBSON, 'bad eoo'): + invalid_bson_eoo = 
encode({"a": 1})[:-1] + b"\x01" + with self.assertRaisesRegex(InvalidBSON, "bad eoo"): RawBSONDocument(invalid_bson_eoo) @client_context.require_connection def test_round_trip(self): db = self.client.get_database( - 'pymongo_test', - codec_options=CodecOptions(document_class=RawBSONDocument)) + "pymongo_test", codec_options=CodecOptions(document_class=RawBSONDocument) + ) db.test_raw.insert_one(self.document) - result = db.test_raw.find_one(self.document['_id']) + result = db.test_raw.find_one(self.document["_id"]) assert result is not None self.assertIsInstance(result, RawBSONDocument) self.assertEqual(dict(self.document.items()), dict(result.items())) @client_context.require_connection def test_round_trip_raw_uuid(self): - coll = self.client.get_database('pymongo_test').test_raw + coll = self.client.get_database("pymongo_test").test_raw uid = uuid.uuid4() - doc = {'_id': 1, - 'bin4': Binary(uid.bytes, 4), - 'bin3': Binary(uid.bytes, 3)} + doc = {"_id": 1, "bin4": Binary(uid.bytes, 4), "bin3": Binary(uid.bytes, 3)} raw = RawBSONDocument(encode(doc)) coll.insert_one(raw) self.assertEqual(coll.find_one(), doc) uuid_coll = coll.with_options( codec_options=coll.codec_options.with_options( - uuid_representation=UuidRepresentation.STANDARD)) - self.assertEqual(uuid_coll.find_one(), - {'_id': 1, 'bin4': uid, 'bin3': Binary(uid.bytes, 3)}) + uuid_representation=UuidRepresentation.STANDARD + ) + ) + self.assertEqual( + uuid_coll.find_one(), {"_id": 1, "bin4": uid, "bin3": Binary(uid.bytes, 3)} + ) # Test that the raw bytes haven't changed. raw_coll = coll.with_options(codec_options=DEFAULT_RAW_BSON_OPTIONS) @@ -111,44 +113,46 @@ def test_with_codec_options(self): # '_id': UUID('026fab8f-975f-4965-9fbf-85ad874c60ff')} # encoded with JAVA_LEGACY uuid representation. 
bson_string = ( - b'-\x00\x00\x00\x05_id\x00\x10\x00\x00\x00\x03eI_\x97\x8f\xabo\x02' - b'\xff`L\x87\xad\x85\xbf\x9f\tdate\x00\x8a\xd6\xb9\xbaM' - b'\x01\x00\x00\x00' + b"-\x00\x00\x00\x05_id\x00\x10\x00\x00\x00\x03eI_\x97\x8f\xabo\x02" + b"\xff`L\x87\xad\x85\xbf\x9f\tdate\x00\x8a\xd6\xb9\xbaM" + b"\x01\x00\x00\x00" ) document = RawBSONDocument( bson_string, - codec_options=CodecOptions(uuid_representation=JAVA_LEGACY, - document_class=RawBSONDocument)) + codec_options=CodecOptions( + uuid_representation=JAVA_LEGACY, document_class=RawBSONDocument + ), + ) - self.assertEqual(uuid.UUID('026fab8f-975f-4965-9fbf-85ad874c60ff'), - document['_id']) + self.assertEqual(uuid.UUID("026fab8f-975f-4965-9fbf-85ad874c60ff"), document["_id"]) @client_context.require_connection def test_round_trip_codec_options(self): doc = { - 'date': datetime.datetime(2015, 6, 3, 18, 40, 50, 826000), - '_id': uuid.UUID('026fab8f-975f-4965-9fbf-85ad874c60ff') + "date": datetime.datetime(2015, 6, 3, 18, 40, 50, 826000), + "_id": uuid.UUID("026fab8f-975f-4965-9fbf-85ad874c60ff"), } db = self.client.pymongo_test coll = db.get_collection( - 'test_raw', - codec_options=CodecOptions(uuid_representation=JAVA_LEGACY)) + "test_raw", codec_options=CodecOptions(uuid_representation=JAVA_LEGACY) + ) coll.insert_one(doc) - raw_java_legacy = CodecOptions(uuid_representation=JAVA_LEGACY, - document_class=RawBSONDocument) - coll = db.get_collection('test_raw', codec_options=raw_java_legacy) + raw_java_legacy = CodecOptions( + uuid_representation=JAVA_LEGACY, document_class=RawBSONDocument + ) + coll = db.get_collection("test_raw", codec_options=raw_java_legacy) self.assertEqual( - RawBSONDocument(encode(doc, codec_options=raw_java_legacy)), - coll.find_one()) + RawBSONDocument(encode(doc, codec_options=raw_java_legacy)), coll.find_one() + ) @client_context.require_connection def test_raw_bson_document_embedded(self): - doc = {'embedded': self.document} + doc = {"embedded": self.document} db = self.client.pymongo_test db.test_raw.insert_one(doc) result = db.test_raw.find_one() assert result is not None - self.assertEqual(decode(self.document.raw), result['embedded']) + self.assertEqual(decode(self.document.raw), result["embedded"]) # Make sure that CodecOptions are preserved. # {'embedded': [ @@ -157,40 +161,46 @@ def test_raw_bson_document_embedded(self): # ]} # encoded with JAVA_LEGACY uuid representation. 
bson_string = ( - b'D\x00\x00\x00\x04embedded\x005\x00\x00\x00\x030\x00-\x00\x00\x00' - b'\tdate\x00\x8a\xd6\xb9\xbaM\x01\x00\x00\x05_id\x00\x10\x00\x00' - b'\x00\x03eI_\x97\x8f\xabo\x02\xff`L\x87\xad\x85\xbf\x9f\x00\x00' - b'\x00' + b"D\x00\x00\x00\x04embedded\x005\x00\x00\x00\x030\x00-\x00\x00\x00" + b"\tdate\x00\x8a\xd6\xb9\xbaM\x01\x00\x00\x05_id\x00\x10\x00\x00" + b"\x00\x03eI_\x97\x8f\xabo\x02\xff`L\x87\xad\x85\xbf\x9f\x00\x00" + b"\x00" ) rbd = RawBSONDocument( bson_string, - codec_options=CodecOptions(uuid_representation=JAVA_LEGACY, - document_class=RawBSONDocument)) + codec_options=CodecOptions( + uuid_representation=JAVA_LEGACY, document_class=RawBSONDocument + ), + ) db.test_raw.drop() db.test_raw.insert_one(rbd) - result = db.get_collection('test_raw', codec_options=CodecOptions( - uuid_representation=JAVA_LEGACY)).find_one() + result = db.get_collection( + "test_raw", codec_options=CodecOptions(uuid_representation=JAVA_LEGACY) + ).find_one() assert result is not None - self.assertEqual(rbd['embedded'][0]['_id'], - result['embedded'][0]['_id']) + self.assertEqual(rbd["embedded"][0]["_id"], result["embedded"][0]["_id"]) @client_context.require_connection def test_write_response_raw_bson(self): coll = self.client.get_database( - 'pymongo_test', - codec_options=CodecOptions(document_class=RawBSONDocument)).test_raw + "pymongo_test", codec_options=CodecOptions(document_class=RawBSONDocument) + ).test_raw # No Exceptions raised while handling write response. coll.insert_one(self.document) coll.delete_one(self.document) coll.insert_many([self.document]) coll.delete_many(self.document) - coll.update_one(self.document, {'$set': {'a': 'b'}}, upsert=True) - coll.update_many(self.document, {'$set': {'b': 'c'}}) + coll.update_one(self.document, {"$set": {"a": "b"}}, upsert=True) + coll.update_many(self.document, {"$set": {"b": "c"}}) def test_preserve_key_ordering(self): - keyvaluepairs = [('a', 1), ('b', 2), ('c', 3),] + keyvaluepairs = [ + ("a", 1), + ("b", 2), + ("c", 3), + ] rawdoc = RawBSONDocument(encode(SON(keyvaluepairs))) for rkey, elt in zip(rawdoc, keyvaluepairs): diff --git a/test/test_read_concern.py b/test/test_read_concern.py index 1d21db8900..d5df682fba 100644 --- a/test/test_read_concern.py +++ b/test/test_read_concern.py @@ -14,13 +14,13 @@ """Test the read_concern module.""" +from test import IntegrationTest, client_context +from test.utils import OvertCommandListener, rs_or_single_client, single_client + from bson.son import SON from pymongo.errors import OperationFailure from pymongo.read_concern import ReadConcern -from test import client_context, IntegrationTest -from test.utils import single_client, rs_or_single_client, OvertCommandListener - class TestReadConcern(IntegrationTest): listener: OvertCommandListener @@ -32,12 +32,12 @@ def setUpClass(cls): cls.listener = OvertCommandListener() cls.client = single_client(event_listeners=[cls.listener]) cls.db = cls.client.pymongo_test - client_context.client.pymongo_test.create_collection('coll') + client_context.client.pymongo_test.create_collection("coll") @classmethod def tearDownClass(cls): cls.client.close() - client_context.client.pymongo_test.drop_collection('coll') + client_context.client.pymongo_test.drop_collection("coll") super(TestReadConcern, cls).tearDownClass() def tearDown(self): @@ -49,25 +49,23 @@ def test_read_concern(self): self.assertIsNone(rc.level) self.assertTrue(rc.ok_for_legacy) - rc = ReadConcern('majority') - self.assertEqual('majority', rc.level) + rc = ReadConcern("majority") + 
self.assertEqual("majority", rc.level) self.assertFalse(rc.ok_for_legacy) - rc = ReadConcern('local') - self.assertEqual('local', rc.level) + rc = ReadConcern("local") + self.assertEqual("local", rc.level) self.assertTrue(rc.ok_for_legacy) self.assertRaises(TypeError, ReadConcern, 42) def test_read_concern_uri(self): - uri = 'mongodb://%s/?readConcernLevel=majority' % ( - client_context.pair,) + uri = "mongodb://%s/?readConcernLevel=majority" % (client_context.pair,) client = rs_or_single_client(uri, connect=False) - self.assertEqual(ReadConcern('majority'), client.read_concern) + self.assertEqual(ReadConcern("majority"), client.read_concern) def test_invalid_read_concern(self): - coll = self.db.get_collection( - 'coll', read_concern=ReadConcern('unknown')) + coll = self.db.get_collection("coll", read_concern=ReadConcern("unknown")) # We rely on the server to validate read concern. with self.assertRaises(OperationFailure): coll.find_one() @@ -75,46 +73,46 @@ def test_invalid_read_concern(self): def test_find_command(self): # readConcern not sent in command if not specified. coll = self.db.coll - tuple(coll.find({'field': 'value'})) - self.assertNotIn('readConcern', - self.listener.results['started'][0].command) + tuple(coll.find({"field": "value"})) + self.assertNotIn("readConcern", self.listener.results["started"][0].command) self.listener.results.clear() # Explicitly set readConcern to 'local'. - coll = self.db.get_collection('coll', read_concern=ReadConcern('local')) - tuple(coll.find({'field': 'value'})) + coll = self.db.get_collection("coll", read_concern=ReadConcern("local")) + tuple(coll.find({"field": "value"})) self.assertEqualCommand( - SON([('find', 'coll'), - ('filter', {'field': 'value'}), - ('readConcern', {'level': 'local'})]), - self.listener.results['started'][0].command) + SON( + [ + ("find", "coll"), + ("filter", {"field": "value"}), + ("readConcern", {"level": "local"}), + ] + ), + self.listener.results["started"][0].command, + ) def test_command_cursor(self): # readConcern not sent in command if not specified. coll = self.db.coll - tuple(coll.aggregate([{'$match': {'field': 'value'}}])) - self.assertNotIn('readConcern', - self.listener.results['started'][0].command) + tuple(coll.aggregate([{"$match": {"field": "value"}}])) + self.assertNotIn("readConcern", self.listener.results["started"][0].command) self.listener.results.clear() # Explicitly set readConcern to 'local'. - coll = self.db.get_collection('coll', read_concern=ReadConcern('local')) - tuple(coll.aggregate([{'$match': {'field': 'value'}}])) + coll = self.db.get_collection("coll", read_concern=ReadConcern("local")) + tuple(coll.aggregate([{"$match": {"field": "value"}}])) self.assertEqual( - {'level': 'local'}, - self.listener.results['started'][0].command['readConcern']) + {"level": "local"}, self.listener.results["started"][0].command["readConcern"] + ) def test_aggregate_out(self): - coll = self.db.get_collection('coll', read_concern=ReadConcern('local')) - tuple(coll.aggregate([{'$match': {'field': 'value'}}, - {'$out': 'output_collection'}])) + coll = self.db.get_collection("coll", read_concern=ReadConcern("local")) + tuple(coll.aggregate([{"$match": {"field": "value"}}, {"$out": "output_collection"}])) # Aggregate with $out supports readConcern MongoDB 4.2 onwards. 
if client_context.version >= (4, 1): - self.assertIn('readConcern', - self.listener.results['started'][0].command) + self.assertIn("readConcern", self.listener.results["started"][0].command) else: - self.assertNotIn('readConcern', - self.listener.results['started'][0].command) + self.assertNotIn("readConcern", self.listener.results["started"][0].command) diff --git a/test/test_read_preferences.py b/test/test_read_preferences.py index 69f61f94e8..ae2fa8bcee 100644 --- a/test/test_read_preferences.py +++ b/test/test_read_preferences.py @@ -22,52 +22,56 @@ sys.path[0:0] = [""] +from test import IntegrationTest, SkipTest, client_context, unittest +from test.utils import ( + OvertCommandListener, + connected, + one, + rs_client, + single_client, + wait_until, +) +from test.version import Version + from bson.son import SON from pymongo.errors import ConfigurationError, OperationFailure from pymongo.message import _maybe_add_read_preference from pymongo.mongo_client import MongoClient -from pymongo.read_preferences import (ReadPreference, MovingAverage, - Primary, PrimaryPreferred, - Secondary, SecondaryPreferred, - Nearest) +from pymongo.read_preferences import ( + MovingAverage, + Nearest, + Primary, + PrimaryPreferred, + ReadPreference, + Secondary, + SecondaryPreferred, +) from pymongo.server_description import ServerDescription -from pymongo.server_selectors import readable_server_selector, Selection +from pymongo.server_selectors import Selection, readable_server_selector from pymongo.server_type import SERVER_TYPE from pymongo.write_concern import WriteConcern -from test import (SkipTest, - client_context, - IntegrationTest, - unittest) -from test.utils import (connected, - one, - OvertCommandListener, - rs_client, - single_client, - wait_until) -from test.version import Version - class TestSelections(IntegrationTest): - @client_context.require_connection def test_bool(self): client = single_client() wait_until(lambda: client.address, "discover primary") - selection = Selection.from_topology_description( - client._topology.description) + selection = Selection.from_topology_description(client._topology.description) self.assertTrue(selection) self.assertFalse(selection.with_server_descriptions([])) class TestReadPreferenceObjects(unittest.TestCase): - prefs = [Primary(), - PrimaryPreferred(), - Secondary(), - Nearest(tag_sets=[{'a': 1}, {'b': 2}]), - SecondaryPreferred(max_staleness=30)] + prefs = [ + Primary(), + PrimaryPreferred(), + Secondary(), + Nearest(tag_sets=[{"a": 1}, {"b": 2}]), + SecondaryPreferred(max_staleness=30), + ] def test_pickle(self): for pref in self.prefs: @@ -83,7 +87,6 @@ def test_deepcopy(self): class TestReadPreferencesBase(IntegrationTest): - @classmethod @client_context.require_secondaries_count(1) def setUpClass(cls): @@ -94,47 +97,41 @@ def setUp(self): # Insert some data so we can use cursors in read_from_which_host self.client.pymongo_test.test.drop() self.client.get_database( - "pymongo_test", - write_concern=WriteConcern(w=client_context.w)).test.insert_many( - [{'_id': i} for i in range(10)]) + "pymongo_test", write_concern=WriteConcern(w=client_context.w) + ).test.insert_many([{"_id": i} for i in range(10)]) self.addCleanup(self.client.pymongo_test.test.drop) def read_from_which_host(self, client): - """Do a find() on the client and return which host was used - """ + """Do a find() on the client and return which host was used""" cursor = client.pymongo_test.test.find() next(cursor) return cursor.address def read_from_which_kind(self, client): """Do a 
find() on the client and return 'primary' or 'secondary' - depending on which the client used. + depending on which the client used. """ address = self.read_from_which_host(client) if address == client.primary: - return 'primary' + return "primary" elif address in client.secondaries: - return 'secondary' + return "secondary" else: self.fail( - 'Cursor used address %s, expected either primary ' - '%s or secondaries %s' % ( - address, client.primary, client.secondaries)) + "Cursor used address %s, expected either primary " + "%s or secondaries %s" % (address, client.primary, client.secondaries) + ) def assertReadsFrom(self, expected, **kwargs): c = rs_client(**kwargs) - wait_until( - lambda: len(c.nodes - c.arbiters) == client_context.w, - "discovered all nodes") + wait_until(lambda: len(c.nodes - c.arbiters) == client_context.w, "discovered all nodes") used = self.read_from_which_kind(c) - self.assertEqual(expected, used, 'Cursor used %s, expected %s' % ( - used, expected)) + self.assertEqual(expected, used, "Cursor used %s, expected %s" % (used, expected)) class TestSingleSecondaryOk(TestReadPreferencesBase): - def test_reads_from_secondary(self): host, port = next(iter(self.client.secondaries)) @@ -167,62 +164,53 @@ def test_reads_from_secondary(self): class TestReadPreferences(TestReadPreferencesBase): - def test_mode_validation(self): - for mode in (ReadPreference.PRIMARY, - ReadPreference.PRIMARY_PREFERRED, - ReadPreference.SECONDARY, - ReadPreference.SECONDARY_PREFERRED, - ReadPreference.NEAREST): - self.assertEqual( - mode, - rs_client(read_preference=mode).read_preference) - - self.assertRaises( - TypeError, - rs_client, read_preference='foo') + for mode in ( + ReadPreference.PRIMARY, + ReadPreference.PRIMARY_PREFERRED, + ReadPreference.SECONDARY, + ReadPreference.SECONDARY_PREFERRED, + ReadPreference.NEAREST, + ): + self.assertEqual(mode, rs_client(read_preference=mode).read_preference) + + self.assertRaises(TypeError, rs_client, read_preference="foo") def test_tag_sets_validation(self): S = Secondary(tag_sets=[{}]) - self.assertEqual( - [{}], - rs_client(read_preference=S).read_preference.tag_sets) + self.assertEqual([{}], rs_client(read_preference=S).read_preference.tag_sets) - S = Secondary(tag_sets=[{'k': 'v'}]) - self.assertEqual( - [{'k': 'v'}], - rs_client(read_preference=S).read_preference.tag_sets) + S = Secondary(tag_sets=[{"k": "v"}]) + self.assertEqual([{"k": "v"}], rs_client(read_preference=S).read_preference.tag_sets) - S = Secondary(tag_sets=[{'k': 'v'}, {}]) - self.assertEqual( - [{'k': 'v'}, {}], - rs_client(read_preference=S).read_preference.tag_sets) + S = Secondary(tag_sets=[{"k": "v"}, {}]) + self.assertEqual([{"k": "v"}, {}], rs_client(read_preference=S).read_preference.tag_sets) self.assertRaises(ValueError, Secondary, tag_sets=[]) # One dict not ok, must be a list of dicts - self.assertRaises(TypeError, Secondary, tag_sets={'k': 'v'}) + self.assertRaises(TypeError, Secondary, tag_sets={"k": "v"}) - self.assertRaises(TypeError, Secondary, tag_sets='foo') + self.assertRaises(TypeError, Secondary, tag_sets="foo") - self.assertRaises(TypeError, Secondary, tag_sets=['foo']) + self.assertRaises(TypeError, Secondary, tag_sets=["foo"]) def test_threshold_validation(self): - self.assertEqual(17, rs_client( - localThresholdMS=17, connect=False).options.local_threshold_ms) + self.assertEqual( + 17, rs_client(localThresholdMS=17, connect=False).options.local_threshold_ms + ) - self.assertEqual(42, rs_client( - localThresholdMS=42, 
connect=False).options.local_threshold_ms) + self.assertEqual( + 42, rs_client(localThresholdMS=42, connect=False).options.local_threshold_ms + ) - self.assertEqual(666, rs_client( - localThresholdMS=666, connect=False).options.local_threshold_ms) + self.assertEqual( + 666, rs_client(localThresholdMS=666, connect=False).options.local_threshold_ms + ) - self.assertEqual(0, rs_client( - localThresholdMS=0, connect=False).options.local_threshold_ms) + self.assertEqual(0, rs_client(localThresholdMS=0, connect=False).options.local_threshold_ms) - self.assertRaises(ValueError, - rs_client, - localthresholdms=-1) + self.assertRaises(ValueError, rs_client, localthresholdms=-1) def test_zero_latency(self): ping_times: set = set() @@ -232,11 +220,8 @@ def test_zero_latency(self): for ping_time, host in zip(ping_times, self.client.nodes): ServerDescription._host_to_round_trip_time[host] = ping_time try: - client = connected( - rs_client(readPreference='nearest', localThresholdMS=0)) - wait_until( - lambda: client.nodes == self.client.nodes, - "discovered all nodes") + client = connected(rs_client(readPreference="nearest", localThresholdMS=0)) + wait_until(lambda: client.nodes == self.client.nodes, "discovered all nodes") host = self.read_from_which_host(client) for _ in range(5): self.assertEqual(host, self.read_from_which_host(client)) @@ -244,33 +229,25 @@ def test_zero_latency(self): ServerDescription._host_to_round_trip_time.clear() def test_primary(self): - self.assertReadsFrom( - 'primary', read_preference=ReadPreference.PRIMARY) + self.assertReadsFrom("primary", read_preference=ReadPreference.PRIMARY) def test_primary_with_tags(self): # Tags not allowed with PRIMARY - self.assertRaises( - ConfigurationError, - rs_client, tag_sets=[{'dc': 'ny'}]) + self.assertRaises(ConfigurationError, rs_client, tag_sets=[{"dc": "ny"}]) def test_primary_preferred(self): - self.assertReadsFrom( - 'primary', read_preference=ReadPreference.PRIMARY_PREFERRED) + self.assertReadsFrom("primary", read_preference=ReadPreference.PRIMARY_PREFERRED) def test_secondary(self): - self.assertReadsFrom( - 'secondary', read_preference=ReadPreference.SECONDARY) + self.assertReadsFrom("secondary", read_preference=ReadPreference.SECONDARY) def test_secondary_preferred(self): - self.assertReadsFrom( - 'secondary', read_preference=ReadPreference.SECONDARY_PREFERRED) + self.assertReadsFrom("secondary", read_preference=ReadPreference.SECONDARY_PREFERRED) def test_nearest(self): # With high localThresholdMS, expect to read from any # member - c = rs_client( - read_preference=ReadPreference.NEAREST, - localThresholdMS=10000) # 10 seconds + c = rs_client(read_preference=ReadPreference.NEAREST, localThresholdMS=10000) # 10 seconds data_members = {self.client.primary} | self.client.secondaries @@ -286,16 +263,16 @@ def test_nearest(self): i += 1 not_used = data_members.difference(used) - latencies = ', '.join( - '%s: %dms' % (server.description.address, - server.description.round_trip_time) - for server in c._get_topology().select_servers( - readable_server_selector)) + latencies = ", ".join( + "%s: %dms" % (server.description.address, server.description.round_trip_time) + for server in c._get_topology().select_servers(readable_server_selector) + ) self.assertFalse( not_used, "Expected to use primary and all secondaries for mode NEAREST," - " but didn't use %s\nlatencies: %s" % (not_used, latencies)) + " but didn't use %s\nlatencies: %s" % (not_used, latencies), + ) class ReadPrefTester(MongoClient): @@ -307,16 +284,14 @@ def 
__init__(self, *args, **kwargs): @contextlib.contextmanager def _socket_for_reads(self, read_preference, session): - context = super(ReadPrefTester, self)._socket_for_reads( - read_preference, session) + context = super(ReadPrefTester, self)._socket_for_reads(read_preference, session) with context as (sock_info, read_preference): self.record_a_read(sock_info.address) yield sock_info, read_preference @contextlib.contextmanager def _socket_from_server(self, read_preference, server, session): - context = super(ReadPrefTester, self)._socket_from_server( - read_preference, server, session) + context = super(ReadPrefTester, self)._socket_from_server(read_preference, server, session) with context as (sock_info, read_preference): self.record_a_read(sock_info.address) yield sock_info, read_preference @@ -325,12 +300,13 @@ def record_a_read(self, address): server = self._get_topology().select_server_by_address(address, 0) self.has_read_from.add(server) + _PREF_MAP = [ (Primary, SERVER_TYPE.RSPrimary), (PrimaryPreferred, SERVER_TYPE.RSPrimary), (Secondary, SERVER_TYPE.RSSecondary), (SecondaryPreferred, SERVER_TYPE.RSSecondary), - (Nearest, 'any') + (Nearest, "any"), ] @@ -345,16 +321,18 @@ def setUpClass(cls): cls.c = ReadPrefTester( client_context.pair, # Ignore round trip times, to test ReadPreference modes only. - localThresholdMS=1000*1000) + localThresholdMS=1000 * 1000, + ) cls.client_version = Version.from_client(cls.c) # mapReduce fails if the collection does not exist. coll = cls.c.pymongo_test.get_collection( - 'test', write_concern=WriteConcern(w=client_context.w)) + "test", write_concern=WriteConcern(w=client_context.w) + ) coll.insert_one({}) @classmethod def tearDownClass(cls): - cls.c.drop_database('pymongo_test') + cls.c.drop_database("pymongo_test") cls.c.close() def executed_on_which_server(self, client, fn, *args, **kwargs): @@ -366,12 +344,13 @@ def executed_on_which_server(self, client, fn, *args, **kwargs): def assertExecutedOn(self, server_type, client, fn, *args, **kwargs): server = self.executed_on_which_server(client, fn, *args, **kwargs) - self.assertEqual(SERVER_TYPE._fields[server_type], - SERVER_TYPE._fields[server.description.server_type]) + self.assertEqual( + SERVER_TYPE._fields[server_type], SERVER_TYPE._fields[server.description.server_type] + ) def _test_fn(self, server_type, fn): for _ in range(10): - if server_type == 'any': + if server_type == "any": used = set() for _ in range(1000): server = self.executed_on_which_server(self.c, fn) @@ -381,13 +360,9 @@ def _test_fn(self, server_type, fn): break assert self.c.primary is not None - unused = self.c.secondaries.union( - set([self.c.primary]) - ).difference(used) + unused = self.c.secondaries.union(set([self.c.primary])).difference(used) if unused: - self.fail( - "Some members not used for NEAREST: %s" % ( - unused)) + self.fail("Some members not used for NEAREST: %s" % (unused)) else: self.assertExecutedOn(server_type, self.c, fn) @@ -408,8 +383,7 @@ def test_command(self): # Test that the generic command helper obeys the read preference # passed to it. for mode, server_type in _PREF_MAP: - func = lambda: self.c.pymongo_test.command('dbStats', - read_preference=mode()) + func = lambda: self.c.pymongo_test.command("dbStats", read_preference=mode()) self._test_fn(server_type, func) def test_create_collection(self): @@ -417,30 +391,33 @@ def test_create_collection(self): # the collection already exists. 
self._test_primary_helper( lambda: self.c.pymongo_test.create_collection( - 'some_collection%s' % random.randint(0, sys.maxsize))) + "some_collection%s" % random.randint(0, sys.maxsize) + ) + ) def test_count_documents(self): - self._test_coll_helper( - True, self.c.pymongo_test.test, 'count_documents', {}) + self._test_coll_helper(True, self.c.pymongo_test.test, "count_documents", {}) def test_estimated_document_count(self): - self._test_coll_helper( - True, self.c.pymongo_test.test, 'estimated_document_count') + self._test_coll_helper(True, self.c.pymongo_test.test, "estimated_document_count") def test_distinct(self): - self._test_coll_helper(True, self.c.pymongo_test.test, 'distinct', 'a') + self._test_coll_helper(True, self.c.pymongo_test.test, "distinct", "a") def test_aggregate(self): - self._test_coll_helper(True, self.c.pymongo_test.test, - 'aggregate', - [{'$project': {'_id': 1}}]) + self._test_coll_helper( + True, self.c.pymongo_test.test, "aggregate", [{"$project": {"_id": 1}}] + ) def test_aggregate_write(self): # 5.0 servers support $out on secondaries. secondary_ok = client_context.version.at_least(5, 0) - self._test_coll_helper(secondary_ok, self.c.pymongo_test.test, - 'aggregate', - [{'$project': {'_id': 1}}, {'$out': "agg_write_test"}]) + self._test_coll_helper( + secondary_ok, + self.c.pymongo_test.test, + "aggregate", + [{"$project": {"_id": 1}}, {"$out": "agg_write_test"}], + ) class TestMovingAverage(unittest.TestCase): @@ -456,77 +433,48 @@ def test_moving_average(self): class TestMongosAndReadPreference(IntegrationTest): - def test_read_preference_document(self): pref = Primary() - self.assertEqual( - pref.document, - {'mode': 'primary'}) + self.assertEqual(pref.document, {"mode": "primary"}) pref = PrimaryPreferred() + self.assertEqual(pref.document, {"mode": "primaryPreferred"}) + pref = PrimaryPreferred(tag_sets=[{"dc": "sf"}]) + self.assertEqual(pref.document, {"mode": "primaryPreferred", "tags": [{"dc": "sf"}]}) + pref = PrimaryPreferred(tag_sets=[{"dc": "sf"}], max_staleness=30) self.assertEqual( pref.document, - {'mode': 'primaryPreferred'}) - pref = PrimaryPreferred(tag_sets=[{'dc': 'sf'}]) - self.assertEqual( - pref.document, - {'mode': 'primaryPreferred', 'tags': [{'dc': 'sf'}]}) - pref = PrimaryPreferred( - tag_sets=[{'dc': 'sf'}], max_staleness=30) - self.assertEqual( - pref.document, - {'mode': 'primaryPreferred', - 'tags': [{'dc': 'sf'}], - 'maxStalenessSeconds': 30}) + {"mode": "primaryPreferred", "tags": [{"dc": "sf"}], "maxStalenessSeconds": 30}, + ) pref = Secondary() + self.assertEqual(pref.document, {"mode": "secondary"}) + pref = Secondary(tag_sets=[{"dc": "sf"}]) + self.assertEqual(pref.document, {"mode": "secondary", "tags": [{"dc": "sf"}]}) + pref = Secondary(tag_sets=[{"dc": "sf"}], max_staleness=30) self.assertEqual( - pref.document, - {'mode': 'secondary'}) - pref = Secondary(tag_sets=[{'dc': 'sf'}]) - self.assertEqual( - pref.document, - {'mode': 'secondary', 'tags': [{'dc': 'sf'}]}) - pref = Secondary( - tag_sets=[{'dc': 'sf'}], max_staleness=30) - self.assertEqual( - pref.document, - {'mode': 'secondary', - 'tags': [{'dc': 'sf'}], - 'maxStalenessSeconds': 30}) + pref.document, {"mode": "secondary", "tags": [{"dc": "sf"}], "maxStalenessSeconds": 30} + ) pref = SecondaryPreferred() + self.assertEqual(pref.document, {"mode": "secondaryPreferred"}) + pref = SecondaryPreferred(tag_sets=[{"dc": "sf"}]) + self.assertEqual(pref.document, {"mode": "secondaryPreferred", "tags": [{"dc": "sf"}]}) + pref = SecondaryPreferred(tag_sets=[{"dc": "sf"}], 
max_staleness=30) self.assertEqual( pref.document, - {'mode': 'secondaryPreferred'}) - pref = SecondaryPreferred(tag_sets=[{'dc': 'sf'}]) - self.assertEqual( - pref.document, - {'mode': 'secondaryPreferred', 'tags': [{'dc': 'sf'}]}) - pref = SecondaryPreferred( - tag_sets=[{'dc': 'sf'}], max_staleness=30) - self.assertEqual( - pref.document, - {'mode': 'secondaryPreferred', - 'tags': [{'dc': 'sf'}], - 'maxStalenessSeconds': 30}) + {"mode": "secondaryPreferred", "tags": [{"dc": "sf"}], "maxStalenessSeconds": 30}, + ) pref = Nearest() + self.assertEqual(pref.document, {"mode": "nearest"}) + pref = Nearest(tag_sets=[{"dc": "sf"}]) + self.assertEqual(pref.document, {"mode": "nearest", "tags": [{"dc": "sf"}]}) + pref = Nearest(tag_sets=[{"dc": "sf"}], max_staleness=30) self.assertEqual( - pref.document, - {'mode': 'nearest'}) - pref = Nearest(tag_sets=[{'dc': 'sf'}]) - self.assertEqual( - pref.document, - {'mode': 'nearest', 'tags': [{'dc': 'sf'}]}) - pref = Nearest( - tag_sets=[{'dc': 'sf'}], max_staleness=30) - self.assertEqual( - pref.document, - {'mode': 'nearest', - 'tags': [{'dc': 'sf'}], - 'maxStalenessSeconds': 30}) + pref.document, {"mode": "nearest", "tags": [{"dc": "sf"}], "maxStalenessSeconds": 30} + ) with self.assertRaises(TypeError): # Float is prohibited. @@ -540,72 +488,67 @@ def test_read_preference_document(self): def test_read_preference_document_hedge(self): cases = { - 'primaryPreferred': PrimaryPreferred, - 'secondary': Secondary, - 'secondaryPreferred': SecondaryPreferred, - 'nearest': Nearest, + "primaryPreferred": PrimaryPreferred, + "secondary": Secondary, + "secondaryPreferred": SecondaryPreferred, + "nearest": Nearest, } for mode, cls in cases.items(): with self.assertRaises(TypeError): cls(hedge=[]) # type: ignore pref = cls(hedge={}) - self.assertEqual(pref.document, {'mode': mode}) + self.assertEqual(pref.document, {"mode": mode}) out = _maybe_add_read_preference({}, pref) if cls == SecondaryPreferred: # SecondaryPreferred without hedge doesn't add $readPreference. 
self.assertEqual(out, {}) else: - self.assertEqual( - out, - SON([("$query", {}), ("$readPreference", pref.document)])) + self.assertEqual(out, SON([("$query", {}), ("$readPreference", pref.document)])) - hedge = {'enabled': True} + hedge = {"enabled": True} pref = cls(hedge=hedge) - self.assertEqual(pref.document, {'mode': mode, 'hedge': hedge}) + self.assertEqual(pref.document, {"mode": mode, "hedge": hedge}) out = _maybe_add_read_preference({}, pref) - self.assertEqual( - out, SON([("$query", {}), ("$readPreference", pref.document)])) + self.assertEqual(out, SON([("$query", {}), ("$readPreference", pref.document)])) - hedge = {'enabled': False} + hedge = {"enabled": False} pref = cls(hedge=hedge) - self.assertEqual(pref.document, {'mode': mode, 'hedge': hedge}) + self.assertEqual(pref.document, {"mode": mode, "hedge": hedge}) out = _maybe_add_read_preference({}, pref) - self.assertEqual( - out, SON([("$query", {}), ("$readPreference", pref.document)])) + self.assertEqual(out, SON([("$query", {}), ("$readPreference", pref.document)])) - hedge = {'enabled': False, 'extra': 'option'} + hedge = {"enabled": False, "extra": "option"} pref = cls(hedge=hedge) - self.assertEqual(pref.document, {'mode': mode, 'hedge': hedge}) + self.assertEqual(pref.document, {"mode": mode, "hedge": hedge}) out = _maybe_add_read_preference({}, pref) - self.assertEqual( - out, SON([("$query", {}), ("$readPreference", pref.document)])) + self.assertEqual(out, SON([("$query", {}), ("$readPreference", pref.document)])) def test_send_hedge(self): cases = { - 'primaryPreferred': PrimaryPreferred, - 'secondaryPreferred': SecondaryPreferred, - 'nearest': Nearest, + "primaryPreferred": PrimaryPreferred, + "secondaryPreferred": SecondaryPreferred, + "nearest": Nearest, } if client_context.supports_secondary_read_pref: - cases['secondary'] = Secondary + cases["secondary"] = Secondary listener = OvertCommandListener() client = rs_client(event_listeners=[listener]) self.addCleanup(client.close) - client.admin.command('ping') + client.admin.command("ping") for mode, cls in cases.items(): - pref = cls(hedge={'enabled': True}) - coll = client.test.get_collection('test', read_preference=pref) + pref = cls(hedge={"enabled": True}) + coll = client.test.get_collection("test", read_preference=pref) listener.reset() coll.find_one() - started = listener.results['started'] + started = listener.results["started"] self.assertEqual(len(started), 1, started) cmd = started[0].command if client_context.is_rs or client_context.is_mongos: - self.assertIn('$readPreference', cmd) - self.assertEqual(cmd['$readPreference'], pref.document) + self.assertIn("$readPreference", cmd) + self.assertEqual(cmd["$readPreference"], pref.document) else: - self.assertNotIn('$readPreference', cmd) + self.assertNotIn("$readPreference", cmd) def test_maybe_add_read_preference(self): @@ -615,72 +558,74 @@ def test_maybe_add_read_preference(self): pref = PrimaryPreferred() out = _maybe_add_read_preference({}, pref) - self.assertEqual( - out, SON([("$query", {}), ("$readPreference", pref.document)])) - pref = PrimaryPreferred(tag_sets=[{'dc': 'nyc'}]) + self.assertEqual(out, SON([("$query", {}), ("$readPreference", pref.document)])) + pref = PrimaryPreferred(tag_sets=[{"dc": "nyc"}]) out = _maybe_add_read_preference({}, pref) - self.assertEqual( - out, SON([("$query", {}), ("$readPreference", pref.document)])) + self.assertEqual(out, SON([("$query", {}), ("$readPreference", pref.document)])) pref = Secondary() out = _maybe_add_read_preference({}, pref) - 
self.assertEqual( - out, SON([("$query", {}), ("$readPreference", pref.document)])) - pref = Secondary(tag_sets=[{'dc': 'nyc'}]) + self.assertEqual(out, SON([("$query", {}), ("$readPreference", pref.document)])) + pref = Secondary(tag_sets=[{"dc": "nyc"}]) out = _maybe_add_read_preference({}, pref) - self.assertEqual( - out, SON([("$query", {}), ("$readPreference", pref.document)])) + self.assertEqual(out, SON([("$query", {}), ("$readPreference", pref.document)])) # SecondaryPreferred without tag_sets or max_staleness doesn't add # $readPreference pref = SecondaryPreferred() out = _maybe_add_read_preference({}, pref) self.assertEqual(out, {}) - pref = SecondaryPreferred(tag_sets=[{'dc': 'nyc'}]) + pref = SecondaryPreferred(tag_sets=[{"dc": "nyc"}]) out = _maybe_add_read_preference({}, pref) - self.assertEqual( - out, SON([("$query", {}), ("$readPreference", pref.document)])) + self.assertEqual(out, SON([("$query", {}), ("$readPreference", pref.document)])) pref = SecondaryPreferred(max_staleness=120) out = _maybe_add_read_preference({}, pref) - self.assertEqual( - out, SON([("$query", {}), ("$readPreference", pref.document)])) + self.assertEqual(out, SON([("$query", {}), ("$readPreference", pref.document)])) pref = Nearest() out = _maybe_add_read_preference({}, pref) - self.assertEqual( - out, SON([("$query", {}), ("$readPreference", pref.document)])) - pref = Nearest(tag_sets=[{'dc': 'nyc'}]) + self.assertEqual(out, SON([("$query", {}), ("$readPreference", pref.document)])) + pref = Nearest(tag_sets=[{"dc": "nyc"}]) out = _maybe_add_read_preference({}, pref) - self.assertEqual( - out, SON([("$query", {}), ("$readPreference", pref.document)])) + self.assertEqual(out, SON([("$query", {}), ("$readPreference", pref.document)])) criteria = SON([("$query", {}), ("$orderby", SON([("_id", 1)]))]) pref = Nearest() out = _maybe_add_read_preference(criteria, pref) self.assertEqual( out, - SON([("$query", {}), - ("$orderby", SON([("_id", 1)])), - ("$readPreference", pref.document)])) - pref = Nearest(tag_sets=[{'dc': 'nyc'}]) + SON( + [ + ("$query", {}), + ("$orderby", SON([("_id", 1)])), + ("$readPreference", pref.document), + ] + ), + ) + pref = Nearest(tag_sets=[{"dc": "nyc"}]) out = _maybe_add_read_preference(criteria, pref) self.assertEqual( out, - SON([("$query", {}), - ("$orderby", SON([("_id", 1)])), - ("$readPreference", pref.document)])) + SON( + [ + ("$query", {}), + ("$orderby", SON([("_id", 1)])), + ("$readPreference", pref.document), + ] + ), + ) @client_context.require_mongos def test_mongos(self): res = client_context.client.config.shards.find_one() assert res is not None - shard = res['host'] - num_members = shard.count(',') + 1 + shard = res["host"] + num_members = shard.count(",") + 1 if num_members == 1: raise SkipTest("Need a replica set shard to test.") coll = client_context.client.pymongo_test.get_collection( - "test", - write_concern=WriteConcern(w=num_members)) + "test", write_concern=WriteConcern(w=num_members) + ) coll.drop() res = coll.insert_many([{} for _ in range(5)]) first_id = res.inserted_ids[0] @@ -688,11 +633,7 @@ def test_mongos(self): # Note - this isn't a perfect test since there's no way to # tell what shard member a query ran on. 
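The private helper exercised above rewrites a filter for mongos by nesting it under ``$query`` and attaching the read preference's wire document as ``$readPreference``. A minimal sketch of that wrapping (``wrap_for_mongos`` is a hypothetical stand-in, not PyMongo's actual helper)::

    from bson.son import SON
    from pymongo.read_preferences import Nearest

    def wrap_for_mongos(spec, pref):
        # Nest the filter under $query, then append the read preference
        # document, mirroring what the assertions above expect.
        if "$query" not in spec:
            spec = SON([("$query", spec)])
        else:
            spec = SON(spec)
        spec["$readPreference"] = pref.document
        return spec

    pref = Nearest(tag_sets=[{"dc": "nyc"}], hedge={"enabled": True})
    assert wrap_for_mongos({}, pref) == SON(
        [
            ("$query", {}),
            (
                "$readPreference",
                {"mode": "nearest", "tags": [{"dc": "nyc"}], "hedge": {"enabled": True}},
            ),
        ]
    )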
- for pref in (Primary(), - PrimaryPreferred(), - Secondary(), - SecondaryPreferred(), - Nearest()): + for pref in (Primary(), PrimaryPreferred(), Secondary(), SecondaryPreferred(), Nearest()): qcoll = coll.with_options(read_preference=pref) results = list(qcoll.find().sort([("_id", 1)])) self.assertEqual(first_id, results[0]["_id"]) @@ -705,12 +646,14 @@ def test_mongos(self): def test_mongos_max_staleness(self): # Sanity check that we're sending maxStalenessSeconds coll = client_context.client.pymongo_test.get_collection( - "test", read_preference=SecondaryPreferred(max_staleness=120)) + "test", read_preference=SecondaryPreferred(max_staleness=120) + ) # No error coll.find_one() coll = client_context.client.pymongo_test.get_collection( - "test", read_preference=SecondaryPreferred(max_staleness=10)) + "test", read_preference=SecondaryPreferred(max_staleness=10) + ) try: coll.find_one() except OperationFailure as exc: @@ -719,14 +662,14 @@ def test_mongos_max_staleness(self): self.fail("mongos accepted invalid staleness") coll = single_client( - readPreference='secondaryPreferred', - maxStalenessSeconds=120).pymongo_test.test + readPreference="secondaryPreferred", maxStalenessSeconds=120 + ).pymongo_test.test # No error coll.find_one() coll = single_client( - readPreference='secondaryPreferred', - maxStalenessSeconds=10).pymongo_test.test + readPreference="secondaryPreferred", maxStalenessSeconds=10 + ).pymongo_test.test try: coll.find_one() except OperationFailure as exc: @@ -734,5 +677,6 @@ def test_mongos_max_staleness(self): else: self.fail("mongos accepted invalid staleness") + if __name__ == "__main__": unittest.main() diff --git a/test/test_read_write_concern_spec.py b/test/test_read_write_concern_spec.py index 13bc83a023..4dfc8f068c 100644 --- a/test/test_read_write_concern_spec.py +++ b/test/test_read_write_concern_spec.py @@ -21,33 +21,33 @@ sys.path[0:0] = [""] +from test import IntegrationTest, client_context, unittest +from test.utils import ( + EventListener, + TestCreator, + disable_replication, + enable_replication, + rs_or_single_client, +) +from test.utils_spec_runner import SpecRunner + from pymongo import DESCENDING -from pymongo.errors import (BulkWriteError, - ConfigurationError, - WTimeoutError, - WriteConcernError, - WriteError) +from pymongo.errors import ( + BulkWriteError, + ConfigurationError, + WriteConcernError, + WriteError, + WTimeoutError, +) from pymongo.mongo_client import MongoClient from pymongo.operations import IndexModel, InsertOne from pymongo.read_concern import ReadConcern from pymongo.write_concern import WriteConcern -from test import (client_context, - IntegrationTest, - unittest) -from test.utils import (EventListener, - disable_replication, - enable_replication, - rs_or_single_client, - TestCreator) -from test.utils_spec_runner import SpecRunner - -_TEST_PATH = os.path.join( - os.path.dirname(os.path.realpath(__file__)), 'read_write_concern') +_TEST_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "read_write_concern") class TestReadWriteConcernSpec(IntegrationTest): - def test_omit_default_read_write_concern(self): listener = EventListener() # Client with default readConcern and writeConcern @@ -63,85 +63,87 @@ def test_omit_default_read_write_concern(self): def rename_and_drop(): # Ensure collection exists. 
collection.insert_one({}) - collection.rename('collection2') + collection.rename("collection2") client.pymongo_test.collection2.drop() def insert_command_default_write_concern(): collection.database.command( - 'insert', 'collection', documents=[{}], - write_concern=WriteConcern()) + "insert", "collection", documents=[{}], write_concern=WriteConcern() + ) ops = [ - ('aggregate', lambda: list(collection.aggregate([]))), - ('find', lambda: list(collection.find())), - ('insert_one', lambda: collection.insert_one({})), - ('update_one', - lambda: collection.update_one({}, {'$set': {'x': 1}})), - ('update_many', - lambda: collection.update_many({}, {'$set': {'x': 1}})), - ('delete_one', lambda: collection.delete_one({})), - ('delete_many', lambda: collection.delete_many({})), - ('bulk_write', lambda: collection.bulk_write([InsertOne({})])), - ('rename_and_drop', rename_and_drop), - ('command', insert_command_default_write_concern) + ("aggregate", lambda: list(collection.aggregate([]))), + ("find", lambda: list(collection.find())), + ("insert_one", lambda: collection.insert_one({})), + ("update_one", lambda: collection.update_one({}, {"$set": {"x": 1}})), + ("update_many", lambda: collection.update_many({}, {"$set": {"x": 1}})), + ("delete_one", lambda: collection.delete_one({})), + ("delete_many", lambda: collection.delete_many({})), + ("bulk_write", lambda: collection.bulk_write([InsertOne({})])), + ("rename_and_drop", rename_and_drop), + ("command", insert_command_default_write_concern), ] for name, f in ops: listener.results.clear() f() - self.assertGreaterEqual(len(listener.results['started']), 1) - for i, event in enumerate(listener.results['started']): + self.assertGreaterEqual(len(listener.results["started"]), 1) + for i, event in enumerate(listener.results["started"]): self.assertNotIn( - 'readConcern', event.command, - "%s sent default readConcern with %s" % ( - name, event.command_name)) + "readConcern", + event.command, + "%s sent default readConcern with %s" % (name, event.command_name), + ) self.assertNotIn( - 'writeConcern', event.command, - "%s sent default writeConcern with %s" % ( - name, event.command_name)) + "writeConcern", + event.command, + "%s sent default writeConcern with %s" % (name, event.command_name), + ) def assertWriteOpsRaise(self, write_concern, expected_exception): wc = write_concern.document # Set socket timeout to avoid indefinite stalls - client = rs_or_single_client( - w=wc['w'], wTimeoutMS=wc['wtimeout'], socketTimeoutMS=30000) - db = client.get_database('pymongo_test') + client = rs_or_single_client(w=wc["w"], wTimeoutMS=wc["wtimeout"], socketTimeoutMS=30000) + db = client.get_database("pymongo_test") coll = db.test def insert_command(): coll.database.command( - 'insert', 'new_collection', documents=[{}], + "insert", + "new_collection", + documents=[{}], writeConcern=write_concern.document, - parse_write_concern_error=True) + parse_write_concern_error=True, + ) ops = [ - ('insert_one', lambda: coll.insert_one({})), - ('insert_many', lambda: coll.insert_many([{}, {}])), - ('update_one', lambda: coll.update_one({}, {'$set': {'x': 1}})), - ('update_many', lambda: coll.update_many({}, {'$set': {'x': 1}})), - ('delete_one', lambda: coll.delete_one({})), - ('delete_many', lambda: coll.delete_many({})), - ('bulk_write', lambda: coll.bulk_write([InsertOne({})])), - ('command', insert_command), - ('aggregate', lambda: coll.aggregate([{'$out': 'out'}])), + ("insert_one", lambda: coll.insert_one({})), + ("insert_many", lambda: coll.insert_many([{}, {}])), + 
("update_one", lambda: coll.update_one({}, {"$set": {"x": 1}})), + ("update_many", lambda: coll.update_many({}, {"$set": {"x": 1}})), + ("delete_one", lambda: coll.delete_one({})), + ("delete_many", lambda: coll.delete_many({})), + ("bulk_write", lambda: coll.bulk_write([InsertOne({})])), + ("command", insert_command), + ("aggregate", lambda: coll.aggregate([{"$out": "out"}])), # SERVER-46668 Delete all the documents in the collection to # workaround a hang in createIndexes. - ('delete_many', lambda: coll.delete_many({})), - ('create_index', lambda: coll.create_index([('a', DESCENDING)])), - ('create_indexes', lambda: coll.create_indexes([IndexModel('b')])), - ('drop_index', lambda: coll.drop_index([('a', DESCENDING)])), - ('create', lambda: db.create_collection('new')), - ('rename', lambda: coll.rename('new')), - ('drop', lambda: db.new.drop()), + ("delete_many", lambda: coll.delete_many({})), + ("create_index", lambda: coll.create_index([("a", DESCENDING)])), + ("create_indexes", lambda: coll.create_indexes([IndexModel("b")])), + ("drop_index", lambda: coll.drop_index([("a", DESCENDING)])), + ("create", lambda: db.create_collection("new")), + ("rename", lambda: coll.rename("new")), + ("drop", lambda: db.new.drop()), ] # SERVER-47194: dropDatabase does not respect wtimeout in 3.6. if client_context.version[:2] != (3, 6): - ops.append(('drop_database', lambda: client.drop_database(db))) + ops.append(("drop_database", lambda: client.drop_database(db))) for name, f in ops: # Ensure insert_many and bulk_write still raise BulkWriteError. - if name in ('insert_many', 'bulk_write'): + if name in ("insert_many", "bulk_write"): expected = BulkWriteError else: expected = expected_exception @@ -150,25 +152,25 @@ def insert_command(): if expected == BulkWriteError: bulk_result = cm.exception.details assert bulk_result is not None - wc_errors = bulk_result['writeConcernErrors'] + wc_errors = bulk_result["writeConcernErrors"] self.assertTrue(wc_errors) @client_context.require_replica_set def test_raise_write_concern_error(self): - self.addCleanup(client_context.client.drop_database, 'pymongo_test') + self.addCleanup(client_context.client.drop_database, "pymongo_test") assert client_context.w is not None self.assertWriteOpsRaise( - WriteConcern(w=client_context.w+1, wtimeout=1), WriteConcernError) + WriteConcern(w=client_context.w + 1, wtimeout=1), WriteConcernError + ) @client_context.require_secondaries_count(1) @client_context.require_test_commands def test_raise_wtimeout(self): - self.addCleanup(client_context.client.drop_database, 'pymongo_test') + self.addCleanup(client_context.client.drop_database, "pymongo_test") self.addCleanup(enable_replication, client_context.client) # Disable replication to guarantee a wtimeout error. 
disable_replication(client_context.client) - self.assertWriteOpsRaise(WriteConcern(w=client_context.w, wtimeout=1), - WTimeoutError) + self.assertWriteOpsRaise(WriteConcern(w=client_context.w, wtimeout=1), WTimeoutError) @client_context.require_failCommand_fail_point def test_error_includes_errInfo(self): @@ -176,21 +178,12 @@ def test_error_includes_errInfo(self): "code": 100, "codeName": "UnsatisfiableWriteConcern", "errmsg": "Not enough data-bearing nodes", - "errInfo": { - "writeConcern": { - "w": 2, - "wtimeout": 0, - "provenance": "clientSupplied" - } - } + "errInfo": {"writeConcern": {"w": 2, "wtimeout": 0, "provenance": "clientSupplied"}}, } cause_wce = { "configureFailPoint": "failCommand", "mode": {"times": 2}, - "data": { - "failCommands": ["insert"], - "writeConcernError": expected_wce - }, + "data": {"failCommands": ["insert"], "writeConcernError": expected_wce}, } with self.fail_point(cause_wce): # Write concern error on insert includes errInfo. @@ -202,10 +195,15 @@ def test_error_includes_errInfo(self): with self.assertRaises(BulkWriteError) as ctx: self.db.test.bulk_write([InsertOne({})]) expected_details = { - 'writeErrors': [], - 'writeConcernErrors': [expected_wce], - 'nInserted': 1, 'nUpserted': 0, 'nMatched': 0, 'nModified': 0, - 'nRemoved': 0, 'upserted': []} + "writeErrors": [], + "writeConcernErrors": [expected_wce], + "nInserted": 1, + "nUpserted": 0, + "nMatched": 0, + "nModified": 0, + "nRemoved": 0, + "upserted": [], + } self.assertEqual(ctx.exception.details, expected_details) @client_context.require_version_min(4, 9) @@ -218,15 +216,14 @@ def test_write_error_details_exposes_errinfo(self): validator = {"x": {"$type": "string"}} db.create_collection("test", validator=validator) with self.assertRaises(WriteError) as ctx: - db.test.insert_one({'x': 1}) + db.test.insert_one({"x": 1}) self.assertEqual(ctx.exception.code, 121) self.assertIsNotNone(ctx.exception.details) assert ctx.exception.details is not None - self.assertIsNotNone(ctx.exception.details.get('errInfo')) - for event in listener.results['succeeded']: - if event.command_name == 'insert': - self.assertEqual( - event.reply['writeErrors'][0], ctx.exception.details) + self.assertIsNotNone(ctx.exception.details.get("errInfo")) + for event in listener.results["succeeded"]: + if event.command_name == "insert": + self.assertEqual(event.reply["writeErrors"][0], ctx.exception.details) break else: self.fail("Couldn't find insert event.") @@ -235,77 +232,58 @@ def test_write_error_details_exposes_errinfo(self): def normalize_write_concern(concern): result = {} for key in concern: - if key.lower() == 'wtimeoutms': - result['wtimeout'] = concern[key] - elif key == 'journal': - result['j'] = concern[key] + if key.lower() == "wtimeoutms": + result["wtimeout"] = concern[key] + elif key == "journal": + result["j"] = concern[key] else: result[key] = concern[key] return result def create_connection_string_test(test_case): - def run_test(self): - uri = test_case['uri'] - valid = test_case['valid'] - warning = test_case['warning'] + uri = test_case["uri"] + valid = test_case["valid"] + warning = test_case["warning"] if not valid: if warning is False: - self.assertRaises( - (ConfigurationError, ValueError), - MongoClient, - uri, - connect=False) + self.assertRaises((ConfigurationError, ValueError), MongoClient, uri, connect=False) else: with warnings.catch_warnings(): - warnings.simplefilter('error', UserWarning) - self.assertRaises( - UserWarning, - MongoClient, - uri, - connect=False) + warnings.simplefilter("error", 
UserWarning)
+                self.assertRaises(UserWarning, MongoClient, uri, connect=False)
     else:
         client = MongoClient(uri, connect=False)
-        if 'writeConcern' in test_case:
+        if "writeConcern" in test_case:
             document = client.write_concern.document
-            self.assertEqual(
-                document,
-                normalize_write_concern(test_case['writeConcern']))
-        if 'readConcern' in test_case:
+            self.assertEqual(document, normalize_write_concern(test_case["writeConcern"]))
+        if "readConcern" in test_case:
             document = client.read_concern.document
-            self.assertEqual(document, test_case['readConcern'])
+            self.assertEqual(document, test_case["readConcern"])

     return run_test


 def create_document_test(test_case):
-
     def run_test(self):
-        valid = test_case['valid']
+        valid = test_case["valid"]

-        if 'writeConcern' in test_case:
-            normalized = normalize_write_concern(test_case['writeConcern'])
+        if "writeConcern" in test_case:
+            normalized = normalize_write_concern(test_case["writeConcern"])
             if not valid:
-                self.assertRaises(
-                    (ConfigurationError, ValueError),
-                    WriteConcern,
-                    **normalized)
+                self.assertRaises((ConfigurationError, ValueError), WriteConcern, **normalized)
             else:
                 write_concern = WriteConcern(**normalized)
-                self.assertEqual(
-                    write_concern.document, test_case['writeConcernDocument'])
-                self.assertEqual(
-                    write_concern.acknowledged, test_case['isAcknowledged'])
-                self.assertEqual(
-                    write_concern.is_server_default, test_case['isServerDefault'])
-        if 'readConcern' in test_case:
+                self.assertEqual(write_concern.document, test_case["writeConcernDocument"])
+                self.assertEqual(write_concern.acknowledged, test_case["isAcknowledged"])
+                self.assertEqual(write_concern.is_server_default, test_case["isServerDefault"])
+        if "readConcern" in test_case:
             # Any string for 'level' is equally valid
-            read_concern = ReadConcern(**test_case['readConcern'])
-            self.assertEqual(read_concern.document, test_case['readConcernDocument'])
-            self.assertEqual(
-                not bool(read_concern.level), test_case['isServerDefault'])
+            read_concern = ReadConcern(**test_case["readConcern"])
+            self.assertEqual(read_concern.document, test_case["readConcernDocument"])
+            self.assertEqual(not bool(read_concern.level), test_case["isServerDefault"])

     return run_test

@@ -314,25 +292,26 @@ def create_tests():
     for dirpath, _, filenames in os.walk(_TEST_PATH):
         dirname = os.path.split(dirpath)[-1]

-        if dirname == 'operation':
+        if dirname == "operation":
             # This directory is tested by TestOperations.
             continue
-        elif dirname == 'connection-string':
+        elif dirname == "connection-string":
             create_test = create_connection_string_test
         else:
             create_test = create_document_test

         for filename in filenames:
             with open(os.path.join(dirpath, filename)) as test_stream:
-                test_cases = json.load(test_stream)['tests']
+                test_cases = json.load(test_stream)["tests"]

             fname = os.path.splitext(filename)[0]
             for test_case in test_cases:
                 new_test = create_test(test_case)
-                test_name = 'test_%s_%s_%s' % (
-                    dirname.replace('-', '_'),
-                    fname.replace('-', '_'),
-                    str(test_case['description'].lower().replace(' ', '_')))
+                test_name = "test_%s_%s_%s" % (
+                    dirname.replace("-", "_"),
+                    fname.replace("-", "_"),
+                    str(test_case["description"].lower().replace(" ", "_")),
+                )

                 new_test.__name__ = test_name
                 setattr(TestReadWriteConcernSpec, new_test.__name__, new_test)
@@ -343,11 +322,11 @@ def create_tests():


 class TestOperation(SpecRunner):
     # Location of JSON test specifications.
- TEST_PATH = os.path.join(_TEST_PATH, 'operation') + TEST_PATH = os.path.join(_TEST_PATH, "operation") def get_outcome_coll_name(self, outcome, collection): """Spec says outcome has an optional 'collection.name'.""" - return outcome['collection'].get('name', collection.name) + return outcome["collection"].get("name", collection.name) def create_operation_test(scenario_def, test, name): @@ -358,10 +337,9 @@ def run_scenario(self): return run_scenario -test_creator = TestCreator( - create_operation_test, TestOperation, TestOperation.TEST_PATH) +test_creator = TestCreator(create_operation_test, TestOperation, TestOperation.TEST_PATH) test_creator.create_tests() -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() diff --git a/test/test_replica_set_reconfig.py b/test/test_replica_set_reconfig.py index f19a32ea4e..898be99d4d 100644 --- a/test/test_replica_set_reconfig.py +++ b/test/test_replica_set_reconfig.py @@ -18,12 +18,13 @@ sys.path[0:0] = [""] -from pymongo.errors import ConnectionFailure, ServerSelectionTimeoutError -from pymongo import ReadPreference -from test import unittest, client_context, client_knobs, MockClientTest +from test import MockClientTest, client_context, client_knobs, unittest from test.pymongo_mocks import MockClient from test.utils import wait_until +from pymongo import ReadPreference +from pymongo.errors import ConnectionFailure, ServerSelectionTimeoutError + @client_context.require_connection @client_context.require_no_load_balancer @@ -38,54 +39,53 @@ class TestSecondaryBecomesStandalone(MockClientTest): def test_client(self): c = MockClient( standalones=[], - members=['a:1', 'b:2', 'c:3'], + members=["a:1", "b:2", "c:3"], mongoses=[], - host='a:1,b:2,c:3', - replicaSet='rs', + host="a:1,b:2,c:3", + replicaSet="rs", serverSelectionTimeoutMS=100, - connect=False) + connect=False, + ) self.addCleanup(c.close) # C is brought up as a standalone. - c.mock_members.remove('c:3') - c.mock_standalones.append('c:3') + c.mock_members.remove("c:3") + c.mock_standalones.append("c:3") # Fail over. - c.kill_host('a:1') - c.kill_host('b:2') + c.kill_host("a:1") + c.kill_host("b:2") with self.assertRaises(ServerSelectionTimeoutError): - c.db.command('ping') + c.db.command("ping") self.assertEqual(c.address, None) # Client can still discover the primary node - c.revive_host('a:1') - wait_until(lambda: c.address is not None, 'connect to primary') - self.assertEqual(c.address, ('a', 1)) + c.revive_host("a:1") + wait_until(lambda: c.address is not None, "connect to primary") + self.assertEqual(c.address, ("a", 1)) def test_replica_set_client(self): c = MockClient( standalones=[], - members=['a:1', 'b:2', 'c:3'], + members=["a:1", "b:2", "c:3"], mongoses=[], - host='a:1,b:2,c:3', - replicaSet='rs') + host="a:1,b:2,c:3", + replicaSet="rs", + ) self.addCleanup(c.close) - wait_until(lambda: ('b', 2) in c.secondaries, - 'discover host "b"') + wait_until(lambda: ("b", 2) in c.secondaries, 'discover host "b"') - wait_until(lambda: ('c', 3) in c.secondaries, - 'discover host "c"') + wait_until(lambda: ("c", 3) in c.secondaries, 'discover host "c"') # C is brought up as a standalone. 
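These mock topology tests are inherently asynchronous: the client only notices the reconfigured members on a later round of monitor checks, so assertions poll via ``wait_until``. A simplified stand-in for that helper (a sketch, not the actual ``test.utils`` implementation)::

    import time

    def wait_until(predicate, description, timeout=10):
        # Poll until the predicate returns a truthy value or time runs out.
        deadline = time.monotonic() + timeout
        while time.monotonic() < deadline:
            result = predicate()
            if result:
                return result
            time.sleep(0.1)
        raise AssertionError("timed out waiting until %s" % (description,))

Usage mirrors the calls above, e.g. ``wait_until(lambda: ("b", 2) in c.secondaries, 'discover host "b"')``.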
- c.mock_members.remove('c:3') - c.mock_standalones.append('c:3') + c.mock_members.remove("c:3") + c.mock_standalones.append("c:3") - wait_until(lambda: set([('b', 2)]) == c.secondaries, - 'update the list of secondaries') + wait_until(lambda: set([("b", 2)]) == c.secondaries, "update the list of secondaries") - self.assertEqual(('a', 1), c.primary) + self.assertEqual(("a", 1), c.primary) class TestSecondaryRemoved(MockClientTest): @@ -94,21 +94,21 @@ class TestSecondaryRemoved(MockClientTest): def test_replica_set_client(self): c = MockClient( standalones=[], - members=['a:1', 'b:2', 'c:3'], + members=["a:1", "b:2", "c:3"], mongoses=[], - host='a:1,b:2,c:3', - replicaSet='rs') + host="a:1,b:2,c:3", + replicaSet="rs", + ) self.addCleanup(c.close) - wait_until(lambda: ('b', 2) in c.secondaries, 'discover host "b"') - wait_until(lambda: ('c', 3) in c.secondaries, 'discover host "c"') + wait_until(lambda: ("b", 2) in c.secondaries, 'discover host "b"') + wait_until(lambda: ("c", 3) in c.secondaries, 'discover host "c"') # C is removed. - c.mock_hello_hosts.remove('c:3') - wait_until(lambda: set([('b', 2)]) == c.secondaries, - 'update list of secondaries') + c.mock_hello_hosts.remove("c:3") + wait_until(lambda: set([("b", 2)]) == c.secondaries, "update list of secondaries") - self.assertEqual(('a', 1), c.primary) + self.assertEqual(("a", 1), c.primary) class TestSocketError(MockClientTest): @@ -117,21 +117,22 @@ def test_socket_error_marks_member_down(self): with client_knobs(heartbeat_frequency=999999): c = MockClient( standalones=[], - members=['a:1', 'b:2'], + members=["a:1", "b:2"], mongoses=[], - host='a:1', - replicaSet='rs', - serverSelectionTimeoutMS=100) + host="a:1", + replicaSet="rs", + serverSelectionTimeoutMS=100, + ) self.addCleanup(c.close) - wait_until(lambda: len(c.nodes) == 2, 'discover both nodes') + wait_until(lambda: len(c.nodes) == 2, "discover both nodes") # b now raises socket.error. - c.mock_down_hosts.append('b:2') + c.mock_down_hosts.append("b:2") self.assertRaises( ConnectionFailure, - c.db.collection.with_options( - read_preference=ReadPreference.SECONDARY).find_one) + c.db.collection.with_options(read_preference=ReadPreference.SECONDARY).find_one, + ) self.assertEqual(1, len(c.nodes)) @@ -139,51 +140,44 @@ def test_socket_error_marks_member_down(self): class TestSecondaryAdded(MockClientTest): def test_client(self): c = MockClient( - standalones=[], - members=['a:1', 'b:2'], - mongoses=[], - host='a:1', - replicaSet='rs') + standalones=[], members=["a:1", "b:2"], mongoses=[], host="a:1", replicaSet="rs" + ) self.addCleanup(c.close) - wait_until(lambda: len(c.nodes) == 2, 'discover both nodes') + wait_until(lambda: len(c.nodes) == 2, "discover both nodes") # MongoClient connects to primary by default. - self.assertEqual(c.address, ('a', 1)) - self.assertEqual(set([('a', 1), ('b', 2)]), c.nodes) + self.assertEqual(c.address, ("a", 1)) + self.assertEqual(set([("a", 1), ("b", 2)]), c.nodes) # C is added. 
- c.mock_members.append('c:3') - c.mock_hello_hosts.append('c:3') + c.mock_members.append("c:3") + c.mock_hello_hosts.append("c:3") - c.db.command('ping') + c.db.command("ping") - self.assertEqual(c.address, ('a', 1)) + self.assertEqual(c.address, ("a", 1)) - wait_until(lambda: set([('a', 1), ('b', 2), ('c', 3)]) == c.nodes, - 'reconnect to both secondaries') + wait_until( + lambda: set([("a", 1), ("b", 2), ("c", 3)]) == c.nodes, "reconnect to both secondaries" + ) def test_replica_set_client(self): c = MockClient( - standalones=[], - members=['a:1', 'b:2'], - mongoses=[], - host='a:1', - replicaSet='rs') + standalones=[], members=["a:1", "b:2"], mongoses=[], host="a:1", replicaSet="rs" + ) self.addCleanup(c.close) - wait_until(lambda: ('a', 1) == c.primary, 'discover the primary') - wait_until(lambda: set([('b', 2)]) == c.secondaries, - 'discover the secondary') + wait_until(lambda: ("a", 1) == c.primary, "discover the primary") + wait_until(lambda: set([("b", 2)]) == c.secondaries, "discover the secondary") # C is added. - c.mock_members.append('c:3') - c.mock_hello_hosts.append('c:3') + c.mock_members.append("c:3") + c.mock_hello_hosts.append("c:3") - wait_until(lambda: set([('b', 2), ('c', 3)]) == c.secondaries, - 'discover the new secondary') + wait_until(lambda: set([("b", 2), ("c", 3)]) == c.secondaries, "discover the new secondary") - self.assertEqual(('a', 1), c.primary) + self.assertEqual(("a", 1), c.primary) if __name__ == "__main__": diff --git a/test/test_retryable_reads.py b/test/test_retryable_reads.py index c4c093f66f..808477a8c0 100644 --- a/test/test_retryable_reads.py +++ b/test/test_retryable_reads.py @@ -21,28 +21,32 @@ sys.path[0:0] = [""] -from pymongo.mongo_client import MongoClient -from pymongo.monitoring import (ConnectionCheckedOutEvent, - ConnectionCheckOutFailedEvent, - ConnectionCheckOutFailedReason, - PoolClearedEvent) -from pymongo.write_concern import WriteConcern - -from test import (client_context, - client_knobs, - IntegrationTest, - PyMongoTestCase, - unittest) -from test.utils import (CMAPListener, - OvertCommandListener, - rs_or_single_client, - TestCreator) +from test import ( + IntegrationTest, + PyMongoTestCase, + client_context, + client_knobs, + unittest, +) +from test.utils import ( + CMAPListener, + OvertCommandListener, + TestCreator, + rs_or_single_client, +) from test.utils_spec_runner import SpecRunner +from pymongo.mongo_client import MongoClient +from pymongo.monitoring import ( + ConnectionCheckedOutEvent, + ConnectionCheckOutFailedEvent, + ConnectionCheckOutFailedReason, + PoolClearedEvent, +) +from pymongo.write_concern import WriteConcern # Location of JSON test specifications. 
-_TEST_PATH = os.path.join( - os.path.dirname(os.path.realpath(__file__)), 'retryable_reads') +_TEST_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "retryable_reads") class TestClientOptions(PyMongoTestCase): @@ -57,9 +61,9 @@ def test_kwargs(self): self.assertEqual(client.options.retry_reads, False) def test_uri(self): - client = MongoClient('mongodb://h/?retryReads=true', connect=False) + client = MongoClient("mongodb://h/?retryReads=true", connect=False) self.assertEqual(client.options.retry_reads, True) - client = MongoClient('mongodb://h/?retryReads=false', connect=False) + client = MongoClient("mongodb://h/?retryReads=false", connect=False) self.assertEqual(client.options.retry_reads, False) @@ -76,51 +80,49 @@ def setUpClass(cls): def maybe_skip_scenario(self, test): super(TestSpec, self).maybe_skip_scenario(test) - skip_names = [ - 'listCollectionObjects', 'listIndexNames', 'listDatabaseObjects'] + skip_names = ["listCollectionObjects", "listIndexNames", "listDatabaseObjects"] for name in skip_names: - if name.lower() in test['description'].lower(): - self.skipTest('PyMongo does not support %s' % (name,)) + if name.lower() in test["description"].lower(): + self.skipTest("PyMongo does not support %s" % (name,)) # Serverless does not support $out and collation. if client_context.serverless: - for operation in test['operations']: - if operation['name'] == 'aggregate': - for stage in operation['arguments']['pipeline']: + for operation in test["operations"]: + if operation["name"] == "aggregate": + for stage in operation["arguments"]["pipeline"]: if "$out" in stage: - self.skipTest( - "MongoDB Serverless does not support $out") - if "collation" in operation['arguments']: - self.skipTest( - "MongoDB Serverless does not support collations") + self.skipTest("MongoDB Serverless does not support $out") + if "collation" in operation["arguments"]: + self.skipTest("MongoDB Serverless does not support collations") # Skip changeStream related tests on MMAPv1 and serverless. - test_name = self.id().rsplit('.')[-1] - if 'changestream' in test_name.lower(): - if client_context.storage_engine == 'mmapv1': + test_name = self.id().rsplit(".")[-1] + if "changestream" in test_name.lower(): + if client_context.storage_engine == "mmapv1": self.skipTest("MMAPv1 does not support change streams.") if client_context.serverless: self.skipTest("Serverless does not support change streams.") def get_scenario_coll_name(self, scenario_def): """Override a test's collection name to support GridFS tests.""" - if 'bucket_name' in scenario_def: - return scenario_def['bucket_name'] + if "bucket_name" in scenario_def: + return scenario_def["bucket_name"] return super(TestSpec, self).get_scenario_coll_name(scenario_def) def setup_scenario(self, scenario_def): """Override a test's setup to support GridFS tests.""" - if 'bucket_name' in scenario_def: + if "bucket_name" in scenario_def: db_name = self.get_scenario_db_name(scenario_def) db = client_context.client.get_database( - db_name, write_concern=WriteConcern(w='majority')) + db_name, write_concern=WriteConcern(w="majority") + ) # Create a bucket for the retryable reads GridFS tests. client_context.client.drop_database(db_name) - if scenario_def['data']: - data = scenario_def['data'] + if scenario_def["data"]: + data = scenario_def["data"] # Load data. 
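Retryable reads themselves are controlled by a single client option; as TestClientOptions verifies above, the keyword argument and the URI option are interchangeable and the feature defaults to on. For example::

    from pymongo import MongoClient

    c1 = MongoClient("mongodb://localhost/?retryReads=false", connect=False)
    c2 = MongoClient("localhost", retryReads=False, connect=False)
    assert c1.options.retry_reads is False
    assert c2.options.retry_reads is False
    # With no option supplied, retryable reads are enabled.
    assert MongoClient("localhost", connect=False).options.retry_reads is True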
- db['fs.chunks'].insert_many(data['fs.chunks']) - db['fs.files'].insert_many(data['fs.files']) + db["fs.chunks"].insert_many(data["fs.chunks"]) + db["fs.files"].insert_many(data["fs.files"]) else: super(TestSpec, self).setup_scenario(scenario_def) @@ -155,25 +157,23 @@ class TestPoolPausedError(IntegrationTest): RUN_ON_SERVERLESS = False @client_context.require_failCommand_blockConnection - @client_knobs(heartbeat_frequency=.05, min_heartbeat_interval=.05) + @client_knobs(heartbeat_frequency=0.05, min_heartbeat_interval=0.05) def test_pool_paused_error_is_retryable(self): cmap_listener = CMAPListener() cmd_listener = OvertCommandListener() - client = rs_or_single_client( - maxPoolSize=1, - event_listeners=[cmap_listener, cmd_listener]) + client = rs_or_single_client(maxPoolSize=1, event_listeners=[cmap_listener, cmd_listener]) self.addCleanup(client.close) for _ in range(10): cmap_listener.reset() cmd_listener.reset() threads = [FindThread(client.pymongo_test.test) for _ in range(2)] fail_command = { - 'mode': {'times': 1}, - 'data': { - 'failCommands': ['find'], - 'blockConnection': True, - 'blockTimeMS': 1000, - 'errorCode': 91, + "mode": {"times": 1}, + "data": { + "failCommands": ["find"], + "blockConnection": True, + "blockTimeMS": 1000, + "errorCode": 91, }, } with self.fail_point(fail_command): @@ -192,29 +192,25 @@ def test_pool_paused_error_is_retryable(self): break # Via CMAP monitoring, assert that the first check out succeeds. - cmap_events = cmap_listener.events_by_type(( - ConnectionCheckedOutEvent, - ConnectionCheckOutFailedEvent, - PoolClearedEvent)) + cmap_events = cmap_listener.events_by_type( + (ConnectionCheckedOutEvent, ConnectionCheckOutFailedEvent, PoolClearedEvent) + ) msg = pprint.pformat(cmap_listener.events) self.assertIsInstance(cmap_events[0], ConnectionCheckedOutEvent, msg) self.assertIsInstance(cmap_events[1], PoolClearedEvent, msg) - self.assertIsInstance( - cmap_events[2], ConnectionCheckOutFailedEvent, msg) - self.assertEqual(cmap_events[2].reason, - ConnectionCheckOutFailedReason.CONN_ERROR, - msg) + self.assertIsInstance(cmap_events[2], ConnectionCheckOutFailedEvent, msg) + self.assertEqual(cmap_events[2].reason, ConnectionCheckOutFailedReason.CONN_ERROR, msg) self.assertIsInstance(cmap_events[3], ConnectionCheckedOutEvent, msg) # Connection check out failures are not reflected in command # monitoring because we only publish command events _after_ checking # out a connection. 
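Because a command event is only published after a connection has been checked out, the failed check-out above leaves no trace in command monitoring, which is why the counts asserted next are 3 started, 2 succeeded, 1 failed. A minimal command listener shows the counting pattern (a sketch; the test suite uses its own ``OvertCommandListener``)::

    from pymongo import MongoClient, monitoring

    class CommandCounter(monitoring.CommandListener):
        def __init__(self):
            self.counts = {"started": 0, "succeeded": 0, "failed": 0}

        def started(self, event):
            self.counts["started"] += 1

        def succeeded(self, event):
            self.counts["succeeded"] += 1

        def failed(self, event):
            self.counts["failed"] += 1

    counter = CommandCounter()
    client = MongoClient(event_listeners=[counter])
    client.admin.command("ping")
    assert counter.counts == {"started": 1, "succeeded": 1, "failed": 0}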
- started = cmd_listener.results['started'] + started = cmd_listener.results["started"] msg = pprint.pformat(cmd_listener.results) self.assertEqual(3, len(started), msg) - succeeded = cmd_listener.results['succeeded'] + succeeded = cmd_listener.results["succeeded"] self.assertEqual(2, len(succeeded), msg) - failed = cmd_listener.results['failed'] + failed = cmd_listener.results["failed"] self.assertEqual(1, len(failed), msg) diff --git a/test/test_retryable_writes.py b/test/test_retryable_writes.py index c4a401428a..0eb863f4cf 100644 --- a/test/test_retryable_writes.py +++ b/test/test_retryable_writes.py @@ -22,45 +22,46 @@ sys.path[0:0] = [""] +from test import IntegrationTest, SkipTest, client_context, client_knobs, unittest +from test.utils import ( + CMAPListener, + DeprecationFilter, + OvertCommandListener, + TestCreator, + rs_or_single_client, +) +from test.utils_spec_runner import SpecRunner +from test.version import Version + from bson.codec_options import DEFAULT_CODEC_OPTIONS from bson.int64 import Int64 from bson.raw_bson import RawBSONDocument from bson.son import SON - - -from pymongo.errors import (ConnectionFailure, - OperationFailure, - ServerSelectionTimeoutError, - WriteConcernError) +from pymongo.errors import ( + ConnectionFailure, + OperationFailure, + ServerSelectionTimeoutError, + WriteConcernError, +) from pymongo.mongo_client import MongoClient -from pymongo.monitoring import (ConnectionCheckedOutEvent, - ConnectionCheckOutFailedEvent, - ConnectionCheckOutFailedReason, - PoolClearedEvent) -from pymongo.operations import (InsertOne, - DeleteMany, - DeleteOne, - ReplaceOne, - UpdateMany, - UpdateOne) +from pymongo.monitoring import ( + ConnectionCheckedOutEvent, + ConnectionCheckOutFailedEvent, + ConnectionCheckOutFailedReason, + PoolClearedEvent, +) +from pymongo.operations import ( + DeleteMany, + DeleteOne, + InsertOne, + ReplaceOne, + UpdateMany, + UpdateOne, +) from pymongo.write_concern import WriteConcern -from test import (client_context, - client_knobs, - IntegrationTest, - SkipTest, - unittest) -from test.utils import (CMAPListener, - DeprecationFilter, - OvertCommandListener, - rs_or_single_client, - TestCreator) -from test.utils_spec_runner import SpecRunner -from test.version import Version - # Location of JSON test specifications. -_TEST_PATH = os.path.join( - os.path.dirname(os.path.realpath(__file__)), 'retryable_writes', 'legacy') +_TEST_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "retryable_writes", "legacy") class TestAllScenarios(SpecRunner): @@ -68,23 +69,23 @@ class TestAllScenarios(SpecRunner): RUN_ON_SERVERLESS = True def get_object_name(self, op): - return op.get('object', 'collection') + return op.get("object", "collection") def get_scenario_db_name(self, scenario_def): - return scenario_def.get('database_name', 'pymongo_test') + return scenario_def.get("database_name", "pymongo_test") def get_scenario_coll_name(self, scenario_def): - return scenario_def.get('collection_name', 'test') + return scenario_def.get("collection_name", "test") def run_test_ops(self, sessions, collection, test): # Transform retryable writes spec format into transactions. 
- operation = test['operation'] - outcome = test['outcome'] - if 'error' in outcome: - operation['error'] = outcome['error'] - if 'result' in outcome: - operation['result'] = outcome['result'] - test['operations'] = [operation] + operation = test["operation"] + outcome = test["outcome"] + if "error" in outcome: + operation["error"] = outcome["error"] + if "result" in outcome: + operation["result"] = outcome["result"] + test["operations"] = [operation] super(TestAllScenarios, self).run_test_ops(sessions, collection, test) @@ -96,6 +97,7 @@ def run_scenario(self): return run_scenario + test_creator = TestCreator(create_test, TestAllScenarios, _TEST_PATH) test_creator.create_tests() @@ -103,31 +105,36 @@ def run_scenario(self): def retryable_single_statement_ops(coll): return [ (coll.bulk_write, [[InsertOne({}), InsertOne({})]], {}), - (coll.bulk_write, [[InsertOne({}), - InsertOne({})]], {'ordered': False}), + (coll.bulk_write, [[InsertOne({}), InsertOne({})]], {"ordered": False}), (coll.bulk_write, [[ReplaceOne({}, {})]], {}), (coll.bulk_write, [[ReplaceOne({}, {}), ReplaceOne({}, {})]], {}), - (coll.bulk_write, [[UpdateOne({}, {'$set': {'a': 1}}), - UpdateOne({}, {'$set': {'a': 1}})]], {}), + ( + coll.bulk_write, + [[UpdateOne({}, {"$set": {"a": 1}}), UpdateOne({}, {"$set": {"a": 1}})]], + {}, + ), (coll.bulk_write, [[DeleteOne({})]], {}), (coll.bulk_write, [[DeleteOne({}), DeleteOne({})]], {}), (coll.insert_one, [{}], {}), (coll.insert_many, [[{}, {}]], {}), (coll.replace_one, [{}, {}], {}), - (coll.update_one, [{}, {'$set': {'a': 1}}], {}), + (coll.update_one, [{}, {"$set": {"a": 1}}], {}), (coll.delete_one, [{}], {}), - (coll.find_one_and_replace, [{}, {'a': 3}], {}), - (coll.find_one_and_update, [{}, {'$set': {'a': 1}}], {}), + (coll.find_one_and_replace, [{}, {"a": 3}], {}), + (coll.find_one_and_update, [{}, {"$set": {"a": 1}}], {}), (coll.find_one_and_delete, [{}, {}], {}), ] def non_retryable_single_statement_ops(coll): return [ - (coll.bulk_write, [[UpdateOne({}, {'$set': {'a': 1}}), - UpdateMany({}, {'$set': {'a': 1}})]], {}), + ( + coll.bulk_write, + [[UpdateOne({}, {"$set": {"a": 1}}), UpdateMany({}, {"$set": {"a": 1}})]], + {}, + ), (coll.bulk_write, [[DeleteOne({}), DeleteMany({})]], {}), - (coll.update_many, [{}, {'$set': {'a': 1}}], {}), + (coll.update_many, [{}, {"$set": {"a": 1}}], {}), (coll.delete_many, [{}], {}), ] @@ -155,8 +162,7 @@ class TestRetryableWritesMMAPv1(IgnoreDeprecationsTest): def setUpClass(cls): super(TestRetryableWritesMMAPv1, cls).setUpClass() # Speed up the tests by decreasing the heartbeat frequency. - cls.knobs = client_knobs(heartbeat_frequency=0.1, - min_heartbeat_interval=0.1) + cls.knobs = client_knobs(heartbeat_frequency=0.1, min_heartbeat_interval=0.1) cls.knobs.enable() cls.client = rs_or_single_client(retryWrites=True) cls.db = cls.client.pymongo_test @@ -169,14 +175,15 @@ def tearDownClass(cls): @client_context.require_no_standalone def test_actionable_error_message(self): - if client_context.storage_engine != 'mmapv1': - raise SkipTest('This cluster is not running MMAPv1') - - expected_msg = ("This MongoDB deployment does not support retryable " - "writes. Please add retryWrites=false to your " - "connection string.") - for method, args, kwargs in retryable_single_statement_ops( - self.db.retryable_write_test): + if client_context.storage_engine != "mmapv1": + raise SkipTest("This cluster is not running MMAPv1") + + expected_msg = ( + "This MongoDB deployment does not support retryable " + "writes. 
Please add retryWrites=false to your " + "connection string." + ) + for method, args, kwargs in retryable_single_statement_ops(self.db.retryable_write_test): with self.assertRaisesRegex(OperationFailure, expected_msg): method(*args, **kwargs) @@ -190,12 +197,10 @@ class TestRetryableWrites(IgnoreDeprecationsTest): def setUpClass(cls): super(TestRetryableWrites, cls).setUpClass() # Speed up the tests by decreasing the heartbeat frequency. - cls.knobs = client_knobs(heartbeat_frequency=0.1, - min_heartbeat_interval=0.1) + cls.knobs = client_knobs(heartbeat_frequency=0.1, min_heartbeat_interval=0.1) cls.knobs.enable() cls.listener = OvertCommandListener() - cls.client = rs_or_single_client( - retryWrites=True, event_listeners=[cls.listener]) + cls.client = rs_or_single_client(retryWrites=True, event_listeners=[cls.listener]) cls.db = cls.client.pymongo_test @classmethod @@ -206,117 +211,123 @@ def tearDownClass(cls): def setUp(self): if client_context.is_rs and client_context.test_commands_enabled: - self.client.admin.command(SON([ - ('configureFailPoint', 'onPrimaryTransactionalWrite'), - ('mode', 'alwaysOn')])) + self.client.admin.command( + SON([("configureFailPoint", "onPrimaryTransactionalWrite"), ("mode", "alwaysOn")]) + ) def tearDown(self): if client_context.is_rs and client_context.test_commands_enabled: - self.client.admin.command(SON([ - ('configureFailPoint', 'onPrimaryTransactionalWrite'), - ('mode', 'off')])) + self.client.admin.command( + SON([("configureFailPoint", "onPrimaryTransactionalWrite"), ("mode", "off")]) + ) def test_supported_single_statement_no_retry(self): listener = OvertCommandListener() - client = rs_or_single_client( - retryWrites=False, event_listeners=[listener]) + client = rs_or_single_client(retryWrites=False, event_listeners=[listener]) self.addCleanup(client.close) - for method, args, kwargs in retryable_single_statement_ops( - client.db.retryable_write_test): - msg = '%s(*%r, **%r)' % (method.__name__, args, kwargs) + for method, args, kwargs in retryable_single_statement_ops(client.db.retryable_write_test): + msg = "%s(*%r, **%r)" % (method.__name__, args, kwargs) listener.results.clear() method(*args, **kwargs) - for event in listener.results['started']: + for event in listener.results["started"]: self.assertNotIn( - 'txnNumber', event.command, - '%s sent txnNumber with %s' % (msg, event.command_name)) + "txnNumber", + event.command, + "%s sent txnNumber with %s" % (msg, event.command_name), + ) @client_context.require_no_standalone def test_supported_single_statement_supported_cluster(self): - for method, args, kwargs in retryable_single_statement_ops( - self.db.retryable_write_test): - msg = '%s(*%r, **%r)' % (method.__name__, args, kwargs) + for method, args, kwargs in retryable_single_statement_ops(self.db.retryable_write_test): + msg = "%s(*%r, **%r)" % (method.__name__, args, kwargs) self.listener.results.clear() method(*args, **kwargs) - commands_started = self.listener.results['started'] - self.assertEqual(len(self.listener.results['succeeded']), 1, msg) + commands_started = self.listener.results["started"] + self.assertEqual(len(self.listener.results["succeeded"]), 1, msg) first_attempt = commands_started[0] self.assertIn( - 'lsid', first_attempt.command, - '%s sent no lsid with %s' % (msg, first_attempt.command_name)) - initial_session_id = first_attempt.command['lsid'] + "lsid", + first_attempt.command, + "%s sent no lsid with %s" % (msg, first_attempt.command_name), + ) + initial_session_id = first_attempt.command["lsid"] self.assertIn( - 
'txnNumber', first_attempt.command, - '%s sent no txnNumber with %s' % ( - msg, first_attempt.command_name)) + "txnNumber", + first_attempt.command, + "%s sent no txnNumber with %s" % (msg, first_attempt.command_name), + ) # There should be no retry when the failpoint is not active. - if (client_context.is_mongos or - not client_context.test_commands_enabled): + if client_context.is_mongos or not client_context.test_commands_enabled: self.assertEqual(len(commands_started), 1) continue - initial_transaction_id = first_attempt.command['txnNumber'] + initial_transaction_id = first_attempt.command["txnNumber"] retry_attempt = commands_started[1] self.assertIn( - 'lsid', retry_attempt.command, - '%s sent no lsid with %s' % (msg, first_attempt.command_name)) - self.assertEqual( - retry_attempt.command['lsid'], initial_session_id, msg) + "lsid", + retry_attempt.command, + "%s sent no lsid with %s" % (msg, first_attempt.command_name), + ) + self.assertEqual(retry_attempt.command["lsid"], initial_session_id, msg) self.assertIn( - 'txnNumber', retry_attempt.command, - '%s sent no txnNumber with %s' % ( - msg, first_attempt.command_name)) - self.assertEqual(retry_attempt.command['txnNumber'], - initial_transaction_id, msg) + "txnNumber", + retry_attempt.command, + "%s sent no txnNumber with %s" % (msg, first_attempt.command_name), + ) + self.assertEqual(retry_attempt.command["txnNumber"], initial_transaction_id, msg) def test_supported_single_statement_unsupported_cluster(self): if client_context.is_rs or client_context.is_mongos: - raise SkipTest('This cluster supports retryable writes') + raise SkipTest("This cluster supports retryable writes") - for method, args, kwargs in retryable_single_statement_ops( - self.db.retryable_write_test): - msg = '%s(*%r, **%r)' % (method.__name__, args, kwargs) + for method, args, kwargs in retryable_single_statement_ops(self.db.retryable_write_test): + msg = "%s(*%r, **%r)" % (method.__name__, args, kwargs) self.listener.results.clear() method(*args, **kwargs) - for event in self.listener.results['started']: + for event in self.listener.results["started"]: self.assertNotIn( - 'txnNumber', event.command, - '%s sent txnNumber with %s' % (msg, event.command_name)) + "txnNumber", + event.command, + "%s sent txnNumber with %s" % (msg, event.command_name), + ) def test_unsupported_single_statement(self): coll = self.db.retryable_write_test coll.insert_many([{}, {}]) coll_w0 = coll.with_options(write_concern=WriteConcern(w=0)) - for method, args, kwargs in (non_retryable_single_statement_ops(coll) + - retryable_single_statement_ops(coll_w0)): - msg = '%s(*%r, **%r)' % (method.__name__, args, kwargs) + for method, args, kwargs in non_retryable_single_statement_ops( + coll + ) + retryable_single_statement_ops(coll_w0): + msg = "%s(*%r, **%r)" % (method.__name__, args, kwargs) self.listener.results.clear() method(*args, **kwargs) - started_events = self.listener.results['started'] - self.assertEqual(len(self.listener.results['succeeded']), - len(started_events), msg) - self.assertEqual(len(self.listener.results['failed']), 0, msg) + started_events = self.listener.results["started"] + self.assertEqual(len(self.listener.results["succeeded"]), len(started_events), msg) + self.assertEqual(len(self.listener.results["failed"]), 0, msg) for event in started_events: self.assertNotIn( - 'txnNumber', event.command, - '%s sent txnNumber with %s' % (msg, event.command_name)) + "txnNumber", + event.command, + "%s sent txnNumber with %s" % (msg, event.command_name), + ) def 
test_server_selection_timeout_not_retried(self): """A ServerSelectionTimeoutError is not retried.""" listener = OvertCommandListener() client = MongoClient( - 'somedomainthatdoesntexist.org', + "somedomainthatdoesntexist.org", serverSelectionTimeoutMS=1, - retryWrites=True, event_listeners=[listener]) - for method, args, kwargs in retryable_single_statement_ops( - client.db.retryable_write_test): - msg = '%s(*%r, **%r)' % (method.__name__, args, kwargs) + retryWrites=True, + event_listeners=[listener], + ) + for method, args, kwargs in retryable_single_statement_ops(client.db.retryable_write_test): + msg = "%s(*%r, **%r)" % (method.__name__, args, kwargs) listener.results.clear() with self.assertRaises(ServerSelectionTimeoutError, msg=msg): method(*args, **kwargs) - self.assertEqual(len(listener.results['started']), 0, msg) + self.assertEqual(len(listener.results["started"]), 0, msg) @client_context.require_replica_set @client_context.require_test_commands @@ -325,8 +336,7 @@ def test_retry_timeout_raises_original_error(self): original error. """ listener = OvertCommandListener() - client = rs_or_single_client( - retryWrites=True, event_listeners=[listener]) + client = rs_or_single_client(retryWrites=True, event_listeners=[listener]) self.addCleanup(client.close) topology = client._topology select_server = topology.select_server @@ -335,43 +345,44 @@ def mock_select_server(*args, **kwargs): server = select_server(*args, **kwargs) def raise_error(*args, **kwargs): - raise ServerSelectionTimeoutError( - 'No primary available for writes') + raise ServerSelectionTimeoutError("No primary available for writes") + # Raise ServerSelectionTimeout on the retry attempt. topology.select_server = raise_error return server - for method, args, kwargs in retryable_single_statement_ops( - client.db.retryable_write_test): - msg = '%s(*%r, **%r)' % (method.__name__, args, kwargs) + for method, args, kwargs in retryable_single_statement_ops(client.db.retryable_write_test): + msg = "%s(*%r, **%r)" % (method.__name__, args, kwargs) listener.results.clear() topology.select_server = mock_select_server with self.assertRaises(ConnectionFailure, msg=msg): method(*args, **kwargs) - self.assertEqual(len(listener.results['started']), 1, msg) + self.assertEqual(len(listener.results["started"]), 1, msg) @client_context.require_replica_set @client_context.require_test_commands def test_batch_splitting(self): """Test retry succeeds after failures during batch splitting.""" - large = 's' * 1024 * 1024 * 15 + large = "s" * 1024 * 1024 * 15 coll = self.db.retryable_write_test coll.delete_many({}) self.listener.results.clear() - bulk_result = coll.bulk_write([ - InsertOne({'_id': 1, 'l': large}), - InsertOne({'_id': 2, 'l': large}), - InsertOne({'_id': 3, 'l': large}), - UpdateOne({'_id': 1, 'l': large}, - {'$unset': {'l': 1}, '$inc': {'count': 1}}), - UpdateOne({'_id': 2, 'l': large}, {'$set': {'foo': 'bar'}}), - DeleteOne({'l': large}), - DeleteOne({'l': large})]) + bulk_result = coll.bulk_write( + [ + InsertOne({"_id": 1, "l": large}), + InsertOne({"_id": 2, "l": large}), + InsertOne({"_id": 3, "l": large}), + UpdateOne({"_id": 1, "l": large}, {"$unset": {"l": 1}, "$inc": {"count": 1}}), + UpdateOne({"_id": 2, "l": large}, {"$set": {"foo": "bar"}}), + DeleteOne({"l": large}), + DeleteOne({"l": large}), + ] + ) # Each command should fail and be retried. # With OP_MSG 3 inserts are one batch. 2 updates another. # 2 deletes a third. 
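A back-of-the-envelope check of the attempt count asserted next, assuming the server's usual limits (16 MiB ``maxBsonObjectSize``, 48 MiB ``maxMessageSizeBytes``)::

    MiB = 1024 * 1024
    doc_size = 15 * MiB       # each document in the bulk_write above
    max_message = 48 * MiB    # typical maxMessageSizeBytes

    # Ordered bulk writes group consecutive operations of the same type and
    # split each group to fit the message limit: the three 15 MiB inserts
    # (45 MiB) fit in one OP_MSG, the two updates in a second, the two
    # deletes in a third.
    assert max_message // doc_size == 3

    batches = 3               # inserts | updates | deletes
    attempts = batches * 2    # the fail point fails each batch exactly once
    assert attempts == 6      # hence six "started" command events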
- self.assertEqual(len(self.listener.results['started']), 6) - self.assertEqual(coll.find_one(), {'_id': 1, 'count': 1}) + self.assertEqual(len(self.listener.results["started"]), 6) + self.assertEqual(coll.find_one(), {"_id": 1, "count": 1}) # Assert the final result expected_result = { "writeErrors": [], @@ -389,42 +400,51 @@ def test_batch_splitting(self): @client_context.require_test_commands def test_batch_splitting_retry_fails(self): """Test retry fails during batch splitting.""" - large = 's' * 1024 * 1024 * 15 + large = "s" * 1024 * 1024 * 15 coll = self.db.retryable_write_test coll.delete_many({}) - self.client.admin.command(SON([ - ('configureFailPoint', 'onPrimaryTransactionalWrite'), - ('mode', {'skip': 3}), # The number of _documents_ to skip. - ('data', {'failBeforeCommitExceptionCode': 1})])) + self.client.admin.command( + SON( + [ + ("configureFailPoint", "onPrimaryTransactionalWrite"), + ("mode", {"skip": 3}), # The number of _documents_ to skip. + ("data", {"failBeforeCommitExceptionCode": 1}), + ] + ) + ) self.listener.results.clear() with self.client.start_session() as session: initial_txn = session._server_session._transaction_id try: - coll.bulk_write([InsertOne({'_id': 1, 'l': large}), - InsertOne({'_id': 2, 'l': large}), - InsertOne({'_id': 3, 'l': large}), - InsertOne({'_id': 4, 'l': large})], - session=session) + coll.bulk_write( + [ + InsertOne({"_id": 1, "l": large}), + InsertOne({"_id": 2, "l": large}), + InsertOne({"_id": 3, "l": large}), + InsertOne({"_id": 4, "l": large}), + ], + session=session, + ) except ConnectionFailure: pass else: self.fail("bulk_write should have failed") - started = self.listener.results['started'] + started = self.listener.results["started"] self.assertEqual(len(started), 3) - self.assertEqual(len(self.listener.results['succeeded']), 1) + self.assertEqual(len(self.listener.results["succeeded"]), 1) expected_txn = Int64(initial_txn + 1) - self.assertEqual(started[0].command['txnNumber'], expected_txn) - self.assertEqual(started[0].command['lsid'], session.session_id) + self.assertEqual(started[0].command["txnNumber"], expected_txn) + self.assertEqual(started[0].command["lsid"], session.session_id) expected_txn = Int64(initial_txn + 2) - self.assertEqual(started[1].command['txnNumber'], expected_txn) - self.assertEqual(started[1].command['lsid'], session.session_id) - started[1].command.pop('$clusterTime') - started[2].command.pop('$clusterTime') + self.assertEqual(started[1].command["txnNumber"], expected_txn) + self.assertEqual(started[1].command["lsid"], session.session_id) + started[1].command.pop("$clusterTime") + started[2].command.pop("$clusterTime") self.assertEqual(started[1].command, started[2].command) final_txn = session._server_session._transaction_id self.assertEqual(final_txn, expected_txn) - self.assertEqual(coll.find_one(projection={'_id': True}), {'_id': 1}) + self.assertEqual(coll.find_one(projection={"_id": True}), {"_id": 1}) class TestWriteConcernError(IntegrationTest): @@ -439,20 +459,18 @@ class TestWriteConcernError(IntegrationTest): def setUpClass(cls): super(TestWriteConcernError, cls).setUpClass() cls.fail_insert = { - 'configureFailPoint': 'failCommand', - 'mode': {'times': 2}, - 'data': { - 'failCommands': ['insert'], - 'writeConcernError': { - 'code': 91, - 'errmsg': 'Replication is being shut down'}, - }} + "configureFailPoint": "failCommand", + "mode": {"times": 2}, + "data": { + "failCommands": ["insert"], + "writeConcernError": {"code": 91, "errmsg": "Replication is being shut down"}, + }, + } 
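The ``fail_insert`` document above arms the server's ``failCommand`` fail point so that the next two ``insert`` commands, the initial attempt and its retry, both return a write concern error with code 91, which the driver (or, on 4.4+, the server) tags with the ``RetryableWriteError`` label. A standalone sketch of the same flow, assuming a replica set with test commands enabled::

    from pymongo import MongoClient
    from pymongo.errors import WriteConcernError

    client = MongoClient(retryWrites=True)  # assumes a local replica set
    client.admin.command(
        "configureFailPoint",
        "failCommand",
        mode={"times": 2},
        data={
            "failCommands": ["insert"],
            "writeConcernError": {"code": 91, "errmsg": "Replication is being shut down"},
        },
    )
    try:
        client.pymongo_test.testcoll.insert_one({})
    except WriteConcernError as exc:
        assert exc.has_error_label("RetryableWriteError")
    finally:
        client.admin.command("configureFailPoint", "failCommand", mode="off")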
@client_context.require_version_min(4, 0) def test_RetryableWriteError_error_label(self): listener = OvertCommandListener() - client = rs_or_single_client( - retryWrites=True, event_listeners=[listener]) + client = rs_or_single_client(retryWrites=True, event_listeners=[listener]) # Ensure collection exists. client.pymongo_test.testcoll.insert_one({}) @@ -460,14 +478,13 @@ def test_RetryableWriteError_error_label(self): with self.fail_point(self.fail_insert): with self.assertRaises(WriteConcernError) as cm: client.pymongo_test.testcoll.insert_one({}) - self.assertTrue(cm.exception.has_error_label( - 'RetryableWriteError')) + self.assertTrue(cm.exception.has_error_label("RetryableWriteError")) if client_context.version >= Version(4, 4): # In MongoDB 4.4+ we rely on the server returning the error label. self.assertIn( - 'RetryableWriteError', - listener.results['succeeded'][-1].reply['errorLabels']) + "RetryableWriteError", listener.results["succeeded"][-1].reply["errorLabels"] + ) @client_context.require_version_min(4, 4) def test_RetryableWriteError_error_label_RawBSONDocument(self): @@ -476,13 +493,18 @@ def test_RetryableWriteError_error_label_RawBSONDocument(self): with self.client.start_session() as s: s._start_retryable_write() result = self.client.pymongo_test.command( - 'insert', 'testcoll', documents=[{'_id': 1}], - txnNumber=s._server_session.transaction_id, session=s, + "insert", + "testcoll", + documents=[{"_id": 1}], + txnNumber=s._server_session.transaction_id, + session=s, codec_options=DEFAULT_CODEC_OPTIONS.with_options( - document_class=RawBSONDocument)) + document_class=RawBSONDocument + ), + ) - self.assertIn('writeConcernError', result) - self.assertIn('RetryableWriteError', result['errorLabels']) + self.assertIn("writeConcernError", result) + self.assertIn("RetryableWriteError", result["errorLabels"]) class InsertThread(threading.Thread): @@ -504,26 +526,24 @@ class TestPoolPausedError(IntegrationTest): @client_context.require_failCommand_blockConnection @client_context.require_retryable_writes - @client_knobs(heartbeat_frequency=.05, min_heartbeat_interval=.05) + @client_knobs(heartbeat_frequency=0.05, min_heartbeat_interval=0.05) def test_pool_paused_error_is_retryable(self): cmap_listener = CMAPListener() cmd_listener = OvertCommandListener() - client = rs_or_single_client( - maxPoolSize=1, - event_listeners=[cmap_listener, cmd_listener]) + client = rs_or_single_client(maxPoolSize=1, event_listeners=[cmap_listener, cmd_listener]) self.addCleanup(client.close) for _ in range(10): cmap_listener.reset() cmd_listener.reset() threads = [InsertThread(client.pymongo_test.test) for _ in range(2)] fail_command = { - 'mode': {'times': 1}, - 'data': { - 'failCommands': ['insert'], - 'blockConnection': True, - 'blockTimeMS': 1000, - 'errorCode': 91, - 'errorLabels': ['RetryableWriteError'], + "mode": {"times": 1}, + "data": { + "failCommands": ["insert"], + "blockConnection": True, + "blockTimeMS": 1000, + "errorCode": 91, + "errorLabels": ["RetryableWriteError"], }, } with self.fail_point(fail_command): @@ -541,29 +561,25 @@ def test_pool_paused_error_is_retryable(self): break # Via CMAP monitoring, assert that the first check out succeeds. 
- cmap_events = cmap_listener.events_by_type(( - ConnectionCheckedOutEvent, - ConnectionCheckOutFailedEvent, - PoolClearedEvent)) + cmap_events = cmap_listener.events_by_type( + (ConnectionCheckedOutEvent, ConnectionCheckOutFailedEvent, PoolClearedEvent) + ) msg = pprint.pformat(cmap_listener.events) self.assertIsInstance(cmap_events[0], ConnectionCheckedOutEvent, msg) self.assertIsInstance(cmap_events[1], PoolClearedEvent, msg) - self.assertIsInstance( - cmap_events[2], ConnectionCheckOutFailedEvent, msg) - self.assertEqual(cmap_events[2].reason, - ConnectionCheckOutFailedReason.CONN_ERROR, - msg) + self.assertIsInstance(cmap_events[2], ConnectionCheckOutFailedEvent, msg) + self.assertEqual(cmap_events[2].reason, ConnectionCheckOutFailedReason.CONN_ERROR, msg) self.assertIsInstance(cmap_events[3], ConnectionCheckedOutEvent, msg) # Connection check out failures are not reflected in command # monitoring because we only publish command events _after_ checking # out a connection. - started = cmd_listener.results['started'] + started = cmd_listener.results["started"] msg = pprint.pformat(cmd_listener.results) self.assertEqual(3, len(started), msg) - succeeded = cmd_listener.results['succeeded'] + succeeded = cmd_listener.results["succeeded"] self.assertEqual(2, len(succeeded), msg) - failed = cmd_listener.results['failed'] + failed = cmd_listener.results["failed"] self.assertEqual(1, len(failed), msg) @@ -576,8 +592,7 @@ def test_increment_transaction_id_without_sending_command(self): the first attempt fails before sending the command. """ listener = OvertCommandListener() - client = rs_or_single_client( - retryWrites=True, event_listeners=[listener]) + client = rs_or_single_client(retryWrites=True, event_listeners=[listener]) self.addCleanup(client.close) topology = client._topology select_server = topology.select_server @@ -586,28 +601,27 @@ def raise_connection_err_select_server(*args, **kwargs): # Raise ConnectionFailure on the first attempt and perform # normal selection on the retry attempt. topology.select_server = select_server - raise ConnectionFailure('Connection refused') + raise ConnectionFailure("Connection refused") - for method, args, kwargs in retryable_single_statement_ops( - client.db.retryable_write_test): + for method, args, kwargs in retryable_single_statement_ops(client.db.retryable_write_test): listener.results.clear() topology.select_server = raise_connection_err_select_server with client.start_session() as session: kwargs = copy.deepcopy(kwargs) - kwargs['session'] = session - msg = '%s(*%r, **%r)' % (method.__name__, args, kwargs) + kwargs["session"] = session + msg = "%s(*%r, **%r)" % (method.__name__, args, kwargs) initial_txn_id = session._server_session.transaction_id # Each operation should fail on the first attempt and succeed # on the second. 
method(*args, **kwargs) - self.assertEqual(len(listener.results['started']), 1, msg) - retry_cmd = listener.results['started'][0].command - sent_txn_id = retry_cmd['txnNumber'] + self.assertEqual(len(listener.results["started"]), 1, msg) + retry_cmd = listener.results["started"][0].command + sent_txn_id = retry_cmd["txnNumber"] final_txn_id = session._server_session.transaction_id self.assertEqual(Int64(initial_txn_id + 1), sent_txn_id, msg) self.assertEqual(sent_txn_id, final_txn_id, msg) -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() diff --git a/test/test_retryable_writes_unified.py b/test/test_retryable_writes_unified.py index 4e851de273..4e97c14d4b 100644 --- a/test/test_retryable_writes_unified.py +++ b/test/test_retryable_writes_unified.py @@ -23,8 +23,7 @@ from test.unified_format import generate_test_classes # Location of JSON test specifications. -TEST_PATH = os.path.join( - os.path.dirname(os.path.realpath(__file__)), 'retryable_writes', 'unified') +TEST_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "retryable_writes", "unified") # Generate unified tests. globals().update(generate_test_classes(TEST_PATH, module=__name__)) diff --git a/test/test_saslprep.py b/test/test_saslprep.py index c694224a6c..1dd4727181 100644 --- a/test/test_saslprep.py +++ b/test/test_saslprep.py @@ -16,11 +16,12 @@ sys.path[0:0] = [""] -from pymongo.saslprep import saslprep from test import unittest -class TestSASLprep(unittest.TestCase): +from pymongo.saslprep import saslprep + +class TestSASLprep(unittest.TestCase): def test_saslprep(self): try: import stringprep diff --git a/test/test_sdam_monitoring_spec.py b/test/test_sdam_monitoring_spec.py index e7a8a7ef05..fee751fbdc 100644 --- a/test/test_sdam_monitoring_spec.py +++ b/test/test_sdam_monitoring_spec.py @@ -21,44 +21,41 @@ sys.path[0:0] = [""] -from pymongo import MongoClient +from test import IntegrationTest, client_context, client_knobs, unittest +from test.utils import ( + ServerAndTopologyEventListener, + rs_or_single_client, + server_name_to_type, + wait_until, +) + from bson.json_util import object_hook -from pymongo import monitoring +from pymongo import MongoClient, monitoring from pymongo.collection import Collection from pymongo.common import clean_node -from pymongo.errors import (ConnectionFailure, - NotPrimaryError) +from pymongo.errors import ConnectionFailure, NotPrimaryError from pymongo.hello import Hello from pymongo.monitor import Monitor from pymongo.server_description import ServerDescription from pymongo.topology_description import TOPOLOGY_TYPE -from test import unittest, client_context, client_knobs, IntegrationTest -from test.utils import (ServerAndTopologyEventListener, - server_name_to_type, - rs_or_single_client, - wait_until) # Location of JSON test specifications. 
-_TEST_PATH = os.path.join( - os.path.dirname(os.path.realpath(__file__)), - 'sdam_monitoring') +_TEST_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "sdam_monitoring") def compare_server_descriptions(expected, actual): - if ((not expected['address'] == "%s:%s" % actual.address) or - (not server_name_to_type(expected['type']) == - actual.server_type)): + if (not expected["address"] == "%s:%s" % actual.address) or ( + not server_name_to_type(expected["type"]) == actual.server_type + ): return False - expected_hosts = set( - expected['arbiters'] + expected['passives'] + expected['hosts']) + expected_hosts = set(expected["arbiters"] + expected["passives"] + expected["hosts"]) return expected_hosts == set("%s:%s" % s for s in actual.all_hosts) def compare_topology_descriptions(expected, actual): - if not (TOPOLOGY_TYPE.__getattribute__( - expected['topologyType']) == actual.topology_type): + if not (TOPOLOGY_TYPE.__getattribute__(expected["topologyType"]) == actual.topology_type): return False - expected = expected['servers'] + expected = expected["servers"] actual = actual.server_descriptions() if len(expected) != len(actual): return False @@ -81,70 +78,74 @@ def compare_events(expected_dict, actual): if expected_type == "server_opening_event": if not isinstance(actual, monitoring.ServerOpeningEvent): - return False, "Expected ServerOpeningEvent, got %s" % ( - actual.__class__) - if not expected['address'] == "%s:%s" % actual.server_address: - return (False, - "ServerOpeningEvent published with wrong address (expected" - " %s, got %s" % (expected['address'], - actual.server_address)) + return False, "Expected ServerOpeningEvent, got %s" % (actual.__class__) + if not expected["address"] == "%s:%s" % actual.server_address: + return ( + False, + "ServerOpeningEvent published with wrong address (expected" + " %s, got %s" % (expected["address"], actual.server_address), + ) elif expected_type == "server_description_changed_event": if not isinstance(actual, monitoring.ServerDescriptionChangedEvent): - return (False, - "Expected ServerDescriptionChangedEvent, got %s" % ( - actual.__class__)) - if not expected['address'] == "%s:%s" % actual.server_address: - return (False, "ServerDescriptionChangedEvent has wrong address" - " (expected %s, got %s" % (expected['address'], - actual.server_address)) + return (False, "Expected ServerDescriptionChangedEvent, got %s" % (actual.__class__)) + if not expected["address"] == "%s:%s" % actual.server_address: + return ( + False, + "ServerDescriptionChangedEvent has wrong address" + " (expected %s, got %s" % (expected["address"], actual.server_address), + ) + if not compare_server_descriptions(expected["newDescription"], actual.new_description): + return (False, "New ServerDescription incorrect in" " ServerDescriptionChangedEvent") if not compare_server_descriptions( - expected['newDescription'], actual.new_description): - return (False, "New ServerDescription incorrect in" - " ServerDescriptionChangedEvent") - if not compare_server_descriptions(expected['previousDescription'], - actual.previous_description): - return (False, "Previous ServerDescription incorrect in" - " ServerDescriptionChangedEvent") + expected["previousDescription"], actual.previous_description + ): + return ( + False, + "Previous ServerDescription incorrect in" " ServerDescriptionChangedEvent", + ) elif expected_type == "server_closed_event": if not isinstance(actual, monitoring.ServerClosedEvent): - return False, "Expected ServerClosedEvent, got %s" % ( - actual.__class__) - 
if not expected['address'] == "%s:%s" % actual.server_address: - return (False, "ServerClosedEvent published with wrong address" - " (expected %s, got %s" % (expected['address'], - actual.server_address)) + return False, "Expected ServerClosedEvent, got %s" % (actual.__class__) + if not expected["address"] == "%s:%s" % actual.server_address: + return ( + False, + "ServerClosedEvent published with wrong address" + " (expected %s, got %s" % (expected["address"], actual.server_address), + ) elif expected_type == "topology_opening_event": if not isinstance(actual, monitoring.TopologyOpenedEvent): - return False, "Expected TopologyOpeningEvent, got %s" % ( - actual.__class__) + return False, "Expected TopologyOpeningEvent, got %s" % (actual.__class__) elif expected_type == "topology_description_changed_event": if not isinstance(actual, monitoring.TopologyDescriptionChangedEvent): - return (False, "Expected TopologyDescriptionChangedEvent," - " got %s" % (actual.__class__)) - if not compare_topology_descriptions(expected['newDescription'], - actual.new_description): - return (False, "New TopologyDescription incorrect in " - "TopologyDescriptionChangedEvent") + return ( + False, + "Expected TopologyDescriptionChangedEvent," " got %s" % (actual.__class__), + ) + if not compare_topology_descriptions(expected["newDescription"], actual.new_description): + return ( + False, + "New TopologyDescription incorrect in " "TopologyDescriptionChangedEvent", + ) if not compare_topology_descriptions( - expected['previousDescription'], - actual.previous_description): - return (False, "Previous TopologyDescription incorrect in" - " TopologyDescriptionChangedEvent") + expected["previousDescription"], actual.previous_description + ): + return ( + False, + "Previous TopologyDescription incorrect in" " TopologyDescriptionChangedEvent", + ) elif expected_type == "topology_closed_event": if not isinstance(actual, monitoring.TopologyClosedEvent): - return False, "Expected TopologyClosedEvent, got %s" % ( - actual.__class__) + return False, "Expected TopologyClosedEvent, got %s" % (actual.__class__) else: - return False, "Incorrect event: expected %s, actual %s" % ( - expected_type, actual) + return False, "Incorrect event: expected %s, actual %s" % (expected_type, actual) return True, "" @@ -152,12 +153,10 @@ def compare_events(expected_dict, actual): def compare_multiple_events(i, expected_results, actual_results): events_in_a_row = [] j = i - while(j < len(expected_results) and isinstance( - actual_results[j], - actual_results[i].__class__)): + while j < len(expected_results) and isinstance(actual_results[j], actual_results[i].__class__): events_in_a_row.append(actual_results[j]) j += 1 - message = '' + message = "" for event in events_in_a_row: for k in range(i, j): passed, message = compare_events(expected_results[k], event) @@ -166,11 +165,10 @@ def compare_multiple_events(i, expected_results, actual_results): break else: return i, False, message - return j, True, '' + return j, True, "" class TestAllScenarios(IntegrationTest): - def setUp(self): super(TestAllScenarios, self).setUp() self.all_listener = ServerAndTopologyEventListener() @@ -184,51 +182,60 @@ def run_scenario(self): def _run_scenario(self): class NoopMonitor(Monitor): """Override the _run method to do nothing.""" + def _run(self): time.sleep(0.05) - m = MongoClient(host=scenario_def['uri'], port=27017, - event_listeners=[self.all_listener], - _monitor_class=NoopMonitor) + m = MongoClient( + host=scenario_def["uri"], + port=27017, + 
event_listeners=[self.all_listener], + _monitor_class=NoopMonitor, + ) topology = m._get_topology() try: - for phase in scenario_def['phases']: - for (source, response) in phase.get('responses', []): + for phase in scenario_def["phases"]: + for (source, response) in phase.get("responses", []): source_address = clean_node(source) - topology.on_change(ServerDescription( - address=source_address, - hello=Hello(response), - round_trip_time=0)) + topology.on_change( + ServerDescription( + address=source_address, hello=Hello(response), round_trip_time=0 + ) + ) - expected_results = phase['outcome']['events'] + expected_results = phase["outcome"]["events"] expected_len = len(expected_results) wait_until( lambda: len(self.all_listener.results) >= expected_len, - "publish all events", timeout=15) + "publish all events", + timeout=15, + ) # Wait some time to catch possible lagging extra events. time.sleep(0.5) i = 0 while i < expected_len: - result = self.all_listener.results[i] if len( - self.all_listener.results) > i else None + result = ( + self.all_listener.results[i] if len(self.all_listener.results) > i else None + ) # The order of ServerOpening/ClosedEvents doesn't matter - if isinstance(result, (monitoring.ServerOpeningEvent, - monitoring.ServerClosedEvent)): + if isinstance( + result, (monitoring.ServerOpeningEvent, monitoring.ServerClosedEvent) + ): i, passed, message = compare_multiple_events( - i, expected_results, self.all_listener.results) + i, expected_results, self.all_listener.results + ) self.assertTrue(passed, message) else: - self.assertTrue( - *compare_events(expected_results[i], result)) + self.assertTrue(*compare_events(expected_results[i], result)) i += 1 # Assert no extra events. extra_events = self.all_listener.results[expected_len:] if extra_events: - self.fail('Extra events %r' % (extra_events,)) + self.fail("Extra events %r" % (extra_events,)) self.all_listener.reset() finally: @@ -241,11 +248,10 @@ def create_tests(): for dirpath, _, filenames in os.walk(_TEST_PATH): for filename in filenames: with open(os.path.join(dirpath, filename)) as scenario_stream: - scenario_def = json.load( - scenario_stream, object_hook=object_hook) + scenario_def = json.load(scenario_stream, object_hook=object_hook) # Construct test from scenario. new_test = create_test(scenario_def) - test_name = 'test_%s' % (os.path.splitext(filename)[0],) + test_name = "test_%s" % (os.path.splitext(filename)[0],) new_test.__name__ = test_name setattr(TestAllScenarios, new_test.__name__, new_test) @@ -269,7 +275,8 @@ def setUpClass(cls): cls.listener = ServerAndTopologyEventListener() retry_writes = client_context.supports_transactions() cls.test_client = rs_or_single_client( - event_listeners=[cls.listener], retryWrites=retry_writes) + event_listeners=[cls.listener], retryWrites=retry_writes + ) cls.coll = cls.test_client[cls.client.db.name].test cls.coll.insert_one({}) @@ -287,12 +294,12 @@ def _test_app_error(self, fail_command_opts, expected_error): # Test that an application error causes a ServerDescriptionChangedEvent # to be published. 
- data = {'failCommands': ['insert']} + data = {"failCommands": ["insert"]} data.update(fail_command_opts) fail_insert = { - 'configureFailPoint': 'failCommand', - 'mode': {'times': 1}, - 'data': data, + "configureFailPoint": "failCommand", + "mode": {"times": 1}, + "data": data, } with self.fail_point(fail_insert): if self.test_client.options.retry_writes: @@ -306,43 +313,48 @@ def marked_unknown(event): return ( isinstance(event, monitoring.ServerDescriptionChangedEvent) and event.server_address == address - and not event.new_description.is_server_type_known) + and not event.new_description.is_server_type_known + ) def discovered_node(event): return ( isinstance(event, monitoring.ServerDescriptionChangedEvent) and event.server_address == address and not event.previous_description.is_server_type_known - and event.new_description.is_server_type_known) + and event.new_description.is_server_type_known + ) def marked_unknown_and_rediscovered(): - return (len(self.listener.matching(marked_unknown)) >= 1 and - len(self.listener.matching(discovered_node)) >= 1) + return ( + len(self.listener.matching(marked_unknown)) >= 1 + and len(self.listener.matching(discovered_node)) >= 1 + ) # Topology events are published asynchronously - wait_until(marked_unknown_and_rediscovered, 'rediscover node') + wait_until(marked_unknown_and_rediscovered, "rediscover node") # Expect a single ServerDescriptionChangedEvent for the network error. marked_unknown_events = self.listener.matching(marked_unknown) self.assertEqual(len(marked_unknown_events), 1, marked_unknown_events) - self.assertIsInstance( - marked_unknown_events[0].new_description.error, expected_error) + self.assertIsInstance(marked_unknown_events[0].new_description.error, expected_error) def test_network_error_publishes_events(self): - self._test_app_error({'closeConnection': True}, ConnectionFailure) + self._test_app_error({"closeConnection": True}, ConnectionFailure) # In 4.4+, not primary errors from failCommand don't cause SDAM state # changes because topologyVersion is not incremented. 
@client_context.require_version_max(4, 3) def test_not_primary_error_publishes_events(self): - self._test_app_error({'errorCode': 10107, 'closeConnection': False, - 'errorLabels': ['RetryableWriteError']}, - NotPrimaryError) + self._test_app_error( + {"errorCode": 10107, "closeConnection": False, "errorLabels": ["RetryableWriteError"]}, + NotPrimaryError, + ) def test_shutdown_error_publishes_events(self): - self._test_app_error({'errorCode': 91, 'closeConnection': False, - 'errorLabels': ['RetryableWriteError']}, - NotPrimaryError) + self._test_app_error( + {"errorCode": 91, "closeConnection": False, "errorLabels": ["RetryableWriteError"]}, + NotPrimaryError, + ) if __name__ == "__main__": diff --git a/test/test_server.py b/test/test_server.py index e4996d2e09..064d77d024 100644 --- a/test/test_server.py +++ b/test/test_server.py @@ -18,18 +18,19 @@ sys.path[0:0] = [""] +from test import unittest + from pymongo.hello import Hello from pymongo.server import Server from pymongo.server_description import ServerDescription -from test import unittest class TestServer(unittest.TestCase): def test_repr(self): - hello = Hello({'ok': 1}) - sd = ServerDescription(('localhost', 27017), hello) + hello = Hello({"ok": 1}) + sd = ServerDescription(("localhost", 27017), hello) server = Server(sd, pool=object(), monitor=object()) - self.assertTrue('Standalone' in str(server)) + self.assertTrue("Standalone" in str(server)) if __name__ == "__main__": diff --git a/test/test_server_description.py b/test/test_server_description.py index 23d6c8f377..1562711375 100644 --- a/test/test_server_description.py +++ b/test/test_server_description.py @@ -18,14 +18,15 @@ sys.path[0:0] = [""] -from bson.objectid import ObjectId +from test import unittest + from bson.int64 import Int64 -from pymongo.server_type import SERVER_TYPE +from bson.objectid import ObjectId from pymongo.hello import Hello, HelloCompat from pymongo.server_description import ServerDescription -from test import unittest +from pymongo.server_type import SERVER_TYPE -address = ('localhost', 27017) +address = ("localhost", 27017) def parse_hello_response(doc): @@ -42,82 +43,88 @@ def test_unknown(self): self.assertFalse(s.is_readable) def test_mongos(self): - s = parse_hello_response({'ok': 1, 'msg': 'isdbgrid'}) + s = parse_hello_response({"ok": 1, "msg": "isdbgrid"}) self.assertEqual(SERVER_TYPE.Mongos, s.server_type) - self.assertEqual('Mongos', s.server_type_name) + self.assertEqual("Mongos", s.server_type_name) self.assertTrue(s.is_writable) self.assertTrue(s.is_readable) def test_primary(self): - s = parse_hello_response( - {'ok': 1, HelloCompat.LEGACY_CMD: True, 'setName': 'rs'}) + s = parse_hello_response({"ok": 1, HelloCompat.LEGACY_CMD: True, "setName": "rs"}) self.assertEqual(SERVER_TYPE.RSPrimary, s.server_type) - self.assertEqual('RSPrimary', s.server_type_name) + self.assertEqual("RSPrimary", s.server_type_name) self.assertTrue(s.is_writable) self.assertTrue(s.is_readable) def test_secondary(self): s = parse_hello_response( - {'ok': 1, HelloCompat.LEGACY_CMD: False, 'secondary': True, 'setName': 'rs'}) + {"ok": 1, HelloCompat.LEGACY_CMD: False, "secondary": True, "setName": "rs"} + ) self.assertEqual(SERVER_TYPE.RSSecondary, s.server_type) - self.assertEqual('RSSecondary', s.server_type_name) + self.assertEqual("RSSecondary", s.server_type_name) self.assertFalse(s.is_writable) self.assertTrue(s.is_readable) def test_arbiter(self): s = parse_hello_response( - {'ok': 1, HelloCompat.LEGACY_CMD: False, 'arbiterOnly': True, 'setName': 'rs'}) + 
{"ok": 1, HelloCompat.LEGACY_CMD: False, "arbiterOnly": True, "setName": "rs"} + ) self.assertEqual(SERVER_TYPE.RSArbiter, s.server_type) - self.assertEqual('RSArbiter', s.server_type_name) + self.assertEqual("RSArbiter", s.server_type_name) self.assertFalse(s.is_writable) self.assertFalse(s.is_readable) def test_other(self): - s = parse_hello_response( - {'ok': 1, HelloCompat.LEGACY_CMD: False, 'setName': 'rs'}) + s = parse_hello_response({"ok": 1, HelloCompat.LEGACY_CMD: False, "setName": "rs"}) self.assertEqual(SERVER_TYPE.RSOther, s.server_type) - self.assertEqual('RSOther', s.server_type_name) + self.assertEqual("RSOther", s.server_type_name) - s = parse_hello_response({ - 'ok': 1, - HelloCompat.LEGACY_CMD: False, - 'secondary': True, - 'hidden': True, - 'setName': 'rs'}) + s = parse_hello_response( + { + "ok": 1, + HelloCompat.LEGACY_CMD: False, + "secondary": True, + "hidden": True, + "setName": "rs", + } + ) self.assertEqual(SERVER_TYPE.RSOther, s.server_type) self.assertFalse(s.is_writable) self.assertFalse(s.is_readable) def test_ghost(self): - s = parse_hello_response({'ok': 1, 'isreplicaset': True}) + s = parse_hello_response({"ok": 1, "isreplicaset": True}) self.assertEqual(SERVER_TYPE.RSGhost, s.server_type) - self.assertEqual('RSGhost', s.server_type_name) + self.assertEqual("RSGhost", s.server_type_name) self.assertFalse(s.is_writable) self.assertFalse(s.is_readable) def test_fields(self): - s = parse_hello_response({ - 'ok': 1, - HelloCompat.LEGACY_CMD: False, - 'secondary': True, - 'primary': 'a:27017', - 'tags': {'a': 'foo', 'b': 'baz'}, - 'maxMessageSizeBytes': 1, - 'maxBsonObjectSize': 2, - 'maxWriteBatchSize': 3, - 'minWireVersion': 4, - 'maxWireVersion': 5, - 'setName': 'rs'}) + s = parse_hello_response( + { + "ok": 1, + HelloCompat.LEGACY_CMD: False, + "secondary": True, + "primary": "a:27017", + "tags": {"a": "foo", "b": "baz"}, + "maxMessageSizeBytes": 1, + "maxBsonObjectSize": 2, + "maxWriteBatchSize": 3, + "minWireVersion": 4, + "maxWireVersion": 5, + "setName": "rs", + } + ) self.assertEqual(SERVER_TYPE.RSSecondary, s.server_type) - self.assertEqual(('a', 27017), s.primary) - self.assertEqual({'a': 'foo', 'b': 'baz'}, s.tags) + self.assertEqual(("a", 27017), s.primary) + self.assertEqual({"a": "foo", "b": "baz"}, s.tags) self.assertEqual(1, s.max_message_size) self.assertEqual(2, s.max_bson_size) self.assertEqual(3, s.max_write_batch_size) @@ -125,55 +132,57 @@ def test_fields(self): self.assertEqual(5, s.max_wire_version) def test_default_max_message_size(self): - s = parse_hello_response({ - 'ok': 1, - HelloCompat.LEGACY_CMD: True, - 'maxBsonObjectSize': 2}) + s = parse_hello_response({"ok": 1, HelloCompat.LEGACY_CMD: True, "maxBsonObjectSize": 2}) # Twice max_bson_size. self.assertEqual(4, s.max_message_size) def test_standalone(self): - s = parse_hello_response({'ok': 1, HelloCompat.LEGACY_CMD: True}) + s = parse_hello_response({"ok": 1, HelloCompat.LEGACY_CMD: True}) self.assertEqual(SERVER_TYPE.Standalone, s.server_type) # Mongod started with --slave. # master-slave replication was removed in MongoDB 4.0. 
-        s = parse_hello_response({'ok': 1, HelloCompat.LEGACY_CMD: False})
+        s = parse_hello_response({"ok": 1, HelloCompat.LEGACY_CMD: False})
         self.assertEqual(SERVER_TYPE.Standalone, s.server_type)
         self.assertTrue(s.is_writable)
         self.assertTrue(s.is_readable)
 
     def test_ok_false(self):
-        s = parse_hello_response({'ok': 0, HelloCompat.LEGACY_CMD: True})
+        s = parse_hello_response({"ok": 0, HelloCompat.LEGACY_CMD: True})
         self.assertEqual(SERVER_TYPE.Unknown, s.server_type)
         self.assertFalse(s.is_writable)
         self.assertFalse(s.is_readable)
 
     def test_all_hosts(self):
-        s = parse_hello_response({
-            'ok': 1,
-            HelloCompat.LEGACY_CMD: True,
-            'hosts': ['a'],
-            'passives': ['b:27018'],
-            'arbiters': ['c']
-        })
+        s = parse_hello_response(
+            {
+                "ok": 1,
+                HelloCompat.LEGACY_CMD: True,
+                "hosts": ["a"],
+                "passives": ["b:27018"],
+                "arbiters": ["c"],
+            }
+        )
 
-        self.assertEqual(
-            [('a', 27017), ('b', 27018), ('c', 27017)],
-            sorted(s.all_hosts))
+        self.assertEqual([("a", 27017), ("b", 27018), ("c", 27017)], sorted(s.all_hosts))
 
     def test_repr(self):
-        s = parse_hello_response({'ok': 1, 'msg': 'isdbgrid'})
-        self.assertEqual(repr(s),
-                         "<ServerDescription ('localhost', 27017) server_type:"
-                         " Mongos, rtt: None>")
+        s = parse_hello_response({"ok": 1, "msg": "isdbgrid"})
+        self.assertEqual(
+            repr(s), "<ServerDescription ('localhost', 27017) server_type: Mongos, rtt: None>"
+        )
 
     def test_topology_version(self):
-        topology_version = {'processId': ObjectId(), 'counter': Int64('0')}
+        topology_version = {"processId": ObjectId(), "counter": Int64("0")}
         s = parse_hello_response(
-            {'ok': 1, HelloCompat.LEGACY_CMD: True, 'setName': 'rs',
-             'topologyVersion': topology_version})
+            {
+                "ok": 1,
+                HelloCompat.LEGACY_CMD: True,
+                "setName": "rs",
+                "topologyVersion": topology_version,
+            }
+        )
         self.assertEqual(SERVER_TYPE.RSPrimary, s.server_type)
         self.assertEqual(topology_version, s.topology_version)
 
@@ -185,8 +194,7 @@ def test_topology_version(self):
 
     def test_topology_version_not_present(self):
         # No topologyVersion field.
-        s = parse_hello_response(
-            {'ok': 1, HelloCompat.LEGACY_CMD: True, 'setName': 'rs'})
+        s = parse_hello_response({"ok": 1, HelloCompat.LEGACY_CMD: True, "setName": "rs"})
         self.assertEqual(SERVER_TYPE.RSPrimary, s.server_type)
         self.assertEqual(None, s.topology_version)
 
diff --git a/test/test_server_selection.py b/test/test_server_selection.py
index 955736709d..a80d5f13d9 100644
--- a/test/test_server_selection.py
+++ b/test/test_server_selection.py
@@ -17,8 +17,7 @@
 import os
 import sys
 
-from pymongo import MongoClient
-from pymongo import ReadPreference
+from pymongo import MongoClient, ReadPreference
 from pymongo.errors import ServerSelectionTimeoutError
 from pymongo.hello import HelloCompat
 from pymongo.server_selectors import writable_server_selector
@@ -27,22 +26,30 @@
 sys.path[0:0] = [""]
 
-from test import client_context, unittest, IntegrationTest
-from test.utils import (rs_or_single_client, wait_until, EventListener,
-                        FunctionCallRecorder)
+from test import IntegrationTest, client_context, unittest
+from test.utils import (
+    EventListener,
+    FunctionCallRecorder,
+    rs_or_single_client,
+    wait_until,
+)
 from test.utils_selection_tests import (
-    create_selection_tests, get_addresses, get_topology_settings_dict,
-    make_server_description)
-
+    create_selection_tests,
+    get_addresses,
+    get_topology_settings_dict,
+    make_server_description,
+)
 
 # Location of JSON test specifications.
_TEST_PATH = os.path.join( os.path.dirname(os.path.realpath(__file__)), - os.path.join('server_selection', 'server_selection')) + os.path.join("server_selection", "server_selection"), +) class SelectionStoreSelector(object): """No-op selector that keeps track of what was passed to it.""" + def __init__(self): self.selection = None @@ -51,7 +58,6 @@ def __call__(self, selection): return selection - class TestAllScenarios(create_selection_tests(_TEST_PATH)): # type: ignore pass @@ -67,37 +73,33 @@ def custom_selector(servers): # Initialize client with appropriate listeners. listener = EventListener() - client = rs_or_single_client( - server_selector=custom_selector, event_listeners=[listener]) + client = rs_or_single_client(server_selector=custom_selector, event_listeners=[listener]) self.addCleanup(client.close) - coll = client.get_database( - 'testdb', read_preference=ReadPreference.NEAREST).coll - self.addCleanup(client.drop_database, 'testdb') + coll = client.get_database("testdb", read_preference=ReadPreference.NEAREST).coll + self.addCleanup(client.drop_database, "testdb") # Wait the node list to be fully populated. def all_hosts_started(): - return (len(client.admin.command(HelloCompat.LEGACY_CMD)['hosts']) == - len(client._topology._description.readable_servers)) + return len(client.admin.command(HelloCompat.LEGACY_CMD)["hosts"]) == len( + client._topology._description.readable_servers + ) - wait_until(all_hosts_started, 'receive heartbeat from all hosts') - expected_port = max([ - n.address[1] - for n in client._topology._description.readable_servers]) + wait_until(all_hosts_started, "receive heartbeat from all hosts") + expected_port = max([n.address[1] for n in client._topology._description.readable_servers]) # Insert 1 record and access it 10 times. - coll.insert_one({'name': 'John Doe'}) + coll.insert_one({"name": "John Doe"}) for _ in range(10): - coll.find_one({'name': 'John Doe'}) + coll.find_one({"name": "John Doe"}) # Confirm all find commands are run against appropriate host. - for command in listener.results['started']: - if command.command_name == 'find': - self.assertEqual( - command.connection_id[1], expected_port) + for command in listener.results["started"]: + if command.command_name == "find": + self.assertEqual(command.connection_id[1], expected_port) def test_invalid_server_selector(self): # Client initialization must fail if server_selector is not callable. - for selector_candidate in [list(), 10, 'string', {}]: + for selector_candidate in [list(), 10, "string", {}]: with self.assertRaisesRegex(ValueError, "must be a callable"): MongoClient(connect=False, server_selector=selector_candidate) @@ -112,13 +114,13 @@ def test_selector_called(self): mongo_client = rs_or_single_client(server_selector=selector) test_collection = mongo_client.testdb.test_collection self.addCleanup(mongo_client.close) - self.addCleanup(mongo_client.drop_database, 'testdb') + self.addCleanup(mongo_client.drop_database, "testdb") # Do N operations and test selector is called at least N times. 
- test_collection.insert_one({'age': 20, 'name': 'John'}) - test_collection.insert_one({'age': 31, 'name': 'Jane'}) - test_collection.update_one({'name': 'Jane'}, {'$set': {'age': 21}}) - test_collection.find_one({'name': 'Roe'}) + test_collection.insert_one({"age": 20, "name": "John"}) + test_collection.insert_one({"age": 31, "name": "Jane"}) + test_collection.update_one({"name": "Jane"}, {"$set": {"age": 21}}) + test_collection.find_one({"name": "Roe"}) self.assertGreaterEqual(selector.call_count, 4) @client_context.require_replica_set @@ -126,34 +128,26 @@ def test_latency_threshold_application(self): selector = SelectionStoreSelector() scenario_def: dict = { - 'topology_description': { - 'type': 'ReplicaSetWithPrimary', 'servers': [ - {'address': 'b:27017', - 'avg_rtt_ms': 10000, - 'type': 'RSSecondary', - 'tag': {}}, - {'address': 'c:27017', - 'avg_rtt_ms': 20000, - 'type': 'RSSecondary', - 'tag': {}}, - {'address': 'a:27017', - 'avg_rtt_ms': 30000, - 'type': 'RSPrimary', - 'tag': {}}, - ]}} + "topology_description": { + "type": "ReplicaSetWithPrimary", + "servers": [ + {"address": "b:27017", "avg_rtt_ms": 10000, "type": "RSSecondary", "tag": {}}, + {"address": "c:27017", "avg_rtt_ms": 20000, "type": "RSSecondary", "tag": {}}, + {"address": "a:27017", "avg_rtt_ms": 30000, "type": "RSPrimary", "tag": {}}, + ], + } + } # Create & populate Topology such that all but one server is too slow. - rtt_times = [srv['avg_rtt_ms'] for srv in - scenario_def['topology_description']['servers']] + rtt_times = [srv["avg_rtt_ms"] for srv in scenario_def["topology_description"]["servers"]] min_rtt_idx = rtt_times.index(min(rtt_times)) - seeds, hosts = get_addresses( - scenario_def["topology_description"]["servers"]) + seeds, hosts = get_addresses(scenario_def["topology_description"]["servers"]) settings = get_topology_settings_dict( - heartbeat_frequency=1, local_threshold_ms=1, seeds=seeds, - server_selector=selector) + heartbeat_frequency=1, local_threshold_ms=1, seeds=seeds, server_selector=selector + ) topology = Topology(TopologySettings(**settings)) topology.open() - for server in scenario_def['topology_description']['servers']: + for server in scenario_def["topology_description"]["servers"]: server_description = make_server_description(server, hosts) topology.on_change(server_description) @@ -161,52 +155,40 @@ def test_latency_threshold_application(self): # prior to custom server selection logic kicking in. server = topology.select_server(ReadPreference.NEAREST) assert selector.selection is not None - self.assertEqual( - len(selector.selection), - len(topology.description.server_descriptions())) + self.assertEqual(len(selector.selection), len(topology.description.server_descriptions())) # Ensure proper filtering based on latency after custom selection. 
- self.assertEqual( - server.description.address, seeds[min_rtt_idx]) + self.assertEqual(server.description.address, seeds[min_rtt_idx]) @client_context.require_replica_set def test_server_selector_bypassed(self): selector = FunctionCallRecorder(lambda x: x) scenario_def = { - 'topology_description': { - 'type': 'ReplicaSetNoPrimary', 'servers': [ - {'address': 'b:27017', - 'avg_rtt_ms': 10000, - 'type': 'RSSecondary', - 'tag': {}}, - {'address': 'c:27017', - 'avg_rtt_ms': 20000, - 'type': 'RSSecondary', - 'tag': {}}, - {'address': 'a:27017', - 'avg_rtt_ms': 30000, - 'type': 'RSSecondary', - 'tag': {}}, - ]}} + "topology_description": { + "type": "ReplicaSetNoPrimary", + "servers": [ + {"address": "b:27017", "avg_rtt_ms": 10000, "type": "RSSecondary", "tag": {}}, + {"address": "c:27017", "avg_rtt_ms": 20000, "type": "RSSecondary", "tag": {}}, + {"address": "a:27017", "avg_rtt_ms": 30000, "type": "RSSecondary", "tag": {}}, + ], + } + } # Create & populate Topology such that no server is writeable. - seeds, hosts = get_addresses( - scenario_def["topology_description"]["servers"]) + seeds, hosts = get_addresses(scenario_def["topology_description"]["servers"]) settings = get_topology_settings_dict( - heartbeat_frequency=1, local_threshold_ms=1, seeds=seeds, - server_selector=selector) + heartbeat_frequency=1, local_threshold_ms=1, seeds=seeds, server_selector=selector + ) topology = Topology(TopologySettings(**settings)) topology.open() - for server in scenario_def['topology_description']['servers']: + for server in scenario_def["topology_description"]["servers"]: server_description = make_server_description(server, hosts) topology.on_change(server_description) # Invoke server selection and assert no calls to our custom selector. - with self.assertRaisesRegex( - ServerSelectionTimeoutError, 'No primary available for writes'): - topology.select_server( - writable_server_selector, server_selection_timeout=0.1) + with self.assertRaisesRegex(ServerSelectionTimeoutError, "No primary available for writes"): + topology.select_server(writable_server_selector, server_selection_timeout=0.1) self.assertEqual(selector.call_count, 0) diff --git a/test/test_server_selection_in_window.py b/test/test_server_selection_in_window.py index a0cbcd5f4c..4b24d0d7b0 100644 --- a/test/test_server_selection_in_window.py +++ b/test/test_server_selection_in_window.py @@ -16,18 +16,17 @@ import os import threading +from test import IntegrationTest, client_context, unittest +from test.utils import OvertCommandListener, TestCreator, rs_client, wait_until +from test.utils_selection_tests import create_topology from pymongo.common import clean_node from pymongo.read_preferences import ReadPreference -from test import client_context, IntegrationTest, unittest -from test.utils_selection_tests import create_topology -from test.utils import TestCreator, rs_client, OvertCommandListener, wait_until - # Location of JSON test specifications. 
TEST_PATH = os.path.join( - os.path.dirname(os.path.realpath(__file__)), - os.path.join('server_selection', 'in_window')) + os.path.dirname(os.path.realpath(__file__)), os.path.join("server_selection", "in_window") +) class TestAllScenarios(unittest.TestCase): @@ -35,28 +34,27 @@ def run_scenario(self, scenario_def): topology = create_topology(scenario_def) # Update mock operation_count state: - for mock in scenario_def['mocked_topology_state']: - address = clean_node(mock['address']) + for mock in scenario_def["mocked_topology_state"]: + address = clean_node(mock["address"]) server = topology.get_server_by_address(address) - server.pool.operation_count = mock['operation_count'] + server.pool.operation_count = mock["operation_count"] pref = ReadPreference.NEAREST - counts = dict((address, 0) for address in - topology._description.server_descriptions()) + counts = dict((address, 0) for address in topology._description.server_descriptions()) # Number of times to repeat server selection - iterations = scenario_def['iterations'] + iterations = scenario_def["iterations"] for _ in range(iterations): server = topology.select_server(pref, server_selection_timeout=0) counts[server.description.address] += 1 # Verify expected_frequencies - outcome = scenario_def['outcome'] - tolerance = outcome['tolerance'] - expected_frequencies = outcome['expected_frequencies'] + outcome = scenario_def["outcome"] + tolerance = outcome["tolerance"] + expected_frequencies = outcome["expected_frequencies"] for host_str, freq in expected_frequencies.items(): address = clean_node(host_str) - actual_freq = float(counts[address])/iterations + actual_freq = float(counts[address]) / iterations if freq == 0: # Should be exactly 0. self.assertEqual(actual_freq, 0) @@ -112,7 +110,7 @@ def frequencies(self, client, listener): for thread in threads: self.assertTrue(thread.passed) - events = listener.results['started'] + events = listener.results["started"] self.assertEqual(len(events), N_FINDS * N_THREADS) nodes = client.nodes self.assertEqual(len(nodes), 2) @@ -120,7 +118,7 @@ def frequencies(self, client, listener): for event in events: freqs[event.connection_id] += 1 for address in freqs: - freqs[address] = freqs[address]/float(len(events)) + freqs[address] = freqs[address] / float(len(events)) return freqs @client_context.require_failCommand_appName @@ -129,21 +127,23 @@ def test_load_balancing(self): listener = OvertCommandListener() # PYTHON-2584: Use a large localThresholdMS to avoid the impact of # varying RTTs. 
- client = rs_client(client_context.mongos_seeds(), - appName='loadBalancingTest', - event_listeners=[listener], - localThresholdMS=10000) + client = rs_client( + client_context.mongos_seeds(), + appName="loadBalancingTest", + event_listeners=[listener], + localThresholdMS=10000, + ) self.addCleanup(client.close) - wait_until(lambda: len(client.nodes) == 2, 'discover both nodes') + wait_until(lambda: len(client.nodes) == 2, "discover both nodes") # Delay find commands on delay_finds = { - 'configureFailPoint': 'failCommand', - 'mode': {'times': 10000}, - 'data': { - 'failCommands': ['find'], - 'blockConnection': True, - 'blockTimeMS': 500, - 'appName': 'loadBalancingTest', + "configureFailPoint": "failCommand", + "mode": {"times": 10000}, + "data": { + "failCommands": ["find"], + "blockConnection": True, + "blockTimeMS": 500, + "appName": "loadBalancingTest", }, } with self.fail_point(delay_finds): diff --git a/test/test_server_selection_rtt.py b/test/test_server_selection_rtt.py index f914e03030..d2d8768809 100644 --- a/test/test_server_selection_rtt.py +++ b/test/test_server_selection_rtt.py @@ -21,11 +21,11 @@ sys.path[0:0] = [""] from test import unittest + from pymongo.read_preferences import MovingAverage # Location of JSON test specifications. -_TEST_PATH = os.path.join( - os.path.dirname(os.path.realpath(__file__)), 'server_selection/rtt') +_TEST_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "server_selection/rtt") class TestAllScenarios(unittest.TestCase): @@ -36,14 +36,13 @@ def create_test(scenario_def): def run_scenario(self): moving_average = MovingAverage() - if scenario_def['avg_rtt_ms'] != "NULL": - moving_average.add_sample(scenario_def['avg_rtt_ms']) + if scenario_def["avg_rtt_ms"] != "NULL": + moving_average.add_sample(scenario_def["avg_rtt_ms"]) - if scenario_def['new_rtt_ms'] != "NULL": - moving_average.add_sample(scenario_def['new_rtt_ms']) + if scenario_def["new_rtt_ms"] != "NULL": + moving_average.add_sample(scenario_def["new_rtt_ms"]) - self.assertAlmostEqual(moving_average.get(), - scenario_def['new_avg_rtt']) + self.assertAlmostEqual(moving_average.get(), scenario_def["new_avg_rtt"]) return run_scenario @@ -58,8 +57,7 @@ def create_tests(): # Construct test from scenario. 
new_test = create_test(scenario_def) - test_name = 'test_%s_%s' % ( - dirname, os.path.splitext(filename)[0]) + test_name = "test_%s_%s" % (dirname, os.path.splitext(filename)[0]) new_test.__name__ = test_name setattr(TestAllScenarios, new_test.__name__, new_test) diff --git a/test/test_session.py b/test/test_session.py index 98eccbae36..5a242d6c69 100644 --- a/test/test_session.py +++ b/test/test_session.py @@ -18,7 +18,6 @@ import os import sys import time - from io import BytesIO from typing import Set @@ -26,40 +25,36 @@ sys.path[0:0] = [""] +from test import IntegrationTest, SkipTest, client_context, unittest +from test.utils import EventListener, TestCreator, rs_or_single_client, wait_until +from test.utils_spec_runner import SpecRunner + from bson import DBRef from gridfs import GridFS, GridFSBucket -from pymongo import ASCENDING, InsertOne, IndexModel, monitoring +from pymongo import ASCENDING, IndexModel, InsertOne, monitoring from pymongo.common import _MAX_END_SESSIONS -from pymongo.errors import (ConfigurationError, - InvalidOperation, - OperationFailure) +from pymongo.errors import ConfigurationError, InvalidOperation, OperationFailure from pymongo.read_concern import ReadConcern -from test import IntegrationTest, client_context, unittest, SkipTest -from test.utils import (rs_or_single_client, - EventListener, - TestCreator, - wait_until) -from test.utils_spec_runner import SpecRunner + # Ignore auth commands like saslStart, so we can assert lsid is in all commands. class SessionTestListener(EventListener): def started(self, event): - if not event.command_name.startswith('sasl'): + if not event.command_name.startswith("sasl"): super(SessionTestListener, self).started(event) def succeeded(self, event): - if not event.command_name.startswith('sasl'): + if not event.command_name.startswith("sasl"): super(SessionTestListener, self).succeeded(event) def failed(self, event): - if not event.command_name.startswith('sasl'): + if not event.command_name.startswith("sasl"): super(SessionTestListener, self).failed(event) def first_command_started(self): - assert len(self.results['started']) >= 1, ( - "No command-started events") + assert len(self.results["started"]) >= 1, "No command-started events" - return self.results['started'][0] + return self.results["started"][0] def session_ids(client): @@ -92,20 +87,21 @@ def setUp(self): self.listener = SessionTestListener() self.session_checker_listener = SessionTestListener() self.client = rs_or_single_client( - event_listeners=[self.listener, self.session_checker_listener]) + event_listeners=[self.listener, self.session_checker_listener] + ) self.addCleanup(self.client.close) self.db = self.client.pymongo_test - self.initial_lsids = set(s['id'] for s in session_ids(self.client)) + self.initial_lsids = set(s["id"] for s in session_ids(self.client)) def tearDown(self): """All sessions used in the test must be returned to the pool.""" - self.client.drop_database('pymongo_test') + self.client.drop_database("pymongo_test") used_lsids = self.initial_lsids.copy() - for event in self.session_checker_listener.results['started']: - if 'lsid' in event.command: - used_lsids.add(event.command['lsid']['id']) + for event in self.session_checker_listener.results["started"]: + if "lsid" in event.command: + used_lsids.add(event.command["lsid"]["id"]) - current_lsids = set(s['id'] for s in session_ids(self.client)) + current_lsids = set(s["id"] for s in session_ids(self.client)) self.assertLessEqual(used_lsids, current_lsids) def _test_ops(self, client, *ops): 
@@ -120,21 +116,21 @@ def _test_ops(self, client, *ops): # In case "f" modifies its inputs. args = copy.copy(args) kw = copy.copy(kw) - kw['session'] = s + kw["session"] = s f(*args, **kw) self.assertGreaterEqual(s._server_session.last_use, start) - self.assertGreaterEqual(len(listener.results['started']), 1) - for event in listener.results['started']: + self.assertGreaterEqual(len(listener.results["started"]), 1) + for event in listener.results["started"]: self.assertTrue( - 'lsid' in event.command, - "%s sent no lsid with %s" % ( - f.__name__, event.command_name)) + "lsid" in event.command, + "%s sent no lsid with %s" % (f.__name__, event.command_name), + ) self.assertEqual( s.session_id, - event.command['lsid'], - "%s sent wrong lsid with %s" % ( - f.__name__, event.command_name)) + event.command["lsid"], + "%s sent wrong lsid with %s" % (f.__name__, event.command_name), + ) self.assertFalse(s.has_ended) @@ -147,35 +143,35 @@ def _test_ops(self, client, *ops): # In case "f" modifies its inputs. args = copy.copy(args) kw = copy.copy(kw) - kw['session'] = s + kw["session"] = s with self.assertRaisesRegex( - InvalidOperation, - 'Can only use session with the MongoClient' - ' that started it'): + InvalidOperation, "Can only use session with the MongoClient" " that started it" + ): f(*args, **kw) # No explicit session. for f, args, kw in ops: listener.results.clear() f(*args, **kw) - self.assertGreaterEqual(len(listener.results['started']), 1) + self.assertGreaterEqual(len(listener.results["started"]), 1) lsids = [] - for event in listener.results['started']: + for event in listener.results["started"]: self.assertTrue( - 'lsid' in event.command, - "%s sent no lsid with %s" % ( - f.__name__, event.command_name)) + "lsid" in event.command, + "%s sent no lsid with %s" % (f.__name__, event.command_name), + ) - lsids.append(event.command['lsid']) + lsids.append(event.command["lsid"]) - if not (sys.platform.startswith('java') or 'PyPy' in sys.version): + if not (sys.platform.startswith("java") or "PyPy" in sys.version): # Server session was returned to pool. Ignore interpreters with # non-deterministic GC. for lsid in lsids: self.assertIn( - lsid, session_ids(client), - "%s did not return implicit session to pool" % ( - f.__name__,)) + lsid, + session_ids(client), + "%s did not return implicit session to pool" % (f.__name__,), + ) def test_pool_lifo(self): # "Pool is LIFO" test from Driver Sessions Spec. @@ -215,31 +211,28 @@ def test_end_sessions(self): listener = SessionTestListener() client = rs_or_single_client(event_listeners=[listener]) # Start many sessions. - sessions = [client.start_session() - for _ in range(_MAX_END_SESSIONS + 1)] + sessions = [client.start_session() for _ in range(_MAX_END_SESSIONS + 1)] for s in sessions: s.end_session() # Closing the client should end all sessions and clear the pool. - self.assertEqual(len(client._topology._session_pool), - _MAX_END_SESSIONS + 1) + self.assertEqual(len(client._topology._session_pool), _MAX_END_SESSIONS + 1) client.close() self.assertEqual(len(client._topology._session_pool), 0) - end_sessions = [e for e in listener.results['started'] - if e.command_name == 'endSessions'] + end_sessions = [e for e in listener.results["started"] if e.command_name == "endSessions"] self.assertEqual(len(end_sessions), 2) # Closing again should not send any commands. 
listener.results.clear() client.close() - self.assertEqual(len(listener.results['started']), 0) + self.assertEqual(len(listener.results["started"]), 0) def test_client(self): client = self.client ops: list = [ (client.server_info, [], {}), (client.list_database_names, [], {}), - (client.drop_database, ['pymongo_test'], {}), + (client.drop_database, ["pymongo_test"], {}), ] self._test_ops(client, *ops) @@ -248,12 +241,12 @@ def test_database(self): client = self.client db = client.pymongo_test ops: list = [ - (db.command, ['ping'], {}), - (db.create_collection, ['collection'], {}), + (db.command, ["ping"], {}), + (db.create_collection, ["collection"], {}), (db.list_collection_names, [], {}), - (db.validate_collection, ['collection'], {}), - (db.drop_collection, ['collection'], {}), - (db.dereference, [DBRef('collection', 1)], {}), + (db.validate_collection, ["collection"], {}), + (db.drop_collection, ["collection"], {}), + (db.dereference, [DBRef("collection", 1)], {}), ] self._test_ops(client, *ops) @@ -266,19 +259,19 @@ def collection_write_ops(coll): (coll.insert_one, [{}], {}), (coll.insert_many, [[{}, {}]], {}), (coll.replace_one, [{}, {}], {}), - (coll.update_one, [{}, {'$set': {'a': 1}}], {}), - (coll.update_many, [{}, {'$set': {'a': 1}}], {}), + (coll.update_one, [{}, {"$set": {"a": 1}}], {}), + (coll.update_many, [{}, {"$set": {"a": 1}}], {}), (coll.delete_one, [{}], {}), (coll.delete_many, [{}], {}), (coll.find_one_and_replace, [{}, {}], {}), - (coll.find_one_and_update, [{}, {'$set': {'a': 1}}], {}), + (coll.find_one_and_update, [{}, {"$set": {"a": 1}}], {}), (coll.find_one_and_delete, [{}, {}], {}), - (coll.rename, ['collection2'], {}), + (coll.rename, ["collection2"], {}), # Drop collection2 between tests of "rename", above. - (coll.database.drop_collection, ['collection2'], {}), - (coll.create_indexes, [[IndexModel('a')]], {}), - (coll.create_index, ['a'], {}), - (coll.drop_index, ['a_1'], {}), + (coll.database.drop_collection, ["collection2"], {}), + (coll.create_indexes, [[IndexModel("a")]], {}), + (coll.create_index, ["a"], {}), + (coll.drop_index, ["a_1"], {}), (coll.drop_indexes, [], {}), (coll.aggregate, [[{"$out": "aggout"}]], {}), ] @@ -289,15 +282,17 @@ def test_collection(self): # Test some collection methods - the rest are in test_cursor. ops = self.collection_write_ops(coll) - ops.extend([ - (coll.distinct, ['a'], {}), - (coll.find_one, [], {}), - (coll.count_documents, [{}], {}), - (coll.list_indexes, [], {}), - (coll.index_information, [], {}), - (coll.options, [], {}), - (coll.aggregate, [[]], {}), - ]) + ops.extend( + [ + (coll.distinct, ["a"], {}), + (coll.find_one, [], {}), + (coll.count_documents, [{}], {}), + (coll.list_indexes, [], {}), + (coll.index_information, [], {}), + (coll.options, [], {}), + (coll.aggregate, [[]], {}), + ] + ) self._test_ops(client, *ops) @@ -335,29 +330,28 @@ def test_cursor(self): # Test all cursor methods. 
ops = [ - ('find', lambda session: list(coll.find(session=session))), - ('getitem', lambda session: coll.find(session=session)[0]), - ('distinct', - lambda session: coll.find(session=session).distinct('a')), - ('explain', lambda session: coll.find(session=session).explain()), + ("find", lambda session: list(coll.find(session=session))), + ("getitem", lambda session: coll.find(session=session)[0]), + ("distinct", lambda session: coll.find(session=session).distinct("a")), + ("explain", lambda session: coll.find(session=session).explain()), ] for name, f in ops: with client.start_session() as s: listener.results.clear() f(session=s) - self.assertGreaterEqual(len(listener.results['started']), 1) - for event in listener.results['started']: + self.assertGreaterEqual(len(listener.results["started"]), 1) + for event in listener.results["started"]: self.assertTrue( - 'lsid' in event.command, - "%s sent no lsid with %s" % ( - name, event.command_name)) + "lsid" in event.command, + "%s sent no lsid with %s" % (name, event.command_name), + ) self.assertEqual( s.session_id, - event.command['lsid'], - "%s sent wrong lsid with %s" % ( - name, event.command_name)) + event.command["lsid"], + "%s sent wrong lsid with %s" % (name, event.command_name), + ) with self.assertRaisesRegex(InvalidOperation, "ended session"): f(session=s) @@ -368,67 +362,64 @@ def test_cursor(self): f(session=None) event0 = listener.first_command_started() self.assertTrue( - 'lsid' in event0.command, - "%s sent no lsid with %s" % ( - name, event0.command_name)) + "lsid" in event0.command, "%s sent no lsid with %s" % (name, event0.command_name) + ) - lsid = event0.command['lsid'] + lsid = event0.command["lsid"] - for event in listener.results['started'][1:]: + for event in listener.results["started"][1:]: self.assertTrue( - 'lsid' in event.command, - "%s sent no lsid with %s" % ( - name, event.command_name)) + "lsid" in event.command, "%s sent no lsid with %s" % (name, event.command_name) + ) self.assertEqual( lsid, - event.command['lsid'], - "%s sent wrong lsid with %s" % ( - name, event.command_name)) + event.command["lsid"], + "%s sent wrong lsid with %s" % (name, event.command_name), + ) def test_gridfs(self): client = self.client fs = GridFS(client.pymongo_test) def new_file(session=None): - grid_file = fs.new_file(_id=1, filename='f', session=session) + grid_file = fs.new_file(_id=1, filename="f", session=session) # 1 MB, 5 chunks, to test that each chunk is fetched with same lsid. 
- grid_file.write(b'a' * 1048576) + grid_file.write(b"a" * 1048576) grid_file.close() def find(session=None): - files = list(fs.find({'_id': 1}, session=session)) + files = list(fs.find({"_id": 1}, session=session)) for f in files: f.read() self._test_ops( client, (new_file, [], {}), - (fs.put, [b'data'], {}), + (fs.put, [b"data"], {}), (lambda session=None: fs.get(1, session=session).read(), [], {}), - (lambda session=None: fs.get_version('f', session=session).read(), - [], {}), - (lambda session=None: - fs.get_last_version('f', session=session).read(), [], {}), + (lambda session=None: fs.get_version("f", session=session).read(), [], {}), + (lambda session=None: fs.get_last_version("f", session=session).read(), [], {}), (fs.list, [], {}), (fs.find_one, [1], {}), (lambda session=None: list(fs.find(session=session)), [], {}), (fs.exists, [1], {}), (find, [], {}), - (fs.delete, [1], {})) + (fs.delete, [1], {}), + ) def test_gridfs_bucket(self): client = self.client bucket = GridFSBucket(client.pymongo_test) def upload(session=None): - stream = bucket.open_upload_stream('f', session=session) - stream.write(b'a' * 1048576) + stream = bucket.open_upload_stream("f", session=session) + stream.write(b"a" * 1048576) stream.close() def upload_with_id(session=None): - stream = bucket.open_upload_stream_with_id(1, 'f1', session=session) - stream.write(b'a' * 1048576) + stream = bucket.open_upload_stream_with_id(1, "f1", session=session) + stream.write(b"a" * 1048576) stream.close() def open_download_stream(session=None): @@ -436,11 +427,11 @@ def open_download_stream(session=None): stream.read() def open_download_stream_by_name(session=None): - stream = bucket.open_download_stream_by_name('f', session=session) + stream = bucket.open_download_stream_by_name("f", session=session) stream.read() def find(session=None): - files = list(bucket.find({'_id': 1}, session=session)) + files = list(bucket.find({"_id": 1}, session=session)) for f in files: f.read() @@ -450,17 +441,18 @@ def find(session=None): client, (upload, [], {}), (upload_with_id, [], {}), - (bucket.upload_from_stream, ['f', b'data'], {}), - (bucket.upload_from_stream_with_id, [2, 'f', b'data'], {}), + (bucket.upload_from_stream, ["f", b"data"], {}), + (bucket.upload_from_stream_with_id, [2, "f", b"data"], {}), (open_download_stream, [], {}), (open_download_stream_by_name, [], {}), (bucket.download_to_stream, [1, sio], {}), - (bucket.download_to_stream_by_name, ['f', sio], {}), + (bucket.download_to_stream_by_name, ["f", sio], {}), (find, [], {}), - (bucket.rename, [1, 'f2'], {}), + (bucket.rename, [1, "f2"], {}), # Delete both files so _test_ops can run these operations twice. (bucket.delete, [1], {}), - (bucket.delete, [2], {})) + (bucket.delete, [2], {}), + ) def test_gridfsbucket_cursor(self): client = self.client @@ -468,7 +460,7 @@ def test_gridfsbucket_cursor(self): for file_id in 1, 2: stream = bucket.open_upload_stream_with_id(file_id, str(file_id)) - stream.write(b'a' * 1048576) + stream.write(b"a" * 1048576) stream.close() with client.start_session() as s: @@ -518,10 +510,7 @@ def test_aggregate(self): coll = client.pymongo_test.collection def agg(session=None): - list(coll.aggregate( - [], - batchSize=2, - session=session)) + list(coll.aggregate([], batchSize=2, session=session)) # With empty collection. 
self._test_ops(client, (agg, [], {})) @@ -553,11 +542,11 @@ def test_aggregate_error(self): listener.results.clear() with self.assertRaises(OperationFailure): - coll.aggregate([{'$badOperation': {'bar': 1}}]) + coll.aggregate([{"$badOperation": {"bar": 1}}]) event = listener.first_command_started() - self.assertEqual(event.command_name, 'aggregate') - lsid = event.command['lsid'] + self.assertEqual(event.command_name, "aggregate") + lsid = event.command["lsid"] # Session was returned to pool despite error. self.assertIn(lsid, session_ids(client)) @@ -568,7 +557,7 @@ def _test_cursor_helper(self, create_cursor, close_cursor): cursor = create_cursor(coll, None) next(cursor) # Session is "owned" by cursor. - session = getattr(cursor, '_%s__session' % cursor.__class__.__name__) + session = getattr(cursor, "_%s__session" % cursor.__class__.__name__) self.assertIsNotNone(session) lsid = session.session_id next(cursor) @@ -591,45 +580,46 @@ def _test_cursor_helper(self, create_cursor, close_cursor): def test_cursor_close(self): self._test_cursor_helper( - lambda coll, session: coll.find(session=session), - lambda cursor: cursor.close()) + lambda coll, session: coll.find(session=session), lambda cursor: cursor.close() + ) def test_command_cursor_close(self): self._test_cursor_helper( - lambda coll, session: coll.aggregate([], session=session), - lambda cursor: cursor.close()) + lambda coll, session: coll.aggregate([], session=session), lambda cursor: cursor.close() + ) def test_cursor_del(self): self._test_cursor_helper( - lambda coll, session: coll.find(session=session), - lambda cursor: cursor.__del__()) + lambda coll, session: coll.find(session=session), lambda cursor: cursor.__del__() + ) def test_command_cursor_del(self): self._test_cursor_helper( lambda coll, session: coll.aggregate([], session=session), - lambda cursor: cursor.__del__()) + lambda cursor: cursor.__del__(), + ) def test_cursor_exhaust(self): self._test_cursor_helper( - lambda coll, session: coll.find(session=session), - lambda cursor: list(cursor)) + lambda coll, session: coll.find(session=session), lambda cursor: list(cursor) + ) def test_command_cursor_exhaust(self): self._test_cursor_helper( - lambda coll, session: coll.aggregate([], session=session), - lambda cursor: list(cursor)) + lambda coll, session: coll.aggregate([], session=session), lambda cursor: list(cursor) + ) def test_cursor_limit_reached(self): self._test_cursor_helper( - lambda coll, session: coll.find(limit=4, batch_size=2, - session=session), - lambda cursor: list(cursor)) + lambda coll, session: coll.find(limit=4, batch_size=2, session=session), + lambda cursor: list(cursor), + ) def test_command_cursor_limit_reached(self): self._test_cursor_helper( - lambda coll, session: coll.aggregate([], batchSize=900, - session=session), - lambda cursor: list(cursor)) + lambda coll, session: coll.aggregate([], batchSize=900, session=session), + lambda cursor: list(cursor), + ) def _test_unacknowledged_ops(self, client, *ops): listener = client.options.event_listeners[0] @@ -640,23 +630,23 @@ def _test_unacknowledged_ops(self, client, *ops): # In case "f" modifies its inputs. 
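                # (A shallow copy is enough here: only the top-level kw dict
                # is mutated below when the session is injected, so the shared
                # ops list stays untouched.)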
args = copy.copy(args) kw = copy.copy(kw) - kw['session'] = s + kw["session"] = s with self.assertRaises( - ConfigurationError, - msg="%s did not raise ConfigurationError" % ( - f.__name__,)): + ConfigurationError, msg="%s did not raise ConfigurationError" % (f.__name__,) + ): f(*args, **kw) - if f.__name__ == 'create_collection': + if f.__name__ == "create_collection": # create_collection runs listCollections first. - event = listener.results['started'].pop(0) - self.assertEqual('listCollections', event.command_name) - self.assertIn('lsid', event.command, - "%s sent no lsid with %s" % ( - f.__name__, event.command_name)) + event = listener.results["started"].pop(0) + self.assertEqual("listCollections", event.command_name) + self.assertIn( + "lsid", + event.command, + "%s sent no lsid with %s" % (f.__name__, event.command_name), + ) # Should not run any command before raising an error. - self.assertFalse(listener.results['started'], - "%s sent command" % (f.__name__,)) + self.assertFalse(listener.results["started"], "%s sent command" % (f.__name__,)) self.assertTrue(s.has_ended) @@ -664,20 +654,22 @@ def _test_unacknowledged_ops(self, client, *ops): for f, args, kw in ops: listener.results.clear() f(*args, **kw) - self.assertGreaterEqual(len(listener.results['started']), 1) + self.assertGreaterEqual(len(listener.results["started"]), 1) - if f.__name__ == 'create_collection': + if f.__name__ == "create_collection": # create_collection runs listCollections first. - event = listener.results['started'].pop(0) - self.assertEqual('listCollections', event.command_name) - self.assertIn('lsid', event.command, - "%s sent no lsid with %s" % ( - f.__name__, event.command_name)) - - for event in listener.results['started']: - self.assertNotIn('lsid', event.command, - "%s sent lsid with %s" % ( - f.__name__, event.command_name)) + event = listener.results["started"].pop(0) + self.assertEqual("listCollections", event.command_name) + self.assertIn( + "lsid", + event.command, + "%s sent no lsid with %s" % (f.__name__, event.command_name), + ) + + for event in listener.results["started"]: + self.assertNotIn( + "lsid", event.command, "%s sent lsid with %s" % (f.__name__, event.command_name) + ) def test_unacknowledged_writes(self): # Ensure the collection exists. 
@@ -688,8 +680,8 @@ def test_unacknowledged_writes(self): coll = db.test_unacked_writes ops: list = [ (client.drop_database, [db.name], {}), - (db.create_collection, ['collection'], {}), - (db.drop_collection, ['collection'], {}), + (db.create_collection, ["collection"], {}), + (db.drop_collection, ["collection"], {}), ] ops.extend(self.collection_write_ops(coll)) self._test_unacknowledged_ops(client, *ops) @@ -705,21 +697,17 @@ def drop_db(): return False raise - wait_until(drop_db, 'dropped database after w=0 writes') + wait_until(drop_db, "dropped database after w=0 writes") def test_snapshot_incompatible_with_causal_consistency(self): - with self.client.start_session(causal_consistency=False, - snapshot=False): + with self.client.start_session(causal_consistency=False, snapshot=False): pass - with self.client.start_session(causal_consistency=False, - snapshot=True): + with self.client.start_session(causal_consistency=False, snapshot=True): pass - with self.client.start_session(causal_consistency=True, - snapshot=False): + with self.client.start_session(causal_consistency=True, snapshot=False): pass with self.assertRaises(ConfigurationError): - with self.client.start_session(causal_consistency=True, - snapshot=True): + with self.client.start_session(causal_consistency=True, snapshot=True): pass def test_session_not_copyable(self): @@ -727,6 +715,7 @@ def test_session_not_copyable(self): with client.start_session() as s: self.assertRaises(TypeError, lambda: copy.copy(s)) + class TestCausalConsistency(unittest.TestCase): listener: SessionTestListener client: MongoClient @@ -751,33 +740,32 @@ def test_core(self): self.assertIsNone(sess.operation_time) self.listener.results.clear() self.client.pymongo_test.test.find_one(session=sess) - started = self.listener.results['started'][0] + started = self.listener.results["started"][0] cmd = started.command - self.assertIsNone(cmd.get('readConcern')) + self.assertIsNone(cmd.get("readConcern")) op_time = sess.operation_time self.assertIsNotNone(op_time) - succeeded = self.listener.results['succeeded'][0] + succeeded = self.listener.results["succeeded"][0] reply = succeeded.reply - self.assertEqual(op_time, reply.get('operationTime')) + self.assertEqual(op_time, reply.get("operationTime")) # No explicit session self.client.pymongo_test.test.insert_one({}) self.assertEqual(sess.operation_time, op_time) self.listener.results.clear() try: - self.client.pymongo_test.command('doesntexist', session=sess) + self.client.pymongo_test.command("doesntexist", session=sess) except: pass - failed = self.listener.results['failed'][0] - failed_op_time = failed.failure.get('operationTime') + failed = self.listener.results["failed"][0] + failed_op_time = failed.failure.get("operationTime") # Some older builds of MongoDB 3.5 / 3.6 return None for # operationTime when a command fails. Make sure we don't # change operation_time to None. 
if failed_op_time is None: self.assertIsNotNone(sess.operation_time) else: - self.assertEqual( - sess.operation_time, failed_op_time) + self.assertEqual(sess.operation_time, failed_op_time) with self.client.start_session() as sess2: self.assertIsNone(sess2.cluster_time) @@ -805,36 +793,32 @@ def _test_reads(self, op, exception=None): op(coll, sess) else: op(coll, sess) - act = self.listener.results['started'][0].command.get( - 'readConcern', {}).get('afterClusterTime') + act = ( + self.listener.results["started"][0] + .command.get("readConcern", {}) + .get("afterClusterTime") + ) self.assertEqual(operation_time, act) @client_context.require_no_standalone def test_reads(self): # Make sure the collection exists. self.client.pymongo_test.test.insert_one({}) + self._test_reads(lambda coll, session: list(coll.aggregate([], session=session))) + self._test_reads(lambda coll, session: list(coll.find({}, session=session))) + self._test_reads(lambda coll, session: coll.find_one({}, session=session)) + self._test_reads(lambda coll, session: coll.count_documents({}, session=session)) + self._test_reads(lambda coll, session: coll.distinct("foo", session=session)) self._test_reads( - lambda coll, session: list(coll.aggregate([], session=session))) - self._test_reads( - lambda coll, session: list(coll.find({}, session=session))) - self._test_reads( - lambda coll, session: coll.find_one({}, session=session)) - self._test_reads( - lambda coll, session: coll.count_documents({}, session=session)) - self._test_reads( - lambda coll, session: coll.distinct('foo', session=session)) - self._test_reads( - lambda coll, session: list(coll.aggregate_raw_batches( - [], session=session))) - self._test_reads( - lambda coll, session: list(coll.find_raw_batches( - {}, session=session))) + lambda coll, session: list(coll.aggregate_raw_batches([], session=session)) + ) + self._test_reads(lambda coll, session: list(coll.find_raw_batches({}, session=session))) self.assertRaises( ConfigurationError, self._test_reads, - lambda coll, session: coll.estimated_document_count( - session=session)) + lambda coll, session: coll.estimated_document_count(session=session), + ) def _test_writes(self, op): coll = self.client.pymongo_test.test @@ -844,50 +828,46 @@ def _test_writes(self, op): self.assertIsNotNone(operation_time) self.listener.results.clear() coll.find_one({}, session=sess) - act = self.listener.results['started'][0].command.get( - 'readConcern', {}).get('afterClusterTime') + act = ( + self.listener.results["started"][0] + .command.get("readConcern", {}) + .get("afterClusterTime") + ) self.assertEqual(operation_time, act) @client_context.require_no_standalone def test_writes(self): + self._test_writes(lambda coll, session: coll.bulk_write([InsertOne({})], session=session)) + self._test_writes(lambda coll, session: coll.insert_one({}, session=session)) + self._test_writes(lambda coll, session: coll.insert_many([{}], session=session)) self._test_writes( - lambda coll, session: coll.bulk_write( - [InsertOne({})], session=session)) - self._test_writes( - lambda coll, session: coll.insert_one({}, session=session)) + lambda coll, session: coll.replace_one({"_id": 1}, {"x": 1}, session=session) + ) self._test_writes( - lambda coll, session: coll.insert_many([{}], session=session)) + lambda coll, session: coll.update_one({}, {"$set": {"X": 1}}, session=session) + ) self._test_writes( - lambda coll, session: coll.replace_one( - {'_id': 1}, {'x': 1}, session=session)) + lambda coll, session: coll.update_many({}, {"$set": {"x": 1}}, 
session=session) + ) + self._test_writes(lambda coll, session: coll.delete_one({}, session=session)) + self._test_writes(lambda coll, session: coll.delete_many({}, session=session)) self._test_writes( - lambda coll, session: coll.update_one( - {}, {'$set': {'X': 1}}, session=session)) - self._test_writes( - lambda coll, session: coll.update_many( - {}, {'$set': {'x': 1}}, session=session)) - self._test_writes( - lambda coll, session: coll.delete_one({}, session=session)) - self._test_writes( - lambda coll, session: coll.delete_many({}, session=session)) - self._test_writes( - lambda coll, session: coll.find_one_and_replace( - {'x': 1}, {'y': 1}, session=session)) + lambda coll, session: coll.find_one_and_replace({"x": 1}, {"y": 1}, session=session) + ) self._test_writes( lambda coll, session: coll.find_one_and_update( - {'y': 1}, {'$set': {'x': 1}}, session=session)) - self._test_writes( - lambda coll, session: coll.find_one_and_delete( - {'x': 1}, session=session)) - self._test_writes( - lambda coll, session: coll.create_index("foo", session=session)) + {"y": 1}, {"$set": {"x": 1}}, session=session + ) + ) + self._test_writes(lambda coll, session: coll.find_one_and_delete({"x": 1}, session=session)) + self._test_writes(lambda coll, session: coll.create_index("foo", session=session)) self._test_writes( lambda coll, session: coll.create_indexes( - [IndexModel([("bar", ASCENDING)])], session=session)) - self._test_writes( - lambda coll, session: coll.drop_index("foo_1", session=session)) - self._test_writes( - lambda coll, session: coll.drop_indexes(session=session)) + [IndexModel([("bar", ASCENDING)])], session=session + ) + ) + self._test_writes(lambda coll, session: coll.drop_index("foo_1", session=session)) + self._test_writes(lambda coll, session: coll.drop_indexes(session=session)) def _test_no_read_concern(self, op): coll = self.client.pymongo_test.test @@ -897,61 +877,56 @@ def _test_no_read_concern(self, op): self.assertIsNotNone(operation_time) self.listener.results.clear() op(coll, sess) - rc = self.listener.results['started'][0].command.get( - 'readConcern') + rc = self.listener.results["started"][0].command.get("readConcern") self.assertIsNone(rc) @client_context.require_no_standalone def test_writes_do_not_include_read_concern(self): self._test_no_read_concern( - lambda coll, session: coll.bulk_write( - [InsertOne({})], session=session)) - self._test_no_read_concern( - lambda coll, session: coll.insert_one({}, session=session)) + lambda coll, session: coll.bulk_write([InsertOne({})], session=session) + ) + self._test_no_read_concern(lambda coll, session: coll.insert_one({}, session=session)) + self._test_no_read_concern(lambda coll, session: coll.insert_many([{}], session=session)) self._test_no_read_concern( - lambda coll, session: coll.insert_many([{}], session=session)) + lambda coll, session: coll.replace_one({"_id": 1}, {"x": 1}, session=session) + ) self._test_no_read_concern( - lambda coll, session: coll.replace_one( - {'_id': 1}, {'x': 1}, session=session)) + lambda coll, session: coll.update_one({}, {"$set": {"X": 1}}, session=session) + ) self._test_no_read_concern( - lambda coll, session: coll.update_one( - {}, {'$set': {'X': 1}}, session=session)) + lambda coll, session: coll.update_many({}, {"$set": {"x": 1}}, session=session) + ) + self._test_no_read_concern(lambda coll, session: coll.delete_one({}, session=session)) + self._test_no_read_concern(lambda coll, session: coll.delete_many({}, session=session)) self._test_no_read_concern( - lambda coll, session: 
coll.update_many( - {}, {'$set': {'x': 1}}, session=session)) - self._test_no_read_concern( - lambda coll, session: coll.delete_one({}, session=session)) - self._test_no_read_concern( - lambda coll, session: coll.delete_many({}, session=session)) - self._test_no_read_concern( - lambda coll, session: coll.find_one_and_replace( - {'x': 1}, {'y': 1}, session=session)) + lambda coll, session: coll.find_one_and_replace({"x": 1}, {"y": 1}, session=session) + ) self._test_no_read_concern( lambda coll, session: coll.find_one_and_update( - {'y': 1}, {'$set': {'x': 1}}, session=session)) - self._test_no_read_concern( - lambda coll, session: coll.find_one_and_delete( - {'x': 1}, session=session)) + {"y": 1}, {"$set": {"x": 1}}, session=session + ) + ) self._test_no_read_concern( - lambda coll, session: coll.create_index("foo", session=session)) + lambda coll, session: coll.find_one_and_delete({"x": 1}, session=session) + ) + self._test_no_read_concern(lambda coll, session: coll.create_index("foo", session=session)) self._test_no_read_concern( lambda coll, session: coll.create_indexes( - [IndexModel([("bar", ASCENDING)])], session=session)) - self._test_no_read_concern( - lambda coll, session: coll.drop_index("foo_1", session=session)) - self._test_no_read_concern( - lambda coll, session: coll.drop_indexes(session=session)) + [IndexModel([("bar", ASCENDING)])], session=session + ) + ) + self._test_no_read_concern(lambda coll, session: coll.drop_index("foo_1", session=session)) + self._test_no_read_concern(lambda coll, session: coll.drop_indexes(session=session)) # Not a write, but explain also doesn't support readConcern. - self._test_no_read_concern( - lambda coll, session: coll.find({}, session=session).explain()) + self._test_no_read_concern(lambda coll, session: coll.find({}, session=session).explain()) @client_context.require_no_standalone @client_context.require_version_max(4, 1, 0) def test_aggregate_out_does_not_include_read_concern(self): self._test_no_read_concern( - lambda coll, session: list( - coll.aggregate([{"$out": "aggout"}], session=session))) + lambda coll, session: list(coll.aggregate([{"$out": "aggout"}], session=session)) + ) @client_context.require_no_standalone def test_get_more_does_not_include_read_concern(self): @@ -965,17 +940,20 @@ def test_get_more_does_not_include_read_concern(self): next(cursor) self.listener.results.clear() list(cursor) - started = self.listener.results['started'][0] - self.assertEqual(started.command_name, 'getMore') - self.assertIsNone(started.command.get('readConcern')) + started = self.listener.results["started"][0] + self.assertEqual(started.command_name, "getMore") + self.assertIsNone(started.command.get("readConcern")) def test_session_not_causal(self): with self.client.start_session(causal_consistency=False) as s: self.client.pymongo_test.test.insert_one({}, session=s) self.listener.results.clear() self.client.pymongo_test.test.find_one({}, session=s) - act = self.listener.results['started'][0].command.get( - 'readConcern', {}).get('afterClusterTime') + act = ( + self.listener.results["started"][0] + .command.get("readConcern", {}) + .get("afterClusterTime") + ) self.assertIsNone(act) @client_context.require_standalone @@ -984,8 +962,11 @@ def test_server_not_causal(self): self.client.pymongo_test.test.insert_one({}, session=s) self.listener.results.clear() self.client.pymongo_test.test.find_one({}, session=s) - act = self.listener.results['started'][0].command.get( - 'readConcern', {}).get('afterClusterTime') + act = ( + 
self.listener.results["started"][0] + .command.get("readConcern", {}) + .get("afterClusterTime") + ) self.assertIsNone(act) @client_context.require_no_standalone @@ -996,28 +977,25 @@ def test_read_concern(self): coll.insert_one({}, session=s) self.listener.results.clear() coll.find_one({}, session=s) - read_concern = self.listener.results['started'][0].command.get( - 'readConcern') + read_concern = self.listener.results["started"][0].command.get("readConcern") self.assertIsNotNone(read_concern) - self.assertIsNone(read_concern.get('level')) - self.assertIsNotNone(read_concern.get('afterClusterTime')) + self.assertIsNone(read_concern.get("level")) + self.assertIsNotNone(read_concern.get("afterClusterTime")) coll = coll.with_options(read_concern=ReadConcern("majority")) self.listener.results.clear() coll.find_one({}, session=s) - read_concern = self.listener.results['started'][0].command.get( - 'readConcern') + read_concern = self.listener.results["started"][0].command.get("readConcern") self.assertIsNotNone(read_concern) - self.assertEqual(read_concern.get('level'), 'majority') - self.assertIsNotNone(read_concern.get('afterClusterTime')) + self.assertEqual(read_concern.get("level"), "majority") + self.assertIsNotNone(read_concern.get("afterClusterTime")) @client_context.require_no_standalone def test_cluster_time_with_server_support(self): self.client.pymongo_test.test.insert_one({}) self.listener.results.clear() self.client.pymongo_test.test.find_one({}) - after_cluster_time = self.listener.results['started'][0].command.get( - '$clusterTime') + after_cluster_time = self.listener.results["started"][0].command.get("$clusterTime") self.assertIsNotNone(after_cluster_time) @client_context.require_standalone @@ -1025,22 +1003,20 @@ def test_cluster_time_no_server_support(self): self.client.pymongo_test.test.insert_one({}) self.listener.results.clear() self.client.pymongo_test.test.find_one({}) - after_cluster_time = self.listener.results['started'][0].command.get( - '$clusterTime') + after_cluster_time = self.listener.results["started"][0].command.get("$clusterTime") self.assertIsNone(after_cluster_time) class TestClusterTime(IntegrationTest): def setUp(self): super(TestClusterTime, self).setUp() - if '$clusterTime' not in client_context.hello: - raise SkipTest('$clusterTime not supported') + if "$clusterTime" not in client_context.hello: + raise SkipTest("$clusterTime not supported") def test_cluster_time(self): listener = SessionTestListener() # Prevent heartbeats from updating $clusterTime between operations. - client = rs_or_single_client(event_listeners=[listener], - heartbeatFrequencyMS=999999) + client = rs_or_single_client(event_listeners=[listener], heartbeatFrequencyMS=999999) self.addCleanup(client.close) collection = client.pymongo_test.collection # Prepare for tests of find() and aggregate(). @@ -1051,7 +1027,7 @@ def test_cluster_time(self): def rename_and_drop(): # Ensure collection exists. collection.insert_one({}) - collection.rename('collection2') + collection.rename("collection2") client.pymongo_test.collection2.drop() def insert_and_find(): @@ -1074,22 +1050,19 @@ def insert_and_aggregate(): ops = [ # Tests from Driver Sessions Spec. 
- ('ping', lambda: client.admin.command('ping')), - ('aggregate', lambda: list(collection.aggregate([]))), - ('find', lambda: list(collection.find())), - ('insert_one', lambda: collection.insert_one({})), - + ("ping", lambda: client.admin.command("ping")), + ("aggregate", lambda: list(collection.aggregate([]))), + ("find", lambda: list(collection.find())), + ("insert_one", lambda: collection.insert_one({})), # Additional PyMongo tests. - ('insert_and_find', insert_and_find), - ('insert_and_aggregate', insert_and_aggregate), - ('update_one', - lambda: collection.update_one({}, {'$set': {'x': 1}})), - ('update_many', - lambda: collection.update_many({}, {'$set': {'x': 1}})), - ('delete_one', lambda: collection.delete_one({})), - ('delete_many', lambda: collection.delete_many({})), - ('bulk_write', lambda: collection.bulk_write([InsertOne({})])), - ('rename_and_drop', rename_and_drop), + ("insert_and_find", insert_and_find), + ("insert_and_aggregate", insert_and_aggregate), + ("update_one", lambda: collection.update_one({}, {"$set": {"x": 1}})), + ("update_many", lambda: collection.update_many({}, {"$set": {"x": 1}})), + ("delete_one", lambda: collection.delete_one({})), + ("delete_many", lambda: collection.delete_many({})), + ("bulk_write", lambda: collection.bulk_write([InsertOne({})])), + ("rename_and_drop", rename_and_drop), ] for name, f in ops: @@ -1100,48 +1073,48 @@ def insert_and_aggregate(): collection.insert_one({}) f() - self.assertGreaterEqual(len(listener.results['started']), 1) - for i, event in enumerate(listener.results['started']): + self.assertGreaterEqual(len(listener.results["started"]), 1) + for i, event in enumerate(listener.results["started"]): self.assertTrue( - '$clusterTime' in event.command, - "%s sent no $clusterTime with %s" % ( - f.__name__, event.command_name)) + "$clusterTime" in event.command, + "%s sent no $clusterTime with %s" % (f.__name__, event.command_name), + ) if i > 0: - succeeded = listener.results['succeeded'][i - 1] + succeeded = listener.results["succeeded"][i - 1] self.assertTrue( - '$clusterTime' in succeeded.reply, - "%s received no $clusterTime with %s" % ( - f.__name__, succeeded.command_name)) + "$clusterTime" in succeeded.reply, + "%s received no $clusterTime with %s" + % (f.__name__, succeeded.command_name), + ) self.assertTrue( - event.command['$clusterTime']['clusterTime'] >= - succeeded.reply['$clusterTime']['clusterTime'], - "%s sent wrong $clusterTime with %s" % ( - f.__name__, event.command_name)) + event.command["$clusterTime"]["clusterTime"] + >= succeeded.reply["$clusterTime"]["clusterTime"], + "%s sent wrong $clusterTime with %s" % (f.__name__, event.command_name), + ) class TestSpec(SpecRunner): RUN_ON_SERVERLESS = True # Location of JSON test specifications. 
- TEST_PATH = os.path.join( - os.path.dirname(os.path.realpath(__file__)), 'sessions', 'legacy') + TEST_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "sessions", "legacy") def last_two_command_events(self): """Return the last two command started events.""" - started_events = self.listener.results['started'][-2:] + started_events = self.listener.results["started"][-2:] self.assertEqual(2, len(started_events)) return started_events def assert_same_lsid_on_last_two_commands(self): """Run the assertSameLsidOnLastTwoCommands test operation.""" event1, event2 = self.last_two_command_events() - self.assertEqual(event1.command['lsid'], event2.command['lsid']) + self.assertEqual(event1.command["lsid"], event2.command["lsid"]) def assert_different_lsid_on_last_two_commands(self): """Run the assertDifferentLsidOnLastTwoCommands test operation.""" event1, event2 = self.last_two_command_events() - self.assertNotEqual(event1.command['lsid'], event2.command['lsid']) + self.assertNotEqual(event1.command["lsid"], event2.command["lsid"]) def assert_session_dirty(self, session): """Run the assertSessionDirty test operation. diff --git a/test/test_sessions_unified.py b/test/test_sessions_unified.py index fe25536e7e..2320d52718 100644 --- a/test/test_sessions_unified.py +++ b/test/test_sessions_unified.py @@ -23,8 +23,7 @@ from test.unified_format import generate_test_classes # Location of JSON test specifications. -TEST_PATH = os.path.join( - os.path.dirname(os.path.realpath(__file__)), 'sessions', 'unified') +TEST_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "sessions", "unified") # Generate unified tests. globals().update(generate_test_classes(TEST_PATH, module=__name__)) diff --git a/test/test_son.py b/test/test_son.py index edddd6b8b8..69beb81439 100644 --- a/test/test_son.py +++ b/test/test_son.py @@ -21,9 +21,11 @@ sys.path[0:0] = [""] -from bson.son import SON -from test import unittest from collections import OrderedDict +from test import unittest + +from bson.son import SON + class TestSON(unittest.TestCase): def test_ordered_dict(self): @@ -31,9 +33,9 @@ def test_ordered_dict(self): a1["hello"] = "world" a1["mike"] = "awesome" a1["hello_"] = "mike" - self.assertEqual(list(a1.items()), [("hello", "world"), - ("mike", "awesome"), - ("hello_", "mike")]) + self.assertEqual( + list(a1.items()), [("hello", "world"), ("mike", "awesome"), ("hello_", "mike")] + ) b2 = SON({"hello": "world"}) self.assertEqual(b2["hello"], "world") @@ -41,38 +43,28 @@ def test_ordered_dict(self): def test_equality(self): a1 = SON({"hello": "world"}) - b2 = SON((('hello', 'world'), ('mike', 'awesome'), ('hello_', 'mike'))) + b2 = SON((("hello", "world"), ("mike", "awesome"), ("hello_", "mike"))) self.assertEqual(a1, SON({"hello": "world"})) - self.assertEqual(b2, SON((('hello', 'world'), - ('mike', 'awesome'), - ('hello_', 'mike')))) - self.assertEqual(b2, dict((('hello_', 'mike'), - ('mike', 'awesome'), - ('hello', 'world')))) + self.assertEqual(b2, SON((("hello", "world"), ("mike", "awesome"), ("hello_", "mike")))) + self.assertEqual(b2, dict((("hello_", "mike"), ("mike", "awesome"), ("hello", "world")))) self.assertNotEqual(a1, b2) - self.assertNotEqual(b2, SON((('hello_', 'mike'), - ('mike', 'awesome'), - ('hello', 'world')))) + self.assertNotEqual(b2, SON((("hello_", "mike"), ("mike", "awesome"), ("hello", "world")))) # Explicitly test inequality self.assertFalse(a1 != SON({"hello": "world"})) - self.assertFalse(b2 != SON((('hello', 'world'), - ('mike', 'awesome'), - ('hello_', 
'mike')))) - self.assertFalse(b2 != dict((('hello_', 'mike'), - ('mike', 'awesome'), - ('hello', 'world')))) + self.assertFalse(b2 != SON((("hello", "world"), ("mike", "awesome"), ("hello_", "mike")))) + self.assertFalse(b2 != dict((("hello_", "mike"), ("mike", "awesome"), ("hello", "world")))) # Embedded SON. - d4 = SON([('blah', {'foo': SON()})]) - self.assertEqual(d4, {'blah': {'foo': {}}}) - self.assertEqual(d4, {'blah': {'foo': SON()}}) - self.assertNotEqual(d4, {'blah': {'foo': []}}) + d4 = SON([("blah", {"foo": SON()})]) + self.assertEqual(d4, {"blah": {"foo": {}}}) + self.assertEqual(d4, {"blah": {"foo": SON()}}) + self.assertNotEqual(d4, {"blah": {"foo": []}}) # Original data unaffected. - self.assertEqual(SON, d4['blah']['foo'].__class__) + self.assertEqual(SON, d4["blah"]["foo"].__class__) def test_to_dict(self): a1 = SON() @@ -89,19 +81,17 @@ def test_to_dict(self): self.assertEqual(dict, d4.to_dict()["blah"]["foo"].__class__) # Original data unaffected. - self.assertEqual(SON, d4['blah']['foo'].__class__) + self.assertEqual(SON, d4["blah"]["foo"].__class__) def test_pickle(self): simple_son = SON([]) - complex_son = SON([('son', simple_son), - ('list', [simple_son, simple_son])]) + complex_son = SON([("son", simple_son), ("list", [simple_son, simple_son])]) for protocol in range(pickle.HIGHEST_PROTOCOL + 1): - pickled = pickle.loads(pickle.dumps(complex_son, - protocol=protocol)) - self.assertEqual(pickled['son'], pickled['list'][0]) - self.assertEqual(pickled['son'], pickled['list'][1]) + pickled = pickle.loads(pickle.dumps(complex_son, protocol=protocol)) + self.assertEqual(pickled["son"], pickled["list"][0]) + self.assertEqual(pickled["son"], pickled["list"][1]) def test_pickle_backwards_compatability(self): # This string was generated by pickling a SON object in pymongo @@ -109,16 +99,16 @@ def test_pickle_backwards_compatability(self): pickled_with_2_1_1 = ( "ccopy_reg\n_reconstructor\np0\n(cbson.son\nSON\np1\n" "c__builtin__\ndict\np2\n(dp3\ntp4\nRp5\n(dp6\n" - "S'_SON__keys'\np7\n(lp8\nsb.").encode('utf8') + "S'_SON__keys'\np7\n(lp8\nsb." 
+ ).encode("utf8") son_2_1_1 = pickle.loads(pickled_with_2_1_1) self.assertEqual(son_2_1_1, SON([])) def test_copying(self): simple_son = SON([]) - complex_son = SON([('son', simple_son), - ('list', [simple_son, simple_son])]) + complex_son = SON([("son", simple_son), ("list", [simple_son, simple_son])]) regex_son = SON([("x", re.compile("^hello.*"))]) - reflexive_son = SON([('son', simple_son)]) + reflexive_son = SON([("son", simple_son)]) reflexive_son["reflexive"] = reflexive_son simple_son1 = copy.copy(simple_son) @@ -196,8 +186,10 @@ def test_keys(self): try: d - i().keys() except TypeError: - self.fail("SON().keys() is not returning an object compatible " - "with %s objects" % (str(i))) + self.fail( + "SON().keys() is not returning an object compatible " + "with %s objects" % (str(i)) + ) # Test to verify correctness d = SON({"k": "v"}).keys() for i in [OrderedDict, dict]: @@ -205,5 +197,6 @@ def test_keys(self): for i in [OrderedDict, dict]: self.assertEqual(d - i({"k": 0}).keys(), set()) + if __name__ == "__main__": unittest.main() diff --git a/test/test_srv_polling.py b/test/test_srv_polling.py index 64581d83b7..6c240d7a78 100644 --- a/test/test_srv_polling.py +++ b/test/test_srv_polling.py @@ -15,29 +15,31 @@ """Run the SRV support tests.""" import sys - from time import sleep from typing import Any sys.path[0:0] = [""] -import pymongo +from test import client_knobs, unittest +from test.utils import FunctionCallRecorder, wait_until +import pymongo from pymongo import common from pymongo.errors import ConfigurationError -from pymongo.srv_resolver import _HAVE_DNSPYTHON from pymongo.mongo_client import MongoClient -from test import client_knobs, unittest -from test.utils import wait_until, FunctionCallRecorder - +from pymongo.srv_resolver import _HAVE_DNSPYTHON WAIT_TIME = 0.1 class SrvPollingKnobs(object): - def __init__(self, ttl_time=None, min_srv_rescan_interval=None, - nodelist_callback=None, - count_resolver_calls=False): + def __init__( + self, + ttl_time=None, + min_srv_rescan_interval=None, + nodelist_callback=None, + count_resolver_calls=False, + ): self.ttl_time = ttl_time self.min_srv_rescan_interval = min_srv_rescan_interval self.nodelist_callback = nodelist_callback @@ -48,8 +50,7 @@ def __init__(self, ttl_time=None, min_srv_rescan_interval=None, def enable(self): self.old_min_srv_rescan_interval = common.MIN_SRV_RESCAN_INTERVAL - self.old_dns_resolver_response = \ - pymongo.srv_resolver._SrvResolver.get_hosts_and_min_ttl + self.old_dns_resolver_response = pymongo.srv_resolver._SrvResolver.get_hosts_and_min_ttl if self.min_srv_rescan_interval is not None: common.MIN_SRV_RESCAN_INTERVAL = self.min_srv_rescan_interval @@ -88,18 +89,20 @@ class TestSrvPolling(unittest.TestCase): BASE_SRV_RESPONSE = [ ("localhost.test.build.10gen.cc", 27017), - ("localhost.test.build.10gen.cc", 27018)] + ("localhost.test.build.10gen.cc", 27018), + ] CONNECTION_STRING = "mongodb+srv://test1.test.build.10gen.cc" def setUp(self): if not _HAVE_DNSPYTHON: - raise unittest.SkipTest("SRV polling tests require the dnspython " - "module") + raise unittest.SkipTest("SRV polling tests require the dnspython " "module") # Patch timeouts to ensure short rescan SRV interval. 
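        # (WAIT_TIME is 0.1, so heartbeats, event processing, and SRV rescans
        # all fire roughly every 100 ms, which keeps these polling tests fast.)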
self.client_knobs = client_knobs( - heartbeat_frequency=WAIT_TIME, min_heartbeat_interval=WAIT_TIME, - events_queue_frequency=WAIT_TIME) + heartbeat_frequency=WAIT_TIME, + min_heartbeat_interval=WAIT_TIME, + events_queue_frequency=WAIT_TIME, + ) self.client_knobs.enable() def tearDown(self): @@ -112,13 +115,14 @@ def assert_nodelist_change(self, expected_nodelist, client): """Check if the client._topology eventually sees all nodes in the expected_nodelist. """ + def predicate(): nodelist = self.get_nodelist(client) if set(expected_nodelist) == set(nodelist): return True return False - wait_until(predicate, "see expected nodelist", - timeout=100*WAIT_TIME) + + wait_until(predicate, "see expected nodelist", timeout=100 * WAIT_TIME) def assert_nodelist_nochange(self, expected_nodelist, client): """Check if the client._topology ever deviates from seeing all nodes @@ -126,20 +130,23 @@ def assert_nodelist_nochange(self, expected_nodelist, client): (WAIT_TIME * 10) seconds. Also check that the resolver is called at least once. """ - sleep(WAIT_TIME*10) + sleep(WAIT_TIME * 10) nodelist = self.get_nodelist(client) if set(expected_nodelist) != set(nodelist): msg = "Client nodelist %s changed unexpectedly (expected %s)" raise self.fail(msg % (nodelist, expected_nodelist)) self.assertGreaterEqual( pymongo.srv_resolver._SrvResolver.get_hosts_and_min_ttl.call_count, # type: ignore - 1, "resolver was never called") + 1, + "resolver was never called", + ) return True def run_scenario(self, dns_response, expect_change): if callable(dns_response): dns_resolver_response = dns_response else: + def dns_resolver_response(): return dns_response @@ -153,34 +160,29 @@ def dns_resolver_response(): expected_response = self.BASE_SRV_RESPONSE # Patch timeouts to ensure short test running times. - with SrvPollingKnobs( - ttl_time=WAIT_TIME, min_srv_rescan_interval=WAIT_TIME): + with SrvPollingKnobs(ttl_time=WAIT_TIME, min_srv_rescan_interval=WAIT_TIME): client = MongoClient(self.CONNECTION_STRING) self.assert_nodelist_change(self.BASE_SRV_RESPONSE, client) # Patch list of hosts returned by DNS query. 
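        # (SrvPollingKnobs presumably routes the patched
        # _SrvResolver.get_hosts_and_min_ttl through nodelist_callback, so the
        # callback's return value stands in for a live DNS answer here.)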
with SrvPollingKnobs( - nodelist_callback=dns_resolver_response, - count_resolver_calls=count_resolver_calls): + nodelist_callback=dns_resolver_response, count_resolver_calls=count_resolver_calls + ): assertion_method(expected_response, client) def test_addition(self): response = self.BASE_SRV_RESPONSE[:] - response.append( - ("localhost.test.build.10gen.cc", 27019)) + response.append(("localhost.test.build.10gen.cc", 27019)) self.run_scenario(response, True) def test_removal(self): response = self.BASE_SRV_RESPONSE[:] - response.remove( - ("localhost.test.build.10gen.cc", 27018)) + response.remove(("localhost.test.build.10gen.cc", 27018)) self.run_scenario(response, True) def test_replace_one(self): response = self.BASE_SRV_RESPONSE[:] - response.remove( - ("localhost.test.build.10gen.cc", 27018)) - response.append( - ("localhost.test.build.10gen.cc", 27019)) + response.remove(("localhost.test.build.10gen.cc", 27018)) + response.append(("localhost.test.build.10gen.cc", 27019)) self.run_scenario(response, True) def test_replace_both_with_one(self): @@ -188,15 +190,20 @@ def test_replace_both_with_one(self): self.run_scenario(response, True) def test_replace_both_with_two(self): - response = [("localhost.test.build.10gen.cc", 27019), - ("localhost.test.build.10gen.cc", 27020)] + response = [ + ("localhost.test.build.10gen.cc", 27019), + ("localhost.test.build.10gen.cc", 27020), + ] self.run_scenario(response, True) def test_dns_failures(self): from dns import exception + for exc in (exception.FormError, exception.TooBig, exception.Timeout): + def response_callback(*args): raise exc("DNS Failure!") + self.run_scenario(response_callback, False) def test_dns_record_lookup_empty(self): @@ -207,89 +214,95 @@ def _test_recover_from_initial(self, initial_callback): # Construct a valid final response callback distinct from base. response_final = self.BASE_SRV_RESPONSE[:] response_final.pop() + def final_callback(): return response_final with SrvPollingKnobs( - ttl_time=WAIT_TIME, min_srv_rescan_interval=WAIT_TIME, - nodelist_callback=initial_callback, - count_resolver_calls=True): + ttl_time=WAIT_TIME, + min_srv_rescan_interval=WAIT_TIME, + nodelist_callback=initial_callback, + count_resolver_calls=True, + ): # Client uses unpatched method to get initial nodelist client = MongoClient(self.CONNECTION_STRING) # Invalid DNS resolver response should not change nodelist. self.assert_nodelist_nochange(self.BASE_SRV_RESPONSE, client) with SrvPollingKnobs( - ttl_time=WAIT_TIME, min_srv_rescan_interval=WAIT_TIME, - nodelist_callback=final_callback): + ttl_time=WAIT_TIME, min_srv_rescan_interval=WAIT_TIME, nodelist_callback=final_callback + ): # Nodelist should reflect new valid DNS resolver response. 
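            # (This is the recovery check: having tolerated the bad initial
            # response above without changing its view of the topology, the
            # client must now converge on response_final.)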
self.assert_nodelist_change(response_final, client) def test_recover_from_initially_empty_seedlist(self): def empty_seedlist(): return [] + self._test_recover_from_initial(empty_seedlist) def test_recover_from_initially_erroring_seedlist(self): def erroring_seedlist(): raise ConfigurationError + self._test_recover_from_initial(erroring_seedlist) def test_10_all_dns_selected(self): - response = [("localhost.test.build.10gen.cc", 27017), - ("localhost.test.build.10gen.cc", 27019), - ("localhost.test.build.10gen.cc", 27020)] + response = [ + ("localhost.test.build.10gen.cc", 27017), + ("localhost.test.build.10gen.cc", 27019), + ("localhost.test.build.10gen.cc", 27020), + ] def nodelist_callback(): return response - with SrvPollingKnobs(ttl_time=WAIT_TIME, - min_srv_rescan_interval=WAIT_TIME): + + with SrvPollingKnobs(ttl_time=WAIT_TIME, min_srv_rescan_interval=WAIT_TIME): client = MongoClient(self.CONNECTION_STRING, srvMaxHosts=0) self.addCleanup(client.close) with SrvPollingKnobs(nodelist_callback=nodelist_callback): self.assert_nodelist_change(response, client) def test_11_all_dns_selected(self): - response = [("localhost.test.build.10gen.cc", 27019), - ("localhost.test.build.10gen.cc", 27020)] + response = [ + ("localhost.test.build.10gen.cc", 27019), + ("localhost.test.build.10gen.cc", 27020), + ] def nodelist_callback(): return response - with SrvPollingKnobs( - ttl_time=WAIT_TIME, min_srv_rescan_interval=WAIT_TIME): + with SrvPollingKnobs(ttl_time=WAIT_TIME, min_srv_rescan_interval=WAIT_TIME): client = MongoClient(self.CONNECTION_STRING, srvMaxHosts=2) self.addCleanup(client.close) with SrvPollingKnobs(nodelist_callback=nodelist_callback): self.assert_nodelist_change(response, client) def test_12_new_dns_randomly_selected(self): - response = [("localhost.test.build.10gen.cc", 27020), - ("localhost.test.build.10gen.cc", 27019), - ("localhost.test.build.10gen.cc", 27017)] + response = [ + ("localhost.test.build.10gen.cc", 27020), + ("localhost.test.build.10gen.cc", 27019), + ("localhost.test.build.10gen.cc", 27017), + ] def nodelist_callback(): return response - with SrvPollingKnobs( - ttl_time=WAIT_TIME, min_srv_rescan_interval=WAIT_TIME): + with SrvPollingKnobs(ttl_time=WAIT_TIME, min_srv_rescan_interval=WAIT_TIME): client = MongoClient(self.CONNECTION_STRING, srvMaxHosts=2) self.addCleanup(client.close) with SrvPollingKnobs(nodelist_callback=nodelist_callback): - sleep(2*common.MIN_SRV_RESCAN_INTERVAL) - final_topology = set( - client.topology_description.server_descriptions()) - self.assertIn(("localhost.test.build.10gen.cc", 27017), - final_topology) + sleep(2 * common.MIN_SRV_RESCAN_INTERVAL) + final_topology = set(client.topology_description.server_descriptions()) + self.assertIn(("localhost.test.build.10gen.cc", 27017), final_topology) self.assertEqual(len(final_topology), 2) def test_does_not_flipflop(self): - with SrvPollingKnobs( - ttl_time=WAIT_TIME, min_srv_rescan_interval=WAIT_TIME): + with SrvPollingKnobs(ttl_time=WAIT_TIME, min_srv_rescan_interval=WAIT_TIME): client = MongoClient(self.CONNECTION_STRING, srvMaxHosts=1) self.addCleanup(client.close) old = set(client.topology_description.server_descriptions()) - sleep(4*WAIT_TIME) + sleep(4 * WAIT_TIME) new = set(client.topology_description.server_descriptions()) self.assertSetEqual(old, new) @@ -297,20 +310,19 @@ def test_srv_service_name(self): # Construct a valid final response callback distinct from base. 
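        # (The trailing dots on the host names below are an assumption about
        # the DNS fixture: the custom-service-name lookup appears to return
        # fully-qualified names.)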
response = [ ("localhost.test.build.10gen.cc.", 27019), - ("localhost.test.build.10gen.cc.", 27020) + ("localhost.test.build.10gen.cc.", 27020), ] def nodelist_callback(): return response - with SrvPollingKnobs( - ttl_time=WAIT_TIME, min_srv_rescan_interval=WAIT_TIME): + with SrvPollingKnobs(ttl_time=WAIT_TIME, min_srv_rescan_interval=WAIT_TIME): client = MongoClient( - "mongodb+srv://test22.test.build.10gen.cc/?srvServiceName" - "=customname") + "mongodb+srv://test22.test.build.10gen.cc/?srvServiceName" "=customname" + ) with SrvPollingKnobs(nodelist_callback=nodelist_callback): self.assert_nodelist_change(response, client) -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() diff --git a/test/test_ssl.py b/test/test_ssl.py index 25a646a998..7629c1fd88 100644 --- a/test/test_ssl.py +++ b/test/test_ssl.py @@ -21,26 +21,21 @@ sys.path[0:0] = [""] +from test import HAVE_IPADDRESS, IntegrationTest, SkipTest, client_context, unittest +from test.utils import ( + EventListener, + cat_files, + connected, + ignore_deprecations, + remove_all_users, +) from urllib.parse import quote_plus from pymongo import MongoClient, ssl_support -from pymongo.errors import (ConfigurationError, - ConnectionFailure, - OperationFailure) +from pymongo.errors import ConfigurationError, ConnectionFailure, OperationFailure from pymongo.hello import HelloCompat -from pymongo.ssl_support import HAVE_SSL, get_ssl_context, _ssl +from pymongo.ssl_support import HAVE_SSL, _ssl, get_ssl_context from pymongo.write_concern import WriteConcern -from test import (IntegrationTest, - client_context, - SkipTest, - unittest, - HAVE_IPADDRESS) -from test.utils import (EventListener, - cat_files, - connected, - ignore_deprecations, - remove_all_users) - _HAVE_PYOPENSSL = False try: @@ -48,9 +43,12 @@ import OpenSSL import requests import service_identity + # Ensure service_identity>=18.1 is installed from service_identity.pyopenssl import verify_ip_address + from pymongo.ocsp_support import _load_trusted_ca_certs + _HAVE_PYOPENSSL = True except ImportError: _load_trusted_ca_certs = None # type: ignore @@ -59,15 +57,13 @@ if HAVE_SSL: import ssl -CERT_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), - 'certificates') -CLIENT_PEM = os.path.join(CERT_PATH, 'client.pem') -CLIENT_ENCRYPTED_PEM = os.path.join(CERT_PATH, 'password_protected.pem') -CA_PEM = os.path.join(CERT_PATH, 'ca.pem') -CA_BUNDLE_PEM = os.path.join(CERT_PATH, 'trusted-ca.pem') -CRL_PEM = os.path.join(CERT_PATH, 'crl.pem') -MONGODB_X509_USERNAME = ( - "C=US,ST=New York,L=New York City,O=MDB,OU=Drivers,CN=client") +CERT_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "certificates") +CLIENT_PEM = os.path.join(CERT_PATH, "client.pem") +CLIENT_ENCRYPTED_PEM = os.path.join(CERT_PATH, "password_protected.pem") +CA_PEM = os.path.join(CERT_PATH, "ca.pem") +CA_BUNDLE_PEM = os.path.join(CERT_PATH, "trusted-ca.pem") +CRL_PEM = os.path.join(CERT_PATH, "crl.pem") +MONGODB_X509_USERNAME = "C=US,ST=New York,L=New York City,O=MDB,OU=Drivers,CN=client" _PY37PLUS = sys.version_info[:2] >= (3, 7) @@ -83,27 +79,24 @@ class TestClientSSL(unittest.TestCase): - - @unittest.skipIf(HAVE_SSL, "The ssl module is available, can't test what " - "happens without it.") + @unittest.skipIf( + HAVE_SSL, "The ssl module is available, can't test what " "happens without it." 
+ ) def test_no_ssl_module(self): # Explicit - self.assertRaises(ConfigurationError, - MongoClient, ssl=True) + self.assertRaises(ConfigurationError, MongoClient, ssl=True) # Implied - self.assertRaises(ConfigurationError, - MongoClient, tlsCertificateKeyFile=CLIENT_PEM) + self.assertRaises(ConfigurationError, MongoClient, tlsCertificateKeyFile=CLIENT_PEM) @unittest.skipUnless(HAVE_SSL, "The ssl module is not available.") @ignore_deprecations def test_config_ssl(self): # Tests various ssl configurations - self.assertRaises(ValueError, MongoClient, ssl='foo') - self.assertRaises(ConfigurationError, - MongoClient, - tls=False, - tlsCertificateKeyFile=CLIENT_PEM) + self.assertRaises(ValueError, MongoClient, ssl="foo") + self.assertRaises( + ConfigurationError, MongoClient, tls=False, tlsCertificateKeyFile=CLIENT_PEM + ) self.assertRaises(TypeError, MongoClient, ssl=0) self.assertRaises(TypeError, MongoClient, ssl=5.5) self.assertRaises(TypeError, MongoClient, ssl=[]) @@ -113,30 +106,20 @@ def test_config_ssl(self): self.assertRaises(TypeError, MongoClient, tlsCertificateKeyFile=[]) # Test invalid combinations - self.assertRaises(ConfigurationError, - MongoClient, - tls=False, - tlsCertificateKeyFile=CLIENT_PEM) - self.assertRaises(ConfigurationError, - MongoClient, - tls=False, - tlsCAFile=CA_PEM) - self.assertRaises(ConfigurationError, - MongoClient, - tls=False, - tlsCRLFile=CRL_PEM) - self.assertRaises(ConfigurationError, - MongoClient, - tls=False, - tlsAllowInvalidCertificates=False) - self.assertRaises(ConfigurationError, - MongoClient, - tls=False, - tlsAllowInvalidHostnames=False) - self.assertRaises(ConfigurationError, - MongoClient, - tls=False, - tlsDisableOCSPEndpointCheck=False) + self.assertRaises( + ConfigurationError, MongoClient, tls=False, tlsCertificateKeyFile=CLIENT_PEM + ) + self.assertRaises(ConfigurationError, MongoClient, tls=False, tlsCAFile=CA_PEM) + self.assertRaises(ConfigurationError, MongoClient, tls=False, tlsCRLFile=CRL_PEM) + self.assertRaises( + ConfigurationError, MongoClient, tls=False, tlsAllowInvalidCertificates=False + ) + self.assertRaises( + ConfigurationError, MongoClient, tls=False, tlsAllowInvalidHostnames=False + ) + self.assertRaises( + ConfigurationError, MongoClient, tls=False, tlsDisableOCSPEndpointCheck=False + ) @unittest.skipUnless(_HAVE_PYOPENSSL, "PyOpenSSL is not available.") def test_use_pyopenssl_when_available(self): @@ -153,10 +136,11 @@ class TestSSL(IntegrationTest): def assertClientWorks(self, client): coll = client.pymongo_test.ssl_test.with_options( - write_concern=WriteConcern(w=client_context.w)) + write_concern=WriteConcern(w=client_context.w) + ) coll.drop() - coll.insert_one({'ssl': True}) - self.assertTrue(coll.find_one()['ssl']) + coll.insert_one({"ssl": True}) + self.assertTrue(coll.find_one()["ssl"]) coll.drop() @classmethod @@ -185,30 +169,38 @@ def test_tlsCertificateKeyFilePassword(self): # # --sslPEMKeyFile=/path/to/pymongo/test/certificates/server.pem # --sslCAFile=/path/to/pymongo/test/certificates/ca.pem - if not hasattr(ssl, 'SSLContext') and not _ssl.IS_PYOPENSSL: + if not hasattr(ssl, "SSLContext") and not _ssl.IS_PYOPENSSL: self.assertRaises( ConfigurationError, MongoClient, - 'localhost', + "localhost", ssl=True, tlsCertificateKeyFile=CLIENT_ENCRYPTED_PEM, tlsCertificateKeyFilePassword="qwerty", tlsCAFile=CA_PEM, - serverSelectionTimeoutMS=100) + serverSelectionTimeoutMS=100, + ) else: - connected(MongoClient('localhost', - ssl=True, - tlsCertificateKeyFile=CLIENT_ENCRYPTED_PEM, - 
tlsCertificateKeyFilePassword="qwerty", - tlsCAFile=CA_PEM, - serverSelectionTimeoutMS=5000, - **self.credentials)) # type: ignore - - uri_fmt = ("mongodb://localhost/?ssl=true" - "&tlsCertificateKeyFile=%s&tlsCertificateKeyFilePassword=qwerty" - "&tlsCAFile=%s&serverSelectionTimeoutMS=5000") - connected(MongoClient(uri_fmt % (CLIENT_ENCRYPTED_PEM, CA_PEM), - **self.credentials)) # type: ignore + connected( + MongoClient( + "localhost", + ssl=True, + tlsCertificateKeyFile=CLIENT_ENCRYPTED_PEM, + tlsCertificateKeyFilePassword="qwerty", + tlsCAFile=CA_PEM, + serverSelectionTimeoutMS=5000, + **self.credentials # type: ignore[arg-type] + ) + ) + + uri_fmt = ( + "mongodb://localhost/?ssl=true" + "&tlsCertificateKeyFile=%s&tlsCertificateKeyFilePassword=qwerty" + "&tlsCAFile=%s&serverSelectionTimeoutMS=5000" + ) + connected( + MongoClient(uri_fmt % (CLIENT_ENCRYPTED_PEM, CA_PEM), **self.credentials) # type: ignore[arg-type] + ) @client_context.require_tlsCertificateKeyFile @client_context.require_no_auth @@ -221,16 +213,21 @@ def test_cert_ssl_implicitly_set(self): # # test that setting tlsCertificateKeyFile causes ssl to be set to True - client = MongoClient(client_context.host, client_context.port, - tlsAllowInvalidCertificates=True, - tlsCertificateKeyFile=CLIENT_PEM) + client = MongoClient( + client_context.host, + client_context.port, + tlsAllowInvalidCertificates=True, + tlsCertificateKeyFile=CLIENT_PEM, + ) response = client.admin.command(HelloCompat.LEGACY_CMD) - if 'setName' in response: - client = MongoClient(client_context.pair, - replicaSet=response['setName'], - w=len(response['hosts']), - tlsAllowInvalidCertificates=True, - tlsCertificateKeyFile=CLIENT_PEM) + if "setName" in response: + client = MongoClient( + client_context.pair, + replicaSet=response["setName"], + w=len(response["hosts"]), + tlsAllowInvalidCertificates=True, + tlsCertificateKeyFile=CLIENT_PEM, + ) self.assertClientWorks(client) @@ -243,33 +240,41 @@ def test_cert_ssl_validation(self): # --sslPEMKeyFile=/path/to/pymongo/test/certificates/server.pem # --sslCAFile=/path/to/pymongo/test/certificates/ca.pem # - client = MongoClient('localhost', - ssl=True, - tlsCertificateKeyFile=CLIENT_PEM, - tlsAllowInvalidCertificates=False, - tlsCAFile=CA_PEM) + client = MongoClient( + "localhost", + ssl=True, + tlsCertificateKeyFile=CLIENT_PEM, + tlsAllowInvalidCertificates=False, + tlsCAFile=CA_PEM, + ) response = client.admin.command(HelloCompat.LEGACY_CMD) - if 'setName' in response: - if response['primary'].split(":")[0] != 'localhost': - raise SkipTest("No hosts in the replicaset for 'localhost'. " - "Cannot validate hostname in the certificate") - - client = MongoClient('localhost', - replicaSet=response['setName'], - w=len(response['hosts']), - ssl=True, - tlsCertificateKeyFile=CLIENT_PEM, - tlsAllowInvalidCertificates=False, - tlsCAFile=CA_PEM) + if "setName" in response: + if response["primary"].split(":")[0] != "localhost": + raise SkipTest( + "No hosts in the replicaset for 'localhost'. 
" + "Cannot validate hostname in the certificate" + ) + + client = MongoClient( + "localhost", + replicaSet=response["setName"], + w=len(response["hosts"]), + ssl=True, + tlsCertificateKeyFile=CLIENT_PEM, + tlsAllowInvalidCertificates=False, + tlsCAFile=CA_PEM, + ) self.assertClientWorks(client) if HAVE_IPADDRESS: - client = MongoClient('127.0.0.1', - ssl=True, - tlsCertificateKeyFile=CLIENT_PEM, - tlsAllowInvalidCertificates=False, - tlsCAFile=CA_PEM) + client = MongoClient( + "127.0.0.1", + ssl=True, + tlsCertificateKeyFile=CLIENT_PEM, + tlsAllowInvalidCertificates=False, + tlsCAFile=CA_PEM, + ) self.assertClientWorks(client) @client_context.require_tlsCertificateKeyFile @@ -281,9 +286,11 @@ def test_cert_ssl_uri_support(self): # --sslPEMKeyFile=/path/to/pymongo/test/certificates/server.pem # --sslCAFile=/path/to/pymongo/test/certificates/ca.pem # - uri_fmt = ("mongodb://localhost/?ssl=true&tlsCertificateKeyFile=%s&tlsAllowInvalidCertificates" - "=%s&tlsCAFile=%s&tlsAllowInvalidHostnames=false") - client = MongoClient(uri_fmt % (CLIENT_PEM, 'true', CA_PEM)) + uri_fmt = ( + "mongodb://localhost/?ssl=true&tlsCertificateKeyFile=%s&tlsAllowInvalidCertificates" + "=%s&tlsCAFile=%s&tlsAllowInvalidHostnames=false" + ) + client = MongoClient(uri_fmt % (CLIENT_PEM, "true", CA_PEM)) self.assertClientWorks(client) @client_context.require_tlsCertificateKeyFile @@ -309,81 +316,107 @@ def test_cert_ssl_validation_hostname_matching(self): response = self.client.admin.command(HelloCompat.LEGACY_CMD) with self.assertRaises(ConnectionFailure): - connected(MongoClient('server', - ssl=True, - tlsCertificateKeyFile=CLIENT_PEM, - tlsAllowInvalidCertificates=False, - tlsCAFile=CA_PEM, - serverSelectionTimeoutMS=500, - **self.credentials)) # type: ignore - - connected(MongoClient('server', - ssl=True, - tlsCertificateKeyFile=CLIENT_PEM, - tlsAllowInvalidCertificates=False, - tlsCAFile=CA_PEM, - tlsAllowInvalidHostnames=True, - serverSelectionTimeoutMS=500, - **self.credentials)) # type: ignore - - if 'setName' in response: + connected( + MongoClient( + "server", + ssl=True, + tlsCertificateKeyFile=CLIENT_PEM, + tlsAllowInvalidCertificates=False, + tlsCAFile=CA_PEM, + serverSelectionTimeoutMS=500, + **self.credentials # type: ignore[arg-type] + ) + ) + + connected( + MongoClient( + "server", + ssl=True, + tlsCertificateKeyFile=CLIENT_PEM, + tlsAllowInvalidCertificates=False, + tlsCAFile=CA_PEM, + tlsAllowInvalidHostnames=True, + serverSelectionTimeoutMS=500, + **self.credentials # type: ignore[arg-type] + ) + ) + + if "setName" in response: with self.assertRaises(ConnectionFailure): - connected(MongoClient('server', - replicaSet=response['setName'], - ssl=True, - tlsCertificateKeyFile=CLIENT_PEM, - tlsAllowInvalidCertificates=False, - tlsCAFile=CA_PEM, - serverSelectionTimeoutMS=500, - **self.credentials)) # type: ignore - - connected(MongoClient('server', - replicaSet=response['setName'], - ssl=True, - tlsCertificateKeyFile=CLIENT_PEM, - tlsAllowInvalidCertificates=False, - tlsCAFile=CA_PEM, - tlsAllowInvalidHostnames=True, - serverSelectionTimeoutMS=500, - **self.credentials)) # type: ignore + connected( + MongoClient( + "server", + replicaSet=response["setName"], + ssl=True, + tlsCertificateKeyFile=CLIENT_PEM, + tlsAllowInvalidCertificates=False, + tlsCAFile=CA_PEM, + serverSelectionTimeoutMS=500, + **self.credentials # type: ignore[arg-type] + ) + ) + + connected( + MongoClient( + "server", + replicaSet=response["setName"], + ssl=True, + tlsCertificateKeyFile=CLIENT_PEM, + tlsAllowInvalidCertificates=False, 
+ tlsCAFile=CA_PEM, + tlsAllowInvalidHostnames=True, + serverSelectionTimeoutMS=500, + **self.credentials # type: ignore[arg-type] + ) + ) @client_context.require_tlsCertificateKeyFile @ignore_deprecations def test_tlsCRLFile_support(self): - if not hasattr(ssl, 'VERIFY_CRL_CHECK_LEAF') or _ssl.IS_PYOPENSSL: + if not hasattr(ssl, "VERIFY_CRL_CHECK_LEAF") or _ssl.IS_PYOPENSSL: self.assertRaises( ConfigurationError, MongoClient, - 'localhost', + "localhost", ssl=True, tlsCAFile=CA_PEM, tlsCRLFile=CRL_PEM, - serverSelectionTimeoutMS=100) + serverSelectionTimeoutMS=100, + ) else: - connected(MongoClient('localhost', - ssl=True, - tlsCAFile=CA_PEM, - serverSelectionTimeoutMS=100, - **self.credentials)) # type: ignore + connected( + MongoClient( + "localhost", + ssl=True, + tlsCAFile=CA_PEM, + serverSelectionTimeoutMS=100, + **self.credentials # type: ignore[arg-type] + ) + ) with self.assertRaises(ConnectionFailure): - connected(MongoClient('localhost', - ssl=True, - tlsCAFile=CA_PEM, - tlsCRLFile=CRL_PEM, - serverSelectionTimeoutMS=100, - **self.credentials)) # type: ignore - - uri_fmt = ("mongodb://localhost/?ssl=true&" - "tlsCAFile=%s&serverSelectionTimeoutMS=100") - connected(MongoClient(uri_fmt % (CA_PEM,), - **self.credentials)) # type: ignore - - uri_fmt = ("mongodb://localhost/?ssl=true&tlsCRLFile=%s" - "&tlsCAFile=%s&serverSelectionTimeoutMS=100") + connected( + MongoClient( + "localhost", + ssl=True, + tlsCAFile=CA_PEM, + tlsCRLFile=CRL_PEM, + serverSelectionTimeoutMS=100, + **self.credentials # type: ignore[arg-type] + ) + ) + + uri_fmt = "mongodb://localhost/?ssl=true&" "tlsCAFile=%s&serverSelectionTimeoutMS=100" + connected(MongoClient(uri_fmt % (CA_PEM,), **self.credentials)) # type: ignore + + uri_fmt = ( + "mongodb://localhost/?ssl=true&tlsCRLFile=%s" + "&tlsCAFile=%s&serverSelectionTimeoutMS=100" + ) with self.assertRaises(ConnectionFailure): - connected(MongoClient(uri_fmt % (CRL_PEM, CA_PEM), - **self.credentials)) # type: ignore + connected( + MongoClient(uri_fmt % (CRL_PEM, CA_PEM), **self.credentials) # type: ignore[arg-type] + ) @client_context.require_tlsCertificateKeyFile @client_context.require_server_resolvable @@ -398,37 +431,39 @@ def test_validation_with_system_ca_certs(self): self.patch_system_certs(CA_PEM) with self.assertRaises(ConnectionFailure): # Server cert is verified but hostname matching fails - connected(MongoClient('server', - ssl=True, - serverSelectionTimeoutMS=100, - **self.credentials)) # type: ignore + connected( + MongoClient("server", ssl=True, serverSelectionTimeoutMS=100, **self.credentials) # type: ignore[arg-type] + ) # Server cert is verified. Disable hostname matching. - connected(MongoClient('server', - ssl=True, - tlsAllowInvalidHostnames=True, - serverSelectionTimeoutMS=100, - **self.credentials)) # type: ignore + connected( + MongoClient( + "server", + ssl=True, + tlsAllowInvalidHostnames=True, + serverSelectionTimeoutMS=100, + **self.credentials # type: ignore[arg-type] + ) + ) # Server cert and hostname are verified. - connected(MongoClient('localhost', - ssl=True, - serverSelectionTimeoutMS=100, - **self.credentials)) # type: ignore + connected( + MongoClient("localhost", ssl=True, serverSelectionTimeoutMS=100, **self.credentials) # type: ignore[arg-type] + ) # Server cert and hostname are verified. 
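        # (The test certificates appear to be issued for "localhost", so
        # hostname verification passes here without tlsAllowInvalidHostnames.)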
connected( MongoClient( - 'mongodb://localhost/?ssl=true&serverSelectionTimeoutMS=100', - **self.credentials)) # type: ignore + "mongodb://localhost/?ssl=true&serverSelectionTimeoutMS=100", **self.credentials # type: ignore[arg-type] + ) + ) def test_system_certs_config_error(self): ctx = get_ssl_context(None, None, None, None, True, True, False) - if ((sys.platform != "win32" - and hasattr(ctx, "set_default_verify_paths")) - or hasattr(ctx, "load_default_certs")): - raise SkipTest( - "Can't test when system CA certificates are loadable.") + if (sys.platform != "win32" and hasattr(ctx, "set_default_verify_paths")) or hasattr( + ctx, "load_default_certs" + ): + raise SkipTest("Can't test when system CA certificates are loadable.") ssl_support: Any have_certifi = ssl_support.HAVE_CERTIFI @@ -457,8 +492,7 @@ def test_certifi_support(self): # Force the test on Windows, regardless of environment. ssl_support.HAVE_WINCERTSTORE = False try: - ctx = get_ssl_context(None, None, CA_PEM, None, False, False, - False) + ctx = get_ssl_context(None, None, CA_PEM, None, False, False, False) ssl_sock = ctx.wrap_socket(socket.socket()) self.assertEqual(ssl_sock.ca_certs, CA_PEM) @@ -492,18 +526,24 @@ def test_wincertstore(self): @ignore_deprecations def test_mongodb_x509_auth(self): host, port = client_context.host, client_context.port - self.addCleanup(remove_all_users, client_context.client['$external']) + self.addCleanup(remove_all_users, client_context.client["$external"]) # Give x509 user all necessary privileges. - client_context.create_user('$external', MONGODB_X509_USERNAME, roles=[ - {'role': 'readWriteAnyDatabase', 'db': 'admin'}, - {'role': 'userAdminAnyDatabase', 'db': 'admin'}]) + client_context.create_user( + "$external", + MONGODB_X509_USERNAME, + roles=[ + {"role": "readWriteAnyDatabase", "db": "admin"}, + {"role": "userAdminAnyDatabase", "db": "admin"}, + ], + ) noauth = MongoClient( client_context.pair, ssl=True, tlsAllowInvalidCertificates=True, - tlsCertificateKeyFile=CLIENT_PEM) + tlsCertificateKeyFile=CLIENT_PEM, + ) self.addCleanup(noauth.close) with self.assertRaises(OperationFailure): @@ -512,11 +552,12 @@ def test_mongodb_x509_auth(self): listener = EventListener() auth = MongoClient( client_context.pair, - authMechanism='MONGODB-X509', + authMechanism="MONGODB-X509", ssl=True, tlsAllowInvalidCertificates=True, tlsCertificateKeyFile=CLIENT_PEM, - event_listeners=[listener]) + event_listeners=[listener], + ) self.addCleanup(auth.close) # No error @@ -524,64 +565,73 @@ def test_mongodb_x509_auth(self): names = listener.started_command_names() if client_context.version.at_least(4, 4, -1): # Speculative auth skips the authenticate command. 
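            # With speculative auth the driver bundles the first auth step
            # into the initial hello, so discovery and authentication share
            # one round trip. Roughly (an illustrative sketch, not the exact
            # wire format):
            #     {"hello": 1,
            #      "speculativeAuthenticate": {"authenticate": 1,
            #                                  "mechanism": "MONGODB-X509"}}
            # Hence no separate 'authenticate' command shows up in the
            # listener below.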
- self.assertEqual(names, ['find']) + self.assertEqual(names, ["find"]) else: - self.assertEqual(names, ['authenticate', 'find']) - - uri = ('mongodb://%s@%s:%d/?authMechanism=' - 'MONGODB-X509' % ( - quote_plus(MONGODB_X509_USERNAME), host, port)) - client = MongoClient(uri, - ssl=True, - tlsAllowInvalidCertificates=True, - tlsCertificateKeyFile=CLIENT_PEM) + self.assertEqual(names, ["authenticate", "find"]) + + uri = "mongodb://%s@%s:%d/?authMechanism=" "MONGODB-X509" % ( + quote_plus(MONGODB_X509_USERNAME), + host, + port, + ) + client = MongoClient( + uri, ssl=True, tlsAllowInvalidCertificates=True, tlsCertificateKeyFile=CLIENT_PEM + ) self.addCleanup(client.close) # No error client.pymongo_test.test.find_one() - uri = 'mongodb://%s:%d/?authMechanism=MONGODB-X509' % (host, port) - client = MongoClient(uri, - ssl=True, - tlsAllowInvalidCertificates=True, - tlsCertificateKeyFile=CLIENT_PEM) + uri = "mongodb://%s:%d/?authMechanism=MONGODB-X509" % (host, port) + client = MongoClient( + uri, ssl=True, tlsAllowInvalidCertificates=True, tlsCertificateKeyFile=CLIENT_PEM + ) self.addCleanup(client.close) # No error client.pymongo_test.test.find_one() # Auth should fail if username and certificate do not match - uri = ('mongodb://%s@%s:%d/?authMechanism=' - 'MONGODB-X509' % ( - quote_plus("not the username"), host, port)) + uri = "mongodb://%s@%s:%d/?authMechanism=" "MONGODB-X509" % ( + quote_plus("not the username"), + host, + port, + ) bad_client = MongoClient( - uri, ssl=True, tlsAllowInvalidCertificates=True, - tlsCertificateKeyFile=CLIENT_PEM) + uri, ssl=True, tlsAllowInvalidCertificates=True, tlsCertificateKeyFile=CLIENT_PEM + ) self.addCleanup(bad_client.close) with self.assertRaises(OperationFailure): bad_client.pymongo_test.test.find_one() bad_client = MongoClient( - client_context.pair, - username="not the username", - authMechanism='MONGODB-X509', - ssl=True, - tlsAllowInvalidCertificates=True, - tlsCertificateKeyFile=CLIENT_PEM) + client_context.pair, + username="not the username", + authMechanism="MONGODB-X509", + ssl=True, + tlsAllowInvalidCertificates=True, + tlsCertificateKeyFile=CLIENT_PEM, + ) self.addCleanup(bad_client.close) with self.assertRaises(OperationFailure): bad_client.pymongo_test.test.find_one() # Invalid certificate (using CA certificate as client certificate) - uri = ('mongodb://%s@%s:%d/?authMechanism=' - 'MONGODB-X509' % ( - quote_plus(MONGODB_X509_USERNAME), host, port)) + uri = "mongodb://%s@%s:%d/?authMechanism=" "MONGODB-X509" % ( + quote_plus(MONGODB_X509_USERNAME), + host, + port, + ) try: - connected(MongoClient(uri, - ssl=True, - tlsAllowInvalidCertificates=True, - tlsCertificateKeyFile=CA_PEM, - serverSelectionTimeoutMS=100)) + connected( + MongoClient( + uri, + ssl=True, + tlsAllowInvalidCertificates=True, + tlsCertificateKeyFile=CA_PEM, + serverSelectionTimeoutMS=100, + ) + ) except (ConnectionFailure, ConfigurationError): pass else: @@ -596,15 +646,14 @@ def remove(path): except OSError: pass - temp_ca_bundle = os.path.join(CERT_PATH, 'trusted-ca-bundle.pem') + temp_ca_bundle = os.path.join(CERT_PATH, "trusted-ca-bundle.pem") self.addCleanup(remove, temp_ca_bundle) # Add the CA cert file to the bundle. 
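        # cat_files concatenates PEM files into a single bundle. A minimal
        # sketch of the idea (assuming the test.utils helper behaves like
        # this):
        #     with open(temp_ca_bundle, "wb") as dest:
        #         for path in (CA_BUNDLE_PEM, CA_PEM):
        #             with open(path, "rb") as src:
        #                 dest.write(src.read())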
cat_files(temp_ca_bundle, CA_BUNDLE_PEM, CA_PEM) - with MongoClient('localhost', - tls=True, - tlsCertificateKeyFile=CLIENT_PEM, - tlsCAFile=temp_ca_bundle) as client: - self.assertTrue(client.admin.command('ping')) + with MongoClient( + "localhost", tls=True, tlsCertificateKeyFile=CLIENT_PEM, tlsCAFile=temp_ca_bundle + ) as client: + self.assertTrue(client.admin.command("ping")) if __name__ == "__main__": diff --git a/test/test_streaming_protocol.py b/test/test_streaming_protocol.py index 096da04cf1..72df717901 100644 --- a/test/test_streaming_protocol.py +++ b/test/test_streaming_protocol.py @@ -19,18 +19,18 @@ sys.path[0:0] = [""] +from test import IntegrationTest, client_context, unittest +from test.utils import ( + HeartbeatEventListener, + ServerEventListener, + rs_or_single_client, + single_client, + wait_until, +) + from pymongo import monitoring from pymongo.hello import HelloCompat -from test import (client_context, - IntegrationTest, - unittest) -from test.utils import (HeartbeatEventListener, - rs_or_single_client, - single_client, - ServerEventListener, - wait_until) - class TestStreamingProtocol(IntegrationTest): @client_context.require_failCommand_appName @@ -38,33 +38,40 @@ def test_failCommand_streaming(self): listener = ServerEventListener() hb_listener = HeartbeatEventListener() client = rs_or_single_client( - event_listeners=[listener, hb_listener], heartbeatFrequencyMS=500, - appName='failingHeartbeatTest') + event_listeners=[listener, hb_listener], + heartbeatFrequencyMS=500, + appName="failingHeartbeatTest", + ) self.addCleanup(client.close) # Force a connection. - client.admin.command('ping') + client.admin.command("ping") address = client.address listener.reset() fail_hello = { - 'configureFailPoint': 'failCommand', - 'mode': {'times': 4}, - 'data': { - 'failCommands': [HelloCompat.LEGACY_CMD, 'hello'], - 'closeConnection': False, - 'errorCode': 10107, - 'appName': 'failingHeartbeatTest', + "configureFailPoint": "failCommand", + "mode": {"times": 4}, + "data": { + "failCommands": [HelloCompat.LEGACY_CMD, "hello"], + "closeConnection": False, + "errorCode": 10107, + "appName": "failingHeartbeatTest", }, } with self.fail_point(fail_hello): + def _marked_unknown(event): - return (event.server_address == address - and not event.new_description.is_server_type_known) + return ( + event.server_address == address + and not event.new_description.is_server_type_known + ) def _discovered_node(event): - return (event.server_address == address - and not event.previous_description.is_server_type_known - and event.new_description.is_server_type_known) + return ( + event.server_address == address + and not event.previous_description.is_server_type_known + and event.new_description.is_server_type_known + ) def marked_unknown(): return len(listener.matching(_marked_unknown)) >= 1 @@ -73,11 +80,11 @@ def rediscovered(): return len(listener.matching(_discovered_node)) >= 1 # Topology events are published asynchronously - wait_until(marked_unknown, 'mark node unknown') - wait_until(rediscovered, 'rediscover node') + wait_until(marked_unknown, "mark node unknown") + wait_until(rediscovered, "rediscover node") # Server should be selectable. - client.admin.command('ping') + client.admin.command("ping") @client_context.require_failCommand_appName def test_streaming_rtt(self): @@ -86,45 +93,46 @@ def test_streaming_rtt(self): # On Windows, RTT can actually be 0.0 because time.time() only has # 1-15 millisecond resolution. 
We need to delay the initial hello # to ensure that RTT is never zero. - name = 'streamingRttTest' + name = "streamingRttTest" delay_hello: dict = { - 'configureFailPoint': 'failCommand', - 'mode': {'times': 1000}, - 'data': { - 'failCommands': [HelloCompat.LEGACY_CMD, 'hello'], - 'blockConnection': True, - 'blockTimeMS': 20, + "configureFailPoint": "failCommand", + "mode": {"times": 1000}, + "data": { + "failCommands": [HelloCompat.LEGACY_CMD, "hello"], + "blockConnection": True, + "blockTimeMS": 20, # This can be uncommented after SERVER-49220 is fixed. # 'appName': name, }, } with self.fail_point(delay_hello): client = rs_or_single_client( - event_listeners=[listener, hb_listener], - heartbeatFrequencyMS=500, - appName=name) + event_listeners=[listener, hb_listener], heartbeatFrequencyMS=500, appName=name + ) self.addCleanup(client.close) # Force a connection. - client.admin.command('ping') + client.admin.command("ping") address = client.address - delay_hello['data']['blockTimeMS'] = 500 - delay_hello['data']['appName'] = name + delay_hello["data"]["blockTimeMS"] = 500 + delay_hello["data"]["appName"] = name with self.fail_point(delay_hello): + def rtt_exceeds_250_ms(): # XXX: Add a public TopologyDescription getter to MongoClient? topology = client._topology sd = topology.description.server_descriptions()[address] return sd.round_trip_time > 0.250 - wait_until(rtt_exceeds_250_ms, 'exceed 250ms RTT') + wait_until(rtt_exceeds_250_ms, "exceed 250ms RTT") # Server should be selectable. - client.admin.command('ping') + client.admin.command("ping") def changed_event(event): - return (event.server_address == address and isinstance( - event, monitoring.ServerDescriptionChangedEvent)) + return event.server_address == address and isinstance( + event, monitoring.ServerDescriptionChangedEvent + ) # There should only be one event published, for the initial discovery. events = listener.matching(changed_event) @@ -137,21 +145,21 @@ def test_monitor_waits_after_server_check_error(self): # This test implements: # https://github.com/mongodb/specifications/blob/6c5b2ac/source/server-discovery-and-monitoring/server-discovery-and-monitoring-tests.rst#monitors-sleep-at-least-minheartbeatfreqencyms-between-checks fail_hello = { - 'mode': {'times': 5}, - 'data': { - 'failCommands': [HelloCompat.LEGACY_CMD, 'hello'], - 'errorCode': 1234, - 'appName': 'SDAMMinHeartbeatFrequencyTest', + "mode": {"times": 5}, + "data": { + "failCommands": [HelloCompat.LEGACY_CMD, "hello"], + "errorCode": 1234, + "appName": "SDAMMinHeartbeatFrequencyTest", }, } with self.fail_point(fail_hello): start = time.time() client = single_client( - appName='SDAMMinHeartbeatFrequencyTest', - serverSelectionTimeoutMS=5000) + appName="SDAMMinHeartbeatFrequencyTest", serverSelectionTimeoutMS=5000 + ) self.addCleanup(client.close) # Force a connection. - client.admin.command('ping') + client.admin.command("ping") duration = time.time() - start # Explanation of the expected events: # 0ms: run configureFailPoint @@ -172,11 +180,13 @@ def test_monitor_waits_after_server_check_error(self): def test_heartbeat_awaited_flag(self): hb_listener = HeartbeatEventListener() client = single_client( - event_listeners=[hb_listener], heartbeatFrequencyMS=500, - appName='heartbeatEventAwaitedFlag') + event_listeners=[hb_listener], + heartbeatFrequencyMS=500, + appName="heartbeatEventAwaitedFlag", + ) self.addCleanup(client.close) # Force a connection. 
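        # Subsequent heartbeats use the streaming protocol when the server
        # supports it; each heartbeat event carries an `awaited` flag
        # (event.awaited) that reports whether that check used the awaitable
        # hello, which is what this test verifies.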
- client.admin.command('ping') + client.admin.command("ping") def hb_succeeded(event): return isinstance(event, monitoring.ServerHeartbeatSucceededEvent) @@ -185,18 +195,17 @@ def hb_failed(event): return isinstance(event, monitoring.ServerHeartbeatFailedEvent) fail_heartbeat = { - 'mode': {'times': 2}, - 'data': { - 'failCommands': [HelloCompat.LEGACY_CMD, 'hello'], - 'closeConnection': True, - 'appName': 'heartbeatEventAwaitedFlag', + "mode": {"times": 2}, + "data": { + "failCommands": [HelloCompat.LEGACY_CMD, "hello"], + "closeConnection": True, + "appName": "heartbeatEventAwaitedFlag", }, } with self.fail_point(fail_heartbeat): - wait_until(lambda: hb_listener.matching(hb_failed), - "published failed event") + wait_until(lambda: hb_listener.matching(hb_failed), "published failed event") # Reconnect. - client.admin.command('ping') + client.admin.command("ping") hb_succeeded_events = hb_listener.matching(hb_succeeded) hb_failed_events = hb_listener.matching(hb_failed) @@ -205,10 +214,12 @@ def hb_failed(event): # Depending on thread scheduling, the failed heartbeat could occur on # the second or third check. events = [type(e) for e in hb_listener.events[:4]] - if events == [monitoring.ServerHeartbeatStartedEvent, - monitoring.ServerHeartbeatSucceededEvent, - monitoring.ServerHeartbeatStartedEvent, - monitoring.ServerHeartbeatFailedEvent]: + if events == [ + monitoring.ServerHeartbeatStartedEvent, + monitoring.ServerHeartbeatSucceededEvent, + monitoring.ServerHeartbeatStartedEvent, + monitoring.ServerHeartbeatFailedEvent, + ]: self.assertFalse(hb_succeeded_events[1].awaited) else: self.assertTrue(hb_succeeded_events[1].awaited) diff --git a/test/test_threads.py b/test/test_threads.py index a3cde207a2..064008ee32 100644 --- a/test/test_threads.py +++ b/test/test_threads.py @@ -15,12 +15,8 @@ """Test that pymongo is thread safe.""" import threading - -from test import (client_context, - IntegrationTest, - unittest) -from test.utils import rs_or_single_client -from test.utils import joinall +from test import IntegrationTest, client_context, unittest +from test.utils import joinall, rs_or_single_client @client_context.require_connection @@ -29,7 +25,6 @@ def setUpModule(): class AutoAuthenticateThreads(threading.Thread): - def __init__(self, collection, num): threading.Thread.__init__(self) self.coll = collection @@ -39,14 +34,13 @@ def __init__(self, collection, num): def run(self): for i in range(self.num): - self.coll.insert_one({'num': i}) - self.coll.find_one({'num': i}) + self.coll.insert_one({"num": i}) + self.coll.find_one({"num": i}) self.success = True class SaveAndFind(threading.Thread): - def __init__(self, collection): threading.Thread.__init__(self) self.collection = collection @@ -63,7 +57,6 @@ def run(self): class Insert(threading.Thread): - def __init__(self, collection, n, expect_exception): threading.Thread.__init__(self) self.collection = collection @@ -87,7 +80,6 @@ def run(self): class Update(threading.Thread): - def __init__(self, collection, n, expect_exception): threading.Thread.__init__(self) self.collection = collection @@ -100,8 +92,7 @@ def run(self): error = True try: - self.collection.update_one({"test": "unique"}, - {"$set": {"test": "update"}}) + self.collection.update_one({"test": "unique"}, {"$set": {"test": "update"}}) error = False except: if not self.expect_exception: diff --git a/test/test_timestamp.py b/test/test_timestamp.py index bb3358121c..3602fe2808 100644 --- a/test/test_timestamp.py +++ b/test/test_timestamp.py @@ -14,15 +14,17 @@ """Tests for 
the Timestamp class.""" -import datetime -import sys import copy +import datetime import pickle +import sys + sys.path[0:0] = [""] +from test import unittest + from bson.timestamp import Timestamp from bson.tz_util import utc -from test import unittest class TestTimestamp(unittest.TestCase): @@ -78,5 +80,6 @@ def test_repr(self): t = Timestamp(0, 0) self.assertEqual(repr(t), "Timestamp(0, 0)") + if __name__ == "__main__": unittest.main() diff --git a/test/test_topology.py b/test/test_topology.py index a309d622ab..d7bae9229f 100644 --- a/test/test_topology.py +++ b/test/test_topology.py @@ -18,27 +18,23 @@ sys.path[0:0] = [""] -from bson.objectid import ObjectId +from test import client_knobs, unittest +from test.pymongo_mocks import DummyMonitor +from test.utils import MockPool, wait_until +from bson.objectid import ObjectId from pymongo import common -from pymongo.errors import (AutoReconnect, - ConfigurationError, - ConnectionFailure) +from pymongo.errors import AutoReconnect, ConfigurationError, ConnectionFailure from pymongo.hello import Hello, HelloCompat from pymongo.monitor import Monitor from pymongo.pool import PoolOptions from pymongo.read_preferences import ReadPreference, Secondary from pymongo.server_description import ServerDescription -from pymongo.server_selectors import (any_server_selector, - writable_server_selector) +from pymongo.server_selectors import any_server_selector, writable_server_selector from pymongo.server_type import SERVER_TYPE from pymongo.settings import TopologySettings -from pymongo.topology import (_ErrorContext, - Topology) +from pymongo.topology import Topology, _ErrorContext from pymongo.topology_description import TOPOLOGY_TYPE -from test import client_knobs, unittest -from test.pymongo_mocks import DummyMonitor -from test.utils import MockPool, wait_until class SetNameDiscoverySettings(TopologySettings): @@ -46,20 +42,20 @@ def get_topology_type(self): return TOPOLOGY_TYPE.ReplicaSetNoPrimary -address = ('a', 27017) +address = ("a", 27017) def create_mock_topology( - seeds=None, - replica_set_name=None, - monitor_class=DummyMonitor, - direct_connection=False): - partitioned_seeds = list(map(common.partition_node, seeds or ['a'])) + seeds=None, replica_set_name=None, monitor_class=DummyMonitor, direct_connection=False +): + partitioned_seeds = list(map(common.partition_node, seeds or ["a"])) topology_settings = TopologySettings( partitioned_seeds, replica_set_name=replica_set_name, pool_class=MockPool, - monitor_class=monitor_class, direct_connection=direct_connection) + monitor_class=monitor_class, + direct_connection=direct_connection, + ) t = Topology(topology_settings) t.open() @@ -67,8 +63,7 @@ def create_mock_topology( def got_hello(topology, server_address, hello_response): - server_description = ServerDescription( - server_address, Hello(hello_response), 0) + server_description = ServerDescription(server_address, Hello(hello_response), 0) topology.on_change(server_description) @@ -108,7 +103,7 @@ def test_timeout_configuration(self): t.open() # Get the default server. - server = t.get_server_by_address(('localhost', 27017)) + server = t.get_server_by_address(("localhost", 27017)) # The pool for application operations obeys our settings. 
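        # (Monitors keep a separate pool; per the SDAM spec a heartbeat
        # connection uses connectTimeoutMS for both connecting and socket
        # reads, so application socket timeouts do not govern server checks.)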
self.assertEqual(1, server._pool.opts.connect_timeout) @@ -127,55 +122,53 @@ def test_timeout_configuration(self): class TestSingleServerTopology(TopologyTest): def test_direct_connection(self): for server_type, hello_response in [ - (SERVER_TYPE.RSPrimary, { - 'ok': 1, - HelloCompat.LEGACY_CMD: True, - 'hosts': ['a'], - 'setName': 'rs', - 'maxWireVersion': 6}), - - (SERVER_TYPE.RSSecondary, { - 'ok': 1, - HelloCompat.LEGACY_CMD: False, - 'secondary': True, - 'hosts': ['a'], - 'setName': 'rs', - 'maxWireVersion': 6}), - - (SERVER_TYPE.Mongos, { - 'ok': 1, - HelloCompat.LEGACY_CMD: True, - 'msg': 'isdbgrid', - 'maxWireVersion': 6}), - - (SERVER_TYPE.RSArbiter, { - 'ok': 1, - HelloCompat.LEGACY_CMD: False, - 'arbiterOnly': True, - 'hosts': ['a'], - 'setName': 'rs', - 'maxWireVersion': 6}), - - (SERVER_TYPE.Standalone, { - 'ok': 1, - HelloCompat.LEGACY_CMD: True, - 'maxWireVersion': 6}), - + ( + SERVER_TYPE.RSPrimary, + { + "ok": 1, + HelloCompat.LEGACY_CMD: True, + "hosts": ["a"], + "setName": "rs", + "maxWireVersion": 6, + }, + ), + ( + SERVER_TYPE.RSSecondary, + { + "ok": 1, + HelloCompat.LEGACY_CMD: False, + "secondary": True, + "hosts": ["a"], + "setName": "rs", + "maxWireVersion": 6, + }, + ), + ( + SERVER_TYPE.Mongos, + {"ok": 1, HelloCompat.LEGACY_CMD: True, "msg": "isdbgrid", "maxWireVersion": 6}, + ), + ( + SERVER_TYPE.RSArbiter, + { + "ok": 1, + HelloCompat.LEGACY_CMD: False, + "arbiterOnly": True, + "hosts": ["a"], + "setName": "rs", + "maxWireVersion": 6, + }, + ), + (SERVER_TYPE.Standalone, {"ok": 1, HelloCompat.LEGACY_CMD: True, "maxWireVersion": 6}), # A "slave" in a master-slave deployment. # This replication type was removed in MongoDB # 4.0. - (SERVER_TYPE.Standalone, { - 'ok': 1, - HelloCompat.LEGACY_CMD: False, - 'maxWireVersion': 6}), + (SERVER_TYPE.Standalone, {"ok": 1, HelloCompat.LEGACY_CMD: False, "maxWireVersion": 6}), ]: t = create_mock_topology(direct_connection=True) # Can't select a server while the only server is of type Unknown. - with self.assertRaisesRegex(ConnectionFailure, - 'No servers found yet'): - t.select_servers(any_server_selector, - server_selection_timeout=0) + with self.assertRaisesRegex(ConnectionFailure, "No servers found yet"): + t.select_servers(any_server_selector, server_selection_timeout=0) got_hello(t, address, hello_response) @@ -189,12 +182,13 @@ def test_direct_connection(self): # Topology type single is always readable and writable regardless # of server type or state. 
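            # For example, a direct connection to a server that reports
            # secondary=True still satisfies writable_server_selector,
            # because selection in a Single topology skips the
            # primary/secondary check.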
- self.assertEqual(t.description.topology_type_name, 'Single') + self.assertEqual(t.description.topology_type_name, "Single") self.assertTrue(t.description.has_writable_server()) self.assertTrue(t.description.has_readable_server()) self.assertTrue(t.description.has_readable_server(Secondary())) - self.assertTrue(t.description.has_readable_server( - Secondary(tag_sets=[{'tag': 'does-not-exist'}]))) + self.assertTrue( + t.description.has_readable_server(Secondary(tag_sets=[{"tag": "does-not-exist"}])) + ) def test_reopen(self): t = create_mock_topology() @@ -206,7 +200,7 @@ def test_reopen(self): def test_unavailable_seed(self): t = create_mock_topology() disconnected(t, address) - self.assertEqual(SERVER_TYPE.Unknown, get_type(t, 'a')) + self.assertEqual(SERVER_TYPE.Unknown, get_type(t, "a")) def test_round_trip_time(self): round_trip_time = 125 @@ -215,10 +209,9 @@ def test_round_trip_time(self): class TestMonitor(Monitor): def _check_with_socket(self, *args, **kwargs): if available: - return (Hello({'ok': 1, 'maxWireVersion': 6}), - round_trip_time) + return (Hello({"ok": 1, "maxWireVersion": 6}), round_trip_time) else: - raise AutoReconnect('mock monitor error') + raise AutoReconnect("mock monitor error") t = create_mock_topology(monitor_class=TestMonitor) self.addCleanup(t.close) @@ -237,14 +230,13 @@ def _check_with_socket(self, *args, **kwargs): def raises_err(): try: - t.select_server(writable_server_selector, - server_selection_timeout=0.1) + t.select_server(writable_server_selector, server_selection_timeout=0.1) except ConnectionFailure: return True else: return False - wait_until(raises_err, 'discover server is down') + wait_until(raises_err, "discover server is down") self.assertIsNone(s.description.round_trip_time) # Bring it back, RTT is now 20 milliseconds. @@ -254,8 +246,10 @@ def raises_err(): def new_average(): # We reset the average to the most recent measurement. 
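            # Round trip time is normally a weighted moving average of
            # recent checks; once the server was marked Unknown the old
            # samples (125) were discarded, so the first new measurement
            # (20) becomes the average outright instead of being blended
            # with stale data.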
description = s.description - return (description.round_trip_time is not None - and round(abs(20 - description.round_trip_time), 7) == 0) + return ( + description.round_trip_time is not None + and round(abs(20 - description.round_trip_time), 7) == 0 + ) tries = 0 while not new_average(): @@ -267,275 +261,289 @@ def new_average(): class TestMultiServerTopology(TopologyTest): def test_readable_writable(self): - t = create_mock_topology(replica_set_name='rs') - got_hello(t, ('a', 27017), { - 'ok': 1, - HelloCompat.LEGACY_CMD: True, - 'setName': 'rs', - 'hosts': ['a', 'b']}) - - got_hello(t, ('b', 27017), { - 'ok': 1, - HelloCompat.LEGACY_CMD: False, - 'secondary': True, - 'setName': 'rs', - 'hosts': ['a', 'b']}) + t = create_mock_topology(replica_set_name="rs") + got_hello( + t, + ("a", 27017), + {"ok": 1, HelloCompat.LEGACY_CMD: True, "setName": "rs", "hosts": ["a", "b"]}, + ) + + got_hello( + t, + ("b", 27017), + { + "ok": 1, + HelloCompat.LEGACY_CMD: False, + "secondary": True, + "setName": "rs", + "hosts": ["a", "b"], + }, + ) - self.assertEqual( - t.description.topology_type_name, 'ReplicaSetWithPrimary') + self.assertEqual(t.description.topology_type_name, "ReplicaSetWithPrimary") self.assertTrue(t.description.has_writable_server()) self.assertTrue(t.description.has_readable_server()) - self.assertTrue( - t.description.has_readable_server(Secondary())) - self.assertFalse( - t.description.has_readable_server( - Secondary(tag_sets=[{'tag': 'exists'}]))) - - t = create_mock_topology(replica_set_name='rs') - got_hello(t, ('a', 27017), { - 'ok': 1, - HelloCompat.LEGACY_CMD: False, - 'secondary': False, - 'setName': 'rs', - 'hosts': ['a', 'b']}) - - got_hello(t, ('b', 27017), { - 'ok': 1, - HelloCompat.LEGACY_CMD: False, - 'secondary': True, - 'setName': 'rs', - 'hosts': ['a', 'b']}) + self.assertTrue(t.description.has_readable_server(Secondary())) + self.assertFalse(t.description.has_readable_server(Secondary(tag_sets=[{"tag": "exists"}]))) + + t = create_mock_topology(replica_set_name="rs") + got_hello( + t, + ("a", 27017), + { + "ok": 1, + HelloCompat.LEGACY_CMD: False, + "secondary": False, + "setName": "rs", + "hosts": ["a", "b"], + }, + ) + + got_hello( + t, + ("b", 27017), + { + "ok": 1, + HelloCompat.LEGACY_CMD: False, + "secondary": True, + "setName": "rs", + "hosts": ["a", "b"], + }, + ) - self.assertEqual( - t.description.topology_type_name, 'ReplicaSetNoPrimary') + self.assertEqual(t.description.topology_type_name, "ReplicaSetNoPrimary") self.assertFalse(t.description.has_writable_server()) self.assertFalse(t.description.has_readable_server()) - self.assertTrue( - t.description.has_readable_server(Secondary())) - self.assertFalse( - t.description.has_readable_server( - Secondary(tag_sets=[{'tag': 'exists'}]))) - - t = create_mock_topology(replica_set_name='rs') - got_hello(t, ('a', 27017), { - 'ok': 1, - HelloCompat.LEGACY_CMD: True, - 'setName': 'rs', - 'hosts': ['a', 'b']}) - - got_hello(t, ('b', 27017), { - 'ok': 1, - HelloCompat.LEGACY_CMD: False, - 'secondary': True, - 'setName': 'rs', - 'hosts': ['a', 'b'], - 'tags': {'tag': 'exists'}}) - - self.assertEqual( - t.description.topology_type_name, 'ReplicaSetWithPrimary') + self.assertTrue(t.description.has_readable_server(Secondary())) + self.assertFalse(t.description.has_readable_server(Secondary(tag_sets=[{"tag": "exists"}]))) + + t = create_mock_topology(replica_set_name="rs") + got_hello( + t, + ("a", 27017), + {"ok": 1, HelloCompat.LEGACY_CMD: True, "setName": "rs", "hosts": ["a", "b"]}, + ) + + got_hello( + t, + ("b", 
27017), + { + "ok": 1, + HelloCompat.LEGACY_CMD: False, + "secondary": True, + "setName": "rs", + "hosts": ["a", "b"], + "tags": {"tag": "exists"}, + }, + ) + + self.assertEqual(t.description.topology_type_name, "ReplicaSetWithPrimary") self.assertTrue(t.description.has_writable_server()) self.assertTrue(t.description.has_readable_server()) - self.assertTrue( - t.description.has_readable_server(Secondary())) - self.assertTrue( - t.description.has_readable_server( - Secondary(tag_sets=[{'tag': 'exists'}]))) + self.assertTrue(t.description.has_readable_server(Secondary())) + self.assertTrue(t.description.has_readable_server(Secondary(tag_sets=[{"tag": "exists"}]))) def test_close(self): - t = create_mock_topology(replica_set_name='rs') - got_hello(t, ('a', 27017), { - 'ok': 1, - HelloCompat.LEGACY_CMD: True, - 'setName': 'rs', - 'hosts': ['a', 'b']}) - - got_hello(t, ('b', 27017), { - 'ok': 1, - HelloCompat.LEGACY_CMD: False, - 'secondary': True, - 'setName': 'rs', - 'hosts': ['a', 'b']}) - - self.assertEqual(SERVER_TYPE.RSPrimary, get_type(t, 'a')) - self.assertEqual(SERVER_TYPE.RSSecondary, get_type(t, 'b')) - self.assertTrue(get_monitor(t, 'a').opened) - self.assertTrue(get_monitor(t, 'b').opened) - self.assertEqual(TOPOLOGY_TYPE.ReplicaSetWithPrimary, - t.description.topology_type) + t = create_mock_topology(replica_set_name="rs") + got_hello( + t, + ("a", 27017), + {"ok": 1, HelloCompat.LEGACY_CMD: True, "setName": "rs", "hosts": ["a", "b"]}, + ) + + got_hello( + t, + ("b", 27017), + { + "ok": 1, + HelloCompat.LEGACY_CMD: False, + "secondary": True, + "setName": "rs", + "hosts": ["a", "b"], + }, + ) + + self.assertEqual(SERVER_TYPE.RSPrimary, get_type(t, "a")) + self.assertEqual(SERVER_TYPE.RSSecondary, get_type(t, "b")) + self.assertTrue(get_monitor(t, "a").opened) + self.assertTrue(get_monitor(t, "b").opened) + self.assertEqual(TOPOLOGY_TYPE.ReplicaSetWithPrimary, t.description.topology_type) t.close() self.assertEqual(2, len(t.description.server_descriptions())) - self.assertEqual(SERVER_TYPE.Unknown, get_type(t, 'a')) - self.assertEqual(SERVER_TYPE.Unknown, get_type(t, 'b')) - self.assertFalse(get_monitor(t, 'a').opened) - self.assertFalse(get_monitor(t, 'b').opened) - self.assertEqual('rs', t.description.replica_set_name) - self.assertEqual(TOPOLOGY_TYPE.ReplicaSetNoPrimary, - t.description.topology_type) + self.assertEqual(SERVER_TYPE.Unknown, get_type(t, "a")) + self.assertEqual(SERVER_TYPE.Unknown, get_type(t, "b")) + self.assertFalse(get_monitor(t, "a").opened) + self.assertFalse(get_monitor(t, "b").opened) + self.assertEqual("rs", t.description.replica_set_name) + self.assertEqual(TOPOLOGY_TYPE.ReplicaSetNoPrimary, t.description.topology_type) # A closed topology should not be updated when receiving a hello. - got_hello(t, ('a', 27017), { - 'ok': 1, - HelloCompat.LEGACY_CMD: True, - 'setName': 'rs', - 'hosts': ['a', 'b', 'c']}) + got_hello( + t, + ("a", 27017), + {"ok": 1, HelloCompat.LEGACY_CMD: True, "setName": "rs", "hosts": ["a", "b", "c"]}, + ) self.assertEqual(2, len(t.description.server_descriptions())) - self.assertEqual(SERVER_TYPE.Unknown, get_type(t, 'a')) - self.assertEqual(SERVER_TYPE.Unknown, get_type(t, 'b')) - self.assertFalse(get_monitor(t, 'a').opened) - self.assertFalse(get_monitor(t, 'b').opened) + self.assertEqual(SERVER_TYPE.Unknown, get_type(t, "a")) + self.assertEqual(SERVER_TYPE.Unknown, get_type(t, "b")) + self.assertFalse(get_monitor(t, "a").opened) + self.assertFalse(get_monitor(t, "b").opened) # Server c should not have been added. 
- self.assertEqual(None, get_server(t, 'c')) - self.assertEqual('rs', t.description.replica_set_name) - self.assertEqual(TOPOLOGY_TYPE.ReplicaSetNoPrimary, - t.description.topology_type) + self.assertEqual(None, get_server(t, "c")) + self.assertEqual("rs", t.description.replica_set_name) + self.assertEqual(TOPOLOGY_TYPE.ReplicaSetNoPrimary, t.description.topology_type) def test_handle_error(self): - t = create_mock_topology(replica_set_name='rs') - got_hello(t, ('a', 27017), { - 'ok': 1, - HelloCompat.LEGACY_CMD: True, - 'setName': 'rs', - 'hosts': ['a', 'b']}) - - got_hello(t, ('b', 27017), { - 'ok': 1, - HelloCompat.LEGACY_CMD: False, - 'secondary': True, - 'setName': 'rs', - 'hosts': ['a', 'b']}) - - errctx = _ErrorContext(AutoReconnect('mock'), 0, 0, True, None) - t.handle_error(('a', 27017), errctx) - self.assertEqual(SERVER_TYPE.Unknown, get_type(t, 'a')) - self.assertEqual(SERVER_TYPE.RSSecondary, get_type(t, 'b')) - self.assertEqual('rs', t.description.replica_set_name) - self.assertEqual(TOPOLOGY_TYPE.ReplicaSetNoPrimary, - t.description.topology_type) - - got_hello(t, ('a', 27017), { - 'ok': 1, - HelloCompat.LEGACY_CMD: True, - 'setName': 'rs', - 'hosts': ['a', 'b']}) - - self.assertEqual(SERVER_TYPE.RSPrimary, get_type(t, 'a')) - self.assertEqual(TOPOLOGY_TYPE.ReplicaSetWithPrimary, - t.description.topology_type) - - t.handle_error(('b', 27017), errctx) - self.assertEqual(SERVER_TYPE.RSPrimary, get_type(t, 'a')) - self.assertEqual(SERVER_TYPE.Unknown, get_type(t, 'b')) - self.assertEqual('rs', t.description.replica_set_name) - self.assertEqual(TOPOLOGY_TYPE.ReplicaSetWithPrimary, - t.description.topology_type) + t = create_mock_topology(replica_set_name="rs") + got_hello( + t, + ("a", 27017), + {"ok": 1, HelloCompat.LEGACY_CMD: True, "setName": "rs", "hosts": ["a", "b"]}, + ) + + got_hello( + t, + ("b", 27017), + { + "ok": 1, + HelloCompat.LEGACY_CMD: False, + "secondary": True, + "setName": "rs", + "hosts": ["a", "b"], + }, + ) + + errctx = _ErrorContext(AutoReconnect("mock"), 0, 0, True, None) + t.handle_error(("a", 27017), errctx) + self.assertEqual(SERVER_TYPE.Unknown, get_type(t, "a")) + self.assertEqual(SERVER_TYPE.RSSecondary, get_type(t, "b")) + self.assertEqual("rs", t.description.replica_set_name) + self.assertEqual(TOPOLOGY_TYPE.ReplicaSetNoPrimary, t.description.topology_type) + + got_hello( + t, + ("a", 27017), + {"ok": 1, HelloCompat.LEGACY_CMD: True, "setName": "rs", "hosts": ["a", "b"]}, + ) + + self.assertEqual(SERVER_TYPE.RSPrimary, get_type(t, "a")) + self.assertEqual(TOPOLOGY_TYPE.ReplicaSetWithPrimary, t.description.topology_type) + + t.handle_error(("b", 27017), errctx) + self.assertEqual(SERVER_TYPE.RSPrimary, get_type(t, "a")) + self.assertEqual(SERVER_TYPE.Unknown, get_type(t, "b")) + self.assertEqual("rs", t.description.replica_set_name) + self.assertEqual(TOPOLOGY_TYPE.ReplicaSetWithPrimary, t.description.topology_type) def test_handle_error_removed_server(self): - t = create_mock_topology(replica_set_name='rs') + t = create_mock_topology(replica_set_name="rs") # No error resetting a server not in the TopologyDescription. - errctx = _ErrorContext(AutoReconnect('mock'), 0, 0, True, None) - t.handle_error(('b', 27017), errctx) + errctx = _ErrorContext(AutoReconnect("mock"), 0, 0, True, None) + t.handle_error(("b", 27017), errctx) # Server was *not* added as type Unknown. 
- self.assertFalse(t.has_server(('b', 27017))) + self.assertFalse(t.has_server(("b", 27017))) def test_discover_set_name_from_primary(self): # Discovering a replica set without the setName supplied by the user # is not yet supported by MongoClient, but Topology can do it. topology_settings = SetNameDiscoverySettings( - seeds=[address], - pool_class=MockPool, - monitor_class=DummyMonitor) + seeds=[address], pool_class=MockPool, monitor_class=DummyMonitor + ) t = Topology(topology_settings) self.assertEqual(t.description.replica_set_name, None) - self.assertEqual(t.description.topology_type, - TOPOLOGY_TYPE.ReplicaSetNoPrimary) + self.assertEqual(t.description.topology_type, TOPOLOGY_TYPE.ReplicaSetNoPrimary) t.open() - got_hello(t, address, { - 'ok': 1, - HelloCompat.LEGACY_CMD: True, - 'setName': 'rs', - 'hosts': ['a']}) + got_hello( + t, address, {"ok": 1, HelloCompat.LEGACY_CMD: True, "setName": "rs", "hosts": ["a"]} + ) - self.assertEqual(t.description.replica_set_name, 'rs') - self.assertEqual(t.description.topology_type, - TOPOLOGY_TYPE.ReplicaSetWithPrimary) + self.assertEqual(t.description.replica_set_name, "rs") + self.assertEqual(t.description.topology_type, TOPOLOGY_TYPE.ReplicaSetWithPrimary) # Another response from the primary. Tests the code that processes # primary response when topology type is already ReplicaSetWithPrimary. - got_hello(t, address, { - 'ok': 1, - HelloCompat.LEGACY_CMD: True, - 'setName': 'rs', - 'hosts': ['a']}) + got_hello( + t, address, {"ok": 1, HelloCompat.LEGACY_CMD: True, "setName": "rs", "hosts": ["a"]} + ) # No change. - self.assertEqual(t.description.replica_set_name, 'rs') - self.assertEqual(t.description.topology_type, - TOPOLOGY_TYPE.ReplicaSetWithPrimary) + self.assertEqual(t.description.replica_set_name, "rs") + self.assertEqual(t.description.topology_type, TOPOLOGY_TYPE.ReplicaSetWithPrimary) def test_discover_set_name_from_secondary(self): # Discovering a replica set without the setName supplied by the user # is not yet supported by MongoClient, but Topology can do it. topology_settings = SetNameDiscoverySettings( - seeds=[address], - pool_class=MockPool, - monitor_class=DummyMonitor) + seeds=[address], pool_class=MockPool, monitor_class=DummyMonitor + ) t = Topology(topology_settings) self.assertEqual(t.description.replica_set_name, None) - self.assertEqual(t.description.topology_type, - TOPOLOGY_TYPE.ReplicaSetNoPrimary) + self.assertEqual(t.description.topology_type, TOPOLOGY_TYPE.ReplicaSetNoPrimary) t.open() - got_hello(t, address, { - 'ok': 1, - HelloCompat.LEGACY_CMD: False, - 'secondary': True, - 'setName': 'rs', - 'hosts': ['a']}) + got_hello( + t, + address, + { + "ok": 1, + HelloCompat.LEGACY_CMD: False, + "secondary": True, + "setName": "rs", + "hosts": ["a"], + }, + ) - self.assertEqual(t.description.replica_set_name, 'rs') - self.assertEqual(t.description.topology_type, - TOPOLOGY_TYPE.ReplicaSetNoPrimary) + self.assertEqual(t.description.replica_set_name, "rs") + self.assertEqual(t.description.topology_type, TOPOLOGY_TYPE.ReplicaSetNoPrimary) def test_wire_version(self): - t = create_mock_topology(replica_set_name='rs') + t = create_mock_topology(replica_set_name="rs") t.description.check_compatible() # No error. - got_hello(t, address, { - 'ok': 1, - HelloCompat.LEGACY_CMD: True, - 'setName': 'rs', - 'hosts': ['a']}) + got_hello( + t, address, {"ok": 1, HelloCompat.LEGACY_CMD: True, "setName": "rs", "hosts": ["a"]} + ) # Use defaults. 
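        # A hello response that omits minWireVersion and maxWireVersion is
        # treated as 0/0, which is what the assertions below expect.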
server = t.get_server_by_address(address) self.assertEqual(server.description.min_wire_version, 0) self.assertEqual(server.description.max_wire_version, 0) - got_hello(t, address, { - 'ok': 1, - HelloCompat.LEGACY_CMD: True, - 'setName': 'rs', - 'hosts': ['a'], - 'minWireVersion': 1, - 'maxWireVersion': 6}) + got_hello( + t, + address, + { + "ok": 1, + HelloCompat.LEGACY_CMD: True, + "setName": "rs", + "hosts": ["a"], + "minWireVersion": 1, + "maxWireVersion": 6, + }, + ) self.assertEqual(server.description.min_wire_version, 1) self.assertEqual(server.description.max_wire_version, 6) t.select_servers(any_server_selector) # Incompatible. - got_hello(t, address, { - 'ok': 1, - HelloCompat.LEGACY_CMD: True, - 'setName': 'rs', - 'hosts': ['a'], - 'minWireVersion': 21, - 'maxWireVersion': 22}) + got_hello( + t, + address, + { + "ok": 1, + HelloCompat.LEGACY_CMD: True, + "setName": "rs", + "hosts": ["a"], + "minWireVersion": 21, + "maxWireVersion": 22, + }, + ) try: t.select_servers(any_server_selector) @@ -544,19 +552,24 @@ def test_wire_version(self): self.assertEqual( str(e), "Server at a:27017 requires wire version 21, but this version " - "of PyMongo only supports up to %d." - % (common.MAX_SUPPORTED_WIRE_VERSION,)) + "of PyMongo only supports up to %d." % (common.MAX_SUPPORTED_WIRE_VERSION,), + ) else: - self.fail('No error with incompatible wire version') + self.fail("No error with incompatible wire version") # Incompatible. - got_hello(t, address, { - 'ok': 1, - HelloCompat.LEGACY_CMD: True, - 'setName': 'rs', - 'hosts': ['a'], - 'minWireVersion': 0, - 'maxWireVersion': 0}) + got_hello( + t, + address, + { + "ok": 1, + HelloCompat.LEGACY_CMD: True, + "setName": "rs", + "hosts": ["a"], + "minWireVersion": 0, + "maxWireVersion": 0, + }, + ) try: t.select_servers(any_server_selector) @@ -566,57 +579,72 @@ def test_wire_version(self): str(e), "Server at a:27017 reports wire version 0, but this version " "of PyMongo requires at least %d (MongoDB %s)." - % (common.MIN_SUPPORTED_WIRE_VERSION, - common.MIN_SUPPORTED_SERVER_VERSION)) + % (common.MIN_SUPPORTED_WIRE_VERSION, common.MIN_SUPPORTED_SERVER_VERSION), + ) else: - self.fail('No error with incompatible wire version') + self.fail("No error with incompatible wire version") def test_max_write_batch_size(self): - t = create_mock_topology(seeds=['a', 'b'], replica_set_name='rs') + t = create_mock_topology(seeds=["a", "b"], replica_set_name="rs") def write_batch_size(): s = t.select_server(writable_server_selector) return s.description.max_write_batch_size - got_hello(t, ('a', 27017), { - 'ok': 1, - HelloCompat.LEGACY_CMD: True, - 'setName': 'rs', - 'hosts': ['a', 'b'], - 'maxWireVersion': 6, - 'maxWriteBatchSize': 1}) - - got_hello(t, ('b', 27017), { - 'ok': 1, - HelloCompat.LEGACY_CMD: False, - 'secondary': True, - 'setName': 'rs', - 'hosts': ['a', 'b'], - 'maxWireVersion': 6, - 'maxWriteBatchSize': 2}) + got_hello( + t, + ("a", 27017), + { + "ok": 1, + HelloCompat.LEGACY_CMD: True, + "setName": "rs", + "hosts": ["a", "b"], + "maxWireVersion": 6, + "maxWriteBatchSize": 1, + }, + ) + + got_hello( + t, + ("b", 27017), + { + "ok": 1, + HelloCompat.LEGACY_CMD: False, + "secondary": True, + "setName": "rs", + "hosts": ["a", "b"], + "maxWireVersion": 6, + "maxWriteBatchSize": 2, + }, + ) # Uses primary's max batch size. self.assertEqual(1, write_batch_size()) # b becomes primary. 
-        got_hello(t, ('b', 27017), {
-            'ok': 1,
-            HelloCompat.LEGACY_CMD: True,
-            'setName': 'rs',
-            'hosts': ['a', 'b'],
-            'maxWireVersion': 6,
-            'maxWriteBatchSize': 2})
+        got_hello(
+            t,
+            ("b", 27017),
+            {
+                "ok": 1,
+                HelloCompat.LEGACY_CMD: True,
+                "setName": "rs",
+                "hosts": ["a", "b"],
+                "maxWireVersion": 6,
+                "maxWriteBatchSize": 2,
+            },
+        )

         self.assertEqual(2, write_batch_size())

     def test_topology_repr(self):
-        t = create_mock_topology(replica_set_name='rs')
+        t = create_mock_topology(replica_set_name="rs")
         self.addCleanup(t.close)
-        got_hello(t, ('a', 27017), {
-            'ok': 1,
-            HelloCompat.LEGACY_CMD: True,
-            'setName': 'rs',
-            'hosts': ['a', 'c', 'b']})
+        got_hello(
+            t,
+            ("a", 27017),
+            {"ok": 1, HelloCompat.LEGACY_CMD: True, "setName": "rs", "hosts": ["a", "c", "b"]},
+        )
         self.assertEqual(
             repr(t.description),
             "<TopologyDescription id: %s, "
             "topology_type: ReplicaSetWithPrimary, servers: "
             "[<ServerDescription ('a', 27017) server_type: RSPrimary, rtt: None>, "
             "<ServerDescription ('b', 27017) server_type: Unknown, rtt: None>, "
             "<ServerDescription ('c', 27017) server_type: Unknown,"
-            " rtt: None>]>" % (t._topology_id,))
+            " rtt: None>]>" % (t._topology_id,),
+        )

     def test_unexpected_load_balancer(self):
         # Note: This behavior should not be reachable in practice but we
         # should handle it gracefully nonetheless. See PYTHON-2791.
         # Load balancers are included in topology with a single seed.
-        t = create_mock_topology(seeds=['a'])
-        mock_lb_response = {'ok': 1, 'msg': 'isdbgrid',
-                            'serviceId': ObjectId(), 'maxWireVersion': 13}
-        got_hello(t, ('a', 27017), mock_lb_response)
+        t = create_mock_topology(seeds=["a"])
+        mock_lb_response = {
+            "ok": 1,
+            "msg": "isdbgrid",
+            "serviceId": ObjectId(),
+            "maxWireVersion": 13,
+        }
+        got_hello(t, ("a", 27017), mock_lb_response)
         sds = t.description.server_descriptions()
-        self.assertIn(('a', 27017), sds)
-        self.assertEqual(sds[('a', 27017)].server_type_name, 'LoadBalancer')
-        self.assertEqual(t.description.topology_type_name, 'Single')
+        self.assertIn(("a", 27017), sds)
+        self.assertEqual(sds[("a", 27017)].server_type_name, "LoadBalancer")
+        self.assertEqual(t.description.topology_type_name, "Single")
         self.assertTrue(t.description.has_writable_server())

        # Load balancers are removed from a topology with multiple seeds.
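        # An unexpected LoadBalancer description cannot participate in an
        # Unknown or replica-set topology, so SDAM drops the server instead
        # of crashing and the topology type stays Unknown.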
- t = create_mock_topology(seeds=['a', 'b']) - got_hello(t, ('a', 27017), mock_lb_response) - self.assertNotIn(('a', 27017), t.description.server_descriptions()) - self.assertEqual(t.description.topology_type_name, 'Unknown') + t = create_mock_topology(seeds=["a", "b"]) + got_hello(t, ("a", 27017), mock_lb_response) + self.assertNotIn(("a", 27017), t.description.server_descriptions()) + self.assertEqual(t.description.topology_type_name, "Unknown") def wait_for_primary(topology): @@ -663,7 +696,7 @@ def get_primary(): except ConnectionFailure: return None - return wait_until(get_primary, 'find primary') + return wait_until(get_primary, "find primary") class TestTopologyErrors(TopologyTest): @@ -677,9 +710,9 @@ class TestMonitor(Monitor): def _check_with_socket(self, *args, **kwargs): hello_count[0] += 1 if hello_count[0] == 1: - return Hello({'ok': 1, 'maxWireVersion': 6}), 0 + return Hello({"ok": 1, "maxWireVersion": 6}), 0 else: - raise AutoReconnect('mock monitor error') + raise AutoReconnect("mock monitor error") t = create_mock_topology(monitor_class=TestMonitor) self.addCleanup(t.close) @@ -699,17 +732,15 @@ class TestMonitor(Monitor): def _check_with_socket(self, *args, **kwargs): hello_count[0] += 1 if hello_count[0] in (1, 3): - return Hello({'ok': 1, 'maxWireVersion': 6}), 0 + return Hello({"ok": 1, "maxWireVersion": 6}), 0 else: - raise AutoReconnect( - 'mock monitor error #%s' % (hello_count[0],)) + raise AutoReconnect("mock monitor error #%s" % (hello_count[0],)) t = create_mock_topology(monitor_class=TestMonitor) self.addCleanup(t.close) server = wait_for_primary(t) self.assertEqual(1, hello_count[0]) - self.assertEqual(SERVER_TYPE.Standalone, - server.description.server_type) + self.assertEqual(SERVER_TYPE.Standalone, server.description.server_type) # Second hello call, server is marked Unknown, then the monitor # immediately runs a retry (third hello). @@ -718,12 +749,11 @@ def _check_with_socket(self, *args, **kwargs): # after the failed check triggered by request_check_all. Wait until # the server becomes known again. 
server = t.select_server(writable_server_selector, 0.250) - self.assertEqual(SERVER_TYPE.Standalone, - server.description.server_type) + self.assertEqual(SERVER_TYPE.Standalone, server.description.server_type) self.assertEqual(3, hello_count[0]) def test_internal_monitor_error(self): - exception = AssertionError('internal error') + exception = AssertionError("internal error") class TestMonitor(Monitor): def _check_with_socket(self, *args, **kwargs): @@ -731,9 +761,8 @@ def _check_with_socket(self, *args, **kwargs): t = create_mock_topology(monitor_class=TestMonitor) self.addCleanup(t.close) - with self.assertRaisesRegex(ConnectionFailure, 'internal error'): - t.select_server(any_server_selector, - server_selection_timeout=0.5) + with self.assertRaisesRegex(ConnectionFailure, "internal error"): + t.select_server(any_server_selector, server_selection_timeout=0.5) class TestServerSelectionErrors(TopologyTest): @@ -744,69 +773,80 @@ def assertMessage(self, message, topology, selector=any_server_selector): self.assertIn(message, str(context.exception)) def test_no_primary(self): - t = create_mock_topology(replica_set_name='rs') - got_hello(t, address, { - 'ok': 1, - HelloCompat.LEGACY_CMD: False, - 'secondary': True, - 'setName': 'rs', - 'hosts': ['a']}) + t = create_mock_topology(replica_set_name="rs") + got_hello( + t, + address, + { + "ok": 1, + HelloCompat.LEGACY_CMD: False, + "secondary": True, + "setName": "rs", + "hosts": ["a"], + }, + ) - self.assertMessage('No replica set members match selector "Primary()"', - t, ReadPreference.PRIMARY) + self.assertMessage( + 'No replica set members match selector "Primary()"', t, ReadPreference.PRIMARY + ) - self.assertMessage('No primary available for writes', - t, writable_server_selector) + self.assertMessage("No primary available for writes", t, writable_server_selector) def test_no_secondary(self): - t = create_mock_topology(replica_set_name='rs') - got_hello(t, address, { - 'ok': 1, - HelloCompat.LEGACY_CMD: True, - 'setName': 'rs', - 'hosts': ['a']}) + t = create_mock_topology(replica_set_name="rs") + got_hello( + t, address, {"ok": 1, HelloCompat.LEGACY_CMD: True, "setName": "rs", "hosts": ["a"]} + ) self.assertMessage( - 'No replica set members match selector' + "No replica set members match selector" ' "Secondary(tag_sets=None, max_staleness=-1, hedge=None)"', - t, ReadPreference.SECONDARY) + t, + ReadPreference.SECONDARY, + ) self.assertMessage( "No replica set members match selector" " \"Secondary(tag_sets=[{'dc': 'ny'}], max_staleness=-1, " - "hedge=None)\"", - t, Secondary(tag_sets=[{'dc': 'ny'}])) + 'hedge=None)"', + t, + Secondary(tag_sets=[{"dc": "ny"}]), + ) def test_bad_replica_set_name(self): - t = create_mock_topology(replica_set_name='rs') - got_hello(t, address, { - 'ok': 1, - HelloCompat.LEGACY_CMD: False, - 'secondary': True, - 'setName': 'wrong', - 'hosts': ['a']}) + t = create_mock_topology(replica_set_name="rs") + got_hello( + t, + address, + { + "ok": 1, + HelloCompat.LEGACY_CMD: False, + "secondary": True, + "setName": "wrong", + "hosts": ["a"], + }, + ) - self.assertMessage( - 'No replica set members available for replica set name "rs"', t) + self.assertMessage('No replica set members available for replica set name "rs"', t) def test_multiple_standalones(self): # Standalones are removed from a topology with multiple seeds. 
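        # Per the SDAM spec a Standalone cannot belong to a replica set or
        # sharded cluster, so a topology seeded with several hosts discards
        # it; with both seeds gone, server selection fails with
        # "No servers available".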
- t = create_mock_topology(seeds=['a', 'b']) - got_hello(t, ('a', 27017), {'ok': 1}) - got_hello(t, ('b', 27017), {'ok': 1}) - self.assertMessage('No servers available', t) + t = create_mock_topology(seeds=["a", "b"]) + got_hello(t, ("a", 27017), {"ok": 1}) + got_hello(t, ("b", 27017), {"ok": 1}) + self.assertMessage("No servers available", t) def test_no_mongoses(self): # Standalones are removed from a topology with multiple seeds. - t = create_mock_topology(seeds=['a', 'b']) + t = create_mock_topology(seeds=["a", "b"]) # Discover a mongos and change topology type to Sharded. - got_hello(t, ('a', 27017), {'ok': 1, 'msg': 'isdbgrid'}) + got_hello(t, ("a", 27017), {"ok": 1, "msg": "isdbgrid"}) # Oops, both servers are standalone now. Remove them. - got_hello(t, ('a', 27017), {'ok': 1}) - got_hello(t, ('b', 27017), {'ok': 1}) - self.assertMessage('No mongoses available', t) + got_hello(t, ("a", 27017), {"ok": 1}) + got_hello(t, ("b", 27017), {"ok": 1}) + self.assertMessage("No mongoses available", t) if __name__ == "__main__": diff --git a/test/test_transactions.py b/test/test_transactions.py index 169a2ad03d..34dbbba34b 100644 --- a/test/test_transactions.py +++ b/test/test_transactions.py @@ -16,35 +16,38 @@ import os import sys - from io import BytesIO sys.path[0:0] = [""] -from pymongo import client_session, WriteConcern +from test import client_context, unittest +from test.utils import ( + OvertCommandListener, + TestCreator, + rs_client, + single_client, + wait_until, +) +from test.utils_spec_runner import SpecRunner + +from gridfs import GridFS, GridFSBucket +from pymongo import WriteConcern, client_session from pymongo.client_session import TransactionOptions -from pymongo.errors import (CollectionInvalid, - ConfigurationError, - ConnectionFailure, - InvalidOperation, - OperationFailure) +from pymongo.errors import ( + CollectionInvalid, + ConfigurationError, + ConnectionFailure, + InvalidOperation, + OperationFailure, +) from pymongo.operations import IndexModel, InsertOne from pymongo.read_concern import ReadConcern from pymongo.read_preferences import ReadPreference -from gridfs import GridFS, GridFSBucket - -from test import unittest, client_context -from test.utils import (rs_client, single_client, - wait_until, OvertCommandListener, - TestCreator) -from test.utils_spec_runner import SpecRunner - # Location of JSON test specifications. -TEST_PATH = os.path.join( - os.path.dirname(os.path.realpath(__file__)), 'transactions', 'legacy') +TEST_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "transactions", "legacy") -_TXN_TESTS_DEBUG = os.environ.get('TRANSACTION_TESTS_DEBUG') +_TXN_TESTS_DEBUG = os.environ.get("TRANSACTION_TESTS_DEBUG") # Max number of operations to perform after a transaction to prove unpinning # occurs. Chosen so that there's a low false positive rate. 
With 2 mongoses, @@ -59,7 +62,7 @@ def setUpClass(cls): super(TransactionsBase, cls).setUpClass() if client_context.supports_transactions(): for address in client_context.mongoses: - cls.mongos_clients.append(single_client('%s:%s' % address)) + cls.mongos_clients.append(single_client("%s:%s" % address)) @classmethod def tearDownClass(cls): @@ -69,14 +72,17 @@ def tearDownClass(cls): def maybe_skip_scenario(self, test): super(TransactionsBase, self).maybe_skip_scenario(test) - if ('secondary' in self.id() and - not client_context.is_mongos and - not client_context.has_secondaries): - raise unittest.SkipTest('No secondaries') + if ( + "secondary" in self.id() + and not client_context.is_mongos + and not client_context.has_secondaries + ): + raise unittest.SkipTest("No secondaries") class TestTransactions(TransactionsBase): RUN_ON_SERVERLESS = True + @client_context.require_transactions def test_transaction_options_validation(self): default_options = TransactionOptions() @@ -85,23 +91,23 @@ def test_transaction_options_validation(self): self.assertIsNone(default_options.read_preference) self.assertIsNone(default_options.max_commit_time_ms) # No error when valid options are provided. - TransactionOptions(read_concern=ReadConcern(), - write_concern=WriteConcern(), - read_preference=ReadPreference.PRIMARY, - max_commit_time_ms=10000) + TransactionOptions( + read_concern=ReadConcern(), + write_concern=WriteConcern(), + read_preference=ReadPreference.PRIMARY, + max_commit_time_ms=10000, + ) with self.assertRaisesRegex(TypeError, "read_concern must be "): TransactionOptions(read_concern={}) # type: ignore with self.assertRaisesRegex(TypeError, "write_concern must be "): TransactionOptions(write_concern={}) # type: ignore with self.assertRaisesRegex( - ConfigurationError, - "transactions do not support unacknowledged write concern"): + ConfigurationError, "transactions do not support unacknowledged write concern" + ): TransactionOptions(write_concern=WriteConcern(w=0)) - with self.assertRaisesRegex( - TypeError, "is not valid for read_preference"): + with self.assertRaisesRegex(TypeError, "is not valid for read_preference"): TransactionOptions(read_preference={}) # type: ignore - with self.assertRaisesRegex( - TypeError, "max_commit_time_ms must be an integer or None"): + with self.assertRaisesRegex(TypeError, "max_commit_time_ms must be an integer or None"): TransactionOptions(max_commit_time_ms="10000") # type: ignore @client_context.require_transactions @@ -115,16 +121,11 @@ def test_transaction_write_concern_override(self): with client.start_session() as s: with s.start_transaction(write_concern=WriteConcern(w=1)): self.assertTrue(coll.insert_one({}, session=s).acknowledged) - self.assertTrue(coll.insert_many( - [{}, {}], session=s).acknowledged) - self.assertTrue(coll.bulk_write( - [InsertOne({})], session=s).acknowledged) - self.assertTrue(coll.replace_one( - {}, {}, session=s).acknowledged) - self.assertTrue(coll.update_one( - {}, {"$set": {"a": 1}}, session=s).acknowledged) - self.assertTrue(coll.update_many( - {}, {"$set": {"a": 1}}, session=s).acknowledged) + self.assertTrue(coll.insert_many([{}, {}], session=s).acknowledged) + self.assertTrue(coll.bulk_write([InsertOne({})], session=s).acknowledged) + self.assertTrue(coll.replace_one({}, {}, session=s).acknowledged) + self.assertTrue(coll.update_one({}, {"$set": {"a": 1}}, session=s).acknowledged) + self.assertTrue(coll.update_many({}, {"$set": {"a": 1}}, session=s).acknowledged) self.assertTrue(coll.delete_one({}, 
session=s).acknowledged) self.assertTrue(coll.delete_many({}, session=s).acknowledged) coll.find_one_and_delete({}, session=s) @@ -133,27 +134,29 @@ def test_transaction_write_concern_override(self): unsupported_txn_writes: list = [ (client.drop_database, [db.name], {}), - (db.drop_collection, ['collection'], {}), + (db.drop_collection, ["collection"], {}), (coll.drop, [], {}), - (coll.rename, ['collection2'], {}), + (coll.rename, ["collection2"], {}), # Drop collection2 between tests of "rename", above. - (coll.database.drop_collection, ['collection2'], {}), - (coll.create_indexes, [[IndexModel('a')]], {}), - (coll.create_index, ['a'], {}), - (coll.drop_index, ['a_1'], {}), + (coll.database.drop_collection, ["collection2"], {}), + (coll.create_indexes, [[IndexModel("a")]], {}), + (coll.create_index, ["a"], {}), + (coll.drop_index, ["a_1"], {}), (coll.drop_indexes, [], {}), (coll.aggregate, [[{"$out": "aggout"}]], {}), ] # Creating a collection in a transaction requires MongoDB 4.4+. if client_context.version < (4, 3, 4): - unsupported_txn_writes.extend([ - (db.create_collection, ['collection'], {}), - ]) + unsupported_txn_writes.extend( + [ + (db.create_collection, ["collection"], {}), + ] + ) for op in unsupported_txn_writes: op, args, kwargs = op with client.start_session() as s: - kwargs['session'] = s + kwargs["session"] = s s.start_transaction(write_concern=WriteConcern(w=1)) with self.assertRaises(OperationFailure): op(*args, **kwargs) @@ -164,8 +167,7 @@ def test_transaction_write_concern_override(self): def test_unpin_for_next_transaction(self): # Increase localThresholdMS and wait until both nodes are discovered # to avoid false positives. - client = rs_client(client_context.mongos_seeds(), - localThresholdMS=1000) + client = rs_client(client_context.mongos_seeds(), localThresholdMS=1000) wait_until(lambda: len(client.nodes) > 1, "discover both mongoses") coll = client.test.test # Create the collection. @@ -193,8 +195,7 @@ def test_unpin_for_next_transaction(self): def test_unpin_for_non_transaction_operation(self): # Increase localThresholdMS and wait until both nodes are discovered # to avoid false positives. - client = rs_client(client_context.mongos_seeds(), - localThresholdMS=1000) + client = rs_client(client_context.mongos_seeds(), localThresholdMS=1000) wait_until(lambda: len(client.nodes) > 1, "discover both mongoses") coll = client.test.test # Create the collection. 
@@ -255,46 +256,71 @@ def gridfs_find(*args, **kwargs): return gfs.find(*args, **kwargs).next() def gridfs_open_upload_stream(*args, **kwargs): - bucket.open_upload_stream(*args, **kwargs).write(b'1') + bucket.open_upload_stream(*args, **kwargs).write(b"1") gridfs_ops = [ - (gfs.put, (b'123',)), + (gfs.put, (b"123",)), (gfs.get, (1,)), - (gfs.get_version, ('name',)), - (gfs.get_last_version, ('name',)), - (gfs.delete, (1, )), + (gfs.get_version, ("name",)), + (gfs.get_last_version, ("name",)), + (gfs.delete, (1,)), (gfs.list, ()), (gfs.find_one, ()), (gridfs_find, ()), (gfs.exists, ()), - (gridfs_open_upload_stream, ('name',)), - (bucket.upload_from_stream, ('name', b'data',)), - (bucket.download_to_stream, (1, BytesIO(),)), - (bucket.download_to_stream_by_name, ('name', BytesIO(),)), + (gridfs_open_upload_stream, ("name",)), + ( + bucket.upload_from_stream, + ( + "name", + b"data", + ), + ), + ( + bucket.download_to_stream, + ( + 1, + BytesIO(), + ), + ), + ( + bucket.download_to_stream_by_name, + ( + "name", + BytesIO(), + ), + ), (bucket.delete, (1,)), (bucket.find, ()), (bucket.open_download_stream, (1,)), - (bucket.open_download_stream_by_name, ('name',)), - (bucket.rename, (1, 'new-name',)), + (bucket.open_download_stream_by_name, ("name",)), + ( + bucket.rename, + ( + 1, + "new-name", + ), + ), ] with client.start_session() as s, s.start_transaction(): for op, args in gridfs_ops: with self.assertRaisesRegex( - InvalidOperation, - 'GridFS does not support multi-document transactions', + InvalidOperation, + "GridFS does not support multi-document transactions", ): op(*args, session=s) # type: ignore # Require 4.2+ for large (16MB+) transactions. @client_context.require_version_min(4, 2) @client_context.require_transactions - @unittest.skipIf(sys.platform == 'win32', - 'Our Windows machines are too slow to pass this test') + @unittest.skipIf(sys.platform == "win32", "Our Windows machines are too slow to pass this test") def test_transaction_starts_with_batched_write(self): - if 'PyPy' in sys.version and client_context.tls: - self.skipTest('PYTHON-2937 PyPy is so slow sending large ' - 'messages over TLS that this test fails') + if "PyPy" in sys.version and client_context.tls: + self.skipTest( + "PYTHON-2937 PyPy is so slow sending large " + "messages over TLS that this test fails" + ) # Start a transaction with a batch of operations that needs to be # split. listener = OvertCommandListener() @@ -304,27 +330,29 @@ def test_transaction_starts_with_batched_write(self): listener.reset() self.addCleanup(client.close) self.addCleanup(coll.drop) - large_str = '\0'*(10*1024*1024) - ops = [InsertOne({'a': large_str}) for _ in range(10)] + large_str = "\0" * (10 * 1024 * 1024) + ops = [InsertOne({"a": large_str}) for _ in range(10)] with client.start_session() as session: with session.start_transaction(): coll.bulk_write(ops, session=session) # Assert commands were constructed properly. 
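# (Ten ~10MB documents cannot fit in a single message, so the bulk write is
# split: the assertion below expects exactly three insert commands plus a
# commitTransaction, with startTransaction only on the first and the same
# lsid/txnNumber on all of them; the split is presumably driven by the
# server's ~48MB maxMessageSizeBytes limit.)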
- self.assertEqual(['insert', 'insert', 'insert', 'commitTransaction'], - listener.started_command_names()) - first_cmd = listener.results['started'][0].command - self.assertTrue(first_cmd['startTransaction']) - lsid = first_cmd['lsid'] - txn_number = first_cmd['txnNumber'] - for event in listener.results['started'][1:]: - self.assertNotIn('startTransaction', event.command) - self.assertEqual(lsid, event.command['lsid']) - self.assertEqual(txn_number, event.command['txnNumber']) + self.assertEqual( + ["insert", "insert", "insert", "commitTransaction"], listener.started_command_names() + ) + first_cmd = listener.results["started"][0].command + self.assertTrue(first_cmd["startTransaction"]) + lsid = first_cmd["lsid"] + txn_number = first_cmd["txnNumber"] + for event in listener.results["started"][1:]: + self.assertNotIn("startTransaction", event.command) + self.assertEqual(lsid, event.command["lsid"]) + self.assertEqual(txn_number, event.command["txnNumber"]) self.assertEqual(10, coll.count_documents({})) class PatchSessionTimeout(object): """Patches the client_session's with_transaction timeout for testing.""" + def __init__(self, mock_timeout): self.real_timeout = client_session._WITH_TRANSACTION_RETRY_TIME_LIMIT self.mock_timeout = mock_timeout @@ -338,15 +366,18 @@ def __exit__(self, exc_type, exc_val, exc_tb): class TestTransactionsConvenientAPI(TransactionsBase): - TEST_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), - 'transactions-convenient-api') + TEST_PATH = os.path.join( + os.path.dirname(os.path.realpath(__file__)), "transactions-convenient-api" + ) @client_context.require_transactions def test_callback_raises_custom_error(self): - class _MyException(Exception):pass + class _MyException(Exception): + pass def raise_error(_): raise _MyException() + with self.client.start_session() as s: with self.assertRaises(_MyException): s.with_transaction(raise_error) @@ -354,17 +385,19 @@ def raise_error(_): @client_context.require_transactions def test_callback_returns_value(self): def callback(_): - return 'Foo' + return "Foo" + with self.client.start_session() as s: - self.assertEqual(s.with_transaction(callback), 'Foo') + self.assertEqual(s.with_transaction(callback), "Foo") self.db.test.insert_one({}) def callback2(session): self.db.test.insert_one({}, session=session) - return 'Foo' + return "Foo" + with self.client.start_session() as s: - self.assertEqual(s.with_transaction(callback2), 'Foo') + self.assertEqual(s.with_transaction(callback2), "Foo") @client_context.require_transactions def test_callback_not_retried_after_timeout(self): @@ -376,13 +409,13 @@ def test_callback_not_retried_after_timeout(self): def callback(session): coll.insert_one({}, session=session) err: dict = { - 'ok': 0, - 'errmsg': 'Transaction 7819 has been aborted.', - 'code': 251, - 'codeName': 'NoSuchTransaction', - 'errorLabels': ['TransientTransactionError'], + "ok": 0, + "errmsg": "Transaction 7819 has been aborted.", + "code": 251, + "codeName": "NoSuchTransaction", + "errorLabels": ["TransientTransactionError"], } - raise OperationFailure(err['errmsg'], err['code'], err) + raise OperationFailure(err["errmsg"], err["code"], err) # Create the collection. 
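# (Seeding the collection first matters: creating a collection inside a
# transaction requires MongoDB 4.4+, per the version gate earlier in this
# file, so the callback must only insert into an existing collection.)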
coll.insert_one({}) @@ -392,8 +425,7 @@ def callback(session): with self.assertRaises(OperationFailure): s.with_transaction(callback) - self.assertEqual(listener.started_command_names(), - ['insert', 'abortTransaction']) + self.assertEqual(listener.started_command_names(), ["insert", "abortTransaction"]) @client_context.require_test_commands @client_context.require_transactions @@ -408,14 +440,17 @@ def callback(session): # Create the collection. coll.insert_one({}) - self.set_fail_point({ - 'configureFailPoint': 'failCommand', 'mode': {'times': 1}, - 'data': { - 'failCommands': ['commitTransaction'], - 'errorCode': 251, # NoSuchTransaction - }}) - self.addCleanup(self.set_fail_point, { - 'configureFailPoint': 'failCommand', 'mode': 'off'}) + self.set_fail_point( + { + "configureFailPoint": "failCommand", + "mode": {"times": 1}, + "data": { + "failCommands": ["commitTransaction"], + "errorCode": 251, # NoSuchTransaction + }, + } + ) + self.addCleanup(self.set_fail_point, {"configureFailPoint": "failCommand", "mode": "off"}) listener.results.clear() with client.start_session() as s: @@ -423,8 +458,7 @@ def callback(session): with self.assertRaises(OperationFailure): s.with_transaction(callback) - self.assertEqual(listener.started_command_names(), - ['insert', 'commitTransaction']) + self.assertEqual(listener.started_command_names(), ["insert", "commitTransaction"]) @client_context.require_test_commands @client_context.require_transactions @@ -439,13 +473,14 @@ def callback(session): # Create the collection. coll.insert_one({}) - self.set_fail_point({ - 'configureFailPoint': 'failCommand', 'mode': {'times': 2}, - 'data': { - 'failCommands': ['commitTransaction'], - 'closeConnection': True}}) - self.addCleanup(self.set_fail_point, { - 'configureFailPoint': 'failCommand', 'mode': 'off'}) + self.set_fail_point( + { + "configureFailPoint": "failCommand", + "mode": {"times": 2}, + "data": {"failCommands": ["commitTransaction"], "closeConnection": True}, + } + ) + self.addCleanup(self.set_fail_point, {"configureFailPoint": "failCommand", "mode": "off"}) listener.results.clear() with client.start_session() as s: @@ -455,8 +490,9 @@ def callback(session): # One insert for the callback and two commits (includes the automatic # retry). - self.assertEqual(listener.started_command_names(), - ['insert', 'commitTransaction', 'commitTransaction']) + self.assertEqual( + listener.started_command_names(), ["insert", "commitTransaction", "commitTransaction"] + ) # Tested here because this supports Motor's convenient transactions API. @client_context.require_transactions @@ -489,6 +525,7 @@ def test_in_transaction_property(self): # Using a callback def callback(session): self.assertTrue(session.in_transaction) + with client.start_session() as s: self.assertFalse(s.in_transaction) s.with_transaction(callback) @@ -508,8 +545,9 @@ def run_scenario(self): test_creator.create_tests() -TestCreator(create_test, TestTransactionsConvenientAPI, - TestTransactionsConvenientAPI.TEST_PATH).create_tests() +TestCreator( + create_test, TestTransactionsConvenientAPI, TestTransactionsConvenientAPI.TEST_PATH +).create_tests() if __name__ == "__main__": diff --git a/test/test_transactions_unified.py b/test/test_transactions_unified.py index 37e8d06153..4f3aa233fa 100644 --- a/test/test_transactions_unified.py +++ b/test/test_transactions_unified.py @@ -23,8 +23,7 @@ from test.unified_format import generate_test_classes # Location of JSON test specifications. 
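# (Each JSON file under this path is a scenario from the driver test specs;
# generate_test_classes below appears to emit one unittest class per scenario
# file and inject it into this module's globals.)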
-TEST_PATH = os.path.join( - os.path.dirname(os.path.realpath(__file__)), 'transactions', 'unified') +TEST_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "transactions", "unified") # Generate unified tests. globals().update(generate_test_classes(TEST_PATH, module=__name__)) diff --git a/test/test_unified_format.py b/test/test_unified_format.py index 74770b6f3a..e36959a224 100644 --- a/test/test_unified_format.py +++ b/test/test_unified_format.py @@ -17,35 +17,39 @@ sys.path[0:0] = [""] -from bson import ObjectId - from test import unittest -from test.unified_format import generate_test_classes, MatchEvaluatorUtil +from test.unified_format import MatchEvaluatorUtil, generate_test_classes +from bson import ObjectId -_TEST_PATH = os.path.join( - os.path.dirname(os.path.realpath(__file__)), 'unified-test-format') +_TEST_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "unified-test-format") -globals().update(generate_test_classes( - os.path.join(_TEST_PATH, 'valid-pass'), - module=__name__, - class_name_prefix='UnifiedTestFormat', - expected_failures=[ - 'Client side error in command starting transaction', # PYTHON-1894 - ], - RUN_ON_SERVERLESS=False)) +globals().update( + generate_test_classes( + os.path.join(_TEST_PATH, "valid-pass"), + module=__name__, + class_name_prefix="UnifiedTestFormat", + expected_failures=[ + "Client side error in command starting transaction", # PYTHON-1894 + ], + RUN_ON_SERVERLESS=False, + ) +) -globals().update(generate_test_classes( - os.path.join(_TEST_PATH, 'valid-fail'), - module=__name__, - class_name_prefix='UnifiedTestFormat', - bypass_test_generation_errors=True, - expected_failures=[ - '.*', # All tests expected to fail - ], - RUN_ON_SERVERLESS=False)) +globals().update( + generate_test_classes( + os.path.join(_TEST_PATH, "valid-fail"), + module=__name__, + class_name_prefix="UnifiedTestFormat", + bypass_test_generation_errors=True, + expected_failures=[ + ".*", # All tests expected to fail + ], + RUN_ON_SERVERLESS=False, + ) +) class TestMatchEvaluatorUtil(unittest.TestCase): @@ -53,22 +57,27 @@ def setUp(self): self.match_evaluator = MatchEvaluatorUtil(self) def test_unsetOrMatches(self): - spec = {'$$unsetOrMatches': {'y': {'$$unsetOrMatches': 2}}} - for actual in [{}, {'y': 2}, None]: + spec = {"$$unsetOrMatches": {"y": {"$$unsetOrMatches": 2}}} + for actual in [{}, {"y": 2}, None]: self.match_evaluator.match_result(spec, actual) - spec = {'x': {'$$unsetOrMatches': {'y': {'$$unsetOrMatches': 2}}}} - for actual in [{}, {'x': {}}, {'x': {'y': 2}}]: + spec = {"x": {"$$unsetOrMatches": {"y": {"$$unsetOrMatches": 2}}}} + for actual in [{}, {"x": {}}, {"x": {"y": 2}}]: self.match_evaluator.match_result(spec, actual) def test_type(self): self.match_evaluator.match_result( - {'operationType': 'insert', - 'ns': {'db': 'change-stream-tests', 'coll': 'test'}, - 'fullDocument': {'_id': {'$$type': 'objectId'}, 'x': 1}}, - {'operationType': 'insert', - 'fullDocument': {'_id': ObjectId('5fc93511ac93941052098f0c'), 'x': 1}, - 'ns': {'db': 'change-stream-tests', 'coll': 'test'}}) + { + "operationType": "insert", + "ns": {"db": "change-stream-tests", "coll": "test"}, + "fullDocument": {"_id": {"$$type": "objectId"}, "x": 1}, + }, + { + "operationType": "insert", + "fullDocument": {"_id": ObjectId("5fc93511ac93941052098f0c"), "x": 1}, + "ns": {"db": "change-stream-tests", "coll": "test"}, + }, + ) if __name__ == "__main__": diff --git a/test/test_uri_parser.py b/test/test_uri_parser.py index ed5291d716..cfe21169fd 100644 --- 
a/test/test_uri_parser.py +++ b/test/test_uri_parser.py @@ -21,444 +21,431 @@ sys.path[0:0] = [""] +from test import unittest + from bson.binary import JAVA_LEGACY from pymongo import ReadPreference from pymongo.errors import ConfigurationError, InvalidURI -from pymongo.uri_parser import (parse_userinfo, - split_hosts, - split_options, - parse_uri) -from test import unittest +from pymongo.uri_parser import parse_uri, parse_userinfo, split_hosts, split_options class TestURI(unittest.TestCase): - def test_validate_userinfo(self): - self.assertRaises(InvalidURI, parse_userinfo, - 'foo@') - self.assertRaises(InvalidURI, parse_userinfo, - ':password') - self.assertRaises(InvalidURI, parse_userinfo, - 'fo::o:p@ssword') - self.assertRaises(InvalidURI, parse_userinfo, ':') - self.assertTrue(parse_userinfo('user:password')) - self.assertEqual(('us:r', 'p@ssword'), - parse_userinfo('us%3Ar:p%40ssword')) - self.assertEqual(('us er', 'p ssword'), - parse_userinfo('us+er:p+ssword')) - self.assertEqual(('us er', 'p ssword'), - parse_userinfo('us%20er:p%20ssword')) - self.assertEqual(('us+er', 'p+ssword'), - parse_userinfo('us%2Ber:p%2Bssword')) - self.assertEqual(('dev1@FOO.COM', ''), - parse_userinfo('dev1%40FOO.COM')) - self.assertEqual(('dev1@FOO.COM', ''), - parse_userinfo('dev1%40FOO.COM:')) + self.assertRaises(InvalidURI, parse_userinfo, "foo@") + self.assertRaises(InvalidURI, parse_userinfo, ":password") + self.assertRaises(InvalidURI, parse_userinfo, "fo::o:p@ssword") + self.assertRaises(InvalidURI, parse_userinfo, ":") + self.assertTrue(parse_userinfo("user:password")) + self.assertEqual(("us:r", "p@ssword"), parse_userinfo("us%3Ar:p%40ssword")) + self.assertEqual(("us er", "p ssword"), parse_userinfo("us+er:p+ssword")) + self.assertEqual(("us er", "p ssword"), parse_userinfo("us%20er:p%20ssword")) + self.assertEqual(("us+er", "p+ssword"), parse_userinfo("us%2Ber:p%2Bssword")) + self.assertEqual(("dev1@FOO.COM", ""), parse_userinfo("dev1%40FOO.COM")) + self.assertEqual(("dev1@FOO.COM", ""), parse_userinfo("dev1%40FOO.COM:")) def test_split_hosts(self): - self.assertRaises(ConfigurationError, split_hosts, - 'localhost:27017,') - self.assertRaises(ConfigurationError, split_hosts, - ',localhost:27017') - self.assertRaises(ConfigurationError, split_hosts, - 'localhost:27017,,localhost:27018') - self.assertEqual([('localhost', 27017), ('example.com', 27017)], - split_hosts('localhost,example.com')) - self.assertEqual([('localhost', 27018), ('example.com', 27019)], - split_hosts('localhost:27018,example.com:27019')) - self.assertEqual([('/tmp/mongodb-27017.sock', None)], - split_hosts('/tmp/mongodb-27017.sock')) - self.assertEqual([('/tmp/mongodb-27017.sock', None), - ('example.com', 27017)], - split_hosts('/tmp/mongodb-27017.sock,' - 'example.com:27017')) - self.assertEqual([('example.com', 27017), - ('/tmp/mongodb-27017.sock', None)], - split_hosts('example.com:27017,' - '/tmp/mongodb-27017.sock')) - self.assertRaises(ValueError, split_hosts, '::1', 27017) - self.assertRaises(ValueError, split_hosts, '[::1:27017') - self.assertRaises(ValueError, split_hosts, '::1') - self.assertRaises(ValueError, split_hosts, '::1]:27017') - self.assertEqual([('::1', 27017)], split_hosts('[::1]:27017')) - self.assertEqual([('::1', 27017)], split_hosts('[::1]')) + self.assertRaises(ConfigurationError, split_hosts, "localhost:27017,") + self.assertRaises(ConfigurationError, split_hosts, ",localhost:27017") + self.assertRaises(ConfigurationError, split_hosts, "localhost:27017,,localhost:27018") + self.assertEqual( + 
[("localhost", 27017), ("example.com", 27017)], split_hosts("localhost,example.com") + ) + self.assertEqual( + [("localhost", 27018), ("example.com", 27019)], + split_hosts("localhost:27018,example.com:27019"), + ) + self.assertEqual( + [("/tmp/mongodb-27017.sock", None)], split_hosts("/tmp/mongodb-27017.sock") + ) + self.assertEqual( + [("/tmp/mongodb-27017.sock", None), ("example.com", 27017)], + split_hosts("/tmp/mongodb-27017.sock," "example.com:27017"), + ) + self.assertEqual( + [("example.com", 27017), ("/tmp/mongodb-27017.sock", None)], + split_hosts("example.com:27017," "/tmp/mongodb-27017.sock"), + ) + self.assertRaises(ValueError, split_hosts, "::1", 27017) + self.assertRaises(ValueError, split_hosts, "[::1:27017") + self.assertRaises(ValueError, split_hosts, "::1") + self.assertRaises(ValueError, split_hosts, "::1]:27017") + self.assertEqual([("::1", 27017)], split_hosts("[::1]:27017")) + self.assertEqual([("::1", 27017)], split_hosts("[::1]")) def test_split_options(self): - self.assertRaises(ConfigurationError, split_options, 'foo') - self.assertRaises(ConfigurationError, split_options, 'foo=bar;foo') - self.assertTrue(split_options('ssl=true')) - self.assertTrue(split_options('connect=true')) - self.assertTrue(split_options('tlsAllowInvalidHostnames=false')) + self.assertRaises(ConfigurationError, split_options, "foo") + self.assertRaises(ConfigurationError, split_options, "foo=bar;foo") + self.assertTrue(split_options("ssl=true")) + self.assertTrue(split_options("connect=true")) + self.assertTrue(split_options("tlsAllowInvalidHostnames=false")) # Test Invalid URI options that should throw warnings. with warnings.catch_warnings(): - warnings.filterwarnings('error') - self.assertRaises(Warning, split_options, - 'foo=bar', warn=True) - self.assertRaises(Warning, split_options, - 'socketTimeoutMS=foo', warn=True) - self.assertRaises(Warning, split_options, - 'socketTimeoutMS=0.0', warn=True) - self.assertRaises(Warning, split_options, - 'connectTimeoutMS=foo', warn=True) - self.assertRaises(Warning, split_options, - 'connectTimeoutMS=0.0', warn=True) - self.assertRaises(Warning, split_options, - 'connectTimeoutMS=1e100000', warn=True) - self.assertRaises(Warning, split_options, - 'connectTimeoutMS=-1e100000', warn=True) - self.assertRaises(Warning, split_options, - 'ssl=foo', warn=True) - self.assertRaises(Warning, split_options, - 'connect=foo', warn=True) - self.assertRaises(Warning, split_options, - 'tlsAllowInvalidHostnames=foo', warn=True) - self.assertRaises(Warning, split_options, - 'connectTimeoutMS=inf', warn=True) - self.assertRaises(Warning, split_options, - 'connectTimeoutMS=-inf', warn=True) - self.assertRaises(Warning, split_options, 'wtimeoutms=foo', - warn=True) - self.assertRaises(Warning, split_options, 'wtimeoutms=5.5', - warn=True) - self.assertRaises(Warning, split_options, 'fsync=foo', - warn=True) - self.assertRaises(Warning, split_options, 'fsync=5.5', - warn=True) - self.assertRaises(Warning, - split_options, 'authMechanism=foo', - warn=True) + warnings.filterwarnings("error") + self.assertRaises(Warning, split_options, "foo=bar", warn=True) + self.assertRaises(Warning, split_options, "socketTimeoutMS=foo", warn=True) + self.assertRaises(Warning, split_options, "socketTimeoutMS=0.0", warn=True) + self.assertRaises(Warning, split_options, "connectTimeoutMS=foo", warn=True) + self.assertRaises(Warning, split_options, "connectTimeoutMS=0.0", warn=True) + self.assertRaises(Warning, split_options, "connectTimeoutMS=1e100000", warn=True) + 
self.assertRaises(Warning, split_options, "connectTimeoutMS=-1e100000", warn=True) + self.assertRaises(Warning, split_options, "ssl=foo", warn=True) + self.assertRaises(Warning, split_options, "connect=foo", warn=True) + self.assertRaises(Warning, split_options, "tlsAllowInvalidHostnames=foo", warn=True) + self.assertRaises(Warning, split_options, "connectTimeoutMS=inf", warn=True) + self.assertRaises(Warning, split_options, "connectTimeoutMS=-inf", warn=True) + self.assertRaises(Warning, split_options, "wtimeoutms=foo", warn=True) + self.assertRaises(Warning, split_options, "wtimeoutms=5.5", warn=True) + self.assertRaises(Warning, split_options, "fsync=foo", warn=True) + self.assertRaises(Warning, split_options, "fsync=5.5", warn=True) + self.assertRaises(Warning, split_options, "authMechanism=foo", warn=True) # Test invalid options with warn=False. - self.assertRaises(ConfigurationError, split_options, 'foo=bar') - self.assertRaises(ValueError, split_options, 'socketTimeoutMS=foo') - self.assertRaises(ValueError, split_options, 'socketTimeoutMS=0.0') - self.assertRaises(ValueError, split_options, 'connectTimeoutMS=foo') - self.assertRaises(ValueError, split_options, 'connectTimeoutMS=0.0') - self.assertRaises(ValueError, split_options, - 'connectTimeoutMS=1e100000') - self.assertRaises(ValueError, split_options, - 'connectTimeoutMS=-1e100000') - self.assertRaises(ValueError, split_options, 'ssl=foo') - self.assertRaises(ValueError, split_options, 'connect=foo') - self.assertRaises(ValueError, split_options, 'tlsAllowInvalidHostnames=foo') - self.assertRaises(ValueError, split_options, 'connectTimeoutMS=inf') - self.assertRaises(ValueError, split_options, 'connectTimeoutMS=-inf') - self.assertRaises(ValueError, split_options, 'wtimeoutms=foo') - self.assertRaises(ValueError, split_options, 'wtimeoutms=5.5') - self.assertRaises(ValueError, split_options, 'fsync=foo') - self.assertRaises(ValueError, split_options, 'fsync=5.5') - self.assertRaises(ValueError, - split_options, 'authMechanism=foo') + self.assertRaises(ConfigurationError, split_options, "foo=bar") + self.assertRaises(ValueError, split_options, "socketTimeoutMS=foo") + self.assertRaises(ValueError, split_options, "socketTimeoutMS=0.0") + self.assertRaises(ValueError, split_options, "connectTimeoutMS=foo") + self.assertRaises(ValueError, split_options, "connectTimeoutMS=0.0") + self.assertRaises(ValueError, split_options, "connectTimeoutMS=1e100000") + self.assertRaises(ValueError, split_options, "connectTimeoutMS=-1e100000") + self.assertRaises(ValueError, split_options, "ssl=foo") + self.assertRaises(ValueError, split_options, "connect=foo") + self.assertRaises(ValueError, split_options, "tlsAllowInvalidHostnames=foo") + self.assertRaises(ValueError, split_options, "connectTimeoutMS=inf") + self.assertRaises(ValueError, split_options, "connectTimeoutMS=-inf") + self.assertRaises(ValueError, split_options, "wtimeoutms=foo") + self.assertRaises(ValueError, split_options, "wtimeoutms=5.5") + self.assertRaises(ValueError, split_options, "fsync=foo") + self.assertRaises(ValueError, split_options, "fsync=5.5") + self.assertRaises(ValueError, split_options, "authMechanism=foo") # Test splitting options works when valid. 
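# (Note the unit conversion in the assertions below: URI timeouts are given
# in milliseconds but surface in seconds, so socketTimeoutMS=300 parses to
# 0.3 and connectTimeoutMS=0.1 to 0.0001.)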
- self.assertTrue(split_options('socketTimeoutMS=300')) - self.assertTrue(split_options('connectTimeoutMS=300')) - self.assertEqual({'sockettimeoutms': 0.3}, - split_options('socketTimeoutMS=300')) - self.assertEqual({'sockettimeoutms': 0.0001}, - split_options('socketTimeoutMS=0.1')) - self.assertEqual({'connecttimeoutms': 0.3}, - split_options('connectTimeoutMS=300')) - self.assertEqual({'connecttimeoutms': 0.0001}, - split_options('connectTimeoutMS=0.1')) - self.assertTrue(split_options('connectTimeoutMS=300')) - self.assertTrue(isinstance(split_options('w=5')['w'], int)) - self.assertTrue(isinstance(split_options('w=5.5')['w'], str)) - self.assertTrue(split_options('w=foo')) - self.assertTrue(split_options('w=majority')) - self.assertTrue(split_options('wtimeoutms=500')) - self.assertEqual({'fsync': True}, split_options('fsync=true')) - self.assertEqual({'fsync': False}, split_options('fsync=false')) - self.assertEqual({'authmechanism': 'GSSAPI'}, - split_options('authMechanism=GSSAPI')) - self.assertEqual({'authmechanism': 'MONGODB-CR'}, - split_options('authMechanism=MONGODB-CR')) - self.assertEqual({'authmechanism': 'SCRAM-SHA-1'}, - split_options('authMechanism=SCRAM-SHA-1')) - self.assertEqual({'authsource': 'foobar'}, - split_options('authSource=foobar')) - self.assertEqual({'maxpoolsize': 50}, split_options('maxpoolsize=50')) + self.assertTrue(split_options("socketTimeoutMS=300")) + self.assertTrue(split_options("connectTimeoutMS=300")) + self.assertEqual({"sockettimeoutms": 0.3}, split_options("socketTimeoutMS=300")) + self.assertEqual({"sockettimeoutms": 0.0001}, split_options("socketTimeoutMS=0.1")) + self.assertEqual({"connecttimeoutms": 0.3}, split_options("connectTimeoutMS=300")) + self.assertEqual({"connecttimeoutms": 0.0001}, split_options("connectTimeoutMS=0.1")) + self.assertTrue(split_options("connectTimeoutMS=300")) + self.assertTrue(isinstance(split_options("w=5")["w"], int)) + self.assertTrue(isinstance(split_options("w=5.5")["w"], str)) + self.assertTrue(split_options("w=foo")) + self.assertTrue(split_options("w=majority")) + self.assertTrue(split_options("wtimeoutms=500")) + self.assertEqual({"fsync": True}, split_options("fsync=true")) + self.assertEqual({"fsync": False}, split_options("fsync=false")) + self.assertEqual({"authmechanism": "GSSAPI"}, split_options("authMechanism=GSSAPI")) + self.assertEqual({"authmechanism": "MONGODB-CR"}, split_options("authMechanism=MONGODB-CR")) + self.assertEqual( + {"authmechanism": "SCRAM-SHA-1"}, split_options("authMechanism=SCRAM-SHA-1") + ) + self.assertEqual({"authsource": "foobar"}, split_options("authSource=foobar")) + self.assertEqual({"maxpoolsize": 50}, split_options("maxpoolsize=50")) def test_parse_uri(self): self.assertRaises(InvalidURI, parse_uri, "http://foobar.com") self.assertRaises(InvalidURI, parse_uri, "http://foo@foobar.com") - self.assertRaises(ValueError, - parse_uri, "mongodb://::1", 27017) + self.assertRaises(ValueError, parse_uri, "mongodb://::1", 27017) orig: dict = { - 'nodelist': [("localhost", 27017)], - 'username': None, - 'password': None, - 'database': None, - 'collection': None, - 'options': {}, - 'fqdn': None + "nodelist": [("localhost", 27017)], + "username": None, + "password": None, + "database": None, + "collection": None, + "options": {}, + "fqdn": None, } res: dict = copy.deepcopy(orig) self.assertEqual(res, parse_uri("mongodb://localhost")) - res.update({'username': 'fred', 'password': 'foobar'}) + res.update({"username": "fred", "password": "foobar"}) self.assertEqual(res, 
parse_uri("mongodb://fred:foobar@localhost")) - res.update({'database': 'baz'}) + res.update({"database": "baz"}) self.assertEqual(res, parse_uri("mongodb://fred:foobar@localhost/baz")) res = copy.deepcopy(orig) - res['nodelist'] = [("example1.com", 27017), ("example2.com", 27017)] - self.assertEqual(res, - parse_uri("mongodb://example1.com:27017," - "example2.com:27017")) + res["nodelist"] = [("example1.com", 27017), ("example2.com", 27017)] + self.assertEqual(res, parse_uri("mongodb://example1.com:27017," "example2.com:27017")) res = copy.deepcopy(orig) - res['nodelist'] = [("localhost", 27017), - ("localhost", 27018), - ("localhost", 27019)] - self.assertEqual(res, - parse_uri("mongodb://localhost," - "localhost:27018,localhost:27019")) + res["nodelist"] = [("localhost", 27017), ("localhost", 27018), ("localhost", 27019)] + self.assertEqual(res, parse_uri("mongodb://localhost," "localhost:27018,localhost:27019")) res = copy.deepcopy(orig) - res['database'] = 'foo' + res["database"] = "foo" self.assertEqual(res, parse_uri("mongodb://localhost/foo")) res = copy.deepcopy(orig) self.assertEqual(res, parse_uri("mongodb://localhost/")) - res.update({'database': 'test', 'collection': 'yield_historical.in'}) - self.assertEqual(res, parse_uri("mongodb://" - "localhost/test.yield_historical.in")) + res.update({"database": "test", "collection": "yield_historical.in"}) + self.assertEqual(res, parse_uri("mongodb://" "localhost/test.yield_historical.in")) - res.update({'username': 'fred', 'password': 'foobar'}) - self.assertEqual(res, - parse_uri("mongodb://fred:foobar@localhost/" - "test.yield_historical.in")) + res.update({"username": "fred", "password": "foobar"}) + self.assertEqual( + res, parse_uri("mongodb://fred:foobar@localhost/" "test.yield_historical.in") + ) res = copy.deepcopy(orig) - res['nodelist'] = [("example1.com", 27017), ("example2.com", 27017)] - res.update({'database': 'test', 'collection': 'yield_historical.in'}) - self.assertEqual(res, - parse_uri("mongodb://example1.com:27017,example2.com" - ":27017/test.yield_historical.in")) + res["nodelist"] = [("example1.com", 27017), ("example2.com", 27017)] + res.update({"database": "test", "collection": "yield_historical.in"}) + self.assertEqual( + res, + parse_uri( + "mongodb://example1.com:27017,example2.com" ":27017/test.yield_historical.in" + ), + ) # Test socket path without escaped characters. - self.assertRaises(InvalidURI, parse_uri, - "mongodb:///tmp/mongodb-27017.sock") + self.assertRaises(InvalidURI, parse_uri, "mongodb:///tmp/mongodb-27017.sock") # Test with escaped characters. 
res = copy.deepcopy(orig) - res['nodelist'] = [("example2.com", 27017), - ("/tmp/mongodb-27017.sock", None)] - self.assertEqual(res, - parse_uri("mongodb://example2.com," - "%2Ftmp%2Fmongodb-27017.sock")) + res["nodelist"] = [("example2.com", 27017), ("/tmp/mongodb-27017.sock", None)] + self.assertEqual(res, parse_uri("mongodb://example2.com," "%2Ftmp%2Fmongodb-27017.sock")) res = copy.deepcopy(orig) - res['nodelist'] = [("shoe.sock.pants.co.uk", 27017), - ("/tmp/mongodb-27017.sock", None)] - res['database'] = "nethers_db" - self.assertEqual(res, - parse_uri("mongodb://shoe.sock.pants.co.uk," - "%2Ftmp%2Fmongodb-27017.sock/nethers_db")) + res["nodelist"] = [("shoe.sock.pants.co.uk", 27017), ("/tmp/mongodb-27017.sock", None)] + res["database"] = "nethers_db" + self.assertEqual( + res, + parse_uri("mongodb://shoe.sock.pants.co.uk," "%2Ftmp%2Fmongodb-27017.sock/nethers_db"), + ) res = copy.deepcopy(orig) - res['nodelist'] = [("/tmp/mongodb-27017.sock", None), - ("example2.com", 27017)] - res.update({'database': 'test', 'collection': 'yield_historical.in'}) - self.assertEqual(res, - parse_uri("mongodb://%2Ftmp%2Fmongodb-27017.sock," - "example2.com:27017" - "/test.yield_historical.in")) + res["nodelist"] = [("/tmp/mongodb-27017.sock", None), ("example2.com", 27017)] + res.update({"database": "test", "collection": "yield_historical.in"}) + self.assertEqual( + res, + parse_uri( + "mongodb://%2Ftmp%2Fmongodb-27017.sock," + "example2.com:27017" + "/test.yield_historical.in" + ), + ) res = copy.deepcopy(orig) - res['nodelist'] = [("/tmp/mongodb-27017.sock", None), - ("example2.com", 27017)] - res.update({'database': 'test', 'collection': 'yield_historical.sock'}) - self.assertEqual(res, - parse_uri("mongodb://%2Ftmp%2Fmongodb-27017.sock," - "example2.com:27017/test.yield_historical" - ".sock")) + res["nodelist"] = [("/tmp/mongodb-27017.sock", None), ("example2.com", 27017)] + res.update({"database": "test", "collection": "yield_historical.sock"}) + self.assertEqual( + res, + parse_uri( + "mongodb://%2Ftmp%2Fmongodb-27017.sock," + "example2.com:27017/test.yield_historical" + ".sock" + ), + ) res = copy.deepcopy(orig) - res['nodelist'] = [("example2.com", 27017)] - res.update({'database': 'test', 'collection': 'yield_historical.sock'}) - self.assertEqual(res, - parse_uri("mongodb://example2.com:27017" - "/test.yield_historical.sock")) + res["nodelist"] = [("example2.com", 27017)] + res.update({"database": "test", "collection": "yield_historical.sock"}) + self.assertEqual( + res, parse_uri("mongodb://example2.com:27017" "/test.yield_historical.sock") + ) res = copy.deepcopy(orig) - res['nodelist'] = [("/tmp/mongodb-27017.sock", None)] - res.update({'database': 'test', 'collection': 'mongodb-27017.sock'}) - self.assertEqual(res, - parse_uri("mongodb://%2Ftmp%2Fmongodb-27017.sock" - "/test.mongodb-27017.sock")) + res["nodelist"] = [("/tmp/mongodb-27017.sock", None)] + res.update({"database": "test", "collection": "mongodb-27017.sock"}) + self.assertEqual( + res, parse_uri("mongodb://%2Ftmp%2Fmongodb-27017.sock" "/test.mongodb-27017.sock") + ) res = copy.deepcopy(orig) - res['nodelist'] = [('/tmp/mongodb-27020.sock', None), - ("::1", 27017), - ("2001:0db8:85a3:0000:0000:8a2e:0370:7334", 27018), - ("192.168.0.212", 27019), - ("localhost", 27018)] - self.assertEqual(res, parse_uri("mongodb://%2Ftmp%2Fmongodb-27020.sock" - ",[::1]:27017,[2001:0db8:" - "85a3:0000:0000:8a2e:0370:7334]," - "192.168.0.212:27019,localhost", - 27018)) + res["nodelist"] = [ + ("/tmp/mongodb-27020.sock", None), + ("::1", 27017), + 
("2001:0db8:85a3:0000:0000:8a2e:0370:7334", 27018), + ("192.168.0.212", 27019), + ("localhost", 27018), + ] + self.assertEqual( + res, + parse_uri( + "mongodb://%2Ftmp%2Fmongodb-27020.sock" + ",[::1]:27017,[2001:0db8:" + "85a3:0000:0000:8a2e:0370:7334]," + "192.168.0.212:27019,localhost", + 27018, + ), + ) res = copy.deepcopy(orig) - res.update({'username': 'fred', 'password': 'foobar'}) - res.update({'database': 'test', 'collection': 'yield_historical.in'}) - self.assertEqual(res, - parse_uri("mongodb://fred:foobar@localhost/" - "test.yield_historical.in")) + res.update({"username": "fred", "password": "foobar"}) + res.update({"database": "test", "collection": "yield_historical.in"}) + self.assertEqual( + res, parse_uri("mongodb://fred:foobar@localhost/" "test.yield_historical.in") + ) res = copy.deepcopy(orig) - res['database'] = 'test' - res['collection'] = 'name/with "delimiters' - self.assertEqual( - res, parse_uri("mongodb://localhost/test.name/with \"delimiters")) + res["database"] = "test" + res["collection"] = 'name/with "delimiters' + self.assertEqual(res, parse_uri('mongodb://localhost/test.name/with "delimiters')) res = copy.deepcopy(orig) - res['options'] = { - 'readpreference': ReadPreference.SECONDARY.mongos_mode - } - self.assertEqual(res, parse_uri( - "mongodb://localhost/?readPreference=secondary")) + res["options"] = {"readpreference": ReadPreference.SECONDARY.mongos_mode} + self.assertEqual(res, parse_uri("mongodb://localhost/?readPreference=secondary")) # Various authentication tests res = copy.deepcopy(orig) - res['options'] = {'authmechanism': 'MONGODB-CR'} - res['username'] = 'user' - res['password'] = 'password' - self.assertEqual(res, - parse_uri("mongodb://user:password@localhost/" - "?authMechanism=MONGODB-CR")) + res["options"] = {"authmechanism": "MONGODB-CR"} + res["username"] = "user" + res["password"] = "password" + self.assertEqual( + res, parse_uri("mongodb://user:password@localhost/" "?authMechanism=MONGODB-CR") + ) res = copy.deepcopy(orig) - res['options'] = {'authmechanism': 'MONGODB-CR', 'authsource': 'bar'} - res['username'] = 'user' - res['password'] = 'password' - res['database'] = 'foo' - self.assertEqual(res, - parse_uri("mongodb://user:password@localhost/foo" - "?authSource=bar;authMechanism=MONGODB-CR")) + res["options"] = {"authmechanism": "MONGODB-CR", "authsource": "bar"} + res["username"] = "user" + res["password"] = "password" + res["database"] = "foo" + self.assertEqual( + res, + parse_uri( + "mongodb://user:password@localhost/foo" "?authSource=bar;authMechanism=MONGODB-CR" + ), + ) res = copy.deepcopy(orig) - res['options'] = {'authmechanism': 'MONGODB-CR'} - res['username'] = 'user' - res['password'] = '' - self.assertEqual(res, - parse_uri("mongodb://user:@localhost/" - "?authMechanism=MONGODB-CR")) + res["options"] = {"authmechanism": "MONGODB-CR"} + res["username"] = "user" + res["password"] = "" + self.assertEqual(res, parse_uri("mongodb://user:@localhost/" "?authMechanism=MONGODB-CR")) res = copy.deepcopy(orig) - res['username'] = 'user@domain.com' - res['password'] = 'password' - res['database'] = 'foo' - self.assertEqual(res, - parse_uri("mongodb://user%40domain.com:password" - "@localhost/foo")) + res["username"] = "user@domain.com" + res["password"] = "password" + res["database"] = "foo" + self.assertEqual(res, parse_uri("mongodb://user%40domain.com:password" "@localhost/foo")) res = copy.deepcopy(orig) - res['options'] = {'authmechanism': 'GSSAPI'} - res['username'] = 'user@domain.com' - res['password'] = 'password' - 
res['database'] = 'foo' - self.assertEqual(res, - parse_uri("mongodb://user%40domain.com:password" - "@localhost/foo?authMechanism=GSSAPI")) + res["options"] = {"authmechanism": "GSSAPI"} + res["username"] = "user@domain.com" + res["password"] = "password" + res["database"] = "foo" + self.assertEqual( + res, + parse_uri("mongodb://user%40domain.com:password" "@localhost/foo?authMechanism=GSSAPI"), + ) res = copy.deepcopy(orig) - res['options'] = {'authmechanism': 'GSSAPI'} - res['username'] = 'user@domain.com' - res['password'] = '' - res['database'] = 'foo' - self.assertEqual(res, - parse_uri("mongodb://user%40domain.com" - "@localhost/foo?authMechanism=GSSAPI")) + res["options"] = {"authmechanism": "GSSAPI"} + res["username"] = "user@domain.com" + res["password"] = "" + res["database"] = "foo" + self.assertEqual( + res, parse_uri("mongodb://user%40domain.com" "@localhost/foo?authMechanism=GSSAPI") + ) res = copy.deepcopy(orig) - res['options'] = { - 'readpreference': ReadPreference.SECONDARY.mongos_mode, - 'readpreferencetags': [ - {'dc': 'west', 'use': 'website'}, - {'dc': 'east', 'use': 'website'} - ] + res["options"] = { + "readpreference": ReadPreference.SECONDARY.mongos_mode, + "readpreferencetags": [ + {"dc": "west", "use": "website"}, + {"dc": "east", "use": "website"}, + ], } - res['username'] = 'user@domain.com' - res['password'] = 'password' - res['database'] = 'foo' - self.assertEqual(res, - parse_uri("mongodb://user%40domain.com:password" - "@localhost/foo?readpreference=secondary&" - "readpreferencetags=dc:west,use:website&" - "readpreferencetags=dc:east,use:website")) + res["username"] = "user@domain.com" + res["password"] = "password" + res["database"] = "foo" + self.assertEqual( + res, + parse_uri( + "mongodb://user%40domain.com:password" + "@localhost/foo?readpreference=secondary&" + "readpreferencetags=dc:west,use:website&" + "readpreferencetags=dc:east,use:website" + ), + ) res = copy.deepcopy(orig) - res['options'] = { - 'readpreference': ReadPreference.SECONDARY.mongos_mode, - 'readpreferencetags': [ - {'dc': 'west', 'use': 'website'}, - {'dc': 'east', 'use': 'website'}, - {} - ] + res["options"] = { + "readpreference": ReadPreference.SECONDARY.mongos_mode, + "readpreferencetags": [ + {"dc": "west", "use": "website"}, + {"dc": "east", "use": "website"}, + {}, + ], } - res['username'] = 'user@domain.com' - res['password'] = 'password' - res['database'] = 'foo' - self.assertEqual(res, - parse_uri("mongodb://user%40domain.com:password" - "@localhost/foo?readpreference=secondary&" - "readpreferencetags=dc:west,use:website&" - "readpreferencetags=dc:east,use:website&" - "readpreferencetags=")) + res["username"] = "user@domain.com" + res["password"] = "password" + res["database"] = "foo" + self.assertEqual( + res, + parse_uri( + "mongodb://user%40domain.com:password" + "@localhost/foo?readpreference=secondary&" + "readpreferencetags=dc:west,use:website&" + "readpreferencetags=dc:east,use:website&" + "readpreferencetags=" + ), + ) res = copy.deepcopy(orig) - res['options'] = {'uuidrepresentation': JAVA_LEGACY} - res['username'] = 'user@domain.com' - res['password'] = 'password' - res['database'] = 'foo' - self.assertEqual(res, - parse_uri("mongodb://user%40domain.com:password" - "@localhost/foo?uuidrepresentation=" - "javaLegacy")) + res["options"] = {"uuidrepresentation": JAVA_LEGACY} + res["username"] = "user@domain.com" + res["password"] = "password" + res["database"] = "foo" + self.assertEqual( + res, + parse_uri( + "mongodb://user%40domain.com:password" + 
"@localhost/foo?uuidrepresentation=" + "javaLegacy" + ), + ) with warnings.catch_warnings(): - warnings.filterwarnings('error') - self.assertRaises(Warning, parse_uri, - "mongodb://user%40domain.com:password" - "@localhost/foo?uuidrepresentation=notAnOption", - warn=True) - self.assertRaises(ValueError, parse_uri, - "mongodb://user%40domain.com:password" - "@localhost/foo?uuidrepresentation=notAnOption") + warnings.filterwarnings("error") + self.assertRaises( + Warning, + parse_uri, + "mongodb://user%40domain.com:password" + "@localhost/foo?uuidrepresentation=notAnOption", + warn=True, + ) + self.assertRaises( + ValueError, + parse_uri, + "mongodb://user%40domain.com:password" "@localhost/foo?uuidrepresentation=notAnOption", + ) def test_parse_ssl_paths(self): # Turn off "validate" since these paths don't exist on filesystem. self.assertEqual( - {'collection': None, - 'database': None, - 'nodelist': [('/MongoDB.sock', None)], - 'options': {'tlsCertificateKeyFile': '/a/b'}, - 'password': 'foo/bar', - 'username': 'jesse', - 'fqdn': None}, + { + "collection": None, + "database": None, + "nodelist": [("/MongoDB.sock", None)], + "options": {"tlsCertificateKeyFile": "/a/b"}, + "password": "foo/bar", + "username": "jesse", + "fqdn": None, + }, parse_uri( - 'mongodb://jesse:foo%2Fbar@%2FMongoDB.sock/?tlsCertificateKeyFile=/a/b', - validate=False)) + "mongodb://jesse:foo%2Fbar@%2FMongoDB.sock/?tlsCertificateKeyFile=/a/b", + validate=False, + ), + ) self.assertEqual( - {'collection': None, - 'database': None, - 'nodelist': [('/MongoDB.sock', None)], - 'options': {'tlsCertificateKeyFile': 'a/b'}, - 'password': 'foo/bar', - 'username': 'jesse', - 'fqdn': None}, + { + "collection": None, + "database": None, + "nodelist": [("/MongoDB.sock", None)], + "options": {"tlsCertificateKeyFile": "a/b"}, + "password": "foo/bar", + "username": "jesse", + "fqdn": None, + }, parse_uri( - 'mongodb://jesse:foo%2Fbar@%2FMongoDB.sock/?tlsCertificateKeyFile=a/b', - validate=False)) + "mongodb://jesse:foo%2Fbar@%2FMongoDB.sock/?tlsCertificateKeyFile=a/b", + validate=False, + ), + ) def test_tlsinsecure_simple(self): # check that tlsInsecure is expanded correctly. @@ -467,59 +454,68 @@ def test_tlsinsecure_simple(self): res = { "tlsAllowInvalidHostnames": True, "tlsAllowInvalidCertificates": True, - "tlsInsecure": True, 'tlsDisableOCSPEndpointCheck': True} + "tlsInsecure": True, + "tlsDisableOCSPEndpointCheck": True, + } self.assertEqual(res, parse_uri(uri)["options"]) def test_normalize_options(self): # check that options are converted to their internal names correctly. 
- uri = ("mongodb://example.com/?ssl=true&appname=myapp") + uri = "mongodb://example.com/?ssl=true&appname=myapp" res = {"tls": True, "appname": "myapp"} self.assertEqual(res, parse_uri(uri)["options"]) def test_unquote_after_parsing(self): quoted_val = "val%21%40%23%24%25%5E%26%2A%28%29_%2B%2C%3A+etc" unquoted_val = "val!@#$%^&*()_+,: etc" - uri = ("mongodb://user:password@localhost/?authMechanism=MONGODB-AWS" - "&authMechanismProperties=AWS_SESSION_TOKEN:"+quoted_val) + uri = ( + "mongodb://user:password@localhost/?authMechanism=MONGODB-AWS" + "&authMechanismProperties=AWS_SESSION_TOKEN:" + quoted_val + ) res = parse_uri(uri) options = { - 'authmechanism': 'MONGODB-AWS', - 'authmechanismproperties': { - 'AWS_SESSION_TOKEN': unquoted_val}} - self.assertEqual(options, res['options']) - - uri = (("mongodb://localhost/foo?readpreference=secondary&" - "readpreferencetags=dc:west,"+quoted_val+":"+quoted_val+"&" - "readpreferencetags=dc:east,use:"+quoted_val)) + "authmechanism": "MONGODB-AWS", + "authmechanismproperties": {"AWS_SESSION_TOKEN": unquoted_val}, + } + self.assertEqual(options, res["options"]) + + uri = ( + "mongodb://localhost/foo?readpreference=secondary&" + "readpreferencetags=dc:west," + quoted_val + ":" + quoted_val + "&" + "readpreferencetags=dc:east,use:" + quoted_val + ) res = parse_uri(uri) options = { - 'readpreference': ReadPreference.SECONDARY.mongos_mode, - 'readpreferencetags': [ - {'dc': 'west', unquoted_val: unquoted_val}, - {'dc': 'east', 'use': unquoted_val} - ] + "readpreference": ReadPreference.SECONDARY.mongos_mode, + "readpreferencetags": [ + {"dc": "west", unquoted_val: unquoted_val}, + {"dc": "east", "use": unquoted_val}, + ], } - self.assertEqual(options, res['options']) + self.assertEqual(options, res["options"]) def test_redact_AWS_SESSION_TOKEN(self): unquoted_colon = "token:" - uri = ("mongodb://user:password@localhost/?authMechanism=MONGODB-AWS" - "&authMechanismProperties=AWS_SESSION_TOKEN:"+unquoted_colon) + uri = ( + "mongodb://user:password@localhost/?authMechanism=MONGODB-AWS" + "&authMechanismProperties=AWS_SESSION_TOKEN:" + unquoted_colon + ) with self.assertRaisesRegex( - ValueError, - 'auth mechanism properties must be key:value pairs like ' - 'SERVICE_NAME:mongodb, not AWS_SESSION_TOKEN:' - ', did you forget to percent-escape the token with ' - 'quote_plus?'): + ValueError, + "auth mechanism properties must be key:value pairs like " + "SERVICE_NAME:mongodb, not AWS_SESSION_TOKEN:" + ", did you forget to percent-escape the token with " + "quote_plus?", + ): parse_uri(uri) def test_special_chars(self): user = "user@ /9+:?~!$&'()*+,;=" pwd = "pwd@ /9+:?~!$&'()*+,;=" - uri = 'mongodb://%s:%s@localhost' % (quote_plus(user), quote_plus(pwd)) + uri = "mongodb://%s:%s@localhost" % (quote_plus(user), quote_plus(pwd)) res = parse_uri(uri) - self.assertEqual(user, res['username']) - self.assertEqual(pwd, res['password']) + self.assertEqual(user, res["username"]) + self.assertEqual(pwd, res["password"]) if __name__ == "__main__": diff --git a/test/test_uri_spec.py b/test/test_uri_spec.py index 59457b57ac..d12abf3b91 100644 --- a/test/test_uri_spec.py +++ b/test/test_uri_spec.py @@ -22,19 +22,18 @@ sys.path[0:0] = [""] +from test import clear_warning_registry, unittest + from pymongo.common import INTERNAL_URI_OPTION_NAME_MAP, validate from pymongo.compression_support import _HAVE_SNAPPY from pymongo.srv_resolver import _HAVE_DNSPYTHON -from pymongo.uri_parser import parse_uri, SRV_SCHEME -from test import clear_warning_registry, unittest - +from 
pymongo.uri_parser import SRV_SCHEME, parse_uri CONN_STRING_TEST_PATH = os.path.join( - os.path.dirname(os.path.realpath(__file__)), - os.path.join('connection_string', 'test')) + os.path.dirname(os.path.realpath(__file__)), os.path.join("connection_string", "test") +) -URI_OPTIONS_TEST_PATH = os.path.join( - os.path.dirname(os.path.realpath(__file__)), 'uri_options') +URI_OPTIONS_TEST_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "uri_options") TEST_DESC_SKIP_LIST = [ "Valid options specific to single-threaded drivers are parsed correctly", @@ -64,7 +63,8 @@ "tlsDisableOCSPEndpointCheck and tlsDisableCertificateRevocationCheck both present (and true) raises an error", "tlsDisableOCSPEndpointCheck=true and tlsDisableCertificateRevocationCheck=false raises an error", "tlsDisableOCSPEndpointCheck=false and tlsDisableCertificateRevocationCheck=true raises an error", - "tlsDisableOCSPEndpointCheck and tlsDisableCertificateRevocationCheck both present (and false) raises an error"] + "tlsDisableOCSPEndpointCheck and tlsDisableCertificateRevocationCheck both present (and false) raises an error", +] class TestAllScenarios(unittest.TestCase): @@ -73,8 +73,7 @@ def setUp(self): def get_error_message_template(expected, artefact): - return "%s %s for test '%s'" % ( - "Expected" if expected else "Unexpected", artefact, "%s") + return "%s %s for test '%s'" % ("Expected" if expected else "Unexpected", artefact, "%s") def run_scenario_in_dir(target_workdir): @@ -84,91 +83,107 @@ def modified_test_scenario(*args, **kwargs): os.chdir(target_workdir) func(*args, **kwargs) os.chdir(original_workdir) + return modified_test_scenario + return workdir_context_decorator def create_test(test, test_workdir): def run_scenario(self): - compressors = (test.get('options') or {}).get('compressors', []) - if 'snappy' in compressors and not _HAVE_SNAPPY: - self.skipTest('This test needs the snappy module.') - if test['uri'].startswith(SRV_SCHEME) and not _HAVE_DNSPYTHON: + compressors = (test.get("options") or {}).get("compressors", []) + if "snappy" in compressors and not _HAVE_SNAPPY: + self.skipTest("This test needs the snappy module.") + if test["uri"].startswith(SRV_SCHEME) and not _HAVE_DNSPYTHON: self.skipTest("This test needs dnspython package.") valid = True warning = False with warnings.catch_warnings(record=True) as ctx: - warnings.simplefilter('always') + warnings.simplefilter("always") try: - options = parse_uri(test['uri'], warn=True) + options = parse_uri(test["uri"], warn=True) except Exception: valid = False else: warning = len(ctx) > 0 - expected_valid = test.get('valid', True) + expected_valid = test.get("valid", True) self.assertEqual( - valid, expected_valid, get_error_message_template( - not expected_valid, "error") % test['description']) + valid, + expected_valid, + get_error_message_template(not expected_valid, "error") % test["description"], + ) if expected_valid: - expected_warning = test.get('warning', False) + expected_warning = test.get("warning", False) self.assertEqual( - warning, expected_warning, get_error_message_template( - expected_warning, "warning") % test['description']) + warning, + expected_warning, + get_error_message_template(expected_warning, "warning") % test["description"], + ) # Compare hosts and port. 
- if test['hosts'] is not None: + if test["hosts"] is not None: self.assertEqual( - len(test['hosts']), len(options['nodelist']), - "Incorrect number of hosts parsed from URI") - - for exp, actual in zip(test['hosts'], - options['nodelist']): - self.assertEqual(exp['host'], actual[0], - "Expected host %s but got %s" - % (exp['host'], actual[0])) - if exp['port'] is not None: - self.assertEqual(exp['port'], actual[1], - "Expected port %s but got %s" - % (exp['port'], actual)) + len(test["hosts"]), + len(options["nodelist"]), + "Incorrect number of hosts parsed from URI", + ) + + for exp, actual in zip(test["hosts"], options["nodelist"]): + self.assertEqual( + exp["host"], actual[0], "Expected host %s but got %s" % (exp["host"], actual[0]) + ) + if exp["port"] is not None: + self.assertEqual( + exp["port"], + actual[1], + "Expected port %s but got %s" % (exp["port"], actual), + ) # Compare auth options. - auth = test['auth'] + auth = test["auth"] if auth is not None: - auth['database'] = auth.pop('db') # db == database + auth["database"] = auth.pop("db") # db == database # Special case for PyMongo's collection parsing. - if options.get('collection') is not None: - options['database'] += "." + options['collection'] + if options.get("collection") is not None: + options["database"] += "." + options["collection"] for elm in auth: if auth[elm] is not None: # We have to do this because while the spec requires # "+"->"+", unquote_plus does "+"->" " options[elm] = options[elm].replace(" ", "+") - self.assertEqual(auth[elm], options[elm], - "Expected %s but got %s" - % (auth[elm], options[elm])) + self.assertEqual( + auth[elm], + options[elm], + "Expected %s but got %s" % (auth[elm], options[elm]), + ) # Compare URI options. err_msg = "For option %s expected %s but got %s" - if test['options']: - opts = options['options'] - for opt in test['options']: + if test["options"]: + opts = options["options"] + for opt in test["options"]: lopt = opt.lower() optname = INTERNAL_URI_OPTION_NAME_MAP.get(lopt, lopt) if opts.get(optname) is not None: - if opts[optname] == test['options'][opt]: - expected_value = test['options'][opt] + if opts[optname] == test["options"][opt]: + expected_value = test["options"][opt] else: - expected_value = validate( - lopt, test['options'][opt])[1] + expected_value = validate(lopt, test["options"][opt])[1] self.assertEqual( - opts[optname], expected_value, - err_msg % (opt, expected_value, opts[optname],)) + opts[optname], + expected_value, + err_msg + % ( + opt, + expected_value, + opts[optname], + ), + ) else: - self.fail( - "Missing expected option %s" % (opt,)) + self.fail("Missing expected option %s" % (opt,)) return run_scenario_in_dir(test_workdir)(run_scenario) @@ -176,27 +191,29 @@ def run_scenario(self): def create_tests(test_path): for dirpath, _, filenames in os.walk(test_path): dirname = os.path.split(dirpath) - dirname = os.path.split(dirname[-2])[-1] + '_' + dirname[-1] + dirname = os.path.split(dirname[-2])[-1] + "_" + dirname[-1] for filename in filenames: - if not filename.endswith('.json'): + if not filename.endswith(".json"): # skip everything that is not a test specification continue json_path = os.path.join(dirpath, filename) with open(json_path, encoding="utf-8") as scenario_stream: scenario_def = json.load(scenario_stream) - for testcase in scenario_def['tests']: - dsc = testcase['description'] + for testcase in scenario_def["tests"]: + dsc = testcase["description"] if dsc in TEST_DESC_SKIP_LIST: print("Skipping test '%s'" % dsc) continue testmethod = 
create_test(testcase, dirpath) - testname = 'test_%s_%s_%s' % ( - dirname, os.path.splitext(filename)[0], - str(dsc).replace(' ', '_')) + testname = "test_%s_%s_%s" % ( + dirname, + os.path.splitext(filename)[0], + str(dsc).replace(" ", "_"), + ) testmethod.__name__ = testname setattr(TestAllScenarios, testmethod.__name__, testmethod) diff --git a/test/test_versioned_api.py b/test/test_versioned_api.py index 44fc89ac73..a2fd059d21 100644 --- a/test/test_versioned_api.py +++ b/test/test_versioned_api.py @@ -17,16 +17,14 @@ sys.path[0:0] = [""] -from pymongo.mongo_client import MongoClient -from pymongo.server_api import ServerApi, ServerApiVersion - -from test import client_context, IntegrationTest, unittest +from test import IntegrationTest, client_context, unittest from test.unified_format import generate_test_classes from test.utils import OvertCommandListener, rs_or_single_client +from pymongo.mongo_client import MongoClient +from pymongo.server_api import ServerApi, ServerApiVersion -TEST_PATH = os.path.join( - os.path.dirname(os.path.realpath(__file__)), 'versioned-api') +TEST_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "versioned-api") # Generate unified tests. globals().update(generate_test_classes(TEST_PATH, module=__name__)) @@ -38,38 +36,38 @@ class TestServerApi(IntegrationTest): def test_server_api_defaults(self): api = ServerApi(ServerApiVersion.V1) - self.assertEqual(api.version, '1') + self.assertEqual(api.version, "1") self.assertIsNone(api.strict) self.assertIsNone(api.deprecation_errors) def test_server_api_explicit_false(self): - api = ServerApi('1', strict=False, deprecation_errors=False) - self.assertEqual(api.version, '1') + api = ServerApi("1", strict=False, deprecation_errors=False) + self.assertEqual(api.version, "1") self.assertFalse(api.strict) self.assertFalse(api.deprecation_errors) def test_server_api_strict(self): - api = ServerApi('1', strict=True, deprecation_errors=True) - self.assertEqual(api.version, '1') + api = ServerApi("1", strict=True, deprecation_errors=True) + self.assertEqual(api.version, "1") self.assertTrue(api.strict) self.assertTrue(api.deprecation_errors) def test_server_api_validation(self): with self.assertRaises(ValueError): - ServerApi('2') + ServerApi("2") with self.assertRaises(TypeError): - ServerApi('1', strict='not-a-bool') + ServerApi("1", strict="not-a-bool") with self.assertRaises(TypeError): - ServerApi('1', deprecation_errors='not-a-bool') + ServerApi("1", deprecation_errors="not-a-bool") with self.assertRaises(TypeError): - MongoClient(server_api='not-a-ServerApi') + MongoClient(server_api="not-a-ServerApi") def assertServerApi(self, event): - self.assertIn('apiVersion', event.command) - self.assertEqual(event.command['apiVersion'], '1') + self.assertIn("apiVersion", event.command) + self.assertEqual(event.command["apiVersion"], "1") def assertNoServerApi(self, event): - self.assertNotIn('apiVersion', event.command) + self.assertNotIn("apiVersion", event.command) def assertServerApiInAllCommands(self, events): for event in events: @@ -78,22 +76,20 @@ def assertServerApiInAllCommands(self, events): @client_context.require_version_min(4, 7) def test_command_options(self): listener = OvertCommandListener() - client = rs_or_single_client(server_api=ServerApi('1'), - event_listeners=[listener]) + client = rs_or_single_client(server_api=ServerApi("1"), event_listeners=[listener]) self.addCleanup(client.close) coll = client.test.test coll.insert_many([{} for _ in range(100)]) self.addCleanup(coll.delete_many, {}) 
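# (The client was constructed with server_api=ServerApi("1"), so every
# command captured below, including the find, the getMore batches it
# triggers with batch_size=25 over 100 documents, and the explicit ping,
# is expected to carry apiVersion "1".)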
list(coll.find(batch_size=25)) - client.admin.command('ping') - self.assertServerApiInAllCommands(listener.results['started']) + client.admin.command("ping") + self.assertServerApiInAllCommands(listener.results["started"]) @client_context.require_version_min(4, 7) @client_context.require_transactions def test_command_options_txn(self): listener = OvertCommandListener() - client = rs_or_single_client(server_api=ServerApi('1'), - event_listeners=[listener]) + client = rs_or_single_client(server_api=ServerApi("1"), event_listeners=[listener]) self.addCleanup(client.close) coll = client.test.test coll.insert_many([{} for _ in range(100)]) @@ -103,8 +99,8 @@ def test_command_options_txn(self): with client.start_session() as s, s.start_transaction(): coll.insert_many([{} for _ in range(100)], session=s) list(coll.find(batch_size=25, session=s)) - client.test.command('find', 'test', session=s) - self.assertServerApiInAllCommands(listener.results['started']) + client.test.command("find", "test", session=s) + self.assertServerApiInAllCommands(listener.results["started"]) if __name__ == "__main__": diff --git a/test/test_write_concern.py b/test/test_write_concern.py index f0ea690fb3..02c562a348 100644 --- a/test/test_write_concern.py +++ b/test/test_write_concern.py @@ -22,7 +22,6 @@ class TestWriteConcern(unittest.TestCase): - def test_invalid(self): # Can't use fsync and j options together self.assertRaises(ConfigurationError, WriteConcern, j=True, fsync=True) @@ -41,9 +40,7 @@ def test_equality_to_none(self): self.assertTrue(concern != None) # noqa def test_equality_compatible_type(self): - class _FakeWriteConcern(object): - def __init__(self, **document): self.document = document @@ -66,9 +63,9 @@ def __ne__(self, other): self.assertNotEqual(WriteConcern(wtimeout=42), _FakeWriteConcern(wtimeout=2000)) def test_equality_incompatible_type(self): - _fake_type = collections.namedtuple('NotAWriteConcern', ['document']) # type: ignore - self.assertNotEqual(WriteConcern(j=True), _fake_type({'j': True})) + _fake_type = collections.namedtuple("NotAWriteConcern", ["document"]) # type: ignore + self.assertNotEqual(WriteConcern(j=True), _fake_type({"j": True})) -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() diff --git a/test/unicode/test_utf8.py b/test/unicode/test_utf8.py index 65738d5c04..7ce2936b7a 100644 --- a/test/unicode/test_utf8.py +++ b/test/unicode/test_utf8.py @@ -2,9 +2,11 @@ sys.path[0:0] = [""] +from test import unittest + from bson import encode from bson.errors import InvalidStringData -from test import unittest + class TestUTF8(unittest.TestCase): @@ -12,18 +14,19 @@ class TestUTF8(unittest.TestCase): # legal utf-8 if the first byte is 0xf4 (244) def _assert_same_utf8_validation(self, data): try: - data.decode('utf-8') - py_is_legal = True + data.decode("utf-8") + py_is_legal = True except UnicodeDecodeError: py_is_legal = False try: - encode({'x': data}) - bson_is_legal = True + encode({"x": data}) + bson_is_legal = True except InvalidStringData: bson_is_legal = False self.assertEqual(py_is_legal, bson_is_legal, data) + if __name__ == "__main__": unittest.main() diff --git a/test/unified_format.py b/test/unified_format.py index 9c38c47863..ba1d063694 100644 --- a/test/unified_format.py +++ b/test/unified_format.py @@ -25,49 +25,65 @@ import sys import time import types - from collections import abc +from test import IntegrationTest, client_context, unittest +from test.utils import ( + CMAPListener, + camel_to_snake, + camel_to_snake_args, + get_pool, + 
parse_collection_options, + parse_spec_options, + prepare_spec_arguments, + rs_or_single_client, + single_client, + snake_to_camel, +) +from test.version import Version from typing import Any -from bson import json_util, Code, Decimal128, DBRef, SON, Int64, MaxKey, MinKey +from bson import SON, Code, DBRef, Decimal128, Int64, MaxKey, MinKey, json_util from bson.binary import Binary from bson.objectid import ObjectId -from bson.regex import Regex, RE_TYPE - +from bson.regex import RE_TYPE, Regex from gridfs import GridFSBucket - from pymongo import ASCENDING, MongoClient -from pymongo.client_session import ClientSession, TransactionOptions, _TxnState from pymongo.change_stream import ChangeStream +from pymongo.client_session import ClientSession, TransactionOptions, _TxnState from pymongo.collection import Collection from pymongo.database import Database from pymongo.errors import ( - BulkWriteError, ConnectionFailure, ConfigurationError, InvalidOperation, - NotPrimaryError, PyMongoError) + BulkWriteError, + ConfigurationError, + ConnectionFailure, + InvalidOperation, + NotPrimaryError, + PyMongoError, +) from pymongo.monitoring import ( - CommandFailedEvent, CommandListener, CommandStartedEvent, - CommandSucceededEvent, _SENSITIVE_COMMANDS, PoolCreatedEvent, - PoolReadyEvent, PoolClearedEvent, PoolClosedEvent, ConnectionCreatedEvent, - ConnectionReadyEvent, ConnectionClosedEvent, - ConnectionCheckOutStartedEvent, ConnectionCheckOutFailedEvent, - ConnectionCheckedOutEvent, ConnectionCheckedInEvent) + _SENSITIVE_COMMANDS, + CommandFailedEvent, + CommandListener, + CommandStartedEvent, + CommandSucceededEvent, + ConnectionCheckedInEvent, + ConnectionCheckedOutEvent, + ConnectionCheckOutFailedEvent, + ConnectionCheckOutStartedEvent, + ConnectionClosedEvent, + ConnectionCreatedEvent, + ConnectionReadyEvent, + PoolClearedEvent, + PoolClosedEvent, + PoolCreatedEvent, + PoolReadyEvent, +) from pymongo.read_concern import ReadConcern from pymongo.read_preferences import ReadPreference from pymongo.results import BulkWriteResult from pymongo.server_api import ServerApi from pymongo.write_concern import WriteConcern -from test import client_context, unittest, IntegrationTest -from test.utils import ( - camel_to_snake, get_pool, rs_or_single_client, single_client, - snake_to_camel, CMAPListener) - -from test.version import Version -from test.utils import ( - camel_to_snake_args, parse_collection_options, parse_spec_options, - prepare_spec_arguments) - - JSON_OPTS = json_util.JSONOptions(tz_aware=False) IS_INTERRUPTED = False @@ -87,14 +103,13 @@ def with_metaclass(meta, *bases): # metaclass for one level of class instantiation that replaces itself with # the actual metaclass. class metaclass(type): - def __new__(cls, name, this_bases, d): if sys.version_info[:2] >= (3, 7): # This version introduced PEP 560 that requires a bit # of extra care (we mimic what is done by __build_class__). 
resolved_bases = types.resolve_bases(bases) if resolved_bases is not bases: - d['__orig_bases__'] = bases + d["__orig_bases__"] = bases else: resolved_bases = bases return meta(name, resolved_bases, d) @@ -102,40 +117,38 @@ def __new__(cls, name, this_bases, d): @classmethod def __prepare__(cls, name, this_bases): return meta.__prepare__(name, bases) - return type.__new__(metaclass, 'temporary_class', (), {}) + + return type.__new__(metaclass, "temporary_class", (), {}) def is_run_on_requirement_satisfied(requirement): topology_satisfied = True - req_topologies = requirement.get('topologies') + req_topologies = requirement.get("topologies") if req_topologies: - topology_satisfied = client_context.is_topology_type( - req_topologies) + topology_satisfied = client_context.is_topology_type(req_topologies) server_version = Version(*client_context.version[:3]) min_version_satisfied = True - req_min_server_version = requirement.get('minServerVersion') + req_min_server_version = requirement.get("minServerVersion") if req_min_server_version: - min_version_satisfied = Version.from_string( - req_min_server_version) <= server_version + min_version_satisfied = Version.from_string(req_min_server_version) <= server_version max_version_satisfied = True - req_max_server_version = requirement.get('maxServerVersion') + req_max_server_version = requirement.get("maxServerVersion") if req_max_server_version: - max_version_satisfied = Version.from_string( - req_max_server_version) >= server_version + max_version_satisfied = Version.from_string(req_max_server_version) >= server_version - serverless = requirement.get('serverless') + serverless = requirement.get("serverless") if serverless == "require": serverless_satisfied = client_context.serverless elif serverless == "forbid": serverless_satisfied = not client_context.serverless - else: # unset or "allow" + else: # unset or "allow" serverless_satisfied = True params_satisfied = True - params = requirement.get('serverParameters') + params = requirement.get("serverParameters") if params: for param, val in params.items(): if param not in client_context.server_parameters: @@ -144,16 +157,21 @@ def is_run_on_requirement_satisfied(requirement): params_satisfied = False auth_satisfied = True - req_auth = requirement.get('auth') + req_auth = requirement.get("auth") if req_auth is not None: if req_auth: auth_satisfied = client_context.auth_enabled else: auth_satisfied = not client_context.auth_enabled - return (topology_satisfied and min_version_satisfied and - max_version_satisfied and serverless_satisfied and - params_satisfied and auth_satisfied) + return ( + topology_satisfied + and min_version_satisfied + and max_version_satisfied + and serverless_satisfied + and params_satisfied + and auth_satisfied + ) def parse_collection_or_database_options(options): @@ -161,15 +179,15 @@ def parse_collection_or_database_options(options): def parse_bulk_write_result(result): - upserted_ids = {str(int_idx): result.upserted_ids[int_idx] - for int_idx in result.upserted_ids} + upserted_ids = {str(int_idx): result.upserted_ids[int_idx] for int_idx in result.upserted_ids} return { - 'deletedCount': result.deleted_count, - 'insertedCount': result.inserted_count, - 'matchedCount': result.matched_count, - 'modifiedCount': result.modified_count, - 'upsertedCount': result.upserted_count, - 'upsertedIds': upserted_ids} + "deletedCount": result.deleted_count, + "insertedCount": result.inserted_count, + "matchedCount": result.matched_count, + "modifiedCount": result.modified_count, + 
"upsertedCount": result.upserted_count, + "upsertedIds": upserted_ids, + } def parse_bulk_write_error_result(error): @@ -179,6 +197,7 @@ def parse_bulk_write_error_result(error): class NonLazyCursor(object): """A find cursor proxy that creates the remote cursor when initialized.""" + def __init__(self, find_cursor): self.find_cursor = find_cursor # Create the server side cursor. @@ -196,8 +215,9 @@ def close(self): class EventListenerUtil(CMAPListener, CommandListener): - def __init__(self, observe_events, ignore_commands, - observe_sensitive_commands, store_events, entity_map): + def __init__( + self, observe_events, ignore_commands, observe_sensitive_commands, store_events, entity_map + ): self._event_types = set(name.lower() for name in observe_events) if observe_sensitive_commands: self._observe_sensitive_commands = True @@ -205,7 +225,7 @@ def __init__(self, observe_events, ignore_commands, else: self._observe_sensitive_commands = False self._ignore_commands = _SENSITIVE_COMMANDS | set(ignore_commands) - self._ignore_commands.add('configurefailpoint') + self._ignore_commands.add("configurefailpoint") self._event_mapping = collections.defaultdict(list) self.entity_map = entity_map if store_events: @@ -218,20 +238,22 @@ def __init__(self, observe_events, ignore_commands, super(EventListenerUtil, self).__init__() def get_events(self, event_type): - if event_type == 'command': - return [e for e in self.events if 'Command' in type(e).__name__] - return [e for e in self.events if 'Command' not in type(e).__name__] + if event_type == "command": + return [e for e in self.events if "Command" in type(e).__name__] + return [e for e in self.events if "Command" not in type(e).__name__] def add_event(self, event): event_name = type(event).__name__.lower() if event_name in self._event_types: super(EventListenerUtil, self).add_event(event) for id in self._event_mapping[event_name]: - self.entity_map[id].append({ - "name": type(event).__name__, - "observedAt": time.time(), - "description": repr(event) - }) + self.entity_map[id].append( + { + "name": type(event).__name__, + "observedAt": time.time(), + "description": repr(event), + } + ) def _command_event(self, event): if event.command_name.lower() not in self._ignore_commands: @@ -260,6 +282,7 @@ def failed(self, event): class EntityMapUtil(object): """Utility class that implements an entity map as per the unified test format specification.""" + def __init__(self, test_class): self._entities = {} self._listeners = {} @@ -276,102 +299,100 @@ def __getitem__(self, item): try: return self._entities[item] except KeyError: - self.test.fail('Could not find entity named %s in map' % ( - item,)) + self.test.fail("Could not find entity named %s in map" % (item,)) def __setitem__(self, key, value): if not isinstance(key, str): - self.test.fail( - 'Expected entity name of type str, got %s' % (type(key))) + self.test.fail("Expected entity name of type str, got %s" % (type(key))) if key in self._entities: - self.test.fail('Entity named %s already in map' % (key,)) + self.test.fail("Entity named %s already in map" % (key,)) self._entities[key] = value def _create_entity(self, entity_spec, uri=None): if len(entity_spec) != 1: self.test.fail( - "Entity spec %s did not contain exactly one top-level key" % ( - entity_spec,)) + "Entity spec %s did not contain exactly one top-level key" % (entity_spec,) + ) entity_type, spec = next(iter(entity_spec.items())) - if entity_type == 'client': + if entity_type == "client": kwargs: dict = {} - observe_events = 
spec.get('observeEvents', []) - ignore_commands = spec.get('ignoreCommandMonitoringEvents', []) - observe_sensitive_commands = spec.get( - 'observeSensitiveCommands', False) + observe_events = spec.get("observeEvents", []) + ignore_commands = spec.get("ignoreCommandMonitoringEvents", []) + observe_sensitive_commands = spec.get("observeSensitiveCommands", False) ignore_commands = [cmd.lower() for cmd in ignore_commands] listener = EventListenerUtil( - observe_events, ignore_commands, + observe_events, + ignore_commands, observe_sensitive_commands, - spec.get("storeEventsAsEntities"), self) - self._listeners[spec['id']] = listener - kwargs['event_listeners'] = [listener] - if spec.get('useMultipleMongoses'): + spec.get("storeEventsAsEntities"), + self, + ) + self._listeners[spec["id"]] = listener + kwargs["event_listeners"] = [listener] + if spec.get("useMultipleMongoses"): if client_context.load_balancer or client_context.serverless: - kwargs['h'] = client_context.MULTI_MONGOS_LB_URI + kwargs["h"] = client_context.MULTI_MONGOS_LB_URI elif client_context.is_mongos: - kwargs['h'] = client_context.mongos_seeds() - kwargs.update(spec.get('uriOptions', {})) - server_api = spec.get('serverApi') + kwargs["h"] = client_context.mongos_seeds() + kwargs.update(spec.get("uriOptions", {})) + server_api = spec.get("serverApi") if server_api: - kwargs['server_api'] = ServerApi( - server_api['version'], strict=server_api.get('strict'), - deprecation_errors=server_api.get('deprecationErrors')) + kwargs["server_api"] = ServerApi( + server_api["version"], + strict=server_api.get("strict"), + deprecation_errors=server_api.get("deprecationErrors"), + ) if uri: - kwargs['h'] = uri + kwargs["h"] = uri client = rs_or_single_client(**kwargs) - self[spec['id']] = client + self[spec["id"]] = client self.test.addCleanup(client.close) return - elif entity_type == 'database': - client = self[spec['client']] + elif entity_type == "database": + client = self[spec["client"]] if not isinstance(client, MongoClient): self.test.fail( - 'Expected entity %s to be of type MongoClient, got %s' % ( - spec['client'], type(client))) - options = parse_collection_or_database_options( - spec.get('databaseOptions', {})) - self[spec['id']] = client.get_database( - spec['databaseName'], **options) + "Expected entity %s to be of type MongoClient, got %s" + % (spec["client"], type(client)) + ) + options = parse_collection_or_database_options(spec.get("databaseOptions", {})) + self[spec["id"]] = client.get_database(spec["databaseName"], **options) return - elif entity_type == 'collection': - database = self[spec['database']] + elif entity_type == "collection": + database = self[spec["database"]] if not isinstance(database, Database): self.test.fail( - 'Expected entity %s to be of type Database, got %s' % ( - spec['database'], type(database))) - options = parse_collection_or_database_options( - spec.get('collectionOptions', {})) - self[spec['id']] = database.get_collection( - spec['collectionName'], **options) + "Expected entity %s to be of type Database, got %s" + % (spec["database"], type(database)) + ) + options = parse_collection_or_database_options(spec.get("collectionOptions", {})) + self[spec["id"]] = database.get_collection(spec["collectionName"], **options) return - elif entity_type == 'session': - client = self[spec['client']] + elif entity_type == "session": + client = self[spec["client"]] if not isinstance(client, MongoClient): self.test.fail( - 'Expected entity %s to be of type MongoClient, got %s' % ( - spec['client'], 
type(client))) - opts = camel_to_snake_args(spec.get('sessionOptions', {})) - if 'default_transaction_options' in opts: - txn_opts = parse_spec_options( - opts['default_transaction_options']) + "Expected entity %s to be of type MongoClient, got %s" + % (spec["client"], type(client)) + ) + opts = camel_to_snake_args(spec.get("sessionOptions", {})) + if "default_transaction_options" in opts: + txn_opts = parse_spec_options(opts["default_transaction_options"]) txn_opts = TransactionOptions(**txn_opts) opts = copy.deepcopy(opts) - opts['default_transaction_options'] = txn_opts + opts["default_transaction_options"] = txn_opts session = client.start_session(**dict(opts)) - self[spec['id']] = session - self._session_lsids[spec['id']] = copy.deepcopy(session.session_id) + self[spec["id"]] = session + self._session_lsids[spec["id"]] = copy.deepcopy(session.session_id) self.test.addCleanup(session.end_session) return - elif entity_type == 'bucket': + elif entity_type == "bucket": # TODO: implement the 'bucket' entity type - self.test.skipTest( - 'GridFS is not currently supported (PYTHON-2459)') - self.test.fail( - 'Unable to create entity of unknown type %s' % (entity_type,)) + self.test.skipTest("GridFS is not currently supported (PYTHON-2459)") + self.test.fail("Unable to create entity of unknown type %s" % (entity_type,)) def create_entities_from_spec(self, entity_spec, uri=None): for spec in entity_spec: @@ -381,13 +402,12 @@ def get_listener_for_client(self, client_name): client = self[client_name] if not isinstance(client, MongoClient): self.test.fail( - 'Expected entity %s to be of type MongoClient, got %s' % ( - client_name, type(client))) + "Expected entity %s to be of type MongoClient, got %s" % (client_name, type(client)) + ) listener = self._listeners.get(client_name) if not listener: - self.test.fail( - 'No listeners configured for client %s' % (client_name,)) + self.test.fail("No listeners configured for client %s" % (client_name,)) return listener @@ -395,8 +415,9 @@ def get_lsid_for_session(self, session_name): session = self[session_name] if not isinstance(session, ClientSession): self.test.fail( - 'Expected entity %s to be of type ClientSession, got %s' % ( - session_name, type(session))) + "Expected entity %s to be of type ClientSession, got %s" + % (session_name, type(session)) + ) try: return session.session_id @@ -413,32 +434,33 @@ def get_lsid_for_session(self, session_name): BSON_TYPE_ALIAS_MAP = { # https://docs.mongodb.com/manual/reference/operator/query/type/ # https://pymongo.readthedocs.io/en/stable/api/bson/index.html - 'double': (float,), - 'string': (str,), - 'object': (abc.Mapping,), - 'array': (abc.MutableSequence,), - 'binData': binary_types, - 'undefined': (type(None),), - 'objectId': (ObjectId,), - 'bool': (bool,), - 'date': (datetime.datetime,), - 'null': (type(None),), - 'regex': (Regex, RE_TYPE), - 'dbPointer': (DBRef,), - 'javascript': (unicode_type, Code), - 'symbol': (unicode_type,), - 'javascriptWithScope': (unicode_type, Code), - 'int': (int,), - 'long': (Int64,), - 'decimal': (Decimal128,), - 'maxKey': (MaxKey,), - 'minKey': (MinKey,), + "double": (float,), + "string": (str,), + "object": (abc.Mapping,), + "array": (abc.MutableSequence,), + "binData": binary_types, + "undefined": (type(None),), + "objectId": (ObjectId,), + "bool": (bool,), + "date": (datetime.datetime,), + "null": (type(None),), + "regex": (Regex, RE_TYPE), + "dbPointer": (DBRef,), + "javascript": (unicode_type, Code), + "symbol": (unicode_type,), + "javascriptWithScope": (unicode_type, 
Code), + "int": (int,), + "long": (Int64,), + "decimal": (Decimal128,), + "maxKey": (MaxKey,), + "minKey": (MinKey,), } class MatchEvaluatorUtil(object): """Utility class that implements methods for evaluating matches as per the unified test format specification.""" + def __init__(self, test_class): self.test = test_class @@ -448,19 +470,18 @@ def _operation_exists(self, spec, actual, key_to_compare): elif spec is False: self.test.assertNotIn(key_to_compare, actual) else: - self.test.fail( - 'Expected boolean value for $$exists operator, got %s' % ( - spec,)) + self.test.fail("Expected boolean value for $$exists operator, got %s" % (spec,)) def __type_alias_to_type(self, alias): if alias not in BSON_TYPE_ALIAS_MAP: - self.test.fail('Unrecognized BSON type alias %s' % (alias,)) + self.test.fail("Unrecognized BSON type alias %s" % (alias,)) return BSON_TYPE_ALIAS_MAP[alias] def _operation_type(self, spec, actual, key_to_compare): if isinstance(spec, abc.MutableSequence): - permissible_types = tuple([ - t for alias in spec for t in self.__type_alias_to_type(alias)]) + permissible_types = tuple( + [t for alias in spec for t in self.__type_alias_to_type(alias)] + ) else: permissible_types = self.__type_alias_to_type(spec) value = actual[key_to_compare] if key_to_compare else actual @@ -481,7 +502,7 @@ def _operation_unsetOrMatches(self, spec, actual, key_to_compare): if key_to_compare not in actual: # we add a dummy value for the compared key to pass map size check - actual[key_to_compare] = 'dummyValue' + actual[key_to_compare] = "dummyValue" return self.match_result(spec, actual[key_to_compare], in_recursive_call=True) @@ -489,19 +510,16 @@ def _operation_sessionLsid(self, spec, actual, key_to_compare): expected_lsid = self.test.entity_map.get_lsid_for_session(spec) self.test.assertEqual(expected_lsid, actual[key_to_compare]) - def _evaluate_special_operation(self, opname, spec, actual, - key_to_compare): - method_name = '_operation_%s' % (opname.strip('$'),) + def _evaluate_special_operation(self, opname, spec, actual, key_to_compare): + method_name = "_operation_%s" % (opname.strip("$"),) try: method = getattr(self, method_name) except AttributeError: - self.test.fail( - 'Unsupported special matching operator %s' % (opname,)) + self.test.fail("Unsupported special matching operator %s" % (opname,)) else: method(spec, actual, key_to_compare) - def _evaluate_if_special_operation(self, expectation, actual, - key_to_compare=None): + def _evaluate_if_special_operation(self, expectation, actual, key_to_compare=None): """Returns True if a special operation is evaluated, False otherwise. If the ``expectation`` map contains a single key, value pair we check it for a special operation. 
@@ -515,7 +533,7 @@ def _evaluate_if_special_operation(self, expectation, actual, is_special_op, opname, spec = False, False, False if key_to_compare is not None: - if key_to_compare.startswith('$$'): + if key_to_compare.startswith("$$"): is_special_op = True opname = key_to_compare spec = expectation[key_to_compare] @@ -524,20 +542,18 @@ def _evaluate_if_special_operation(self, expectation, actual, nested = expectation[key_to_compare] if isinstance(nested, abc.Mapping) and len(nested) == 1: opname, spec = next(iter(nested.items())) - if opname.startswith('$$'): + if opname.startswith("$$"): is_special_op = True elif len(expectation) == 1: opname, spec = next(iter(expectation.items())) - if opname.startswith('$$'): + if opname.startswith("$$"): is_special_op = True key_to_compare = None if is_special_op: self._evaluate_special_operation( - opname=opname, - spec=spec, - actual=actual, - key_to_compare=key_to_compare) + opname=opname, spec=spec, actual=actual, key_to_compare=key_to_compare + ) return True return False @@ -557,37 +573,33 @@ def _match_document(self, expectation, actual, is_root): if not is_root: expected_keys = set(expectation.keys()) for key, value in expectation.items(): - if value == {'$$exists': False}: + if value == {"$$exists": False}: expected_keys.remove(key) self.test.assertEqual(expected_keys, set(actual.keys())) - def match_result(self, expectation, actual, - in_recursive_call=False): + def match_result(self, expectation, actual, in_recursive_call=False): if isinstance(expectation, abc.Mapping): - return self._match_document( - expectation, actual, is_root=not in_recursive_call) + return self._match_document(expectation, actual, is_root=not in_recursive_call) if isinstance(expectation, abc.MutableSequence): self.test.assertIsInstance(actual, abc.MutableSequence) for e, a in zip(expectation, actual): if isinstance(e, abc.Mapping): - self._match_document( - e, a, is_root=not in_recursive_call) + self._match_document(e, a, is_root=not in_recursive_call) else: self.match_result(e, a, in_recursive_call=True) return # account for flexible numerics in element-wise comparison - if (isinstance(expectation, int) or - isinstance(expectation, float)): + if isinstance(expectation, int) or isinstance(expectation, float): self.test.assertEqual(expectation, actual) else: self.test.assertIsInstance(actual, type(expectation)) self.test.assertEqual(expectation, actual) def assertHasServiceId(self, spec, actual): - if 'hasServiceId' in spec: - if spec.get('hasServiceId'): + if "hasServiceId" in spec: + if spec.get("hasServiceId"): self.test.assertIsNotNone(actual.service_id) self.test.assertIsInstance(actual.service_id, ObjectId) else: @@ -597,85 +609,83 @@ def match_event(self, event_type, expectation, actual): name, spec = next(iter(expectation.items())) # every command event has the commandName field - if event_type == 'command': - command_name = spec.get('commandName') + if event_type == "command": + command_name = spec.get("commandName") if command_name: self.test.assertEqual(command_name, actual.command_name) - if name == 'commandStartedEvent': + if name == "commandStartedEvent": self.test.assertIsInstance(actual, CommandStartedEvent) - command = spec.get('command') - database_name = spec.get('databaseName') + command = spec.get("command") + database_name = spec.get("databaseName") if command: - if actual.command_name == 'update': + if actual.command_name == "update": # TODO: remove this once PYTHON-1744 is done. # Add upsert and multi fields back into expectations. 
- for update in command.get('updates', []): - update.setdefault('upsert', False) - update.setdefault('multi', False) + for update in command.get("updates", []): + update.setdefault("upsert", False) + update.setdefault("multi", False) self.match_result(command, actual.command) if database_name: - self.test.assertEqual( - database_name, actual.database_name) + self.test.assertEqual(database_name, actual.database_name) self.assertHasServiceId(spec, actual) - elif name == 'commandSucceededEvent': + elif name == "commandSucceededEvent": self.test.assertIsInstance(actual, CommandSucceededEvent) - reply = spec.get('reply') + reply = spec.get("reply") if reply: self.match_result(reply, actual.reply) self.assertHasServiceId(spec, actual) - elif name == 'commandFailedEvent': + elif name == "commandFailedEvent": self.test.assertIsInstance(actual, CommandFailedEvent) self.assertHasServiceId(spec, actual) - elif name == 'poolCreatedEvent': + elif name == "poolCreatedEvent": self.test.assertIsInstance(actual, PoolCreatedEvent) - elif name == 'poolReadyEvent': + elif name == "poolReadyEvent": self.test.assertIsInstance(actual, PoolReadyEvent) - elif name == 'poolClearedEvent': + elif name == "poolClearedEvent": self.test.assertIsInstance(actual, PoolClearedEvent) self.assertHasServiceId(spec, actual) - elif name == 'poolClosedEvent': + elif name == "poolClosedEvent": self.test.assertIsInstance(actual, PoolClosedEvent) - elif name == 'connectionCreatedEvent': + elif name == "connectionCreatedEvent": self.test.assertIsInstance(actual, ConnectionCreatedEvent) - elif name == 'connectionReadyEvent': + elif name == "connectionReadyEvent": self.test.assertIsInstance(actual, ConnectionReadyEvent) - elif name == 'connectionClosedEvent': + elif name == "connectionClosedEvent": self.test.assertIsInstance(actual, ConnectionClosedEvent) - if 'reason' in spec: - self.test.assertEqual(actual.reason, spec['reason']) - elif name == 'connectionCheckOutStartedEvent': + if "reason" in spec: + self.test.assertEqual(actual.reason, spec["reason"]) + elif name == "connectionCheckOutStartedEvent": self.test.assertIsInstance(actual, ConnectionCheckOutStartedEvent) - elif name == 'connectionCheckOutFailedEvent': + elif name == "connectionCheckOutFailedEvent": self.test.assertIsInstance(actual, ConnectionCheckOutFailedEvent) - if 'reason' in spec: - self.test.assertEqual(actual.reason, spec['reason']) - elif name == 'connectionCheckedOutEvent': + if "reason" in spec: + self.test.assertEqual(actual.reason, spec["reason"]) + elif name == "connectionCheckedOutEvent": self.test.assertIsInstance(actual, ConnectionCheckedOutEvent) - elif name == 'connectionCheckedInEvent': + elif name == "connectionCheckedInEvent": self.test.assertIsInstance(actual, ConnectionCheckedInEvent) else: - self.test.fail( - 'Unsupported event type %s' % (name,)) + self.test.fail("Unsupported event type %s" % (name,)) def coerce_result(opname, result): """Convert a pymongo result into the spec's result format.""" - if hasattr(result, 'acknowledged') and not result.acknowledged: - return {'acknowledged': False} - if opname == 'bulkWrite': + if hasattr(result, "acknowledged") and not result.acknowledged: + return {"acknowledged": False} + if opname == "bulkWrite": return parse_bulk_write_result(result) - if opname == 'insertOne': - return {'insertedId': result.inserted_id} - if opname == 'insertMany': + if opname == "insertOne": + return {"insertedId": result.inserted_id} + if opname == "insertMany": return {idx: _id for idx, _id in enumerate(result.inserted_ids)} 
- if opname in ('deleteOne', 'deleteMany'): - return {'deletedCount': result.deleted_count} - if opname in ('updateOne', 'updateMany', 'replaceOne'): + if opname in ("deleteOne", "deleteMany"): + return {"deletedCount": result.deleted_count} + if opname in ("updateOne", "updateMany", "replaceOne"): return { - 'matchedCount': result.matched_count, - 'modifiedCount': result.modified_count, - 'upsertedCount': 0 if result.upserted_id is None else 1, + "matchedCount": result.matched_count, + "modifiedCount": result.modified_count, + "upsertedCount": 0 if result.upserted_id is None else 1, } return result @@ -689,7 +699,8 @@ class UnifiedSpecTestMixinV1(IntegrationTest): Specification of the test suite being currently run is available as a class attribute ``TEST_SPEC``. """ - SCHEMA_VERSION = Version.from_string('1.5') + + SCHEMA_VERSION = Version.from_string("1.5") RUN_ON_LOAD_BALANCER = True RUN_ON_SERVERLESS = True TEST_SPEC: Any @@ -707,12 +718,13 @@ def should_run_on(run_on_spec): def insert_initial_data(self, initial_data): for collection_data in initial_data: - coll_name = collection_data['collectionName'] - db_name = collection_data['databaseName'] - documents = collection_data['documents'] + coll_name = collection_data["collectionName"] + db_name = collection_data["databaseName"] + documents = collection_data["documents"] coll = self.client.get_database(db_name).get_collection( - coll_name, write_concern=WriteConcern(w="majority")) + coll_name, write_concern=WriteConcern(w="majority") + ) coll.drop() if len(documents) > 0: @@ -720,56 +732,54 @@ def insert_initial_data(self, initial_data): else: # ensure collection exists result = coll.insert_one({}) - coll.delete_one({'_id': result.inserted_id}) + coll.delete_one({"_id": result.inserted_id}) @classmethod def setUpClass(cls): # super call creates internal client cls.client super(UnifiedSpecTestMixinV1, cls).setUpClass() # process file-level runOnRequirements - run_on_spec = cls.TEST_SPEC.get('runOnRequirements', []) + run_on_spec = cls.TEST_SPEC.get("runOnRequirements", []) if not cls.should_run_on(run_on_spec): - raise unittest.SkipTest( - '%s runOnRequirements not satisfied' % (cls.__name__,)) + raise unittest.SkipTest("%s runOnRequirements not satisfied" % (cls.__name__,)) # add any special-casing for skipping tests here - if client_context.storage_engine == 'mmapv1': - if 'retryable-writes' in cls.TEST_SPEC['description']: - raise unittest.SkipTest( - "MMAPv1 does not support retryWrites=True") + if client_context.storage_engine == "mmapv1": + if "retryable-writes" in cls.TEST_SPEC["description"]: + raise unittest.SkipTest("MMAPv1 does not support retryWrites=True") def setUp(self): super(UnifiedSpecTestMixinV1, self).setUp() # process schemaVersion # note: we check major schema version during class generation # note: we do this here because we cannot run assertions in setUpClass - version = Version.from_string(self.TEST_SPEC['schemaVersion']) + version = Version.from_string(self.TEST_SPEC["schemaVersion"]) self.assertLessEqual( - version, self.SCHEMA_VERSION, - 'expected schema version %s or lower, got %s' % ( - self.SCHEMA_VERSION, version)) + version, + self.SCHEMA_VERSION, + "expected schema version %s or lower, got %s" % (self.SCHEMA_VERSION, version), + ) # initialize internals self.match_evaluator = MatchEvaluatorUtil(self) def maybe_skip_test(self, spec): # add any special-casing for skipping tests here - if client_context.storage_engine == 'mmapv1': - if 'Dirty explicit session is discarded' in spec['description']: - raise 
unittest.SkipTest( - "MMAPv1 does not support retryWrites=True") - elif 'Client side error in command starting transaction' in spec['description']: + if client_context.storage_engine == "mmapv1": + if "Dirty explicit session is discarded" in spec["description"]: + raise unittest.SkipTest("MMAPv1 does not support retryWrites=True") + elif "Client side error in command starting transaction" in spec["description"]: raise unittest.SkipTest("Implement PYTHON-1894") def process_error(self, exception, spec): - is_error = spec.get('isError') - is_client_error = spec.get('isClientError') - error_contains = spec.get('errorContains') - error_code = spec.get('errorCode') - error_code_name = spec.get('errorCodeName') - error_labels_contain = spec.get('errorLabelsContain') - error_labels_omit = spec.get('errorLabelsOmit') - expect_result = spec.get('expectResult') + is_error = spec.get("isError") + is_client_error = spec.get("isClientError") + error_contains = spec.get("errorContains") + error_code = spec.get("errorCode") + error_code_name = spec.get("errorCodeName") + error_labels_contain = spec.get("errorLabelsContain") + error_labels_omit = spec.get("errorLabelsOmit") + expect_result = spec.get("expectResult") if is_error: # already satisfied because exception was raised @@ -792,75 +802,72 @@ def process_error(self, exception, spec): self.assertIn(error_contains.lower(), errmsg) if error_code: - self.assertEqual( - error_code, exception.details.get('code')) + self.assertEqual(error_code, exception.details.get("code")) if error_code_name: - self.assertEqual( - error_code_name, exception.details.get('codeName')) + self.assertEqual(error_code_name, exception.details.get("codeName")) if error_labels_contain: - labels = [err_label for err_label in error_labels_contain - if exception.has_error_label(err_label)] + labels = [ + err_label + for err_label in error_labels_contain + if exception.has_error_label(err_label) + ] self.assertEqual(labels, error_labels_contain) if error_labels_omit: for err_label in error_labels_omit: if exception.has_error_label(err_label): - self.fail("Exception '%s' unexpectedly had label '%s'" % ( - exception, err_label)) + self.fail("Exception '%s' unexpectedly had label '%s'" % (exception, err_label)) if expect_result: if isinstance(exception, BulkWriteError): - result = parse_bulk_write_error_result( - exception) + result = parse_bulk_write_error_result(exception) self.match_evaluator.match_result(expect_result, result) else: - self.fail("expectResult can only be specified with %s " - "exceptions" % (BulkWriteError,)) + self.fail( + "expectResult can only be specified with %s " "exceptions" % (BulkWriteError,) + ) def __raise_if_unsupported(self, opname, target, *target_types): if not isinstance(target, target_types): - self.fail('Operation %s not supported for entity ' - 'of type %s' % (opname, type(target))) + self.fail( + "Operation %s not supported for entity " "of type %s" % (opname, type(target)) + ) def __entityOperation_createChangeStream(self, target, *args, **kwargs): - if client_context.storage_engine == 'mmapv1': + if client_context.storage_engine == "mmapv1": self.skipTest("MMAPv1 does not support change streams") - self.__raise_if_unsupported( - 'createChangeStream', target, MongoClient, Database, Collection) + self.__raise_if_unsupported("createChangeStream", target, MongoClient, Database, Collection) stream = target.watch(*args, **kwargs) self.addCleanup(stream.close) return stream def _clientOperation_createChangeStream(self, target, *args, **kwargs): - return 
self.__entityOperation_createChangeStream( - target, *args, **kwargs) + return self.__entityOperation_createChangeStream(target, *args, **kwargs) def _databaseOperation_createChangeStream(self, target, *args, **kwargs): - return self.__entityOperation_createChangeStream( - target, *args, **kwargs) + return self.__entityOperation_createChangeStream(target, *args, **kwargs) def _collectionOperation_createChangeStream(self, target, *args, **kwargs): - return self.__entityOperation_createChangeStream( - target, *args, **kwargs) + return self.__entityOperation_createChangeStream(target, *args, **kwargs) def _databaseOperation_runCommand(self, target, **kwargs): - self.__raise_if_unsupported('runCommand', target, Database) + self.__raise_if_unsupported("runCommand", target, Database) # Ensure the first key is the command name. - ordered_command = SON([(kwargs.pop('command_name'), 1)]) - ordered_command.update(kwargs['command']) - kwargs['command'] = ordered_command + ordered_command = SON([(kwargs.pop("command_name"), 1)]) + ordered_command.update(kwargs["command"]) + kwargs["command"] = ordered_command return target.command(**kwargs) def _databaseOperation_listCollections(self, target, *args, **kwargs): - if 'batch_size' in kwargs: - kwargs['cursor'] = {'batchSize': kwargs.pop('batch_size')} + if "batch_size" in kwargs: + kwargs["cursor"] = {"batchSize": kwargs.pop("batch_size")} cursor = target.list_collections(*args, **kwargs) return list(cursor) def __entityOperation_aggregate(self, target, *args, **kwargs): - self.__raise_if_unsupported('aggregate', target, Database, Collection) + self.__raise_if_unsupported("aggregate", target, Database, Collection) return list(target.aggregate(*args, **kwargs)) def _databaseOperation_aggregate(self, target, *args, **kwargs): @@ -870,86 +877,84 @@ def _collectionOperation_aggregate(self, target, *args, **kwargs): return self.__entityOperation_aggregate(target, *args, **kwargs) def _collectionOperation_find(self, target, *args, **kwargs): - self.__raise_if_unsupported('find', target, Collection) + self.__raise_if_unsupported("find", target, Collection) find_cursor = target.find(*args, **kwargs) return list(find_cursor) def _collectionOperation_createFindCursor(self, target, *args, **kwargs): - self.__raise_if_unsupported('find', target, Collection) - if 'filter' not in kwargs: + self.__raise_if_unsupported("find", target, Collection) + if "filter" not in kwargs: self.fail('createFindCursor requires a "filter" argument') cursor = NonLazyCursor(target.find(*args, **kwargs)) self.addCleanup(cursor.close) return cursor def _collectionOperation_listIndexes(self, target, *args, **kwargs): - if 'batch_size' in kwargs: - self.skipTest('PyMongo does not support batch_size for ' - 'list_indexes') + if "batch_size" in kwargs: + self.skipTest("PyMongo does not support batch_size for " "list_indexes") return target.list_indexes(*args, **kwargs) def _sessionOperation_withTransaction(self, target, *args, **kwargs): - if client_context.storage_engine == 'mmapv1': - self.skipTest('MMAPv1 does not support document-level locking') - self.__raise_if_unsupported('withTransaction', target, ClientSession) + if client_context.storage_engine == "mmapv1": + self.skipTest("MMAPv1 does not support document-level locking") + self.__raise_if_unsupported("withTransaction", target, ClientSession) return target.with_transaction(*args, **kwargs) def _sessionOperation_startTransaction(self, target, *args, **kwargs): - if client_context.storage_engine == 'mmapv1': - self.skipTest('MMAPv1 does 
not support document-level locking') - self.__raise_if_unsupported('startTransaction', target, ClientSession) + if client_context.storage_engine == "mmapv1": + self.skipTest("MMAPv1 does not support document-level locking") + self.__raise_if_unsupported("startTransaction", target, ClientSession) return target.start_transaction(*args, **kwargs) - def _changeStreamOperation_iterateUntilDocumentOrError(self, target, - *args, **kwargs): - self.__raise_if_unsupported( - 'iterateUntilDocumentOrError', target, ChangeStream) + def _changeStreamOperation_iterateUntilDocumentOrError(self, target, *args, **kwargs): + self.__raise_if_unsupported("iterateUntilDocumentOrError", target, ChangeStream) return next(target) def _cursor_iterateUntilDocumentOrError(self, target, *args, **kwargs): - self.__raise_if_unsupported( - 'iterateUntilDocumentOrError', target, NonLazyCursor) + self.__raise_if_unsupported("iterateUntilDocumentOrError", target, NonLazyCursor) return next(target) def _cursor_close(self, target, *args, **kwargs): - self.__raise_if_unsupported('close', target, NonLazyCursor) + self.__raise_if_unsupported("close", target, NonLazyCursor) return target.close() def run_entity_operation(self, spec): - target = self.entity_map[spec['object']] - opname = spec['name'] - opargs = spec.get('arguments') - expect_error = spec.get('expectError') - save_as_entity = spec.get('saveResultAsEntity') - expect_result = spec.get('expectResult') - ignore = spec.get('ignoreResultAndError') + target = self.entity_map[spec["object"]] + opname = spec["name"] + opargs = spec.get("arguments") + expect_error = spec.get("expectError") + save_as_entity = spec.get("saveResultAsEntity") + expect_result = spec.get("expectResult") + ignore = spec.get("ignoreResultAndError") if ignore and (expect_error or save_as_entity or expect_result): raise ValueError( - 'ignoreResultAndError is incompatible with saveResultAsEntity' - ', expectError, and expectResult') + "ignoreResultAndError is incompatible with saveResultAsEntity" + ", expectError, and expectResult" + ) if opargs: arguments = parse_spec_options(copy.deepcopy(opargs)) - prepare_spec_arguments(spec, arguments, camel_to_snake(opname), - self.entity_map, self.run_operations) + prepare_spec_arguments( + spec, arguments, camel_to_snake(opname), self.entity_map, self.run_operations + ) else: arguments = tuple() if isinstance(target, MongoClient): - method_name = '_clientOperation_%s' % (opname,) + method_name = "_clientOperation_%s" % (opname,) elif isinstance(target, Database): - method_name = '_databaseOperation_%s' % (opname,) + method_name = "_databaseOperation_%s" % (opname,) elif isinstance(target, Collection): - method_name = '_collectionOperation_%s' % (opname,) + method_name = "_collectionOperation_%s" % (opname,) elif isinstance(target, ChangeStream): - method_name = '_changeStreamOperation_%s' % (opname,) + method_name = "_changeStreamOperation_%s" % (opname,) elif isinstance(target, NonLazyCursor): - method_name = '_cursor_%s' % (opname,) + method_name = "_cursor_%s" % (opname,) elif isinstance(target, ClientSession): - method_name = '_sessionOperation_%s' % (opname,) + method_name = "_sessionOperation_%s" % (opname,) elif isinstance(target, GridFSBucket): raise NotImplementedError else: - method_name = 'doesNotExist' + method_name = "doesNotExist" try: method = getattr(self, method_name) @@ -957,8 +962,7 @@ def run_entity_operation(self, spec): try: cmd = getattr(target, camel_to_snake(opname)) except AttributeError: - self.fail('Unsupported operation %s on entity 
%s' % ( - opname, target)) + self.fail("Unsupported operation %s on entity %s" % (opname, target)) else: cmd = functools.partial(method, target) @@ -974,8 +978,9 @@ def run_entity_operation(self, spec): raise else: if expect_error: - self.fail('Excepted error %s but "%s" succeeded: %s' % ( - expect_error, opname, result)) + self.fail( + 'Excepted error %s but "%s" succeeded: %s' % (expect_error, opname, result) + ) if expect_result: actual = coerce_result(opname, result) @@ -986,42 +991,43 @@ def run_entity_operation(self, spec): def __set_fail_point(self, client, command_args): if not client_context.test_commands_enabled: - self.skipTest('Test commands must be enabled') + self.skipTest("Test commands must be enabled") - cmd_on = SON([('configureFailPoint', 'failCommand')]) + cmd_on = SON([("configureFailPoint", "failCommand")]) cmd_on.update(command_args) client.admin.command(cmd_on) self.addCleanup( - client.admin.command, - 'configureFailPoint', cmd_on['configureFailPoint'], mode='off') + client.admin.command, "configureFailPoint", cmd_on["configureFailPoint"], mode="off" + ) def _testOperation_failPoint(self, spec): self.__set_fail_point( - client=self.entity_map[spec['client']], - command_args=spec['failPoint']) + client=self.entity_map[spec["client"]], command_args=spec["failPoint"] + ) def _testOperation_targetedFailPoint(self, spec): - session = self.entity_map[spec['session']] + session = self.entity_map[spec["session"]] if not session._pinned_address: - self.fail("Cannot use targetedFailPoint operation with unpinned " - "session %s" % (spec['session'],)) + self.fail( + "Cannot use targetedFailPoint operation with unpinned " + "session %s" % (spec["session"],) + ) - client = single_client('%s:%s' % session._pinned_address) + client = single_client("%s:%s" % session._pinned_address) self.addCleanup(client.close) - self.__set_fail_point( - client=client, command_args=spec['failPoint']) + self.__set_fail_point(client=client, command_args=spec["failPoint"]) def _testOperation_assertSessionTransactionState(self, spec): - session = self.entity_map[spec['session']] - expected_state = getattr(_TxnState, spec['state'].upper()) + session = self.entity_map[spec["session"]] + expected_state = getattr(_TxnState, spec["state"].upper()) self.assertEqual(expected_state, session._transaction.state) def _testOperation_assertSessionPinned(self, spec): - session = self.entity_map[spec['session']] + session = self.entity_map[spec["session"]] self.assertIsNotNone(session._transaction.pinned_address) def _testOperation_assertSessionUnpinned(self, spec): - session = self.entity_map[spec['session']] + session = self.entity_map[spec["session"]] self.assertIsNone(session._pinned_address) self.assertIsNone(session._transaction.pinned_address) @@ -1031,61 +1037,61 @@ def __get_last_two_command_lsids(self, listener): if isinstance(event, CommandStartedEvent): cmd_started_events.append(event) if len(cmd_started_events) < 2: - self.fail('Needed 2 CommandStartedEvents to compare lsids, ' - 'got %s' % (len(cmd_started_events))) - return tuple([e.command['lsid'] for e in cmd_started_events][:2]) + self.fail( + "Needed 2 CommandStartedEvents to compare lsids, " + "got %s" % (len(cmd_started_events)) + ) + return tuple([e.command["lsid"] for e in cmd_started_events][:2]) def _testOperation_assertDifferentLsidOnLastTwoCommands(self, spec): - listener = self.entity_map.get_listener_for_client(spec['client']) + listener = self.entity_map.get_listener_for_client(spec["client"]) 
self.assertNotEqual(*self.__get_last_two_command_lsids(listener)) def _testOperation_assertSameLsidOnLastTwoCommands(self, spec): - listener = self.entity_map.get_listener_for_client(spec['client']) + listener = self.entity_map.get_listener_for_client(spec["client"]) self.assertEqual(*self.__get_last_two_command_lsids(listener)) def _testOperation_assertSessionDirty(self, spec): - session = self.entity_map[spec['session']] + session = self.entity_map[spec["session"]] self.assertTrue(session._server_session.dirty) def _testOperation_assertSessionNotDirty(self, spec): - session = self.entity_map[spec['session']] + session = self.entity_map[spec["session"]] return self.assertFalse(session._server_session.dirty) def _testOperation_assertCollectionExists(self, spec): - database_name = spec['databaseName'] - collection_name = spec['collectionName'] - collection_name_list = list( - self.client.get_database(database_name).list_collection_names()) + database_name = spec["databaseName"] + collection_name = spec["collectionName"] + collection_name_list = list(self.client.get_database(database_name).list_collection_names()) self.assertIn(collection_name, collection_name_list) def _testOperation_assertCollectionNotExists(self, spec): - database_name = spec['databaseName'] - collection_name = spec['collectionName'] - collection_name_list = list( - self.client.get_database(database_name).list_collection_names()) + database_name = spec["databaseName"] + collection_name = spec["collectionName"] + collection_name_list = list(self.client.get_database(database_name).list_collection_names()) self.assertNotIn(collection_name, collection_name_list) def _testOperation_assertIndexExists(self, spec): - collection = self.client[spec['databaseName']][spec['collectionName']] - index_names = [idx['name'] for idx in collection.list_indexes()] - self.assertIn(spec['indexName'], index_names) + collection = self.client[spec["databaseName"]][spec["collectionName"]] + index_names = [idx["name"] for idx in collection.list_indexes()] + self.assertIn(spec["indexName"], index_names) def _testOperation_assertIndexNotExists(self, spec): - collection = self.client[spec['databaseName']][spec['collectionName']] + collection = self.client[spec["databaseName"]][spec["collectionName"]] for index in collection.list_indexes(): - self.assertNotEqual(spec['indexName'], index['name']) + self.assertNotEqual(spec["indexName"], index["name"]) def _testOperation_assertNumberConnectionsCheckedOut(self, spec): - client = self.entity_map[spec['client']] + client = self.entity_map[spec["client"]] pool = get_pool(client) - self.assertEqual(spec['connections'], pool.active_sockets) + self.assertEqual(spec["connections"], pool.active_sockets) def _testOperation_loop(self, spec): - failure_key = spec.get('storeFailuresAsEntity') - error_key = spec.get('storeErrorsAsEntity') - successes_key = spec.get('storeSuccessesAsEntity') - iteration_key = spec.get('storeIterationsAsEntity') - iteration_limiter_key = spec.get('numIterations') + failure_key = spec.get("storeFailuresAsEntity") + error_key = spec.get("storeErrorsAsEntity") + successes_key = spec.get("storeSuccessesAsEntity") + iteration_key = spec.get("storeIterationsAsEntity") + iteration_limiter_key = spec.get("numIterations") for i in [failure_key, error_key]: if i: self.entity_map[i] = [] @@ -1114,37 +1120,34 @@ def _testOperation_loop(self, spec): key = error_key or failure_key if not key: raise - self.entity_map[key].append({ - "error": str(exc), - "time": time.time(), - "type": 
type(exc).__name__ - }) + self.entity_map[key].append( + {"error": str(exc), "time": time.time(), "type": type(exc).__name__} + ) def run_special_operation(self, spec): - opname = spec['name'] - method_name = '_testOperation_%s' % (opname,) + opname = spec["name"] + method_name = "_testOperation_%s" % (opname,) try: method = getattr(self, method_name) except AttributeError: - self.fail('Unsupported special test operation %s' % (opname,)) + self.fail("Unsupported special test operation %s" % (opname,)) else: - method(spec['arguments']) + method(spec["arguments"]) def run_operations(self, spec): for op in spec: - if op['object'] == 'testRunner': + if op["object"] == "testRunner": self.run_special_operation(op) else: self.run_entity_operation(op) - def check_events(self, spec): for event_spec in spec: - client_name = event_spec['client'] - events = event_spec['events'] + client_name = event_spec["client"] + events = event_spec["events"] # Valid types: 'command', 'cmap' - event_type = event_spec.get('eventType', 'command') - assert event_type in ('command', 'cmap') + event_type = event_spec.get("eventType", "command") + assert event_type in ("command", "cmap") listener = self.entity_map.get_listener_for_client(client_name) actual_events = listener.get_events(event_type) @@ -1153,68 +1156,64 @@ def check_events(self, spec): continue if len(events) > len(actual_events): - self.fail('Expected to see %s events, got %s' % ( - len(events), len(actual_events))) + self.fail("Expected to see %s events, got %s" % (len(events), len(actual_events))) for idx, expected_event in enumerate(events): - self.match_evaluator.match_event( - event_type, expected_event, actual_events[idx]) + self.match_evaluator.match_event(event_type, expected_event, actual_events[idx]) def verify_outcome(self, spec): for collection_data in spec: - coll_name = collection_data['collectionName'] - db_name = collection_data['databaseName'] - expected_documents = collection_data['documents'] + coll_name = collection_data["collectionName"] + db_name = collection_data["databaseName"] + expected_documents = collection_data["documents"] coll = self.client.get_database(db_name).get_collection( coll_name, read_preference=ReadPreference.PRIMARY, - read_concern=ReadConcern(level='local')) + read_concern=ReadConcern(level="local"), + ) if expected_documents: - sorted_expected_documents = sorted( - expected_documents, key=lambda doc: doc['_id']) - actual_documents = list( - coll.find({}, sort=[('_id', ASCENDING)])) - self.assertListEqual(sorted_expected_documents, - actual_documents) + sorted_expected_documents = sorted(expected_documents, key=lambda doc: doc["_id"]) + actual_documents = list(coll.find({}, sort=[("_id", ASCENDING)])) + self.assertListEqual(sorted_expected_documents, actual_documents) def run_scenario(self, spec, uri=None): # maybe skip test manually self.maybe_skip_test(spec) # process test-level runOnRequirements - run_on_spec = spec.get('runOnRequirements', []) + run_on_spec = spec.get("runOnRequirements", []) if not self.should_run_on(run_on_spec): - raise unittest.SkipTest('runOnRequirements not satisfied') + raise unittest.SkipTest("runOnRequirements not satisfied") # process skipReason - skip_reason = spec.get('skipReason', None) + skip_reason = spec.get("skipReason", None) if skip_reason is not None: - raise unittest.SkipTest('%s' % (skip_reason,)) + raise unittest.SkipTest("%s" % (skip_reason,)) # process createEntities self.entity_map = EntityMapUtil(self) - self.entity_map.create_entities_from_spec( - 
self.TEST_SPEC.get('createEntities', []), uri=uri) + self.entity_map.create_entities_from_spec(self.TEST_SPEC.get("createEntities", []), uri=uri) # process initialData - self.insert_initial_data(self.TEST_SPEC.get('initialData', [])) + self.insert_initial_data(self.TEST_SPEC.get("initialData", [])) # process operations - self.run_operations(spec['operations']) + self.run_operations(spec["operations"]) # process expectEvents - if 'expectEvents' in spec: - expect_events = spec['expectEvents'] - self.assertTrue(expect_events, 'expectEvents must be non-empty') + if "expectEvents" in spec: + expect_events = spec["expectEvents"] + self.assertTrue(expect_events, "expectEvents must be non-empty") self.check_events(expect_events) # process outcome - self.verify_outcome(spec.get('outcome', [])) + self.verify_outcome(spec.get("outcome", [])) class UnifiedSpecTestMeta(type): """Metaclass for generating test classes.""" + TEST_SPEC: Any EXPECTED_FAILURES: Any @@ -1224,12 +1223,12 @@ def __init__(cls, *args, **kwargs): def create_test(spec): def test_case(self): self.run_scenario(spec) + return test_case - for test_spec in cls.TEST_SPEC['tests']: - description = test_spec['description'] - test_name = 'test_%s' % (description.strip('. '). - replace(' ', '_').replace('.', '_'),) + for test_spec in cls.TEST_SPEC["tests"]: + description = test_spec["description"] + test_name = "test_%s" % (description.strip(". ").replace(" ", "_").replace(".", "_"),) test_method = create_test(copy.deepcopy(test_spec)) test_method.__name__ = str(test_name) @@ -1248,13 +1247,18 @@ def test_case(self): _SCHEMA_VERSION_MAJOR_TO_MIXIN_CLASS = { - KLASS.SCHEMA_VERSION[0]: KLASS for KLASS in _ALL_MIXIN_CLASSES} + KLASS.SCHEMA_VERSION[0]: KLASS for KLASS in _ALL_MIXIN_CLASSES +} -def generate_test_classes(test_path, module=__name__, class_name_prefix='', - expected_failures=[], - bypass_test_generation_errors=False, - **kwargs): +def generate_test_classes( + test_path, + module=__name__, + class_name_prefix="", + expected_failures=[], + bypass_test_generation_errors=False, + **kwargs +): """Method for generating test classes. Returns a dictionary where keys are the names of test classes and values are the test class objects.""" test_klasses = {} @@ -1263,9 +1267,11 @@ def test_base_class_factory(test_spec): """Utility that creates the base class to use for test generation. This is needed to ensure that cls.TEST_SPEC is appropriately set when the metaclass __init__ is invoked.""" + class SpecTestBase(with_metaclass(UnifiedSpecTestMeta)): # type: ignore TEST_SPEC = test_spec EXPECTED_FAILURES = expected_failures + return SpecTestBase for dirpath, _, filenames in os.walk(test_path): @@ -1277,30 +1283,34 @@ class SpecTestBase(with_metaclass(UnifiedSpecTestMeta)): # type: ignore # Use tz_aware=False to match how CodecOptions decodes # dates. 
opts = json_util.JSONOptions(tz_aware=False) - scenario_def = json_util.loads( - scenario_stream.read(), json_options=opts) + scenario_def = json_util.loads(scenario_stream.read(), json_options=opts) test_type = os.path.splitext(filename)[0] - snake_class_name = 'Test%s_%s_%s' % ( - class_name_prefix, dirname.replace('-', '_'), - test_type.replace('-', '_').replace('.', '_')) + snake_class_name = "Test%s_%s_%s" % ( + class_name_prefix, + dirname.replace("-", "_"), + test_type.replace("-", "_").replace(".", "_"), + ) class_name = snake_to_camel(snake_class_name) try: - schema_version = Version.from_string( - scenario_def['schemaVersion']) - mixin_class = _SCHEMA_VERSION_MAJOR_TO_MIXIN_CLASS.get( - schema_version[0]) + schema_version = Version.from_string(scenario_def["schemaVersion"]) + mixin_class = _SCHEMA_VERSION_MAJOR_TO_MIXIN_CLASS.get(schema_version[0]) if mixin_class is None: raise ValueError( - "test file '%s' has unsupported schemaVersion '%s'" % ( - fpath, schema_version)) - module_dict = {'__module__': module} + "test file '%s' has unsupported schemaVersion '%s'" + % (fpath, schema_version) + ) + module_dict = {"__module__": module} module_dict.update(kwargs) test_klasses[class_name] = type( class_name, - (mixin_class, test_base_class_factory(scenario_def),), - module_dict) + ( + mixin_class, + test_base_class_factory(scenario_def), + ), + module_dict, + ) except Exception: if bypass_test_generation_errors: continue diff --git a/test/utils.py b/test/utils.py index b0b0c87c47..2c50797266 100644 --- a/test/utils.py +++ b/test/utils.py @@ -26,16 +26,14 @@ import time import unittest import warnings - from collections import abc, defaultdict from functools import partial +from test import client_context, db_pwd, db_user from bson import json_util from bson.objectid import ObjectId from bson.son import SON - -from pymongo import (MongoClient, - monitoring, operations, read_preferences) +from pymongo import MongoClient, monitoring, operations, read_preferences from pymongo.collection import ReturnDocument from pymongo.errors import ConfigurationError, OperationFailure from pymongo.hello import HelloCompat @@ -43,16 +41,10 @@ from pymongo.pool import _CancellationContext, _PoolGeneration from pymongo.read_concern import ReadConcern from pymongo.read_preferences import ReadPreference -from pymongo.server_selectors import (any_server_selector, - writable_server_selector) +from pymongo.server_selectors import any_server_selector, writable_server_selector from pymongo.server_type import SERVER_TYPE -from pymongo.write_concern import WriteConcern from pymongo.uri_parser import parse_uri - -from test import (client_context, - db_user, - db_pwd) - +from pymongo.write_concern import WriteConcern IMPOSSIBLE_WRITE_CONCERN = WriteConcern(w=50) @@ -83,8 +75,7 @@ def matching(self, matcher): def wait_for_event(self, event, count): """Wait for a number of events to be published, or fail.""" - wait_until(lambda: self.event_count(event) >= count, - 'find %s %s event(s)' % (count, event)) + wait_until(lambda: self.event_count(event) >= count, "find %s %s event(s)" % (count, event)) class CMAPListener(BaseListener, monitoring.ConnectionPoolListener): @@ -123,22 +114,21 @@ def pool_closed(self, event): class EventListener(monitoring.CommandListener): - def __init__(self): self.results = defaultdict(list) def started(self, event): - self.results['started'].append(event) + self.results["started"].append(event) def succeeded(self, event): - self.results['succeeded'].append(event) + 
self.results["succeeded"].append(event) def failed(self, event): - self.results['failed'].append(event) + self.results["failed"].append(event) def started_command_names(self): """Return list of command names started.""" - return [event.command_name for event in self.results['started']] + return [event.command_name for event in self.results["started"]] def reset(self): """Reset the state of this listener.""" @@ -150,13 +140,13 @@ def __init__(self): self.results = defaultdict(list) def closed(self, event): - self.results['closed'].append(event) + self.results["closed"].append(event) def description_changed(self, event): - self.results['description_changed'].append(event) + self.results["description_changed"].append(event) def opened(self, event): - self.results['opened'].append(event) + self.results["opened"].append(event) def reset(self): """Reset the state of this listener.""" @@ -164,7 +154,6 @@ def reset(self): class AllowListEventListener(EventListener): - def __init__(self, *commands): self.commands = set(commands) super(AllowListEventListener, self).__init__() @@ -184,6 +173,7 @@ def failed(self, event): class OvertCommandListener(EventListener): """A CommandListener that ignores sensitive commands.""" + def started(self, event): if event.command_name.lower() not in _SENSITIVE_COMMANDS: super(OvertCommandListener, self).started(event) @@ -221,13 +211,13 @@ def reset(self): self.results = [] -class ServerEventListener(_ServerEventListener, - monitoring.ServerListener): +class ServerEventListener(_ServerEventListener, monitoring.ServerListener): """Listens to Server events.""" -class ServerAndTopologyEventListener(ServerEventListener, # type: ignore - monitoring.TopologyListener): +class ServerAndTopologyEventListener( # type: ignore[misc] + ServerEventListener, monitoring.TopologyListener +): """Listens to Server and Topology events.""" @@ -300,6 +290,7 @@ def remove_stale_sockets(self, *args, **kwargs): class ScenarioDict(dict): """Dict that returns {} for any unknown key, recursively.""" + def __init__(self, data): def convert(v): if isinstance(v, abc.Mapping): @@ -322,6 +313,7 @@ def __getitem__(self, item): class CompareType(object): """Class that compares equal to any object of the given type.""" + def __init__(self, type): self.type = type @@ -335,6 +327,7 @@ def __ne__(self, other): class FunctionCallRecorder(object): """Utility class to wrap a callable and record its invocations.""" + def __init__(self, function): self._function = function self._call_list = [] @@ -359,6 +352,7 @@ def call_count(self): class TestCreator(object): """Class to create test cases from specifications.""" + def __init__(self, create_test, test_class, test_path): """Create a TestCreator object. @@ -372,7 +366,7 @@ def __init__(self, create_test, test_class, test_path): test case. - `test_path`: path to the directory containing the JSON files with the test specifications. 
- """ + """ self._create_test = create_test self._test_class = test_class self.test_path = test_path @@ -380,67 +374,63 @@ def __init__(self, create_test, test_class, test_path): def _ensure_min_max_server_version(self, scenario_def, method): """Test modifier that enforces a version range for the server on a test case.""" - if 'minServerVersion' in scenario_def: - min_ver = tuple( - int(elt) for - elt in scenario_def['minServerVersion'].split('.')) + if "minServerVersion" in scenario_def: + min_ver = tuple(int(elt) for elt in scenario_def["minServerVersion"].split(".")) if min_ver is not None: method = client_context.require_version_min(*min_ver)(method) - if 'maxServerVersion' in scenario_def: - max_ver = tuple( - int(elt) for - elt in scenario_def['maxServerVersion'].split('.')) + if "maxServerVersion" in scenario_def: + max_ver = tuple(int(elt) for elt in scenario_def["maxServerVersion"].split(".")) if max_ver is not None: method = client_context.require_version_max(*max_ver)(method) - if 'serverless' in scenario_def: - serverless = scenario_def['serverless'] + if "serverless" in scenario_def: + serverless = scenario_def["serverless"] if serverless == "require": serverless_satisfied = client_context.serverless elif serverless == "forbid": serverless_satisfied = not client_context.serverless - else: # unset or "allow" + else: # unset or "allow" serverless_satisfied = True method = unittest.skipUnless( - serverless_satisfied, - "Serverless requirement not satisfied")(method) + serverless_satisfied, "Serverless requirement not satisfied" + )(method) return method @staticmethod def valid_topology(run_on_req): return client_context.is_topology_type( - run_on_req.get('topology', ['single', 'replicaset', 'sharded', - 'load-balanced'])) + run_on_req.get("topology", ["single", "replicaset", "sharded", "load-balanced"]) + ) @staticmethod def min_server_version(run_on_req): - version = run_on_req.get('minServerVersion') + version = run_on_req.get("minServerVersion") if version: - min_ver = tuple(int(elt) for elt in version.split('.')) + min_ver = tuple(int(elt) for elt in version.split(".")) return client_context.version >= min_ver return True @staticmethod def max_server_version(run_on_req): - version = run_on_req.get('maxServerVersion') + version = run_on_req.get("maxServerVersion") if version: - max_ver = tuple(int(elt) for elt in version.split('.')) + max_ver = tuple(int(elt) for elt in version.split(".")) return client_context.version <= max_ver return True @staticmethod def valid_auth_enabled(run_on_req): - if 'authEnabled' in run_on_req: - if run_on_req['authEnabled']: + if "authEnabled" in run_on_req: + if run_on_req["authEnabled"]: return client_context.auth_enabled return not client_context.auth_enabled return True @staticmethod def serverless_ok(run_on_req): - serverless = run_on_req['serverless'] + serverless = run_on_req["serverless"] if serverless == "require": return client_context.serverless elif serverless == "forbid": @@ -449,30 +439,31 @@ def serverless_ok(run_on_req): return True def should_run_on(self, scenario_def): - run_on = scenario_def.get('runOn', []) + run_on = scenario_def.get("runOn", []) if not run_on: # Always run these tests. 
return True for req in run_on: - if (self.valid_topology(req) and - self.min_server_version(req) and - self.max_server_version(req) and - self.valid_auth_enabled(req) and - self.serverless_ok(req)): + if ( + self.valid_topology(req) + and self.min_server_version(req) + and self.max_server_version(req) + and self.valid_auth_enabled(req) + and self.serverless_ok(req) + ): return True return False def ensure_run_on(self, scenario_def, method): """Test modifier that enforces a 'runOn' on a test case.""" return client_context._require( - lambda: self.should_run_on(scenario_def), - "runOn not satisfied", - method) + lambda: self.should_run_on(scenario_def), "runOn not satisfied", method + ) def tests(self, scenario_def): """Allow CMAP spec test to override the location of test.""" - return scenario_def['tests'] + return scenario_def["tests"] def create_tests(self): for dirpath, _, filenames in os.walk(self.test_path): @@ -484,25 +475,22 @@ def create_tests(self): # dates. opts = json_util.JSONOptions(tz_aware=False) scenario_def = ScenarioDict( - json_util.loads(scenario_stream.read(), - json_options=opts)) + json_util.loads(scenario_stream.read(), json_options=opts) + ) test_type = os.path.splitext(filename)[0] # Construct test from scenario. for test_def in self.tests(scenario_def): - test_name = 'test_%s_%s_%s' % ( + test_name = "test_%s_%s_%s" % ( dirname, - test_type.replace("-", "_").replace('.', '_'), - str(test_def['description'].replace(" ", "_").replace( - '.', '_'))) + test_type.replace("-", "_").replace(".", "_"), + str(test_def["description"].replace(" ", "_").replace(".", "_")), + ) - new_test = self._create_test( - scenario_def, test_def, test_name) - new_test = self._ensure_min_max_server_version( - scenario_def, new_test) - new_test = self.ensure_run_on( - scenario_def, new_test) + new_test = self._create_test(scenario_def, test_def, test_name) + new_test = self._ensure_min_max_server_version(scenario_def, new_test) + new_test = self.ensure_run_on(scenario_def, new_test) new_test.__name__ = test_name setattr(self._test_class, new_test.__name__, new_test) @@ -514,35 +502,36 @@ def _connection_string(h): return "mongodb://%s" % (str(h),) -def _mongo_client(host, port, authenticate=True, directConnection=None, - **kwargs): +def _mongo_client(host, port, authenticate=True, directConnection=None, **kwargs): """Create a new client over SSL/TLS if necessary.""" host = host or client_context.host port = port or client_context.port client_options: dict = client_context.default_client_options.copy() if client_context.replica_set_name and not directConnection: - client_options['replicaSet'] = client_context.replica_set_name + client_options["replicaSet"] = client_context.replica_set_name if directConnection is not None: - client_options['directConnection'] = directConnection + client_options["directConnection"] = directConnection client_options.update(kwargs) uri = _connection_string(host) if client_context.auth_enabled and authenticate: # Only add the default username or password if one is not provided. 
res = parse_uri(uri) - if (not res['username'] and not res['password'] and - 'username' not in client_options and - 'password' not in client_options): - client_options['username'] = db_user - client_options['password'] = db_pwd + if ( + not res["username"] + and not res["password"] + and "username" not in client_options + and "password" not in client_options + ): + client_options["username"] = db_user + client_options["password"] = db_pwd return MongoClient(uri, port, **client_options) def single_client_noauth(h=None, p=None, **kwargs): """Make a direct connection. Don't authenticate.""" - return _mongo_client(h, p, authenticate=False, - directConnection=True, **kwargs) + return _mongo_client(h, p, authenticate=False, directConnection=True, **kwargs) def single_client(h=None, p=None, **kwargs): @@ -585,17 +574,16 @@ def ensure_all_connected(client): that are configured on the client. """ hello = client.admin.command(HelloCompat.LEGACY_CMD) - if 'setName' not in hello: + if "setName" not in hello: raise ConfigurationError("cluster is not a replica set") - target_host_list = set(hello['hosts']) - connected_host_list = set([hello['me']]) - admindb = client.get_database('admin') + target_host_list = set(hello["hosts"]) + connected_host_list = set([hello["me"]]) + admindb = client.get_database("admin") # Run hello until we have connected to each host at least once. while connected_host_list != target_host_list: - hello = admindb.command(HelloCompat.LEGACY_CMD, - read_preference=ReadPreference.SECONDARY) + hello = admindb.command(HelloCompat.LEGACY_CMD, read_preference=ReadPreference.SECONDARY) connected_host_list.update([hello["me"]]) @@ -612,19 +600,19 @@ def oid_generated_on_process(oid): def delay(sec): - return '''function() { sleep(%f * 1000); return true; }''' % sec + return """function() { sleep(%f * 1000); return true; }""" % sec def get_command_line(client): - command_line = client.admin.command('getCmdLineOpts') - assert command_line['ok'] == 1, "getCmdLineOpts() failed" + command_line = client.admin.command("getCmdLineOpts") + assert command_line["ok"] == 1, "getCmdLineOpts() failed" return command_line def camel_to_snake(camel): # Regex to convert CamelCase to snake_case. - snake = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', camel) - return re.sub('([a-z0-9])([A-Z])', r'\1_\2', snake).lower() + snake = re.sub("(.)([A-Z][a-z]+)", r"\1_\2", camel) + return re.sub("([a-z0-9])([A-Z])", r"\1_\2", snake).lower() def camel_to_upper_camel(camel): @@ -640,21 +628,18 @@ def camel_to_snake_args(arguments): def snake_to_camel(snake): # Regex to convert snake_case to lowerCamelCase. 
- return re.sub(r'_([a-z])', lambda m: m.group(1).upper(), snake) + return re.sub(r"_([a-z])", lambda m: m.group(1).upper(), snake) def parse_collection_options(opts): - if 'readPreference' in opts: - opts['read_preference'] = parse_read_preference( - opts.pop('readPreference')) + if "readPreference" in opts: + opts["read_preference"] = parse_read_preference(opts.pop("readPreference")) - if 'writeConcern' in opts: - opts['write_concern'] = WriteConcern( - **dict(opts.pop('writeConcern'))) + if "writeConcern" in opts: + opts["write_concern"] = WriteConcern(**dict(opts.pop("writeConcern"))) - if 'readConcern' in opts: - opts['read_concern'] = ReadConcern( - **dict(opts.pop('readConcern'))) + if "readConcern" in opts: + opts["read_concern"] = ReadConcern(**dict(opts.pop("readConcern"))) return opts @@ -666,11 +651,11 @@ def server_started_with_option(client, cmdline_opt, config_opt): - `config_opt`: The config file option (i.e. nojournal) """ command_line = get_command_line(client) - if 'parsed' in command_line: - parsed = command_line['parsed'] + if "parsed" in command_line: + parsed = command_line["parsed"] if config_opt in parsed: return parsed[config_opt] - argv = command_line['argv'] + argv = command_line["argv"] return cmdline_opt in argv @@ -678,39 +663,37 @@ def server_started_with_auth(client): try: command_line = get_command_line(client) except OperationFailure as e: - msg = e.details.get('errmsg', '') # type: ignore - if e.code == 13 or 'unauthorized' in msg or 'login' in msg: + msg = e.details.get("errmsg", "") # type: ignore + if e.code == 13 or "unauthorized" in msg or "login" in msg: # Unauthorized. return True raise # MongoDB >= 2.0 - if 'parsed' in command_line: - parsed = command_line['parsed'] + if "parsed" in command_line: + parsed = command_line["parsed"] # MongoDB >= 2.6 - if 'security' in parsed: - security = parsed['security'] + if "security" in parsed: + security = parsed["security"] # >= rc3 - if 'authorization' in security: - return security['authorization'] == 'enabled' + if "authorization" in security: + return security["authorization"] == "enabled" # < rc3 - return security.get('auth', False) or bool(security.get('keyFile')) - return parsed.get('auth', False) or bool(parsed.get('keyFile')) + return security.get("auth", False) or bool(security.get("keyFile")) + return parsed.get("auth", False) or bool(parsed.get("keyFile")) # Legacy - argv = command_line['argv'] - return '--auth' in argv or '--keyFile' in argv + argv = command_line["argv"] + return "--auth" in argv or "--keyFile" in argv def drop_collections(db): # Drop all non-system collections in this database. - for coll in db.list_collection_names( - filter={"name": {"$regex": r"^(?!system\.)"}}): + for coll in db.list_collection_names(filter={"name": {"$regex": r"^(?!system\.)"}}): db.drop_collection(coll) def remove_all_users(db): - db.command("dropAllUsersFromDatabase", 1, - writeConcern={"w": client_context.w}) + db.command("dropAllUsersFromDatabase", 1, writeConcern={"w": client_context.w}) def joinall(threads): @@ -726,7 +709,7 @@ def connected(client): # Ignore warning that ping is always routed to primary even # if client's read preference isn't PRIMARY. warnings.simplefilter("ignore", UserWarning) - client.admin.command('ping') # Force connection. + client.admin.command("ping") # Force connection. return client @@ -745,7 +728,7 @@ def wait_until(predicate, success_description, timeout=10): Returns the predicate's first true value. 
""" start = time.time() - interval = min(float(timeout)/100, 0.1) + interval = min(float(timeout) / 100, 0.1) while True: retval = predicate() if retval: @@ -759,17 +742,17 @@ def wait_until(predicate, success_description, timeout=10): def repl_set_step_down(client, **kwargs): """Run replSetStepDown, first unfreezing a secondary with replSetFreeze.""" - cmd = SON([('replSetStepDown', 1)]) + cmd = SON([("replSetStepDown", 1)]) cmd.update(kwargs) # Unfreeze a secondary to ensure a speedy election. - client.admin.command( - 'replSetFreeze', 0, read_preference=ReadPreference.SECONDARY) + client.admin.command("replSetFreeze", 0, read_preference=ReadPreference.SECONDARY) client.admin.command(cmd) + def is_mongos(client): res = client.admin.command(HelloCompat.LEGACY_CMD) - return res.get('msg', '') == 'isdbgrid' + return res.get("msg", "") == "isdbgrid" def assertRaisesExactly(cls, fn, *args, **kwargs): @@ -781,8 +764,7 @@ def assertRaisesExactly(cls, fn, *args, **kwargs): try: fn(*args, **kwargs) except Exception as e: - assert e.__class__ == cls, "got %s, expected %s" % ( - e.__class__.__name__, cls.__name__) + assert e.__class__ == cls, "got %s, expected %s" % (e.__class__.__name__, cls.__name__) else: raise AssertionError("%s not raised" % cls) @@ -797,6 +779,7 @@ def _ignore_deprecations(): def ignore_deprecations(wrapped=None): """A context manager or a decorator.""" if wrapped: + @functools.wraps(wrapped) def wrapper(*args, **kwargs): with _ignore_deprecations(): @@ -809,7 +792,6 @@ def wrapper(*args, **kwargs): class DeprecationFilter(object): - def __init__(self, action="ignore"): """Start filtering deprecations.""" self.warn_context = warnings.catch_warnings() @@ -831,9 +813,7 @@ def get_pool(client): def get_pools(client): """Get all pools.""" - return [ - server.pool for server in - client._get_topology().select_servers(any_server_selector)] + return [server.pool for server in client._get_topology().select_servers(any_server_selector)] # Constants for run_threads and lazy_client_trial. 
@@ -900,7 +880,9 @@ def gevent_monkey_patched(): warnings.simplefilter("ignore", ImportWarning) try: import socket + import gevent.socket + return socket.socket is gevent.socket.socket except ImportError: return False @@ -909,8 +891,8 @@ def gevent_monkey_patched(): def eventlet_monkey_patched(): """Check if eventlet's monkey patching is active.""" import threading - return (threading.current_thread.__module__ == - 'eventlet.green.threading') + + return threading.current_thread.__module__ == "eventlet.green.threading" def is_greenthread_patched(): @@ -921,20 +903,19 @@ def disable_replication(client): """Disable replication on all secondaries.""" for host, port in client.secondaries: secondary = single_client(host, port) - secondary.admin.command('configureFailPoint', 'stopReplProducer', - mode='alwaysOn') + secondary.admin.command("configureFailPoint", "stopReplProducer", mode="alwaysOn") def enable_replication(client): """Enable replication on all secondaries.""" for host, port in client.secondaries: secondary = single_client(host, port) - secondary.admin.command('configureFailPoint', 'stopReplProducer', - mode='off') + secondary.admin.command("configureFailPoint", "stopReplProducer", mode="off") class ExceptionCatchingThread(threading.Thread): """A thread that stores any exception encountered from run().""" + def __init__(self, *args, **kwargs): self.exc = None super(ExceptionCatchingThread, self).__init__(*args, **kwargs) @@ -949,13 +930,14 @@ def run(self): def parse_read_preference(pref): # Make first letter lowercase to match read_pref's modes. - mode_string = pref.get('mode', 'primary') + mode_string = pref.get("mode", "primary") mode_string = mode_string[:1].lower() + mode_string[1:] mode = read_preferences.read_pref_mode_from_name(mode_string) - max_staleness = pref.get('maxStalenessSeconds', -1) - tag_sets = pref.get('tag_sets') + max_staleness = pref.get("maxStalenessSeconds", -1) + tag_sets = pref.get("tag_sets") return read_preferences.make_read_preference( - mode, tag_sets=tag_sets, max_staleness=max_staleness) + mode, tag_sets=tag_sets, max_staleness=max_staleness + ) def server_name_to_type(name): @@ -963,16 +945,16 @@ def server_name_to_type(name): # Special case, some tests in the spec include the PossiblePrimary # type, but only single-threaded drivers need that type. We call # possible primaries Unknown. 
- if name == 'PossiblePrimary': + if name == "PossiblePrimary": return SERVER_TYPE.Unknown return getattr(SERVER_TYPE, name) def cat_files(dest, *sources): """Cat multiple files into dest.""" - with open(dest, 'wb') as fdst: + with open(dest, "wb") as fdst: for src in sources: - with open(src, 'rb') as fsrc: + with open(src, "rb") as fsrc: shutil.copyfileobj(fsrc, fdst) @@ -982,65 +964,61 @@ def assertion_context(msg): try: yield except AssertionError as exc: - msg = '%s (%s)' % (exc, msg) + msg = "%s (%s)" % (exc, msg) exc_type, exc_val, exc_tb = sys.exc_info() assert exc_type is not None raise exc_type(exc_val).with_traceback(exc_tb) def parse_spec_options(opts): - if 'readPreference' in opts: - opts['read_preference'] = parse_read_preference( - opts.pop('readPreference')) + if "readPreference" in opts: + opts["read_preference"] = parse_read_preference(opts.pop("readPreference")) - if 'writeConcern' in opts: - opts['write_concern'] = WriteConcern( - **dict(opts.pop('writeConcern'))) + if "writeConcern" in opts: + opts["write_concern"] = WriteConcern(**dict(opts.pop("writeConcern"))) - if 'readConcern' in opts: - opts['read_concern'] = ReadConcern( - **dict(opts.pop('readConcern'))) + if "readConcern" in opts: + opts["read_concern"] = ReadConcern(**dict(opts.pop("readConcern"))) - if 'maxTimeMS' in opts: - opts['max_time_ms'] = opts.pop('maxTimeMS') + if "maxTimeMS" in opts: + opts["max_time_ms"] = opts.pop("maxTimeMS") - if 'maxCommitTimeMS' in opts: - opts['max_commit_time_ms'] = opts.pop('maxCommitTimeMS') + if "maxCommitTimeMS" in opts: + opts["max_commit_time_ms"] = opts.pop("maxCommitTimeMS") - if 'hint' in opts: - hint = opts.pop('hint') + if "hint" in opts: + hint = opts.pop("hint") if not isinstance(hint, str): hint = list(hint.items()) - opts['hint'] = hint + opts["hint"] = hint # Properly format 'hint' arguments for the Bulk API tests. - if 'requests' in opts: - reqs = opts.pop('requests') + if "requests" in opts: + reqs = opts.pop("requests") for req in reqs: - if 'name' in req: + if "name" in req: # CRUD v2 format - args = req.pop('arguments', {}) - if 'hint' in args: - hint = args.pop('hint') + args = req.pop("arguments", {}) + if "hint" in args: + hint = args.pop("hint") if not isinstance(hint, str): hint = list(hint.items()) - args['hint'] = hint - req['arguments'] = args + args["hint"] = hint + req["arguments"] = args else: # Unified test format bulk_model, spec = next(iter(req.items())) - if 'hint' in spec: - hint = spec.pop('hint') + if "hint" in spec: + hint = spec.pop("hint") if not isinstance(hint, str): hint = list(hint.items()) - spec['hint'] = hint - opts['requests'] = reqs + spec["hint"] = hint + opts["requests"] = reqs return dict(opts) -def prepare_spec_arguments(spec, arguments, opname, entity_map, - with_txn_callback): +def prepare_spec_arguments(spec, arguments, opname, entity_map, with_txn_callback): for arg_name in list(arguments): c2s = camel_to_snake(arg_name) # PyMongo accepts sort as list of tuples. @@ -1051,8 +1029,7 @@ def prepare_spec_arguments(spec, arguments, opname, entity_map, if arg_name == "fieldName": arguments["key"] = arguments.pop(arg_name) # Aggregate uses "batchSize", while find uses batch_size. - elif ((arg_name == "batchSize" or arg_name == "allowDiskUse") and - opname == "aggregate"): + elif (arg_name == "batchSize" or arg_name == "allowDiskUse") and opname == "aggregate": continue # Requires boolean returnDocument. 
elif arg_name == "returnDocument": @@ -1061,7 +1038,7 @@ def prepare_spec_arguments(spec, arguments, opname, entity_map, # Parse each request into a bulk write model. requests = [] for request in arguments["requests"]: - if 'name' in request: + if "name" in request: # CRUD v2 format bulk_model = camel_to_upper_camel(request["name"]) bulk_class = getattr(operations, bulk_model) @@ -1074,39 +1051,37 @@ def prepare_spec_arguments(spec, arguments, opname, entity_map, requests.append(bulk_class(**dict(bulk_arguments))) arguments["requests"] = requests elif arg_name == "session": - arguments['session'] = entity_map[arguments['session']] - elif (opname in ('command', 'run_admin_command') and - arg_name == 'command'): + arguments["session"] = entity_map[arguments["session"]] + elif opname in ("command", "run_admin_command") and arg_name == "command": # Ensure the first key is the command name. - ordered_command = SON([(spec['command_name'], 1)]) - ordered_command.update(arguments['command']) - arguments['command'] = ordered_command - elif opname == 'open_download_stream' and arg_name == 'id': - arguments['file_id'] = arguments.pop(arg_name) - elif opname != 'find' and c2s == 'max_time_ms': + ordered_command = SON([(spec["command_name"], 1)]) + ordered_command.update(arguments["command"]) + arguments["command"] = ordered_command + elif opname == "open_download_stream" and arg_name == "id": + arguments["file_id"] = arguments.pop(arg_name) + elif opname != "find" and c2s == "max_time_ms": # find is the only method that accepts snake_case max_time_ms. # All other methods take kwargs which must use the server's # camelCase maxTimeMS. See PYTHON-1855. - arguments['maxTimeMS'] = arguments.pop('max_time_ms') - elif opname == 'with_transaction' and arg_name == 'callback': - if 'operations' in arguments[arg_name]: + arguments["maxTimeMS"] = arguments.pop("max_time_ms") + elif opname == "with_transaction" and arg_name == "callback": + if "operations" in arguments[arg_name]: # CRUD v2 format - callback_ops = arguments[arg_name]['operations'] + callback_ops = arguments[arg_name]["operations"] else: # Unified test format callback_ops = arguments[arg_name] - arguments['callback'] = lambda _: with_txn_callback( - copy.deepcopy(callback_ops)) - elif opname == 'drop_collection' and arg_name == 'collection': - arguments['name_or_collection'] = arguments.pop(arg_name) - elif opname == 'create_collection': - if arg_name == 'collection': - arguments['name'] = arguments.pop(arg_name) + arguments["callback"] = lambda _: with_txn_callback(copy.deepcopy(callback_ops)) + elif opname == "drop_collection" and arg_name == "collection": + arguments["name_or_collection"] = arguments.pop(arg_name) + elif opname == "create_collection": + if arg_name == "collection": + arguments["name"] = arguments.pop(arg_name) # Any other arguments to create_collection are passed through # **kwargs. 
- elif opname == 'create_index' and arg_name == 'keys': - arguments['keys'] = list(arguments.pop(arg_name).items()) - elif opname == 'drop_index' and arg_name == 'name': - arguments['index_or_name'] = arguments.pop(arg_name) + elif opname == "create_index" and arg_name == "keys": + arguments["keys"] = list(arguments.pop(arg_name).items()) + elif opname == "drop_index" and arg_name == "name": + arguments["index_or_name"] = arguments.pop(arg_name) else: arguments[c2s] = arguments.pop(arg_name) diff --git a/test/utils_selection_tests.py b/test/utils_selection_tests.py index 76125b6f15..e693fc25f0 100644 --- a/test/utils_selection_tests.py +++ b/test/utils_selection_tests.py @@ -20,37 +20,37 @@ sys.path[0:0] = [""] +from test import unittest +from test.pymongo_mocks import DummyMonitor +from test.utils import MockPool, parse_read_preference + from bson import json_util -from pymongo.common import clean_node, HEARTBEAT_FREQUENCY +from pymongo.common import HEARTBEAT_FREQUENCY, clean_node from pymongo.errors import AutoReconnect, ConfigurationError from pymongo.hello import Hello, HelloCompat from pymongo.server_description import ServerDescription -from pymongo.settings import TopologySettings from pymongo.server_selectors import writable_server_selector +from pymongo.settings import TopologySettings from pymongo.topology import Topology -from test import unittest -from test.utils import MockPool, parse_read_preference -from test.pymongo_mocks import DummyMonitor def get_addresses(server_list): seeds = [] hosts = [] for server in server_list: - seeds.append(clean_node(server['address'])) - hosts.append(server['address']) + seeds.append(clean_node(server["address"])) + hosts.append(server["address"]) return seeds, hosts def make_last_write_date(server): epoch = datetime.datetime.utcfromtimestamp(0) - millis = server.get('lastWrite', {}).get('lastWriteDate') + millis = server.get("lastWrite", {}).get("lastWriteDate") if millis: diff = ((millis % 1000) + 1000) % 1000 seconds = (millis - diff) / 1000 micros = diff * 1000 - return epoch + datetime.timedelta( - seconds=seconds, microseconds=micros) + return epoch + datetime.timedelta(seconds=seconds, microseconds=micros) else: # "Unknown" server. 
return epoch @@ -58,61 +58,59 @@ def make_last_write_date(server): def make_server_description(server, hosts): """Make a ServerDescription from server info in a JSON test.""" - server_type = server['type'] + server_type = server["type"] if server_type in ("Unknown", "PossiblePrimary"): - return ServerDescription(clean_node(server['address']), Hello({})) + return ServerDescription(clean_node(server["address"]), Hello({})) - hello_response = {'ok': True, 'hosts': hosts} + hello_response = {"ok": True, "hosts": hosts} if server_type not in ("Standalone", "Mongos", "RSGhost"): - hello_response['setName'] = "rs" + hello_response["setName"] = "rs" if server_type == "RSPrimary": hello_response[HelloCompat.LEGACY_CMD] = True elif server_type == "RSSecondary": - hello_response['secondary'] = True + hello_response["secondary"] = True elif server_type == "Mongos": - hello_response['msg'] = 'isdbgrid' + hello_response["msg"] = "isdbgrid" elif server_type == "RSGhost": - hello_response['isreplicaset'] = True + hello_response["isreplicaset"] = True elif server_type == "RSArbiter": - hello_response['arbiterOnly'] = True + hello_response["arbiterOnly"] = True - hello_response['lastWrite'] = { - 'lastWriteDate': make_last_write_date(server) - } + hello_response["lastWrite"] = {"lastWriteDate": make_last_write_date(server)} - for field in 'maxWireVersion', 'tags', 'idleWritePeriodMillis': + for field in "maxWireVersion", "tags", "idleWritePeriodMillis": if field in server: hello_response[field] = server[field] - hello_response.setdefault('maxWireVersion', 6) + hello_response.setdefault("maxWireVersion", 6) # Sets _last_update_time to now. - sd = ServerDescription(clean_node(server['address']), - Hello(hello_response), - round_trip_time=server['avg_rtt_ms'] / 1000.0) + sd = ServerDescription( + clean_node(server["address"]), + Hello(hello_response), + round_trip_time=server["avg_rtt_ms"] / 1000.0, + ) - if 'lastUpdateTime' in server: - sd._last_update_time = server['lastUpdateTime'] / 1000.0 # ms to sec. + if "lastUpdateTime" in server: + sd._last_update_time = server["lastUpdateTime"] / 1000.0 # ms to sec. return sd def get_topology_type_name(scenario_def): - td = scenario_def['topology_description'] - name = td['type'] - if name == 'Unknown': + td = scenario_def["topology_description"] + name = td["type"] + if name == "Unknown": # PyMongo never starts a topology in type Unknown. - return 'Sharded' if len(td['servers']) > 1 else 'Single' + return "Sharded" if len(td["servers"]) > 1 else "Single" else: return name def get_topology_settings_dict(**kwargs): settings = dict( - monitor_class=DummyMonitor, - heartbeat_frequency=HEARTBEAT_FREQUENCY, - pool_class=MockPool + monitor_class=DummyMonitor, heartbeat_frequency=HEARTBEAT_FREQUENCY, pool_class=MockPool ) settings.update(kwargs) return settings @@ -120,25 +118,20 @@ def get_topology_settings_dict(**kwargs): def create_topology(scenario_def, **kwargs): # Initialize topologies. 
- if 'heartbeatFrequencyMS' in scenario_def: - frequency = int(scenario_def['heartbeatFrequencyMS']) / 1000.0 + if "heartbeatFrequencyMS" in scenario_def: + frequency = int(scenario_def["heartbeatFrequencyMS"]) / 1000.0 else: frequency = HEARTBEAT_FREQUENCY - seeds, hosts = get_addresses( - scenario_def['topology_description']['servers']) + seeds, hosts = get_addresses(scenario_def["topology_description"]["servers"]) topology_type = get_topology_type_name(scenario_def) - if topology_type == 'LoadBalanced': - kwargs.setdefault('load_balanced', True) + if topology_type == "LoadBalanced": + kwargs.setdefault("load_balanced", True) # Force topology description to ReplicaSet - elif topology_type in ['ReplicaSetNoPrimary', 'ReplicaSetWithPrimary']: - kwargs.setdefault('replica_set_name', 'rs') - settings = get_topology_settings_dict( - heartbeat_frequency=frequency, - seeds=seeds, - **kwargs - ) + elif topology_type in ["ReplicaSetNoPrimary", "ReplicaSetWithPrimary"]: + kwargs.setdefault("replica_set_name", "rs") + settings = get_topology_settings_dict(heartbeat_frequency=frequency, seeds=seeds, **kwargs) # "Eligible servers" is defined in the server selection spec as # the set of servers matching both the ReadPreference's mode @@ -147,21 +140,21 @@ def create_topology(scenario_def, **kwargs): topology.open() # Update topologies with server descriptions. - for server in scenario_def['topology_description']['servers']: + for server in scenario_def["topology_description"]["servers"]: server_description = make_server_description(server, hosts) topology.on_change(server_description) # Assert that descriptions match - assert (scenario_def['topology_description']['type'] == - topology.description.topology_type_name), topology.description.topology_type_name + assert ( + scenario_def["topology_description"]["type"] == topology.description.topology_type_name + ), topology.description.topology_type_name return topology def create_test(scenario_def): def run_scenario(self): - _, hosts = get_addresses( - scenario_def['topology_description']['servers']) + _, hosts = get_addresses(scenario_def["topology_description"]["servers"]) # "Eligible servers" is defined in the server selection spec as # the set of servers matching both the ReadPreference's mode # and tag sets. @@ -170,16 +163,15 @@ def run_scenario(self): # "In latency window" is defined in the server selection # spec as the subset of suitable_servers that falls within the # allowable latency window. - top_suitable = create_topology( - scenario_def, local_threshold_ms=1000000) + top_suitable = create_topology(scenario_def, local_threshold_ms=1000000) # Create server selector. if scenario_def.get("operation") == "write": pref = writable_server_selector else: # Make first letter lowercase to match read_pref's modes. - pref_def = scenario_def['read_preference'] - if scenario_def.get('error'): + pref_def = scenario_def["read_preference"] + if scenario_def.get("error"): with self.assertRaises((ConfigurationError, ValueError)): # Error can be raised when making Read Pref or selecting. pref = parse_read_preference(pref_def) @@ -189,35 +181,33 @@ def run_scenario(self): pref = parse_read_preference(pref_def) # Select servers. 
- if not scenario_def.get('suitable_servers'): + if not scenario_def.get("suitable_servers"): with self.assertRaises(AutoReconnect): top_suitable.select_server(pref, server_selection_timeout=0) return - if not scenario_def['in_latency_window']: + if not scenario_def["in_latency_window"]: with self.assertRaises(AutoReconnect): top_latency.select_server(pref, server_selection_timeout=0) return - actual_suitable_s = top_suitable.select_servers( - pref, server_selection_timeout=0) - actual_latency_s = top_latency.select_servers( - pref, server_selection_timeout=0) + actual_suitable_s = top_suitable.select_servers(pref, server_selection_timeout=0) + actual_latency_s = top_latency.select_servers(pref, server_selection_timeout=0) expected_suitable_servers = {} - for server in scenario_def['suitable_servers']: + for server in scenario_def["suitable_servers"]: server_description = make_server_description(server, hosts) - expected_suitable_servers[server['address']] = server_description + expected_suitable_servers[server["address"]] = server_description actual_suitable_servers = {} for s in actual_suitable_s: - actual_suitable_servers["%s:%d" % (s.description.address[0], - s.description.address[1])] = s.description + actual_suitable_servers[ + "%s:%d" % (s.description.address[0], s.description.address[1]) + ] = s.description - self.assertEqual(len(actual_suitable_servers), - len(expected_suitable_servers)) + self.assertEqual(len(actual_suitable_servers), len(expected_suitable_servers)) for k, actual in actual_suitable_servers.items(): expected = expected_suitable_servers[k] self.assertEqual(expected.address, actual.address) @@ -227,18 +217,17 @@ def run_scenario(self): self.assertEqual(expected.all_hosts, actual.all_hosts) expected_latency_servers = {} - for server in scenario_def['in_latency_window']: + for server in scenario_def["in_latency_window"]: server_description = make_server_description(server, hosts) - expected_latency_servers[server['address']] = server_description + expected_latency_servers[server["address"]] = server_description actual_latency_servers = {} for s in actual_latency_s: - actual_latency_servers["%s:%d" % - (s.description.address[0], - s.description.address[1])] = s.description + actual_latency_servers[ + "%s:%d" % (s.description.address[0], s.description.address[1]) + ] = s.description - self.assertEqual(len(actual_latency_servers), - len(expected_latency_servers)) + self.assertEqual(len(actual_latency_servers), len(expected_latency_servers)) for k, actual in actual_latency_servers.items(): expected = expected_latency_servers[k] self.assertEqual(expected.address, actual.address) @@ -256,7 +245,7 @@ class TestAllScenarios(unittest.TestCase): for dirpath, _, filenames in os.walk(test_dir): dirname = os.path.split(dirpath) - dirname = os.path.split(dirname[-2])[-1] + '_' + dirname[-1] + dirname = os.path.split(dirname[-2])[-1] + "_" + dirname[-1] for filename in filenames: if os.path.splitext(filename)[1] != ".json": @@ -266,8 +255,7 @@ class TestAllScenarios(unittest.TestCase): # Construct test from scenario. 
new_test = create_test(scenario_def) - test_name = 'test_%s_%s' % ( - dirname, os.path.splitext(filename)[0]) + test_name = "test_%s_%s" % (dirname, os.path.splitext(filename)[0]) new_test.__name__ = test_name setattr(TestAllScenarios, new_test.__name__, new_test) diff --git a/test/utils_spec_runner.py b/test/utils_spec_runner.py index 8a53a365db..4a71fef328 100644 --- a/test/utils_spec_runner.py +++ b/test/utils_spec_runner.py @@ -16,41 +16,36 @@ import functools import threading - from collections import abc +from test import IntegrationTest, client_context, client_knobs +from test.utils import ( + CMAPListener, + CompareType, + EventListener, + OvertCommandListener, + ServerAndTopologyEventListener, + camel_to_snake, + camel_to_snake_args, + parse_spec_options, + prepare_spec_arguments, + rs_client, +) from typing import List from bson import decode, encode from bson.binary import Binary from bson.int64 import Int64 from bson.son import SON - from gridfs import GridFSBucket - from pymongo import client_session from pymongo.command_cursor import CommandCursor from pymongo.cursor import Cursor -from pymongo.errors import (BulkWriteError, - OperationFailure, - PyMongoError) +from pymongo.errors import BulkWriteError, OperationFailure, PyMongoError from pymongo.read_concern import ReadConcern from pymongo.read_preferences import ReadPreference -from pymongo.results import _WriteResult, BulkWriteResult +from pymongo.results import BulkWriteResult, _WriteResult from pymongo.write_concern import WriteConcern -from test import (client_context, - client_knobs, - IntegrationTest) -from test.utils import (EventListener, camel_to_snake, - camel_to_snake_args, - CompareType, - CMAPListener, - OvertCommandListener, - parse_spec_options, - prepare_spec_arguments, - rs_client, - ServerAndTopologyEventListener) - class SpecRunnerThread(threading.Thread): def __init__(self, name): @@ -74,7 +69,7 @@ def stop(self): def run(self): while not self.stopped or self.ops: - if not self. ops: + if not self.ops: with self.cond: self.cond.wait(10) if self.ops: @@ -97,8 +92,7 @@ def setUpClass(cls): cls.mongos_clients = [] # Speed up the tests by decreasing the heartbeat frequency. - cls.knobs = client_knobs(heartbeat_frequency=0.1, - min_heartbeat_interval=0.1) + cls.knobs = client_knobs(heartbeat_frequency=0.1, min_heartbeat_interval=0.1) cls.knobs.enable() @classmethod @@ -115,7 +109,7 @@ def setUp(self): self.maxDiff = None def _set_fail_point(self, client, command_args): - cmd = SON([('configureFailPoint', 'failCommand')]) + cmd = SON([("configureFailPoint", "failCommand")]) cmd.update(command_args) client.admin.command(cmd) @@ -132,7 +126,7 @@ def targeted_fail_point(self, session, fail_point): clients = {c.address: c for c in self.mongos_clients} client = clients[session._pinned_address] self._set_fail_point(client, fail_point) - self.addCleanup(self.set_fail_point, {'mode': 'off'}) + self.addCleanup(self.set_fail_point, {"mode": "off"}) def assert_session_pinned(self, session): """Run the assertSessionPinned test operation. 
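`_set_fail_point` above builds a `configureFailPoint` command whose first
key is the command name and whose remaining fields come verbatim from the
test's `command_args`, then runs it against the admin database;
`targeted_fail_point` routes it to the mongos the session is pinned to and
registers a `{"mode": "off"}` cleanup. A sketch of a typical payload, with
illustrative values (the failing command, `runner` name, and error code are
placeholders, not taken from any spec file)::

    runner._set_fail_point(client, {
        "mode": {"times": 1},
        "data": {"failCommands": ["insert"], "errorCode": 91},
    })
    # Sends: {"configureFailPoint": "failCommand",
    #         "mode": {"times": 1},
    #         "data": {"failCommands": ["insert"], "errorCode": 91}}
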
@@ -162,12 +156,12 @@ def assert_collection_not_exists(self, database, collection): def assert_index_exists(self, database, collection, index): """Run the assertIndexExists test operation.""" coll = self.client[database][collection] - self.assertIn(index, [doc['name'] for doc in coll.list_indexes()]) + self.assertIn(index, [doc["name"] for doc in coll.list_indexes()]) def assert_index_not_exists(self, database, collection, index): """Run the assertIndexNotExists test operation.""" coll = self.client[database][collection] - self.assertNotIn(index, [doc['name'] for doc in coll.list_indexes()]) + self.assertNotIn(index, [doc["name"] for doc in coll.list_indexes()]) def assertErrorLabelsContain(self, exc, expected_labels): labels = [l for l in expected_labels if exc.has_error_label(l)] @@ -176,14 +170,14 @@ def assertErrorLabelsContain(self, exc, expected_labels): def assertErrorLabelsOmit(self, exc, omit_labels): for label in omit_labels: self.assertFalse( - exc.has_error_label(label), - msg='error labels should not contain %s' % (label,)) + exc.has_error_label(label), msg="error labels should not contain %s" % (label,) + ) def kill_all_sessions(self): clients = self.mongos_clients if self.mongos_clients else [self.client] for client in clients: try: - client.admin.command('killAllSessions', []) + client.admin.command("killAllSessions", []) except OperationFailure: # "operation was interrupted" by killing the command's # own session. @@ -205,8 +199,7 @@ def check_result(self, expected_result, result): for res in expected_result: prop = camel_to_snake(res) # SPEC-869: Only BulkWriteResult has upserted_count. - if (prop == "upserted_count" - and not isinstance(result, BulkWriteResult)): + if prop == "upserted_count" and not isinstance(result, BulkWriteResult): if result.upserted_id is not None: upserted_count = 1 else: @@ -215,8 +208,7 @@ def check_result(self, expected_result, result): elif prop == "inserted_ids": # BulkWriteResult does not have inserted_ids. if isinstance(result, BulkWriteResult): - self.assertEqual(len(expected_result[res]), - result.inserted_count) + self.assertEqual(len(expected_result[res]), result.inserted_count) else: # InsertManyResult may be compared to [id1] from the # crud spec or {"0": id1} from the retryable write spec. @@ -233,8 +225,7 @@ def check_result(self, expected_result, result): expected_ids[int(str_index)] = ids[str_index] self.assertEqual(expected_ids, result.upserted_ids, prop) else: - self.assertEqual( - getattr(result, prop), expected_result[res], prop) + self.assertEqual(getattr(result, prop), expected_result[res], prop) return True else: @@ -245,7 +236,7 @@ def get_object_name(self, op): Transaction spec says 'object' is required. 
""" - return op['object'] + return op["object"] @staticmethod def parse_options(opts): @@ -253,54 +244,54 @@ def parse_options(opts): def run_operation(self, sessions, collection, operation): original_collection = collection - name = camel_to_snake(operation['name']) - if name == 'run_command': - name = 'command' - elif name == 'download_by_name': - name = 'open_download_stream_by_name' - elif name == 'download': - name = 'open_download_stream' - elif name == 'map_reduce': - self.skipTest('PyMongo does not support mapReduce') - elif name == 'count': - self.skipTest('PyMongo does not support count') + name = camel_to_snake(operation["name"]) + if name == "run_command": + name = "command" + elif name == "download_by_name": + name = "open_download_stream_by_name" + elif name == "download": + name = "open_download_stream" + elif name == "map_reduce": + self.skipTest("PyMongo does not support mapReduce") + elif name == "count": + self.skipTest("PyMongo does not support count") database = collection.database collection = database.get_collection(collection.name) - if 'collectionOptions' in operation: + if "collectionOptions" in operation: collection = collection.with_options( - **self.parse_options(operation['collectionOptions'])) + **self.parse_options(operation["collectionOptions"]) + ) object_name = self.get_object_name(operation) - if object_name == 'gridfsbucket': + if object_name == "gridfsbucket": # Only create the GridFSBucket when we need it (for the gridfs # retryable reads tests). obj = GridFSBucket(database, bucket_name=collection.name) else: objects = { - 'client': database.client, - 'database': database, - 'collection': collection, - 'testRunner': self + "client": database.client, + "database": database, + "collection": collection, + "testRunner": self, } objects.update(sessions) obj = objects[object_name] # Combine arguments with options and handle special cases. - arguments = operation.get('arguments', {}) + arguments = operation.get("arguments", {}) arguments.update(arguments.pop("options", {})) self.parse_options(arguments) cmd = getattr(obj, name) with_txn_callback = functools.partial( - self.run_operations, sessions, original_collection, - in_with_transaction=True) - prepare_spec_arguments(operation, arguments, name, sessions, - with_txn_callback) + self.run_operations, sessions, original_collection, in_with_transaction=True + ) + prepare_spec_arguments(operation, arguments, name, sessions, with_txn_callback) - if name == 'run_on_thread': - args = {'sessions': sessions, 'collection': collection} + if name == "run_on_thread": + args = {"sessions": sessions, "collection": collection} args.update(arguments) arguments = args result = cmd(**dict(arguments)) @@ -313,10 +304,10 @@ def run_operation(self, sessions, collection, operation): if arguments["pipeline"] and "$out" in arguments["pipeline"][-1]: # Read from the primary to ensure causal consistency. 
out = collection.database.get_collection( - arguments["pipeline"][-1]["$out"], - read_preference=ReadPreference.PRIMARY) + arguments["pipeline"][-1]["$out"], read_preference=ReadPreference.PRIMARY + ) return out.find() - if 'download' in name: + if "download" in name: result = Binary(result.read()) if isinstance(result, Cursor) or isinstance(result, CommandCursor): @@ -329,10 +320,9 @@ def allowable_errors(self, op): return (PyMongoError,) def _run_op(self, sessions, collection, op, in_with_transaction): - expected_result = op.get('result') + expected_result = op.get("result") if expect_error(op): - with self.assertRaises(self.allowable_errors(op), - msg=op['name']) as context: + with self.assertRaises(self.allowable_errors(op), msg=op["name"]) as context: self.run_operation(sessions, collection, op.copy()) if expect_error_message(expected_result): @@ -340,19 +330,17 @@ def _run_op(self, sessions, collection, op, in_with_transaction): errmsg = str(context.exception.details).lower() else: errmsg = str(context.exception).lower() - self.assertIn(expected_result['errorContains'].lower(), - errmsg) + self.assertIn(expected_result["errorContains"].lower(), errmsg) if expect_error_code(expected_result): - self.assertEqual(expected_result['errorCodeName'], - context.exception.details.get('codeName')) + self.assertEqual( + expected_result["errorCodeName"], context.exception.details.get("codeName") + ) if expect_error_labels_contain(expected_result): self.assertErrorLabelsContain( - context.exception, - expected_result['errorLabelsContain']) + context.exception, expected_result["errorLabelsContain"] + ) if expect_error_labels_omit(expected_result): - self.assertErrorLabelsOmit( - context.exception, - expected_result['errorLabelsOmit']) + self.assertErrorLabelsOmit(context.exception, expected_result["errorLabelsOmit"]) # Reraise the exception if we're in the with_transaction # callback. @@ -360,65 +348,61 @@ def _run_op(self, sessions, collection, op, in_with_transaction): raise context.exception else: result = self.run_operation(sessions, collection, op.copy()) - if 'result' in op: - if op['name'] == 'runCommand': + if "result" in op: + if op["name"] == "runCommand": self.check_command_result(expected_result, result) else: self.check_result(expected_result, result) - def run_operations(self, sessions, collection, ops, - in_with_transaction=False): + def run_operations(self, sessions, collection, ops, in_with_transaction=False): for op in ops: self._run_op(sessions, collection, op, in_with_transaction) # TODO: factor with test_command_monitoring.py def check_events(self, test, listener, session_ids): res = listener.results - if not len(test['expectations']): + if not len(test["expectations"]): return # Give a nicer message when there are missing or extra events - cmds = decode_raw([event.command for event in res['started']]) - self.assertEqual( - len(res['started']), len(test['expectations']), cmds) - for i, expectation in enumerate(test['expectations']): + cmds = decode_raw([event.command for event in res["started"]]) + self.assertEqual(len(res["started"]), len(test["expectations"]), cmds) + for i, expectation in enumerate(test["expectations"]): event_type = next(iter(expectation)) - event = res['started'][i] + event = res["started"][i] # The tests substitute 42 for any number other than 0. 
- if (event.command_name == 'getMore' - and event.command['getMore']): - event.command['getMore'] = Int64(42) - elif event.command_name == 'killCursors': - event.command['cursors'] = [Int64(42)] - elif event.command_name == 'update': + if event.command_name == "getMore" and event.command["getMore"]: + event.command["getMore"] = Int64(42) + elif event.command_name == "killCursors": + event.command["cursors"] = [Int64(42)] + elif event.command_name == "update": # TODO: remove this once PYTHON-1744 is done. # Add upsert and multi fields back into expectations. - updates = expectation[event_type]['command']['updates'] + updates = expectation[event_type]["command"]["updates"] for update in updates: - update.setdefault('upsert', False) - update.setdefault('multi', False) + update.setdefault("upsert", False) + update.setdefault("multi", False) # Replace afterClusterTime: 42 with actual afterClusterTime. - expected_cmd = expectation[event_type]['command'] - expected_read_concern = expected_cmd.get('readConcern') + expected_cmd = expectation[event_type]["command"] + expected_read_concern = expected_cmd.get("readConcern") if expected_read_concern is not None: - time = expected_read_concern.get('afterClusterTime') + time = expected_read_concern.get("afterClusterTime") if time == 42: - actual_time = event.command.get( - 'readConcern', {}).get('afterClusterTime') + actual_time = event.command.get("readConcern", {}).get("afterClusterTime") if actual_time is not None: - expected_read_concern['afterClusterTime'] = actual_time + expected_read_concern["afterClusterTime"] = actual_time - recovery_token = expected_cmd.get('recoveryToken') + recovery_token = expected_cmd.get("recoveryToken") if recovery_token == 42: - expected_cmd['recoveryToken'] = CompareType(dict) + expected_cmd["recoveryToken"] = CompareType(dict) # Replace lsid with a name like "session0" to match test. 
- if 'lsid' in event.command: + if "lsid" in event.command: for name, lsid in session_ids.items(): - if event.command['lsid'] == lsid: - event.command['lsid'] = name + if event.command["lsid"] == lsid: + event.command["lsid"] = name break for attr, expected in expectation[event_type].items(): @@ -428,28 +412,27 @@ def check_events(self, test, listener, session_ids): for key, val in expected.items(): if val is None: if key in actual: - self.fail("Unexpected key [%s] in %r" % ( - key, actual)) + self.fail("Unexpected key [%s] in %r" % (key, actual)) elif key not in actual: - self.fail("Expected key [%s] in %r" % ( - key, actual)) + self.fail("Expected key [%s] in %r" % (key, actual)) else: - self.assertEqual(val, decode_raw(actual[key]), - "Key [%s] in %s" % (key, actual)) + self.assertEqual( + val, decode_raw(actual[key]), "Key [%s] in %s" % (key, actual) + ) else: self.assertEqual(actual, expected) def maybe_skip_scenario(self, test): - if test.get('skipReason'): - self.skipTest(test.get('skipReason')) + if test.get("skipReason"): + self.skipTest(test.get("skipReason")) def get_scenario_db_name(self, scenario_def): """Allow subclasses to override a test's database name.""" - return scenario_def['database_name'] + return scenario_def["database_name"] def get_scenario_coll_name(self, scenario_def): """Allow subclasses to override a test's collection name.""" - return scenario_def['collection_name'] + return scenario_def["collection_name"] def get_outcome_coll_name(self, outcome, collection): """Allow subclasses to override outcome collection.""" @@ -458,7 +441,7 @@ def get_outcome_coll_name(self, outcome, collection): def run_test_ops(self, sessions, collection, test): """Added to allow retryable writes spec to override a test's operation.""" - self.run_operations(sessions, collection, test['operations']) + self.run_operations(sessions, collection, test["operations"]) def parse_client_options(self, opts): """Allow encryption spec to override a clientOptions parsing.""" @@ -470,14 +453,13 @@ def setup_scenario(self, scenario_def): """Allow specs to override a test's setup.""" db_name = self.get_scenario_db_name(scenario_def) coll_name = self.get_scenario_coll_name(scenario_def) - db = client_context.client.get_database( - db_name, write_concern=WriteConcern(w='majority')) + db = client_context.client.get_database(db_name, write_concern=WriteConcern(w="majority")) coll = db[coll_name] coll.drop() db.create_collection(coll_name) - if scenario_def['data']: + if scenario_def["data"]: # Load data. - coll.insert_many(scenario_def['data']) + coll.insert_many(scenario_def["data"]) def run_scenario(self, scenario_def, test): self.maybe_skip_scenario(test) @@ -495,22 +477,22 @@ def run_scenario(self, scenario_def, test): c[database_name][collection_name].distinct("x") # Configure the fail point before creating the client. - if 'failPoint' in test: - fp = test['failPoint'] + if "failPoint" in test: + fp = test["failPoint"] self.set_fail_point(fp) - self.addCleanup(self.set_fail_point, { - 'configureFailPoint': fp['configureFailPoint'], 'mode': 'off'}) + self.addCleanup( + self.set_fail_point, {"configureFailPoint": fp["configureFailPoint"], "mode": "off"} + ) listener = OvertCommandListener() pool_listener = CMAPListener() server_listener = ServerAndTopologyEventListener() # Create a new client, to avoid interference from pooled sessions. 
- client_options = self.parse_client_options(test['clientOptions']) + client_options = self.parse_client_options(test["clientOptions"]) # MMAPv1 does not support retryable writes. - if (client_options.get('retryWrites') is True and - client_context.storage_engine == 'mmapv1'): + if client_options.get("retryWrites") is True and client_context.storage_engine == "mmapv1": self.skipTest("MMAPv1 does not support retryWrites=True") - use_multi_mongos = test['useMultipleMongoses'] + use_multi_mongos = test["useMultipleMongoses"] host = None if use_multi_mongos: if client_context.load_balancer or client_context.serverless: @@ -518,9 +500,8 @@ def run_scenario(self, scenario_def, test): elif client_context.is_mongos: host = client_context.mongos_seeds() client = rs_client( - h=host, - event_listeners=[listener, pool_listener, server_listener], - **client_options) + h=host, event_listeners=[listener, pool_listener, server_listener], **client_options + ) self.scenario_client = client self.listener = listener self.pool_listener = pool_listener @@ -536,13 +517,12 @@ def run_scenario(self, scenario_def, test): # the running server version. if not client_context.sessions_enabled: break - session_name = 'session%d' % i - opts = camel_to_snake_args(test['sessionOptions'][session_name]) - if 'default_transaction_options' in opts: - txn_opts = self.parse_options( - opts['default_transaction_options']) + session_name = "session%d" % i + opts = camel_to_snake_args(test["sessionOptions"][session_name]) + if "default_transaction_options" in opts: + txn_opts = self.parse_options(opts["default_transaction_options"]) txn_opts = client_session.TransactionOptions(**txn_opts) - opts['default_transaction_options'] = txn_opts + opts["default_transaction_options"] = txn_opts s = client.start_session(**dict(opts)) @@ -560,74 +540,74 @@ def run_scenario(self, scenario_def, test): self.check_events(test, listener, session_ids) # Disable fail points. - if 'failPoint' in test: - fp = test['failPoint'] - self.set_fail_point({ - 'configureFailPoint': fp['configureFailPoint'], 'mode': 'off'}) + if "failPoint" in test: + fp = test["failPoint"] + self.set_fail_point({"configureFailPoint": fp["configureFailPoint"], "mode": "off"}) # Assert final state is expected. - outcome = test['outcome'] - expected_c = outcome.get('collection') + outcome = test["outcome"] + expected_c = outcome.get("collection") if expected_c is not None: - outcome_coll_name = self.get_outcome_coll_name( - outcome, collection) + outcome_coll_name = self.get_outcome_coll_name(outcome, collection) # Read from the primary with local read concern to ensure causal # consistency. - outcome_coll = client_context.client[ - collection.database.name].get_collection( + outcome_coll = client_context.client[collection.database.name].get_collection( outcome_coll_name, read_preference=ReadPreference.PRIMARY, - read_concern=ReadConcern('local')) - actual_data = list(outcome_coll.find(sort=[('_id', 1)])) + read_concern=ReadConcern("local"), + ) + actual_data = list(outcome_coll.find(sort=[("_id", 1)])) # The expected data needs to be the left hand side here otherwise # CompareType(Binary) doesn't work. 
- self.assertEqual(wrap_types(expected_c['data']), actual_data) + self.assertEqual(wrap_types(expected_c["data"]), actual_data) def expect_any_error(op): if isinstance(op, dict): - return op.get('error') + return op.get("error") return False def expect_error_message(expected_result): if isinstance(expected_result, dict): - return isinstance(expected_result['errorContains'], str) + return isinstance(expected_result["errorContains"], str) return False def expect_error_code(expected_result): if isinstance(expected_result, dict): - return expected_result['errorCodeName'] + return expected_result["errorCodeName"] return False def expect_error_labels_contain(expected_result): if isinstance(expected_result, dict): - return expected_result['errorLabelsContain'] + return expected_result["errorLabelsContain"] return False def expect_error_labels_omit(expected_result): if isinstance(expected_result, dict): - return expected_result['errorLabelsOmit'] + return expected_result["errorLabelsOmit"] return False def expect_error(op): - expected_result = op.get('result') - return (expect_any_error(op) or - expect_error_message(expected_result) - or expect_error_code(expected_result) - or expect_error_labels_contain(expected_result) - or expect_error_labels_omit(expected_result)) + expected_result = op.get("result") + return ( + expect_any_error(op) + or expect_error_message(expected_result) + or expect_error_code(expected_result) + or expect_error_labels_contain(expected_result) + or expect_error_labels_omit(expected_result) + ) def end_sessions(sessions): @@ -639,13 +619,13 @@ def end_sessions(sessions): def decode_raw(val): """Decode RawBSONDocuments in the given container.""" if isinstance(val, (list, abc.Mapping)): - return decode(encode({'v': val}))['v'] + return decode(encode({"v": val}))["v"] return val TYPES = { - 'binData': Binary, - 'long': Int64, + "binData": Binary, + "long": Int64, } @@ -654,7 +634,7 @@ def wrap_types(val): if isinstance(val, list): return [wrap_types(v) for v in val] if isinstance(val, abc.Mapping): - typ = val.get('$$type') + typ = val.get("$$type") if typ: return CompareType(TYPES[typ]) d = {} diff --git a/test/version.py b/test/version.py index 3348060bfc..e102db7111 100644 --- a/test/version.py +++ b/test/version.py @@ -16,7 +16,6 @@ class Version(tuple): - def __new__(cls, *version): padded_version = cls._padded(version, 4) return super(Version, cls).__new__(cls, tuple(padded_version)) @@ -43,16 +42,15 @@ def from_string(cls, version_string): version_string = version_string[0:-1] mod = -1 # Deal with '-rcX' substrings - if '-rc' in version_string: - version_string = version_string[0:version_string.find('-rc')] + if "-rc" in version_string: + version_string = version_string[0 : version_string.find("-rc")] mod = -1 # Deal with git describe generated substrings - elif '-' in version_string: - version_string = version_string[0:version_string.find('-')] + elif "-" in version_string: + version_string = version_string[0 : version_string.find("-")] mod = -1 bump_patch_level = True - version = [int(part) for part in version_string.split(".")] version = cls._padded(version, 3) # Make from_string and from_version_array agree. 
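        # Roughly, per the stripping above (inputs are illustrative):
        #
        #     Version.from_string("4.4.0")             # -> (4, 4, 0, ...)
        #     Version.from_string("4.4.0-rc3")         # rc suffix dropped,
        #                                              # ranked below 4.4.0
        #     Version.from_string("4.4.1-42-gabcdef0") # describe suffix
        #                                              # dropped, patch bumped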
For example: @@ -77,9 +75,9 @@ def from_version_array(cls, version_array): @classmethod def from_client(cls, client): info = client.server_info() - if 'versionArray' in info: - return cls.from_version_array(info['versionArray']) - return cls.from_string(info['version']) + if "versionArray" in info: + return cls.from_version_array(info["versionArray"]) + return cls.from_string(info["version"]) def at_least(self, *other_version): return self >= Version(*other_version) diff --git a/tools/clean.py b/tools/clean.py index 53729d6406..7196b00e90 100644 --- a/tools/clean.py +++ b/tools/clean.py @@ -34,12 +34,14 @@ try: from pymongo import _cmessage # type: ignore[attr-defined] + sys.exit("could still import _cmessage") except ImportError: pass try: from bson import _cbson + sys.exit("could still import _cbson") except ImportError: pass diff --git a/tools/fail_if_no_c.py b/tools/fail_if_no_c.py index e6fd83a36b..a2d4954789 100644 --- a/tools/fail_if_no_c.py +++ b/tools/fail_if_no_c.py @@ -18,6 +18,7 @@ """ import sys + sys.path[0:0] = [""] import bson diff --git a/tools/ocsptest.py b/tools/ocsptest.py index 149da000ba..14df8a8fe3 100644 --- a/tools/ocsptest.py +++ b/tools/ocsptest.py @@ -21,18 +21,20 @@ # Enable logs in this format: # 2020-06-08 23:49:35,982 DEBUG ocsp_support Peer did not staple an OCSP response -FORMAT = '%(asctime)s %(levelname)s %(module)s %(message)s' +FORMAT = "%(asctime)s %(levelname)s %(module)s %(message)s" logging.basicConfig(format=FORMAT, level=logging.DEBUG) + def check_ocsp(host, port, capath): ctx = get_ssl_context( - None, # certfile - None, # passphrase + None, # certfile + None, # passphrase capath, # ca_certs - None, # crlfile - False, # allow_invalid_certificates - False, # allow_invalid_hostnames - False) # disable_ocsp_endpoint_check + None, # crlfile + False, # allow_invalid_certificates + False, # allow_invalid_hostnames + False, + ) # disable_ocsp_endpoint_check # Ensure we're using pyOpenSSL. 
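    # A typical invocation of this debug tool, per the argument parser below
    # (the host and CA path are illustrative only):
    #
    #     python tools/ocsptest.py --host example.com --ca_file /path/to/ca.pem
    #
    # With every flag above left at its strict default, a handshake failure
    # points at a genuine OCSP/TLS problem rather than a lenient client.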
assert isinstance(ctx, SSLContext) @@ -44,18 +46,15 @@ def check_ocsp(host, port, capath): finally: s.close() + def main(): - parser = argparse.ArgumentParser( - description='Debug OCSP') - parser.add_argument( - '--host', type=str, required=True, help="Host to connect to") - parser.add_argument( - '-p', '--port', type=int, default=443, help="Port to connect to") - parser.add_argument( - '--ca_file', type=str, default=None, help="CA file for host") + parser = argparse.ArgumentParser(description="Debug OCSP") + parser.add_argument("--host", type=str, required=True, help="Host to connect to") + parser.add_argument("-p", "--port", type=int, default=443, help="Port to connect to") + parser.add_argument("--ca_file", type=str, default=None, help="CA file for host") args = parser.parse_args() check_ocsp(args.host, args.port, args.ca_file) -if __name__ == '__main__': - main() +if __name__ == "__main__": + main() From b7057ecf9f5f1591ffa0ba2b5d716cc8f1ad3068 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Wed, 9 Feb 2022 14:24:23 -0600 Subject: [PATCH 0066/1588] PYTHON-1834 (cont) Add pre-commit config (#853) --- .git-blame-ignore-revs | 2 ++ .github/workflows/test-python.yml | 12 +++++++++ .pre-commit-config.yaml | 31 ++++++++++++++++++++++++ CONTRIBUTING.rst | 19 ++++++++++++++- THIRD-PARTY-NOTICES | 1 - doc/api/pymongo/event_loggers.rst | 2 +- doc/api/pymongo/topology_description.rst | 1 - doc/atlas.rst | 1 - doc/examples/server_selection.rst | 2 +- doc/migrate-to-pymongo4.rst | 2 +- doc/tools.rst | 16 ++++++------ test/certificates/ca.pem | 2 +- 12 files changed, 75 insertions(+), 16 deletions(-) create mode 100644 .git-blame-ignore-revs create mode 100644 .pre-commit-config.yaml diff --git a/.git-blame-ignore-revs b/.git-blame-ignore-revs new file mode 100644 index 0000000000..8f02673e41 --- /dev/null +++ b/.git-blame-ignore-revs @@ -0,0 +1,2 @@ +# Initial pre-commit reformat +5578999a90e439fbca06fc0ffc98f4d04e96f7b4 diff --git a/.github/workflows/test-python.yml b/.github/workflows/test-python.yml index ca1845e2cd..651f863d89 100644 --- a/.github/workflows/test-python.yml +++ b/.github/workflows/test-python.yml @@ -5,6 +5,18 @@ on: pull_request: jobs: + + pre-commit: + name: pre-commit + runs-on: ubuntu-latest + + steps: + - uses: actions/checkout@v2 + - uses: actions/setup-python@v2 + - uses: pre-commit/action@v2.0.0 + with: + extra_args: --all-files --hook-stage=manual + build: # supercharge/mongodb-github-action requires containers so we don't test other platforms runs-on: ${{ matrix.os }} diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml new file mode 100644 index 0000000000..39062bbdf5 --- /dev/null +++ b/.pre-commit-config.yaml @@ -0,0 +1,31 @@ + +repos: +- repo: https://github.com/pre-commit/pre-commit-hooks + rev: v3.4.0 + hooks: + - id: check-added-large-files + - id: check-case-conflict + - id: check-toml + - id: check-yaml + - id: debug-statements + - id: end-of-file-fixer + exclude: WHEEL + exclude_types: [json] + - id: forbid-new-submodules + - id: trailing-whitespace + exclude: .patch + exclude_types: [json] + +- repo: https://github.com/psf/black + rev: 22.1.0 + hooks: + - id: black + files: \.py$ + args: [--line-length=100] + +- repo: https://github.com/PyCQA/isort + rev: 5.7.0 + hooks: + - id: isort + files: \.py$ + args: [--profile=black] diff --git a/CONTRIBUTING.rst b/CONTRIBUTING.rst index 40dca00e0c..bbc22954a0 100644 --- a/CONTRIBUTING.rst +++ b/CONTRIBUTING.rst @@ -38,6 +38,23 @@ General Guidelines from the 
cmd line to run the test suite). - Add yourself to doc/contributors.rst :) +Running Linters +--------------- + +PyMongo uses `pre-commit `_ +for managing linting of the codebase. +``pre-commit`` performs various checks on all files in PyMongo and uses tools +that help follow a consistent code style within the codebase. + +To set up ``pre-commit`` locally, run:: + + pip install pre-commit + pre-commit install + +To run ``pre-commit`` manually, run:: + + pre-commit run --all-files + Documentation ------------- @@ -67,4 +84,4 @@ The ``-b`` flag adds as a regex pattern to block files you do not wish to update in PyMongo. This is primarily helpful if you are implementing a new feature in PyMongo that has spec tests already implemented, or if you are attempting to -validate new spec tests in PyMongo. \ No newline at end of file +validate new spec tests in PyMongo. diff --git a/THIRD-PARTY-NOTICES b/THIRD-PARTY-NOTICES index 28a340b3fb..a307b30432 100644 --- a/THIRD-PARTY-NOTICES +++ b/THIRD-PARTY-NOTICES @@ -94,4 +94,3 @@ supplied in this file in the creation of products supporting the Unicode Standard, and to make copies of this file in any form for internal or external distribution as long as this notice remains attached. - diff --git a/doc/api/pymongo/event_loggers.rst b/doc/api/pymongo/event_loggers.rst index f79bfb2345..9be0779c20 100644 --- a/doc/api/pymongo/event_loggers.rst +++ b/doc/api/pymongo/event_loggers.rst @@ -4,4 +4,4 @@ .. automodule:: pymongo.event_loggers :synopsis: A collection of simple listeners for monitoring driver events. - :members: \ No newline at end of file + :members: diff --git a/doc/api/pymongo/topology_description.rst b/doc/api/pymongo/topology_description.rst index 8141507df7..24353db2a9 100644 --- a/doc/api/pymongo/topology_description.rst +++ b/doc/api/pymongo/topology_description.rst @@ -7,4 +7,3 @@ .. autoclass:: pymongo.topology_description.TopologyDescription() :members: - diff --git a/doc/atlas.rst b/doc/atlas.rst index 0a64b294ce..6100e9d3c5 100644 --- a/doc/atlas.rst +++ b/doc/atlas.rst @@ -41,4 +41,3 @@ Connections to Atlas require TLS/SSL. .. _homebrew: https://brew.sh/ .. _macports: https://www.macports.org/ .. _requests: https://pypi.python.org/pypi/requests - diff --git a/doc/examples/server_selection.rst b/doc/examples/server_selection.rst index 28659c133e..fc436c0cd7 100644 --- a/doc/examples/server_selection.rst +++ b/doc/examples/server_selection.rst @@ -105,4 +105,4 @@ list of known hosts. As an example, for a 3-member replica set with a all available secondaries. -.. _server selection algorithm: https://docs.mongodb.com/manual/core/read-preference-mechanics/ \ No newline at end of file +.. _server selection algorithm: https://docs.mongodb.com/manual/core/read-preference-mechanics/ diff --git a/doc/migrate-to-pymongo4.rst b/doc/migrate-to-pymongo4.rst index 22071bd3bb..b993e32f4e 100644 --- a/doc/migrate-to-pymongo4.rst +++ b/doc/migrate-to-pymongo4.rst @@ -950,4 +950,4 @@ Additional BSON classes implement ``__slots__`` :class:`~bson.max_key.MaxKey`, :class:`~bson.timestamp.Timestamp`, :class:`~bson.regex.Regex`, and :class:`~bson.dbref.DBRef` now implement ``__slots__`` to reduce memory usage. This means that their attributes are fixed, and new -attributes cannot be added to the object at runtime. \ No newline at end of file +attributes cannot be added to the object at runtime. diff --git a/doc/tools.rst b/doc/tools.rst index 65b38c16a8..304a1eaf5c 100644 --- a/doc/tools.rst +++ b/doc/tools.rst @@ -47,14 +47,14 @@ Humongolus possible. 
The code is available for download `at GitHub `_. Tutorials and usage examples are also available at GitHub. - + MincePy - `MincePy `_ is an - object-document mapper (ODM) designed to make any Python object storable - and queryable in a MongoDB database. It is designed with machine learning - and big-data computational and experimental science applications in mind - but is entirely general and can be useful to anyone looking to organise, - share, or process large amounts data with as little change to their current + `MincePy `_ is an + object-document mapper (ODM) designed to make any Python object storable + and queryable in a MongoDB database. It is designed with machine learning + and big-data computational and experimental science applications in mind + but is entirely general and can be useful to anyone looking to organise, + share, or process large amounts data with as little change to their current workflow as possible. Ming @@ -80,7 +80,7 @@ MotorEngine It implements the same modeling APIs to be data-portable, meaning that a model defined in MongoEngine can be read in MotorEngine. The source is `available on GitHub `_. - + uMongo `uMongo `_ is a Python MongoDB ODM. Its inception comes from two needs: the lack of async ODM and the diff --git a/test/certificates/ca.pem b/test/certificates/ca.pem index 6ac86cfcc1..24beea2d48 100644 --- a/test/certificates/ca.pem +++ b/test/certificates/ca.pem @@ -18,4 +18,4 @@ gT564CmvkUat8uXPz6olOCdwkMpJ9Sj62i0mpgXJdBfxKQ6TZ9yGz6m3jannjZpN LchB7xSAEWtqUgvNusq0dApJsf4n7jZ+oBZVaQw2+tzaMfaLqHgMwcu1FzA8UKCD sxCgIsZUs8DdxaD418Ot6nPfheOTqe24n+TTa+Z6O0W0QtnofJBx7tmAo1aEc57i 77s89pfwIJetpIlhzNSMKurCAocFCJMJLAASJFuu6dyDvPo= ------END CERTIFICATE----- \ No newline at end of file +-----END CERTIFICATE----- From ddb661444220474bcb448a7adbd13ef9220a588a Mon Sep 17 00:00:00 2001 From: Julius Park Date: Wed, 9 Feb 2022 15:12:02 -0800 Subject: [PATCH 0067/1588] PYTHON-2682 Add support for the comment field to all helpers (#847) --- pymongo/aggregation.py | 3 + pymongo/bulk.py | 5 +- pymongo/change_stream.py | 5 +- pymongo/collection.py | 252 ++++++++- pymongo/cursor.py | 2 +- pymongo/database.py | 70 ++- pymongo/mongo_client.py | 41 +- .../unified/change-streams.json | 146 +++++- test/crud/unified/aggregate.json | 280 ++++++++++ test/crud/unified/bulkWrite-comment.json | 494 ++++++++++++++++++ test/crud/unified/deleteMany-comment.json | 244 +++++++++ test/crud/unified/deleteOne-comment.json | 242 +++++++++ test/crud/unified/find-comment.json | 298 +++++++++++ .../unified/findOneAndDelete-comment.json | 211 ++++++++ .../unified/findOneAndReplace-comment.json | 234 +++++++++ .../unified/findOneAndUpdate-comment.json | 228 ++++++++ test/crud/unified/insertMany-comment.json | 225 ++++++++ test/crud/unified/insertOne-comment.json | 219 ++++++++ test/crud/unified/replaceOne-comment.json | 229 ++++++++ test/crud/unified/updateMany-comment.json | 244 +++++++++ test/crud/unified/updateOne-comment.json | 241 +++++++++ test/test_comment.py | 183 +++++++ 22 files changed, 4048 insertions(+), 48 deletions(-) create mode 100644 test/crud/unified/bulkWrite-comment.json create mode 100644 test/crud/unified/deleteMany-comment.json create mode 100644 test/crud/unified/deleteOne-comment.json create mode 100644 test/crud/unified/find-comment.json create mode 100644 test/crud/unified/findOneAndDelete-comment.json create mode 100644 test/crud/unified/findOneAndReplace-comment.json create mode 100644 test/crud/unified/findOneAndUpdate-comment.json create mode 100644 
test/crud/unified/insertMany-comment.json create mode 100644 test/crud/unified/insertOne-comment.json create mode 100644 test/crud/unified/replaceOne-comment.json create mode 100644 test/crud/unified/updateMany-comment.json create mode 100644 test/crud/unified/updateOne-comment.json create mode 100644 test/test_comment.py diff --git a/pymongo/aggregation.py b/pymongo/aggregation.py index 2b8cafe7cb..51be0dfa81 100644 --- a/pymongo/aggregation.py +++ b/pymongo/aggregation.py @@ -39,6 +39,7 @@ def __init__( let=None, user_fields=None, result_processor=None, + comment=None, ): if "explain" in options: raise ConfigurationError( @@ -57,6 +58,8 @@ def __init__( if let: common.validate_is_mapping("let", let) options["let"] = let + if comment is not None: + options["comment"] = comment self._options = options # This is the batchSize that will be used for setting the initial diff --git a/pymongo/bulk.py b/pymongo/bulk.py index e043e09fdd..fae55a5c10 100644 --- a/pymongo/bulk.py +++ b/pymongo/bulk.py @@ -138,13 +138,14 @@ def _raise_bulk_write_error(full_result): class _Bulk(object): """The private guts of the bulk write API.""" - def __init__(self, collection, ordered, bypass_document_validation): + def __init__(self, collection, ordered, bypass_document_validation, comment=None): """Initialize a _Bulk instance.""" self.collection = collection.with_options( codec_options=collection.codec_options._replace( unicode_decode_error_handler="replace", document_class=dict ) ) + self.comment = comment self.ordered = ordered self.ops = [] self.executed = False @@ -308,6 +309,8 @@ def _execute_command( write_concern = final_write_concern or write_concern cmd = SON([(cmd_name, self.collection.name), ("ordered", self.ordered)]) + if self.comment: + cmd["comment"] = self.comment if not write_concern.is_server_default: cmd["writeConcern"] = write_concern.document if self.bypass_doc_val: diff --git a/pymongo/change_stream.py b/pymongo/change_stream.py index a35c9cb844..50f6f72b73 100644 --- a/pymongo/change_stream.py +++ b/pymongo/change_stream.py @@ -95,6 +95,7 @@ def __init__( start_at_operation_time: Optional[Timestamp], session: Optional["ClientSession"], start_after: Optional[Mapping[str, Any]], + comment: Optional[Any] = None, ) -> None: if pipeline is None: pipeline = [] @@ -125,7 +126,7 @@ def __init__( self._collation = collation self._start_at_operation_time = start_at_operation_time self._session = session - + self._comment = comment # Initialize cursor. self._cursor = self._create_cursor() @@ -209,8 +210,8 @@ def _run_aggregation_cmd(self, session, explicit_session): self._command_options(), explicit_session, result_processor=self._process_result, + comment=self._comment, ) - return self._client._retryable_read( cmd.get_cursor, self._target._read_preference_for(session), session ) diff --git a/pymongo/collection.py b/pymongo/collection.py index b17bb61f34..df8db3f106 100644 --- a/pymongo/collection.py +++ b/pymongo/collection.py @@ -423,6 +423,7 @@ def bulk_write( ordered: bool = True, bypass_document_validation: bool = False, session: Optional["ClientSession"] = None, + comment: Optional[Any] = None, ) -> BulkWriteResult: """Send a batch of write operations to the server. @@ -472,6 +473,8 @@ def bulk_write( ``False``. - `session` (optional): a :class:`~pymongo.client_session.ClientSession`. + - `comment` (optional): A user-provided comment to attach to this + command. :Returns: An instance of :class:`~pymongo.results.BulkWriteResult`. @@ -481,6 +484,9 @@ def bulk_write( .. 
note:: `bypass_document_validation` requires server version **>= 3.2** + .. versionchanged:: 4.1 + Added ``comment`` parameter. + .. versionchanged:: 3.6 Added ``session`` parameter. @@ -491,7 +497,7 @@ def bulk_write( """ common.validate_list("requests", requests) - blk = _Bulk(self, ordered, bypass_document_validation) + blk = _Bulk(self, ordered, bypass_document_validation, comment=comment) for request in requests: try: request._add_to_bulk(blk) @@ -504,11 +510,15 @@ def bulk_write( return BulkWriteResult(bulk_api_result, True) return BulkWriteResult({}, False) - def _insert_one(self, doc, ordered, write_concern, op_id, bypass_doc_val, session): + def _insert_one( + self, doc, ordered, write_concern, op_id, bypass_doc_val, session, comment=None + ): """Internal helper for inserting a single document.""" write_concern = write_concern or self.write_concern acknowledged = write_concern.acknowledged command = SON([("insert", self.name), ("ordered", ordered), ("documents", [doc])]) + if comment is not None: + command["comment"] = comment if not write_concern.is_server_default: command["writeConcern"] = write_concern.document @@ -538,6 +548,7 @@ def insert_one( document: _DocumentIn, bypass_document_validation: bool = False, session: Optional["ClientSession"] = None, + comment: Optional[Any] = None, ) -> InsertOneResult: """Insert a single document. @@ -558,6 +569,8 @@ def insert_one( ``False``. - `session` (optional): a :class:`~pymongo.client_session.ClientSession`. + - `comment` (optional): A user-provided comment to attach to this + command. :Returns: - An instance of :class:`~pymongo.results.InsertOneResult`. @@ -567,6 +580,9 @@ def insert_one( .. note:: `bypass_document_validation` requires server version **>= 3.2** + .. versionchanged:: 4.1 + Added ``comment`` parameter. + .. versionchanged:: 3.6 Added ``session`` parameter. @@ -588,6 +604,7 @@ def insert_one( op_id=None, bypass_doc_val=bypass_document_validation, session=session, + comment=comment, ), write_concern.acknowledged, ) @@ -598,6 +615,7 @@ def insert_many( ordered: bool = True, bypass_document_validation: bool = False, session: Optional["ClientSession"] = None, + comment: Optional[Any] = None, ) -> InsertManyResult: """Insert an iterable of documents. @@ -621,6 +639,8 @@ def insert_many( ``False``. - `session` (optional): a :class:`~pymongo.client_session.ClientSession`. + - `comment` (optional): A user-provided comment to attach to this + command. :Returns: An instance of :class:`~pymongo.results.InsertManyResult`. @@ -630,6 +650,9 @@ def insert_many( .. note:: `bypass_document_validation` requires server version **>= 3.2** + .. versionchanged:: 4.1 + Added ``comment`` parameter. + .. versionchanged:: 3.6 Added ``session`` parameter. 
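Taken together with ``bulk_write`` above, the new keyword simply rides along
on the write command document; a sketch of how a caller might use it (the
comment values are illustrative)::

    coll.insert_one({"x": 1}, comment="migration-42")
    coll.insert_many([{"x": 2}, {"x": 3}], comment={"request_id": 42})

The server then surfaces the comment in its logs, ``currentOp`` and the
profiler for the corresponding commands; note that document-typed comments
require MongoDB 4.4+, while older servers only accept comments on a limited
set of commands.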
@@ -657,7 +680,7 @@ def gen(): yield (message._INSERT, document) write_concern = self._write_concern_for(session) - blk = _Bulk(self, ordered, bypass_document_validation) + blk = _Bulk(self, ordered, bypass_document_validation, comment=comment) blk.ops = [doc for doc in gen()] blk.execute(write_concern, session=session) return InsertManyResult(inserted_ids, write_concern.acknowledged) @@ -679,6 +702,7 @@ def _update( session=None, retryable_write=False, let=None, + comment=None, ): """Internal update / replace helper.""" common.validate_boolean("upsert", upsert) @@ -704,7 +728,6 @@ def _update( if not isinstance(hint, str): hint = helpers._index_document(hint) update_doc["hint"] = hint - command = SON([("update", self.name), ("ordered", ordered), ("updates", [update_doc])]) if let: common.validate_is_mapping("let", let) @@ -712,6 +735,8 @@ def _update( if not write_concern.is_server_default: command["writeConcern"] = write_concern.document + if comment is not None: + command["comment"] = comment # Update command. if bypass_doc_val: command["bypassDocumentValidation"] = True @@ -757,6 +782,7 @@ def _update_retryable( hint=None, session=None, let=None, + comment=None, ): """Internal update / replace helper.""" @@ -777,6 +803,7 @@ def _update(session, sock_info, retryable_write): session=session, retryable_write=retryable_write, let=let, + comment=comment, ) return self.__database.client._retryable_write( @@ -793,6 +820,7 @@ def replace_one( hint: Optional[_IndexKeyHint] = None, session: Optional["ClientSession"] = None, let: Optional[Mapping[str, Any]] = None, + comment: Optional[Any] = None, ) -> UpdateResult: """Replace a single document matching the filter. @@ -845,12 +873,14 @@ def replace_one( constant or closed expressions that do not reference document fields. Parameters can then be accessed as variables in an aggregate expression context (e.g. "$$var"). - + - `comment` (optional): A user-provided comment to attach to this + command. :Returns: - An instance of :class:`~pymongo.results.UpdateResult`. .. versionchanged:: 4.1 Added ``let`` parameter. + Added ``comment`` parameter. .. versionchanged:: 3.11 Added ``hint`` parameter. .. versionchanged:: 3.6 @@ -878,6 +908,7 @@ def replace_one( hint=hint, session=session, let=let, + comment=comment, ), write_concern.acknowledged, ) @@ -893,6 +924,7 @@ def update_one( hint: Optional[_IndexKeyHint] = None, session: Optional["ClientSession"] = None, let: Optional[Mapping[str, Any]] = None, + comment: Optional[Any] = None, ) -> UpdateResult: """Update a single document matching the filter. @@ -938,12 +970,15 @@ def update_one( constant or closed expressions that do not reference document fields. Parameters can then be accessed as variables in an aggregate expression context (e.g. "$$var"). + - `comment` (optional): A user-provided comment to attach to this + command. :Returns: - An instance of :class:`~pymongo.results.UpdateResult`. .. versionchanged:: 4.1 Added ``let`` parameter. + Added ``comment`` parameter. .. versionchanged:: 3.11 Added ``hint`` parameter. .. versionchanged:: 3.9 @@ -974,6 +1009,7 @@ def update_one( hint=hint, session=session, let=let, + comment=comment, ), write_concern.acknowledged, ) @@ -989,6 +1025,7 @@ def update_many( hint: Optional[_IndexKeyHint] = None, session: Optional["ClientSession"] = None, let: Optional[Mapping[str, Any]] = None, + comment: Optional[Any] = None, ) -> UpdateResult: """Update one or more documents that match the filter. 
@@ -1034,12 +1071,15 @@ def update_many( constant or closed expressions that do not reference document fields. Parameters can then be accessed as variables in an aggregate expression context (e.g. "$$var"). + - `comment` (optional): A user-provided comment to attach to this + command. :Returns: - An instance of :class:`~pymongo.results.UpdateResult`. .. versionchanged:: 4.1 Added ``let`` parameter. + Added ``comment`` parameter. .. versionchanged:: 3.11 Added ``hint`` parameter. .. versionchanged:: 3.9 @@ -1071,22 +1111,32 @@ def update_many( hint=hint, session=session, let=let, + comment=comment, ), write_concern.acknowledged, ) - def drop(self, session: Optional["ClientSession"] = None) -> None: + def drop( + self, + session: Optional["ClientSession"] = None, + comment: Optional[Any] = None, + ) -> None: """Alias for :meth:`~pymongo.database.Database.drop_collection`. :Parameters: - `session` (optional): a :class:`~pymongo.client_session.ClientSession`. + - `comment` (optional): A user-provided comment to attach to this + command. The following two calls are equivalent: >>> db.foo.drop() >>> db.drop_collection("foo") + .. versionchanged:: 4.1 + Added ``comment`` parameter. + .. versionchanged:: 3.7 :meth:`drop` now respects this :class:`Collection`'s :attr:`write_concern`. @@ -1100,7 +1150,7 @@ def drop(self, session: Optional["ClientSession"] = None) -> None: self.write_concern, self.read_concern, ) - dbo.drop_collection(self.__name, session=session) + dbo.drop_collection(self.__name, session=session, comment=comment) def _delete( self, @@ -1115,6 +1165,7 @@ def _delete( session=None, retryable_write=False, let=None, + comment=None, ): """Internal delete helper.""" common.validate_is_mapping("filter", criteria) @@ -1143,6 +1194,9 @@ def _delete( common.validate_is_document_type("let", let) command["let"] = let + if comment is not None: + command["comment"] = comment + # Delete command. result = sock_info.command( self.__database.name, @@ -1167,6 +1221,7 @@ def _delete_retryable( hint=None, session=None, let=None, + comment=None, ): """Internal delete helper.""" @@ -1183,6 +1238,7 @@ def _delete(session, sock_info, retryable_write): session=session, retryable_write=retryable_write, let=let, + comment=comment, ) return self.__database.client._retryable_write( @@ -1196,6 +1252,7 @@ def delete_one( hint: Optional[_IndexKeyHint] = None, session: Optional["ClientSession"] = None, let: Optional[Mapping[str, Any]] = None, + comment: Optional[Any] = None, ) -> DeleteResult: """Delete a single document matching the filter. @@ -1223,12 +1280,15 @@ def delete_one( constant or closed expressions that do not reference document fields. Parameters can then be accessed as variables in an aggregate expression context (e.g. "$$var"). + - `comment` (optional): A user-provided comment to attach to this + command. :Returns: - An instance of :class:`~pymongo.results.DeleteResult`. .. versionchanged:: 4.1 Added ``let`` parameter. + Added ``comment`` parameter. .. versionchanged:: 3.11 Added ``hint`` parameter. .. versionchanged:: 3.6 @@ -1247,6 +1307,7 @@ def delete_one( hint=hint, session=session, let=let, + comment=comment, ), write_concern.acknowledged, ) @@ -1258,6 +1319,7 @@ def delete_many( hint: Optional[_IndexKeyHint] = None, session: Optional["ClientSession"] = None, let: Optional[Mapping[str, Any]] = None, + comment: Optional[Any] = None, ) -> DeleteResult: """Delete one or more documents matching the filter. 
@@ -1285,12 +1347,15 @@ def delete_many( constant or closed expressions that do not reference document fields. Parameters can then be accessed as variables in an aggregate expression context (e.g. "$$var"). + - `comment` (optional): A user-provided comment to attach to this + command. :Returns: - An instance of :class:`~pymongo.results.DeleteResult`. .. versionchanged:: 4.1 Added ``let`` parameter. + Added ``comment`` parameter. .. versionchanged:: 3.11 Added ``hint`` parameter. .. versionchanged:: 3.6 @@ -1309,6 +1374,7 @@ def delete_many( hint=hint, session=session, let=let, + comment=comment, ), write_concern.acknowledged, ) @@ -1339,10 +1405,10 @@ def find_one( are the same as the arguments to :meth:`find`. >>> collection.find_one(max_time_ms=100) + """ if filter is not None and not isinstance(filter, abc.Mapping): filter = {"_id": filter} - cursor = self.find(filter, *args, **kwargs) for result in cursor.limit(-1): return result @@ -1566,7 +1632,6 @@ def find_raw_batches(self, *args: Any, **kwargs: Any) -> RawBatchCursor[_Documen # OP_MSG is required to support encryption. if self.__database.client._encrypter: raise InvalidOperation("find_raw_batches does not support auto encryption") - return RawBatchCursor(self, *args, **kwargs) def _count_cmd(self, session, sock_info, read_preference, cmd, collation): @@ -1605,7 +1670,7 @@ def _aggregate_one_result(self, sock_info, read_preference, cmd, collation, sess batch = result["cursor"]["firstBatch"] return batch[0] if batch else None - def estimated_document_count(self, **kwargs: Any) -> int: + def estimated_document_count(self, comment: Optional[Any] = None, **kwargs: Any) -> int: """Get an estimate of the number of documents in this collection using collection metadata. @@ -1619,12 +1684,17 @@ def estimated_document_count(self, **kwargs: Any) -> int: operation to run, in milliseconds. :Parameters: + - `comment` (optional): A user-provided comment to attach to this + command. - `**kwargs` (optional): See list of options above. + .. versionadded:: 3.7 """ if "session" in kwargs: raise ConfigurationError("estimated_document_count does not support sessions") + if comment is not None: + kwargs["comment"] = comment def _cmd(session, server, sock_info, read_preference): if sock_info.max_wire_version >= 12: @@ -1650,7 +1720,11 @@ def _cmd(session, server, sock_info, read_preference): return self.__database.client._retryable_read(_cmd, self.read_preference, None) def count_documents( - self, filter: Mapping[str, Any], session: Optional["ClientSession"] = None, **kwargs: Any + self, + filter: Mapping[str, Any], + session: Optional["ClientSession"] = None, + comment: Optional[Any] = None, + **kwargs: Any, ) -> int: """Count the number of documents in this collection. @@ -1696,8 +1770,11 @@ def count_documents( documents. - `session` (optional): a :class:`~pymongo.client_session.ClientSession`. + - `comment` (optional): A user-provided comment to attach to this + command. - `**kwargs` (optional): See list of options above. + .. versionadded:: 3.7 .. 
_$expr: https://docs.mongodb.com/manual/reference/operator/query/expr/ @@ -1710,6 +1787,8 @@ def count_documents( pipeline.append({"$skip": kwargs.pop("skip")}) if "limit" in kwargs: pipeline.append({"$limit": kwargs.pop("limit")}) + if comment is not None: + kwargs["comment"] = comment pipeline.append({"$group": {"_id": 1, "n": {"$sum": 1}}}) cmd = SON([("aggregate", self.__name), ("pipeline", pipeline), ("cursor", {})]) if "hint" in kwargs and not isinstance(kwargs["hint"], str): @@ -1731,6 +1810,7 @@ def create_indexes( self, indexes: Sequence[IndexModel], session: Optional["ClientSession"] = None, + comment: Optional[Any] = None, **kwargs: Any, ) -> List[str]: """Create one or more indexes on this collection. @@ -1747,9 +1827,14 @@ def create_indexes( instances. - `session` (optional): a :class:`~pymongo.client_session.ClientSession`. + - `comment` (optional): A user-provided comment to attach to this + command. - `**kwargs` (optional): optional arguments to the createIndexes command (like maxTimeMS) can be passed as keyword arguments. + + + .. note:: The :attr:`~pymongo.collection.Collection.write_concern` of this collection is automatically applied to this operation. @@ -1765,6 +1850,8 @@ def create_indexes( .. _createIndexes: https://docs.mongodb.com/manual/reference/command/createIndexes/ """ common.validate_list("indexes", indexes) + if comment is not None: + kwargs["comment"] = comment return self.__create_indexes(indexes, session, **kwargs) def __create_indexes(self, indexes, session, **kwargs): @@ -1811,7 +1898,11 @@ def gen_indexes(): return names def create_index( - self, keys: _IndexKeyHint, session: Optional["ClientSession"] = None, **kwargs: Any + self, + keys: _IndexKeyHint, + session: Optional["ClientSession"] = None, + comment: Optional[Any] = None, + **kwargs: Any, ) -> str: """Creates an index on this collection. @@ -1886,10 +1977,14 @@ def create_index( pairs specifying the index to create - `session` (optional): a :class:`~pymongo.client_session.ClientSession`. + arguments + - `comment` (optional): A user-provided comment to attach to this + command. - `**kwargs` (optional): any additional index creation options (see the above list) should be passed as keyword - arguments + .. versionchanged:: 4.1 + Added ``comment`` parameter. .. versionchanged:: 3.11 Added the ``hidden`` option. .. versionchanged:: 3.6 @@ -1912,10 +2007,17 @@ def create_index( cmd_options = {} if "maxTimeMS" in kwargs: cmd_options["maxTimeMS"] = kwargs.pop("maxTimeMS") + if comment is not None: + cmd_options["comment"] = comment index = IndexModel(keys, **kwargs) return self.__create_indexes([index], session, **cmd_options)[0] - def drop_indexes(self, session: Optional["ClientSession"] = None, **kwargs: Any) -> None: + def drop_indexes( + self, + session: Optional["ClientSession"] = None, + comment: Optional[Any] = None, + **kwargs: Any, + ) -> None: """Drops all indexes on this collection. Can be used on non-existant collections or collections with no indexes. @@ -1924,9 +2026,14 @@ def drop_indexes(self, session: Optional["ClientSession"] = None, **kwargs: Any) :Parameters: - `session` (optional): a :class:`~pymongo.client_session.ClientSession`. + arguments + - `comment` (optional): A user-provided comment to attach to this + command. - `**kwargs` (optional): optional arguments to the createIndexes command (like maxTimeMS) can be passed as keyword arguments. + + .. note:: The :attr:`~pymongo.collection.Collection.write_concern` of this collection is automatically applied to this operation. 
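For the index helpers the comment travels in the command options rather than
in the index specification itself; usage might look like this (names and
comment values are illustrative)::

    coll.create_index([("x", 1)], comment="covers the reports query")
    coll.drop_indexes(comment="index rebuild, ticket 123")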
@@ -1939,10 +2046,16 @@ def drop_indexes(self, session: Optional["ClientSession"] = None, **kwargs: Any) when connected to MongoDB >= 3.4. """ + if comment is not None: + kwargs["comment"] = comment self.drop_index("*", session=session, **kwargs) def drop_index( - self, index_or_name: _IndexKeyHint, session: Optional["ClientSession"] = None, **kwargs: Any + self, + index_or_name: _IndexKeyHint, + session: Optional["ClientSession"] = None, + comment: Optional[Any] = None, + **kwargs: Any, ) -> None: """Drops the specified index on this collection. @@ -1964,12 +2077,17 @@ def drop_index( - `index_or_name`: index (or name of index) to drop - `session` (optional): a :class:`~pymongo.client_session.ClientSession`. + - `comment` (optional): A user-provided comment to attach to this + command. - `**kwargs` (optional): optional arguments to the createIndexes command (like maxTimeMS) can be passed as keyword arguments. + + .. note:: The :attr:`~pymongo.collection.Collection.write_concern` of this collection is automatically applied to this operation. + .. versionchanged:: 3.6 Added ``session`` parameter. Added support for arbitrary keyword arguments. @@ -1988,6 +2106,8 @@ def drop_index( cmd = SON([("dropIndexes", self.__name), ("index", name)]) cmd.update(kwargs) + if comment is not None: + cmd["comment"] = comment with self._socket_for_writes(session) as sock_info: self._command( sock_info, @@ -1999,7 +2119,9 @@ def drop_index( ) def list_indexes( - self, session: Optional["ClientSession"] = None + self, + session: Optional["ClientSession"] = None, + comment: Optional[Any] = None, ) -> CommandCursor[MutableMapping[str, Any]]: """Get a cursor over the index documents for this collection. @@ -2011,10 +2133,15 @@ def list_indexes( :Parameters: - `session` (optional): a :class:`~pymongo.client_session.ClientSession`. + - `comment` (optional): A user-provided comment to attach to this + command. :Returns: An instance of :class:`~pymongo.command_cursor.CommandCursor`. + .. versionchanged:: 4.1 + Added ``comment`` parameter. + .. versionchanged:: 3.6 Added ``session`` parameter. @@ -2028,6 +2155,9 @@ def list_indexes( def _cmd(session, server, sock_info, read_preference): cmd = SON([("listIndexes", self.__name), ("cursor", {})]) + if comment is not None: + cmd["comment"] = comment + with self.__database.client._tmp_session(session, False) as s: try: cursor = self._command( @@ -2048,7 +2178,9 @@ def _cmd(session, server, sock_info, read_preference): return self.__database.client._retryable_read(_cmd, read_pref, session) def index_information( - self, session: Optional["ClientSession"] = None + self, + session: Optional["ClientSession"] = None, + comment: Optional[Any] = None, ) -> MutableMapping[str, Any]: """Get information on this collection's indexes. @@ -2071,11 +2203,16 @@ def index_information( :Parameters: - `session` (optional): a :class:`~pymongo.client_session.ClientSession`. + - `comment` (optional): A user-provided comment to attach to this + command. + + .. versionchanged:: 4.1 + Added ``comment`` parameter. .. versionchanged:: 3.6 Added ``session`` parameter. 
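Because ``index_information`` is built on ``list_indexes``, the comment is
forwarded to the underlying ``listIndexes`` command; for example (the comment
value is illustrative)::

    info = coll.index_information(comment="nightly index audit")
    for name, meta in info.items():
        print(name, meta["key"])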
""" - cursor = self.list_indexes(session=session) + cursor = self.list_indexes(session=session, comment=comment) info = {} for index in cursor: index["key"] = list(index["key"].items()) @@ -2083,7 +2220,11 @@ def index_information( info[index.pop("name")] = index return info - def options(self, session: Optional["ClientSession"] = None) -> MutableMapping[str, Any]: + def options( + self, + session: Optional["ClientSession"] = None, + comment: Optional[Any] = None, + ) -> MutableMapping[str, Any]: """Get the options set on this collection. Returns a dictionary of options and their values - see @@ -2094,6 +2235,8 @@ def options(self, session: Optional["ClientSession"] = None) -> MutableMapping[s :Parameters: - `session` (optional): a :class:`~pymongo.client_session.ClientSession`. + - `comment` (optional): A user-provided comment to attach to this + command. .. versionchanged:: 3.6 Added ``session`` parameter. @@ -2105,7 +2248,9 @@ def options(self, session: Optional["ClientSession"] = None) -> MutableMapping[s self.write_concern, self.read_concern, ) - cursor = dbo.list_collections(session=session, filter={"name": self.__name}) + cursor = dbo.list_collections( + session=session, filter={"name": self.__name}, comment=comment + ) result = None for doc in cursor: @@ -2130,8 +2275,11 @@ def _aggregate( session, explicit_session, let=None, + comment=None, **kwargs, ): + if comment is not None: + kwargs["comment"] = comment cmd = aggregation_command( self, cursor_class, @@ -2154,6 +2302,7 @@ def aggregate( pipeline: _Pipeline, session: Optional["ClientSession"] = None, let: Optional[Mapping[str, Any]] = None, + comment: Optional[Any] = None, **kwargs: Any, ) -> CommandCursor[_DocumentType]: """Perform an aggregation using the aggregation framework on this @@ -2196,12 +2345,16 @@ def aggregate( fields. Parameters can then be accessed as variables in an aggregate expression context (e.g. ``"$$var"``). This option is only supported on MongoDB >= 5.0. + - `comment` (optional): A user-provided comment to attach to this + command. + :Returns: A :class:`~pymongo.command_cursor.CommandCursor` over the result set. .. versionchanged:: 4.1 + Added ``comment`` parameter. Added ``let`` parameter. Support $merge and $out executing on secondaries according to the collection's :attr:`read_preference`. @@ -2228,6 +2381,7 @@ def aggregate( .. _aggregate command: https://docs.mongodb.com/manual/reference/command/aggregate """ + with self.__database.client._tmp_session(session, close=False) as s: return self._aggregate( _CollectionAggregationCommand, @@ -2236,11 +2390,16 @@ def aggregate( session=s, explicit_session=session is not None, let=let, + comment=comment, **kwargs, ) def aggregate_raw_batches( - self, pipeline: _Pipeline, session: Optional["ClientSession"] = None, **kwargs: Any + self, + pipeline: _Pipeline, + session: Optional["ClientSession"] = None, + comment: Optional[Any] = None, + **kwargs: Any, ) -> RawBatchCursor[_DocumentType]: """Perform an aggregation and retrieve batches of raw BSON. @@ -2268,7 +2427,8 @@ def aggregate_raw_batches( # OP_MSG is required to support encryption. 
if self.__database.client._encrypter: raise InvalidOperation("aggregate_raw_batches does not support auto encryption") - + if comment is not None: + kwargs["comment"] = comment with self.__database.client._tmp_session(session, close=False) as s: return self._aggregate( _CollectionRawAggregationCommand, @@ -2290,6 +2450,7 @@ def watch( start_at_operation_time: Optional[Timestamp] = None, session: Optional["ClientSession"] = None, start_after: Optional[Mapping[str, Any]] = None, + comment: Optional[Any] = None, ) -> CollectionChangeStream[_DocumentType]: """Watch changes on this collection. @@ -2368,10 +2529,16 @@ def watch( - `start_after` (optional): The same as `resume_after` except that `start_after` can resume notifications after an invalidate event. This option and `resume_after` are mutually exclusive. + - `comment` (optional): A user-provided comment to attach to this + command. :Returns: A :class:`~pymongo.change_stream.CollectionChangeStream` cursor. + + .. versionchanged:: 4.1 + Added ``comment`` parameter. + .. versionchanged:: 3.9 Added the ``start_after`` parameter. @@ -2396,10 +2563,15 @@ def watch( start_at_operation_time, session, start_after, + comment=comment, ) def rename( - self, new_name: str, session: Optional["ClientSession"] = None, **kwargs: Any + self, + new_name: str, + session: Optional["ClientSession"] = None, + comment: Optional[Any] = None, + **kwargs: Any, ) -> MutableMapping[str, Any]: """Rename this collection. @@ -2413,6 +2585,8 @@ def rename( - `new_name`: new name for this collection - `session` (optional): a :class:`~pymongo.client_session.ClientSession`. + - `comment` (optional): A user-provided comment to attach to this + command. - `**kwargs` (optional): additional arguments to the rename command may be passed as keyword arguments to this helper method (i.e. ``dropTarget=True``) @@ -2441,6 +2615,8 @@ def rename( new_name = "%s.%s" % (self.__database.name, new_name) cmd = SON([("renameCollection", self.__full_name), ("to", new_name)]) cmd.update(kwargs) + if comment is not None: + cmd["comment"] = comment write_concern = self._write_concern_for_cmd(cmd, session) with self._socket_for_writes(session) as sock_info: @@ -2459,6 +2635,7 @@ def distinct( key: str, filter: Optional[Mapping[str, Any]] = None, session: Optional["ClientSession"] = None, + comment: Optional[Any] = None, **kwargs: Any, ) -> List: """Get a list of distinct values for `key` among all documents @@ -2485,6 +2662,8 @@ def distinct( from which to retrieve the distinct values. - `session` (optional): a :class:`~pymongo.client_session.ClientSession`. + - `comment` (optional): A user-provided comment to attach to this + command. - `**kwargs` (optional): See list of options above. .. versionchanged:: 3.6 @@ -2503,6 +2682,8 @@ def distinct( kwargs["query"] = filter collation = validate_collation_or_none(kwargs.pop("collation", None)) cmd.update(kwargs) + if comment is not None: + cmd["comment"] = comment def _cmd(session, server, sock_info, read_preference): return self._command( @@ -2611,6 +2792,7 @@ def find_one_and_delete( hint: Optional[_IndexKeyHint] = None, session: Optional["ClientSession"] = None, let: Optional[Mapping[str, Any]] = None, + comment: Optional[Any] = None, **kwargs: Any, ) -> _DocumentType: """Finds a single document and deletes it, returning the document. @@ -2656,13 +2838,15 @@ def find_one_and_delete( on MongoDB 4.4 and above. - `session` (optional): a :class:`~pymongo.client_session.ClientSession`. 
- - `**kwargs` (optional): additional command arguments can be passed - as keyword arguments (for example maxTimeMS can be used with - recent server versions). - `let` (optional): Map of parameter names and values. Values must be constant or closed expressions that do not reference document fields. Parameters can then be accessed as variables in an aggregate expression context (e.g. "$$var"). + - `comment` (optional): A user-provided comment to attach to this + command. + - `**kwargs` (optional): additional command arguments can be passed + as keyword arguments (for example maxTimeMS can be used with + recent server versions). .. versionchanged:: 4.1 Added ``let`` parameter. @@ -2684,6 +2868,8 @@ def find_one_and_delete( .. versionadded:: 3.0 """ kwargs["remove"] = True + if comment is not None: + kwargs["comment"] = comment return self.__find_and_modify( filter, projection, sort, let=let, hint=hint, session=session, **kwargs ) @@ -2699,6 +2885,7 @@ def find_one_and_replace( hint: Optional[_IndexKeyHint] = None, session: Optional["ClientSession"] = None, let: Optional[Mapping[str, Any]] = None, + comment: Optional[Any] = None, **kwargs: Any, ) -> _DocumentType: """Finds a single document and replaces it, returning either the @@ -2754,11 +2941,13 @@ def find_one_and_replace( constant or closed expressions that do not reference document fields. Parameters can then be accessed as variables in an aggregate expression context (e.g. "$$var"). + - `comment` (optional): A user-provided comment to attach to this + command. - `**kwargs` (optional): additional command arguments can be passed as keyword arguments (for example maxTimeMS can be used with recent server versions). - .. versionchanged:: 4.1 + Added ``let`` parameter. .. versionchanged:: 3.11 Added the ``hint`` option. @@ -2779,6 +2968,8 @@ def find_one_and_replace( """ common.validate_ok_for_replace(replacement) kwargs["update"] = replacement + if comment is not None: + kwargs["comment"] = comment return self.__find_and_modify( filter, projection, @@ -2803,6 +2994,7 @@ def find_one_and_update( hint: Optional[_IndexKeyHint] = None, session: Optional["ClientSession"] = None, let: Optional[Mapping[str, Any]] = None, + comment: Optional[Any] = None, **kwargs: Any, ) -> _DocumentType: """Finds a single document and updates it, returning either the @@ -2897,12 +3089,12 @@ def find_one_and_update( constant or closed expressions that do not reference document fields. Parameters can then be accessed as variables in an aggregate expression context (e.g. "$$var"). + - `comment` (optional): A user-provided comment to attach to this + command. - `**kwargs` (optional): additional command arguments can be passed as keyword arguments (for example maxTimeMS can be used with recent server versions). - .. versionchanged:: 4.1 - Added ``let`` parameter. .. versionchanged:: 3.11 Added the ``hint`` option. .. 
versionchanged:: 3.9 @@ -2925,6 +3117,8 @@ def find_one_and_update( common.validate_ok_for_update(update) common.validate_list_or_none("array_filters", array_filters) kwargs["update"] = update + if comment is not None: + kwargs["comment"] = comment return self.__find_and_modify( filter, projection, diff --git a/pymongo/cursor.py b/pymongo/cursor.py index ba9e5956f2..be4b998d31 100644 --- a/pymongo/cursor.py +++ b/pymongo/cursor.py @@ -183,7 +183,7 @@ def __init__( return_key: Optional[bool] = None, show_record_id: Optional[bool] = None, snapshot: Optional[bool] = None, - comment: Any = None, + comment: Optional[Any] = None, session: Optional["ClientSession"] = None, allow_disk_use: Optional[bool] = None, let: Optional[bool] = None, diff --git a/pymongo/database.py b/pymongo/database.py index 675db132f7..e6633ed230 100644 --- a/pymongo/database.py +++ b/pymongo/database.py @@ -471,6 +471,7 @@ def watch( start_at_operation_time: Optional[Timestamp] = None, session: Optional["ClientSession"] = None, start_after: Optional[Mapping[str, Any]] = None, + comment: Optional[Any] = None, ) -> DatabaseChangeStream[_DocumentType]: """Watch changes on this database. @@ -542,10 +543,15 @@ def watch( - `start_after` (optional): The same as `resume_after` except that `start_after` can resume notifications after an invalidate event. This option and `resume_after` are mutually exclusive. + - `comment` (optional): A user-provided comment to attach to this + command. :Returns: A :class:`~pymongo.change_stream.DatabaseChangeStream` cursor. + .. versionchanged:: 4.1 + Added ``comment`` parameter. + .. versionchanged:: 3.9 Added the ``start_after`` parameter. @@ -567,6 +573,7 @@ def watch( start_at_operation_time, session, start_after, + comment=comment, ) def _command( @@ -611,6 +618,7 @@ def command( read_preference: Optional[_ServerMode] = None, codec_options: Optional[CodecOptions] = DEFAULT_CODEC_OPTIONS, session: Optional["ClientSession"] = None, + comment: Optional[Any] = None, **kwargs: Any, ) -> Dict[str, Any]: """Issue a MongoDB command. @@ -665,9 +673,12 @@ def command( instance. - `session` (optional): A :class:`~pymongo.client_session.ClientSession`. + - `comment` (optional): A user-provided comment to attach to this + command. - `**kwargs` (optional): additional keyword arguments will be added to the command document before it is sent + .. note:: :meth:`command` does **not** obey this Database's :attr:`read_preference` or :attr:`codec_options`. You must use the `read_preference` and `codec_options` parameters instead. @@ -695,6 +706,9 @@ def command( .. seealso:: The MongoDB documentation on `commands `_. """ + if comment is not None: + kwargs["comment"] = comment + if read_preference is None: read_preference = (session and session._txn_read_preference()) or ReadPreference.PRIMARY with self.__client._socket_for_reads(read_preference, session) as ( @@ -767,6 +781,7 @@ def list_collections( self, session: Optional["ClientSession"] = None, filter: Optional[Mapping[str, Any]] = None, + comment: Optional[Any] = None, **kwargs: Any, ) -> CommandCursor[Dict[str, Any]]: """Get a cursor over the collections of this database. @@ -776,12 +791,15 @@ def list_collections( :class:`~pymongo.client_session.ClientSession`. - `filter` (optional): A query document to filter the list of collections returned from the listCollections command. + - `comment` (optional): A user-provided comment to attach to this + command. 
- `**kwargs` (optional): Optional parameters of the `listCollections command `_ can be passed as keyword arguments to this method. The supported options differ by server version. + :Returns: An instance of :class:`~pymongo.command_cursor.CommandCursor`. @@ -790,6 +808,8 @@ def list_collections( if filter is not None: kwargs["filter"] = filter read_pref = (session and session._txn_read_preference()) or ReadPreference.PRIMARY + if comment is not None: + kwargs["comment"] = comment def _cmd(session, server, sock_info, read_preference): return self._list_collections( @@ -802,6 +822,7 @@ def list_collection_names( self, session: Optional["ClientSession"] = None, filter: Optional[Mapping[str, Any]] = None, + comment: Optional[Any] = None, **kwargs: Any, ) -> List[str]: """Get a list of all the collection names in this database. @@ -816,19 +837,25 @@ def list_collection_names( :class:`~pymongo.client_session.ClientSession`. - `filter` (optional): A query document to filter the list of collections returned from the listCollections command. + - `comment` (optional): A user-provided comment to attach to this + command. - `**kwargs` (optional): Optional parameters of the `listCollections command `_ can be passed as keyword arguments to this method. The supported options differ by server version. + .. versionchanged:: 3.8 Added the ``filter`` and ``**kwargs`` parameters. .. versionadded:: 3.6 """ + if comment is not None: + kwargs["comment"] = comment if filter is None: kwargs["nameOnly"] = True + else: # The enumerate collections spec states that "drivers MUST NOT set # nameOnly if a filter specifies any keys other than name." @@ -840,7 +867,10 @@ def list_collection_names( return [result["name"] for result in self.list_collections(session=session, **kwargs)] def drop_collection( - self, name_or_collection: Union[str, Collection], session: Optional["ClientSession"] = None + self, + name_or_collection: Union[str, Collection], + session: Optional["ClientSession"] = None, + comment: Optional[Any] = None, ) -> Dict[str, Any]: """Drop a collection. @@ -849,10 +879,16 @@ def drop_collection( collection object itself - `session` (optional): a :class:`~pymongo.client_session.ClientSession`. + - `comment` (optional): A user-provided comment to attach to this + command. + .. note:: The :attr:`~pymongo.database.Database.write_concern` of this database is automatically applied to this operation. + .. versionchanged:: 4.1 + Added ``comment`` parameter. + .. versionchanged:: 3.6 Added ``session`` parameter. @@ -868,11 +904,14 @@ def drop_collection( if not isinstance(name, str): raise TypeError("name_or_collection must be an instance of str") + command = SON([("drop", name)]) + if comment is not None: + command["comment"] = comment + with self.__client._socket_for_writes(session) as sock_info: return self._command( sock_info, - "drop", - value=name, + command, allowable_errors=["ns not found", 26], write_concern=self._write_concern_for(session), parse_write_concern_error=True, @@ -886,6 +925,7 @@ def validate_collection( full: bool = False, session: Optional["ClientSession"] = None, background: Optional[bool] = None, + comment: Optional[Any] = None, ) -> Dict[str, Any]: """Validate a collection. @@ -907,6 +947,11 @@ def validate_collection( :class:`~pymongo.client_session.ClientSession`. - `background` (optional): A boolean flag that determines whether the command runs in the background. Requires MongoDB 4.4+. + - `comment` (optional): A user-provided comment to attach to this + command. + + .. 
versionchanged:: 4.1 + Added ``comment`` parameter. .. versionchanged:: 3.11 Added ``background`` parameter. @@ -922,8 +967,10 @@ def validate_collection( if not isinstance(name, str): raise TypeError("name_or_collection must be an instance of str or " "Collection") - cmd = SON([("validate", name), ("scandata", scandata), ("full", full)]) + if comment is not None: + cmd["comment"] = comment + if background is not None: cmd["background"] = background @@ -970,7 +1017,11 @@ def __bool__(self) -> bool: ) def dereference( - self, dbref: DBRef, session: Optional["ClientSession"] = None, **kwargs: Any + self, + dbref: DBRef, + session: Optional["ClientSession"] = None, + comment: Optional[Any] = None, + **kwargs: Any, ) -> Optional[_DocumentType]: """Dereference a :class:`~bson.dbref.DBRef`, getting the document it points to. @@ -985,10 +1036,15 @@ def dereference( - `dbref`: the reference - `session` (optional): a :class:`~pymongo.client_session.ClientSession`. + - `comment` (optional): A user-provided comment to attach to this + command. - `**kwargs` (optional): any additional keyword arguments are the same as the arguments to :meth:`~pymongo.collection.Collection.find`. + + .. versionchanged:: 4.1 + Added ``comment`` parameter. .. versionchanged:: 3.6 Added ``session`` parameter. """ @@ -999,4 +1055,6 @@ def dereference( "trying to dereference a DBRef that points to " "another database (%r not %r)" % (dbref.database, self.__name) ) - return self[dbref.collection].find_one({"_id": dbref.id}, session=session, **kwargs) + return self[dbref.collection].find_one( + {"_id": dbref.id}, session=session, comment=comment, **kwargs + ) diff --git a/pymongo/mongo_client.py b/pymongo/mongo_client.py index 3fa2946c7c..6b0d55601f 100644 --- a/pymongo/mongo_client.py +++ b/pymongo/mongo_client.py @@ -846,6 +846,7 @@ def watch( start_at_operation_time: Optional[Timestamp] = None, session: Optional[client_session.ClientSession] = None, start_after: Optional[Mapping[str, Any]] = None, + comment: Optional[Any] = None, ) -> ChangeStream[_DocumentType]: """Watch changes on this cluster. @@ -917,10 +918,15 @@ def watch( - `start_after` (optional): The same as `resume_after` except that `start_after` can resume notifications after an invalidate event. This option and `resume_after` are mutually exclusive. + - `comment` (optional): A user-provided comment to attach to this + command. :Returns: A :class:`~pymongo.change_stream.ClusterChangeStream` cursor. + .. versionchanged:: 4.1 + Added ``comment`` parameter. + .. versionchanged:: 3.9 Added the ``start_after`` parameter. @@ -942,6 +948,7 @@ def watch( start_at_operation_time, session, start_after, + comment=comment, ) @property @@ -1709,19 +1716,25 @@ def server_info(self, session: Optional[client_session.ClientSession] = None) -> ) def list_databases( - self, session: Optional[client_session.ClientSession] = None, **kwargs: Any + self, + session: Optional[client_session.ClientSession] = None, + comment: Optional[Any] = None, + **kwargs: Any, ) -> CommandCursor[Dict[str, Any]]: """Get a cursor over the databases of the connected server. :Parameters: - `session` (optional): a :class:`~pymongo.client_session.ClientSession`. + - `comment` (optional): A user-provided comment to attach to this + command. - `**kwargs` (optional): Optional parameters of the `listDatabases command `_ can be passed as keyword arguments to this method. The supported options differ by server version. + :Returns: An instance of :class:`~pymongo.command_cursor.CommandCursor`. 
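The client-level helpers follow the same pattern; an illustrative sketch
(the database name and comments are examples only)::

    for spec in client.list_databases(comment="capacity check"):
        print(spec["name"])
    client.drop_database("scratch", comment="cleanup after test run")

As the following hunks show, ``list_database_names`` forwards its comment
through ``list_databases``, so all of these end up on the
``listDatabases``/``dropDatabase`` commands.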
@@ -1729,6 +1742,8 @@ def list_databases( """ cmd = SON([("listDatabases", 1)]) cmd.update(kwargs) + if comment is not None: + cmd["comment"] = comment admin = self._database_default_options("admin") res = admin._retryable_read_command(cmd, session=session) # listDatabases doesn't return a cursor (yet). Fake one. @@ -1740,22 +1755,30 @@ def list_databases( return CommandCursor(admin["$cmd"], cursor, None) def list_database_names( - self, session: Optional[client_session.ClientSession] = None + self, + session: Optional[client_session.ClientSession] = None, + comment: Optional[Any] = None, ) -> List[str]: """Get a list of the names of all databases on the connected server. :Parameters: - `session` (optional): a :class:`~pymongo.client_session.ClientSession`. + - `comment` (optional): A user-provided comment to attach to this + command. + + .. versionchanged:: 4.1 + Added ``comment`` parameter. .. versionadded:: 3.6 """ - return [doc["name"] for doc in self.list_databases(session, nameOnly=True)] + return [doc["name"] for doc in self.list_databases(session, nameOnly=True, comment=comment)] def drop_database( self, name_or_database: Union[str, database.Database], session: Optional[client_session.ClientSession] = None, + comment: Optional[Any] = None, ) -> None: """Drop a database. @@ -1769,6 +1792,11 @@ def drop_database( database to drop - `session` (optional): a :class:`~pymongo.client_session.ClientSession`. + - `comment` (optional): A user-provided comment to attach to this + command. + + .. versionchanged:: 4.1 + Added ``comment`` parameter. .. versionchanged:: 3.6 Added ``session`` parameter. @@ -1791,7 +1819,7 @@ def drop_database( with self._socket_for_writes(session) as sock_info: self[name]._command( sock_info, - "dropDatabase", + {"dropDatabase": 1, "comment": comment}, read_preference=ReadPreference.PRIMARY, write_concern=self._write_concern_for(session), parse_write_concern_error=True, @@ -1837,6 +1865,11 @@ def get_default_database( :class:`~pymongo.read_concern.ReadConcern`. If ``None`` (the default) the :attr:`read_concern` of this :class:`MongoClient` is used. + - `comment` (optional): A user-provided comment to attach to this + command. + + .. versionchanged:: 4.1 + Added ``comment`` parameter. .. versionchanged:: 3.8 Undeprecated. 
Added the ``default``, ``codec_options``, diff --git a/test/change_streams/unified/change-streams.json b/test/change_streams/unified/change-streams.json index adaf00de2d..4aea9a4aa1 100644 --- a/test/change_streams/unified/change-streams.json +++ b/test/change_streams/unified/change-streams.json @@ -1,10 +1,21 @@ { "description": "change-streams", "schemaVersion": "1.0", + "runOnRequirements": [ + { + "topologies": [ + "replicaset", + "sharded-replicaset" + ] + } + ], "createEntities": [ { "client": { - "id": "client0" + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] } }, { @@ -34,10 +45,7 @@ "description": "Test array truncation", "runOnRequirements": [ { - "minServerVersion": "4.7", - "topologies": [ - "replicaset" - ] + "minServerVersion": "4.7" } ], "operations": [ @@ -111,6 +119,134 @@ } } ] + }, + { + "description": "Test with document comment", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "operations": [ + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [], + "comment": { + "name": "test1" + } + }, + "saveResultAsEntity": "changeStream0" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "pipeline": [ + { + "$changeStream": {} + } + ], + "comment": { + "name": "test1" + } + } + } + } + ] + } + ] + }, + { + "description": "Test with document comment - pre 4.4", + "runOnRequirements": [ + { + "minServerVersion": "3.6.0", + "maxServerVersion": "4.2.99" + } + ], + "operations": [ + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [], + "comment": { + "name": "test1" + } + }, + "expectError": { + "isClientError": false + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "pipeline": [ + { + "$changeStream": {} + } + ], + "comment": { + "name": "test1" + } + } + } + } + ] + } + ] + }, + { + "description": "Test with string comment", + "runOnRequirements": [ + { + "minServerVersion": "3.6.0" + } + ], + "operations": [ + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [], + "comment": "comment" + }, + "saveResultAsEntity": "changeStream0" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "pipeline": [ + { + "$changeStream": {} + } + ], + "comment": "comment" + } + } + } + ] + } + ] } ] } diff --git a/test/crud/unified/aggregate.json b/test/crud/unified/aggregate.json index dcdad761e8..f6da8ff32f 100644 --- a/test/crud/unified/aggregate.json +++ b/test/crud/unified/aggregate.json @@ -161,6 +161,286 @@ ] } ] + }, + { + "description": "aggregate with a string comment", + "runOnRequirements": [ + { + "minServerVersion": "3.6.0" + } + ], + "operations": [ + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "_id": { + "$gt": 1 + } + } + } + ], + "comment": "comment" + }, + "object": "collection0" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "coll0", + "pipeline": [ + { + "$match": { + "_id": { + "$gt": 1 + } + } + } + ], + "comment": "comment" + } + } + } + ] + } + ] + }, + { + "description": "aggregate with a document comment", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "operations": [ + { + "name": 
"aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "_id": { + "$gt": 1 + } + } + } + ], + "comment": { + "content": "test" + } + }, + "object": "collection0" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "coll0", + "pipeline": [ + { + "$match": { + "_id": { + "$gt": 1 + } + } + } + ], + "comment": { + "content": "test" + } + } + } + } + ] + } + ] + }, + { + "description": "aggregate with a document comment - pre 4.4", + "runOnRequirements": [ + { + "minServerVersion": "3.6.0", + "maxServerVersion": "4.2.99" + } + ], + "operations": [ + { + "name": "aggregate", + "object": "collection0", + "arguments": { + "pipeline": [ + { + "$match": { + "_id": { + "$gt": 1 + } + } + } + ], + "comment": { + "content": "test" + } + }, + "expectError": { + "isClientError": false + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "coll0", + "pipeline": [ + { + "$match": { + "_id": { + "$gt": 1 + } + } + } + ], + "comment": { + "content": "test" + } + }, + "commandName": "aggregate", + "databaseName": "aggregate-tests" + } + } + ] + } + ] + }, + { + "description": "aggregate with comment does not set comment on getMore", + "runOnRequirements": [ + { + "minServerVersion": "3.6.0" + } + ], + "operations": [ + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "_id": { + "$gt": 1 + } + } + } + ], + "batchSize": 2, + "comment": "comment" + }, + "object": "collection0", + "expectResult": [ + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + }, + { + "_id": 5, + "x": 55 + }, + { + "_id": 6, + "x": 66 + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "coll0", + "pipeline": [ + { + "$match": { + "_id": { + "$gt": 1 + } + } + } + ], + "cursor": { + "batchSize": 2 + }, + "comment": "comment" + }, + "commandName": "aggregate", + "databaseName": "aggregate-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "collection": "coll0", + "batchSize": 2, + "comment": { + "$$exists": false + } + }, + "commandName": "getMore", + "databaseName": "aggregate-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "collection": "coll0", + "batchSize": 2, + "comment": { + "$$exists": false + } + }, + "commandName": "getMore", + "databaseName": "aggregate-tests" + } + } + ] + } + ] } ] } diff --git a/test/crud/unified/bulkWrite-comment.json b/test/crud/unified/bulkWrite-comment.json new file mode 100644 index 0000000000..fac9644543 --- /dev/null +++ b/test/crud/unified/bulkWrite-comment.json @@ -0,0 +1,494 @@ +{ + "description": "bulkWrite-comment", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-v2" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "BulkWrite_comment" + } + } + ], + "initialData": [ + { + "collectionName": "BulkWrite_comment", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + } + ] + } + ], + "tests": [ + { + 
"description": "BulkWrite with string comment", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "operations": [ + { + "object": "collection0", + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "insertOne": { + "document": { + "_id": 5, + "x": "inserted" + } + } + }, + { + "replaceOne": { + "filter": { + "_id": 1 + }, + "replacement": { + "_id": 1, + "x": "replaced" + } + } + }, + { + "updateOne": { + "filter": { + "_id": 2 + }, + "update": { + "$set": { + "x": "updated" + } + } + } + }, + { + "deleteOne": { + "filter": { + "_id": 3 + } + } + } + ], + "comment": "comment" + }, + "expectResult": { + "deletedCount": 1, + "insertedCount": 1, + "insertedIds": { + "$$unsetOrMatches": { + "0": 5 + } + }, + "matchedCount": 2, + "modifiedCount": 2, + "upsertedCount": 0, + "upsertedIds": {} + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "BulkWrite_comment", + "documents": [ + { + "_id": 5, + "x": "inserted" + } + ], + "ordered": true, + "comment": "comment" + } + } + }, + { + "commandStartedEvent": { + "command": { + "update": "BulkWrite_comment", + "updates": [ + { + "q": { + "_id": 1 + }, + "u": { + "_id": 1, + "x": "replaced" + } + }, + { + "q": { + "_id": 2 + }, + "u": { + "$set": { + "x": "updated" + } + } + } + ], + "ordered": true, + "comment": "comment" + } + } + }, + { + "commandStartedEvent": { + "command": { + "delete": "BulkWrite_comment", + "deletes": [ + { + "q": { + "_id": 3 + }, + "limit": 1 + } + ], + "ordered": true, + "comment": "comment" + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "BulkWrite_comment", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": "replaced" + }, + { + "_id": 2, + "x": "updated" + }, + { + "_id": 4, + "x": 44 + }, + { + "_id": 5, + "x": "inserted" + } + ] + } + ] + }, + { + "description": "BulkWrite with document comment", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "operations": [ + { + "object": "collection0", + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "insertOne": { + "document": { + "_id": 5, + "x": "inserted" + } + } + }, + { + "replaceOne": { + "filter": { + "_id": 1 + }, + "replacement": { + "_id": 1, + "x": "replaced" + } + } + }, + { + "updateOne": { + "filter": { + "_id": 2 + }, + "update": { + "$set": { + "x": "updated" + } + } + } + }, + { + "deleteOne": { + "filter": { + "_id": 3 + } + } + } + ], + "comment": { + "key": "value" + } + }, + "expectResult": { + "deletedCount": 1, + "insertedCount": 1, + "insertedIds": { + "$$unsetOrMatches": { + "0": 5 + } + }, + "matchedCount": 2, + "modifiedCount": 2, + "upsertedCount": 0, + "upsertedIds": {} + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "BulkWrite_comment", + "documents": [ + { + "_id": 5, + "x": "inserted" + } + ], + "ordered": true, + "comment": { + "key": "value" + } + } + } + }, + { + "commandStartedEvent": { + "command": { + "update": "BulkWrite_comment", + "updates": [ + { + "q": { + "_id": 1 + }, + "u": { + "_id": 1, + "x": "replaced" + } + }, + { + "q": { + "_id": 2 + }, + "u": { + "$set": { + "x": "updated" + } + } + } + ], + "ordered": true, + "comment": { + "key": "value" + } + } + } + }, + { + "commandStartedEvent": { + "command": { + "delete": "BulkWrite_comment", + "deletes": [ + { + "q": { + "_id": 3 + }, + "limit": 1 + } + ], + "ordered": true, + "comment": { + "key": "value" + } + } + } + } + ] + } + 
], + "outcome": [ + { + "collectionName": "BulkWrite_comment", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": "replaced" + }, + { + "_id": 2, + "x": "updated" + }, + { + "_id": 4, + "x": 44 + }, + { + "_id": 5, + "x": "inserted" + } + ] + } + ] + }, + { + "description": "BulkWrite with comment - pre 4.4", + "runOnRequirements": [ + { + "maxServerVersion": "4.2.99" + } + ], + "operations": [ + { + "object": "collection0", + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "insertOne": { + "document": { + "_id": 5, + "x": "inserted" + } + } + }, + { + "replaceOne": { + "filter": { + "_id": 1 + }, + "replacement": { + "_id": 1, + "x": "replaced" + } + } + }, + { + "updateOne": { + "filter": { + "_id": 2 + }, + "update": { + "$set": { + "x": "updated" + } + } + } + }, + { + "deleteOne": { + "filter": { + "_id": 3 + } + } + } + ], + "comment": "comment" + }, + "expectError": { + "isClientError": false + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "BulkWrite_comment", + "documents": [ + { + "_id": 5, + "x": "inserted" + } + ], + "ordered": true, + "comment": "comment" + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "BulkWrite_comment", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/deleteMany-comment.json b/test/crud/unified/deleteMany-comment.json new file mode 100644 index 0000000000..ea6a8524d9 --- /dev/null +++ b/test/crud/unified/deleteMany-comment.json @@ -0,0 +1,244 @@ +{ + "description": "deleteMany-comment", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2, + "name": "name2" + }, + { + "_id": 3, + "name": "name3" + } + ] + } + ], + "tests": [ + { + "description": "deleteMany with string comment", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "operations": [ + { + "name": "deleteMany", + "object": "collection0", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "comment": "comment" + }, + "expectResult": { + "deletedCount": 2 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "delete": "coll0", + "deletes": [ + { + "q": { + "_id": { + "$gt": 1 + } + }, + "limit": 0 + } + ], + "comment": "comment" + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ] + }, + { + "description": "deleteMany with document comment", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "operations": [ + { + "name": "deleteMany", + "object": "collection0", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "comment": { + "key": "value" + } + }, + "expectResult": { + "deletedCount": 2 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "delete": "coll0", + "deletes": [ + 
{ + "q": { + "_id": { + "$gt": 1 + } + }, + "limit": 0 + } + ], + "comment": { + "key": "value" + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ] + }, + { + "description": "deleteMany with comment - pre 4.4", + "runOnRequirements": [ + { + "maxServerVersion": "4.2.99" + } + ], + "operations": [ + { + "name": "deleteMany", + "object": "collection0", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "comment": "comment" + }, + "expectError": { + "isClientError": false + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "delete": "coll0", + "deletes": [ + { + "q": { + "_id": { + "$gt": 1 + } + }, + "limit": 0 + } + ], + "comment": "comment" + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2, + "name": "name2" + }, + { + "_id": 3, + "name": "name3" + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/deleteOne-comment.json b/test/crud/unified/deleteOne-comment.json new file mode 100644 index 0000000000..37f356ec6f --- /dev/null +++ b/test/crud/unified/deleteOne-comment.json @@ -0,0 +1,242 @@ +{ + "description": "deleteOne-comment", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2, + "name": "name" + }, + { + "_id": 3, + "name": "name" + } + ] + } + ], + "tests": [ + { + "description": "deleteOne with string comment", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "operations": [ + { + "name": "deleteOne", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "comment": "comment" + }, + "expectResult": { + "deletedCount": 1 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "delete": "coll0", + "deletes": [ + { + "q": { + "_id": 1 + }, + "limit": 1 + } + ], + "comment": "comment" + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 2, + "name": "name" + }, + { + "_id": 3, + "name": "name" + } + ] + } + ] + }, + { + "description": "deleteOne with document comment", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "operations": [ + { + "name": "deleteOne", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "comment": { + "key": "value" + } + }, + "expectResult": { + "deletedCount": 1 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "delete": "coll0", + "deletes": [ + { + "q": { + "_id": 1 + }, + "limit": 1 + } + ], + "comment": { + "key": "value" + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 2, + "name": "name" + }, + { + "_id": 3, + "name": "name" + } + ] + } + ] + }, + { + "description": "deleteOne with comment - pre 4.4", + "runOnRequirements": [ + { + 
"maxServerVersion": "4.2.99" + } + ], + "operations": [ + { + "name": "deleteOne", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "comment": "comment" + }, + "expectError": { + "isClientError": false + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "delete": "coll0", + "deletes": [ + { + "q": { + "_id": 1 + }, + "limit": 1 + } + ], + "comment": "comment" + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2, + "name": "name" + }, + { + "_id": 3, + "name": "name" + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/find-comment.json b/test/crud/unified/find-comment.json new file mode 100644 index 0000000000..6000bb0172 --- /dev/null +++ b/test/crud/unified/find-comment.json @@ -0,0 +1,298 @@ +{ + "description": "find-comment", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + }, + { + "_id": 5, + "x": 55 + }, + { + "_id": 6, + "x": 66 + } + ] + } + ], + "tests": [ + { + "description": "find with string comment", + "runOnRequirements": [ + { + "minServerVersion": "3.6" + } + ], + "operations": [ + { + "name": "find", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "comment": "comment" + }, + "expectResult": [ + { + "_id": 1 + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "coll0", + "filter": { + "_id": 1 + }, + "comment": "comment" + } + } + } + ] + } + ] + }, + { + "description": "find with document comment", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "operations": [ + { + "name": "find", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "comment": { + "key": "value" + } + }, + "expectResult": [ + { + "_id": 1 + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "coll0", + "filter": { + "_id": 1 + }, + "comment": { + "key": "value" + } + } + } + } + ] + } + ] + }, + { + "description": "find with document comment - pre 4.4", + "runOnRequirements": [ + { + "maxServerVersion": "4.2.99", + "minServerVersion": "3.6" + } + ], + "operations": [ + { + "name": "find", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "comment": { + "key": "value" + } + }, + "expectError": { + "isClientError": false + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "coll0", + "filter": { + "_id": 1 + }, + "comment": { + "key": "value" + } + } + } + } + ] + } + ] + }, + { + "description": "find with comment does not set comment on getMore", + "runOnRequirements": [ + { + "minServerVersion": "3.6" + } + ], + "operations": [ + { + "name": "find", + "object": "collection0", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + 
"batchSize": 2, + "comment": "comment" + }, + "expectResult": [ + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + }, + { + "_id": 5, + "x": 55 + }, + { + "_id": 6, + "x": 66 + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "coll0", + "filter": { + "_id": { + "$gt": 1 + } + }, + "batchSize": 2, + "comment": "comment" + } + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "collection": "coll0", + "batchSize": 2, + "comment": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "collection": "coll0", + "batchSize": 2, + "comment": { + "$$exists": false + } + } + } + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/findOneAndDelete-comment.json b/test/crud/unified/findOneAndDelete-comment.json new file mode 100644 index 0000000000..6853b9cc2d --- /dev/null +++ b/test/crud/unified/findOneAndDelete-comment.json @@ -0,0 +1,211 @@ +{ + "description": "findOneAndDelete-comment", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ], + "tests": [ + { + "description": "findOneAndDelete with string comment", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "operations": [ + { + "name": "findOneAndDelete", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "comment": "comment" + }, + "expectResult": { + "_id": 1 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "findAndModify": "coll0", + "query": { + "_id": 1 + }, + "remove": true, + "comment": "comment" + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 2 + } + ] + } + ] + }, + { + "description": "findOneAndDelete with document comment", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "operations": [ + { + "name": "findOneAndDelete", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "comment": { + "key": "value" + } + }, + "expectResult": { + "_id": 1 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "findAndModify": "coll0", + "query": { + "_id": 1 + }, + "remove": true, + "comment": { + "key": "value" + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 2 + } + ] + } + ] + }, + { + "description": "findOneAndDelete with comment - pre 4.4", + "runOnRequirements": [ + { + "minServerVersion": "4.2.0", + "maxServerVersion": "4.2.99" + } + ], + "operations": [ + { + "name": "findOneAndDelete", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "comment": "comment" + }, + "expectError": { + "isClientError": false + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + 
"commandStartedEvent": { + "command": { + "findAndModify": "coll0", + "query": { + "_id": 1 + }, + "remove": true, + "comment": "comment" + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/findOneAndReplace-comment.json b/test/crud/unified/findOneAndReplace-comment.json new file mode 100644 index 0000000000..f817bb6937 --- /dev/null +++ b/test/crud/unified/findOneAndReplace-comment.json @@ -0,0 +1,234 @@ +{ + "description": "findOneAndReplace-comment", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ], + "tests": [ + { + "description": "findOneAndReplace with string comment", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "operations": [ + { + "name": "findOneAndReplace", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "replacement": { + "x": 5 + }, + "comment": "comment" + }, + "expectResult": { + "_id": 1 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "findAndModify": "coll0", + "query": { + "_id": 1 + }, + "update": { + "x": 5 + }, + "comment": "comment" + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 5 + }, + { + "_id": 2 + } + ] + } + ] + }, + { + "description": "findOneAndReplace with document comment", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "operations": [ + { + "name": "findOneAndReplace", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "replacement": { + "x": 5 + }, + "comment": { + "key": "value" + } + }, + "expectResult": { + "_id": 1 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "findAndModify": "coll0", + "query": { + "_id": 1 + }, + "update": { + "x": 5 + }, + "comment": { + "key": "value" + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 5 + }, + { + "_id": 2 + } + ] + } + ] + }, + { + "description": "findOneAndReplace with comment - pre 4.4", + "runOnRequirements": [ + { + "minServerVersion": "4.2.0", + "maxServerVersion": "4.2.99" + } + ], + "operations": [ + { + "name": "findOneAndReplace", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "replacement": { + "x": 5 + }, + "comment": "comment" + }, + "expectError": { + "isClientError": false + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "findAndModify": "coll0", + "query": { + "_id": 1 + }, + "update": { + "x": 5 + }, + "comment": "comment" + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ] + } + ] +} diff --git 
a/test/crud/unified/findOneAndUpdate-comment.json b/test/crud/unified/findOneAndUpdate-comment.json new file mode 100644 index 0000000000..6dec5b39ee --- /dev/null +++ b/test/crud/unified/findOneAndUpdate-comment.json @@ -0,0 +1,228 @@ +{ + "description": "findOneAndUpdate-comment", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ], + "tests": [ + { + "description": "findOneAndUpdate with string comment", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "operations": [ + { + "name": "findOneAndUpdate", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "update": [ + { + "$set": { + "x": 5 + } + } + ], + "comment": "comment" + }, + "expectResult": { + "_id": 1 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "findAndModify": "coll0", + "query": { + "_id": 1 + }, + "update": [ + { + "$set": { + "x": 5 + } + } + ], + "comment": "comment" + } + } + } + ] + } + ] + }, + { + "description": "findOneAndUpdate with document comment", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "operations": [ + { + "name": "findOneAndUpdate", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "update": [ + { + "$set": { + "x": 5 + } + } + ], + "comment": { + "key": "value" + } + }, + "expectResult": { + "_id": 1 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "findAndModify": "coll0", + "query": { + "_id": 1 + }, + "update": [ + { + "$set": { + "x": 5 + } + } + ], + "comment": { + "key": "value" + } + } + } + } + ] + } + ] + }, + { + "description": "findOneAndUpdate with comment - pre 4.4", + "runOnRequirements": [ + { + "minServerVersion": "4.2.0", + "maxServerVersion": "4.2.99" + } + ], + "operations": [ + { + "name": "findOneAndUpdate", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "update": [ + { + "$set": { + "x": 5 + } + } + ], + "comment": "comment" + }, + "expectError": { + "isClientError": false + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "findAndModify": "coll0", + "query": { + "_id": 1 + }, + "update": [ + { + "$set": { + "x": 5 + } + } + ], + "comment": "comment" + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/insertMany-comment.json b/test/crud/unified/insertMany-comment.json new file mode 100644 index 0000000000..7e835e8011 --- /dev/null +++ b/test/crud/unified/insertMany-comment.json @@ -0,0 +1,225 @@ +{ + "description": "insertMany-comment", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": 
"database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 11 + } + ] + } + ], + "tests": [ + { + "description": "insertMany with string comment", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "operations": [ + { + "name": "insertMany", + "object": "collection0", + "arguments": { + "documents": [ + { + "_id": 2, + "x": 22 + } + ], + "comment": "comment" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "coll0", + "documents": [ + { + "_id": 2, + "x": 22 + } + ], + "comment": "comment" + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ] + }, + { + "description": "insertMany with document comment", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "operations": [ + { + "name": "insertMany", + "object": "collection0", + "arguments": { + "documents": [ + { + "_id": 2, + "x": 22 + } + ], + "comment": { + "key": "value" + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "coll0", + "documents": [ + { + "_id": 2, + "x": 22 + } + ], + "comment": { + "key": "value" + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ] + }, + { + "description": "insertMany with comment - pre 4.4", + "runOnRequirements": [ + { + "maxServerVersion": "4.2.99" + } + ], + "operations": [ + { + "name": "insertMany", + "object": "collection0", + "arguments": { + "documents": [ + { + "_id": 2, + "x": 22 + } + ], + "comment": "comment" + }, + "expectError": { + "isClientError": false + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "coll0", + "documents": [ + { + "_id": 2, + "x": 22 + } + ], + "comment": "comment" + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 11 + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/insertOne-comment.json b/test/crud/unified/insertOne-comment.json new file mode 100644 index 0000000000..a9f735ab6c --- /dev/null +++ b/test/crud/unified/insertOne-comment.json @@ -0,0 +1,219 @@ +{ + "description": "insertOne-comment", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 11 + } + ] + } + ], + "tests": [ + { + "description": "insertOne with string comment", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "operations": [ + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "_id": 2, + "x": 22 + }, + "comment": "comment" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { 
+ "insert": "coll0", + "documents": [ + { + "_id": 2, + "x": 22 + } + ], + "comment": "comment" + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ] + }, + { + "description": "insertOne with document comment", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "operations": [ + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "_id": 2, + "x": 22 + }, + "comment": { + "key": "value" + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "coll0", + "documents": [ + { + "_id": 2, + "x": 22 + } + ], + "comment": { + "key": "value" + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ] + }, + { + "description": "insertOne with comment - pre 4.4", + "runOnRequirements": [ + { + "maxServerVersion": "4.2.99" + } + ], + "operations": [ + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "_id": 2, + "x": 22 + }, + "comment": "comment" + }, + "expectError": { + "isClientError": false + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "coll0", + "documents": [ + { + "_id": 2, + "x": 22 + } + ], + "comment": "comment" + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 11 + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/replaceOne-comment.json b/test/crud/unified/replaceOne-comment.json new file mode 100644 index 0000000000..02fe90a44d --- /dev/null +++ b/test/crud/unified/replaceOne-comment.json @@ -0,0 +1,229 @@ +{ + "description": "replaceOne-comment", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 11 + } + ] + } + ], + "tests": [ + { + "description": "ReplaceOne with string comment", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "operations": [ + { + "name": "replaceOne", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "replacement": { + "x": 22 + }, + "comment": "comment" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "coll0", + "updates": [ + { + "q": { + "_id": 1 + }, + "u": { + "x": 22 + } + } + ], + "comment": "comment" + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 22 + } + ] + } + ] + }, + { + "description": "ReplaceOne with document comment", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "operations": [ + { + "name": "replaceOne", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "replacement": { + "x": 22 + }, + "comment": { + "key": "value" + } + } + } + 
], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "coll0", + "updates": [ + { + "q": { + "_id": 1 + }, + "u": { + "x": 22 + } + } + ], + "comment": { + "key": "value" + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 22 + } + ] + } + ] + }, + { + "description": "ReplaceOne with comment - pre 4.4", + "runOnRequirements": [ + { + "maxServerVersion": "4.2.99" + } + ], + "operations": [ + { + "name": "replaceOne", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "replacement": { + "x": 22 + }, + "comment": "comment" + }, + "expectError": { + "isClientError": false + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "coll0", + "updates": [ + { + "q": { + "_id": 1 + }, + "u": { + "x": 22 + } + } + ], + "comment": "comment" + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 11 + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/updateMany-comment.json b/test/crud/unified/updateMany-comment.json new file mode 100644 index 0000000000..26abd92ed4 --- /dev/null +++ b/test/crud/unified/updateMany-comment.json @@ -0,0 +1,244 @@ +{ + "description": "updateMany-comment", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 11 + } + ] + } + ], + "tests": [ + { + "description": "UpdateMany with string comment", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "operations": [ + { + "name": "updateMany", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "update": { + "$set": { + "x": 22 + } + }, + "comment": "comment" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "coll0", + "updates": [ + { + "q": { + "_id": 1 + }, + "u": { + "$set": { + "x": 22 + } + }, + "multi": true + } + ], + "comment": "comment" + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 22 + } + ] + } + ] + }, + { + "description": "UpdateMany with document comment", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "operations": [ + { + "name": "updateMany", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "update": { + "$set": { + "x": 22 + } + }, + "comment": { + "key": "value" + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "coll0", + "updates": [ + { + "q": { + "_id": 1 + }, + "u": { + "$set": { + "x": 22 + } + }, + "multi": true + } + ], + "comment": { + "key": "value" + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 22 + } + ] + } + ] + }, + { + "description": "UpdateMany with comment - pre 
4.4", + "runOnRequirements": [ + { + "maxServerVersion": "4.2.99" + } + ], + "operations": [ + { + "name": "updateMany", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "update": { + "$set": { + "x": 22 + } + }, + "comment": "comment" + }, + "expectError": { + "isClientError": false + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "coll0", + "updates": [ + { + "q": { + "_id": 1 + }, + "u": { + "$set": { + "x": 22 + } + }, + "multi": true + } + ], + "comment": "comment" + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 11 + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/updateOne-comment.json b/test/crud/unified/updateOne-comment.json new file mode 100644 index 0000000000..9b3b71d395 --- /dev/null +++ b/test/crud/unified/updateOne-comment.json @@ -0,0 +1,241 @@ +{ + "description": "updateOne-comment", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 11 + } + ] + } + ], + "tests": [ + { + "description": "UpdateOne with string comment", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "operations": [ + { + "name": "updateOne", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "update": { + "$set": { + "x": 22 + } + }, + "comment": "comment" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "coll0", + "updates": [ + { + "q": { + "_id": 1 + }, + "u": { + "$set": { + "x": 22 + } + } + } + ], + "comment": "comment" + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 22 + } + ] + } + ] + }, + { + "description": "UpdateOne with document comment", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "operations": [ + { + "name": "updateOne", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "update": { + "$set": { + "x": 22 + } + }, + "comment": { + "key": "value" + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "coll0", + "updates": [ + { + "q": { + "_id": 1 + }, + "u": { + "$set": { + "x": 22 + } + } + } + ], + "comment": { + "key": "value" + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 22 + } + ] + } + ] + }, + { + "description": "UpdateOne with comment - pre 4.4", + "runOnRequirements": [ + { + "maxServerVersion": "4.2.99" + } + ], + "operations": [ + { + "name": "updateOne", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "update": { + "$set": { + "x": 22 + } + }, + "comment": "comment" + }, + "expectError": { + "isClientError": false + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "coll0", + "updates": [ 
+ { + "q": { + "_id": 1 + }, + "u": { + "$set": { + "x": 22 + } + } + } + ], + "comment": "comment" + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 11 + } + ] + } + ] + } + ] +} diff --git a/test/test_comment.py b/test/test_comment.py new file mode 100644 index 0000000000..1c0e741621 --- /dev/null +++ b/test/test_comment.py @@ -0,0 +1,183 @@ +# Copyright 2022-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Test the keyword argument 'comment' in various helpers.""" + +import inspect +import sys +from collections import defaultdict +from typing import Any, Union + +sys.path[0:0] = [""] + +from test import IntegrationTest, SkipTest, client_context, unittest +from test.utils import EventListener, rs_or_single_client + +from bson.dbref import DBRef +from pymongo.collection import Collection +from pymongo.command_cursor import CommandCursor +from pymongo.database import Database +from pymongo.mongo_client import MongoClient +from pymongo.operations import IndexModel +from pymongo.read_concern import ReadConcern +from pymongo.read_preferences import ReadPreference +from pymongo.write_concern import WriteConcern + + +class Empty(object): + def __getattr__(self, item): + try: + self.__dict__[item] + except KeyError: + return self.empty + + def empty(self, *args, **kwargs): + return Empty() + + +class TestComment(IntegrationTest): + def _test_ops(self, helpers, already_supported, listener, db=Empty(), coll=Empty()): + results = listener.results + for h, args in helpers: + c = "testing comment with " + h.__name__ + with self.subTest("collection-" + h.__name__ + "-comment"): + for cc in [c, {"key": c}, ["any", 1]]: + results.clear() + kwargs = {"comment": cc} + if h == coll.rename: + tmp = db.get_collection("temp_temp_temp").drop() + destruct_coll = db.get_collection("test_temp") + destruct_coll.insert_one({}) + maybe_cursor = destruct_coll.rename(*args, **kwargs) + destruct_coll.drop() + elif h == db.validate_collection: + coll = db.get_collection("test") + coll.insert_one({}) + maybe_cursor = db.validate_collection(*args, **kwargs) + else: + coll.create_index("a") + maybe_cursor = h(*args, **kwargs) + self.assertIn( + "comment", + inspect.signature(h).parameters, + msg="Could not find 'comment' in the " + "signature of function %s" % (h.__name__), + ) + self.assertEqual( + inspect.signature(h).parameters["comment"].annotation, Union[Any, None] + ) + if isinstance(maybe_cursor, CommandCursor): + maybe_cursor.close() + tested = False + # For some reason collection.list_indexes creates two commands and the first + # one doesn't contain 'comment'. 
+ for i in results["started"]: + if cc == i.command.get("comment", ""): + self.assertEqual(cc, i.command["comment"]) + tested = True + self.assertTrue(tested) + if h not in [coll.aggregate_raw_batches]: + self.assertIn( + "`comment` (optional):", + h.__doc__, + ) + if h not in already_supported: + self.assertIn( + "Added ``comment`` parameter", + h.__doc__, + ) + else: + self.assertNotIn( + "Added ``comment`` parameter", + h.__doc__, + ) + + results.clear() + + @client_context.require_version_min(4, 7, -1) + @client_context.require_replica_set + def test_database_helpers(self): + listener = EventListener() + db = rs_or_single_client(event_listeners=[listener]).db + helpers = [ + (db.watch, []), + (db.command, ["hello"]), + (db.list_collections, []), + (db.list_collection_names, []), + (db.drop_collection, ["hello"]), + (db.validate_collection, ["test"]), + (db.dereference, [DBRef("collection", 1)]), + ] + already_supported = [db.command, db.list_collections, db.list_collection_names] + self._test_ops(helpers, already_supported, listener, db=db, coll=db.get_collection("test")) + + @client_context.require_version_min(4, 7, -1) + @client_context.require_replica_set + def test_client_helpers(self): + listener = EventListener() + cli = rs_or_single_client(event_listeners=[listener]) + helpers = [ + (cli.watch, []), + (cli.list_databases, []), + (cli.list_database_names, []), + (cli.drop_database, ["test"]), + ] + already_supported = [ + cli.list_databases, + ] + self._test_ops(helpers, already_supported, listener) + + @client_context.require_version_min(4, 7, -1) + def test_collection_helpers(self): + listener = EventListener() + db = rs_or_single_client(event_listeners=[listener])[self.db.name] + coll = db.get_collection("test") + + helpers = [ + (coll.list_indexes, []), + (coll.drop, []), + (coll.index_information, []), + (coll.options, []), + (coll.aggregate, [[{"$set": {"x": 1}}]]), + (coll.aggregate_raw_batches, [[{"$set": {"x": 1}}]]), + (coll.rename, ["temp_temp_temp"]), + (coll.distinct, ["_id"]), + (coll.find_one_and_delete, [{}]), + (coll.find_one_and_replace, [{}, {}]), + (coll.find_one_and_update, [{}, {"$set": {"a": 1}}]), + (coll.estimated_document_count, []), + (coll.count_documents, [{}]), + (coll.create_indexes, [[IndexModel("a")]]), + (coll.create_index, ["a"]), + (coll.drop_index, [[("a", 1)]]), + (coll.drop_indexes, []), + ] + already_supported = [ + coll.estimated_document_count, + coll.count_documents, + coll.create_indexes, + coll.drop_indexes, + coll.options, + coll.find_one_and_replace, + coll.drop_index, + coll.rename, + coll.distinct, + coll.find_one_and_delete, + coll.find_one_and_update, + ] + self._test_ops(helpers, already_supported, listener, coll=coll, db=db) + + +if __name__ == "__main__": + unittest.main() From cbc7cc33e54756fa797ea94883815c10ae788002 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Thu, 10 Feb 2022 18:54:46 -0600 Subject: [PATCH 0068/1588] PYTHON-3073 Copy the unit tests from pymongo-stubs into pymongo (#859) --- .github/workflows/test-python.yml | 3 +- test/mypy_fails/insert_many_dict.py | 6 ++ test/mypy_fails/insert_one_list.py | 6 ++ test/test_bson.py | 10 +++ test/test_mypy.py | 125 ++++++++++++++++++++++++++++ 5 files changed, 149 insertions(+), 1 deletion(-) create mode 100644 test/mypy_fails/insert_many_dict.py create mode 100644 test/mypy_fails/insert_one_list.py create mode 100644 test/test_mypy.py diff --git a/.github/workflows/test-python.yml b/.github/workflows/test-python.yml index 651f863d89..4b5f762786 100644 --- 
a/.github/workflows/test-python.yml +++ b/.github/workflows/test-python.yml @@ -37,6 +37,7 @@ jobs: mongodb-version: 4.4 - name: Run tests run: | + pip install mypy python setup.py test mypytest: @@ -59,4 +60,4 @@ jobs: - name: Run mypy run: | mypy --install-types --non-interactive bson gridfs tools pymongo - mypy --install-types --non-interactive --disable-error-code var-annotated --disable-error-code attr-defined --disable-error-code union-attr --disable-error-code assignment --disable-error-code no-redef --disable-error-code index test + mypy --install-types --non-interactive --disable-error-code var-annotated --disable-error-code attr-defined --disable-error-code union-attr --disable-error-code assignment --disable-error-code no-redef --disable-error-code index --exclude "test/mypy_fails/*.*" test diff --git a/test/mypy_fails/insert_many_dict.py b/test/mypy_fails/insert_many_dict.py new file mode 100644 index 0000000000..6e8acb67b4 --- /dev/null +++ b/test/mypy_fails/insert_many_dict.py @@ -0,0 +1,6 @@ +from pymongo import MongoClient + +client = MongoClient() +client.test.test.insert_many( + {"a": 1} +) # error: Dict entry 0 has incompatible type "str": "int"; expected "Mapping[str, Any]": "int" diff --git a/test/mypy_fails/insert_one_list.py b/test/mypy_fails/insert_one_list.py new file mode 100644 index 0000000000..7a26a3ff79 --- /dev/null +++ b/test/mypy_fails/insert_one_list.py @@ -0,0 +1,6 @@ +from pymongo import MongoClient + +client = MongoClient() +client.test.test.insert_one( + [{}] +) # error: Argument 1 to "insert_one" of "Collection" has incompatible type "List[Dict[, ]]"; expected "Mapping[str, Any]" diff --git a/test/test_bson.py b/test/test_bson.py index f8f587567d..46aa6e5d9a 100644 --- a/test/test_bson.py +++ b/test/test_bson.py @@ -1117,6 +1117,16 @@ def test_int64_pickling(self): ) self.round_trip_pickle(i64, pickled_with_3) + def test_bson_encode_decode(self) -> None: + doc = {"_id": ObjectId()} + encoded = bson.encode(doc) + decoded = bson.decode(encoded) + encoded = bson.encode(decoded) + decoded = bson.decode(encoded) + # Documents returned from decode are mutable. + decoded["new_field"] = 1 + self.assertTrue(decoded["_id"].generation_time) + if __name__ == "__main__": unittest.main() diff --git a/test/test_mypy.py b/test/test_mypy.py new file mode 100644 index 0000000000..0f1498c64b --- /dev/null +++ b/test/test_mypy.py @@ -0,0 +1,125 @@ +# Copyright 2020-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""Test that each file in mypy_fails/ actually fails mypy, and test some +sample client code that uses PyMongo typings.""" + +import os +import sys +import unittest +from typing import Any, Dict, Iterable, List + +try: + from mypy import api +except ImportError: + api = None + +from bson.son import SON +from pymongo.collection import Collection +from pymongo.errors import ServerSelectionTimeoutError +from pymongo.mongo_client import MongoClient +from pymongo.operations import InsertOne + +TEST_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "mypy_fails") + + +def get_tests() -> Iterable[str]: + for dirpath, _, filenames in os.walk(TEST_PATH): + for filename in filenames: + yield os.path.join(dirpath, filename) + + +class TestMypyFails(unittest.TestCase): + def ensure_mypy_fails(self, filename: str) -> None: + if api is None: + raise unittest.SkipTest("Mypy is not installed") + stdout, stderr, exit_status = api.run([filename]) + self.assertTrue(exit_status, msg=stdout) + + def test_mypy_failures(self) -> None: + for filename in get_tests(): + with self.subTest(filename=filename): + self.ensure_mypy_fails(filename) + + +class TestPymongo(unittest.TestCase): + client: MongoClient + coll: Collection + + @classmethod + def setUpClass(cls) -> None: + cls.client = MongoClient(serverSelectionTimeoutMS=250, directConnection=False) + cls.coll = cls.client.test.test + try: + cls.client.admin.command("ping") + except ServerSelectionTimeoutError as exc: + raise unittest.SkipTest(f"Could not connect to MongoDB: {exc}") + + @classmethod + def tearDownClass(cls) -> None: + cls.client.close() + + def test_insert_find(self) -> None: + doc = {"my": "doc"} + coll2 = self.client.test.test2 + result = self.coll.insert_one(doc) + self.assertEqual(result.inserted_id, doc["_id"]) + retreived = self.coll.find_one({"_id": doc["_id"]}) + if retreived: + # Documents returned from find are mutable. 
+ retreived["new_field"] = 1 + result2 = coll2.insert_one(retreived) + self.assertEqual(result2.inserted_id, result.inserted_id) + + def test_cursor_iterable(self) -> None: + def to_list(iterable: Iterable[Dict[str, Any]]) -> List[Dict[str, Any]]: + return list(iterable) + + self.coll.insert_one({}) + cursor = self.coll.find() + docs = to_list(cursor) + self.assertTrue(docs) + + def test_bulk_write(self) -> None: + self.coll.insert_one({}) + requests = [InsertOne({})] + result = self.coll.bulk_write(requests) + self.assertTrue(result.acknowledged) + + def test_aggregate_pipeline(self) -> None: + coll3 = self.client.test.test3 + coll3.insert_many( + [ + {"x": 1, "tags": ["dog", "cat"]}, + {"x": 2, "tags": ["cat"]}, + {"x": 2, "tags": ["mouse", "cat", "dog"]}, + {"x": 3, "tags": []}, + ] + ) + + class mydict(Dict[str, Any]): + pass + + result = coll3.aggregate( + [ + mydict({"$unwind": "$tags"}), + {"$group": {"_id": "$tags", "count": {"$sum": 1}}}, + {"$sort": SON([("count", -1), ("_id", -1)])}, + ] + ) + self.assertTrue(len(list(result))) + + +if __name__ == "__main__": + unittest.main() From c47557bc63cd60d5f709da3ec6fbdaa9fb783c7e Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Thu, 10 Feb 2022 18:59:26 -0600 Subject: [PATCH 0069/1588] PYTHON-3062 Make Regex generic (#860) --- bson/regex.py | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) diff --git a/bson/regex.py b/bson/regex.py index 317c65049f..3e98477198 100644 --- a/bson/regex.py +++ b/bson/regex.py @@ -16,7 +16,7 @@ """ import re -from typing import Any, Pattern, Type, Union +from typing import Any, Generic, Pattern, Type, TypeVar, Union from bson._helpers import _getstate_slots, _setstate_slots from bson.son import RE_TYPE @@ -40,7 +40,10 @@ def str_flags_to_int(str_flags: str) -> int: return flags -class Regex(object): +_T = TypeVar("_T", str, bytes) + + +class Regex(Generic[_T]): """BSON regular expression data.""" __slots__ = ("pattern", "flags") @@ -51,7 +54,7 @@ class Regex(object): _type_marker = 11 @classmethod - def from_native(cls: Type["Regex"], regex: Pattern[Any]) -> "Regex": + def from_native(cls: Type["Regex"], regex: "Pattern[_T]") -> "Regex[_T]": """Convert a Python regular expression into a ``Regex`` instance. Note that in Python 3, a regular expression compiled from a @@ -80,7 +83,7 @@ def from_native(cls: Type["Regex"], regex: Pattern[Any]) -> "Regex": return Regex(regex.pattern, regex.flags) - def __init__(self, pattern: Union[str, bytes], flags: Union[str, int] = 0) -> None: + def __init__(self, pattern: _T, flags: Union[str, int] = 0) -> None: """BSON regular expression data. This class is useful to store and retrieve regular expressions that are @@ -93,7 +96,7 @@ def __init__(self, pattern: Union[str, bytes], flags: Union[str, int] = 0) -> No """ if not isinstance(pattern, (str, bytes)): raise TypeError("pattern must be a string, not %s" % type(pattern)) - self.pattern = pattern + self.pattern: _T = pattern if isinstance(flags, str): self.flags = str_flags_to_int(flags) @@ -116,7 +119,7 @@ def __ne__(self, other: Any) -> bool: def __repr__(self): return "Regex(%r, %r)" % (self.pattern, self.flags) - def try_compile(self) -> Pattern[Any]: + def try_compile(self) -> "Pattern[_T]": """Compile this :class:`Regex` as a Python regular expression. .. 
warning:: From 0700a84432f07c7f1294e79b850b10da8accc017 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Fri, 11 Feb 2022 06:32:01 -0600 Subject: [PATCH 0070/1588] PYTHON-1834 Add shellcheck (#858) --- .evergreen/build-manylinux.sh | 2 +- .evergreen/build-windows.sh | 4 ++-- .evergreen/release.sh | 2 +- .evergreen/run-mod-wsgi-tests.sh | 2 +- .evergreen/run-mongodb-aws-test.sh | 2 +- .evergreen/run-tests.sh | 17 +++++++++-------- .pre-commit-config.yaml | 9 +++++++++ .readthedocs.yaml | 1 + doc/conf.py | 1 + doc/docs-requirements.txt | 1 + pymongo/collection.py | 2 +- 11 files changed, 28 insertions(+), 15 deletions(-) diff --git a/.evergreen/build-manylinux.sh b/.evergreen/build-manylinux.sh index 602a8e1e6c..a9a7238cb2 100755 --- a/.evergreen/build-manylinux.sh +++ b/.evergreen/build-manylinux.sh @@ -27,7 +27,7 @@ fi for image in "${images[@]}"; do docker pull $image - docker run --rm -v `pwd`:/src $image /src/.evergreen/build-manylinux-internal.sh + docker run --rm -v "`pwd`:/src" $image /src/.evergreen/build-manylinux-internal.sh done ls dist diff --git a/.evergreen/build-windows.sh b/.evergreen/build-windows.sh index 97c7940769..3a33558cc9 100755 --- a/.evergreen/build-windows.sh +++ b/.evergreen/build-windows.sh @@ -9,8 +9,8 @@ mkdir -p validdist mv dist/* validdist || true for VERSION in 36 37 38 39 310; do - _pythons=(C:/Python/Python${VERSION}/python.exe \ - C:/Python/32/Python${VERSION}/python.exe) + _pythons=("C:/Python/Python${VERSION}/python.exe" \ + "C:/Python/32/Python${VERSION}/python.exe") for PYTHON in "${_pythons[@]}"; do rm -rf build $PYTHON setup.py bdist_wheel diff --git a/.evergreen/release.sh b/.evergreen/release.sh index 759786b934..1fdd459ad9 100755 --- a/.evergreen/release.sh +++ b/.evergreen/release.sh @@ -1,6 +1,6 @@ #!/bin/bash -ex -if [ $(uname -s) = "Darwin" ]; then +if [ "$(uname -s)" = "Darwin" ]; then .evergreen/build-mac.sh elif [ "Windows_NT" = "$OS" ]; then # Magic variable in cygwin .evergreen/build-windows.sh diff --git a/.evergreen/run-mod-wsgi-tests.sh b/.evergreen/run-mod-wsgi-tests.sh index 03d72e9701..9a167895f8 100644 --- a/.evergreen/run-mod-wsgi-tests.sh +++ b/.evergreen/run-mod-wsgi-tests.sh @@ -23,7 +23,7 @@ export PYTHONHOME=/opt/python/$PYTHON_VERSION cd .. $APACHE -k start -f ${PROJECT_DIRECTORY}/test/mod_wsgi_test/${APACHE_CONFIG} -trap "$APACHE -k stop -f ${PROJECT_DIRECTORY}/test/mod_wsgi_test/${APACHE_CONFIG}" EXIT HUP +trap '$APACHE -k stop -f ${PROJECT_DIRECTORY}/test/mod_wsgi_test/${APACHE_CONFIG}' EXIT HUP set +e wget -t 1 -T 10 -O - "http://localhost:8080${PROJECT_DIRECTORY}" diff --git a/.evergreen/run-mongodb-aws-test.sh b/.evergreen/run-mongodb-aws-test.sh index e51c12d609..9a33507cc8 100755 --- a/.evergreen/run-mongodb-aws-test.sh +++ b/.evergreen/run-mongodb-aws-test.sh @@ -40,7 +40,7 @@ fi set -x # Workaround macOS python 3.9 incompatibility with system virtualenv. -if [ $(uname -s) = "Darwin" ]; then +if [ "$(uname -s)" = "Darwin" ]; then VIRTUALENV="/Library/Frameworks/Python.framework/Versions/3.9/bin/python3 -m virtualenv" else VIRTUALENV=$(command -v virtualenv) diff --git a/.evergreen/run-tests.sh b/.evergreen/run-tests.sh index 69550ec932..7b9d051bd7 100755 --- a/.evergreen/run-tests.sh +++ b/.evergreen/run-tests.sh @@ -67,9 +67,9 @@ fi if [ -z "$PYTHON_BINARY" ]; then # Use Python 3 from the server toolchain to test on ARM, POWER or zSeries if a # system python3 doesn't exist or exists but is older than 3.6. 
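# An aside on the quoting changes below (my sketch, not part of the patch):
# shellcheck flags unquoted expansions such as $(command -v python3) because
# the result undergoes word splitting (SC2046/SC2086). A tiny illustration:
#
#     count_args() { echo "$#"; }
#     p="/opt/my python/bin/python3"
#     count_args $p      # prints 2: the unquoted value split on the space
#     count_args "$p"    # prints 1: quoting keeps the path as one word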
- if is_python_36 $(command -v python3); then + if is_python_36 "$(command -v python3)"; then PYTHON=$(command -v python3) - elif is_python_36 $(command -v /opt/mongodbtoolchain/v2/bin/python3); then + elif is_python_36 "$(command -v /opt/mongodbtoolchain/v2/bin/python3)"; then PYTHON=$(command -v /opt/mongodbtoolchain/v2/bin/python3) else echo "Cannot test without python3.6+ installed!" @@ -119,20 +119,21 @@ if [ -n "$TEST_ENCRYPTION" ]; then # Use the nocrypto build to avoid dependency issues with older windows/python versions. BASE=$(pwd)/libmongocrypt/nocrypto if [ -f "${BASE}/lib/libmongocrypt.so" ]; then - export PYMONGOCRYPT_LIB=${BASE}/lib/libmongocrypt.so + PYMONGOCRYPT_LIB=${BASE}/lib/libmongocrypt.so elif [ -f "${BASE}/lib/libmongocrypt.dylib" ]; then - export PYMONGOCRYPT_LIB=${BASE}/lib/libmongocrypt.dylib + PYMONGOCRYPT_LIB=${BASE}/lib/libmongocrypt.dylib elif [ -f "${BASE}/bin/mongocrypt.dll" ]; then PYMONGOCRYPT_LIB=${BASE}/bin/mongocrypt.dll # libmongocrypt's windows dll is not marked executable. chmod +x $PYMONGOCRYPT_LIB - export PYMONGOCRYPT_LIB=$(cygpath -m $PYMONGOCRYPT_LIB) + PYMONGOCRYPT_LIB=$(cygpath -m $PYMONGOCRYPT_LIB) elif [ -f "${BASE}/lib64/libmongocrypt.so" ]; then - export PYMONGOCRYPT_LIB=${BASE}/lib64/libmongocrypt.so + PYMONGOCRYPT_LIB=${BASE}/lib64/libmongocrypt.so else echo "Cannot find libmongocrypt shared object file" exit 1 fi + export PYMONGOCRYPT_LIB # TODO: Test with 'pip install pymongocrypt' git clone --branch master https://github.com/mongodb/libmongocrypt.git libmongocrypt_git @@ -175,7 +176,7 @@ $PYTHON -c 'import sys; print(sys.version)' # Only cover CPython. PyPy reports suspiciously low coverage. PYTHON_IMPL=$($PYTHON -c "import platform; print(platform.python_implementation())") COVERAGE_ARGS="" -if [ -n "$COVERAGE" -a $PYTHON_IMPL = "CPython" ]; then +if [ -n "$COVERAGE" ] && [ "$PYTHON_IMPL" = "CPython" ]; then if $PYTHON -m coverage --version; then echo "INFO: coverage is installed, running tests with coverage..." COVERAGE_ARGS="-m coverage run --branch" @@ -186,7 +187,7 @@ fi $PYTHON setup.py clean if [ -z "$GREEN_FRAMEWORK" ]; then - if [ -z "$C_EXTENSIONS" -a $PYTHON_IMPL = "CPython" ]; then + if [ -z "$C_EXTENSIONS" ] && [ "$PYTHON_IMPL" = "CPython" ]; then # Fail if the C extensions fail to build. # This always sets 0 for exit status, even if the build fails, due diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 39062bbdf5..b20ad7ae55 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -29,3 +29,12 @@ repos: - id: isort files: \.py$ args: [--profile=black] + +# We use the Python version instead of the original version which seems to require Docker +# https://github.com/koalaman/shellcheck-precommit +- repo: https://github.com/shellcheck-py/shellcheck-py + rev: v0.8.0.1 + hooks: + - id: shellcheck + name: shellcheck + args: ["--severity=warning"] diff --git a/.readthedocs.yaml b/.readthedocs.yaml index 358e7502f3..e2956c122b 100644 --- a/.readthedocs.yaml +++ b/.readthedocs.yaml @@ -8,6 +8,7 @@ version: 2 # Build documentation in the doc/ directory with Sphinx sphinx: configuration: doc/conf.py + fail_on_warning: true # Set the version of Python and requirements required to build the docs. 
python: diff --git a/doc/conf.py b/doc/conf.py index 47debcf14c..3f74a11d60 100644 --- a/doc/conf.py +++ b/doc/conf.py @@ -21,6 +21,7 @@ "sphinx.ext.coverage", "sphinx.ext.todo", "sphinx.ext.intersphinx", + "sphinxcontrib.shellcheck", ] # Add any paths that contain templates here, relative to this directory. diff --git a/doc/docs-requirements.txt b/doc/docs-requirements.txt index ce5d1abf36..455a47d217 100644 --- a/doc/docs-requirements.txt +++ b/doc/docs-requirements.txt @@ -1,3 +1,4 @@ Sphinx~=4.2 sphinx_rtd_theme~=0.5 readthedocs-sphinx-search~=0.1 +sphinxcontrib-shellcheck~=1.1 diff --git a/pymongo/collection.py b/pymongo/collection.py index df8db3f106..a61c905d29 100644 --- a/pymongo/collection.py +++ b/pymongo/collection.py @@ -2947,7 +2947,7 @@ def find_one_and_replace( as keyword arguments (for example maxTimeMS can be used with recent server versions). - + .. versionchanged:: 4.1 Added ``let`` parameter. .. versionchanged:: 3.11 Added the ``hint`` option. From 80314255d7fb10769a083fcbee3d613a7263bd92 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Fri, 11 Feb 2022 12:41:29 -0600 Subject: [PATCH 0071/1588] PYTHON-3092 Add Type Discovery Files (#863) --- MANIFEST.in | 3 +++ bson/py.typed | 2 ++ gridfs/py.typed | 2 ++ pymongo/py.typed | 2 ++ 4 files changed, 9 insertions(+) create mode 100644 bson/py.typed create mode 100644 gridfs/py.typed create mode 100644 pymongo/py.typed diff --git a/MANIFEST.in b/MANIFEST.in index d017d16ab0..726c631e89 100644 --- a/MANIFEST.in +++ b/MANIFEST.in @@ -12,3 +12,6 @@ include tools/README.rst recursive-include test *.pem recursive-include test *.py recursive-include bson *.h +include bson/py.typed +include gridfs/py.typed +include pymongo/py.typed diff --git a/bson/py.typed b/bson/py.typed new file mode 100644 index 0000000000..0f4057061a --- /dev/null +++ b/bson/py.typed @@ -0,0 +1,2 @@ +# PEP-561 Support File. +# "Package maintainers who wish to support type checking of their code MUST add a marker file named py.typed to their package supporting typing". diff --git a/gridfs/py.typed b/gridfs/py.typed new file mode 100644 index 0000000000..0f4057061a --- /dev/null +++ b/gridfs/py.typed @@ -0,0 +1,2 @@ +# PEP-561 Support File. +# "Package maintainers who wish to support type checking of their code MUST add a marker file named py.typed to their package supporting typing". diff --git a/pymongo/py.typed b/pymongo/py.typed new file mode 100644 index 0000000000..0f4057061a --- /dev/null +++ b/pymongo/py.typed @@ -0,0 +1,2 @@ +# PEP-561 Support File. +# "Package maintainers who wish to support type checking of their code MUST add a marker file named py.typed to their package supporting typing". 
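A note on the py.typed markers just added: under PEP 561, shipping this file tells type checkers to trust the inline annotations in the bson, gridfs, and pymongo packages instead of treating the installed distribution as untyped. A minimal sketch of downstream code that a checker can now verify against PyMongo's own annotations (illustrative only, not part of the patch):

    from pymongo import MongoClient
    from pymongo.collection import Collection

    client: MongoClient = MongoClient()
    coll: Collection = client.test.test  # checked against pymongo's inline types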
From 405c11dc2ccc36edd0cdac69056a58c0dec66c43 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Fri, 11 Feb 2022 12:43:02 -0600 Subject: [PATCH 0072/1588] PYTHON-3109 Test against latest rapid releases (#862) --- .evergreen/config.yml | 28 ++++++++++++++++++++++++++++ 1 file changed, 28 insertions(+) diff --git a/.evergreen/config.yml b/.evergreen/config.yml index d681815c12..d17054169f 100644 --- a/.evergreen/config.yml +++ b/.evergreen/config.yml @@ -1258,6 +1258,33 @@ tasks: TOPOLOGY: "sharded_cluster" - func: "run tests" + - name: "test-rapid-standalone" + tags: ["rapid", "standalone"] + commands: + - func: "bootstrap mongo-orchestration" + vars: + VERSION: "rapid" + TOPOLOGY: "server" + - func: "run tests" + + - name: "test-rapid-replica_set" + tags: ["rapid", "replica_set"] + commands: + - func: "bootstrap mongo-orchestration" + vars: + VERSION: "rapid" + TOPOLOGY: "replica_set" + - func: "run tests" + + - name: "test-rapid-sharded_cluster" + tags: ["rapid", "sharded_cluster"] + commands: + - func: "bootstrap mongo-orchestration" + vars: + VERSION: "rapid" + TOPOLOGY: "sharded_cluster" + - func: "run tests" + - name: "test-serverless" tags: ["serverless"] commands: @@ -2138,6 +2165,7 @@ buildvariants: auth-ssl: "*" display_name: "${platform} ${auth-ssl}" tasks: &all-server-versions + - ".rapid" - ".latest" - ".5.0" - ".4.4" From 341d489f38ad51620fab50bfc7c3f8c1227fefee Mon Sep 17 00:00:00 2001 From: Julius Park Date: Mon, 14 Feb 2022 11:26:14 -0800 Subject: [PATCH 0073/1588] PYTHON-3088 Update load balancer tests to support dedicated load balancer port (#866) --- .evergreen/config.yml | 6 ++++++ pymongo/pool.py | 7 ------- test/__init__.py | 4 ---- 3 files changed, 6 insertions(+), 11 deletions(-) diff --git a/.evergreen/config.yml b/.evergreen/config.yml index d17054169f..2a65324300 100644 --- a/.evergreen/config.yml +++ b/.evergreen/config.yml @@ -292,6 +292,7 @@ functions: DISABLE_TEST_COMMANDS=${DISABLE_TEST_COMMANDS} \ ORCHESTRATION_FILE=${ORCHESTRATION_FILE} \ REQUIRE_API_VERSION=${REQUIRE_API_VERSION} \ + LOAD_BALANCER=${LOAD_BALANCER} \ bash ${DRIVERS_TOOLS}/.evergreen/run-orchestration.sh # run-orchestration generates expansion file with the MONGODB_URI for the cluster - command: expansions.update @@ -457,6 +458,7 @@ functions: fi if [ -n "${test_loadbalancer}" ]; then export TEST_LOADBALANCER=1 + export LOAD_BALANCER=1 export SINGLE_MONGOS_LB_URI="${SINGLE_MONGOS_LB_URI}" export MULTI_MONGOS_LB_URI="${MULTI_MONGOS_LB_URI}" fi @@ -1712,8 +1714,12 @@ tasks: commands: - func: "bootstrap mongo-orchestration" vars: + VERSION: "latest" TOPOLOGY: "sharded_cluster" + LOAD_BALANCER: true - func: "run load-balancer" + vars: + LOAD_BALANCER: true - func: "run tests" - name: "test-fips-standalone" diff --git a/pymongo/pool.py b/pymongo/pool.py index d616408ef8..61945e2d5b 100644 --- a/pymongo/pool.py +++ b/pymongo/pool.py @@ -248,9 +248,6 @@ def _set_keepalive_times(sock): # main thread, to avoid the deadlock. See PYTHON-607. "foo".encode("idna") -# Remove after PYTHON-2712 -_MOCK_SERVICE_ID = False - def _raise_connection_failure(address, error, msg_prefix=None): """Convert a socket.error to ConnectionFailure and raise it.""" @@ -633,10 +630,6 @@ def _hello(self, cluster_time, topology_version, heartbeat_frequency): auth_ctx = None doc = self.command("admin", cmd, publish_events=False, exhaust_allowed=awaitable) - # PYTHON-2712 will remove this topologyVersion fallback logic. 
- if self.opts.load_balanced and _MOCK_SERVICE_ID: - process_id = doc.get("topologyVersion", {}).get("processId") - doc.setdefault("serviceId", process_id) if not self.opts.load_balanced: doc.pop("serviceId", None) hello = Hello(doc, awaitable=awaitable) diff --git a/test/__init__.py b/test/__init__.py index 32220cfff3..d75c011547 100644 --- a/test/__init__.py +++ b/test/__init__.py @@ -101,10 +101,6 @@ SINGLE_MONGOS_LB_URI = os.environ.get("SINGLE_MONGOS_LB_URI") MULTI_MONGOS_LB_URI = os.environ.get("MULTI_MONGOS_LB_URI") if TEST_LOADBALANCER: - # Remove after PYTHON-2712 - from pymongo import pool - - pool._MOCK_SERVICE_ID = True res = parse_uri(SINGLE_MONGOS_LB_URI or "") host, port = res["nodelist"][0] db_user = res["username"] or db_user From 2db512f5d509e52ed0bc9cde55acd028dc81e022 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Mon, 14 Feb 2022 16:14:36 -0600 Subject: [PATCH 0074/1588] PYTHON-3078 Remove Use of Unsupported NoReturn Type Class (#864) --- .evergreen/config.yml | 2 +- bson/__init__.py | 3 +-- bson/objectid.py | 4 ++-- 3 files changed, 4 insertions(+), 5 deletions(-) diff --git a/.evergreen/config.yml b/.evergreen/config.yml index 2a65324300..8edc43df20 100644 --- a/.evergreen/config.yml +++ b/.evergreen/config.yml @@ -1729,7 +1729,7 @@ tasks: vars: VERSION: "latest" TOPOLOGY: "server" - PYTHON_BINARY: "/opt/mongodbtoolchain/v3/bin/python3" + PYTHON_BINARY: "/opt/mongodbtoolchain/v2/bin/python3" - func: "run tests" # }}} - name: "coverage-report" diff --git a/bson/__init__.py b/bson/__init__.py index 9431909f9c..d9124d1b32 100644 --- a/bson/__init__.py +++ b/bson/__init__.py @@ -76,7 +76,6 @@ List, Mapping, MutableMapping, - NoReturn, Sequence, Tuple, Type, @@ -167,7 +166,7 @@ def get_data_and_view(data: Any) -> Tuple[Any, memoryview]: return view.tobytes(), view -def _raise_unknown_type(element_type: int, element_name: str) -> NoReturn: +def _raise_unknown_type(element_type: int, element_name: str) -> None: """Unknown type helper.""" raise InvalidBSON( "Detected unknown BSON type %r for fieldname '%s'. 
Are " diff --git a/bson/objectid.py b/bson/objectid.py index 9ad3ed60be..7413fd497b 100644 --- a/bson/objectid.py +++ b/bson/objectid.py @@ -24,7 +24,7 @@ import threading import time from random import SystemRandom -from typing import Any, NoReturn, Optional, Type, Union +from typing import Any, Optional, Type, Union from bson.errors import InvalidId from bson.tz_util import utc @@ -32,7 +32,7 @@ _MAX_COUNTER_VALUE = 0xFFFFFF -def _raise_invalid_id(oid: str) -> NoReturn: +def _raise_invalid_id(oid: str) -> None: raise InvalidId( "%r is not a valid ObjectId, it must be a 12-byte input" " or a 24-character hex string" % oid From 9482019a537bdf3493122f5c9ec8167df0f15e02 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Tue, 15 Feb 2022 15:40:36 -0800 Subject: [PATCH 0075/1588] Add resync-syncs workarounds for incomplete spec work (#873) --- .evergreen/resync-specs.sh | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.evergreen/resync-specs.sh b/.evergreen/resync-specs.sh index 1d0742258b..bf20f23037 100755 --- a/.evergreen/resync-specs.sh +++ b/.evergreen/resync-specs.sh @@ -79,6 +79,7 @@ do ;; cmap|CMAP) cpjson connection-monitoring-and-pooling/tests cmap + rm $PYMONGO/test/cmap/wait-queue-fairness.json # PYTHON-1873 ;; command*monitoring) cpjson command-monitoring/tests command_monitoring @@ -127,6 +128,7 @@ do transactions|transactions-convenient-api) cpjson transactions/tests/ transactions cpjson transactions-convenient-api/tests/ transactions-convenient-api + rm $PYMONGO/test/transactions/legacy/errors-client.json # PYTHON-1894 ;; unified) cpjson unified-test-format/tests/ unified-test-format/ From 09f8aa9928e9763e790a7fd6e30bccb701efea9d Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Tue, 15 Feb 2022 15:49:39 -0800 Subject: [PATCH 0076/1588] PYTHON-3072 Use _Address in more places (#871) --- pymongo/command_cursor.py | 8 ++++---- pymongo/mongo_client.py | 4 ++-- pymongo/uri_parser.py | 9 +++------ 3 files changed, 9 insertions(+), 12 deletions(-) diff --git a/pymongo/command_cursor.py b/pymongo/command_cursor.py index d7a37766b2..2adc389baf 100644 --- a/pymongo/command_cursor.py +++ b/pymongo/command_cursor.py @@ -22,7 +22,7 @@ from pymongo.errors import ConnectionFailure, InvalidOperation, OperationFailure from pymongo.message import _CursorAddress, _GetMore, _RawBatchGetMore from pymongo.response import PinnedResponse -from pymongo.typings import _DocumentType +from pymongo.typings import _Address, _DocumentType if TYPE_CHECKING: from pymongo.client_session import ClientSession @@ -38,7 +38,7 @@ def __init__( self, collection: "Collection[_DocumentType]", cursor_info: Mapping[str, Any], - address: Optional[Tuple[str, Optional[int]]], + address: Optional[_Address], batch_size: int = 0, max_await_time_ms: Optional[int] = None, session: Optional["ClientSession"] = None, @@ -254,7 +254,7 @@ def cursor_id(self) -> int: return self.__id @property - def address(self) -> Optional[Tuple[str, Optional[int]]]: + def address(self) -> Optional[_Address]: """The (host, port) of the server used, or None. .. 
versionadded:: 3.0 @@ -309,7 +309,7 @@ def __init__( self, collection: "Collection[_DocumentType]", cursor_info: Mapping[str, Any], - address: Optional[Tuple[str, Optional[int]]], + address: Optional[_Address], batch_size: int = 0, max_await_time_ms: Optional[int] = None, session: Optional["ClientSession"] = None, diff --git a/pymongo/mongo_client.py b/pymongo/mongo_client.py index 6b0d55601f..e9fa932ff1 100644 --- a/pymongo/mongo_client.py +++ b/pymongo/mongo_client.py @@ -86,7 +86,7 @@ from pymongo.settings import TopologySettings from pymongo.topology import Topology, _ErrorContext from pymongo.topology_description import TOPOLOGY_TYPE, TopologyDescription -from pymongo.typings import _CollationIn, _DocumentType, _Pipeline +from pymongo.typings import _Address, _CollationIn, _DocumentType, _Pipeline from pymongo.uri_parser import ( _check_options, _handle_option_deprecations, @@ -1061,7 +1061,7 @@ def is_mongos(self) -> bool: return self._server_property("server_type") == SERVER_TYPE.Mongos @property - def nodes(self) -> FrozenSet[Tuple[str, Optional[int]]]: + def nodes(self) -> FrozenSet[_Address]: """Set of all currently connected servers. .. warning:: When connected to a replica set the value of :attr:`nodes` diff --git a/pymongo/uri_parser.py b/pymongo/uri_parser.py index 76c6e4d513..3417c4954e 100644 --- a/pymongo/uri_parser.py +++ b/pymongo/uri_parser.py @@ -40,6 +40,7 @@ ) from pymongo.errors import ConfigurationError, InvalidURI from pymongo.srv_resolver import _HAVE_DNSPYTHON, _SrvResolver +from pymongo.typings import _Address SCHEME = "mongodb://" SCHEME_LEN = len(SCHEME) @@ -114,9 +115,7 @@ def parse_ipv6_literal_host( return entity[1:i], entity[i + 2 :] -def parse_host( - entity: str, default_port: Optional[int] = DEFAULT_PORT -) -> Tuple[str, Optional[int]]: +def parse_host(entity: str, default_port: Optional[int] = DEFAULT_PORT) -> _Address: """Validates a host string Returns a 2-tuple of host followed by port where port is default_port @@ -363,9 +362,7 @@ def split_options( return options -def split_hosts( - hosts: str, default_port: Optional[int] = DEFAULT_PORT -) -> List[Tuple[str, Optional[int]]]: +def split_hosts(hosts: str, default_port: Optional[int] = DEFAULT_PORT) -> List[_Address]: """Takes a string of the form host1[:port],host2[:port]... and splits it into (host, port) tuples. If [:port] isn't present the default_port is used. From 7a8f6b344240a90c4922541c20b22571e42b4fb6 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Wed, 16 Feb 2022 17:11:12 -0800 Subject: [PATCH 0077/1588] PYTHON-2147 Use verified peer cert chain in OCSP when available (#877) --- pymongo/ocsp_support.py | 10 ++++++++-- pymongo/pyopenssl_context.py | 4 +++- 2 files changed, 11 insertions(+), 3 deletions(-) diff --git a/pymongo/ocsp_support.py b/pymongo/ocsp_support.py index 369055ea8d..56d18a29bf 100644 --- a/pymongo/ocsp_support.py +++ b/pymongo/ocsp_support.py @@ -275,12 +275,18 @@ def _ocsp_callback(conn, ocsp_bytes, user_data): _LOGGER.debug("No peer cert?") return 0 cert = cert.to_cryptography() - chain = conn.get_peer_cert_chain() + # Use the verified chain when available (pyopenssl>=20.0). 
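# (An aside on the hasattr probe that follows: it is plain feature detection,
#  since pyopenssl only added Connection.get_verified_chain in 20.0; older
#  installs fall back to get_peer_cert_chain plus the CA certs captured by
#  load_verify_locations. The same pattern in isolation, my sketch only:
#
#      chain_getter = getattr(conn, "get_verified_chain", conn.get_peer_cert_chain)
#      chain = chain_getter()
#  )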
+ if hasattr(conn, "get_verified_chain"): + chain = conn.get_verified_chain() + trusted_ca_certs = None + else: + chain = conn.get_peer_cert_chain() + trusted_ca_certs = user_data.trusted_ca_certs if not chain: _LOGGER.debug("No peer cert chain?") return 0 chain = [cer.to_cryptography() for cer in chain] - issuer = _get_issuer_cert(cert, chain, user_data.trusted_ca_certs) + issuer = _get_issuer_cert(cert, chain, trusted_ca_certs) must_staple = False # https://tools.ietf.org/html/rfc7633#section-4.2.3.1 ext = _get_extension(cert, _TLSFeature) diff --git a/pymongo/pyopenssl_context.py b/pymongo/pyopenssl_context.py index d42cafb084..9e4c5cab40 100644 --- a/pymongo/pyopenssl_context.py +++ b/pymongo/pyopenssl_context.py @@ -276,7 +276,9 @@ def load_verify_locations(self, cafile=None, capath=None): ssl.CERT_NONE. """ self._ctx.load_verify_locations(cafile, capath) - self._callback_data.trusted_ca_certs = _load_trusted_ca_certs(cafile) + # Manually load the CA certs when get_verified_chain is not available (pyopenssl<20). + if not hasattr(_SSL.Connection, "get_verified_chain"): + self._callback_data.trusted_ca_certs = _load_trusted_ca_certs(cafile) def _load_certifi(self): """Attempt to load CA certs from certifi.""" From 1a90e477cf0024c47d24e0c2cfe38442ed9ae222 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Wed, 16 Feb 2022 19:29:21 -0600 Subject: [PATCH 0078/1588] PYTHON-3131 Test Failure - test_mypy on macos + auth (#875) --- test/test_mypy.py | 19 +++++-------------- 1 file changed, 5 insertions(+), 14 deletions(-) diff --git a/test/test_mypy.py b/test/test_mypy.py index 0f1498c64b..5b9746f723 100644 --- a/test/test_mypy.py +++ b/test/test_mypy.py @@ -25,10 +25,10 @@ except ImportError: api = None +from test import IntegrationTest + from bson.son import SON from pymongo.collection import Collection -from pymongo.errors import ServerSelectionTimeoutError -from pymongo.mongo_client import MongoClient from pymongo.operations import InsertOne TEST_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "mypy_fails") @@ -53,22 +53,13 @@ def test_mypy_failures(self) -> None: self.ensure_mypy_fails(filename) -class TestPymongo(unittest.TestCase): - client: MongoClient +class TestPymongo(IntegrationTest): coll: Collection @classmethod - def setUpClass(cls) -> None: - cls.client = MongoClient(serverSelectionTimeoutMS=250, directConnection=False) + def setUpClass(cls): + super().setUpClass() cls.coll = cls.client.test.test - try: - cls.client.admin.command("ping") - except ServerSelectionTimeoutError as exc: - raise unittest.SkipTest(f"Could not connect to MongoDB: {exc}") - - @classmethod - def tearDownClass(cls) -> None: - cls.client.close() def test_insert_find(self) -> None: doc = {"my": "doc"} From a0fe7c03af08adde0c893071e1664b43570b9841 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Thu, 17 Feb 2022 06:44:08 -0600 Subject: [PATCH 0079/1588] PYTHON-3120 Set up flake8 linting (#868) --- .flake8 | 30 +++++++ .github/workflows/test-python.yml | 4 + .pre-commit-config.yaml | 25 +++++- bson/__init__.py | 20 ++--- bson/binary.py | 4 +- bson/codec_options.py | 6 +- bson/dbref.py | 4 +- bson/json_util.py | 12 ++- bson/objectid.py | 2 +- bson/raw_bson.py | 3 +- bson/tz_util.py | 2 +- doc/conf.py | 2 +- green_framework_test.py | 4 +- gridfs/__init__.py | 7 +- gridfs/grid_file.py | 17 ++-- pymongo/__init__.py | 19 ++-- pymongo/aggregation.py | 2 +- pymongo/auth.py | 16 ++-- pymongo/bulk.py | 5 +- pymongo/change_stream.py | 4 +- pymongo/client_session.py | 6 +- pymongo/collation.py | 
2 +- pymongo/collection.py | 13 ++- pymongo/command_cursor.py | 2 +- pymongo/common.py | 39 ++++---- pymongo/compression_support.py | 2 +- pymongo/cursor.py | 13 ++- pymongo/daemon.py | 4 +- pymongo/database.py | 6 +- pymongo/encryption.py | 6 +- pymongo/encryption_options.py | 3 +- pymongo/errors.py | 2 +- pymongo/event_loggers.py | 88 ++++++++----------- pymongo/helpers.py | 10 +-- pymongo/message.py | 18 ++-- pymongo/mongo_client.py | 9 +- pymongo/monitoring.py | 6 +- pymongo/network.py | 8 +- pymongo/periodic_executor.py | 2 +- pymongo/pool.py | 18 ++-- pymongo/pyopenssl_context.py | 4 +- pymongo/read_preferences.py | 7 +- pymongo/results.py | 2 +- pymongo/server.py | 1 - pymongo/server_description.py | 2 +- pymongo/socket_checker.py | 2 +- pymongo/ssl_context.py | 4 +- pymongo/ssl_support.py | 4 +- pymongo/typings.py | 2 - pymongo/uri_parser.py | 45 ++++------ pymongo/write_concern.py | 2 +- setup.py | 15 ++-- test/__init__.py | 13 ++- test/atlas/test_connection.py | 4 +- test/mockupdb/test_cluster_time.py | 2 +- test/mockupdb/test_handshake.py | 4 +- test/mockupdb/test_list_indexes.py | 2 +- test/mockupdb/test_mixed_version_sharded.py | 2 +- .../mockupdb/test_mongos_command_read_mode.py | 3 +- .../test_network_disconnect_primary.py | 1 - test/mockupdb/test_slave_okay_single.py | 12 +-- test/mod_wsgi_test/mod_wsgi_test.wsgi | 4 +- test/ocsp/test_ocsp.py | 2 +- test/test_auth.py | 19 ++-- test/test_binary.py | 2 - test/test_bson.py | 64 ++++++-------- test/test_bson_corpus.py | 4 +- test/test_client.py | 25 ++---- test/test_client_context.py | 2 +- test/test_cmap.py | 2 - test/test_collection.py | 8 +- test/test_command_monitoring_legacy.py | 7 +- test/test_comment.py | 15 ++-- test/test_crud_v1.py | 2 +- test/test_cursor.py | 11 +-- test/test_custom_types.py | 4 +- test/test_data_lake.py | 4 +- test/test_database.py | 7 +- test/test_dbref.py | 2 +- test/test_discovery_and_monitoring.py | 2 +- test/test_dns.py | 2 +- test/test_encryption.py | 28 +++--- test/test_errors.py | 4 +- test/test_examples.py | 16 ++-- test/test_grid_file.py | 2 +- test/test_gridfs_bucket.py | 2 +- test/test_heartbeat_monitoring.py | 1 - test/test_json_util.py | 18 ++-- test/test_load_balancer.py | 2 +- test/test_max_staleness.py | 16 ++-- test/test_monitoring.py | 17 ++-- test/test_mypy.py | 1 - test/test_pooling.py | 2 +- test/test_saslprep.py | 2 +- test/test_sdam_monitoring_spec.py | 10 +-- test/test_server_description.py | 2 +- test/test_session.py | 2 +- test/test_son.py | 4 +- test/test_srv_polling.py | 4 +- test/test_ssl.py | 20 ++--- test/test_threads.py | 2 +- test/test_uri_parser.py | 46 ++++------ test/unified_format.py | 10 +-- tools/clean.py | 8 +- tools/fail_if_no_c.py | 4 +- 105 files changed, 454 insertions(+), 535 deletions(-) create mode 100644 .flake8 diff --git a/.flake8 b/.flake8 new file mode 100644 index 0000000000..e5bc58921a --- /dev/null +++ b/.flake8 @@ -0,0 +1,30 @@ +[flake8] +max-line-length = 100 +enable-extensions = G +extend-ignore = + G200, G202, + # black adds spaces around ':' + E203, + # E501 line too long (let black handle line length) + E501 + # B305 `.next()` is not a thing on Python 3 + B305 +per-file-ignores = + # E402 module level import not at top of file + pymongo/__init__.py: E402 + + # G004 Logging statement uses f-string + pymongo/event_loggers.py: G004 + + # E402 module level import not at top of file + # B011 Do not call assert False since python -O removes these calls + # F405 'Foo' may be undefined, or defined from star imports + # E741 ambiguous variable name 
+ # B007 Loop control variable 'foo' not used within the loop body + # F403 'from foo import *' used; unable to detect undefined names + # B001 Do not use bare `except:` + # E722 do not use bare 'except' + # E731 do not assign a lambda expression, use a def + # F811 redefinition of unused 'foo' from line XXX + # F841 local variable 'foo' is assigned to but never used + test/*: E402, B011, F405, E741, B007, F403, B001, E722, E731, F811, F841 diff --git a/.github/workflows/test-python.yml b/.github/workflows/test-python.yml index 4b5f762786..046915b04a 100644 --- a/.github/workflows/test-python.yml +++ b/.github/workflows/test-python.yml @@ -31,6 +31,8 @@ jobs: uses: actions/setup-python@v2 with: python-version: ${{ matrix.python-version }} + cache: 'pip' + cache-dependency-path: 'setup.py' - name: Start MongoDB uses: supercharge/mongodb-github-action@1.7.0 with: @@ -53,6 +55,8 @@ jobs: uses: actions/setup-python@v2 with: python-version: ${{ matrix.python-version }} + cache: 'pip' + cache-dependency-path: 'setup.py' - name: Install dependencies run: | python -m pip install -U pip mypy diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index b20ad7ae55..5c1e92f5b7 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -1,7 +1,7 @@ repos: - repo: https://github.com/pre-commit/pre-commit-hooks - rev: v3.4.0 + rev: v4.1.0 hooks: - id: check-added-large-files - id: check-case-conflict @@ -24,17 +24,36 @@ repos: args: [--line-length=100] - repo: https://github.com/PyCQA/isort - rev: 5.7.0 + rev: 5.10.1 hooks: - id: isort files: \.py$ args: [--profile=black] +- repo: https://gitlab.com/pycqa/flake8 + rev: 3.9.2 + hooks: + - id: flake8 + additional_dependencies: [ + 'flake8-bugbear==20.1.4', + 'flake8-logging-format==0.6.0', + 'flake8-implicit-str-concat==0.2.0', + ] + # We use the Python version instead of the original version which seems to require Docker # https://github.com/koalaman/shellcheck-precommit - repo: https://github.com/shellcheck-py/shellcheck-py - rev: v0.8.0.1 + rev: v0.8.0.4 hooks: - id: shellcheck name: shellcheck args: ["--severity=warning"] + +- repo: https://github.com/sirosen/check-jsonschema + rev: 0.11.0 + hooks: + - id: check-jsonschema + name: "Check GitHub Workflows" + files: ^\.github/workflows/ + types: [yaml] + args: ["--schemafile", "https://json.schemastore.org/github-workflow"] diff --git a/bson/__init__.py b/bson/__init__.py index d9124d1b32..a287db1801 100644 --- a/bson/__init__.py +++ b/bson/__init__.py @@ -84,7 +84,7 @@ cast, ) -from bson.binary import ( +from bson.binary import ( # noqa: F401 ALL_UUID_SUBTYPES, CSHARP_LEGACY, JAVA_LEGACY, @@ -513,7 +513,7 @@ def _bson_to_dict(data: Any, opts: Any) -> Any: if _USE_C: - _bson_to_dict = _cbson._bson_to_dict + _bson_to_dict = _cbson._bson_to_dict # noqa: F811 _PACK_FLOAT = struct.Struct(" bytes: """Make a 'C' string, checking for embedded NUL characters.""" if isinstance(string, bytes): if b"\x00" in string: - raise InvalidDocument("BSON keys / regex patterns must not " "contain a NUL character") + raise InvalidDocument("BSON keys / regex patterns must not contain a NUL character") try: _utf_8_decode(string, None, True) return string + b"\x00" except UnicodeError: - raise InvalidStringData("strings in documents must be valid " "UTF-8: %r" % string) + raise InvalidStringData("strings in documents must be valid UTF-8: %r" % string) else: if "\x00" in string: - raise InvalidDocument("BSON keys / regex patterns must not " 
"contain a NUL character") + raise InvalidDocument("BSON keys / regex patterns must not contain a NUL character") return cast(bytes, _utf_8_encode(string)[0]) + b"\x00" @@ -562,7 +562,7 @@ def _make_c_string(string: Union[str, bytes]) -> bytes: _utf_8_decode(string, None, True) return string + b"\x00" except UnicodeError: - raise InvalidStringData("strings in documents must be valid " "UTF-8: %r" % string) + raise InvalidStringData("strings in documents must be valid UTF-8: %r" % string) else: return cast(bytes, _utf_8_encode(string)[0]) + b"\x00" @@ -571,7 +571,7 @@ def _make_name(string: str) -> bytes: """Make a 'C' string suitable for a BSON key.""" # Keys can only be text in python 3. if "\x00" in string: - raise InvalidDocument("BSON keys / regex patterns must not " "contain a NUL character") + raise InvalidDocument("BSON keys / regex patterns must not contain a NUL character") return cast(bytes, _utf_8_encode(string)[0]) + b"\x00" @@ -846,7 +846,7 @@ def _name_value_to_bson( def _element_to_bson(key: Any, value: Any, check_keys: bool, opts: Any) -> bytes: """Encode a single key, value pair.""" if not isinstance(key, str): - raise InvalidDocument("documents must have only string keys, " "key was %r" % (key,)) + raise InvalidDocument("documents must have only string keys, key was %r" % (key,)) if check_keys: if key.startswith("$"): raise InvalidDocument("key %r must not start with '$'" % (key,)) @@ -876,7 +876,7 @@ def _dict_to_bson(doc: Any, check_keys: bool, opts: Any, top_level: bool = True) if _USE_C: - _dict_to_bson = _cbson._dict_to_bson + _dict_to_bson = _cbson._dict_to_bson # noqa: F811 def _millis_to_datetime(millis: int, opts: Any) -> datetime.datetime: @@ -1032,7 +1032,7 @@ def decode_all( if _USE_C: - decode_all = _cbson.decode_all + decode_all = _cbson.decode_all # noqa: F811 def _decode_selective(rawdoc: Any, fields: Any, codec_options: Any) -> Mapping[Any, Any]: diff --git a/bson/binary.py b/bson/binary.py index e20bf87af3..93c43ee40c 100644 --- a/bson/binary.py +++ b/bson/binary.py @@ -260,7 +260,7 @@ def from_uuid( if uuid_representation not in ALL_UUID_REPRESENTATIONS: raise ValueError( - "uuid_representation must be a value " "from bson.binary.UuidRepresentation" + "uuid_representation must be a value from bson.binary.UuidRepresentation" ) if uuid_representation == UuidRepresentation.UNSPECIFIED: @@ -310,7 +310,7 @@ def as_uuid(self, uuid_representation: int = UuidRepresentation.STANDARD) -> UUI if uuid_representation not in ALL_UUID_REPRESENTATIONS: raise ValueError( - "uuid_representation must be a value from " "bson.binary.UuidRepresentation" + "uuid_representation must be a value from bson.binary.UuidRepresentation" ) if uuid_representation == UuidRepresentation.UNSPECIFIED: diff --git a/bson/codec_options.py b/bson/codec_options.py index b43a0275d8..8e5f97df30 100644 --- a/bson/codec_options.py +++ b/bson/codec_options.py @@ -23,12 +23,10 @@ Any, Callable, Dict, - Generic, Iterable, MutableMapping, Optional, Type, - TypeVar, Union, cast, ) @@ -312,10 +310,10 @@ def __new__( raise TypeError("tz_aware must be True or False") if uuid_representation not in ALL_UUID_REPRESENTATIONS: raise ValueError( - "uuid_representation must be a value " "from bson.binary.UuidRepresentation" + "uuid_representation must be a value from bson.binary.UuidRepresentation" ) if not isinstance(unicode_decode_error_handler, (str, None)): # type: ignore - raise ValueError("unicode_decode_error_handler must be a string " "or None") + raise ValueError("unicode_decode_error_handler must be a 
string or None") if tzinfo is not None: if not isinstance(tzinfo, datetime.tzinfo): raise TypeError("tzinfo must be an instance of datetime.tzinfo") diff --git a/bson/dbref.py b/bson/dbref.py index 773c95f59d..7849435f23 100644 --- a/bson/dbref.py +++ b/bson/dbref.py @@ -35,7 +35,7 @@ def __init__( collection: str, id: Any, database: Optional[str] = None, - _extra: Mapping[str, Any] = {}, + _extra: Optional[Mapping[str, Any]] = None, **kwargs: Any ) -> None: """Initialize a new :class:`DBRef`. @@ -63,7 +63,7 @@ def __init__( self.__collection = collection self.__id = id self.__database = database - kwargs.update(_extra) + kwargs.update(_extra or {}) self.__kwargs = kwargs @property diff --git a/bson/json_util.py b/bson/json_util.py index 3cdf701f70..99dbc62609 100644 --- a/bson/json_util.py +++ b/bson/json_util.py @@ -283,7 +283,7 @@ def __new__( self.json_mode = json_mode if self.json_mode == JSONMode.RELAXED: if strict_number_long: - raise ValueError("Cannot specify strict_number_long=True with" " JSONMode.RELAXED") + raise ValueError("Cannot specify strict_number_long=True with JSONMode.RELAXED") if datetime_representation not in (None, DatetimeRepresentation.ISO8601): raise ValueError( "datetime_representation must be DatetimeRepresentation." @@ -296,7 +296,7 @@ def __new__( self.strict_uuid = True elif self.json_mode == JSONMode.CANONICAL: if strict_number_long not in (None, True): - raise ValueError("Cannot specify strict_number_long=False with" " JSONMode.RELAXED") + raise ValueError("Cannot specify strict_number_long=False with JSONMode.RELAXED") if datetime_representation not in (None, DatetimeRepresentation.NUMBERLONG): raise ValueError( "datetime_representation must be DatetimeRepresentation." @@ -581,11 +581,9 @@ def _parse_canonical_binary(doc: Any, json_options: JSONOptions) -> Union[Binary if not isinstance(b64, str): raise TypeError("$binary base64 must be a string: %s" % (doc,)) if not isinstance(subtype, str) or len(subtype) > 2: - raise TypeError("$binary subType must be a string at most 2 " "characters: %s" % (doc,)) + raise TypeError("$binary subType must be a string at most 2 characters: %s" % (doc,)) if len(binary) != 2: - raise TypeError( - '$binary must include only "base64" and "subType" ' "components: %s" % (doc,) - ) + raise TypeError('$binary must include only "base64" and "subType" components: %s' % (doc,)) data = base64.b64decode(b64.encode()) return _binary_or_uuid(data, int(subtype, 16), json_options) @@ -686,7 +684,7 @@ def _parse_canonical_regex(doc: Any) -> Regex: opts = regex["options"] if not isinstance(opts, str): raise TypeError( - "Bad $regularExpression options, options must be " "string, was type %s" % (type(opts)) + "Bad $regularExpression options, options must be string, was type %s" % (type(opts)) ) return Regex(regex["pattern"], opts) diff --git a/bson/objectid.py b/bson/objectid.py index 7413fd497b..24d25d0377 100644 --- a/bson/objectid.py +++ b/bson/objectid.py @@ -204,7 +204,7 @@ def __validate(self, oid: Any) -> None: _raise_invalid_id(oid) else: raise TypeError( - "id must be an instance of (bytes, str, ObjectId), " "not %s" % (type(oid),) + "id must be an instance of (bytes, str, ObjectId), not %s" % (type(oid),) ) @property diff --git a/bson/raw_bson.py b/bson/raw_bson.py index c102b367a2..ca7207f0a2 100644 --- a/bson/raw_bson.py +++ b/bson/raw_bson.py @@ -51,8 +51,7 @@ overhead of decoding or encoding BSON. 
""" -from collections.abc import Mapping as _Mapping -from typing import Any, ItemsView, Iterator, Mapping, Optional, cast +from typing import Any, ItemsView, Iterator, Mapping, Optional from bson import _get_object_size, _raw_to_dict from bson.codec_options import _RAW_BSON_DOCUMENT_MARKER diff --git a/bson/tz_util.py b/bson/tz_util.py index 43ae52ccff..8106c77b40 100644 --- a/bson/tz_util.py +++ b/bson/tz_util.py @@ -15,7 +15,7 @@ """Timezone related utilities for BSON.""" from datetime import datetime, timedelta, tzinfo -from typing import Any, Optional, Tuple, Union +from typing import Optional, Tuple, Union ZERO: timedelta = timedelta(0) diff --git a/doc/conf.py b/doc/conf.py index 3f74a11d60..c2f97dabfe 100644 --- a/doc/conf.py +++ b/doc/conf.py @@ -9,7 +9,7 @@ sys.path[0:0] = [os.path.abspath("..")] -import pymongo +import pymongo # noqa # -- General configuration ----------------------------------------------------- diff --git a/green_framework_test.py b/green_framework_test.py index 610845a9f6..d638d9b014 100644 --- a/green_framework_test.py +++ b/green_framework_test.py @@ -59,7 +59,7 @@ def run(framework_name, *args): # Run the tests. sys.argv[:] = ["setup.py", "test"] + list(args) - import setup + import setup # noqa def main(): @@ -87,7 +87,7 @@ def main(): list_frameworks() sys.exit() else: - assert False, "unhandled option" + raise AssertionError("unhandled option") if not args: print(usage) diff --git a/gridfs/__init__.py b/gridfs/__init__.py index 22b28af1a7..73425a9e53 100644 --- a/gridfs/__init__.py +++ b/gridfs/__init__.py @@ -35,11 +35,10 @@ ) from pymongo import ASCENDING, DESCENDING from pymongo.client_session import ClientSession -from pymongo.collation import Collation from pymongo.collection import Collection -from pymongo.common import UNAUTHORIZED_CODES, validate_string +from pymongo.common import validate_string from pymongo.database import Database -from pymongo.errors import ConfigurationError, OperationFailure +from pymongo.errors import ConfigurationError from pymongo.read_preferences import _ServerMode from pymongo.write_concern import WriteConcern @@ -83,7 +82,7 @@ def __init__(self, database: Database, collection: str = "fs"): database = _clear_entity_type_registry(database) if not database.write_concern.acknowledged: - raise ConfigurationError("database must use " "acknowledged write_concern") + raise ConfigurationError("database must use acknowledged write_concern") self.__collection = database[collection] self.__files = self.__collection.files diff --git a/gridfs/grid_file.py b/gridfs/grid_file.py index 93a97158ae..b290fc68b0 100644 --- a/gridfs/grid_file.py +++ b/gridfs/grid_file.py @@ -17,7 +17,7 @@ import io import math import os -from typing import Any, Iterable, List, Mapping, Optional, cast +from typing import Any, Iterable, List, Mapping, Optional from bson.binary import Binary from bson.int64 import Int64 @@ -172,10 +172,10 @@ def __init__( :attr:`~pymongo.collection.Collection.write_concern` """ if not isinstance(root_collection, Collection): - raise TypeError("root_collection must be an " "instance of Collection") + raise TypeError("root_collection must be an instance of Collection") if not root_collection.write_concern.acknowledged: - raise ConfigurationError("root_collection must use " "acknowledged write_concern") + raise ConfigurationError("root_collection must use acknowledged write_concern") _disallow_transactions(session) # Handle alternative naming @@ -240,7 +240,7 @@ def closed(self) -> bool: "uploadDate", "Date that this file was 
uploaded.", closed_only=True ) md5: Optional[str] = _grid_in_property( - "md5", "MD5 of the contents of this file " "if an md5 sum was created.", closed_only=True + "md5", "MD5 of the contents of this file if an md5 sum was created.", closed_only=True ) _buffer: io.BytesIO @@ -356,7 +356,7 @@ def write(self, data: Any) -> None: try: data = data.encode(self.encoding) except AttributeError: - raise TypeError("must specify an encoding for file in " "order to write str") + raise TypeError("must specify an encoding for file in order to write str") read = io.BytesIO(data).read if self._buffer.tell() > 0: @@ -365,7 +365,7 @@ def write(self, data: Any) -> None: if space: try: to_write = read(space) - except: + except BaseException: self.abort() raise self._buffer.write(to_write) @@ -447,7 +447,7 @@ def __init__( from the server. Metadata is fetched when first needed. """ if not isinstance(root_collection, Collection): - raise TypeError("root_collection must be an " "instance of Collection") + raise TypeError("root_collection must be an instance of Collection") _disallow_transactions(session) root_collection = _clear_entity_type_registry(root_collection) @@ -477,7 +477,7 @@ def __init__( "metadata", "Metadata attached to this file." ) md5: Optional[str] = _grid_out_property( - "md5", "MD5 of the contents of this file " "if an md5 sum was created." + "md5", "MD5 of the contents of this file if an md5 sum was created." ) _file: Any @@ -886,7 +886,6 @@ def __init__( def next(self) -> GridOut: """Get next GridOut object from cursor.""" _disallow_transactions(self.session) - # Work around "super is not iterable" issue in Python 3.x next_file = super(GridOutCursor, self).next() return GridOut(self.__root_collection, file_document=next_file, session=self.session) diff --git a/pymongo/__init__.py b/pymongo/__init__.py index f8baa91971..9581068036 100644 --- a/pymongo/__init__.py +++ b/pymongo/__init__.py @@ -69,11 +69,14 @@ def get_version_string() -> str: """Current version of PyMongo.""" -from pymongo.collection import ReturnDocument -from pymongo.common import MAX_SUPPORTED_WIRE_VERSION, MIN_SUPPORTED_WIRE_VERSION -from pymongo.cursor import CursorType -from pymongo.mongo_client import MongoClient -from pymongo.operations import ( +from pymongo.collection import ReturnDocument # noqa: F401 +from pymongo.common import ( # noqa: F401 + MAX_SUPPORTED_WIRE_VERSION, + MIN_SUPPORTED_WIRE_VERSION, +) +from pymongo.cursor import CursorType # noqa: F401 +from pymongo.mongo_client import MongoClient # noqa: F401 +from pymongo.operations import ( # noqa: F401 DeleteMany, DeleteOne, IndexModel, @@ -82,14 +85,14 @@ def get_version_string() -> str: UpdateMany, UpdateOne, ) -from pymongo.read_preferences import ReadPreference -from pymongo.write_concern import WriteConcern +from pymongo.read_preferences import ReadPreference # noqa: F401 +from pymongo.write_concern import WriteConcern # noqa: F401 def has_c() -> bool: """Is the C extension installed?""" try: - from pymongo import _cmessage # type: ignore[attr-defined] + from pymongo import _cmessage # type: ignore[attr-defined] # noqa: F401 return True except ImportError: diff --git a/pymongo/aggregation.py b/pymongo/aggregation.py index 51be0dfa81..e190fefc56 100644 --- a/pymongo/aggregation.py +++ b/pymongo/aggregation.py @@ -43,7 +43,7 @@ def __init__( ): if "explain" in options: raise ConfigurationError( - "The explain option is not supported. " "Use Database.command instead." + "The explain option is not supported. Use Database.command instead." 
) self._target = target diff --git a/pymongo/auth.py b/pymongo/auth.py index 0a4e7e7324..3d259335b0 100644 --- a/pymongo/auth.py +++ b/pymongo/auth.py @@ -121,7 +121,7 @@ def _build_credentials_tuple(mech, source, user, passwd, extra, database): if passwd is not None: raise ConfigurationError("Passwords are not supported by MONGODB-X509") if source is not None and source != "$external": - raise ValueError("authentication source must be " "$external or None for MONGODB-X509") + raise ValueError("authentication source must be $external or None for MONGODB-X509") # Source is always $external, user can be None. return MongoCredential(mech, "$external", user, None, None, None) elif mech == "MONGODB-AWS": @@ -129,7 +129,7 @@ def _build_credentials_tuple(mech, source, user, passwd, extra, database): raise ConfigurationError("username without a password is not supported by MONGODB-AWS") if source is not None and source != "$external": raise ConfigurationError( - "authentication source must be " "$external or None for MONGODB-AWS" + "authentication source must be $external or None for MONGODB-AWS" ) properties = extra.get("authmechanismproperties", {}) @@ -302,7 +302,7 @@ def _authenticate_gssapi(credentials, sock_info): """Authenticate using GSSAPI.""" if not HAVE_KERBEROS: raise ConfigurationError( - 'The "kerberos" module must be ' "installed to use GSSAPI authentication." + 'The "kerberos" module must be installed to use GSSAPI authentication.' ) try: @@ -351,7 +351,7 @@ def _authenticate_gssapi(credentials, sock_info): # 0 == continue, 1 == complete, -1 == error # Only authGSSClientStep can return 0. if kerberos.authGSSClientStep(ctx, "") != 0: - raise OperationFailure("Unknown kerberos " "failure in step function.") + raise OperationFailure("Unknown kerberos failure in step function.") # Start a SASL conversation with mongod/s # Note: pykerberos deals with base64 encoded byte strings. @@ -372,7 +372,7 @@ def _authenticate_gssapi(credentials, sock_info): for _ in range(10): result = kerberos.authGSSClientStep(ctx, str(response["payload"])) if result == -1: - raise OperationFailure("Unknown kerberos " "failure in step function.") + raise OperationFailure("Unknown kerberos failure in step function.") payload = kerberos.authGSSClientResponse(ctx) or "" @@ -388,15 +388,15 @@ def _authenticate_gssapi(credentials, sock_info): if result == kerberos.AUTH_GSS_COMPLETE: break else: - raise OperationFailure("Kerberos " "authentication failed to complete.") + raise OperationFailure("Kerberos authentication failed to complete.") # Once the security context is established actually authenticate. # See RFC 4752, Section 3.1, last two paragraphs. 
if kerberos.authGSSClientUnwrap(ctx, str(response["payload"])) != 1: - raise OperationFailure("Unknown kerberos " "failure during GSS_Unwrap step.") + raise OperationFailure("Unknown kerberos failure during GSS_Unwrap step.") if kerberos.authGSSClientWrap(ctx, kerberos.authGSSClientResponse(ctx), username) != 1: - raise OperationFailure("Unknown kerberos " "failure during GSS_Wrap step.") + raise OperationFailure("Unknown kerberos failure during GSS_Wrap step.") payload = kerberos.authGSSClientResponse(ctx) cmd = SON( diff --git a/pymongo/bulk.py b/pymongo/bulk.py index fae55a5c10..c736bd7d6f 100644 --- a/pymongo/bulk.py +++ b/pymongo/bulk.py @@ -26,7 +26,6 @@ from pymongo.collation import validate_collation_or_none from pymongo.common import ( validate_is_document_type, - validate_is_mapping, validate_ok_for_replace, validate_ok_for_update, ) @@ -476,7 +475,7 @@ def execute_no_results(self, sock_info, generator, write_concern): # Cannot have both unacknowledged writes and bypass document validation. if self.bypass_doc_val: raise OperationFailure( - "Cannot set bypass_document_validation with" " unacknowledged write concern" + "Cannot set bypass_document_validation with unacknowledged write concern" ) if self.ordered: @@ -488,7 +487,7 @@ def execute(self, write_concern, session): if not self.ops: raise InvalidOperation("No operations to execute") if self.executed: - raise InvalidOperation("Bulk operations can " "only be executed once.") + raise InvalidOperation("Bulk operations can only be executed once.") self.executed = True write_concern = write_concern or self.collection.write_concern session = _validate_session_write_concern(session, write_concern) diff --git a/pymongo/change_stream.py b/pymongo/change_stream.py index 50f6f72b73..d054046bda 100644 --- a/pymongo/change_stream.py +++ b/pymongo/change_stream.py @@ -15,7 +15,7 @@ """Watch changes on a collection, a database, or the entire cluster.""" import copy -from typing import TYPE_CHECKING, Any, Dict, Generic, Iterator, Mapping, Optional, Union +from typing import TYPE_CHECKING, Any, Dict, Generic, Mapping, Optional, Union from bson import _bson_to_dict from bson.raw_bson import RawBSONDocument @@ -363,7 +363,7 @@ def try_next(self) -> Optional[_DocumentType]: except KeyError: self.close() raise InvalidOperation( - "Cannot provide resume functionality when the resume " "token is missing." + "Cannot provide resume functionality when the resume token is missing." ) # If this is the last change document from the current batch, cache the diff --git a/pymongo/client_session.py b/pymongo/client_session.py index 44381c0241..4cf41b2c70 100644 --- a/pymongo/client_session.py +++ b/pymongo/client_session.py @@ -192,7 +192,7 @@ def __init__( ) -> None: if snapshot: if causal_consistency: - raise ConfigurationError("snapshot reads do not support " "causal_consistency=True") + raise ConfigurationError("snapshot reads do not support causal_consistency=True") causal_consistency = False elif causal_consistency is None: causal_consistency = True @@ -717,7 +717,7 @@ def start_transaction( self._check_ended() if self.options.snapshot: - raise InvalidOperation("Transactions are not supported in " "snapshot sessions") + raise InvalidOperation("Transactions are not supported in snapshot sessions") if self.in_transaction: raise InvalidOperation("Transaction already in progress") @@ -885,7 +885,7 @@ def advance_operation_time(self, operation_time: Timestamp) -> None: another `ClientSession` instance. 
""" if not isinstance(operation_time, Timestamp): - raise TypeError("operation_time must be an instance " "of bson.timestamp.Timestamp") + raise TypeError("operation_time must be an instance of bson.timestamp.Timestamp") self._advance_operation_time(operation_time) def _process_response(self, reply): diff --git a/pymongo/collation.py b/pymongo/collation.py index aef480b932..5bc73c07c8 100644 --- a/pymongo/collation.py +++ b/pymongo/collation.py @@ -221,4 +221,4 @@ def validate_collation_or_none( return value.document if isinstance(value, dict): return value - raise TypeError("collation must be a dict, an instance of collation.Collation, " "or None.") + raise TypeError("collation must be a dict, an instance of collation.Collation, or None.") diff --git a/pymongo/collection.py b/pymongo/collection.py index a61c905d29..8de1fbeeaa 100644 --- a/pymongo/collection.py +++ b/pymongo/collection.py @@ -29,7 +29,6 @@ Union, ) -from bson.code import Code from bson.codec_options import CodecOptions from bson.objectid import ObjectId from bson.raw_bson import RawBSONDocument @@ -204,11 +203,11 @@ def __init__( if not name or ".." in name: raise InvalidName("collection names cannot be empty") if "$" in name and not (name.startswith("oplog.$main") or name.startswith("$cmd")): - raise InvalidName("collection names must not " "contain '$': %r" % name) + raise InvalidName("collection names must not contain '$': %r" % name) if name[0] == "." or name[-1] == ".": - raise InvalidName("collection names must not start " "or end with '.': %r" % name) + raise InvalidName("collection names must not start or end with '.': %r" % name) if "\x00" in name: - raise InvalidName("collection names must not contain the " "null character") + raise InvalidName("collection names must not contain the null character") collation = validate_collation_or_none(kwargs.pop("collation", None)) self.__database: Database[_DocumentType] = database @@ -1873,7 +1872,7 @@ def gen_indexes(): for index in indexes: if not isinstance(index, IndexModel): raise TypeError( - "%r is not an instance of " "pymongo.operations.IndexModel" % (index,) + "%r is not an instance of pymongo.operations.IndexModel" % (index,) ) document = index.document names.append(document["name"]) @@ -2725,7 +2724,7 @@ def __find_and_modify( common.validate_is_mapping("filter", filter) if not isinstance(return_document, bool): raise ValueError( - "return_document must be " "ReturnDocument.BEFORE or ReturnDocument.AFTER" + "return_document must be ReturnDocument.BEFORE or ReturnDocument.AFTER" ) collation = validate_collation_or_none(kwargs.pop("collation", None)) cmd = SON([("findAndModify", self.__name), ("query", filter), ("new", return_document)]) @@ -2751,7 +2750,7 @@ def _find_and_modify(session, sock_info, retryable_write): if array_filters is not None: if not acknowledged: raise ConfigurationError( - "arrayFilters is unsupported for unacknowledged " "writes." + "arrayFilters is unsupported for unacknowledged writes." 
)
                 cmd["arrayFilters"] = list(array_filters)
             if hint is not None:
diff --git a/pymongo/command_cursor.py b/pymongo/command_cursor.py
index 2adc389baf..d10e23f957 100644
--- a/pymongo/command_cursor.py
+++ b/pymongo/command_cursor.py
@@ -15,7 +15,7 @@
 """CommandCursor class to iterate over command results."""
 from collections import deque
-from typing import TYPE_CHECKING, Any, Generic, Iterator, Mapping, Optional, Tuple
+from typing import TYPE_CHECKING, Any, Generic, Iterator, Mapping, Optional
 
 from bson import _convert_raw_document_lists_to_streams
 from pymongo.cursor import _CURSOR_CLOSED_ERRORS, _SocketManager
diff --git a/pymongo/common.py b/pymongo/common.py
index 769b277cf3..5255468b5a 100644
--- a/pymongo/common.py
+++ b/pymongo/common.py
@@ -30,7 +30,6 @@
     Tuple,
     Type,
     Union,
-    cast,
 )
 from urllib.parse import unquote_plus
 
@@ -180,7 +179,7 @@ def validate_boolean_or_string(option: str, value: Any) -> bool:
     """Validates that value is True, False, 'true', or 'false'."""
     if isinstance(value, str):
         if value not in ("true", "false"):
-            raise ValueError("The value of %s must be " "'true' or 'false'" % (option,))
+            raise ValueError("The value of %s must be 'true' or 'false'" % (option,))
         return value == "true"
     return validate_boolean(option, value)
 
@@ -193,7 +192,7 @@ def validate_integer(option: str, value: Any) -> int:
         try:
             return int(value)
         except ValueError:
-            raise ValueError("The value of %s must be " "an integer" % (option,))
+            raise ValueError("The value of %s must be an integer" % (option,))
     raise TypeError("Wrong type for %s, value must be an integer" % (option,))
 
 
@@ -201,7 +200,7 @@ def validate_positive_integer(option: str, value: Any) -> int:
     """Validate that 'value' is a positive integer, which does not include 0."""
     val = validate_integer(option, value)
     if val <= 0:
-        raise ValueError("The value of %s must be " "a positive integer" % (option,))
+        raise ValueError("The value of %s must be a positive integer" % (option,))
     return val
 
 
@@ -209,7 +208,7 @@ def validate_non_negative_integer(option: str, value: Any) -> int:
     """Validate that 'value' is a positive integer or 0."""
     val = validate_integer(option, value)
     if val < 0:
-        raise ValueError("The value of %s must be " "a non negative integer" % (option,))
+        raise ValueError("The value of %s must be a non negative integer" % (option,))
     return val
 
 
@@ -242,7 +241,7 @@ def validate_string(option: str, value: Any) -> str:
     """Validates that 'value' is an instance of `str`."""
     if isinstance(value, str):
         return value
-    raise TypeError("Wrong type for %s, value must be an instance of " "str" % (option,))
+    raise TypeError("Wrong type for %s, value must be an instance of str" % (option,))
 
 
 def validate_string_or_none(option: str, value: Any) -> Optional[str]:
@@ -261,7 +260,7 @@ def validate_int_or_basestring(option: str, value: Any) -> Union[int, str]:
             return int(value)
         except ValueError:
             return value
-    raise TypeError("Wrong type for %s, value must be an " "integer or a string" % (option,))
+    raise TypeError("Wrong type for %s, value must be an integer or a string" % (option,))
 
 
 def validate_non_negative_int_or_basestring(option: Any, value: Any) -> Union[int, str]:
@@ -275,7 +274,7 @@ def validate_non_negative_int_or_basestring(option: Any, value: Any) -> Union[in
             return value
         return validate_non_negative_integer(option, val)
     raise TypeError(
-        "Wrong type for %s, value must be an " "non negative integer or a string" % (option,)
+        "Wrong type for %s, value must be a non negative integer or a string" % (option,)
     )
 
 
@@ -294,7 +293,7 @@
def validate_positive_float(option: str, value: Any) -> float: # float('inf') doesn't work in 2.4 or 2.5 on Windows, so just cap floats at # one billion - this is a reasonable approximation for infinity if not 0 < value < 1e9: - raise ValueError("%s must be greater than 0 and " "less than one billion" % (option,)) + raise ValueError("%s must be greater than 0 and less than one billion" % (option,)) return value @@ -402,7 +401,7 @@ def validate_read_preference_tags(name: str, value: Any) -> List[Dict[str, str]] tags[unquote_plus(key)] = unquote_plus(val) tag_sets.append(tags) except Exception: - raise ValueError("%r not a valid " "value for %s" % (tag_set, name)) + raise ValueError("%r not a valid value for %s" % (tag_set, name)) return tag_sets @@ -735,7 +734,7 @@ def validate_auth_option(option: str, value: Any) -> Tuple[str, Any]: """Validate optional authentication parameters.""" lower, value = validate(option, value) if lower not in _AUTH_OPTIONS: - raise ConfigurationError("Unknown " "authentication option: %s" % (option,)) + raise ConfigurationError("Unknown authentication option: %s" % (option,)) return option, value @@ -762,12 +761,12 @@ def get_validated_options( validated_options: MutableMapping[str, Any] if isinstance(options, _CaseInsensitiveDictionary): validated_options = _CaseInsensitiveDictionary() - get_normed_key = lambda x: x - get_setter_key = lambda x: options.cased_key(x) + get_normed_key = lambda x: x # noqa: E731 + get_setter_key = lambda x: options.cased_key(x) # noqa: E731 else: validated_options = {} - get_normed_key = lambda x: x.lower() - get_setter_key = lambda x: x + get_normed_key = lambda x: x.lower() # noqa: E731 + get_setter_key = lambda x: x # noqa: E731 for opt, value in options.items(): normed_key = get_normed_key(opt) @@ -804,9 +803,7 @@ def __init__( ) -> None: if not isinstance(codec_options, CodecOptions): - raise TypeError( - "codec_options must be an instance of " "bson.codec_options.CodecOptions" - ) + raise TypeError("codec_options must be an instance of bson.codec_options.CodecOptions") self.__codec_options = codec_options if not isinstance(read_preference, _ServerMode): @@ -819,14 +816,12 @@ def __init__( if not isinstance(write_concern, WriteConcern): raise TypeError( - "write_concern must be an instance of " "pymongo.write_concern.WriteConcern" + "write_concern must be an instance of pymongo.write_concern.WriteConcern" ) self.__write_concern = write_concern if not isinstance(read_concern, ReadConcern): - raise TypeError( - "read_concern must be an instance of " "pymongo.read_concern.ReadConcern" - ) + raise TypeError("read_concern must be an instance of pymongo.read_concern.ReadConcern") self.__read_concern = read_concern @property diff --git a/pymongo/compression_support.py b/pymongo/compression_support.py index 72cc232867..ed7021494f 100644 --- a/pymongo/compression_support.py +++ b/pymongo/compression_support.py @@ -82,7 +82,7 @@ def validate_compressors(dummy, value): def validate_zlib_compression_level(option, value): try: level = int(value) - except: + except Exception: raise TypeError("%s must be an integer, not %r." % (option, value)) if level < -1 or level > 9: raise ValueError("%s must be between -1 and 9, not %d." 
% (option, level)) diff --git a/pymongo/cursor.py b/pymongo/cursor.py index be4b998d31..02f1905df3 100644 --- a/pymongo/cursor.py +++ b/pymongo/cursor.py @@ -25,7 +25,6 @@ Iterable, List, Mapping, - MutableMapping, Optional, Sequence, Tuple, @@ -277,7 +276,7 @@ def __init__( # Exhaust cursor support if cursor_type == CursorType.EXHAUST: if self.__collection.database.client.is_mongos: - raise InvalidOperation("Exhaust cursors are " "not supported by mongos") + raise InvalidOperation("Exhaust cursors are not supported by mongos") if limit: raise InvalidOperation("Can't use limit and exhaust together.") self.__exhaust = True @@ -509,7 +508,7 @@ def add_option(self, mask: int) -> "Cursor[_DocumentType]": if self.__limit: raise InvalidOperation("Can't use limit and exhaust together.") if self.__collection.database.client.is_mongos: - raise InvalidOperation("Exhaust cursors are " "not supported by mongos") + raise InvalidOperation("Exhaust cursors are not supported by mongos") self.__exhaust = True self.__query_flags |= mask @@ -730,14 +729,14 @@ def __getitem__(self, index): skip = 0 if index.start is not None: if index.start < 0: - raise IndexError("Cursor instances do not support " "negative indices") + raise IndexError("Cursor instances do not support negative indices") skip = index.start if index.stop is not None: limit = index.stop - skip if limit < 0: raise IndexError( - "stop index must be greater than start " "index for slice %r" % index + "stop index must be greater than start index for slice %r" % index ) if limit == 0: self.__empty = True @@ -750,7 +749,7 @@ def __getitem__(self, index): if isinstance(index, int): if index < 0: - raise IndexError("Cursor instances do not support negative " "indices") + raise IndexError("Cursor instances do not support negative indices") clone = self.clone() clone.skip(index + self.__skip) clone.limit(-1) # use a hard limit @@ -758,7 +757,7 @@ def __getitem__(self, index): for doc in clone: return doc raise IndexError("no such item for Cursor instance") - raise TypeError("index %r cannot be applied to Cursor " "instances" % index) + raise TypeError("index %r cannot be applied to Cursor instances" % index) def max_scan(self, max_scan: Optional[int]) -> "Cursor[_DocumentType]": """**DEPRECATED** - Limit the number of documents to scan when diff --git a/pymongo/daemon.py b/pymongo/daemon.py index 53141751ac..4fdf147a59 100644 --- a/pymongo/daemon.py +++ b/pymongo/daemon.py @@ -70,7 +70,7 @@ def _spawn_daemon(args): _silence_resource_warning(popen) except FileNotFoundError as exc: warnings.warn( - f"Failed to start {args[0]}: is it on your $PATH?\n" f"Original exception: {exc}", + f"Failed to start {args[0]}: is it on your $PATH?\nOriginal exception: {exc}", RuntimeWarning, stacklevel=2, ) @@ -96,7 +96,7 @@ def _spawn(args): ) except FileNotFoundError as exc: warnings.warn( - f"Failed to start {args[0]}: is it on your $PATH?\n" f"Original exception: {exc}", + f"Failed to start {args[0]}: is it on your $PATH?\nOriginal exception: {exc}", RuntimeWarning, stacklevel=2, ) diff --git a/pymongo/database.py b/pymongo/database.py index e6633ed230..f92dbc8aed 100644 --- a/pymongo/database.py +++ b/pymongo/database.py @@ -47,7 +47,7 @@ def _check_name(name): for invalid_char in [" ", ".", "$", "/", "\\", "\x00", '"']: if invalid_char in name: - raise InvalidName("database names cannot contain the " "character %r" % invalid_char) + raise InvalidName("database names cannot contain the character %r" % invalid_char) if TYPE_CHECKING: @@ -966,7 +966,7 @@ def 
validate_collection( name = name.name if not isinstance(name, str): - raise TypeError("name_or_collection must be an instance of str or " "Collection") + raise TypeError("name_or_collection must be an instance of str or Collection") cmd = SON([("validate", name), ("scandata", scandata), ("full", full)]) if comment is not None: cmd["comment"] = comment @@ -988,7 +988,7 @@ def validate_collection( if "result" in res: info = res["result"] if info.find("exception") != -1 or info.find("corrupt") != -1: - raise CollectionInvalid("%s invalid: " "%s" % (name, info)) + raise CollectionInvalid("%s invalid: %s" % (name, info)) elif not res.get("valid", False): valid = False break diff --git a/pymongo/encryption.py b/pymongo/encryption.py index 4a6653f959..9616ac89cd 100644 --- a/pymongo/encryption.py +++ b/pymongo/encryption.py @@ -21,7 +21,7 @@ try: from pymongocrypt.auto_encrypter import AutoEncrypter - from pymongocrypt.errors import MongoCryptError + from pymongocrypt.errors import MongoCryptError # noqa: F401 from pymongocrypt.explicit_encrypter import ExplicitEncrypter from pymongocrypt.mongocrypt import MongoCryptOptions from pymongocrypt.state_machine import MongoCryptCallback @@ -440,9 +440,7 @@ def __init__( ) if not isinstance(codec_options, CodecOptions): - raise TypeError( - "codec_options must be an instance of " "bson.codec_options.CodecOptions" - ) + raise TypeError("codec_options must be an instance of bson.codec_options.CodecOptions") self._kms_providers = kms_providers self._key_vault_namespace = key_vault_namespace diff --git a/pymongo/encryption_options.py b/pymongo/encryption_options.py index c206b4c8b5..2ac12bc4b4 100644 --- a/pymongo/encryption_options.py +++ b/pymongo/encryption_options.py @@ -14,11 +14,10 @@ """Support for automatic client-side field level encryption.""" -import copy from typing import TYPE_CHECKING, Any, List, Mapping, Optional try: - import pymongocrypt + import pymongocrypt # noqa: F401 _HAVE_PYMONGOCRYPT = True except ImportError: diff --git a/pymongo/errors.py b/pymongo/errors.py index a98a5a7fb8..4a167383ca 100644 --- a/pymongo/errors.py +++ b/pymongo/errors.py @@ -15,7 +15,7 @@ """Exceptions raised by PyMongo.""" from typing import Any, Iterable, List, Mapping, Optional, Sequence, Tuple, Union -from bson.errors import * +from bson.errors import InvalidDocument try: # CPython 3.7+ diff --git a/pymongo/event_loggers.py b/pymongo/event_loggers.py index 0b92d9fa2b..248dfb17bd 100644 --- a/pymongo/event_loggers.py +++ b/pymongo/event_loggers.py @@ -43,25 +43,25 @@ class CommandLogger(monitoring.CommandListener): def started(self, event: monitoring.CommandStartedEvent) -> None: logging.info( - "Command {0.command_name} with request id " - "{0.request_id} started on server " - "{0.connection_id}".format(event) + f"Command {event.command_name} with request id " + f"{event.request_id} started on server " + f"{event.connection_id}" ) def succeeded(self, event: monitoring.CommandSucceededEvent) -> None: logging.info( - "Command {0.command_name} with request id " - "{0.request_id} on server {0.connection_id} " - "succeeded in {0.duration_micros} " - "microseconds".format(event) + f"Command {event.command_name} with request id " + f"{event.request_id} on server {event.connection_id} " + f"succeeded in {event.duration_micros} " + "microseconds" ) def failed(self, event: monitoring.CommandFailedEvent) -> None: logging.info( - "Command {0.command_name} with request id " - "{0.request_id} on server {0.connection_id} " - "failed in {0.duration_micros} " - 
"microseconds".format(event) + f"Command {event.command_name} with request id " + f"{event.request_id} on server {event.connection_id} " + f"failed in {event.duration_micros} " + "microseconds" ) @@ -77,7 +77,7 @@ class ServerLogger(monitoring.ServerListener): """ def opened(self, event: monitoring.ServerOpeningEvent) -> None: - logging.info("Server {0.server_address} added to topology " "{0.topology_id}".format(event)) + logging.info(f"Server {event.server_address} added to topology {event.topology_id}") def description_changed(self, event: monitoring.ServerDescriptionChangedEvent) -> None: previous_server_type = event.previous_description.server_type @@ -85,15 +85,13 @@ def description_changed(self, event: monitoring.ServerDescriptionChangedEvent) - if new_server_type != previous_server_type: # server_type_name was added in PyMongo 3.4 logging.info( - "Server {0.server_address} changed type from " - "{0.previous_description.server_type_name} to " - "{0.new_description.server_type_name}".format(event) + f"Server {event.server_address} changed type from " + f"{event.previous_description.server_type_name} to " + f"{event.new_description.server_type_name}" ) def closed(self, event: monitoring.ServerClosedEvent) -> None: - logging.warning( - "Server {0.server_address} removed from topology " "{0.topology_id}".format(event) - ) + logging.warning(f"Server {event.server_address} removed from topology {event.topology_id}") class HeartbeatLogger(monitoring.ServerHeartbeatListener): @@ -108,19 +106,19 @@ class HeartbeatLogger(monitoring.ServerHeartbeatListener): """ def started(self, event: monitoring.ServerHeartbeatStartedEvent) -> None: - logging.info("Heartbeat sent to server " "{0.connection_id}".format(event)) + logging.info(f"Heartbeat sent to server {event.connection_id}") def succeeded(self, event: monitoring.ServerHeartbeatSucceededEvent) -> None: # The reply.document attribute was added in PyMongo 3.4. 
logging.info( - "Heartbeat to server {0.connection_id} " + f"Heartbeat to server {event.connection_id} " "succeeded with reply " - "{0.reply.document}".format(event) + f"{event.reply.document}" ) def failed(self, event: monitoring.ServerHeartbeatFailedEvent) -> None: logging.warning( - "Heartbeat to server {0.connection_id} " "failed with error {0.reply}".format(event) + f"Heartbeat to server {event.connection_id} failed with error {event.reply}" ) @@ -136,20 +134,18 @@ class TopologyLogger(monitoring.TopologyListener): """ def opened(self, event: monitoring.TopologyOpenedEvent) -> None: - logging.info("Topology with id {0.topology_id} " "opened".format(event)) + logging.info(f"Topology with id {event.topology_id} opened") def description_changed(self, event: monitoring.TopologyDescriptionChangedEvent) -> None: - logging.info( - "Topology description updated for " "topology id {0.topology_id}".format(event) - ) + logging.info(f"Topology description updated for topology id {event.topology_id}") previous_topology_type = event.previous_description.topology_type new_topology_type = event.new_description.topology_type if new_topology_type != previous_topology_type: # topology_type_name was added in PyMongo 3.4 logging.info( - "Topology {0.topology_id} changed type from " - "{0.previous_description.topology_type_name} to " - "{0.new_description.topology_type_name}".format(event) + f"Topology {event.topology_id} changed type from " + f"{event.previous_description.topology_type_name} to " + f"{event.new_description.topology_type_name}" ) # The has_writable_server and has_readable_server methods # were added in PyMongo 3.4. @@ -159,7 +155,7 @@ def description_changed(self, event: monitoring.TopologyDescriptionChangedEvent) logging.warning("No readable servers available.") def closed(self, event: monitoring.TopologyClosedEvent) -> None: - logging.info("Topology with id {0.topology_id} " "closed".format(event)) + logging.info(f"Topology with id {event.topology_id} closed") class ConnectionPoolLogger(monitoring.ConnectionPoolListener): @@ -181,53 +177,45 @@ class ConnectionPoolLogger(monitoring.ConnectionPoolListener): """ def pool_created(self, event: monitoring.PoolCreatedEvent) -> None: - logging.info("[pool {0.address}] pool created".format(event)) + logging.info(f"[pool {event.address}] pool created") def pool_ready(self, event): - logging.info("[pool {0.address}] pool ready".format(event)) + logging.info(f"[pool {event.address}] pool ready") def pool_cleared(self, event: monitoring.PoolClearedEvent) -> None: - logging.info("[pool {0.address}] pool cleared".format(event)) + logging.info(f"[pool {event.address}] pool cleared") def pool_closed(self, event: monitoring.PoolClosedEvent) -> None: - logging.info("[pool {0.address}] pool closed".format(event)) + logging.info(f"[pool {event.address}] pool closed") def connection_created(self, event: monitoring.ConnectionCreatedEvent) -> None: - logging.info( - "[pool {0.address}][conn #{0.connection_id}] " "connection created".format(event) - ) + logging.info(f"[pool {event.address}][conn #{event.connection_id}] connection created") def connection_ready(self, event: monitoring.ConnectionReadyEvent) -> None: logging.info( - "[pool {0.address}][conn #{0.connection_id}] " - "connection setup succeeded".format(event) + f"[pool {event.address}][conn #{event.connection_id}] connection setup succeeded" ) def connection_closed(self, event: monitoring.ConnectionClosedEvent) -> None: logging.info( - "[pool {0.address}][conn #{0.connection_id}] " - "connection 
closed, reason: " - "{0.reason}".format(event) + f"[pool {event.address}][conn #{event.connection_id}] " + f'connection closed, reason: "{event.reason}"' ) def connection_check_out_started( self, event: monitoring.ConnectionCheckOutStartedEvent ) -> None: - logging.info("[pool {0.address}] connection check out " "started".format(event)) + logging.info(f"[pool {event.address}] connection check out started") def connection_check_out_failed(self, event: monitoring.ConnectionCheckOutFailedEvent) -> None: - logging.info( - "[pool {0.address}] connection check out " "failed, reason: {0.reason}".format(event) - ) + logging.info(f"[pool {event.address}] connection check out failed, reason: {event.reason}") def connection_checked_out(self, event: monitoring.ConnectionCheckedOutEvent) -> None: logging.info( - "[pool {0.address}][conn #{0.connection_id}] " - "connection checked out of pool".format(event) + f"[pool {event.address}][conn #{event.connection_id}] connection checked out of pool" ) def connection_checked_in(self, event: monitoring.ConnectionCheckedInEvent) -> None: logging.info( - "[pool {0.address}][conn #{0.connection_id}] " - "connection checked into pool".format(event) + f"[pool {event.address}][conn #{event.connection_id}] connection checked into pool" ) diff --git a/pymongo/helpers.py b/pymongo/helpers.py index f12c1e1655..8311aafa8f 100644 --- a/pymongo/helpers.py +++ b/pymongo/helpers.py @@ -86,9 +86,7 @@ def _index_list(key_or_list, direction=None): if isinstance(key_or_list, abc.ItemsView): return list(key_or_list) elif not isinstance(key_or_list, (list, tuple)): - raise TypeError( - "if no direction is specified, " "key_or_list must be an instance of list" - ) + raise TypeError("if no direction is specified, key_or_list must be an instance of list") return key_or_list @@ -104,7 +102,7 @@ def _index_document(index_list): "mean %r?" 
% list(index_list.items()) ) elif not isinstance(index_list, (list, tuple)): - raise TypeError("must use a list of (key, direction) pairs, " "not: " + repr(index_list)) + raise TypeError("must use a list of (key, direction) pairs, not: " + repr(index_list)) if not len(index_list): raise ValueError("key_or_list must not be the empty list") @@ -237,11 +235,11 @@ def _fields_list_to_dict(fields, option_name): if isinstance(fields, (abc.Sequence, abc.Set)): if not all(isinstance(field, str) for field in fields): raise TypeError( - "%s must be a list of key names, each an " "instance of str" % (option_name,) + "%s must be a list of key names, each an instance of str" % (option_name,) ) return dict.fromkeys(fields, 1) - raise TypeError("%s must be a mapping or " "list of key names" % (option_name,)) + raise TypeError("%s must be a mapping or list of key names" % (option_name,)) def _handle_exception(): diff --git a/pymongo/message.py b/pymongo/message.py index 18cf0a6bf3..92d59c3ebd 100644 --- a/pymongo/message.py +++ b/pymongo/message.py @@ -638,7 +638,7 @@ def _op_msg_uncompressed(flags, command, identifier, docs, opts): if _use_c: - _op_msg_uncompressed = _cmessage._op_msg + _op_msg_uncompressed = _cmessage._op_msg # noqa: F811 def _op_msg(flags, command, dbname, read_preference, opts, ctx=None): @@ -712,7 +712,7 @@ def _query_uncompressed( if _use_c: - _query_uncompressed = _cmessage._query_message + _query_uncompressed = _cmessage._query_message # noqa: F811 def _query( @@ -754,7 +754,7 @@ def _get_more_uncompressed(collection_name, num_to_return, cursor_id): if _use_c: - _get_more_uncompressed = _cmessage._get_more_message + _get_more_uncompressed = _cmessage._get_more_message # noqa: F811 def _get_more(collection_name, num_to_return, cursor_id, ctx=None): @@ -1085,7 +1085,7 @@ def _encode_batched_op_msg(operation, command, docs, ack, opts, ctx): if _use_c: - _encode_batched_op_msg = _cmessage._encode_batched_op_msg + _encode_batched_op_msg = _cmessage._encode_batched_op_msg # noqa: F811 def _batched_op_msg_compressed(operation, command, docs, ack, opts, ctx): @@ -1120,7 +1120,7 @@ def _batched_op_msg(operation, command, docs, ack, opts, ctx): if _use_c: - _batched_op_msg = _cmessage._batched_op_msg + _batched_op_msg = _cmessage._batched_op_msg # noqa: F811 def _do_batched_op_msg(namespace, operation, command, docs, opts, ctx): @@ -1149,7 +1149,7 @@ def _encode_batched_write_command(namespace, operation, command, docs, opts, ctx if _use_c: - _encode_batched_write_command = _cmessage._encode_batched_write_command + _encode_batched_write_command = _cmessage._encode_batched_write_command # noqa: F811 def _batched_write_command_impl(namespace, operation, command, docs, opts, ctx, buf): @@ -1348,7 +1348,7 @@ def __init__(self, flags, payload_document): self.flags = flags self.payload_document = payload_document - def raw_response(self, cursor_id=None, user_fields={}): + def raw_response(self, cursor_id=None, user_fields={}): # noqa: B006 """ cursor_id is ignored user_fields is used to determine which fields must not be decoded @@ -1395,12 +1395,12 @@ def unpack(cls, msg): flags, first_payload_type, first_payload_size = cls.UNPACK_FROM(msg) if flags != 0: if flags & cls.CHECKSUM_PRESENT: - raise ProtocolError("Unsupported OP_MSG flag checksumPresent: " "0x%x" % (flags,)) + raise ProtocolError("Unsupported OP_MSG flag checksumPresent: 0x%x" % (flags,)) if flags ^ cls.MORE_TO_COME: raise ProtocolError("Unsupported OP_MSG flags: 0x%x" % (flags,)) if first_payload_type != 0: - raise 
ProtocolError("Unsupported OP_MSG payload type: " "0x%x" % (first_payload_type,)) + raise ProtocolError("Unsupported OP_MSG payload type: 0x%x" % (first_payload_type,)) if len(msg) != first_payload_size + 5: raise ProtocolError("Unsupported OP_MSG reply: >1 section") diff --git a/pymongo/mongo_client.py b/pymongo/mongo_client.py index e9fa932ff1..9414d71962 100644 --- a/pymongo/mongo_client.py +++ b/pymongo/mongo_client.py @@ -52,7 +52,6 @@ cast, ) -import bson from bson.codec_options import DEFAULT_CODEC_OPTIONS, CodecOptions, TypeRegistry from bson.son import SON from bson.timestamp import Timestamp @@ -687,7 +686,7 @@ def __init__( srv_service_name = keyword_opts.get("srvservicename") srv_max_hosts = keyword_opts.get("srvmaxhosts") if len([h for h in host if "/" in h]) > 1: - raise ConfigurationError("host must not contain multiple MongoDB " "URIs") + raise ConfigurationError("host must not contain multiple MongoDB URIs") for entity in host: # A hostname can only include a-z, 0-9, '-' and '.'. If we find a '/' # it must be a URI, @@ -1165,7 +1164,7 @@ def _get_socket(self, server, session): and sock_info.max_wire_version < 8 ): raise ConfigurationError( - "Auto-encryption requires a minimum MongoDB version " "of 4.2" + "Auto-encryption requires a minimum MongoDB version of 4.2" ) yield sock_info @@ -1229,7 +1228,7 @@ def _socket_from_server(self, read_preference, server, session): def _socket_for_reads(self, read_preference, session): assert read_preference is not None, "read_preference must not be None" - topology = self._get_topology() + _ = self._get_topology() server = self._select_server(read_preference, session) return self._socket_from_server(read_preference, server, session) @@ -1814,7 +1813,7 @@ def drop_database( name = name.name if not isinstance(name, str): - raise TypeError("name_or_database must be an instance " "of str or a Database") + raise TypeError("name_or_database must be an instance of str or a Database") with self._socket_for_writes(session) as sock_info: self[name]._command( diff --git a/pymongo/monitoring.py b/pymongo/monitoring.py index 6a3ed6d07e..4798542dc7 100644 --- a/pymongo/monitoring.py +++ b/pymongo/monitoring.py @@ -633,7 +633,7 @@ def __init__( super(CommandStartedEvent, self).__init__( command_name, request_id, connection_id, operation_id, service_id=service_id ) - cmd_name, cmd_doc = command_name.lower(), command[command_name] + cmd_name = command_name.lower() if cmd_name in _SENSITIVE_COMMANDS or _is_speculative_authenticate(cmd_name, command): self.__cmd: Mapping[str, Any] = {} else: @@ -651,7 +651,7 @@ def database_name(self) -> str: return self.__db def __repr__(self): - return ("<%s %s db: %r, command: %r, operation_id: %s, " "service_id: %s>") % ( + return ("<%s %s db: %r, command: %r, operation_id: %s, service_id: %s>") % ( self.__class__.__name__, self.connection_id, self.database_name, @@ -708,7 +708,7 @@ def reply(self) -> _DocumentOut: return self.__reply def __repr__(self): - return ("<%s %s command: %r, operation_id: %s, duration_micros: %s, " "service_id: %s>") % ( + return ("<%s %s command: %r, operation_id: %s, duration_micros: %s, service_id: %s>") % ( self.__class__.__name__, self.connection_id, self.command_name, diff --git a/pymongo/network.py b/pymongo/network.py index db952af731..01dca0b835 100644 --- a/pymongo/network.py +++ b/pymongo/network.py @@ -210,10 +210,10 @@ def receive_message(sock_info, request_id, max_message_size=MAX_MESSAGE_SIZE): # No request_id for exhaust cursor "getMore". 
if request_id is not None: if request_id != response_to: - raise ProtocolError("Got response id %r but expected " "%r" % (response_to, request_id)) + raise ProtocolError("Got response id %r but expected %r" % (response_to, request_id)) if length <= 16: raise ProtocolError( - "Message length (%r) not longer than standard " "message header size (16)" % (length,) + "Message length (%r) not longer than standard message header size (16)" % (length,) ) if length > max_message_size: raise ProtocolError( @@ -231,7 +231,7 @@ def receive_message(sock_info, request_id, max_message_size=MAX_MESSAGE_SIZE): try: unpack_reply = _UNPACK_REPLY[op_code] except KeyError: - raise ProtocolError("Got opcode %r but expected " "%r" % (op_code, _UNPACK_REPLY.keys())) + raise ProtocolError("Got opcode %r but expected %r" % (op_code, _UNPACK_REPLY.keys())) return unpack_reply(data) @@ -272,7 +272,7 @@ def _receive_data_on_socket(sock_info, length, deadline): try: wait_for_read(sock_info, deadline) chunk_length = sock_info.sock.recv_into(mv[bytes_read:]) - except (IOError, OSError) as exc: + except (IOError, OSError) as exc: # noqa: B014 if _errno_from_exception(exc) == errno.EINTR: continue raise diff --git a/pymongo/periodic_executor.py b/pymongo/periodic_executor.py index 5bb08ec23f..2c3727a7a3 100644 --- a/pymongo/periodic_executor.py +++ b/pymongo/periodic_executor.py @@ -124,7 +124,7 @@ def _run(self): if not self._target(): self._stopped = True break - except: + except BaseException: with self._lock: self._stopped = True self._thread_will_exit = True diff --git a/pymongo/pool.py b/pymongo/pool.py index 61945e2d5b..09709ffbf4 100644 --- a/pymongo/pool.py +++ b/pymongo/pool.py @@ -72,7 +72,7 @@ def is_ip_address(address): try: ipaddress.ip_address(address) return True - except (ValueError, UnicodeError): + except (ValueError, UnicodeError): # noqa: B014 return False @@ -857,9 +857,7 @@ def validate_session(self, client, session): """ if session: if session._client is not client: - raise InvalidOperation( - "Can only use session with the MongoClient that" " started it" - ) + raise InvalidOperation("Can only use session with the MongoClient that started it") def close_socket(self, reason): """Close this connection with a reason.""" @@ -963,7 +961,7 @@ def _create_connection(address, options): # Check if dealing with a unix domain socket if host.endswith(".sock"): if not hasattr(socket, "AF_UNIX"): - raise ConnectionFailure("UNIX-sockets are not supported " "on this system") + raise ConnectionFailure("UNIX-sockets are not supported on this system") sock = socket.socket(socket.AF_UNIX) # SOCK_CLOEXEC not supported for Unix sockets. _set_non_inheritable_non_atomic(sock.fileno()) @@ -1045,7 +1043,7 @@ def _configured_socket(address, options): # Raise _CertificateError directly like we do after match_hostname # below. raise - except (IOError, OSError, _SSLError) as exc: + except (IOError, OSError, _SSLError) as exc: # noqa: B014 sock.close() # We raise AutoReconnect for transient and permanent SSL handshake # failures alike. 
Permanent handshake failures, like protocol @@ -1246,8 +1244,8 @@ def update_is_writable(self, is_writable): """ self.is_writable = is_writable with self.lock: - for socket in self.sockets: - socket.update_is_writable(self.is_writable) + for _socket in self.sockets: + _socket.update_is_writable(self.is_writable) def reset(self, service_id=None): self._reset(close=False, service_id=service_id) @@ -1386,7 +1384,7 @@ def get_socket(self, handler=None): listeners.publish_connection_checked_out(self.address, sock_info.id) try: yield sock_info - except: + except BaseException: # Exception in caller. Ensure the connection gets returned. # Note that when pinned is True, the session owns the # connection and it is responsible for checking the connection @@ -1433,7 +1431,7 @@ def _get_socket(self): self.address, ConnectionCheckOutFailedReason.POOL_CLOSED ) raise _PoolClosedError( - "Attempted to check out a connection from closed connection " "pool" + "Attempted to check out a connection from closed connection pool" ) with self.lock: diff --git a/pymongo/pyopenssl_context.py b/pymongo/pyopenssl_context.py index 9e4c5cab40..eae38daef8 100644 --- a/pymongo/pyopenssl_context.py +++ b/pymongo/pyopenssl_context.py @@ -75,7 +75,7 @@ def _is_ip_address(address): try: _ip_address(address) return True - except (ValueError, UnicodeError): + except (ValueError, UnicodeError): # noqa: B014 return False @@ -145,7 +145,7 @@ def sendall(self, buf, flags=0): # XXX: It's not clear if this can actually happen. PyOpenSSL # doesn't appear to have any interrupt handling, nor any interrupt # errors for OpenSSL connections. - except (IOError, OSError) as exc: + except (IOError, OSError) as exc: # noqa: B014 if _errno_from_exception(exc) == _EINTR: continue raise diff --git a/pymongo/read_preferences.py b/pymongo/read_preferences.py index 02a2e88bf0..5ce2fbafcc 100644 --- a/pymongo/read_preferences.py +++ b/pymongo/read_preferences.py @@ -49,8 +49,7 @@ def _validate_tag_sets(tag_sets): raise TypeError(("Tag sets %r invalid, must be a sequence") % (tag_sets,)) if len(tag_sets) == 0: raise ValueError( - ("Tag sets %r invalid, must be None or contain at least one set of" " tags") - % (tag_sets,) + ("Tag sets %r invalid, must be None or contain at least one set of tags") % (tag_sets,) ) for tags in tag_sets: @@ -500,10 +499,10 @@ def make_read_preference( ) -> _ServerMode: if mode == _PRIMARY: if tag_sets not in (None, [{}]): - raise ConfigurationError("Read preference primary " "cannot be combined with tags") + raise ConfigurationError("Read preference primary cannot be combined with tags") if max_staleness != -1: raise ConfigurationError( - "Read preference primary cannot be " "combined with maxStalenessSeconds" + "Read preference primary cannot be combined with maxStalenessSeconds" ) return Primary() return _ALL_READ_PREFERENCES[mode](tag_sets, max_staleness) # type: ignore diff --git a/pymongo/results.py b/pymongo/results.py index 127f574184..1cbb614bf3 100644 --- a/pymongo/results.py +++ b/pymongo/results.py @@ -13,7 +13,7 @@ # limitations under the License. 
"""Result class definitions.""" -from typing import Any, Dict, List, Mapping, Optional, Sequence, cast +from typing import Any, Dict, List, Optional, cast from pymongo.errors import InvalidOperation diff --git a/pymongo/server.py b/pymongo/server.py index be1e7da89c..f26f473c32 100644 --- a/pymongo/server.py +++ b/pymongo/server.py @@ -21,7 +21,6 @@ from pymongo.helpers import _check_command_response from pymongo.message import _convert_exception, _OpMsg from pymongo.response import PinnedResponse, Response -from pymongo.server_type import SERVER_TYPE _CURSOR_DOC_FIELDS = {"cursor": {"firstBatch": 1, "nextBatch": 1}} diff --git a/pymongo/server_description.py b/pymongo/server_description.py index 6b2a71df0b..47e27c531b 100644 --- a/pymongo/server_description.py +++ b/pymongo/server_description.py @@ -15,7 +15,7 @@ """Represent one server the driver is connected to.""" import time -from typing import Any, Dict, Mapping, Optional, Set, Tuple, cast +from typing import Any, Dict, Mapping, Optional, Set, Tuple from bson import EPOCH_NAIVE from bson.objectid import ObjectId diff --git a/pymongo/socket_checker.py b/pymongo/socket_checker.py index 70c12f0699..420953db2e 100644 --- a/pymongo/socket_checker.py +++ b/pymongo/socket_checker.py @@ -17,7 +17,7 @@ import errno import select import sys -from typing import Any, Optional, Union +from typing import Any, Optional # PYTHON-2320: Jython does not fully support poll on SSL sockets, # https://bugs.jython.org/issue2900 diff --git a/pymongo/ssl_context.py b/pymongo/ssl_context.py index e546105141..148bef936d 100644 --- a/pymongo/ssl_context.py +++ b/pymongo/ssl_context.py @@ -31,10 +31,10 @@ # Base Exception class SSLError = _ssl.SSLError -from ssl import SSLContext +from ssl import SSLContext # noqa: F401,E402 if hasattr(_ssl, "VERIFY_CRL_CHECK_LEAF"): - from ssl import VERIFY_CRL_CHECK_LEAF + from ssl import VERIFY_CRL_CHECK_LEAF # noqa: F401 # Python 3.7 uses OpenSSL's hostname matching implementation # making it the obvious version to start using SSLConext.check_hostname. # Python 3.6 might have been a good version, but it suffers diff --git a/pymongo/ssl_support.py b/pymongo/ssl_support.py index 7b5417fefa..06ef7ef185 100644 --- a/pymongo/ssl_support.py +++ b/pymongo/ssl_support.py @@ -34,7 +34,7 @@ # CPython ssl module constants to configure certificate verification # at a high level. This is legacy behavior, but requires us to # import the ssl module even if we're only using it for this purpose. - import ssl as _stdlibssl + import ssl as _stdlibssl # noqa from ssl import CERT_NONE, CERT_REQUIRED HAS_SNI = _ssl.HAS_SNI @@ -79,7 +79,7 @@ def get_ssl_context( if _ssl.IS_PYOPENSSL: raise ConfigurationError("tlsCRLFile cannot be used with PyOpenSSL") # Match the server's behavior. 
- setattr(ctx, "verify_flags", getattr(_ssl, "VERIFY_CRL_CHECK_LEAF", 0)) + setattr(ctx, "verify_flags", getattr(_ssl, "VERIFY_CRL_CHECK_LEAF", 0)) # noqa ctx.load_verify_locations(crlfile) if ca_certs is not None: ctx.load_verify_locations(ca_certs) diff --git a/pymongo/typings.py b/pymongo/typings.py index 263b591e24..19d92b2381 100644 --- a/pymongo/typings.py +++ b/pymongo/typings.py @@ -17,13 +17,11 @@ TYPE_CHECKING, Any, Dict, - List, Mapping, MutableMapping, Optional, Sequence, Tuple, - Type, TypeVar, Union, ) diff --git a/pymongo/uri_parser.py b/pymongo/uri_parser.py index 3417c4954e..fa44dd8569 100644 --- a/pymongo/uri_parser.py +++ b/pymongo/uri_parser.py @@ -15,19 +15,8 @@ """Tools to parse and validate a MongoDB URI.""" import re -import sys import warnings -from typing import ( - Any, - Dict, - List, - Mapping, - MutableMapping, - Optional, - Tuple, - Union, - cast, -) +from typing import Any, Dict, List, Mapping, MutableMapping, Optional, Tuple, Union from urllib.parse import unquote_plus from pymongo.client_options import _parse_ssl_options @@ -107,7 +96,7 @@ def parse_ipv6_literal_host( """ if entity.find("]") == -1: raise ValueError( - "an IPv6 address literal must be " "enclosed in '[' and ']' according " "to RFC 2732." + "an IPv6 address literal must be enclosed in '[' and ']' according to RFC 2732." ) i = entity.find("]:") if i == -1: @@ -196,7 +185,7 @@ def _handle_security_options(options): if tlsinsecure is not None: for opt in _IMPLICIT_TLSINSECURE_OPTS: if opt in options: - err_msg = "URI options %s and %s cannot be specified " "simultaneously." + err_msg = "URI options %s and %s cannot be specified simultaneously." raise InvalidURI( err_msg % (options.cased_key("tlsinsecure"), options.cased_key(opt)) ) @@ -205,7 +194,7 @@ def _handle_security_options(options): tlsallowinvalidcerts = options.get("tlsallowinvalidcertificates") if tlsallowinvalidcerts is not None: if "tlsdisableocspendpointcheck" in options: - err_msg = "URI options %s and %s cannot be specified " "simultaneously." + err_msg = "URI options %s and %s cannot be specified simultaneously." raise InvalidURI( err_msg % ("tlsallowinvalidcertificates", options.cased_key("tlsdisableocspendpointcheck")) @@ -218,7 +207,7 @@ def _handle_security_options(options): if tlscrlfile is not None: for opt in ("tlsinsecure", "tlsallowinvalidcertificates", "tlsdisableocspendpointcheck"): if options.get(opt) is True: - err_msg = "URI option %s=True cannot be specified when " "CRL checking is enabled." + err_msg = "URI option %s=True cannot be specified when CRL checking is enabled." raise InvalidURI(err_msg % (opt,)) if "ssl" in options and "tls" in options: @@ -231,7 +220,7 @@ def truth_value(val): return val if truth_value(options.get("ssl")) != truth_value(options.get("tls")): - err_msg = "Can not specify conflicting values for URI options %s " "and %s." + err_msg = "Can not specify conflicting values for URI options %s and %s." raise InvalidURI(err_msg % (options.cased_key("ssl"), options.cased_key("tls"))) return options @@ -252,7 +241,7 @@ def _handle_option_deprecations(options): if mode == "renamed": newoptname = message if newoptname in options: - warn_msg = "Deprecated option '%s' ignored in favor of " "'%s'." + warn_msg = "Deprecated option '%s' ignored in favor of '%s'." 
warnings.warn( warn_msg % (options.cased_key(optname), options.cased_key(newoptname)), DeprecationWarning, @@ -378,7 +367,7 @@ def split_hosts(hosts: str, default_port: Optional[int] = DEFAULT_PORT) -> List[ nodes = [] for entity in hosts.split(","): if not entity: - raise ConfigurationError("Empty host " "(or extra comma in host list).") + raise ConfigurationError("Empty host (or extra comma in host list).") port = default_port # Unix socket entities don't have ports if entity.endswith(".sock"): @@ -486,7 +475,7 @@ def parse_uri( scheme_free = uri[SRV_SCHEME_LEN:] else: raise InvalidURI( - "Invalid URI scheme: URI must " "begin with '%s' or '%s'" % (SCHEME, SRV_SCHEME) + "Invalid URI scheme: URI must begin with '%s' or '%s'" % (SCHEME, SRV_SCHEME) ) if not scheme_free: @@ -504,7 +493,7 @@ def parse_uri( path_part = "" if not path_part and "?" in host_part: - raise InvalidURI("A '/' is required between " "the host list and any options.") + raise InvalidURI("A '/' is required between the host list and any options.") if path_part: dbase, _, opts = path_part.partition("?") @@ -528,9 +517,7 @@ def parse_uri( hosts = host_part if "/" in hosts: - raise InvalidURI( - "Any '/' in a unix domain socket must be" " percent-encoded: %s" % host_part - ) + raise InvalidURI("Any '/' in a unix domain socket must be percent-encoded: %s" % host_part) hosts = unquote_plus(hosts) fqdn = None @@ -538,11 +525,11 @@ def parse_uri( if is_srv: if options.get("directConnection"): raise ConfigurationError( - "Cannot specify directConnection=true with " "%s URIs" % (SRV_SCHEME,) + "Cannot specify directConnection=true with %s URIs" % (SRV_SCHEME,) ) nodes = split_hosts(hosts, default_port=None) if len(nodes) != 1: - raise InvalidURI("%s URIs must include one, " "and only one, hostname" % (SRV_SCHEME,)) + raise InvalidURI("%s URIs must include one, and only one, hostname" % (SRV_SCHEME,)) fqdn, port = nodes[0] if port is not None: raise InvalidURI("%s URIs must not include a port number" % (SRV_SCHEME,)) @@ -557,7 +544,7 @@ def parse_uri( parsed_dns_options = split_options(dns_options, validate, warn, normalize) if set(parsed_dns_options) - _ALLOWED_TXT_OPTS: raise ConfigurationError( - "Only authSource, replicaSet, and loadBalanced are " "supported from DNS" + "Only authSource, replicaSet, and loadBalanced are supported from DNS" ) for opt, val in parsed_dns_options.items(): if opt not in options: @@ -570,11 +557,11 @@ def parse_uri( options["tls"] = True if validate else "true" elif not is_srv and options.get("srvServiceName") is not None: raise ConfigurationError( - "The srvServiceName option is only allowed " "with 'mongodb+srv://' URIs" + "The srvServiceName option is only allowed with 'mongodb+srv://' URIs" ) elif not is_srv and srv_max_hosts: raise ConfigurationError( - "The srvMaxHosts option is only allowed " "with 'mongodb+srv://' URIs" + "The srvMaxHosts option is only allowed with 'mongodb+srv://' URIs" ) else: nodes = split_hosts(hosts, default_port=default_port) diff --git a/pymongo/write_concern.py b/pymongo/write_concern.py index fea912d569..ced71d0488 100644 --- a/pymongo/write_concern.py +++ b/pymongo/write_concern.py @@ -73,7 +73,7 @@ def __init__( if not isinstance(fsync, bool): raise TypeError("fsync must be True or False") if j and fsync: - raise ConfigurationError("Can't set both j " "and fsync at the same time") + raise ConfigurationError("Can't set both j and fsync at the same time") self.__document["fsync"] = fsync if w == 0 and j is True: diff --git a/setup.py b/setup.py index 
5dbbdde22b..699ced1f85 100755 --- a/setup.py +++ b/setup.py @@ -10,11 +10,10 @@ # Hack to silence atexit traceback in some Python versions try: - import multiprocessing + import multiprocessing # noqa: F401 except ImportError: pass -from setuptools import __version__ as _setuptools_version from setuptools import setup if sys.version_info[:2] < (3, 10): @@ -41,7 +40,7 @@ try: try: readme_content = f.read() - except: + except BaseException: readme_content = "" finally: f.close() @@ -152,7 +151,7 @@ def run(self): try: os.makedirs(path) - except: + except BaseException: pass sphinx_args = ["-E", "-b", mode, "doc", path] @@ -169,7 +168,7 @@ def run(self): raise RuntimeError("documentation step '%s' failed" % (mode,)) sys.stdout.write( - "\nDocumentation step '%s' performed, results here:\n" " %s/\n" % (mode, path) + "\nDocumentation step '%s' performed, results here:\n %s/\n" % (mode, path) ) @@ -232,7 +231,7 @@ def run(self): self.warning_message % ( "Extension modules", - "There was an issue with " "your platform configuration" " - see above.", + "There was an issue with your platform configuration - see above.", ) ) @@ -246,8 +245,8 @@ def build_extension(self, ext): warnings.warn( self.warning_message % ( - "The %s extension " "module" % (name,), - "The output above " "this warning shows how " "the compilation " "failed.", + "The %s extension module" % (name,), + "The output above this warning shows how the compilation failed.", ) ) diff --git a/test/__init__.py b/test/__init__.py index d75c011547..be0825025a 100644 --- a/test/__init__.py +++ b/test/__init__.py @@ -20,7 +20,6 @@ import socket import sys import threading -import time import traceback import unittest import warnings @@ -34,7 +33,7 @@ HAVE_XML = False try: - import ipaddress + import ipaddress # noqa HAVE_IPADDRESS = True except ImportError: @@ -667,7 +666,7 @@ def require_secondary_read_pref(self): """ return self._require( lambda: self.supports_secondary_read_pref, - "This cluster does not support secondary read " "preference", + "This cluster does not support secondary read preference", ) def require_no_replica_set(self, func): @@ -757,7 +756,7 @@ def is_topology_type(self, topologies): return True return False - def require_cluster_type(self, topologies=[]): + def require_cluster_type(self, topologies=[]): # noqa """Run a test only if the client is connected to a cluster that conforms to one of the specified topologies. Acceptable topologies are 'single', 'replicaset', and 'sharded'.""" @@ -825,7 +824,7 @@ def require_server_resolvable(self, func): """Run a test only if the hostname 'server' is resolvable.""" return self._require( lambda: self.server_is_resolvable, - "No hosts entry for 'server'. Cannot validate " "hostname in the certificate", + "No hosts entry for 'server'. 
Cannot validate hostname in the certificate", func=func, ) @@ -1125,9 +1124,9 @@ def test_cases(suite): # Helper method to workaround https://bugs.python.org/issue21724 def clear_warning_registry(): """Clear the __warningregistry__ for all modules.""" - for name, module in list(sys.modules.items()): + for _, module in list(sys.modules.items()): if hasattr(module, "__warningregistry__"): - setattr(module, "__warningregistry__", {}) + setattr(module, "__warningregistry__", {}) # noqa class SystemCertsPatcher(object): diff --git a/test/atlas/test_connection.py b/test/atlas/test_connection.py index cad2b10683..a1eb97edee 100644 --- a/test/atlas/test_connection.py +++ b/test/atlas/test_connection.py @@ -25,7 +25,7 @@ from pymongo.ssl_support import HAS_SNI try: - import dns + import dns # noqa HAS_DNS = True except ImportError: @@ -120,7 +120,7 @@ def test_uniqueness(self): duplicates = [names for names in uri_to_names.values() if len(names) > 1] self.assertFalse( duplicates, - "Error: the following env variables have " "duplicate values: %s" % (duplicates,), + "Error: the following env variables have duplicate values: %s" % (duplicates,), ) diff --git a/test/mockupdb/test_cluster_time.py b/test/mockupdb/test_cluster_time.py index e6d8c2126c..cb06a129d2 100644 --- a/test/mockupdb/test_cluster_time.py +++ b/test/mockupdb/test_cluster_time.py @@ -28,7 +28,7 @@ def cluster_time_conversation(self, callback, replies): server = MockupDB() # First test all commands include $clusterTime with wire version 6. - responder = server.autoresponds( + _ = server.autoresponds( "ismaster", { "minWireVersion": 0, diff --git a/test/mockupdb/test_handshake.py b/test/mockupdb/test_handshake.py index c9799fa21e..39188e8ad0 100644 --- a/test/mockupdb/test_handshake.py +++ b/test/mockupdb/test_handshake.py @@ -33,7 +33,7 @@ def test_hello_with_option(self, protocol, **kwargs): def respond(r): # Only save the very first request from the driver. 
- if self.handshake_req == None: + if self.handshake_req is None: self.handshake_req = r load_balanced_kwargs = {"serviceId": ObjectId()} if kwargs.get("loadBalanced") else {} return r.reply( @@ -261,7 +261,7 @@ def responder(request): self.addCleanup(client.close) self.assertRaises(OperationFailure, client.db.collection.find_one, {"a": 1}) self.assertTrue( - self.found_auth_msg, "Could not find authentication " "command with correct protocol" + self.found_auth_msg, "Could not find authentication command with correct protocol" ) diff --git a/test/mockupdb/test_list_indexes.py b/test/mockupdb/test_list_indexes.py index 2bdbd7b910..20764e6e5a 100644 --- a/test/mockupdb/test_list_indexes.py +++ b/test/mockupdb/test_list_indexes.py @@ -16,7 +16,7 @@ import unittest -from mockupdb import MockupDB, OpGetMore, going +from mockupdb import MockupDB, going from bson import SON from pymongo import MongoClient diff --git a/test/mockupdb/test_mixed_version_sharded.py b/test/mockupdb/test_mixed_version_sharded.py index ce91794ee4..d5fb9913cc 100644 --- a/test/mockupdb/test_mixed_version_sharded.py +++ b/test/mockupdb/test_mixed_version_sharded.py @@ -18,7 +18,7 @@ import unittest from queue import Queue -from mockupdb import MockupDB, OpMsg, go +from mockupdb import MockupDB, go from operations import upgrades from pymongo import MongoClient diff --git a/test/mockupdb/test_mongos_command_read_mode.py b/test/mockupdb/test_mongos_command_read_mode.py index d2c3bfc1b0..b7f8532e38 100644 --- a/test/mockupdb/test_mongos_command_read_mode.py +++ b/test/mockupdb/test_mongos_command_read_mode.py @@ -15,10 +15,9 @@ import itertools import unittest -from mockupdb import MockupDB, OpMsg, go, going +from mockupdb import MockupDB, OpMsg, going from operations import operations -from bson import SON from pymongo import MongoClient, ReadPreference from pymongo.read_preferences import ( _MONGOS_MODES, diff --git a/test/mockupdb/test_network_disconnect_primary.py b/test/mockupdb/test_network_disconnect_primary.py index dcf5256fac..ea13a3b042 100755 --- a/test/mockupdb/test_network_disconnect_primary.py +++ b/test/mockupdb/test_network_disconnect_primary.py @@ -13,7 +13,6 @@ # limitations under the License. 
import unittest -from queue import Queue from mockupdb import Future, MockupDB, OpReply, going, wait_until diff --git a/test/mockupdb/test_slave_okay_single.py b/test/mockupdb/test_slave_okay_single.py index 98cd1f2706..07cd6c7448 100644 --- a/test/mockupdb/test_slave_okay_single.py +++ b/test/mockupdb/test_slave_okay_single.py @@ -48,15 +48,9 @@ def test(self): ismaster_with_version["minWireVersion"] = 2 ismaster_with_version["maxWireVersion"] = 6 self.server.autoresponds("ismaster", **ismaster_with_version) - if operation.op_type == "always-use-secondary": - slave_ok = True - elif operation.op_type == "may-use-secondary": - slave_ok = mode != "primary" or server_type != "mongos" - elif operation.op_type == "must-use-primary": - slave_ok = server_type != "mongos" - else: - assert False, "unrecognized op_type %r" % operation.op_type - + self.assertIn( + operation.op_type, ("always-use-secondary", "may-use-secondary", "must-use-primary") + ) pref = make_read_preference(read_pref_mode_from_name(mode), tag_sets=None) client = MongoClient(self.server.uri, read_preference=pref) diff --git a/test/mod_wsgi_test/mod_wsgi_test.wsgi b/test/mod_wsgi_test/mod_wsgi_test.wsgi index bfd1c4bab0..7c7b24cb70 100644 --- a/test/mod_wsgi_test/mod_wsgi_test.wsgi +++ b/test/mod_wsgi_test/mod_wsgi_test.wsgi @@ -25,7 +25,7 @@ repository_path = os.path.normpath(os.path.join(this_path, '..', '..')) sys.path.insert(0, repository_path) import pymongo -from pymongo.hello import HelloCompat +from pymongo.hello import HelloCompat # noqa from pymongo.mongo_client import MongoClient client = MongoClient() @@ -33,7 +33,7 @@ collection = client.test.test ndocs = 20 collection.drop() collection.insert_many([{'i': i} for i in range(ndocs)]) -client.close() # Discard main thread's request socket. +client.close() # Discard main thread's request socket. 
client = MongoClient() collection = client.test.test diff --git a/test/ocsp/test_ocsp.py b/test/ocsp/test_ocsp.py index cce846feac..a0770afefa 100644 --- a/test/ocsp/test_ocsp.py +++ b/test/ocsp/test_ocsp.py @@ -40,7 +40,7 @@ def _connect(options): - uri = ("mongodb://localhost:27017/?serverSelectionTimeoutMS=%s" "&tlsCAFile=%s&%s") % ( + uri = ("mongodb://localhost:27017/?serverSelectionTimeoutMS=%s&tlsCAFile=%s&%s") % ( TIMEOUT_MS, CA_FILE, options, diff --git a/test/test_auth.py b/test/test_auth.py index 5abdbef3dc..69ed27bda0 100644 --- a/test/test_auth.py +++ b/test/test_auth.py @@ -21,11 +21,10 @@ sys.path[0:0] = [""] -from test import IntegrationTest, SkipTest, Version, client_context, unittest +from test import IntegrationTest, SkipTest, client_context, unittest from test.utils import ( AllowListEventListener, delay, - get_pool, ignore_deprecations, rs_or_single_client, rs_or_single_client_noauth, @@ -119,14 +118,14 @@ def test_credentials_hashing(self): def test_gssapi_simple(self): assert GSSAPI_PRINCIPAL is not None if GSSAPI_PASS is not None: - uri = "mongodb://%s:%s@%s:%d/?authMechanism=" "GSSAPI" % ( + uri = "mongodb://%s:%s@%s:%d/?authMechanism=GSSAPI" % ( quote_plus(GSSAPI_PRINCIPAL), GSSAPI_PASS, GSSAPI_HOST, GSSAPI_PORT, ) else: - uri = "mongodb://%s@%s:%d/?authMechanism=" "GSSAPI" % ( + uri = "mongodb://%s@%s:%d/?authMechanism=GSSAPI" % ( quote_plus(GSSAPI_PRINCIPAL), GSSAPI_HOST, GSSAPI_PORT, @@ -266,7 +265,7 @@ class TestSASLPlain(unittest.TestCase): @classmethod def setUpClass(cls): if not SASL_HOST or not SASL_USER or not SASL_PASS: - raise SkipTest("Must set SASL_HOST, " "SASL_USER, and SASL_PASS to test SASL") + raise SkipTest("Must set SASL_HOST, SASL_USER, and SASL_PASS to test SASL") def test_sasl_plain(self): @@ -282,7 +281,7 @@ def test_sasl_plain(self): assert SASL_USER is not None assert SASL_PASS is not None - uri = "mongodb://%s:%s@%s:%d/?authMechanism=PLAIN;" "authSource=%s" % ( + uri = "mongodb://%s:%s@%s:%d/?authMechanism=PLAIN;authSource=%s" % ( quote_plus(SASL_USER), quote_plus(SASL_PASS), SASL_HOST, @@ -305,7 +304,7 @@ def test_sasl_plain(self): ) client.ldap.test.find_one() - uri = "mongodb://%s:%s@%s:%d/?authMechanism=PLAIN;" "authSource=%s;replicaSet=%s" % ( + uri = "mongodb://%s:%s@%s:%d/?authMechanism=PLAIN;authSource=%s;replicaSet=%s" % ( quote_plus(SASL_USER), quote_plus(SASL_PASS), SASL_HOST, @@ -318,7 +317,7 @@ def test_sasl_plain(self): def test_sasl_plain_bad_credentials(self): def auth_string(user, password): - uri = "mongodb://%s:%s@%s:%d/?authMechanism=PLAIN;" "authSource=%s" % ( + uri = "mongodb://%s:%s@%s:%d/?authMechanism=PLAIN;authSource=%s" % ( quote_plus(user), quote_plus(password), SASL_HOST, @@ -484,7 +483,7 @@ def test_scram(self): if client_context.is_rs: host, port = client_context.host, client_context.port - uri = "mongodb://both:pwd@%s:%d/testscram" "?replicaSet=%s" % ( + uri = "mongodb://both:pwd@%s:%d/testscram?replicaSet=%s" % ( host, port, client_context.replica_set_name, @@ -641,7 +640,7 @@ def test_uri_options(self): self.assertTrue(db.command("dbstats")) # Test authSource - uri = "mongodb://user:pass@%s:%d" "/pymongo_test2?authSource=pymongo_test" % (host, port) + uri = "mongodb://user:pass@%s:%d/pymongo_test2?authSource=pymongo_test" % (host, port) client = rs_or_single_client_noauth(uri) self.assertRaises(OperationFailure, client.pymongo_test2.command, "dbstats") self.assertTrue(client.pymongo_test.command("dbstats")) diff --git a/test/test_binary.py b/test/test_binary.py index 6352e93d2c..7d0ef2ce2e 100644 --- 
a/test/test_binary.py +++ b/test/test_binary.py @@ -19,14 +19,12 @@ import copy import mmap import pickle -import platform import sys import uuid sys.path[0:0] = [""] from test import IntegrationTest, client_context, unittest -from test.utils import ignore_deprecations import bson from bson import decode, encode diff --git a/test/test_bson.py b/test/test_bson.py index 46aa6e5d9a..9bf8df897a 100644 --- a/test/test_bson.py +++ b/test/test_bson.py @@ -204,35 +204,33 @@ def test_basic_validation(self): self.assertInvalid(b"\x07\x00\x00\x00\x02a\x00\x78\x56\x34\x12") self.assertInvalid(b"\x09\x00\x00\x00\x10a\x00\x05\x00") self.assertInvalid(b"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00") - self.assertInvalid(b"\x13\x00\x00\x00\x02foo\x00" b"\x04\x00\x00\x00bar\x00\x00") + self.assertInvalid(b"\x13\x00\x00\x00\x02foo\x00\x04\x00\x00\x00bar\x00\x00") self.assertInvalid( - b"\x18\x00\x00\x00\x03foo\x00\x0f\x00\x00" b"\x00\x10bar\x00\xff\xff\xff\x7f\x00\x00" - ) - self.assertInvalid( - b"\x15\x00\x00\x00\x03foo\x00\x0c" b"\x00\x00\x00\x08bar\x00\x01\x00\x00" + b"\x18\x00\x00\x00\x03foo\x00\x0f\x00\x00\x00\x10bar\x00\xff\xff\xff\x7f\x00\x00" ) + self.assertInvalid(b"\x15\x00\x00\x00\x03foo\x00\x0c\x00\x00\x00\x08bar\x00\x01\x00\x00") self.assertInvalid( b"\x1c\x00\x00\x00\x03foo\x00" b"\x12\x00\x00\x00\x02bar\x00" b"\x05\x00\x00\x00baz\x00\x00\x00" ) - self.assertInvalid(b"\x10\x00\x00\x00\x02a\x00" b"\x04\x00\x00\x00abc\xff\x00") + self.assertInvalid(b"\x10\x00\x00\x00\x02a\x00\x04\x00\x00\x00abc\xff\x00") def test_bad_string_lengths(self): - self.assertInvalid(b"\x0c\x00\x00\x00\x02\x00" b"\x00\x00\x00\x00\x00\x00") - self.assertInvalid(b"\x12\x00\x00\x00\x02\x00" b"\xff\xff\xff\xfffoobar\x00\x00") - self.assertInvalid(b"\x0c\x00\x00\x00\x0e\x00" b"\x00\x00\x00\x00\x00\x00") - self.assertInvalid(b"\x12\x00\x00\x00\x0e\x00" b"\xff\xff\xff\xfffoobar\x00\x00") + self.assertInvalid(b"\x0c\x00\x00\x00\x02\x00\x00\x00\x00\x00\x00\x00") + self.assertInvalid(b"\x12\x00\x00\x00\x02\x00\xff\xff\xff\xfffoobar\x00\x00") + self.assertInvalid(b"\x0c\x00\x00\x00\x0e\x00\x00\x00\x00\x00\x00\x00") + self.assertInvalid(b"\x12\x00\x00\x00\x0e\x00\xff\xff\xff\xfffoobar\x00\x00") self.assertInvalid( - b"\x18\x00\x00\x00\x0c\x00" b"\x00\x00\x00\x00\x00RY\xb5j" b"\xfa[\xd8A\xd6X]\x99\x00" + b"\x18\x00\x00\x00\x0c\x00\x00\x00\x00\x00\x00RY\xb5j\xfa[\xd8A\xd6X]\x99\x00" ) self.assertInvalid( b"\x1e\x00\x00\x00\x0c\x00" b"\xff\xff\xff\xfffoobar\x00" b"RY\xb5j\xfa[\xd8A\xd6X]\x99\x00" ) - self.assertInvalid(b"\x0c\x00\x00\x00\r\x00" b"\x00\x00\x00\x00\x00\x00") - self.assertInvalid(b"\x0c\x00\x00\x00\r\x00" b"\xff\xff\xff\xff\x00\x00") + self.assertInvalid(b"\x0c\x00\x00\x00\r\x00\x00\x00\x00\x00\x00\x00") + self.assertInvalid(b"\x0c\x00\x00\x00\r\x00\xff\xff\xff\xff\x00\x00") self.assertInvalid( b"\x1c\x00\x00\x00\x0f\x00" b"\x15\x00\x00\x00\x00\x00" @@ -393,9 +391,7 @@ def test_invalid_field_name(self): def test_data_timestamp(self): self.assertEqual( {"test": Timestamp(4, 20)}, - decode( - b"\x13\x00\x00\x00\x11\x74\x65\x73\x74\x00\x14" b"\x00\x00\x00\x04\x00\x00\x00\x00" - ), + decode(b"\x13\x00\x00\x00\x11\x74\x65\x73\x74\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00"), ) def test_basic_encode(self): @@ -414,29 +410,29 @@ def test_basic_encode(self): ) self.assertEqual( encode({"mike": 100}), - b"\x0F\x00\x00\x00\x10\x6D\x69\x6B\x65\x00\x64\x00" b"\x00\x00\x00", + b"\x0F\x00\x00\x00\x10\x6D\x69\x6B\x65\x00\x64\x00\x00\x00\x00", ) self.assertEqual( encode({"hello": 1.5}), - b"\x14\x00\x00\x00\x01\x68\x65\x6C\x6C\x6F\x00\x00" 
b"\x00\x00\x00\x00\x00\xF8\x3F\x00", + b"\x14\x00\x00\x00\x01\x68\x65\x6C\x6C\x6F\x00\x00\x00\x00\x00\x00\x00\xF8\x3F\x00", ) self.assertEqual( encode({"true": True}), b"\x0C\x00\x00\x00\x08\x74\x72\x75\x65\x00\x01\x00" ) self.assertEqual( - encode({"false": False}), b"\x0D\x00\x00\x00\x08\x66\x61\x6C\x73\x65\x00\x00" b"\x00" + encode({"false": False}), b"\x0D\x00\x00\x00\x08\x66\x61\x6C\x73\x65\x00\x00\x00" ) self.assertEqual( encode({"empty": []}), - b"\x11\x00\x00\x00\x04\x65\x6D\x70\x74\x79\x00\x05" b"\x00\x00\x00\x00\x00", + b"\x11\x00\x00\x00\x04\x65\x6D\x70\x74\x79\x00\x05\x00\x00\x00\x00\x00", ) self.assertEqual( encode({"none": {}}), - b"\x10\x00\x00\x00\x03\x6E\x6F\x6E\x65\x00\x05\x00" b"\x00\x00\x00\x00", + b"\x10\x00\x00\x00\x03\x6E\x6F\x6E\x65\x00\x05\x00\x00\x00\x00\x00", ) self.assertEqual( encode({"test": Binary(b"test", 0)}), - b"\x14\x00\x00\x00\x05\x74\x65\x73\x74\x00\x04\x00" b"\x00\x00\x00\x74\x65\x73\x74\x00", + b"\x14\x00\x00\x00\x05\x74\x65\x73\x74\x00\x04\x00\x00\x00\x00\x74\x65\x73\x74\x00", ) self.assertEqual( encode({"test": Binary(b"test", 2)}), @@ -445,24 +441,24 @@ def test_basic_encode(self): ) self.assertEqual( encode({"test": Binary(b"test", 128)}), - b"\x14\x00\x00\x00\x05\x74\x65\x73\x74\x00\x04\x00" b"\x00\x00\x80\x74\x65\x73\x74\x00", + b"\x14\x00\x00\x00\x05\x74\x65\x73\x74\x00\x04\x00\x00\x00\x80\x74\x65\x73\x74\x00", ) self.assertEqual(encode({"test": None}), b"\x0B\x00\x00\x00\x0A\x74\x65\x73\x74\x00\x00") self.assertEqual( encode({"date": datetime.datetime(2007, 1, 8, 0, 30, 11)}), - b"\x13\x00\x00\x00\x09\x64\x61\x74\x65\x00\x38\xBE" b"\x1C\xFF\x0F\x01\x00\x00\x00", + b"\x13\x00\x00\x00\x09\x64\x61\x74\x65\x00\x38\xBE\x1C\xFF\x0F\x01\x00\x00\x00", ) self.assertEqual( encode({"regex": re.compile(b"a*b", re.IGNORECASE)}), - b"\x12\x00\x00\x00\x0B\x72\x65\x67\x65\x78\x00\x61" b"\x2A\x62\x00\x69\x00\x00", + b"\x12\x00\x00\x00\x0B\x72\x65\x67\x65\x78\x00\x61\x2A\x62\x00\x69\x00\x00", ) self.assertEqual( encode({"$where": Code("test")}), - b"\x16\x00\x00\x00\r$where\x00\x05\x00\x00\x00test" b"\x00\x00", + b"\x16\x00\x00\x00\r$where\x00\x05\x00\x00\x00test\x00\x00", ) self.assertEqual( encode({"$field": Code("function(){ return true;}", scope=None)}), - b"+\x00\x00\x00\r$field\x00\x1a\x00\x00\x00" b"function(){ return true;}\x00\x00", + b"+\x00\x00\x00\r$field\x00\x1a\x00\x00\x00function(){ return true;}\x00\x00", ) self.assertEqual( encode({"$field": Code("return function(){ return x; }", scope={"x": False})}), @@ -496,7 +492,7 @@ def test_unknown_type(self): part = "type %r for fieldname 'foo'" % (b"\x14",) docs = [ b"\x0e\x00\x00\x00\x14foo\x00\x01\x00\x00\x00\x00", - (b"\x16\x00\x00\x00\x04foo\x00\x0c\x00\x00\x00\x140" b"\x00\x01\x00\x00\x00\x00\x00"), + (b"\x16\x00\x00\x00\x04foo\x00\x0c\x00\x00\x00\x140\x00\x01\x00\x00\x00\x00\x00"), ( b" \x00\x00\x00\x04bar\x00\x16\x00\x00\x00\x030\x00\x0e\x00\x00" b"\x00\x14foo\x00\x01\x00\x00\x00\x00\x00\x00" @@ -518,7 +514,7 @@ def test_dbpointer(self): # not support creation of the DBPointer type, but will decode # DBPointer to DBRef. 
- bs = b"\x18\x00\x00\x00\x0c\x00\x01\x00\x00" b"\x00\x00RY\xb5j\xfa[\xd8A\xd6X]\x99\x00" + bs = b"\x18\x00\x00\x00\x0c\x00\x01\x00\x00\x00\x00RY\xb5j\xfa[\xd8A\xd6X]\x99\x00" self.assertEqual({"": DBRef("", ObjectId("5259b56afa5bd841d6585d99"))}, decode(bs)) @@ -785,9 +781,7 @@ def test_bson_regex(self): self.assertEqual(0, bson_re1.flags) doc1 = {"r": bson_re1} - doc1_bson = ( - b"\x11\x00\x00\x00" b"\x0br\x00[\\w-\\.]\x00\x00" b"\x00" # document length # r: regex - ) # document terminator + doc1_bson = b"\x11\x00\x00\x00\x0br\x00[\\w-\\.]\x00\x00\x00" # document length # r: regex # document terminator self.assertEqual(doc1_bson, encode(doc1)) self.assertEqual(doc1, decode(doc1_bson)) @@ -798,9 +792,7 @@ def test_bson_regex(self): doc2_with_re = {"r": re2} doc2_with_bson_re = {"r": bson_re2} - doc2_bson = ( - b"\x11\x00\x00\x00" b"\x0br\x00.*\x00imsux\x00" b"\x00" # document length # r: regex - ) # document terminator + doc2_bson = b"\x11\x00\x00\x00\x0br\x00.*\x00imsux\x00\x00" # document length # r: regex # document terminator self.assertEqual(doc2_bson, encode(doc2_with_re)) self.assertEqual(doc2_bson, encode(doc2_with_bson_re)) @@ -917,7 +909,7 @@ def test_timestamp_comparison(self): def test_timestamp_highorder_bits(self): doc = {"a": Timestamp(0xFFFFFFFF, 0xFFFFFFFF)} - doc_bson = b"\x10\x00\x00\x00" b"\x11a\x00\xff\xff\xff\xff\xff\xff\xff\xff" b"\x00" + doc_bson = b"\x10\x00\x00\x00\x11a\x00\xff\xff\xff\xff\xff\xff\xff\xff\x00" self.assertEqual(doc_bson, encode(doc)) self.assertEqual(doc, decode(doc_bson)) diff --git a/test/test_bson_corpus.py b/test/test_bson_corpus.py index 4a46276573..4f8fc7413a 100644 --- a/test/test_bson_corpus.py +++ b/test/test_bson_corpus.py @@ -208,13 +208,13 @@ def run_test(self): # Null bytes are validated when encoding to BSON. if "Null" in description: to_bson(doc) - raise AssertionError("exception not raised for test " "case: " + description) + raise AssertionError("exception not raised for test case: " + description) except (ValueError, KeyError, TypeError, InvalidId, InvalidDocument): pass elif bson_type == "0x05": try: decode_extjson(parse_error_case["string"]) - raise AssertionError("exception not raised for test " "case: " + description) + raise AssertionError("exception not raised for test case: " + description) except (TypeError, ValueError): pass else: diff --git a/test/test_client.py b/test/test_client.py index 0487161b1e..9f01c1c054 100644 --- a/test/test_client.py +++ b/test/test_client.py @@ -26,7 +26,6 @@ import sys import threading import time -import warnings from typing import Type, no_type_check sys.path[0:0] = [""] @@ -88,7 +87,6 @@ ServerSelectionTimeoutError, WriteConcernError, ) -from pymongo.hello import HelloCompat from pymongo.mongo_client import MongoClient from pymongo.monitoring import ServerHeartbeatListener, ServerHeartbeatStartedEvent from pymongo.pool import _METADATA, PoolOptions, SocketInfo @@ -99,10 +97,7 @@ from pymongo.settings import TOPOLOGY_TYPE from pymongo.srv_resolver import _HAVE_DNSPYTHON from pymongo.topology import _ErrorContext -from pymongo.topology_description import ( - TopologyDescription, - _updated_topology_description_srv_polling, -) +from pymongo.topology_description import TopologyDescription from pymongo.write_concern import WriteConcern @@ -279,7 +274,7 @@ def test_primary_read_pref_with_tags(self): MongoClient("mongodb://host/?readpreferencetags=dc:east") with self.assertRaises(ConfigurationError): - MongoClient("mongodb://host/?" 
"readpreference=primary&readpreferencetags=dc:east") + MongoClient("mongodb://host/?readpreference=primary&readpreferencetags=dc:east") def test_read_preference(self): c = rs_or_single_client( @@ -394,7 +389,7 @@ def test_uri_codec_options(self): def test_uri_option_precedence(self): # Ensure kwarg options override connection string options. - uri = "mongodb://localhost/?ssl=true&replicaSet=name" "&readPreference=primary" + uri = "mongodb://localhost/?ssl=true&replicaSet=name&readPreference=primary" c = MongoClient(uri, ssl=False, replicaSet="newname", readPreference="secondaryPreferred") clopts = c._MongoClient__options opts = clopts._options @@ -590,7 +585,7 @@ def test_max_idle_time_checkout(self): with server._pool.get_socket() as sock_info: pass self.assertEqual(1, len(server._pool.sockets)) - time.sleep(1) # Sleep so that the socket becomes stale. + time.sleep(1) # Sleep so that the socket becomes stale. with server._pool.get_socket() as new_sock_info: self.assertNotEqual(sock_info, new_sock_info) @@ -712,7 +707,7 @@ def test_host_w_port(self): def test_repr(self): # Used to test 'eval' below. - import bson + import bson # noqa: F401 client = MongoClient( "mongodb://localhost:27017,localhost:27018/?replicaSet=replset" @@ -723,9 +718,7 @@ def test_repr(self): the_repr = repr(client) self.assertIn("MongoClient(host=", the_repr) - self.assertIn( - "document_class=bson.son.SON, " "tz_aware=False, " "connect=False, ", the_repr - ) + self.assertIn("document_class=bson.son.SON, tz_aware=False, connect=False, ", the_repr) self.assertIn("connecttimeoutms=12345", the_repr) self.assertIn("replicaset='replset'", the_repr) self.assertIn("w=1", the_repr) @@ -744,7 +737,7 @@ def test_repr(self): ) the_repr = repr(client) self.assertIn("MongoClient(host=", the_repr) - self.assertIn("document_class=dict, " "tz_aware=False, " "connect=False, ", the_repr) + self.assertIn("document_class=dict, tz_aware=False, connect=False, ", the_repr) self.assertIn("connecttimeoutms=12345", the_repr) self.assertIn("replicaset='replset'", the_repr) self.assertIn("sockettimeoutms=None", the_repr) @@ -1651,7 +1644,7 @@ def test_service_name_from_kwargs(self): ) self.assertEqual(client._topology_settings.srv_service_name, "customname") client = MongoClient( - "mongodb+srv://user:password@test22.test.build.10gen.cc" "/?srvServiceName=customname", + "mongodb+srv://user:password@test22.test.build.10gen.cc/?srvServiceName=customname", connect=False, ) self.assertEqual(client._topology_settings.srv_service_name, "customname") @@ -1864,7 +1857,7 @@ def test_discover_primary(self): # Fail over. c.kill_host("a:1") c.mock_primary = "b:2" - wait_until(lambda: c.address == ("b", 2), "wait for server " "address to be " "updated") + wait_until(lambda: c.address == ("b", 2), "wait for server address to be updated") # a:1 not longer in nodes. 
self.assertLess(len(c.nodes), 3) diff --git a/test/test_client_context.py b/test/test_client_context.py index b3eb711087..9ee5b96d61 100644 --- a/test/test_client_context.py +++ b/test/test_client_context.py @@ -49,7 +49,7 @@ def test_enableTestCommands_is_disabled(self): self.assertFalse( client_context.test_commands_enabled, - "enableTestCommands must be disabled when " "PYMONGO_DISABLE_TEST_COMMANDS is set.", + "enableTestCommands must be disabled when PYMONGO_DISABLE_TEST_COMMANDS is set.", ) def test_setdefaultencoding_worked(self): diff --git a/test/test_cmap.py b/test/test_cmap.py index b79f36b803..a2a1d8d214 100644 --- a/test/test_cmap.py +++ b/test/test_cmap.py @@ -16,7 +16,6 @@ import os import sys -import threading import time sys.path[0:0] = [""] @@ -25,7 +24,6 @@ from test.pymongo_mocks import DummyMonitor from test.utils import ( CMAPListener, - OvertCommandListener, TestCreator, camel_to_snake, client_context, diff --git a/test/test_collection.py b/test/test_collection.py index f81c2c2645..d9f51f530d 100644 --- a/test/test_collection.py +++ b/test/test_collection.py @@ -246,8 +246,7 @@ def test_create_indexes_commitQuorum_requires_44(self): db = self.db with self.assertRaisesRegex( ConfigurationError, - "Must be connected to MongoDB 4\.4\+ to use the commitQuorum " - "option for createIndexes", + r"Must be connected to MongoDB 4\.4\+ to use the commitQuorum option for createIndexes", ): db.coll.create_indexes([IndexModel("a")], commitQuorum="majority") @@ -1511,7 +1510,7 @@ def test_aggregation_cursor(self): # batchSize - 1 self.assertEqual(4, len(cursor._CommandCursor__data)) # type: ignore # Exhaust the cursor. There shouldn't be any errors. - for doc in cursor: + for _doc in cursor: pass def test_aggregation_cursor_alive(self): @@ -1898,7 +1897,8 @@ def test_array_filters_validation(self): with self.assertRaises(TypeError): c.update_many({}, {"$set": {"a": 1}}, array_filters={}) # type: ignore[arg-type] with self.assertRaises(TypeError): - c.find_one_and_update({}, {"$set": {"a": 1}}, array_filters={}) # type: ignore[arg-type] + update = {"$set": {"a": 1}} + c.find_one_and_update({}, update, array_filters={}) # type: ignore[arg-type] def test_array_filters_unacknowledged(self): c_w0 = self.db.test.with_options(write_concern=WriteConcern(w=0)) diff --git a/test/test_command_monitoring_legacy.py b/test/test_command_monitoring_legacy.py index ed3d516f97..5d9f2fe3ee 100644 --- a/test/test_command_monitoring_legacy.py +++ b/test/test_command_monitoring_legacy.py @@ -21,12 +21,7 @@ sys.path[0:0] = [""] from test import client_context, unittest -from test.utils import ( - EventListener, - parse_read_preference, - rs_or_single_client, - wait_until, -) +from test.utils import EventListener, parse_read_preference, rs_or_single_client import pymongo from bson import json_util diff --git a/test/test_comment.py b/test/test_comment.py index 1c0e741621..c83428fd70 100644 --- a/test/test_comment.py +++ b/test/test_comment.py @@ -16,23 +16,16 @@ import inspect import sys -from collections import defaultdict from typing import Any, Union sys.path[0:0] = [""] -from test import IntegrationTest, SkipTest, client_context, unittest +from test import IntegrationTest, client_context, unittest from test.utils import EventListener, rs_or_single_client from bson.dbref import DBRef -from pymongo.collection import Collection from pymongo.command_cursor import CommandCursor -from pymongo.database import Database -from pymongo.mongo_client import MongoClient from pymongo.operations import IndexModel 
-from pymongo.read_concern import ReadConcern -from pymongo.read_preferences import ReadPreference -from pymongo.write_concern import WriteConcern class Empty(object): @@ -47,7 +40,9 @@ def empty(self, *args, **kwargs): class TestComment(IntegrationTest): - def _test_ops(self, helpers, already_supported, listener, db=Empty(), coll=Empty()): + def _test_ops( + self, helpers, already_supported, listener, db=Empty(), coll=Empty() # noqa: B008 + ): results = listener.results for h, args in helpers: c = "testing comment with " + h.__name__ @@ -56,7 +51,7 @@ def _test_ops(self, helpers, already_supported, listener, db=Empty(), coll=Empty results.clear() kwargs = {"comment": cc} if h == coll.rename: - tmp = db.get_collection("temp_temp_temp").drop() + _ = db.get_collection("temp_temp_temp").drop() destruct_coll = db.get_collection("test_temp") destruct_coll.insert_one({}) maybe_cursor = destruct_coll.rename(*args, **kwargs) diff --git a/test/test_crud_v1.py b/test/test_crud_v1.py index c23ce28061..ca4b84c26d 100644 --- a/test/test_crud_v1.py +++ b/test/test_crud_v1.py @@ -19,7 +19,7 @@ sys.path[0:0] = [""] -from test import IntegrationTest, client_context, unittest +from test import IntegrationTest, unittest from test.utils import ( TestCreator, camel_to_snake, diff --git a/test/test_cursor.py b/test/test_cursor.py index 7a80b003df..5b4efcd391 100644 --- a/test/test_cursor.py +++ b/test/test_cursor.py @@ -39,12 +39,7 @@ from pymongo import ASCENDING, DESCENDING from pymongo.collation import Collation from pymongo.cursor import Cursor, CursorType -from pymongo.errors import ( - ConfigurationError, - ExecutionTimeout, - InvalidOperation, - OperationFailure, -) +from pymongo.errors import ExecutionTimeout, InvalidOperation, OperationFailure from pymongo.read_concern import ReadConcern from pymongo.read_preferences import ReadPreference from pymongo.write_concern import WriteConcern @@ -355,7 +350,7 @@ def test_hint(self): ) spec = [("num", DESCENDING)] - index = db.test.create_index(spec) + _ = db.test.create_index(spec) first = next(db.test.find()) self.assertEqual(0, first.get("num")) @@ -763,7 +758,7 @@ def test_where(self): self.assertEqual([8, 9], [a["x"] for a in cursor]) a = db.test.find() - b = a.where("this.x > 3") + _ = a.where("this.x > 3") for _ in a: break self.assertRaises(InvalidOperation, a.where, "this.x < 3") diff --git a/test/test_custom_types.py b/test/test_custom_types.py index 4659a62e62..a7073cde93 100644 --- a/test/test_custom_types.py +++ b/test/test_custom_types.py @@ -545,9 +545,7 @@ def transform_bson(self, value): ) def test_initialize_fail(self): - err_msg = ( - "Expected an instance of TypeEncoder, TypeDecoder, " "or TypeCodec, got .* instead" - ) + err_msg = "Expected an instance of TypeEncoder, TypeDecoder, or TypeCodec, got .* instead" with self.assertRaisesRegex(TypeError, err_msg): TypeRegistry(self.codecs) # type: ignore[arg-type] diff --git a/test/test_data_lake.py b/test/test_data_lake.py index 863b3a4f59..fbf79994d3 100644 --- a/test/test_data_lake.py +++ b/test/test_data_lake.py @@ -28,8 +28,6 @@ rs_or_single_client, ) -from pymongo.auth import MECHANISMS - # Location of JSON test specifications. 
_TEST_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "data_lake") @@ -42,7 +40,7 @@ def test_connected_to_data_lake(self): self.assertTrue( client_context.is_data_lake, - "client context.is_data_lake must be True when " "DATA_LAKE is set", + "client context.is_data_lake must be True when DATA_LAKE is set", ) diff --git a/test/test_database.py b/test/test_database.py index 9a08d971db..8844046ad1 100644 --- a/test/test_database.py +++ b/test/test_database.py @@ -14,22 +14,18 @@ """Test the database module.""" -import datetime import re import sys from typing import Any, List, Mapping sys.path[0:0] = [""] -from test import IntegrationTest, SkipTest, client_context, unittest +from test import IntegrationTest, client_context, unittest from test.test_custom_types import DECIMAL_CODECOPTS from test.utils import ( IMPOSSIBLE_WRITE_CONCERN, - DeprecationFilter, OvertCommandListener, - ignore_deprecations, rs_or_single_client, - server_started_with_auth, wait_until, ) @@ -44,7 +40,6 @@ from pymongo.database import Database from pymongo.errors import ( CollectionInvalid, - ConfigurationError, ExecutionTimeout, InvalidName, OperationFailure, diff --git a/test/test_dbref.py b/test/test_dbref.py index 8e98bd8ce5..281aef473f 100644 --- a/test/test_dbref.py +++ b/test/test_dbref.py @@ -69,7 +69,7 @@ def test_repr(self): self.assertEqual(repr(DBRef("coll", 5, foo="bar")), "DBRef('coll', 5, foo='bar')") self.assertEqual( repr(DBRef("coll", ObjectId("1234567890abcdef12345678"), "foo")), - "DBRef('coll', ObjectId('1234567890abcdef12345678'), " "'foo')", + "DBRef('coll', ObjectId('1234567890abcdef12345678'), 'foo')", ) def test_equality(self): diff --git a/test/test_discovery_and_monitoring.py b/test/test_discovery_and_monitoring.py index 51b168b0a0..d17a0d4166 100644 --- a/test/test_discovery_and_monitoring.py +++ b/test/test_discovery_and_monitoring.py @@ -284,7 +284,7 @@ def mock_command(*args, **kwargs): def insert_command(i): try: client.test.command("insert", "test", documents=[{"i": i}]) - except AutoReconnect as exc: + except AutoReconnect: pass threads = [] diff --git a/test/test_dns.py b/test/test_dns.py index d47e115f41..352d05376a 100644 --- a/test/test_dns.py +++ b/test/test_dns.py @@ -77,7 +77,7 @@ def run_test(self): options["tls"] = options.pop("ssl") parsed_options = test_case.get("parsed_options") # See DRIVERS-1324, unless tls is explicitly set to False we need TLS. 
- needs_tls = not (options and (options.get("ssl") == False or options.get("tls") == False)) + needs_tls = not (options and (options.get("ssl") is False or options.get("tls") is False)) if needs_tls and not client_context.tls: self.skipTest("this test requires a TLS cluster") if not needs_tls and client_context.tls: diff --git a/test/test_encryption.py b/test/test_encryption.py index 31c3dd2bcd..f63127a7be 100644 --- a/test/test_encryption.py +++ b/test/test_encryption.py @@ -718,7 +718,7 @@ class TestDataKeyDoubleEncryption(EncryptionIntegrationTest): MASTER_KEYS = { "aws": { "region": "us-east-1", - "key": "arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-" "4bd9-9f25-e30687b580d0", + "key": "arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0", }, "azure": { "keyVaultEndpoint": "key-vault-csfle.vault.azure.net", @@ -1259,7 +1259,7 @@ def test_01_aws_region_key(self): { "region": "us-east-1", "key": ( - "arn:aws:kms:us-east-1:579766882180:key/" "89fcc2c4-08b0-4bd9-9f25-e30687b580d0" + "arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0" ), }, ) @@ -1271,7 +1271,7 @@ def test_02_aws_region_key_endpoint(self): { "region": "us-east-1", "key": ( - "arn:aws:kms:us-east-1:579766882180:key/" "89fcc2c4-08b0-4bd9-9f25-e30687b580d0" + "arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0" ), "endpoint": "kms.us-east-1.amazonaws.com", }, @@ -1284,7 +1284,7 @@ def test_03_aws_region_key_endpoint_port(self): { "region": "us-east-1", "key": ( - "arn:aws:kms:us-east-1:579766882180:key/" "89fcc2c4-08b0-4bd9-9f25-e30687b580d0" + "arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0" ), "endpoint": "kms.us-east-1.amazonaws.com:443", }, @@ -1294,9 +1294,7 @@ def test_03_aws_region_key_endpoint_port(self): def test_04_aws_endpoint_invalid_port(self): master_key = { "region": "us-east-1", - "key": ( - "arn:aws:kms:us-east-1:579766882180:key/" "89fcc2c4-08b0-4bd9-9f25-e30687b580d0" - ), + "key": ("arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0"), "endpoint": "kms.us-east-1.amazonaws.com:12345", } with self.assertRaises(EncryptionError) as ctx: @@ -1307,9 +1305,7 @@ def test_04_aws_endpoint_invalid_port(self): def test_05_aws_endpoint_wrong_region(self): master_key = { "region": "us-east-1", - "key": ( - "arn:aws:kms:us-east-1:579766882180:key/" "89fcc2c4-08b0-4bd9-9f25-e30687b580d0" - ), + "key": ("arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0"), "endpoint": "kms.us-east-2.amazonaws.com", } # The full error should be something like: @@ -1323,9 +1319,7 @@ def test_05_aws_endpoint_wrong_region(self): def test_06_aws_endpoint_invalid_host(self): master_key = { "region": "us-east-1", - "key": ( - "arn:aws:kms:us-east-1:579766882180:key/" "89fcc2c4-08b0-4bd9-9f25-e30687b580d0" - ), + "key": ("arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0"), "endpoint": "doesnotexist.invalid", } with self.assertRaisesRegex(EncryptionError, self.invalid_host_error): @@ -1583,9 +1577,9 @@ def _run_test(self, max_pool_size, auto_encryption_opts): event_listeners=[self.client_listener, self.topology_listener], ) - if auto_encryption_opts._bypass_auto_encryption == True: + if auto_encryption_opts._bypass_auto_encryption is True: self.client_test.db.coll.insert_one({"_id": 0, "encrypted": self.ciphertext}) - elif auto_encryption_opts._bypass_auto_encryption == False: + elif auto_encryption_opts._bypass_auto_encryption is False: 
client_encrypted.db.coll.insert_one({"_id": 0, "encrypted": "string0"}) else: raise RuntimeError("bypass_auto_encryption must be a bool") @@ -1825,7 +1819,7 @@ def setUp(self): def test_invalid_kms_certificate_expired(self): key = { "region": "us-east-1", - "key": "arn:aws:kms:us-east-1:579766882180:key/" "89fcc2c4-08b0-4bd9-9f25-e30687b580d0", + "key": "arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0", "endpoint": "mongodb://127.0.0.1:8000", } # Some examples: @@ -1837,7 +1831,7 @@ def test_invalid_kms_certificate_expired(self): def test_invalid_hostname_in_kms_certificate(self): key = { "region": "us-east-1", - "key": "arn:aws:kms:us-east-1:579766882180:key/" "89fcc2c4-08b0-4bd9-9f25-e30687b580d0", + "key": "arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0", "endpoint": "mongodb://127.0.0.1:8001", } # Some examples: diff --git a/test/test_errors.py b/test/test_errors.py index 8a225b6548..747da48472 100644 --- a/test/test_errors.py +++ b/test/test_errors.py @@ -49,11 +49,11 @@ def _test_unicode_strs(self, exc): if sys.implementation.name == "pypy" and sys.implementation.version < (7, 3, 7): # PyPy used to display unicode in repr differently. self.assertEqual( - "unicode \U0001f40d, full error: {" "'errmsg': 'unicode \\U0001f40d'}", str(exc) + "unicode \U0001f40d, full error: {'errmsg': 'unicode \\U0001f40d'}", str(exc) ) else: self.assertEqual( - "unicode \U0001f40d, full error: {" "'errmsg': 'unicode \U0001f40d'}", str(exc) + "unicode \U0001f40d, full error: {'errmsg': 'unicode \U0001f40d'}", str(exc) ) try: raise exc diff --git a/test/test_examples.py b/test/test_examples.py index 7354ac5be2..ccb48307e4 100644 --- a/test/test_examples.py +++ b/test/test_examples.py @@ -945,9 +945,7 @@ def update_employee_info(session): except (ConnectionFailure, OperationFailure) as exc: # Can retry commit if exc.has_error_label("UnknownTransactionCommitResult"): - print( - "UnknownTransactionCommitResult, retrying " "commit operation ..." - ) + print("UnknownTransactionCommitResult, retrying commit operation ...") continue else: print("Error during commit ...") @@ -970,11 +968,11 @@ def run_transaction_with_retry(txn_func, session): txn_func(session) # performs transaction break except (ConnectionFailure, OperationFailure) as exc: - print("Transaction aborted. Caught exception during " "transaction.") + print("Transaction aborted. 
Caught exception during transaction.") # If transient error, retry the whole transaction if exc.has_error_label("TransientTransactionError"): - print("TransientTransactionError, retrying" "transaction ...") + print("TransientTransactionError, retrying transaction ...") continue else: raise @@ -1000,7 +998,7 @@ def commit_with_retry(session): except (ConnectionFailure, OperationFailure) as exc: # Can retry commit if exc.has_error_label("UnknownTransactionCommitResult"): - print("UnknownTransactionCommitResult, retrying " "commit operation ...") + print("UnknownTransactionCommitResult, retrying commit operation ...") continue else: print("Error during commit ...") @@ -1036,7 +1034,7 @@ def run_transaction_with_retry(txn_func, session): except (ConnectionFailure, OperationFailure) as exc: # If transient error, retry the whole transaction if exc.has_error_label("TransientTransactionError"): - print("TransientTransactionError, retrying " "transaction ...") + print("TransientTransactionError, retrying transaction ...") continue else: raise @@ -1051,7 +1049,7 @@ def commit_with_retry(session): except (ConnectionFailure, OperationFailure) as exc: # Can retry commit if exc.has_error_label("UnknownTransactionCommitResult"): - print("UnknownTransactionCommitResult, retrying " "commit operation ...") + print("UnknownTransactionCommitResult, retrying commit operation ...") continue else: print("Error during commit ...") @@ -1282,7 +1280,7 @@ def strptime(s): with self.assertRaisesRegex( OperationFailure, - "Provided apiStrict:true, but the command " "count is not in API Version 1", + "Provided apiStrict:true, but the command count is not in API Version 1", ): client.db.command("count", "sales", query={}) # Start Versioned API Example 6 diff --git a/test/test_grid_file.py b/test/test_grid_file.py index 27d82e242b..b9fdeacef7 100644 --- a/test/test_grid_file.py +++ b/test/test_grid_file.py @@ -580,7 +580,7 @@ def test_iterator(self): self.assertEqual([b"hello world"], list(g)) def test_read_unaligned_buffer_size(self): - in_data = b"This is a text that doesn't " b"quite fit in a single 16-byte chunk." + in_data = b"This is a text that doesn't quite fit in a single 16-byte chunk." f = GridIn(self.db.fs, chunkSize=16) f.write(in_data) f.close() diff --git a/test/test_gridfs_bucket.py b/test/test_gridfs_bucket.py index 8b0a9a3936..d9bf0cf058 100644 --- a/test/test_gridfs_bucket.py +++ b/test/test_gridfs_bucket.py @@ -399,7 +399,7 @@ def test_download_to_stream(self): def test_download_to_stream_by_name(self): file1 = BytesIO(b"hello world") # Test with one chunk. 
- oid = self.fs.upload_from_stream("one_chunk", file1) + _ = self.fs.upload_from_stream("one_chunk", file1) self.assertEqual(1, self.db.fs.chunks.count_documents({})) file2 = BytesIO() self.fs.download_to_stream_by_name("one_chunk", file2) diff --git a/test/test_heartbeat_monitoring.py b/test/test_heartbeat_monitoring.py index cd4a875e9e..a14ab9a3a7 100644 --- a/test/test_heartbeat_monitoring.py +++ b/test/test_heartbeat_monitoring.py @@ -15,7 +15,6 @@ """Test the monitoring of the server heartbeats.""" import sys -import threading sys.path[0:0] = [""] diff --git a/test/test_json_util.py b/test/test_json_util.py index 203542e822..ee5b7abb49 100644 --- a/test/test_json_util.py +++ b/test/test_json_util.py @@ -331,7 +331,7 @@ def test_uuid(self): json_util.dumps(doc, json_options=LEGACY_JSON_OPTIONS), ) self.assertEqual( - '{"uuid": ' '{"$binary": "9HrBC1jMQ3KlZw4CssPUeQ==", "$type": "03"}}', + '{"uuid": {"$binary": "9HrBC1jMQ3KlZw4CssPUeQ==", "$type": "03"}}', json_util.dumps( doc, json_options=STRICT_JSON_OPTIONS.with_options( @@ -340,7 +340,7 @@ def test_uuid(self): ), ) self.assertEqual( - '{"uuid": ' '{"$binary": "9HrBC1jMQ3KlZw4CssPUeQ==", "$type": "04"}}', + '{"uuid": {"$binary": "9HrBC1jMQ3KlZw4CssPUeQ==", "$type": "04"}}', json_util.dumps( doc, json_options=JSONOptions( @@ -351,7 +351,7 @@ def test_uuid(self): self.assertEqual( doc, json_util.loads( - '{"uuid": ' '{"$binary": "9HrBC1jMQ3KlZw4CssPUeQ==", "$type": "03"}}', + '{"uuid": {"$binary": "9HrBC1jMQ3KlZw4CssPUeQ==", "$type": "03"}}', json_options=uuid_legacy_opts, ), ) @@ -364,7 +364,7 @@ def test_uuid(self): self.assertEqual( doc, json_util.loads( - '{"uuid": ' '{"$binary": "9HrBC1jMQ3KlZw4CssPUeQ==", "$type": "04"}}', + '{"uuid": {"$binary": "9HrBC1jMQ3KlZw4CssPUeQ==", "$type": "04"}}', json_options=options, ), ) @@ -420,32 +420,32 @@ def test_binary(self): json_bin_dump = json_util.dumps(md5_type_dict, json_options=LEGACY_JSON_OPTIONS) # Check order. self.assertEqual( - '{"md5": {"$binary": "IG43GK8JL9HRL4DK53HMrA==",' + ' "$type": "05"}}', json_bin_dump + '{"md5": {"$binary": "IG43GK8JL9HRL4DK53HMrA==", "$type": "05"}}', json_bin_dump ) self.assertEqual( md5_type_dict, - json_util.loads('{"md5": {"$type": 5, "$binary":' ' "IG43GK8JL9HRL4DK53HMrA=="}}'), + json_util.loads('{"md5": {"$type": 5, "$binary": "IG43GK8JL9HRL4DK53HMrA=="}}'), ) json_bin_dump = json_util.dumps(custom_type_dict, json_options=LEGACY_JSON_OPTIONS) self.assertIn('"$type": "80"', json_bin_dump) self.assertEqual( custom_type_dict, - json_util.loads('{"custom": {"$type": 128, "$binary":' ' "aGVsbG8="}}'), + json_util.loads('{"custom": {"$type": 128, "$binary": "aGVsbG8="}}'), ) # Handle mongoexport where subtype >= 128 self.assertEqual( 128, - json_util.loads('{"custom": {"$type": "ffffff80", "$binary":' ' "aGVsbG8="}}')[ + json_util.loads('{"custom": {"$type": "ffffff80", "$binary": "aGVsbG8="}}')[ "custom" ].subtype, ) self.assertEqual( 255, - json_util.loads('{"custom": {"$type": "ffffffff", "$binary":' ' "aGVsbG8="}}')[ + json_util.loads('{"custom": {"$type": "ffffffff", "$binary": "aGVsbG8="}}')[ "custom" ].subtype, ) diff --git a/test/test_load_balancer.py b/test/test_load_balancer.py index 547cf327d3..378ae33e03 100644 --- a/test/test_load_balancer.py +++ b/test/test_load_balancer.py @@ -158,7 +158,7 @@ def lock_pool(self): # Wait for the unlock flag. 
unlock_pool = self.unlock.wait(10) if not unlock_pool: - raise Exception("timed out waiting for unlock signal:" " deadlock?") + raise Exception("timed out waiting for unlock signal: deadlock?") if __name__ == "__main__": diff --git a/test/test_max_staleness.py b/test/test_max_staleness.py index 4c17701133..799083f3b4 100644 --- a/test/test_max_staleness.py +++ b/test/test_max_staleness.py @@ -52,21 +52,21 @@ def test_max_staleness(self): with self.assertRaises(ConfigurationError): # Read pref "primary" can't be used with max staleness. - MongoClient("mongodb://a/?readPreference=primary&" "maxStalenessSeconds=120") + MongoClient("mongodb://a/?readPreference=primary&maxStalenessSeconds=120") client = MongoClient("mongodb://host/?maxStalenessSeconds=-1") self.assertEqual(-1, client.read_preference.max_staleness) - client = MongoClient("mongodb://host/?readPreference=primary&" "maxStalenessSeconds=-1") + client = MongoClient("mongodb://host/?readPreference=primary&maxStalenessSeconds=-1") self.assertEqual(-1, client.read_preference.max_staleness) - client = MongoClient("mongodb://host/?readPreference=secondary&" "maxStalenessSeconds=120") + client = MongoClient("mongodb://host/?readPreference=secondary&maxStalenessSeconds=120") self.assertEqual(120, client.read_preference.max_staleness) - client = MongoClient("mongodb://a/?readPreference=secondary&" "maxStalenessSeconds=1") + client = MongoClient("mongodb://a/?readPreference=secondary&maxStalenessSeconds=1") self.assertEqual(1, client.read_preference.max_staleness) - client = MongoClient("mongodb://a/?readPreference=secondary&" "maxStalenessSeconds=-1") + client = MongoClient("mongodb://a/?readPreference=secondary&maxStalenessSeconds=-1") self.assertEqual(-1, client.read_preference.max_staleness) client = MongoClient(maxStalenessSeconds=-1, readPreference="nearest") @@ -84,9 +84,7 @@ def test_max_staleness_float(self): with warnings.catch_warnings(record=True) as ctx: warnings.simplefilter("always") - client = MongoClient( - "mongodb://host/?maxStalenessSeconds=1.5" "&readPreference=nearest" - ) + client = MongoClient("mongodb://host/?maxStalenessSeconds=1.5&readPreference=nearest") # Option was ignored. self.assertEqual(-1, client.read_preference.max_staleness) @@ -101,7 +99,7 @@ def test_max_staleness_zero(self): with warnings.catch_warnings(record=True) as ctx: warnings.simplefilter("always") - client = MongoClient("mongodb://host/?maxStalenessSeconds=0" "&readPreference=nearest") + client = MongoClient("mongodb://host/?maxStalenessSeconds=0&readPreference=nearest") # Option was ignored. 
self.assertEqual(-1, client.read_preference.max_staleness) diff --git a/test/test_monitoring.py b/test/test_monitoring.py index 1adb2983e4..0b8200c019 100644 --- a/test/test_monitoring.py +++ b/test/test_monitoring.py @@ -16,19 +16,12 @@ import datetime import sys import time -import warnings from typing import Any sys.path[0:0] = [""] from test import IntegrationTest, client_context, client_knobs, sanitize_cmd, unittest -from test.utils import ( - EventListener, - get_pool, - rs_or_single_client, - single_client, - wait_until, -) +from test.utils import EventListener, rs_or_single_client, single_client, wait_until from bson.int64 import Int64 from bson.objectid import ObjectId @@ -781,7 +774,7 @@ def test_non_bulk_writes(self): # delete_one self.listener.results.clear() - res2 = coll.delete_one({"x": 3}) + _ = coll.delete_one({"x": 3}) results = self.listener.results started = results["started"][0] succeeded = results["succeeded"][0] @@ -1242,19 +1235,19 @@ def test_server_event_repr(self): event = monitoring.ServerOpeningEvent(server_address, topology_id) self.assertEqual( repr(event), - "", + "", ) event = monitoring.ServerDescriptionChangedEvent( "PREV", "NEW", server_address, topology_id # type: ignore[arg-type] ) self.assertEqual( repr(event), - "", + "", ) event = monitoring.ServerClosedEvent(server_address, topology_id) self.assertEqual( repr(event), - "", + "", ) def test_topology_event_repr(self): diff --git a/test/test_mypy.py b/test/test_mypy.py index 5b9746f723..36fe2ed424 100644 --- a/test/test_mypy.py +++ b/test/test_mypy.py @@ -16,7 +16,6 @@ sample client code that uses PyMongo typings.""" import os -import sys import unittest from typing import Any, Dict, Iterable, List diff --git a/test/test_pooling.py b/test/test_pooling.py index 07dbc3643d..00b947f10a 100644 --- a/test/test_pooling.py +++ b/test/test_pooling.py @@ -320,7 +320,7 @@ def test_wait_queue_timeout(self): pool = self.create_pool(max_pool_size=1, wait_queue_timeout=wait_queue_timeout) self.addCleanup(pool.close) - with pool.get_socket() as sock_info: + with pool.get_socket(): start = time.time() with self.assertRaises(ConnectionFailure): with pool.get_socket(): diff --git a/test/test_saslprep.py b/test/test_saslprep.py index 1dd4727181..c07870dad6 100644 --- a/test/test_saslprep.py +++ b/test/test_saslprep.py @@ -24,7 +24,7 @@ class TestSASLprep(unittest.TestCase): def test_saslprep(self): try: - import stringprep + import stringprep # noqa except ImportError: self.assertRaises(TypeError, saslprep, "anything...") # Bytes strings are ignored. 
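
A note on the ``saslprep`` change above: preparation only happens when the standard library's optional ``stringprep`` module is importable, which is what the try/except in that test probes. Below is a minimal sketch of both behaviors, not part of the patch itself, assuming a CPython build where ``stringprep`` is present; the U+00AD example relies on RFC 4013's mapped-to-nothing table::

    from pymongo.saslprep import saslprep

    # With stringprep available, RFC 4013 preparation applies:
    # mapped-to-nothing characters such as the soft hyphen are removed.
    assert saslprep("I\u00adX") == "IX"

    # Bytes are returned untouched either way, matching the
    # "Bytes strings are ignored" comment in the test above.
    assert saslprep(b"user") == b"user"
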
diff --git a/test/test_sdam_monitoring_spec.py b/test/test_sdam_monitoring_spec.py index fee751fbdc..d7b3744399 100644 --- a/test/test_sdam_monitoring_spec.py +++ b/test/test_sdam_monitoring_spec.py @@ -98,13 +98,13 @@ def compare_events(expected_dict, actual): ) if not compare_server_descriptions(expected["newDescription"], actual.new_description): - return (False, "New ServerDescription incorrect in" " ServerDescriptionChangedEvent") + return (False, "New ServerDescription incorrect in ServerDescriptionChangedEvent") if not compare_server_descriptions( expected["previousDescription"], actual.previous_description ): return ( False, - "Previous ServerDescription incorrect in" " ServerDescriptionChangedEvent", + "Previous ServerDescription incorrect in ServerDescriptionChangedEvent", ) elif expected_type == "server_closed_event": @@ -125,19 +125,19 @@ def compare_events(expected_dict, actual): if not isinstance(actual, monitoring.TopologyDescriptionChangedEvent): return ( False, - "Expected TopologyDescriptionChangedEvent," " got %s" % (actual.__class__), + "Expected TopologyDescriptionChangedEvent, got %s" % (actual.__class__), ) if not compare_topology_descriptions(expected["newDescription"], actual.new_description): return ( False, - "New TopologyDescription incorrect in " "TopologyDescriptionChangedEvent", + "New TopologyDescription incorrect in TopologyDescriptionChangedEvent", ) if not compare_topology_descriptions( expected["previousDescription"], actual.previous_description ): return ( False, - "Previous TopologyDescription incorrect in" " TopologyDescriptionChangedEvent", + "Previous TopologyDescription incorrect in TopologyDescriptionChangedEvent", ) elif expected_type == "topology_closed_event": diff --git a/test/test_server_description.py b/test/test_server_description.py index 1562711375..bb49141d2f 100644 --- a/test/test_server_description.py +++ b/test/test_server_description.py @@ -170,7 +170,7 @@ def test_all_hosts(self): def test_repr(self): s = parse_hello_response({"ok": 1, "msg": "isdbgrid"}) self.assertEqual( - repr(s), "" + repr(s), "" ) def test_topology_version(self): diff --git a/test/test_session.py b/test/test_session.py index 5a242d6c69..b7aa65a19d 100644 --- a/test/test_session.py +++ b/test/test_session.py @@ -145,7 +145,7 @@ def _test_ops(self, client, *ops): kw = copy.copy(kw) kw["session"] = s with self.assertRaisesRegex( - InvalidOperation, "Can only use session with the MongoClient" " that started it" + InvalidOperation, "Can only use session with the MongoClient that started it" ): f(*args, **kw) diff --git a/test/test_son.py b/test/test_son.py index 69beb81439..5c1f43594d 100644 --- a/test/test_son.py +++ b/test/test_son.py @@ -154,8 +154,8 @@ def test_contains_has(self): self.assertIn(1, test_son) self.assertTrue(2 in test_son, "in failed") self.assertFalse(22 in test_son, "in succeeded when it shouldn't") - self.assertTrue(test_son.has_key(2), "has_key failed") - self.assertFalse(test_son.has_key(22), "has_key succeeded when it shouldn't") + self.assertTrue(test_son.has_key(2), "has_key failed") # noqa + self.assertFalse(test_son.has_key(22), "has_key succeeded when it shouldn't") # noqa def test_clears(self): """ diff --git a/test/test_srv_polling.py b/test/test_srv_polling.py index 6c240d7a78..0b54171dc9 100644 --- a/test/test_srv_polling.py +++ b/test/test_srv_polling.py @@ -96,7 +96,7 @@ class TestSrvPolling(unittest.TestCase): def setUp(self): if not _HAVE_DNSPYTHON: - raise unittest.SkipTest("SRV polling tests require the dnspython " "module") + 
raise unittest.SkipTest("SRV polling tests require the dnspython module") # Patch timeouts to ensure short rescan SRV interval. self.client_knobs = client_knobs( heartbeat_frequency=WAIT_TIME, @@ -318,7 +318,7 @@ def nodelist_callback(): with SrvPollingKnobs(ttl_time=WAIT_TIME, min_srv_rescan_interval=WAIT_TIME): client = MongoClient( - "mongodb+srv://test22.test.build.10gen.cc/?srvServiceName" "=customname" + "mongodb+srv://test22.test.build.10gen.cc/?srvServiceName=customname" ) with SrvPollingKnobs(nodelist_callback=nodelist_callback): self.assert_nodelist_change(response, client) diff --git a/test/test_ssl.py b/test/test_ssl.py index 7629c1fd88..0c45275fac 100644 --- a/test/test_ssl.py +++ b/test/test_ssl.py @@ -40,12 +40,12 @@ _HAVE_PYOPENSSL = False try: # All of these must be available to use PyOpenSSL - import OpenSSL - import requests - import service_identity + import OpenSSL # noqa + import requests # noqa + import service_identity # noqa # Ensure service_identity>=18.1 is installed - from service_identity.pyopenssl import verify_ip_address + from service_identity.pyopenssl import verify_ip_address # noqa from pymongo.ocsp_support import _load_trusted_ca_certs @@ -79,9 +79,7 @@ class TestClientSSL(unittest.TestCase): - @unittest.skipIf( - HAVE_SSL, "The ssl module is available, can't test what " "happens without it." - ) + @unittest.skipIf(HAVE_SSL, "The ssl module is available, can't test what happens without it.") def test_no_ssl_module(self): # Explicit self.assertRaises(ConfigurationError, MongoClient, ssl=True) @@ -406,7 +404,7 @@ def test_tlsCRLFile_support(self): ) ) - uri_fmt = "mongodb://localhost/?ssl=true&" "tlsCAFile=%s&serverSelectionTimeoutMS=100" + uri_fmt = "mongodb://localhost/?ssl=true&tlsCAFile=%s&serverSelectionTimeoutMS=100" connected(MongoClient(uri_fmt % (CA_PEM,), **self.credentials)) # type: ignore uri_fmt = ( @@ -569,7 +567,7 @@ def test_mongodb_x509_auth(self): else: self.assertEqual(names, ["authenticate", "find"]) - uri = "mongodb://%s@%s:%d/?authMechanism=" "MONGODB-X509" % ( + uri = "mongodb://%s@%s:%d/?authMechanism=MONGODB-X509" % ( quote_plus(MONGODB_X509_USERNAME), host, port, @@ -589,7 +587,7 @@ def test_mongodb_x509_auth(self): # No error client.pymongo_test.test.find_one() # Auth should fail if username and certificate do not match - uri = "mongodb://%s@%s:%d/?authMechanism=" "MONGODB-X509" % ( + uri = "mongodb://%s@%s:%d/?authMechanism=MONGODB-X509" % ( quote_plus("not the username"), host, port, @@ -617,7 +615,7 @@ def test_mongodb_x509_auth(self): bad_client.pymongo_test.test.find_one() # Invalid certificate (using CA certificate as client certificate) - uri = "mongodb://%s@%s:%d/?authMechanism=" "MONGODB-X509" % ( + uri = "mongodb://%s@%s:%d/?authMechanism=MONGODB-X509" % ( quote_plus(MONGODB_X509_USERNAME), host, port, diff --git a/test/test_threads.py b/test/test_threads.py index 064008ee32..2c73de52e7 100644 --- a/test/test_threads.py +++ b/test/test_threads.py @@ -16,7 +16,7 @@ import threading from test import IntegrationTest, client_context, unittest -from test.utils import joinall, rs_or_single_client +from test.utils import joinall @client_context.require_connection diff --git a/test/test_uri_parser.py b/test/test_uri_parser.py index cfe21169fd..4fa288df44 100644 --- a/test/test_uri_parser.py +++ b/test/test_uri_parser.py @@ -59,11 +59,11 @@ def test_split_hosts(self): ) self.assertEqual( [("/tmp/mongodb-27017.sock", None), ("example.com", 27017)], - split_hosts("/tmp/mongodb-27017.sock," "example.com:27017"), + 
split_hosts("/tmp/mongodb-27017.sock,example.com:27017"), ) self.assertEqual( [("example.com", 27017), ("/tmp/mongodb-27017.sock", None)], - split_hosts("example.com:27017," "/tmp/mongodb-27017.sock"), + split_hosts("example.com:27017,/tmp/mongodb-27017.sock"), ) self.assertRaises(ValueError, split_hosts, "::1", 27017) self.assertRaises(ValueError, split_hosts, "[::1:27017") @@ -168,11 +168,11 @@ def test_parse_uri(self): res = copy.deepcopy(orig) res["nodelist"] = [("example1.com", 27017), ("example2.com", 27017)] - self.assertEqual(res, parse_uri("mongodb://example1.com:27017," "example2.com:27017")) + self.assertEqual(res, parse_uri("mongodb://example1.com:27017,example2.com:27017")) res = copy.deepcopy(orig) res["nodelist"] = [("localhost", 27017), ("localhost", 27018), ("localhost", 27019)] - self.assertEqual(res, parse_uri("mongodb://localhost," "localhost:27018,localhost:27019")) + self.assertEqual(res, parse_uri("mongodb://localhost,localhost:27018,localhost:27019")) res = copy.deepcopy(orig) res["database"] = "foo" @@ -182,21 +182,17 @@ def test_parse_uri(self): self.assertEqual(res, parse_uri("mongodb://localhost/")) res.update({"database": "test", "collection": "yield_historical.in"}) - self.assertEqual(res, parse_uri("mongodb://" "localhost/test.yield_historical.in")) + self.assertEqual(res, parse_uri("mongodb://localhost/test.yield_historical.in")) res.update({"username": "fred", "password": "foobar"}) - self.assertEqual( - res, parse_uri("mongodb://fred:foobar@localhost/" "test.yield_historical.in") - ) + self.assertEqual(res, parse_uri("mongodb://fred:foobar@localhost/test.yield_historical.in")) res = copy.deepcopy(orig) res["nodelist"] = [("example1.com", 27017), ("example2.com", 27017)] res.update({"database": "test", "collection": "yield_historical.in"}) self.assertEqual( res, - parse_uri( - "mongodb://example1.com:27017,example2.com" ":27017/test.yield_historical.in" - ), + parse_uri("mongodb://example1.com:27017,example2.com:27017/test.yield_historical.in"), ) # Test socket path without escaped characters. @@ -205,14 +201,14 @@ def test_parse_uri(self): # Test with escaped characters. 
res = copy.deepcopy(orig) res["nodelist"] = [("example2.com", 27017), ("/tmp/mongodb-27017.sock", None)] - self.assertEqual(res, parse_uri("mongodb://example2.com," "%2Ftmp%2Fmongodb-27017.sock")) + self.assertEqual(res, parse_uri("mongodb://example2.com,%2Ftmp%2Fmongodb-27017.sock")) res = copy.deepcopy(orig) res["nodelist"] = [("shoe.sock.pants.co.uk", 27017), ("/tmp/mongodb-27017.sock", None)] res["database"] = "nethers_db" self.assertEqual( res, - parse_uri("mongodb://shoe.sock.pants.co.uk," "%2Ftmp%2Fmongodb-27017.sock/nethers_db"), + parse_uri("mongodb://shoe.sock.pants.co.uk,%2Ftmp%2Fmongodb-27017.sock/nethers_db"), ) res = copy.deepcopy(orig) @@ -242,15 +238,13 @@ def test_parse_uri(self): res = copy.deepcopy(orig) res["nodelist"] = [("example2.com", 27017)] res.update({"database": "test", "collection": "yield_historical.sock"}) - self.assertEqual( - res, parse_uri("mongodb://example2.com:27017" "/test.yield_historical.sock") - ) + self.assertEqual(res, parse_uri("mongodb://example2.com:27017/test.yield_historical.sock")) res = copy.deepcopy(orig) res["nodelist"] = [("/tmp/mongodb-27017.sock", None)] res.update({"database": "test", "collection": "mongodb-27017.sock"}) self.assertEqual( - res, parse_uri("mongodb://%2Ftmp%2Fmongodb-27017.sock" "/test.mongodb-27017.sock") + res, parse_uri("mongodb://%2Ftmp%2Fmongodb-27017.sock/test.mongodb-27017.sock") ) res = copy.deepcopy(orig) @@ -275,9 +269,7 @@ def test_parse_uri(self): res = copy.deepcopy(orig) res.update({"username": "fred", "password": "foobar"}) res.update({"database": "test", "collection": "yield_historical.in"}) - self.assertEqual( - res, parse_uri("mongodb://fred:foobar@localhost/" "test.yield_historical.in") - ) + self.assertEqual(res, parse_uri("mongodb://fred:foobar@localhost/test.yield_historical.in")) res = copy.deepcopy(orig) res["database"] = "test" @@ -294,7 +286,7 @@ def test_parse_uri(self): res["username"] = "user" res["password"] = "password" self.assertEqual( - res, parse_uri("mongodb://user:password@localhost/" "?authMechanism=MONGODB-CR") + res, parse_uri("mongodb://user:password@localhost/?authMechanism=MONGODB-CR") ) res = copy.deepcopy(orig) @@ -305,7 +297,7 @@ def test_parse_uri(self): self.assertEqual( res, parse_uri( - "mongodb://user:password@localhost/foo" "?authSource=bar;authMechanism=MONGODB-CR" + "mongodb://user:password@localhost/foo?authSource=bar;authMechanism=MONGODB-CR" ), ) @@ -313,13 +305,13 @@ def test_parse_uri(self): res["options"] = {"authmechanism": "MONGODB-CR"} res["username"] = "user" res["password"] = "" - self.assertEqual(res, parse_uri("mongodb://user:@localhost/" "?authMechanism=MONGODB-CR")) + self.assertEqual(res, parse_uri("mongodb://user:@localhost/?authMechanism=MONGODB-CR")) res = copy.deepcopy(orig) res["username"] = "user@domain.com" res["password"] = "password" res["database"] = "foo" - self.assertEqual(res, parse_uri("mongodb://user%40domain.com:password" "@localhost/foo")) + self.assertEqual(res, parse_uri("mongodb://user%40domain.com:password@localhost/foo")) res = copy.deepcopy(orig) res["options"] = {"authmechanism": "GSSAPI"} @@ -328,7 +320,7 @@ def test_parse_uri(self): res["database"] = "foo" self.assertEqual( res, - parse_uri("mongodb://user%40domain.com:password" "@localhost/foo?authMechanism=GSSAPI"), + parse_uri("mongodb://user%40domain.com:password@localhost/foo?authMechanism=GSSAPI"), ) res = copy.deepcopy(orig) @@ -337,7 +329,7 @@ def test_parse_uri(self): res["password"] = "" res["database"] = "foo" self.assertEqual( - res, 
parse_uri("mongodb://user%40domain.com" "@localhost/foo?authMechanism=GSSAPI") + res, parse_uri("mongodb://user%40domain.com@localhost/foo?authMechanism=GSSAPI") ) res = copy.deepcopy(orig) @@ -410,7 +402,7 @@ def test_parse_uri(self): self.assertRaises( ValueError, parse_uri, - "mongodb://user%40domain.com:password" "@localhost/foo?uuidrepresentation=notAnOption", + "mongodb://user%40domain.com:password@localhost/foo?uuidrepresentation=notAnOption", ) def test_parse_ssl_paths(self): diff --git a/test/unified_format.py b/test/unified_format.py index ba1d063694..6f1e386932 100644 --- a/test/unified_format.py +++ b/test/unified_format.py @@ -826,14 +826,12 @@ def process_error(self, exception, spec): self.match_evaluator.match_result(expect_result, result) else: self.fail( - "expectResult can only be specified with %s " "exceptions" % (BulkWriteError,) + "expectResult can only be specified with %s exceptions" % (BulkWriteError,) ) def __raise_if_unsupported(self, opname, target, *target_types): if not isinstance(target, target_types): - self.fail( - "Operation %s not supported for entity " "of type %s" % (opname, type(target)) - ) + self.fail("Operation %s not supported for entity of type %s" % (opname, type(target))) def __entityOperation_createChangeStream(self, target, *args, **kwargs): if client_context.storage_engine == "mmapv1": @@ -891,7 +889,7 @@ def _collectionOperation_createFindCursor(self, target, *args, **kwargs): def _collectionOperation_listIndexes(self, target, *args, **kwargs): if "batch_size" in kwargs: - self.skipTest("PyMongo does not support batch_size for " "list_indexes") + self.skipTest("PyMongo does not support batch_size for list_indexes") return target.list_indexes(*args, **kwargs) def _sessionOperation_withTransaction(self, target, *args, **kwargs): @@ -1255,7 +1253,7 @@ def generate_test_classes( test_path, module=__name__, class_name_prefix="", - expected_failures=[], + expected_failures=[], # noqa: B006 bypass_test_generation_errors=False, **kwargs ): diff --git a/tools/clean.py b/tools/clean.py index 7196b00e90..0ea31fc3d9 100644 --- a/tools/clean.py +++ b/tools/clean.py @@ -23,24 +23,24 @@ try: os.remove("pymongo/_cmessage.so") os.remove("bson/_cbson.so") -except: +except BaseException: pass try: os.remove("pymongo/_cmessage.pyd") os.remove("bson/_cbson.pyd") -except: +except BaseException: pass try: - from pymongo import _cmessage # type: ignore[attr-defined] + from pymongo import _cmessage # type: ignore[attr-defined] # noqa: F401 sys.exit("could still import _cmessage") except ImportError: pass try: - from bson import _cbson + from bson import _cbson # noqa: F401 sys.exit("could still import _cbson") except ImportError: diff --git a/tools/fail_if_no_c.py b/tools/fail_if_no_c.py index a2d4954789..6cb82eed57 100644 --- a/tools/fail_if_no_c.py +++ b/tools/fail_if_no_c.py @@ -21,8 +21,8 @@ sys.path[0:0] = [""] -import bson -import pymongo +import bson # noqa: E402 +import pymongo # noqa: E402 if not pymongo.has_c() or not bson.has_c(): sys.exit("could not load C extensions") From f5eec45250781a3cd3369dc233d97f0272e905a2 Mon Sep 17 00:00:00 2001 From: Julius Park Date: Thu, 17 Feb 2022 16:23:23 -0800 Subject: [PATCH 0080/1588] PYTHON-3111 Rename "Versioned API" to "Stable API" in documentation (#867) --- .evergreen/config.yml | 2 +- .evergreen/resync-specs.sh | 2 +- doc/api/pymongo/server_api.rst | 4 ++-- doc/changelog.rst | 2 +- pymongo/database.py | 2 +- pymongo/mongo_client.py | 6 +++--- pymongo/pool.py | 2 +- pymongo/server_api.py | 14 +++++++------- 
test/__init__.py | 2 +- 9 files changed, 18 insertions(+), 18 deletions(-) diff --git a/.evergreen/config.yml b/.evergreen/config.yml index 8edc43df20..9bfd57c805 100644 --- a/.evergreen/config.yml +++ b/.evergreen/config.yml @@ -2561,7 +2561,7 @@ buildvariants: tasks: - name: atlas-data-lake-tests -- matrix_name: "versioned-api-tests" +- matrix_name: "stable-api-tests" matrix_spec: platform: ubuntu-18.04 python-version: ["3.6", "3.10"] diff --git a/.evergreen/resync-specs.sh b/.evergreen/resync-specs.sh index bf20f23037..d1bc26a91c 100755 --- a/.evergreen/resync-specs.sh +++ b/.evergreen/resync-specs.sh @@ -136,7 +136,7 @@ do uri|uri*options) cpjson uri-options/tests uri_options ;; - versioned-api) + stable-api) cpjson versioned-api/tests versioned-api ;; *) diff --git a/doc/api/pymongo/server_api.rst b/doc/api/pymongo/server_api.rst index d961d07f1a..de74411aa4 100644 --- a/doc/api/pymongo/server_api.rst +++ b/doc/api/pymongo/server_api.rst @@ -1,8 +1,8 @@ -:mod:`server_api` -- Support for MongoDB Versioned API +:mod:`server_api` -- Support for MongoDB Stable API ====================================================== .. automodule:: pymongo.server_api - :synopsis: Support for MongoDB Versioned API + :synopsis: Support for MongoDB Stable API .. autoclass:: pymongo.server_api.ServerApi :members: diff --git a/doc/changelog.rst b/doc/changelog.rst index de38f188e4..7dd57d5329 100644 --- a/doc/changelog.rst +++ b/doc/changelog.rst @@ -258,7 +258,7 @@ Notable improvements .................... - Added support for MongoDB 5.0. -- Support for MongoDB Versioned API, see :class:`~pymongo.server_api.ServerApi`. +- Support for MongoDB Stable API, see :class:`~pymongo.server_api.ServerApi`. - Support for snapshot reads on secondaries (see :ref:`snapshot-reads-ref`). - Support for Azure and GCP KMS providers for client side field level encryption. See the docstring for :class:`~pymongo.mongo_client.MongoClient`, diff --git a/pymongo/database.py b/pymongo/database.py index f92dbc8aed..f43f18d017 100644 --- a/pymongo/database.py +++ b/pymongo/database.py @@ -686,7 +686,7 @@ def command( .. note:: :meth:`command` does **not** apply any custom TypeDecoders when decoding the command response. - .. note:: If this client has been configured to use MongoDB Versioned + .. note:: If this client has been configured to use MongoDB Stable API (see :ref:`versioned-api-ref`), then :meth:`command` will automactically add API versioning options to the given command. Explicitly adding API versioning options in the command and diff --git a/pymongo/mongo_client.py b/pymongo/mongo_client.py index 9414d71962..4965b5e439 100644 --- a/pymongo/mongo_client.py +++ b/pymongo/mongo_client.py @@ -515,12 +515,12 @@ def __init__( - ``bypass_auto_encrpytion=False`` is passed to :class:`~pymongo.encryption_options.AutoEncryptionOpts` - | **Versioned API options:** - | (If not set explicitly, Versioned API will not be enabled.) + | **Stable API options:** + | (If not set explicitly, Stable API will not be enabled.) - `server_api`: A :class:`~pymongo.server_api.ServerApi` which configures this - client to use Versioned API. See :ref:`versioned-api-ref` for + client to use Stable API. See :ref:`versioned-api-ref` for details. .. seealso:: The MongoDB documentation on `connections `_. 
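
For context on the rename above: only the name moves from "Versioned API" to "Stable API"; the configuration surface is unchanged. A minimal sketch of declaring an API version, mirroring the doctest in ``pymongo/server_api.py`` below and assuming a MongoDB 5.0+ server on the default host and port::

    from pymongo.mongo_client import MongoClient
    from pymongo.server_api import ServerApi

    # Declare API version "1". strict=True additionally asks the
    # server to reject commands outside the declared API version.
    client = MongoClient(server_api=ServerApi("1", strict=True))
    client.admin.command("ping")  # "ping" is part of API version 1
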
diff --git a/pymongo/pool.py b/pymongo/pool.py index 09709ffbf4..4750163718 100644 --- a/pymongo/pool.py +++ b/pymongo/pool.py @@ -587,7 +587,7 @@ def unpin(self): def hello_cmd(self): # Handshake spec requires us to use OP_MSG+hello command for the - # initial handshake in load balanced or versioned api mode. + # initial handshake in load balanced or stable API mode. if self.opts.server_api or self.hello_ok or self.opts.load_balanced: self.op_msg_enabled = True return SON([(HelloCompat.CMD, 1)]) diff --git a/pymongo/server_api.py b/pymongo/server_api.py index 110406366a..e92d6e6179 100644 --- a/pymongo/server_api.py +++ b/pymongo/server_api.py @@ -12,11 +12,11 @@ # implied. See the License for the specific language governing # permissions and limitations under the License. -"""Support for MongoDB Versioned API. +"""Support for MongoDB Stable API. .. _versioned-api-ref: -MongoDB Versioned API +MongoDB Stable API ===================== Starting in MongoDB 5.0, applications can specify the server API version @@ -27,9 +27,9 @@ Declaring an API Version ```````````````````````` -.. attention:: Versioned API requires MongoDB >=5.0. +.. attention:: Stable API requires MongoDB >=5.0. -To configure MongoDB Versioned API, pass the ``server_api`` keyword option to +To configure MongoDB Stable API, pass the ``server_api`` keyword option to :class:`~pymongo.mongo_client.MongoClient`:: >>> from pymongo.mongo_client import MongoClient @@ -44,7 +44,7 @@ :meth:`~pymongo.database.Database.command` helper. .. note:: Declaring an API version on the - :class:`~pymongo.mongo_client.MongoClient` **and** specifying versioned + :class:`~pymongo.mongo_client.MongoClient` **and** specifying stable API options in :meth:`~pymongo.database.Database.command` command document is not supported and will lead to undefined behaviour. @@ -96,10 +96,10 @@ class ServerApiVersion: class ServerApi(object): - """MongoDB Versioned API.""" + """MongoDB Stable API.""" def __init__(self, version, strict=None, deprecation_errors=None): - """Options to configure MongoDB Versioned API. + """Options to configure MongoDB Stable API. :Parameters: - `version`: The API version string. Must be one of the values in diff --git a/test/__init__.py b/test/__init__.py index be0825025a..b2906481e9 100644 --- a/test/__init__.py +++ b/test/__init__.py @@ -913,7 +913,7 @@ def sanitize_cmd(cmd): cp.pop("$readPreference", None) cp.pop("lsid", None) if MONGODB_API_VERSION: - # Versioned api parameters + # Stable API parameters cp.pop("apiVersion", None) # OP_MSG encoding may move the payload type one field to the # end of the command. Do the same here. From e6b65860f59b432487fcdb385fde663345ce2917 Mon Sep 17 00:00:00 2001 From: Julius Park Date: Thu, 17 Feb 2022 17:13:25 -0800 Subject: [PATCH 0081/1588] PYTHON-3136 [DevOps] Resync-specs.sh removes ignored files from working tree (#878) --- .evergreen/resync-specs.sh | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/.evergreen/resync-specs.sh b/.evergreen/resync-specs.sh index d1bc26a91c..3042fd543b 100755 --- a/.evergreen/resync-specs.sh +++ b/.evergreen/resync-specs.sh @@ -56,10 +56,14 @@ cpjson () { cd "$SPECS"/source/$1 find . -name '*.json' | grep -Ev "${BLOCKLIST}" | cpio -pdm \ $PYMONGO/test/$2 - printf "\nIgnored files for ${PWD}" - printf "\n%s\n" "$(diff <(find . -name '*.json' | sort) \ + printf "\nIgnored files for ${PWD}\n" + IGNORED_FILES="$(printf "\n%s\n" "$(diff <(find . -name '*.json' | sort) \ <(find . 
-name '*.json' | grep -Ev "${BLOCKLIST}" | sort))" | \ - sed -e '/^[0-9]/d' | sed -e 's|< ./||g' + sed -e '/^[0-9]/d' | sed -e 's|< ./||g' )" + printf "%s\n" $IGNORED_FILES + cd "$PYMONGO"/test/$2 + printf "%s\n" $IGNORED_FILES | xargs git checkout master + } for spec in "$@" From dce5072dd1f100947565555d4eafb748239f4385 Mon Sep 17 00:00:00 2001 From: Julius Park Date: Fri, 18 Feb 2022 10:43:07 -0800 Subject: [PATCH 0082/1588] PYTHON-3137 Handle falsey values for "let" parameter (#881) --- pymongo/aggregation.py | 2 +- pymongo/collection.py | 8 ++++---- pymongo/cursor.py | 2 +- test/test_collection.py | 2 +- 4 files changed, 7 insertions(+), 7 deletions(-) diff --git a/pymongo/aggregation.py b/pymongo/aggregation.py index e190fefc56..84ecffe5fb 100644 --- a/pymongo/aggregation.py +++ b/pymongo/aggregation.py @@ -55,7 +55,7 @@ def __init__( self._performs_write = True common.validate_is_mapping("options", options) - if let: + if let is not None: common.validate_is_mapping("let", let) options["let"] = let if comment is not None: diff --git a/pymongo/collection.py b/pymongo/collection.py index 8de1fbeeaa..65bd1c54e7 100644 --- a/pymongo/collection.py +++ b/pymongo/collection.py @@ -728,7 +728,7 @@ def _update( hint = helpers._index_document(hint) update_doc["hint"] = hint command = SON([("update", self.name), ("ordered", ordered), ("updates", [update_doc])]) - if let: + if let is not None: common.validate_is_mapping("let", let) command["let"] = let if not write_concern.is_server_default: @@ -893,7 +893,7 @@ def replace_one( """ common.validate_is_mapping("filter", filter) common.validate_ok_for_replace(replacement) - if let: + if let is not None: common.validate_is_mapping("let", let) write_concern = self._write_concern_for(session) return UpdateResult( @@ -1189,7 +1189,7 @@ def _delete( if not write_concern.is_server_default: command["writeConcern"] = write_concern.document - if let: + if let is not None: common.validate_is_document_type("let", let) command["let"] = let @@ -2728,7 +2728,7 @@ def __find_and_modify( ) collation = validate_collation_or_none(kwargs.pop("collation", None)) cmd = SON([("findAndModify", self.__name), ("query", filter), ("new", return_document)]) - if let: + if let is not None: common.validate_is_mapping("let", let) cmd["let"] = let cmd.update(kwargs) diff --git a/pymongo/cursor.py b/pymongo/cursor.py index 02f1905df3..a2ccdf5860 100644 --- a/pymongo/cursor.py +++ b/pymongo/cursor.py @@ -248,7 +248,7 @@ def __init__( if projection is not None: projection = helpers._fields_list_to_dict(projection, "projection") - if let: + if let is not None: validate_is_document_type("let", let) self.__let = let diff --git a/test/test_collection.py b/test/test_collection.py index d9f51f530d..47636b495f 100644 --- a/test/test_collection.py +++ b/test/test_collection.py @@ -2123,7 +2123,7 @@ def test_helpers_with_let(self): (c.find_one_and_replace, ({}, {})), (c.aggregate, ([], {})), ] - for let in [10, "str"]: + for let in [10, "str", [], False]: for helper, args in helpers: with self.assertRaisesRegex(TypeError, "let must be an instance of dict"): helper(*args, let=let) # type: ignore From 52ff8c2e90cb90b3f734404400941c7b05c13c85 Mon Sep 17 00:00:00 2001 From: Julius Park Date: Fri, 18 Feb 2022 10:43:56 -0800 Subject: [PATCH 0083/1588] PYTHON-3068 Support 'let' option in BulkWriteOptions (#874) --- pymongo/bulk.py | 8 +- pymongo/collection.py | 8 +- .../unified/bulkWrite-deleteMany-let.json | 200 ++++++++++++++ .../crud/unified/bulkWrite-deleteOne-let.json | 200 ++++++++++++++ 
.../unified/bulkWrite-replaceOne-let.json | 214 +++++++++++++++ .../unified/bulkWrite-updateMany-let.json | 243 +++++++++++++++++ .../crud/unified/bulkWrite-updateOne-let.json | 247 ++++++++++++++++++ 7 files changed, 1118 insertions(+), 2 deletions(-) create mode 100644 test/crud/unified/bulkWrite-deleteMany-let.json create mode 100644 test/crud/unified/bulkWrite-deleteOne-let.json create mode 100644 test/crud/unified/bulkWrite-replaceOne-let.json create mode 100644 test/crud/unified/bulkWrite-updateMany-let.json create mode 100644 test/crud/unified/bulkWrite-updateOne-let.json diff --git a/pymongo/bulk.py b/pymongo/bulk.py index c736bd7d6f..9055e40e98 100644 --- a/pymongo/bulk.py +++ b/pymongo/bulk.py @@ -22,6 +22,7 @@ from bson.objectid import ObjectId from bson.raw_bson import RawBSONDocument from bson.son import SON +from pymongo import common from pymongo.client_session import _validate_session_write_concern from pymongo.collation import validate_collation_or_none from pymongo.common import ( @@ -137,13 +138,16 @@ def _raise_bulk_write_error(full_result): class _Bulk(object): """The private guts of the bulk write API.""" - def __init__(self, collection, ordered, bypass_document_validation, comment=None): + def __init__(self, collection, ordered, bypass_document_validation, comment=None, let=None): """Initialize a _Bulk instance.""" self.collection = collection.with_options( codec_options=collection.codec_options._replace( unicode_decode_error_handler="replace", document_class=dict ) ) + self.let = let + if self.let is not None: + common.validate_is_document_type("let", self.let) self.comment = comment self.ordered = ordered self.ops = [] @@ -314,6 +318,8 @@ def _execute_command( cmd["writeConcern"] = write_concern.document if self.bypass_doc_val: cmd["bypassDocumentValidation"] = True + if self.let is not None and run.op_type in (_DELETE, _UPDATE): + cmd["let"] = self.let if session: # Start a new retryable write unless one was already # started for this command. diff --git a/pymongo/collection.py b/pymongo/collection.py index 65bd1c54e7..bfe2007d5a 100644 --- a/pymongo/collection.py +++ b/pymongo/collection.py @@ -423,6 +423,7 @@ def bulk_write( bypass_document_validation: bool = False, session: Optional["ClientSession"] = None, comment: Optional[Any] = None, + let: Optional[Mapping] = None, ) -> BulkWriteResult: """Send a batch of write operations to the server. @@ -474,6 +475,10 @@ def bulk_write( :class:`~pymongo.client_session.ClientSession`. - `comment` (optional): A user-provided comment to attach to this command. + - `let` (optional): Map of parameter names and values. Values must be + constant or closed expressions that do not reference document + fields. Parameters can then be accessed as variables in an + aggregate expression context (e.g. "$$var"). :Returns: An instance of :class:`~pymongo.results.BulkWriteResult`. @@ -485,6 +490,7 @@ def bulk_write( .. versionchanged:: 4.1 Added ``comment`` parameter. + Added ``let`` parameter. .. versionchanged:: 3.6 Added ``session`` parameter. 
@@ -496,7 +502,7 @@ def bulk_write( """ common.validate_list("requests", requests) - blk = _Bulk(self, ordered, bypass_document_validation, comment=comment) + blk = _Bulk(self, ordered, bypass_document_validation, comment=comment, let=let) for request in requests: try: request._add_to_bulk(blk) diff --git a/test/crud/unified/bulkWrite-deleteMany-let.json b/test/crud/unified/bulkWrite-deleteMany-let.json new file mode 100644 index 0000000000..c16161e4bc --- /dev/null +++ b/test/crud/unified/bulkWrite-deleteMany-let.json @@ -0,0 +1,200 @@ +{ + "description": "BulkWrite deleteMany-let", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ], + "tests": [ + { + "description": "BulkWrite deleteMany with let option", + "runOnRequirements": [ + { + "minServerVersion": "5.0" + } + ], + "operations": [ + { + "object": "collection0", + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "deleteMany": { + "filter": { + "$expr": { + "$eq": [ + "$_id", + "$$id" + ] + } + } + } + } + ], + "let": { + "id": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "delete": "coll0", + "deletes": [ + { + "q": { + "$expr": { + "$eq": [ + "$_id", + "$$id" + ] + } + }, + "limit": 0 + } + ], + "let": { + "id": 1 + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 2 + } + ] + } + ] + }, + { + "description": "Bulk Write deleteMany with let option unsupported (server-side error)", + "runOnRequirements": [ + { + "minServerVersion": "3.6.0", + "maxServerVersion": "4.9" + } + ], + "operations": [ + { + "object": "collection0", + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "deleteOne": { + "filter": { + "$expr": { + "$eq": [ + "$_id", + "$$id" + ] + } + } + } + } + ], + "let": { + "id": 1 + } + }, + "expectError": { + "errorContains": "'delete.let' is an unknown field", + "isClientError": false + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "delete": "coll0", + "deletes": [ + { + "q": { + "$expr": { + "$eq": [ + "$_id", + "$$id" + ] + } + }, + "limit": 1 + } + ], + "let": { + "id": 1 + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/bulkWrite-deleteOne-let.json b/test/crud/unified/bulkWrite-deleteOne-let.json new file mode 100644 index 0000000000..29ac34d3dc --- /dev/null +++ b/test/crud/unified/bulkWrite-deleteOne-let.json @@ -0,0 +1,200 @@ +{ + "description": "BulkWrite deleteOne-let", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + 
"initialData": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ], + "tests": [ + { + "description": "BulkWrite deleteOne with let option", + "runOnRequirements": [ + { + "minServerVersion": "5.0" + } + ], + "operations": [ + { + "object": "collection0", + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "deleteOne": { + "filter": { + "$expr": { + "$eq": [ + "$_id", + "$$id" + ] + } + } + } + } + ], + "let": { + "id": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "delete": "coll0", + "deletes": [ + { + "q": { + "$expr": { + "$eq": [ + "$_id", + "$$id" + ] + } + }, + "limit": 1 + } + ], + "let": { + "id": 1 + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 2 + } + ] + } + ] + }, + { + "description": "Bulk Write deleteOne with let option unsupported (server-side error)", + "runOnRequirements": [ + { + "minServerVersion": "3.6.0", + "maxServerVersion": "4.9" + } + ], + "operations": [ + { + "object": "collection0", + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "deleteOne": { + "filter": { + "$expr": { + "$eq": [ + "$_id", + "$$id" + ] + } + } + } + } + ], + "let": { + "id": 1 + } + }, + "expectError": { + "errorContains": "'delete.let' is an unknown field", + "isClientError": false + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "delete": "coll0", + "deletes": [ + { + "q": { + "$expr": { + "$eq": [ + "$_id", + "$$id" + ] + } + }, + "limit": 1 + } + ], + "let": { + "id": 1 + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/bulkWrite-replaceOne-let.json b/test/crud/unified/bulkWrite-replaceOne-let.json new file mode 100644 index 0000000000..bdd1c27a0b --- /dev/null +++ b/test/crud/unified/bulkWrite-replaceOne-let.json @@ -0,0 +1,214 @@ +{ + "description": "BulkWrite replaceOne-let", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ], + "tests": [ + { + "description": "BulkWrite replaceOne with let option", + "runOnRequirements": [ + { + "minServerVersion": "5.0" + } + ], + "operations": [ + { + "object": "collection0", + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "replaceOne": { + "filter": { + "$expr": { + "$eq": [ + "$_id", + "$$id" + ] + } + }, + "replacement": { + "x": 3 + } + } + } + ], + "let": { + "id": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "coll0", + "updates": [ + { + "q": { + "$expr": { + "$eq": [ + "$_id", + "$$id" + ] + } + }, + "u": { + "x": 3 + } + } + ], + "let": { + "id": 1 + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ 
+ { + "_id": 1, + "x": 3 + }, + { + "_id": 2 + } + ] + } + ] + }, + { + "description": "Bulk Write replaceOne with let option unsupported (server-side error)", + "runOnRequirements": [ + { + "minServerVersion": "4.2", + "maxServerVersion": "4.9" + } + ], + "operations": [ + { + "object": "collection0", + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "replaceOne": { + "filter": { + "$expr": { + "$eq": [ + "$_id", + "$$id" + ] + } + }, + "replacement": { + "x": 3 + } + } + } + ], + "let": { + "id": 1 + } + }, + "expectError": { + "errorContains": "'update.let' is an unknown field", + "isClientError": false + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "coll0", + "updates": [ + { + "q": { + "$expr": { + "$eq": [ + "$_id", + "$$id" + ] + } + }, + "u": { + "x": 3 + } + } + ], + "let": { + "id": 1 + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/bulkWrite-updateMany-let.json b/test/crud/unified/bulkWrite-updateMany-let.json new file mode 100644 index 0000000000..6d437e9011 --- /dev/null +++ b/test/crud/unified/bulkWrite-updateMany-let.json @@ -0,0 +1,243 @@ +{ + "description": "BulkWrite updateMany-let", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 20 + }, + { + "_id": 2, + "x": 21 + } + ] + } + ], + "tests": [ + { + "description": "BulkWrite updateMany with let option", + "runOnRequirements": [ + { + "minServerVersion": "5.0" + } + ], + "operations": [ + { + "object": "collection0", + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "updateMany": { + "filter": { + "$expr": { + "$eq": [ + "$_id", + "$$id" + ] + } + }, + "update": [ + { + "$set": { + "x": 21 + } + } + ] + } + } + ], + "let": { + "id": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "coll0", + "updates": [ + { + "q": { + "$expr": { + "$eq": [ + "$_id", + "$$id" + ] + } + }, + "u": [ + { + "$set": { + "x": 21 + } + } + ], + "multi": true, + "upsert": { + "$$unsetOrMatches": false + } + } + ], + "let": { + "id": 1 + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 21 + }, + { + "_id": 2, + "x": 21 + } + ] + } + ] + }, + { + "description": "Bulk Write updateMany with let option unsupported (server-side error)", + "runOnRequirements": [ + { + "minServerVersion": "3.6.0", + "maxServerVersion": "4.9" + } + ], + "operations": [ + { + "object": "collection0", + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "updateMany": { + "filter": { + "$expr": { + "$eq": [ + "$_id", + "$$id" + ] + } + }, + "update": [ + { + "$set": { + "x": 21 + } + } + ] + } + } + ], + "let": { + "id": 1 + } + }, + "expectError": { + "errorContains": "'update.let' is an unknown field", + "isClientError": false + } + } + ], + "expectEvents": [ + { + "client": 
"client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "coll0", + "updates": [ + { + "q": { + "$expr": { + "$eq": [ + "$_id", + "$$id" + ] + } + }, + "u": [ + { + "$set": { + "x": 21 + } + } + ], + "multi": true, + "upsert": { + "$$unsetOrMatches": false + } + } + ], + "let": { + "id": 1 + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 20 + }, + { + "_id": 2, + "x": 21 + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/bulkWrite-updateOne-let.json b/test/crud/unified/bulkWrite-updateOne-let.json new file mode 100644 index 0000000000..e248779da3 --- /dev/null +++ b/test/crud/unified/bulkWrite-updateOne-let.json @@ -0,0 +1,247 @@ +{ + "description": "BulkWrite updateOne-let", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 20 + }, + { + "_id": 2, + "x": 21 + } + ] + } + ], + "tests": [ + { + "description": "BulkWrite updateOne with let option", + "runOnRequirements": [ + { + "minServerVersion": "5.0" + } + ], + "operations": [ + { + "object": "collection0", + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "updateOne": { + "filter": { + "$expr": { + "$eq": [ + "$_id", + "$$id" + ] + } + }, + "update": [ + { + "$set": { + "x": 22 + } + } + ] + } + } + ], + "let": { + "id": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "coll0", + "updates": [ + { + "q": { + "$expr": { + "$eq": [ + "$_id", + "$$id" + ] + } + }, + "u": [ + { + "$set": { + "x": 22 + } + } + ], + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + } + ], + "let": { + "id": 1 + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 22 + }, + { + "_id": 2, + "x": 21 + } + ] + } + ] + }, + { + "description": "Bulk Write updateOne with let option unsupported (server-side error)", + "runOnRequirements": [ + { + "minServerVersion": "3.6.0", + "maxServerVersion": "4.9" + } + ], + "operations": [ + { + "object": "collection0", + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "updateOne": { + "filter": { + "$expr": { + "$eq": [ + "$_id", + "$$id" + ] + } + }, + "update": [ + { + "$set": { + "x": 22 + } + } + ] + } + } + ], + "let": { + "id": 1 + } + }, + "expectError": { + "errorContains": "'update.let' is an unknown field", + "isClientError": false + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "coll0", + "updates": [ + { + "q": { + "$expr": { + "$eq": [ + "$_id", + "$$id" + ] + } + }, + "u": [ + { + "$set": { + "x": 22 + } + } + ], + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + } + ], + "let": { + "id": 1 + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 20 + }, + { + "_id": 2, + "x": 21 + } + ] + } + ] + } 
+ ] +} From 2f13a51cd42811d7364b789cbe45fd262afdfe61 Mon Sep 17 00:00:00 2001 From: Arie Bovenberg Date: Tue, 22 Feb 2022 19:27:16 +0100 Subject: [PATCH 0084/1588] PYTHON-3124 Remove overlapping slots from _WriteResult subclasses (#884) --- doc/contributors.rst | 1 + pymongo/results.py | 10 +++++----- 2 files changed, 6 insertions(+), 5 deletions(-) diff --git a/doc/contributors.rst b/doc/contributors.rst index 22cbee3215..4275209781 100644 --- a/doc/contributors.rst +++ b/doc/contributors.rst @@ -91,3 +91,4 @@ The following is a list of people who have contributed to - Khanh Nguyen (KN99HN) - Henri Froese (henrifroese) - Ishmum Jawad Khan (ishmum123) +- Arie Bovenberg (ariebovenberg) diff --git a/pymongo/results.py b/pymongo/results.py index 1cbb614bf3..5803900398 100644 --- a/pymongo/results.py +++ b/pymongo/results.py @@ -59,7 +59,7 @@ def acknowledged(self) -> bool: class InsertOneResult(_WriteResult): """The return type for :meth:`~pymongo.collection.Collection.insert_one`.""" - __slots__ = ("__inserted_id", "__acknowledged") + __slots__ = ("__inserted_id",) def __init__(self, inserted_id: Any, acknowledged: bool) -> None: self.__inserted_id = inserted_id @@ -74,7 +74,7 @@ def inserted_id(self) -> Any: class InsertManyResult(_WriteResult): """The return type for :meth:`~pymongo.collection.Collection.insert_many`.""" - __slots__ = ("__inserted_ids", "__acknowledged") + __slots__ = ("__inserted_ids",) def __init__(self, inserted_ids: List[Any], acknowledged: bool) -> None: self.__inserted_ids = inserted_ids @@ -98,7 +98,7 @@ class UpdateResult(_WriteResult): :meth:`~pymongo.collection.Collection.replace_one`. """ - __slots__ = ("__raw_result", "__acknowledged") + __slots__ = ("__raw_result",) def __init__(self, raw_result: Dict[str, Any], acknowledged: bool) -> None: self.__raw_result = raw_result @@ -136,7 +136,7 @@ class DeleteResult(_WriteResult): """The return type for :meth:`~pymongo.collection.Collection.delete_one` and :meth:`~pymongo.collection.Collection.delete_many`""" - __slots__ = ("__raw_result", "__acknowledged") + __slots__ = ("__raw_result",) def __init__(self, raw_result: Dict[str, Any], acknowledged: bool) -> None: self.__raw_result = raw_result @@ -157,7 +157,7 @@ def deleted_count(self) -> int: class BulkWriteResult(_WriteResult): """An object wrapper for bulk API write results.""" - __slots__ = ("__bulk_api_result", "__acknowledged") + __slots__ = ("__bulk_api_result",) def __init__(self, bulk_api_result: Dict[str, Any], acknowledged: bool) -> None: """Create a BulkWriteResult instance. 
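For context on the ``__slots__`` cleanup above: re-declaring a parent's slot in a subclass allocates a redundant descriptor, so every instance carries an extra, unused pointer, and the duplicate can shadow the parent's slot. That is why each result subclass now lists only the attribute it adds itself. A minimal standalone sketch of the principle (class names here are illustrative, not taken from the PyMongo source)::

    class Base:
        __slots__ = ("acknowledged",)

        def __init__(self, acknowledged):
            self.acknowledged = acknowledged


    class Child(Base):
        # List only the attributes Child itself adds. Repeating
        # "acknowledged" here would create a duplicate slot descriptor
        # that shadows Base's and wastes a pointer per instance.
        __slots__ = ("inserted_id",)

        def __init__(self, inserted_id, acknowledged):
            super().__init__(acknowledged)
            self.inserted_id = inserted_id
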
From 2141621194715836325f3ce3b6a10e16712d909d Mon Sep 17 00:00:00 2001 From: Julius Park Date: Tue, 22 Feb 2022 14:45:56 -0800 Subject: [PATCH 0085/1588] PYTHON-3088 Test rapid releases with load balancers (#885) PYTHON-3088 [v3.13] Update load balancer tests to support dedicated load balancer port (#870) (cherry picked from commit 341d489) --- .evergreen/config.yml | 9 +++++---- pymongo/pool.py | 5 ----- 2 files changed, 5 insertions(+), 9 deletions(-) diff --git a/.evergreen/config.yml b/.evergreen/config.yml index 9bfd57c805..2e3c12f3f8 100644 --- a/.evergreen/config.yml +++ b/.evergreen/config.yml @@ -1714,12 +1714,9 @@ tasks: commands: - func: "bootstrap mongo-orchestration" vars: - VERSION: "latest" TOPOLOGY: "sharded_cluster" LOAD_BALANCER: true - func: "run load-balancer" - vars: - LOAD_BALANCER: true - func: "run tests" - name: "test-fips-standalone" @@ -1937,6 +1934,10 @@ axes: display_name: "MongoDB latest" variables: VERSION: "latest" + - id: "rapid" + display_name: "MongoDB rapid" + variables: + VERSION: "rapid" # Choice of Python runtime version - id: python-version @@ -2636,7 +2637,7 @@ buildvariants: - matrix_name: "load-balancer" matrix_spec: platform: ubuntu-18.04 - mongodb-version: ["5.0", "latest"] + mongodb-version: ["rapid", "latest"] auth-ssl: "*" python-version: "*" loadbalancer: "*" diff --git a/pymongo/pool.py b/pymongo/pool.py index 4750163718..c7bd21fc8f 100644 --- a/pymongo/pool.py +++ b/pymongo/pool.py @@ -630,8 +630,6 @@ def _hello(self, cluster_time, topology_version, heartbeat_frequency): auth_ctx = None doc = self.command("admin", cmd, publish_events=False, exhaust_allowed=awaitable) - if not self.opts.load_balanced: - doc.pop("serviceId", None) hello = Hello(doc, awaitable=awaitable) self.is_writable = hello.is_writable self.max_wire_version = hello.max_wire_version @@ -676,9 +674,6 @@ def _next_reply(self): unpacked_docs = reply.unpack_response() response_doc = unpacked_docs[0] helpers._check_command_response(response_doc, self.max_wire_version) - # Remove after PYTHON-2712. - if not self.opts.load_balanced: - response_doc.pop("serviceId", None) return response_doc def command( From 8496ed4b3d9deccac490a5904d92dca3adb19f17 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Wed, 23 Feb 2022 11:52:55 -0800 Subject: [PATCH 0086/1588] PYTHON-3142 Stop using $where in test_maxConnecting (#886) --- test/test_pooling.py | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/test/test_pooling.py b/test/test_pooling.py index 00b947f10a..923c89d83b 100644 --- a/test/test_pooling.py +++ b/test/test_pooling.py @@ -378,12 +378,14 @@ def test_checkout_more_than_max_pool_size(self): def test_maxConnecting(self): client = rs_or_single_client() self.addCleanup(client.close) + self.client.test.test.insert_one({}) + self.addCleanup(self.client.test.test.delete_many, {}) pool = get_pool(client) docs = [] # Run 50 short running operations def find_one(): - docs.append(client.test.test.find_one({"$where": delay(0.001)})) + docs.append(client.test.test.find_one({})) threads = [threading.Thread(target=find_one) for _ in range(50)] for thread in threads: @@ -394,9 +396,8 @@ def find_one(): self.assertEqual(len(docs), 50) self.assertLessEqual(len(pool.sockets), 50) # TLS and auth make connection establishment more expensive than - # the artificially delayed query which leads to more threads - # hitting maxConnecting. The end result is fewer total connections - # and better latency. + # the query which leads to more threads hitting maxConnecting. 
+ # The end result is fewer total connections and better latency. if client_context.tls and client_context.auth_enabled: self.assertLessEqual(len(pool.sockets), 30) else: From 6fb8d7afe854bfe277e46c9fd3bcfc150db9b1e1 Mon Sep 17 00:00:00 2001 From: Julius Park Date: Wed, 23 Feb 2022 14:00:16 -0800 Subject: [PATCH 0087/1588] PYTHON-3129 Re-sync CRUD spec tests (#887) --- test/crud/unified/bulkWrite-deleteMany-let.json | 4 ++-- test/crud/unified/bulkWrite-deleteOne-let.json | 2 +- test/crud/unified/bulkWrite-replaceOne-let.json | 2 +- test/crud/unified/bulkWrite-updateMany-let.json | 2 +- test/crud/unified/bulkWrite-updateOne-let.json | 2 +- test/crud/unified/updateMany-let.json | 2 +- test/crud/unified/updateOne-let.json | 2 +- 7 files changed, 8 insertions(+), 8 deletions(-) diff --git a/test/crud/unified/bulkWrite-deleteMany-let.json b/test/crud/unified/bulkWrite-deleteMany-let.json index c16161e4bc..45c20ea49a 100644 --- a/test/crud/unified/bulkWrite-deleteMany-let.json +++ b/test/crud/unified/bulkWrite-deleteMany-let.json @@ -115,11 +115,11 @@ ] }, { - "description": "Bulk Write deleteMany with let option unsupported (server-side error)", + "description": "BulkWrite deleteMany with let option unsupported (server-side error)", "runOnRequirements": [ { "minServerVersion": "3.6.0", - "maxServerVersion": "4.9" + "maxServerVersion": "4.4.99" } ], "operations": [ diff --git a/test/crud/unified/bulkWrite-deleteOne-let.json b/test/crud/unified/bulkWrite-deleteOne-let.json index 29ac34d3dc..f3268163cb 100644 --- a/test/crud/unified/bulkWrite-deleteOne-let.json +++ b/test/crud/unified/bulkWrite-deleteOne-let.json @@ -115,7 +115,7 @@ ] }, { - "description": "Bulk Write deleteOne with let option unsupported (server-side error)", + "description": "BulkWrite deleteOne with let option unsupported (server-side error)", "runOnRequirements": [ { "minServerVersion": "3.6.0", diff --git a/test/crud/unified/bulkWrite-replaceOne-let.json b/test/crud/unified/bulkWrite-replaceOne-let.json index bdd1c27a0b..df4eafe62f 100644 --- a/test/crud/unified/bulkWrite-replaceOne-let.json +++ b/test/crud/unified/bulkWrite-replaceOne-let.json @@ -124,7 +124,7 @@ ] }, { - "description": "Bulk Write replaceOne with let option unsupported (server-side error)", + "description": "BulkWrite replaceOne with let option unsupported (server-side error)", "runOnRequirements": [ { "minServerVersion": "4.2", diff --git a/test/crud/unified/bulkWrite-updateMany-let.json b/test/crud/unified/bulkWrite-updateMany-let.json index 6d437e9011..3cc8da4c53 100644 --- a/test/crud/unified/bulkWrite-updateMany-let.json +++ b/test/crud/unified/bulkWrite-updateMany-let.json @@ -139,7 +139,7 @@ ] }, { - "description": "Bulk Write updateMany with let option unsupported (server-side error)", + "description": "BulkWrite updateMany with let option unsupported (server-side error)", "runOnRequirements": [ { "minServerVersion": "3.6.0", diff --git a/test/crud/unified/bulkWrite-updateOne-let.json b/test/crud/unified/bulkWrite-updateOne-let.json index e248779da3..2a3e4f79dc 100644 --- a/test/crud/unified/bulkWrite-updateOne-let.json +++ b/test/crud/unified/bulkWrite-updateOne-let.json @@ -141,7 +141,7 @@ ] }, { - "description": "Bulk Write updateOne with let option unsupported (server-side error)", + "description": "BulkWrite updateOne with let option unsupported (server-side error)", "runOnRequirements": [ { "minServerVersion": "3.6.0", diff --git a/test/crud/unified/updateMany-let.json b/test/crud/unified/updateMany-let.json index b4a4ddd800..8a19ac0933 100644 
--- a/test/crud/unified/updateMany-let.json +++ b/test/crud/unified/updateMany-let.json @@ -158,7 +158,7 @@ "description": "updateMany with let option unsupported (server-side error)", "runOnRequirements": [ { - "minServerVersion": "3.6.0", + "minServerVersion": "4.2.0", "maxServerVersion": "4.4.99" } ], diff --git a/test/crud/unified/updateOne-let.json b/test/crud/unified/updateOne-let.json index 7b1cc4cf00..8237bef7e8 100644 --- a/test/crud/unified/updateOne-let.json +++ b/test/crud/unified/updateOne-let.json @@ -136,7 +136,7 @@ "description": "UpdateOne with let option unsupported (server-side error)", "runOnRequirements": [ { - "minServerVersion": "3.6.0", + "minServerVersion": "4.2.0", "maxServerVersion": "4.4.99" } ], From f8f34b043843125f162d2b79307c9df68c16b51b Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Wed, 23 Feb 2022 14:00:43 -0800 Subject: [PATCH 0088/1588] PYTHON-2951 Test that handshake errors are retryable (#846) --- .../{ => legacy}/aggregate-merge.json | 0 .../{ => legacy}/aggregate-serverErrors.json | 0 .../{ => legacy}/aggregate.json | 0 ...angeStreams-client.watch-serverErrors.json | 0 .../changeStreams-client.watch.json | 0 ...ngeStreams-db.coll.watch-serverErrors.json | 0 .../changeStreams-db.coll.watch.json | 0 .../changeStreams-db.watch-serverErrors.json | 0 .../{ => legacy}/changeStreams-db.watch.json | 0 .../{ => legacy}/count-serverErrors.json | 0 test/retryable_reads/{ => legacy}/count.json | 0 .../countDocuments-serverErrors.json | 0 .../{ => legacy}/countDocuments.json | 0 .../{ => legacy}/distinct-serverErrors.json | 0 .../{ => legacy}/distinct.json | 0 .../estimatedDocumentCount-4.9.json | 0 .../estimatedDocumentCount-pre4.9.json | 0 ...timatedDocumentCount-serverErrors-4.9.json | 0 ...atedDocumentCount-serverErrors-pre4.9.json | 0 .../{ => legacy}/find-serverErrors.json | 0 test/retryable_reads/{ => legacy}/find.json | 0 .../{ => legacy}/findOne-serverErrors.json | 0 .../retryable_reads/{ => legacy}/findOne.json | 0 .../gridfs-download-serverErrors.json | 0 .../{ => legacy}/gridfs-download.json | 0 .../gridfs-downloadByName-serverErrors.json | 0 .../{ => legacy}/gridfs-downloadByName.json | 0 .../listCollectionNames-serverErrors.json | 0 .../{ => legacy}/listCollectionNames.json | 0 .../listCollectionObjects-serverErrors.json | 0 .../{ => legacy}/listCollectionObjects.json | 0 .../listCollections-serverErrors.json | 0 .../{ => legacy}/listCollections.json | 0 .../listDatabaseNames-serverErrors.json | 0 .../{ => legacy}/listDatabaseNames.json | 0 .../listDatabaseObjects-serverErrors.json | 0 .../{ => legacy}/listDatabaseObjects.json | 0 .../listDatabases-serverErrors.json | 0 .../{ => legacy}/listDatabases.json | 0 .../listIndexNames-serverErrors.json | 0 .../{ => legacy}/listIndexNames.json | 0 .../listIndexes-serverErrors.json | 0 .../{ => legacy}/listIndexes.json | 0 .../{ => legacy}/mapReduce.json | 0 .../unified/handshakeError.json | 257 ++++++++++++++++ .../unified/handshakeError.json | 279 ++++++++++++++++++ test/test_retryable_reads.py | 2 +- test/test_retryable_reads_unified.py | 32 ++ .../unified/retryable-abort-handshake.json | 204 +++++++++++++ .../unified/retryable-commit-handshake.json | 211 +++++++++++++ test/unified_format.py | 3 +- 51 files changed, 985 insertions(+), 3 deletions(-) rename test/retryable_reads/{ => legacy}/aggregate-merge.json (100%) rename test/retryable_reads/{ => legacy}/aggregate-serverErrors.json (100%) rename test/retryable_reads/{ => legacy}/aggregate.json (100%) rename test/retryable_reads/{ => 
legacy}/changeStreams-client.watch-serverErrors.json (100%) rename test/retryable_reads/{ => legacy}/changeStreams-client.watch.json (100%) rename test/retryable_reads/{ => legacy}/changeStreams-db.coll.watch-serverErrors.json (100%) rename test/retryable_reads/{ => legacy}/changeStreams-db.coll.watch.json (100%) rename test/retryable_reads/{ => legacy}/changeStreams-db.watch-serverErrors.json (100%) rename test/retryable_reads/{ => legacy}/changeStreams-db.watch.json (100%) rename test/retryable_reads/{ => legacy}/count-serverErrors.json (100%) rename test/retryable_reads/{ => legacy}/count.json (100%) rename test/retryable_reads/{ => legacy}/countDocuments-serverErrors.json (100%) rename test/retryable_reads/{ => legacy}/countDocuments.json (100%) rename test/retryable_reads/{ => legacy}/distinct-serverErrors.json (100%) rename test/retryable_reads/{ => legacy}/distinct.json (100%) rename test/retryable_reads/{ => legacy}/estimatedDocumentCount-4.9.json (100%) rename test/retryable_reads/{ => legacy}/estimatedDocumentCount-pre4.9.json (100%) rename test/retryable_reads/{ => legacy}/estimatedDocumentCount-serverErrors-4.9.json (100%) rename test/retryable_reads/{ => legacy}/estimatedDocumentCount-serverErrors-pre4.9.json (100%) rename test/retryable_reads/{ => legacy}/find-serverErrors.json (100%) rename test/retryable_reads/{ => legacy}/find.json (100%) rename test/retryable_reads/{ => legacy}/findOne-serverErrors.json (100%) rename test/retryable_reads/{ => legacy}/findOne.json (100%) rename test/retryable_reads/{ => legacy}/gridfs-download-serverErrors.json (100%) rename test/retryable_reads/{ => legacy}/gridfs-download.json (100%) rename test/retryable_reads/{ => legacy}/gridfs-downloadByName-serverErrors.json (100%) rename test/retryable_reads/{ => legacy}/gridfs-downloadByName.json (100%) rename test/retryable_reads/{ => legacy}/listCollectionNames-serverErrors.json (100%) rename test/retryable_reads/{ => legacy}/listCollectionNames.json (100%) rename test/retryable_reads/{ => legacy}/listCollectionObjects-serverErrors.json (100%) rename test/retryable_reads/{ => legacy}/listCollectionObjects.json (100%) rename test/retryable_reads/{ => legacy}/listCollections-serverErrors.json (100%) rename test/retryable_reads/{ => legacy}/listCollections.json (100%) rename test/retryable_reads/{ => legacy}/listDatabaseNames-serverErrors.json (100%) rename test/retryable_reads/{ => legacy}/listDatabaseNames.json (100%) rename test/retryable_reads/{ => legacy}/listDatabaseObjects-serverErrors.json (100%) rename test/retryable_reads/{ => legacy}/listDatabaseObjects.json (100%) rename test/retryable_reads/{ => legacy}/listDatabases-serverErrors.json (100%) rename test/retryable_reads/{ => legacy}/listDatabases.json (100%) rename test/retryable_reads/{ => legacy}/listIndexNames-serverErrors.json (100%) rename test/retryable_reads/{ => legacy}/listIndexNames.json (100%) rename test/retryable_reads/{ => legacy}/listIndexes-serverErrors.json (100%) rename test/retryable_reads/{ => legacy}/listIndexes.json (100%) rename test/retryable_reads/{ => legacy}/mapReduce.json (100%) create mode 100644 test/retryable_reads/unified/handshakeError.json create mode 100644 test/retryable_writes/unified/handshakeError.json create mode 100644 test/test_retryable_reads_unified.py create mode 100644 test/transactions/unified/retryable-abort-handshake.json create mode 100644 test/transactions/unified/retryable-commit-handshake.json diff --git a/test/retryable_reads/aggregate-merge.json 
b/test/retryable_reads/legacy/aggregate-merge.json similarity index 100% rename from test/retryable_reads/aggregate-merge.json rename to test/retryable_reads/legacy/aggregate-merge.json diff --git a/test/retryable_reads/aggregate-serverErrors.json b/test/retryable_reads/legacy/aggregate-serverErrors.json similarity index 100% rename from test/retryable_reads/aggregate-serverErrors.json rename to test/retryable_reads/legacy/aggregate-serverErrors.json diff --git a/test/retryable_reads/aggregate.json b/test/retryable_reads/legacy/aggregate.json similarity index 100% rename from test/retryable_reads/aggregate.json rename to test/retryable_reads/legacy/aggregate.json diff --git a/test/retryable_reads/changeStreams-client.watch-serverErrors.json b/test/retryable_reads/legacy/changeStreams-client.watch-serverErrors.json similarity index 100% rename from test/retryable_reads/changeStreams-client.watch-serverErrors.json rename to test/retryable_reads/legacy/changeStreams-client.watch-serverErrors.json diff --git a/test/retryable_reads/changeStreams-client.watch.json b/test/retryable_reads/legacy/changeStreams-client.watch.json similarity index 100% rename from test/retryable_reads/changeStreams-client.watch.json rename to test/retryable_reads/legacy/changeStreams-client.watch.json diff --git a/test/retryable_reads/changeStreams-db.coll.watch-serverErrors.json b/test/retryable_reads/legacy/changeStreams-db.coll.watch-serverErrors.json similarity index 100% rename from test/retryable_reads/changeStreams-db.coll.watch-serverErrors.json rename to test/retryable_reads/legacy/changeStreams-db.coll.watch-serverErrors.json diff --git a/test/retryable_reads/changeStreams-db.coll.watch.json b/test/retryable_reads/legacy/changeStreams-db.coll.watch.json similarity index 100% rename from test/retryable_reads/changeStreams-db.coll.watch.json rename to test/retryable_reads/legacy/changeStreams-db.coll.watch.json diff --git a/test/retryable_reads/changeStreams-db.watch-serverErrors.json b/test/retryable_reads/legacy/changeStreams-db.watch-serverErrors.json similarity index 100% rename from test/retryable_reads/changeStreams-db.watch-serverErrors.json rename to test/retryable_reads/legacy/changeStreams-db.watch-serverErrors.json diff --git a/test/retryable_reads/changeStreams-db.watch.json b/test/retryable_reads/legacy/changeStreams-db.watch.json similarity index 100% rename from test/retryable_reads/changeStreams-db.watch.json rename to test/retryable_reads/legacy/changeStreams-db.watch.json diff --git a/test/retryable_reads/count-serverErrors.json b/test/retryable_reads/legacy/count-serverErrors.json similarity index 100% rename from test/retryable_reads/count-serverErrors.json rename to test/retryable_reads/legacy/count-serverErrors.json diff --git a/test/retryable_reads/count.json b/test/retryable_reads/legacy/count.json similarity index 100% rename from test/retryable_reads/count.json rename to test/retryable_reads/legacy/count.json diff --git a/test/retryable_reads/countDocuments-serverErrors.json b/test/retryable_reads/legacy/countDocuments-serverErrors.json similarity index 100% rename from test/retryable_reads/countDocuments-serverErrors.json rename to test/retryable_reads/legacy/countDocuments-serverErrors.json diff --git a/test/retryable_reads/countDocuments.json b/test/retryable_reads/legacy/countDocuments.json similarity index 100% rename from test/retryable_reads/countDocuments.json rename to test/retryable_reads/legacy/countDocuments.json diff --git a/test/retryable_reads/distinct-serverErrors.json 
b/test/retryable_reads/legacy/distinct-serverErrors.json similarity index 100% rename from test/retryable_reads/distinct-serverErrors.json rename to test/retryable_reads/legacy/distinct-serverErrors.json diff --git a/test/retryable_reads/distinct.json b/test/retryable_reads/legacy/distinct.json similarity index 100% rename from test/retryable_reads/distinct.json rename to test/retryable_reads/legacy/distinct.json diff --git a/test/retryable_reads/estimatedDocumentCount-4.9.json b/test/retryable_reads/legacy/estimatedDocumentCount-4.9.json similarity index 100% rename from test/retryable_reads/estimatedDocumentCount-4.9.json rename to test/retryable_reads/legacy/estimatedDocumentCount-4.9.json diff --git a/test/retryable_reads/estimatedDocumentCount-pre4.9.json b/test/retryable_reads/legacy/estimatedDocumentCount-pre4.9.json similarity index 100% rename from test/retryable_reads/estimatedDocumentCount-pre4.9.json rename to test/retryable_reads/legacy/estimatedDocumentCount-pre4.9.json diff --git a/test/retryable_reads/estimatedDocumentCount-serverErrors-4.9.json b/test/retryable_reads/legacy/estimatedDocumentCount-serverErrors-4.9.json similarity index 100% rename from test/retryable_reads/estimatedDocumentCount-serverErrors-4.9.json rename to test/retryable_reads/legacy/estimatedDocumentCount-serverErrors-4.9.json diff --git a/test/retryable_reads/estimatedDocumentCount-serverErrors-pre4.9.json b/test/retryable_reads/legacy/estimatedDocumentCount-serverErrors-pre4.9.json similarity index 100% rename from test/retryable_reads/estimatedDocumentCount-serverErrors-pre4.9.json rename to test/retryable_reads/legacy/estimatedDocumentCount-serverErrors-pre4.9.json diff --git a/test/retryable_reads/find-serverErrors.json b/test/retryable_reads/legacy/find-serverErrors.json similarity index 100% rename from test/retryable_reads/find-serverErrors.json rename to test/retryable_reads/legacy/find-serverErrors.json diff --git a/test/retryable_reads/find.json b/test/retryable_reads/legacy/find.json similarity index 100% rename from test/retryable_reads/find.json rename to test/retryable_reads/legacy/find.json diff --git a/test/retryable_reads/findOne-serverErrors.json b/test/retryable_reads/legacy/findOne-serverErrors.json similarity index 100% rename from test/retryable_reads/findOne-serverErrors.json rename to test/retryable_reads/legacy/findOne-serverErrors.json diff --git a/test/retryable_reads/findOne.json b/test/retryable_reads/legacy/findOne.json similarity index 100% rename from test/retryable_reads/findOne.json rename to test/retryable_reads/legacy/findOne.json diff --git a/test/retryable_reads/gridfs-download-serverErrors.json b/test/retryable_reads/legacy/gridfs-download-serverErrors.json similarity index 100% rename from test/retryable_reads/gridfs-download-serverErrors.json rename to test/retryable_reads/legacy/gridfs-download-serverErrors.json diff --git a/test/retryable_reads/gridfs-download.json b/test/retryable_reads/legacy/gridfs-download.json similarity index 100% rename from test/retryable_reads/gridfs-download.json rename to test/retryable_reads/legacy/gridfs-download.json diff --git a/test/retryable_reads/gridfs-downloadByName-serverErrors.json b/test/retryable_reads/legacy/gridfs-downloadByName-serverErrors.json similarity index 100% rename from test/retryable_reads/gridfs-downloadByName-serverErrors.json rename to test/retryable_reads/legacy/gridfs-downloadByName-serverErrors.json diff --git a/test/retryable_reads/gridfs-downloadByName.json 
b/test/retryable_reads/legacy/gridfs-downloadByName.json similarity index 100% rename from test/retryable_reads/gridfs-downloadByName.json rename to test/retryable_reads/legacy/gridfs-downloadByName.json diff --git a/test/retryable_reads/listCollectionNames-serverErrors.json b/test/retryable_reads/legacy/listCollectionNames-serverErrors.json similarity index 100% rename from test/retryable_reads/listCollectionNames-serverErrors.json rename to test/retryable_reads/legacy/listCollectionNames-serverErrors.json diff --git a/test/retryable_reads/listCollectionNames.json b/test/retryable_reads/legacy/listCollectionNames.json similarity index 100% rename from test/retryable_reads/listCollectionNames.json rename to test/retryable_reads/legacy/listCollectionNames.json diff --git a/test/retryable_reads/listCollectionObjects-serverErrors.json b/test/retryable_reads/legacy/listCollectionObjects-serverErrors.json similarity index 100% rename from test/retryable_reads/listCollectionObjects-serverErrors.json rename to test/retryable_reads/legacy/listCollectionObjects-serverErrors.json diff --git a/test/retryable_reads/listCollectionObjects.json b/test/retryable_reads/legacy/listCollectionObjects.json similarity index 100% rename from test/retryable_reads/listCollectionObjects.json rename to test/retryable_reads/legacy/listCollectionObjects.json diff --git a/test/retryable_reads/listCollections-serverErrors.json b/test/retryable_reads/legacy/listCollections-serverErrors.json similarity index 100% rename from test/retryable_reads/listCollections-serverErrors.json rename to test/retryable_reads/legacy/listCollections-serverErrors.json diff --git a/test/retryable_reads/listCollections.json b/test/retryable_reads/legacy/listCollections.json similarity index 100% rename from test/retryable_reads/listCollections.json rename to test/retryable_reads/legacy/listCollections.json diff --git a/test/retryable_reads/listDatabaseNames-serverErrors.json b/test/retryable_reads/legacy/listDatabaseNames-serverErrors.json similarity index 100% rename from test/retryable_reads/listDatabaseNames-serverErrors.json rename to test/retryable_reads/legacy/listDatabaseNames-serverErrors.json diff --git a/test/retryable_reads/listDatabaseNames.json b/test/retryable_reads/legacy/listDatabaseNames.json similarity index 100% rename from test/retryable_reads/listDatabaseNames.json rename to test/retryable_reads/legacy/listDatabaseNames.json diff --git a/test/retryable_reads/listDatabaseObjects-serverErrors.json b/test/retryable_reads/legacy/listDatabaseObjects-serverErrors.json similarity index 100% rename from test/retryable_reads/listDatabaseObjects-serverErrors.json rename to test/retryable_reads/legacy/listDatabaseObjects-serverErrors.json diff --git a/test/retryable_reads/listDatabaseObjects.json b/test/retryable_reads/legacy/listDatabaseObjects.json similarity index 100% rename from test/retryable_reads/listDatabaseObjects.json rename to test/retryable_reads/legacy/listDatabaseObjects.json diff --git a/test/retryable_reads/listDatabases-serverErrors.json b/test/retryable_reads/legacy/listDatabases-serverErrors.json similarity index 100% rename from test/retryable_reads/listDatabases-serverErrors.json rename to test/retryable_reads/legacy/listDatabases-serverErrors.json diff --git a/test/retryable_reads/listDatabases.json b/test/retryable_reads/legacy/listDatabases.json similarity index 100% rename from test/retryable_reads/listDatabases.json rename to test/retryable_reads/legacy/listDatabases.json diff --git 
a/test/retryable_reads/listIndexNames-serverErrors.json b/test/retryable_reads/legacy/listIndexNames-serverErrors.json similarity index 100% rename from test/retryable_reads/listIndexNames-serverErrors.json rename to test/retryable_reads/legacy/listIndexNames-serverErrors.json diff --git a/test/retryable_reads/listIndexNames.json b/test/retryable_reads/legacy/listIndexNames.json similarity index 100% rename from test/retryable_reads/listIndexNames.json rename to test/retryable_reads/legacy/listIndexNames.json diff --git a/test/retryable_reads/listIndexes-serverErrors.json b/test/retryable_reads/legacy/listIndexes-serverErrors.json similarity index 100% rename from test/retryable_reads/listIndexes-serverErrors.json rename to test/retryable_reads/legacy/listIndexes-serverErrors.json diff --git a/test/retryable_reads/listIndexes.json b/test/retryable_reads/legacy/listIndexes.json similarity index 100% rename from test/retryable_reads/listIndexes.json rename to test/retryable_reads/legacy/listIndexes.json diff --git a/test/retryable_reads/mapReduce.json b/test/retryable_reads/legacy/mapReduce.json similarity index 100% rename from test/retryable_reads/mapReduce.json rename to test/retryable_reads/legacy/mapReduce.json diff --git a/test/retryable_reads/unified/handshakeError.json b/test/retryable_reads/unified/handshakeError.json new file mode 100644 index 0000000000..2cf1d173f8 --- /dev/null +++ b/test/retryable_reads/unified/handshakeError.json @@ -0,0 +1,257 @@ +{ + "description": "retryable reads handshake failures", + "schemaVersion": "1.3", + "runOnRequirements": [ + { + "minServerVersion": "4.2", + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ], + "auth": true + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent", + "connectionCheckOutStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "retryable-handshake-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "retryable-handshake-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ], + "tests": [ + { + "description": "find succeeds after retryable handshake network error", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "saslContinue", + "ping" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database0", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectError": { + "isError": true + } + }, + { + "name": "find", + "object": "collection0", + "arguments": { + "filter": { + "_id": 2 + } + }, + "expectResult": [ + { + "_id": 2, + "x": 22 + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "eventType": "cmap", + "events": [ + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + } + ] + }, + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1 + }, + "databaseName": "retryable-handshake-tests" + } + }, + { + 
"commandStartedEvent": { + "command": { + "find": "coll", + "filter": { + "_id": 2 + } + }, + "databaseName": "retryable-handshake-tests" + } + } + ] + } + ] + }, + { + "description": "find succeeds after retryable handshake network error (ShutdownInProgress)", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "saslContinue", + "ping" + ], + "errorCode": 91 + } + } + } + }, + { + "name": "runCommand", + "object": "database0", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectError": { + "isError": true + } + }, + { + "name": "find", + "object": "collection0", + "arguments": { + "filter": { + "_id": 2 + } + }, + "expectResult": [ + { + "_id": 2, + "x": 22 + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "eventType": "cmap", + "events": [ + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + } + ] + }, + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1 + }, + "databaseName": "retryable-handshake-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "coll", + "filter": { + "_id": 2 + } + }, + "databaseName": "retryable-handshake-tests" + } + } + ] + } + ] + } + ] +} diff --git a/test/retryable_writes/unified/handshakeError.json b/test/retryable_writes/unified/handshakeError.json new file mode 100644 index 0000000000..6d6b4ac491 --- /dev/null +++ b/test/retryable_writes/unified/handshakeError.json @@ -0,0 +1,279 @@ +{ + "description": "retryable writes handshake failures", + "schemaVersion": "1.3", + "runOnRequirements": [ + { + "minServerVersion": "4.2", + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ], + "auth": true + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent", + "connectionCheckOutStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "retryable-handshake-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "retryable-handshake-tests", + "documents": [ + { + "_id": 1, + "x": 11 + } + ] + } + ], + "tests": [ + { + "description": "InsertOne succeeds after retryable handshake error", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "saslContinue", + "ping" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database0", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectError": { + "isError": true + } + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "_id": 2, + "x": 22 + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "eventType": "cmap", + "events": [ + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + } + ] 
+ }, + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1 + }, + "databaseName": "retryable-handshake-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "coll", + "documents": [ + { + "_id": 2, + "x": 22 + } + ] + }, + "commandName": "insert", + "databaseName": "retryable-handshake-tests" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "retryable-handshake-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ] + }, + { + "description": "InsertOne succeeds after retryable handshake error ShutdownInProgress", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "saslContinue", + "ping" + ], + "errorCode": 91 + } + } + } + }, + { + "name": "runCommand", + "object": "database0", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectError": { + "isError": true + } + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "_id": 2, + "x": 22 + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "eventType": "cmap", + "events": [ + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + } + ] + }, + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1 + }, + "databaseName": "retryable-handshake-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "coll", + "documents": [ + { + "_id": 2, + "x": 22 + } + ] + }, + "commandName": "insert", + "databaseName": "retryable-handshake-tests" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "retryable-handshake-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ] + } + ] +} diff --git a/test/test_retryable_reads.py b/test/test_retryable_reads.py index 808477a8c0..01fe6901ae 100644 --- a/test/test_retryable_reads.py +++ b/test/test_retryable_reads.py @@ -46,7 +46,7 @@ from pymongo.write_concern import WriteConcern # Location of JSON test specifications. -_TEST_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "retryable_reads") +_TEST_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "retryable_reads", "legacy") class TestClientOptions(PyMongoTestCase): diff --git a/test/test_retryable_reads_unified.py b/test/test_retryable_reads_unified.py new file mode 100644 index 0000000000..6bf4157763 --- /dev/null +++ b/test/test_retryable_reads_unified.py @@ -0,0 +1,32 @@ +# Copyright 2022-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
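
The handshake-error specs above all follow the same recipe: arm the server's failCommand fail point against "saslContinue" (the auth handshake) plus "ping", trigger one failing ping to poison the pooled connection, then verify that the next operation transparently retries on a fresh connection. A minimal hand-rolled version of that recipe, assuming a local MongoDB 4.2+ replica set started with test commands enabled (the URI and namespace below are illustrative, not part of the spec files):

from pymongo import MongoClient

client = MongoClient("mongodb://localhost:27017/?replicaSet=rs0&retryWrites=true")
# Fail the next two matching commands by closing the connection, which
# simulates a network error during the connection handshake.
client.admin.command(
    "configureFailPoint",
    "failCommand",
    mode={"times": 2},
    data={"failCommands": ["saslContinue", "ping"], "closeConnection": True},
)
try:
    try:
        client.db.command("ping")  # expected to fail, dirtying the pool
    except Exception:
        pass
    # Retryable writes kick in: the insert survives the handshake failure.
    client.db.coll.insert_one({"_id": 2, "x": 22})
finally:
    client.admin.command("configureFailPoint", "failCommand", mode="off")
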
+ +"""Test the Retryable Reads unified spec tests.""" + +import os +import sys + +sys.path[0:0] = [""] + +from test import unittest +from test.unified_format import generate_test_classes + +# Location of JSON test specifications. +TEST_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "retryable_reads", "unified") + +# Generate unified tests. +globals().update(generate_test_classes(TEST_PATH, module=__name__)) + +if __name__ == "__main__": + unittest.main() diff --git a/test/transactions/unified/retryable-abort-handshake.json b/test/transactions/unified/retryable-abort-handshake.json new file mode 100644 index 0000000000..4ad56e2f2f --- /dev/null +++ b/test/transactions/unified/retryable-abort-handshake.json @@ -0,0 +1,204 @@ +{ + "description": "retryable abortTransaction on handshake errors", + "schemaVersion": "1.4", + "runOnRequirements": [ + { + "minServerVersion": "4.2", + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ], + "serverless": "forbid", + "auth": true + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent", + "connectionCheckOutStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "retryable-handshake-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll" + } + }, + { + "session": { + "id": "session0", + "client": "client0" + } + }, + { + "session": { + "id": "session1", + "client": "client0" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "retryable-handshake-tests", + "documents": [ + { + "_id": 1, + "x": 11 + } + ] + } + ], + "tests": [ + { + "description": "AbortTransaction succeeds after handshake network error", + "skipReason": "DRIVERS-2032: Pinned servers need to be checked if they are still selectable", + "operations": [ + { + "name": "startTransaction", + "object": "session0" + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "session": "session0", + "document": { + "_id": 2, + "x": 22 + } + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "session": "session1", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "saslContinue", + "ping" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database0", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + }, + "session": "session1" + }, + "expectError": { + "isError": true + } + }, + { + "name": "abortTransaction", + "object": "session0" + } + ], + "expectEvents": [ + { + "client": "client0", + "eventType": "cmap", + "events": [ + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + } + ] + }, + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "coll", + "documents": [ + { + "_id": 2, + "x": 22 + } + ], + "startTransaction": true + }, + "commandName": "insert", + "databaseName": "retryable-handshake-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "ping": 1 + }, + "databaseName": "retryable-handshake-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "abortTransaction": 1, + "lsid": { + 
"$$sessionLsid": "session0" + } + }, + "commandName": "abortTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "retryable-handshake-tests", + "documents": [ + { + "_id": 1, + "x": 11 + } + ] + } + ] + } + ] +} diff --git a/test/transactions/unified/retryable-commit-handshake.json b/test/transactions/unified/retryable-commit-handshake.json new file mode 100644 index 0000000000..d9315a8fc6 --- /dev/null +++ b/test/transactions/unified/retryable-commit-handshake.json @@ -0,0 +1,211 @@ +{ + "description": "retryable commitTransaction on handshake errors", + "schemaVersion": "1.4", + "runOnRequirements": [ + { + "minServerVersion": "4.2", + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ], + "serverless": "forbid", + "auth": true + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent", + "connectionCheckOutStartedEvent" + ], + "uriOptions": { + "retryWrites": false + } + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "retryable-handshake-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll" + } + }, + { + "session": { + "id": "session0", + "client": "client0" + } + }, + { + "session": { + "id": "session1", + "client": "client0" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "retryable-handshake-tests", + "documents": [ + { + "_id": 1, + "x": 11 + } + ] + } + ], + "tests": [ + { + "description": "CommitTransaction succeeds after handshake network error", + "skipReason": "DRIVERS-2032: Pinned servers need to be checked if they are still selectable", + "operations": [ + { + "name": "startTransaction", + "object": "session0" + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "session": "session0", + "document": { + "_id": 2, + "x": 22 + } + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "session": "session1", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "saslContinue", + "ping" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database0", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + }, + "session": "session1" + }, + "expectError": { + "isError": true + } + }, + { + "name": "commitTransaction", + "object": "session0" + } + ], + "expectEvents": [ + { + "client": "client0", + "eventType": "cmap", + "events": [ + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + } + ] + }, + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "coll", + "documents": [ + { + "_id": 2, + "x": 22 + } + ], + "startTransaction": true + }, + "commandName": "insert", + "databaseName": "retryable-handshake-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "ping": 1 + }, + "databaseName": "retryable-handshake-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + 
"collectionName": "coll", + "databaseName": "retryable-handshake-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ] + } + ] +} diff --git a/test/unified_format.py b/test/unified_format.py index 6f1e386932..5bf98c5451 100644 --- a/test/unified_format.py +++ b/test/unified_format.py @@ -1153,8 +1153,7 @@ def check_events(self, spec): self.assertEqual(actual_events, []) continue - if len(events) > len(actual_events): - self.fail("Expected to see %s events, got %s" % (len(events), len(actual_events))) + self.assertGreaterEqual(len(actual_events), len(events), actual_events) for idx, expected_event in enumerate(events): self.match_evaluator.match_event(event_type, expected_event, actual_events[idx]) From a3f0f9158814ba0d4881bedc2cb78be52986ddfa Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Fri, 25 Feb 2022 10:36:05 -0800 Subject: [PATCH 0089/1588] PYTHON-3123 Convert sessions spec tests to unified test format (#888) Create implicit session _before_ starting a retryable read. --- pymongo/collection.py | 45 +- .../driver-sessions-dirty-session-errors.json | 969 ++++++++++++++++++ .../driver-sessions-server-support.json | 256 +++++ .../sessions/legacy/dirty-session-errors.json | 671 ------------ test/sessions/legacy/server-support.json | 181 ---- ...t-sessions-not-supported-client-error.json | 0 ...t-sessions-not-supported-server-error.json | 0 .../snapshot-sessions-unsupported-ops.json | 0 .../{unified => }/snapshot-sessions.json | 0 test/test_session.py | 53 +- test/test_sessions_unified.py | 2 +- 11 files changed, 1251 insertions(+), 926 deletions(-) create mode 100644 test/sessions/driver-sessions-dirty-session-errors.json create mode 100644 test/sessions/driver-sessions-server-support.json delete mode 100644 test/sessions/legacy/dirty-session-errors.json delete mode 100644 test/sessions/legacy/server-support.json rename test/sessions/{unified => }/snapshot-sessions-not-supported-client-error.json (100%) rename test/sessions/{unified => }/snapshot-sessions-not-supported-server-error.json (100%) rename test/sessions/{unified => }/snapshot-sessions-unsupported-ops.json (100%) rename test/sessions/{unified => }/snapshot-sessions.json (100%) diff --git a/pymongo/collection.py b/pymongo/collection.py index bfe2007d5a..46916f98f8 100644 --- a/pymongo/collection.py +++ b/pymongo/collection.py @@ -1720,9 +1720,9 @@ def _cmd(session, server, sock_info, read_preference): # MongoDB < 4.9 cmd = SON([("count", self.__name)]) cmd.update(kwargs) - return self._count_cmd(None, sock_info, read_preference, cmd, collation=None) + return self._count_cmd(session, sock_info, read_preference, cmd, collation=None) - return self.__database.client._retryable_read(_cmd, self.read_preference, None) + return self._retryable_non_cursor_read(_cmd, None) def count_documents( self, @@ -1807,9 +1807,13 @@ def _cmd(session, server, sock_info, read_preference): return 0 return result["n"] - return self.__database.client._retryable_read( - _cmd, self._read_preference_for(session), session - ) + return self._retryable_non_cursor_read(_cmd, session) + + def _retryable_non_cursor_read(self, func, session): + """Non-cursor read helper to handle implicit session creation.""" + client = self.__database.client + with client._tmp_session(session) as s: + return client._retryable_read(func, self._read_preference_for(s), s) def create_indexes( self, @@ -2157,30 +2161,31 @@ def list_indexes( codec_options=codec_options, read_preference=ReadPreference.PRIMARY ) read_pref = (session and 
session._txn_read_preference()) or ReadPreference.PRIMARY + explicit_session = session is not None def _cmd(session, server, sock_info, read_preference): cmd = SON([("listIndexes", self.__name), ("cursor", {})]) if comment is not None: cmd["comment"] = comment - with self.__database.client._tmp_session(session, False) as s: - try: - cursor = self._command( - sock_info, cmd, read_preference, codec_options, session=s - )["cursor"] - except OperationFailure as exc: - # Ignore NamespaceNotFound errors to match the behavior - # of reading from *.system.indexes. - if exc.code != 26: - raise - cursor = {"id": 0, "firstBatch": []} + try: + cursor = self._command( + sock_info, cmd, read_preference, codec_options, session=session + )["cursor"] + except OperationFailure as exc: + # Ignore NamespaceNotFound errors to match the behavior + # of reading from *.system.indexes. + if exc.code != 26: + raise + cursor = {"id": 0, "firstBatch": []} cmd_cursor = CommandCursor( - coll, cursor, sock_info.address, session=s, explicit_session=session is not None + coll, cursor, sock_info.address, session=session, explicit_session=explicit_session ) cmd_cursor._maybe_pin_connection(sock_info) return cmd_cursor - return self.__database.client._retryable_read(_cmd, read_pref, session) + with self.__database.client._tmp_session(session, False) as s: + return self.__database.client._retryable_read(_cmd, read_pref, s) def index_information( self, @@ -2701,9 +2706,7 @@ def _cmd(session, server, sock_info, read_preference): user_fields={"values": 1}, )["values"] - return self.__database.client._retryable_read( - _cmd, self._read_preference_for(session), session - ) + return self._retryable_non_cursor_read(_cmd, session) def _write_concern_for_cmd(self, cmd, session): raw_wc = cmd.get("writeConcern") diff --git a/test/sessions/driver-sessions-dirty-session-errors.json b/test/sessions/driver-sessions-dirty-session-errors.json new file mode 100644 index 0000000000..88a9171db1 --- /dev/null +++ b/test/sessions/driver-sessions-dirty-session-errors.json @@ -0,0 +1,969 @@ +{ + "description": "driver-sessions-dirty-session-errors", + "schemaVersion": "1.0", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "replicaset" + ] + }, + { + "minServerVersion": "4.1.8", + "topologies": [ + "sharded-replicaset" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "session-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "test" + } + }, + { + "session": { + "id": "session0", + "client": "client0" + } + } + ], + "initialData": [ + { + "collectionName": "test", + "databaseName": "session-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ], + "tests": [ + { + "description": "Dirty explicit session is discarded (insert)", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "closeConnection": true + } + } + } + }, + { + "name": "assertSessionNotDirty", + "object": "testRunner", + "arguments": { + "session": "session0" + } + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "session": "session0", + "document": { + "_id": 2 + } + }, + "expectResult": { + 
"$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 2 + } + } + } + }, + { + "name": "assertSessionDirty", + "object": "testRunner", + "arguments": { + "session": "session0" + } + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "session": "session0", + "document": { + "_id": 3 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 3 + } + } + } + }, + { + "name": "assertSessionDirty", + "object": "testRunner", + "arguments": { + "session": "session0" + } + }, + { + "name": "endSession", + "object": "session0" + }, + { + "name": "find", + "object": "collection0", + "arguments": { + "filter": { + "_id": -1 + } + }, + "expectResult": [] + }, + { + "name": "assertDifferentLsidOnLastTwoCommands", + "object": "testRunner", + "arguments": { + "client": "client0" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 2 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": 1 + }, + "commandName": "insert", + "databaseName": "session-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 2 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": 1 + }, + "commandName": "insert", + "databaseName": "session-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 3 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": 2 + }, + "commandName": "insert", + "databaseName": "session-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "test", + "filter": { + "_id": -1 + }, + "lsid": { + "$$type": "object" + } + }, + "commandName": "find", + "databaseName": "session-tests" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "session-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 3 + } + ] + } + ] + }, + { + "description": "Dirty explicit session is discarded (findAndModify)", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "closeConnection": true + } + } + } + }, + { + "name": "assertSessionNotDirty", + "object": "testRunner", + "arguments": { + "session": "session0" + } + }, + { + "name": "findOneAndUpdate", + "object": "collection0", + "arguments": { + "session": "session0", + "filter": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + }, + "returnDocument": "Before" + }, + "expectResult": { + "_id": 1 + } + }, + { + "name": "assertSessionDirty", + "object": "testRunner", + "arguments": { + "session": "session0" + } + }, + { + "name": "endSession", + "object": "session0" + }, + { + "name": "find", + "object": "collection0", + "arguments": { + "filter": { + "_id": -1 + } + }, + "expectResult": [] + }, + { + "name": "assertDifferentLsidOnLastTwoCommands", + "object": "testRunner", + "arguments": { + "client": "client0" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "findAndModify": "test", + "query": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + }, + "new": false, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": 1, 
+ "readConcern": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "findAndModify", + "databaseName": "session-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "findAndModify": "test", + "query": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + }, + "new": false, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": 1, + "readConcern": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "findAndModify", + "databaseName": "session-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "test", + "filter": { + "_id": -1 + }, + "lsid": { + "$$type": "object" + } + }, + "commandName": "find", + "databaseName": "session-tests" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "session-tests", + "documents": [ + { + "_id": 1, + "x": 1 + } + ] + } + ] + }, + { + "description": "Dirty implicit session is discarded (insert)", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "closeConnection": true + } + } + } + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "session": "session0", + "document": { + "_id": 2 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 2 + } + } + } + }, + { + "name": "find", + "object": "collection0", + "arguments": { + "filter": { + "_id": -1 + } + }, + "expectResult": [] + }, + { + "name": "assertDifferentLsidOnLastTwoCommands", + "object": "testRunner", + "arguments": { + "client": "client0" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 2 + } + ], + "ordered": true, + "lsid": { + "$$type": "object" + }, + "txnNumber": 1 + }, + "commandName": "insert", + "databaseName": "session-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 2 + } + ], + "ordered": true, + "lsid": { + "$$type": "object" + }, + "txnNumber": 1 + }, + "commandName": "insert", + "databaseName": "session-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "test", + "filter": { + "_id": -1 + }, + "lsid": { + "$$type": "object" + } + }, + "commandName": "find", + "databaseName": "session-tests" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "session-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ] + }, + { + "description": "Dirty implicit session is discarded (findAndModify)", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "closeConnection": true + } + } + } + }, + { + "name": "findOneAndUpdate", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + }, + "returnDocument": "Before" + }, + "expectResult": { + "_id": 1 + } + }, + { + "name": "find", + "object": "collection0", + "arguments": { + "filter": { + "_id": -1 + } + }, + "expectResult": [] + }, + { + "name": "assertDifferentLsidOnLastTwoCommands", + "object": "testRunner", + "arguments": { + 
"client": "client0" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "findAndModify": "test", + "query": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + }, + "new": false, + "lsid": { + "$$type": "object" + }, + "txnNumber": 1, + "readConcern": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "findAndModify", + "databaseName": "session-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "findAndModify": "test", + "query": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + }, + "new": false, + "lsid": { + "$$type": "object" + }, + "txnNumber": 1, + "readConcern": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "findAndModify", + "databaseName": "session-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "test", + "filter": { + "_id": -1 + }, + "lsid": { + "$$type": "object" + } + }, + "commandName": "find", + "databaseName": "session-tests" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "session-tests", + "documents": [ + { + "_id": 1, + "x": 1 + } + ] + } + ] + }, + { + "description": "Dirty implicit session is discarded (read returning cursor)", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "closeConnection": true + } + } + } + }, + { + "name": "aggregate", + "object": "collection0", + "arguments": { + "pipeline": [ + { + "$project": { + "_id": 1 + } + } + ] + }, + "expectResult": [ + { + "_id": 1 + } + ] + }, + { + "name": "find", + "object": "collection0", + "arguments": { + "filter": { + "_id": -1 + } + }, + "expectResult": [] + }, + { + "name": "assertDifferentLsidOnLastTwoCommands", + "object": "testRunner", + "arguments": { + "client": "client0" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "test", + "pipeline": [ + { + "$project": { + "_id": 1 + } + } + ], + "lsid": { + "$$type": "object" + } + }, + "commandName": "aggregate", + "databaseName": "session-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "test", + "pipeline": [ + { + "$project": { + "_id": 1 + } + } + ], + "lsid": { + "$$type": "object" + } + }, + "commandName": "aggregate", + "databaseName": "session-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "test", + "filter": { + "_id": -1 + }, + "lsid": { + "$$type": "object" + } + }, + "commandName": "find", + "databaseName": "session-tests" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "session-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ] + }, + { + "description": "Dirty implicit session is discarded (read not returning cursor)", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "closeConnection": true + } + } + } + }, + { + "name": "countDocuments", + "object": "collection0", + "arguments": { + "filter": {} + }, + "expectResult": 1 + }, + { + "name": "find", + "object": "collection0", + "arguments": { + "filter": { + "_id": -1 + } + }, + 
"expectResult": [] + }, + { + "name": "assertDifferentLsidOnLastTwoCommands", + "object": "testRunner", + "arguments": { + "client": "client0" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "test", + "pipeline": [ + { + "$match": {} + }, + { + "$group": { + "_id": 1, + "n": { + "$sum": 1 + } + } + } + ], + "lsid": { + "$$type": "object" + } + }, + "commandName": "aggregate", + "databaseName": "session-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "test", + "pipeline": [ + { + "$match": {} + }, + { + "$group": { + "_id": 1, + "n": { + "$sum": 1 + } + } + } + ], + "lsid": { + "$$type": "object" + } + }, + "commandName": "aggregate", + "databaseName": "session-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "test", + "filter": { + "_id": -1 + }, + "lsid": { + "$$type": "object" + } + }, + "commandName": "find", + "databaseName": "session-tests" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "session-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ] + } + ] +} diff --git a/test/sessions/driver-sessions-server-support.json b/test/sessions/driver-sessions-server-support.json new file mode 100644 index 0000000000..55312b32eb --- /dev/null +++ b/test/sessions/driver-sessions-server-support.json @@ -0,0 +1,256 @@ +{ + "description": "driver-sessions-server-support", + "schemaVersion": "1.0", + "runOnRequirements": [ + { + "minServerVersion": "3.6" + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "session-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "test" + } + }, + { + "session": { + "id": "session0", + "client": "client0" + } + } + ], + "initialData": [ + { + "collectionName": "test", + "databaseName": "session-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ], + "tests": [ + { + "description": "Server supports explicit sessions", + "operations": [ + { + "name": "assertSessionNotDirty", + "object": "testRunner", + "arguments": { + "session": "session0" + } + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "session": "session0", + "document": { + "_id": 2 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 2 + } + } + } + }, + { + "name": "assertSessionNotDirty", + "object": "testRunner", + "arguments": { + "session": "session0" + } + }, + { + "name": "endSession", + "object": "session0" + }, + { + "name": "find", + "object": "collection0", + "arguments": { + "filter": { + "_id": -1 + } + }, + "expectResult": [] + }, + { + "name": "assertSameLsidOnLastTwoCommands", + "object": "testRunner", + "arguments": { + "client": "client0" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 2 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session0" + } + }, + "commandName": "insert", + "databaseName": "session-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "test", + "filter": { + "_id": -1 + }, + "lsid": { + "$$sessionLsid": "session0" + } + }, + "commandName": "find", + "databaseName": "session-tests" + } + } + ] + } + ], + "outcome": [ + { + 
"collectionName": "test", + "databaseName": "session-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ] + }, + { + "description": "Server supports implicit sessions", + "operations": [ + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "_id": 2 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 2 + } + } + } + }, + { + "name": "find", + "object": "collection0", + "arguments": { + "filter": { + "_id": -1 + } + }, + "expectResult": [] + }, + { + "name": "assertSameLsidOnLastTwoCommands", + "object": "testRunner", + "arguments": { + "client": "client0" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 2 + } + ], + "ordered": true, + "lsid": { + "$$type": "object" + } + }, + "commandName": "insert", + "databaseName": "session-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "test", + "filter": { + "_id": -1 + }, + "lsid": { + "$$type": "object" + } + }, + "commandName": "find", + "databaseName": "session-tests" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "session-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ] + } + ] +} diff --git a/test/sessions/legacy/dirty-session-errors.json b/test/sessions/legacy/dirty-session-errors.json deleted file mode 100644 index 77f71c7623..0000000000 --- a/test/sessions/legacy/dirty-session-errors.json +++ /dev/null @@ -1,671 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "4.0", - "topology": [ - "replicaset" - ] - }, - { - "minServerVersion": "4.1.8", - "topology": [ - "sharded" - ] - } - ], - "database_name": "session-tests", - "collection_name": "test", - "data": [ - { - "_id": 1 - } - ], - "tests": [ - { - "description": "Dirty explicit session is discarded", - "clientOptions": { - "retryWrites": true - }, - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "insert" - ], - "closeConnection": true - } - }, - "operations": [ - { - "name": "assertSessionNotDirty", - "object": "testRunner", - "arguments": { - "session": "session0" - } - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 2 - } - }, - "result": { - "insertedId": 2 - } - }, - { - "name": "assertSessionDirty", - "object": "testRunner", - "arguments": { - "session": "session0" - } - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 3 - } - }, - "result": { - "insertedId": 3 - } - }, - { - "name": "assertSessionDirty", - "object": "testRunner", - "arguments": { - "session": "session0" - } - }, - { - "name": "endSession", - "object": "session0" - }, - { - "name": "find", - "object": "collection", - "arguments": { - "filter": { - "_id": -1 - } - }, - "result": [] - }, - { - "name": "assertDifferentLsidOnLastTwoCommands", - "object": "testRunner" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 2 - } - ], - "ordered": true, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - } - }, - "command_name": "insert", - "database_name": "session-tests" - } - }, - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 2 - } - ], - "ordered": true, - "lsid": 
"session0", - "txnNumber": { - "$numberLong": "1" - } - }, - "command_name": "insert", - "database_name": "session-tests" - } - }, - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 3 - } - ], - "ordered": true, - "lsid": "session0", - "txnNumber": { - "$numberLong": "2" - } - }, - "command_name": "insert", - "database_name": "session-tests" - } - }, - { - "command_started_event": { - "command": { - "find": "test", - "filter": { - "_id": -1 - } - }, - "command_name": "find", - "database_name": "session-tests" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - }, - { - "_id": 2 - }, - { - "_id": 3 - } - ] - } - } - }, - { - "description": "Dirty explicit session is discarded (non-bulk write)", - "clientOptions": { - "retryWrites": true - }, - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "findAndModify" - ], - "closeConnection": true - } - }, - "operations": [ - { - "name": "assertSessionNotDirty", - "object": "testRunner", - "arguments": { - "session": "session0" - } - }, - { - "name": "findOneAndUpdate", - "object": "collection", - "arguments": { - "session": "session0", - "filter": { - "_id": 1 - }, - "update": { - "$inc": { - "x": 1 - } - }, - "returnDocument": "Before" - }, - "result": { - "_id": 1 - } - }, - { - "name": "assertSessionDirty", - "object": "testRunner", - "arguments": { - "session": "session0" - } - }, - { - "name": "endSession", - "object": "session0" - }, - { - "name": "find", - "object": "collection", - "arguments": { - "filter": { - "_id": -1 - } - }, - "result": [] - }, - { - "name": "assertDifferentLsidOnLastTwoCommands", - "object": "testRunner" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "findAndModify": "test", - "query": { - "_id": 1 - }, - "update": { - "$inc": { - "x": 1 - } - }, - "new": false, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "readConcern": null, - "writeConcern": null - }, - "command_name": "findAndModify", - "database_name": "session-tests" - } - }, - { - "command_started_event": { - "command": { - "findAndModify": "test", - "query": { - "_id": 1 - }, - "update": { - "$inc": { - "x": 1 - } - }, - "new": false, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "readConcern": null, - "writeConcern": null - }, - "command_name": "findAndModify", - "database_name": "session-tests" - } - }, - { - "command_started_event": { - "command": { - "find": "test", - "filter": { - "_id": -1 - } - }, - "command_name": "find", - "database_name": "session-tests" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1, - "x": 1 - } - ] - } - } - }, - { - "description": "Dirty implicit session is discarded (write)", - "clientOptions": { - "retryWrites": true - }, - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "insert" - ], - "closeConnection": true - } - }, - "operations": [ - { - "name": "insertOne", - "object": "collection", - "arguments": { - "document": { - "_id": 2 - } - }, - "result": { - "insertedId": 2 - } - }, - { - "name": "find", - "object": "collection", - "arguments": { - "filter": { - "_id": -1 - } - }, - "result": [] - }, - { - "name": "assertDifferentLsidOnLastTwoCommands", - "object": "testRunner" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 2 - } - ], - "ordered": 
true, - "txnNumber": { - "$numberLong": "1" - } - }, - "command_name": "insert", - "database_name": "session-tests" - } - }, - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 2 - } - ], - "ordered": true, - "txnNumber": { - "$numberLong": "1" - } - }, - "command_name": "insert", - "database_name": "session-tests" - } - }, - { - "command_started_event": { - "command": { - "find": "test", - "filter": { - "_id": -1 - } - }, - "command_name": "find", - "database_name": "session-tests" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - }, - { - "_id": 2 - } - ] - } - } - }, - { - "description": "Dirty implicit session is discarded (non-bulk write)", - "clientOptions": { - "retryWrites": true - }, - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "findAndModify" - ], - "closeConnection": true - } - }, - "operations": [ - { - "name": "findOneAndUpdate", - "object": "collection", - "arguments": { - "filter": { - "_id": 1 - }, - "update": { - "$inc": { - "x": 1 - } - }, - "returnDocument": "Before" - }, - "result": { - "_id": 1 - } - }, - { - "name": "find", - "object": "collection", - "arguments": { - "filter": { - "_id": -1 - } - }, - "result": [] - }, - { - "name": "assertDifferentLsidOnLastTwoCommands", - "object": "testRunner" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "findAndModify": "test", - "query": { - "_id": 1 - }, - "update": { - "$inc": { - "x": 1 - } - }, - "new": false, - "txnNumber": { - "$numberLong": "1" - }, - "readConcern": null, - "writeConcern": null - }, - "command_name": "findAndModify", - "database_name": "session-tests" - } - }, - { - "command_started_event": { - "command": { - "findAndModify": "test", - "query": { - "_id": 1 - }, - "update": { - "$inc": { - "x": 1 - } - }, - "new": false, - "txnNumber": { - "$numberLong": "1" - }, - "readConcern": null, - "writeConcern": null - }, - "command_name": "findAndModify", - "database_name": "session-tests" - } - }, - { - "command_started_event": { - "command": { - "find": "test", - "filter": { - "_id": -1 - } - }, - "command_name": "find", - "database_name": "session-tests" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1, - "x": 1 - } - ] - } - } - }, - { - "description": "Dirty implicit session is discarded (read)", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 2 - }, - "data": { - "failCommands": [ - "aggregate" - ], - "closeConnection": true - } - }, - "operations": [ - { - "name": "aggregate", - "object": "collection", - "arguments": { - "pipeline": [ - { - "$project": { - "_id": 1 - } - } - ] - }, - "error": true - }, - { - "name": "find", - "object": "collection", - "arguments": { - "filter": { - "_id": -1 - } - }, - "result": [] - }, - { - "name": "assertDifferentLsidOnLastTwoCommands", - "object": "testRunner" - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - } - ] - } - } - }, - { - "description": "Dirty implicit session is discarded (non-cursor returning read)", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 2 - }, - "data": { - "failCommands": [ - "aggregate" - ], - "closeConnection": true - } - }, - "operations": [ - { - "name": "countDocuments", - "object": "collection", - "arguments": { - "filter": {} - }, - "error": true - }, - { - "name": "find", - "object": "collection", - "arguments": { - "filter": { - "_id": -1 - } - }, - 
"result": [] - }, - { - "name": "assertDifferentLsidOnLastTwoCommands", - "object": "testRunner" - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - } - ] - } - } - } - ] -} diff --git a/test/sessions/legacy/server-support.json b/test/sessions/legacy/server-support.json deleted file mode 100644 index 967c9143fd..0000000000 --- a/test/sessions/legacy/server-support.json +++ /dev/null @@ -1,181 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "3.6.0" - } - ], - "database_name": "session-tests", - "collection_name": "test", - "data": [ - { - "_id": 1 - } - ], - "tests": [ - { - "description": "Server supports explicit sessions", - "operations": [ - { - "name": "assertSessionNotDirty", - "object": "testRunner", - "arguments": { - "session": "session0" - } - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 2 - } - }, - "result": { - "insertedId": 2 - } - }, - { - "name": "assertSessionNotDirty", - "object": "testRunner", - "arguments": { - "session": "session0" - } - }, - { - "name": "endSession", - "object": "session0" - }, - { - "name": "find", - "object": "collection", - "arguments": { - "filter": { - "_id": -1 - } - }, - "result": [] - }, - { - "name": "assertSameLsidOnLastTwoCommands", - "object": "testRunner" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 2 - } - ], - "ordered": true, - "lsid": "session0" - }, - "command_name": "insert", - "database_name": "session-tests" - } - }, - { - "command_started_event": { - "command": { - "find": "test", - "filter": { - "_id": -1 - }, - "lsid": "session0" - }, - "command_name": "find", - "database_name": "session-tests" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - }, - { - "_id": 2 - } - ] - } - } - }, - { - "description": "Server supports implicit sessions", - "operations": [ - { - "name": "insertOne", - "object": "collection", - "arguments": { - "document": { - "_id": 2 - } - }, - "result": { - "insertedId": 2 - } - }, - { - "name": "find", - "object": "collection", - "arguments": { - "filter": { - "_id": -1 - } - }, - "result": [] - }, - { - "name": "assertSameLsidOnLastTwoCommands", - "object": "testRunner" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 2 - } - ], - "ordered": true - }, - "command_name": "insert", - "database_name": "session-tests" - } - }, - { - "command_started_event": { - "command": { - "find": "test", - "filter": { - "_id": -1 - } - }, - "command_name": "find", - "database_name": "session-tests" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - }, - { - "_id": 2 - } - ] - } - } - } - ] -} diff --git a/test/sessions/unified/snapshot-sessions-not-supported-client-error.json b/test/sessions/snapshot-sessions-not-supported-client-error.json similarity index 100% rename from test/sessions/unified/snapshot-sessions-not-supported-client-error.json rename to test/sessions/snapshot-sessions-not-supported-client-error.json diff --git a/test/sessions/unified/snapshot-sessions-not-supported-server-error.json b/test/sessions/snapshot-sessions-not-supported-server-error.json similarity index 100% rename from test/sessions/unified/snapshot-sessions-not-supported-server-error.json rename to test/sessions/snapshot-sessions-not-supported-server-error.json diff --git a/test/sessions/unified/snapshot-sessions-unsupported-ops.json 
b/test/sessions/snapshot-sessions-unsupported-ops.json similarity index 100% rename from test/sessions/unified/snapshot-sessions-unsupported-ops.json rename to test/sessions/snapshot-sessions-unsupported-ops.json diff --git a/test/sessions/unified/snapshot-sessions.json b/test/sessions/snapshot-sessions.json similarity index 100% rename from test/sessions/unified/snapshot-sessions.json rename to test/sessions/snapshot-sessions.json diff --git a/test/test_session.py b/test/test_session.py index b7aa65a19d..ec39bb2411 100644 --- a/test/test_session.py +++ b/test/test_session.py @@ -15,7 +15,6 @@ """Test the client_session module.""" import copy -import os import sys import time from io import BytesIO @@ -26,8 +25,7 @@ sys.path[0:0] = [""] from test import IntegrationTest, SkipTest, client_context, unittest -from test.utils import EventListener, TestCreator, rs_or_single_client, wait_until -from test.utils_spec_runner import SpecRunner +from test.utils import EventListener, rs_or_single_client, wait_until from bson import DBRef from gridfs import GridFS, GridFSBucket @@ -1095,54 +1093,5 @@ def insert_and_aggregate(): ) -class TestSpec(SpecRunner): - RUN_ON_SERVERLESS = True - # Location of JSON test specifications. - TEST_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "sessions", "legacy") - - def last_two_command_events(self): - """Return the last two command started events.""" - started_events = self.listener.results["started"][-2:] - self.assertEqual(2, len(started_events)) - return started_events - - def assert_same_lsid_on_last_two_commands(self): - """Run the assertSameLsidOnLastTwoCommands test operation.""" - event1, event2 = self.last_two_command_events() - self.assertEqual(event1.command["lsid"], event2.command["lsid"]) - - def assert_different_lsid_on_last_two_commands(self): - """Run the assertDifferentLsidOnLastTwoCommands test operation.""" - event1, event2 = self.last_two_command_events() - self.assertNotEqual(event1.command["lsid"], event2.command["lsid"]) - - def assert_session_dirty(self, session): - """Run the assertSessionDirty test operation. - - Assert that the given session is dirty. - """ - self.assertIsNotNone(session._server_session) - self.assertTrue(session._server_session.dirty) - - def assert_session_not_dirty(self, session): - """Run the assertSessionNotDirty test operation. - - Assert that the given session is not dirty. - """ - self.assertIsNotNone(session._server_session) - self.assertFalse(session._server_session.dirty) - - -def create_test(scenario_def, test, name): - @client_context.require_test_commands - def run_scenario(self): - self.run_scenario(scenario_def, test) - - return run_scenario - - -test_creator = TestCreator(create_test, TestSpec, TestSpec.TEST_PATH) -test_creator.create_tests() - if __name__ == "__main__": unittest.main() diff --git a/test/test_sessions_unified.py b/test/test_sessions_unified.py index 2320d52718..8a6b8bc9bf 100644 --- a/test/test_sessions_unified.py +++ b/test/test_sessions_unified.py @@ -23,7 +23,7 @@ from test.unified_format import generate_test_classes # Location of JSON test specifications. -TEST_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "sessions", "unified") +TEST_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "sessions") # Generate unified tests. 
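
The converted session specs assert lsid reuse with the assertSameLsidOnLastTwoCommands and assertDifferentLsidOnLastTwoCommands runner operations. Outside the test runner, the same check can be made with PyMongo's public monitoring API; a sketch, assuming a reachable server (connection details are illustrative):

from pymongo import MongoClient, monitoring

class LsidRecorder(monitoring.CommandListener):
    """Record the lsid of every command started by the client."""

    def __init__(self):
        self.lsids = []

    def started(self, event):
        lsid = event.command.get("lsid")
        if lsid is not None:
            self.lsids.append(lsid["id"])

    def succeeded(self, event):
        pass

    def failed(self, event):
        pass

recorder = LsidRecorder()
client = MongoClient(event_listeners=[recorder])
coll = client["session-tests"]["test"]
coll.insert_one({"_id": 1})
coll.find_one({"_id": -1})
# With no network errors, the implicit server session is returned to the
# pool and reused, so the last two commands share an lsid; a dirty session
# would have been discarded and the lsids would differ.
assert recorder.lsids[-2] == recorder.lsids[-1]
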
globals().update(generate_test_classes(TEST_PATH, module=__name__)) From 0672d2d1c3092cacdaa695151a8c6a306c9d60d5 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Mon, 28 Feb 2022 17:02:53 -0600 Subject: [PATCH 0090/1588] PYTHON-3141 Add slotscheck to pre-commit checks (#890) --- .pre-commit-config.yaml | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 5c1e92f5b7..2fc5100787 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -34,6 +34,7 @@ repos: rev: 3.9.2 hooks: - id: flake8 + files: \.py$ additional_dependencies: [ 'flake8-bugbear==20.1.4', 'flake8-logging-format==0.6.0', @@ -57,3 +58,11 @@ repos: files: ^\.github/workflows/ types: [yaml] args: ["--schemafile", "https://json.schemastore.org/github-workflow"] + +- repo: https://github.com/ariebovenberg/slotscheck + rev: v0.14.0 + hooks: + - id: slotscheck + files: \.py$ + exclude: "^(test|tools)/" + stages: [manual] From 782c5517e09a532de4c2f68d7776b0caed2062cb Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Tue, 1 Mar 2022 14:10:57 -0600 Subject: [PATCH 0091/1588] PYTHON-3146 Test Failure - Could not import extension sphinxcontrib.shellcheck (#889) --- doc/conf.py | 11 ++++++++++- doc/examples/bulk.rst | 2 +- doc/examples/geo.rst | 2 +- doc/faq.rst | 7 +------ 4 files changed, 13 insertions(+), 9 deletions(-) diff --git a/doc/conf.py b/doc/conf.py index c2f97dabfe..714e6121d4 100644 --- a/doc/conf.py +++ b/doc/conf.py @@ -21,9 +21,18 @@ "sphinx.ext.coverage", "sphinx.ext.todo", "sphinx.ext.intersphinx", - "sphinxcontrib.shellcheck", ] + +# Add optional extensions +try: + import sphinxcontrib.shellcheck # noqa + + extensions += ["sphinxcontrib.shellcheck"] +except ImportError: + pass + + # Add any paths that contain templates here, relative to this directory. templates_path = ["_templates"] diff --git a/doc/examples/bulk.rst b/doc/examples/bulk.rst index 23505268f0..23367dd2c5 100644 --- a/doc/examples/bulk.rst +++ b/doc/examples/bulk.rst @@ -145,7 +145,7 @@ and fourth operations succeed. 'index': 0,... 'op': {'_id': 1}}, {'code': 11000, - 'errmsg': '...E11000...duplicate key error...', + 'errmsg': '...', 'index': 2,... 'op': {'_id': 3}}]} diff --git a/doc/examples/geo.rst b/doc/examples/geo.rst index 9fe62f910b..2234a20757 100644 --- a/doc/examples/geo.rst +++ b/doc/examples/geo.rst @@ -36,7 +36,7 @@ insert a couple of example locations: >>> result = db.places.insert_many([{"loc": [2, 5]}, ... {"loc": [30, 5]}, ... {"loc": [1, 2]}, - ... {"loc": [4, 4]}]) # doctest: +ELLIPSIS + ... 
{"loc": [4, 4]}]) >>> result.inserted_ids [ObjectId('...'), ObjectId('...'), ObjectId('...'), ObjectId('...')] diff --git a/doc/faq.rst b/doc/faq.rst index c2a6fc7f7f..44c1c9a981 100644 --- a/doc/faq.rst +++ b/doc/faq.rst @@ -264,12 +264,7 @@ collection, configured to use :class:`~bson.son.SON` instead of dict: >>> from bson import CodecOptions, SON >>> opts = CodecOptions(document_class=SON) >>> opts - CodecOptions(document_class=, - tz_aware=False, - uuid_representation=UuidRepresentation.UNSPECIFIED, - unicode_decode_error_handler='strict', - tzinfo=None, type_registry=TypeRegistry(type_codecs=[], - fallback_encoder=None)) + CodecOptions(document_class=...SON..., tz_aware=False, uuid_representation=UuidRepresentation.UNSPECIFIED, unicode_decode_error_handler='strict', tzinfo=None, type_registry=TypeRegistry(type_codecs=[], fallback_encoder=None)) >>> collection_son = collection.with_options(codec_options=opts) Now, documents and subdocuments in query results are represented with From b737b843e974d9524fdbfbc8d18e0004b7743715 Mon Sep 17 00:00:00 2001 From: Julius Park Date: Tue, 1 Mar 2022 15:44:05 -0800 Subject: [PATCH 0092/1588] PYTHON-2956 Drivers should check out an implicit session only after checking out a connection (#876) --- pymongo/client_session.py | 23 ++++++++++++- pymongo/mongo_client.py | 9 ++++- pymongo/topology.py | 16 +++++---- test/test_session.py | 69 +++++++++++++++++++++++++++++++++++++-- 4 files changed, 106 insertions(+), 11 deletions(-) diff --git a/pymongo/client_session.py b/pymongo/client_session.py index 4cf41b2c70..20d36fb062 100644 --- a/pymongo/client_session.py +++ b/pymongo/client_session.py @@ -947,9 +947,16 @@ def _txn_read_preference(self): return self._transaction.opts.read_preference return None + def _materialize(self): + if isinstance(self._server_session, _EmptyServerSession): + old = self._server_session + self._server_session = self._client._topology.get_server_session() + if old.started_retryable_write: + self._server_session.inc_transaction_id() + def _apply_to(self, command, is_retryable, read_preference, sock_info): self._check_ended() - + self._materialize() if self.options.snapshot: self._update_read_concern(command, sock_info) @@ -1000,6 +1007,20 @@ def __copy__(self): raise TypeError("A ClientSession cannot be copied, create a new session instead") +class _EmptyServerSession: + __slots__ = "dirty", "started_retryable_write" + + def __init__(self): + self.dirty = False + self.started_retryable_write = False + + def mark_dirty(self): + self.dirty = True + + def inc_transaction_id(self): + self.started_retryable_write = True + + class _ServerSession(object): def __init__(self, generation): # Ensure id is type 4, regardless of CodecOptions.uuid_representation. diff --git a/pymongo/mongo_client.py b/pymongo/mongo_client.py index 4965b5e439..4ac4a5ba8f 100644 --- a/pymongo/mongo_client.py +++ b/pymongo/mongo_client.py @@ -66,6 +66,7 @@ ) from pymongo.change_stream import ChangeStream, ClusterChangeStream from pymongo.client_options import ClientOptions +from pymongo.client_session import _EmptyServerSession from pymongo.command_cursor import CommandCursor from pymongo.errors import ( AutoReconnect, @@ -1601,7 +1602,11 @@ def _process_periodic_tasks(self): def __start_session(self, implicit, **kwargs): # Raises ConfigurationError if sessions are not supported. 
- server_session = self._get_server_session() + if implicit: + self._topology._check_implicit_session_support() + server_session = _EmptyServerSession() + else: + server_session = self._get_server_session() opts = client_session.SessionOptions(**kwargs) return client_session.ClientSession(self, server_session, opts, implicit) @@ -1641,6 +1646,8 @@ def _get_server_session(self): def _return_server_session(self, server_session, lock): """Internal: return a _ServerSession to the pool.""" + if isinstance(server_session, _EmptyServerSession): + return return self._topology.return_server_session(server_session, lock) def _ensure_session(self, session=None): diff --git a/pymongo/topology.py b/pymongo/topology.py index 6134b8201b..03e0d4ee17 100644 --- a/pymongo/topology.py +++ b/pymongo/topology.py @@ -514,8 +514,15 @@ def pop_all_sessions(self): with self._lock: return self._session_pool.pop_all() + def _check_implicit_session_support(self): + with self._lock: + self._check_session_support() + def _check_session_support(self): - """Internal check for session support on non-load balanced clusters.""" + """Internal check for session support on clusters.""" + if self._settings.load_balanced: + # Sessions never time out in load balanced mode. + return float("inf") session_timeout = self._description.logical_session_timeout_minutes if session_timeout is None: # Maybe we need an initial scan? Can raise ServerSelectionError. @@ -537,12 +544,7 @@ def _check_session_support(self): def get_server_session(self): """Start or resume a server session, or raise ConfigurationError.""" with self._lock: - # Sessions are always supported in load balanced mode. - if not self._settings.load_balanced: - session_timeout = self._check_session_support() - else: - # Sessions never time out in load balanced mode. - session_timeout = float("inf") + session_timeout = self._check_session_support() return self._session_pool.get_server_session(session_timeout) def return_server_session(self, server_session, lock): diff --git a/test/test_session.py b/test/test_session.py index ec39bb2411..53609c70cb 100644 --- a/test/test_session.py +++ b/test/test_session.py @@ -18,20 +18,28 @@ import sys import time from io import BytesIO -from typing import Set +from typing import Any, Callable, List, Set, Tuple from pymongo.mongo_client import MongoClient sys.path[0:0] = [""] from test import IntegrationTest, SkipTest, client_context, unittest -from test.utils import EventListener, rs_or_single_client, wait_until +from test.utils import ( + EventListener, + ExceptionCatchingThread, + rs_or_single_client, + wait_until, +) from bson import DBRef from gridfs import GridFS, GridFSBucket from pymongo import ASCENDING, IndexModel, InsertOne, monitoring +from pymongo.command_cursor import CommandCursor from pymongo.common import _MAX_END_SESSIONS +from pymongo.cursor import Cursor from pymongo.errors import ConfigurationError, InvalidOperation, OperationFailure +from pymongo.operations import UpdateOne from pymongo.read_concern import ReadConcern @@ -171,6 +179,63 @@ def _test_ops(self, client, *ops): "%s did not return implicit session to pool" % (f.__name__,), ) + def test_implicit_sessions_checkout(self): + # "To confirm that implicit sessions only allocate their server session after a + # successful connection checkout" test from Driver Sessions Spec. 
+ succeeded = False + failures = 0 + for _ in range(5): + listener = EventListener() + client = rs_or_single_client( + event_listeners=[listener], maxPoolSize=1, retryWrites=True + ) + cursor = client.db.test.find({}) + ops: List[Tuple[Callable, List[Any]]] = [ + (client.db.test.find_one, [{"_id": 1}]), + (client.db.test.delete_one, [{}]), + (client.db.test.update_one, [{}, {"$set": {"x": 2}}]), + (client.db.test.bulk_write, [[UpdateOne({}, {"$set": {"x": 2}})]]), + (client.db.test.find_one_and_delete, [{}]), + (client.db.test.find_one_and_update, [{}, {"$set": {"x": 1}}]), + (client.db.test.find_one_and_replace, [{}, {}]), + (client.db.test.aggregate, [[{"$limit": 1}]]), + (client.db.test.find, []), + (client.server_info, [{}]), + (client.db.aggregate, [[{"$listLocalSessions": {}}, {"$limit": 1}]]), + (cursor.distinct, ["_id"]), + (client.db.list_collections, []), + ] + threads = [] + listener.results.clear() + + def thread_target(op, *args): + res = op(*args) + if isinstance(res, (Cursor, CommandCursor)): + list(res) + + for op, args in ops: + threads.append( + ExceptionCatchingThread( + target=thread_target, args=[op, *args], name=op.__name__ + ) + ) + threads[-1].start() + self.assertEqual(len(threads), len(ops)) + for thread in threads: + thread.join() + self.assertIsNone(thread.exc) + client.close() + lsid_set = set() + for i in listener.results["started"]: + if i.command.get("lsid"): + lsid_set.add(i.command.get("lsid")["id"]) + if len(lsid_set) == 1: + succeeded = True + else: + failures += 1 + print(failures) + self.assertTrue(succeeded) + def test_pool_lifo(self): # "Pool is LIFO" test from Driver Sessions Spec. a = self.client.start_session() From a61ea0660a0fa2aa9e6f67384e88ed60df803229 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Wed, 2 Mar 2022 13:10:15 -0600 Subject: [PATCH 0093/1588] PYTHON-3090 Clean up Database Command Typing (#879) --- .github/workflows/test-python.yml | 2 +- bson/codec_options.py | 3 +- mypy.ini | 5 +- pymongo/database.py | 7 +-- pymongo/encryption.py | 2 +- pymongo/mongo_client.py | 13 +++-- pymongo/typings.py | 6 +-- test/mypy_fails/raw_bson_document.py | 13 +++++ test/mypy_fails/typedict_client.py | 18 +++++++ test/test_client.py | 2 +- test/test_mypy.py | 81 +++++++++++++++++++++++++++- 11 files changed, 131 insertions(+), 21 deletions(-) create mode 100644 test/mypy_fails/raw_bson_document.py create mode 100644 test/mypy_fails/typedict_client.py diff --git a/.github/workflows/test-python.yml b/.github/workflows/test-python.yml index 046915b04a..5fda9b8817 100644 --- a/.github/workflows/test-python.yml +++ b/.github/workflows/test-python.yml @@ -64,4 +64,4 @@ jobs: - name: Run mypy run: | mypy --install-types --non-interactive bson gridfs tools pymongo - mypy --install-types --non-interactive --disable-error-code var-annotated --disable-error-code attr-defined --disable-error-code union-attr --disable-error-code assignment --disable-error-code no-redef --disable-error-code index --exclude "test/mypy_fails/*.*" test + mypy --install-types --non-interactive --disable-error-code var-annotated --disable-error-code attr-defined --disable-error-code union-attr --disable-error-code assignment --disable-error-code no-redef --disable-error-code index --allow-redefinition --allow-untyped-globals --exclude "test/mypy_fails/*.*" test diff --git a/bson/codec_options.py b/bson/codec_options.py index 8e5f97df30..b4436dfdb8 100644 --- a/bson/codec_options.py +++ b/bson/codec_options.py @@ -115,6 +115,7 @@ class TypeCodec(TypeEncoder, TypeDecoder): _Codec = 
Union[TypeEncoder, TypeDecoder, TypeCodec] _Fallback = Callable[[Any], Any] +_DocumentClass = Union[Type[MutableMapping], Type["RawBSONDocument"]] class TypeRegistry(object): @@ -293,7 +294,7 @@ class CodecOptions(_options_base): def __new__( cls: Type["CodecOptions"], - document_class: Union[Type[MutableMapping], Type["RawBSONDocument"]] = dict, + document_class: _DocumentClass = dict, tz_aware: bool = False, uuid_representation: Optional[int] = UuidRepresentation.UNSPECIFIED, unicode_decode_error_handler: Optional[str] = "strict", diff --git a/mypy.ini b/mypy.ini index 91b1121cd5..9b1348472c 100644 --- a/mypy.ini +++ b/mypy.ini @@ -32,9 +32,8 @@ ignore_missing_imports = True [mypy-snappy.*] ignore_missing_imports = True -[mypy-test.*] -allow_redefinition = true -allow_untyped_globals = true +[mypy-test.test_mypy] +warn_unused_ignores = false [mypy-winkerberos.*] ignore_missing_imports = True diff --git a/pymongo/database.py b/pymongo/database.py index f43f18d017..c2f2eb4bc0 100644 --- a/pymongo/database.py +++ b/pymongo/database.py @@ -24,6 +24,7 @@ Optional, Sequence, Union, + cast, ) from bson.codec_options import DEFAULT_CODEC_OPTIONS, CodecOptions @@ -37,7 +38,7 @@ from pymongo.command_cursor import CommandCursor from pymongo.errors import CollectionInvalid, InvalidName from pymongo.read_preferences import ReadPreference, _ServerMode -from pymongo.typings import _CollationIn, _DocumentType, _Pipeline +from pymongo.typings import _CollationIn, _DocumentOut, _DocumentType, _Pipeline def _check_name(name): @@ -620,7 +621,7 @@ def command( session: Optional["ClientSession"] = None, comment: Optional[Any] = None, **kwargs: Any, - ) -> Dict[str, Any]: + ) -> _DocumentOut: """Issue a MongoDB command. Send command `command` to the database and return the @@ -974,7 +975,7 @@ def validate_collection( if background is not None: cmd["background"] = background - result = self.command(cmd, session=session) + result = cast(dict, self.command(cmd, session=session)) valid = True # Pre 1.9 results diff --git a/pymongo/encryption.py b/pymongo/encryption.py index 9616ac89cd..502c83e47b 100644 --- a/pymongo/encryption.py +++ b/pymongo/encryption.py @@ -290,7 +290,7 @@ def _get_internal_client(encrypter, mongo_client): db, coll = opts._key_vault_namespace.split(".", 1) key_vault_coll = key_vault_client[db][coll] - mongocryptd_client = MongoClient( + mongocryptd_client: MongoClient = MongoClient( opts._mongocryptd_uri, connect=False, serverSelectionTimeoutMS=_MONGOCRYPTD_TIMEOUT_MS ) diff --git a/pymongo/mongo_client.py b/pymongo/mongo_client.py index 4ac4a5ba8f..cd9067f463 100644 --- a/pymongo/mongo_client.py +++ b/pymongo/mongo_client.py @@ -120,7 +120,7 @@ def __init__( self, host: Optional[Union[str, Sequence[str]]] = None, port: Optional[int] = None, - document_class: Type[_DocumentType] = dict, + document_class: Optional[Type[_DocumentType]] = None, tz_aware: Optional[bool] = None, connect: Optional[bool] = None, type_registry: Optional[TypeRegistry] = None, @@ -652,7 +652,7 @@ def __init__( self.__init_kwargs: Dict[str, Any] = { "host": host, "port": port, - "document_class": document_class, + "document_class": document_class or dict, "tz_aware": tz_aware, "connect": connect, "type_registry": type_registry, @@ -676,7 +676,7 @@ def __init__( # Parse options passed as kwargs. 
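+        # Note: document_class now defaults to None rather than dict,
+        # presumably so that an annotation such as MongoClient[Movie] can
+        # bind the _DocumentType TypeVar; ``document_class or dict`` below
+        # and in __init_kwargs (earlier in this patch) restores the old
+        # runtime default.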
keyword_opts = common._CaseInsensitiveDictionary(kwargs) - keyword_opts["document_class"] = document_class + keyword_opts["document_class"] = document_class or dict seeds = set() username = None @@ -1717,8 +1717,11 @@ def server_info(self, session: Optional[client_session.ClientSession] = None) -> .. versionchanged:: 3.6 Added ``session`` parameter. """ - return self.admin.command( - "buildinfo", read_preference=ReadPreference.PRIMARY, session=session + return cast( + dict, + self.admin.command( + "buildinfo", read_preference=ReadPreference.PRIMARY, session=session + ), ) def list_databases( diff --git a/pymongo/typings.py b/pymongo/typings.py index 19d92b2381..14e059a8f0 100644 --- a/pymongo/typings.py +++ b/pymongo/typings.py @@ -16,7 +16,6 @@ from typing import ( TYPE_CHECKING, Any, - Dict, Mapping, MutableMapping, Optional, @@ -36,6 +35,5 @@ _CollationIn = Union[Mapping[str, Any], "Collation"] _DocumentIn = Union[MutableMapping[str, Any], "RawBSONDocument"] _Pipeline = Sequence[Mapping[str, Any]] -_DocumentType = TypeVar( - "_DocumentType", Mapping[str, Any], MutableMapping[str, Any], Dict[str, Any] -) +_DocumentOut = _DocumentIn +_DocumentType = TypeVar("_DocumentType", bound=Mapping[str, Any]) diff --git a/test/mypy_fails/raw_bson_document.py b/test/mypy_fails/raw_bson_document.py new file mode 100644 index 0000000000..427140dfac --- /dev/null +++ b/test/mypy_fails/raw_bson_document.py @@ -0,0 +1,13 @@ +from bson.raw_bson import RawBSONDocument +from pymongo import MongoClient + +client = MongoClient(document_class=RawBSONDocument) +coll = client.test.test +doc = {"my": "doc"} +coll.insert_one(doc) +retreived = coll.find_one({"_id": doc["_id"]}) +assert retreived is not None +assert len(retreived.raw) > 0 +retreived[ + "foo" +] = "bar" # error: Unsupported target for indexed assignment ("RawBSONDocument") [index] diff --git a/test/mypy_fails/typedict_client.py b/test/mypy_fails/typedict_client.py new file mode 100644 index 0000000000..24dd84ee28 --- /dev/null +++ b/test/mypy_fails/typedict_client.py @@ -0,0 +1,18 @@ +from typing import TypedDict + +from pymongo import MongoClient + + +class Movie(TypedDict): + name: str + year: int + + +client: MongoClient[Movie] = MongoClient() +coll = client.test.test +retreived = coll.find_one({"_id": "foo"}) +assert retreived is not None +assert retreived["year"] == 1 +assert ( + retreived["name"] == 2 +) # error: Non-overlapping equality check (left operand type: "str", right operand type: "Literal[2]") [comparison-overlap] diff --git a/test/test_client.py b/test/test_client.py index 9f01c1c054..29a5b0f1d5 100644 --- a/test/test_client.py +++ b/test/test_client.py @@ -709,7 +709,7 @@ def test_repr(self): # Used to test 'eval' below. 
import bson  # noqa: F401

-        client = MongoClient(
+        client = MongoClient(  # type: ignore[type-var]
             "mongodb://localhost:27017,localhost:27018/?replicaSet=replset"
             "&connectTimeoutMS=12345&w=1&wtimeoutms=100",
             connect=False,
diff --git a/test/test_mypy.py b/test/test_mypy.py
index 36fe2ed424..55794e138e 100644
--- a/test/test_mypy.py
+++ b/test/test_mypy.py
@@ -17,17 +17,32 @@
 import os
 import unittest
-from typing import Any, Dict, Iterable, List
+from typing import TYPE_CHECKING, Any, Dict, Iterable, List
+
+try:
+    from typing import TypedDict  # type: ignore[attr-defined]
+
+    # Not available in Python 3.6 and Python 3.7
+    class Movie(TypedDict):  # type: ignore[misc]
+        name: str
+        year: int
+
+except ImportError:
+    TypedDict = None
+
 try:
     from mypy import api
 except ImportError:
-    api = None
+    api = None  # type: ignore[assignment]

 from test import IntegrationTest
+from test.utils import rs_or_single_client

+from bson.raw_bson import RawBSONDocument
 from bson.son import SON
 from pymongo.collection import Collection
+from pymongo.mongo_client import MongoClient
 from pymongo.operations import InsertOne

 TEST_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "mypy_fails")
@@ -48,6 +63,8 @@ def ensure_mypy_fails(self, filename: str) -> None:

     def test_mypy_failures(self) -> None:
         for filename in get_tests():
+            if filename == "typedict_client.py" and TypedDict is None:
+                continue
             with self.subTest(filename=filename):
                 self.ensure_mypy_fails(filename)

@@ -87,6 +104,66 @@ def test_bulk_write(self) -> None:
         result = self.coll.bulk_write(requests)
         self.assertTrue(result.acknowledged)

+    def test_command(self) -> None:
+        result = self.client.admin.command("ping")
+        items = result.items()
+
+    def test_list_collections(self) -> None:
+        cursor = self.client.test.list_collections()
+        value = cursor.next()
+        items = value.items()
+
+    def test_list_databases(self) -> None:
+        cursor = self.client.list_databases()
+        value = cursor.next()
+        value.items()
+
+    def test_default_document_type(self) -> None:
+        client = rs_or_single_client()
+        coll = client.test.test
+        doc = {"my": "doc"}
+        coll.insert_one(doc)
+        retreived = coll.find_one({"_id": doc["_id"]})
+        assert retreived is not None
+        retreived["a"] = 1
+
+    def test_explicit_document_type(self) -> None:
+        if not TYPE_CHECKING:
+            raise unittest.SkipTest("Do not use raw MongoClient")
+        client: MongoClient[Dict[str, Any]] = MongoClient()
+        coll = client.test.test
+        retreived = coll.find_one({"_id": "foo"})
+        assert retreived is not None
+        retreived["a"] = 1
+
+    def test_typeddict_document_type(self) -> None:
+        if not TYPE_CHECKING:
+            raise unittest.SkipTest("Do not use raw MongoClient")
+        client: MongoClient[Movie] = MongoClient()
+        coll = client.test.test
+        retreived = coll.find_one({"_id": "foo"})
+        assert retreived is not None
+        assert retreived["year"] == 1
+        assert retreived["name"] == "a"
+
+    def test_raw_bson_document_type(self) -> None:
+        if not TYPE_CHECKING:
+            raise unittest.SkipTest("Do not use raw MongoClient")
+        client = MongoClient(document_class=RawBSONDocument)
+        coll = client.test.test
+        retreived = coll.find_one({"_id": "foo"})
+        assert retreived is not None
+        assert len(retreived.raw) > 0
+
+    def test_son_document_type(self) -> None:
+        if not TYPE_CHECKING:
+            raise unittest.SkipTest("Do not use raw MongoClient")
+        client = MongoClient(document_class=SON[str, Any])
+        coll = client.test.test
+        retreived = coll.find_one({"_id": "foo"})
+        assert retreived is not None
+        retreived["a"] = 1
+
     def test_aggregate_pipeline(self) -> None:
coll3 = self.client.test.test3 coll3.insert_many( From 671d1e622c03c6ba8453be8929a15d514f20abaf Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Thu, 3 Mar 2022 12:47:36 -0800 Subject: [PATCH 0094/1588] PYTHON-3147 Fix pip install in MONGODB-AWS auth tests (#892) --- .evergreen/run-mongodb-aws-ecs-test.sh | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/.evergreen/run-mongodb-aws-ecs-test.sh b/.evergreen/run-mongodb-aws-ecs-test.sh index e7bcf1cda5..3484f41f43 100755 --- a/.evergreen/run-mongodb-aws-ecs-test.sh +++ b/.evergreen/run-mongodb-aws-ecs-test.sh @@ -23,9 +23,13 @@ set -o xtrace if command -v virtualenv ; then VIRTUALENV=$(command -v virtualenv) else + if ! python3 -m pip --version ; then + echo "Installing pip..." + apt-get update + apt install python3-pip -y + fi echo "Installing virtualenv..." - apt install python3-pip -y - pip3 install --user virtualenv + python3 -m pip install --user virtualenv VIRTUALENV='python3 -m virtualenv' fi From f081297a8634abb77f85a8f06c552c76a51f1120 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Fri, 4 Mar 2022 17:29:33 -0800 Subject: [PATCH 0095/1588] PYTHON-3159 Fix typo in zlib compression support (#894) --- pymongo/compression_support.py | 21 ++++----------------- 1 file changed, 4 insertions(+), 17 deletions(-) diff --git a/pymongo/compression_support.py b/pymongo/compression_support.py index ed7021494f..c9632a43d3 100644 --- a/pymongo/compression_support.py +++ b/pymongo/compression_support.py @@ -13,7 +13,6 @@ # limitations under the License. import warnings -from typing import Callable try: import snappy @@ -105,12 +104,6 @@ def get_compression_context(self, compressors): return ZstdContext() -def _zlib_no_compress(data, level=None): - """Compress data with zlib level 0.""" - cobj = zlib.compressobj(0) - return b"".join([cobj.compress(data), cobj.flush()]) - - class SnappyContext(object): compressor_id = 1 @@ -123,16 +116,10 @@ class ZlibContext(object): compressor_id = 2 def __init__(self, level): - self.compress: Callable[[bytes], bytes] - - # Jython zlib.compress doesn't support -1 - if level == -1: - self.compress = zlib.compress - # Jython zlib.compress also doesn't support 0 - elif level == 0: - self.compress = _zlib_no_compress - else: - self.compresss = lambda data, _: zlib.compress(data, level) + self.level = level + + def compress(self, data: bytes) -> bytes: + return zlib.compress(data, self.level) class ZstdContext(object): From 225d131c2d3f6f0b4c46c130abb3e1452010ad40 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Wed, 9 Mar 2022 11:13:18 -0800 Subject: [PATCH 0096/1588] PYTHON-2970 Prioritize electionId over setVersion for stale primary check (#845) --- doc/changelog.rst | 15 ++ pymongo/topology_description.py | 29 ++-- .../rs/electionId_precedence_setVersion.json | 92 +++++++++++ .../rs/null_election_id.json | 30 ++-- .../rs/secondary_ignore_ok_0.json | 2 +- .../rs/set_version_can_rollback.json | 149 ++++++++++++++++++ ...tversion_equal_max_without_electionid.json | 84 ++++++++++ ...on_greaterthan_max_without_electionid.json | 84 ++++++++++ .../rs/setversion_without_electionid.json | 12 +- .../rs/use_setversion_without_electionid.json | 32 ++-- test/test_discovery_and_monitoring.py | 2 + 11 files changed, 481 insertions(+), 50 deletions(-) create mode 100644 test/discovery_and_monitoring/rs/electionId_precedence_setVersion.json create mode 100644 test/discovery_and_monitoring/rs/set_version_can_rollback.json create mode 100644 
test/discovery_and_monitoring/rs/setversion_equal_max_without_electionid.json create mode 100644 test/discovery_and_monitoring/rs/setversion_greaterthan_max_without_electionid.json diff --git a/doc/changelog.rst b/doc/changelog.rst index 7dd57d5329..61e2b659ec 100644 --- a/doc/changelog.rst +++ b/doc/changelog.rst @@ -26,6 +26,21 @@ PyMongo 4.1 brings a number of improvements including: - :meth:`gridfs.GridOut.seek` now returns the new position in the file, to conform to the behavior of :meth:`io.IOBase.seek`. +Bug fixes +......... + +- Fixed a bug where the client could be unable to discover the new primary + after a simultaneous replica set election and reconfig (`PYTHON-2970`_). + +.. _PYTHON-2970: https://jira.mongodb.org/browse/PYTHON-2970 + +Issues Resolved +............... + +See the `PyMongo 4.1 release notes in JIRA`_ for the list of resolved issues +in this release. + +.. _PyMongo 4.1 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=30619 Changes in Version 4.0 ---------------------- diff --git a/pymongo/topology_description.py b/pymongo/topology_description.py index b3dd60680f..9f718376ef 100644 --- a/pymongo/topology_description.py +++ b/pymongo/topology_description.py @@ -17,6 +17,7 @@ from random import sample from typing import Any, Callable, Dict, List, NamedTuple, Optional, Tuple +from bson.min_key import MinKey from bson.objectid import ObjectId from pymongo import common from pymongo.errors import ConfigurationError @@ -531,24 +532,16 @@ def _update_rs_from_primary( sds.pop(server_description.address) return (_check_has_primary(sds), replica_set_name, max_set_version, max_election_id) - max_election_tuple = max_set_version, max_election_id - if None not in server_description.election_tuple: - if ( - None not in max_election_tuple - and max_election_tuple > server_description.election_tuple - ): - - # Stale primary, set to type Unknown. - sds[server_description.address] = server_description.to_unknown() - return (_check_has_primary(sds), replica_set_name, max_set_version, max_election_id) - - max_election_id = server_description.election_id - - if server_description.set_version is not None and ( - max_set_version is None or server_description.set_version > max_set_version - ): - - max_set_version = server_description.set_version + new_election_tuple = server_description.election_id, server_description.set_version + max_election_tuple = max_election_id, max_set_version + new_election_safe = tuple(MinKey() if i is None else i for i in new_election_tuple) + max_election_safe = tuple(MinKey() if i is None else i for i in max_election_tuple) + if new_election_safe >= max_election_safe: + max_election_id, max_set_version = new_election_tuple + else: + # Stale primary, set to type Unknown. + sds[server_description.address] = server_description.to_unknown() + return _check_has_primary(sds), replica_set_name, max_set_version, max_election_id # We've heard from the primary. Is it the same primary as before? 
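+        # On the comparison above: MinKey() sorts lower than any other
+        # value, so padding absent electionId/setVersion entries with
+        # MinKey lets the (electionId, setVersion) tuples be compared
+        # directly, which gives electionId precedence over setVersion.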
for server in sds.values(): diff --git a/test/discovery_and_monitoring/rs/electionId_precedence_setVersion.json b/test/discovery_and_monitoring/rs/electionId_precedence_setVersion.json new file mode 100644 index 0000000000..a7b49e2b97 --- /dev/null +++ b/test/discovery_and_monitoring/rs/electionId_precedence_setVersion.json @@ -0,0 +1,92 @@ +{ + "description": "ElectionId is considered higher precedence than setVersion", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017", + "b:27017" + ], + "setName": "rs", + "setVersion": 1, + "electionId": { + "$oid": "000000000000000000000001" + }, + "minWireVersion": 0, + "maxWireVersion": 6 + } + ], + [ + "b:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017", + "b:27017" + ], + "setName": "rs", + "setVersion": 2, + "electionId": { + "$oid": "000000000000000000000001" + }, + "minWireVersion": 0, + "maxWireVersion": 6 + } + ], + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017", + "b:27017" + ], + "setName": "rs", + "setVersion": 1, + "electionId": { + "$oid": "000000000000000000000002" + }, + "minWireVersion": 0, + "maxWireVersion": 6 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "setVersion": 1, + "electionId": { + "$oid": "000000000000000000000002" + } + }, + "b:27017": { + "type": "Unknown", + "setName": null, + "setVersion": null, + "electionId": null + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs", + "maxSetVersion": 1, + "maxElectionId": { + "$oid": "000000000000000000000002" + } + } + } + ] +} diff --git a/test/discovery_and_monitoring/rs/null_election_id.json b/test/discovery_and_monitoring/rs/null_election_id.json index 62120e8448..8eb519595a 100644 --- a/test/discovery_and_monitoring/rs/null_election_id.json +++ b/test/discovery_and_monitoring/rs/null_election_id.json @@ -123,16 +123,19 @@ "outcome": { "servers": { "a:27017": { - "type": "RSPrimary", - "setName": "rs", - "setVersion": 1, - "electionId": null - }, - "b:27017": { "type": "Unknown", "setName": null, + "setVersion": null, "electionId": null }, + "b:27017": { + "type": "RSPrimary", + "setName": "rs", + "setVersion": 1, + "electionId": { + "$oid": "000000000000000000000002" + } + }, "c:27017": { "type": "Unknown", "setName": null, @@ -174,16 +177,19 @@ "outcome": { "servers": { "a:27017": { - "type": "RSPrimary", - "setName": "rs", - "setVersion": 1, - "electionId": null - }, - "b:27017": { "type": "Unknown", "setName": null, + "setVersion": null, "electionId": null }, + "b:27017": { + "type": "RSPrimary", + "setName": "rs", + "setVersion": 1, + "electionId": { + "$oid": "000000000000000000000002" + } + }, "c:27017": { "type": "Unknown", "setName": null, diff --git a/test/discovery_and_monitoring/rs/secondary_ignore_ok_0.json b/test/discovery_and_monitoring/rs/secondary_ignore_ok_0.json index 4c1cb011a5..ee9519930b 100644 --- a/test/discovery_and_monitoring/rs/secondary_ignore_ok_0.json +++ b/test/discovery_and_monitoring/rs/secondary_ignore_ok_0.json @@ -1,5 +1,5 @@ { - "description": "New primary", + "description": "Secondary ignored when ok is zero", "uri": "mongodb://a,b/?replicaSet=rs", "phases": [ { diff --git a/test/discovery_and_monitoring/rs/set_version_can_rollback.json b/test/discovery_and_monitoring/rs/set_version_can_rollback.json 
new file mode 100644 index 0000000000..28ecbeefca --- /dev/null +++ b/test/discovery_and_monitoring/rs/set_version_can_rollback.json @@ -0,0 +1,149 @@ +{ + "description": "Set version rolls back after new primary with higher election Id", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "responses": [ + [ + "a:27017", + { + "ok": 1, + "hello": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017", + "b:27017" + ], + "setName": "rs", + "setVersion": 2, + "electionId": { + "$oid": "000000000000000000000001" + }, + "minWireVersion": 0, + "maxWireVersion": 6 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "setVersion": 2, + "electionId": { + "$oid": "000000000000000000000001" + } + }, + "b:27017": { + "type": "Unknown", + "setName": null, + "electionId": null + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs", + "maxSetVersion": 2, + "maxElectionId": { + "$oid": "000000000000000000000001" + } + } + }, + { + "_comment": "Response from new primary with newer election Id", + "responses": [ + [ + "b:27017", + { + "ok": 1, + "hello": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017", + "b:27017" + ], + "setName": "rs", + "setVersion": 1, + "electionId": { + "$oid": "000000000000000000000002" + }, + "minWireVersion": 0, + "maxWireVersion": 6 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + "setName": null, + "electionId": null + }, + "b:27017": { + "type": "RSPrimary", + "setName": "rs", + "setVersion": 1, + "electionId": { + "$oid": "000000000000000000000002" + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs", + "maxSetVersion": 1, + "maxElectionId": { + "$oid": "000000000000000000000002" + } + } + }, + { + "_comment": "Response from stale primary", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "hello": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017", + "b:27017" + ], + "setName": "rs", + "setVersion": 2, + "electionId": { + "$oid": "000000000000000000000001" + }, + "minWireVersion": 0, + "maxWireVersion": 6 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + "setName": null, + "electionId": null + }, + "b:27017": { + "type": "RSPrimary", + "setName": "rs", + "setVersion": 1, + "electionId": { + "$oid": "000000000000000000000002" + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs", + "maxSetVersion": 1, + "maxElectionId": { + "$oid": "000000000000000000000002" + } + } + } + ] +} diff --git a/test/discovery_and_monitoring/rs/setversion_equal_max_without_electionid.json b/test/discovery_and_monitoring/rs/setversion_equal_max_without_electionid.json new file mode 100644 index 0000000000..91e84d4fa0 --- /dev/null +++ b/test/discovery_and_monitoring/rs/setversion_equal_max_without_electionid.json @@ -0,0 +1,84 @@ +{ + "description": "setVersion version that is equal is treated the same as greater than if there is no electionId", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017", + "b:27017" + ], + "setName": "rs", + "setVersion": 1, + "minWireVersion": 0, + "maxWireVersion": 6 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "setVersion": 1, + "electionId": null + }, + "b:27017": { + 
"type": "Unknown", + "setName": null, + "electionId": null + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs", + "maxSetVersion": 1 + } + }, + { + "responses": [ + [ + "b:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017", + "b:27017" + ], + "setName": "rs", + "setVersion": 1, + "minWireVersion": 0, + "maxWireVersion": 6 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + "setName": null, + "electionId": null + }, + "b:27017": { + "type": "RSPrimary", + "setName": "rs", + "setVersion": 1, + "electionId": null + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs", + "maxSetVersion": 1 + } + } + ] +} diff --git a/test/discovery_and_monitoring/rs/setversion_greaterthan_max_without_electionid.json b/test/discovery_and_monitoring/rs/setversion_greaterthan_max_without_electionid.json new file mode 100644 index 0000000000..b15fd5c1a7 --- /dev/null +++ b/test/discovery_and_monitoring/rs/setversion_greaterthan_max_without_electionid.json @@ -0,0 +1,84 @@ +{ + "description": "setVersion that is greater than maxSetVersion is used if there is no electionId", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017", + "b:27017" + ], + "setName": "rs", + "setVersion": 1, + "minWireVersion": 0, + "maxWireVersion": 6 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "setVersion": 1, + "electionId": null + }, + "b:27017": { + "type": "Unknown", + "setName": null, + "electionId": null + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs", + "maxSetVersion": 1 + } + }, + { + "responses": [ + [ + "b:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017", + "b:27017" + ], + "setName": "rs", + "setVersion": 2, + "minWireVersion": 0, + "maxWireVersion": 6 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + "setName": null, + "electionId": null + }, + "b:27017": { + "type": "RSPrimary", + "setName": "rs", + "setVersion": 2, + "electionId": null + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs", + "maxSetVersion": 2 + } + } + ] +} diff --git a/test/discovery_and_monitoring/rs/setversion_without_electionid.json b/test/discovery_and_monitoring/rs/setversion_without_electionid.json index 2f68287f1d..f59c162ae1 100644 --- a/test/discovery_and_monitoring/rs/setversion_without_electionid.json +++ b/test/discovery_and_monitoring/rs/setversion_without_electionid.json @@ -1,5 +1,5 @@ { - "description": "setVersion is ignored if there is no electionId", + "description": "setVersion that is less than maxSetVersion is ignored if there is no electionId", "uri": "mongodb://a/?replicaSet=rs", "phases": [ { @@ -63,14 +63,14 @@ "outcome": { "servers": { "a:27017": { - "type": "Unknown", - "setName": null, + "type": "RSPrimary", + "setName": "rs", + "setVersion": 2, "electionId": null }, "b:27017": { - "type": "RSPrimary", - "setName": "rs", - "setVersion": 1, + "type": "Unknown", + "setName": null, "electionId": null } }, diff --git a/test/discovery_and_monitoring/rs/use_setversion_without_electionid.json b/test/discovery_and_monitoring/rs/use_setversion_without_electionid.json index 
421ff57c8d..6dd753d5d8 100644 --- a/test/discovery_and_monitoring/rs/use_setversion_without_electionid.json +++ b/test/discovery_and_monitoring/rs/use_setversion_without_electionid.json @@ -71,20 +71,23 @@ "outcome": { "servers": { "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "setVersion": 1, + "electionId": { + "$oid": "000000000000000000000001" + } + }, + "b:27017": { "type": "Unknown", "setName": null, "electionId": null - }, - "b:27017": { - "type": "RSPrimary", - "setName": "rs", - "setVersion": 2 } }, "topologyType": "ReplicaSetWithPrimary", "logicalSessionTimeoutMinutes": null, "setName": "rs", - "maxSetVersion": 2, + "maxSetVersion": 1, "maxElectionId": { "$oid": "000000000000000000000001" } @@ -115,22 +118,25 @@ "outcome": { "servers": { "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "setVersion": 1, + "electionId": { + "$oid": "000000000000000000000002" + } + }, + "b:27017": { "type": "Unknown", "setName": null, "electionId": null - }, - "b:27017": { - "type": "RSPrimary", - "setName": "rs", - "setVersion": 2 } }, "topologyType": "ReplicaSetWithPrimary", "logicalSessionTimeoutMinutes": null, "setName": "rs", - "maxSetVersion": 2, + "maxSetVersion": 1, "maxElectionId": { - "$oid": "000000000000000000000001" + "$oid": "000000000000000000000002" } } } diff --git a/test/test_discovery_and_monitoring.py b/test/test_discovery_and_monitoring.py index d17a0d4166..a97eb65432 100644 --- a/test/test_discovery_and_monitoring.py +++ b/test/test_discovery_and_monitoring.py @@ -220,6 +220,8 @@ def create_tests(): dirname = os.path.split(dirpath)[-1] for filename in filenames: + if os.path.splitext(filename)[1] != ".json": + continue with open(os.path.join(dirpath, filename)) as scenario_stream: scenario_def = json_util.loads(scenario_stream.read()) From 087950d869096cf44a797f6c402985a73ffec16e Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Tue, 15 Mar 2022 15:49:11 -0500 Subject: [PATCH 0097/1588] PYTHON-3164 Outdated link from PyMongo docs to community forum (#895) --- .github/workflows/test-python.yml | 18 ++++++++++++++++++ bson/binary.py | 2 +- doc/atlas.rst | 2 +- doc/changelog.rst | 2 +- doc/conf.py | 7 +++++++ doc/developer/periodic_executor.rst | 2 +- doc/examples/high_availability.rst | 2 +- doc/examples/mod_wsgi.rst | 6 +++--- doc/examples/tailable.rst | 2 +- doc/examples/tls.rst | 2 +- doc/faq.rst | 2 +- doc/index.rst | 2 +- doc/migrate-to-pymongo4.rst | 7 +++---- doc/tools.rst | 20 ++++++-------------- pymongo/change_stream.py | 2 +- pymongo/collection.py | 8 ++++---- pymongo/database.py | 2 +- pymongo/mongo_client.py | 2 +- pymongo/operations.py | 2 +- 19 files changed, 54 insertions(+), 38 deletions(-) diff --git a/.github/workflows/test-python.yml b/.github/workflows/test-python.yml index 5fda9b8817..8eec9d9bf1 100644 --- a/.github/workflows/test-python.yml +++ b/.github/workflows/test-python.yml @@ -65,3 +65,21 @@ jobs: run: | mypy --install-types --non-interactive bson gridfs tools pymongo mypy --install-types --non-interactive --disable-error-code var-annotated --disable-error-code attr-defined --disable-error-code union-attr --disable-error-code assignment --disable-error-code no-redef --disable-error-code index --allow-redefinition --allow-untyped-globals --exclude "test/mypy_fails/*.*" test + + linkcheck: + name: Check Links + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v2 + - uses: actions/setup-python@v2 + with: + cache: 'pip' + cache-dependency-path: 'setup.py' + - name: Install dependencies + run: | + python -m pip install -U 
pip + python -m pip install sphinx + - name: Check links + run: | + cd doc + make linkcheck diff --git a/bson/binary.py b/bson/binary.py index 93c43ee40c..a270eae8d2 100644 --- a/bson/binary.py +++ b/bson/binary.py @@ -208,7 +208,7 @@ class Binary(bytes): - `data`: the binary data to represent. Can be any bytes-like type that implements the buffer protocol. - `subtype` (optional): the `binary subtype - `_ + `_ to use .. versionchanged:: 3.9 diff --git a/doc/atlas.rst b/doc/atlas.rst index 6100e9d3c5..6685cf9fb8 100644 --- a/doc/atlas.rst +++ b/doc/atlas.rst @@ -35,7 +35,7 @@ Connections to Atlas require TLS/SSL. You can read more about TLS versions and their security implications here: - ``_ + ``_ .. _python.org: https://www.python.org/downloads/ .. _homebrew: https://brew.sh/ diff --git a/doc/changelog.rst b/doc/changelog.rst index 61e2b659ec..73e2ea9ba4 100644 --- a/doc/changelog.rst +++ b/doc/changelog.rst @@ -1072,7 +1072,7 @@ Changes and Deprecations: - Deprecated the MongoClient option `socketKeepAlive`. It now defaults to true and disabling it is not recommended, see `does TCP keepalive time affect MongoDB Deployments? - `_ + `_ - Deprecated :meth:`~pymongo.collection.Collection.initialize_ordered_bulk_op`, :meth:`~pymongo.collection.Collection.initialize_unordered_bulk_op`, and :class:`~pymongo.bulk.BulkOperationBuilder`. Use diff --git a/doc/conf.py b/doc/conf.py index 714e6121d4..a5c5be2694 100644 --- a/doc/conf.py +++ b/doc/conf.py @@ -80,6 +80,13 @@ # A list of ignored prefixes for module index sorting. # modindex_common_prefix = [] +# Options for link checking +# The anchors on the rendered markdown page are created after the fact, +# so this link results in a 404. +linkcheck_ignore = [ + "https://github.com/mongodb/specifications/blob/master/source/server-discovery-and-monitoring/server-monitoring.rst#requesting-an-immediate-check" +] + # -- Options for extensions ---------------------------------------------------- autoclass_content = "init" diff --git a/doc/developer/periodic_executor.rst b/doc/developer/periodic_executor.rst index 6327cfd835..9cb0ce0eb9 100644 --- a/doc/developer/periodic_executor.rst +++ b/doc/developer/periodic_executor.rst @@ -106,7 +106,7 @@ Thus the current design of periodic executors is surprisingly simple: they do a simple `time.sleep` for a half-second, check if it is time to wake or terminate, and sleep again. -.. _Server Discovery And Monitoring Spec: https://github.com/mongodb/specifications/blob/master/source/server-discovery-and-monitoring/server-discovery-and-monitoring.rst#requesting-an-immediate-check +.. _Server Discovery And Monitoring Spec: https://github.com/mongodb/specifications/blob/master/source/server-discovery-and-monitoring/server-monitoring.rst#requesting-an-immediate-check .. _PYTHON-863: https://jira.mongodb.org/browse/PYTHON-863 diff --git a/doc/examples/high_availability.rst b/doc/examples/high_availability.rst index 19b48f7d01..efd7a66cc6 100644 --- a/doc/examples/high_availability.rst +++ b/doc/examples/high_availability.rst @@ -308,7 +308,7 @@ milliseconds of the closest member's ping time. replica set *through* a mongos. The equivalent is the localThreshold_ command line option. -.. _localThreshold: http://docs.mongodb.org/manual/reference/mongos/#cmdoption--localThreshold +.. _localThreshold: https://docs.mongodb.com/manual/reference/program/mongos/#std-option-mongos.--localThreshold .. 
_health-monitoring: diff --git a/doc/examples/mod_wsgi.rst b/doc/examples/mod_wsgi.rst index 832d779fd8..96d6ce892f 100644 --- a/doc/examples/mod_wsgi.rst +++ b/doc/examples/mod_wsgi.rst @@ -3,7 +3,7 @@ PyMongo and mod_wsgi ==================== -To run your application under `mod_wsgi `_, +To run your application under `mod_wsgi `_, follow these guidelines: * Run ``mod_wsgi`` in daemon mode with the ``WSGIDaemonProcess`` directive. @@ -48,9 +48,9 @@ interpreter. Python C extensions in general have issues running in multiple Python sub interpreters. These difficulties are explained in the documentation for -`Py_NewInterpreter `_ +`Py_NewInterpreter `_ and in the `Multiple Python Sub Interpreters -`_ +`_ section of the ``mod_wsgi`` documentation. Beginning with PyMongo 2.7, the C extension for BSON detects when it is running diff --git a/doc/examples/tailable.rst b/doc/examples/tailable.rst index 482b049c56..1242e9ddf5 100644 --- a/doc/examples/tailable.rst +++ b/doc/examples/tailable.rst @@ -5,7 +5,7 @@ By default, MongoDB will automatically close a cursor when the client has exhausted all results in the cursor. However, for `capped collections `_ you may use a `tailable cursor -`_ +`_ that remains open after the client exhausts the results in the initial cursor. The following is a basic example of using a tailable cursor to tail the oplog diff --git a/doc/examples/tls.rst b/doc/examples/tls.rst index 03ac63a633..f6920ad278 100644 --- a/doc/examples/tls.rst +++ b/doc/examples/tls.rst @@ -32,7 +32,7 @@ MongoDB. You can read more about TLS versions and their security implications here: - ``_ + ``_ .. _python.org: https://www.python.org/downloads/ .. _homebrew: https://brew.sh/ diff --git a/doc/faq.rst b/doc/faq.rst index 44c1c9a981..a7f7c87bdd 100644 --- a/doc/faq.rst +++ b/doc/faq.rst @@ -314,7 +314,7 @@ when it is serialized to BSON and used as a query. Thus you can create a subdocument that exactly matches the subdocument in the collection. .. seealso:: `MongoDB Manual entry on subdocument matching - `_. + `_. What does *CursorNotFound* cursor id not valid at server mean? -------------------------------------------------------------- diff --git a/doc/index.rst b/doc/index.rst index da05bf80ae..8fd357b4cd 100644 --- a/doc/index.rst +++ b/doc/index.rst @@ -54,7 +54,7 @@ everything you need to know to use **PyMongo**. Getting Help ------------ If you're having trouble or have questions about PyMongo, ask your question on -our `MongoDB Community Forum `_. +our `MongoDB Community Forum `_. You may also want to consider a `commercial support subscription `_. Once you get an answer, it'd be great if you could work it back into this diff --git a/doc/migrate-to-pymongo4.rst b/doc/migrate-to-pymongo4.rst index b993e32f4e..6fcbdf5011 100644 --- a/doc/migrate-to-pymongo4.rst +++ b/doc/migrate-to-pymongo4.rst @@ -88,8 +88,7 @@ The socketKeepAlive parameter is removed Removed the ``socketKeepAlive`` keyword argument to :class:`~pymongo.mongo_client.MongoClient`. PyMongo now always enables TCP -keepalive. For more information see: -https://docs.mongodb.com/manual/faq/diagnostics/#does-tcp-keepalive-time-affect-mongodb-deployments +keepalive. For more information see the `documentation `_. Renamed URI options ................... @@ -545,8 +544,8 @@ Can be changed to this:: .. _$expr: https://docs.mongodb.com/manual/reference/operator/query/expr/ .. _$geoWithin: https://docs.mongodb.com/manual/reference/operator/query/geoWithin/ -.. 
_$center: https://docs.mongodb.com/manual/reference/operator/query/center/#op._S_center -.. _$centerSphere: https://docs.mongodb.com/manual/reference/operator/query/centerSphere/#op._S_centerSphere +.. _$center: https://docs.mongodb.com/manual/reference/operator/query/center/ +.. _$centerSphere: https://docs.mongodb.com/manual/reference/operator/query/centerSphere/ Collection.initialize_ordered_bulk_op and initialize_unordered_bulk_op is removed ................................................................................. diff --git a/doc/tools.rst b/doc/tools.rst index 304a1eaf5c..69ee64448b 100644 --- a/doc/tools.rst +++ b/doc/tools.rst @@ -34,8 +34,7 @@ PyMODM libraries to target platforms like Django. At the same time, PyMODM is powerful enough to be used for developing applications on its own. Complete documentation is available on `readthedocs - `_ in addition to a `Gitter channel - `_ for discussing the project. + `_. Humongolus `Humongolus `_ is a lightweight ORM @@ -72,7 +71,7 @@ MongoEngine documents and query collections using syntax inspired by the Django ORM. The code is available on `GitHub `_; for more information, see - the `tutorial `_. + the `tutorial `_. MotorEngine `MotorEngine `_ is a port of @@ -122,10 +121,10 @@ Framework Tools This section lists tools and adapters that have been designed to work with various Python frameworks and libraries. -* `Djongo `_ is a connector for using +* `Djongo `_ is a connector for using Django with MongoDB as the database backend. Use the Django Admin GUI to add and modify documents in MongoDB. - The `Djongo Source Code `_ is hosted on GitHub + The `Djongo Source Code `_ is hosted on GitHub and the `Djongo package `_ is on pypi. * `Django MongoDB Engine `_ is a MongoDB @@ -138,24 +137,17 @@ various Python frameworks and libraries. `_ is a MongoDB backend for Django, an `example: `_. - For more information ``_ + For more information see ``_ * `mongodb_beaker `_ is a - project to enable using MongoDB as a backend for `beaker's - `_ caching / session system. + project to enable using MongoDB as a backend for `beakers `_ caching / session system. `The source is on GitHub `_. * `Log4Mongo `_ is a flexible Python logging handler that can store logs in MongoDB using normal and capped collections. * `MongoLog `_ is a Python logging handler that stores logs in MongoDB using a capped collection. -* `c5t `_ is a content-management system - using TurboGears and MongoDB. * `rod.recipe.mongodb `_ is a ZC Buildout recipe for downloading and installing MongoDB. -* `repoze-what-plugins-mongodb - `_ is a project - working to support a plugin for using MongoDB as a backend for - :mod:`repoze.what`. * `mongobox `_ is a tool to run a sandboxed MongoDB instance from within a python app. * `Flask-MongoAlchemy `_ Add diff --git a/pymongo/change_stream.py b/pymongo/change_stream.py index d054046bda..db33999788 100644 --- a/pymongo/change_stream.py +++ b/pymongo/change_stream.py @@ -78,7 +78,7 @@ class ChangeStream(Generic[_DocumentType]): :meth:`pymongo.mongo_client.MongoClient.watch` instead. .. versionadded:: 3.6 - .. seealso:: The MongoDB documentation on `changeStreams `_. + .. seealso:: The MongoDB documentation on `changeStreams `_. """ def __init__( diff --git a/pymongo/collection.py b/pymongo/collection.py index 46916f98f8..ad75fb760c 100644 --- a/pymongo/collection.py +++ b/pymongo/collection.py @@ -1784,8 +1784,8 @@ def count_documents( .. _$expr: https://docs.mongodb.com/manual/reference/operator/query/expr/ .. 
_$geoWithin: https://docs.mongodb.com/manual/reference/operator/query/geoWithin/ - .. _$center: https://docs.mongodb.com/manual/reference/operator/query/center/#op._S_center - .. _$centerSphere: https://docs.mongodb.com/manual/reference/operator/query/centerSphere/#op._S_centerSphere + .. _$center: https://docs.mongodb.com/manual/reference/operator/query/center/ + .. _$centerSphere: https://docs.mongodb.com/manual/reference/operator/query/centerSphere/ """ pipeline = [{"$match": filter}] if "skip" in kwargs: @@ -2011,7 +2011,7 @@ def create_index( .. seealso:: The MongoDB documentation on `indexes `_. - .. _wildcard index: https://docs.mongodb.com/master/core/index-wildcard/#wildcard-index-core + .. _wildcard index: https://docs.mongodb.com/master/core/index-wildcard/ """ cmd_options = {} if "maxTimeMS" in kwargs: @@ -2557,7 +2557,7 @@ def watch( .. versionadded:: 3.6 - .. seealso:: The MongoDB documentation on `changeStreams `_. + .. seealso:: The MongoDB documentation on `changeStreams `_. .. _change streams specification: https://github.com/mongodb/specifications/blob/master/source/change-streams/change-streams.rst diff --git a/pymongo/database.py b/pymongo/database.py index c2f2eb4bc0..934b502191 100644 --- a/pymongo/database.py +++ b/pymongo/database.py @@ -558,7 +558,7 @@ def watch( .. versionadded:: 3.7 - .. seealso:: The MongoDB documentation on `changeStreams `_. + .. seealso:: The MongoDB documentation on `changeStreams `_. .. _change streams specification: https://github.com/mongodb/specifications/blob/master/source/change-streams/change-streams.rst diff --git a/pymongo/mongo_client.py b/pymongo/mongo_client.py index cd9067f463..ee89279812 100644 --- a/pymongo/mongo_client.py +++ b/pymongo/mongo_client.py @@ -932,7 +932,7 @@ def watch( .. versionadded:: 3.7 - .. seealso:: The MongoDB documentation on `changeStreams `_. + .. seealso:: The MongoDB documentation on `changeStreams `_. .. _change streams specification: https://github.com/mongodb/specifications/blob/master/source/change-streams/change-streams.rst diff --git a/pymongo/operations.py b/pymongo/operations.py index 8f264c48c2..e528f2a2df 100644 --- a/pymongo/operations.py +++ b/pymongo/operations.py @@ -488,7 +488,7 @@ def __init__(self, keys: _IndexKeyHint, **kwargs: Any) -> None: Added the ``partialFilterExpression`` option to support partial indexes. - .. _wildcard index: https://docs.mongodb.com/master/core/index-wildcard/#wildcard-index-core + .. _wildcard index: https://docs.mongodb.com/master/core/index-wildcard/ """ keys = _index_list(keys) if "name" not in kwargs: From 9ada6543d58714a48e42daeb60ff7d95b0ae3f17 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Tue, 15 Mar 2022 14:52:47 -0700 Subject: [PATCH 0098/1588] PYTHON-3174 Remove noisy running Topology check for main test client (#898) --- test/__init__.py | 30 ++++++++++++++---------------- test/test_client.py | 11 +++++++++++ 2 files changed, 25 insertions(+), 16 deletions(-) diff --git a/test/__init__.py b/test/__init__.py index b2906481e9..c432b26098 100644 --- a/test/__init__.py +++ b/test/__init__.py @@ -286,13 +286,8 @@ def hello(self): return self._hello def _connect(self, host, port, **kwargs): - # Jython takes a long time to connect. 
- if sys.platform.startswith("java"): - timeout_ms = 10000 - else: - timeout_ms = 5000 kwargs.update(self.default_client_options) - client = pymongo.MongoClient(host, port, serverSelectionTimeoutMS=timeout_ms, **kwargs) + client = pymongo.MongoClient(host, port, serverSelectionTimeoutMS=5000, **kwargs) try: try: client.admin.command(HelloCompat.LEGACY_CMD) # Can we connect? @@ -1037,21 +1032,26 @@ def _get_executors(topology): return [e for e in executors if e is not None] -def all_executors_stopped(topology): +def print_running_topology(topology): running = [e for e in _get_executors(topology) if not e._stopped] if running: print( - " Topology %s has THREADS RUNNING: %s, created at: %s" - % (topology, running, topology._settings._stack) + "WARNING: found Topology with running threads:\n" + " Threads: %s\n" + " Topology: %s\n" + " Creation traceback:\n%s" % (running, topology, topology._settings._stack) ) - return False - return True -def print_unclosed_clients(): +def print_running_clients(): from pymongo.topology import Topology processed = set() + # Avoid false positives on the main test client. + # XXX: Can be removed after PYTHON-1634 or PYTHON-1896. + c = client_context.client + if c: + processed.add(c._topology._topology_id) # Call collect to manually cleanup any would-be gc'd clients to avoid # false positives. gc.collect() @@ -1061,7 +1061,7 @@ def print_unclosed_clients(): # Avoid printing the same Topology multiple times. if obj._topology_id in processed: continue - all_executors_stopped(obj) + print_running_topology(obj) processed.add(obj._topology_id) except ReferenceError: pass @@ -1086,9 +1086,7 @@ def teardown(): c.drop_database("pymongo_test_bernie") c.close() - # Jython does not support gc.get_objects. - if not sys.platform.startswith("java"): - print_unclosed_clients() + print_running_clients() class PymongoTestRunner(unittest.TextTestRunner): diff --git a/test/test_client.py b/test/test_client.py index 29a5b0f1d5..a0d6e22d53 100644 --- a/test/test_client.py +++ b/test/test_client.py @@ -759,6 +759,7 @@ def test_list_databases(self): for doc in helper_docs: self.assertIs(type(doc), dict) client = rs_or_single_client(document_class=SON) + self.addCleanup(client.close) for doc in client.list_databases(): self.assertIs(type(doc), dict) @@ -979,6 +980,7 @@ def test_unix_socket(self): uri = "mongodb://%s" % encoded_socket # Confirm we can do operations via the socket. 
client = rs_or_single_client(uri) + self.addCleanup(client.close) client.pymongo_test.test.insert_one({"dummy": "object"}) dbs = client.list_database_names() self.assertTrue("pymongo_test" in dbs) @@ -1002,6 +1004,7 @@ def test_document_class(self): self.assertFalse(isinstance(db.test.find_one(), SON)) c = rs_or_single_client(document_class=SON) + self.addCleanup(c.close) db = c.pymongo_test self.assertEqual(SON, c.codec_options.document_class) @@ -1040,6 +1043,7 @@ def test_socket_timeout(self): no_timeout = self.client timeout_sec = 1 timeout = rs_or_single_client(socketTimeoutMS=1000 * timeout_sec) + self.addCleanup(timeout.close) no_timeout.pymongo_test.drop_collection("test") no_timeout.pymongo_test.test.insert_one({"x": 1}) @@ -1095,6 +1099,7 @@ def test_tz_aware(self): self.assertRaises(ValueError, MongoClient, tz_aware="foo") aware = rs_or_single_client(tz_aware=True) + self.addCleanup(aware.close) naive = self.client aware.pymongo_test.drop_collection("test") @@ -1124,6 +1129,7 @@ def test_ipv6(self): uri += "/?replicaSet=" + (client_context.replica_set_name or "") client = rs_or_single_client_noauth(uri) + self.addCleanup(client.close) client.pymongo_test.test.insert_one({"dummy": "object"}) client.pymongo_test_bernie.test.insert_one({"dummy": "object"}) @@ -1222,6 +1228,7 @@ def test_operation_failure(self): # to avoid race conditions caused by replica set failover or idle # socket reaping. client = single_client() + self.addCleanup(client.close) client.pymongo_test.test.find_one() pool = get_pool(client) socket_count = len(pool.sockets) @@ -1245,18 +1252,21 @@ def test_lazy_connect_w0(self): self.addCleanup(client_context.client.drop_database, "test_lazy_connect_w0") client = rs_or_single_client(connect=False, w=0) + self.addCleanup(client.close) client.test_lazy_connect_w0.test.insert_one({}) wait_until( lambda: client.test_lazy_connect_w0.test.count_documents({}) == 1, "find one document" ) client = rs_or_single_client(connect=False, w=0) + self.addCleanup(client.close) client.test_lazy_connect_w0.test.update_one({}, {"$set": {"x": 1}}) wait_until( lambda: client.test_lazy_connect_w0.test.find_one().get("x") == 1, "update one document" ) client = rs_or_single_client(connect=False, w=0) + self.addCleanup(client.close) client.test_lazy_connect_w0.test.delete_one({}) wait_until( lambda: client.test_lazy_connect_w0.test.count_documents({}) == 0, "delete one document" @@ -1267,6 +1277,7 @@ def test_exhaust_network_error(self): # When doing an exhaust query, the socket stays checked out on success # but must be checked in on error to avoid semaphore leaks. client = rs_or_single_client(maxPoolSize=1, retryReads=False) + self.addCleanup(client.close) collection = client.pymongo_test.test pool = get_pool(client) pool._check_interval_seconds = None # Never check. 
From 474420b2e5b8318c58f596a9f5b4d3ed6a871ccd Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Wed, 16 Mar 2022 05:56:07 -0500 Subject: [PATCH 0099/1588] PYTHON-3085 Audit consistent and correct types for _DocumentOut (#893) --- .evergreen/config.yml | 2 +- .github/workflows/test-python.yml | 2 + README.rst | 2 +- bson/__init__.py | 111 +++++++------ bson/_cbsonmodule.c | 2 +- bson/codec_options.py | 63 ++++---- bson/codec_options.pyi | 100 ++++++++++++ doc/changelog.rst | 6 +- doc/examples/tls.rst | 2 +- doc/faq.rst | 2 +- doc/installation.rst | 4 +- doc/migrate-to-pymongo4.rst | 2 +- doc/python3.rst | 18 +-- pymongo/collection.py | 2 +- pymongo/database.py | 13 +- pymongo/encryption.py | 4 +- pymongo/message.py | 8 +- pymongo/mongo_client.py | 5 +- pymongo/monitoring.py | 11 +- setup.py | 6 +- test/test_binary.py | 20 +-- test/test_bson.py | 23 ++- test/test_bson_corpus.py | 4 +- test/test_custom_types.py | 4 +- test/test_mypy.py | 249 ++++++++++++++++++++++++++---- 25 files changed, 495 insertions(+), 170 deletions(-) create mode 100644 bson/codec_options.pyi diff --git a/.evergreen/config.yml b/.evergreen/config.yml index 2e3c12f3f8..ef60eaf7d7 100644 --- a/.evergreen/config.yml +++ b/.evergreen/config.yml @@ -1726,7 +1726,7 @@ tasks: vars: VERSION: "latest" TOPOLOGY: "server" - PYTHON_BINARY: "/opt/mongodbtoolchain/v2/bin/python3" + PYTHON_BINARY: "/opt/mongodbtoolchain/v3/bin/python3" - func: "run tests" # }}} - name: "coverage-report" diff --git a/.github/workflows/test-python.yml b/.github/workflows/test-python.yml index 8eec9d9bf1..ba9b99e06b 100644 --- a/.github/workflows/test-python.yml +++ b/.github/workflows/test-python.yml @@ -64,6 +64,8 @@ jobs: - name: Run mypy run: | mypy --install-types --non-interactive bson gridfs tools pymongo + # Test overshadowed codec_options.py file + mypy --install-types --non-interactive bson/codec_options.py mypy --install-types --non-interactive --disable-error-code var-annotated --disable-error-code attr-defined --disable-error-code union-attr --disable-error-code assignment --disable-error-code no-redef --disable-error-code index --allow-redefinition --allow-untyped-globals --exclude "test/mypy_fails/*.*" test linkcheck: diff --git a/README.rst b/README.rst index 390599a6cf..fedb9e14d4 100644 --- a/README.rst +++ b/README.rst @@ -88,7 +88,7 @@ is incompatible with PyMongo. Dependencies ============ -PyMongo supports CPython 3.6+ and PyPy3.6+. +PyMongo supports CPython 3.6.2+ and PyPy3.6+. Optional dependencies: diff --git a/bson/__init__.py b/bson/__init__.py index a287db1801..343fbecb25 100644 --- a/bson/__init__.py +++ b/bson/__init__.py @@ -76,6 +76,7 @@ List, Mapping, MutableMapping, + Optional, Sequence, Tuple, Type, @@ -95,7 +96,12 @@ UuidRepresentation, ) from bson.code import Code -from bson.codec_options import DEFAULT_CODEC_OPTIONS, CodecOptions, _raw_document_class +from bson.codec_options import ( + DEFAULT_CODEC_OPTIONS, + CodecOptions, + _DocumentType, + _raw_document_class, +) from bson.dbref import DBRef from bson.decimal128 import Decimal128 from bson.errors import InvalidBSON, InvalidDocument, InvalidStringData @@ -108,13 +114,11 @@ from bson.timestamp import Timestamp from bson.tz_util import utc -# Import RawBSONDocument for type-checking only to avoid circular dependency. +# Import some modules for type-checking only. 
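+# (These modules are used only in type annotations, e.g. the
+# _ReadableBuffer alias later in this file, so the TYPE_CHECKING guard
+# avoids importing them at runtime.)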
if TYPE_CHECKING: from array import array from mmap import mmap - from bson.raw_bson import RawBSONDocument - try: from bson import _cbson # type: ignore[attr-defined] @@ -181,7 +185,7 @@ def _get_int( return _UNPACK_INT_FROM(data, position)[0], position + 4 -def _get_c_string(data: Any, view: Any, position: int, opts: Any) -> Tuple[str, int]: +def _get_c_string(data: Any, view: Any, position: int, opts: CodecOptions) -> Tuple[str, int]: """Decode a BSON 'C' string to python str.""" end = data.index(b"\x00", position) return _utf_8_decode(view[position:end], opts.unicode_decode_error_handler, True)[0], end + 1 @@ -195,7 +199,7 @@ def _get_float( def _get_string( - data: Any, view: Any, position: int, obj_end: int, opts: Any, dummy: Any + data: Any, view: Any, position: int, obj_end: int, opts: CodecOptions, dummy: Any ) -> Tuple[str, int]: """Decode a BSON string to python str.""" length = _UNPACK_INT_FROM(data, position)[0] @@ -226,7 +230,7 @@ def _get_object_size(data: Any, position: int, obj_end: int) -> Tuple[int, int]: def _get_object( - data: Any, view: Any, position: int, obj_end: int, opts: Any, dummy: Any + data: Any, view: Any, position: int, obj_end: int, opts: CodecOptions, dummy: Any ) -> Tuple[Any, int]: """Decode a BSON subdocument to opts.document_class or bson.dbref.DBRef.""" obj_size, end = _get_object_size(data, position, obj_end) @@ -247,7 +251,7 @@ def _get_object( def _get_array( - data: Any, view: Any, position: int, obj_end: int, opts: Any, element_name: str + data: Any, view: Any, position: int, obj_end: int, opts: CodecOptions, element_name: str ) -> Tuple[Any, int]: """Decode a BSON array to python list.""" size = _UNPACK_INT_FROM(data, position)[0] @@ -289,7 +293,7 @@ def _get_array( def _get_binary( - data: Any, view: Any, position: int, obj_end: int, opts: Any, dummy1: Any + data: Any, view: Any, position: int, obj_end: int, opts: CodecOptions, dummy1: Any ) -> Tuple[Union[Binary, uuid.UUID], int]: """Decode a BSON binary to bson.binary.Binary or python UUID.""" length, subtype = _UNPACK_LENGTH_SUBTYPE_FROM(data, position) @@ -347,14 +351,14 @@ def _get_boolean( def _get_date( - data: Any, view: Any, position: int, dummy0: int, opts: Any, dummy1: Any + data: Any, view: Any, position: int, dummy0: int, opts: CodecOptions, dummy1: Any ) -> Tuple[datetime.datetime, int]: """Decode a BSON datetime to python datetime.datetime.""" return _millis_to_datetime(_UNPACK_LONG_FROM(data, position)[0], opts), position + 8 def _get_code( - data: Any, view: Any, position: int, obj_end: int, opts: Any, element_name: str + data: Any, view: Any, position: int, obj_end: int, opts: CodecOptions, element_name: str ) -> Tuple[Code, int]: """Decode a BSON code to bson.code.Code.""" code, position = _get_string(data, view, position, obj_end, opts, element_name) @@ -362,7 +366,7 @@ def _get_code( def _get_code_w_scope( - data: Any, view: Any, position: int, obj_end: int, opts: Any, element_name: str + data: Any, view: Any, position: int, obj_end: int, opts: CodecOptions, element_name: str ) -> Tuple[Code, int]: """Decode a BSON code_w_scope to bson.code.Code.""" code_end = position + _UNPACK_INT_FROM(data, position)[0] @@ -374,7 +378,7 @@ def _get_code_w_scope( def _get_regex( - data: Any, view: Any, position: int, dummy0: Any, opts: Any, dummy1: Any + data: Any, view: Any, position: int, dummy0: Any, opts: CodecOptions, dummy1: Any ) -> Tuple[Regex, int]: """Decode a BSON regex to bson.regex.Regex or a python pattern object.""" pattern, position = _get_c_string(data, view, position, 
opts) @@ -384,7 +388,7 @@ def _get_regex( def _get_ref( - data: Any, view: Any, position: int, obj_end: int, opts: Any, element_name: str + data: Any, view: Any, position: int, obj_end: int, opts: CodecOptions, element_name: str ) -> Tuple[DBRef, int]: """Decode (deprecated) BSON DBPointer to bson.dbref.DBRef.""" collection, position = _get_string(data, view, position, obj_end, opts, element_name) @@ -448,12 +452,16 @@ def _get_decimal128( if _USE_C: - def _element_to_dict(data: Any, view: Any, position: int, obj_end: int, opts: Any) -> Any: + def _element_to_dict( + data: Any, view: Any, position: int, obj_end: int, opts: CodecOptions + ) -> Any: return _cbson._element_to_dict(data, position, obj_end, opts) else: - def _element_to_dict(data: Any, view: Any, position: int, obj_end: int, opts: Any) -> Any: + def _element_to_dict( + data: Any, view: Any, position: int, obj_end: int, opts: CodecOptions + ) -> Any: """Decode a single key, value pair.""" element_type = data[position] position += 1 @@ -476,13 +484,13 @@ def _element_to_dict(data: Any, view: Any, position: int, obj_end: int, opts: An _T = TypeVar("_T", bound=MutableMapping[Any, Any]) -def _raw_to_dict(data: Any, position: int, obj_end: int, opts: Any, result: _T) -> _T: +def _raw_to_dict(data: Any, position: int, obj_end: int, opts: CodecOptions, result: _T) -> _T: data, view = get_data_and_view(data) return _elements_to_dict(data, view, position, obj_end, opts, result) def _elements_to_dict( - data: Any, view: Any, position: int, obj_end: int, opts: Any, result: Any = None + data: Any, view: Any, position: int, obj_end: int, opts: CodecOptions, result: Any = None ) -> Any: """Decode a BSON document into result.""" if result is None: @@ -496,7 +504,7 @@ def _elements_to_dict( return result -def _bson_to_dict(data: Any, opts: Any) -> Any: +def _bson_to_dict(data: Any, opts: CodecOptions) -> Any: """Decode a BSON string to document_class.""" data, view = get_data_and_view(data) try: @@ -586,7 +594,7 @@ def _encode_bytes(name: bytes, value: bytes, dummy0: Any, dummy1: Any) -> bytes: return b"\x05" + name + _PACK_INT(len(value)) + b"\x00" + value -def _encode_mapping(name: bytes, value: Any, check_keys: bool, opts: Any) -> bytes: +def _encode_mapping(name: bytes, value: Any, check_keys: bool, opts: CodecOptions) -> bytes: """Encode a mapping type.""" if _raw_document_class(value): return b"\x03" + name + value.raw @@ -594,7 +602,7 @@ def _encode_mapping(name: bytes, value: Any, check_keys: bool, opts: Any) -> byt return b"\x03" + name + _PACK_INT(len(data) + 5) + data + b"\x00" -def _encode_dbref(name: bytes, value: DBRef, check_keys: bool, opts: Any) -> bytes: +def _encode_dbref(name: bytes, value: DBRef, check_keys: bool, opts: CodecOptions) -> bytes: """Encode bson.dbref.DBRef.""" buf = bytearray(b"\x03" + name + b"\x00\x00\x00\x00") begin = len(buf) - 4 @@ -611,7 +619,7 @@ def _encode_dbref(name: bytes, value: DBRef, check_keys: bool, opts: Any) -> byt return bytes(buf) -def _encode_list(name: bytes, value: Sequence[Any], check_keys: bool, opts: Any) -> bytes: +def _encode_list(name: bytes, value: Sequence[Any], check_keys: bool, opts: CodecOptions) -> bytes: """Encode a list/tuple.""" lname = gen_list_name() data = b"".join([_name_value_to_bson(next(lname), item, check_keys, opts) for item in value]) @@ -620,8 +628,8 @@ def _encode_list(name: bytes, value: Sequence[Any], check_keys: bool, opts: Any) def _encode_text(name: bytes, value: str, dummy0: Any, dummy1: Any) -> bytes: """Encode a python str.""" - value = 
_utf_8_encode(value)[0] - return b"\x02" + name + _PACK_INT(len(value) + 1) + value + b"\x00" # type: ignore + bvalue = _utf_8_encode(value)[0] + return b"\x02" + name + _PACK_INT(len(bvalue) + 1) + bvalue + b"\x00" def _encode_binary(name: bytes, value: Binary, dummy0: Any, dummy1: Any) -> bytes: @@ -632,7 +640,7 @@ def _encode_binary(name: bytes, value: Binary, dummy0: Any, dummy1: Any) -> byte return b"\x05" + name + _PACK_LENGTH_SUBTYPE(len(value), subtype) + value -def _encode_uuid(name: bytes, value: uuid.UUID, dummy: Any, opts: Any) -> bytes: +def _encode_uuid(name: bytes, value: uuid.UUID, dummy: Any, opts: CodecOptions) -> bytes: """Encode uuid.UUID.""" uuid_representation = opts.uuid_representation binval = Binary.from_uuid(value, uuid_representation=uuid_representation) @@ -686,7 +694,7 @@ def _encode_regex(name: bytes, value: Regex, dummy0: Any, dummy1: Any) -> bytes: return b"\x0B" + name + _make_c_string_check(value.pattern) + sflags -def _encode_code(name: bytes, value: Code, dummy: Any, opts: Any) -> bytes: +def _encode_code(name: bytes, value: Code, dummy: Any, opts: CodecOptions) -> bytes: """Encode bson.code.Code.""" cstring = _make_c_string(value) cstrlen = len(cstring) @@ -790,7 +798,7 @@ def _name_value_to_bson( name: bytes, value: Any, check_keys: bool, - opts: Any, + opts: CodecOptions, in_custom_call: bool = False, in_fallback_call: bool = False, ) -> bytes: @@ -843,7 +851,7 @@ def _name_value_to_bson( raise InvalidDocument("cannot encode object: %r, of type: %r" % (value, type(value))) -def _element_to_bson(key: Any, value: Any, check_keys: bool, opts: Any) -> bytes: +def _element_to_bson(key: Any, value: Any, check_keys: bool, opts: CodecOptions) -> bytes: """Encode a single key, value pair.""" if not isinstance(key, str): raise InvalidDocument("documents must have only string keys, key was %r" % (key,)) @@ -857,7 +865,7 @@ def _element_to_bson(key: Any, value: Any, check_keys: bool, opts: Any) -> bytes return _name_value_to_bson(name, value, check_keys, opts) -def _dict_to_bson(doc: Any, check_keys: bool, opts: Any, top_level: bool = True) -> bytes: +def _dict_to_bson(doc: Any, check_keys: bool, opts: CodecOptions, top_level: bool = True) -> bytes: """Encode a document to BSON.""" if _raw_document_class(doc): return cast(bytes, doc.raw) @@ -879,7 +887,7 @@ def _dict_to_bson(doc: Any, check_keys: bool, opts: Any, top_level: bool = True) _dict_to_bson = _cbson._dict_to_bson # noqa: F811 -def _millis_to_datetime(millis: int, opts: Any) -> datetime.datetime: +def _millis_to_datetime(millis: int, opts: CodecOptions) -> datetime.datetime: """Convert milliseconds since epoch UTC to datetime.""" diff = ((millis % 1000) + 1000) % 1000 seconds = (millis - diff) // 1000 @@ -904,7 +912,6 @@ def _datetime_to_millis(dtm: datetime.datetime) -> int: _DocumentIn = Mapping[str, Any] -_DocumentOut = Union[MutableMapping[str, Any], "RawBSONDocument"] _ReadableBuffer = Union[bytes, memoryview, "mmap", "array"] @@ -940,8 +947,8 @@ def encode( def decode( - data: _ReadableBuffer, codec_options: CodecOptions = DEFAULT_CODEC_OPTIONS -) -> Dict[str, Any]: + data: _ReadableBuffer, codec_options: "Optional[CodecOptions[_DocumentType]]" = None +) -> _DocumentType: """Decode BSON to a document. By default, returns a BSON document represented as a Python @@ -967,15 +974,16 @@ def decode( .. 
versionadded:: 3.9 """ - if not isinstance(codec_options, CodecOptions): + opts: CodecOptions = codec_options or DEFAULT_CODEC_OPTIONS + if not isinstance(opts, CodecOptions): raise _CODEC_OPTIONS_TYPE_ERROR - return _bson_to_dict(data, codec_options) + return _bson_to_dict(data, opts) def decode_all( - data: _ReadableBuffer, codec_options: CodecOptions = DEFAULT_CODEC_OPTIONS -) -> List[Dict[str, Any]]: + data: _ReadableBuffer, codec_options: "Optional[CodecOptions[_DocumentType]]" = None +) -> List[_DocumentType]: """Decode BSON data to multiple documents. `data` must be a bytes-like object implementing the buffer protocol that @@ -998,15 +1006,16 @@ def decode_all( Replaced `as_class`, `tz_aware`, and `uuid_subtype` options with `codec_options`. """ + opts = codec_options or DEFAULT_CODEC_OPTIONS data, view = get_data_and_view(data) - if not isinstance(codec_options, CodecOptions): + if not isinstance(opts, CodecOptions): raise _CODEC_OPTIONS_TYPE_ERROR data_len = len(data) - docs = [] + docs: List[_DocumentType] = [] position = 0 end = data_len - 1 - use_raw = _raw_document_class(codec_options.document_class) + use_raw = _raw_document_class(opts.document_class) try: while position < end: obj_size = _UNPACK_INT_FROM(data, position)[0] @@ -1017,10 +1026,10 @@ def decode_all( raise InvalidBSON("bad eoo") if use_raw: docs.append( - codec_options.document_class(data[position : obj_end + 1], codec_options) + opts.document_class(data[position : obj_end + 1], codec_options) # type: ignore ) else: - docs.append(_elements_to_dict(data, view, position + 4, obj_end, codec_options)) + docs.append(_elements_to_dict(data, view, position + 4, obj_end, opts)) position += obj_size return docs except InvalidBSON: @@ -1110,8 +1119,8 @@ def _decode_all_selective(data: Any, codec_options: CodecOptions, fields: Any) - def decode_iter( - data: bytes, codec_options: CodecOptions = DEFAULT_CODEC_OPTIONS -) -> Iterator[_DocumentOut]: + data: bytes, codec_options: "Optional[CodecOptions[_DocumentType]]" = None +) -> Iterator[_DocumentType]: """Decode BSON data to multiple documents as a generator. Works similarly to the decode_all function, but yields one document at a @@ -1131,7 +1140,8 @@ def decode_iter( .. versionadded:: 2.8 """ - if not isinstance(codec_options, CodecOptions): + opts = codec_options or DEFAULT_CODEC_OPTIONS + if not isinstance(opts, CodecOptions): raise _CODEC_OPTIONS_TYPE_ERROR position = 0 @@ -1141,12 +1151,12 @@ def decode_iter( elements = data[position : position + obj_size] position += obj_size - yield _bson_to_dict(elements, codec_options) + yield _bson_to_dict(elements, opts) def decode_file_iter( - file_obj: Union[BinaryIO, IO], codec_options: CodecOptions = DEFAULT_CODEC_OPTIONS -) -> Iterator[_DocumentOut]: + file_obj: Union[BinaryIO, IO], codec_options: "Optional[CodecOptions[_DocumentType]]" = None +) -> Iterator[_DocumentType]: """Decode bson data from a file to multiple documents as a generator. Works similarly to the decode_all function, but reads from the file object @@ -1163,6 +1173,7 @@ def decode_file_iter( .. versionadded:: 2.8 """ + opts = codec_options or DEFAULT_CODEC_OPTIONS while True: # Read size of next object. 
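        # (Every BSON document begins with a little-endian int32 holding the
        # document's total size in bytes, including these four size bytes.)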
size_data = file_obj.read(4) @@ -1172,7 +1183,7 @@ def decode_file_iter( raise InvalidBSON("cut off in middle of objsize") obj_size = _UNPACK_INT_FROM(size_data, 0)[0] - 4 elements = size_data + file_obj.read(max(0, obj_size)) - yield _bson_to_dict(elements, codec_options) + yield _bson_to_dict(elements, opts) def is_valid(bson: bytes) -> bool: @@ -1233,7 +1244,7 @@ def encode( """ return cls(encode(document, check_keys, codec_options)) - def decode(self, codec_options: CodecOptions = DEFAULT_CODEC_OPTIONS) -> Dict[str, Any]: # type: ignore[override] + def decode(self, codec_options: CodecOptions = DEFAULT_CODEC_OPTIONS) -> _DocumentType: # type: ignore[override] """Decode this BSON data. By default, returns a BSON document represented as a Python diff --git a/bson/_cbsonmodule.c b/bson/_cbsonmodule.c index 93610f7c58..8100e951cf 100644 --- a/bson/_cbsonmodule.c +++ b/bson/_cbsonmodule.c @@ -2600,7 +2600,7 @@ static PyObject* _cbson_decode_all(PyObject* self, PyObject* args) { if (!PyArg_ParseTuple(args, "O|O", &bson, &options_obj)) { return NULL; } - if (PyTuple_GET_SIZE(args) < 2) { + if ((PyTuple_GET_SIZE(args) < 2) || (options_obj == Py_None)) { if (!default_codec_options(GETSTATE(self), &options)) { return NULL; } diff --git a/bson/codec_options.py b/bson/codec_options.py index b4436dfdb8..4eaff59ea7 100644 --- a/bson/codec_options.py +++ b/bson/codec_options.py @@ -16,17 +16,17 @@ import abc import datetime -from collections import namedtuple from collections.abc import MutableMapping as _MutableMapping from typing import ( - TYPE_CHECKING, Any, Callable, Dict, Iterable, - MutableMapping, + Mapping, + NamedTuple, Optional, Type, + TypeVar, Union, cast, ) @@ -37,10 +37,6 @@ UuidRepresentation, ) -# Import RawBSONDocument for type-checking only to avoid circular dependency. -if TYPE_CHECKING: - from bson.raw_bson import RawBSONDocument - def _abstractproperty(func: Callable[..., Any]) -> property: return property(abc.abstractmethod(func)) @@ -115,7 +111,7 @@ class TypeCodec(TypeEncoder, TypeDecoder): _Codec = Union[TypeEncoder, TypeDecoder, TypeCodec] _Fallback = Callable[[Any], Any] -_DocumentClass = Union[Type[MutableMapping], Type["RawBSONDocument"]] +_DocumentType = TypeVar("_DocumentType", bound=Mapping[str, Any]) class TypeRegistry(object): @@ -152,8 +148,8 @@ def __init__( ) -> None: self.__type_codecs = list(type_codecs or []) self._fallback_encoder = fallback_encoder - self._encoder_map = {} - self._decoder_map = {} + self._encoder_map: Dict[Any, Any] = {} + self._decoder_map: Dict[Any, Any] = {} if self._fallback_encoder is not None: if not callable(fallback_encoder): @@ -202,20 +198,16 @@ def __eq__(self, other: Any) -> Any: ) -_options_base = namedtuple( # type: ignore - "CodecOptions", - ( - "document_class", - "tz_aware", - "uuid_representation", - "unicode_decode_error_handler", - "tzinfo", - "type_registry", - ), -) +class _BaseCodecOptions(NamedTuple): + document_class: Type[Mapping[str, Any]] + tz_aware: bool + uuid_representation: int + unicode_decode_error_handler: str + tzinfo: Optional[datetime.tzinfo] + type_registry: TypeRegistry -class CodecOptions(_options_base): +class CodecOptions(_BaseCodecOptions): """Encapsulates options used encoding and / or decoding BSON. The `document_class` option is used to define a custom type for use @@ -250,7 +242,7 @@ class CodecOptions(_options_base): See :doc:`/examples/datetimes` for examples using the `tz_aware` and `tzinfo` options. 
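    A minimal sketch of decoding into a custom document class; every name
    below already comes from this package::

        from bson import decode, encode
        from bson.codec_options import CodecOptions
        from bson.son import SON

        opts: CodecOptions = CodecOptions(document_class=SON, tz_aware=True)
        decoded = decode(encode({"x": 1}), codec_options=opts)
        assert isinstance(decoded, SON)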
- See :doc:`examples/uuid` for examples using the `uuid_representation` + See :doc:`/examples/uuid` for examples using the `uuid_representation` option. :Parameters: @@ -294,18 +286,27 @@ class CodecOptions(_options_base): def __new__( cls: Type["CodecOptions"], - document_class: _DocumentClass = dict, + document_class: Optional[Type[Mapping[str, Any]]] = None, tz_aware: bool = False, uuid_representation: Optional[int] = UuidRepresentation.UNSPECIFIED, - unicode_decode_error_handler: Optional[str] = "strict", + unicode_decode_error_handler: str = "strict", tzinfo: Optional[datetime.tzinfo] = None, type_registry: Optional[TypeRegistry] = None, ) -> "CodecOptions": - if not (issubclass(document_class, _MutableMapping) or _raw_document_class(document_class)): + doc_class = document_class or dict + # issubclass can raise TypeError for generic aliases like SON[str, Any]. + # In that case we can use the base class for the comparison. + is_mapping = False + try: + is_mapping = issubclass(doc_class, _MutableMapping) + except TypeError: + if hasattr(doc_class, "__origin__"): + is_mapping = issubclass(doc_class.__origin__, _MutableMapping) # type: ignore[union-attr] + if not (is_mapping or _raw_document_class(doc_class)): raise TypeError( "document_class must be dict, bson.son.SON, " "bson.raw_bson.RawBSONDocument, or a " - "sublass of collections.abc.MutableMapping" + "subclass of collections.abc.MutableMapping" ) if not isinstance(tz_aware, bool): raise TypeError("tz_aware must be True or False") @@ -313,8 +314,8 @@ def __new__( raise ValueError( "uuid_representation must be a value from bson.binary.UuidRepresentation" ) - if not isinstance(unicode_decode_error_handler, (str, None)): # type: ignore - raise ValueError("unicode_decode_error_handler must be a string or None") + if not isinstance(unicode_decode_error_handler, str): + raise ValueError("unicode_decode_error_handler must be a string") if tzinfo is not None: if not isinstance(tzinfo, datetime.tzinfo): raise TypeError("tzinfo must be an instance of datetime.tzinfo") @@ -329,7 +330,7 @@ def __new__( return tuple.__new__( cls, ( - document_class, + doc_class, tz_aware, uuid_representation, unicode_decode_error_handler, @@ -392,7 +393,7 @@ def with_options(self, **kwargs: Any) -> "CodecOptions": return CodecOptions(**opts) -DEFAULT_CODEC_OPTIONS: CodecOptions = CodecOptions() +DEFAULT_CODEC_OPTIONS = CodecOptions() def _parse_codec_options(options: Any) -> CodecOptions: diff --git a/bson/codec_options.pyi b/bson/codec_options.pyi new file mode 100644 index 0000000000..9d5f5c2656 --- /dev/null +++ b/bson/codec_options.pyi @@ -0,0 +1,100 @@ +# Copyright 2022-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Workaround for https://bugs.python.org/issue43923. +Ideally we would have done this with a single class, but +generic subclasses *must* take a parameter, and prior to Python 3.9 +or in Python 3.7 and 3.8 with `from __future__ import annotations`, +you get the error: "TypeError: 'type' object is not subscriptable". 
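+With this stub in place, a type checker accepts parametrized annotations
+such as CodecOptions[Dict[str, Any]], while the runtime class defined in
+codec_options.py remains unparametrized.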
+""" + +import datetime +import abc +from typing import Tuple, Generic, Optional, Mapping, Any, TypeVar, Type, Dict, Iterable, Tuple, MutableMapping, Callable, Union + + +class TypeEncoder(abc.ABC, metaclass=abc.ABCMeta): + @property + @abc.abstractmethod + def python_type(self) -> Any: ... + @abc.abstractmethod + def transform_python(self, value: Any) -> Any: ... + +class TypeDecoder(abc.ABC, metaclass=abc.ABCMeta): + @property + @abc.abstractmethod + def bson_type(self) -> Any: ... + @abc.abstractmethod + def transform_bson(self, value: Any) -> Any: ... + +class TypeCodec(TypeEncoder, TypeDecoder, metaclass=abc.ABCMeta): ... + +Codec = Union[TypeEncoder, TypeDecoder, TypeCodec] +Fallback = Callable[[Any], Any] + +class TypeRegistry: + _decoder_map: Dict[Any, Any] + _encoder_map: Dict[Any, Any] + _fallback_encoder: Optional[Fallback] + + def __init__(self, type_codecs: Optional[Iterable[Codec]] = ..., fallback_encoder: Optional[Fallback] = ...) -> None: ... + def __eq__(self, other: Any) -> Any: ... + + +_DocumentType = TypeVar("_DocumentType", bound=Mapping[str, Any]) + + +class CodecOptions(Tuple, Generic[_DocumentType]): + document_class: Type[_DocumentType] + tz_aware: bool + uuid_representation: int + unicode_decode_error_handler: Optional[str] + tzinfo: Optional[datetime.tzinfo] + type_registry: TypeRegistry + + def __new__( + cls: Type[CodecOptions], + document_class: Optional[Type[_DocumentType]] = ..., + tz_aware: bool = ..., + uuid_representation: Optional[int] = ..., + unicode_decode_error_handler: Optional[str] = ..., + tzinfo: Optional[datetime.tzinfo] = ..., + type_registry: Optional[TypeRegistry] = ..., + ) -> CodecOptions[_DocumentType]: ... + + # CodecOptions API + def with_options(self, **kwargs: Any) -> CodecOptions[_DocumentType]: ... + + def _arguments_repr(self) -> str: ... + + def _options_dict(self) -> Dict[Any, Any]: ... + + # NamedTuple API + @classmethod + def _make(cls, obj: Iterable) -> CodecOptions[_DocumentType]: ... + + def _asdict(self) -> Dict[str, Any]: ... + + def _replace(self, **kwargs: Any) -> CodecOptions[_DocumentType]: ... + + _source: str + _fields: Tuple[str] + + +DEFAULT_CODEC_OPTIONS: CodecOptions[MutableMapping[str, Any]] +_RAW_BSON_DOCUMENT_MARKER: int + +def _raw_document_class(document_class: Any) -> bool: ... + +def _parse_codec_options(options: Any) -> CodecOptions: ... diff --git a/doc/changelog.rst b/doc/changelog.rst index 73e2ea9ba4..d326c24b32 100644 --- a/doc/changelog.rst +++ b/doc/changelog.rst @@ -26,6 +26,10 @@ PyMongo 4.1 brings a number of improvements including: - :meth:`gridfs.GridOut.seek` now returns the new position in the file, to conform to the behavior of :meth:`io.IOBase.seek`. +Breaking Changes in 4.1 +....................... +- Removed support for Python 3.6.0 and 3.6.1, Python 3.6.2+ is now required. + Bug fixes ......... @@ -57,7 +61,7 @@ before upgrading from PyMongo 3.x. Breaking Changes in 4.0 ....................... -- Removed support for Python 2.7, 3.4, and 3.5. Python 3.6+ is now required. +- Removed support for Python 2.7, 3.4, and 3.5. Python 3.6.2+ is now required. 
- The default uuid_representation for :class:`~bson.codec_options.CodecOptions`, :class:`~bson.json_util.JSONOptions`, and :class:`~pymongo.mongo_client.MongoClient` has been changed from diff --git a/doc/examples/tls.rst b/doc/examples/tls.rst index f6920ad278..9c3c2c829c 100644 --- a/doc/examples/tls.rst +++ b/doc/examples/tls.rst @@ -181,7 +181,7 @@ server's certificate:: This often occurs because OpenSSL does not have access to the system's root certificates or the certificates are out of date. Linux users should ensure that they have the latest root certificate updates installed from -their Linux vendor. macOS users using Python 3.6.0 or newer downloaded +their Linux vendor. macOS users using Python 3.6.2 or newer downloaded from python.org `may have to run a script included with python `_ to install root certificates:: diff --git a/doc/faq.rst b/doc/faq.rst index a7f7c87bdd..0d045f7629 100644 --- a/doc/faq.rst +++ b/doc/faq.rst @@ -145,7 +145,7 @@ they are returned to the pool. Does PyMongo support Python 3? ------------------------------ -PyMongo supports CPython 3.6+ and PyPy3.6+. See the :doc:`python3` for details. +PyMongo supports CPython 3.6.2+ and PyPy3.6+. See the :doc:`python3` for details. Does PyMongo support asynchronous frameworks like Gevent, asyncio, Tornado, or Twisted? --------------------------------------------------------------------------------------- diff --git a/doc/installation.rst b/doc/installation.rst index 9c9d80c7a1..4f14b31125 100644 --- a/doc/installation.rst +++ b/doc/installation.rst @@ -28,7 +28,7 @@ To upgrade using pip:: Dependencies ------------ -PyMongo supports CPython 3.6+ and PyPy3.6+. +PyMongo supports CPython 3.6.2+ and PyPy3.6+. Optional dependencies: @@ -133,7 +133,7 @@ See `http://bugs.python.org/issue11623 `_ for a more detailed explanation. **Lion (10.7) and newer** - PyMongo's C extensions can be built against -versions of Python 3.6+ downloaded from python.org. In all cases Xcode must be +versions of Python 3.6.2+ downloaded from python.org. In all cases Xcode must be installed with 'UNIX Development Support'. **Xcode 5.1**: Starting with version 5.1 the version of clang that ships with diff --git a/doc/migrate-to-pymongo4.rst b/doc/migrate-to-pymongo4.rst index 6fcbdf5011..6d290dd51b 100644 --- a/doc/migrate-to-pymongo4.rst +++ b/doc/migrate-to-pymongo4.rst @@ -34,7 +34,7 @@ Python 3.6+ ----------- PyMongo 4.0 drops support for Python 2.7, 3.4, and 3.5. Users who wish to -upgrade to 4.x must first upgrade to Python 3.6+. Users upgrading from +upgrade to 4.x must first upgrade to Python 3.6.2+. Users upgrading from Python 2 should consult the :doc:`python3`. Enable Deprecation Warnings diff --git a/doc/python3.rst b/doc/python3.rst index e001c55c8e..c14224166a 100644 --- a/doc/python3.rst +++ b/doc/python3.rst @@ -6,7 +6,7 @@ Python 3 FAQ What Python 3 versions are supported? ------------------------------------- -PyMongo supports CPython 3.6+ and PyPy3.6+. +PyMongo supports CPython 3.6.2+ and PyPy3.6+. Are there any PyMongo behavior changes with Python 3? ----------------------------------------------------- @@ -20,8 +20,8 @@ with subtype 0. For example, let's insert a :class:`bytes` instance using Python 3 then read it back. 
Notice the byte string is decoded back to :class:`bytes`:: - Python 3.6.1 (v3.6.1:69c0db5050, Mar 21 2017, 01:21:04) - [GCC 4.9.3] on linux + Python 3.6.8 (v3.6.8:3c6b436a57, Dec 24 2018, 02:04:31) + [GCC 4.2.1 Compatible Apple LLVM 6.0 (clang-600.0.57)] on darwin Type "help", "copyright", "credits" or "license" for more information. >>> import pymongo >>> c = pymongo.MongoClient() @@ -49,8 +49,8 @@ decoded to :class:`~bson.binary.Binary` with subtype 0. For example, let's decode a JSON binary subtype 0 using Python 3. Notice the byte string is decoded to :class:`bytes`:: - Python 3.6.1 (v3.6.1:69c0db5050, Mar 21 2017, 01:21:04) - [GCC 4.2.1 (Apple Inc. build 5666) (dot 3)] on darwin + Python 3.6.8 (v3.6.8:3c6b436a57, Dec 24 2018, 02:04:31) + [GCC 4.2.1 Compatible Apple LLVM 6.0 (clang-600.0.57)] on darwin Type "help", "copyright", "credits" or "license" for more information. >>> from bson.json_util import loads >>> loads('{"b": {"$binary": "dGhpcyBpcyBhIGJ5dGUgc3RyaW5n", "$type": "00"}}') @@ -86,8 +86,8 @@ Python 3 you must pass ``encoding='latin-1'`` to pickle.loads:: >>> pickle.dumps(oid) 'ccopy_reg\n_reconstructor\np0\n(cbson.objectid\...' - Python 3.6.1 (v3.6.1:69c0db5050, Mar 21 2017, 01:21:04) - [GCC 4.9.3] on linux + Python 3.6.8 (v3.6.8:3c6b436a57, Dec 24 2018, 02:04:31) + [GCC 4.2.1 Compatible Apple LLVM 6.0 (clang-600.0.57)] on darwin Type "help", "copyright", "credits" or "license" for more information. >>> import pickle >>> pickle.loads(b'ccopy_reg\n_reconstructor\np0\n(cbson.objectid\...', encoding='latin-1') @@ -97,8 +97,8 @@ Python 3 you must pass ``encoding='latin-1'`` to pickle.loads:: If you need to pickle ObjectIds using Python 3 and unpickle them using Python 2 you must use ``protocol <= 2``:: - Python 3.6.5 (default, Jun 21 2018, 15:09:09) - [GCC 7.3.0] on linux + Python 3.6.8 (v3.6.8:3c6b436a57, Dec 24 2018, 02:04:31) + [GCC 4.2.1 Compatible Apple LLVM 6.0 (clang-600.0.57)] on darwin Type "help", "copyright", "credits" or "license" for more information. >>> import pickle >>> from bson.objectid import ObjectId diff --git a/pymongo/collection.py b/pymongo/collection.py index ad75fb760c..dc344b640f 100644 --- a/pymongo/collection.py +++ b/pymongo/collection.py @@ -2156,7 +2156,7 @@ def list_indexes( .. 
versionadded:: 3.0 """ - codec_options = CodecOptions(SON) + codec_options: CodecOptions = CodecOptions(SON) coll = self.with_options( codec_options=codec_options, read_preference=ReadPreference.PRIMARY ) diff --git a/pymongo/database.py b/pymongo/database.py index 934b502191..6f2d0fd5cc 100644 --- a/pymongo/database.py +++ b/pymongo/database.py @@ -23,6 +23,7 @@ MutableMapping, Optional, Sequence, + TypeVar, Union, cast, ) @@ -38,7 +39,7 @@ from pymongo.command_cursor import CommandCursor from pymongo.errors import CollectionInvalid, InvalidName from pymongo.read_preferences import ReadPreference, _ServerMode -from pymongo.typings import _CollationIn, _DocumentOut, _DocumentType, _Pipeline +from pymongo.typings import _CollationIn, _DocumentType, _Pipeline def _check_name(name): @@ -58,6 +59,9 @@ def _check_name(name): from pymongo.write_concern import WriteConcern +_CodecDocumentType = TypeVar("_CodecDocumentType", bound=Mapping[str, Any]) + + class Database(common.BaseObject, Generic[_DocumentType]): """A Mongo database.""" @@ -617,11 +621,11 @@ def command( check: bool = True, allowable_errors: Optional[Sequence[Union[str, int]]] = None, read_preference: Optional[_ServerMode] = None, - codec_options: Optional[CodecOptions] = DEFAULT_CODEC_OPTIONS, + codec_options: "Optional[CodecOptions[_CodecDocumentType]]" = None, session: Optional["ClientSession"] = None, comment: Optional[Any] = None, **kwargs: Any, - ) -> _DocumentOut: + ) -> _CodecDocumentType: """Issue a MongoDB command. Send command `command` to the database and return the @@ -707,6 +711,7 @@ def command( .. seealso:: The MongoDB documentation on `commands `_. """ + opts = codec_options or DEFAULT_CODEC_OPTIONS if comment is not None: kwargs["comment"] = comment @@ -723,7 +728,7 @@ def command( check, allowable_errors, read_preference, - codec_options, + opts, session=session, **kwargs, ) diff --git a/pymongo/encryption.py b/pymongo/encryption.py index 502c83e47b..1e06f7062d 100644 --- a/pymongo/encryption.py +++ b/pymongo/encryption.py @@ -56,7 +56,7 @@ _KMS_CONNECT_TIMEOUT = 10 # TODO: CDRIVER-3262 will define this value. _MONGOCRYPTD_TIMEOUT_MS = 10000 -_DATA_KEY_OPTS = CodecOptions(document_class=SON, uuid_representation=STANDARD) +_DATA_KEY_OPTS: CodecOptions = CodecOptions(document_class=SON, uuid_representation=STANDARD) # Use RawBSONDocument codec options to avoid needlessly decoding # documents from the key vault. _KEY_VAULT_OPTS = CodecOptions(document_class=RawBSONDocument, uuid_representation=STANDARD) @@ -572,7 +572,7 @@ def encrypt( encrypted_doc = self._encryption.encrypt( doc, algorithm, key_id=key_id, key_alt_name=key_alt_name ) - return decode(encrypted_doc)["v"] + return decode(encrypted_doc)["v"] # type: ignore[index] def decrypt(self, value: Binary) -> Any: """Decrypt an encrypted value. 
diff --git a/pymongo/message.py b/pymongo/message.py index 92d59c3ebd..58f71629d6 100644 --- a/pymongo/message.py +++ b/pymongo/message.py @@ -24,7 +24,7 @@ import random import struct from io import BytesIO as _BytesIO -from typing import Any +from typing import Any, Dict import bson from bson import CodecOptions, _decode_selective, _dict_to_bson, _make_c_string, encode @@ -76,7 +76,9 @@ } _FIELD_MAP = {"insert": "documents", "update": "updates", "delete": "deletes"} -_UNICODE_REPLACE_CODEC_OPTIONS = CodecOptions(unicode_decode_error_handler="replace") +_UNICODE_REPLACE_CODEC_OPTIONS: "CodecOptions[Dict[str, Any]]" = CodecOptions( + unicode_decode_error_handler="replace" +) def _randint(): @@ -1259,7 +1261,7 @@ def raw_response(self, cursor_id=None, user_fields=None): errobj = {"ok": 0, "errmsg": msg, "code": 43} raise CursorNotFound(msg, 43, errobj) elif self.flags & 2: - error_object = bson.BSON(self.documents).decode() + error_object: dict = bson.BSON(self.documents).decode() # Fake the ok field if it doesn't exist. error_object.setdefault("ok", 0) if error_object["$err"].startswith(HelloCompat.LEGACY_ERROR): diff --git a/pymongo/mongo_client.py b/pymongo/mongo_client.py index ee89279812..4231db95ae 100644 --- a/pymongo/mongo_client.py +++ b/pymongo/mongo_client.py @@ -649,10 +649,11 @@ def __init__( client.__my_database__ """ + doc_class = document_class or dict self.__init_kwargs: Dict[str, Any] = { "host": host, "port": port, - "document_class": document_class or dict, + "document_class": doc_class, "tz_aware": tz_aware, "connect": connect, "type_registry": type_registry, @@ -676,7 +677,7 @@ def __init__( # Parse options passed as kwargs. keyword_opts = common._CaseInsensitiveDictionary(kwargs) - keyword_opts["document_class"] = document_class or dict + keyword_opts["document_class"] = doc_class seeds = set() username = None diff --git a/pymongo/monitoring.py b/pymongo/monitoring.py index 4798542dc7..ad604f3f16 100644 --- a/pymongo/monitoring.py +++ b/pymongo/monitoring.py @@ -182,12 +182,12 @@ def connection_checked_in(self, event): import datetime from collections import abc, namedtuple -from typing import TYPE_CHECKING, Any, Dict, Mapping, Optional +from typing import TYPE_CHECKING, Any, Dict, Optional from bson.objectid import ObjectId from pymongo.hello import Hello, HelloCompat from pymongo.helpers import _handle_exception -from pymongo.typings import _Address +from pymongo.typings import _Address, _DocumentOut if TYPE_CHECKING: from pymongo.server_description import ServerDescription @@ -208,9 +208,6 @@ def connection_checked_in(self, event): _LISTENERS = _Listeners([], [], [], [], []) -_DocumentOut = Mapping[str, Any] - - class _EventListener(object): """Abstract base class for all event listeners.""" @@ -635,7 +632,7 @@ def __init__( ) cmd_name = command_name.lower() if cmd_name in _SENSITIVE_COMMANDS or _is_speculative_authenticate(cmd_name, command): - self.__cmd: Mapping[str, Any] = {} + self.__cmd: _DocumentOut = {} else: self.__cmd = command self.__db = database_name @@ -693,7 +690,7 @@ def __init__( self.__duration_micros = _to_micros(duration) cmd_name = command_name.lower() if cmd_name in _SENSITIVE_COMMANDS or _is_speculative_authenticate(cmd_name, reply): - self.__reply: Mapping[str, Any] = {} + self.__reply: _DocumentOut = {} else: self.__reply = reply diff --git a/setup.py b/setup.py index 699ced1f85..5bae7dc211 100755 --- a/setup.py +++ b/setup.py @@ -4,8 +4,8 @@ import sys import warnings -if sys.version_info[:2] < (3, 6): - raise RuntimeError("Python version 
>= 3.6 required.") +if sys.version_info[:3] < (3, 6, 2): + raise RuntimeError("Python version >= 3.6.2 required.") # Hack to silence atexit traceback in some Python versions @@ -321,7 +321,7 @@ def build_extension(self, ext): keywords=["mongo", "mongodb", "pymongo", "gridfs", "bson"], install_requires=[], license="Apache License, Version 2.0", - python_requires=">=3.6", + python_requires=">=3.6.2", classifiers=[ "Development Status :: 5 - Production/Stable", "Intended Audience :: Developers", diff --git a/test/test_binary.py b/test/test_binary.py index 7d0ef2ce2e..65abdca796 100644 --- a/test/test_binary.py +++ b/test/test_binary.py @@ -158,19 +158,19 @@ def test_uuid_subtype_4(self): def test_legacy_java_uuid(self): # Test decoding data = self.java_data - docs = bson.decode_all(data, CodecOptions(SON, False, PYTHON_LEGACY)) + docs = bson.decode_all(data, CodecOptions(SON[str, Any], False, PYTHON_LEGACY)) for d in docs: self.assertNotEqual(d["newguid"], uuid.UUID(d["newguidstring"])) - docs = bson.decode_all(data, CodecOptions(SON, False, STANDARD)) + docs = bson.decode_all(data, CodecOptions(SON[str, Any], False, STANDARD)) for d in docs: self.assertNotEqual(d["newguid"], uuid.UUID(d["newguidstring"])) - docs = bson.decode_all(data, CodecOptions(SON, False, CSHARP_LEGACY)) + docs = bson.decode_all(data, CodecOptions(SON[str, Any], False, CSHARP_LEGACY)) for d in docs: self.assertNotEqual(d["newguid"], uuid.UUID(d["newguidstring"])) - docs = bson.decode_all(data, CodecOptions(SON, False, JAVA_LEGACY)) + docs = bson.decode_all(data, CodecOptions(SON[str, Any], False, JAVA_LEGACY)) for d in docs: self.assertEqual(d["newguid"], uuid.UUID(d["newguidstring"])) @@ -198,7 +198,7 @@ def test_legacy_java_uuid(self): @client_context.require_connection def test_legacy_java_uuid_roundtrip(self): data = self.java_data - docs = bson.decode_all(data, CodecOptions(SON, False, JAVA_LEGACY)) + docs = bson.decode_all(data, CodecOptions(SON[str, Any], False, JAVA_LEGACY)) client_context.client.pymongo_test.drop_collection("java_uuid") db = client_context.client.pymongo_test @@ -218,19 +218,19 @@ def test_legacy_csharp_uuid(self): data = self.csharp_data # Test decoding - docs = bson.decode_all(data, CodecOptions(SON, False, PYTHON_LEGACY)) + docs = bson.decode_all(data, CodecOptions(SON[str, Any], False, PYTHON_LEGACY)) for d in docs: self.assertNotEqual(d["newguid"], uuid.UUID(d["newguidstring"])) - docs = bson.decode_all(data, CodecOptions(SON, False, STANDARD)) + docs = bson.decode_all(data, CodecOptions(SON[str, Any], False, STANDARD)) for d in docs: self.assertNotEqual(d["newguid"], uuid.UUID(d["newguidstring"])) - docs = bson.decode_all(data, CodecOptions(SON, False, JAVA_LEGACY)) + docs = bson.decode_all(data, CodecOptions(SON[str, Any], False, JAVA_LEGACY)) for d in docs: self.assertNotEqual(d["newguid"], uuid.UUID(d["newguidstring"])) - docs = bson.decode_all(data, CodecOptions(SON, False, CSHARP_LEGACY)) + docs = bson.decode_all(data, CodecOptions(SON[str, Any], False, CSHARP_LEGACY)) for d in docs: self.assertEqual(d["newguid"], uuid.UUID(d["newguidstring"])) @@ -258,7 +258,7 @@ def test_legacy_csharp_uuid(self): @client_context.require_connection def test_legacy_csharp_uuid_roundtrip(self): data = self.csharp_data - docs = bson.decode_all(data, CodecOptions(SON, False, CSHARP_LEGACY)) + docs = bson.decode_all(data, CodecOptions(SON[str, Any], False, CSHARP_LEGACY)) client_context.client.pymongo_test.drop_collection("csharp_uuid") db = client_context.client.pymongo_test diff --git 
a/test/test_bson.py b/test/test_bson.py index 9bf8df897a..b0dce7db4e 100644 --- a/test/test_bson.py +++ b/test/test_bson.py @@ -744,12 +744,12 @@ def test_dates(self): def test_custom_class(self): self.assertIsInstance(decode(encode({})), dict) self.assertNotIsInstance(decode(encode({})), SON) - self.assertIsInstance(decode(encode({}), CodecOptions(document_class=SON)), SON) + self.assertIsInstance(decode(encode({}), CodecOptions(document_class=SON)), SON) # type: ignore[type-var] - self.assertEqual(1, decode(encode({"x": 1}), CodecOptions(document_class=SON))["x"]) + self.assertEqual(1, decode(encode({"x": 1}), CodecOptions(document_class=SON))["x"]) # type: ignore[type-var] x = encode({"x": [{"y": 1}]}) - self.assertIsInstance(decode(x, CodecOptions(document_class=SON))["x"][0], SON) + self.assertIsInstance(decode(x, CodecOptions(document_class=SON))["x"][0], SON) # type: ignore[type-var] def test_subclasses(self): # make sure we can serialize subclasses of native Python types. @@ -772,7 +772,7 @@ class _myunicode(str): def test_ordered_dict(self): d = OrderedDict([("one", 1), ("two", 2), ("three", 3), ("four", 4)]) - self.assertEqual(d, decode(encode(d), CodecOptions(document_class=OrderedDict))) + self.assertEqual(d, decode(encode(d), CodecOptions(document_class=OrderedDict))) # type: ignore[type-var] def test_bson_regex(self): # Invalid Python regex, though valid PCRE. @@ -954,7 +954,7 @@ def __repr__(self): class TestCodecOptions(unittest.TestCase): def test_document_class(self): self.assertRaises(TypeError, CodecOptions, document_class=object) - self.assertIs(SON, CodecOptions(document_class=SON).document_class) + self.assertIs(SON, CodecOptions(document_class=SON).document_class) # type: ignore[type-var] def test_tz_aware(self): self.assertRaises(TypeError, CodecOptions, tz_aware=1) @@ -993,6 +993,19 @@ def test_decode_all_defaults(self): with self.assertRaisesRegex(ValueError, "cannot encode native uuid"): bson.decode_all(bson.encode({"uuid": uuid.uuid4()})) + def test_decode_all_no_options(self): + # Test decode_all()'s default document_class is dict and tz_aware is + # False. + doc = {"sub_document": {}, "dt": datetime.datetime.utcnow()} + + decoded = bson.decode_all(bson.encode(doc), None)[0] + self.assertIsInstance(decoded["sub_document"], dict) + self.assertIsNone(decoded["dt"].tzinfo) + + doc2 = {"id": Binary.from_uuid(uuid.uuid4())} + decoded = bson.decode_all(bson.encode(doc2), None)[0] + self.assertIsInstance(decoded["id"], Binary) + def test_unicode_decode_error_handler(self): enc = encode({"keystr": "foobar"}) diff --git a/test/test_bson_corpus.py b/test/test_bson_corpus.py index 4f8fc7413a..193a6dff3d 100644 --- a/test/test_bson_corpus.py +++ b/test/test_bson_corpus.py @@ -71,8 +71,8 @@ # Need to set tz_aware=True in order to use "strict" dates in extended JSON. -codec_options = CodecOptions(tz_aware=True, document_class=SON) -codec_options_no_tzaware = CodecOptions(document_class=SON) +codec_options: CodecOptions = CodecOptions(tz_aware=True, document_class=SON) +codec_options_no_tzaware: CodecOptions = CodecOptions(document_class=SON) # We normally encode UUID as binary subtype 0x03, # but we'll need to encode to subtype 0x04 for one of the tests. 
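# (CodecOptions is a NamedTuple, so _replace() returns a new, otherwise
# identical options instance with only uuid_representation overridden.)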
codec_options_uuid_04 = codec_options._replace(uuid_representation=STANDARD)
diff --git a/test/test_custom_types.py b/test/test_custom_types.py
index a7073cde93..e11b5ebe00 100644
--- a/test/test_custom_types.py
+++ b/test/test_custom_types.py
@@ -538,10 +538,10 @@ def transform_bson(self, value):
         self.assertEqual(
             type_registry._encoder_map,
-            {MyIntEncoder.python_type: codec_instances[1].transform_python},  # type: ignore[has-type]
+            {MyIntEncoder.python_type: codec_instances[1].transform_python},
         )
         self.assertEqual(
-            type_registry._decoder_map, {MyIntDecoder.bson_type: codec_instances[0].transform_bson}  # type: ignore[has-type]
+            type_registry._decoder_map, {MyIntDecoder.bson_type: codec_instances[0].transform_bson}
         )
 
     def test_initialize_fail(self):
diff --git a/test/test_mypy.py b/test/test_mypy.py
index 55794e138e..6cf3eb2c87 100644
--- a/test/test_mypy.py
+++ b/test/test_mypy.py
@@ -16,8 +16,9 @@ sample client code that uses PyMongo typings."""
 
 import os
+import tempfile
 import unittest
-from typing import TYPE_CHECKING, Any, Dict, Iterable, List
+from typing import TYPE_CHECKING, Any, Dict, Iterable, Iterator, List
 
 try:
     from typing import TypedDict  # type: ignore[attr-defined]
@@ -39,6 +40,7 @@ class Movie(TypedDict):  # type: ignore[misc]
 
 from test import IntegrationTest
 from test.utils import rs_or_single_client
+from bson import CodecOptions, decode, decode_all, decode_file_iter, decode_iter, encode
 from bson.raw_bson import RawBSONDocument
 from bson.son import SON
 from pymongo.collection import Collection
@@ -54,6 +56,15 @@ def get_tests() -> Iterable[str]:
             yield os.path.join(dirpath, filename)
 
 
+def only_type_check(func):
+    def inner(*args, **kwargs):
+        if not TYPE_CHECKING:
+            raise unittest.SkipTest("Used for Type Checking Only")
+        func(*args, **kwargs)
+
+    return inner
+
+
 class TestMypyFails(unittest.TestCase):
     def ensure_mypy_fails(self, filename: str) -> None:
         if api is None:
@@ -105,7 +116,7 @@ def test_bulk_write(self) -> None:
         self.assertTrue(result.acknowledged)
 
     def test_command(self) -> None:
-        result = self.client.admin.command("ping")
+        result: Dict = self.client.admin.command("ping")
         items = result.items()
 
     def test_list_collections(self) -> None:
@@ -127,18 +138,154 @@ def test_default_document_type(self) -> None:
         assert retreived is not None
         retreived["a"] = 1
 
+    def test_aggregate_pipeline(self) -> None:
+        coll3 = self.client.test.test3
+        coll3.insert_many(
+            [
+                {"x": 1, "tags": ["dog", "cat"]},
+                {"x": 2, "tags": ["cat"]},
+                {"x": 2, "tags": ["mouse", "cat", "dog"]},
+                {"x": 3, "tags": []},
+            ]
+        )
+
+        class mydict(Dict[str, Any]):
+            pass
+
+        result = coll3.aggregate(
+            [
+                mydict({"$unwind": "$tags"}),
+                {"$group": {"_id": "$tags", "count": {"$sum": 1}}},
+                {"$sort": SON([("count", -1), ("_id", -1)])},
+            ]
+        )
+        self.assertTrue(len(list(result)))
+
+
+class TestDecode(unittest.TestCase):
+    def test_bson_decode(self) -> None:
+        doc = {"_id": 1}
+        bsonbytes = encode(doc)
+        rt_document: Dict[str, Any] = decode(bsonbytes)
+        assert rt_document["_id"] == 1
+        rt_document["foo"] = "bar"
+
+        class MyDict(Dict[str, Any]):
+            def foo(self):
+                return "bar"
+
+        codec_options = CodecOptions(document_class=MyDict)
+        bsonbytes2 = encode(doc, codec_options=codec_options)
+        rt_document2 = decode(bsonbytes2, codec_options=codec_options)
+        assert rt_document2.foo() == "bar"
+
+        codec_options2 = CodecOptions(document_class=RawBSONDocument)
+        bsonbytes3 = encode(doc, codec_options=codec_options2)
+        rt_document3 = decode(bsonbytes3, codec_options=codec_options2)
+        assert
rt_document3.raw + + def test_bson_decode_all(self) -> None: + doc = {"_id": 1} + bsonbytes = encode(doc) + bsonbytes += encode(doc) + rt_documents: List[Dict[str, Any]] = decode_all(bsonbytes) + assert rt_documents[0]["_id"] == 1 + rt_documents[0]["foo"] = "bar" + + class MyDict(Dict[str, Any]): + def foo(self): + return "bar" + + codec_options2 = CodecOptions(MyDict) + bsonbytes2 = encode(doc, codec_options=codec_options2) + bsonbytes2 += encode(doc, codec_options=codec_options2) + rt_documents2 = decode_all(bsonbytes2, codec_options2) + assert rt_documents2[0].foo() == "bar" + + codec_options3 = CodecOptions(RawBSONDocument) + bsonbytes3 = encode(doc, codec_options=codec_options3) + bsonbytes3 += encode(doc, codec_options=codec_options3) + rt_documents3 = decode_all(bsonbytes3, codec_options3) + assert rt_documents3[0].raw + + def test_bson_decode_iter(self) -> None: + doc = {"_id": 1} + bsonbytes = encode(doc) + bsonbytes += encode(doc) + rt_documents: Iterator[Dict[str, Any]] = decode_iter(bsonbytes) + assert next(rt_documents)["_id"] == 1 + next(rt_documents)["foo"] = "bar" + + class MyDict(Dict[str, Any]): + def foo(self): + return "bar" + + codec_options2 = CodecOptions(MyDict) + bsonbytes2 = encode(doc, codec_options=codec_options2) + bsonbytes2 += encode(doc, codec_options=codec_options2) + rt_documents2 = decode_iter(bsonbytes2, codec_options2) + assert next(rt_documents2).foo() == "bar" + + codec_options3 = CodecOptions(RawBSONDocument) + bsonbytes3 = encode(doc, codec_options=codec_options3) + bsonbytes3 += encode(doc, codec_options=codec_options3) + rt_documents3 = decode_iter(bsonbytes3, codec_options3) + assert next(rt_documents3).raw + + def make_tempfile(self, content: bytes) -> Any: + fileobj = tempfile.TemporaryFile() + fileobj.write(content) + fileobj.seek(0) + self.addCleanup(fileobj.close) + return fileobj + + def test_bson_decode_file_iter(self) -> None: + doc = {"_id": 1} + bsonbytes = encode(doc) + bsonbytes += encode(doc) + fileobj = self.make_tempfile(bsonbytes) + rt_documents: Iterator[Dict[str, Any]] = decode_file_iter(fileobj) + assert next(rt_documents)["_id"] == 1 + next(rt_documents)["foo"] = "bar" + + class MyDict(Dict[str, Any]): + def foo(self): + return "bar" + + codec_options2 = CodecOptions(MyDict) + bsonbytes2 = encode(doc, codec_options=codec_options2) + bsonbytes2 += encode(doc, codec_options=codec_options2) + fileobj2 = self.make_tempfile(bsonbytes2) + rt_documents2 = decode_file_iter(fileobj2, codec_options2) + assert next(rt_documents2).foo() == "bar" + + codec_options3 = CodecOptions(RawBSONDocument) + bsonbytes3 = encode(doc, codec_options=codec_options3) + bsonbytes3 += encode(doc, codec_options=codec_options3) + fileobj3 = self.make_tempfile(bsonbytes3) + rt_documents3 = decode_file_iter(fileobj3, codec_options3) + assert next(rt_documents3).raw + + +class TestDocumentType(unittest.TestCase): + @only_type_check + def test_default(self) -> None: + client: MongoClient = MongoClient() + coll = client.test.test + retreived = coll.find_one({"_id": "foo"}) + assert retreived is not None + retreived["a"] = 1 + + @only_type_check def test_explicit_document_type(self) -> None: - if not TYPE_CHECKING: - raise unittest.SkipTest("Do not use raw MongoClient") client: MongoClient[Dict[str, Any]] = MongoClient() coll = client.test.test retreived = coll.find_one({"_id": "foo"}) assert retreived is not None retreived["a"] = 1 + @only_type_check def test_typeddict_document_type(self) -> None: - if not TYPE_CHECKING: - raise unittest.SkipTest("Do not use raw 
MongoClient") client: MongoClient[Movie] = MongoClient() coll = client.test.test retreived = coll.find_one({"_id": "foo"}) @@ -146,46 +293,88 @@ def test_typeddict_document_type(self) -> None: assert retreived["year"] == 1 assert retreived["name"] == "a" + @only_type_check def test_raw_bson_document_type(self) -> None: - if not TYPE_CHECKING: - raise unittest.SkipTest("Do not use raw MongoClient") client = MongoClient(document_class=RawBSONDocument) coll = client.test.test retreived = coll.find_one({"_id": "foo"}) assert retreived is not None assert len(retreived.raw) > 0 + @only_type_check def test_son_document_type(self) -> None: - if not TYPE_CHECKING: - raise unittest.SkipTest("Do not use raw MongoClient") client = MongoClient(document_class=SON[str, Any]) coll = client.test.test retreived = coll.find_one({"_id": "foo"}) assert retreived is not None retreived["a"] = 1 - def test_aggregate_pipeline(self) -> None: - coll3 = self.client.test.test3 - coll3.insert_many( - [ - {"x": 1, "tags": ["dog", "cat"]}, - {"x": 2, "tags": ["cat"]}, - {"x": 2, "tags": ["mouse", "cat", "dog"]}, - {"x": 3, "tags": []}, - ] - ) - class mydict(Dict[str, Any]): - pass +class TestCommandDocumentType(unittest.TestCase): + @only_type_check + def test_default(self) -> None: + client: MongoClient = MongoClient() + result: Dict = client.admin.command("ping") + result["a"] = 1 - result = coll3.aggregate( - [ - mydict({"$unwind": "$tags"}), - {"$group": {"_id": "$tags", "count": {"$sum": 1}}}, - {"$sort": SON([("count", -1), ("_id", -1)])}, - ] - ) - self.assertTrue(len(list(result))) + @only_type_check + def test_explicit_document_type(self) -> None: + client: MongoClient = MongoClient() + codec_options: CodecOptions[Dict[str, Any]] = CodecOptions() + result = client.admin.command("ping", codec_options=codec_options) + result["a"] = 1 + + @only_type_check + def test_typeddict_document_type(self) -> None: + client: MongoClient = MongoClient() + codec_options: CodecOptions[Movie] = CodecOptions() + result = client.admin.command("ping", codec_options=codec_options) + assert result["year"] == 1 + assert result["name"] == "a" + + @only_type_check + def test_raw_bson_document_type(self) -> None: + client: MongoClient = MongoClient() + codec_options = CodecOptions(RawBSONDocument) + result = client.admin.command("ping", codec_options=codec_options) + assert len(result.raw) > 0 + + @only_type_check + def test_son_document_type(self) -> None: + client = MongoClient(document_class=SON[str, Any]) + codec_options = CodecOptions(SON[str, Any]) + result = client.admin.command("ping", codec_options=codec_options) + result["a"] = 1 + + +class TestCodecOptionsDocumentType(unittest.TestCase): + def test_default(self) -> None: + options: CodecOptions = CodecOptions() + obj = options.document_class() + obj["a"] = 1 + + def test_explicit_document_type(self) -> None: + options: CodecOptions[Dict[str, Any]] = CodecOptions() + obj = options.document_class() + obj["a"] = 1 + + def test_typeddict_document_type(self) -> None: + options: CodecOptions[Movie] = CodecOptions() + # Suppress: Cannot instantiate type "Type[Movie]". 
+ obj = options.document_class(name="a", year=1) # type: ignore[misc] + assert obj["year"] == 1 + assert obj["name"] == "a" + + def test_raw_bson_document_type(self) -> None: + options = CodecOptions(RawBSONDocument) + doc_bson = b"\x10\x00\x00\x00\x11a\x00\xff\xff\xff\xff\xff\xff\xff\xff\x00" + obj = options.document_class(doc_bson) + assert len(obj.raw) > 0 + + def test_son_document_type(self) -> None: + options = CodecOptions(SON[str, Any]) + obj = options.document_class() + obj["a"] = 1 if __name__ == "__main__": From 0a6e7bc38760842d5b99363168ba13447ba03799 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Wed, 16 Mar 2022 09:36:31 -0700 Subject: [PATCH 0100/1588] PYTHON-3174 Don't reinit client_context.client (#899) --- test/test_raw_bson.py | 7 ------- 1 file changed, 7 deletions(-) diff --git a/test/test_raw_bson.py b/test/test_raw_bson.py index a27af6e217..d82e5104c0 100644 --- a/test/test_raw_bson.py +++ b/test/test_raw_bson.py @@ -20,7 +20,6 @@ from test import client_context, unittest from test.test_client import IntegrationTest -from test.utils import rs_or_single_client from bson import decode, encode from bson.binary import JAVA_LEGACY, Binary, UuidRepresentation @@ -42,12 +41,6 @@ class TestRawBSONDocument(IntegrationTest): ) document = RawBSONDocument(bson_string) - @classmethod - def setUpClass(cls): - super(TestRawBSONDocument, cls).setUpClass() - client_context.client = rs_or_single_client() - cls.client = client_context.client - def tearDown(self): if client_context.connected: self.client.pymongo_test.test_raw.drop() From 648a87e22867c49c23baf5caff982b8df8a735c3 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Wed, 16 Mar 2022 12:32:00 -0700 Subject: [PATCH 0101/1588] PYTHON-3173 Skip version API test for count (#902) --- test/test_examples.py | 1 + 1 file changed, 1 insertion(+) diff --git a/test/test_examples.py b/test/test_examples.py index ccb48307e4..f38e540507 100644 --- a/test/test_examples.py +++ b/test/test_examples.py @@ -1201,6 +1201,7 @@ def test_versioned_api(self): client = MongoClient(uri, server_api=ServerApi("1", deprecation_errors=True)) # End Versioned API Example 4 + @unittest.skip("PYTHON-3167 count has been added to API version 1") @client_context.require_version_min(4, 7) def test_versioned_api_migration(self): # SERVER-58785 From b3604a81d30cf9d67ea94a7f314b5602f08b46a1 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Wed, 16 Mar 2022 15:26:10 -0500 Subject: [PATCH 0102/1588] PYTHON-3171 Add usage of NoReturn annotation (#901) --- bson/__init__.py | 3 ++- bson/objectid.py | 4 ++-- gridfs/grid_file.py | 18 +++++++++--------- pymongo/bulk.py | 3 ++- pymongo/client_session.py | 5 +++-- pymongo/collection.py | 7 ++++--- pymongo/command_cursor.py | 4 ++-- pymongo/common.py | 3 ++- pymongo/cursor.py | 3 ++- pymongo/database.py | 5 +++-- pymongo/helpers.py | 6 +++--- pymongo/message.py | 4 ++-- pymongo/mongo_client.py | 3 ++- pymongo/pool.py | 8 +++++--- 14 files changed, 43 insertions(+), 33 deletions(-) diff --git a/bson/__init__.py b/bson/__init__.py index 343fbecb25..11a87bbe79 100644 --- a/bson/__init__.py +++ b/bson/__init__.py @@ -76,6 +76,7 @@ List, Mapping, MutableMapping, + NoReturn, Optional, Sequence, Tuple, @@ -170,7 +171,7 @@ def get_data_and_view(data: Any) -> Tuple[Any, memoryview]: return view.tobytes(), view -def _raise_unknown_type(element_type: int, element_name: str) -> None: +def _raise_unknown_type(element_type: int, element_name: str) -> NoReturn: """Unknown type helper.""" raise InvalidBSON( "Detected unknown BSON type %r 
for fieldname '%s'. Are " diff --git a/bson/objectid.py b/bson/objectid.py index 24d25d0377..c174b47327 100644 --- a/bson/objectid.py +++ b/bson/objectid.py @@ -24,7 +24,7 @@ import threading import time from random import SystemRandom -from typing import Any, Optional, Type, Union +from typing import Any, NoReturn, Optional, Type, Union from bson.errors import InvalidId from bson.tz_util import utc @@ -32,7 +32,7 @@ _MAX_COUNTER_VALUE = 0xFFFFFF -def _raise_invalid_id(oid: str) -> None: +def _raise_invalid_id(oid: str) -> NoReturn: raise InvalidId( "%r is not a valid ObjectId, it must be a 12-byte input" " or a 24-character hex string" % oid diff --git a/gridfs/grid_file.py b/gridfs/grid_file.py index b290fc68b0..5d63d5c653 100644 --- a/gridfs/grid_file.py +++ b/gridfs/grid_file.py @@ -17,7 +17,7 @@ import io import math import os -from typing import Any, Iterable, List, Mapping, Optional +from typing import Any, Iterable, List, Mapping, NoReturn, Optional from bson.binary import Binary from bson.int64 import Int64 @@ -298,7 +298,7 @@ def __flush(self) -> Any: except DuplicateKeyError: self._raise_file_exists(self._id) - def _raise_file_exists(self, file_id: Any) -> None: + def _raise_file_exists(self, file_id: Any) -> NoReturn: """Raise a FileExists exception for the given file_id.""" raise FileExists("file with _id %r already exists" % file_id) @@ -312,7 +312,7 @@ def close(self) -> None: self.__flush() object.__setattr__(self, "_closed", True) - def read(self, size: Optional[int] = -1) -> None: + def read(self, size: int = -1) -> NoReturn: raise io.UnsupportedOperation("read") def readable(self) -> bool: @@ -682,10 +682,10 @@ def close(self) -> None: self.__chunk_iter = None super().close() - def write(self, value: Any) -> None: + def write(self, value: Any) -> NoReturn: raise io.UnsupportedOperation("write") - def writelines(self, lines: Any) -> None: + def writelines(self, lines: Any) -> NoReturn: raise io.UnsupportedOperation("writelines") def writable(self) -> bool: @@ -704,7 +704,7 @@ def __exit__(self, exc_type: Any, exc_val: Any, exc_tb: Any) -> Any: self.close() return False - def fileno(self) -> int: + def fileno(self) -> NoReturn: raise io.UnsupportedOperation("fileno") def flush(self) -> None: @@ -714,7 +714,7 @@ def flush(self) -> None: def isatty(self) -> bool: return False - def truncate(self, size: Optional[int] = None) -> int: + def truncate(self, size: Optional[int] = None) -> NoReturn: # See https://docs.python.org/3/library/io.html#io.IOBase.writable # for why truncate has to raise. 
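        # (GridOut is read-only, so truncate() must raise instead of
        # returning the new size, which the NoReturn annotation records.)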
raise io.UnsupportedOperation("truncate") @@ -891,10 +891,10 @@ def next(self) -> GridOut: __next__ = next - def add_option(self, *args: Any, **kwargs: Any) -> None: # type: ignore[override] + def add_option(self, *args: Any, **kwargs: Any) -> NoReturn: raise NotImplementedError("Method does not exist for GridOutCursor") - def remove_option(self, *args: Any, **kwargs: Any) -> None: # type: ignore[override] + def remove_option(self, *args: Any, **kwargs: Any) -> NoReturn: raise NotImplementedError("Method does not exist for GridOutCursor") def _clone_base(self, session: ClientSession) -> "GridOutCursor": diff --git a/pymongo/bulk.py b/pymongo/bulk.py index 9055e40e98..44923f73df 100644 --- a/pymongo/bulk.py +++ b/pymongo/bulk.py @@ -18,6 +18,7 @@ """ import copy from itertools import islice +from typing import Any, NoReturn from bson.objectid import ObjectId from bson.raw_bson import RawBSONDocument @@ -128,7 +129,7 @@ def _merge_command(run, full_result, offset, result): full_result["writeConcernErrors"].append(wce) -def _raise_bulk_write_error(full_result): +def _raise_bulk_write_error(full_result: Any) -> NoReturn: """Raise a BulkWriteError from the full bulk api result.""" if full_result["writeErrors"]: full_result["writeErrors"].sort(key=lambda error: error["index"]) diff --git a/pymongo/client_session.py b/pymongo/client_session.py index 20d36fb062..a0c269cb8d 100644 --- a/pymongo/client_session.py +++ b/pymongo/client_session.py @@ -142,6 +142,7 @@ ContextManager, Generic, Mapping, + NoReturn, Optional, TypeVar, ) @@ -422,7 +423,7 @@ def __del__(self): self.sock_mgr = None -def _reraise_with_unknown_commit(exc): +def _reraise_with_unknown_commit(exc: Any) -> NoReturn: """Re-raise an exception with the UnknownTransactionCommitResult label.""" exc._add_error_label("UnknownTransactionCommitResult") raise @@ -1003,7 +1004,7 @@ def _update_read_concern(self, cmd, sock_info): if self._snapshot_time is not None: rc["atClusterTime"] = self._snapshot_time - def __copy__(self): + def __copy__(self) -> NoReturn: raise TypeError("A ClientSession cannot be copied, create a new session instead") diff --git a/pymongo/collection.py b/pymongo/collection.py index dc344b640f..d0ebd9311a 100644 --- a/pymongo/collection.py +++ b/pymongo/collection.py @@ -23,6 +23,7 @@ List, Mapping, MutableMapping, + NoReturn, Optional, Sequence, Tuple, @@ -343,7 +344,7 @@ def __ne__(self, other: Any) -> bool: def __hash__(self) -> int: return hash((self.__database, self.__name)) - def __bool__(self) -> bool: + def __bool__(self) -> NoReturn: raise NotImplementedError( "Collection objects do not implement truth " "value testing or bool(). Please compare " @@ -3143,12 +3144,12 @@ def find_one_and_update( def __iter__(self) -> "Collection[_DocumentType]": return self - def __next__(self) -> None: + def __next__(self) -> NoReturn: raise TypeError("'Collection' object is not iterable") next = __next__ - def __call__(self, *args: Any, **kwargs: Any) -> None: + def __call__(self, *args: Any, **kwargs: Any) -> NoReturn: """This is only here so that some API misusages are easier to debug.""" if "." 
not in self.__name: raise TypeError( diff --git a/pymongo/command_cursor.py b/pymongo/command_cursor.py index d10e23f957..0bd99f0bbb 100644 --- a/pymongo/command_cursor.py +++ b/pymongo/command_cursor.py @@ -15,7 +15,7 @@ """CommandCursor class to iterate over command results.""" from collections import deque -from typing import TYPE_CHECKING, Any, Generic, Iterator, Mapping, Optional +from typing import TYPE_CHECKING, Any, Generic, Iterator, Mapping, NoReturn, Optional from bson import _convert_raw_document_lists_to_streams from pymongo.cursor import _CURSOR_CLOSED_ERRORS, _SocketManager @@ -344,5 +344,5 @@ def _unpack_response( _convert_raw_document_lists_to_streams(raw_response[0]) return raw_response - def __getitem__(self, index): + def __getitem__(self, index: int) -> NoReturn: raise InvalidOperation("Cannot call __getitem__ on RawBatchCursor") diff --git a/pymongo/common.py b/pymongo/common.py index 5255468b5a..669e12ead7 100644 --- a/pymongo/common.py +++ b/pymongo/common.py @@ -25,6 +25,7 @@ List, Mapping, MutableMapping, + NoReturn, Optional, Sequence, Tuple, @@ -153,7 +154,7 @@ def clean_node(node: str) -> Tuple[str, int]: return host.lower(), port -def raise_config_error(key: str, dummy: Any) -> None: +def raise_config_error(key: str, dummy: Any) -> NoReturn: """Raise ConfigurationError with the given key name.""" raise ConfigurationError("Unknown option %s" % (key,)) diff --git a/pymongo/cursor.py b/pymongo/cursor.py index a2ccdf5860..9f6f0898b4 100644 --- a/pymongo/cursor.py +++ b/pymongo/cursor.py @@ -25,6 +25,7 @@ Iterable, List, Mapping, + NoReturn, Optional, Sequence, Tuple, @@ -1339,5 +1340,5 @@ def explain(self) -> _DocumentType: clone = self._clone(deepcopy=True, base=Cursor(self.collection)) return clone.explain() - def __getitem__(self, index: Any) -> "Cursor[_DocumentType]": + def __getitem__(self, index: Any) -> NoReturn: raise InvalidOperation("Cannot call __getitem__ on RawBatchCursor") diff --git a/pymongo/database.py b/pymongo/database.py index 6f2d0fd5cc..17cba06b65 100644 --- a/pymongo/database.py +++ b/pymongo/database.py @@ -21,6 +21,7 @@ List, Mapping, MutableMapping, + NoReturn, Optional, Sequence, TypeVar, @@ -1010,12 +1011,12 @@ def validate_collection( def __iter__(self) -> "Database[_DocumentType]": return self - def __next__(self) -> "Database[_DocumentType]": + def __next__(self) -> NoReturn: raise TypeError("'Database' object is not iterable") next = __next__ - def __bool__(self) -> bool: + def __bool__(self) -> NoReturn: raise NotImplementedError( "Database objects do not implement truth " "value testing or bool(). Please compare " diff --git a/pymongo/helpers.py b/pymongo/helpers.py index 8311aafa8f..60b69424a2 100644 --- a/pymongo/helpers.py +++ b/pymongo/helpers.py @@ -17,7 +17,7 @@ import sys import traceback from collections import abc -from typing import Any +from typing import Any, List, NoReturn from bson.son import SON from pymongo import ASCENDING @@ -180,7 +180,7 @@ def _check_command_response( raise OperationFailure(errmsg, code, response, max_wire_version) -def _raise_last_write_error(write_errors): +def _raise_last_write_error(write_errors: List[Any]) -> NoReturn: # If the last batch had multiple errors only report # the last error to emulate continue_on_error. 
    error = write_errors[-1]
@@ -189,7 +189,7 @@ def _raise_last_write_error(write_errors):
     raise WriteError(error.get("errmsg"), error.get("code"), error)


-def _raise_write_concern_error(error):
+def _raise_write_concern_error(error: Any) -> NoReturn:
     if "errInfo" in error and error["errInfo"].get("wtimeout"):
         # Make sure we raise WTimeoutError
         raise WTimeoutError(error.get("errmsg"), error.get("code"), error)
diff --git a/pymongo/message.py b/pymongo/message.py
index 58f71629d6..6aa8e4e7f9 100644
--- a/pymongo/message.py
+++ b/pymongo/message.py
@@ -24,7 +24,7 @@
 import random
 import struct
 from io import BytesIO as _BytesIO
-from typing import Any, Dict
+from typing import Any, Dict, NoReturn

 import bson
 from bson import CodecOptions, _decode_selective, _dict_to_bson, _make_c_string, encode
@@ -991,7 +991,7 @@ def max_split_size(self):
         return _MAX_SPLIT_SIZE_ENC


-def _raise_document_too_large(operation, doc_size, max_size):
+def _raise_document_too_large(operation: str, doc_size: int, max_size: int) -> NoReturn:
     """Internal helper for raising DocumentTooLarge."""
     if operation == "insert":
         raise DocumentTooLarge(
diff --git a/pymongo/mongo_client.py b/pymongo/mongo_client.py
index 4231db95ae..280818ce00 100644
--- a/pymongo/mongo_client.py
+++ b/pymongo/mongo_client.py
@@ -43,6 +43,7 @@
     Generic,
     List,
     Mapping,
+    NoReturn,
     Optional,
     Sequence,
     Set,
@@ -1975,7 +1976,7 @@ def __exit__(self, exc_type: Any, exc_val: Any, exc_tb: Any) -> None:
     def __iter__(self) -> "MongoClient[_DocumentType]":
         return self

-    def __next__(self) -> None:
+    def __next__(self) -> NoReturn:
         raise TypeError("'MongoClient' object is not iterable")

     next = __next__
diff --git a/pymongo/pool.py b/pymongo/pool.py
index c7bd21fc8f..1aaae4067f 100644
--- a/pymongo/pool.py
+++ b/pymongo/pool.py
@@ -24,7 +24,7 @@
 import threading
 import time
 import weakref
-from typing import Any
+from typing import Any, NoReturn, Optional

 from bson import DEFAULT_CODEC_OPTIONS
 from bson.son import SON
@@ -249,7 +249,9 @@ def _set_keepalive_times(sock):
     "foo".encode("idna")


-def _raise_connection_failure(address, error, msg_prefix=None):
+def _raise_connection_failure(
+    address: Any, error: Exception, msg_prefix: Optional[str] = None
+) -> NoReturn:
     """Convert a socket.error to ConnectionFailure and raise it."""
     host, port = address
     # If connecting to a Unix socket, port will be None.
@@ -1593,7 +1595,7 @@ def _perished(self, sock_info):

         return False

-    def _raise_wait_queue_timeout(self):
+    def _raise_wait_queue_timeout(self) -> NoReturn:
         listeners = self.opts._event_listeners
         if self.enabled_for_cmap:
             listeners.publish_connection_check_out_failed(

From da81c69644a3d8245c4b60a92d9ce39ff0a2e8ba Mon Sep 17 00:00:00 2001
From: Steven Silvester
Date: Wed, 16 Mar 2022 15:26:45 -0500
Subject: [PATCH 0103/1588] PYTHON-3157 Update Release Documentation to
 Include Github Releases (#900)

---
 RELEASE.rst | 6 ++++++
 1 file changed, 6 insertions(+)

diff --git a/RELEASE.rst b/RELEASE.rst
index 84b60d9b6a..ad18446a0f 100644
--- a/RELEASE.rst
+++ b/RELEASE.rst
@@ -89,3 +89,9 @@ Doing a Release
 15. File a ticket for DOCSP highlighting changes in server version and Python
     version compatibility or the lack thereof, for example:
     https://jira.mongodb.org/browse/DOCSP-13536
+
+16. Create a GitHub Release for the tag using
+    https://github.com/mongodb/mongo-python-driver/releases/new.
+    The title should be "PyMongo X.Y.Z", and the description should contain
+    a link to the release notes on the community forum, e.g.
+ "Release notes: mongodb.com/community/forums/t/pymongo-4-0-2-released/150457." From 861d79537fee9dd80be02a4ffdbadc5897acfd7d Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Tue, 22 Mar 2022 14:52:06 -0700 Subject: [PATCH 0104/1588] PYTHON-3180 Use server v3 toolchain in perf tests (#905) --- .evergreen/run-perf-tests.sh | 2 +- .evergreen/run-tests.sh | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/.evergreen/run-perf-tests.sh b/.evergreen/run-perf-tests.sh index d2a913c824..bc447a9569 100644 --- a/.evergreen/run-perf-tests.sh +++ b/.evergreen/run-perf-tests.sh @@ -13,7 +13,7 @@ cd .. export TEST_PATH="${PROJECT_DIRECTORY}/driver-performance-test-data" export OUTPUT_FILE="${PROJECT_DIRECTORY}/results.json" -MTCBIN=/opt/mongodbtoolchain/v2/bin +MTCBIN=/opt/mongodbtoolchain/v3/bin VIRTUALENV="$MTCBIN/virtualenv -p $MTCBIN/python3" $VIRTUALENV pyperftest diff --git a/.evergreen/run-tests.sh b/.evergreen/run-tests.sh index 7b9d051bd7..ade267d2b1 100755 --- a/.evergreen/run-tests.sh +++ b/.evergreen/run-tests.sh @@ -69,8 +69,8 @@ if [ -z "$PYTHON_BINARY" ]; then # system python3 doesn't exist or exists but is older than 3.6. if is_python_36 "$(command -v python3)"; then PYTHON=$(command -v python3) - elif is_python_36 "$(command -v /opt/mongodbtoolchain/v2/bin/python3)"; then - PYTHON=$(command -v /opt/mongodbtoolchain/v2/bin/python3) + elif is_python_36 "$(command -v /opt/mongodbtoolchain/v3/bin/python3)"; then + PYTHON=$(command -v /opt/mongodbtoolchain/v3/bin/python3) else echo "Cannot test without python3.6+ installed!" fi From 9562a81903fd4520127137573292b47a24df1459 Mon Sep 17 00:00:00 2001 From: Julius Park Date: Fri, 25 Mar 2022 23:47:18 +0000 Subject: [PATCH 0105/1588] PYTHON-3119 getMore helper should explicitly send inherited comment (#904) --- .gitignore | 1 + pymongo/aggregation.py | 1 + pymongo/collection.py | 7 +- pymongo/command_cursor.py | 5 + pymongo/cursor.py | 1 + pymongo/database.py | 1 + pymongo/message.py | 15 +- pymongo/mongo_client.py | 2 +- .../unified/change-streams.json | 179 ++++++++++++++++++ test/crud/unified/aggregate.json | 125 +++++++++++- .../unified/bulkWrite-updateMany-let.json | 2 +- .../crud/unified/bulkWrite-updateOne-let.json | 2 +- test/crud/unified/find-comment.json | 109 ++++++++++- test/test_client.py | 1 + 14 files changed, 440 insertions(+), 11 deletions(-) diff --git a/.gitignore b/.gitignore index de435d109e..f7ad6563ff 100644 --- a/.gitignore +++ b/.gitignore @@ -15,3 +15,4 @@ pymongo.egg-info/ .tox mongocryptd.pid .idea/ +.nova/ diff --git a/pymongo/aggregation.py b/pymongo/aggregation.py index 84ecffe5fb..62fe4bd055 100644 --- a/pymongo/aggregation.py +++ b/pymongo/aggregation.py @@ -174,6 +174,7 @@ def get_cursor(self, session, server, sock_info, read_preference): max_await_time_ms=self._max_await_time_ms, session=session, explicit_session=self._explicit_session, + comment=self._options.get("comment"), ) cmd_cursor._maybe_pin_connection(sock_info) return cmd_cursor diff --git a/pymongo/collection.py b/pymongo/collection.py index d0ebd9311a..3de1210522 100644 --- a/pymongo/collection.py +++ b/pymongo/collection.py @@ -2180,7 +2180,12 @@ def _cmd(session, server, sock_info, read_preference): raise cursor = {"id": 0, "firstBatch": []} cmd_cursor = CommandCursor( - coll, cursor, sock_info.address, session=session, explicit_session=explicit_session + coll, + cursor, + sock_info.address, + session=session, + explicit_session=explicit_session, + comment=cmd.get("comment"), ) cmd_cursor._maybe_pin_connection(sock_info) 
return cmd_cursor diff --git a/pymongo/command_cursor.py b/pymongo/command_cursor.py index 0bd99f0bbb..6f3f244419 100644 --- a/pymongo/command_cursor.py +++ b/pymongo/command_cursor.py @@ -43,6 +43,7 @@ def __init__( max_await_time_ms: Optional[int] = None, session: Optional["ClientSession"] = None, explicit_session: bool = False, + comment: Any = None, ) -> None: """Create a new command cursor.""" self.__sock_mgr: Any = None @@ -56,6 +57,7 @@ def __init__( self.__session = session self.__explicit_session = explicit_session self.__killed = self.__id == 0 + self.__comment = comment if self.__killed: self.__end_session(True) @@ -224,6 +226,7 @@ def _refresh(self): self.__max_await_time_ms, self.__sock_mgr, False, + self.__comment, ) ) else: # Cursor id is zero nothing else to return @@ -314,6 +317,7 @@ def __init__( max_await_time_ms: Optional[int] = None, session: Optional["ClientSession"] = None, explicit_session: bool = False, + comment: Any = None, ) -> None: """Create a new cursor / iterator over raw batches of BSON data. @@ -332,6 +336,7 @@ def __init__( max_await_time_ms, session, explicit_session, + comment, ) def _unpack_response( diff --git a/pymongo/cursor.py b/pymongo/cursor.py index 9f6f0898b4..350cc255bb 100644 --- a/pymongo/cursor.py +++ b/pymongo/cursor.py @@ -1183,6 +1183,7 @@ def _refresh(self): self.__max_await_time_ms, self.__sock_mgr, self.__exhaust, + self.__comment, ) self.__send_message(g) diff --git a/pymongo/database.py b/pymongo/database.py index 17cba06b65..d3d1b274fd 100644 --- a/pymongo/database.py +++ b/pymongo/database.py @@ -780,6 +780,7 @@ def _list_collections(self, sock_info, session, read_preference, **kwargs): sock_info.address, session=tmp_session, explicit_session=session is not None, + comment=cmd.get("comment"), ) cmd_cursor._maybe_pin_connection(sock_info) return cmd_cursor diff --git a/pymongo/message.py b/pymongo/message.py index 6aa8e4e7f9..1fdf0ece35 100644 --- a/pymongo/message.py +++ b/pymongo/message.py @@ -222,13 +222,15 @@ def _gen_find_command( return cmd -def _gen_get_more_command(cursor_id, coll, batch_size, max_await_time_ms): +def _gen_get_more_command(cursor_id, coll, batch_size, max_await_time_ms, comment, sock_info): """Generate a getMore command document.""" cmd = SON([("getMore", cursor_id), ("collection", coll)]) if batch_size: cmd["batchSize"] = batch_size if max_await_time_ms is not None: cmd["maxTimeMS"] = max_await_time_ms + if comment is not None and sock_info.max_wire_version >= 9: + cmd["comment"] = comment return cmd @@ -421,6 +423,7 @@ class _GetMore(object): "sock_mgr", "_as_command", "exhaust", + "comment", ) name = "getMore" @@ -438,6 +441,7 @@ def __init__( max_await_time_ms, sock_mgr, exhaust, + comment, ): self.db = db self.coll = coll @@ -451,6 +455,7 @@ def __init__( self.sock_mgr = sock_mgr self._as_command = None self.exhaust = exhaust + self.comment = comment def namespace(self): return "%s.%s" % (self.db, self.coll) @@ -473,9 +478,13 @@ def as_command(self, sock_info): return self._as_command cmd = _gen_get_more_command( - self.cursor_id, self.coll, self.ntoreturn, self.max_await_time_ms + self.cursor_id, + self.coll, + self.ntoreturn, + self.max_await_time_ms, + self.comment, + sock_info, ) - if self.session: self.session._apply_to(cmd, False, self.read_preference, sock_info) sock_info.add_server_api(cmd) diff --git a/pymongo/mongo_client.py b/pymongo/mongo_client.py index 280818ce00..83295fccc9 100644 --- a/pymongo/mongo_client.py +++ b/pymongo/mongo_client.py @@ -1763,7 +1763,7 @@ def list_databases( 
"firstBatch": res["databases"], "ns": "admin.$cmd", } - return CommandCursor(admin["$cmd"], cursor, None) + return CommandCursor(admin["$cmd"], cursor, None, comment=comment) def list_database_names( self, diff --git a/test/change_streams/unified/change-streams.json b/test/change_streams/unified/change-streams.json index 4aea9a4aa1..5fd2544ce0 100644 --- a/test/change_streams/unified/change-streams.json +++ b/test/change_streams/unified/change-streams.json @@ -247,6 +247,185 @@ ] } ] + }, + { + "description": "Test that comment is set on getMore", + "runOnRequirements": [ + { + "minServerVersion": "4.4.0", + "topologies": [ + "replicaset" + ] + } + ], + "operations": [ + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [], + "comment": { + "key": "value" + } + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "_id": 1, + "a": 1 + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "pipeline": [ + { + "$changeStream": {} + } + ], + "comment": { + "key": "value" + } + } + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "collection0", + "documents": [ + { + "_id": 1, + "a": 1 + } + ] + } + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "collection": "collection0", + "comment": { + "key": "value" + } + }, + "commandName": "getMore", + "databaseName": "database0" + } + } + ] + } + ] + }, + { + "description": "Test that comment is not set on getMore - pre 4.4", + "runOnRequirements": [ + { + "minServerVersion": "3.6.0", + "maxServerVersion": "4.3.99", + "topologies": [ + "replicaset" + ] + } + ], + "operations": [ + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [], + "comment": "comment" + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "_id": 1, + "a": 1 + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "pipeline": [ + { + "$changeStream": {} + } + ], + "comment": "comment" + } + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "collection0", + "documents": [ + { + "_id": 1, + "a": 1 + } + ] + } + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "collection": "collection0", + "comment": { + "$$exists": false + } + }, + "commandName": "getMore", + "databaseName": "database0" + } + } + ] + } + ] } ] } diff --git a/test/crud/unified/aggregate.json b/test/crud/unified/aggregate.json index f6da8ff32f..0cbfb4e6e9 100644 --- a/test/crud/unified/aggregate.json +++ b/test/crud/unified/aggregate.json @@ -327,10 +327,131 @@ ] }, { - "description": "aggregate with comment does not set comment on getMore", + "description": "aggregate with comment sets comment on getMore", "runOnRequirements": [ { - "minServerVersion": "3.6.0" + "minServerVersion": "4.4.0" + } + ], + "operations": [ + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "_id": { + "$gt": 1 + } + } + } + ], + "batchSize": 2, + "comment": { + 
"content": "test" + } + }, + "object": "collection0", + "expectResult": [ + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + }, + { + "_id": 5, + "x": 55 + }, + { + "_id": 6, + "x": 66 + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "coll0", + "pipeline": [ + { + "$match": { + "_id": { + "$gt": 1 + } + } + } + ], + "cursor": { + "batchSize": 2 + }, + "comment": { + "content": "test" + } + }, + "commandName": "aggregate", + "databaseName": "aggregate-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "collection": "coll0", + "batchSize": 2, + "comment": { + "content": "test" + } + }, + "commandName": "getMore", + "databaseName": "aggregate-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "collection": "coll0", + "batchSize": 2, + "comment": { + "content": "test" + } + }, + "commandName": "getMore", + "databaseName": "aggregate-tests" + } + } + ] + } + ] + }, + { + "description": "aggregate with comment does not set comment on getMore - pre 4.4", + "runOnRequirements": [ + { + "minServerVersion": "3.6.0", + "maxServerVersion": "4.3.99" } ], "operations": [ diff --git a/test/crud/unified/bulkWrite-updateMany-let.json b/test/crud/unified/bulkWrite-updateMany-let.json index 3cc8da4c53..fbeba1a607 100644 --- a/test/crud/unified/bulkWrite-updateMany-let.json +++ b/test/crud/unified/bulkWrite-updateMany-let.json @@ -142,7 +142,7 @@ "description": "BulkWrite updateMany with let option unsupported (server-side error)", "runOnRequirements": [ { - "minServerVersion": "3.6.0", + "minServerVersion": "4.2.0", "maxServerVersion": "4.9" } ], diff --git a/test/crud/unified/bulkWrite-updateOne-let.json b/test/crud/unified/bulkWrite-updateOne-let.json index 2a3e4f79dc..96783c782f 100644 --- a/test/crud/unified/bulkWrite-updateOne-let.json +++ b/test/crud/unified/bulkWrite-updateOne-let.json @@ -144,7 +144,7 @@ "description": "BulkWrite updateOne with let option unsupported (server-side error)", "runOnRequirements": [ { - "minServerVersion": "3.6.0", + "minServerVersion": "4.2.0", "maxServerVersion": "4.9" } ], diff --git a/test/crud/unified/find-comment.json b/test/crud/unified/find-comment.json index 6000bb0172..600a3723f1 100644 --- a/test/crud/unified/find-comment.json +++ b/test/crud/unified/find-comment.json @@ -195,10 +195,115 @@ ] }, { - "description": "find with comment does not set comment on getMore", + "description": "find with comment sets comment on getMore", "runOnRequirements": [ { - "minServerVersion": "3.6" + "minServerVersion": "4.4.0" + } + ], + "operations": [ + { + "name": "find", + "object": "collection0", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "batchSize": 2, + "comment": { + "key": "value" + } + }, + "expectResult": [ + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + }, + { + "_id": 5, + "x": 55 + }, + { + "_id": 6, + "x": 66 + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "coll0", + "filter": { + "_id": { + "$gt": 1 + } + }, + "batchSize": 2, + "comment": { + "key": "value" + } + } + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "collection": "coll0", + "batchSize": 2, + "comment": { + "key": "value" + } + } 
+ } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "collection": "coll0", + "batchSize": 2, + "comment": { + "key": "value" + } + } + } + } + ] + } + ] + }, + { + "description": "find with comment does not set comment on getMore - pre 4.4", + "runOnRequirements": [ + { + "minServerVersion": "3.6.0", + "maxServerVersion": "4.3.99" } ], "operations": [ diff --git a/test/test_client.py b/test/test_client.py index a0d6e22d53..7a66792873 100644 --- a/test/test_client.py +++ b/test/test_client.py @@ -1344,6 +1344,7 @@ def test_stale_getmore(self): None, None, False, + None, ), unpack_res=Cursor(client.pymongo_test.collection)._unpack_response, address=("not-a-member", 27017), From e325b24b78e431cb889c5902d00b8f4af2c700c3 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Mon, 28 Mar 2022 12:18:26 -0500 Subject: [PATCH 0106/1588] PYTHON-3127 Snapshot Query Examples for the Manual (#907) --- test/test_examples.py | 87 ++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 86 insertions(+), 1 deletion(-) diff --git a/test/test_examples.py b/test/test_examples.py index f38e540507..b7b70463ac 100644 --- a/test/test_examples.py +++ b/test/test_examples.py @@ -21,7 +21,7 @@ sys.path[0:0] = [""] from test import IntegrationTest, client_context, unittest -from test.utils import rs_client +from test.utils import rs_client, wait_until import pymongo from pymongo.errors import ConnectionFailure, OperationFailure @@ -1297,5 +1297,90 @@ def strptime(s): # End Versioned API Example 8 +class TestSnapshotQueryExamples(IntegrationTest): + @client_context.require_version_min(5, 0) + def test_snapshot_query(self): + client = self.client + + if not client_context.is_topology_type(["replicaset", "sharded"]): + self.skipTest("Must be a sharded or replicaset") + + self.addCleanup(client.drop_database, "pets") + db = client.pets + db.drop_collection("cats") + db.drop_collection("dogs") + db.cats.insert_one({"name": "Whiskers", "color": "white", "age": 10, "adoptable": True}) + db.dogs.insert_one({"name": "Pebbles", "color": "Brown", "age": 10, "adoptable": True}) + wait_until(lambda: self.check_for_snapshot(db.cats), "success") + wait_until(lambda: self.check_for_snapshot(db.dogs), "success") + + # Start Snapshot Query Example 1 + + db = client.pets + with client.start_session(snapshot=True) as s: + adoptablePetsCount = db.cats.aggregate( + [{"$match": {"adoptable": True}}, {"$count": "adoptableCatsCount"}], session=s + ).next()["adoptableCatsCount"] + + adoptablePetsCount += db.dogs.aggregate( + [{"$match": {"adoptable": True}}, {"$count": "adoptableDogsCount"}], session=s + ).next()["adoptableDogsCount"] + + print(adoptablePetsCount) + + # End Snapshot Query Example 1 + db = client.retail + self.addCleanup(client.drop_database, "retail") + db.drop_collection("sales") + + saleDate = datetime.datetime.now() + db.sales.insert_one({"shoeType": "boot", "price": 30, "saleDate": saleDate}) + wait_until(lambda: self.check_for_snapshot(db.sales), "success") + + # Start Snapshot Query Example 2 + db = client.retail + with client.start_session(snapshot=True) as s: + total = db.sales.aggregate( + [ + { + "$match": { + "$expr": { + "$gt": [ + "$saleDate", + { + "$dateSubtract": { + "startDate": "$$NOW", + "unit": "day", + "amount": 1, + } + }, + ] + } + } + }, + {"$count": "totalDailySales"}, + ], + session=s, + ).next()["totalDailySales"] + + # End Snapshot Query Example 2 + + def check_for_snapshot(self, collection): + """Wait for snapshot reads to become 
available to prevent this error: + [246:SnapshotUnavailable]: Unable to read from a snapshot due to pending collection catalog changes; please retry the operation. Snapshot timestamp is Timestamp(1646666892, 4). Collection minimum is Timestamp(1646666892, 5) (on localhost:27017, modern retry, attempt 1) + From https://github.com/mongodb/mongo-ruby-driver/commit/7c4117b58e3d12e237f7536f7521e18fc15f79ac + """ + with self.client.start_session(snapshot=True) as s: + try: + with collection.aggregate([], session=s): + pass + return True + except OperationFailure as e: + # Retry them as the server demands... + if e.code == 246: # SnapshotUnavailable + return False + raise + + if __name__ == "__main__": unittest.main() From 72d8900c3612fc2ab838c2afe60c0e2680fde741 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Mon, 28 Mar 2022 13:48:58 -0500 Subject: [PATCH 0107/1588] PYTHON-3058 Bump maxWireVersion for MongoDB 5.2 (#908) --- pymongo/common.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pymongo/common.py b/pymongo/common.py index 669e12ead7..9007bbdfd2 100644 --- a/pymongo/common.py +++ b/pymongo/common.py @@ -63,7 +63,7 @@ # What this version of PyMongo supports. MIN_SUPPORTED_SERVER_VERSION = "3.6" MIN_SUPPORTED_WIRE_VERSION = 6 -MAX_SUPPORTED_WIRE_VERSION = 14 +MAX_SUPPORTED_WIRE_VERSION = 15 # Frequency to call hello on servers, in seconds. HEARTBEAT_FREQUENCY = 10 From 75fa14d19bf3a592df2ff120dd412ad1fb565f02 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Mon, 28 Mar 2022 15:09:53 -0700 Subject: [PATCH 0108/1588] PYTHON-3084 MongoClient/Database/Collection should not implement Iterable (#909) --- pymongo/collection.py | 4 ++-- pymongo/database.py | 4 ++-- pymongo/mongo_client.py | 4 ++-- test/test_client.py | 26 +++++++++++++++++++++----- test/test_collection.py | 23 +++++++++++++++++++++-- test/test_database.py | 23 +++++++++++++++++++++-- 6 files changed, 69 insertions(+), 15 deletions(-) diff --git a/pymongo/collection.py b/pymongo/collection.py index 3de1210522..f382628aa8 100644 --- a/pymongo/collection.py +++ b/pymongo/collection.py @@ -3146,8 +3146,8 @@ def find_one_and_update( **kwargs, ) - def __iter__(self) -> "Collection[_DocumentType]": - return self + # See PYTHON-3084. + __iter__ = None def __next__(self) -> NoReturn: raise TypeError("'Collection' object is not iterable") diff --git a/pymongo/database.py b/pymongo/database.py index d3d1b274fd..b5770b0db9 100644 --- a/pymongo/database.py +++ b/pymongo/database.py @@ -1009,8 +1009,8 @@ def validate_collection( return result - def __iter__(self) -> "Database[_DocumentType]": - return self + # See PYTHON-3084. + __iter__ = None def __next__(self) -> NoReturn: raise TypeError("'Database' object is not iterable") diff --git a/pymongo/mongo_client.py b/pymongo/mongo_client.py index 83295fccc9..8781cb1f01 100644 --- a/pymongo/mongo_client.py +++ b/pymongo/mongo_client.py @@ -1973,8 +1973,8 @@ def __enter__(self) -> "MongoClient[_DocumentType]": def __exit__(self, exc_type: Any, exc_val: Any, exc_tb: Any) -> None: self.close() - def __iter__(self) -> "MongoClient[_DocumentType]": - return self + # See PYTHON-3084. 
+ __iter__ = None def __next__(self) -> NoReturn: raise TypeError("'MongoClient' object is not iterable") diff --git a/test/test_client.py b/test/test_client.py index 7a66792873..5958ff6d52 100644 --- a/test/test_client.py +++ b/test/test_client.py @@ -26,7 +26,7 @@ import sys import threading import time -from typing import Type, no_type_check +from typing import Iterable, Type, no_type_check sys.path[0:0] = [""] @@ -210,10 +210,26 @@ def test_getattr(self): self.assertIn("has no attribute '_does_not_exist'", str(context.exception)) def test_iteration(self): - def iterate(): - [a for a in self.client] - - self.assertRaises(TypeError, iterate) + client = self.client + if "PyPy" in sys.version: + msg = "'NoneType' object is not callable" + else: + msg = "'MongoClient' object is not iterable" + # Iteration fails + with self.assertRaisesRegex(TypeError, msg): + for _ in client: # type: ignore[misc] # error: "None" not callable [misc] + break + # Index fails + with self.assertRaises(TypeError): + _ = client[0] + # next fails + with self.assertRaisesRegex(TypeError, "'MongoClient' object is not iterable"): + _ = next(client) + # .next() fails + with self.assertRaisesRegex(TypeError, "'MongoClient' object is not iterable"): + _ = client.next() + # Do not implement typing.Iterable. + self.assertNotIsInstance(client, Iterable) def test_get_default_database(self): c = rs_or_single_client( diff --git a/test/test_collection.py b/test/test_collection.py index 47636b495f..6319321045 100644 --- a/test/test_collection.py +++ b/test/test_collection.py @@ -21,7 +21,7 @@ import sys from codecs import utf_8_decode # type: ignore from collections import defaultdict -from typing import no_type_check +from typing import Iterable, no_type_check from pymongo.database import Database @@ -124,7 +124,26 @@ def test_getattr(self): self.assertEqual(coll2.write_concern, coll4.write_concern) def test_iteration(self): - self.assertRaises(TypeError, next, self.db) + coll = self.db.coll + if "PyPy" in sys.version: + msg = "'NoneType' object is not callable" + else: + msg = "'Collection' object is not iterable" + # Iteration fails + with self.assertRaisesRegex(TypeError, msg): + for _ in coll: # type: ignore[misc] # error: "None" not callable [misc] + break + # Non-string indices will start failing in PyMongo 5. + self.assertEqual(coll[0].name, "coll.0") + self.assertEqual(coll[{}].name, "coll.{}") + # next fails + with self.assertRaisesRegex(TypeError, "'Collection' object is not iterable"): + _ = next(coll) + # .next() fails + with self.assertRaisesRegex(TypeError, "'Collection' object is not iterable"): + _ = coll.next() + # Do not implement typing.Iterable. 
+        self.assertNotIsInstance(coll, Iterable)


 class TestCollection(IntegrationTest):
diff --git a/test/test_database.py b/test/test_database.py
index 8844046ad1..58cbe54335 100644
--- a/test/test_database.py
+++ b/test/test_database.py
@@ -16,7 +16,7 @@

 import re
 import sys
-from typing import Any, List, Mapping
+from typing import Any, Iterable, List, Mapping

 sys.path[0:0] = [""]

@@ -94,7 +94,26 @@ def test_getattr(self):
         self.assertIn("has no attribute '_does_not_exist'", str(context.exception))

     def test_iteration(self):
-        self.assertRaises(TypeError, next, self.client.pymongo_test)
+        db = self.client.pymongo_test
+        if "PyPy" in sys.version:
+            msg = "'NoneType' object is not callable"
+        else:
+            msg = "'Database' object is not iterable"
+        # Iteration fails
+        with self.assertRaisesRegex(TypeError, msg):
+            for _ in db:  # type: ignore[misc] # error: "None" not callable [misc]
+                break
+        # Index fails
+        with self.assertRaises(TypeError):
+            _ = db[0]
+        # next fails
+        with self.assertRaisesRegex(TypeError, "'Database' object is not iterable"):
+            _ = next(db)
+        # .next() fails
+        with self.assertRaisesRegex(TypeError, "'Database' object is not iterable"):
+            _ = db.next()
+        # Do not implement typing.Iterable.
+        self.assertNotIsInstance(db, Iterable)


 class TestDatabase(IntegrationTest):

From c15fce0b3c1dab22e4434365b697ad38d0f23c5a Mon Sep 17 00:00:00 2001
From: Shane Harvey
Date: Mon, 28 Mar 2022 15:23:00 -0700
Subject: [PATCH 0109/1588] PYTHON-3138 copydb was removed in MongoDB 4.2
 (#910)

---
 doc/examples/copydb.rst | 36 ++++++++++++++++++++++++++++++++--
 1 file changed, 34 insertions(+), 2 deletions(-)

diff --git a/doc/examples/copydb.rst b/doc/examples/copydb.rst
index 5cf5c66ded..27f1912c6e 100644
--- a/doc/examples/copydb.rst
+++ b/doc/examples/copydb.rst
@@ -1,8 +1,37 @@
 Copying a Database
 ==================

-To copy a database within a single mongod process, or between mongod
-servers, simply connect to the target mongod and use the
+MongoDB >= 4.2
+--------------
+
+Starting in MongoDB version 4.2, the server removes the deprecated ``copydb`` command.
+As an alternative, users can use ``mongodump`` and ``mongorestore`` (with the ``mongorestore``
+options ``--nsFrom`` and ``--nsTo``).
+
+For example, to copy the ``test`` database from a local instance running on the
+default port 27017 to the ``examples`` database on the same instance, you can:
+
+#. Use ``mongodump`` to dump the test database to an archive ``mongodump-test-db``::
+
+     mongodump --archive="mongodump-test-db" --db=test
+
+#. Use ``mongorestore`` with ``--nsFrom`` and ``--nsTo`` to restore (with database name change)
+   from the archive::
+
+     mongorestore --archive="mongodump-test-db" --nsFrom='test.*' --nsTo='examples.*'
+
+Include additional options as necessary, such as to specify the uri or host, username,
+password and authentication database.
+
+For more info about using ``mongodump`` and ``mongorestore`` see the `Copy a Database`_ example
+in the official ``mongodump`` documentation.
+
+MongoDB <= 4.0
+--------------
+
+When using MongoDB <= 4.0, it is possible to use the deprecated ``copydb`` command
+to copy a database. To copy a database within a single ``mongod`` process, or
+between ``mongod`` servers, connect to the target ``mongod`` and use the
 :meth:`~pymongo.database.Database.command` method::

     >>> from pymongo import MongoClient
@@ -39,3 +68,6 @@ but it has been removed.

 .. _copyDatabase function in the mongo shell:
    http://docs.mongodb.org/manual/reference/method/db.copyDatabase/
+
+.. _Copy a Database:
+   https://www.mongodb.com/docs/database-tools/mongodump/#std-label-mongodump-example-copy-clone-database
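For quick ad hoc copies it is also possible to stay entirely in PyMongo. The
following is a minimal sketch only: it assumes a local ``mongod`` on the
default port, copies documents but not indexes or collection options, and
holds at most one batch of documents in memory at a time. Prefer
``mongodump``/``mongorestore`` for anything beyond throwaway data::

    from pymongo import MongoClient

    client = MongoClient()  # assumed: source and target on one local mongod
    source = client["test"]
    target = client["examples"]

    for name in source.list_collection_names():
        batch = []
        for doc in source[name].find():
            batch.append(doc)
            if len(batch) == 1000:
                target[name].insert_many(batch)
                batch = []
        if batch:
            target[name].insert_many(batch)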
From d8c2b315b0ae9fb7e260f0224293605d249c460f Mon Sep 17 00:00:00 2001
From: Steven Silvester
Date: Tue, 29 Mar 2022 14:59:33 -0500
Subject: [PATCH 0110/1588] PYTHON-3185 Pre-Commit Needs an Upgrade (#911)

---
 .pre-commit-config.yaml | 4 ++--
 doc/changelog.rst       | 2 +-
 2 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index 2fc5100787..8b6671d41d 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -17,7 +17,7 @@ repos:
     exclude_types: [json]

 - repo: https://github.com/psf/black
-  rev: 22.1.0
+  rev: 22.3.0
   hooks:
   - id: black
     files: \.py$
@@ -51,7 +51,7 @@ repos:
     args: ["--severity=warning"]

 - repo: https://github.com/sirosen/check-jsonschema
-  rev: 0.11.0
+  rev: 0.14.1
   hooks:
   - id: check-jsonschema
     name: "Check GitHub Workflows"
diff --git a/doc/changelog.rst b/doc/changelog.rst
index d326c24b32..0bacb1bb79 100644
--- a/doc/changelog.rst
+++ b/doc/changelog.rst
@@ -2894,7 +2894,7 @@ highlights is `here
 - added support for :class:`~pymongo.cursor.Cursor.max_scan`.
 - raise :class:`~gridfs.errors.FileExists` exception when creating a duplicate
   GridFS file.
-- use `y2038 `_ for time handling in
+- use `y2038 `_ for time handling in
   the C extension - eliminates 2038 problems when extension is installed.
 - added `sort` parameter to

From a4bba9dd5c60842c5cd69900a552f7c1288e5149 Mon Sep 17 00:00:00 2001
From: Shane Harvey
Date: Tue, 29 Mar 2022 13:45:27 -0700
Subject: [PATCH 0111/1588] Revert "PYTHON-2970 Prioritize electionId over
 setVersion for stale primary check (#845)"

This reverts commit 225d131c2d3f6f0b4c46c130abb3e1452010ad40.

---
 doc/changelog.rst                             |  41 ++---
 pymongo/topology_description.py               |  29 ++--
 .../rs/electionId_precedence_setVersion.json  |  92 -----------
 .../rs/null_election_id.json                  |  30 ++--
 .../rs/secondary_ignore_ok_0.json             |   2 +-
 .../rs/set_version_can_rollback.json          | 149 ------------------
 ...tversion_equal_max_without_electionid.json |  84 ----------
 ...on_greaterthan_max_without_electionid.json |  84 ----------
 .../rs/setversion_without_electionid.json     |  12 +-
 .../rs/use_setversion_without_electionid.json |  32 ++--
 10 files changed, 67 insertions(+), 488 deletions(-)
 delete mode 100644 test/discovery_and_monitoring/rs/electionId_precedence_setVersion.json
 delete mode 100644 test/discovery_and_monitoring/rs/set_version_can_rollback.json
 delete mode 100644 test/discovery_and_monitoring/rs/setversion_equal_max_without_electionid.json
 delete mode 100644 test/discovery_and_monitoring/rs/setversion_greaterthan_max_without_electionid.json

diff --git a/doc/changelog.rst b/doc/changelog.rst
index 0bacb1bb79..d263d4534e 100644
--- a/doc/changelog.rst
+++ b/doc/changelog.rst
@@ -4,40 +4,33 @@ Changelog
 Changes in Version 4.1
 ----------------------

+.. warning:: PyMongo 4.1 drops support for Python 3.6.0 and 3.6.1, Python 3.6.2+ is now required.
+ PyMongo 4.1 brings a number of improvements including: -- :meth:`pymongo.collection.Collection.update_one`, - :meth:`pymongo.collection.Collection.update_many`, - :meth:`pymongo.collection.Collection.delete_one`, - :meth:`pymongo.collection.Collection.delete_many`, - :meth:`pymongo.collection.Collection.aggregate`, - :meth:`pymongo.collection.Collection.find_one_and_delete`, - :meth:`pymongo.collection.Collection.find_one_and_replace`, - :meth:`pymongo.collection.Collection.find_one_and_update`, - :meth:`pymongo.collection.Collection.find`, - and :meth:`pymongo.collection.Collection.replace_one `all support a new - keyword argument ``let`` which is a map of parameter names and values. +- Added support for the ``let`` parameter to + :meth:`~pymongo.collection.Collection.update_one`, + :meth:`~pymongo.collection.Collection.update_many`, + :meth:`~pymongo.collection.Collection.delete_one`, + :meth:`~pymongo.collection.Collection.delete_many`, + :meth:`~pymongo.collection.Collection.replace_one`, + :meth:`~pymongo.collection.Collection.aggregate`, + :meth:`~pymongo.collection.Collection.find_one_and_delete`, + :meth:`~pymongo.collection.Collection.find_one_and_replace`, + :meth:`~pymongo.collection.Collection.find_one_and_update`, + :meth:`~pymongo.collection.Collection.find`, + :meth:`~pymongo.collection.Collection.find_one`, + and :meth:`~pymongo.collection.Collection.bulk_write`. + ``let`` is a map of parameter names and values. Parameters can then be accessed as variables in an aggregate expression context. - :meth:`~pymongo.collection.Collection.aggregate` now supports $merge and $out executing on secondaries on MongoDB >=5.0. aggregate() now always obeys the collection's :attr:`read_preference` on MongoDB >= 5.0. -- :meth:`gridfs.GridOut.seek` now returns the new position in the file, to +- :meth:`gridfs.grid_file.GridOut.seek` now returns the new position in the file, to conform to the behavior of :meth:`io.IOBase.seek`. -Breaking Changes in 4.1 -....................... -- Removed support for Python 3.6.0 and 3.6.1, Python 3.6.2+ is now required. - -Bug fixes -......... - -- Fixed a bug where the client could be unable to discover the new primary - after a simultaneous replica set election and reconfig (`PYTHON-2970`_). - -.. _PYTHON-2970: https://jira.mongodb.org/browse/PYTHON-2970 - Issues Resolved ............... diff --git a/pymongo/topology_description.py b/pymongo/topology_description.py index 9f718376ef..b3dd60680f 100644 --- a/pymongo/topology_description.py +++ b/pymongo/topology_description.py @@ -17,7 +17,6 @@ from random import sample from typing import Any, Callable, Dict, List, NamedTuple, Optional, Tuple -from bson.min_key import MinKey from bson.objectid import ObjectId from pymongo import common from pymongo.errors import ConfigurationError @@ -532,16 +531,24 @@ def _update_rs_from_primary( sds.pop(server_description.address) return (_check_has_primary(sds), replica_set_name, max_set_version, max_election_id) - new_election_tuple = server_description.election_id, server_description.set_version - max_election_tuple = max_election_id, max_set_version - new_election_safe = tuple(MinKey() if i is None else i for i in new_election_tuple) - max_election_safe = tuple(MinKey() if i is None else i for i in max_election_tuple) - if new_election_safe >= max_election_safe: - max_election_id, max_set_version = new_election_tuple - else: - # Stale primary, set to type Unknown. 
- sds[server_description.address] = server_description.to_unknown() - return _check_has_primary(sds), replica_set_name, max_set_version, max_election_id + max_election_tuple = max_set_version, max_election_id + if None not in server_description.election_tuple: + if ( + None not in max_election_tuple + and max_election_tuple > server_description.election_tuple + ): + + # Stale primary, set to type Unknown. + sds[server_description.address] = server_description.to_unknown() + return (_check_has_primary(sds), replica_set_name, max_set_version, max_election_id) + + max_election_id = server_description.election_id + + if server_description.set_version is not None and ( + max_set_version is None or server_description.set_version > max_set_version + ): + + max_set_version = server_description.set_version # We've heard from the primary. Is it the same primary as before? for server in sds.values(): diff --git a/test/discovery_and_monitoring/rs/electionId_precedence_setVersion.json b/test/discovery_and_monitoring/rs/electionId_precedence_setVersion.json deleted file mode 100644 index a7b49e2b97..0000000000 --- a/test/discovery_and_monitoring/rs/electionId_precedence_setVersion.json +++ /dev/null @@ -1,92 +0,0 @@ -{ - "description": "ElectionId is considered higher precedence than setVersion", - "uri": "mongodb://a/?replicaSet=rs", - "phases": [ - { - "responses": [ - [ - "a:27017", - { - "ok": 1, - "helloOk": true, - "isWritablePrimary": true, - "hosts": [ - "a:27017", - "b:27017" - ], - "setName": "rs", - "setVersion": 1, - "electionId": { - "$oid": "000000000000000000000001" - }, - "minWireVersion": 0, - "maxWireVersion": 6 - } - ], - [ - "b:27017", - { - "ok": 1, - "helloOk": true, - "isWritablePrimary": true, - "hosts": [ - "a:27017", - "b:27017" - ], - "setName": "rs", - "setVersion": 2, - "electionId": { - "$oid": "000000000000000000000001" - }, - "minWireVersion": 0, - "maxWireVersion": 6 - } - ], - [ - "a:27017", - { - "ok": 1, - "helloOk": true, - "isWritablePrimary": true, - "hosts": [ - "a:27017", - "b:27017" - ], - "setName": "rs", - "setVersion": 1, - "electionId": { - "$oid": "000000000000000000000002" - }, - "minWireVersion": 0, - "maxWireVersion": 6 - } - ] - ], - "outcome": { - "servers": { - "a:27017": { - "type": "RSPrimary", - "setName": "rs", - "setVersion": 1, - "electionId": { - "$oid": "000000000000000000000002" - } - }, - "b:27017": { - "type": "Unknown", - "setName": null, - "setVersion": null, - "electionId": null - } - }, - "topologyType": "ReplicaSetWithPrimary", - "logicalSessionTimeoutMinutes": null, - "setName": "rs", - "maxSetVersion": 1, - "maxElectionId": { - "$oid": "000000000000000000000002" - } - } - } - ] -} diff --git a/test/discovery_and_monitoring/rs/null_election_id.json b/test/discovery_and_monitoring/rs/null_election_id.json index 8eb519595a..62120e8448 100644 --- a/test/discovery_and_monitoring/rs/null_election_id.json +++ b/test/discovery_and_monitoring/rs/null_election_id.json @@ -123,18 +123,15 @@ "outcome": { "servers": { "a:27017": { - "type": "Unknown", - "setName": null, - "setVersion": null, - "electionId": null - }, - "b:27017": { "type": "RSPrimary", "setName": "rs", "setVersion": 1, - "electionId": { - "$oid": "000000000000000000000002" - } + "electionId": null + }, + "b:27017": { + "type": "Unknown", + "setName": null, + "electionId": null }, "c:27017": { "type": "Unknown", @@ -177,18 +174,15 @@ "outcome": { "servers": { "a:27017": { - "type": "Unknown", - "setName": null, - "setVersion": null, - "electionId": null - }, - "b:27017": { "type": 
"RSPrimary", "setName": "rs", "setVersion": 1, - "electionId": { - "$oid": "000000000000000000000002" - } + "electionId": null + }, + "b:27017": { + "type": "Unknown", + "setName": null, + "electionId": null }, "c:27017": { "type": "Unknown", diff --git a/test/discovery_and_monitoring/rs/secondary_ignore_ok_0.json b/test/discovery_and_monitoring/rs/secondary_ignore_ok_0.json index ee9519930b..4c1cb011a5 100644 --- a/test/discovery_and_monitoring/rs/secondary_ignore_ok_0.json +++ b/test/discovery_and_monitoring/rs/secondary_ignore_ok_0.json @@ -1,5 +1,5 @@ { - "description": "Secondary ignored when ok is zero", + "description": "New primary", "uri": "mongodb://a,b/?replicaSet=rs", "phases": [ { diff --git a/test/discovery_and_monitoring/rs/set_version_can_rollback.json b/test/discovery_and_monitoring/rs/set_version_can_rollback.json deleted file mode 100644 index 28ecbeefca..0000000000 --- a/test/discovery_and_monitoring/rs/set_version_can_rollback.json +++ /dev/null @@ -1,149 +0,0 @@ -{ - "description": "Set version rolls back after new primary with higher election Id", - "uri": "mongodb://a/?replicaSet=rs", - "phases": [ - { - "responses": [ - [ - "a:27017", - { - "ok": 1, - "hello": true, - "isWritablePrimary": true, - "hosts": [ - "a:27017", - "b:27017" - ], - "setName": "rs", - "setVersion": 2, - "electionId": { - "$oid": "000000000000000000000001" - }, - "minWireVersion": 0, - "maxWireVersion": 6 - } - ] - ], - "outcome": { - "servers": { - "a:27017": { - "type": "RSPrimary", - "setName": "rs", - "setVersion": 2, - "electionId": { - "$oid": "000000000000000000000001" - } - }, - "b:27017": { - "type": "Unknown", - "setName": null, - "electionId": null - } - }, - "topologyType": "ReplicaSetWithPrimary", - "logicalSessionTimeoutMinutes": null, - "setName": "rs", - "maxSetVersion": 2, - "maxElectionId": { - "$oid": "000000000000000000000001" - } - } - }, - { - "_comment": "Response from new primary with newer election Id", - "responses": [ - [ - "b:27017", - { - "ok": 1, - "hello": true, - "isWritablePrimary": true, - "hosts": [ - "a:27017", - "b:27017" - ], - "setName": "rs", - "setVersion": 1, - "electionId": { - "$oid": "000000000000000000000002" - }, - "minWireVersion": 0, - "maxWireVersion": 6 - } - ] - ], - "outcome": { - "servers": { - "a:27017": { - "type": "Unknown", - "setName": null, - "electionId": null - }, - "b:27017": { - "type": "RSPrimary", - "setName": "rs", - "setVersion": 1, - "electionId": { - "$oid": "000000000000000000000002" - } - } - }, - "topologyType": "ReplicaSetWithPrimary", - "logicalSessionTimeoutMinutes": null, - "setName": "rs", - "maxSetVersion": 1, - "maxElectionId": { - "$oid": "000000000000000000000002" - } - } - }, - { - "_comment": "Response from stale primary", - "responses": [ - [ - "a:27017", - { - "ok": 1, - "hello": true, - "isWritablePrimary": true, - "hosts": [ - "a:27017", - "b:27017" - ], - "setName": "rs", - "setVersion": 2, - "electionId": { - "$oid": "000000000000000000000001" - }, - "minWireVersion": 0, - "maxWireVersion": 6 - } - ] - ], - "outcome": { - "servers": { - "a:27017": { - "type": "Unknown", - "setName": null, - "electionId": null - }, - "b:27017": { - "type": "RSPrimary", - "setName": "rs", - "setVersion": 1, - "electionId": { - "$oid": "000000000000000000000002" - } - } - }, - "topologyType": "ReplicaSetWithPrimary", - "logicalSessionTimeoutMinutes": null, - "setName": "rs", - "maxSetVersion": 1, - "maxElectionId": { - "$oid": "000000000000000000000002" - } - } - } - ] -} diff --git 
a/test/discovery_and_monitoring/rs/setversion_equal_max_without_electionid.json b/test/discovery_and_monitoring/rs/setversion_equal_max_without_electionid.json deleted file mode 100644 index 91e84d4fa0..0000000000 --- a/test/discovery_and_monitoring/rs/setversion_equal_max_without_electionid.json +++ /dev/null @@ -1,84 +0,0 @@ -{ - "description": "setVersion version that is equal is treated the same as greater than if there is no electionId", - "uri": "mongodb://a/?replicaSet=rs", - "phases": [ - { - "responses": [ - [ - "a:27017", - { - "ok": 1, - "helloOk": true, - "isWritablePrimary": true, - "hosts": [ - "a:27017", - "b:27017" - ], - "setName": "rs", - "setVersion": 1, - "minWireVersion": 0, - "maxWireVersion": 6 - } - ] - ], - "outcome": { - "servers": { - "a:27017": { - "type": "RSPrimary", - "setName": "rs", - "setVersion": 1, - "electionId": null - }, - "b:27017": { - "type": "Unknown", - "setName": null, - "electionId": null - } - }, - "topologyType": "ReplicaSetWithPrimary", - "logicalSessionTimeoutMinutes": null, - "setName": "rs", - "maxSetVersion": 1 - } - }, - { - "responses": [ - [ - "b:27017", - { - "ok": 1, - "helloOk": true, - "isWritablePrimary": true, - "hosts": [ - "a:27017", - "b:27017" - ], - "setName": "rs", - "setVersion": 1, - "minWireVersion": 0, - "maxWireVersion": 6 - } - ] - ], - "outcome": { - "servers": { - "a:27017": { - "type": "Unknown", - "setName": null, - "electionId": null - }, - "b:27017": { - "type": "RSPrimary", - "setName": "rs", - "setVersion": 1, - "electionId": null - } - }, - "topologyType": "ReplicaSetWithPrimary", - "logicalSessionTimeoutMinutes": null, - "setName": "rs", - "maxSetVersion": 1 - } - } - ] -} diff --git a/test/discovery_and_monitoring/rs/setversion_greaterthan_max_without_electionid.json b/test/discovery_and_monitoring/rs/setversion_greaterthan_max_without_electionid.json deleted file mode 100644 index b15fd5c1a7..0000000000 --- a/test/discovery_and_monitoring/rs/setversion_greaterthan_max_without_electionid.json +++ /dev/null @@ -1,84 +0,0 @@ -{ - "description": "setVersion that is greater than maxSetVersion is used if there is no electionId", - "uri": "mongodb://a/?replicaSet=rs", - "phases": [ - { - "responses": [ - [ - "a:27017", - { - "ok": 1, - "helloOk": true, - "isWritablePrimary": true, - "hosts": [ - "a:27017", - "b:27017" - ], - "setName": "rs", - "setVersion": 1, - "minWireVersion": 0, - "maxWireVersion": 6 - } - ] - ], - "outcome": { - "servers": { - "a:27017": { - "type": "RSPrimary", - "setName": "rs", - "setVersion": 1, - "electionId": null - }, - "b:27017": { - "type": "Unknown", - "setName": null, - "electionId": null - } - }, - "topologyType": "ReplicaSetWithPrimary", - "logicalSessionTimeoutMinutes": null, - "setName": "rs", - "maxSetVersion": 1 - } - }, - { - "responses": [ - [ - "b:27017", - { - "ok": 1, - "helloOk": true, - "isWritablePrimary": true, - "hosts": [ - "a:27017", - "b:27017" - ], - "setName": "rs", - "setVersion": 2, - "minWireVersion": 0, - "maxWireVersion": 6 - } - ] - ], - "outcome": { - "servers": { - "a:27017": { - "type": "Unknown", - "setName": null, - "electionId": null - }, - "b:27017": { - "type": "RSPrimary", - "setName": "rs", - "setVersion": 2, - "electionId": null - } - }, - "topologyType": "ReplicaSetWithPrimary", - "logicalSessionTimeoutMinutes": null, - "setName": "rs", - "maxSetVersion": 2 - } - } - ] -} diff --git a/test/discovery_and_monitoring/rs/setversion_without_electionid.json b/test/discovery_and_monitoring/rs/setversion_without_electionid.json index 
f59c162ae1..2f68287f1d 100644 --- a/test/discovery_and_monitoring/rs/setversion_without_electionid.json +++ b/test/discovery_and_monitoring/rs/setversion_without_electionid.json @@ -1,5 +1,5 @@ { - "description": "setVersion that is less than maxSetVersion is ignored if there is no electionId", + "description": "setVersion is ignored if there is no electionId", "uri": "mongodb://a/?replicaSet=rs", "phases": [ { @@ -63,14 +63,14 @@ "outcome": { "servers": { "a:27017": { - "type": "RSPrimary", - "setName": "rs", - "setVersion": 2, + "type": "Unknown", + "setName": null, "electionId": null }, "b:27017": { - "type": "Unknown", - "setName": null, + "type": "RSPrimary", + "setName": "rs", + "setVersion": 1, "electionId": null } }, diff --git a/test/discovery_and_monitoring/rs/use_setversion_without_electionid.json b/test/discovery_and_monitoring/rs/use_setversion_without_electionid.json index 6dd753d5d8..421ff57c8d 100644 --- a/test/discovery_and_monitoring/rs/use_setversion_without_electionid.json +++ b/test/discovery_and_monitoring/rs/use_setversion_without_electionid.json @@ -71,23 +71,20 @@ "outcome": { "servers": { "a:27017": { - "type": "RSPrimary", - "setName": "rs", - "setVersion": 1, - "electionId": { - "$oid": "000000000000000000000001" - } - }, - "b:27017": { "type": "Unknown", "setName": null, "electionId": null + }, + "b:27017": { + "type": "RSPrimary", + "setName": "rs", + "setVersion": 2 } }, "topologyType": "ReplicaSetWithPrimary", "logicalSessionTimeoutMinutes": null, "setName": "rs", - "maxSetVersion": 1, + "maxSetVersion": 2, "maxElectionId": { "$oid": "000000000000000000000001" } @@ -118,25 +115,22 @@ "outcome": { "servers": { "a:27017": { - "type": "RSPrimary", - "setName": "rs", - "setVersion": 1, - "electionId": { - "$oid": "000000000000000000000002" - } - }, - "b:27017": { "type": "Unknown", "setName": null, "electionId": null + }, + "b:27017": { + "type": "RSPrimary", + "setName": "rs", + "setVersion": 2 } }, "topologyType": "ReplicaSetWithPrimary", "logicalSessionTimeoutMinutes": null, "setName": "rs", - "maxSetVersion": 1, + "maxSetVersion": 2, "maxElectionId": { - "$oid": "000000000000000000000002" + "$oid": "000000000000000000000001" } } } From 1d30802f8c7f997d1de482e537f9d88bc85655e3 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Tue, 29 Mar 2022 18:03:42 -0500 Subject: [PATCH 0112/1588] PYTHON-3074 Add documentation for type hints (#906) --- doc/changelog.rst | 1 + doc/examples/index.rst | 1 + doc/examples/type_hints.rst | 243 ++++++++++++++++++++++++++++ doc/index.rst | 3 + pymongo/common.py | 10 +- test/mypy_fails/insert_many_dict.py | 2 +- test/mypy_fails/insert_one_list.py | 2 +- test/test_mypy.py | 3 + 8 files changed, 262 insertions(+), 3 deletions(-) create mode 100644 doc/examples/type_hints.rst diff --git a/doc/changelog.rst b/doc/changelog.rst index d263d4534e..ab895fad51 100644 --- a/doc/changelog.rst +++ b/doc/changelog.rst @@ -8,6 +8,7 @@ Changes in Version 4.1 PyMongo 4.1 brings a number of improvements including: +- Type Hinting support (formerly provided by ``pymongo-stubs``). See :doc:`examples/type_hints` for more information. 
 - Added support for the ``let`` parameter to
   :meth:`~pymongo.collection.Collection.update_one`,
   :meth:`~pymongo.collection.Collection.update_many`,
diff --git a/doc/examples/index.rst b/doc/examples/index.rst
index f8828cdfd7..6cdeafc201 100644
--- a/doc/examples/index.rst
+++ b/doc/examples/index.rst
@@ -31,5 +31,6 @@ MongoDB, you can start it like so:
    server_selection
    tailable
    tls
+   type_hints
    encryption
    uuid
diff --git a/doc/examples/type_hints.rst b/doc/examples/type_hints.rst
new file mode 100644
index 0000000000..029761bc75
--- /dev/null
+++ b/doc/examples/type_hints.rst
@@ -0,0 +1,243 @@
+
+.. _type_hints-example:
+
+Type Hints
+===========
+
+As of version 4.1, PyMongo ships with `type hints`_. With type hints, Python
+type checkers can easily find bugs before they reveal themselves in your code.
+
+If your IDE is configured to use type hints,
+it can suggest more appropriate completions and highlight errors in your code.
+Some examples include `PyCharm`_, `Sublime Text`_, and `Visual Studio Code`_.
+
+You can also use the `mypy`_ tool from your command line or in Continuous Integration tests.
+
+All of the public APIs in PyMongo are fully type hinted, and
+several of them support generic parameters for the
+type of document object returned when decoding BSON documents.
+
+Due to `limitations in mypy`_, the default
+values for generic document types are not yet provided (they will eventually be ``Dict[str, Any]``).
+
+For a larger set of examples that use types, see the PyMongo `test_mypy module`_.
+
+If you would like to opt out of using the provided types, add the following to
+your `mypy config`_: ::
+
+    [mypy-pymongo]
+    follow_imports = False
+
+
+Basic Usage
+-----------
+
+Note that a type for :class:`~pymongo.mongo_client.MongoClient` must be specified. Here we use the
+default, unspecified document type:
+
+.. doctest::
+
+  >>> from pymongo import MongoClient
+  >>> client: MongoClient = MongoClient()
+  >>> collection = client.test.test
+  >>> inserted = collection.insert_one({"x": 1, "tags": ["dog", "cat"]})
+  >>> retrieved = collection.find_one({"x": 1})
+  >>> assert isinstance(retrieved, dict)
+
+For more accurate typing of the document type you can use:
+
+.. doctest::
+
+  >>> from typing import Any, Dict
+  >>> from pymongo import MongoClient
+  >>> client: MongoClient[Dict[str, Any]] = MongoClient()
+  >>> collection = client.test.test
+  >>> inserted = collection.insert_one({"x": 1, "tags": ["dog", "cat"]})
+  >>> retrieved = collection.find_one({"x": 1})
+  >>> assert isinstance(retrieved, dict)
+
+Typed Client
+------------
+
+:class:`~pymongo.mongo_client.MongoClient` is generic on the document type used to decode BSON documents.
+
+You can specify a :class:`~bson.raw_bson.RawBSONDocument` document type:
+
+.. doctest::
+
+  >>> from pymongo import MongoClient
+  >>> from bson.raw_bson import RawBSONDocument
+  >>> client = MongoClient(document_class=RawBSONDocument)
+  >>> collection = client.test.test
+  >>> inserted = collection.insert_one({"x": 1, "tags": ["dog", "cat"]})
+  >>> result = collection.find_one({"x": 1})
+  >>> assert isinstance(result, RawBSONDocument)
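+
+A :class:`~bson.raw_bson.RawBSONDocument` keeps the document's raw BSON bytes
+and defers decoding until a field is accessed. As a short sketch (reusing
+``result`` from the example above)::
+
+  assert result is not None
+  raw_bytes = result.raw  # the undecoded BSON bytes are still available
+  x = result["x"]  # fields are decoded on access
+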
doctest::
+
+  >>> from bson import SON
+  >>> from pymongo import MongoClient
+  >>> client = MongoClient(document_class=SON[str, int])
+  >>> collection = client.test.test
+  >>> inserted = collection.insert_one({"x": 1, "y": 2 })
+  >>> result = collection.find_one({"x": 1})
+  >>> assert result is not None
+  >>> assert result["x"] == 1
+
+Note that when using :class:`~bson.son.SON`, the key and value types must be given, e.g. ``SON[str, Any]``.
+
+
+Typed Collection
+----------------
+
+You can use :py:class:`~typing.TypedDict` when using a well-defined schema for the data in a :class:`~pymongo.collection.Collection`:
+
+.. doctest::
+
+  >>> from typing import TypedDict
+  >>> from pymongo import MongoClient, Collection
+  >>> class Movie(TypedDict):
+  ...     name: str
+  ...     year: int
+  ...
+  >>> client: MongoClient = MongoClient()
+  >>> collection: Collection[Movie] = client.test.test
+  >>> inserted = collection.insert_one({"name": "Jurassic Park", "year": 1993 })
+  >>> result = collection.find_one({"name": "Jurassic Park"})
+  >>> assert result is not None
+  >>> assert result["year"] == 1993
+
+Typed Database
+--------------
+
+While less common, you could specify that the documents in an entire database
+match a well-defined schema using :py:class:`~typing.TypedDict`.
+
+
+.. doctest::
+
+  >>> from typing import TypedDict
+  >>> from pymongo import MongoClient, Database
+  >>> class Movie(TypedDict):
+  ...     name: str
+  ...     year: int
+  ...
+  >>> client: MongoClient = MongoClient()
+  >>> db: Database[Movie] = client.test
+  >>> collection = db.test
+  >>> inserted = collection.insert_one({"name": "Jurassic Park", "year": 1993 })
+  >>> result = collection.find_one({"name": "Jurassic Park"})
+  >>> assert result is not None
+  >>> assert result["year"] == 1993
+
+Typed Command
+-------------
+When using :meth:`~pymongo.database.Database.command`, you can specify the document type by providing a custom :class:`~bson.codec_options.CodecOptions`:
+
+.. doctest::
+
+  >>> from pymongo import MongoClient
+  >>> from bson.raw_bson import RawBSONDocument
+  >>> from bson import CodecOptions
+  >>> client: MongoClient = MongoClient()
+  >>> options = CodecOptions(RawBSONDocument)
+  >>> result = client.admin.command("ping", codec_options=options)
+  >>> assert isinstance(result, RawBSONDocument)
+
+Custom :py:class:`collections.abc.Mapping` subclasses and :py:class:`~typing.TypedDict` are also supported.
+For :py:class:`~typing.TypedDict`, use the form: ``options: CodecOptions[MyTypedDict] = CodecOptions(...)``.
+
+Typed BSON Decoding
+-------------------
+You can specify the document type returned by :mod:`bson` decoding functions by providing :class:`~bson.codec_options.CodecOptions`:
+
+.. doctest::
+
+  >>> from typing import Any, Dict
+  >>> from bson import CodecOptions, encode, decode
+  >>> class MyDict(Dict[str, Any]):
+  ...     def foo(self):
+  ...         return "bar"
+  ...
+  >>> options = CodecOptions(document_class=MyDict)
+  >>> doc = {"x": 1, "y": 2 }
+  >>> bsonbytes = encode(doc, codec_options=options)
+  >>> rt_document = decode(bsonbytes, codec_options=options)
+  >>> assert rt_document.foo() == "bar"
+
+:class:`~bson.raw_bson.RawBSONDocument` and :py:class:`~typing.TypedDict` are also supported.
+For :py:class:`~typing.TypedDict`, use the form: ``options: CodecOptions[MyTypedDict] = CodecOptions(...)``.
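+
+For example, a minimal sketch of this annotation-only pattern (``Movie`` is a
+hypothetical schema used purely for illustration; ``TypedDict`` requires
+Python 3.8+)::
+
+  from typing import TypedDict
+
+  from bson import CodecOptions, decode, encode
+
+  class Movie(TypedDict):
+      name: str
+      year: int
+
+  def roundtrip(movie: Movie) -> Movie:
+      # The annotation tells the type checker what decode() returns; at
+      # runtime this is an ordinary CodecOptions instance.
+      options: CodecOptions[Movie] = CodecOptions()
+      return decode(encode(movie, codec_options=options), codec_options=options)
+
+  assert roundtrip({"name": "Jurassic Park", "year": 1993})["year"] == 1993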
+
+
+Troubleshooting
+---------------
+
+Client Type Annotation
+~~~~~~~~~~~~~~~~~~~~~~
+If you forget to add a type annotation for a :class:`~pymongo.mongo_client.MongoClient` object you may get the following ``mypy`` error::
+
+  from pymongo import MongoClient
+  client = MongoClient()  # error: Need type annotation for "client"
+
+The solution is to annotate the type as ``client: MongoClient`` or ``client: MongoClient[Dict[str, Any]]``. See `Basic Usage`_.
+
+Incompatible Types
+~~~~~~~~~~~~~~~~~~
+If you use the generic form of :class:`~pymongo.mongo_client.MongoClient` you
+may encounter a ``mypy`` error like::
+
+  from pymongo import MongoClient
+
+  client: MongoClient = MongoClient()
+  client.test.test.insert_many(
+      {"a": 1}
+  )  # error: Dict entry 0 has incompatible type "str": "int";
+  # expected "Mapping[str, Any]": "int"
+
+
+The solution is to use ``client: MongoClient[Dict[str, Any]]`` as used in
+`Basic Usage`_.
+
+Actual Type Errors
+~~~~~~~~~~~~~~~~~~
+
+Other times ``mypy`` will catch an actual error, like the following code::
+
+  from pymongo import MongoClient
+  from typing import Mapping
+  client: MongoClient = MongoClient()
+  client.test.test.insert_one(
+      [{}]
+  )  # error: Argument 1 to "insert_one" of "Collection" has
+  # incompatible type "List[Dict[<nothing>, <nothing>]]";
+  # expected "Mapping[str, Any]"
+
+In this case the solution is to use ``insert_one({})``, passing a document instead of a list.
+
+Another example is trying to set a value on a :class:`~bson.raw_bson.RawBSONDocument`, which is read-only::
+
+  from bson.raw_bson import RawBSONDocument
+  from pymongo import MongoClient
+
+  client = MongoClient(document_class=RawBSONDocument)
+  coll = client.test.test
+  doc = {"my": "doc"}
+  coll.insert_one(doc)
+  retrieved = coll.find_one({"_id": doc["_id"]})
+  assert retrieved is not None
+  assert len(retrieved.raw) > 0
+  retrieved[
+      "foo"
+  ] = "bar"  # error: Unsupported target for indexed assignment
+  # ("RawBSONDocument") [index]
+
+.. _PyCharm: https://www.jetbrains.com/help/pycharm/type-hinting-in-product.html
+.. _Visual Studio Code: https://code.visualstudio.com/docs/languages/python
+.. _Sublime Text: https://github.com/sublimelsp/LSP-pyright
+.. _type hints: https://docs.python.org/3/library/typing.html
+.. _mypy: https://mypy.readthedocs.io/en/stable/cheat_sheet_py3.html
+.. _limitations in mypy: https://github.com/python/mypy/issues/3737
+.. _mypy config: https://mypy.readthedocs.io/en/stable/config_file.html
+.. _test_mypy module: https://github.com/mongodb/mongo-python-driver/blob/master/test/test_mypy.py
diff --git a/doc/index.rst b/doc/index.rst
index 8fd357b4cd..b6e510ad33 100644
--- a/doc/index.rst
+++ b/doc/index.rst
@@ -28,6 +28,9 @@ everything you need to know to use **PyMongo**.
 :doc:`examples/encryption`
    Using PyMongo with client side encryption.
 
+:doc:`examples/type_hints`
+   Using PyMongo with type hints.
+
 :doc:`faq`
    Some questions that come up often.
 
diff --git a/pymongo/common.py b/pymongo/common.py
index 9007bbdfd2..5a6ffbd369 100644
--- a/pymongo/common.py
+++ b/pymongo/common.py
@@ -448,7 +448,15 @@ def validate_document_class(
     option: str, value: Any
 ) -> Union[Type[MutableMapping], Type[RawBSONDocument]]:
     """Validate the document_class option."""
-    if not issubclass(value, (abc.MutableMapping, RawBSONDocument)):
+    # issubclass can raise TypeError for generic aliases like SON[str, Any].
+    # In that case we can use the base class for the comparison.
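+    # For example (a hypothetical illustration): issubclass(SON[str, Any],
+    # abc.MutableMapping) raises TypeError, while SON[str, Any].__origin__
+    # is the plain SON class, for which the issubclass check succeeds.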
+    is_mapping = False
+    try:
+        is_mapping = issubclass(value, abc.MutableMapping)
+    except TypeError:
+        if hasattr(value, "__origin__"):
+            is_mapping = issubclass(value.__origin__, abc.MutableMapping)
+    if not is_mapping and not issubclass(value, RawBSONDocument):
         raise TypeError(
             "%s must be dict, bson.son.SON, "
             "bson.raw_bson.RawBSONDocument, or a "
diff --git a/test/mypy_fails/insert_many_dict.py b/test/mypy_fails/insert_many_dict.py
index 6e8acb67b4..7cbabc28f0 100644
--- a/test/mypy_fails/insert_many_dict.py
+++ b/test/mypy_fails/insert_many_dict.py
@@ -1,6 +1,6 @@
 from pymongo import MongoClient
 
-client = MongoClient()
+client: MongoClient = MongoClient()
 client.test.test.insert_many(
     {"a": 1}
 )  # error: Dict entry 0 has incompatible type "str": "int"; expected "Mapping[str, Any]": "int"
diff --git a/test/mypy_fails/insert_one_list.py b/test/mypy_fails/insert_one_list.py
index 7a26a3ff79..12079ffc6d 100644
--- a/test/mypy_fails/insert_one_list.py
+++ b/test/mypy_fails/insert_one_list.py
@@ -1,6 +1,6 @@
 from pymongo import MongoClient
 
-client = MongoClient()
+client: MongoClient = MongoClient()
 client.test.test.insert_one(
     [{}]
 )  # error: Argument 1 to "insert_one" of "Collection" has incompatible type "List[Dict[<nothing>, <nothing>]]"; expected "Mapping[str, Any]"
diff --git a/test/test_mypy.py b/test/test_mypy.py
index 6cf3eb2c87..12a6cffbe6 100644
--- a/test/test_mypy.py
+++ b/test/test_mypy.py
@@ -309,6 +309,9 @@ def test_son_document_type(self) -> None:
         assert retreived is not None
         retreived["a"] = 1
 
+    def test_son_document_type_runtime(self) -> None:
+        client = MongoClient(document_class=SON[str, Any], connect=False)
+
 
 class TestCommandDocumentType(unittest.TestCase):
     @only_type_check

From c58950a8d4fd3d1238f08944d5d3e04bde6f1e46 Mon Sep 17 00:00:00 2001
From: Shane Harvey
Date: Wed, 30 Mar 2022 14:29:46 -0700
Subject: [PATCH 0113/1588] PYTHON-3186 Avoid SDAM heartbeat timeouts on AWS Lambda (#912)

Poll monitor socket with timeout=0 one last time after timeout expires.
This avoids heartbeat timeouts and connection churn on Lambda and other
FaaS envs.
---
 pymongo/network.py      | 11 +++++-
 test/__init__.py        | 23 +++++++++--
 test/sigstop_sigcont.py | 85 +++++++++++++++++++++++++++++++++++++++++
 test/test_client.py     | 34 +++++++++++++++++
 4 files changed, 148 insertions(+), 5 deletions(-)
 create mode 100644 test/sigstop_sigcont.py

diff --git a/pymongo/network.py b/pymongo/network.py
index 01dca0b835..df08158b2f 100644
--- a/pymongo/network.py
+++ b/pymongo/network.py
@@ -244,6 +244,7 @@ def wait_for_read(sock_info, deadline):
     # Only Monitor connections can be cancelled.
     if context:
         sock = sock_info.sock
+        timed_out = False
         while True:
             # SSLSocket can have buffered data which won't be caught by select.
             if hasattr(sock, "pending") and sock.pending() > 0:
@@ -252,7 +253,13 @@
             # Wait up to 500ms for the socket to become readable and then
             # check for cancellation.
             if deadline:
-                timeout = max(min(deadline - time.monotonic(), _POLL_TIMEOUT), 0.001)
+                remaining = deadline - time.monotonic()
+                # When the timeout has expired perform one final check to
+                # see if the socket is readable. This helps avoid spurious
+                # timeouts on AWS Lambda and other FaaS environments.
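+                # Note: even when timed_out is set we still fall through to
+                # one final non-blocking select (timeout clamped to 0) below
+                # before raising socket.timeout.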
+ if remaining <= 0: + timed_out = True + timeout = max(min(remaining, _POLL_TIMEOUT), 0) else: timeout = _POLL_TIMEOUT readable = sock_info.socket_checker.select(sock, read=True, timeout=timeout) @@ -260,7 +267,7 @@ def wait_for_read(sock_info, deadline): raise _OperationCancelled("hello cancelled") if readable: return - if deadline and time.monotonic() > deadline: + if timed_out: raise socket.timeout("timed out") diff --git a/test/__init__.py b/test/__init__.py index c432b26098..ee6e3ca509 100644 --- a/test/__init__.py +++ b/test/__init__.py @@ -44,6 +44,7 @@ from test.version import Version from typing import Dict, no_type_check from unittest import SkipTest +from urllib.parse import quote_plus import pymongo import pymongo.errors @@ -279,6 +280,22 @@ def client_options(self): opts["replicaSet"] = self.replica_set_name return opts + @property + def uri(self): + """Return the MongoClient URI for creating a duplicate client.""" + opts = client_context.default_client_options.copy() + opts_parts = [] + for opt, val in opts.items(): + strval = str(val) + if isinstance(val, bool): + strval = strval.lower() + opts_parts.append(f"{opt}={quote_plus(strval)}") + opts_part = "&".join(opts_parts) + auth_part = "" + if client_context.auth_enabled: + auth_part = f"{quote_plus(db_user)}:{quote_plus(db_pwd)}@" + return f"mongodb://{auth_part}{self.pair}/?{opts_part}" + @property def hello(self): if not self._hello: @@ -359,7 +376,7 @@ def _init_client(self): username=db_user, password=db_pwd, replicaSet=self.replica_set_name, - **self.default_client_options + **self.default_client_options, ) # May not have this if OperationFailure was raised earlier. @@ -387,7 +404,7 @@ def _init_client(self): username=db_user, password=db_pwd, replicaSet=self.replica_set_name, - **self.default_client_options + **self.default_client_options, ) else: self.client = pymongo.MongoClient( @@ -490,7 +507,7 @@ def _check_user_provided(self): username=db_user, password=db_pwd, serverSelectionTimeoutMS=100, - **self.default_client_options + **self.default_client_options, ) try: diff --git a/test/sigstop_sigcont.py b/test/sigstop_sigcont.py new file mode 100644 index 0000000000..ef4730f0bf --- /dev/null +++ b/test/sigstop_sigcont.py @@ -0,0 +1,85 @@ +# Copyright 2022-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""Used by test_client.TestClient.test_sigstop_sigcont.""" + +import logging +import sys + +sys.path[0:0] = [""] + +from pymongo import monitoring +from pymongo.mongo_client import MongoClient + + +class HeartbeatLogger(monitoring.ServerHeartbeatListener): + """Log events until the listener is closed.""" + + def __init__(self): + self.closed = False + + def close(self): + self.closed = True + + def started(self, event: monitoring.ServerHeartbeatStartedEvent) -> None: + if self.closed: + return + logging.info("%s", event) + + def succeeded(self, event: monitoring.ServerHeartbeatSucceededEvent) -> None: + if self.closed: + return + logging.info("%s", event) + + def failed(self, event: monitoring.ServerHeartbeatFailedEvent) -> None: + if self.closed: + return + logging.warning("%s", event) + + +def main(uri: str) -> None: + heartbeat_logger = HeartbeatLogger() + client = MongoClient( + uri, + event_listeners=[heartbeat_logger], + heartbeatFrequencyMS=500, + connectTimeoutMS=500, + ) + client.admin.command("ping") + logging.info("TEST STARTED") + # test_sigstop_sigcont will SIGSTOP and SIGCONT this process in this loop. + while True: + try: + data = input('Type "q" to quit: ') + except EOFError: + break + if data == "q": + break + client.admin.command("ping") + logging.info("TEST COMPLETED") + heartbeat_logger.close() + client.close() + + +if __name__ == "__main__": + if len(sys.argv) != 2: + print("unknown or missing options") + print(f"usage: python3 {sys.argv[0]} 'mongodb://localhost'") + exit(1) + + # Enable logs in this format: + # 2022-03-30 12:40:55,582 INFO + FORMAT = "%(asctime)s %(levelname)s %(message)s" + logging.basicConfig(format=FORMAT, level=logging.INFO) + main(sys.argv[1]) diff --git a/test/test_client.py b/test/test_client.py index 5958ff6d52..40f276a9db 100644 --- a/test/test_client.py +++ b/test/test_client.py @@ -23,6 +23,7 @@ import signal import socket import struct +import subprocess import sys import threading import time @@ -1688,6 +1689,39 @@ def test_srv_max_hosts_kwarg(self): ) self.assertEqual(len(client.topology_description.server_descriptions()), 2) + @unittest.skipIf( + client_context.load_balancer or client_context.serverless, + "loadBalanced clients do not run SDAM", + ) + @unittest.skipIf(sys.platform == "win32", "Windows does not support SIGSTOP") + def test_sigstop_sigcont(self): + test_dir = os.path.dirname(os.path.realpath(__file__)) + script = os.path.join(test_dir, "sigstop_sigcont.py") + p = subprocess.Popen( + [sys.executable, script, client_context.uri], + stdin=subprocess.PIPE, + stdout=subprocess.PIPE, + stderr=subprocess.STDOUT, + ) + self.addCleanup(p.wait, timeout=1) + self.addCleanup(p.kill) + time.sleep(1) + # Stop the child, sleep for twice the streaming timeout + # (heartbeatFrequencyMS + connectTimeoutMS), and restart. + os.kill(p.pid, signal.SIGSTOP) + time.sleep(2) + os.kill(p.pid, signal.SIGCONT) + time.sleep(0.5) + # Tell the script to exit gracefully. 
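+        # sigstop_sigcont.py loops on input() and exits when it reads "q".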
+ outs, _ = p.communicate(input=b"q\n", timeout=10) + self.assertTrue(outs) + log_output = outs.decode("utf-8") + self.assertIn("TEST STARTED", log_output) + self.assertIn("ServerHeartbeatStartedEvent", log_output) + self.assertIn("ServerHeartbeatSucceededEvent", log_output) + self.assertIn("TEST COMPLETED", log_output) + self.assertNotIn("ServerHeartbeatFailedEvent", log_output) + class TestExhaustCursor(IntegrationTest): """Test that clients properly handle errors from exhaust cursors.""" From 3179eab91d1bcb6d54527444114ab810ca772c9f Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Wed, 30 Mar 2022 18:57:12 -0700 Subject: [PATCH 0114/1588] Improve the changelog for 4.1 (#915) --- doc/changelog.rst | 18 +++++++++++++++++- 1 file changed, 17 insertions(+), 1 deletion(-) diff --git a/doc/changelog.rst b/doc/changelog.rst index ab895fad51..28c467a299 100644 --- a/doc/changelog.rst +++ b/doc/changelog.rst @@ -8,7 +8,9 @@ Changes in Version 4.1 PyMongo 4.1 brings a number of improvements including: -- Type Hinting support (formerly provided by ``pymongo-stubs``). See :doc:`examples/type_hints` for more information. +- Type Hinting support (formerly provided by `pymongo-stubs`_). See :doc:`examples/type_hints` for more information. +- Added support for the ``comment`` parameter to all helpers. For example see + :meth:`~pymongo.collection.Collection.insert_one`. - Added support for the ``let`` parameter to :meth:`~pymongo.collection.Collection.update_one`, :meth:`~pymongo.collection.Collection.update_many`, @@ -31,6 +33,16 @@ PyMongo 4.1 brings a number of improvements including: MongoDB >= 5.0. - :meth:`gridfs.grid_file.GridOut.seek` now returns the new position in the file, to conform to the behavior of :meth:`io.IOBase.seek`. +- Improved reuse of implicit sessions (`PYTHON-2956`_). + +Bug fixes +......... + +- Fixed bug that would cause SDAM heartbeat timeouts and connection churn on + AWS Lambda and other FaaS environments (`PYTHON-3186`_). +- Fixed bug where :class:`~pymongo.mongo_client.MongoClient`, + :class:`~pymongo.database.Database`, and :class:`~pymongo.collection.Collection` + mistakenly implemented :class:`typing.Iterable` (`PYTHON-3084`_). Issues Resolved ............... @@ -39,6 +51,10 @@ See the `PyMongo 4.1 release notes in JIRA`_ for the list of resolved issues in this release. .. _PyMongo 4.1 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=30619 +.. _PYTHON-2956: https://jira.mongodb.org/browse/PYTHON-2956 +.. _PYTHON-3084: https://jira.mongodb.org/browse/PYTHON-3084 +.. _PYTHON-3186: https://jira.mongodb.org/browse/PYTHON-3186 +.. 
_pymongo-stubs: https://github.com/mongodb-labs/pymongo-stubs Changes in Version 4.0 ---------------------- From 484058e18d95793a95d3d7ad5a4e7f7e3f6161d9 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Wed, 30 Mar 2022 21:50:22 -0700 Subject: [PATCH 0115/1588] PYTHON-3160 Fix MMAPv1 tests (#914) --- .../driver-sessions-dirty-session-errors.json | 1 - test/test_session.py | 10 ++++----- test/unified_format.py | 21 ++++++++++++++++++- 3 files changed, 24 insertions(+), 8 deletions(-) diff --git a/test/sessions/driver-sessions-dirty-session-errors.json b/test/sessions/driver-sessions-dirty-session-errors.json index 88a9171db1..361ea83d7b 100644 --- a/test/sessions/driver-sessions-dirty-session-errors.json +++ b/test/sessions/driver-sessions-dirty-session-errors.json @@ -448,7 +448,6 @@ "name": "insertOne", "object": "collection0", "arguments": { - "session": "session0", "document": { "_id": 2 } diff --git a/test/test_session.py b/test/test_session.py index 53609c70cb..e6f15de6bf 100644 --- a/test/test_session.py +++ b/test/test_session.py @@ -183,12 +183,11 @@ def test_implicit_sessions_checkout(self): # "To confirm that implicit sessions only allocate their server session after a # successful connection checkout" test from Driver Sessions Spec. succeeded = False + lsid_set = set() failures = 0 for _ in range(5): listener = EventListener() - client = rs_or_single_client( - event_listeners=[listener], maxPoolSize=1, retryWrites=True - ) + client = rs_or_single_client(event_listeners=[listener], maxPoolSize=1) cursor = client.db.test.find({}) ops: List[Tuple[Callable, List[Any]]] = [ (client.db.test.find_one, [{"_id": 1}]), @@ -225,7 +224,7 @@ def thread_target(op, *args): thread.join() self.assertIsNone(thread.exc) client.close() - lsid_set = set() + lsid_set.clear() for i in listener.results["started"]: if i.command.get("lsid"): lsid_set.add(i.command.get("lsid")["id"]) @@ -233,8 +232,7 @@ def thread_target(op, *args): succeeded = True else: failures += 1 - print(failures) - self.assertTrue(succeeded) + self.assertTrue(succeeded, lsid_set) def test_pool_lifo(self): # "Pool is LIFO" test from Driver Sessions Spec. diff --git a/test/unified_format.py b/test/unified_format.py index 5bf98c5451..adfd0cac0a 100644 --- a/test/unified_format.py +++ b/test/unified_format.py @@ -766,11 +766,30 @@ def setUp(self): def maybe_skip_test(self, spec): # add any special-casing for skipping tests here if client_context.storage_engine == "mmapv1": - if "Dirty explicit session is discarded" in spec["description"]: + if ( + "Dirty explicit session is discarded" in spec["description"] + or "Dirty implicit session is discarded" in spec["description"] + ): raise unittest.SkipTest("MMAPv1 does not support retryWrites=True") elif "Client side error in command starting transaction" in spec["description"]: raise unittest.SkipTest("Implement PYTHON-1894") + # Some tests need to be skipped based on the operations they try to run. 
+ for op in spec["operations"]: + name = op["name"] + if name == "count": + self.skipTest("PyMongo does not support count()") + if name == "listIndexNames": + self.skipTest("PyMongo does not support list_index_names()") + if client_context.storage_engine == "mmapv1": + if name == "createChangeStream": + self.skipTest("MMAPv1 does not support change streams") + if name == "withTransaction" or name == "startTransaction": + self.skipTest("MMAPv1 does not support document-level locking") + if not client_context.test_commands_enabled: + if name == "failPoint" or name == "targetedFailPoint": + self.skipTest("Test commands must be enabled to use fail points") + def process_error(self, exception, spec): is_error = spec.get("isError") is_client_error = spec.get("isClientError") From 113d66dc181795d4182f4d94e7c0a9c8a8ef733e Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Thu, 31 Mar 2022 04:31:05 -0500 Subject: [PATCH 0116/1588] PYTHON-2406 Clean up of tools documentation page (#913) --- doc/tools.rst | 56 +++++++++++++++++++++++++-------------------------- 1 file changed, 28 insertions(+), 28 deletions(-) diff --git a/doc/tools.rst b/doc/tools.rst index 69ee64448b..e88b57ee69 100644 --- a/doc/tools.rst +++ b/doc/tools.rst @@ -26,26 +26,13 @@ needs. Even if you eventually come to the decision to use one of these layers, the time spent working directly with the driver will have increased your understanding of how MongoDB actually works. -PyMODM - `PyMODM `_ is an ORM-like framework on top - of PyMongo. PyMODM is maintained by engineers at MongoDB, Inc. and is quick - to adopt new MongoDB features. PyMODM is a "core" ODM, meaning that it - provides simple, extensible functionality that can be leveraged by other - libraries to target platforms like Django. At the same time, PyMODM is - powerful enough to be used for developing applications on its own. Complete - documentation is available on `readthedocs - `_. - -Humongolus - `Humongolus `_ is a lightweight ORM - framework for Python and MongoDB. The name comes from the combination of - MongoDB and `Homunculus `_ (the - concept of a miniature though fully formed human body). Humongolus allows - you to create models/schemas with robust validation. It attempts to be as - pythonic as possible and exposes the pymongo cursor objects whenever - possible. The code is available for download - `at GitHub `_. Tutorials and usage - examples are also available at GitHub. +MongoEngine + `MongoEngine `_ is another ORM-like + layer on top of PyMongo. It allows you to define schemas for + documents and query collections using syntax inspired by the Django + ORM. The code is available on `GitHub + `_; for more information, see + the `tutorial `_. MincePy `MincePy `_ is an @@ -65,14 +52,6 @@ Ming `_ for more details. -MongoEngine - `MongoEngine `_ is another ORM-like - layer on top of PyMongo. It allows you to define schemas for - documents and query collections using syntax inspired by the Django - ORM. The code is available on `GitHub - `_; for more information, see - the `tutorial `_. - MotorEngine `MotorEngine `_ is a port of MongoEngine to Motor, for asynchronous access with Tornado. @@ -91,6 +70,16 @@ uMongo No longer maintained """""""""""""""""""" +PyMODM + `PyMODM `_ is an ORM-like framework on top + of PyMongo. PyMODM is maintained by engineers at MongoDB, Inc. and is quick + to adopt new MongoDB features. 
PyMODM is a "core" ODM, meaning that it + provides simple, extensible functionality that can be leveraged by other + libraries to target platforms like Django. At the same time, PyMODM is + powerful enough to be used for developing applications on its own. Complete + documentation is available on `readthedocs + `_. + MongoKit The `MongoKit `_ framework is an ORM-like layer on top of PyMongo. There is also a MongoKit @@ -116,6 +105,17 @@ Manga Django ORM, but Pymongo's query language is maintained. The source `is on GitHub `_. +Humongolus + `Humongolus `_ is a lightweight ORM + framework for Python and MongoDB. The name comes from the combination of + MongoDB and `Homunculus `_ (the + concept of a miniature though fully formed human body). Humongolus allows + you to create models/schemas with robust validation. It attempts to be as + pythonic as possible and exposes the pymongo cursor objects whenever + possible. The code is available for download + `at GitHub `_. Tutorials and usage + examples are also available at GitHub. + Framework Tools --------------- This section lists tools and adapters that have been designed to work with From 1d6914f749baa6e538a7c6f327eb626d6c97a206 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Thu, 31 Mar 2022 12:25:45 -0700 Subject: [PATCH 0117/1588] PYTHON-3191 Fix test_sigstop_sigcont with Versioned API (#916) --- test/__init__.py | 1 + test/sigstop_sigcont.py | 8 ++++++++ 2 files changed, 9 insertions(+) diff --git a/test/__init__.py b/test/__init__.py index ee6e3ca509..3800c7890e 100644 --- a/test/__init__.py +++ b/test/__init__.py @@ -284,6 +284,7 @@ def client_options(self): def uri(self): """Return the MongoClient URI for creating a duplicate client.""" opts = client_context.default_client_options.copy() + opts.pop("server_api", None) # Cannot be set from the URI opts_parts = [] for opt, val in opts.items(): strval = str(val) diff --git a/test/sigstop_sigcont.py b/test/sigstop_sigcont.py index ef4730f0bf..87b4f62038 100644 --- a/test/sigstop_sigcont.py +++ b/test/sigstop_sigcont.py @@ -15,12 +15,19 @@ """Used by test_client.TestClient.test_sigstop_sigcont.""" import logging +import os import sys sys.path[0:0] = [""] from pymongo import monitoring from pymongo.mongo_client import MongoClient +from pymongo.server_api import ServerApi + +SERVER_API = None +MONGODB_API_VERSION = os.environ.get("MONGODB_API_VERSION") +if MONGODB_API_VERSION: + SERVER_API = ServerApi(MONGODB_API_VERSION) class HeartbeatLogger(monitoring.ServerHeartbeatListener): @@ -55,6 +62,7 @@ def main(uri: str) -> None: event_listeners=[heartbeat_logger], heartbeatFrequencyMS=500, connectTimeoutMS=500, + server_api=SERVER_API, ) client.admin.command("ping") logging.info("TEST STARTED") From a809b3c005392017bd9d4de41bb286bd26bb42b8 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Thu, 31 Mar 2022 16:11:20 -0500 Subject: [PATCH 0118/1588] PYTHON-3190 Test Failure - doctests failing cannot import name 'TypedDict' (#917) --- .evergreen/config.yml | 2 +- doc/examples/type_hints.rst | 14 ++++++++------ 2 files changed, 9 insertions(+), 7 deletions(-) diff --git a/.evergreen/config.yml b/.evergreen/config.yml index ef60eaf7d7..a6d9375f26 100644 --- a/.evergreen/config.yml +++ b/.evergreen/config.yml @@ -2506,7 +2506,7 @@ buildvariants: - matrix_name: "tests-doctests" matrix_spec: platform: ubuntu-18.04 - python-version: ["3.6"] + python-version: ["3.8"] display_name: "Doctests ${python-version} ${platform}" tasks: - name: "doctests" diff --git a/doc/examples/type_hints.rst 
b/doc/examples/type_hints.rst
index 029761bc75..6858e95290 100644
--- a/doc/examples/type_hints.rst
+++ b/doc/examples/type_hints.rst
@@ -92,12 +92,13 @@ Note that when using :class:`~bson.son.SON`, the key and value types must be giv
 Typed Collection
 ----------------
 
-You can use :py:class:`~typing.TypedDict` when using a well-defined schema for the data in a :class:`~pymongo.collection.Collection`:
+You can use :py:class:`~typing.TypedDict` (Python 3.8+) when using a well-defined schema for the data in a :class:`~pymongo.collection.Collection`:
 
 .. doctest::
 
   >>> from typing import TypedDict
-  >>> from pymongo import MongoClient, Collection
+  >>> from pymongo import MongoClient
+  >>> from pymongo.collection import Collection
   >>> class Movie(TypedDict):
   ...     name: str
   ...     year: int
@@ -113,13 +114,14 @@ Typed Database
 --------------
 
 While less common, you could specify that the documents in an entire database
-match a well-defined schema using :py:class:`~typing.TypedDict`.
+match a well-defined schema using :py:class:`~typing.TypedDict` (Python 3.8+).
 
 
 .. doctest::
 
   >>> from typing import TypedDict
-  >>> from pymongo import MongoClient, Database
+  >>> from pymongo import MongoClient
+  >>> from pymongo.database import Database
   >>> class Movie(TypedDict):
   ...     name: str
   ...     year: int
@@ -146,7 +148,7 @@ When using :meth:`~pymongo.database.Database.command`, you can specify the doc
 
   >>> result = client.admin.command("ping", codec_options=options)
  >>> assert isinstance(result, RawBSONDocument)
 
-Custom :py:class:`collections.abc.Mapping` subclasses and :py:class:`~typing.TypedDict` are also supported.
+Custom :py:class:`collections.abc.Mapping` subclasses and :py:class:`~typing.TypedDict` (Python 3.8+) are also supported.
 For :py:class:`~typing.TypedDict`, use the form: ``options: CodecOptions[MyTypedDict] = CodecOptions(...)``.
 
 Typed BSON Decoding
@@ -167,7 +169,7 @@ You can specify the document type returned by :mod:`bson` decoding functions by
 
   >>> rt_document = decode(bsonbytes, codec_options=options)
  >>> assert rt_document.foo() == "bar"
 
-:class:`~bson.raw_bson.RawBSONDocument` and :py:class:`~typing.TypedDict` are also supported.
+:class:`~bson.raw_bson.RawBSONDocument` and :py:class:`~typing.TypedDict` (Python 3.8+) are also supported.
 For :py:class:`~typing.TypedDict`, use the form: ``options: CodecOptions[MyTypedDict] = CodecOptions(...)``.

From b0fd5cbdf5733450a864642130a26816dada349c Mon Sep 17 00:00:00 2001
From: Duncan <52967253+dunkOnIT@users.noreply.github.com>
Date: Fri, 1 Apr 2022 19:37:47 +0200
Subject: [PATCH 0119/1588] Improve docstrings for SON parameters (#919)

---
 pymongo/collection.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/pymongo/collection.py b/pymongo/collection.py
index f382628aa8..9ab56dd41c 100644
--- a/pymongo/collection.py
+++ b/pymongo/collection.py
@@ -246,7 +246,7 @@ def _command(
         :Parameters:
           - `sock_info` - A SocketInfo instance.
-          - `command` - The command itself, as a SON instance.
+          - `command` - The command itself, as a :class:`~bson.son.SON` instance.
           - `codec_options` (optional) - An instance of
             :class:`~bson.codec_options.CodecOptions`.
          - `check`: raise OperationFailure if there are errors
@@ -1443,7 +1443,7 @@ def find(self, *args: Any, **kwargs: Any) -> Cursor[_DocumentType]:
         this :class:`Collection`.
 
:Parameters: - - `filter` (optional): a SON object specifying elements which + - `filter` (optional): a :class:`~bson.son.SON` object specifying elements which must be present for a document to be included in the result set - `projection` (optional): a list of field names that should be From 6e99bf451503825577213ef148ec6b519a41257b Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Mon, 4 Apr 2022 10:57:01 -0700 Subject: [PATCH 0120/1588] BUMP 4.1.0 --- gridfs/__init__.py | 6 +++--- pymongo/__init__.py | 2 +- pymongo/collection.py | 8 ++++---- setup.py | 2 +- 4 files changed, 9 insertions(+), 9 deletions(-) diff --git a/gridfs/__init__.py b/gridfs/__init__.py index 73425a9e53..5675e8f937 100644 --- a/gridfs/__init__.py +++ b/gridfs/__init__.py @@ -364,9 +364,9 @@ def find(self, *args: Any, **kwargs: Any) -> GridOutCursor: are associated with that session. :Parameters: - - `filter` (optional): a SON object specifying elements which - must be present for a document to be included in the - result set + - `filter` (optional): A query document that selects which files + to include in the result set. Can be an empty document to include + all files. - `skip` (optional): the number of files to omit (from the start of the result set) when returning the results - `limit` (optional): the maximum number of results to diff --git a/pymongo/__init__.py b/pymongo/__init__.py index 9581068036..69536d5e31 100644 --- a/pymongo/__init__.py +++ b/pymongo/__init__.py @@ -55,7 +55,7 @@ .. _text index: http://docs.mongodb.org/manual/core/index-text/ """ -version_tuple: Tuple[Union[int, str], ...] = (4, 1, 0, ".dev0") +version_tuple: Tuple[Union[int, str], ...] = (4, 1, 0) def get_version_string() -> str: diff --git a/pymongo/collection.py b/pymongo/collection.py index 9ab56dd41c..d6e308b260 100644 --- a/pymongo/collection.py +++ b/pymongo/collection.py @@ -1423,7 +1423,7 @@ def find_one( def find(self, *args: Any, **kwargs: Any) -> Cursor[_DocumentType]: """Query the database. - The `filter` argument is a prototype document that all results + The `filter` argument is a query document that all results must match. For example: >>> db.test.find({"hello": "world"}) @@ -1443,9 +1443,9 @@ def find(self, *args: Any, **kwargs: Any) -> Cursor[_DocumentType]: this :class:`Collection`. :Parameters: - - `filter` (optional): a :class:`~bson.son.SON` object specifying elements which - must be present for a document to be included in the - result set + - `filter` (optional): A query document that selects which documents + to include in the result set. Can be an empty document to include + all documents. - `projection` (optional): a list of field names that should be returned in the result set or a dict specifying the fields to include or exclude. If `projection` is a list "_id" will diff --git a/setup.py b/setup.py index 5bae7dc211..d12918501d 100755 --- a/setup.py +++ b/setup.py @@ -34,7 +34,7 @@ except ImportError: _HAVE_SPHINX = False -version = "4.1.0.dev0" +version = "4.1.0" f = open("README.rst") try: From 331600d4910c377d5b4b95825a93b932ce0c48b6 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Mon, 4 Apr 2022 11:06:52 -0700 Subject: [PATCH 0121/1588] BUMP 4.2.0.dev0 --- pymongo/__init__.py | 2 +- setup.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/pymongo/__init__.py b/pymongo/__init__.py index 69536d5e31..fd1309b6df 100644 --- a/pymongo/__init__.py +++ b/pymongo/__init__.py @@ -55,7 +55,7 @@ .. 
_text index: http://docs.mongodb.org/manual/core/index-text/ """ -version_tuple: Tuple[Union[int, str], ...] = (4, 1, 0) +version_tuple: Tuple[Union[int, str], ...] = (4, 2, 0, ".dev0") def get_version_string() -> str: diff --git a/setup.py b/setup.py index d12918501d..8a59e6d8d6 100755 --- a/setup.py +++ b/setup.py @@ -34,7 +34,7 @@ except ImportError: _HAVE_SPHINX = False -version = "4.1.0" +version = "4.2.0.dev0" f = open("README.rst") try: From 821b5620f796250602b2edc97db14e2ae11eb0e2 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Tue, 5 Apr 2022 13:07:06 -0700 Subject: [PATCH 0122/1588] PYTHON-3198 Fix NameError: name sys is not defined (#920) --- pymongo/uri_parser.py | 2 +- test/test_client.py | 5 +++++ 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/pymongo/uri_parser.py b/pymongo/uri_parser.py index fa44dd8569..bfbf214bcb 100644 --- a/pymongo/uri_parser.py +++ b/pymongo/uri_parser.py @@ -15,6 +15,7 @@ """Tools to parse and validate a MongoDB URI.""" import re +import sys import warnings from typing import Any, Dict, List, Mapping, MutableMapping, Optional, Tuple, Union from urllib.parse import unquote_plus @@ -615,7 +616,6 @@ def _parse_kms_tls_options(kms_tls_options): if __name__ == "__main__": import pprint - import sys try: pprint.pprint(parse_uri(sys.argv[1])) diff --git a/test/test_client.py b/test/test_client.py index 40f276a9db..59a8324d6e 100644 --- a/test/test_client.py +++ b/test/test_client.py @@ -1689,6 +1689,11 @@ def test_srv_max_hosts_kwarg(self): ) self.assertEqual(len(client.topology_description.server_descriptions()), 2) + @unittest.skipIf(_HAVE_DNSPYTHON, "dnspython must not be installed") + def test_srv_no_dnspython_error(self): + with self.assertRaisesRegex(ConfigurationError, 'The "dnspython" module must be'): + MongoClient("mongodb+srv://test1.test.build.10gen.cc/") + @unittest.skipIf( client_context.load_balancer or client_context.serverless, "loadBalanced clients do not run SDAM", From 01f983e8abfe8235afe224a5e4281f8175560604 Mon Sep 17 00:00:00 2001 From: Terence Honles Date: Wed, 6 Apr 2022 11:25:25 -0700 Subject: [PATCH 0123/1588] PYTHON-3214 Fix typing markers not being included in the distribution (#921) --- MANIFEST.in | 3 --- setup.py | 9 ++++++++- 2 files changed, 8 insertions(+), 4 deletions(-) diff --git a/MANIFEST.in b/MANIFEST.in index 726c631e89..d017d16ab0 100644 --- a/MANIFEST.in +++ b/MANIFEST.in @@ -12,6 +12,3 @@ include tools/README.rst recursive-include test *.pem recursive-include test *.py recursive-include bson *.h -include bson/py.typed -include gridfs/py.typed -include pymongo/py.typed diff --git a/setup.py b/setup.py index 8a59e6d8d6..e8e5d37bfb 100755 --- a/setup.py +++ b/setup.py @@ -295,7 +295,14 @@ def build_extension(self, ext): else: extras_require["gssapi"] = ["pykerberos"] -extra_opts = {"packages": ["bson", "pymongo", "gridfs"]} +extra_opts = { + "packages": ["bson", "pymongo", "gridfs"], + "package_data": { + "bson": ["py.typed"], + "pymongo": ["py.typed"], + "gridfs": ["py.typed"], + }, +} if "--no_ext" in sys.argv: sys.argv.remove("--no_ext") From 49c3f9fdfd5925015293ad8ac3130370f249619e Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Wed, 6 Apr 2022 11:47:31 -0700 Subject: [PATCH 0124/1588] PYTHON-3215 Add Typing :: Typed trove classifier --- setup.py | 1 + 1 file changed, 1 insertion(+) diff --git a/setup.py b/setup.py index e8e5d37bfb..8417178441 100755 --- a/setup.py +++ b/setup.py @@ -346,6 +346,7 @@ def build_extension(self, ext): "Programming Language :: Python :: Implementation :: CPython", 
"Programming Language :: Python :: Implementation :: PyPy", "Topic :: Database", + "Typing :: Typed", ], cmdclass={"build_ext": custom_build_ext, "doc": doc, "test": test}, extras_require=extras_require, From 3cb16cae24dd427dfccdaa0ec5324a6ac6af7a8d Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Wed, 6 Apr 2022 12:09:47 -0700 Subject: [PATCH 0125/1588] PYTHON-3210 Remove flakey string assertion from invalid aws creds FLE test (#922) --- test/test_encryption.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/test/test_encryption.py b/test/test_encryption.py index f63127a7be..987c02618f 100644 --- a/test/test_encryption.py +++ b/test/test_encryption.py @@ -1310,9 +1310,9 @@ def test_05_aws_endpoint_wrong_region(self): } # The full error should be something like: # "Credential should be scoped to a valid region, not 'us-east-1'" - # but we only check for "us-east-1" to avoid breaking on slight + # but we only check for EncryptionError to avoid breaking on slight # changes to AWS' error message. - with self.assertRaisesRegex(EncryptionError, "us-east-1"): + with self.assertRaises(EncryptionError): self.client_encryption.create_data_key("aws", master_key=master_key) @unittest.skipUnless(any(AWS_CREDS.values()), "AWS environment credentials are not set") From 5ccbb4d6d8b4725aa0ae86e612aba3d438094dd5 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Wed, 6 Apr 2022 12:18:44 -0700 Subject: [PATCH 0126/1588] PYTHON-3216 Include codec_options.pyi in release distributions (#923) --- setup.py | 15 +++++++-------- 1 file changed, 7 insertions(+), 8 deletions(-) diff --git a/setup.py b/setup.py index 8417178441..dff4678d4b 100755 --- a/setup.py +++ b/setup.py @@ -295,14 +295,7 @@ def build_extension(self, ext): else: extras_require["gssapi"] = ["pykerberos"] -extra_opts = { - "packages": ["bson", "pymongo", "gridfs"], - "package_data": { - "bson": ["py.typed"], - "pymongo": ["py.typed"], - "gridfs": ["py.typed"], - }, -} +extra_opts = {} if "--no_ext" in sys.argv: sys.argv.remove("--no_ext") @@ -350,5 +343,11 @@ def build_extension(self, ext): ], cmdclass={"build_ext": custom_build_ext, "doc": doc, "test": test}, extras_require=extras_require, + packages=["bson", "pymongo", "gridfs"], + package_data={ + "bson": ["py.typed", "*.pyi"], + "pymongo": ["py.typed", "*.pyi"], + "gridfs": ["py.typed", "*.pyi"], + }, **extra_opts ) From dca72b7884f7940498c0898cdaf7b041bc6386db Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Tue, 12 Apr 2022 17:18:23 -0700 Subject: [PATCH 0127/1588] PYTHON-3222 Fix memory leak in cbson decode_all (#927) Add decode_all keyword arg for codec_options. Make decode_all show up in docs. 
---
 bson/__init__.py    | 67 +++++++++++++++++++++++----------------------
 bson/_cbsonmodule.c | 43 ++++-------------------------
 doc/changelog.rst   | 26 ++++++++++++++++++
 test/test_bson.py   |  9 ++++++
 4 files changed, 76 insertions(+), 69 deletions(-)

diff --git a/bson/__init__.py b/bson/__init__.py
index 11a87bbe79..70aa6ae86c 100644
--- a/bson/__init__.py
+++ b/bson/__init__.py
@@ -982,6 +982,40 @@ def decode(
     return _bson_to_dict(data, opts)
 
 
+def _decode_all(data: _ReadableBuffer, opts: "CodecOptions[_DocumentType]") -> List[_DocumentType]:
+    """Decode BSON data to multiple documents."""
+    data, view = get_data_and_view(data)
+    data_len = len(data)
+    docs: List[_DocumentType] = []
+    position = 0
+    end = data_len - 1
+    use_raw = _raw_document_class(opts.document_class)
+    try:
+        while position < end:
+            obj_size = _UNPACK_INT_FROM(data, position)[0]
+            if data_len - position < obj_size:
+                raise InvalidBSON("invalid object size")
+            obj_end = position + obj_size - 1
+            if data[obj_end] != 0:
+                raise InvalidBSON("bad eoo")
+            if use_raw:
+                docs.append(opts.document_class(data[position : obj_end + 1], opts))  # type: ignore
+            else:
+                docs.append(_elements_to_dict(data, view, position + 4, obj_end, opts))
+            position += obj_size
+        return docs
+    except InvalidBSON:
+        raise
+    except Exception:
+        # Change exception type to InvalidBSON but preserve traceback.
+        _, exc_value, exc_tb = sys.exc_info()
+        raise InvalidBSON(str(exc_value)).with_traceback(exc_tb)
+
+
+if _USE_C:
+    _decode_all = _cbson._decode_all  # noqa: F811
+
+
 def decode_all(
     data: _ReadableBuffer, codec_options: "Optional[CodecOptions[_DocumentType]]" = None
 ) -> List[_DocumentType]:
@@ -1008,41 +1042,10 @@ def decode_all(
         `codec_options`.
     """
     opts = codec_options or DEFAULT_CODEC_OPTIONS
-    data, view = get_data_and_view(data)
     if not isinstance(opts, CodecOptions):
         raise _CODEC_OPTIONS_TYPE_ERROR
 
-    data_len = len(data)
-    docs: List[_DocumentType] = []
-    position = 0
-    end = data_len - 1
-    use_raw = _raw_document_class(opts.document_class)
-    try:
-        while position < end:
-            obj_size = _UNPACK_INT_FROM(data, position)[0]
-            if data_len - position < obj_size:
-                raise InvalidBSON("invalid object size")
-            obj_end = position + obj_size - 1
-            if data[obj_end] != 0:
-                raise InvalidBSON("bad eoo")
-            if use_raw:
-                docs.append(
-                    opts.document_class(data[position : obj_end + 1], codec_options)  # type: ignore
-                )
-            else:
-                docs.append(_elements_to_dict(data, view, position + 4, obj_end, opts))
-            position += obj_size
-        return docs
-    except InvalidBSON:
-        raise
-    except Exception:
-        # Change exception type to InvalidBSON but preserve traceback.
- _, exc_value, exc_tb = sys.exc_info() - raise InvalidBSON(str(exc_value)).with_traceback(exc_tb) - - -if _USE_C: - decode_all = _cbson.decode_all # noqa: F811 + return _decode_all(data, opts) # type: ignore[arg-type] def _decode_selective(rawdoc: Any, fields: Any, codec_options: Any) -> Mapping[Any, Any]: diff --git a/bson/_cbsonmodule.c b/bson/_cbsonmodule.c index 8100e951cf..1a296db527 100644 --- a/bson/_cbsonmodule.c +++ b/bson/_cbsonmodule.c @@ -53,7 +53,6 @@ struct module_state { PyObject* BSONInt64; PyObject* Decimal128; PyObject* Mapping; - PyObject* CodecOptions; }; #define GETSTATE(m) ((struct module_state*)PyModule_GetState(m)) @@ -344,8 +343,7 @@ static int _load_python_objects(PyObject* module) { _load_object(&state->BSONInt64, "bson.int64", "Int64") || _load_object(&state->Decimal128, "bson.decimal128", "Decimal128") || _load_object(&state->UUID, "uuid", "UUID") || - _load_object(&state->Mapping, "collections.abc", "Mapping") || - _load_object(&state->CodecOptions, "bson.codec_options", "CodecOptions")) { + _load_object(&state->Mapping, "collections.abc", "Mapping")) { return 1; } /* Reload our REType hack too. */ @@ -498,26 +496,6 @@ int convert_codec_options(PyObject* options_obj, void* p) { return 1; } -/* Fill out a codec_options_t* with default options. - * - * Return 1 on success. - * Return 0 on failure. - */ -int default_codec_options(struct module_state* state, codec_options_t* options) { - PyObject* options_obj = NULL; - PyObject* codec_options_func = _get_object( - state->CodecOptions, "bson.codec_options", "CodecOptions"); - if (codec_options_func == NULL) { - return 0; - } - options_obj = PyObject_CallFunctionObjArgs(codec_options_func, NULL); - Py_DECREF(codec_options_func); - if (options_obj == NULL) { - return 0; - } - return convert_codec_options(options_obj, options); -} - void destroy_codec_options(codec_options_t* options) { Py_CLEAR(options->document_class); Py_CLEAR(options->tzinfo); @@ -2411,15 +2389,10 @@ static PyObject* _cbson_element_to_dict(PyObject* self, PyObject* args) { PyObject* value; PyObject* result_tuple; - if (!PyArg_ParseTuple(args, "OII|O&", &bson, &position, &max, + if (!PyArg_ParseTuple(args, "OIIO&", &bson, &position, &max, convert_codec_options, &options)) { return NULL; } - if (PyTuple_GET_SIZE(args) < 4) { - if (!default_codec_options(GETSTATE(self), &options)) { - return NULL; - } - } if (!PyBytes_Check(bson)) { PyErr_SetString(PyExc_TypeError, "argument to _element_to_dict must be a bytes object"); @@ -2594,17 +2567,13 @@ static PyObject* _cbson_decode_all(PyObject* self, PyObject* args) { PyObject* dict; PyObject* result = NULL; codec_options_t options; - PyObject* options_obj; + PyObject* options_obj = NULL; Py_buffer view = {0}; - if (!PyArg_ParseTuple(args, "O|O", &bson, &options_obj)) { + if (!PyArg_ParseTuple(args, "OO", &bson, &options_obj)) { return NULL; } - if ((PyTuple_GET_SIZE(args) < 2) || (options_obj == Py_None)) { - if (!default_codec_options(GETSTATE(self), &options)) { - return NULL; - } - } else if (!convert_codec_options(options_obj, &options)) { + if (!convert_codec_options(options_obj, &options)) { return NULL; } @@ -2698,7 +2667,7 @@ static PyMethodDef _CBSONMethods[] = { "convert a dictionary to a string containing its BSON representation."}, {"_bson_to_dict", _cbson_bson_to_dict, METH_VARARGS, "convert a BSON string to a SON object."}, - {"decode_all", _cbson_decode_all, METH_VARARGS, + {"_decode_all", _cbson_decode_all, METH_VARARGS, "convert binary data to a sequence of documents."}, 
{"_element_to_dict", _cbson_element_to_dict, METH_VARARGS, "Decode a single key, value pair."}, diff --git a/doc/changelog.rst b/doc/changelog.rst index 28c467a299..1f8a146b37 100644 --- a/doc/changelog.rst +++ b/doc/changelog.rst @@ -1,6 +1,32 @@ Changelog ========= +Changes in Version 4.1.1 +------------------------- + +Issues Resolved +............... + +Version 4.1.1 fixes a number of bugs: + +- Fixed a memory leak bug when calling :func:`~bson.decode_all` without a + ``codec_options`` argument (`PYTHON-3222`_). +- Fixed a bug where :func:`~bson.decode_all` did not accept ``codec_options`` + as a keyword argument (`PYTHON-3222`_). +- Fixed an oversight where type markers (py.typed files) were not included + in our release distributions (`PYTHON-3214`_). +- Fixed a bug where pymongo would raise a "NameError: name sys is not defined" + exception when attempting to parse a "mongodb+srv://" URI when the dnspython + dependency was not installed (`PYTHON-3198`_). + +See the `PyMongo 4.1.1 release notes in JIRA`_ for the list of resolved issues +in this release. + +.. _PYTHON-3198: https://jira.mongodb.org/browse/PYTHON-3198 +.. _PYTHON-3214: https://jira.mongodb.org/browse/PYTHON-3214 +.. _PYTHON-3222: https://jira.mongodb.org/browse/PYTHON-3222 +.. _PyMongo 4.1.1 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=33290 + Changes in Version 4.1 ---------------------- diff --git a/test/test_bson.py b/test/test_bson.py index b0dce7db4e..8ad65f3412 100644 --- a/test/test_bson.py +++ b/test/test_bson.py @@ -1006,6 +1006,15 @@ def test_decode_all_no_options(self): decoded = bson.decode_all(bson.encode(doc2), None)[0] self.assertIsInstance(decoded["id"], Binary) + def test_decode_all_kwarg(self): + doc = {"a": uuid.uuid4()} + opts = CodecOptions(uuid_representation=UuidRepresentation.STANDARD) + encoded = encode(doc, codec_options=opts) + # Positional codec_options + self.assertEqual([doc], decode_all(encoded, opts)) + # Keyword codec_options + self.assertEqual([doc], decode_all(encoded, codec_options=opts)) + def test_unicode_decode_error_handler(self): enc = encode({"keystr": "foobar"}) From fd512d5c90220a37341aca068bc9e3f969c8eead Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Tue, 12 Apr 2022 17:43:10 -0700 Subject: [PATCH 0128/1588] PYTHON-3225 Stop testing delete on capped collections (#928) --- test/test_collection.py | 11 +---------- 1 file changed, 1 insertion(+), 10 deletions(-) diff --git a/test/test_collection.py b/test/test_collection.py index 6319321045..d1a3a6a980 100644 --- a/test/test_collection.py +++ b/test/test_collection.py @@ -1420,16 +1420,7 @@ def test_update_check_keys(self): def test_acknowledged_delete(self): db = self.db db.drop_collection("test") - db.create_collection("test", capped=True, size=1000) - - db.test.insert_one({"x": 1}) - self.assertEqual(1, db.test.count_documents({})) - - # Can't remove from capped collection. 
- self.assertRaises(OperationFailure, db.test.delete_one, {"x": 1}) - db.drop_collection("test") - db.test.insert_one({"x": 1}) - db.test.insert_one({"x": 1}) + db.test.insert_many([{"x": 1}, {"x": 1}]) self.assertEqual(2, db.test.delete_many({}).deleted_count) self.assertEqual(0, db.test.delete_many({}).deleted_count) From a319075ba7d9a61bd93126b8e08be0ee0b0b667c Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Wed, 13 Apr 2022 14:11:13 -0500 Subject: [PATCH 0129/1588] PYTHON-3197 Update docs.mongodb.com links in source, API & Reference documentation (#926) --- README.rst | 4 +-- doc/api/index.rst | 2 +- doc/changelog.rst | 12 ++++---- doc/examples/aggregation.rst | 2 +- doc/examples/copydb.rst | 2 +- doc/examples/high_availability.rst | 8 +++--- doc/examples/server_selection.rst | 4 +-- doc/examples/tailable.rst | 4 +-- doc/examples/tls.rst | 2 +- doc/faq.rst | 4 +-- doc/migrate-to-pymongo4.rst | 44 +++++++++++++++--------------- doc/tutorial.rst | 16 +++++------ pymongo/__init__.py | 8 +++--- pymongo/change_stream.py | 2 +- pymongo/collection.py | 18 ++++++------ pymongo/cursor.py | 12 ++++---- pymongo/database.py | 14 +++++----- pymongo/message.py | 2 +- pymongo/mongo_client.py | 4 +-- pymongo/operations.py | 2 +- pymongo/read_preferences.py | 2 +- test/unified_format.py | 2 +- 22 files changed, 85 insertions(+), 85 deletions(-) diff --git a/README.rst b/README.rst index fedb9e14d4..c3c3757289 100644 --- a/README.rst +++ b/README.rst @@ -13,7 +13,7 @@ database from Python. The ``bson`` package is an implementation of the `BSON format `_ for Python. The ``pymongo`` package is a native Python driver for MongoDB. The ``gridfs`` package is a `gridfs -`_ +`_ implementation on top of ``pymongo``. PyMongo supports MongoDB 3.6, 4.0, 4.2, 4.4, and 5.0. @@ -63,7 +63,7 @@ Security Vulnerabilities If you’ve identified a security vulnerability in a driver or any other MongoDB project, please report it according to the `instructions here -`_. +`_. Installation ============ diff --git a/doc/api/index.rst b/doc/api/index.rst index 64c407fd04..30ae3608ca 100644 --- a/doc/api/index.rst +++ b/doc/api/index.rst @@ -6,7 +6,7 @@ interacting with MongoDB. :mod:`bson` is an implementation of the `BSON format `_, :mod:`pymongo` is a full-featured driver for MongoDB, and :mod:`gridfs` is a set of tools for working with the `GridFS -`_ storage +`_ storage specification. .. toctree:: diff --git a/doc/changelog.rst b/doc/changelog.rst index 1f8a146b37..eee3e4a81d 100644 --- a/doc/changelog.rst +++ b/doc/changelog.rst @@ -376,7 +376,7 @@ Deprecations .. _PYTHON-2466: https://jira.mongodb.org/browse/PYTHON-2466 .. _PYTHON-1690: https://jira.mongodb.org/browse/PYTHON-1690 .. _PYTHON-2472: https://jira.mongodb.org/browse/PYTHON-2472 -.. _profile command: https://docs.mongodb.com/manual/reference/command/profile/ +.. _profile command: https://mongodb.com/docs/manual/reference/command/profile/ Issues Resolved ............... @@ -548,7 +548,7 @@ Unavoidable breaking changes: now always raises the following error: ``InvalidOperation: GridFS does not support multi-document transactions`` -.. _validate command: https://docs.mongodb.com/manual/reference/command/validate/ +.. _validate command: https://mongodb.com/docs/manual/reference/command/validate/ Issues Resolved ............... @@ -896,7 +896,7 @@ Deprecations: - Deprecated :meth:`pymongo.collection.Collection.count` and :meth:`pymongo.cursor.Cursor.count`. 
These two methods use the `count` command and `may or may not be accurate - `_, + `_, depending on the options used and connected MongoDB topology. Use :meth:`~pymongo.collection.Collection.count_documents` instead. - Deprecated the snapshot option of :meth:`~pymongo.collection.Collection.find` @@ -1112,7 +1112,7 @@ Changes and Deprecations: - Deprecated the MongoClient option `socketKeepAlive`. It now defaults to true and disabling it is not recommended, see `does TCP keepalive time affect MongoDB Deployments? - `_ + `_ - Deprecated :meth:`~pymongo.collection.Collection.initialize_ordered_bulk_op`, :meth:`~pymongo.collection.Collection.initialize_unordered_bulk_op`, and :class:`~pymongo.bulk.BulkOperationBuilder`. Use @@ -2408,7 +2408,7 @@ Important New Features: - Support for mongos failover. - A new :meth:`~pymongo.collection.Collection.aggregate` method to support MongoDB's new `aggregation framework - `_. + `_. - Support for legacy Java and C# byte order when encoding and decoding UUIDs. - Support for connecting directly to an arbiter. @@ -2416,7 +2416,7 @@ Important New Features: Starting with MongoDB 2.2 the getLastError command requires authentication when the server's `authentication features - `_ are enabled. + `_ are enabled. Changes to PyMongo were required to support this behavior change. Users of authentication must upgrade to PyMongo 2.3 (or newer) for "safe" write operations to function correctly. diff --git a/doc/examples/aggregation.rst b/doc/examples/aggregation.rst index 738b09485a..cdd82ff6fb 100644 --- a/doc/examples/aggregation.rst +++ b/doc/examples/aggregation.rst @@ -74,4 +74,4 @@ you can add computed fields, create new virtual sub-objects, and extract sub-fields into the top-level of results. .. seealso:: The full documentation for MongoDB's `aggregation framework - `_ + `_ diff --git a/doc/examples/copydb.rst b/doc/examples/copydb.rst index 27f1912c6e..76d0c97a36 100644 --- a/doc/examples/copydb.rst +++ b/doc/examples/copydb.rst @@ -67,7 +67,7 @@ Versions of PyMongo before 3.0 included a ``copy_database`` helper method, but it has been removed. .. _copyDatabase function in the mongo shell: - http://docs.mongodb.org/manual/reference/method/db.copyDatabase/ + http://mongodb.com/docs/manual/reference/method/db.copyDatabase/ .. _Copy a Database: https://www.mongodb.com/docs/database-tools/mongodump/#std-label-mongodump-example-copy-clone-database diff --git a/doc/examples/high_availability.rst b/doc/examples/high_availability.rst index efd7a66cc6..8f94aba074 100644 --- a/doc/examples/high_availability.rst +++ b/doc/examples/high_availability.rst @@ -4,7 +4,7 @@ High Availability and PyMongo PyMongo makes it easy to write highly available applications whether you use a `single replica set `_ or a `large sharded cluster -`_. +`_. Connecting to a Replica Set --------------------------- @@ -14,7 +14,7 @@ PyMongo makes working with `replica sets replica set and show how to handle both initialization and normal connections with PyMongo. -.. seealso:: The MongoDB documentation on `replication `_. +.. seealso:: The MongoDB documentation on `replication `_. Starting a Replica Set ~~~~~~~~~~~~~~~~~~~~~~ @@ -261,7 +261,7 @@ attributes: **Tag sets**: Replica-set members can be `tagged -`_ according to any +`_ according to any criteria you choose. By default, PyMongo ignores tags when choosing a member to read from, but your read preference can be configured with a ``tag_sets`` parameter. 
``tag_sets`` must be a list of dictionaries, each @@ -308,7 +308,7 @@ milliseconds of the closest member's ping time. replica set *through* a mongos. The equivalent is the localThreshold_ command line option. -.. _localThreshold: https://docs.mongodb.com/manual/reference/program/mongos/#std-option-mongos.--localThreshold +.. _localThreshold: https://mongodb.com/docs/manual/reference/program/mongos/#std-option-mongos.--localThreshold .. _health-monitoring: diff --git a/doc/examples/server_selection.rst b/doc/examples/server_selection.rst index fc436c0cd7..be2172489e 100644 --- a/doc/examples/server_selection.rst +++ b/doc/examples/server_selection.rst @@ -19,7 +19,7 @@ to prefer servers running on ``localhost``. from pymongo import MongoClient -.. _server selection algorithm: https://docs.mongodb.com/manual/core/read-preference-mechanics/ +.. _server selection algorithm: https://mongodb.com/docs/manual/core/read-preference-mechanics/ Example: Selecting Servers Running on ``localhost`` @@ -105,4 +105,4 @@ list of known hosts. As an example, for a 3-member replica set with a all available secondaries. -.. _server selection algorithm: https://docs.mongodb.com/manual/core/read-preference-mechanics/ +.. _server selection algorithm: https://mongodb.com/docs/manual/core/read-preference-mechanics/ diff --git a/doc/examples/tailable.rst b/doc/examples/tailable.rst index 1242e9ddf5..79458dc2ff 100644 --- a/doc/examples/tailable.rst +++ b/doc/examples/tailable.rst @@ -3,9 +3,9 @@ Tailable Cursors By default, MongoDB will automatically close a cursor when the client has exhausted all results in the cursor. However, for `capped collections -`_ you may +`_ you may use a `tailable cursor -`_ +`_ that remains open after the client exhausts the results in the initial cursor. The following is a basic example of using a tailable cursor to tail the oplog diff --git a/doc/examples/tls.rst b/doc/examples/tls.rst index 9c3c2c829c..5a851e2530 100644 --- a/doc/examples/tls.rst +++ b/doc/examples/tls.rst @@ -3,7 +3,7 @@ TLS/SSL and PyMongo PyMongo supports connecting to MongoDB over TLS/SSL. This guide covers the configuration options supported by PyMongo. See `the server documentation -`_ to configure +`_ to configure MongoDB. .. warning:: Industry best practices recommend, and some regulations require, diff --git a/doc/faq.rst b/doc/faq.rst index 0d045f7629..06559ddb9b 100644 --- a/doc/faq.rst +++ b/doc/faq.rst @@ -314,7 +314,7 @@ when it is serialized to BSON and used as a query. Thus you can create a subdocument that exactly matches the subdocument in the collection. .. seealso:: `MongoDB Manual entry on subdocument matching - `_. + `_. What does *CursorNotFound* cursor id not valid at server mean? -------------------------------------------------------------- @@ -468,7 +468,7 @@ How can I use something like Python's ``json`` module to encode my documents to ------------------------------------------------------------------------------------- :mod:`~bson.json_util` is PyMongo's built in, flexible tool for using Python's :mod:`json` module with BSON documents and `MongoDB Extended JSON -`_. The +`_. The :mod:`json` module won't work out of the box with all documents from PyMongo as PyMongo supports some special types (like :class:`~bson.objectid.ObjectId` and :class:`~bson.dbref.DBRef`) that are not supported in JSON. 
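As a concrete illustration of the :mod:`~bson.json_util` behavior described in the FAQ entry above, a round trip through MongoDB Extended JSON might look like this (a minimal sketch; the document contents are illustrative)::

    >>> from bson import json_util
    >>> from bson.objectid import ObjectId
    >>> doc = {"_id": ObjectId(), "n": 1}
    >>> s = json_util.dumps(doc)  # serialize to a MongoDB Extended JSON string
    >>> json_util.loads(s)["_id"] == doc["_id"]  # the ObjectId survives the round trip
    True

By contrast, the standard :mod:`json` module raises a TypeError when asked to serialize the :class:`~bson.objectid.ObjectId` value.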
diff --git a/doc/migrate-to-pymongo4.rst b/doc/migrate-to-pymongo4.rst index 6d290dd51b..5f75ed1760 100644 --- a/doc/migrate-to-pymongo4.rst +++ b/doc/migrate-to-pymongo4.rst @@ -88,7 +88,7 @@ The socketKeepAlive parameter is removed Removed the ``socketKeepAlive`` keyword argument to :class:`~pymongo.mongo_client.MongoClient`. PyMongo now always enables TCP -keepalive. For more information see the `documentation `_. +keepalive. For more information see the `documentation `_. Renamed URI options ................... @@ -138,7 +138,7 @@ instead. For example:: client.admin.command('fsync', lock=True) -.. _fsync command: https://docs.mongodb.com/manual/reference/command/fsync/ +.. _fsync command: https://mongodb.com/docs/manual/reference/command/fsync/ MongoClient.unlock is removed ............................. @@ -149,7 +149,7 @@ Removed :meth:`pymongo.mongo_client.MongoClient.unlock`. Run the client.admin.command('fsyncUnlock') -.. _fsyncUnlock command: https://docs.mongodb.com/manual/reference/command/fsyncUnlock/ +.. _fsyncUnlock command: https://mongodb.com/docs/manual/reference/command/fsyncUnlock/ MongoClient.is_locked is removed ................................ @@ -160,7 +160,7 @@ Removed :attr:`pymongo.mongo_client.MongoClient.is_locked`. Run the is_locked = client.admin.command('currentOp').get('fsyncLock') -.. _currentOp command: https://docs.mongodb.com/manual/reference/command/currentOp/ +.. _currentOp command: https://mongodb.com/docs/manual/reference/command/currentOp/ MongoClient.database_names is removed ..................................... @@ -196,7 +196,7 @@ can be changed to this:: max_message_size = doc['maxMessageSizeBytes'] max_write_batch_size = doc['maxWriteBatchSize'] -.. _hello command: https://docs.mongodb.com/manual/reference/command/hello/ +.. _hello command: https://mongodb.com/docs/manual/reference/command/hello/ MongoClient.event_listeners and other configuration option helpers are removed .............................................................................. @@ -309,7 +309,7 @@ can be changed to this:: ops = list(client.admin.aggregate([{'$currentOp': {}}])) -.. _$currentOp aggregation pipeline stage: https://docs.mongodb.com/manual/reference/operator/aggregation/currentOp/ +.. _$currentOp aggregation pipeline stage: https://mongodb.com/docs/manual/reference/operator/aggregation/currentOp/ Database.add_user is removed ............................ @@ -332,8 +332,8 @@ Or change roles:: db.command("updateUser", "user", roles=["readWrite"]) -.. _createUser command: https://docs.mongodb.com/manual/reference/command/createUser/ -.. _updateUser command: https://docs.mongodb.com/manual/reference/command/updateUser/ +.. _createUser command: https://mongodb.com/docs/manual/reference/command/createUser/ +.. _updateUser command: https://mongodb.com/docs/manual/reference/command/updateUser/ Database.remove_user is removed ............................... @@ -343,7 +343,7 @@ PyMongo 3.6. Use the `dropUser command`_ instead:: db.command("dropUser", "user") -.. _dropUser command: https://docs.mongodb.com/manual/reference/command/createUser/ +.. _dropUser command: https://mongodb.com/docs/manual/reference/command/createUser/ Database.profiling_level is removed ................................... @@ -358,7 +358,7 @@ Can be changed to this:: profile = db.command('profile', -1) level = profile['was'] -.. _profile command: https://docs.mongodb.com/manual/reference/command/profile/ +.. 
_profile command: https://mongodb.com/docs/manual/reference/command/profile/ Database.set_profiling_level is removed ....................................... @@ -384,7 +384,7 @@ Can be changed to this:: profiling_info = list(db['system.profile'].find()) -.. _'system.profile' collection: https://docs.mongodb.com/manual/reference/database-profiler/ +.. _'system.profile' collection: https://mongodb.com/docs/manual/reference/database-profiler/ Database.__bool__ raises NotImplementedError ............................................ @@ -542,10 +542,10 @@ Can be changed to this:: | | ``{'$geoWithin': {'$centerSphere': [[,], ]}}`` | +-------------+--------------------------------------------------------------+ -.. _$expr: https://docs.mongodb.com/manual/reference/operator/query/expr/ -.. _$geoWithin: https://docs.mongodb.com/manual/reference/operator/query/geoWithin/ -.. _$center: https://docs.mongodb.com/manual/reference/operator/query/center/ -.. _$centerSphere: https://docs.mongodb.com/manual/reference/operator/query/centerSphere/ +.. _$expr: https://mongodb.com/docs/manual/reference/operator/query/expr/ +.. _$geoWithin: https://mongodb.com/docs/manual/reference/operator/query/geoWithin/ +.. _$center: https://mongodb.com/docs/manual/reference/operator/query/center/ +.. _$centerSphere: https://mongodb.com/docs/manual/reference/operator/query/centerSphere/ Collection.initialize_ordered_bulk_op and initialize_unordered_bulk_op is removed ................................................................................. @@ -600,7 +600,7 @@ deprecated in PyMongo 3.5. MongoDB 4.2 removed the `group command`_. Use :meth:`~pymongo.collection.Collection.aggregate` with the ``$group`` stage instead. -.. _group command: https://docs.mongodb.com/manual/reference/command/group/ +.. _group command: https://mongodb.com/docs/manual/reference/command/group/ Collection.map_reduce and Collection.inline_map_reduce are removed .................................................................. @@ -611,10 +611,10 @@ Migrate to :meth:`~pymongo.collection.Collection.aggregate` or run the `mapReduce command`_ directly with :meth:`~pymongo.database.Database.command` instead. For more guidance on this migration see: -- https://docs.mongodb.com/manual/reference/map-reduce-to-aggregation-pipeline/ -- https://docs.mongodb.com/manual/reference/aggregation-commands-comparison/ +- https://mongodb.com/docs/manual/reference/map-reduce-to-aggregation-pipeline/ +- https://mongodb.com/docs/manual/reference/aggregation-commands-comparison/ -.. _mapReduce command: https://docs.mongodb.com/manual/reference/command/mapReduce/ +.. _mapReduce command: https://mongodb.com/docs/manual/reference/command/mapReduce/ Collection.ensure_index is removed .................................. @@ -651,7 +651,7 @@ can be changed to this:: >>> result = database.command('reIndex', 'my_collection') -.. _reIndex command: https://docs.mongodb.com/manual/reference/command/reIndex/ +.. _reIndex command: https://mongodb.com/docs/manual/reference/command/reIndex/ The modifiers parameter is removed .................................. @@ -865,7 +865,7 @@ Removed :meth:`pymongo.mongo_client.MongoClient.close_cursor` and with :meth:`pymongo.cursor.Cursor.close` or :meth:`pymongo.command_cursor.CommandCursor.close`. -.. _killCursors command: https://docs.mongodb.com/manual/reference/command/killCursors/ +.. 
_killCursors command: https://mongodb.com/docs/manual/reference/command/killCursors/ Database.eval, Database.system_js, and SystemJS are removed ........................................................... @@ -902,7 +902,7 @@ Collection.parallel_scan is removed Removed :meth:`~pymongo.collection.Collection.parallel_scan`. MongoDB 4.2 removed the `parallelCollectionScan command`_. There is no replacement. -.. _parallelCollectionScan command: https://docs.mongodb.com/manual/reference/command/parallelCollectionScan/ +.. _parallelCollectionScan command: https://mongodb.com/docs/manual/reference/command/parallelCollectionScan/ pymongo.message helpers are removed ................................... diff --git a/doc/tutorial.rst b/doc/tutorial.rst index 2ec6c44da8..55961241e8 100644 --- a/doc/tutorial.rst +++ b/doc/tutorial.rst @@ -22,7 +22,7 @@ should run without raising an exception: This tutorial also assumes that a MongoDB instance is running on the default host and port. Assuming you have `downloaded and installed -`_ MongoDB, you +`_ MongoDB, you can start it like so: .. code-block:: bash @@ -56,7 +56,7 @@ Or use the MongoDB URI format: Getting a Database ------------------ A single instance of MongoDB can support multiple independent -`databases `_. When +`databases `_. When working with PyMongo you access databases using attribute style access on :class:`~pymongo.mongo_client.MongoClient` instances: @@ -74,7 +74,7 @@ instead: Getting a Collection -------------------- -A `collection `_ is a +A `collection `_ is a group of documents stored in MongoDB, and can be thought of as roughly the equivalent of a table in a relational database. Getting a collection in PyMongo works the same as getting a database: @@ -112,7 +112,7 @@ post: Note that documents can contain native Python types (like :class:`datetime.datetime` instances) which will be automatically converted to and from the appropriate `BSON -`_ types. +`_ types. .. todo:: link to table of Python <-> BSON types @@ -134,7 +134,7 @@ of ``"_id"`` must be unique across the collection. :meth:`~pymongo.collection.Collection.insert_one` returns an instance of :class:`~pymongo.results.InsertOneResult`. For more information on ``"_id"``, see the `documentation on _id -`_. +`_. After inserting the first document, the *posts* collection has actually been created on the server. We can verify this by listing all @@ -335,7 +335,7 @@ or just of those documents that match a specific query: Range Queries ------------- MongoDB supports many different types of `advanced queries -`_. As an +`_. As an example, lets perform a query where we limit results to posts older than a certain date, but also sort the results by author: @@ -366,7 +366,7 @@ Indexing Adding indexes can help accelerate certain queries and can also add additional functionality to querying and storing documents. In this example, we'll demonstrate how to create a `unique index -`_ on a key that rejects +`_ on a key that rejects documents whose value for that key already exists in the index. First, we'll need to create the index: @@ -404,4 +404,4 @@ the collection: Traceback (most recent call last): DuplicateKeyError: E11000 duplicate key error index: test_database.profiles.$user_id_1 dup key: { : 212 } -.. seealso:: The MongoDB documentation on `indexes `_ +.. 
seealso:: The MongoDB documentation on `indexes `_ diff --git a/pymongo/__init__.py b/pymongo/__init__.py index fd1309b6df..a47fd0a7b3 100644 --- a/pymongo/__init__.py +++ b/pymongo/__init__.py @@ -24,7 +24,7 @@ GEO2D = "2d" """Index specifier for a 2-dimensional `geospatial index`_. -.. _geospatial index: http://docs.mongodb.org/manual/core/2d/ +.. _geospatial index: http://mongodb.com/docs/manual/core/2d/ """ GEOSPHERE = "2dsphere" @@ -32,7 +32,7 @@ .. versionadded:: 2.5 -.. _spherical geospatial index: http://docs.mongodb.org/manual/core/2dsphere/ +.. _spherical geospatial index: http://mongodb.com/docs/manual/core/2dsphere/ """ HASHED = "hashed" @@ -40,7 +40,7 @@ .. versionadded:: 2.5 -.. _hashed index: http://docs.mongodb.org/manual/core/index-hashed/ +.. _hashed index: http://mongodb.com/docs/manual/core/index-hashed/ """ TEXT = "text" @@ -52,7 +52,7 @@ .. versionadded:: 2.7.1 -.. _text index: http://docs.mongodb.org/manual/core/index-text/ +.. _text index: http://mongodb.com/docs/manual/core/index-text/ """ version_tuple: Tuple[Union[int, str], ...] = (4, 2, 0, ".dev0") diff --git a/pymongo/change_stream.py b/pymongo/change_stream.py index db33999788..b4bce8da59 100644 --- a/pymongo/change_stream.py +++ b/pymongo/change_stream.py @@ -78,7 +78,7 @@ class ChangeStream(Generic[_DocumentType]): :meth:`pymongo.mongo_client.MongoClient.watch` instead. .. versionadded:: 3.6 - .. seealso:: The MongoDB documentation on `changeStreams `_. + .. seealso:: The MongoDB documentation on `changeStreams `_. """ def __init__( diff --git a/pymongo/collection.py b/pymongo/collection.py index d6e308b260..79b745d355 100644 --- a/pymongo/collection.py +++ b/pymongo/collection.py @@ -1475,7 +1475,7 @@ def find(self, *args: Any, **kwargs: Any) -> Cursor[_DocumentType]: marks the final document position. If more data is received iteration of the cursor will continue from the last document received. For details, see the `tailable cursor documentation - `_. + `_. - :attr:`~pymongo.cursor.CursorType.TAILABLE_AWAIT` - the result of this find call will be a tailable cursor with the await flag set. The server will wait for a few seconds after returning the @@ -1783,10 +1783,10 @@ def count_documents( .. versionadded:: 3.7 - .. _$expr: https://docs.mongodb.com/manual/reference/operator/query/expr/ - .. _$geoWithin: https://docs.mongodb.com/manual/reference/operator/query/geoWithin/ - .. _$center: https://docs.mongodb.com/manual/reference/operator/query/center/ - .. _$centerSphere: https://docs.mongodb.com/manual/reference/operator/query/centerSphere/ + .. _$expr: https://mongodb.com/docs/manual/reference/operator/query/expr/ + .. _$geoWithin: https://mongodb.com/docs/manual/reference/operator/query/geoWithin/ + .. _$center: https://mongodb.com/docs/manual/reference/operator/query/center/ + .. _$centerSphere: https://mongodb.com/docs/manual/reference/operator/query/centerSphere/ """ pipeline = [{"$match": filter}] if "skip" in kwargs: @@ -1857,7 +1857,7 @@ def create_indexes( when connected to MongoDB >= 3.4. .. versionadded:: 3.0 - .. _createIndexes: https://docs.mongodb.com/manual/reference/command/createIndexes/ + .. _createIndexes: https://mongodb.com/docs/manual/reference/command/createIndexes/ """ common.validate_list("indexes", indexes) if comment is not None: @@ -2012,7 +2012,7 @@ def create_index( .. seealso:: The MongoDB documentation on `indexes `_. - .. _wildcard index: https://docs.mongodb.com/master/core/index-wildcard/ + .. 
_wildcard index: https://dochub.mongodb.org/core/index-wildcard/ """ cmd_options = {} if "maxTimeMS" in kwargs: @@ -2395,7 +2395,7 @@ def aggregate( .. seealso:: :doc:`/examples/aggregation` .. _aggregate command: - https://docs.mongodb.com/manual/reference/command/aggregate + https://mongodb.com/docs/manual/reference/command/aggregate """ with self.__database.client._tmp_session(session, close=False) as s: @@ -2563,7 +2563,7 @@ def watch( .. versionadded:: 3.6 - .. seealso:: The MongoDB documentation on `changeStreams `_. + .. seealso:: The MongoDB documentation on `changeStreams `_. .. _change streams specification: https://github.com/mongodb/specifications/blob/master/source/change-streams/change-streams.rst diff --git a/pymongo/cursor.py b/pymongo/cursor.py index 350cc255bb..2a85f1d82a 100644 --- a/pymongo/cursor.py +++ b/pymongo/cursor.py @@ -223,7 +223,7 @@ def __init__( "use an explicit session with no_cursor_timeout=True " "otherwise the cursor may still timeout after " "30 minutes, for more info see " - "https://docs.mongodb.com/v4.4/reference/method/" + "https://mongodb.com/docs/v4.4/reference/method/" "cursor.noCursorTimeout/" "#session-idle-timeout-overrides-nocursortimeout", UserWarning, @@ -908,7 +908,7 @@ def explain(self) -> _DocumentType: .. note:: This method uses the default verbosity mode of the `explain command - `_, + `_, ``allPlansExecution``. To use a different verbosity use :meth:`~pymongo.database.Database.command` to run the explain command directly. @@ -961,7 +961,7 @@ def hint(self, index: Optional[_Hint]) -> "Cursor[_DocumentType]": def comment(self, comment: Any) -> "Cursor[_DocumentType]": """Adds a 'comment' to the cursor. - http://docs.mongodb.org/manual/reference/operator/comment/ + http://mongodb.com/docs/manual/reference/operator/comment/ :Parameters: - `comment`: A string to attach to the query to help interpret and @@ -1000,8 +1000,8 @@ def where(self, code: Union[str, Code]) -> "Cursor[_DocumentType]": :Parameters: - `code`: JavaScript expression to use as a filter - .. _$expr: https://docs.mongodb.com/manual/reference/operator/query/expr/ - .. _$where: https://docs.mongodb.com/manual/reference/operator/query/where/ + .. _$expr: https://mongodb.com/docs/manual/reference/operator/query/expr/ + .. _$where: https://mongodb.com/docs/manual/reference/operator/query/where/ """ self.__check_okay_to_chain() if not isinstance(code, Code): @@ -1194,7 +1194,7 @@ def alive(self) -> bool: """Does this cursor have the potential to return more data? This is mostly useful with `tailable cursors - `_ + `_ since they will stop iterating even though they *may* return more results in the future. diff --git a/pymongo/database.py b/pymongo/database.py index b5770b0db9..2156a5e972 100644 --- a/pymongo/database.py +++ b/pymongo/database.py @@ -367,7 +367,7 @@ def create_collection( Added the codec_options, read_preference, and write_concern options. .. _create collection command: - https://docs.mongodb.com/manual/reference/command/create + https://mongodb.com/docs/manual/reference/command/create """ with self.__client._tmp_session(session) as s: # Skip this check in a transaction where listCollections is not @@ -448,10 +448,10 @@ def aggregate( .. versionadded:: 3.9 .. _aggregation pipeline: - https://docs.mongodb.com/manual/reference/operator/aggregation-pipeline + https://mongodb.com/docs/manual/reference/operator/aggregation-pipeline .. 
_aggregate command: - https://docs.mongodb.com/manual/reference/command/aggregate + https://mongodb.com/docs/manual/reference/command/aggregate """ with self.client._tmp_session(session, close=False) as s: cmd = _DatabaseAggregationCommand( @@ -563,7 +563,7 @@ def watch( .. versionadded:: 3.7 - .. seealso:: The MongoDB documentation on `changeStreams `_. + .. seealso:: The MongoDB documentation on `changeStreams `_. .. _change streams specification: https://github.com/mongodb/specifications/blob/master/source/change-streams/change-streams.rst @@ -803,7 +803,7 @@ def list_collections( command. - `**kwargs` (optional): Optional parameters of the `listCollections command - `_ + `_ can be passed as keyword arguments to this method. The supported options differ by server version. @@ -849,7 +849,7 @@ def list_collection_names( command. - `**kwargs` (optional): Optional parameters of the `listCollections command - `_ + `_ can be passed as keyword arguments to this method. The supported options differ by server version. @@ -967,7 +967,7 @@ def validate_collection( .. versionchanged:: 3.6 Added ``session`` parameter. - .. _validate command: https://docs.mongodb.com/manual/reference/command/validate/ + .. _validate command: https://mongodb.com/docs/manual/reference/command/validate/ """ name = name_or_collection if isinstance(name, Collection): diff --git a/pymongo/message.py b/pymongo/message.py index 1fdf0ece35..de43d20c97 100644 --- a/pymongo/message.py +++ b/pymongo/message.py @@ -13,7 +13,7 @@ # limitations under the License. """Tools for creating `messages -`_ to be sent to +`_ to be sent to MongoDB. .. note:: This module is for internal use and is generally not needed by diff --git a/pymongo/mongo_client.py b/pymongo/mongo_client.py index 8781cb1f01..1f1e4f725b 100644 --- a/pymongo/mongo_client.py +++ b/pymongo/mongo_client.py @@ -934,7 +934,7 @@ def watch( .. versionadded:: 3.7 - .. seealso:: The MongoDB documentation on `changeStreams `_. + .. seealso:: The MongoDB documentation on `changeStreams `_. .. _change streams specification: https://github.com/mongodb/specifications/blob/master/source/change-streams/change-streams.rst @@ -1741,7 +1741,7 @@ def list_databases( command. - `**kwargs` (optional): Optional parameters of the `listDatabases command - `_ + `_ can be passed as keyword arguments to this method. The supported options differ by server version. diff --git a/pymongo/operations.py b/pymongo/operations.py index e528f2a2df..84e8bf4d35 100644 --- a/pymongo/operations.py +++ b/pymongo/operations.py @@ -488,7 +488,7 @@ def __init__(self, keys: _IndexKeyHint, **kwargs: Any) -> None: Added the ``partialFilterExpression`` option to support partial indexes. - .. _wildcard index: https://docs.mongodb.com/master/core/index-wildcard/ + .. _wildcard index: https://mongodb.com/docs/master/core/index-wildcard/ """ keys = _index_list(keys) if "name" not in kwargs: diff --git a/pymongo/read_preferences.py b/pymongo/read_preferences.py index 5ce2fbafcc..ccb635bec0 100644 --- a/pymongo/read_preferences.py +++ b/pymongo/read_preferences.py @@ -153,7 +153,7 @@ def tag_sets(self) -> _TagSets: until it finds a set of tags with at least one matching member. .. 
seealso:: `Data-Center Awareness - `_ + `_ """ return list(self.__tag_sets) if self.__tag_sets else [{}] diff --git a/test/unified_format.py b/test/unified_format.py index adfd0cac0a..378fcc4759 100644 --- a/test/unified_format.py +++ b/test/unified_format.py @@ -432,7 +432,7 @@ def get_lsid_for_session(self, session_name): BSON_TYPE_ALIAS_MAP = { - # https://docs.mongodb.com/manual/reference/operator/query/type/ + # https://mongodb.com/docs/manual/reference/operator/query/type/ # https://pymongo.readthedocs.io/en/stable/api/bson/index.html "double": (float,), "string": (str,), From ff288faf39f113650f09be999201992c1a44c67d Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Wed, 13 Apr 2022 12:20:54 -0700 Subject: [PATCH 0130/1588] PYTHON-3221 Resync CSFLE spec tests (#929) --- .evergreen/resync-specs.sh | 6 ++++++ test/client-side-encryption/spec/badQueries.json | 4 ++-- test/client-side-encryption/spec/types.json | 12 ++++++------ test/test_encryption.py | 6 +----- 4 files changed, 15 insertions(+), 13 deletions(-) diff --git a/.evergreen/resync-specs.sh b/.evergreen/resync-specs.sh index 3042fd543b..af4228d081 100755 --- a/.evergreen/resync-specs.sh +++ b/.evergreen/resync-specs.sh @@ -81,6 +81,12 @@ do change*streams) cpjson change-streams/tests/ change_streams/ ;; + client-side-encryption|csfle|fle) + cpjson client-side-encryption/tests/ client-side-encryption/spec + cpjson client-side-encryption/corpus/ client-side-encryption/corpus + cpjson client-side-encryption/external/ client-side-encryption/external + cpjson client-side-encryption/limits/ client-side-encryption/limits + ;; cmap|CMAP) cpjson connection-monitoring-and-pooling/tests cmap rm $PYMONGO/test/cmap/wait-queue-fairness.json # PYTHON-1873 diff --git a/test/client-side-encryption/spec/badQueries.json b/test/client-side-encryption/spec/badQueries.json index 824a53c00b..4968307ba3 100644 --- a/test/client-side-encryption/spec/badQueries.json +++ b/test/client-side-encryption/spec/badQueries.json @@ -1318,7 +1318,7 @@ } }, "result": { - "errorContains": "Cannot encrypt element of type array" + "errorContains": "Cannot encrypt element of type" } } ] @@ -1387,7 +1387,7 @@ } }, "result": { - "errorContains": "Cannot encrypt element of type array" + "errorContains": "Cannot encrypt element of type" } } ] diff --git a/test/client-side-encryption/spec/types.json b/test/client-side-encryption/spec/types.json index a070f8bff7..a6c6507e90 100644 --- a/test/client-side-encryption/spec/types.json +++ b/test/client-side-encryption/spec/types.json @@ -504,7 +504,7 @@ } }, "result": { - "errorContains": "Cannot use deterministic encryption for element of type: double" + "errorContains": "element of type: double" } } ] @@ -551,7 +551,7 @@ } }, "result": { - "errorContains": "Cannot use deterministic encryption for element of type: decimal" + "errorContains": "element of type: decimal" } } ] @@ -883,7 +883,7 @@ } }, "result": { - "errorContains": "Cannot use deterministic encryption for element of type: javascriptWithScope" + "errorContains": "element of type: javascriptWithScope" } } ] @@ -928,7 +928,7 @@ } }, "result": { - "errorContains": "Cannot use deterministic encryption for element of type: object" + "errorContains": "element of type: object" } } ] @@ -1547,7 +1547,7 @@ } }, "result": { - "errorContains": "Cannot use deterministic encryption for element of type: array" + "errorContains": "element of type: array" } } ] @@ -1592,7 +1592,7 @@ } }, "result": { - "errorContains": "Cannot use deterministic encryption for element of type: 
bool" + "errorContains": "element of type: bool" } } ] diff --git a/test/test_encryption.py b/test/test_encryption.py index 987c02618f..ec854ff03a 100644 --- a/test/test_encryption.py +++ b/test/test_encryption.py @@ -815,11 +815,7 @@ def run_test(self, provider_name): self.assertEqual(encrypted_altname, encrypted) # Explicitly encrypting an auto encrypted field. - msg = ( - r"Cannot encrypt element of type binData because schema " - r"requires that type is one of: \[ string \]" - ) - with self.assertRaisesRegex(EncryptionError, msg): + with self.assertRaisesRegex(EncryptionError, r"encrypt element of type"): self.client_encrypted.db.coll.insert_one({"encrypted_placeholder": encrypted}) def test_data_key_local(self): From 868b3f77f38cefd37d518c76435b8081233cbee1 Mon Sep 17 00:00:00 2001 From: Julius Park Date: Wed, 13 Apr 2022 20:03:37 +0000 Subject: [PATCH 0131/1588] PYTHON-3080 Add section to troubleshooting FAQ per driver with top SEO results (#918) --- doc/common-issues.rst | 98 +++++++++++++++++++++++++++++++++++++++++++ doc/examples/tls.rst | 1 + doc/index.rst | 4 ++ 3 files changed, 103 insertions(+) create mode 100644 doc/common-issues.rst diff --git a/doc/common-issues.rst b/doc/common-issues.rst new file mode 100644 index 0000000000..1571b985e0 --- /dev/null +++ b/doc/common-issues.rst @@ -0,0 +1,98 @@ +Frequently Encountered Issues +============================= + +Also see the :ref:`TLSErrors` section. + +.. contents:: + +Server reports wire version X, PyMongo requires Y +------------------------------------------------- + +When one attempts to connect to a server running MongoDB 3.4 or older, PyMongo will throw the following error:: + + >>> client.admin.command('ping') + ... + pymongo.errors.ConfigurationError: Server at localhost:27017 reports wire version 5, but this version of PyMongo requires at least 6 (MongoDB 3.6). + +This is caused by the driver being too new for the server it is being run against. +To resolve this issue, either upgrade your database to version >= 3.6 or downgrade to PyMongo 3.x, which supports MongoDB >= 2.6. + + +'Cursor' object has no attribute '_Cursor__killed' +-------------------------------------------------- + +On versions of PyMongo <3.9, supplying invalid arguments to the constructor of ``Cursor`` +raises a TypeError and prints an AttributeError to ``stderr``. The AttributeError is not relevant; +instead, look at the TypeError for debugging information:: + + >>> coll.find(wrong=1) + Exception ignored in: + ... + AttributeError: 'Cursor' object has no attribute '_Cursor__killed' + ... + TypeError: __init__() got an unexpected keyword argument 'wrong' + +To fix this, make sure that you are supplying the correct keyword arguments. +In addition, you can upgrade to PyMongo >=3.9, which removes the spurious error. + + +MongoClient fails with ConfigurationError +----------------------------------------- + +This is a common issue stemming from using incorrect keyword argument names:: + + >>> client = MongoClient(wrong=1) + ... + pymongo.errors.ConfigurationError: Unknown option wrong + +To fix this, check your spelling and make sure that the keyword argument you are specifying exists. + + +DeprecationWarning: count is deprecated +--------------------------------------- + +PyMongo no longer supports :meth:`pymongo.cursor.Cursor.count`. +Instead, use :meth:`pymongo.collection.Collection.count_documents`:: + + >>> client = MongoClient() + >>> d = datetime.datetime(2009, 11, 12, 12) + >>> list(client.db.coll.find({"date": {"$lt": d}}, limit=2)) + [{'_id': ObjectId('6247b058cebb8b179b7039f8'), 'date': datetime.datetime(1, 1, 1, 0, 0)}, {'_id': ObjectId('6247b059cebb8b179b7039f9'), 'date': datetime.datetime(1, 1, 1, 0, 0)}] + >>> client.db.coll.count_documents({"date": {"$lt": d}}, limit=2) + 2 + +Note that this is NOT the same as ``Cursor.count_documents`` (which does not exist); +``count_documents`` is a method of the Collection class, so you must call it on a collection object +or you will receive the following error:: + + >>> Cursor(MongoClient().db.coll).count() + Traceback (most recent call last): + File "", line 1, in + AttributeError: 'Cursor' object has no attribute 'count' + >>> +
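Continuing the example above: if you only need the total size of a collection rather than a filtered count, :meth:`pymongo.collection.Collection.estimated_document_count` is the companion helper. A minimal sketch (the counts shown assume only the two documents above are present)::

    >>> client.db.coll.count_documents({"date": {"$lt": d}})  # exact, filtered count
    2
    >>> client.db.coll.estimated_document_count()  # fast estimate from collection metadata; takes no filter
    2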
+Timeout when accessing MongoDB from PyMongo with tunneling +---------------------------------------------------------- + +When attempting to connect to a replica set MongoDB instance over an SSH tunnel, you +will receive the following error:: + + File "/Library/Python/2.7/site-packages/pymongo/collection.py", line 1560, in count + return self._count(cmd, collation, session) + File "/Library/Python/2.7/site-packages/pymongo/collection.py", line 1504, in _count + with self._socket_for_reads() as (sock_info, slave_ok): + File "/System/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/contextlib.py", line 17, in __enter__ + return self.gen.next() + File "/Library/Python/2.7/site-packages/pymongo/mongo_client.py", line 982, in _socket_for_reads + server = topology.select_server(read_preference) + File "/Library/Python/2.7/site-packages/pymongo/topology.py", line 224, in select_server + address)) + File "/Library/Python/2.7/site-packages/pymongo/topology.py", line 183, in select_servers + selector, server_timeout, address) + File "/Library/Python/2.7/site-packages/pymongo/topology.py", line 199, in _select_servers_loop + self._error_message(selector)) + pymongo.errors.ServerSelectionTimeoutError: localhost:27017: timed out + +This is because PyMongo discovers replica set members using the response from the isMaster command, which +contains the addresses and ports of the other members. However, these addresses and ports will not be accessible through the SSH tunnel. Thus, this behavior is unsupported. +You can, however, connect directly to a single MongoDB node using the ``directConnection=True`` option with SSH tunneling. diff --git a/doc/examples/tls.rst b/doc/examples/tls.rst index 5a851e2530..6dcb7a1759 100644 --- a/doc/examples/tls.rst +++ b/doc/examples/tls.rst @@ -167,6 +167,7 @@ handshake will only fail in this case if the response indicates that the certificate is revoked. Invalid or malformed responses will be ignored, favoring availability over maximum security. +.. _TLSErrors: Troubleshooting TLS Errors .......................... diff --git a/doc/index.rst b/doc/index.rst index b6e510ad33..b43f5cf580 100644 --- a/doc/index.rst +++ b/doc/index.rst @@ -54,6 +54,9 @@ everything you need to know to use **PyMongo**. :doc:`developer/index` Developer guide for contributors to PyMongo. +:doc:`common-issues` + Common issues encountered when using PyMongo. 
+ Getting Help ------------ If you're having trouble or have questions about PyMongo, ask your question on @@ -124,3 +127,4 @@ Indices and tables python3 migrate-to-pymongo4 developer/index + common-issues From 8b3eaafb40b751e8cc78db1152c480eedd1c4c0f Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Wed, 13 Apr 2022 13:27:30 -0700 Subject: [PATCH 0132/1588] BUMP 4.1.1 --- doc/changelog.rst | 6 +++--- pymongo/__init__.py | 2 +- setup.py | 2 +- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/doc/changelog.rst b/doc/changelog.rst index eee3e4a81d..0fe2300120 100644 --- a/doc/changelog.rst +++ b/doc/changelog.rst @@ -4,9 +4,6 @@ Changelog Changes in Version 4.1.1 ------------------------- -Issues Resolved -............... - Version 4.1.1 fixes a number of bugs: - Fixed a memory leak bug when calling :func:`~bson.decode_all` without a @@ -19,6 +16,9 @@ Version 4.1.1 fixes a number of bugs: exception when attempting to parse a "mongodb+srv://" URI when the dnspython dependency was not installed (`PYTHON-3198`_). +Issues Resolved +............... + See the `PyMongo 4.1.1 release notes in JIRA`_ for the list of resolved issues in this release. diff --git a/pymongo/__init__.py b/pymongo/__init__.py index a47fd0a7b3..8a4288a996 100644 --- a/pymongo/__init__.py +++ b/pymongo/__init__.py @@ -55,7 +55,7 @@ .. _text index: http://mongodb.com/docs/manual/core/index-text/ """ -version_tuple: Tuple[Union[int, str], ...] = (4, 2, 0, ".dev0") +version_tuple: Tuple[Union[int, str], ...] = (4, 1, 1) def get_version_string() -> str: diff --git a/setup.py b/setup.py index dff4678d4b..9d804a06c8 100755 --- a/setup.py +++ b/setup.py @@ -34,7 +34,7 @@ except ImportError: _HAVE_SPHINX = False -version = "4.2.0.dev0" +version = "4.1.1" f = open("README.rst") try: From 109eaaff7b447e998c30b043d27346142905fc6e Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Wed, 13 Apr 2022 13:28:42 -0700 Subject: [PATCH 0133/1588] BUMP 4.2.0.dev1 --- pymongo/__init__.py | 2 +- setup.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/pymongo/__init__.py b/pymongo/__init__.py index 8a4288a996..17c640b1fd 100644 --- a/pymongo/__init__.py +++ b/pymongo/__init__.py @@ -55,7 +55,7 @@ .. _text index: http://mongodb.com/docs/manual/core/index-text/ """ -version_tuple: Tuple[Union[int, str], ...] = (4, 1, 1) +version_tuple: Tuple[Union[int, str], ...] = (4, 2, 0, ".dev1") def get_version_string() -> str: diff --git a/setup.py b/setup.py index 9d804a06c8..9e8e919e88 100755 --- a/setup.py +++ b/setup.py @@ -34,7 +34,7 @@ except ImportError: _HAVE_SPHINX = False -version = "4.1.1" +version = "4.2.0.dev1" f = open("README.rst") try: From cfa2d990f056a815490883e1fa13b81371a00c20 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Mon, 18 Apr 2022 20:38:46 -0500 Subject: [PATCH 0134/1588] PYTHON-3228 _tmp_session should validate session input (#930) --- pymongo/mongo_client.py | 16 ++++++++++++++-- test/test_collection.py | 9 ++++++++- test/test_session.py | 2 +- 3 files changed, 23 insertions(+), 4 deletions(-) diff --git a/pymongo/mongo_client.py b/pymongo/mongo_client.py index 1f1e4f725b..5c7e7cb176 100644 --- a/pymongo/mongo_client.py +++ b/pymongo/mongo_client.py @@ -97,8 +97,16 @@ from pymongo.write_concern import DEFAULT_WRITE_CONCERN, WriteConcern if TYPE_CHECKING: + import sys + from pymongo.read_concern import ReadConcern + if sys.version_info[:2] >= (3, 9): + from collections.abc import Generator + else: + # Deprecated since version 3.9: collections.abc.Generator now supports []. 
+ from typing import Generator + class MongoClient(common.BaseObject, Generic[_DocumentType]): """ @@ -1666,9 +1674,13 @@ def _ensure_session(self, session=None): return None @contextlib.contextmanager - def _tmp_session(self, session, close=True): + def _tmp_session( + self, session: Optional[client_session.ClientSession], close: bool = True + ) -> "Generator[Optional[client_session.ClientSession[Any]], None, None]": """If provided session is None, lend a temporary session.""" - if session: + if session is not None: + if not isinstance(session, client_session.ClientSession): + raise ValueError("'session' argument must be a ClientSession or None.") # Don't call end_session. yield session return diff --git a/test/test_collection.py b/test/test_collection.py index d1a3a6a980..bea2ed6ca6 100644 --- a/test/test_collection.py +++ b/test/test_collection.py @@ -1538,6 +1538,13 @@ def test_aggregation_cursor_alive(self): self.assertTrue(cursor.alive) + def test_invalid_session_parameter(self): + def try_invalid_session(): + with self.db.test.aggregate([], {}): # type:ignore + pass + + self.assertRaisesRegex(ValueError, "must be a ClientSession", try_invalid_session) + def test_large_limit(self): db = self.db db.drop_collection("test_large_limit") @@ -2131,7 +2138,7 @@ def test_helpers_with_let(self): (c.update_one, ({}, {"$inc": {"x": 3}})), (c.find_one_and_delete, ({}, {})), (c.find_one_and_replace, ({}, {})), - (c.aggregate, ([], {})), + (c.aggregate, ([],)), ] for let in [10, "str", [], False]: for helper, args in helpers: diff --git a/test/test_session.py b/test/test_session.py index e6f15de6bf..f22a2d5eab 100644 --- a/test/test_session.py +++ b/test/test_session.py @@ -199,7 +199,7 @@ def test_implicit_sessions_checkout(self): (client.db.test.find_one_and_replace, [{}, {}]), (client.db.test.aggregate, [[{"$limit": 1}]]), (client.db.test.find, []), - (client.server_info, [{}]), + (client.server_info, []), (client.db.aggregate, [[{"$listLocalSessions": {}}, {"$limit": 1}]]), (cursor.distinct, ["_id"]), (client.db.list_collections, []), From fe057cf5776348e1c2f3132fd2a395be1679a7e4 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Wed, 20 Apr 2022 12:01:26 -0700 Subject: [PATCH 0135/1588] PYTHON-3220 Add CSFLE spec test for auto encryption on a collection with no jsonSchema --- .../client-side-encryption/spec/noSchema.json | 67 +++++++++++++++++++ 1 file changed, 67 insertions(+) create mode 100644 test/client-side-encryption/spec/noSchema.json diff --git a/test/client-side-encryption/spec/noSchema.json b/test/client-side-encryption/spec/noSchema.json new file mode 100644 index 0000000000..095434f886 --- /dev/null +++ b/test/client-side-encryption/spec/noSchema.json @@ -0,0 +1,67 @@ +{ + "runOn": [ + { + "minServerVersion": "4.1.10" + } + ], + "database_name": "default", + "collection_name": "unencrypted", + "tests": [ + { + "description": "Insert on an unencrypted collection", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "aws": {} + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1 + } + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "unencrypted" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "insert": "unencrypted", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true + }, + "command_name": "insert" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 1 + } + ] + } 
+ } + } + ] +} From a2606cfc03b73aba54dfe22eab20df989da0b077 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Fri, 22 Apr 2022 13:44:14 -0500 Subject: [PATCH 0136/1588] PYTHON-3194 Adopt doc8 checker (#931) --- .pre-commit-config.yaml | 10 ++++++++++ CONTRIBUTING.rst | 6 +++++- 2 files changed, 15 insertions(+), 1 deletion(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 8b6671d41d..1fd86e0926 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -40,6 +40,7 @@ repos: 'flake8-logging-format==0.6.0', 'flake8-implicit-str-concat==0.2.0', ] + stages: [manual] # We use the Python version instead of the original version which seems to require Docker # https://github.com/koalaman/shellcheck-precommit @@ -49,6 +50,14 @@ repos: - id: shellcheck name: shellcheck args: ["--severity=warning"] + stages: [manual] + +- repo: https://github.com/PyCQA/doc8 + rev: 0.11.1 + hooks: + - id: doc8 + args: [--max-line-length=200] + stages: [manual] - repo: https://github.com/sirosen/check-jsonschema rev: 0.14.1 @@ -58,6 +67,7 @@ repos: files: ^\.github/workflows/ types: [yaml] args: ["--schemafile", "https://json.schemastore.org/github-workflow"] + stages: [manual] - repo: https://github.com/ariebovenberg/slotscheck rev: v0.14.0 diff --git a/CONTRIBUTING.rst b/CONTRIBUTING.rst index bbc22954a0..1a4423f3ef 100644 --- a/CONTRIBUTING.rst +++ b/CONTRIBUTING.rst @@ -55,6 +55,10 @@ To run ``pre-commit`` manually, run:: pre-commit run --all-files +To run a manual hook like ``flake8``, run:: + + pre-commit run --all-files --hook-stage manual flake8 + Documentation ------------- @@ -67,7 +71,7 @@ You might also use the GitHub `Edit
+ + >>> result = db.test.update_one({'x': -10}, {'$inc': {'x': 3}}, upsert=True) + >>> result.matched_count + 0 + >>> result.modified_count + 0 + >>> result.upserted_id + ObjectId('626a678eeaa80587d4bb3fb7') + >>> db.test.find_one(result.upserted_id) + {'_id': ObjectId('626a678eeaa80587d4bb3fb7'), 'x': -7} + :Parameters: - `filter`: A query that matches the document to update. - `update`: The modifications to apply. From 6e4e90a882e64274d7b3ce44971c83e5a0dbeb58 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Mon, 2 May 2022 16:32:05 -0700 Subject: [PATCH 0139/1588] PYTHON-3167 Revert to using the count command for estimated_document_count (#934) Resolves PYTHON-2885, PYTHON-3166, PYTHON-3224, and PYTHON-3219. --- .evergreen/resync-specs.sh | 42 +- doc/changelog.rst | 31 + pymongo/collection.py | 29 +- .../timeseries-collection.json | 16 - test/crud/unified/aggregate-allowdiskuse.json | 155 +++ test/crud/unified/bulkWrite-comment.json | 25 + .../unified/bulkWrite-replaceOne-let.json | 12 + test/crud/unified/countDocuments-comment.json | 208 ++++ test/crud/unified/deleteMany-comment.json | 1 + test/crud/unified/deleteOne-comment.json | 1 + .../estimatedDocumentCount-comment.json | 170 ++++ test/crud/unified/estimatedDocumentCount.json | 371 ++----- test/crud/unified/find-allowdiskuse.json | 4 +- test/crud/unified/insertMany-comment.json | 1 + test/crud/unified/insertOne-comment.json | 1 + test/crud/unified/replaceOne-comment.json | 19 + test/crud/unified/replaceOne-let.json | 12 + test/crud/unified/updateMany-comment.json | 16 +- test/crud/unified/updateMany-let.json | 10 +- test/crud/unified/updateOne-comment.json | 19 + test/crud/unified/updateOne-let.json | 16 +- test/data_lake/estimatedDocumentCount.json | 19 +- .../legacy/estimatedDocumentCount-4.9.json | 246 ----- ...timatedDocumentCount-serverErrors-4.9.json | 911 ------------------ ... estimatedDocumentCount-serverErrors.json} | 2 - ...re4.9.json => estimatedDocumentCount.json} | 2 - test/unified_format.py | 17 +- .../crud-api-version-1-strict.json | 26 +- test/versioned-api/crud-api-version-1.json | 28 +- 29 files changed, 847 insertions(+), 1563 deletions(-) create mode 100644 test/crud/unified/aggregate-allowdiskuse.json create mode 100644 test/crud/unified/countDocuments-comment.json create mode 100644 test/crud/unified/estimatedDocumentCount-comment.json delete mode 100644 test/retryable_reads/legacy/estimatedDocumentCount-4.9.json delete mode 100644 test/retryable_reads/legacy/estimatedDocumentCount-serverErrors-4.9.json rename test/retryable_reads/legacy/{estimatedDocumentCount-serverErrors-pre4.9.json => estimatedDocumentCount-serverErrors.json} (99%) rename test/retryable_reads/legacy/{estimatedDocumentCount-pre4.9.json => estimatedDocumentCount.json} (97%) diff --git a/.evergreen/resync-specs.sh b/.evergreen/resync-specs.sh index af4228d081..a98b091d59 100755 --- a/.evergreen/resync-specs.sh +++ b/.evergreen/resync-specs.sh @@ -68,17 +68,24 @@ cpjson () { for spec in "$@" do + # Match the spec dir name, the python test dir name, and/or common abbreviations. 
case "$spec" in - bson*corpus) + atlas-data-lake-testing|data_lake) + cpjson atlas-data-lake-testing/tests/ data_lake + ;; + bson-corpus|bson_corpus) cpjson bson-corpus/tests/ bson_corpus ;; - max*staleness) + max-staleness|max_staleness) cpjson max-staleness/tests/ max_staleness ;; - connection*string) + collection-management|collection_management) + cpjson collection-management/tests/ collection_management + ;; + connection-string|connection_string) cpjson connection-string/tests/ connection_string/test ;; - change*streams) + change-streams|change_streams) cpjson change-streams/tests/ change_streams/ ;; client-side-encryption|csfle|fle) @@ -87,32 +94,29 @@ do cpjson client-side-encryption/external/ client-side-encryption/external cpjson client-side-encryption/limits/ client-side-encryption/limits ;; - cmap|CMAP) + cmap|CMAP|connection-monitoring-and-pooling) cpjson connection-monitoring-and-pooling/tests cmap rm $PYMONGO/test/cmap/wait-queue-fairness.json # PYTHON-1873 ;; - command*monitoring) + apm|APM|command-monitoring|command_monitoring) cpjson command-monitoring/tests command_monitoring ;; crud|CRUD) cpjson crud/tests/ crud ;; - load*balancer) + load-balancers|load_balancer) cpjson load-balancers/tests load_balancer ;; - initial-dns-seedlist-discovery|srv_seedlist) + srv|SRV|initial-dns-seedlist-discovery|srv_seedlist) cpjson initial-dns-seedlist-discovery/tests/ srv_seedlist ;; - old_srv_seedlist) - cpjson initial-dns-seedlist-discovery/tests srv_seedlist - ;; - retryable*reads) + retryable-reads|retryable_reads) cpjson retryable-reads/tests/ retryable_reads ;; - retryable*writes) + retryable-writes|retryable_writes) cpjson retryable-writes/tests/ retryable_writes ;; - sdam|SDAM) + sdam|SDAM|server-discovery-and-monitoring|discovery_and_monitoring) cpjson server-discovery-and-monitoring/tests/errors \ discovery_and_monitoring/errors cpjson server-discovery-and-monitoring/tests/rs \ @@ -126,10 +130,10 @@ do cpjson server-discovery-and-monitoring/tests/load-balanced \ discovery_and_monitoring/load-balanced ;; - sdam*monitoring) + sdam-monitoring|sdam_monitoring) cpjson server-discovery-and-monitoring/tests/monitoring sdam_monitoring ;; - server*selection) + server-selection|server_selection) cpjson server-selection/tests/ server_selection ;; sessions) @@ -140,13 +144,13 @@ do cpjson transactions-convenient-api/tests/ transactions-convenient-api rm $PYMONGO/test/transactions/legacy/errors-client.json # PYTHON-1894 ;; - unified) + unified|unified-test-format) cpjson unified-test-format/tests/ unified-test-format/ ;; - uri|uri*options) + uri|uri-options|uri_options) cpjson uri-options/tests uri_options ;; - stable-api) + stable-api|versioned-api) cpjson versioned-api/tests versioned-api ;; *) diff --git a/doc/changelog.rst b/doc/changelog.rst index 0fe2300120..3d2f7cadc4 100644 --- a/doc/changelog.rst +++ b/doc/changelog.rst @@ -1,6 +1,37 @@ Changelog ========= +Changes in Version 4.2 +---------------------- + +Bug fixes +......... + +- Fixed a bug where :meth:`~pymongo.collection.Collection.estimated_document_count` + would fail with a "CommandNotSupportedOnView" error on views (`PYTHON-2885`_). + +Unavoidable breaking changes +............................ + +- :meth:`~pymongo.collection.Collection.estimated_document_count` now always uses + the `count`_ command. Due to an oversight in versions 5.0.0-5.0.8 of MongoDB, + the count command was not included in V1 of the :ref:`versioned-api-ref`. 
+ Users of the Stable API with estimated_document_count are recommended to upgrade + their server version to 5.0.9+ or set :attr:`pymongo.server_api.ServerApi.strict` + to ``False`` to avoid encountering errors (`PYTHON-3167`_). + +.. _count: https://mongodb.com/docs/manual/reference/command/count/ + +Issues Resolved +............... + +See the `PyMongo 4.2 release notes in JIRA`_ for the list of resolved issues +in this release. + +.. _PYTHON-2885: https://jira.mongodb.org/browse/PYTHON-2885 +.. _PYTHON-3167: https://jira.mongodb.org/browse/PYTHON-3167 +.. _PyMongo 4.2 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=33196 + Changes in Version 4.1.1 ------------------------- diff --git a/pymongo/collection.py b/pymongo/collection.py index 1d0eb1035e..0197198108 100644 --- a/pymongo/collection.py +++ b/pymongo/collection.py @@ -1707,8 +1707,15 @@ def estimated_document_count(self, comment: Optional[Any] = None, **kwargs: Any) command. - `**kwargs` (optional): See list of options above. + .. versionchanged:: 4.2 + This method now always uses the `count`_ command. Due to an oversight in versions + 5.0.0-5.0.8 of MongoDB, the count command was not included in V1 of the + :ref:`versioned-api-ref`. Users of the Stable API with estimated_document_count are + recommended to upgrade their server version to 5.0.9+ or set + :attr:`pymongo.server_api.ServerApi.strict` to ``False`` to avoid encountering errors. .. versionadded:: 3.7 + .. _count: https://mongodb.com/docs/manual/reference/command/count/ """ if "session" in kwargs: raise ConfigurationError("estimated_document_count does not support sessions") @@ -1716,25 +1723,9 @@ def estimated_document_count(self, comment: Optional[Any] = None, **kwargs: Any) kwargs["comment"] = comment def _cmd(session, server, sock_info, read_preference): - if sock_info.max_wire_version >= 12: - # MongoDB 4.9+ - pipeline = [ - {"$collStats": {"count": {}}}, - {"$group": {"_id": 1, "n": {"$sum": "$count"}}}, - ] - cmd = SON([("aggregate", self.__name), ("pipeline", pipeline), ("cursor", {})]) - cmd.update(kwargs) - result = self._aggregate_one_result( - sock_info, read_preference, cmd, collation=None, session=session - ) - if not result: - return 0 - return int(result["n"]) - else: - # MongoDB < 4.9 - cmd = SON([("count", self.__name)]) - cmd.update(kwargs) - return self._count_cmd(session, sock_info, read_preference, cmd, collation=None) + cmd = SON([("count", self.__name)]) + cmd.update(kwargs) + return self._count_cmd(session, sock_info, read_preference, cmd, collation=None) return self._retryable_non_cursor_read(_cmd, None) diff --git a/test/collection_management/timeseries-collection.json b/test/collection_management/timeseries-collection.json index 99f642e597..b5638fd36e 100644 --- a/test/collection_management/timeseries-collection.json +++ b/test/collection_management/timeseries-collection.json @@ -82,14 +82,6 @@ "databaseName": "ts-tests" } }, - { - "commandStartedEvent": { - "command": { - "listCollections": 1 - }, - "databaseName": "ts-tests" - } - }, { "commandStartedEvent": { "command": { @@ -204,14 +196,6 @@ "databaseName": "ts-tests" } }, - { - "commandStartedEvent": { - "command": { - "listCollections": 1 - }, - "databaseName": "ts-tests" - } - }, { "commandStartedEvent": { "command": { diff --git a/test/crud/unified/aggregate-allowdiskuse.json b/test/crud/unified/aggregate-allowdiskuse.json new file mode 100644 index 0000000000..2e54175b8a --- /dev/null +++ b/test/crud/unified/aggregate-allowdiskuse.json 
@@ -0,0 +1,155 @@ +{ + "description": "aggregate-allowdiskuse", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [] + } + ], + "tests": [ + { + "description": "Aggregate does not send allowDiskUse when value is not specified", + "operations": [ + { + "object": "collection0", + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": {} + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "coll0", + "pipeline": [ + { + "$match": {} + } + ], + "allowDiskUse": { + "$$exists": false + } + }, + "commandName": "aggregate", + "databaseName": "crud-tests" + } + } + ] + } + ] + }, + { + "description": "Aggregate sends allowDiskUse false when false is specified", + "operations": [ + { + "object": "collection0", + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": {} + } + ], + "allowDiskUse": false + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "coll0", + "pipeline": [ + { + "$match": {} + } + ], + "allowDiskUse": false + }, + "commandName": "aggregate", + "databaseName": "crud-tests" + } + } + ] + } + ] + }, + { + "description": "Aggregate sends allowDiskUse true when true is specified", + "operations": [ + { + "object": "collection0", + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": {} + } + ], + "allowDiskUse": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "coll0", + "pipeline": [ + { + "$match": {} + } + ], + "allowDiskUse": true + }, + "commandName": "aggregate", + "databaseName": "crud-tests" + } + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/bulkWrite-comment.json b/test/crud/unified/bulkWrite-comment.json index fac9644543..0b2addc850 100644 --- a/test/crud/unified/bulkWrite-comment.json +++ b/test/crud/unified/bulkWrite-comment.json @@ -150,6 +150,12 @@ "u": { "_id": 1, "x": "replaced" + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false } }, { @@ -160,6 +166,12 @@ "$set": { "x": "updated" } + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false } } ], @@ -317,6 +329,12 @@ "u": { "_id": 1, "x": "replaced" + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false } }, { @@ -327,6 +345,12 @@ "$set": { "x": "updated" } + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false } } ], @@ -388,6 +412,7 @@ "description": "BulkWrite with comment - pre 4.4", "runOnRequirements": [ { + "minServerVersion": "3.4.0", "maxServerVersion": "4.2.99" } ], diff --git a/test/crud/unified/bulkWrite-replaceOne-let.json b/test/crud/unified/bulkWrite-replaceOne-let.json index df4eafe62f..70f63837a8 100644 --- a/test/crud/unified/bulkWrite-replaceOne-let.json +++ b/test/crud/unified/bulkWrite-replaceOne-let.json @@ -95,6 +95,12 @@ }, "u": { "x": 3 + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": 
false } } ], @@ -183,6 +189,12 @@ }, "u": { "x": 3 + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false } } ], diff --git a/test/crud/unified/countDocuments-comment.json b/test/crud/unified/countDocuments-comment.json new file mode 100644 index 0000000000..e6c7ae8170 --- /dev/null +++ b/test/crud/unified/countDocuments-comment.json @@ -0,0 +1,208 @@ +{ + "description": "countDocuments-comment", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "countDocuments-comments-test" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "countDocuments-comments-test", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ], + "tests": [ + { + "description": "countDocuments with document comment", + "runOnRequirements": [ + { + "minServerVersion": "4.4.0" + } + ], + "operations": [ + { + "name": "countDocuments", + "object": "collection0", + "arguments": { + "filter": {}, + "comment": { + "key": "value" + } + }, + "expectResult": 3 + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "coll0", + "pipeline": [ + { + "$match": {} + }, + { + "$group": { + "_id": 1, + "n": { + "$sum": 1 + } + } + } + ], + "comment": { + "key": "value" + } + }, + "commandName": "aggregate", + "databaseName": "countDocuments-comments-test" + } + } + ] + } + ] + }, + { + "description": "countDocuments with string comment", + "runOnRequirements": [ + { + "minServerVersion": "3.6.0" + } + ], + "operations": [ + { + "name": "countDocuments", + "object": "collection0", + "arguments": { + "filter": {}, + "comment": "comment" + }, + "expectResult": 3 + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "coll0", + "pipeline": [ + { + "$match": {} + }, + { + "$group": { + "_id": 1, + "n": { + "$sum": 1 + } + } + } + ], + "comment": "comment" + }, + "commandName": "aggregate", + "databaseName": "countDocuments-comments-test" + } + } + ] + } + ] + }, + { + "description": "countDocuments with document comment on less than 4.4.0 - server error", + "runOnRequirements": [ + { + "minServerVersion": "3.6.0", + "maxServerVersion": "4.3.99" + } + ], + "operations": [ + { + "name": "countDocuments", + "object": "collection0", + "arguments": { + "filter": {}, + "comment": { + "key": "value" + } + }, + "expectError": { + "isClientError": false + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "coll0", + "pipeline": [ + { + "$match": {} + }, + { + "$group": { + "_id": 1, + "n": { + "$sum": 1 + } + } + } + ], + "comment": { + "key": "value" + } + }, + "commandName": "aggregate", + "databaseName": "countDocuments-comments-test" + } + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/deleteMany-comment.json b/test/crud/unified/deleteMany-comment.json index ea6a8524d9..6abc5fd58a 100644 --- a/test/crud/unified/deleteMany-comment.json +++ b/test/crud/unified/deleteMany-comment.json @@ -175,6 +175,7 @@ "description": "deleteMany with comment - pre 4.4", "runOnRequirements": [ { + "minServerVersion": 
"3.4.0", "maxServerVersion": "4.2.99" } ], diff --git a/test/crud/unified/deleteOne-comment.json b/test/crud/unified/deleteOne-comment.json index 37f356ec6f..0f42b086a3 100644 --- a/test/crud/unified/deleteOne-comment.json +++ b/test/crud/unified/deleteOne-comment.json @@ -177,6 +177,7 @@ "description": "deleteOne with comment - pre 4.4", "runOnRequirements": [ { + "minServerVersion": "3.4.0", "maxServerVersion": "4.2.99" } ], diff --git a/test/crud/unified/estimatedDocumentCount-comment.json b/test/crud/unified/estimatedDocumentCount-comment.json new file mode 100644 index 0000000000..6c0adacc8f --- /dev/null +++ b/test/crud/unified/estimatedDocumentCount-comment.json @@ -0,0 +1,170 @@ +{ + "description": "estimatedDocumentCount-comment", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "edc-comment-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "edc-comment-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ], + "tests": [ + { + "description": "estimatedDocumentCount with document comment", + "runOnRequirements": [ + { + "minServerVersion": "4.4.14" + } + ], + "operations": [ + { + "name": "estimatedDocumentCount", + "object": "collection0", + "arguments": { + "comment": { + "key": "value" + } + }, + "expectResult": 3 + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "count": "coll0", + "comment": { + "key": "value" + } + }, + "commandName": "count", + "databaseName": "edc-comment-tests" + } + } + ] + } + ] + }, + { + "description": "estimatedDocumentCount with string comment", + "runOnRequirements": [ + { + "minServerVersion": "4.4.0" + } + ], + "operations": [ + { + "name": "estimatedDocumentCount", + "object": "collection0", + "arguments": { + "comment": "comment" + }, + "expectResult": 3 + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "count": "coll0", + "comment": "comment" + }, + "commandName": "count", + "databaseName": "edc-comment-tests" + } + } + ] + } + ] + }, + { + "description": "estimatedDocumentCount with document comment - pre 4.4.14, server error", + "runOnRequirements": [ + { + "minServerVersion": "3.6.0", + "maxServerVersion": "4.4.13", + "topologies": [ + "single", + "replicaset" + ] + } + ], + "operations": [ + { + "name": "estimatedDocumentCount", + "object": "collection0", + "arguments": { + "comment": { + "key": "value" + } + }, + "expectError": { + "isClientError": false + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "count": "coll0", + "comment": { + "key": "value" + } + }, + "commandName": "count", + "databaseName": "edc-comment-tests" + } + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/estimatedDocumentCount.json b/test/crud/unified/estimatedDocumentCount.json index bcd66ea954..1b650c1cb6 100644 --- a/test/crud/unified/estimatedDocumentCount.json +++ b/test/crud/unified/estimatedDocumentCount.json @@ -34,6 +34,13 @@ "database": "database0", "collectionName": "coll1" } + }, + { + "collection": { + "id": "collection0View", + "database": 
"database0", + "collectionName": "coll0view" + } } ], "initialData": [ @@ -58,12 +65,7 @@ ], "tests": [ { - "description": "estimatedDocumentCount uses $collStats on 4.9.0 or greater", - "runOnRequirements": [ - { - "minServerVersion": "4.9.0" - } - ], + "description": "estimatedDocumentCount always uses count", "operations": [ { "name": "estimatedDocumentCount", @@ -78,24 +80,9 @@ { "commandStartedEvent": { "command": { - "aggregate": "coll0", - "pipeline": [ - { - "$collStats": { - "count": {} - } - }, - { - "$group": { - "_id": 1, - "n": { - "$sum": "$count" - } - } - } - ] + "count": "coll0" }, - "commandName": "aggregate", + "commandName": "count", "databaseName": "edc-tests" } } @@ -104,12 +91,7 @@ ] }, { - "description": "estimatedDocumentCount with maxTimeMS on 4.9.0 or greater", - "runOnRequirements": [ - { - "minServerVersion": "4.9.0" - } - ], + "description": "estimatedDocumentCount with maxTimeMS", "operations": [ { "name": "estimatedDocumentCount", @@ -127,25 +109,10 @@ { "commandStartedEvent": { "command": { - "aggregate": "coll0", - "pipeline": [ - { - "$collStats": { - "count": {} - } - }, - { - "$group": { - "_id": 1, - "n": { - "$sum": "$count" - } - } - } - ], + "count": "coll0", "maxTimeMS": 6000 }, - "commandName": "aggregate", + "commandName": "count", "databaseName": "edc-tests" } } @@ -154,12 +121,7 @@ ] }, { - "description": "estimatedDocumentCount on non-existent collection on 4.9.0 or greater", - "runOnRequirements": [ - { - "minServerVersion": "4.9.0" - } - ], + "description": "estimatedDocumentCount on non-existent collection", "operations": [ { "name": "estimatedDocumentCount", @@ -174,24 +136,9 @@ { "commandStartedEvent": { "command": { - "aggregate": "coll1", - "pipeline": [ - { - "$collStats": { - "count": {} - } - }, - { - "$group": { - "_id": 1, - "n": { - "$sum": "$count" - } - } - } - ] + "count": "coll1" }, - "commandName": "aggregate", + "commandName": "count", "databaseName": "edc-tests" } } @@ -200,78 +147,21 @@ ] }, { - "description": "estimatedDocumentCount errors correctly on 4.9.0 or greater--command error", + "description": "estimatedDocumentCount errors correctly--command error", "runOnRequirements": [ { - "minServerVersion": "4.9.0" - } - ], - "operations": [ - { - "name": "failPoint", - "object": "testRunner", - "arguments": { - "client": "client0", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "aggregate" - ], - "errorCode": 8 - } - } - } + "minServerVersion": "4.0.0", + "topologies": [ + "single", + "replicaset" + ] }, { - "name": "estimatedDocumentCount", - "object": "collection0", - "expectError": { - "errorCode": 8 - } - } - ], - "expectEvents": [ - { - "client": "client0", - "events": [ - { - "commandStartedEvent": { - "command": { - "aggregate": "coll0", - "pipeline": [ - { - "$collStats": { - "count": {} - } - }, - { - "$group": { - "_id": 1, - "n": { - "$sum": "$count" - } - } - } - ] - }, - "commandName": "aggregate", - "databaseName": "edc-tests" - } - } + "minServerVersion": "4.2.0", + "topologies": [ + "sharded" ] } - ] - }, - { - "description": "estimatedDocumentCount errors correctly on 4.9.0 or greater--socket error", - "runOnRequirements": [ - { - "minServerVersion": "4.9.0" - } ], "operations": [ { @@ -286,9 +176,9 @@ }, "data": { "failCommands": [ - "aggregate" + "count" ], - "closeConnection": true + "errorCode": 8 } } } @@ -297,56 +187,10 @@ "name": "estimatedDocumentCount", "object": "collection0", "expectError": { - "isError": true + 
"errorCode": 8 } } ], - "expectEvents": [ - { - "client": "client0", - "events": [ - { - "commandStartedEvent": { - "command": { - "aggregate": "coll0", - "pipeline": [ - { - "$collStats": { - "count": {} - } - }, - { - "$group": { - "_id": 1, - "n": { - "$sum": "$count" - } - } - } - ] - }, - "commandName": "aggregate", - "databaseName": "edc-tests" - } - } - ] - } - ] - }, - { - "description": "estimatedDocumentCount uses count on less than 4.9.0", - "runOnRequirements": [ - { - "maxServerVersion": "4.8.99" - } - ], - "operations": [ - { - "name": "estimatedDocumentCount", - "object": "collection0", - "expectResult": 3 - } - ], "expectEvents": [ { "client": "client0", @@ -365,77 +209,10 @@ ] }, { - "description": "estimatedDocumentCount with maxTimeMS on less than 4.9.0", - "runOnRequirements": [ - { - "maxServerVersion": "4.8.99" - } - ], - "operations": [ - { - "name": "estimatedDocumentCount", - "object": "collection0", - "arguments": { - "maxTimeMS": 6000 - }, - "expectResult": 3 - } - ], - "expectEvents": [ - { - "client": "client0", - "events": [ - { - "commandStartedEvent": { - "command": { - "count": "coll0", - "maxTimeMS": 6000 - }, - "commandName": "count", - "databaseName": "edc-tests" - } - } - ] - } - ] - }, - { - "description": "estimatedDocumentCount on non-existent collection on less than 4.9.0", - "runOnRequirements": [ - { - "maxServerVersion": "4.8.99" - } - ], - "operations": [ - { - "name": "estimatedDocumentCount", - "object": "collection1", - "expectResult": 0 - } - ], - "expectEvents": [ - { - "client": "client0", - "events": [ - { - "commandStartedEvent": { - "command": { - "count": "coll1" - }, - "commandName": "count", - "databaseName": "edc-tests" - } - } - ] - } - ] - }, - { - "description": "estimatedDocumentCount errors correctly on less than 4.9.0--command error", + "description": "estimatedDocumentCount errors correctly--socket error", "runOnRequirements": [ { "minServerVersion": "4.0.0", - "maxServerVersion": "4.8.99", "topologies": [ "single", "replicaset" @@ -443,7 +220,6 @@ }, { "minServerVersion": "4.2.0", - "maxServerVersion": "4.8.99", "topologies": [ "sharded" ] @@ -464,7 +240,7 @@ "failCommands": [ "count" ], - "errorCode": 8 + "closeConnection": true } } } @@ -473,7 +249,7 @@ "name": "estimatedDocumentCount", "object": "collection0", "expectError": { - "errorCode": 8 + "isError": true } } ], @@ -495,50 +271,41 @@ ] }, { - "description": "estimatedDocumentCount errors correctly on less than 4.9.0--socket error", + "description": "estimatedDocumentCount works correctly on views", "runOnRequirements": [ { - "minServerVersion": "4.0.0", - "maxServerVersion": "4.8.99", - "topologies": [ - "single", - "replicaset" - ] - }, - { - "minServerVersion": "4.2.0", - "maxServerVersion": "4.8.99", - "topologies": [ - "sharded" - ] + "minServerVersion": "3.4.0" } ], "operations": [ { - "name": "failPoint", - "object": "testRunner", + "name": "dropCollection", + "object": "database0", "arguments": { - "client": "client0", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "count" - ], - "closeConnection": true + "collection": "coll0view" + } + }, + { + "name": "createCollection", + "object": "database0", + "arguments": { + "collection": "coll0view", + "viewOn": "coll0", + "pipeline": [ + { + "$match": { + "_id": { + "$gt": 1 + } + } } - } + ] } }, { "name": "estimatedDocumentCount", - "object": "collection0", - "expectError": { - "isError": true - } + "object": "collection0View", + 
"expectResult": 2 } ], "expectEvents": [ @@ -548,7 +315,35 @@ { "commandStartedEvent": { "command": { - "count": "coll0" + "drop": "coll0view" + }, + "commandName": "drop", + "databaseName": "edc-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "create": "coll0view", + "viewOn": "coll0", + "pipeline": [ + { + "$match": { + "_id": { + "$gt": 1 + } + } + } + ] + }, + "commandName": "create", + "databaseName": "edc-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "count": "coll0view" }, "commandName": "count", "databaseName": "edc-tests" diff --git a/test/crud/unified/find-allowdiskuse.json b/test/crud/unified/find-allowdiskuse.json index 789bb7fbf1..eb238ab93a 100644 --- a/test/crud/unified/find-allowdiskuse.json +++ b/test/crud/unified/find-allowdiskuse.json @@ -32,7 +32,7 @@ ], "tests": [ { - "description": "Find does not send allowDiskuse when value is not specified", + "description": "Find does not send allowDiskUse when value is not specified", "operations": [ { "object": "collection0", @@ -61,7 +61,7 @@ ] }, { - "description": "Find sends allowDiskuse false when false is specified", + "description": "Find sends allowDiskUse false when false is specified", "operations": [ { "object": "collection0", diff --git a/test/crud/unified/insertMany-comment.json b/test/crud/unified/insertMany-comment.json index 7e835e8011..2b4c80b3f0 100644 --- a/test/crud/unified/insertMany-comment.json +++ b/test/crud/unified/insertMany-comment.json @@ -166,6 +166,7 @@ "description": "insertMany with comment - pre 4.4", "runOnRequirements": [ { + "minServerVersion": "3.4.0", "maxServerVersion": "4.2.99" } ], diff --git a/test/crud/unified/insertOne-comment.json b/test/crud/unified/insertOne-comment.json index a9f735ab6c..dbd83d9f64 100644 --- a/test/crud/unified/insertOne-comment.json +++ b/test/crud/unified/insertOne-comment.json @@ -162,6 +162,7 @@ "description": "insertOne with comment - pre 4.4", "runOnRequirements": [ { + "minServerVersion": "3.4.0", "maxServerVersion": "4.2.99" } ], diff --git a/test/crud/unified/replaceOne-comment.json b/test/crud/unified/replaceOne-comment.json index 02fe90a44d..88bee5d7b7 100644 --- a/test/crud/unified/replaceOne-comment.json +++ b/test/crud/unified/replaceOne-comment.json @@ -75,6 +75,12 @@ }, "u": { "x": 22 + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false } } ], @@ -137,6 +143,12 @@ }, "u": { "x": 22 + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false } } ], @@ -166,6 +178,7 @@ "description": "ReplaceOne with comment - pre 4.4", "runOnRequirements": [ { + "minServerVersion": "3.4.0", "maxServerVersion": "4.2.99" } ], @@ -202,6 +215,12 @@ }, "u": { "x": 22 + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false } } ], diff --git a/test/crud/unified/replaceOne-let.json b/test/crud/unified/replaceOne-let.json index 6cf8e15675..e7a7ee65a5 100644 --- a/test/crud/unified/replaceOne-let.json +++ b/test/crud/unified/replaceOne-let.json @@ -94,6 +94,12 @@ }, "u": { "x": "foo" + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false } } ], @@ -176,6 +182,12 @@ }, "u": { "x": "foo" + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false } } ], diff --git a/test/crud/unified/updateMany-comment.json b/test/crud/unified/updateMany-comment.json index 26abd92ed4..88b8b67f5a 100644 --- a/test/crud/unified/updateMany-comment.json +++ 
b/test/crud/unified/updateMany-comment.json @@ -80,7 +80,10 @@ "x": 22 } }, - "multi": true + "multi": true, + "upsert": { + "$$unsetOrMatches": false + } } ], "comment": "comment" @@ -147,7 +150,10 @@ "x": 22 } }, - "multi": true + "multi": true, + "upsert": { + "$$unsetOrMatches": false + } } ], "comment": { @@ -176,6 +182,7 @@ "description": "UpdateMany with comment - pre 4.4", "runOnRequirements": [ { + "minServerVersion": "3.4.0", "maxServerVersion": "4.2.99" } ], @@ -217,7 +224,10 @@ "x": 22 } }, - "multi": true + "multi": true, + "upsert": { + "$$unsetOrMatches": false + } } ], "comment": "comment" diff --git a/test/crud/unified/updateMany-let.json b/test/crud/unified/updateMany-let.json index 8a19ac0933..cff3bd4c79 100644 --- a/test/crud/unified/updateMany-let.json +++ b/test/crud/unified/updateMany-let.json @@ -114,7 +114,10 @@ } } ], - "multi": true + "multi": true, + "upsert": { + "$$unsetOrMatches": false + } } ], "let": { @@ -207,7 +210,10 @@ } } ], - "multi": true + "multi": true, + "upsert": { + "$$unsetOrMatches": false + } } ], "let": { diff --git a/test/crud/unified/updateOne-comment.json b/test/crud/unified/updateOne-comment.json index 9b3b71d395..f4ee74db38 100644 --- a/test/crud/unified/updateOne-comment.json +++ b/test/crud/unified/updateOne-comment.json @@ -79,6 +79,12 @@ "$set": { "x": 22 } + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false } } ], @@ -145,6 +151,12 @@ "$set": { "x": 22 } + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false } } ], @@ -174,6 +186,7 @@ "description": "UpdateOne with comment - pre 4.4", "runOnRequirements": [ { + "minServerVersion": "3.4.0", "maxServerVersion": "4.2.99" } ], @@ -214,6 +227,12 @@ "$set": { "x": 22 } + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false } } ], diff --git a/test/crud/unified/updateOne-let.json b/test/crud/unified/updateOne-let.json index 8237bef7e8..e43b979358 100644 --- a/test/crud/unified/updateOne-let.json +++ b/test/crud/unified/updateOne-let.json @@ -103,7 +103,13 @@ "x": "$$x" } } - ] + ], + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } } ], "let": { @@ -184,7 +190,13 @@ "x": "$$x" } } - ] + ], + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } } ], "let": { diff --git a/test/data_lake/estimatedDocumentCount.json b/test/data_lake/estimatedDocumentCount.json index 87b385208d..997a3ab3fc 100644 --- a/test/data_lake/estimatedDocumentCount.json +++ b/test/data_lake/estimatedDocumentCount.json @@ -15,24 +15,9 @@ { "command_started_event": { "command": { - "aggregate": "driverdata", - "pipeline": [ - { - "$collStats": { - "count": {} - } - }, - { - "$group": { - "_id": 1, - "n": { - "$sum": "$count" - } - } - } - ] + "count": "driverdata" }, - "command_name": "aggregate", + "command_name": "count", "database_name": "test" } } diff --git a/test/retryable_reads/legacy/estimatedDocumentCount-4.9.json b/test/retryable_reads/legacy/estimatedDocumentCount-4.9.json deleted file mode 100644 index a4c46fc074..0000000000 --- a/test/retryable_reads/legacy/estimatedDocumentCount-4.9.json +++ /dev/null @@ -1,246 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "4.9.0" - } - ], - "database_name": "retryable-reads-tests", - "collection_name": "coll", - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - } - ], - "tests": [ - { - "description": "EstimatedDocumentCount succeeds on first attempt", - 
"operations": [ - { - "name": "estimatedDocumentCount", - "object": "collection", - "result": 2 - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": "coll", - "pipeline": [ - { - "$collStats": { - "count": {} - } - }, - { - "$group": { - "_id": 1, - "n": { - "$sum": "$count" - } - } - } - ] - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "EstimatedDocumentCount succeeds on second attempt", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "aggregate" - ], - "closeConnection": true - } - }, - "operations": [ - { - "name": "estimatedDocumentCount", - "object": "collection", - "result": 2 - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": "coll", - "pipeline": [ - { - "$collStats": { - "count": {} - } - }, - { - "$group": { - "_id": 1, - "n": { - "$sum": "$count" - } - } - } - ] - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "aggregate": "coll", - "pipeline": [ - { - "$collStats": { - "count": {} - } - }, - { - "$group": { - "_id": 1, - "n": { - "$sum": "$count" - } - } - } - ] - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "EstimatedDocumentCount fails on first attempt", - "clientOptions": { - "retryReads": false - }, - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "aggregate" - ], - "closeConnection": true - } - }, - "operations": [ - { - "name": "estimatedDocumentCount", - "object": "collection", - "error": true - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": "coll", - "pipeline": [ - { - "$collStats": { - "count": {} - } - }, - { - "$group": { - "_id": 1, - "n": { - "$sum": "$count" - } - } - } - ] - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "EstimatedDocumentCount fails on second attempt", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 2 - }, - "data": { - "failCommands": [ - "aggregate" - ], - "closeConnection": true - } - }, - "operations": [ - { - "name": "estimatedDocumentCount", - "object": "collection", - "error": true - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": "coll", - "pipeline": [ - { - "$collStats": { - "count": {} - } - }, - { - "$group": { - "_id": 1, - "n": { - "$sum": "$count" - } - } - } - ] - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "aggregate": "coll", - "pipeline": [ - { - "$collStats": { - "count": {} - } - }, - { - "$group": { - "_id": 1, - "n": { - "$sum": "$count" - } - } - } - ] - }, - "database_name": "retryable-reads-tests" - } - } - ] - } - ] -} diff --git a/test/retryable_reads/legacy/estimatedDocumentCount-serverErrors-4.9.json b/test/retryable_reads/legacy/estimatedDocumentCount-serverErrors-4.9.json deleted file mode 100644 index 756b02b3a8..0000000000 --- a/test/retryable_reads/legacy/estimatedDocumentCount-serverErrors-4.9.json +++ /dev/null @@ -1,911 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "4.9.0" - } - ], - "database_name": "retryable-reads-tests", - "collection_name": "coll", - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - } - ], - "tests": [ - { - "description": "EstimatedDocumentCount succeeds after InterruptedAtShutdown", - "failPoint": { 
- "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "aggregate" - ], - "errorCode": 11600 - } - }, - "operations": [ - { - "name": "estimatedDocumentCount", - "object": "collection", - "result": 2 - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": "coll", - "pipeline": [ - { - "$collStats": { - "count": {} - } - }, - { - "$group": { - "_id": 1, - "n": { - "$sum": "$count" - } - } - } - ] - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "aggregate": "coll", - "pipeline": [ - { - "$collStats": { - "count": {} - } - }, - { - "$group": { - "_id": 1, - "n": { - "$sum": "$count" - } - } - } - ] - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "EstimatedDocumentCount succeeds after InterruptedDueToReplStateChange", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "aggregate" - ], - "errorCode": 11602 - } - }, - "operations": [ - { - "name": "estimatedDocumentCount", - "object": "collection", - "result": 2 - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": "coll", - "pipeline": [ - { - "$collStats": { - "count": {} - } - }, - { - "$group": { - "_id": 1, - "n": { - "$sum": "$count" - } - } - } - ] - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "aggregate": "coll", - "pipeline": [ - { - "$collStats": { - "count": {} - } - }, - { - "$group": { - "_id": 1, - "n": { - "$sum": "$count" - } - } - } - ] - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "EstimatedDocumentCount succeeds after NotWritablePrimary", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "aggregate" - ], - "errorCode": 10107 - } - }, - "operations": [ - { - "name": "estimatedDocumentCount", - "object": "collection", - "result": 2 - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": "coll", - "pipeline": [ - { - "$collStats": { - "count": {} - } - }, - { - "$group": { - "_id": 1, - "n": { - "$sum": "$count" - } - } - } - ] - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "aggregate": "coll", - "pipeline": [ - { - "$collStats": { - "count": {} - } - }, - { - "$group": { - "_id": 1, - "n": { - "$sum": "$count" - } - } - } - ] - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "EstimatedDocumentCount succeeds after NotPrimaryNoSecondaryOk", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "aggregate" - ], - "errorCode": 13435 - } - }, - "operations": [ - { - "name": "estimatedDocumentCount", - "object": "collection", - "result": 2 - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": "coll", - "pipeline": [ - { - "$collStats": { - "count": {} - } - }, - { - "$group": { - "_id": 1, - "n": { - "$sum": "$count" - } - } - } - ] - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "aggregate": "coll", - "pipeline": [ - { - "$collStats": { - "count": {} - } - }, - { - "$group": { - "_id": 1, - "n": { - "$sum": "$count" - } - } - } - ] - }, - "database_name": "retryable-reads-tests" - } - } - ] - 
}, - { - "description": "EstimatedDocumentCount succeeds after NotPrimaryOrSecondary", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "aggregate" - ], - "errorCode": 13436 - } - }, - "operations": [ - { - "name": "estimatedDocumentCount", - "object": "collection", - "result": 2 - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": "coll", - "pipeline": [ - { - "$collStats": { - "count": {} - } - }, - { - "$group": { - "_id": 1, - "n": { - "$sum": "$count" - } - } - } - ] - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "aggregate": "coll", - "pipeline": [ - { - "$collStats": { - "count": {} - } - }, - { - "$group": { - "_id": 1, - "n": { - "$sum": "$count" - } - } - } - ] - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "EstimatedDocumentCount succeeds after PrimarySteppedDown", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "aggregate" - ], - "errorCode": 189 - } - }, - "operations": [ - { - "name": "estimatedDocumentCount", - "object": "collection", - "result": 2 - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": "coll", - "pipeline": [ - { - "$collStats": { - "count": {} - } - }, - { - "$group": { - "_id": 1, - "n": { - "$sum": "$count" - } - } - } - ] - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "aggregate": "coll", - "pipeline": [ - { - "$collStats": { - "count": {} - } - }, - { - "$group": { - "_id": 1, - "n": { - "$sum": "$count" - } - } - } - ] - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "EstimatedDocumentCount succeeds after ShutdownInProgress", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "aggregate" - ], - "errorCode": 91 - } - }, - "operations": [ - { - "name": "estimatedDocumentCount", - "object": "collection", - "result": 2 - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": "coll", - "pipeline": [ - { - "$collStats": { - "count": {} - } - }, - { - "$group": { - "_id": 1, - "n": { - "$sum": "$count" - } - } - } - ] - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "aggregate": "coll", - "pipeline": [ - { - "$collStats": { - "count": {} - } - }, - { - "$group": { - "_id": 1, - "n": { - "$sum": "$count" - } - } - } - ] - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "EstimatedDocumentCount succeeds after HostNotFound", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "aggregate" - ], - "errorCode": 7 - } - }, - "operations": [ - { - "name": "estimatedDocumentCount", - "object": "collection", - "result": 2 - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": "coll", - "pipeline": [ - { - "$collStats": { - "count": {} - } - }, - { - "$group": { - "_id": 1, - "n": { - "$sum": "$count" - } - } - } - ] - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "aggregate": "coll", - "pipeline": [ - { - "$collStats": { - "count": {} - } - }, - { - "$group": { - "_id": 1, - "n": { - "$sum": "$count" - } - } 
- } - ] - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "EstimatedDocumentCount succeeds after HostUnreachable", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "aggregate" - ], - "errorCode": 6 - } - }, - "operations": [ - { - "name": "estimatedDocumentCount", - "object": "collection", - "result": 2 - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": "coll", - "pipeline": [ - { - "$collStats": { - "count": {} - } - }, - { - "$group": { - "_id": 1, - "n": { - "$sum": "$count" - } - } - } - ] - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "aggregate": "coll", - "pipeline": [ - { - "$collStats": { - "count": {} - } - }, - { - "$group": { - "_id": 1, - "n": { - "$sum": "$count" - } - } - } - ] - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "EstimatedDocumentCount succeeds after NetworkTimeout", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "aggregate" - ], - "errorCode": 89 - } - }, - "operations": [ - { - "name": "estimatedDocumentCount", - "object": "collection", - "result": 2 - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": "coll", - "pipeline": [ - { - "$collStats": { - "count": {} - } - }, - { - "$group": { - "_id": 1, - "n": { - "$sum": "$count" - } - } - } - ] - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "aggregate": "coll", - "pipeline": [ - { - "$collStats": { - "count": {} - } - }, - { - "$group": { - "_id": 1, - "n": { - "$sum": "$count" - } - } - } - ] - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "EstimatedDocumentCount succeeds after SocketException", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "aggregate" - ], - "errorCode": 9001 - } - }, - "operations": [ - { - "name": "estimatedDocumentCount", - "object": "collection", - "result": 2 - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": "coll", - "pipeline": [ - { - "$collStats": { - "count": {} - } - }, - { - "$group": { - "_id": 1, - "n": { - "$sum": "$count" - } - } - } - ] - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "aggregate": "coll", - "pipeline": [ - { - "$collStats": { - "count": {} - } - }, - { - "$group": { - "_id": 1, - "n": { - "$sum": "$count" - } - } - } - ] - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "EstimatedDocumentCount fails after two NotWritablePrimary errors", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 2 - }, - "data": { - "failCommands": [ - "aggregate" - ], - "errorCode": 10107 - } - }, - "operations": [ - { - "name": "estimatedDocumentCount", - "object": "collection", - "error": true - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": "coll", - "pipeline": [ - { - "$collStats": { - "count": {} - } - }, - { - "$group": { - "_id": 1, - "n": { - "$sum": "$count" - } - } - } - ] - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "aggregate": "coll", - "pipeline": [ - { - "$collStats": { - "count": {} - 
} - }, - { - "$group": { - "_id": 1, - "n": { - "$sum": "$count" - } - } - } - ] - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "EstimatedDocumentCount fails after NotWritablePrimary when retryReads is false", - "clientOptions": { - "retryReads": false - }, - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "aggregate" - ], - "errorCode": 10107 - } - }, - "operations": [ - { - "name": "estimatedDocumentCount", - "object": "collection", - "error": true - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": "coll", - "pipeline": [ - { - "$collStats": { - "count": {} - } - }, - { - "$group": { - "_id": 1, - "n": { - "$sum": "$count" - } - } - } - ] - }, - "database_name": "retryable-reads-tests" - } - } - ] - } - ] -} diff --git a/test/retryable_reads/legacy/estimatedDocumentCount-serverErrors-pre4.9.json b/test/retryable_reads/legacy/estimatedDocumentCount-serverErrors.json similarity index 99% rename from test/retryable_reads/legacy/estimatedDocumentCount-serverErrors-pre4.9.json rename to test/retryable_reads/legacy/estimatedDocumentCount-serverErrors.json index 0b9a2615d1..6bb128f5f3 100644 --- a/test/retryable_reads/legacy/estimatedDocumentCount-serverErrors-pre4.9.json +++ b/test/retryable_reads/legacy/estimatedDocumentCount-serverErrors.json @@ -2,7 +2,6 @@ "runOn": [ { "minServerVersion": "4.0", - "maxServerVersion": "4.8.99", "topology": [ "single", "replicaset" @@ -10,7 +9,6 @@ }, { "minServerVersion": "4.1.7", - "maxServerVersion": "4.8.99", "topology": [ "sharded" ] diff --git a/test/retryable_reads/legacy/estimatedDocumentCount-pre4.9.json b/test/retryable_reads/legacy/estimatedDocumentCount.json similarity index 97% rename from test/retryable_reads/legacy/estimatedDocumentCount-pre4.9.json rename to test/retryable_reads/legacy/estimatedDocumentCount.json index 44be966ae7..8dfa15a2cd 100644 --- a/test/retryable_reads/legacy/estimatedDocumentCount-pre4.9.json +++ b/test/retryable_reads/legacy/estimatedDocumentCount.json @@ -2,7 +2,6 @@ "runOn": [ { "minServerVersion": "4.0", - "maxServerVersion": "4.8.99", "topology": [ "single", "replicaset" @@ -10,7 +9,6 @@ }, { "minServerVersion": "4.1.7", - "maxServerVersion": "4.8.99", "topology": [ "sharded" ] diff --git a/test/unified_format.py b/test/unified_format.py index 378fcc4759..459566d711 100644 --- a/test/unified_format.py +++ b/test/unified_format.py @@ -226,6 +226,7 @@ def __init__( self._observe_sensitive_commands = False self._ignore_commands = _SENSITIVE_COMMANDS | set(ignore_commands) self._ignore_commands.add("configurefailpoint") + self.ignore_list_collections = False self._event_mapping = collections.defaultdict(list) self.entity_map = entity_map if store_events: @@ -256,7 +257,10 @@ def add_event(self, event): ) def _command_event(self, event): - if event.command_name.lower() not in self._ignore_commands: + if not ( + event.command_name.lower() in self._ignore_commands + or (self.ignore_list_collections and event.command_name == "listCollections") + ): self.add_event(event) def started(self, event): @@ -883,6 +887,17 @@ def _databaseOperation_listCollections(self, target, *args, **kwargs): cursor = target.list_collections(*args, **kwargs) return list(cursor) + def _databaseOperation_createCollection(self, target, *args, **kwargs): + # PYTHON-1936 Ignore the listCollections event from create_collection. 
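+        # Toggle ignore_list_collections on every EventListenerUtil attached
+        # to this client for the duration of the create_collection call, then
+        # restore it so later operations are still fully observed.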
+ for listener in target.client.options.event_listeners: + if isinstance(listener, EventListenerUtil): + listener.ignore_list_collections = True + ret = target.create_collection(*args, **kwargs) + for listener in target.client.options.event_listeners: + if isinstance(listener, EventListenerUtil): + listener.ignore_list_collections = False + return ret + def __entityOperation_aggregate(self, target, *args, **kwargs): self.__raise_if_unsupported("aggregate", target, Database, Collection) return list(target.aggregate(*args, **kwargs)) diff --git a/test/versioned-api/crud-api-version-1-strict.json b/test/versioned-api/crud-api-version-1-strict.json index 29a0ec4e3b..c1c8ecce01 100644 --- a/test/versioned-api/crud-api-version-1-strict.json +++ b/test/versioned-api/crud-api-version-1-strict.json @@ -613,6 +613,15 @@ }, { "description": "estimatedDocumentCount appends declared API version", + "runOnRequirements": [ + { + "minServerVersion": "5.0.9", + "maxServerVersion": "5.0.99" + }, + { + "minServerVersion": "5.3.2" + } + ], "operations": [ { "name": "estimatedDocumentCount", @@ -627,22 +636,7 @@ { "commandStartedEvent": { "command": { - "aggregate": "test", - "pipeline": [ - { - "$collStats": { - "count": {} - } - }, - { - "$group": { - "_id": 1, - "n": { - "$sum": "$count" - } - } - } - ], + "count": "test", "apiVersion": "1", "apiStrict": true, "apiDeprecationErrors": { diff --git a/test/versioned-api/crud-api-version-1.json b/test/versioned-api/crud-api-version-1.json index 1f135eea18..a387d0587e 100644 --- a/test/versioned-api/crud-api-version-1.json +++ b/test/versioned-api/crud-api-version-1.json @@ -604,7 +604,16 @@ ] }, { - "description": "estimatedDocumentCount appends declared API version on 4.9.0 or greater", + "description": "estimatedDocumentCount appends declared API version", + "runOnRequirements": [ + { + "minServerVersion": "5.0.9", + "maxServerVersion": "5.0.99" + }, + { + "minServerVersion": "5.3.2" + } + ], "operations": [ { "name": "estimatedDocumentCount", @@ -619,22 +628,7 @@ { "commandStartedEvent": { "command": { - "aggregate": "test", - "pipeline": [ - { - "$collStats": { - "count": {} - } - }, - { - "$group": { - "_id": 1, - "n": { - "$sum": "$count" - } - } - } - ], + "count": "test", "apiVersion": "1", "apiStrict": { "$$unsetOrMatches": false From ede07f44dd0adbe9a664c7a19392d79e3cbea9f0 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Tue, 3 May 2022 14:41:24 -0700 Subject: [PATCH 0140/1588] PYTHON-3250 Speed up majority writes in test suite (#936) --- test/__init__.py | 6 ++++++ test/test_encryption.py | 31 +++++++++++++++---------------- test/test_retryable_reads.py | 21 ++++++++++++--------- test/unified_format.py | 24 +++++++++++++----------- test/utils_spec_runner.py | 21 ++++++++++++++------- 5 files changed, 60 insertions(+), 43 deletions(-) diff --git a/test/__init__.py b/test/__init__.py index 3800c7890e..64c812c112 100644 --- a/test/__init__.py +++ b/test/__init__.py @@ -1029,10 +1029,15 @@ def tearDown(self): super(MockClientTest, self).tearDown() +# Global knobs to speed up the test suite. 
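+# Flushing queued monitoring events every 0.05 seconds keeps tests that block
+# waiting for an event from stalling on the (longer) default interval.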
+global_knobs = client_knobs(events_queue_frequency=0.05) + + def setup(): client_context.init() warnings.resetwarnings() warnings.simplefilter("always") + global_knobs.enable() def _get_executors(topology): @@ -1086,6 +1091,7 @@ def print_running_clients(): def teardown(): + global_knobs.disable() garbage = [] for g in gc.garbage: garbage.append("GARBAGE: %r" % (g,)) diff --git a/test/test_encryption.py b/test/test_encryption.py index ec854ff03a..366c406b03 100644 --- a/test/test_encryption.py +++ b/test/test_encryption.py @@ -24,7 +24,7 @@ import textwrap import traceback import uuid -from typing import Any +from typing import Any, Dict from pymongo.collection import Collection @@ -621,31 +621,30 @@ def maybe_skip_scenario(self, test): def setup_scenario(self, scenario_def): """Override a test's setup.""" key_vault_data = scenario_def["key_vault_data"] + json_schema = scenario_def["json_schema"] + data = scenario_def["data"] if key_vault_data: - coll = client_context.client.get_database( - "keyvault", write_concern=WriteConcern(w="majority"), codec_options=OPTS - )["datakeys"] - coll.drop() + coll = client_context.client.get_database("keyvault", codec_options=OPTS)["datakeys"] + coll.delete_many({}) coll.insert_many(key_vault_data) db_name = self.get_scenario_db_name(scenario_def) coll_name = self.get_scenario_coll_name(scenario_def) - db = client_context.client.get_database( - db_name, write_concern=WriteConcern(w="majority"), codec_options=OPTS - ) + db = client_context.client.get_database(db_name, codec_options=OPTS) coll = db[coll_name] coll.drop() - json_schema = scenario_def["json_schema"] + wc = WriteConcern(w="majority") + kwargs: Dict[str, Any] = {} if json_schema: - db.create_collection( - coll_name, validator={"$jsonSchema": json_schema}, codec_options=OPTS - ) - else: - db.create_collection(coll_name) + kwargs["validator"] = {"$jsonSchema": json_schema} + kwargs["codec_options"] = OPTS + if not data: + kwargs["write_concern"] = wc + db.create_collection(coll_name, **kwargs) - if scenario_def["data"]: + if data: # Load data. - coll.insert_many(scenario_def["data"]) + coll.with_options(write_concern=wc).insert_many(scenario_def["data"]) def allowable_errors(self, op): """Override expected error classes.""" diff --git a/test/test_retryable_reads.py b/test/test_retryable_reads.py index 01fe6901ae..2b8bc17c58 100644 --- a/test/test_retryable_reads.py +++ b/test/test_retryable_reads.py @@ -112,17 +112,20 @@ def get_scenario_coll_name(self, scenario_def): def setup_scenario(self, scenario_def): """Override a test's setup to support GridFS tests.""" if "bucket_name" in scenario_def: + data = scenario_def["data"] db_name = self.get_scenario_db_name(scenario_def) - db = client_context.client.get_database( - db_name, write_concern=WriteConcern(w="majority") - ) - # Create a bucket for the retryable reads GridFS tests. - client_context.client.drop_database(db_name) - if scenario_def["data"]: - data = scenario_def["data"] - # Load data. + db = client_context.client[db_name] + # Create a bucket for the retryable reads GridFS tests with as few + # majority writes as possible. 
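+            # (w="majority" blocks until a majority of data-bearing members
+            # acknowledge the write, so only the final setup write uses it.)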
+            wc = WriteConcern(w="majority")
+            if data:
+                db["fs.chunks"].drop()
+                db["fs.files"].drop()
                 db["fs.chunks"].insert_many(data["fs.chunks"])
-                db["fs.files"].insert_many(data["fs.files"])
+                db.get_collection("fs.files", write_concern=wc).insert_many(data["fs.files"])
+            else:
+                db.get_collection("fs.chunks").drop()
+                db.get_collection("fs.files", write_concern=wc).drop()
         else:
             super(TestSpec, self).setup_scenario(scenario_def)

diff --git a/test/unified_format.py b/test/unified_format.py
index 459566d711..9edf499ece 100644
--- a/test/unified_format.py
+++ b/test/unified_format.py
@@ -721,22 +721,24 @@ def should_run_on(run_on_spec):
         return False

     def insert_initial_data(self, initial_data):
-        for collection_data in initial_data:
+        for i, collection_data in enumerate(initial_data):
             coll_name = collection_data["collectionName"]
             db_name = collection_data["databaseName"]
             documents = collection_data["documents"]

-            coll = self.client.get_database(db_name).get_collection(
-                coll_name, write_concern=WriteConcern(w="majority")
-            )
-            coll.drop()
-
-            if len(documents) > 0:
-                coll.insert_many(documents)
+            # Set up the collection with as few majority writes as possible.
+            db = self.client[db_name]
+            db.drop_collection(coll_name)
+            # Only use majority wc on the final write.
+            if i == len(initial_data) - 1:
+                wc = WriteConcern(w="majority")
+            else:
+                wc = WriteConcern(w=1)
+            if documents:
+                db.get_collection(coll_name, write_concern=wc).insert_many(documents)
             else:
-                # ensure collection exists
-                result = coll.insert_one({})
-                coll.delete_one({"_id": result.inserted_id})
+                # Ensure collection exists.
+                db.create_collection(coll_name, write_concern=wc)

     @classmethod
     def setUpClass(cls):
diff --git a/test/utils_spec_runner.py b/test/utils_spec_runner.py
index 4a71fef328..4ae4d1bfb4 100644
--- a/test/utils_spec_runner.py
+++ b/test/utils_spec_runner.py
@@ -453,13 +453,20 @@ def setup_scenario(self, scenario_def):
         """Allow specs to override a test's setup."""
         db_name = self.get_scenario_db_name(scenario_def)
         coll_name = self.get_scenario_coll_name(scenario_def)
-        db = client_context.client.get_database(db_name, write_concern=WriteConcern(w="majority"))
-        coll = db[coll_name]
-        coll.drop()
-        db.create_collection(coll_name)
-        if scenario_def["data"]:
-            # Load data.
-            coll.insert_many(scenario_def["data"])
+        documents = scenario_def["data"]
+
+        # Set up the collection with as few majority writes as possible.
+        db = client_context.client.get_database(db_name)
+        coll_exists = bool(db.list_collection_names(filter={"name": coll_name}))
+        if coll_exists:
+            db[coll_name].delete_many({})
+        # Only use majority wc on the final write.
+        wc = WriteConcern(w="majority")
+        if documents:
+            db.get_collection(coll_name, write_concern=wc).insert_many(documents)
+        elif not coll_exists:
+            # Ensure collection exists.
+            db.create_collection(coll_name, write_concern=wc)

     def run_scenario(self, scenario_def, test):
         self.maybe_skip_scenario(test)

From 252ed1cef67663e125ed07eee92ef8e096cc2ad0 Mon Sep 17 00:00:00 2001
From: Julius Park
Date: Tue, 3 May 2022 14:49:18 -0700
Subject: [PATCH 0141/1588] PYTHON-3247 Mitigate user issues caused by change in directConnection defaults in 4.x (#935)

---
 doc/changelog.rst           |  9 +++++++++
 doc/migrate-to-pymongo4.rst | 24 ++++++++++++++++++++++++
 pymongo/mongo_client.py     |  5 +++++
 3 files changed, 38 insertions(+)

diff --git a/doc/changelog.rst b/doc/changelog.rst
index 3d2f7cadc4..97795fdfb9 100644
--- a/doc/changelog.rst
+++ b/doc/changelog.rst
@@ -120,6 +120,15 @@ Changes in Version 4.0
 .. 
warning:: PyMongo 4.0 drops support for MongoDB 2.6, 3.0, 3.2, and 3.4. +.. warning:: PyMongo 4.0 changes the default value of the ``directConnection`` URI option and + keyword argument to :class:`~pymongo.mongo_client.MongoClient` + to ``False`` instead of ``None``, allowing for the automatic + discovery of replica sets. This means that if you + want a direct connection to a single server you must pass + ``directConnection=True`` as a URI option or keyword argument. + For more details, see the relevant section of the PyMongo 4.x migration + guide: :ref:`pymongo4-migration-direct-connection`. + PyMongo 4.0 brings a number of improvements as well as some backward breaking changes. For example, all APIs deprecated in PyMongo 3.X have been removed. Be sure to read the changes listed below and the :doc:`migrate-to-pymongo4` diff --git a/doc/migrate-to-pymongo4.rst b/doc/migrate-to-pymongo4.rst index 5f75ed1760..d70d7b8a2c 100644 --- a/doc/migrate-to-pymongo4.rst +++ b/doc/migrate-to-pymongo4.rst @@ -65,6 +65,8 @@ get the same behavior. MongoClient ----------- +.. _pymongo4-migration-direct-connection: + ``directConnection`` defaults to False ...................................... @@ -74,6 +76,28 @@ allowing for the automatic discovery of replica sets. This means that if you want a direct connection to a single server you must pass ``directConnection=True`` as a URI option or keyword argument. +If you see any :exc:`~pymongo.errors.ServerSelectionTimeoutError`'s after upgrading from PyMongo 3 to 4.x, you likely +need to add ``directConnection=True`` when creating the client. +Here are some example errors: + +.. code-block:: + + pymongo.errors.ServerSelectionTimeoutError: mongo_node2: [Errno 8] nodename nor servname + provided, or not known,mongo_node1:27017 + +.. code-block:: + + ServerSelectionTimeoutError: No servers match selector "Primary()", Timeout: 30s, + Topology Description: ... + + +Additionally, the "isWritablePrimary" attribute of a hello command sent back by the server will +always be True if ``directConnection=False``:: + + >>> client.admin.command('hello')['isWritablePrimary'] + True + + The waitQueueMultiple parameter is removed .......................................... diff --git a/pymongo/mongo_client.py b/pymongo/mongo_client.py index 5c7e7cb176..6601c18aca 100644 --- a/pymongo/mongo_client.py +++ b/pymongo/mongo_client.py @@ -137,6 +137,11 @@ def __init__( ) -> None: """Client for a MongoDB instance, a replica set, or a set of mongoses. + .. warning:: Starting in PyMongo 4.0, ``directConnection`` now has a default value of + False instead of None. + For more details, see the relevant section of the PyMongo 4.x migration guide: + :ref:`pymongo4-migration-direct-connection`. + The client object is thread-safe and has connection-pooling built in. 
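+
+        A minimal sketch of the direct-connection case called out in the
+        warning above (the host name is illustrative only)::
+
+          >>> client = MongoClient("mongodb://node1.example.com:27017",
+          ...                      directConnection=True)
+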
If an operation fails because of a network error, :class:`~pymongo.errors.ConnectionFailure` is raised and the client From 9a829acf2e5a009e21012d7b381a65e54b0a0c02 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Wed, 4 May 2022 11:12:58 -0700 Subject: [PATCH 0142/1588] PYTHON-3251 Make extra whitespace visible in invalid port exception (#937) --- pymongo/uri_parser.py | 2 +- test/test_uri_parser.py | 4 ++++ 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/pymongo/uri_parser.py b/pymongo/uri_parser.py index bfbf214bcb..cd18c067e7 100644 --- a/pymongo/uri_parser.py +++ b/pymongo/uri_parser.py @@ -134,7 +134,7 @@ def parse_host(entity: str, default_port: Optional[int] = DEFAULT_PORT) -> _Addr host, port = host.split(":", 1) if isinstance(port, str): if not port.isdigit() or int(port) > 65535 or int(port) <= 0: - raise ValueError("Port must be an integer between 0 and 65535: %s" % (port,)) + raise ValueError("Port must be an integer between 0 and 65535: %r" % (port,)) port = int(port) # Normalize hostname to lowercase, since DNS is case-insensitive: diff --git a/test/test_uri_parser.py b/test/test_uri_parser.py index 4fa288df44..2f81e3b512 100644 --- a/test/test_uri_parser.py +++ b/test/test_uri_parser.py @@ -147,6 +147,10 @@ def test_parse_uri(self): self.assertRaises(InvalidURI, parse_uri, "http://foo@foobar.com") self.assertRaises(ValueError, parse_uri, "mongodb://::1", 27017) + # Extra whitespace should be visible in error message. + with self.assertRaisesRegex(ValueError, "'27017 '"): + parse_uri("mongodb://localhost:27017 ") + orig: dict = { "nodelist": [("localhost", 27017)], "username": None, From 502effeebabd7092897273dc3a972e9f31160ec3 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Thu, 5 May 2022 10:52:53 -0700 Subject: [PATCH 0143/1588] PYTHON-3167 Fix mockupdb tests for estimated_document_count (#938) --- test/mockupdb/operations.py | 21 ++++++++++----------- 1 file changed, 10 insertions(+), 11 deletions(-) diff --git a/test/mockupdb/operations.py b/test/mockupdb/operations.py index efb9e5084e..90d7f27c39 100644 --- a/test/mockupdb/operations.py +++ b/test/mockupdb/operations.py @@ -14,7 +14,7 @@ from collections import namedtuple -from mockupdb import OpMsg, OpMsgReply, OpReply +from mockupdb import OpMsgReply, OpReply from pymongo import ReadPreference @@ -61,12 +61,19 @@ not_master=not_master_reply, ), Operation( - "count", + "count_documents", lambda client: client.db.collection.count_documents({}), reply={"n": 1}, op_type="may-use-secondary", not_master=not_master_reply, ), + Operation( + "estimated_document_count", + lambda client: client.db.collection.estimated_document_count(), + reply={"n": 1}, + op_type="may-use-secondary", + not_master=not_master_reply, + ), Operation( "aggregate", lambda client: client.db.collection.aggregate([]), @@ -109,12 +116,4 @@ Upgrade = namedtuple("Upgrade", ["name", "function", "old", "new", "wire_version"]) -upgrades = [ - Upgrade( - "estimated_document_count", - lambda client: client.db.collection.estimated_document_count(), - old=OpMsg("count", "collection", namespace="db"), - new=OpMsg("aggregate", "collection", namespace="db"), - wire_version=12, - ), -] +upgrades = [] From 75685c006c9184230af131a09611aa3e6e1ac649 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Thu, 5 May 2022 16:13:41 -0700 Subject: [PATCH 0144/1588] PYTHON-3235 Drop support for Python 3.6 (#939) --- .evergreen/build-mac.sh | 2 +- .evergreen/build-manylinux-internal.sh | 2 +- .evergreen/build-manylinux.sh | 1 - .evergreen/build-windows.sh | 2 +- 
.evergreen/config.yml | 77 ++++++++++++-------------- .evergreen/run-mongodb-aws-ecs-test.sh | 28 +++------- .evergreen/run-tests.sh | 8 +-- .evergreen/utils.sh | 10 ++-- .github/workflows/test-python.yml | 2 +- CONTRIBUTING.rst | 2 +- README.rst | 2 +- doc/changelog.rst | 2 + doc/examples/tls.rst | 2 +- doc/faq.rst | 2 +- doc/installation.rst | 4 +- doc/python3.rst | 18 +++--- pymongo/pool.py | 4 +- pymongo/pyopenssl_context.py | 1 - pymongo/ssl_context.py | 6 -- pymongo/ssl_support.py | 2 +- setup.py | 7 +-- test/test_mypy.py | 3 +- test/utils.py | 15 ++--- 23 files changed, 86 insertions(+), 116 deletions(-) diff --git a/.evergreen/build-mac.sh b/.evergreen/build-mac.sh index 5671ae6c6f..09950a592f 100755 --- a/.evergreen/build-mac.sh +++ b/.evergreen/build-mac.sh @@ -8,7 +8,7 @@ rm -rf validdist mkdir -p validdist mv dist/* validdist || true -for VERSION in 3.6 3.7 3.8 3.9 3.10; do +for VERSION in 3.7 3.8 3.9 3.10; do PYTHON=/Library/Frameworks/Python.framework/Versions/$VERSION/bin/python3 rm -rf build diff --git a/.evergreen/build-manylinux-internal.sh b/.evergreen/build-manylinux-internal.sh index 1b74fc68e1..4fd43a67a3 100755 --- a/.evergreen/build-manylinux-internal.sh +++ b/.evergreen/build-manylinux-internal.sh @@ -11,7 +11,7 @@ mv dist/* validdist || true # Compile wheels for PYTHON in /opt/python/*/bin/python; do - if [[ ! $PYTHON =~ (cp36|cp37|cp38|cp39|cp310) ]]; then + if [[ ! $PYTHON =~ (cp37|cp38|cp39|cp310) ]]; then continue fi # https://github.com/pypa/manylinux/issues/49 diff --git a/.evergreen/build-manylinux.sh b/.evergreen/build-manylinux.sh index a9a7238cb2..cac435fb11 100755 --- a/.evergreen/build-manylinux.sh +++ b/.evergreen/build-manylinux.sh @@ -34,7 +34,6 @@ ls dist # Check for any unexpected files. unexpected=$(find dist \! \( -iname dist -or \ - -iname '*cp36*' -or \ -iname '*cp37*' -or \ -iname '*cp38*' -or \ -iname '*cp39*' -or \ diff --git a/.evergreen/build-windows.sh b/.evergreen/build-windows.sh index 3a33558cc9..09f5e7f0b4 100755 --- a/.evergreen/build-windows.sh +++ b/.evergreen/build-windows.sh @@ -8,7 +8,7 @@ rm -rf validdist mkdir -p validdist mv dist/* validdist || true -for VERSION in 36 37 38 39 310; do +for VERSION in 37 38 39 310; do _pythons=("C:/Python/Python${VERSION}/python.exe" \ "C:/Python/32/Python${VERSION}/python.exe") for PYTHON in "${_pythons[@]}"; do diff --git a/.evergreen/config.yml b/.evergreen/config.yml index a6d9375f26..2576307364 100644 --- a/.evergreen/config.yml +++ b/.evergreen/config.yml @@ -135,8 +135,8 @@ functions: # Coverage combine merges (and removes) all the coverage files and # generates a new .coverage file in the current directory. ls -la coverage/ - /opt/python/3.6/bin/python3 -m coverage combine coverage/coverage.* - /opt/python/3.6/bin/python3 -m coverage html -d htmlcov + /opt/python/3.7/bin/python3 -m coverage combine coverage/coverage.* + /opt/python/3.7/bin/python3 -m coverage html -d htmlcov # Upload the resulting html coverage report. - command: shell.exec params: @@ -932,7 +932,7 @@ functions: done # Build source distribution. 
cd src/ - /opt/python/3.6/bin/python3 setup.py sdist + /opt/python/3.7/bin/python3 setup.py sdist cp dist/* ../releases - command: archive.targz_pack params: @@ -1324,7 +1324,7 @@ tasks: commands: - func: "run tests" vars: - PYTHON_BINARY: /opt/python/3.6/bin/python3 + PYTHON_BINARY: /opt/python/3.7/bin/python3 - name: "atlas-connect" tags: ["atlas-connect"] @@ -1945,10 +1945,6 @@ axes: values: # Note: always display platform with python-version to avoid ambiguous display names. # Linux - - id: "3.6" - display_name: "Python 3.6" - variables: - PYTHON_BINARY: "/opt/python/3.6/bin/python3" - id: "3.7" display_name: "Python 3.7" variables: @@ -1965,10 +1961,6 @@ axes: display_name: "Python 3.10" variables: PYTHON_BINARY: "/opt/python/3.10/bin/python3" - - id: "pypy3.6" - display_name: "PyPy 3.6" - variables: - PYTHON_BINARY: "/opt/python/pypy3.6/bin/pypy3" - id: "pypy3.7" display_name: "PyPy 3.7" variables: @@ -1977,6 +1969,10 @@ axes: display_name: "PyPy 3.8" variables: PYTHON_BINARY: "/opt/python/pypy3.8/bin/pypy3" + + - id: python-version-mac + display_name: "Python" + values: - id: "system-python3" display_name: "Python3" variables: @@ -1985,10 +1981,6 @@ axes: - id: python-version-windows display_name: "Python" values: - - id: "3.6" - display_name: "Python 3.6" - variables: - PYTHON_BINARY: "C:/python/Python36/python.exe" - id: "3.7" display_name: "Python 3.7" variables: @@ -2009,10 +2001,6 @@ axes: - id: python-version-windows-32 display_name: "Python" values: - - id: "3.6" - display_name: "32-bit Python 3.6" - variables: - PYTHON_BINARY: "C:/python/32/Python36/python.exe" - id: "3.7" display_name: "32-bit Python 3.7" variables: @@ -2281,7 +2269,7 @@ buildvariants: # Only test "noauth" with Python 3.7. exclude_spec: platform: ubuntu-18.04 - python-version: ["3.6", "3.8", "3.9", "3.10", "pypy3.6", "pypy3.7", "pypy3.8"] + python-version: ["3.8", "3.9", "3.10", "pypy3.7", "pypy3.8"] auth: "noauth" ssl: "ssl" pyopenssl: "*" @@ -2334,7 +2322,7 @@ buildvariants: exclude_spec: # These interpreters are always tested without extensions. - platform: ubuntu-18.04 - python-version: ["pypy3.6", "pypy3.7", "pypy3.8"] + python-version: ["pypy3.7", "pypy3.8"] c-extensions: "*" auth-ssl: "*" coverage: "*" @@ -2350,7 +2338,7 @@ buildvariants: exclude_spec: # These interpreters are always tested without extensions. - platform: ubuntu-18.04 - python-version: ["pypy3.6", "pypy3.7", "pypy3.8"] + python-version: ["pypy3.7", "pypy3.8"] c-extensions: "with-c-extensions" compression: "*" display_name: "${compression} ${c-extensions} ${python-version} ${platform}" @@ -2379,7 +2367,7 @@ buildvariants: exclude_spec: # Don't test green frameworks on these Python versions. - platform: ubuntu-18.04 - python-version: ["pypy3.6", "pypy3.7", "pypy3.8", "system-python3"] + python-version: ["pypy3.7", "pypy3.8"] green-framework: "*" auth-ssl: "*" display_name: "${green-framework} ${python-version} ${platform} ${auth-ssl}" @@ -2405,7 +2393,7 @@ buildvariants: matrix_spec: platform: awslinux # Python 3.10+ requires OpenSSL 1.1.1+ - python-version: ["3.6", "3.7", "3.8", "3.9", "pypy3.6", "pypy3.7", "pypy3.8"] + python-version: ["3.7", "3.8", "3.9", "pypy3.7", "pypy3.8"] auth-ssl: "*" display_name: "OpenSSL 1.0.2 ${python-version} ${platform} ${auth-ssl}" tasks: @@ -2420,12 +2408,12 @@ buildvariants: display_name: "Encryption ${platform} ${python-version-windows} ${auth-ssl}" tasks: *encryption-server-versions -# Storage engine tests on Ubuntu 18.04 (x86_64) with Python 3.6. 
+# Storage engine tests on Ubuntu 18.04 (x86_64) with Python 3.7. - matrix_name: "tests-storage-engines" matrix_spec: platform: ubuntu-18.04 storage-engine: "*" - python-version: 3.6 + python-version: 3.7 display_name: "Storage ${storage-engine} ${python-version} ${platform}" rules: - if: @@ -2452,12 +2440,12 @@ buildvariants: - "test-3.6-standalone" - "test-3.6-replica_set" -# enableTestCommands=0 tests on Ubuntu18 (x86_64) with Python 3.6. +# enableTestCommands=0 tests on Ubuntu18 (x86_64) with Python 3.7. - matrix_name: "test-disableTestCommands" matrix_spec: platform: ubuntu-18.04 disableTestCommands: "*" - python-version: "3.6" + python-version: "3.7" display_name: "Disable test commands ${python-version} ${platform}" tasks: - ".latest" @@ -2483,7 +2471,7 @@ buildvariants: - matrix_name: "tests-mod-wsgi" matrix_spec: platform: ubuntu-18.04 - python-version: ["3.6", "3.7", "3.8", "3.9", "3.10"] + python-version: ["3.7", "3.8", "3.9", "3.10"] mod-wsgi-version: "*" exclude_spec: # mod-wsgi 3.5 won't build against CPython 3.8+ @@ -2498,7 +2486,7 @@ buildvariants: - matrix_name: "mockupdb-tests" matrix_spec: platform: ubuntu-18.04 - python-version: 3.6 + python-version: 3.7 display_name: "MockupDB Tests" tasks: - name: "mockupdb" @@ -2543,11 +2531,6 @@ buildvariants: python-version: "*" auth-ssl: auth-ssl serverless: "*" - exclude_spec: - - platform: ubuntu-18.04 - python-version: ["system-python3"] - auth-ssl: auth-ssl - serverless: "*" display_name: "Serverless ${python-version} ${platform}" tasks: - "serverless_task_group" @@ -2555,7 +2538,7 @@ buildvariants: - matrix_name: "data-lake-spec-tests" matrix_spec: platform: ubuntu-18.04 - python-version: ["3.6", "3.10"] + python-version: ["3.7", "3.10"] auth: "auth" c-extensions: "*" display_name: "Atlas Data Lake ${python-version} ${c-extensions}" @@ -2565,7 +2548,7 @@ buildvariants: - matrix_name: "stable-api-tests" matrix_spec: platform: ubuntu-18.04 - python-version: ["3.6", "3.10"] + python-version: ["3.7", "3.10"] auth: "auth" versionedApi: "*" display_name: "Versioned API ${versionedApi} ${python-version}" @@ -2580,7 +2563,7 @@ buildvariants: # OCSP stapling is not supported on Ubuntu 18.04. # See https://jira.mongodb.org/browse/SERVER-51364. 
platform: ubuntu-20.04 - python-version: ["3.6", "3.10", "pypy3.6", "pypy3.8"] + python-version: ["3.7", "3.10", "pypy3.7", "pypy3.8"] mongodb-version: ["4.4", "5.0", "latest"] auth: "noauth" ssl: "ssl" @@ -2592,7 +2575,7 @@ buildvariants: - matrix_name: "ocsp-test-windows" matrix_spec: platform: windows-64-vsMulti-small - python-version-windows: ["3.6", "3.10"] + python-version-windows: ["3.7", "3.10"] mongodb-version: ["4.4", "5.0", "latest"] auth: "noauth" ssl: "ssl" @@ -2616,14 +2599,24 @@ buildvariants: - matrix_name: "aws-auth-test" matrix_spec: - platform: [ubuntu-18.04, macos-1014] - python-version: ["system-python3"] + platform: [ubuntu-18.04] + python-version: ["3.7"] display_name: "MONGODB-AWS Auth ${platform} ${python-version}" tasks: - name: "aws-auth-test-4.4" - name: "aws-auth-test-5.0" - name: "aws-auth-test-latest" +- matrix_name: "aws-auth-test-mac" + matrix_spec: + platform: [macos-1014] + python-version-mac: ["system-python3"] + display_name: "MONGODB-AWS Auth ${platform} ${python-version-mac}" + tasks: + - name: "aws-auth-test-4.4" + - name: "aws-auth-test-5.0" + - name: "aws-auth-test-latest" + - matrix_name: "aws-auth-test-windows" matrix_spec: platform: [windows-64-vsMulti-small] diff --git a/.evergreen/run-mongodb-aws-ecs-test.sh b/.evergreen/run-mongodb-aws-ecs-test.sh index 3484f41f43..83f3975e9e 100755 --- a/.evergreen/run-mongodb-aws-ecs-test.sh +++ b/.evergreen/run-mongodb-aws-ecs-test.sh @@ -20,32 +20,18 @@ fi # Now we can safely enable xtrace set -o xtrace -if command -v virtualenv ; then - VIRTUALENV=$(command -v virtualenv) -else - if ! python3 -m pip --version ; then - echo "Installing pip..." - apt-get update - apt install python3-pip -y - fi - echo "Installing virtualenv..." - python3 -m pip install --user virtualenv - VIRTUALENV='python3 -m virtualenv' -fi +# Install python3.7 with pip. +apt-get update +apt install python3.7 python3-pip -y authtest () { echo "Running MONGODB-AWS ECS authentication tests with $PYTHON" $PYTHON --version - - $VIRTUALENV -p $PYTHON --never-download venvaws - . venvaws/bin/activate - + $PYTHON -m pip install --upgrade wheel setuptools pip cd src - python -m pip install '.[aws]' - python test/auth_aws/test_auth_aws.py + $PYTHON -m pip install '.[aws]' + $PYTHON test/auth_aws/test_auth_aws.py cd - - deactivate - rm -rf venvaws } -PYTHON=$(command -v python3) authtest +PYTHON="python3.7" authtest diff --git a/.evergreen/run-tests.sh b/.evergreen/run-tests.sh index ade267d2b1..4a48b4a33b 100755 --- a/.evergreen/run-tests.sh +++ b/.evergreen/run-tests.sh @@ -66,13 +66,13 @@ fi if [ -z "$PYTHON_BINARY" ]; then # Use Python 3 from the server toolchain to test on ARM, POWER or zSeries if a - # system python3 doesn't exist or exists but is older than 3.6. - if is_python_36 "$(command -v python3)"; then + # system python3 doesn't exist or exists but is older than 3.7. + if is_python_37 "$(command -v python3)"; then PYTHON=$(command -v python3) - elif is_python_36 "$(command -v /opt/mongodbtoolchain/v3/bin/python3)"; then + elif is_python_37 "$(command -v /opt/mongodbtoolchain/v3/bin/python3)"; then PYTHON=$(command -v /opt/mongodbtoolchain/v3/bin/python3) else - echo "Cannot test without python3.6+ installed!" + echo "Cannot test without python3.7+ installed!" 
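The ``is_python_37`` helper that run-tests.sh relies on boils down to a one-line interpreter probe; an equivalent standalone check in Python::

    import sys

    def is_python_37() -> bool:
        # Same test the shell helper runs via `python -c`: 3.7 or newer.
        return sys.version_info[:2] >= (3, 7)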
fi elif [ "$COMPRESSORS" = "snappy" ]; then createvirtualenv $PYTHON_BINARY snappytest diff --git a/.evergreen/utils.sh b/.evergreen/utils.sh index b7f65104e8..67fa272683 100755 --- a/.evergreen/utils.sh +++ b/.evergreen/utils.sh @@ -59,15 +59,15 @@ testinstall () { fi } -# Function that returns success if the provided Python binary is version 3.6 or later +# Function that returns success if the provided Python binary is version 3.7 or later # Usage: -# is_python_36 /path/to/python +# is_python_37 /path/to/python # * param1: Python binary -is_python_36() { +is_python_37() { if [ -z "$1" ]; then return 1 - elif $1 -c "import sys; exit(sys.version_info[:2] < (3, 6))"; then - # runs when sys.version_info[:2] >= (3, 6) + elif $1 -c "import sys; exit(sys.version_info[:2] < (3, 7))"; then + # runs when sys.version_info[:2] >= (3, 7) return 0 else return 1 diff --git a/.github/workflows/test-python.yml b/.github/workflows/test-python.yml index 1eea4ff166..89d9830e82 100644 --- a/.github/workflows/test-python.yml +++ b/.github/workflows/test-python.yml @@ -23,7 +23,7 @@ jobs: strategy: matrix: os: [ubuntu-20.04] - python-version: ["3.6", "3.10", "pypy-3.8"] + python-version: ["3.7", "3.10", "pypy-3.8"] name: CPython ${{ matrix.python-version }}-${{ matrix.os }} steps: - uses: actions/checkout@v2 diff --git a/CONTRIBUTING.rst b/CONTRIBUTING.rst index 1a4423f3ef..b8bbad93f6 100644 --- a/CONTRIBUTING.rst +++ b/CONTRIBUTING.rst @@ -19,7 +19,7 @@ that might not be of interest or that has already been addressed. Supported Interpreters ---------------------- -PyMongo supports CPython 3.6+ and PyPy3.6+. Language +PyMongo supports CPython 3.7+ and PyPy3.7+. Language features not supported by all interpreters can not be used. Style Guide diff --git a/README.rst b/README.rst index c3c3757289..c301932643 100644 --- a/README.rst +++ b/README.rst @@ -88,7 +88,7 @@ is incompatible with PyMongo. Dependencies ============ -PyMongo supports CPython 3.6.2+ and PyPy3.6+. +PyMongo supports CPython 3.7+ and PyPy3.7+. Optional dependencies: diff --git a/doc/changelog.rst b/doc/changelog.rst index 97795fdfb9..7f002fb470 100644 --- a/doc/changelog.rst +++ b/doc/changelog.rst @@ -4,6 +4,8 @@ Changelog Changes in Version 4.2 ---------------------- +.. warning:: PyMongo 4.2 drops support for Python 3.6: Python 3.7+ is now required. + Bug fixes ......... diff --git a/doc/examples/tls.rst b/doc/examples/tls.rst index 6dcb7a1759..557ee7d9b9 100644 --- a/doc/examples/tls.rst +++ b/doc/examples/tls.rst @@ -182,7 +182,7 @@ server's certificate:: This often occurs because OpenSSL does not have access to the system's root certificates or the certificates are out of date. Linux users should ensure that they have the latest root certificate updates installed from -their Linux vendor. macOS users using Python 3.6.2 or newer downloaded +their Linux vendor. macOS users using Python 3.7 or newer downloaded from python.org `may have to run a script included with python `_ to install root certificates:: diff --git a/doc/faq.rst b/doc/faq.rst index 06559ddb9b..ca83f5de4c 100644 --- a/doc/faq.rst +++ b/doc/faq.rst @@ -145,7 +145,7 @@ they are returned to the pool. Does PyMongo support Python 3? ------------------------------ -PyMongo supports CPython 3.6.2+ and PyPy3.6+. See the :doc:`python3` for details. +PyMongo supports CPython 3.7+ and PyPy3.7+. See the :doc:`python3` for details. Does PyMongo support asynchronous frameworks like Gevent, asyncio, Tornado, or Twisted? 
--------------------------------------------------------------------------------------- diff --git a/doc/installation.rst b/doc/installation.rst index 4f14b31125..788faf46cc 100644 --- a/doc/installation.rst +++ b/doc/installation.rst @@ -28,7 +28,7 @@ To upgrade using pip:: Dependencies ------------ -PyMongo supports CPython 3.6.2+ and PyPy3.6+. +PyMongo supports CPython 3.7+ and PyPy3.7+. Optional dependencies: @@ -133,7 +133,7 @@ See `http://bugs.python.org/issue11623 `_ for a more detailed explanation. **Lion (10.7) and newer** - PyMongo's C extensions can be built against -versions of Python 3.6.2+ downloaded from python.org. In all cases Xcode must be +versions of Python 3.7+ downloaded from python.org. In all cases Xcode must be installed with 'UNIX Development Support'. **Xcode 5.1**: Starting with version 5.1 the version of clang that ships with diff --git a/doc/python3.rst b/doc/python3.rst index c14224166a..812bc33b35 100644 --- a/doc/python3.rst +++ b/doc/python3.rst @@ -6,7 +6,7 @@ Python 3 FAQ What Python 3 versions are supported? ------------------------------------- -PyMongo supports CPython 3.6.2+ and PyPy3.6+. +PyMongo supports CPython 3.7+ and PyPy3.7+. Are there any PyMongo behavior changes with Python 3? ----------------------------------------------------- @@ -20,8 +20,8 @@ with subtype 0. For example, let's insert a :class:`bytes` instance using Python 3 then read it back. Notice the byte string is decoded back to :class:`bytes`:: - Python 3.6.8 (v3.6.8:3c6b436a57, Dec 24 2018, 02:04:31) - [GCC 4.2.1 Compatible Apple LLVM 6.0 (clang-600.0.57)] on darwin + Python 3.7.9 (v3.7.9:13c94747c7, Aug 15 2020, 01:31:08) + [Clang 6.0 (clang-600.0.57)] on darwin Type "help", "copyright", "credits" or "license" for more information. >>> import pymongo >>> c = pymongo.MongoClient() @@ -49,8 +49,8 @@ decoded to :class:`~bson.binary.Binary` with subtype 0. For example, let's decode a JSON binary subtype 0 using Python 3. Notice the byte string is decoded to :class:`bytes`:: - Python 3.6.8 (v3.6.8:3c6b436a57, Dec 24 2018, 02:04:31) - [GCC 4.2.1 Compatible Apple LLVM 6.0 (clang-600.0.57)] on darwin + Python 3.7.9 (v3.7.9:13c94747c7, Aug 15 2020, 01:31:08) + [Clang 6.0 (clang-600.0.57)] on darwin Type "help", "copyright", "credits" or "license" for more information. >>> from bson.json_util import loads >>> loads('{"b": {"$binary": "dGhpcyBpcyBhIGJ5dGUgc3RyaW5n", "$type": "00"}}') @@ -86,8 +86,8 @@ Python 3 you must pass ``encoding='latin-1'`` to pickle.loads:: >>> pickle.dumps(oid) 'ccopy_reg\n_reconstructor\np0\n(cbson.objectid\...' - Python 3.6.8 (v3.6.8:3c6b436a57, Dec 24 2018, 02:04:31) - [GCC 4.2.1 Compatible Apple LLVM 6.0 (clang-600.0.57)] on darwin + Python 3.7.9 (v3.7.9:13c94747c7, Aug 15 2020, 01:31:08) + [Clang 6.0 (clang-600.0.57)] on darwin Type "help", "copyright", "credits" or "license" for more information. >>> import pickle >>> pickle.loads(b'ccopy_reg\n_reconstructor\np0\n(cbson.objectid\...', encoding='latin-1') @@ -97,8 +97,8 @@ Python 3 you must pass ``encoding='latin-1'`` to pickle.loads:: If you need to pickle ObjectIds using Python 3 and unpickle them using Python 2 you must use ``protocol <= 2``:: - Python 3.6.8 (v3.6.8:3c6b436a57, Dec 24 2018, 02:04:31) - [GCC 4.2.1 Compatible Apple LLVM 6.0 (clang-600.0.57)] on darwin + Python 3.7.9 (v3.7.9:13c94747c7, Aug 15 2020, 01:31:08) + [Clang 6.0 (clang-600.0.57)] on darwin Type "help", "copyright", "credits" or "license" for more information. 
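The byte-string round trip described in doc/python3.rst can be reproduced without a server by encoding and decoding BSON directly; a minimal sketch::

    import bson

    raw = bson.encode({"binary": b"this is a byte string"})
    print(bson.decode(raw))  # {'binary': b'this is a byte string'}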
>>> import pickle >>> from bson.objectid import ObjectId diff --git a/pymongo/pool.py b/pymongo/pool.py index 1aaae4067f..13d0e78d1e 100644 --- a/pymongo/pool.py +++ b/pymongo/pool.py @@ -357,9 +357,9 @@ def __init__( # { # 'driver': { # 'name': 'PyMongo|MyDriver', - # 'version': '3.7.0|1.2.3', + # 'version': '4.2.0|1.2.3', # }, - # 'platform': 'CPython 3.6.0|MyPlatform' + # 'platform': 'CPython 3.7.0|MyPlatform' # } if driver: if driver.name: diff --git a/pymongo/pyopenssl_context.py b/pymongo/pyopenssl_context.py index eae38daef8..3736a4f381 100644 --- a/pymongo/pyopenssl_context.py +++ b/pymongo/pyopenssl_context.py @@ -55,7 +55,6 @@ # Always available HAS_SNI = True -CHECK_HOSTNAME_SAFE = True IS_PYOPENSSL = True # Base Exception class diff --git a/pymongo/ssl_context.py b/pymongo/ssl_context.py index 148bef936d..4e997a439e 100644 --- a/pymongo/ssl_context.py +++ b/pymongo/ssl_context.py @@ -15,7 +15,6 @@ """A fake SSLContext implementation.""" import ssl as _ssl -import sys as _sys # PROTOCOL_TLS_CLIENT is Python 3.6+ PROTOCOL_SSLv23 = getattr(_ssl, "PROTOCOL_TLS_CLIENT", _ssl.PROTOCOL_SSLv23) @@ -35,8 +34,3 @@ if hasattr(_ssl, "VERIFY_CRL_CHECK_LEAF"): from ssl import VERIFY_CRL_CHECK_LEAF # noqa: F401 -# Python 3.7 uses OpenSSL's hostname matching implementation -# making it the obvious version to start using SSLConext.check_hostname. -# Python 3.6 might have been a good version, but it suffers -# from https://bugs.python.org/issue32185. -CHECK_HOSTNAME_SAFE = _sys.version_info[:2] >= (3, 7) diff --git a/pymongo/ssl_support.py b/pymongo/ssl_support.py index 06ef7ef185..6adf629ad3 100644 --- a/pymongo/ssl_support.py +++ b/pymongo/ssl_support.py @@ -55,7 +55,7 @@ def get_ssl_context( ctx = _ssl.SSLContext(_ssl.PROTOCOL_SSLv23) # SSLContext.check_hostname was added in CPython 3.4. 
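With Python 3.7+ guaranteed, ``check_hostname`` can simply track whether certificate verification is enabled. A sketch of the resulting logic using only the stdlib ``ssl`` module::

    import ssl

    allow_invalid_hostnames = False
    ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
    if ctx.verify_mode != ssl.CERT_NONE:
        ctx.check_hostname = not allow_invalid_hostnames
    else:
        ctx.check_hostname = False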
if hasattr(ctx, "check_hostname"): - if _ssl.CHECK_HOSTNAME_SAFE and verify_mode != CERT_NONE: + if verify_mode != CERT_NONE: ctx.check_hostname = not allow_invalid_hostnames else: ctx.check_hostname = False diff --git a/setup.py b/setup.py index 9e8e919e88..40fb484ad1 100755 --- a/setup.py +++ b/setup.py @@ -4,8 +4,8 @@ import sys import warnings -if sys.version_info[:3] < (3, 6, 2): - raise RuntimeError("Python version >= 3.6.2 required.") +if sys.version_info[:3] < (3, 7): + raise RuntimeError("Python version >= 3.7 required.") # Hack to silence atexit traceback in some Python versions @@ -321,7 +321,7 @@ def build_extension(self, ext): keywords=["mongo", "mongodb", "pymongo", "gridfs", "bson"], install_requires=[], license="Apache License, Version 2.0", - python_requires=">=3.6.2", + python_requires=">=3.7", classifiers=[ "Development Status :: 5 - Production/Stable", "Intended Audience :: Developers", @@ -331,7 +331,6 @@ def build_extension(self, ext): "Operating System :: POSIX", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3 :: Only", - "Programming Language :: Python :: 3.6", "Programming Language :: Python :: 3.7", "Programming Language :: Python :: 3.8", "Programming Language :: Python :: 3.9", diff --git a/test/test_mypy.py b/test/test_mypy.py index 12a6cffbe6..07af61ed36 100644 --- a/test/test_mypy.py +++ b/test/test_mypy.py @@ -23,7 +23,7 @@ try: from typing import TypedDict # type: ignore[attr-defined] - # Not available in Python 3.6 and Python 3.7 + # Not available in Python 3.7 class Movie(TypedDict): # type: ignore[misc] name: str year: int @@ -131,6 +131,7 @@ def test_list_databases(self) -> None: def test_default_document_type(self) -> None: client = rs_or_single_client() + self.addCleanup(client.close) coll = client.test.test doc = {"my": "doc"} coll.insert_one(doc) diff --git a/test/utils.py b/test/utils.py index 2c50797266..9e8d6448d9 100644 --- a/test/utils.py +++ b/test/utils.py @@ -875,17 +875,14 @@ def lazy_client_trial(reset, target, test, get_client): def gevent_monkey_patched(): """Check if gevent's monkey patching is active.""" - # In Python 3.6 importing gevent.socket raises an ImportWarning. 
- with warnings.catch_warnings(): - warnings.simplefilter("ignore", ImportWarning) - try: - import socket + try: + import socket - import gevent.socket + import gevent.socket - return socket.socket is gevent.socket.socket - except ImportError: - return False + return socket.socket is gevent.socket.socket + except ImportError: + return False def eventlet_monkey_patched(): From aa16f1c5feb41833e634a5d1e841854d31b32ba1 Mon Sep 17 00:00:00 2001 From: Julius Park Date: Fri, 6 May 2022 12:02:36 -0700 Subject: [PATCH 0145/1588] PYTHON-3242 Test against MongoDB 6.0 (#940) --- .evergreen/config.yml | 107 +++++++++++++++++++++++++++++++++++++----- 1 file changed, 94 insertions(+), 13 deletions(-) diff --git a/.evergreen/config.yml b/.evergreen/config.yml index 2576307364..97d13654c0 100644 --- a/.evergreen/config.yml +++ b/.evergreen/config.yml @@ -1233,6 +1233,33 @@ tasks: TOPOLOGY: "sharded_cluster" - func: "run tests" + - name: "test-6.0-standalone" + tags: ["6.0", "standalone"] + commands: + - func: "bootstrap mongo-orchestration" + vars: + VERSION: "6.0" + TOPOLOGY: "server" + - func: "run tests" + + - name: "test-6.0-replica_set" + tags: ["6.0", "replica_set"] + commands: + - func: "bootstrap mongo-orchestration" + vars: + VERSION: "6.0" + TOPOLOGY: "replica_set" + - func: "run tests" + + - name: "test-6.0-sharded_cluster" + tags: ["6.0", "sharded_cluster"] + commands: + - func: "bootstrap mongo-orchestration" + vars: + VERSION: "6.0" + TOPOLOGY: "sharded_cluster" + - func: "run tests" + - name: "test-latest-standalone" tags: ["latest", "standalone"] commands: @@ -1694,6 +1721,22 @@ tasks: - func: "run aws auth test with aws EC2 credentials" - func: "run aws ECS auth test" + - name: "aws-auth-test-6.0" + commands: + - func: "bootstrap mongo-orchestration" + vars: + AUTH: "auth" + ORCHESTRATION_FILE: "auth-aws.json" + TOPOLOGY: "server" + VERSION: "6.0" + - func: "add aws auth variables to file" + - func: "run aws auth test with regular aws credentials" + - func: "run aws auth test with assume role credentials" + - func: "run aws auth test with aws credentials as environment variables" + - func: "run aws auth test with aws credentials and session token as environment variables" + - func: "run aws auth test with aws EC2 credentials" + - func: "run aws ECS auth test" + - name: "aws-auth-test-latest" commands: - func: "bootstrap mongo-orchestration" @@ -1709,6 +1752,21 @@ tasks: - func: "run aws auth test with aws credentials and session token as environment variables" - func: "run aws auth test with aws EC2 credentials" - func: "run aws ECS auth test" + - name: "aws-auth-test-rapid" + commands: + - func: "bootstrap mongo-orchestration" + vars: + AUTH: "auth" + ORCHESTRATION_FILE: "auth-aws.json" + TOPOLOGY: "server" + VERSION: "rapid" + - func: "add aws auth variables to file" + - func: "run aws auth test with regular aws credentials" + - func: "run aws auth test with assume role credentials" + - func: "run aws auth test with aws credentials as environment variables" + - func: "run aws auth test with aws credentials and session token as environment variables" + - func: "run aws auth test with aws EC2 credentials" + - func: "run aws ECS auth test" - name: load-balancer-test commands: @@ -1930,6 +1988,10 @@ axes: display_name: "MongoDB 5.0" variables: VERSION: "5.0" + - id: "6.0" + display_name: "MongoDB 6.0" + variables: + VERSION: "6.0" - id: "latest" display_name: "MongoDB latest" variables: @@ -2159,9 +2221,8 @@ buildvariants: - awslinux auth-ssl: "*" display_name: "${platform} ${auth-ssl}" - 
tasks: &all-server-versions - - ".rapid" - - ".latest" + tasks: + - ".6.0" - ".5.0" - ".4.4" - ".4.2" @@ -2176,8 +2237,8 @@ buildvariants: auth-ssl: "*" encryption: "*" display_name: "Encryption ${platform} ${auth-ssl}" - tasks: &encryption-server-versions - - ".latest" + tasks: + - ".6.0" - ".5.0" - ".4.4" - ".4.2" @@ -2222,6 +2283,7 @@ buildvariants: display_name: "${platform} ${auth} ${ssl}" tasks: - ".latest" + - ".6.0" - ".5.0" - ".4.4" - ".4.2" @@ -2236,7 +2298,13 @@ buildvariants: ssl: "nossl" encryption: "*" display_name: "Encryption ${platform} ${auth} ${ssl}" - tasks: *encryption-server-versions + tasks: &encryption-server-versions + - ".latest" + - ".6.0" + - ".5.0" + - ".4.4" + - ".4.2" + - ".4.0" # Test one server version (4.2) with zSeries, POWER8, and ARM. - matrix_name: "test-different-cpu-architectures" @@ -2257,7 +2325,15 @@ buildvariants: auth-ssl: "*" coverage: "*" display_name: "${python-version} ${platform} ${auth-ssl} ${coverage}" - tasks: *all-server-versions + tasks: &all-server-versions + - ".rapid" + - ".latest" + - ".6.0" + - ".5.0" + - ".4.4" + - ".4.2" + - ".4.0" + - ".3.6" - matrix_name: "tests-pyopenssl" matrix_spec: @@ -2397,7 +2473,7 @@ buildvariants: auth-ssl: "*" display_name: "OpenSSL 1.0.2 ${python-version} ${platform} ${auth-ssl}" tasks: - - ".latest" + - ".5.0" - matrix_name: "tests-windows-encryption" matrix_spec: @@ -2564,7 +2640,7 @@ buildvariants: # See https://jira.mongodb.org/browse/SERVER-51364. platform: ubuntu-20.04 python-version: ["3.7", "3.10", "pypy3.7", "pypy3.8"] - mongodb-version: ["4.4", "5.0", "latest"] + mongodb-version: ["4.4", "5.0", "6.0", "latest"] auth: "noauth" ssl: "ssl" display_name: "OCSP test ${platform} ${python-version} ${mongodb-version}" @@ -2576,7 +2652,7 @@ buildvariants: matrix_spec: platform: windows-64-vsMulti-small python-version-windows: ["3.7", "3.10"] - mongodb-version: ["4.4", "5.0", "latest"] + mongodb-version: ["4.4", "5.0", "6.0", "latest"] auth: "noauth" ssl: "ssl" display_name: "OCSP test ${platform} ${python-version-windows} ${mongodb-version}" @@ -2588,7 +2664,7 @@ buildvariants: - matrix_name: "ocsp-test-macos" matrix_spec: platform: macos-1014 - mongodb-version: ["4.4", "5.0", "latest"] + mongodb-version: ["4.4", "5.0", "6.0", "latest"] auth: "noauth" ssl: "ssl" display_name: "OCSP test ${platform} ${mongodb-version}" @@ -2606,6 +2682,8 @@ buildvariants: - name: "aws-auth-test-4.4" - name: "aws-auth-test-5.0" - name: "aws-auth-test-latest" + - name: "aws-auth-test-6.0" + - name: "aws-auth-test-rapid" - matrix_name: "aws-auth-test-mac" matrix_spec: @@ -2616,7 +2694,8 @@ buildvariants: - name: "aws-auth-test-4.4" - name: "aws-auth-test-5.0" - name: "aws-auth-test-latest" - + - name: "aws-auth-test-6.0" + - name: "aws-auth-test-rapid" - matrix_name: "aws-auth-test-windows" matrix_spec: platform: [windows-64-vsMulti-small] @@ -2626,11 +2705,13 @@ buildvariants: - name: "aws-auth-test-4.4" - name: "aws-auth-test-5.0" - name: "aws-auth-test-latest" + - name: "aws-auth-test-6.0" + - name: "aws-auth-test-rapid" - matrix_name: "load-balancer" matrix_spec: platform: ubuntu-18.04 - mongodb-version: ["rapid", "latest"] + mongodb-version: ["rapid", "latest", "6.0"] auth-ssl: "*" python-version: "*" loadbalancer: "*" From 3e57bde2ee3a3f44180e7d388b8515366db1ffe8 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Fri, 6 May 2022 13:53:30 -0700 Subject: [PATCH 0146/1588] PYTHON-3230 Migrate to newer zSeries, POWER8, and ARM platforms (#942) --- .evergreen/config.yml | 28 ++++++++++++++-------------- 1 file changed, 
14 insertions(+), 14 deletions(-) diff --git a/.evergreen/config.yml b/.evergreen/config.yml index 97d13654c0..3f8955f40e 100644 --- a/.evergreen/config.yml +++ b/.evergreen/config.yml @@ -1882,17 +1882,17 @@ axes: batchtime: 10080 # 7 days variables: python3_binary: python3 - - id: ubuntu1804-zseries - display_name: "Ubuntu 18.04 (zSeries)" - run_on: ubuntu1804-zseries-small + - id: rhel83-zseries + display_name: "RHEL 8.3 (zSeries)" + run_on: rhel83-zseries-small batchtime: 10080 # 7 days - - id: ubuntu1804-power8 - display_name: "Ubuntu 18.04 (POWER8)" - run_on: ubuntu1804-power8-small + - id: rhel81-power8 + display_name: "RHEL 8.1 (POWER8)" + run_on: rhel81-power8-small batchtime: 10080 # 7 days - - id: ubuntu1804-arm64 - display_name: "Ubuntu 18.04 (ARM64)" - run_on: ubuntu1804-arm64-small + - id: rhel82-arm64 + display_name: "RHEL 8.2 (ARM64)" + run_on: rhel82-arm64-small batchtime: 10080 # 7 days variables: libmongocrypt_url: https://s3.amazonaws.com/mciuploads/libmongocrypt/ubuntu1804-arm64/master/latest/libmongocrypt.tar.gz @@ -2306,17 +2306,17 @@ buildvariants: - ".4.2" - ".4.0" -# Test one server version (4.2) with zSeries, POWER8, and ARM. +# Test one server version with zSeries, POWER8, and ARM. - matrix_name: "test-different-cpu-architectures" matrix_spec: platform: - - ubuntu1804-zseries # Ubuntu 18 or RHEL 8.x? - - ubuntu1804-power8 # Ubuntu 18 or RHEL 7? - - ubuntu1804-arm64 + - rhel83-zseries # Added in 5.0.8 (SERVER-44074) + - rhel81-power8 # Added in 4.2.7 (SERVER-44072) + - rhel82-arm64 # Added in 4.4.2 (SERVER-48282) auth-ssl: "*" display_name: "${platform} ${auth-ssl}" tasks: - - ".4.2" + - ".6.0" - matrix_name: "tests-python-version-ubuntu18-test-ssl" matrix_spec: From 21ead3a7e5ff5cdc6fd4cad6f92d5efbb0899757 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Fri, 6 May 2022 16:48:02 -0500 Subject: [PATCH 0147/1588] PYTHON-3189 Change Stream event document missing to field for rename events (#924) --- .../unified/change-streams.json | 322 +++++++++++++++++- test/utils.py | 2 + 2 files changed, 317 insertions(+), 7 deletions(-) diff --git a/test/change_streams/unified/change-streams.json b/test/change_streams/unified/change-streams.json index 5fd2544ce0..8bc0c956cd 100644 --- a/test/change_streams/unified/change-streams.json +++ b/test/change_streams/unified/change-streams.json @@ -3,6 +3,7 @@ "schemaVersion": "1.0", "runOnRequirements": [ { + "minServerVersion": "3.6", "topologies": [ "replicaset", "sharded-replicaset" @@ -167,7 +168,6 @@ "description": "Test with document comment - pre 4.4", "runOnRequirements": [ { - "minServerVersion": "3.6.0", "maxServerVersion": "4.2.99" } ], @@ -211,11 +211,6 @@ }, { "description": "Test with string comment", - "runOnRequirements": [ - { - "minServerVersion": "3.6.0" - } - ], "operations": [ { "name": "createChangeStream", @@ -343,7 +338,6 @@ "description": "Test that comment is not set on getMore - pre 4.4", "runOnRequirements": [ { - "minServerVersion": "3.6.0", "maxServerVersion": "4.3.99", "topologies": [ "replicaset" @@ -426,6 +420,320 @@ ] } ] + }, + { + "description": "to field is set in a rename change event", + "runOnRequirements": [ + { + "minServerVersion": "4.0.1" + } + ], + "operations": [ + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [] + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "dropCollection", + "object": "database0", + "arguments": { + "collection": "collection1" + } + }, + { + "name": "rename", + "object": "collection0", + "arguments": 
{ + "to": "collection1" + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "operationType": "rename", + "ns": { + "db": "database0", + "coll": "collection0" + }, + "to": { + "db": "database0", + "coll": "collection1" + } + } + } + ] + }, + { + "description": "Test unknown operationType MUST NOT err", + "operations": [ + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [ + { + "$project": { + "operationType": "addedInFutureMongoDBVersion", + "ns": 1 + } + } + ] + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "_id": 1, + "a": 1 + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "operationType": "addedInFutureMongoDBVersion", + "ns": { + "db": "database0", + "coll": "collection0" + } + } + } + ] + }, + { + "description": "Test newField added in response MUST NOT err", + "operations": [ + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [ + { + "$project": { + "operationType": 1, + "ns": 1, + "newField": "newFieldValue" + } + } + ] + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "_id": 1, + "a": 1 + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "operationType": "insert", + "ns": { + "db": "database0", + "coll": "collection0" + }, + "newField": "newFieldValue" + } + } + ] + }, + { + "description": "Test new structure in ns document MUST NOT err", + "runOnRequirements": [ + { + "minServerVersion": "3.6", + "maxServerVersion": "5.2" + }, + { + "minServerVersion": "6.0" + } + ], + "operations": [ + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [ + { + "$project": { + "operationType": "insert", + "ns.viewOn": "db.coll" + } + } + ] + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "_id": 1, + "a": 1 + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "operationType": "insert", + "ns": { + "viewOn": "db.coll" + } + } + } + ] + }, + { + "description": "Test modified structure in ns document MUST NOT err", + "operations": [ + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [ + { + "$project": { + "operationType": "insert", + "ns": { + "db": "$ns.db", + "coll": "$ns.coll", + "viewOn": "db.coll" + } + } + } + ] + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "_id": 1, + "a": 1 + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "operationType": "insert", + "ns": { + "db": "database0", + "coll": "collection0", + "viewOn": "db.coll" + } + } + } + ] + }, + { + "description": "Test server error on projecting out _id", + "runOnRequirements": [ + { + "minServerVersion": "4.2" + } + ], + "operations": [ + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [ + { + "$project": { + "_id": 0 + } + } + ] + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "_id": 1, + "a": 1 + } + } 
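Outside the test harness, the rename behavior exercised above looks like this with PyMongo; a sketch, assuming a replica set reachable at the default localhost port::

    from pymongo import MongoClient

    client = MongoClient()  # change streams require a replica set
    db = client.database0
    db.collection1.drop()
    db.collection0.insert_one({"_id": 1})
    with db.collection0.watch() as stream:
        db.collection0.rename("collection1")
        for event in stream:
            if event["operationType"] == "rename":
                print(event["to"])  # {'db': 'database0', 'coll': 'collection1'}
                break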
+ }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectError": { + "errorCode": 280, + "errorCodeName": "ChangeStreamFatalError", + "errorLabelsContain": [ + "NonResumableChangeStreamError" + ] + } + } + ] + }, + { + "description": "Test projection in change stream returns expected fields", + "operations": [ + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [ + { + "$project": { + "optype": "$operationType", + "ns": 1, + "newField": "value" + } + } + ] + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "_id": 1, + "a": 1 + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "optype": "insert", + "ns": { + "db": "database0", + "coll": "collection0" + }, + "newField": "value" + } + } + ] } ] } diff --git a/test/utils.py b/test/utils.py index 9e8d6448d9..8a79c97d93 100644 --- a/test/utils.py +++ b/test/utils.py @@ -1080,5 +1080,7 @@ def prepare_spec_arguments(spec, arguments, opname, entity_map, with_txn_callbac arguments["keys"] = list(arguments.pop(arg_name).items()) elif opname == "drop_index" and arg_name == "name": arguments["index_or_name"] = arguments.pop(arg_name) + elif opname == "rename" and arg_name == "to": + arguments["new_name"] = arguments.pop(arg_name) else: arguments[c2s] = arguments.pop(arg_name) From cbab615231487d514e6c37eb8a853624610b961f Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Mon, 9 May 2022 12:01:36 -0700 Subject: [PATCH 0148/1588] PYTHON-3065 Ignore SRV polling update when topology is discovered to be a replica set (#943) --- pymongo/topology.py | 2 ++ pymongo/topology_description.py | 1 + 2 files changed, 3 insertions(+) diff --git a/pymongo/topology.py b/pymongo/topology.py index 03e0d4ee17..1d4c9a86a8 100644 --- a/pymongo/topology.py +++ b/pymongo/topology.py @@ -358,6 +358,8 @@ def _process_srv_update(self, seedlist): Hold the lock when calling this. """ td_old = self._description + if td_old.topology_type not in SRV_POLLING_TOPOLOGIES: + return self._description = _updated_topology_description_srv_polling(self._description, seedlist) self._update_servers() diff --git a/pymongo/topology_description.py b/pymongo/topology_description.py index b3dd60680f..b32a86e2d7 100644 --- a/pymongo/topology_description.py +++ b/pymongo/topology_description.py @@ -477,6 +477,7 @@ def _updated_topology_description_srv_polling(topology_description, seedlist): - `seedlist`: a list of new seeds new ServerDescription that resulted from a hello call """ + assert topology_description.topology_type in SRV_POLLING_TOPOLOGIES # Create a copy of the server descriptions. 
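The PYTHON-3065 guard above only applies to topology types eligible for SRV polling; a simplified sketch of the rule, where ``SRV_POLLING_TOPOLOGIES`` is assumed to be the Unknown and Sharded types as in pymongo's internals::

    from pymongo.topology_description import TOPOLOGY_TYPE

    SRV_POLLING_TOPOLOGIES = (TOPOLOGY_TYPE.Unknown, TOPOLOGY_TYPE.Sharded)

    def process_srv_update(topology_type, seedlist):
        # Once a replica set is discovered, SRV record changes are ignored.
        if topology_type not in SRV_POLLING_TOPOLOGIES:
            return None
        return seedlist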
sds = topology_description.server_descriptions() From a1c33e0b84743b26a8f44a5fd67a60304b0c92cd Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Mon, 9 May 2022 15:37:48 -0700 Subject: [PATCH 0149/1588] PYTHON-3257 Fix "connection pool paused" errors in child after fork (#944) --- pymongo/pool.py | 4 ++-- pymongo/topology.py | 30 +++++++++++++++++------------- 2 files changed, 19 insertions(+), 15 deletions(-) diff --git a/pymongo/pool.py b/pymongo/pool.py index 13d0e78d1e..e2f9698212 100644 --- a/pymongo/pool.py +++ b/pymongo/pool.py @@ -1420,7 +1420,7 @@ def _get_socket(self): # See test.test_client:TestClient.test_fork for an example of # what could go wrong otherwise if self.pid != os.getpid(): - self.reset() + self.reset_without_pause() if self.closed: if self.enabled_for_cmap: @@ -1526,7 +1526,7 @@ def return_socket(self, sock_info): if self.enabled_for_cmap: listeners.publish_connection_checked_in(self.address, sock_info.id) if self.pid != os.getpid(): - self.reset() + self.reset_without_pause() else: if self.closed: sock_info.close_socket(ConnectionClosedReason.POOL_CLOSED) diff --git a/pymongo/topology.py b/pymongo/topology.py index 1d4c9a86a8..4b5ff87bb5 100644 --- a/pymongo/topology.py +++ b/pymongo/topology.py @@ -169,20 +169,24 @@ def open(self): forking. """ + pid = os.getpid() if self._pid is None: - self._pid = os.getpid() - else: - if os.getpid() != self._pid: - warnings.warn( - "MongoClient opened before fork. Create MongoClient only " - "after forking. See PyMongo's documentation for details: " - "https://pymongo.readthedocs.io/en/stable/faq.html#" - "is-pymongo-fork-safe" - ) - with self._lock: - # Reset the session pool to avoid duplicate sessions in - # the child process. - self._session_pool.reset() + self._pid = pid + elif pid != self._pid: + self._pid = pid + warnings.warn( + "MongoClient opened before fork. Create MongoClient only " + "after forking. See PyMongo's documentation for details: " + "https://pymongo.readthedocs.io/en/stable/faq.html#" + "is-pymongo-fork-safe" + ) + with self._lock: + # Close servers and clear the pools. + for server in self._servers.values(): + server.close() + # Reset the session pool to avoid duplicate sessions in + # the child process. 
+ self._session_pool.reset() with self._lock: self._ensure_opened() From a6241973385fbe59eb86d433a952e06d91f5ff79 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Tue, 10 May 2022 10:29:48 -0700 Subject: [PATCH 0150/1588] PYTHON-3260 Improve test_transaction_starts_with_batched_write and test_continuous_network_errors (#945) --- test/test_client.py | 7 +++---- test/test_transactions.py | 10 ++++++---- 2 files changed, 9 insertions(+), 8 deletions(-) diff --git a/test/test_client.py b/test/test_client.py index 59a8324d6e..3630cec06c 100644 --- a/test/test_client.py +++ b/test/test_client.py @@ -1596,7 +1596,6 @@ def test_direct_connection(self): with self.assertRaises(ConfigurationError): MongoClient(["host1", "host2"], directConnection=True) - @unittest.skipIf(sys.platform.startswith("java"), "Jython does not support gc.get_objects") @unittest.skipIf("PyPy" in sys.version, "PYTHON-2927 fails often on PyPy") def test_continuous_network_errors(self): def server_description_count(): @@ -1612,7 +1611,7 @@ def server_description_count(): gc.collect() with client_knobs(min_heartbeat_interval=0.003): client = MongoClient( - "invalid:27017", heartbeatFrequencyMS=3, serverSelectionTimeoutMS=100 + "invalid:27017", heartbeatFrequencyMS=3, serverSelectionTimeoutMS=150 ) initial_count = server_description_count() self.addCleanup(client.close) @@ -1622,8 +1621,8 @@ def server_description_count(): final_count = server_description_count() # If a bug like PYTHON-2433 is reintroduced then too many # ServerDescriptions will be kept alive and this test will fail: - # AssertionError: 4 != 22 within 5 delta (18 difference) - self.assertAlmostEqual(initial_count, final_count, delta=10) + # AssertionError: 19 != 46 within 15 delta (27 difference) + self.assertAlmostEqual(initial_count, final_count, delta=15) @client_context.require_failCommand_fail_point def test_network_error_message(self): diff --git a/test/test_transactions.py b/test/test_transactions.py index 34dbbba34b..136a19baaa 100644 --- a/test/test_transactions.py +++ b/test/test_transactions.py @@ -30,6 +30,8 @@ ) from test.utils_spec_runner import SpecRunner +from bson import encode +from bson.raw_bson import RawBSONDocument from gridfs import GridFS, GridFSBucket from pymongo import WriteConcern, client_session from pymongo.client_session import TransactionOptions @@ -330,14 +332,14 @@ def test_transaction_starts_with_batched_write(self): listener.reset() self.addCleanup(client.close) self.addCleanup(coll.drop) - large_str = "\0" * (10 * 1024 * 1024) - ops = [InsertOne({"a": large_str}) for _ in range(10)] + large_str = "\0" * (1 * 1024 * 1024) + ops = [InsertOne(RawBSONDocument(encode({"a": large_str}))) for _ in range(48)] with client.start_session() as session: with session.start_transaction(): coll.bulk_write(ops, session=session) # Assert commands were constructed properly. 
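The PYTHON-3257 fork handling above still treats a pre-fork client as something to warn about; the supported pattern is to construct the client in the child. A POSIX-only sketch, assuming a local mongod::

    import os

    from pymongo import MongoClient

    pid = os.fork()
    if pid == 0:
        client = MongoClient()  # created after fork: no shared sockets or locks
        print(client.admin.command("ping"))
        os._exit(0)
    os.waitpid(pid, 0)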
self.assertEqual( - ["insert", "insert", "insert", "commitTransaction"], listener.started_command_names() + ["insert", "insert", "commitTransaction"], listener.started_command_names() ) first_cmd = listener.results["started"][0].command self.assertTrue(first_cmd["startTransaction"]) @@ -347,7 +349,7 @@ def test_transaction_starts_with_batched_write(self): self.assertNotIn("startTransaction", event.command) self.assertEqual(lsid, event.command["lsid"]) self.assertEqual(txn_number, event.command["txnNumber"]) - self.assertEqual(10, coll.count_documents({})) + self.assertEqual(48, coll.count_documents({})) class PatchSessionTimeout(object): From a7579b02d24ad4af1b984317383bb8d68d1973fe Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Tue, 10 May 2022 11:05:59 -0700 Subject: [PATCH 0151/1588] PYTHON-3259 Improve migration guide for loads/JSONOptions/tz_aware (#946) --- bson/json_util.py | 5 +++++ doc/changelog.rst | 6 +++--- doc/migrate-to-pymongo4.rst | 24 +++++++++++++++++++++--- 3 files changed, 29 insertions(+), 6 deletions(-) diff --git a/bson/json_util.py b/bson/json_util.py index 99dbc62609..369c3d5f4a 100644 --- a/bson/json_util.py +++ b/bson/json_util.py @@ -438,6 +438,11 @@ def loads(s: str, *args: Any, **kwargs: Any) -> Any: decoding of MongoDB Extended JSON types. Defaults to :const:`DEFAULT_JSON_OPTIONS`. + .. versionchanged:: 4.0 + Now loads :class:`datetime.datetime` instances as naive by default. To + load timezone aware instances utilize the `json_options` parameter. + See :ref:`tz_aware_default_change` for an example. + .. versionchanged:: 3.5 Parses Relaxed and Canonical Extended JSON as well as PyMongo's legacy format. Now raises ``TypeError`` or ``ValueError`` when parsing JSON diff --git a/doc/changelog.rst b/doc/changelog.rst index 7f002fb470..5538467d0c 100644 --- a/doc/changelog.rst +++ b/doc/changelog.rst @@ -256,9 +256,9 @@ Breaking Changes in 4.0 :class:`~bson.dbref.DBRef`. - The "tls" install extra is no longer necessary or supported and will be ignored by pip. -- ``tz_aware``, an argument for :class:`~bson.json_util.JSONOptions`, - now defaults to ``False`` instead of ``True``. ``json_util.loads`` now - decodes datetime as naive by default. +- The ``tz_aware`` argument to :class:`~bson.json_util.JSONOptions` + now defaults to ``False`` instead of ``True``. :meth:`bson.json_util.loads` now + decodes datetime as naive by default. See :ref:`tz_aware_default_change` for more info. - ``directConnection`` URI option and keyword argument to :class:`~pymongo.mongo_client.MongoClient` defaults to ``False`` instead of ``None``, allowing for the automatic discovery of replica sets. This means that if you diff --git a/doc/migrate-to-pymongo4.rst b/doc/migrate-to-pymongo4.rst index d70d7b8a2c..eca479c7c7 100644 --- a/doc/migrate-to-pymongo4.rst +++ b/doc/migrate-to-pymongo4.rst @@ -253,12 +253,30 @@ can be changed to this:: client.options.pool_options.min_pool_size client.options.pool_options.max_idle_time_seconds +.. _tz_aware_default_change: + ``tz_aware`` defaults to ``False`` .................................. -``tz_aware``, an argument for :class:`~bson.json_util.JSONOptions`, -now defaults to ``False`` instead of ``True``. ``json_util.loads`` now -decodes datetime as naive by default. +The ``tz_aware`` argument to :class:`~bson.json_util.JSONOptions` +now defaults to ``False`` instead of ``True``. 
:meth:`bson.json_util.loads` +now decodes datetime as naive by default:: + + >>> from bson import json_util + >>> s = '{"dt": {"$date": "2022-05-09T17:54:00Z"}}' + >>> json_util.loads(s) + {'dt': datetime.datetime(2022, 5, 9, 17, 54)} + +To retain the PyMongo 3 behavior set ``tz_aware=True``, for example:: + + >>> from bson import json_util + >>> opts = json_util.JSONOptions(tz_aware=True) + >>> s = '{"dt": {"$date": "2022-05-09T17:54:00Z"}}' + >>> json_util.loads(s, json_options=opts) + {'dt': datetime.datetime(2022, 5, 9, 17, 54, tzinfo=)} + +This change was made to match the default behavior of +:class:`~bson.codec_options.CodecOptions` and :class:`bson.decode`. MongoClient cannot execute operations after ``close()`` ....................................................... From e02eb287e896ff0e301e67648ba9398f6fe5d799 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Thu, 12 May 2022 15:42:06 -0500 Subject: [PATCH 0152/1588] PYTHON-3254 Bump maxWireVersion for MongoDB 6.0 (#948) --- pymongo/common.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pymongo/common.py b/pymongo/common.py index 5a6ffbd369..552faf94a2 100644 --- a/pymongo/common.py +++ b/pymongo/common.py @@ -63,7 +63,7 @@ # What this version of PyMongo supports. MIN_SUPPORTED_SERVER_VERSION = "3.6" MIN_SUPPORTED_WIRE_VERSION = 6 -MAX_SUPPORTED_WIRE_VERSION = 15 +MAX_SUPPORTED_WIRE_VERSION = 17 # Frequency to call hello on servers, in seconds. HEARTBEAT_FREQUENCY = 10 From 89d3fd035519055c6fac963bd3047f661b1c9219 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Mon, 23 May 2022 12:44:44 -0700 Subject: [PATCH 0153/1588] PYTHON-3279 Don't link check flakey wiki.centos.org (#951) --- doc/changelog.rst | 4 ++-- doc/conf.py | 3 ++- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/doc/changelog.rst b/doc/changelog.rst index 5538467d0c..f1085c4bff 100644 --- a/doc/changelog.rst +++ b/doc/changelog.rst @@ -755,8 +755,8 @@ Changes in Version 3.8.0 .. warning:: PyMongo no longer supports Python 2.6. RHEL 6 users should install Python 2.7 or newer from `Red Hat Software Collections - `_. CentOS 6 users should install Python - 2.7 or newer from `SCL + `_. + CentOS 6 users should install Python 2.7 or newer from `SCL `_ .. warning:: PyMongo no longer supports PyPy3 versions older than 3.5. Users diff --git a/doc/conf.py b/doc/conf.py index a5c5be2694..7b1580de32 100644 --- a/doc/conf.py +++ b/doc/conf.py @@ -84,7 +84,8 @@ # The anchors on the rendered markdown page are created after the fact, # so this link results in a 404. 
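The MAX_SUPPORTED_WIRE_VERSION bump in PYTHON-3254 can be observed from the handshake; a quick check against a running server (on older servers the equivalent command is ``ismaster``)::

    from pymongo import MongoClient

    client = MongoClient()
    hello = client.admin.command("hello")
    print(hello["minWireVersion"], hello["maxWireVersion"])  # e.g. 0 17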
linkcheck_ignore = [ - "https://github.com/mongodb/specifications/blob/master/source/server-discovery-and-monitoring/server-monitoring.rst#requesting-an-immediate-check" + "https://github.com/mongodb/specifications/blob/master/source/server-discovery-and-monitoring/server-monitoring.rst#requesting-an-immediate-check", + r"https://wiki.centos.org/[\w/]*", ] # -- Options for extensions ---------------------------------------------------- From 9f191d6bb35ae3252dec267256c45694cd373685 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Wed, 25 May 2022 05:55:36 -0500 Subject: [PATCH 0154/1588] PYTHON-3283 Remove Generic Typing from the ClientSession Class (#952) --- pymongo/client_session.py | 12 +++++------- pymongo/mongo_client.py | 4 ++-- test/test_mypy.py | 9 +++++++++ 3 files changed, 16 insertions(+), 9 deletions(-) diff --git a/pymongo/client_session.py b/pymongo/client_session.py index a0c269cb8d..7d70eb8f19 100644 --- a/pymongo/client_session.py +++ b/pymongo/client_session.py @@ -140,7 +140,6 @@ Any, Callable, ContextManager, - Generic, Mapping, NoReturn, Optional, @@ -164,7 +163,6 @@ from pymongo.read_concern import ReadConcern from pymongo.read_preferences import ReadPreference, _ServerMode from pymongo.server_type import SERVER_TYPE -from pymongo.typings import _DocumentType from pymongo.write_concern import WriteConcern @@ -461,7 +459,7 @@ def _within_time_limit(start_time): from pymongo.mongo_client import MongoClient -class ClientSession(Generic[_DocumentType]): +class ClientSession: """A session for ordering sequential operations. :class:`ClientSession` instances are **not thread-safe or fork-safe**. @@ -476,13 +474,13 @@ class ClientSession(Generic[_DocumentType]): def __init__( self, - client: "MongoClient[_DocumentType]", + client: "MongoClient", server_session: Any, options: SessionOptions, implicit: bool, ) -> None: # A MongoClient, a _ServerSession, a SessionOptions, and a set. - self._client: MongoClient[_DocumentType] = client + self._client: MongoClient = client self._server_session = server_session self._options = options self._cluster_time = None @@ -515,14 +513,14 @@ def _check_ended(self): if self._server_session is None: raise InvalidOperation("Cannot use ended session") - def __enter__(self) -> "ClientSession[_DocumentType]": + def __enter__(self) -> "ClientSession": return self def __exit__(self, exc_type: Any, exc_val: Any, exc_tb: Any) -> None: self._end_session(lock=True) @property - def client(self) -> "MongoClient[_DocumentType]": + def client(self) -> "MongoClient": """The :class:`~pymongo.mongo_client.MongoClient` this session was created from. """ diff --git a/pymongo/mongo_client.py b/pymongo/mongo_client.py index 6601c18aca..e1aa80e2f9 100644 --- a/pymongo/mongo_client.py +++ b/pymongo/mongo_client.py @@ -1630,7 +1630,7 @@ def start_session( causal_consistency: Optional[bool] = None, default_transaction_options: Optional[client_session.TransactionOptions] = None, snapshot: Optional[bool] = False, - ) -> client_session.ClientSession[_DocumentType]: + ) -> client_session.ClientSession: """Start a logical session. 
This method takes the same parameters as @@ -1681,7 +1681,7 @@ def _ensure_session(self, session=None): @contextlib.contextmanager def _tmp_session( self, session: Optional[client_session.ClientSession], close: bool = True - ) -> "Generator[Optional[client_session.ClientSession[Any]], None, None]": + ) -> "Generator[Optional[client_session.ClientSession], None, None]": """If provided session is None, lend a temporary session.""" if session is not None: if not isinstance(session, client_session.ClientSession): diff --git a/test/test_mypy.py b/test/test_mypy.py index 07af61ed36..dfdcefbdb3 100644 --- a/test/test_mypy.py +++ b/test/test_mypy.py @@ -43,6 +43,7 @@ class Movie(TypedDict): # type: ignore[misc] from bson import CodecOptions, decode, decode_all, decode_file_iter, decode_iter, encode from bson.raw_bson import RawBSONDocument from bson.son import SON +from pymongo import ASCENDING from pymongo.collection import Collection from pymongo.mongo_client import MongoClient from pymongo.operations import InsertOne @@ -313,6 +314,14 @@ def test_son_document_type(self) -> None: def test_son_document_type_runtime(self) -> None: client = MongoClient(document_class=SON[str, Any], connect=False) + @only_type_check + def test_create_index(self) -> None: + client: MongoClient[Dict[str, str]] = MongoClient("test") + db = client.test + with client.start_session() as session: + index = db.test.create_index([("user_id", ASCENDING)], unique=True, session=session) + assert isinstance(index, str) + class TestCommandDocumentType(unittest.TestCase): @only_type_check From 78476d0217289e5a3fafb5c599a8a88558d87d92 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Thu, 26 May 2022 15:14:59 -0700 Subject: [PATCH 0155/1588] PYTHON-3187 Avoid tight poll() loop on pyopenssl connections (#953) --- pymongo/pool.py | 33 +++++++-------------------------- pymongo/pyopenssl_context.py | 15 +++++++++++++-- pymongo/ssl_support.py | 14 +++++--------- test/test_encryption.py | 10 +++------- test/test_ssl.py | 7 +------ 5 files changed, 29 insertions(+), 50 deletions(-) diff --git a/pymongo/pool.py b/pymongo/pool.py index e2f9698212..d68ba238f2 100644 --- a/pymongo/pool.py +++ b/pymongo/pool.py @@ -15,7 +15,6 @@ import collections import contextlib import copy -import ipaddress import os import platform import socket @@ -61,20 +60,7 @@ from pymongo.server_api import _add_to_command from pymongo.server_type import SERVER_TYPE from pymongo.socket_checker import SocketChecker -from pymongo.ssl_support import HAS_SNI as _HAVE_SNI -from pymongo.ssl_support import IPADDR_SAFE as _IPADDR_SAFE -from pymongo.ssl_support import SSLError as _SSLError - - -# For SNI support. According to RFC6066, section 3, IPv4 and IPv6 literals are -# not permitted for SNI hostname. -def is_ip_address(address): - try: - ipaddress.ip_address(address) - return True - except (ValueError, UnicodeError): # noqa: B014 - return False - +from pymongo.ssl_support import HAS_SNI, SSLError try: from fcntl import F_GETFD, F_SETFD, FD_CLOEXEC, fcntl @@ -263,7 +249,7 @@ def _raise_connection_failure( msg = msg_prefix + msg if isinstance(error, socket.timeout): raise NetworkTimeout(msg) from error - elif isinstance(error, _SSLError) and "timed out" in str(error): + elif isinstance(error, SSLError) and "timed out" in str(error): # Eventlet does not distinguish TLS network timeouts from other # SSLErrors (https://github.com/eventlet/eventlet/issues/692). 
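After PYTHON-3283, ``ClientSession`` is annotated without a type parameter even when the client itself is parameterized; a mypy-oriented sketch, assuming a local server for ``create_index``::

    from typing import Any, Dict

    from pymongo import ASCENDING, MongoClient
    from pymongo.client_session import ClientSession

    client: "MongoClient[Dict[str, Any]]" = MongoClient()
    with client.start_session() as session:
        s: ClientSession = session  # no ClientSession[...] parameter needed
        client.test.test.create_index([("user_id", ASCENDING)], session=session)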
# Luckily, we can work around this limitation because the phrase @@ -924,7 +910,7 @@ def _raise_connection_failure(self, error): reason = ConnectionClosedReason.ERROR self.close_socket(reason) # SSLError from PyOpenSSL inherits directly from Exception. - if isinstance(error, (IOError, OSError, _SSLError)): + if isinstance(error, (IOError, OSError, SSLError)): _raise_connection_failure(self.address, error) else: raise @@ -1024,14 +1010,9 @@ def _configured_socket(address, options): if ssl_context is not None: host = address[0] try: - # According to RFC6066, section 3, IPv4 and IPv6 literals are - # not permitted for SNI hostname. - # Previous to Python 3.7 wrap_socket would blindly pass - # IP addresses as SNI hostname. - # https://bugs.python.org/issue32185 # We have to pass hostname / ip address to wrap_socket # to use SSLContext.check_hostname. - if _HAVE_SNI and (not is_ip_address(host) or _IPADDR_SAFE): + if HAS_SNI: sock = ssl_context.wrap_socket(sock, server_hostname=host) else: sock = ssl_context.wrap_socket(sock) @@ -1040,7 +1021,7 @@ def _configured_socket(address, options): # Raise _CertificateError directly like we do after match_hostname # below. raise - except (IOError, OSError, _SSLError) as exc: # noqa: B014 + except (IOError, OSError, SSLError) as exc: # noqa: B014 sock.close() # We raise AutoReconnect for transient and permanent SSL handshake # failures alike. Permanent handshake failures, like protocol @@ -1048,7 +1029,7 @@ def _configured_socket(address, options): _raise_connection_failure(address, exc, "SSL handshake failed: ") if ( ssl_context.verify_mode - and not getattr(ssl_context, "check_hostname", False) + and not ssl_context.check_hostname and not options.tls_allow_invalid_hostnames ): try: @@ -1336,7 +1317,7 @@ def connect(self): self.address, conn_id, ConnectionClosedReason.ERROR ) - if isinstance(error, (IOError, OSError, _SSLError)): + if isinstance(error, (IOError, OSError, SSLError)): _raise_connection_failure(self.address, error) raise diff --git a/pymongo/pyopenssl_context.py b/pymongo/pyopenssl_context.py index 3736a4f381..1a57ff4f2b 100644 --- a/pymongo/pyopenssl_context.py +++ b/pymongo/pyopenssl_context.py @@ -70,6 +70,8 @@ _REVERSE_VERIFY_MAP = dict((value, key) for key, value in _VERIFY_MAP.items()) +# For SNI support. According to RFC6066, section 3, IPv4 and IPv6 literals are +# not permitted for SNI hostname. 
def _is_ip_address(address): try: _ip_address(address) @@ -104,8 +106,17 @@ def _call(self, call, *args, **kwargs): while True: try: return call(*args, **kwargs) - except _RETRY_ERRORS: - self.socket_checker.select(self, True, True, timeout) + except _RETRY_ERRORS as exc: + if isinstance(exc, _SSL.WantReadError): + want_read = True + want_write = False + elif isinstance(exc, _SSL.WantWriteError): + want_read = False + want_write = True + else: + want_read = True + want_write = True + self.socket_checker.select(self, want_read, want_write, timeout) if timeout and _time.monotonic() - start > timeout: raise _socket.timeout("timed out") continue diff --git a/pymongo/ssl_support.py b/pymongo/ssl_support.py index 6adf629ad3..d1381ce0e4 100644 --- a/pymongo/ssl_support.py +++ b/pymongo/ssl_support.py @@ -14,8 +14,6 @@ """Support for SSL in PyMongo.""" -import sys - from pymongo.errors import ConfigurationError HAVE_SSL = True @@ -38,7 +36,7 @@ from ssl import CERT_NONE, CERT_REQUIRED HAS_SNI = _ssl.HAS_SNI - IPADDR_SAFE = _ssl.IS_PYOPENSSL or sys.version_info[:2] >= (3, 7) + IPADDR_SAFE = True SSLError = _ssl.SSLError def get_ssl_context( @@ -53,12 +51,10 @@ def get_ssl_context( """Create and return an SSLContext object.""" verify_mode = CERT_NONE if allow_invalid_certificates else CERT_REQUIRED ctx = _ssl.SSLContext(_ssl.PROTOCOL_SSLv23) - # SSLContext.check_hostname was added in CPython 3.4. - if hasattr(ctx, "check_hostname"): - if verify_mode != CERT_NONE: - ctx.check_hostname = not allow_invalid_hostnames - else: - ctx.check_hostname = False + if verify_mode != CERT_NONE: + ctx.check_hostname = not allow_invalid_hostnames + else: + ctx.check_hostname = False if hasattr(ctx, "check_ocsp_endpoint"): ctx.check_ocsp_endpoint = not disable_ocsp_endpoint_check if hasattr(ctx, "options"): diff --git a/test/test_encryption.py b/test/test_encryption.py index 366c406b03..c0d278d577 100644 --- a/test/test_encryption.py +++ b/test/test_encryption.py @@ -145,13 +145,10 @@ def test_init_kms_tls_options(self): self.assertEqual(opts._kms_ssl_contexts, {}) opts = AutoEncryptionOpts({}, "k.d", kms_tls_options={"kmip": {"tls": True}, "aws": {}}) ctx = opts._kms_ssl_contexts["kmip"] - # On < 3.7 we check hostnames manually. 
- if sys.version_info[:2] >= (3, 7): - self.assertEqual(ctx.check_hostname, True) + self.assertEqual(ctx.check_hostname, True) self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED) ctx = opts._kms_ssl_contexts["aws"] - if sys.version_info[:2] >= (3, 7): - self.assertEqual(ctx.check_hostname, True) + self.assertEqual(ctx.check_hostname, True) self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED) opts = AutoEncryptionOpts( {}, @@ -159,8 +156,7 @@ def test_init_kms_tls_options(self): kms_tls_options={"kmip": {"tlsCAFile": CA_PEM, "tlsCertificateKeyFile": CLIENT_PEM}}, ) ctx = opts._kms_ssl_contexts["kmip"] - if sys.version_info[:2] >= (3, 7): - self.assertEqual(ctx.check_hostname, True) + self.assertEqual(ctx.check_hostname, True) self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED) diff --git a/test/test_ssl.py b/test/test_ssl.py index 0c45275fac..9b58c2251b 100644 --- a/test/test_ssl.py +++ b/test/test_ssl.py @@ -65,8 +65,6 @@ CRL_PEM = os.path.join(CERT_PATH, "crl.pem") MONGODB_X509_USERNAME = "C=US,ST=New York,L=New York City,O=MDB,OU=Drivers,CN=client" -_PY37PLUS = sys.version_info[:2] >= (3, 7) - # To fully test this start a mongod instance (built with SSL support) like so: # mongod --dbpath /path/to/data/directory --sslOnNormalPorts \ # --sslPEMKeyFile /path/to/pymongo/test/certificates/server.pem \ @@ -306,10 +304,7 @@ def test_cert_ssl_validation_hostname_matching(self): ctx = get_ssl_context(None, None, None, None, False, True, False) self.assertFalse(ctx.check_hostname) ctx = get_ssl_context(None, None, None, None, False, False, False) - if _PY37PLUS or _HAVE_PYOPENSSL: - self.assertTrue(ctx.check_hostname) - else: - self.assertFalse(ctx.check_hostname) + self.assertTrue(ctx.check_hostname) response = self.client.admin.command(HelloCompat.LEGACY_CMD) From f4fc742ff38110aa41f2a46b267319403d89f3b1 Mon Sep 17 00:00:00 2001 From: Julius Park Date: Fri, 27 May 2022 12:34:22 -0700 Subject: [PATCH 0156/1588] PYTHON-3276 [pymongo] FLE 1.0 shared library (#947) --- .evergreen/config.yml | 71 +++++++++++++++++++++++++---------- .evergreen/run-tests.sh | 13 ++++++- pymongo/encryption.py | 9 ++++- pymongo/encryption_options.py | 13 ++++++- test/test_encryption.py | 16 ++++++++ 5 files changed, 99 insertions(+), 23 deletions(-) diff --git a/.evergreen/config.yml b/.evergreen/config.yml index 3f8955f40e..c12d4167b7 100644 --- a/.evergreen/config.yml +++ b/.evergreen/config.yml @@ -450,6 +450,9 @@ functions: export LIBMONGOCRYPT_URL="${libmongocrypt_url}" export TEST_ENCRYPTION=1 fi + if [ -n "${test_csfle}" ]; then + export TEST_CSFLE=1 + fi if [ -n "${test_pyopenssl}" ]; then export TEST_PYOPENSSL=1 fi @@ -1232,7 +1235,6 @@ tasks: VERSION: "5.0" TOPOLOGY: "sharded_cluster" - func: "run tests" - - name: "test-6.0-standalone" tags: ["6.0", "standalone"] commands: @@ -2161,6 +2163,14 @@ axes: variables: test_encryption: true batchtime: 10080 # 7 days + - id: "encryption_with_csfle" + display_name: "Encryption with CSFLE" + tags: ["encryption_tag", "csfle"] + variables: + test_encryption: true + test_csfle: true + batchtime: 10080 # 7 days + # Run pyopenssl tests? - id: pyopenssl @@ -2229,21 +2239,6 @@ buildvariants: - ".4.0" - ".3.6" -- matrix_name: "tests-all-encryption" - matrix_spec: - platform: - # OSes that support versions of MongoDB>=2.6 with SSL. 
- - awslinux - auth-ssl: "*" - encryption: "*" - display_name: "Encryption ${platform} ${auth-ssl}" - tasks: - - ".6.0" - - ".5.0" - - ".4.4" - - ".4.2" - - ".4.0" - - matrix_name: "tests-archlinux" matrix_spec: platform: @@ -2297,14 +2292,27 @@ buildvariants: auth: "auth" ssl: "nossl" encryption: "*" - display_name: "Encryption ${platform} ${auth} ${ssl}" + display_name: "${encryption} ${platform} ${auth} ${ssl}" tasks: &encryption-server-versions + - ".rapid" - ".latest" - ".6.0" - ".5.0" - ".4.4" - ".4.2" - ".4.0" + rules: &encryption-exclude-rules + - if: + platform: "*" + auth: "*" + ssl: "*" + encryption: [ "encryption_with_csfle" ] + then: + remove_tasks: + - ".5.0" + - ".4.4" + - ".4.2" + - ".4.0" # Test one server version with zSeries, POWER8, and ARM. - matrix_name: "test-different-cpu-architectures" @@ -2385,8 +2393,21 @@ buildvariants: # dependency tests-python-version-rhel62-test-encryption_.../test-2.6-standalone is not present in the project config # coverage: "*" encryption: "*" - display_name: "Encryption ${python-version} ${platform} ${auth-ssl}" + display_name: "${encryption} ${python-version} ${platform} ${auth-ssl}" tasks: *encryption-server-versions + rules: + - if: + platform: "*" + python-version: "*" + auth-ssl: "*" + encryption: [ "encryption_with_csfle" ] + then: + remove_tasks: + - ".5.0" + - ".4.4" + - ".4.2" + - ".4.0" + - matrix_name: "tests-python-version-ubuntu18-without-c-extensions" matrix_spec: @@ -2481,8 +2502,20 @@ buildvariants: python-version-windows: "*" auth-ssl: "*" encryption: "*" - display_name: "Encryption ${platform} ${python-version-windows} ${auth-ssl}" + display_name: "${encryption} ${platform} ${python-version-windows} ${auth-ssl}" tasks: *encryption-server-versions + rules: + - if: + platform: "*" + python-version-windows: "*" + auth-ssl: "*" + encryption: [ "encryption_with_csfle" ] + then: + remove_tasks: + - ".5.0" + - ".4.4" + - ".4.2" + - ".4.0" # Storage engine tests on Ubuntu 18.04 (x86_64) with Python 3.7. - matrix_name: "tests-storage-engines" diff --git a/.evergreen/run-tests.sh b/.evergreen/run-tests.sh index 4a48b4a33b..96f42fa517 100755 --- a/.evergreen/run-tests.sh +++ b/.evergreen/run-tests.sh @@ -11,6 +11,7 @@ set -o errexit # Exit the script with error if any of the commands fail # COVERAGE If non-empty, run the test suite with coverage. # TEST_ENCRYPTION If non-empty, install pymongocrypt. # LIBMONGOCRYPT_URL The URL to download libmongocrypt. +# TEST_CSFLE If non-empty, install CSFLE if [ -n "${SET_XTRACE_ON}" ]; then set -o xtrace @@ -27,6 +28,7 @@ COVERAGE=${COVERAGE:-} COMPRESSORS=${COMPRESSORS:-} MONGODB_API_VERSION=${MONGODB_API_VERSION:-} TEST_ENCRYPTION=${TEST_ENCRYPTION:-} +TEST_CSFLE=${TEST_CSFLE:-} LIBMONGOCRYPT_URL=${LIBMONGOCRYPT_URL:-} DATA_LAKE=${DATA_LAKE:-} @@ -153,7 +155,16 @@ if [ -z "$DATA_LAKE" ]; then else TEST_ARGS="-s test.test_data_lake" fi - +if [ -z $TEST_CSFLE ]; then + echo "CSFLE not being tested" +else + $PYTHON $DRIVERS_TOOLS/.evergreen/mongodl.py --component csfle \ + --version latest --out ../csfle/ + export DYLD_FALLBACK_LIBRARY_PATH=../csfle/lib/:$DYLD_FALLBACK_LIBRARY_PATH + export LD_LIBRARY_PATH=../csfle/lib:$LD_LIBRARY_PATH + export PATH=../csfle/bin:$PATH + TEST_ARGS="-s test.test_encryption" +fi # Don't download unittest-xml-reporting from pypi, which often fails. if $PYTHON -c "import xmlrunner"; then # The xunit output dir must be a Python style absolute path. 
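The `_call` change in pyopenssl_context.py above is the heart of the PYTHON-3187 fix: the old loop polled for both readability and writability on every retryable error, so an operation waiting on only one direction could spin in a tight poll() loop. Below is a minimal standalone sketch of the new pattern. It assumes pyOpenSSL is installed and substitutes the standard library's select.select() for pymongo's SocketChecker; the helper name and signature are illustrative, not the driver's exact internals.

    import socket
    import time
    from select import select

    from OpenSSL import SSL


    def call_with_retry(call, sock, timeout, *args, **kwargs):
        # Retry a pyOpenSSL operation, polling only for the readiness
        # condition the library actually reported.
        start = time.monotonic()
        while True:
            try:
                return call(*args, **kwargs)
            except SSL.WantReadError:
                # Wait until the socket is readable before retrying.
                select([sock], [], [], timeout)
            except SSL.WantWriteError:
                # Wait until the socket is writable before retrying.
                select([], [sock], [], timeout)
            except SSL.WantX509LookupError:
                # No direction to prefer, so poll both ways, mirroring the
                # diff's else branch.
                select([sock], [sock], [], timeout)
            if timeout and time.monotonic() - start > timeout:
                raise socket.timeout("timed out")

Selecting on only the needed direction means the thread sleeps until the socket can actually make progress, instead of waking immediately because the other direction happens to be ready.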
diff --git a/pymongo/encryption.py b/pymongo/encryption.py
index 1e06f7062d..1a29131890 100644
--- a/pymongo/encryption.py
+++ b/pymongo/encryption.py
@@ -296,7 +296,14 @@ def _get_internal_client(encrypter, mongo_client):
         io_callbacks = _EncryptionIO(metadata_client, key_vault_coll, mongocryptd_client, opts)
         self._auto_encrypter = AutoEncrypter(
-            io_callbacks, MongoCryptOptions(opts._kms_providers, schema_map)
+            io_callbacks,
+            MongoCryptOptions(
+                opts._kms_providers,
+                schema_map,
+                csfle_path=opts._csfle_path,
+                csfle_required=opts._csfle_required,
+                bypass_encryption=opts._bypass_auto_encryption,
+            ),
         )
         self._closed = False
diff --git a/pymongo/encryption_options.py b/pymongo/encryption_options.py
index 2ac12bc4b4..0ce828ae4c 100644
--- a/pymongo/encryption_options.py
+++ b/pymongo/encryption_options.py
@@ -39,12 +39,14 @@ def __init__(
         key_vault_namespace: str,
         key_vault_client: Optional["MongoClient"] = None,
         schema_map: Optional[Mapping[str, Any]] = None,
-        bypass_auto_encryption: Optional[bool] = False,
+        bypass_auto_encryption: bool = False,
         mongocryptd_uri: str = "mongodb://localhost:27020",
         mongocryptd_bypass_spawn: bool = False,
         mongocryptd_spawn_path: str = "mongocryptd",
         mongocryptd_spawn_args: Optional[List[str]] = None,
         kms_tls_options: Optional[Mapping[str, Any]] = None,
+        csfle_path: Optional[str] = None,
+        csfle_required: bool = False,
     ) -> None:
         """Options to configure automatic client-side field level encryption.
@@ -140,6 +142,12 @@ def __init__(
           Or to supply a client certificate::

             kms_tls_options={'kmip': {'tlsCertificateKeyFile': 'client.pem'}}

+        - `csfle_path` (optional): Override the path used to load the CSFLE library.
+        - `csfle_required` (optional): If ``True``, refuse to continue encryption without a CSFLE
+          library.
+
+        .. versionchanged:: 4.2
+           Added the `csfle_path` and `csfle_required` parameters.

        .. versionchanged:: 4.0
           Added the `kms_tls_options` parameter and the "kmip" KMS provider.
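The two new options are easiest to see in use. Here is a hedged sketch, assuming pymongocrypt and a CSFLE shared library are available; the "local" master key is a throwaway and the library path is a placeholder, not a real location.

    import os

    from pymongo import MongoClient
    from pymongo.encryption_options import AutoEncryptionOpts

    # Throwaway "local" KMS master key (96 random bytes); a real deployment
    # would use a managed KMS provider instead.
    kms_providers = {"local": {"key": os.urandom(96)}}

    opts = AutoEncryptionOpts(
        kms_providers,
        "keyvault.datakeys",
        # Refuse to run if the CSFLE shared library cannot be loaded,
        # rather than falling back to spawning mongocryptd.
        csfle_required=True,
        # Placeholder path -- only needed when the library is not already
        # on the dynamic loader's search path.
        csfle_path="/path/to/csfle/lib",
    )
    client = MongoClient(auto_encryption_opts=opts)

When `csfle_path` is omitted, the library is located via the loader's normal search path, which is what the LD_LIBRARY_PATH/DYLD_FALLBACK_LIBRARY_PATH exports in run-tests.sh above arrange for CI.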
@@ -152,7 +160,8 @@ def __init__( "install a compatible version with: " "python -m pip install 'pymongo[encryption]'" ) - + self._csfle_path = csfle_path + self._csfle_required = csfle_required self._kms_providers = kms_providers self._key_vault_namespace = key_vault_namespace self._key_vault_client = key_vault_client diff --git a/test/test_encryption.py b/test/test_encryption.py index c0d278d577..fc9d4eec3b 100644 --- a/test/test_encryption.py +++ b/test/test_encryption.py @@ -82,6 +82,18 @@ def get_client_opts(client): class TestAutoEncryptionOpts(PyMongoTestCase): + @unittest.skipUnless(_HAVE_PYMONGOCRYPT, "pymongocrypt is not installed") + @unittest.skipUnless(os.environ.get("TEST_CSFLE"), "csfle is not installed") + def test_csfle(self): + # Test that we can pick up csfle automatically + client = MongoClient( + auto_encryption_opts=AutoEncryptionOpts( + KMS_PROVIDERS, "keyvault.datakeys", csfle_required=True + ), + connect=False, + ) + self.addCleanup(client.close) + @unittest.skipIf(_HAVE_PYMONGOCRYPT, "pymongocrypt is installed") def test_init_requires_pymongocrypt(self): with self.assertRaises(ConfigurationError): @@ -1749,6 +1761,10 @@ def test_case_8(self): # https://github.com/mongodb/specifications/blob/master/source/client-side-encryption/tests/README.rst#bypass-spawning-mongocryptd class TestBypassSpawningMongocryptdProse(EncryptionIntegrationTest): + @unittest.skipIf( + os.environ.get("TEST_CSFLE"), + "this prose test does not work when CSFLE is on a system dynamic library search path.", + ) def test_mongocryptd_bypass_spawn(self): # Lower the mongocryptd timeout to reduce the test run time. self._original_timeout = encryption._MONGOCRYPTD_TIMEOUT_MS From 62a630218179a77e662630a6799d7c3f267459c0 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Wed, 1 Jun 2022 18:26:52 -0500 Subject: [PATCH 0157/1588] PYTHON-2683 Convert change stream spec tests to unified test format (#950) --- .../legacy/change-streams-errors.json | 153 -- .../change-streams-resume-allowlist.json | 1750 ------------ .../change-streams-resume-errorLabels.json | 1652 ------------ .../change_streams/legacy/change-streams.json | 795 ------ .../unified/change-streams-errors.json | 246 ++ .../change-streams-resume-allowlist.json | 2348 +++++++++++++++++ .../change-streams-resume-errorLabels.json | 2125 +++++++++++++++ .../unified/change-streams.json | 1101 +++++++- test/test_change_stream.py | 128 +- test/test_unified_format.py | 6 + ...ctedEventsForClient-ignoreExtraEvents.json | 151 ++ .../valid-pass/poc-change-streams.json | 43 +- test/unified_format.py | 27 +- 13 files changed, 6039 insertions(+), 4486 deletions(-) delete mode 100644 test/change_streams/legacy/change-streams-errors.json delete mode 100644 test/change_streams/legacy/change-streams-resume-allowlist.json delete mode 100644 test/change_streams/legacy/change-streams-resume-errorLabels.json delete mode 100644 test/change_streams/legacy/change-streams.json create mode 100644 test/change_streams/unified/change-streams-errors.json create mode 100644 test/change_streams/unified/change-streams-resume-allowlist.json create mode 100644 test/change_streams/unified/change-streams-resume-errorLabels.json create mode 100644 test/unified-test-format/valid-pass/expectedEventsForClient-ignoreExtraEvents.json diff --git a/test/change_streams/legacy/change-streams-errors.json b/test/change_streams/legacy/change-streams-errors.json deleted file mode 100644 index 7b3fa80689..0000000000 --- a/test/change_streams/legacy/change-streams-errors.json 
+++ /dev/null @@ -1,153 +0,0 @@ -{ - "collection_name": "test", - "database_name": "change-stream-tests", - "collection2_name": "test2", - "database2_name": "change-stream-tests-2", - "tests": [ - { - "description": "The watch helper must not throw a custom exception when executed against a single server topology, but instead depend on a server error", - "minServerVersion": "3.6.0", - "target": "collection", - "topology": [ - "single" - ], - "changeStreamPipeline": [], - "changeStreamOptions": {}, - "operations": [], - "expectations": null, - "result": { - "error": { - "code": 40573 - } - } - }, - { - "description": "Change Stream should error when an invalid aggregation stage is passed in", - "minServerVersion": "3.6.0", - "target": "collection", - "topology": [ - "replicaset" - ], - "changeStreamPipeline": [ - { - "$unsupported": "foo" - } - ], - "changeStreamOptions": {}, - "operations": [ - { - "database": "change-stream-tests", - "collection": "test", - "name": "insertOne", - "arguments": { - "document": { - "z": 3 - } - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": "test", - "cursor": {}, - "pipeline": [ - { - "$changeStream": {} - }, - { - "$unsupported": "foo" - } - ] - }, - "command_name": "aggregate", - "database_name": "change-stream-tests" - } - } - ], - "result": { - "error": { - "code": 40324 - } - } - }, - { - "description": "Change Stream should error when _id is projected out", - "minServerVersion": "4.1.11", - "target": "collection", - "topology": [ - "replicaset", - "sharded", - "load-balanced" - ], - "changeStreamPipeline": [ - { - "$project": { - "_id": 0 - } - } - ], - "changeStreamOptions": {}, - "operations": [ - { - "database": "change-stream-tests", - "collection": "test", - "name": "insertOne", - "arguments": { - "document": { - "z": 3 - } - } - } - ], - "result": { - "error": { - "code": 280 - } - } - }, - { - "description": "change stream errors on ElectionInProgress", - "minServerVersion": "4.2", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "getMore" - ], - "errorCode": 216, - "closeConnection": false - } - }, - "target": "collection", - "topology": [ - "replicaset", - "sharded", - "load-balanced" - ], - "changeStreamPipeline": [], - "changeStreamOptions": {}, - "operations": [ - { - "database": "change-stream-tests", - "collection": "test", - "name": "insertOne", - "arguments": { - "document": { - "z": 3 - } - } - } - ], - "result": { - "error": { - "code": 216 - } - } - } - ] -} diff --git a/test/change_streams/legacy/change-streams-resume-allowlist.json b/test/change_streams/legacy/change-streams-resume-allowlist.json deleted file mode 100644 index baffc8fba9..0000000000 --- a/test/change_streams/legacy/change-streams-resume-allowlist.json +++ /dev/null @@ -1,1750 +0,0 @@ -{ - "collection_name": "test", - "database_name": "change-stream-tests", - "tests": [ - { - "description": "change stream resumes after a network error", - "minServerVersion": "4.2", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "getMore" - ], - "closeConnection": true - } - }, - "target": "collection", - "topology": [ - "replicaset", - "sharded", - "load-balanced" - ], - "changeStreamPipeline": [], - "changeStreamOptions": {}, - "operations": [ - { - "database": "change-stream-tests", - "collection": "test", - "name": "insertOne", - "arguments": { - "document": { - "x": 1 - } - } - } - ], - 
"expectations": [ - { - "command_started_event": { - "command": { - "aggregate": "test", - "cursor": {}, - "pipeline": [ - { - "$changeStream": {} - } - ] - }, - "command_name": "aggregate", - "database_name": "change-stream-tests" - } - }, - { - "command_started_event": { - "command": { - "getMore": 42, - "collection": "test" - }, - "command_name": "getMore", - "database_name": "change-stream-tests" - } - }, - { - "command_started_event": { - "command": { - "aggregate": "test", - "cursor": {}, - "pipeline": [ - { - "$changeStream": {} - } - ] - }, - "command_name": "aggregate", - "database_name": "change-stream-tests" - } - } - ], - "result": { - "success": [ - { - "_id": "42", - "documentKey": "42", - "operationType": "insert", - "ns": { - "db": "change-stream-tests", - "coll": "test" - }, - "fullDocument": { - "x": { - "$numberInt": "1" - } - } - } - ] - } - }, - { - "description": "change stream resumes after HostUnreachable", - "minServerVersion": "4.2", - "maxServerVersion": "4.2.99", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "getMore" - ], - "errorCode": 6, - "closeConnection": false - } - }, - "target": "collection", - "topology": [ - "replicaset", - "sharded" - ], - "changeStreamPipeline": [], - "changeStreamOptions": {}, - "operations": [ - { - "database": "change-stream-tests", - "collection": "test", - "name": "insertOne", - "arguments": { - "document": { - "x": 1 - } - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": "test", - "cursor": {}, - "pipeline": [ - { - "$changeStream": {} - } - ] - }, - "command_name": "aggregate", - "database_name": "change-stream-tests" - } - }, - { - "command_started_event": { - "command": { - "getMore": 42, - "collection": "test" - }, - "command_name": "getMore", - "database_name": "change-stream-tests" - } - }, - { - "command_started_event": { - "command": { - "aggregate": "test", - "cursor": {}, - "pipeline": [ - { - "$changeStream": {} - } - ] - }, - "command_name": "aggregate", - "database_name": "change-stream-tests" - } - } - ], - "result": { - "success": [ - { - "_id": "42", - "documentKey": "42", - "operationType": "insert", - "ns": { - "db": "change-stream-tests", - "coll": "test" - }, - "fullDocument": { - "x": { - "$numberInt": "1" - } - } - } - ] - } - }, - { - "description": "change stream resumes after HostNotFound", - "minServerVersion": "4.2", - "maxServerVersion": "4.2.99", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "getMore" - ], - "errorCode": 7, - "closeConnection": false - } - }, - "target": "collection", - "topology": [ - "replicaset", - "sharded" - ], - "changeStreamPipeline": [], - "changeStreamOptions": {}, - "operations": [ - { - "database": "change-stream-tests", - "collection": "test", - "name": "insertOne", - "arguments": { - "document": { - "x": 1 - } - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": "test", - "cursor": {}, - "pipeline": [ - { - "$changeStream": {} - } - ] - }, - "command_name": "aggregate", - "database_name": "change-stream-tests" - } - }, - { - "command_started_event": { - "command": { - "getMore": 42, - "collection": "test" - }, - "command_name": "getMore", - "database_name": "change-stream-tests" - } - }, - { - "command_started_event": { - "command": { - "aggregate": "test", - "cursor": {}, - "pipeline": [ - { - "$changeStream": {} - } - ] - }, - 
"command_name": "aggregate", - "database_name": "change-stream-tests" - } - } - ], - "result": { - "success": [ - { - "_id": "42", - "documentKey": "42", - "operationType": "insert", - "ns": { - "db": "change-stream-tests", - "coll": "test" - }, - "fullDocument": { - "x": { - "$numberInt": "1" - } - } - } - ] - } - }, - { - "description": "change stream resumes after NetworkTimeout", - "minServerVersion": "4.2", - "maxServerVersion": "4.2.99", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "getMore" - ], - "errorCode": 89, - "closeConnection": false - } - }, - "target": "collection", - "topology": [ - "replicaset", - "sharded" - ], - "changeStreamPipeline": [], - "changeStreamOptions": {}, - "operations": [ - { - "database": "change-stream-tests", - "collection": "test", - "name": "insertOne", - "arguments": { - "document": { - "x": 1 - } - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": "test", - "cursor": {}, - "pipeline": [ - { - "$changeStream": {} - } - ] - }, - "command_name": "aggregate", - "database_name": "change-stream-tests" - } - }, - { - "command_started_event": { - "command": { - "getMore": 42, - "collection": "test" - }, - "command_name": "getMore", - "database_name": "change-stream-tests" - } - }, - { - "command_started_event": { - "command": { - "aggregate": "test", - "cursor": {}, - "pipeline": [ - { - "$changeStream": {} - } - ] - }, - "command_name": "aggregate", - "database_name": "change-stream-tests" - } - } - ], - "result": { - "success": [ - { - "_id": "42", - "documentKey": "42", - "operationType": "insert", - "ns": { - "db": "change-stream-tests", - "coll": "test" - }, - "fullDocument": { - "x": { - "$numberInt": "1" - } - } - } - ] - } - }, - { - "description": "change stream resumes after ShutdownInProgress", - "minServerVersion": "4.2", - "maxServerVersion": "4.2.99", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "getMore" - ], - "errorCode": 91, - "closeConnection": false - } - }, - "target": "collection", - "topology": [ - "replicaset", - "sharded" - ], - "changeStreamPipeline": [], - "changeStreamOptions": {}, - "operations": [ - { - "database": "change-stream-tests", - "collection": "test", - "name": "insertOne", - "arguments": { - "document": { - "x": 1 - } - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": "test", - "cursor": {}, - "pipeline": [ - { - "$changeStream": {} - } - ] - }, - "command_name": "aggregate", - "database_name": "change-stream-tests" - } - }, - { - "command_started_event": { - "command": { - "getMore": 42, - "collection": "test" - }, - "command_name": "getMore", - "database_name": "change-stream-tests" - } - }, - { - "command_started_event": { - "command": { - "aggregate": "test", - "cursor": {}, - "pipeline": [ - { - "$changeStream": {} - } - ] - }, - "command_name": "aggregate", - "database_name": "change-stream-tests" - } - } - ], - "result": { - "success": [ - { - "_id": "42", - "documentKey": "42", - "operationType": "insert", - "ns": { - "db": "change-stream-tests", - "coll": "test" - }, - "fullDocument": { - "x": { - "$numberInt": "1" - } - } - } - ] - } - }, - { - "description": "change stream resumes after PrimarySteppedDown", - "minServerVersion": "4.2", - "maxServerVersion": "4.2.99", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - 
"failCommands": [ - "getMore" - ], - "errorCode": 189, - "closeConnection": false - } - }, - "target": "collection", - "topology": [ - "replicaset", - "sharded" - ], - "changeStreamPipeline": [], - "changeStreamOptions": {}, - "operations": [ - { - "database": "change-stream-tests", - "collection": "test", - "name": "insertOne", - "arguments": { - "document": { - "x": 1 - } - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": "test", - "cursor": {}, - "pipeline": [ - { - "$changeStream": {} - } - ] - }, - "command_name": "aggregate", - "database_name": "change-stream-tests" - } - }, - { - "command_started_event": { - "command": { - "getMore": 42, - "collection": "test" - }, - "command_name": "getMore", - "database_name": "change-stream-tests" - } - }, - { - "command_started_event": { - "command": { - "aggregate": "test", - "cursor": {}, - "pipeline": [ - { - "$changeStream": {} - } - ] - }, - "command_name": "aggregate", - "database_name": "change-stream-tests" - } - } - ], - "result": { - "success": [ - { - "_id": "42", - "documentKey": "42", - "operationType": "insert", - "ns": { - "db": "change-stream-tests", - "coll": "test" - }, - "fullDocument": { - "x": { - "$numberInt": "1" - } - } - } - ] - } - }, - { - "description": "change stream resumes after ExceededTimeLimit", - "minServerVersion": "4.2", - "maxServerVersion": "4.2.99", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "getMore" - ], - "errorCode": 262, - "closeConnection": false - } - }, - "target": "collection", - "topology": [ - "replicaset", - "sharded" - ], - "changeStreamPipeline": [], - "changeStreamOptions": {}, - "operations": [ - { - "database": "change-stream-tests", - "collection": "test", - "name": "insertOne", - "arguments": { - "document": { - "x": 1 - } - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": "test", - "cursor": {}, - "pipeline": [ - { - "$changeStream": {} - } - ] - }, - "command_name": "aggregate", - "database_name": "change-stream-tests" - } - }, - { - "command_started_event": { - "command": { - "getMore": 42, - "collection": "test" - }, - "command_name": "getMore", - "database_name": "change-stream-tests" - } - }, - { - "command_started_event": { - "command": { - "aggregate": "test", - "cursor": {}, - "pipeline": [ - { - "$changeStream": {} - } - ] - }, - "command_name": "aggregate", - "database_name": "change-stream-tests" - } - } - ], - "result": { - "success": [ - { - "_id": "42", - "documentKey": "42", - "operationType": "insert", - "ns": { - "db": "change-stream-tests", - "coll": "test" - }, - "fullDocument": { - "x": { - "$numberInt": "1" - } - } - } - ] - } - }, - { - "description": "change stream resumes after SocketException", - "minServerVersion": "4.2", - "maxServerVersion": "4.2.99", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "getMore" - ], - "errorCode": 9001, - "closeConnection": false - } - }, - "target": "collection", - "topology": [ - "replicaset", - "sharded" - ], - "changeStreamPipeline": [], - "changeStreamOptions": {}, - "operations": [ - { - "database": "change-stream-tests", - "collection": "test", - "name": "insertOne", - "arguments": { - "document": { - "x": 1 - } - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": "test", - "cursor": {}, - "pipeline": [ - { - "$changeStream": {} - } - ] - }, - 
"command_name": "aggregate", - "database_name": "change-stream-tests" - } - }, - { - "command_started_event": { - "command": { - "getMore": 42, - "collection": "test" - }, - "command_name": "getMore", - "database_name": "change-stream-tests" - } - }, - { - "command_started_event": { - "command": { - "aggregate": "test", - "cursor": {}, - "pipeline": [ - { - "$changeStream": {} - } - ] - }, - "command_name": "aggregate", - "database_name": "change-stream-tests" - } - } - ], - "result": { - "success": [ - { - "_id": "42", - "documentKey": "42", - "operationType": "insert", - "ns": { - "db": "change-stream-tests", - "coll": "test" - }, - "fullDocument": { - "x": { - "$numberInt": "1" - } - } - } - ] - } - }, - { - "description": "change stream resumes after NotWritablePrimary", - "minServerVersion": "4.2", - "maxServerVersion": "4.2.99", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "getMore" - ], - "errorCode": 10107, - "closeConnection": false - } - }, - "target": "collection", - "topology": [ - "replicaset", - "sharded" - ], - "changeStreamPipeline": [], - "changeStreamOptions": {}, - "operations": [ - { - "database": "change-stream-tests", - "collection": "test", - "name": "insertOne", - "arguments": { - "document": { - "x": 1 - } - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": "test", - "cursor": {}, - "pipeline": [ - { - "$changeStream": {} - } - ] - }, - "command_name": "aggregate", - "database_name": "change-stream-tests" - } - }, - { - "command_started_event": { - "command": { - "getMore": 42, - "collection": "test" - }, - "command_name": "getMore", - "database_name": "change-stream-tests" - } - }, - { - "command_started_event": { - "command": { - "aggregate": "test", - "cursor": {}, - "pipeline": [ - { - "$changeStream": {} - } - ] - }, - "command_name": "aggregate", - "database_name": "change-stream-tests" - } - } - ], - "result": { - "success": [ - { - "_id": "42", - "documentKey": "42", - "operationType": "insert", - "ns": { - "db": "change-stream-tests", - "coll": "test" - }, - "fullDocument": { - "x": { - "$numberInt": "1" - } - } - } - ] - } - }, - { - "description": "change stream resumes after InterruptedAtShutdown", - "minServerVersion": "4.2", - "maxServerVersion": "4.2.99", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "getMore" - ], - "errorCode": 11600, - "closeConnection": false - } - }, - "target": "collection", - "topology": [ - "replicaset", - "sharded" - ], - "changeStreamPipeline": [], - "changeStreamOptions": {}, - "operations": [ - { - "database": "change-stream-tests", - "collection": "test", - "name": "insertOne", - "arguments": { - "document": { - "x": 1 - } - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": "test", - "cursor": {}, - "pipeline": [ - { - "$changeStream": {} - } - ] - }, - "command_name": "aggregate", - "database_name": "change-stream-tests" - } - }, - { - "command_started_event": { - "command": { - "getMore": 42, - "collection": "test" - }, - "command_name": "getMore", - "database_name": "change-stream-tests" - } - }, - { - "command_started_event": { - "command": { - "aggregate": "test", - "cursor": {}, - "pipeline": [ - { - "$changeStream": {} - } - ] - }, - "command_name": "aggregate", - "database_name": "change-stream-tests" - } - } - ], - "result": { - "success": [ - { - "_id": "42", - "documentKey": 
"42", - "operationType": "insert", - "ns": { - "db": "change-stream-tests", - "coll": "test" - }, - "fullDocument": { - "x": { - "$numberInt": "1" - } - } - } - ] - } - }, - { - "description": "change stream resumes after InterruptedDueToReplStateChange", - "minServerVersion": "4.2", - "maxServerVersion": "4.2.99", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "getMore" - ], - "errorCode": 11602, - "closeConnection": false - } - }, - "target": "collection", - "topology": [ - "replicaset", - "sharded" - ], - "changeStreamPipeline": [], - "changeStreamOptions": {}, - "operations": [ - { - "database": "change-stream-tests", - "collection": "test", - "name": "insertOne", - "arguments": { - "document": { - "x": 1 - } - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": "test", - "cursor": {}, - "pipeline": [ - { - "$changeStream": {} - } - ] - }, - "command_name": "aggregate", - "database_name": "change-stream-tests" - } - }, - { - "command_started_event": { - "command": { - "getMore": 42, - "collection": "test" - }, - "command_name": "getMore", - "database_name": "change-stream-tests" - } - }, - { - "command_started_event": { - "command": { - "aggregate": "test", - "cursor": {}, - "pipeline": [ - { - "$changeStream": {} - } - ] - }, - "command_name": "aggregate", - "database_name": "change-stream-tests" - } - } - ], - "result": { - "success": [ - { - "_id": "42", - "documentKey": "42", - "operationType": "insert", - "ns": { - "db": "change-stream-tests", - "coll": "test" - }, - "fullDocument": { - "x": { - "$numberInt": "1" - } - } - } - ] - } - }, - { - "description": "change stream resumes after NotPrimaryNoSecondaryOk", - "minServerVersion": "4.2", - "maxServerVersion": "4.2.99", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "getMore" - ], - "errorCode": 13435, - "closeConnection": false - } - }, - "target": "collection", - "topology": [ - "replicaset", - "sharded" - ], - "changeStreamPipeline": [], - "changeStreamOptions": {}, - "operations": [ - { - "database": "change-stream-tests", - "collection": "test", - "name": "insertOne", - "arguments": { - "document": { - "x": 1 - } - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": "test", - "cursor": {}, - "pipeline": [ - { - "$changeStream": {} - } - ] - }, - "command_name": "aggregate", - "database_name": "change-stream-tests" - } - }, - { - "command_started_event": { - "command": { - "getMore": 42, - "collection": "test" - }, - "command_name": "getMore", - "database_name": "change-stream-tests" - } - }, - { - "command_started_event": { - "command": { - "aggregate": "test", - "cursor": {}, - "pipeline": [ - { - "$changeStream": {} - } - ] - }, - "command_name": "aggregate", - "database_name": "change-stream-tests" - } - } - ], - "result": { - "success": [ - { - "_id": "42", - "documentKey": "42", - "operationType": "insert", - "ns": { - "db": "change-stream-tests", - "coll": "test" - }, - "fullDocument": { - "x": { - "$numberInt": "1" - } - } - } - ] - } - }, - { - "description": "change stream resumes after NotPrimaryOrSecondary", - "minServerVersion": "4.2", - "maxServerVersion": "4.2.99", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "getMore" - ], - "errorCode": 13436, - "closeConnection": false - } - }, - "target": "collection", - 
"topology": [ - "replicaset", - "sharded" - ], - "changeStreamPipeline": [], - "changeStreamOptions": {}, - "operations": [ - { - "database": "change-stream-tests", - "collection": "test", - "name": "insertOne", - "arguments": { - "document": { - "x": 1 - } - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": "test", - "cursor": {}, - "pipeline": [ - { - "$changeStream": {} - } - ] - }, - "command_name": "aggregate", - "database_name": "change-stream-tests" - } - }, - { - "command_started_event": { - "command": { - "getMore": 42, - "collection": "test" - }, - "command_name": "getMore", - "database_name": "change-stream-tests" - } - }, - { - "command_started_event": { - "command": { - "aggregate": "test", - "cursor": {}, - "pipeline": [ - { - "$changeStream": {} - } - ] - }, - "command_name": "aggregate", - "database_name": "change-stream-tests" - } - } - ], - "result": { - "success": [ - { - "_id": "42", - "documentKey": "42", - "operationType": "insert", - "ns": { - "db": "change-stream-tests", - "coll": "test" - }, - "fullDocument": { - "x": { - "$numberInt": "1" - } - } - } - ] - } - }, - { - "description": "change stream resumes after StaleShardVersion", - "minServerVersion": "4.2", - "maxServerVersion": "4.2.99", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "getMore" - ], - "errorCode": 63, - "closeConnection": false - } - }, - "target": "collection", - "topology": [ - "replicaset", - "sharded" - ], - "changeStreamPipeline": [], - "changeStreamOptions": {}, - "operations": [ - { - "database": "change-stream-tests", - "collection": "test", - "name": "insertOne", - "arguments": { - "document": { - "x": 1 - } - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": "test", - "cursor": {}, - "pipeline": [ - { - "$changeStream": {} - } - ] - }, - "command_name": "aggregate", - "database_name": "change-stream-tests" - } - }, - { - "command_started_event": { - "command": { - "getMore": 42, - "collection": "test" - }, - "command_name": "getMore", - "database_name": "change-stream-tests" - } - }, - { - "command_started_event": { - "command": { - "aggregate": "test", - "cursor": {}, - "pipeline": [ - { - "$changeStream": {} - } - ] - }, - "command_name": "aggregate", - "database_name": "change-stream-tests" - } - } - ], - "result": { - "success": [ - { - "_id": "42", - "documentKey": "42", - "operationType": "insert", - "ns": { - "db": "change-stream-tests", - "coll": "test" - }, - "fullDocument": { - "x": { - "$numberInt": "1" - } - } - } - ] - } - }, - { - "description": "change stream resumes after StaleEpoch", - "minServerVersion": "4.2", - "maxServerVersion": "4.2.99", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "getMore" - ], - "errorCode": 150, - "closeConnection": false - } - }, - "target": "collection", - "topology": [ - "replicaset", - "sharded" - ], - "changeStreamPipeline": [], - "changeStreamOptions": {}, - "operations": [ - { - "database": "change-stream-tests", - "collection": "test", - "name": "insertOne", - "arguments": { - "document": { - "x": 1 - } - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": "test", - "cursor": {}, - "pipeline": [ - { - "$changeStream": {} - } - ] - }, - "command_name": "aggregate", - "database_name": "change-stream-tests" - } - }, - { - "command_started_event": { - "command": { 
- "getMore": 42, - "collection": "test" - }, - "command_name": "getMore", - "database_name": "change-stream-tests" - } - }, - { - "command_started_event": { - "command": { - "aggregate": "test", - "cursor": {}, - "pipeline": [ - { - "$changeStream": {} - } - ] - }, - "command_name": "aggregate", - "database_name": "change-stream-tests" - } - } - ], - "result": { - "success": [ - { - "_id": "42", - "documentKey": "42", - "operationType": "insert", - "ns": { - "db": "change-stream-tests", - "coll": "test" - }, - "fullDocument": { - "x": { - "$numberInt": "1" - } - } - } - ] - } - }, - { - "description": "change stream resumes after RetryChangeStream", - "minServerVersion": "4.2", - "maxServerVersion": "4.2.99", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "getMore" - ], - "errorCode": 234, - "closeConnection": false - } - }, - "target": "collection", - "topology": [ - "replicaset", - "sharded" - ], - "changeStreamPipeline": [], - "changeStreamOptions": {}, - "operations": [ - { - "database": "change-stream-tests", - "collection": "test", - "name": "insertOne", - "arguments": { - "document": { - "x": 1 - } - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": "test", - "cursor": {}, - "pipeline": [ - { - "$changeStream": {} - } - ] - }, - "command_name": "aggregate", - "database_name": "change-stream-tests" - } - }, - { - "command_started_event": { - "command": { - "getMore": 42, - "collection": "test" - }, - "command_name": "getMore", - "database_name": "change-stream-tests" - } - }, - { - "command_started_event": { - "command": { - "aggregate": "test", - "cursor": {}, - "pipeline": [ - { - "$changeStream": {} - } - ] - }, - "command_name": "aggregate", - "database_name": "change-stream-tests" - } - } - ], - "result": { - "success": [ - { - "_id": "42", - "documentKey": "42", - "operationType": "insert", - "ns": { - "db": "change-stream-tests", - "coll": "test" - }, - "fullDocument": { - "x": { - "$numberInt": "1" - } - } - } - ] - } - }, - { - "description": "change stream resumes after FailedToSatisfyReadPreference", - "minServerVersion": "4.2", - "maxServerVersion": "4.2.99", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "getMore" - ], - "errorCode": 133, - "closeConnection": false - } - }, - "target": "collection", - "topology": [ - "replicaset", - "sharded" - ], - "changeStreamPipeline": [], - "changeStreamOptions": {}, - "operations": [ - { - "database": "change-stream-tests", - "collection": "test", - "name": "insertOne", - "arguments": { - "document": { - "x": 1 - } - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": "test", - "cursor": {}, - "pipeline": [ - { - "$changeStream": {} - } - ] - }, - "command_name": "aggregate", - "database_name": "change-stream-tests" - } - }, - { - "command_started_event": { - "command": { - "getMore": 42, - "collection": "test" - }, - "command_name": "getMore", - "database_name": "change-stream-tests" - } - }, - { - "command_started_event": { - "command": { - "aggregate": "test", - "cursor": {}, - "pipeline": [ - { - "$changeStream": {} - } - ] - }, - "command_name": "aggregate", - "database_name": "change-stream-tests" - } - } - ], - "result": { - "success": [ - { - "_id": "42", - "documentKey": "42", - "operationType": "insert", - "ns": { - "db": "change-stream-tests", - "coll": "test" - }, - "fullDocument": { - "x": { 
- "$numberInt": "1" - } - } - } - ] - } - }, - { - "description": "change stream resumes after CursorNotFound", - "minServerVersion": "4.2", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "getMore" - ], - "errorCode": 43, - "closeConnection": false - } - }, - "target": "collection", - "topology": [ - "replicaset", - "sharded" - ], - "changeStreamPipeline": [], - "changeStreamOptions": {}, - "operations": [ - { - "database": "change-stream-tests", - "collection": "test", - "name": "insertOne", - "arguments": { - "document": { - "x": 1 - } - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": "test", - "cursor": {}, - "pipeline": [ - { - "$changeStream": {} - } - ] - }, - "command_name": "aggregate", - "database_name": "change-stream-tests" - } - }, - { - "command_started_event": { - "command": { - "getMore": 42, - "collection": "test" - }, - "command_name": "getMore", - "database_name": "change-stream-tests" - } - }, - { - "command_started_event": { - "command": { - "aggregate": "test", - "cursor": {}, - "pipeline": [ - { - "$changeStream": {} - } - ] - }, - "command_name": "aggregate", - "database_name": "change-stream-tests" - } - } - ], - "result": { - "success": [ - { - "_id": "42", - "documentKey": "42", - "operationType": "insert", - "ns": { - "db": "change-stream-tests", - "coll": "test" - }, - "fullDocument": { - "x": { - "$numberInt": "1" - } - } - } - ] - } - } - ] -} diff --git a/test/change_streams/legacy/change-streams-resume-errorLabels.json b/test/change_streams/legacy/change-streams-resume-errorLabels.json deleted file mode 100644 index 2bac61d3b1..0000000000 --- a/test/change_streams/legacy/change-streams-resume-errorLabels.json +++ /dev/null @@ -1,1652 +0,0 @@ -{ - "collection_name": "test", - "database_name": "change-stream-tests", - "tests": [ - { - "description": "change stream resumes after HostUnreachable", - "minServerVersion": "4.3.1", - "failPoint": { - "configureFailPoint": "failGetMoreAfterCursorCheckout", - "mode": { - "times": 1 - }, - "data": { - "errorCode": 6, - "closeConnection": false - } - }, - "target": "collection", - "topology": [ - "replicaset", - "sharded", - "load-balanced" - ], - "changeStreamPipeline": [], - "changeStreamOptions": {}, - "operations": [ - { - "database": "change-stream-tests", - "collection": "test", - "name": "insertOne", - "arguments": { - "document": { - "x": 1 - } - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": "test", - "cursor": {}, - "pipeline": [ - { - "$changeStream": {} - } - ] - }, - "command_name": "aggregate", - "database_name": "change-stream-tests" - } - }, - { - "command_started_event": { - "command": { - "getMore": 42, - "collection": "test" - }, - "command_name": "getMore", - "database_name": "change-stream-tests" - } - }, - { - "command_started_event": { - "command": { - "aggregate": "test", - "cursor": {}, - "pipeline": [ - { - "$changeStream": {} - } - ] - }, - "command_name": "aggregate", - "database_name": "change-stream-tests" - } - } - ], - "result": { - "success": [ - { - "_id": "42", - "documentKey": "42", - "operationType": "insert", - "ns": { - "db": "change-stream-tests", - "coll": "test" - }, - "fullDocument": { - "x": { - "$numberInt": "1" - } - } - } - ] - } - }, - { - "description": "change stream resumes after HostNotFound", - "minServerVersion": "4.3.1", - "failPoint": { - "configureFailPoint": "failGetMoreAfterCursorCheckout", - 
"mode": { - "times": 1 - }, - "data": { - "errorCode": 7, - "closeConnection": false - } - }, - "target": "collection", - "topology": [ - "replicaset", - "sharded", - "load-balanced" - ], - "changeStreamPipeline": [], - "changeStreamOptions": {}, - "operations": [ - { - "database": "change-stream-tests", - "collection": "test", - "name": "insertOne", - "arguments": { - "document": { - "x": 1 - } - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": "test", - "cursor": {}, - "pipeline": [ - { - "$changeStream": {} - } - ] - }, - "command_name": "aggregate", - "database_name": "change-stream-tests" - } - }, - { - "command_started_event": { - "command": { - "getMore": 42, - "collection": "test" - }, - "command_name": "getMore", - "database_name": "change-stream-tests" - } - }, - { - "command_started_event": { - "command": { - "aggregate": "test", - "cursor": {}, - "pipeline": [ - { - "$changeStream": {} - } - ] - }, - "command_name": "aggregate", - "database_name": "change-stream-tests" - } - } - ], - "result": { - "success": [ - { - "_id": "42", - "documentKey": "42", - "operationType": "insert", - "ns": { - "db": "change-stream-tests", - "coll": "test" - }, - "fullDocument": { - "x": { - "$numberInt": "1" - } - } - } - ] - } - }, - { - "description": "change stream resumes after NetworkTimeout", - "minServerVersion": "4.3.1", - "failPoint": { - "configureFailPoint": "failGetMoreAfterCursorCheckout", - "mode": { - "times": 1 - }, - "data": { - "errorCode": 89, - "closeConnection": false - } - }, - "target": "collection", - "topology": [ - "replicaset", - "sharded", - "load-balanced" - ], - "changeStreamPipeline": [], - "changeStreamOptions": {}, - "operations": [ - { - "database": "change-stream-tests", - "collection": "test", - "name": "insertOne", - "arguments": { - "document": { - "x": 1 - } - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": "test", - "cursor": {}, - "pipeline": [ - { - "$changeStream": {} - } - ] - }, - "command_name": "aggregate", - "database_name": "change-stream-tests" - } - }, - { - "command_started_event": { - "command": { - "getMore": 42, - "collection": "test" - }, - "command_name": "getMore", - "database_name": "change-stream-tests" - } - }, - { - "command_started_event": { - "command": { - "aggregate": "test", - "cursor": {}, - "pipeline": [ - { - "$changeStream": {} - } - ] - }, - "command_name": "aggregate", - "database_name": "change-stream-tests" - } - } - ], - "result": { - "success": [ - { - "_id": "42", - "documentKey": "42", - "operationType": "insert", - "ns": { - "db": "change-stream-tests", - "coll": "test" - }, - "fullDocument": { - "x": { - "$numberInt": "1" - } - } - } - ] - } - }, - { - "description": "change stream resumes after ShutdownInProgress", - "minServerVersion": "4.3.1", - "failPoint": { - "configureFailPoint": "failGetMoreAfterCursorCheckout", - "mode": { - "times": 1 - }, - "data": { - "errorCode": 91, - "closeConnection": false - } - }, - "target": "collection", - "topology": [ - "replicaset", - "sharded", - "load-balanced" - ], - "changeStreamPipeline": [], - "changeStreamOptions": {}, - "operations": [ - { - "database": "change-stream-tests", - "collection": "test", - "name": "insertOne", - "arguments": { - "document": { - "x": 1 - } - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": "test", - "cursor": {}, - "pipeline": [ - { - "$changeStream": {} - } - ] - }, - "command_name": "aggregate", - 
"database_name": "change-stream-tests" - } - }, - { - "command_started_event": { - "command": { - "getMore": 42, - "collection": "test" - }, - "command_name": "getMore", - "database_name": "change-stream-tests" - } - }, - { - "command_started_event": { - "command": { - "aggregate": "test", - "cursor": {}, - "pipeline": [ - { - "$changeStream": {} - } - ] - }, - "command_name": "aggregate", - "database_name": "change-stream-tests" - } - } - ], - "result": { - "success": [ - { - "_id": "42", - "documentKey": "42", - "operationType": "insert", - "ns": { - "db": "change-stream-tests", - "coll": "test" - }, - "fullDocument": { - "x": { - "$numberInt": "1" - } - } - } - ] - } - }, - { - "description": "change stream resumes after PrimarySteppedDown", - "minServerVersion": "4.3.1", - "failPoint": { - "configureFailPoint": "failGetMoreAfterCursorCheckout", - "mode": { - "times": 1 - }, - "data": { - "errorCode": 189, - "closeConnection": false - } - }, - "target": "collection", - "topology": [ - "replicaset", - "sharded", - "load-balanced" - ], - "changeStreamPipeline": [], - "changeStreamOptions": {}, - "operations": [ - { - "database": "change-stream-tests", - "collection": "test", - "name": "insertOne", - "arguments": { - "document": { - "x": 1 - } - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": "test", - "cursor": {}, - "pipeline": [ - { - "$changeStream": {} - } - ] - }, - "command_name": "aggregate", - "database_name": "change-stream-tests" - } - }, - { - "command_started_event": { - "command": { - "getMore": 42, - "collection": "test" - }, - "command_name": "getMore", - "database_name": "change-stream-tests" - } - }, - { - "command_started_event": { - "command": { - "aggregate": "test", - "cursor": {}, - "pipeline": [ - { - "$changeStream": {} - } - ] - }, - "command_name": "aggregate", - "database_name": "change-stream-tests" - } - } - ], - "result": { - "success": [ - { - "_id": "42", - "documentKey": "42", - "operationType": "insert", - "ns": { - "db": "change-stream-tests", - "coll": "test" - }, - "fullDocument": { - "x": { - "$numberInt": "1" - } - } - } - ] - } - }, - { - "description": "change stream resumes after ExceededTimeLimit", - "minServerVersion": "4.3.1", - "failPoint": { - "configureFailPoint": "failGetMoreAfterCursorCheckout", - "mode": { - "times": 1 - }, - "data": { - "errorCode": 262, - "closeConnection": false - } - }, - "target": "collection", - "topology": [ - "replicaset", - "sharded", - "load-balanced" - ], - "changeStreamPipeline": [], - "changeStreamOptions": {}, - "operations": [ - { - "database": "change-stream-tests", - "collection": "test", - "name": "insertOne", - "arguments": { - "document": { - "x": 1 - } - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": "test", - "cursor": {}, - "pipeline": [ - { - "$changeStream": {} - } - ] - }, - "command_name": "aggregate", - "database_name": "change-stream-tests" - } - }, - { - "command_started_event": { - "command": { - "getMore": 42, - "collection": "test" - }, - "command_name": "getMore", - "database_name": "change-stream-tests" - } - }, - { - "command_started_event": { - "command": { - "aggregate": "test", - "cursor": {}, - "pipeline": [ - { - "$changeStream": {} - } - ] - }, - "command_name": "aggregate", - "database_name": "change-stream-tests" - } - } - ], - "result": { - "success": [ - { - "_id": "42", - "documentKey": "42", - "operationType": "insert", - "ns": { - "db": "change-stream-tests", - "coll": "test" - }, - 
"fullDocument": { - "x": { - "$numberInt": "1" - } - } - } - ] - } - }, - { - "description": "change stream resumes after SocketException", - "minServerVersion": "4.3.1", - "failPoint": { - "configureFailPoint": "failGetMoreAfterCursorCheckout", - "mode": { - "times": 1 - }, - "data": { - "errorCode": 9001, - "closeConnection": false - } - }, - "target": "collection", - "topology": [ - "replicaset", - "sharded", - "load-balanced" - ], - "changeStreamPipeline": [], - "changeStreamOptions": {}, - "operations": [ - { - "database": "change-stream-tests", - "collection": "test", - "name": "insertOne", - "arguments": { - "document": { - "x": 1 - } - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": "test", - "cursor": {}, - "pipeline": [ - { - "$changeStream": {} - } - ] - }, - "command_name": "aggregate", - "database_name": "change-stream-tests" - } - }, - { - "command_started_event": { - "command": { - "getMore": 42, - "collection": "test" - }, - "command_name": "getMore", - "database_name": "change-stream-tests" - } - }, - { - "command_started_event": { - "command": { - "aggregate": "test", - "cursor": {}, - "pipeline": [ - { - "$changeStream": {} - } - ] - }, - "command_name": "aggregate", - "database_name": "change-stream-tests" - } - } - ], - "result": { - "success": [ - { - "_id": "42", - "documentKey": "42", - "operationType": "insert", - "ns": { - "db": "change-stream-tests", - "coll": "test" - }, - "fullDocument": { - "x": { - "$numberInt": "1" - } - } - } - ] - } - }, - { - "description": "change stream resumes after NotWritablePrimary", - "minServerVersion": "4.3.1", - "failPoint": { - "configureFailPoint": "failGetMoreAfterCursorCheckout", - "mode": { - "times": 1 - }, - "data": { - "errorCode": 10107, - "closeConnection": false - } - }, - "target": "collection", - "topology": [ - "replicaset", - "sharded", - "load-balanced" - ], - "changeStreamPipeline": [], - "changeStreamOptions": {}, - "operations": [ - { - "database": "change-stream-tests", - "collection": "test", - "name": "insertOne", - "arguments": { - "document": { - "x": 1 - } - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": "test", - "cursor": {}, - "pipeline": [ - { - "$changeStream": {} - } - ] - }, - "command_name": "aggregate", - "database_name": "change-stream-tests" - } - }, - { - "command_started_event": { - "command": { - "getMore": 42, - "collection": "test" - }, - "command_name": "getMore", - "database_name": "change-stream-tests" - } - }, - { - "command_started_event": { - "command": { - "aggregate": "test", - "cursor": {}, - "pipeline": [ - { - "$changeStream": {} - } - ] - }, - "command_name": "aggregate", - "database_name": "change-stream-tests" - } - } - ], - "result": { - "success": [ - { - "_id": "42", - "documentKey": "42", - "operationType": "insert", - "ns": { - "db": "change-stream-tests", - "coll": "test" - }, - "fullDocument": { - "x": { - "$numberInt": "1" - } - } - } - ] - } - }, - { - "description": "change stream resumes after InterruptedAtShutdown", - "minServerVersion": "4.3.1", - "failPoint": { - "configureFailPoint": "failGetMoreAfterCursorCheckout", - "mode": { - "times": 1 - }, - "data": { - "errorCode": 11600, - "closeConnection": false - } - }, - "target": "collection", - "topology": [ - "replicaset", - "sharded", - "load-balanced" - ], - "changeStreamPipeline": [], - "changeStreamOptions": {}, - "operations": [ - { - "database": "change-stream-tests", - "collection": "test", - "name": "insertOne", 
- "arguments": { - "document": { - "x": 1 - } - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": "test", - "cursor": {}, - "pipeline": [ - { - "$changeStream": {} - } - ] - }, - "command_name": "aggregate", - "database_name": "change-stream-tests" - } - }, - { - "command_started_event": { - "command": { - "getMore": 42, - "collection": "test" - }, - "command_name": "getMore", - "database_name": "change-stream-tests" - } - }, - { - "command_started_event": { - "command": { - "aggregate": "test", - "cursor": {}, - "pipeline": [ - { - "$changeStream": {} - } - ] - }, - "command_name": "aggregate", - "database_name": "change-stream-tests" - } - } - ], - "result": { - "success": [ - { - "_id": "42", - "documentKey": "42", - "operationType": "insert", - "ns": { - "db": "change-stream-tests", - "coll": "test" - }, - "fullDocument": { - "x": { - "$numberInt": "1" - } - } - } - ] - } - }, - { - "description": "change stream resumes after InterruptedDueToReplStateChange", - "minServerVersion": "4.3.1", - "failPoint": { - "configureFailPoint": "failGetMoreAfterCursorCheckout", - "mode": { - "times": 1 - }, - "data": { - "errorCode": 11602, - "closeConnection": false - } - }, - "target": "collection", - "topology": [ - "replicaset", - "sharded", - "load-balanced" - ], - "changeStreamPipeline": [], - "changeStreamOptions": {}, - "operations": [ - { - "database": "change-stream-tests", - "collection": "test", - "name": "insertOne", - "arguments": { - "document": { - "x": 1 - } - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": "test", - "cursor": {}, - "pipeline": [ - { - "$changeStream": {} - } - ] - }, - "command_name": "aggregate", - "database_name": "change-stream-tests" - } - }, - { - "command_started_event": { - "command": { - "getMore": 42, - "collection": "test" - }, - "command_name": "getMore", - "database_name": "change-stream-tests" - } - }, - { - "command_started_event": { - "command": { - "aggregate": "test", - "cursor": {}, - "pipeline": [ - { - "$changeStream": {} - } - ] - }, - "command_name": "aggregate", - "database_name": "change-stream-tests" - } - } - ], - "result": { - "success": [ - { - "_id": "42", - "documentKey": "42", - "operationType": "insert", - "ns": { - "db": "change-stream-tests", - "coll": "test" - }, - "fullDocument": { - "x": { - "$numberInt": "1" - } - } - } - ] - } - }, - { - "description": "change stream resumes after NotPrimaryNoSecondaryOk", - "minServerVersion": "4.3.1", - "failPoint": { - "configureFailPoint": "failGetMoreAfterCursorCheckout", - "mode": { - "times": 1 - }, - "data": { - "errorCode": 13435, - "closeConnection": false - } - }, - "target": "collection", - "topology": [ - "replicaset", - "sharded", - "load-balanced" - ], - "changeStreamPipeline": [], - "changeStreamOptions": {}, - "operations": [ - { - "database": "change-stream-tests", - "collection": "test", - "name": "insertOne", - "arguments": { - "document": { - "x": 1 - } - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": "test", - "cursor": {}, - "pipeline": [ - { - "$changeStream": {} - } - ] - }, - "command_name": "aggregate", - "database_name": "change-stream-tests" - } - }, - { - "command_started_event": { - "command": { - "getMore": 42, - "collection": "test" - }, - "command_name": "getMore", - "database_name": "change-stream-tests" - } - }, - { - "command_started_event": { - "command": { - "aggregate": "test", - "cursor": {}, - "pipeline": [ - { - 
"$changeStream": {} - } - ] - }, - "command_name": "aggregate", - "database_name": "change-stream-tests" - } - } - ], - "result": { - "success": [ - { - "_id": "42", - "documentKey": "42", - "operationType": "insert", - "ns": { - "db": "change-stream-tests", - "coll": "test" - }, - "fullDocument": { - "x": { - "$numberInt": "1" - } - } - } - ] - } - }, - { - "description": "change stream resumes after NotPrimaryOrSecondary", - "minServerVersion": "4.3.1", - "failPoint": { - "configureFailPoint": "failGetMoreAfterCursorCheckout", - "mode": { - "times": 1 - }, - "data": { - "errorCode": 13436, - "closeConnection": false - } - }, - "target": "collection", - "topology": [ - "replicaset", - "sharded", - "load-balanced" - ], - "changeStreamPipeline": [], - "changeStreamOptions": {}, - "operations": [ - { - "database": "change-stream-tests", - "collection": "test", - "name": "insertOne", - "arguments": { - "document": { - "x": 1 - } - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": "test", - "cursor": {}, - "pipeline": [ - { - "$changeStream": {} - } - ] - }, - "command_name": "aggregate", - "database_name": "change-stream-tests" - } - }, - { - "command_started_event": { - "command": { - "getMore": 42, - "collection": "test" - }, - "command_name": "getMore", - "database_name": "change-stream-tests" - } - }, - { - "command_started_event": { - "command": { - "aggregate": "test", - "cursor": {}, - "pipeline": [ - { - "$changeStream": {} - } - ] - }, - "command_name": "aggregate", - "database_name": "change-stream-tests" - } - } - ], - "result": { - "success": [ - { - "_id": "42", - "documentKey": "42", - "operationType": "insert", - "ns": { - "db": "change-stream-tests", - "coll": "test" - }, - "fullDocument": { - "x": { - "$numberInt": "1" - } - } - } - ] - } - }, - { - "description": "change stream resumes after StaleShardVersion", - "minServerVersion": "4.3.1", - "failPoint": { - "configureFailPoint": "failGetMoreAfterCursorCheckout", - "mode": { - "times": 1 - }, - "data": { - "errorCode": 63, - "closeConnection": false - } - }, - "target": "collection", - "topology": [ - "replicaset", - "sharded", - "load-balanced" - ], - "changeStreamPipeline": [], - "changeStreamOptions": {}, - "operations": [ - { - "database": "change-stream-tests", - "collection": "test", - "name": "insertOne", - "arguments": { - "document": { - "x": 1 - } - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": "test", - "cursor": {}, - "pipeline": [ - { - "$changeStream": {} - } - ] - }, - "command_name": "aggregate", - "database_name": "change-stream-tests" - } - }, - { - "command_started_event": { - "command": { - "getMore": 42, - "collection": "test" - }, - "command_name": "getMore", - "database_name": "change-stream-tests" - } - }, - { - "command_started_event": { - "command": { - "aggregate": "test", - "cursor": {}, - "pipeline": [ - { - "$changeStream": {} - } - ] - }, - "command_name": "aggregate", - "database_name": "change-stream-tests" - } - } - ], - "result": { - "success": [ - { - "_id": "42", - "documentKey": "42", - "operationType": "insert", - "ns": { - "db": "change-stream-tests", - "coll": "test" - }, - "fullDocument": { - "x": { - "$numberInt": "1" - } - } - } - ] - } - }, - { - "description": "change stream resumes after StaleEpoch", - "minServerVersion": "4.3.1", - "failPoint": { - "configureFailPoint": "failGetMoreAfterCursorCheckout", - "mode": { - "times": 1 - }, - "data": { - "errorCode": 150, - "closeConnection": 
false - } - }, - "target": "collection", - "topology": [ - "replicaset", - "sharded", - "load-balanced" - ], - "changeStreamPipeline": [], - "changeStreamOptions": {}, - "operations": [ - { - "database": "change-stream-tests", - "collection": "test", - "name": "insertOne", - "arguments": { - "document": { - "x": 1 - } - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": "test", - "cursor": {}, - "pipeline": [ - { - "$changeStream": {} - } - ] - }, - "command_name": "aggregate", - "database_name": "change-stream-tests" - } - }, - { - "command_started_event": { - "command": { - "getMore": 42, - "collection": "test" - }, - "command_name": "getMore", - "database_name": "change-stream-tests" - } - }, - { - "command_started_event": { - "command": { - "aggregate": "test", - "cursor": {}, - "pipeline": [ - { - "$changeStream": {} - } - ] - }, - "command_name": "aggregate", - "database_name": "change-stream-tests" - } - } - ], - "result": { - "success": [ - { - "_id": "42", - "documentKey": "42", - "operationType": "insert", - "ns": { - "db": "change-stream-tests", - "coll": "test" - }, - "fullDocument": { - "x": { - "$numberInt": "1" - } - } - } - ] - } - }, - { - "description": "change stream resumes after RetryChangeStream", - "minServerVersion": "4.3.1", - "failPoint": { - "configureFailPoint": "failGetMoreAfterCursorCheckout", - "mode": { - "times": 1 - }, - "data": { - "errorCode": 234, - "closeConnection": false - } - }, - "target": "collection", - "topology": [ - "replicaset", - "sharded", - "load-balanced" - ], - "changeStreamPipeline": [], - "changeStreamOptions": {}, - "operations": [ - { - "database": "change-stream-tests", - "collection": "test", - "name": "insertOne", - "arguments": { - "document": { - "x": 1 - } - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": "test", - "cursor": {}, - "pipeline": [ - { - "$changeStream": {} - } - ] - }, - "command_name": "aggregate", - "database_name": "change-stream-tests" - } - }, - { - "command_started_event": { - "command": { - "getMore": 42, - "collection": "test" - }, - "command_name": "getMore", - "database_name": "change-stream-tests" - } - }, - { - "command_started_event": { - "command": { - "aggregate": "test", - "cursor": {}, - "pipeline": [ - { - "$changeStream": {} - } - ] - }, - "command_name": "aggregate", - "database_name": "change-stream-tests" - } - } - ], - "result": { - "success": [ - { - "_id": "42", - "documentKey": "42", - "operationType": "insert", - "ns": { - "db": "change-stream-tests", - "coll": "test" - }, - "fullDocument": { - "x": { - "$numberInt": "1" - } - } - } - ] - } - }, - { - "description": "change stream resumes after FailedToSatisfyReadPreference", - "minServerVersion": "4.3.1", - "failPoint": { - "configureFailPoint": "failGetMoreAfterCursorCheckout", - "mode": { - "times": 1 - }, - "data": { - "errorCode": 133, - "closeConnection": false - } - }, - "target": "collection", - "topology": [ - "replicaset", - "sharded", - "load-balanced" - ], - "changeStreamPipeline": [], - "changeStreamOptions": {}, - "operations": [ - { - "database": "change-stream-tests", - "collection": "test", - "name": "insertOne", - "arguments": { - "document": { - "x": 1 - } - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": "test", - "cursor": {}, - "pipeline": [ - { - "$changeStream": {} - } - ] - }, - "command_name": "aggregate", - "database_name": "change-stream-tests" - } - }, - { - 
"command_started_event": { - "command": { - "getMore": 42, - "collection": "test" - }, - "command_name": "getMore", - "database_name": "change-stream-tests" - } - }, - { - "command_started_event": { - "command": { - "aggregate": "test", - "cursor": {}, - "pipeline": [ - { - "$changeStream": {} - } - ] - }, - "command_name": "aggregate", - "database_name": "change-stream-tests" - } - } - ], - "result": { - "success": [ - { - "_id": "42", - "documentKey": "42", - "operationType": "insert", - "ns": { - "db": "change-stream-tests", - "coll": "test" - }, - "fullDocument": { - "x": { - "$numberInt": "1" - } - } - } - ] - } - }, - { - "description": "change stream resumes if error contains ResumableChangeStreamError", - "minServerVersion": "4.3.1", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "getMore" - ], - "errorCode": 50, - "closeConnection": false, - "errorLabels": [ - "ResumableChangeStreamError" - ] - } - }, - "target": "collection", - "topology": [ - "replicaset", - "sharded", - "load-balanced" - ], - "changeStreamPipeline": [], - "changeStreamOptions": {}, - "operations": [ - { - "database": "change-stream-tests", - "collection": "test", - "name": "insertOne", - "arguments": { - "document": { - "x": 1 - } - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": "test", - "cursor": {}, - "pipeline": [ - { - "$changeStream": {} - } - ] - }, - "command_name": "aggregate", - "database_name": "change-stream-tests" - } - }, - { - "command_started_event": { - "command": { - "getMore": 42, - "collection": "test" - }, - "command_name": "getMore", - "database_name": "change-stream-tests" - } - }, - { - "command_started_event": { - "command": { - "aggregate": "test", - "cursor": {}, - "pipeline": [ - { - "$changeStream": {} - } - ] - }, - "command_name": "aggregate", - "database_name": "change-stream-tests" - } - } - ], - "result": { - "success": [ - { - "_id": "42", - "documentKey": "42", - "operationType": "insert", - "ns": { - "db": "change-stream-tests", - "coll": "test" - }, - "fullDocument": { - "x": { - "$numberInt": "1" - } - } - } - ] - } - }, - { - "description": "change stream does not resume if error does not contain ResumableChangeStreamError", - "minServerVersion": "4.3.1", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "getMore" - ], - "errorCode": 6, - "closeConnection": false - } - }, - "target": "collection", - "topology": [ - "replicaset", - "sharded", - "load-balanced" - ], - "changeStreamPipeline": [], - "changeStreamOptions": {}, - "operations": [ - { - "database": "change-stream-tests", - "collection": "test", - "name": "insertOne", - "arguments": { - "document": { - "x": 1 - } - } - } - ], - "result": { - "error": { - "code": 6 - } - } - } - ] -} diff --git a/test/change_streams/legacy/change-streams.json b/test/change_streams/legacy/change-streams.json deleted file mode 100644 index 54b76af0a3..0000000000 --- a/test/change_streams/legacy/change-streams.json +++ /dev/null @@ -1,795 +0,0 @@ -{ - "collection_name": "test", - "database_name": "change-stream-tests", - "collection2_name": "test2", - "database2_name": "change-stream-tests-2", - "tests": [ - { - "description": "$changeStream must be the first stage in a change stream pipeline sent to the server", - "minServerVersion": "3.6.0", - "target": "collection", - "topology": [ - "replicaset" - ], - "changeStreamPipeline": [], - 
"changeStreamOptions": {}, - "operations": [ - { - "database": "change-stream-tests", - "collection": "test", - "name": "insertOne", - "arguments": { - "document": { - "x": 1 - } - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": "test", - "cursor": {}, - "pipeline": [ - { - "$changeStream": {} - } - ] - }, - "command_name": "aggregate", - "database_name": "change-stream-tests" - } - } - ], - "result": { - "success": [ - { - "_id": "42", - "documentKey": "42", - "operationType": "insert", - "ns": { - "db": "change-stream-tests", - "coll": "test" - }, - "fullDocument": { - "x": { - "$numberInt": "1" - } - } - } - ] - } - }, - { - "description": "The server returns change stream responses in the specified server response format", - "minServerVersion": "3.6.0", - "target": "collection", - "topology": [ - "replicaset" - ], - "changeStreamPipeline": [], - "changeStreamOptions": {}, - "operations": [ - { - "database": "change-stream-tests", - "collection": "test", - "name": "insertOne", - "arguments": { - "document": { - "x": 1 - } - } - } - ], - "expectations": null, - "result": { - "success": [ - { - "_id": "42", - "documentKey": "42", - "operationType": "insert", - "ns": { - "db": "change-stream-tests", - "coll": "test" - }, - "fullDocument": { - "x": { - "$numberInt": "1" - } - } - } - ] - } - }, - { - "description": "Executing a watch helper on a Collection results in notifications for changes to the specified collection", - "minServerVersion": "3.6.0", - "target": "collection", - "topology": [ - "replicaset" - ], - "changeStreamPipeline": [], - "changeStreamOptions": {}, - "operations": [ - { - "database": "change-stream-tests", - "collection": "test2", - "name": "insertOne", - "arguments": { - "document": { - "x": 1 - } - } - }, - { - "database": "change-stream-tests-2", - "collection": "test", - "name": "insertOne", - "arguments": { - "document": { - "y": 2 - } - } - }, - { - "database": "change-stream-tests", - "collection": "test", - "name": "insertOne", - "arguments": { - "document": { - "z": 3 - } - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": "test", - "cursor": {}, - "pipeline": [ - { - "$changeStream": {} - } - ] - }, - "command_name": "aggregate", - "database_name": "change-stream-tests" - } - } - ], - "result": { - "success": [ - { - "operationType": "insert", - "ns": { - "db": "change-stream-tests", - "coll": "test" - }, - "fullDocument": { - "z": { - "$numberInt": "3" - } - } - } - ] - } - }, - { - "description": "Change Stream should allow valid aggregate pipeline stages", - "minServerVersion": "3.6.0", - "target": "collection", - "topology": [ - "replicaset" - ], - "changeStreamPipeline": [ - { - "$match": { - "fullDocument.z": 3 - } - } - ], - "changeStreamOptions": {}, - "operations": [ - { - "database": "change-stream-tests", - "collection": "test", - "name": "insertOne", - "arguments": { - "document": { - "y": 2 - } - } - }, - { - "database": "change-stream-tests", - "collection": "test", - "name": "insertOne", - "arguments": { - "document": { - "z": 3 - } - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": "test", - "cursor": {}, - "pipeline": [ - { - "$changeStream": {} - }, - { - "$match": { - "fullDocument.z": { - "$numberInt": "3" - } - } - } - ] - }, - "command_name": "aggregate", - "database_name": "change-stream-tests" - } - } - ], - "result": { - "success": [ - { - "operationType": "insert", - "ns": { - "db": 
"change-stream-tests", - "coll": "test" - }, - "fullDocument": { - "z": { - "$numberInt": "3" - } - } - } - ] - } - }, - { - "description": "Executing a watch helper on a Database results in notifications for changes to all collections in the specified database.", - "minServerVersion": "3.8.0", - "target": "database", - "topology": [ - "replicaset" - ], - "changeStreamPipeline": [], - "changeStreamOptions": {}, - "operations": [ - { - "database": "change-stream-tests", - "collection": "test2", - "name": "insertOne", - "arguments": { - "document": { - "x": 1 - } - } - }, - { - "database": "change-stream-tests-2", - "collection": "test", - "name": "insertOne", - "arguments": { - "document": { - "y": 2 - } - } - }, - { - "database": "change-stream-tests", - "collection": "test", - "name": "insertOne", - "arguments": { - "document": { - "z": 3 - } - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": { - "$numberInt": "1" - }, - "cursor": {}, - "pipeline": [ - { - "$changeStream": {} - } - ] - }, - "command_name": "aggregate", - "database_name": "change-stream-tests" - } - } - ], - "result": { - "success": [ - { - "operationType": "insert", - "ns": { - "db": "change-stream-tests", - "coll": "test2" - }, - "fullDocument": { - "x": { - "$numberInt": "1" - } - } - }, - { - "operationType": "insert", - "ns": { - "db": "change-stream-tests", - "coll": "test" - }, - "fullDocument": { - "z": { - "$numberInt": "3" - } - } - } - ] - } - }, - { - "description": "Executing a watch helper on a MongoClient results in notifications for changes to all collections in all databases in the cluster.", - "minServerVersion": "3.8.0", - "target": "client", - "topology": [ - "replicaset" - ], - "changeStreamPipeline": [], - "changeStreamOptions": {}, - "operations": [ - { - "database": "change-stream-tests", - "collection": "test2", - "name": "insertOne", - "arguments": { - "document": { - "x": 1 - } - } - }, - { - "database": "change-stream-tests-2", - "collection": "test", - "name": "insertOne", - "arguments": { - "document": { - "y": 2 - } - } - }, - { - "database": "change-stream-tests", - "collection": "test", - "name": "insertOne", - "arguments": { - "document": { - "z": 3 - } - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": { - "$numberInt": "1" - }, - "cursor": {}, - "pipeline": [ - { - "$changeStream": { - "allChangesForCluster": true - } - } - ] - }, - "command_name": "aggregate", - "database_name": "admin" - } - } - ], - "result": { - "success": [ - { - "operationType": "insert", - "ns": { - "db": "change-stream-tests", - "coll": "test2" - }, - "fullDocument": { - "x": { - "$numberInt": "1" - } - } - }, - { - "operationType": "insert", - "ns": { - "db": "change-stream-tests-2", - "coll": "test" - }, - "fullDocument": { - "y": { - "$numberInt": "2" - } - } - }, - { - "operationType": "insert", - "ns": { - "db": "change-stream-tests", - "coll": "test" - }, - "fullDocument": { - "z": { - "$numberInt": "3" - } - } - } - ] - } - }, - { - "description": "Test insert, update, replace, and delete event types", - "minServerVersion": "3.6.0", - "target": "collection", - "topology": [ - "replicaset" - ], - "changeStreamPipeline": [], - "changeStreamOptions": {}, - "operations": [ - { - "database": "change-stream-tests", - "collection": "test", - "name": "insertOne", - "arguments": { - "document": { - "x": 1 - } - } - }, - { - "database": "change-stream-tests", - "collection": "test", - "name": "updateOne", - "arguments": { - 
"filter": { - "x": 1 - }, - "update": { - "$set": { - "x": 2 - } - } - } - }, - { - "database": "change-stream-tests", - "collection": "test", - "name": "replaceOne", - "arguments": { - "filter": { - "x": 2 - }, - "replacement": { - "x": 3 - } - } - }, - { - "database": "change-stream-tests", - "collection": "test", - "name": "deleteOne", - "arguments": { - "filter": { - "x": 3 - } - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": "test", - "cursor": {}, - "pipeline": [ - { - "$changeStream": {} - } - ] - }, - "command_name": "aggregate", - "database_name": "change-stream-tests" - } - } - ], - "result": { - "success": [ - { - "operationType": "insert", - "ns": { - "db": "change-stream-tests", - "coll": "test" - }, - "fullDocument": { - "x": { - "$numberInt": "1" - } - } - }, - { - "operationType": "update", - "ns": { - "db": "change-stream-tests", - "coll": "test" - }, - "updateDescription": { - "updatedFields": { - "x": { - "$numberInt": "2" - } - } - } - }, - { - "operationType": "replace", - "ns": { - "db": "change-stream-tests", - "coll": "test" - }, - "fullDocument": { - "x": { - "$numberInt": "3" - } - } - }, - { - "operationType": "delete", - "ns": { - "db": "change-stream-tests", - "coll": "test" - } - } - ] - } - }, - { - "description": "Test rename and invalidate event types", - "minServerVersion": "4.0.1", - "target": "collection", - "topology": [ - "replicaset" - ], - "changeStreamPipeline": [], - "changeStreamOptions": {}, - "operations": [ - { - "database": "change-stream-tests", - "collection": "test", - "name": "rename", - "arguments": { - "to": "test2" - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": "test", - "cursor": {}, - "pipeline": [ - { - "$changeStream": {} - } - ] - }, - "command_name": "aggregate", - "database_name": "change-stream-tests" - } - } - ], - "result": { - "success": [ - { - "operationType": "rename", - "ns": { - "db": "change-stream-tests", - "coll": "test" - }, - "to": { - "db": "change-stream-tests", - "coll": "test2" - } - }, - { - "operationType": "invalidate" - } - ] - } - }, - { - "description": "Test drop and invalidate event types", - "minServerVersion": "4.0.1", - "target": "collection", - "topology": [ - "replicaset" - ], - "changeStreamPipeline": [], - "changeStreamOptions": {}, - "operations": [ - { - "database": "change-stream-tests", - "collection": "test", - "name": "drop" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": "test", - "cursor": {}, - "pipeline": [ - { - "$changeStream": {} - } - ] - }, - "command_name": "aggregate", - "database_name": "change-stream-tests" - } - } - ], - "result": { - "success": [ - { - "operationType": "drop", - "ns": { - "db": "change-stream-tests", - "coll": "test" - } - }, - { - "operationType": "invalidate" - } - ] - } - }, - { - "description": "Test consecutive resume", - "minServerVersion": "4.1.7", - "target": "collection", - "topology": [ - "replicaset" - ], - "changeStreamPipeline": [], - "changeStreamOptions": { - "batchSize": 1 - }, - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 2 - }, - "data": { - "failCommands": [ - "getMore" - ], - "closeConnection": true - } - }, - "operations": [ - { - "database": "change-stream-tests", - "collection": "test", - "name": "insertOne", - "arguments": { - "document": { - "x": 1 - } - } - }, - { - "database": "change-stream-tests", - "collection": "test", - "name": "insertOne", - "arguments": { - 
"document": { - "x": 2 - } - } - }, - { - "database": "change-stream-tests", - "collection": "test", - "name": "insertOne", - "arguments": { - "document": { - "x": 3 - } - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": "test", - "cursor": { - "batchSize": 1 - }, - "pipeline": [ - { - "$changeStream": {} - } - ] - }, - "command_name": "aggregate", - "database_name": "change-stream-tests" - } - } - ], - "result": { - "success": [ - { - "operationType": "insert", - "ns": { - "db": "change-stream-tests", - "coll": "test" - }, - "fullDocument": { - "x": { - "$numberInt": "1" - } - } - }, - { - "operationType": "insert", - "ns": { - "db": "change-stream-tests", - "coll": "test" - }, - "fullDocument": { - "x": { - "$numberInt": "2" - } - } - }, - { - "operationType": "insert", - "ns": { - "db": "change-stream-tests", - "coll": "test" - }, - "fullDocument": { - "x": { - "$numberInt": "3" - } - } - } - ] - } - } - ] -} diff --git a/test/change_streams/unified/change-streams-errors.json b/test/change_streams/unified/change-streams-errors.json new file mode 100644 index 0000000000..4a413fce84 --- /dev/null +++ b/test/change_streams/unified/change-streams-errors.json @@ -0,0 +1,246 @@ +{ + "description": "change-streams-errors", + "schemaVersion": "1.7", + "runOnRequirements": [ + { + "serverless": "forbid" + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ], + "useMultipleMongoses": false + } + }, + { + "client": { + "id": "globalClient", + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "database0" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "collection0" + } + }, + { + "database": { + "id": "globalDatabase0", + "client": "globalClient", + "databaseName": "database0" + } + }, + { + "collection": { + "id": "globalCollection0", + "database": "globalDatabase0", + "collectionName": "collection0" + } + } + ], + "initialData": [ + { + "collectionName": "collection0", + "databaseName": "database0", + "documents": [] + } + ], + "tests": [ + { + "description": "The watch helper must not throw a custom exception when executed against a single server topology, but instead depend on a server error", + "runOnRequirements": [ + { + "minServerVersion": "3.6.0", + "topologies": [ + "single" + ] + } + ], + "operations": [ + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [] + }, + "expectError": { + "errorCode": 40573 + } + } + ] + }, + { + "description": "Change Stream should error when an invalid aggregation stage is passed in", + "runOnRequirements": [ + { + "minServerVersion": "3.6.0", + "topologies": [ + "replicaset" + ] + } + ], + "operations": [ + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [ + { + "$unsupported": "foo" + } + ] + }, + "expectError": { + "errorCode": 40324 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "ignoreExtraEvents": true, + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + }, + { + "$unsupported": "foo" + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + } + ] + } + ] + }, + { + "description": "Change Stream should error when _id is projected out", + 
"runOnRequirements": [ + { + "minServerVersion": "4.1.11", + "topologies": [ + "replicaset", + "sharded-replicaset", + "load-balanced" + ] + } + ], + "operations": [ + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [ + { + "$project": { + "_id": 0 + } + } + ] + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "insertOne", + "object": "globalCollection0", + "arguments": { + "document": { + "z": 3 + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectError": { + "errorCode": 280 + } + } + ] + }, + { + "description": "change stream errors on ElectionInProgress", + "runOnRequirements": [ + { + "minServerVersion": "4.4", + "topologies": [ + "replicaset", + "sharded-replicaset", + "load-balanced" + ] + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "globalClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "getMore" + ], + "errorCode": 216, + "closeConnection": false + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [] + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "insertOne", + "object": "globalCollection0", + "arguments": { + "document": { + "z": 3 + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectError": { + "errorCode": 216 + } + } + ] + } + ] +} diff --git a/test/change_streams/unified/change-streams-resume-allowlist.json b/test/change_streams/unified/change-streams-resume-allowlist.json new file mode 100644 index 0000000000..b4953ec736 --- /dev/null +++ b/test/change_streams/unified/change-streams-resume-allowlist.json @@ -0,0 +1,2348 @@ +{ + "description": "change-streams-resume-allowlist", + "schemaVersion": "1.7", + "runOnRequirements": [ + { + "minServerVersion": "3.6", + "topologies": [ + "replicaset", + "sharded-replicaset", + "load-balanced" + ], + "serverless": "forbid" + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ], + "useMultipleMongoses": false + } + }, + { + "client": { + "id": "globalClient", + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "database0" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "collection0" + } + }, + { + "database": { + "id": "globalDatabase0", + "client": "globalClient", + "databaseName": "database0" + } + }, + { + "collection": { + "id": "globalCollection0", + "database": "globalDatabase0", + "collectionName": "collection0" + } + } + ], + "tests": [ + { + "description": "change stream resumes after a network error", + "runOnRequirements": [ + { + "minServerVersion": "4.2" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "globalClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "getMore" + ], + "closeConnection": true + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [] + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "insertOne", + "object": "globalCollection0", + "arguments": { + "document": { + "x": 1 + } + } + }, + { + "name": 
"iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "_id": { + "$$exists": true + }, + "documentKey": { + "$$exists": true + }, + "operationType": "insert", + "ns": { + "db": "database0", + "coll": "collection0" + }, + "fullDocument": { + "x": 1, + "_id": { + "$$exists": true + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "ignoreExtraEvents": true, + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$exists": true + }, + "collection": "collection0" + }, + "commandName": "getMore", + "databaseName": "database0" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": { + "resumeAfter": { + "$$unsetOrMatches": { + "$$exists": true + } + } + } + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + } + ] + } + ] + }, + { + "description": "change stream resumes after HostUnreachable", + "runOnRequirements": [ + { + "minServerVersion": "4.2", + "maxServerVersion": "4.2.99" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "globalClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "getMore" + ], + "errorCode": 6, + "closeConnection": false + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [] + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "insertOne", + "object": "globalCollection0", + "arguments": { + "document": { + "x": 1 + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "_id": { + "$$exists": true + }, + "documentKey": { + "$$exists": true + }, + "operationType": "insert", + "ns": { + "db": "database0", + "coll": "collection0" + }, + "fullDocument": { + "x": 1, + "_id": { + "$$exists": true + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "ignoreExtraEvents": true, + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$exists": true + }, + "collection": "collection0" + }, + "commandName": "getMore", + "databaseName": "database0" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": { + "resumeAfter": { + "$$unsetOrMatches": { + "$$exists": true + } + } + } + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + } + ] + } + ] + }, + { + "description": "change stream resumes after HostNotFound", + "runOnRequirements": [ + { + "minServerVersion": "4.2", + "maxServerVersion": "4.2.99" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "globalClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "getMore" + ], + "errorCode": 7, + "closeConnection": false + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection0", + 
"arguments": { + "pipeline": [] + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "insertOne", + "object": "globalCollection0", + "arguments": { + "document": { + "x": 1 + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "_id": { + "$$exists": true + }, + "documentKey": { + "$$exists": true + }, + "operationType": "insert", + "ns": { + "db": "database0", + "coll": "collection0" + }, + "fullDocument": { + "x": 1, + "_id": { + "$$exists": true + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "ignoreExtraEvents": true, + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$exists": true + }, + "collection": "collection0" + }, + "commandName": "getMore", + "databaseName": "database0" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": { + "resumeAfter": { + "$$unsetOrMatches": { + "$$exists": true + } + } + } + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + } + ] + } + ] + }, + { + "description": "change stream resumes after NetworkTimeout", + "runOnRequirements": [ + { + "minServerVersion": "4.2", + "maxServerVersion": "4.2.99" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "globalClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "getMore" + ], + "errorCode": 89, + "closeConnection": false + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [] + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "insertOne", + "object": "globalCollection0", + "arguments": { + "document": { + "x": 1 + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "_id": { + "$$exists": true + }, + "documentKey": { + "$$exists": true + }, + "operationType": "insert", + "ns": { + "db": "database0", + "coll": "collection0" + }, + "fullDocument": { + "x": 1, + "_id": { + "$$exists": true + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "ignoreExtraEvents": true, + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$exists": true + }, + "collection": "collection0" + }, + "commandName": "getMore", + "databaseName": "database0" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": { + "resumeAfter": { + "$$unsetOrMatches": { + "$$exists": true + } + } + } + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + } + ] + } + ] + }, + { + "description": "change stream resumes after ShutdownInProgress", + "runOnRequirements": [ + { + "minServerVersion": "4.2", + "maxServerVersion": "4.2.99" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "globalClient", + "failPoint": { + "configureFailPoint": 
"failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "getMore" + ], + "errorCode": 91, + "closeConnection": false + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [] + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "insertOne", + "object": "globalCollection0", + "arguments": { + "document": { + "x": 1 + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "_id": { + "$$exists": true + }, + "documentKey": { + "$$exists": true + }, + "operationType": "insert", + "ns": { + "db": "database0", + "coll": "collection0" + }, + "fullDocument": { + "x": 1, + "_id": { + "$$exists": true + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "ignoreExtraEvents": true, + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$exists": true + }, + "collection": "collection0" + }, + "commandName": "getMore", + "databaseName": "database0" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": { + "resumeAfter": { + "$$unsetOrMatches": { + "$$exists": true + } + } + } + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + } + ] + } + ] + }, + { + "description": "change stream resumes after PrimarySteppedDown", + "runOnRequirements": [ + { + "minServerVersion": "4.2", + "maxServerVersion": "4.2.99" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "globalClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "getMore" + ], + "errorCode": 189, + "closeConnection": false + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [] + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "insertOne", + "object": "globalCollection0", + "arguments": { + "document": { + "x": 1 + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "_id": { + "$$exists": true + }, + "documentKey": { + "$$exists": true + }, + "operationType": "insert", + "ns": { + "db": "database0", + "coll": "collection0" + }, + "fullDocument": { + "x": 1, + "_id": { + "$$exists": true + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "ignoreExtraEvents": true, + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$exists": true + }, + "collection": "collection0" + }, + "commandName": "getMore", + "databaseName": "database0" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": { + "resumeAfter": { + "$$unsetOrMatches": { + "$$exists": true + } + } + } + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + } + ] + } + ] + }, + { + "description": "change stream resumes after ExceededTimeLimit", + "runOnRequirements": [ + { + 
"minServerVersion": "4.2", + "maxServerVersion": "4.2.99" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "globalClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "getMore" + ], + "errorCode": 262, + "closeConnection": false + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [] + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "insertOne", + "object": "globalCollection0", + "arguments": { + "document": { + "x": 1 + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "_id": { + "$$exists": true + }, + "documentKey": { + "$$exists": true + }, + "operationType": "insert", + "ns": { + "db": "database0", + "coll": "collection0" + }, + "fullDocument": { + "x": 1, + "_id": { + "$$exists": true + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "ignoreExtraEvents": true, + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$exists": true + }, + "collection": "collection0" + }, + "commandName": "getMore", + "databaseName": "database0" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": { + "resumeAfter": { + "$$unsetOrMatches": { + "$$exists": true + } + } + } + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + } + ] + } + ] + }, + { + "description": "change stream resumes after SocketException", + "runOnRequirements": [ + { + "minServerVersion": "4.2", + "maxServerVersion": "4.2.99" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "globalClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "getMore" + ], + "errorCode": 9001, + "closeConnection": false + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [] + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "insertOne", + "object": "globalCollection0", + "arguments": { + "document": { + "x": 1 + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "_id": { + "$$exists": true + }, + "documentKey": { + "$$exists": true + }, + "operationType": "insert", + "ns": { + "db": "database0", + "coll": "collection0" + }, + "fullDocument": { + "x": 1, + "_id": { + "$$exists": true + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "ignoreExtraEvents": true, + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$exists": true + }, + "collection": "collection0" + }, + "commandName": "getMore", + "databaseName": "database0" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": { + "resumeAfter": { + "$$unsetOrMatches": { + "$$exists": 
true + } + } + } + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + } + ] + } + ] + }, + { + "description": "change stream resumes after NotWritablePrimary", + "runOnRequirements": [ + { + "minServerVersion": "4.2", + "maxServerVersion": "4.2.99" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "globalClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "getMore" + ], + "errorCode": 10107, + "closeConnection": false + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [] + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "insertOne", + "object": "globalCollection0", + "arguments": { + "document": { + "x": 1 + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "_id": { + "$$exists": true + }, + "documentKey": { + "$$exists": true + }, + "operationType": "insert", + "ns": { + "db": "database0", + "coll": "collection0" + }, + "fullDocument": { + "x": 1, + "_id": { + "$$exists": true + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "ignoreExtraEvents": true, + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$exists": true + }, + "collection": "collection0" + }, + "commandName": "getMore", + "databaseName": "database0" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": { + "resumeAfter": { + "$$unsetOrMatches": { + "$$exists": true + } + } + } + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + } + ] + } + ] + }, + { + "description": "change stream resumes after InterruptedAtShutdown", + "runOnRequirements": [ + { + "minServerVersion": "4.2", + "maxServerVersion": "4.2.99" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "globalClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "getMore" + ], + "errorCode": 11600, + "closeConnection": false + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [] + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "insertOne", + "object": "globalCollection0", + "arguments": { + "document": { + "x": 1 + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "_id": { + "$$exists": true + }, + "documentKey": { + "$$exists": true + }, + "operationType": "insert", + "ns": { + "db": "database0", + "coll": "collection0" + }, + "fullDocument": { + "x": 1, + "_id": { + "$$exists": true + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "ignoreExtraEvents": true, + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$exists": true + }, + "collection": "collection0" + }, + "commandName": "getMore", + 
"databaseName": "database0" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": { + "resumeAfter": { + "$$unsetOrMatches": { + "$$exists": true + } + } + } + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + } + ] + } + ] + }, + { + "description": "change stream resumes after InterruptedDueToReplStateChange", + "runOnRequirements": [ + { + "minServerVersion": "4.2", + "maxServerVersion": "4.2.99" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "globalClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "getMore" + ], + "errorCode": 11602, + "closeConnection": false + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [] + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "insertOne", + "object": "globalCollection0", + "arguments": { + "document": { + "x": 1 + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "_id": { + "$$exists": true + }, + "documentKey": { + "$$exists": true + }, + "operationType": "insert", + "ns": { + "db": "database0", + "coll": "collection0" + }, + "fullDocument": { + "x": 1, + "_id": { + "$$exists": true + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "ignoreExtraEvents": true, + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$exists": true + }, + "collection": "collection0" + }, + "commandName": "getMore", + "databaseName": "database0" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": { + "resumeAfter": { + "$$unsetOrMatches": { + "$$exists": true + } + } + } + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + } + ] + } + ] + }, + { + "description": "change stream resumes after NotPrimaryNoSecondaryOk", + "runOnRequirements": [ + { + "minServerVersion": "4.2", + "maxServerVersion": "4.2.99" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "globalClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "getMore" + ], + "errorCode": 13435, + "closeConnection": false + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [] + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "insertOne", + "object": "globalCollection0", + "arguments": { + "document": { + "x": 1 + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "_id": { + "$$exists": true + }, + "documentKey": { + "$$exists": true + }, + "operationType": "insert", + "ns": { + "db": "database0", + "coll": "collection0" + }, + "fullDocument": { + "x": 1, + "_id": { + "$$exists": true + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "ignoreExtraEvents": true, + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + 
"$changeStream": {} + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$exists": true + }, + "collection": "collection0" + }, + "commandName": "getMore", + "databaseName": "database0" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": { + "resumeAfter": { + "$$unsetOrMatches": { + "$$exists": true + } + } + } + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + } + ] + } + ] + }, + { + "description": "change stream resumes after NotPrimaryOrSecondary", + "runOnRequirements": [ + { + "minServerVersion": "4.2", + "maxServerVersion": "4.2.99" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "globalClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "getMore" + ], + "errorCode": 13436, + "closeConnection": false + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [] + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "insertOne", + "object": "globalCollection0", + "arguments": { + "document": { + "x": 1 + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "_id": { + "$$exists": true + }, + "documentKey": { + "$$exists": true + }, + "operationType": "insert", + "ns": { + "db": "database0", + "coll": "collection0" + }, + "fullDocument": { + "x": 1, + "_id": { + "$$exists": true + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "ignoreExtraEvents": true, + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$exists": true + }, + "collection": "collection0" + }, + "commandName": "getMore", + "databaseName": "database0" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": { + "resumeAfter": { + "$$unsetOrMatches": { + "$$exists": true + } + } + } + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + } + ] + } + ] + }, + { + "description": "change stream resumes after StaleShardVersion", + "runOnRequirements": [ + { + "minServerVersion": "4.2", + "maxServerVersion": "4.2.99" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "globalClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "getMore" + ], + "errorCode": 63, + "closeConnection": false + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [] + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "insertOne", + "object": "globalCollection0", + "arguments": { + "document": { + "x": 1 + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "_id": { + "$$exists": true + }, + "documentKey": { + "$$exists": true + }, + "operationType": "insert", + "ns": { + "db": "database0", + "coll": "collection0" + }, + "fullDocument": { + "x": 1, + "_id": { + "$$exists": true + } 
+ } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "ignoreExtraEvents": true, + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$exists": true + }, + "collection": "collection0" + }, + "commandName": "getMore", + "databaseName": "database0" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": { + "resumeAfter": { + "$$unsetOrMatches": { + "$$exists": true + } + } + } + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + } + ] + } + ] + }, + { + "description": "change stream resumes after StaleEpoch", + "runOnRequirements": [ + { + "minServerVersion": "4.2", + "maxServerVersion": "4.2.99" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "globalClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "getMore" + ], + "errorCode": 150, + "closeConnection": false + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [] + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "insertOne", + "object": "globalCollection0", + "arguments": { + "document": { + "x": 1 + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "_id": { + "$$exists": true + }, + "documentKey": { + "$$exists": true + }, + "operationType": "insert", + "ns": { + "db": "database0", + "coll": "collection0" + }, + "fullDocument": { + "x": 1, + "_id": { + "$$exists": true + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "ignoreExtraEvents": true, + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$exists": true + }, + "collection": "collection0" + }, + "commandName": "getMore", + "databaseName": "database0" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": { + "resumeAfter": { + "$$unsetOrMatches": { + "$$exists": true + } + } + } + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + } + ] + } + ] + }, + { + "description": "change stream resumes after RetryChangeStream", + "runOnRequirements": [ + { + "minServerVersion": "4.2", + "maxServerVersion": "4.2.99" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "globalClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "getMore" + ], + "errorCode": 234, + "closeConnection": false + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [] + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "insertOne", + "object": "globalCollection0", + "arguments": { + "document": { + "x": 1 + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "_id": { + 
"$$exists": true + }, + "documentKey": { + "$$exists": true + }, + "operationType": "insert", + "ns": { + "db": "database0", + "coll": "collection0" + }, + "fullDocument": { + "x": 1, + "_id": { + "$$exists": true + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "ignoreExtraEvents": true, + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$exists": true + }, + "collection": "collection0" + }, + "commandName": "getMore", + "databaseName": "database0" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": { + "resumeAfter": { + "$$unsetOrMatches": { + "$$exists": true + } + } + } + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + } + ] + } + ] + }, + { + "description": "change stream resumes after FailedToSatisfyReadPreference", + "runOnRequirements": [ + { + "minServerVersion": "4.2", + "maxServerVersion": "4.2.99" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "globalClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "getMore" + ], + "errorCode": 133, + "closeConnection": false + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [] + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "insertOne", + "object": "globalCollection0", + "arguments": { + "document": { + "x": 1 + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "_id": { + "$$exists": true + }, + "documentKey": { + "$$exists": true + }, + "operationType": "insert", + "ns": { + "db": "database0", + "coll": "collection0" + }, + "fullDocument": { + "x": 1, + "_id": { + "$$exists": true + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "ignoreExtraEvents": true, + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$exists": true + }, + "collection": "collection0" + }, + "commandName": "getMore", + "databaseName": "database0" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": { + "resumeAfter": { + "$$unsetOrMatches": { + "$$exists": true + } + } + } + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + } + ] + } + ] + }, + { + "description": "change stream resumes after CursorNotFound", + "runOnRequirements": [ + { + "minServerVersion": "4.2" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "globalClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "getMore" + ], + "errorCode": 43, + "closeConnection": false + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [] + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "insertOne", + 
"object": "globalCollection0", + "arguments": { + "document": { + "x": 1 + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "_id": { + "$$exists": true + }, + "documentKey": { + "$$exists": true + }, + "operationType": "insert", + "ns": { + "db": "database0", + "coll": "collection0" + }, + "fullDocument": { + "x": 1, + "_id": { + "$$exists": true + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "ignoreExtraEvents": true, + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$exists": true + }, + "collection": "collection0" + }, + "commandName": "getMore", + "databaseName": "database0" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": { + "resumeAfter": { + "$$unsetOrMatches": { + "$$exists": true + } + } + } + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + } + ] + } + ] + } + ] +} diff --git a/test/change_streams/unified/change-streams-resume-errorLabels.json b/test/change_streams/unified/change-streams-resume-errorLabels.json new file mode 100644 index 0000000000..c156b550ce --- /dev/null +++ b/test/change_streams/unified/change-streams-resume-errorLabels.json @@ -0,0 +1,2125 @@ +{ + "description": "change-streams-resume-errorlabels", + "schemaVersion": "1.7", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1", + "topologies": [ + "replicaset", + "sharded-replicaset", + "load-balanced" + ], + "serverless": "forbid" + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ], + "useMultipleMongoses": false + } + }, + { + "client": { + "id": "globalClient", + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "database0" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "collection0" + } + }, + { + "database": { + "id": "globalDatabase0", + "client": "globalClient", + "databaseName": "database0" + } + }, + { + "collection": { + "id": "globalCollection0", + "database": "globalDatabase0", + "collectionName": "collection0" + } + } + ], + "tests": [ + { + "description": "change stream resumes after HostUnreachable", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "globalClient", + "failPoint": { + "configureFailPoint": "failGetMoreAfterCursorCheckout", + "mode": { + "times": 1 + }, + "data": { + "errorCode": 6, + "closeConnection": false + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [] + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "insertOne", + "object": "globalCollection0", + "arguments": { + "document": { + "x": 1 + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "_id": { + "$$exists": true + }, + "documentKey": { + "$$exists": true + }, + "operationType": "insert", + "ns": { + "db": "database0", + "coll": "collection0" + }, + "fullDocument": { + "x": 1, + "_id": { + "$$exists": true + } + } + } + } + ], + "expectEvents": [ + { + 
"client": "client0", + "ignoreExtraEvents": true, + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$exists": true + }, + "collection": "collection0" + }, + "commandName": "getMore", + "databaseName": "database0" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": { + "resumeAfter": { + "$$unsetOrMatches": { + "$$exists": true + } + } + } + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + } + ] + } + ] + }, + { + "description": "change stream resumes after HostNotFound", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "globalClient", + "failPoint": { + "configureFailPoint": "failGetMoreAfterCursorCheckout", + "mode": { + "times": 1 + }, + "data": { + "errorCode": 7, + "closeConnection": false + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [] + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "insertOne", + "object": "globalCollection0", + "arguments": { + "document": { + "x": 1 + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "_id": { + "$$exists": true + }, + "documentKey": { + "$$exists": true + }, + "operationType": "insert", + "ns": { + "db": "database0", + "coll": "collection0" + }, + "fullDocument": { + "x": 1, + "_id": { + "$$exists": true + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "ignoreExtraEvents": true, + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$exists": true + }, + "collection": "collection0" + }, + "commandName": "getMore", + "databaseName": "database0" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": { + "resumeAfter": { + "$$unsetOrMatches": { + "$$exists": true + } + } + } + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + } + ] + } + ] + }, + { + "description": "change stream resumes after NetworkTimeout", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "globalClient", + "failPoint": { + "configureFailPoint": "failGetMoreAfterCursorCheckout", + "mode": { + "times": 1 + }, + "data": { + "errorCode": 89, + "closeConnection": false + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [] + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "insertOne", + "object": "globalCollection0", + "arguments": { + "document": { + "x": 1 + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "_id": { + "$$exists": true + }, + "documentKey": { + "$$exists": true + }, + "operationType": "insert", + "ns": { + "db": "database0", + "coll": "collection0" + }, + "fullDocument": { + "x": 1, + "_id": { + "$$exists": true + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + 
"ignoreExtraEvents": true, + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$exists": true + }, + "collection": "collection0" + }, + "commandName": "getMore", + "databaseName": "database0" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": { + "resumeAfter": { + "$$unsetOrMatches": { + "$$exists": true + } + } + } + } + ] + }, + "commandName": "aggregate" + } + } + ] + } + ] + }, + { + "description": "change stream resumes after ShutdownInProgress", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "globalClient", + "failPoint": { + "configureFailPoint": "failGetMoreAfterCursorCheckout", + "mode": { + "times": 1 + }, + "data": { + "errorCode": 91, + "closeConnection": false + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [] + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "insertOne", + "object": "globalCollection0", + "arguments": { + "document": { + "x": 1 + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "_id": { + "$$exists": true + }, + "documentKey": { + "$$exists": true + }, + "operationType": "insert", + "ns": { + "db": "database0", + "coll": "collection0" + }, + "fullDocument": { + "x": 1, + "_id": { + "$$exists": true + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "ignoreExtraEvents": true, + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$exists": true + }, + "collection": "collection0" + }, + "commandName": "getMore", + "databaseName": "database0" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": { + "resumeAfter": { + "$$unsetOrMatches": { + "$$exists": true + } + } + } + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + } + ] + } + ] + }, + { + "description": "change stream resumes after PrimarySteppedDown", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "globalClient", + "failPoint": { + "configureFailPoint": "failGetMoreAfterCursorCheckout", + "mode": { + "times": 1 + }, + "data": { + "errorCode": 189, + "closeConnection": false + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [] + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "insertOne", + "object": "globalCollection0", + "arguments": { + "document": { + "x": 1 + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "_id": { + "$$exists": true + }, + "documentKey": { + "$$exists": true + }, + "operationType": "insert", + "ns": { + "db": "database0", + "coll": "collection0" + }, + "fullDocument": { + "x": 1, + "_id": { + "$$exists": true + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "ignoreExtraEvents": true, + "events": [ + { + 
"commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$exists": true + }, + "collection": "collection0" + }, + "commandName": "getMore", + "databaseName": "database0" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": { + "resumeAfter": { + "$$unsetOrMatches": { + "$$exists": true + } + } + } + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + } + ] + } + ] + }, + { + "description": "change stream resumes after ExceededTimeLimit", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "globalClient", + "failPoint": { + "configureFailPoint": "failGetMoreAfterCursorCheckout", + "mode": { + "times": 1 + }, + "data": { + "errorCode": 262, + "closeConnection": false + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [] + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "insertOne", + "object": "globalCollection0", + "arguments": { + "document": { + "x": 1 + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "_id": { + "$$exists": true + }, + "documentKey": { + "$$exists": true + }, + "operationType": "insert", + "ns": { + "db": "database0", + "coll": "collection0" + }, + "fullDocument": { + "x": 1, + "_id": { + "$$exists": true + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "ignoreExtraEvents": true, + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$exists": true + }, + "collection": "collection0" + }, + "commandName": "getMore", + "databaseName": "database0" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": { + "resumeAfter": { + "$$unsetOrMatches": { + "$$exists": true + } + } + } + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + } + ] + } + ] + }, + { + "description": "change stream resumes after SocketException", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "globalClient", + "failPoint": { + "configureFailPoint": "failGetMoreAfterCursorCheckout", + "mode": { + "times": 1 + }, + "data": { + "errorCode": 9001, + "closeConnection": false + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [] + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "insertOne", + "object": "globalCollection0", + "arguments": { + "document": { + "x": 1 + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "_id": { + "$$exists": true + }, + "documentKey": { + "$$exists": true + }, + "operationType": "insert", + "ns": { + "db": "database0", + "coll": "collection0" + }, + "fullDocument": { + "x": 1, + "_id": { + "$$exists": true + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "ignoreExtraEvents": true, + "events": [ + { + 
"commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$exists": true + }, + "collection": "collection0" + }, + "commandName": "getMore", + "databaseName": "database0" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": { + "resumeAfter": { + "$$unsetOrMatches": { + "$$exists": true + } + } + } + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + } + ] + } + ] + }, + { + "description": "change stream resumes after NotWritablePrimary", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "globalClient", + "failPoint": { + "configureFailPoint": "failGetMoreAfterCursorCheckout", + "mode": { + "times": 1 + }, + "data": { + "errorCode": 10107, + "closeConnection": false + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [] + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "insertOne", + "object": "globalCollection0", + "arguments": { + "document": { + "x": 1 + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "_id": { + "$$exists": true + }, + "documentKey": { + "$$exists": true + }, + "operationType": "insert", + "ns": { + "db": "database0", + "coll": "collection0" + }, + "fullDocument": { + "x": 1, + "_id": { + "$$exists": true + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "ignoreExtraEvents": true, + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$exists": true + }, + "collection": "collection0" + }, + "commandName": "getMore", + "databaseName": "database0" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": { + "resumeAfter": { + "$$unsetOrMatches": { + "$$exists": true + } + } + } + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + } + ] + } + ] + }, + { + "description": "change stream resumes after InterruptedAtShutdown", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "globalClient", + "failPoint": { + "configureFailPoint": "failGetMoreAfterCursorCheckout", + "mode": { + "times": 1 + }, + "data": { + "errorCode": 11600, + "closeConnection": false + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [] + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "insertOne", + "object": "globalCollection0", + "arguments": { + "document": { + "x": 1 + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "_id": { + "$$exists": true + }, + "documentKey": { + "$$exists": true + }, + "operationType": "insert", + "ns": { + "db": "database0", + "coll": "collection0" + }, + "fullDocument": { + "x": 1, + "_id": { + "$$exists": true + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "ignoreExtraEvents": true, + "events": [ + { + 
"commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$exists": true + }, + "collection": "collection0" + }, + "commandName": "getMore", + "databaseName": "database0" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": { + "resumeAfter": { + "$$unsetOrMatches": { + "$$exists": true + } + } + } + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + } + ] + } + ] + }, + { + "description": "change stream resumes after InterruptedDueToReplStateChange", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "globalClient", + "failPoint": { + "configureFailPoint": "failGetMoreAfterCursorCheckout", + "mode": { + "times": 1 + }, + "data": { + "errorCode": 11602, + "closeConnection": false + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [] + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "insertOne", + "object": "globalCollection0", + "arguments": { + "document": { + "x": 1 + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "_id": { + "$$exists": true + }, + "documentKey": { + "$$exists": true + }, + "operationType": "insert", + "ns": { + "db": "database0", + "coll": "collection0" + }, + "fullDocument": { + "x": 1, + "_id": { + "$$exists": true + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "ignoreExtraEvents": true, + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$exists": true + }, + "collection": "collection0" + }, + "commandName": "getMore", + "databaseName": "database0" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": { + "resumeAfter": { + "$$unsetOrMatches": { + "$$exists": true + } + } + } + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + } + ] + } + ] + }, + { + "description": "change stream resumes after NotPrimaryNoSecondaryOk", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "globalClient", + "failPoint": { + "configureFailPoint": "failGetMoreAfterCursorCheckout", + "mode": { + "times": 1 + }, + "data": { + "errorCode": 13435, + "closeConnection": false + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [] + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "insertOne", + "object": "globalCollection0", + "arguments": { + "document": { + "x": 1 + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "_id": { + "$$exists": true + }, + "documentKey": { + "$$exists": true + }, + "operationType": "insert", + "ns": { + "db": "database0", + "coll": "collection0" + }, + "fullDocument": { + "x": 1, + "_id": { + "$$exists": true + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "ignoreExtraEvents": true, + "events": 
[ + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$exists": true + }, + "collection": "collection0" + }, + "commandName": "getMore", + "databaseName": "database0" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": { + "resumeAfter": { + "$$unsetOrMatches": { + "$$exists": true + } + } + } + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + } + ] + } + ] + }, + { + "description": "change stream resumes after NotPrimaryOrSecondary", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "globalClient", + "failPoint": { + "configureFailPoint": "failGetMoreAfterCursorCheckout", + "mode": { + "times": 1 + }, + "data": { + "errorCode": 13436, + "closeConnection": false + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [] + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "insertOne", + "object": "globalCollection0", + "arguments": { + "document": { + "x": 1 + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "_id": { + "$$exists": true + }, + "documentKey": { + "$$exists": true + }, + "operationType": "insert", + "ns": { + "db": "database0", + "coll": "collection0" + }, + "fullDocument": { + "x": 1, + "_id": { + "$$exists": true + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "ignoreExtraEvents": true, + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$exists": true + }, + "collection": "collection0" + }, + "commandName": "getMore", + "databaseName": "database0" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": { + "resumeAfter": { + "$$unsetOrMatches": { + "$$exists": true + } + } + } + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + } + ] + } + ] + }, + { + "description": "change stream resumes after StaleShardVersion", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "globalClient", + "failPoint": { + "configureFailPoint": "failGetMoreAfterCursorCheckout", + "mode": { + "times": 1 + }, + "data": { + "errorCode": 63, + "closeConnection": false + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [] + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "insertOne", + "object": "globalCollection0", + "arguments": { + "document": { + "x": 1 + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "_id": { + "$$exists": true + }, + "documentKey": { + "$$exists": true + }, + "operationType": "insert", + "ns": { + "db": "database0", + "coll": "collection0" + }, + "fullDocument": { + "x": 1, + "_id": { + "$$exists": true + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "ignoreExtraEvents": true, + "events": [ + { + 
"commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$exists": true + }, + "collection": "collection0" + }, + "commandName": "getMore", + "databaseName": "database0" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": { + "resumeAfter": { + "$$unsetOrMatches": { + "$$exists": true + } + } + } + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + } + ] + } + ] + }, + { + "description": "change stream resumes after StaleEpoch", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "globalClient", + "failPoint": { + "configureFailPoint": "failGetMoreAfterCursorCheckout", + "mode": { + "times": 1 + }, + "data": { + "errorCode": 150, + "closeConnection": false + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [] + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "insertOne", + "object": "globalCollection0", + "arguments": { + "document": { + "x": 1 + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "_id": { + "$$exists": true + }, + "documentKey": { + "$$exists": true + }, + "operationType": "insert", + "ns": { + "db": "database0", + "coll": "collection0" + }, + "fullDocument": { + "x": 1, + "_id": { + "$$exists": true + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "ignoreExtraEvents": true, + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$exists": true + }, + "collection": "collection0" + }, + "commandName": "getMore", + "databaseName": "database0" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": { + "resumeAfter": { + "$$unsetOrMatches": { + "$$exists": true + } + } + } + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + } + ] + } + ] + }, + { + "description": "change stream resumes after RetryChangeStream", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "globalClient", + "failPoint": { + "configureFailPoint": "failGetMoreAfterCursorCheckout", + "mode": { + "times": 1 + }, + "data": { + "errorCode": 234, + "closeConnection": false + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [] + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "insertOne", + "object": "globalCollection0", + "arguments": { + "document": { + "x": 1 + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "_id": { + "$$exists": true + }, + "documentKey": { + "$$exists": true + }, + "operationType": "insert", + "ns": { + "db": "database0", + "coll": "collection0" + }, + "fullDocument": { + "x": 1, + "_id": { + "$$exists": true + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "ignoreExtraEvents": true, + "events": [ + { + "commandStartedEvent": 
{ + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$exists": true + }, + "collection": "collection0" + }, + "commandName": "getMore", + "databaseName": "database0" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": { + "resumeAfter": { + "$$unsetOrMatches": { + "$$exists": true + } + } + } + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + } + ] + } + ] + }, + { + "description": "change stream resumes after FailedToSatisfyReadPreference", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "globalClient", + "failPoint": { + "configureFailPoint": "failGetMoreAfterCursorCheckout", + "mode": { + "times": 1 + }, + "data": { + "errorCode": 133, + "closeConnection": false + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [] + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "insertOne", + "object": "globalCollection0", + "arguments": { + "document": { + "x": 1 + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "_id": { + "$$exists": true + }, + "documentKey": { + "$$exists": true + }, + "operationType": "insert", + "ns": { + "db": "database0", + "coll": "collection0" + }, + "fullDocument": { + "x": 1, + "_id": { + "$$exists": true + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "ignoreExtraEvents": true, + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$exists": true + }, + "collection": "collection0" + }, + "commandName": "getMore", + "databaseName": "database0" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": { + "resumeAfter": { + "$$unsetOrMatches": { + "$$exists": true + } + } + } + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + } + ] + } + ] + }, + { + "description": "change stream resumes if error contains ResumableChangeStreamError", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "globalClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "getMore" + ], + "errorCode": 50, + "closeConnection": false, + "errorLabels": [ + "ResumableChangeStreamError" + ] + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [] + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "insertOne", + "object": "globalCollection0", + "arguments": { + "document": { + "x": 1 + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "_id": { + "$$exists": true + }, + "documentKey": { + "$$exists": true + }, + "operationType": "insert", + "ns": { + "db": "database0", + "coll": "collection0" + }, + "fullDocument": { + "x": 1, + "_id": { + "$$exists": true + } + } + } + } + ], + "expectEvents": [ + { + 
"client": "client0", + "ignoreExtraEvents": true, + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$exists": true + }, + "collection": "collection0" + }, + "commandName": "getMore", + "databaseName": "database0" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": { + "resumeAfter": { + "$$unsetOrMatches": { + "$$exists": true + } + } + } + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + } + ] + } + ] + }, + { + "description": "change stream does not resume if error does not contain ResumableChangeStreamError", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "globalClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "getMore" + ], + "errorCode": 6, + "closeConnection": false + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [] + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "insertOne", + "object": "globalCollection0", + "arguments": { + "document": { + "x": 1 + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectError": { + "errorCode": 6 + } + } + ] + } + ] +} diff --git a/test/change_streams/unified/change-streams.json b/test/change_streams/unified/change-streams.json index 8bc0c956cd..572d2d6e97 100644 --- a/test/change_streams/unified/change-streams.json +++ b/test/change_streams/unified/change-streams.json @@ -1,13 +1,14 @@ { "description": "change-streams", - "schemaVersion": "1.0", + "schemaVersion": "1.7", "runOnRequirements": [ { "minServerVersion": "3.6", "topologies": [ "replicaset", "sharded-replicaset" - ] + ], + "serverless": "forbid" } ], "createEntities": [ @@ -16,7 +17,17 @@ "id": "client0", "observeEvents": [ "commandStartedEvent" - ] + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ], + "useMultipleMongoses": false + } + }, + { + "client": { + "id": "globalClient", + "useMultipleMongoses": false } }, { @@ -32,6 +43,62 @@ "database": "database0", "collectionName": "collection0" } + }, + { + "database": { + "id": "database1", + "client": "client0", + "databaseName": "database1" + } + }, + { + "collection": { + "id": "collection1", + "database": "database1", + "collectionName": "collection1" + } + }, + { + "database": { + "id": "globalDatabase0", + "client": "globalClient", + "databaseName": "database0" + } + }, + { + "collection": { + "id": "globalCollection0", + "database": "globalDatabase0", + "collectionName": "collection0" + } + }, + { + "database": { + "id": "globalDatabase1", + "client": "globalClient", + "databaseName": "database1" + } + }, + { + "collection": { + "id": "globalCollection1", + "database": "globalDatabase1", + "collectionName": "collection1" + } + }, + { + "collection": { + "id": "globalDb1Collection0", + "database": "globalDatabase1", + "collectionName": "collection0" + } + }, + { + "collection": { + "id": "globalDb0Collection1", + "database": "globalDatabase0", + "collectionName": "collection1" + } } ], "initialData": [ @@ -557,7 +624,7 @@ "runOnRequirements": [ { "minServerVersion": "3.6", - "maxServerVersion": "5.2" + "maxServerVersion": 
"5.2.99" }, { "minServerVersion": "6.0" @@ -734,6 +801,1032 @@ } } ] + }, + { + "description": "$changeStream must be the first stage in a change stream pipeline sent to the server", + "runOnRequirements": [ + { + "minServerVersion": "3.6.0", + "topologies": [ + "replicaset" + ] + } + ], + "operations": [ + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [] + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "insertOne", + "object": "globalCollection0", + "arguments": { + "document": { + "x": 1 + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "_id": { + "$$exists": true + }, + "documentKey": { + "$$exists": true + }, + "operationType": "insert", + "ns": { + "db": "database0", + "coll": "collection0" + }, + "fullDocument": { + "x": 1, + "_id": { + "$$exists": true + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "ignoreExtraEvents": true, + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + } + ] + } + ] + }, + { + "description": "The server returns change stream responses in the specified server response format", + "runOnRequirements": [ + { + "minServerVersion": "3.6.0", + "topologies": [ + "replicaset" + ] + } + ], + "operations": [ + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [] + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "insertOne", + "object": "globalCollection0", + "arguments": { + "document": { + "x": 1 + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "_id": { + "$$exists": true + }, + "documentKey": { + "$$exists": true + }, + "operationType": "insert", + "ns": { + "db": "database0", + "coll": "collection0" + }, + "fullDocument": { + "x": 1, + "_id": { + "$$exists": true + } + } + } + } + ] + }, + { + "description": "Executing a watch helper on a Collection results in notifications for changes to the specified collection", + "runOnRequirements": [ + { + "minServerVersion": "3.6.0", + "topologies": [ + "replicaset" + ] + } + ], + "operations": [ + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [] + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "insertOne", + "object": "globalDb0Collection1", + "arguments": { + "document": { + "x": 1 + } + } + }, + { + "name": "insertOne", + "object": "globalDb1Collection0", + "arguments": { + "document": { + "y": 2 + } + } + }, + { + "name": "insertOne", + "object": "globalCollection0", + "arguments": { + "document": { + "z": 3 + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "operationType": "insert", + "ns": { + "db": "database0", + "coll": "collection0" + }, + "fullDocument": { + "z": 3, + "_id": { + "$$exists": true + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "ignoreExtraEvents": true, + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + } + ] + } + ] + }, + { + "description": "Change Stream should allow valid aggregate pipeline stages", + "runOnRequirements": [ + { + "minServerVersion": "3.6.0", + 
"topologies": [ + "replicaset" + ] + } + ], + "operations": [ + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [ + { + "$match": { + "fullDocument.z": 3 + } + } + ] + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "insertOne", + "object": "globalCollection0", + "arguments": { + "document": { + "y": 2 + } + } + }, + { + "name": "insertOne", + "object": "globalCollection0", + "arguments": { + "document": { + "z": 3 + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "operationType": "insert", + "ns": { + "db": "database0", + "coll": "collection0" + }, + "fullDocument": { + "z": 3, + "_id": { + "$$exists": true + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "ignoreExtraEvents": true, + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + }, + { + "$match": { + "fullDocument.z": 3 + } + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + } + ] + } + ] + }, + { + "description": "Executing a watch helper on a Database results in notifications for changes to all collections in the specified database.", + "runOnRequirements": [ + { + "minServerVersion": "3.8.0", + "topologies": [ + "replicaset" + ] + } + ], + "operations": [ + { + "name": "createChangeStream", + "object": "database0", + "arguments": { + "pipeline": [] + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "insertOne", + "object": "globalDb0Collection1", + "arguments": { + "document": { + "x": 1 + } + } + }, + { + "name": "insertOne", + "object": "globalDb1Collection0", + "arguments": { + "document": { + "y": 2 + } + } + }, + { + "name": "insertOne", + "object": "globalCollection0", + "arguments": { + "document": { + "z": 3 + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "operationType": "insert", + "ns": { + "db": "database0", + "coll": "collection1" + }, + "fullDocument": { + "x": 1, + "_id": { + "$$exists": true + } + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "operationType": "insert", + "ns": { + "db": "database0", + "coll": "collection0" + }, + "fullDocument": { + "z": 3, + "_id": { + "$$exists": true + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "ignoreExtraEvents": true, + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": 1, + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + } + ] + } + ] + }, + { + "description": "Executing a watch helper on a MongoClient results in notifications for changes to all collections in all databases in the cluster.", + "runOnRequirements": [ + { + "minServerVersion": "3.8.0", + "topologies": [ + "replicaset" + ] + } + ], + "operations": [ + { + "name": "createChangeStream", + "object": "client0", + "arguments": { + "pipeline": [] + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "insertOne", + "object": "globalDb0Collection1", + "arguments": { + "document": { + "x": 1 + } + } + }, + { + "name": "insertOne", + "object": "globalDb1Collection0", + "arguments": { + "document": { + "y": 2 + } + } + }, + { + "name": "insertOne", + "object": "globalCollection0", + "arguments": { + "document": { + "z": 3 + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": 
"changeStream0", + "expectResult": { + "operationType": "insert", + "ns": { + "db": "database0", + "coll": "collection1" + }, + "fullDocument": { + "x": 1, + "_id": { + "$$exists": true + } + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "operationType": "insert", + "ns": { + "db": "database1", + "coll": "collection0" + }, + "fullDocument": { + "y": 2, + "_id": { + "$$exists": true + } + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "operationType": "insert", + "ns": { + "db": "database0", + "coll": "collection0" + }, + "fullDocument": { + "z": 3, + "_id": { + "$$exists": true + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "ignoreExtraEvents": true, + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": 1, + "cursor": {}, + "pipeline": [ + { + "$changeStream": { + "allChangesForCluster": true + } + } + ] + }, + "commandName": "aggregate", + "databaseName": "admin" + } + } + ] + } + ] + }, + { + "description": "Test insert, update, replace, and delete event types", + "runOnRequirements": [ + { + "minServerVersion": "3.6.0", + "topologies": [ + "replicaset" + ] + } + ], + "operations": [ + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [] + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "insertOne", + "object": "globalCollection0", + "arguments": { + "document": { + "x": 1 + } + } + }, + { + "name": "updateOne", + "object": "globalCollection0", + "arguments": { + "filter": { + "x": 1 + }, + "update": { + "$set": { + "x": 2 + } + } + } + }, + { + "name": "replaceOne", + "object": "globalCollection0", + "arguments": { + "filter": { + "x": 2 + }, + "replacement": { + "x": 3 + } + } + }, + { + "name": "deleteOne", + "object": "globalCollection0", + "arguments": { + "filter": { + "x": 3 + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "operationType": "insert", + "ns": { + "db": "database0", + "coll": "collection0" + }, + "fullDocument": { + "x": 1, + "_id": { + "$$exists": true + } + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "operationType": "update", + "ns": { + "db": "database0", + "coll": "collection0" + }, + "updateDescription": { + "updatedFields": { + "x": 2 + }, + "removedFields": [], + "truncatedArrays": { + "$$unsetOrMatches": { + "$$exists": true + } + } + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "operationType": "replace", + "ns": { + "db": "database0", + "coll": "collection0" + }, + "fullDocument": { + "x": 3, + "_id": { + "$$exists": true + } + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "operationType": "delete", + "ns": { + "db": "database0", + "coll": "collection0" + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "ignoreExtraEvents": true, + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + } + ] + } + ] + }, + { + "description": "Test rename and invalidate event types", + "runOnRequirements": [ + { + "minServerVersion": "4.0.1", + "topologies": [ + "replicaset" + ] + } + ], + "operations": [ + { + "name": 
"createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [] + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "dropCollection", + "object": "database0", + "arguments": { + "collection": "collection1" + } + }, + { + "name": "rename", + "object": "globalCollection0", + "arguments": { + "to": "collection1" + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "operationType": "rename", + "ns": { + "db": "database0", + "coll": "collection0" + }, + "to": { + "db": "database0", + "coll": "collection1" + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "operationType": "invalidate" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "ignoreExtraEvents": true, + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + } + ] + } + ] + }, + { + "description": "Test drop and invalidate event types", + "runOnRequirements": [ + { + "minServerVersion": "4.0.1", + "topologies": [ + "replicaset" + ] + } + ], + "operations": [ + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [] + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "dropCollection", + "object": "database0", + "arguments": { + "collection": "collection0" + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "operationType": "drop", + "ns": { + "db": "database0", + "coll": "collection0" + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "operationType": "invalidate" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "ignoreExtraEvents": true, + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + } + ] + } + ] + }, + { + "description": "Test consecutive resume", + "runOnRequirements": [ + { + "minServerVersion": "4.1.7", + "topologies": [ + "replicaset" + ] + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "globalClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "getMore" + ], + "closeConnection": true + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [], + "batchSize": 1 + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "insertOne", + "object": "globalCollection0", + "arguments": { + "document": { + "x": 1 + } + } + }, + { + "name": "insertOne", + "object": "globalCollection0", + "arguments": { + "document": { + "x": 2 + } + } + }, + { + "name": "insertOne", + "object": "globalCollection0", + "arguments": { + "document": { + "x": 3 + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "operationType": "insert", + "ns": { + "db": "database0", + "coll": "collection0" + }, + "fullDocument": { + "x": 1, + "_id": { + "$$exists": true + } + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "operationType": "insert", + "ns": { + "db": "database0", + "coll": "collection0" 
+ }, + "fullDocument": { + "x": 2, + "_id": { + "$$exists": true + } + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "operationType": "insert", + "ns": { + "db": "database0", + "coll": "collection0" + }, + "fullDocument": { + "x": 3, + "_id": { + "$$exists": true + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "ignoreExtraEvents": true, + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": { + "batchSize": 1 + }, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + } + ] + } + ] + }, + { + "description": "Test wallTime field is set in a change event", + "runOnRequirements": [ + { + "minServerVersion": "6.0.0" + } + ], + "operations": [ + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [] + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "_id": 1, + "a": 1 + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "operationType": "insert", + "ns": { + "db": "database0", + "coll": "collection0" + }, + "wallTime": { + "$$exists": true + } + } + } + ] } ] } diff --git a/test/test_change_stream.py b/test/test_change_stream.py index 73768fd0f6..f3f206d965 100644 --- a/test/test_change_stream.py +++ b/test/test_change_stream.py @@ -16,7 +16,6 @@ import os import random -import re import string import sys import threading @@ -36,7 +35,7 @@ wait_until, ) -from bson import SON, ObjectId, Timestamp, encode, json_util +from bson import SON, ObjectId, Timestamp, encode from bson.binary import ALL_UUID_REPRESENTATIONS, PYTHON_LEGACY, STANDARD, Binary from bson.raw_bson import DEFAULT_RAW_BSON_OPTIONS, RawBSONDocument from pymongo import MongoClient @@ -1141,131 +1140,6 @@ def tearDown(self): _TEST_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "change_streams") -def camel_to_snake(camel): - # Regex to convert CamelCase to snake_case. - snake = re.sub("(.)([A-Z][a-z]+)", r"\1_\2", camel) - return re.sub("([a-z0-9])([A-Z])", r"\1_\2", snake).lower() - - -def get_change_stream(client, scenario_def, test): - # Get target namespace on which to instantiate change stream - target = test["target"] - if target == "collection": - db = client.get_database(scenario_def["database_name"]) - cs_target = db.get_collection(scenario_def["collection_name"]) - elif target == "database": - cs_target = client.get_database(scenario_def["database_name"]) - elif target == "client": - cs_target = client - else: - raise ValueError("Invalid target in spec") - - # Construct change stream kwargs dict - cs_pipeline = test["changeStreamPipeline"] - options = test["changeStreamOptions"] - cs_options = {} - for key, value in options.items(): - cs_options[camel_to_snake(key)] = value - - # Create and return change stream - return cs_target.watch(pipeline=cs_pipeline, **cs_options) - - -def run_operation(client, operation): - # Apply specified operations - opname = camel_to_snake(operation["name"]) - arguments = operation.get("arguments", {}) - if opname == "rename": - # Special case for rename operation. 
- arguments = {"new_name": arguments["to"]} - cmd = getattr( - client.get_database(operation["database"]).get_collection(operation["collection"]), opname - ) - return cmd(**arguments) - - -def create_test(scenario_def, test): - def run_scenario(self): - # Set up - self.setUpCluster(scenario_def) - self.setFailPoint(test) - is_error = test["result"].get("error", False) - try: - with get_change_stream(self.client, scenario_def, test) as change_stream: - for operation in test["operations"]: - # Run specified operations - run_operation(self.client, operation) - num_expected_changes = len(test["result"].get("success", [])) - changes = [change_stream.next() for _ in range(num_expected_changes)] - # Run a next() to induce an error if one is expected and - # there are no changes. - if is_error and not changes: - change_stream.next() - - except OperationFailure as exc: - if not is_error: - raise - expected_code = test["result"]["error"]["code"] - self.assertEqual(exc.code, expected_code) - - else: - # Check for expected output from change streams - if test["result"].get("success"): - for change, expected_changes in zip(changes, test["result"]["success"]): - self.assert_dict_is_subset(change, expected_changes) - self.assertEqual(len(changes), len(test["result"]["success"])) - - finally: - # Check for expected events - results = self.listener.results - # Note: expectations may be missing, null, or a list of events. - # Extra events emitted by the test are intentionally ignored. - for idx, expectation in enumerate(test.get("expectations") or []): - for event_type, event_desc in expectation.items(): - results_key = event_type.split("_")[1] - event = results[results_key][idx] if len(results[results_key]) > idx else None - self.check_event(event, event_desc) - - return run_scenario - - -def create_tests(): - for dirpath, _, filenames in os.walk(os.path.join(_TEST_PATH, "legacy")): - dirname = os.path.split(dirpath)[-1] - - for filename in filenames: - with open(os.path.join(dirpath, filename)) as scenario_stream: - scenario_def = json_util.loads(scenario_stream.read()) - - test_type = os.path.splitext(filename)[0] - - for test in scenario_def["tests"]: - new_test = create_test(scenario_def, test) - new_test = client_context.require_no_mmap(new_test) - - if "minServerVersion" in test: - min_ver = tuple(int(elt) for elt in test["minServerVersion"].split(".")) - new_test = client_context.require_version_min(*min_ver)(new_test) - if "maxServerVersion" in test: - max_ver = tuple(int(elt) for elt in test["maxServerVersion"].split(".")) - new_test = client_context.require_version_max(*max_ver)(new_test) - - topologies = test["topology"] - new_test = client_context.require_cluster_type(topologies)(new_test) - - test_name = "test_%s_%s_%s" % ( - dirname, - test_type.replace("-", "_"), - str(test["description"].replace(" ", "_")), - ) - - new_test.__name__ = test_name - setattr(TestAllLegacyScenarios, new_test.__name__, new_test) - - -create_tests() - - globals().update( generate_test_classes( os.path.join(_TEST_PATH, "unified"), diff --git a/test/test_unified_format.py b/test/test_unified_format.py index e36959a224..8a6e3da549 100644 --- a/test/test_unified_format.py +++ b/test/test_unified_format.py @@ -65,6 +65,12 @@ def test_unsetOrMatches(self): for actual in [{}, {"x": {}}, {"x": {"y": 2}}]: self.match_evaluator.match_result(spec, actual) + spec = {"y": {"$$unsetOrMatches": {"$$exists": True}}} + self.match_evaluator.match_result(spec, {}) + self.match_evaluator.match_result(spec, {"y": 2}) + 
self.match_evaluator.match_result(spec, {"x": 1}) + self.match_evaluator.match_result(spec, {"y": {}}) + def test_type(self): self.match_evaluator.match_result( { diff --git a/test/unified-test-format/valid-pass/expectedEventsForClient-ignoreExtraEvents.json b/test/unified-test-format/valid-pass/expectedEventsForClient-ignoreExtraEvents.json new file mode 100644 index 0000000000..178b756c2c --- /dev/null +++ b/test/unified-test-format/valid-pass/expectedEventsForClient-ignoreExtraEvents.json @@ -0,0 +1,151 @@ +{ + "description": "expectedEventsForClient-ignoreExtraEvents", + "schemaVersion": "1.7", + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": true, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "database0Name" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "database0Name", + "documents": [] + } + ], + "tests": [ + { + "description": "ignoreExtraEvents can be set to false", + "operations": [ + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "_id": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "ignoreExtraEvents": false, + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "coll0", + "documents": [ + { + "_id": 1 + } + ] + }, + "commandName": "insert" + } + } + ] + } + ] + }, + { + "description": "ignoreExtraEvents can be set to true", + "operations": [ + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "_id": 2 + } + } + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "_id": 3 + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "ignoreExtraEvents": true, + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "coll0", + "documents": [ + { + "_id": 2 + } + ] + }, + "commandName": "insert" + } + } + ] + } + ] + }, + { + "description": "ignoreExtraEvents defaults to false if unset", + "operations": [ + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "_id": 4 + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "coll0", + "documents": [ + { + "_id": 4 + } + ] + }, + "commandName": "insert" + } + } + ] + } + ] + } + ] +} diff --git a/test/unified-test-format/valid-pass/poc-change-streams.json b/test/unified-test-format/valid-pass/poc-change-streams.json index 2a2c41a682..50f0d06f08 100644 --- a/test/unified-test-format/valid-pass/poc-change-streams.json +++ b/test/unified-test-format/valid-pass/poc-change-streams.json @@ -1,6 +1,11 @@ { "description": "poc-change-streams", - "schemaVersion": "1.0", + "schemaVersion": "1.4", + "runOnRequirements": [ + { + "serverless": "forbid" + } + ], "createEntities": [ { "client": { @@ -89,6 +94,42 @@ } ], "tests": [ + { + "description": "saveResultAsEntity is optional for createChangeStream", + "runOnRequirements": [ + { + "minServerVersion": "3.8.0", + "topologies": [ + "replicaset" + ] + } + ], + "operations": [ + { + "name": "createChangeStream", + "object": "client0", + "arguments": { + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": 1 + }, + "commandName": "aggregate", + 
"databaseName": "admin" + } + } + ] + } + ] + }, { "description": "Executing a watch helper on a MongoClient results in notifications for changes to all collections in all databases in the cluster.", "runOnRequirements": [ diff --git a/test/unified_format.py b/test/unified_format.py index 9edf499ece..61c96d6021 100644 --- a/test/unified_format.py +++ b/test/unified_format.py @@ -470,9 +470,15 @@ def __init__(self, test_class): def _operation_exists(self, spec, actual, key_to_compare): if spec is True: - self.test.assertIn(key_to_compare, actual) + if key_to_compare is None: + assert actual is not None + else: + self.test.assertIn(key_to_compare, actual) elif spec is False: - self.test.assertNotIn(key_to_compare, actual) + if key_to_compare is None: + assert actual is None + else: + self.test.assertNotIn(key_to_compare, actual) else: self.test.fail("Expected boolean value for $$exists operator, got %s" % (spec,)) @@ -704,7 +710,7 @@ class UnifiedSpecTestMixinV1(IntegrationTest): a class attribute ``TEST_SPEC``. """ - SCHEMA_VERSION = Version.from_string("1.5") + SCHEMA_VERSION = Version.from_string("1.7") RUN_ON_LOAD_BALANCER = True RUN_ON_SERVERLESS = True TEST_SPEC: Any @@ -1181,19 +1187,32 @@ def check_events(self, spec): events = event_spec["events"] # Valid types: 'command', 'cmap' event_type = event_spec.get("eventType", "command") + ignore_extra_events = event_spec.get("ignoreExtraEvents", False) + server_connection_id = event_spec.get("serverConnectionId") + has_server_connection_id = event_spec.get("hasServerConnectionId", False) + assert event_type in ("command", "cmap") listener = self.entity_map.get_listener_for_client(client_name) actual_events = listener.get_events(event_type) + if ignore_extra_events: + actual_events = actual_events[: len(events)] + if len(events) == 0: self.assertEqual(actual_events, []) continue - self.assertGreaterEqual(len(actual_events), len(events), actual_events) + self.assertEqual(len(actual_events), len(events), actual_events) for idx, expected_event in enumerate(events): self.match_evaluator.match_event(event_type, expected_event, actual_events[idx]) + if has_server_connection_id: + assert server_connection_id is not None + assert server_connection_id >= 0 + else: + assert server_connection_id is None + def verify_outcome(self, spec): for collection_data in spec: coll_name = collection_data["collectionName"] From cf08d46ff943c515d4c191650f478fbe9f737b01 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Wed, 1 Jun 2022 16:48:08 -0700 Subject: [PATCH 0158/1588] PYTHON-3277 Rename csfle library to crypt_shared (#956) --- .evergreen/config.yml | 19 +++++++++---------- .evergreen/run-tests.sh | 31 ++++++++++++++++--------------- pymongo/encryption.py | 4 ++-- pymongo/encryption_options.py | 16 ++++++++-------- test/test_encryption.py | 13 +++++++------ 5 files changed, 42 insertions(+), 41 deletions(-) diff --git a/.evergreen/config.yml b/.evergreen/config.yml index c12d4167b7..721de7cc61 100644 --- a/.evergreen/config.yml +++ b/.evergreen/config.yml @@ -450,8 +450,8 @@ functions: export LIBMONGOCRYPT_URL="${libmongocrypt_url}" export TEST_ENCRYPTION=1 fi - if [ -n "${test_csfle}" ]; then - export TEST_CSFLE=1 + if [ -n "${test_crypt_shared}" ]; then + export TEST_CRYPT_SHARED=1 fi if [ -n "${test_pyopenssl}" ]; then export TEST_PYOPENSSL=1 @@ -2163,15 +2163,14 @@ axes: variables: test_encryption: true batchtime: 10080 # 7 days - - id: "encryption_with_csfle" - display_name: "Encryption with CSFLE" - tags: ["encryption_tag", "csfle"] + - id: 
"encryption_crypt_shared" + display_name: "Encryption shared lib" + tags: ["encryption_tag"] variables: test_encryption: true - test_csfle: true + test_crypt_shared: true batchtime: 10080 # 7 days - # Run pyopenssl tests? - id: pyopenssl display_name: "PyOpenSSL" @@ -2306,7 +2305,7 @@ buildvariants: platform: "*" auth: "*" ssl: "*" - encryption: [ "encryption_with_csfle" ] + encryption: [ "encryption_crypt_shared" ] then: remove_tasks: - ".5.0" @@ -2400,7 +2399,7 @@ buildvariants: platform: "*" python-version: "*" auth-ssl: "*" - encryption: [ "encryption_with_csfle" ] + encryption: [ "encryption_crypt_shared" ] then: remove_tasks: - ".5.0" @@ -2509,7 +2508,7 @@ buildvariants: platform: "*" python-version-windows: "*" auth-ssl: "*" - encryption: [ "encryption_with_csfle" ] + encryption: [ "encryption_crypt_shared" ] then: remove_tasks: - ".5.0" diff --git a/.evergreen/run-tests.sh b/.evergreen/run-tests.sh index 96f42fa517..5f5bda7dc1 100755 --- a/.evergreen/run-tests.sh +++ b/.evergreen/run-tests.sh @@ -11,7 +11,7 @@ set -o errexit # Exit the script with error if any of the commands fail # COVERAGE If non-empty, run the test suite with coverage. # TEST_ENCRYPTION If non-empty, install pymongocrypt. # LIBMONGOCRYPT_URL The URL to download libmongocrypt. -# TEST_CSFLE If non-empty, install CSFLE +# TEST_CRYPT_SHARED If non-empty, install crypt_shared lib. if [ -n "${SET_XTRACE_ON}" ]; then set -o xtrace @@ -28,9 +28,10 @@ COVERAGE=${COVERAGE:-} COMPRESSORS=${COMPRESSORS:-} MONGODB_API_VERSION=${MONGODB_API_VERSION:-} TEST_ENCRYPTION=${TEST_ENCRYPTION:-} -TEST_CSFLE=${TEST_CSFLE:-} +TEST_CRYPT_SHARED=${TEST_CRYPT_SHARED:-} LIBMONGOCRYPT_URL=${LIBMONGOCRYPT_URL:-} DATA_LAKE=${DATA_LAKE:-} +TEST_ARGS="" if [ -n "$COMPRESSORS" ]; then export COMPRESSORS=$COMPRESSORS @@ -148,23 +149,23 @@ if [ -n "$TEST_ENCRYPTION" ]; then # Get access to the AWS temporary credentials: # CSFLE_AWS_TEMP_ACCESS_KEY_ID, CSFLE_AWS_TEMP_SECRET_ACCESS_KEY, CSFLE_AWS_TEMP_SESSION_TOKEN . $DRIVERS_TOOLS/.evergreen/csfle/set-temp-creds.sh + + if [ -n "$TEST_CRYPT_SHARED" ]; then + echo "Testing CSFLE with crypt_shared lib" + $PYTHON $DRIVERS_TOOLS/.evergreen/mongodl.py --component crypt_shared \ + --version latest --out ../crypt_shared/ + export DYLD_FALLBACK_LIBRARY_PATH=../crypt_shared/lib/:$DYLD_FALLBACK_LIBRARY_PATH + export LD_LIBRARY_PATH=../crypt_shared/lib:$LD_LIBRARY_PATH + export PATH=../crypt_shared/bin:$PATH + fi + # Only run the encryption tests. + TEST_ARGS="-s test.test_encryption" fi -if [ -z "$DATA_LAKE" ]; then - TEST_ARGS="" -else +if [ -n "$DATA_LAKE" ]; then TEST_ARGS="-s test.test_data_lake" fi -if [ -z $TEST_CSFLE ]; then - echo "CSFLE not being tested" -else - $PYTHON $DRIVERS_TOOLS/.evergreen/mongodl.py --component csfle \ - --version latest --out ../csfle/ - export DYLD_FALLBACK_LIBRARY_PATH=../csfle/lib/:$DYLD_FALLBACK_LIBRARY_PATH - export LD_LIBRARY_PATH=../csfle/lib:$LD_LIBRARY_PATH - export PATH=../csfle/bin:$PATH - TEST_ARGS="-s test.test_encryption" -fi + # Don't download unittest-xml-reporting from pypi, which often fails. if $PYTHON -c "import xmlrunner"; then # The xunit output dir must be a Python style absolute path. 
diff --git a/pymongo/encryption.py b/pymongo/encryption.py index 1a29131890..40f7d20f23 100644 --- a/pymongo/encryption.py +++ b/pymongo/encryption.py @@ -300,8 +300,8 @@ def _get_internal_client(encrypter, mongo_client): MongoCryptOptions( opts._kms_providers, schema_map, - csfle_path=opts._csfle_path, - csfle_required=opts._csfle_required, + crypt_shared_lib_path=opts._crypt_shared_lib_path, + crypt_shared_lib_required=opts._crypt_shared_lib_required, bypass_encryption=opts._bypass_auto_encryption, ), ) diff --git a/pymongo/encryption_options.py b/pymongo/encryption_options.py index 0ce828ae4c..cdb77c9707 100644 --- a/pymongo/encryption_options.py +++ b/pymongo/encryption_options.py @@ -45,8 +45,8 @@ def __init__( mongocryptd_spawn_path: str = "mongocryptd", mongocryptd_spawn_args: Optional[List[str]] = None, kms_tls_options: Optional[Mapping[str, Any]] = None, - csfle_path: Optional[str] = None, - csfle_required: bool = False, + crypt_shared_lib_path: Optional[str] = None, + crypt_shared_lib_required: bool = False, ) -> None: """Options to configure automatic client-side field level encryption. @@ -142,12 +142,12 @@ def __init__( Or to supply a client certificate:: kms_tls_options={'kmip': {'tlsCertificateKeyFile': 'client.pem'}} - - `csfle_path` (optional): Override the path to load the CSFLE library. - - `csfle_required` (optional): If 'true', refuse to continue encryption without a CSFLE - library + - `crypt_shared_lib_path` (optional): Override the path to load the crypt_shared library. + - `crypt_shared_lib_required` (optional): If True, raise an error if libmongocrypt is + unable to load the crypt_shared library. .. versionchanged:: 4.2 - Added `csfle_path` and `csfle_required` parameters + Added `crypt_shared_lib_path` and `crypt_shared_lib_required` parameters .. versionchanged:: 4.0 Added the `kms_tls_options` parameter and the "kmip" KMS provider. 
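Taken together, the renamed options are driven like this (a usage sketch, not part of the patch: the key material and library path are placeholders, and with ``crypt_shared_lib_required=True`` client construction is expected to fail fast when libmongocrypt cannot load the library, rather than silently falling back to mongocryptd)::

    import os

    from pymongo import MongoClient
    from pymongo.encryption_options import AutoEncryptionOpts

    opts = AutoEncryptionOpts(
        {"local": {"key": os.urandom(96)}},  # throwaway 96-byte local KMS key
        "keyvault.datakeys",
        crypt_shared_lib_path="/opt/mongodb/lib/mongo_crypt_v1.so",  # assumed install path
        crypt_shared_lib_required=True,
    )
    client = MongoClient(auto_encryption_opts=opts, connect=False)
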
@@ -160,8 +160,8 @@ def __init__( "install a compatible version with: " "python -m pip install 'pymongo[encryption]'" ) - self._csfle_path = csfle_path - self._csfle_required = csfle_required + self._crypt_shared_lib_path = crypt_shared_lib_path + self._crypt_shared_lib_required = crypt_shared_lib_required self._kms_providers = kms_providers self._key_vault_namespace = key_vault_namespace self._key_vault_client = key_vault_client diff --git a/test/test_encryption.py b/test/test_encryption.py index fc9d4eec3b..500c95af04 100644 --- a/test/test_encryption.py +++ b/test/test_encryption.py @@ -83,12 +83,12 @@ def get_client_opts(client): class TestAutoEncryptionOpts(PyMongoTestCase): @unittest.skipUnless(_HAVE_PYMONGOCRYPT, "pymongocrypt is not installed") - @unittest.skipUnless(os.environ.get("TEST_CSFLE"), "csfle is not installed") - def test_csfle(self): - # Test that we can pick up csfle automatically + @unittest.skipUnless(os.environ.get("TEST_CRYPT_SHARED"), "crypt_shared lib is not installed") + def test_crypt_shared(self): + # Test that we can pick up crypt_shared lib automatically client = MongoClient( auto_encryption_opts=AutoEncryptionOpts( - KMS_PROVIDERS, "keyvault.datakeys", csfle_required=True + KMS_PROVIDERS, "keyvault.datakeys", crypt_shared_lib_required=True ), connect=False, ) @@ -1762,8 +1762,9 @@ def test_case_8(self): # https://github.com/mongodb/specifications/blob/master/source/client-side-encryption/tests/README.rst#bypass-spawning-mongocryptd class TestBypassSpawningMongocryptdProse(EncryptionIntegrationTest): @unittest.skipIf( - os.environ.get("TEST_CSFLE"), - "this prose test does not work when CSFLE is on a system dynamic library search path.", + os.environ.get("TEST_CRYPT_SHARED"), + "this prose test does not work when crypt_shared is on a system dynamic " + "library search path.", ) def test_mongocryptd_bypass_spawn(self): # Lower the mongocryptd timeout to reduce the test run time. 
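The skip added to the bypass-spawning prose test exists because that test asserts a failure which only occurs when mongocryptd is actually needed: with ``mongocryptd_bypass_spawn=True`` and nothing listening at the configured URI, an encrypted operation should fail server selection. Once crypt_shared is loadable, libmongocrypt never contacts mongocryptd at all, so the expected error never fires. A sketch of the option combination involved (the port is illustrative, and the key is a throwaway)::

    import os

    from pymongo import MongoClient
    from pymongo.encryption_options import AutoEncryptionOpts

    opts = AutoEncryptionOpts(
        {"local": {"key": os.urandom(96)}},  # throwaway local KMS key
        "keyvault.datakeys",
        mongocryptd_bypass_spawn=True,
        # Deliberately point at a port where no mongocryptd is listening:
        mongocryptd_uri="mongodb://localhost:27027",
    )
    client = MongoClient(auto_encryption_opts=opts)
    # An encrypted insert is now expected to time out selecting mongocryptd --
    # unless crypt_shared is on the dynamic library search path, in which case
    # it quietly succeeds, which is exactly why the test is skipped.
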
From 09385be54977aa0c6e620f342a41316d748109d6 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Thu, 2 Jun 2022 10:55:15 -0700 Subject: [PATCH 0159/1588] PYTHON-2924 Improve test_load_balancing (#955) --- test/test_server_selection_in_window.py | 21 ++++++++++++++------- 1 file changed, 14 insertions(+), 7 deletions(-) diff --git a/test/test_server_selection_in_window.py b/test/test_server_selection_in_window.py index 4b24d0d7b0..cae2d7661b 100644 --- a/test/test_server_selection_in_window.py +++ b/test/test_server_selection_in_window.py @@ -17,7 +17,13 @@ import os import threading from test import IntegrationTest, client_context, unittest -from test.utils import OvertCommandListener, TestCreator, rs_client, wait_until +from test.utils import ( + OvertCommandListener, + TestCreator, + get_pool, + rs_client, + wait_until, +) from test.utils_selection_tests import create_topology from pymongo.common import clean_node @@ -98,11 +104,10 @@ def run(self): class TestProse(IntegrationTest): - def frequencies(self, client, listener): + def frequencies(self, client, listener, n_finds=10): coll = client.test.test - N_FINDS = 10 N_THREADS = 10 - threads = [FinderThread(coll, N_FINDS) for _ in range(N_THREADS)] + threads = [FinderThread(coll, n_finds) for _ in range(N_THREADS)] for thread in threads: thread.start() for thread in threads: @@ -111,7 +116,7 @@ def frequencies(self, client, listener): self.assertTrue(thread.passed) events = listener.results["started"] - self.assertEqual(len(events), N_FINDS * N_THREADS) + self.assertEqual(len(events), n_finds * N_THREADS) nodes = client.nodes self.assertEqual(len(nodes), 2) freqs = {address: 0.0 for address in nodes} @@ -131,10 +136,12 @@ def test_load_balancing(self): client_context.mongos_seeds(), appName="loadBalancingTest", event_listeners=[listener], - localThresholdMS=10000, + localThresholdMS=30000, + minPoolSize=10, ) self.addCleanup(client.close) wait_until(lambda: len(client.nodes) == 2, "discover both nodes") + wait_until(lambda: len(get_pool(client).sockets) >= 10, "create 10 connections") # Delay find commands on delay_finds = { "configureFailPoint": "failCommand", @@ -153,7 +160,7 @@ def test_load_balancing(self): freqs = self.frequencies(client, listener) self.assertLessEqual(freqs[delayed_server], 0.25) listener.reset() - freqs = self.frequencies(client, listener) + freqs = self.frequencies(client, listener, n_finds=100) self.assertAlmostEqual(freqs[delayed_server], 0.50, delta=0.15) From 154d8787c5cc4fe6d077f7bc60cbc6c0b689e693 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Fri, 3 Jun 2022 13:11:28 -0700 Subject: [PATCH 0160/1588] PYTHON-3245 Support explicit queryable encryption (#959) --- .evergreen/resync-specs.sh | 1 + .evergreen/run-tests.sh | 2 +- pymongo/encryption.py | 49 +++++- pymongo/encryption_options.py | 9 +- .../etc/data/encryptedFields.json | 33 ++++ .../etc/data/keys/key1-document.json | 30 ++++ .../etc/data/keys/key1-id.json | 6 + .../etc/data/keys/key2-document.json | 30 ++++ .../etc/data/keys/key2-id.json | 6 + test/test_encryption.py | 150 ++++++++++++++++-- 10 files changed, 298 insertions(+), 18 deletions(-) create mode 100644 test/client-side-encryption/etc/data/encryptedFields.json create mode 100644 test/client-side-encryption/etc/data/keys/key1-document.json create mode 100644 test/client-side-encryption/etc/data/keys/key1-id.json create mode 100644 test/client-side-encryption/etc/data/keys/key2-document.json create mode 100644 test/client-side-encryption/etc/data/keys/key2-id.json diff --git 
a/.evergreen/resync-specs.sh b/.evergreen/resync-specs.sh index a98b091d59..1177ebb04a 100755 --- a/.evergreen/resync-specs.sh +++ b/.evergreen/resync-specs.sh @@ -93,6 +93,7 @@ do cpjson client-side-encryption/corpus/ client-side-encryption/corpus cpjson client-side-encryption/external/ client-side-encryption/external cpjson client-side-encryption/limits/ client-side-encryption/limits + cpjson client-side-encryption/etc/data client-side-encryption/etc/data ;; cmap|CMAP|connection-monitoring-and-pooling) cpjson connection-monitoring-and-pooling/tests cmap diff --git a/.evergreen/run-tests.sh b/.evergreen/run-tests.sh index 5f5bda7dc1..4367bad246 100755 --- a/.evergreen/run-tests.sh +++ b/.evergreen/run-tests.sh @@ -139,7 +139,7 @@ if [ -n "$TEST_ENCRYPTION" ]; then export PYMONGOCRYPT_LIB # TODO: Test with 'pip install pymongocrypt' - git clone --branch master https://github.com/mongodb/libmongocrypt.git libmongocrypt_git + git clone https://github.com/mongodb/libmongocrypt.git libmongocrypt_git python -m pip install --prefer-binary -r .evergreen/test-encryption-requirements.txt python -m pip install ./libmongocrypt_git/bindings/python python -c "import pymongocrypt; print('pymongocrypt version: '+pymongocrypt.__version__)" diff --git a/pymongo/encryption.py b/pymongo/encryption.py index 40f7d20f23..71642aaa2a 100644 --- a/pymongo/encryption.py +++ b/pymongo/encryption.py @@ -15,6 +15,7 @@ """Support for explicit client-side field level encryption.""" import contextlib +import enum import uuid import weakref from typing import Any, Mapping, Optional, Sequence @@ -303,6 +304,7 @@ def _get_internal_client(encrypter, mongo_client): crypt_shared_lib_path=opts._crypt_shared_lib_path, crypt_shared_lib_required=opts._crypt_shared_lib_required, bypass_encryption=opts._bypass_auto_encryption, + bypass_query_analysis=opts._bypass_query_analysis, ), ) self._closed = False @@ -352,11 +354,33 @@ def close(self): self._internal_client = None -class Algorithm(object): +class Algorithm(str, enum.Enum): """An enum that defines the supported encryption algorithms.""" AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic = "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + """AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic.""" AEAD_AES_256_CBC_HMAC_SHA_512_Random = "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + """AEAD_AES_256_CBC_HMAC_SHA_512_Random.""" + INDEXED = "Indexed" + """Indexed. + + .. versionadded:: 4.2 + """ + UNINDEXED = "Unindexed" + """Unindexed. + + .. versionadded:: 4.2 + """ + + +class QueryType(enum.IntEnum): + """An enum that defines the supported values for explicit encryption query_type. + + .. versionadded:: 4.2 + """ + + EQUALITY = 1 + """Used to encrypt a value for an equality query.""" class ClientEncryption(object): @@ -550,6 +574,9 @@ def encrypt( algorithm: str, key_id: Optional[Binary] = None, key_alt_name: Optional[str] = None, + index_key_id: Optional[Binary] = None, + query_type: Optional[int] = None, + contention_factor: Optional[int] = None, ) -> Binary: """Encrypt a BSON value with a given key and algorithm. @@ -564,20 +591,38 @@ def encrypt( :class:`~bson.binary.Binary` with subtype 4 ( :attr:`~bson.binary.UUID_SUBTYPE`). - `key_alt_name`: Identifies a key vault document by 'keyAltName'. + - `index_key_id` (bytes): the index key id to use for Queryable Encryption. + - `query_type` (int): The query type to execute. See + :class:`QueryType` for valid options. + - `contention_factor` (int): The contention factor to use + when the algorithm is "Indexed". 
:Returns: The encrypted value, a :class:`~bson.binary.Binary` with subtype 6. + + .. versionchanged:: 4.2 + Added the `index_key_id`, `query_type`, and `contention_factor` parameters. """ self._check_closed() if key_id is not None and not ( isinstance(key_id, Binary) and key_id.subtype == UUID_SUBTYPE ): raise TypeError("key_id must be a bson.binary.Binary with subtype 4") + if index_key_id is not None and not ( + isinstance(index_key_id, Binary) and index_key_id.subtype == UUID_SUBTYPE + ): + raise TypeError("index_key_id must be a bson.binary.Binary with subtype 4") doc = encode({"v": value}, codec_options=self._codec_options) with _wrap_encryption_errors(): encrypted_doc = self._encryption.encrypt( - doc, algorithm, key_id=key_id, key_alt_name=key_alt_name + doc, + algorithm, + key_id=key_id, + key_alt_name=key_alt_name, + index_key_id=index_key_id, + query_type=query_type, + contention_factor=contention_factor, ) return decode(encrypted_doc)["v"] # type: ignore[index] diff --git a/pymongo/encryption_options.py b/pymongo/encryption_options.py index cdb77c9707..5acc55042a 100644 --- a/pymongo/encryption_options.py +++ b/pymongo/encryption_options.py @@ -47,6 +47,7 @@ def __init__( kms_tls_options: Optional[Mapping[str, Any]] = None, crypt_shared_lib_path: Optional[str] = None, crypt_shared_lib_required: bool = False, + bypass_query_analysis: bool = False, ) -> None: """Options to configure automatic client-side field level encryption. @@ -145,9 +146,14 @@ def __init__( - `crypt_shared_lib_path` (optional): Override the path to load the crypt_shared library. - `crypt_shared_lib_required` (optional): If True, raise an error if libmongocrypt is unable to load the crypt_shared library. + - `bypass_query_analysis` (optional): If ``True``, disable automatic analysis of + outgoing commands. Set `bypass_query_analysis` to use explicit + encryption on indexed fields without the MongoDB Enterprise Advanced + licensed crypt_shared library. .. versionchanged:: 4.2 - Added `crypt_shared_lib_path` and `crypt_shared_lib_required` parameters + Added `crypt_shared_lib_path`, `crypt_shared_lib_required`, and `bypass_query_analysis` + parameters. .. versionchanged:: 4.0 Added the `kms_tls_options` parameter and the "kmip" KMS provider. @@ -179,3 +185,4 @@ def __init__( self._mongocryptd_spawn_args.append("--idleShutdownTimeoutSecs=60") # Maps KMS provider name to a SSLContext. 
self._kms_ssl_contexts = _parse_kms_tls_options(kms_tls_options) + self._bypass_query_analysis = bypass_query_analysis diff --git a/test/client-side-encryption/etc/data/encryptedFields.json b/test/client-side-encryption/etc/data/encryptedFields.json new file mode 100644 index 0000000000..2364590e4c --- /dev/null +++ b/test/client-side-encryption/etc/data/encryptedFields.json @@ -0,0 +1,33 @@ +{ + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedIndexed", + "bsonType": "string", + "queries": { + "queryType": "equality", + "contention": { + "$numberLong": "0" + } + } + }, + { + "keyId": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedUnindexed", + "bsonType": "string" + } + ] +} diff --git a/test/client-side-encryption/etc/data/keys/key1-document.json b/test/client-side-encryption/etc/data/keys/key1-document.json new file mode 100644 index 0000000000..566b56c354 --- /dev/null +++ b/test/client-side-encryption/etc/data/keys/key1-document.json @@ -0,0 +1,30 @@ +{ + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } +} diff --git a/test/client-side-encryption/etc/data/keys/key1-id.json b/test/client-side-encryption/etc/data/keys/key1-id.json new file mode 100644 index 0000000000..7d18f52ebb --- /dev/null +++ b/test/client-side-encryption/etc/data/keys/key1-id.json @@ -0,0 +1,6 @@ +{ + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } +} diff --git a/test/client-side-encryption/etc/data/keys/key2-document.json b/test/client-side-encryption/etc/data/keys/key2-document.json new file mode 100644 index 0000000000..a654d980ba --- /dev/null +++ b/test/client-side-encryption/etc/data/keys/key2-document.json @@ -0,0 +1,30 @@ +{ + "_id": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "HBk9BWihXExNDvTp1lUxOuxuZK2Pe2ZdVdlsxPEBkiO1bS4mG5NNDsQ7zVxJAH8BtdOYp72Ku4Y3nwc0BUpIKsvAKX4eYXtlhv5zUQxWdeNFhg9qK7qb8nqhnnLeT0f25jFSqzWJoT379hfwDeu0bebJHr35QrJ8myZdPMTEDYF08QYQ48ShRBli0S+QzBHHAQiM2iJNr4svg2WR8JSeWQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } +} diff --git a/test/client-side-encryption/etc/data/keys/key2-id.json b/test/client-side-encryption/etc/data/keys/key2-id.json new file mode 100644 index 0000000000..6e9b87bbc2 --- /dev/null +++ b/test/client-side-encryption/etc/data/keys/key2-id.json @@ -0,0 +1,6 @@ +{ + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } +} diff --git a/test/test_encryption.py b/test/test_encryption.py index 
500c95af04..288c137c7e 100644 --- a/test/test_encryption.py +++ b/test/test_encryption.py @@ -51,14 +51,14 @@ from test.utils_spec_runner import SpecRunner from bson import encode, json_util -from bson.binary import JAVA_LEGACY, STANDARD, UUID_SUBTYPE, Binary, UuidRepresentation +from bson.binary import UUID_SUBTYPE, Binary, UuidRepresentation from bson.codec_options import CodecOptions from bson.errors import BSONError from bson.json_util import JSONOptions from bson.son import SON from pymongo import encryption from pymongo.cursor import CursorType -from pymongo.encryption import Algorithm, ClientEncryption +from pymongo.encryption import Algorithm, ClientEncryption, QueryType from pymongo.encryption_options import _HAVE_PYMONGOCRYPT, AutoEncryptionOpts from pymongo.errors import ( BulkWriteError, @@ -212,11 +212,11 @@ def assertBinaryUUID(self, val): BASE = os.path.join(os.path.dirname(os.path.realpath(__file__)), "client-side-encryption") SPEC_PATH = os.path.join(BASE, "spec") -OPTS = CodecOptions(uuid_representation=STANDARD) +OPTS = CodecOptions() # Use SON to preserve the order of fields while parsing json. Use tz_aware # =False to match how CodecOptions decodes dates. -JSON_OPTS = JSONOptions(document_class=SON, uuid_representation=STANDARD, tz_aware=False) +JSON_OPTS = JSONOptions(document_class=SON, tz_aware=False) def read(*paths): @@ -324,7 +324,7 @@ def test_use_after_close(self): class TestEncryptedBulkWrite(BulkTestBase, EncryptionIntegrationTest): - def test_upsert_uuid_standard_encrypte(self): + def test_upsert_uuid_standard_encrypt(self): opts = AutoEncryptionOpts(KMS_PROVIDERS, "keyvault.datakeys") client = rs_or_single_client(auto_encryption_opts=opts) self.addCleanup(client.close) @@ -449,11 +449,19 @@ def test_validation(self): msg = "key_id must be a bson.binary.Binary with subtype 4" algo = Algorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic + uid = uuid.uuid4() with self.assertRaisesRegex(TypeError, msg): - client_encryption.encrypt("str", algo, key_id=uuid.uuid4()) # type: ignore[arg-type] + client_encryption.encrypt("str", algo, key_id=uid) # type: ignore[arg-type] with self.assertRaisesRegex(TypeError, msg): client_encryption.encrypt("str", algo, key_id=Binary(b"123")) + msg = "index_key_id must be a bson.binary.Binary with subtype 4" + algo = Algorithm.INDEXED + with self.assertRaisesRegex(TypeError, msg): + client_encryption.encrypt("str", algo, index_key_id=uid) # type: ignore[arg-type] + with self.assertRaisesRegex(TypeError, msg): + client_encryption.encrypt("str", algo, index_key_id=Binary(b"123")) + def test_bson_errors(self): client_encryption = ClientEncryption( KMS_PROVIDERS, "keyvault.datakeys", client_context.client, OPTS @@ -466,7 +474,7 @@ def test_bson_errors(self): client_encryption.encrypt( unencodable_value, Algorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic, - key_id=Binary(uuid.uuid4().bytes, UUID_SUBTYPE), + key_id=Binary.from_uuid(uuid.uuid4()), ) def test_codec_options(self): @@ -475,7 +483,7 @@ def test_codec_options(self): KMS_PROVIDERS, "keyvault.datakeys", client_context.client, None # type: ignore[arg-type] ) - opts = CodecOptions(uuid_representation=JAVA_LEGACY) + opts = CodecOptions(uuid_representation=UuidRepresentation.JAVA_LEGACY) client_encryption_legacy = ClientEncryption( KMS_PROVIDERS, "keyvault.datakeys", client_context.client, opts ) @@ -493,8 +501,9 @@ def test_codec_options(self): self.assertEqual(decrypted_value_legacy, value) # Encrypt the same UUID with STANDARD codec options. 
+ opts = CodecOptions(uuid_representation=UuidRepresentation.STANDARD) client_encryption = ClientEncryption( - KMS_PROVIDERS, "keyvault.datakeys", client_context.client, OPTS + KMS_PROVIDERS, "keyvault.datakeys", client_context.client, opts ) self.addCleanup(client_encryption.close) encrypted_standard = client_encryption.encrypt( @@ -986,9 +995,7 @@ def _test_corpus(self, opts): ) self.addCleanup(vault.drop) - client_encrypted = rs_or_single_client( - auto_encryption_opts=opts, uuidRepresentation="standard" - ) + client_encrypted = rs_or_single_client(auto_encryption_opts=opts) self.addCleanup(client_encrypted.close) client_encryption = ClientEncryption( @@ -1436,7 +1443,7 @@ def _test_explicit(self, expectation): ciphertext = client_encryption.encrypt( "string0", algorithm=Algorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic, - key_id=Binary.from_uuid(self.DEK["_id"], STANDARD), + key_id=self.DEK["_id"], ) self.assertEqual(bytes(ciphertext), base64.b64decode(expectation)) @@ -1972,9 +1979,124 @@ def test_04_kmip(self): with self.assertRaisesRegex(EncryptionError, "expired|certificate verify failed"): self.client_encryption_expired.create_data_key("kmip") # Invalid cert hostname error. - with self.assertRaisesRegex(EncryptionError, "IP address mismatch|wronghost"): + with self.assertRaisesRegex( + EncryptionError, "IP address mismatch|wronghost|IPAddressMismatch" + ): self.client_encryption_invalid_hostname.create_data_key("kmip") +# https://github.com/mongodb/specifications/blob/d4c9432/source/client-side-encryption/tests/README.rst#explicit-encryption +class TestExplicitQueryableEncryption(EncryptionIntegrationTest): + @client_context.require_no_standalone + @client_context.require_version_min(6, 0, -1) + def setUp(self): + super().setUp() + self.encrypted_fields = json_data("etc", "data", "encryptedFields.json") + self.key1_document = json_data("etc", "data", "keys", "key1-document.json") + self.key1_id = self.key1_document["_id"] + self.db = self.client.test_queryable_encryption + self.client.drop_database(self.db) + self.db.command("create", self.encrypted_fields["escCollection"]) + self.db.command("create", self.encrypted_fields["eccCollection"]) + self.db.command("create", self.encrypted_fields["ecocCollection"]) + self.db.command("create", "explicit_encryption", encryptedFields=self.encrypted_fields) + key_vault = create_key_vault(self.client.keyvault.datakeys, self.key1_document) + self.addCleanup(key_vault.drop) + self.key_vault_client = self.client + self.client_encryption = ClientEncryption( + {"local": {"key": LOCAL_MASTER_KEY}}, key_vault.full_name, self.key_vault_client, OPTS + ) + self.addCleanup(self.client_encryption.close) + opts = AutoEncryptionOpts( + {"local": {"key": LOCAL_MASTER_KEY}}, + key_vault.full_name, + bypass_query_analysis=True, + ) + self.encrypted_client = rs_or_single_client(auto_encryption_opts=opts) + self.addCleanup(self.encrypted_client.close) + + def test_01_insert_encrypted_indexed_and_find(self): + val = "encrypted indexed value" + insert_payload = self.client_encryption.encrypt(val, Algorithm.INDEXED, self.key1_id) + self.encrypted_client[self.db.name].explicit_encryption.insert_one( + {"encryptedIndexed": insert_payload} + ) + + find_payload = self.client_encryption.encrypt( + val, Algorithm.INDEXED, self.key1_id, query_type=QueryType.EQUALITY + ) + docs = list( + self.encrypted_client[self.db.name].explicit_encryption.find( + {"encryptedIndexed": find_payload} + ) + ) + self.assertEqual(len(docs), 1) + 
self.assertEqual(docs[0]["encryptedIndexed"], val) + + def test_02_insert_encrypted_indexed_and_find_contention(self): + val = "encrypted indexed value" + contention = 10 + for _ in range(contention): + insert_payload = self.client_encryption.encrypt( + val, Algorithm.INDEXED, self.key1_id, contention_factor=contention + ) + self.encrypted_client[self.db.name].explicit_encryption.insert_one( + {"encryptedIndexed": insert_payload} + ) + + # Find without contention_factor non-deterministically returns 0-9 documents. + find_payload = self.client_encryption.encrypt( + val, Algorithm.INDEXED, self.key1_id, query_type=QueryType.EQUALITY + ) + docs = list( + self.encrypted_client[self.db.name].explicit_encryption.find( + {"encryptedIndexed": find_payload} + ) + ) + self.assertLessEqual(len(docs), 10) + for doc in docs: + self.assertEqual(doc["encryptedIndexed"], val) + + # Find with contention_factor will return all 10 documents. + find_payload = self.client_encryption.encrypt( + val, + Algorithm.INDEXED, + self.key1_id, + query_type=QueryType.EQUALITY, + contention_factor=contention, + ) + docs = list( + self.encrypted_client[self.db.name].explicit_encryption.find( + {"encryptedIndexed": find_payload} + ) + ) + self.assertEqual(len(docs), 10) + for doc in docs: + self.assertEqual(doc["encryptedIndexed"], val) + + def test_03_insert_encrypted_unindexed(self): + val = "encrypted unindexed value" + insert_payload = self.client_encryption.encrypt(val, Algorithm.UNINDEXED, self.key1_id) + self.encrypted_client[self.db.name].explicit_encryption.insert_one( + {"_id": 1, "encryptedUnindexed": insert_payload} + ) + + docs = list(self.encrypted_client[self.db.name].explicit_encryption.find({"_id": 1})) + self.assertEqual(len(docs), 1) + self.assertEqual(docs[0]["encryptedUnindexed"], val) + + def test_04_roundtrip_encrypted_indexed(self): + val = "encrypted indexed value" + payload = self.client_encryption.encrypt(val, Algorithm.INDEXED, self.key1_id) + decrypted = self.client_encryption.decrypt(payload) + self.assertEqual(decrypted, val) + + def test_05_roundtrip_encrypted_unindexed(self): + val = "encrypted indexed value" + payload = self.client_encryption.encrypt(val, Algorithm.UNINDEXED, self.key1_id) + decrypted = self.client_encryption.decrypt(payload) + self.assertEqual(decrypted, val) + + if __name__ == "__main__": unittest.main() From d98e44e27e4ecbbaee301649c1ecb6a41fc2e895 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Fri, 3 Jun 2022 13:43:47 -0700 Subject: [PATCH 0161/1588] PYTHON-3245 Fix docs for index_key_id (#960) --- pymongo/encryption.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/pymongo/encryption.py b/pymongo/encryption.py index 71642aaa2a..25d216d5b5 100644 --- a/pymongo/encryption.py +++ b/pymongo/encryption.py @@ -591,11 +591,12 @@ def encrypt( :class:`~bson.binary.Binary` with subtype 4 ( :attr:`~bson.binary.UUID_SUBTYPE`). - `key_alt_name`: Identifies a key vault document by 'keyAltName'. - - `index_key_id` (bytes): the index key id to use for Queryable Encryption. + - `index_key_id`: The index key id to use for Queryable Encryption. Must be + a :class:`~bson.binary.Binary` with subtype 4 (:attr:`~bson.binary.UUID_SUBTYPE`). - `query_type` (int): The query type to execute. See :class:`QueryType` for valid options. - `contention_factor` (int): The contention factor to use - when the algorithm is "Indexed". + when the algorithm is :attr:`Algorithm.INDEXED`. :Returns: The encrypted value, a :class:`~bson.binary.Binary` with subtype 6. 
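Putting the two queryable-encryption commits above together, the explicit API round-trips like this (a sketch under the same assumptions as the tests: a local KMS key, a 6.0+ non-standalone server for the query side, and placeholder values; only the encrypt/decrypt round trip is shown here)::

    import os

    from bson.codec_options import CodecOptions
    from pymongo import MongoClient
    from pymongo.encryption import Algorithm, ClientEncryption, QueryType

    client = MongoClient()
    client_encryption = ClientEncryption(
        {"local": {"key": os.urandom(96)}},  # throwaway local KMS key
        "keyvault.datakeys",
        client,
        CodecOptions(),
    )
    key_id = client_encryption.create_data_key("local")

    # Payload suitable for inserting an indexed value:
    insert_payload = client_encryption.encrypt(
        "123-45-6789", Algorithm.INDEXED, key_id=key_id
    )
    # Matching payload for an equality query over the same value:
    find_payload = client_encryption.encrypt(
        "123-45-6789",
        Algorithm.INDEXED,
        key_id=key_id,
        query_type=QueryType.EQUALITY,
    )
    # Both payloads decrypt back to the original value:
    assert client_encryption.decrypt(insert_payload) == "123-45-6789"
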
From 6b088ffa4e813272ffb25637dd05cc05fe42288f Mon Sep 17 00:00:00 2001 From: Julius Park Date: Mon, 6 Jun 2022 09:33:31 -0700 Subject: [PATCH 0162/1588] PYTHON-3241 Add Queryable Encryption API to AutoEncryptionOpts (#957) --- pymongo/collection.py | 23 +- pymongo/common.py | 12 + pymongo/database.py | 117 +- pymongo/encryption.py | 6 + pymongo/encryption_options.py | 33 +- .../spec/{ => legacy}/aggregate.json | 0 .../spec/{ => legacy}/awsTemporary.json | 0 .../spec/{ => legacy}/azureKMS.json | 0 .../spec/{ => legacy}/badQueries.json | 0 .../spec/{ => legacy}/badSchema.json | 0 .../spec/{ => legacy}/basic.json | 0 .../spec/{ => legacy}/bulk.json | 0 .../{ => legacy}/bypassAutoEncryption.json | 0 .../spec/{ => legacy}/bypassedCommand.json | 0 .../spec/{ => legacy}/count.json | 0 .../spec/{ => legacy}/countDocuments.json | 0 .../spec/legacy/create-and-createIndexes.json | 115 + .../spec/{ => legacy}/delete.json | 0 .../spec/{ => legacy}/distinct.json | 0 .../spec/{ => legacy}/explain.json | 0 .../spec/{ => legacy}/find.json | 0 .../spec/{ => legacy}/findOneAndDelete.json | 0 .../spec/{ => legacy}/findOneAndReplace.json | 0 .../spec/{ => legacy}/findOneAndUpdate.json | 0 .../spec/legacy/fle2-BypassQueryAnalysis.json | 289 +++ .../spec/legacy/fle2-Compact.json | 232 ++ .../spec/legacy/fle2-CreateCollection.json | 2239 +++++++++++++++++ .../spec/legacy/fle2-DecryptExistingData.json | 148 ++ .../spec/legacy/fle2-Delete.json | 305 +++ ...EncryptedFields-vs-EncryptedFieldsMap.json | 217 ++ .../fle2-EncryptedFields-vs-jsonSchema.json | 304 +++ .../fle2-EncryptedFieldsMap-defaults.json | 105 + .../spec/legacy/fle2-FindOneAndUpdate.json | 602 +++++ .../spec/legacy/fle2-InsertFind-Indexed.json | 300 +++ .../legacy/fle2-InsertFind-Unindexed.json | 250 ++ .../spec/legacy/fle2-MissingKey.json | 118 + .../spec/legacy/fle2-NoEncryption.json | 86 + .../spec/legacy/fle2-Update.json | 610 +++++ ...e2-validatorAndPartialFieldExpression.json | 520 ++++ .../spec/{ => legacy}/gcpKMS.json | 0 .../spec/{ => legacy}/getMore.json | 0 .../spec/{ => legacy}/insert.json | 0 .../spec/{ => legacy}/keyAltName.json | 0 .../spec/{ => legacy}/kmipKMS.json | 0 .../spec/{ => legacy}/localKMS.json | 0 .../spec/{ => legacy}/localSchema.json | 0 .../{ => legacy}/malformedCiphertext.json | 0 .../spec/{ => legacy}/maxWireVersion.json | 0 .../spec/{ => legacy}/missingKey.json | 0 .../spec/{ => legacy}/noSchema.json | 0 .../spec/{ => legacy}/replaceOne.json | 0 .../spec/{ => legacy}/types.json | 0 .../spec/{ => legacy}/unsupportedCommand.json | 0 .../spec/{ => legacy}/updateMany.json | 0 .../spec/{ => legacy}/updateOne.json | 0 .../validatorAndPartialFieldExpression.json | 642 +++++ .../spec/unified/addKeyAltName.json | 603 +++++ .../createKey-kms_providers-invalid.json | 112 + .../spec/unified/createKey.json | 711 ++++++ .../spec/unified/deleteKey.json | 553 ++++ .../spec/unified/getKey.json | 313 +++ .../spec/unified/getKeyByAltName.json | 283 +++ .../spec/unified/getKeys.json | 260 ++ .../spec/unified/removeKeyAltName.json | 572 +++++ .../rewrapManyDataKey-decrypt_failure.json | 162 ++ .../rewrapManyDataKey-encrypt_failure.json | 250 ++ .../spec/unified/rewrapManyDataKey.json | 1373 ++++++++++ test/test_encryption.py | 17 +- test/utils.py | 18 +- test/utils_spec_runner.py | 27 +- 70 files changed, 12489 insertions(+), 38 deletions(-) rename test/client-side-encryption/spec/{ => legacy}/aggregate.json (100%) rename test/client-side-encryption/spec/{ => legacy}/awsTemporary.json (100%) rename test/client-side-encryption/spec/{ => 
legacy}/azureKMS.json (100%) rename test/client-side-encryption/spec/{ => legacy}/badQueries.json (100%) rename test/client-side-encryption/spec/{ => legacy}/badSchema.json (100%) rename test/client-side-encryption/spec/{ => legacy}/basic.json (100%) rename test/client-side-encryption/spec/{ => legacy}/bulk.json (100%) rename test/client-side-encryption/spec/{ => legacy}/bypassAutoEncryption.json (100%) rename test/client-side-encryption/spec/{ => legacy}/bypassedCommand.json (100%) rename test/client-side-encryption/spec/{ => legacy}/count.json (100%) rename test/client-side-encryption/spec/{ => legacy}/countDocuments.json (100%) create mode 100644 test/client-side-encryption/spec/legacy/create-and-createIndexes.json rename test/client-side-encryption/spec/{ => legacy}/delete.json (100%) rename test/client-side-encryption/spec/{ => legacy}/distinct.json (100%) rename test/client-side-encryption/spec/{ => legacy}/explain.json (100%) rename test/client-side-encryption/spec/{ => legacy}/find.json (100%) rename test/client-side-encryption/spec/{ => legacy}/findOneAndDelete.json (100%) rename test/client-side-encryption/spec/{ => legacy}/findOneAndReplace.json (100%) rename test/client-side-encryption/spec/{ => legacy}/findOneAndUpdate.json (100%) create mode 100644 test/client-side-encryption/spec/legacy/fle2-BypassQueryAnalysis.json create mode 100644 test/client-side-encryption/spec/legacy/fle2-Compact.json create mode 100644 test/client-side-encryption/spec/legacy/fle2-CreateCollection.json create mode 100644 test/client-side-encryption/spec/legacy/fle2-DecryptExistingData.json create mode 100644 test/client-side-encryption/spec/legacy/fle2-Delete.json create mode 100644 test/client-side-encryption/spec/legacy/fle2-EncryptedFields-vs-EncryptedFieldsMap.json create mode 100644 test/client-side-encryption/spec/legacy/fle2-EncryptedFields-vs-jsonSchema.json create mode 100644 test/client-side-encryption/spec/legacy/fle2-EncryptedFieldsMap-defaults.json create mode 100644 test/client-side-encryption/spec/legacy/fle2-FindOneAndUpdate.json create mode 100644 test/client-side-encryption/spec/legacy/fle2-InsertFind-Indexed.json create mode 100644 test/client-side-encryption/spec/legacy/fle2-InsertFind-Unindexed.json create mode 100644 test/client-side-encryption/spec/legacy/fle2-MissingKey.json create mode 100644 test/client-side-encryption/spec/legacy/fle2-NoEncryption.json create mode 100644 test/client-side-encryption/spec/legacy/fle2-Update.json create mode 100644 test/client-side-encryption/spec/legacy/fle2-validatorAndPartialFieldExpression.json rename test/client-side-encryption/spec/{ => legacy}/gcpKMS.json (100%) rename test/client-side-encryption/spec/{ => legacy}/getMore.json (100%) rename test/client-side-encryption/spec/{ => legacy}/insert.json (100%) rename test/client-side-encryption/spec/{ => legacy}/keyAltName.json (100%) rename test/client-side-encryption/spec/{ => legacy}/kmipKMS.json (100%) rename test/client-side-encryption/spec/{ => legacy}/localKMS.json (100%) rename test/client-side-encryption/spec/{ => legacy}/localSchema.json (100%) rename test/client-side-encryption/spec/{ => legacy}/malformedCiphertext.json (100%) rename test/client-side-encryption/spec/{ => legacy}/maxWireVersion.json (100%) rename test/client-side-encryption/spec/{ => legacy}/missingKey.json (100%) rename test/client-side-encryption/spec/{ => legacy}/noSchema.json (100%) rename test/client-side-encryption/spec/{ => legacy}/replaceOne.json (100%) rename test/client-side-encryption/spec/{ => 
legacy}/types.json (100%) rename test/client-side-encryption/spec/{ => legacy}/unsupportedCommand.json (100%) rename test/client-side-encryption/spec/{ => legacy}/updateMany.json (100%) rename test/client-side-encryption/spec/{ => legacy}/updateOne.json (100%) create mode 100644 test/client-side-encryption/spec/legacy/validatorAndPartialFieldExpression.json create mode 100644 test/client-side-encryption/spec/unified/addKeyAltName.json create mode 100644 test/client-side-encryption/spec/unified/createKey-kms_providers-invalid.json create mode 100644 test/client-side-encryption/spec/unified/createKey.json create mode 100644 test/client-side-encryption/spec/unified/deleteKey.json create mode 100644 test/client-side-encryption/spec/unified/getKey.json create mode 100644 test/client-side-encryption/spec/unified/getKeyByAltName.json create mode 100644 test/client-side-encryption/spec/unified/getKeys.json create mode 100644 test/client-side-encryption/spec/unified/removeKeyAltName.json create mode 100644 test/client-side-encryption/spec/unified/rewrapManyDataKey-decrypt_failure.json create mode 100644 test/client-side-encryption/spec/unified/rewrapManyDataKey-encrypt_failure.json create mode 100644 test/client-side-encryption/spec/unified/rewrapManyDataKey.json diff --git a/pymongo/collection.py b/pymongo/collection.py index 0197198108..ffd883e939 100644 --- a/pymongo/collection.py +++ b/pymongo/collection.py @@ -35,7 +35,7 @@ from bson.raw_bson import RawBSONDocument from bson.son import SON from bson.timestamp import Timestamp -from pymongo import common, helpers, message +from pymongo import ASCENDING, common, helpers, message from pymongo.aggregation import ( _CollectionAggregationCommand, _CollectionRawAggregationCommand, @@ -44,6 +44,7 @@ from pymongo.change_stream import CollectionChangeStream from pymongo.collation import validate_collation_or_none from pymongo.command_cursor import CommandCursor, RawBatchCommandCursor +from pymongo.common import _ecc_coll_name, _ecoc_coll_name, _esc_coll_name from pymongo.cursor import Cursor, RawBatchCursor from pymongo.errors import ( ConfigurationError, @@ -115,6 +116,7 @@ def __init__( write_concern: Optional[WriteConcern] = None, read_concern: Optional["ReadConcern"] = None, session: Optional["ClientSession"] = None, + encrypted_fields: Optional[Mapping[str, Any]] = None, **kwargs: Any, ) -> None: """Get / create a Mongo collection. 
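The ``encrypted_fields`` plumbing in the hunks below boils down to the following usage pattern (a sketch: the client, database, and key id are placeholders, and the ``enxcol_`` names come from the default-naming helpers added to ``common.py`` further down in this commit)::

    from uuid import uuid4

    from bson.binary import Binary
    from pymongo import MongoClient

    client = MongoClient()  # in practice, configured with auto_encryption_opts
    encrypted_fields = {
        "fields": [
            {
                "path": "ssn",
                "keyId": Binary.from_uuid(uuid4()),  # normally an existing data key id
                "bsonType": "string",
                "queries": {"queryType": "equality"},
            }
        ]
    }
    db = client.test
    db.create_collection("people", encrypted_fields=encrypted_fields)
    # Implicitly creates enxcol_.people.esc, enxcol_.people.ecc, and
    # enxcol_.people.ecoc, plus an ascending index on __safeContent__.
    db.drop_collection("people", encrypted_fields=encrypted_fields)  # drops all four
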
@@ -197,7 +199,6 @@ def __init__( write_concern or database.write_concern, read_concern or database.read_concern, ) - if not isinstance(name, str): raise TypeError("name must be an instance of str") @@ -215,7 +216,16 @@ def __init__( self.__name = name self.__full_name = "%s.%s" % (self.__database.name, self.__name) if create or kwargs or collation: - self.__create(kwargs, collation, session) + if encrypted_fields: + common.validate_is_mapping("encrypted_fields", encrypted_fields) + opts = {"clusteredIndex": {"key": {"_id": 1}, "unique": True}} + self.__create(_esc_coll_name(encrypted_fields, name), opts, None, session) + self.__create(_ecc_coll_name(encrypted_fields, name), opts, None, session) + self.__create(_ecoc_coll_name(encrypted_fields, name), opts, None, session) + self.__create(name, kwargs, collation, session, encrypted_fields=encrypted_fields) + self.create_index([("__safeContent__", ASCENDING)], session) + else: + self.__create(name, kwargs, collation, session) self.__write_response_codec_options = self.codec_options._replace( unicode_decode_error_handler="replace", document_class=dict @@ -286,9 +296,12 @@ def _command( user_fields=user_fields, ) - def __create(self, options, collation, session): + def __create(self, name, options, collation, session, encrypted_fields=None): """Sends a create command with the given options.""" - cmd = SON([("create", self.__name)]) + cmd = SON([("create", name)]) + if encrypted_fields: + cmd["encryptedFields"] = encrypted_fields + if options: if "size" in options: options["size"] = float(options["size"]) diff --git a/pymongo/common.py b/pymongo/common.py index 552faf94a2..4376654405 100644 --- a/pymongo/common.py +++ b/pymongo/common.py @@ -792,6 +792,18 @@ def get_validated_options( return validated_options +def _esc_coll_name(encrypted_fields, name): + return encrypted_fields.get("escCollection", f"enxcol_.{name}.esc") + + +def _ecc_coll_name(encrypted_fields, name): + return encrypted_fields.get("eccCollection", f"enxcol_.{name}.ecc") + + +def _ecoc_coll_name(encrypted_fields, name): + return encrypted_fields.get("ecocCollection", f"enxcol_.{name}.ecoc") + + # List of write-concern-related options. WRITE_CONCERN_OPTIONS = frozenset(["w", "wtimeout", "wtimeoutms", "fsync", "j", "journal"]) diff --git a/pymongo/database.py b/pymongo/database.py index 2156a5e972..bb91196f2e 100644 --- a/pymongo/database.py +++ b/pymongo/database.py @@ -38,6 +38,7 @@ from pymongo.change_stream import DatabaseChangeStream from pymongo.collection import Collection from pymongo.command_cursor import CommandCursor +from pymongo.common import _ecc_coll_name, _ecoc_coll_name, _esc_coll_name from pymongo.errors import CollectionInvalid, InvalidName from pymongo.read_preferences import ReadPreference, _ServerMode from pymongo.typings import _CollationIn, _DocumentType, _Pipeline @@ -290,6 +291,7 @@ def create_collection( write_concern: Optional["WriteConcern"] = None, read_concern: Optional["ReadConcern"] = None, session: Optional["ClientSession"] = None, + encrypted_fields: Optional[Mapping[str, Any]] = None, **kwargs: Any, ) -> Collection[_DocumentType]: """Create a new :class:`~pymongo.collection.Collection` in this @@ -321,6 +323,29 @@ def create_collection( :class:`~pymongo.collation.Collation`. - `session` (optional): a :class:`~pymongo.client_session.ClientSession`. + - `encrypted_fields`: Document that describes the encrypted fields for Queryable + Encryption. 
+ For example:: + + { + "escCollection": "enxcol_.encryptedCollection.esc", + "eccCollection": "enxcol_.encryptedCollection.ecc", + "ecocCollection": "enxcol_.encryptedCollection.ecoc", + "fields": [ + { + "path": "firstName", + "keyId": Binary.from_uuid(UUID('00000000-0000-0000-0000-000000000000')), + "bsonType": "string", + "queries": {"queryType": "equality"} + }, + { + "path": "ssn", + "keyId": Binary.from_uuid(UUID('04104104-1041-0410-4104-104104104104')), + "bsonType": "string" + } + ] + + } } - `**kwargs` (optional): additional keyword arguments will be passed as options for the `create collection command`_ @@ -369,6 +394,17 @@ def create_collection( .. _create collection command: https://mongodb.com/docs/manual/reference/command/create """ + if ( + not encrypted_fields + and self.client.options.auto_encryption_opts + and self.client.options.auto_encryption_opts._encrypted_fields_map + ): + encrypted_fields = self.client.options.auto_encryption_opts._encrypted_fields_map.get( + "%s.%s" % (self.name, name) + ) + if encrypted_fields: + common.validate_is_mapping("encrypted_fields", encrypted_fields) + with self.__client._tmp_session(session) as s: # Skip this check in a transaction where listCollections is not # supported. @@ -376,7 +412,6 @@ def create_collection( filter={"name": name}, session=s ): raise CollectionInvalid("collection %s already exists" % name) - return Collection( self, name, @@ -386,6 +421,7 @@ def create_collection( write_concern, read_concern, session=s, + encrypted_fields=encrypted_fields, **kwargs, ) @@ -874,11 +910,27 @@ def list_collection_names( return [result["name"] for result in self.list_collections(session=session, **kwargs)] + def _drop_helper(self, name, session=None, comment=None): + command = SON([("drop", name)]) + if comment is not None: + command["comment"] = comment + + with self.__client._socket_for_writes(session) as sock_info: + return self._command( + sock_info, + command, + allowable_errors=["ns not found", 26], + write_concern=self._write_concern_for(session), + parse_write_concern_error=True, + session=session, + ) + def drop_collection( self, name_or_collection: Union[str, Collection], session: Optional["ClientSession"] = None, comment: Optional[Any] = None, + encrypted_fields: Optional[Mapping[str, Any]] = None, ) -> Dict[str, Any]: """Drop a collection. @@ -889,6 +941,29 @@ def drop_collection( :class:`~pymongo.client_session.ClientSession`. - `comment` (optional): A user-provided comment to attach to this command. + - `encrypted_fields`: Document that describes the encrypted fields for Queryable + Encryption. + For example:: + + { + "escCollection": "enxcol_.encryptedCollection.esc", + "eccCollection": "enxcol_.encryptedCollection.ecc", + "ecocCollection": "enxcol_.encryptedCollection.ecoc", + "fields": [ + { + "path": "firstName", + "keyId": Binary.from_uuid(UUID('00000000-0000-0000-0000-000000000000')), + "bsonType": "string", + "queries": {"queryType": "equality"} + }, + { + "path": "ssn", + "keyId": Binary.from_uuid(UUID('04104104-1041-0410-4104-104104104104')), + "bsonType": "string" + } + ] + + } .. 
note:: The :attr:`~pymongo.database.Database.write_concern` of @@ -911,20 +986,34 @@ def drop_collection( if not isinstance(name, str): raise TypeError("name_or_collection must be an instance of str") - - command = SON([("drop", name)]) - if comment is not None: - command["comment"] = comment - - with self.__client._socket_for_writes(session) as sock_info: - return self._command( - sock_info, - command, - allowable_errors=["ns not found", 26], - write_concern=self._write_concern_for(session), - parse_write_concern_error=True, - session=session, + full_name = "%s.%s" % (self.name, name) + if ( + not encrypted_fields + and self.client.options.auto_encryption_opts + and self.client.options.auto_encryption_opts._encrypted_fields_map + ): + encrypted_fields = self.client.options.auto_encryption_opts._encrypted_fields_map.get( + full_name + ) + if not encrypted_fields and self.client.options.auto_encryption_opts: + colls = list( + self.list_collections(filter={"name": name}, session=session, comment=comment) ) + if colls and colls[0]["options"].get("encryptedFields"): + encrypted_fields = colls[0]["options"]["encryptedFields"] + if encrypted_fields: + common.validate_is_mapping("encrypted_fields", encrypted_fields) + self._drop_helper( + _esc_coll_name(encrypted_fields, name), session=session, comment=comment + ) + self._drop_helper( + _ecc_coll_name(encrypted_fields, name), session=session, comment=comment + ) + self._drop_helper( + _ecoc_coll_name(encrypted_fields, name), session=session, comment=comment + ) + + return self._drop_helper(name, session, comment) def validate_collection( self, diff --git a/pymongo/encryption.py b/pymongo/encryption.py index 25d216d5b5..a7a69dbe34 100644 --- a/pymongo/encryption.py +++ b/pymongo/encryption.py @@ -264,6 +264,11 @@ def __init__(self, client, opts): schema_map = None else: schema_map = _dict_to_bson(opts._schema_map, False, _DATA_KEY_OPTS) + + if opts._encrypted_fields_map is None: + encrypted_fields_map = None + else: + encrypted_fields_map = _dict_to_bson(opts._encrypted_fields_map, False, _DATA_KEY_OPTS) self._bypass_auto_encryption = opts._bypass_auto_encryption self._internal_client = None @@ -304,6 +309,7 @@ def _get_internal_client(encrypter, mongo_client): crypt_shared_lib_path=opts._crypt_shared_lib_path, crypt_shared_lib_required=opts._crypt_shared_lib_required, bypass_encryption=opts._bypass_auto_encryption, + encrypted_fields_map=encrypted_fields_map, bypass_query_analysis=opts._bypass_query_analysis, ), ) diff --git a/pymongo/encryption_options.py b/pymongo/encryption_options.py index 5acc55042a..eedc2ee23c 100644 --- a/pymongo/encryption_options.py +++ b/pymongo/encryption_options.py @@ -23,6 +23,7 @@ except ImportError: _HAVE_PYMONGOCRYPT = False +from pymongo.common import validate_is_mapping from pymongo.errors import ConfigurationError from pymongo.uri_parser import _parse_kms_tls_options @@ -48,6 +49,7 @@ def __init__( crypt_shared_lib_path: Optional[str] = None, crypt_shared_lib_required: bool = False, bypass_query_analysis: bool = False, + encrypted_fields_map: Optional[Mapping] = None, ) -> None: """Options to configure automatic client-side field level encryption. @@ -150,10 +152,33 @@ def __init__( outgoing commands. Set `bypass_query_analysis` to use explicit encryption on indexed fields without the MongoDB Enterprise Advanced licensed crypt_shared library. + - `encrypted_fields_map`: Map of collection namespace ("db.coll") to documents that + described the encrypted fields for Queryable Encryption. 
diff --git a/pymongo/encryption.py b/pymongo/encryption.py index 25d216d5b5..a7a69dbe34 100644 --- a/pymongo/encryption.py +++ b/pymongo/encryption.py @@ -264,6 +264,11 @@ def __init__(self, client, opts): schema_map = None else: schema_map = _dict_to_bson(opts._schema_map, False, _DATA_KEY_OPTS) + + if opts._encrypted_fields_map is None: + encrypted_fields_map = None + else: + encrypted_fields_map = _dict_to_bson(opts._encrypted_fields_map, False, _DATA_KEY_OPTS) self._bypass_auto_encryption = opts._bypass_auto_encryption self._internal_client = None @@ -304,6 +309,7 @@ def _get_internal_client(encrypter, mongo_client): crypt_shared_lib_path=opts._crypt_shared_lib_path, crypt_shared_lib_required=opts._crypt_shared_lib_required, bypass_encryption=opts._bypass_auto_encryption, + encrypted_fields_map=encrypted_fields_map, bypass_query_analysis=opts._bypass_query_analysis, ), ) diff --git a/pymongo/encryption_options.py b/pymongo/encryption_options.py index 5acc55042a..eedc2ee23c 100644 --- a/pymongo/encryption_options.py +++ b/pymongo/encryption_options.py @@ -23,6 +23,7 @@ except ImportError: _HAVE_PYMONGOCRYPT = False +from pymongo.common import validate_is_mapping from pymongo.errors import ConfigurationError from pymongo.uri_parser import _parse_kms_tls_options @@ -48,6 +49,7 @@ def __init__( crypt_shared_lib_path: Optional[str] = None, crypt_shared_lib_required: bool = False, bypass_query_analysis: bool = False, + encrypted_fields_map: Optional[Mapping] = None, ) -> None: """Options to configure automatic client-side field level encryption. @@ -150,10 +152,33 @@ def __init__( outgoing commands. Set `bypass_query_analysis` to use explicit encryption on indexed fields without the MongoDB Enterprise Advanced licensed crypt_shared library. + - `encrypted_fields_map`: Map of collection namespace ("db.coll") to documents that + describe the encrypted fields for Queryable Encryption.
For example:: + + { + "db.encryptedCollection": { + "escCollection": "enxcol_.encryptedCollection.esc", + "eccCollection": "enxcol_.encryptedCollection.ecc", + "ecocCollection": "enxcol_.encryptedCollection.ecoc", + "fields": [ + { + "path": "firstName", + "keyId": Binary.from_uuid(UUID('00000000-0000-0000-0000-000000000000')), + "bsonType": "string", + "queries": {"queryType": "equality"} + }, + { + "path": "ssn", + "keyId": Binary.from_uuid(UUID('04104104-1041-0410-4104-104104104104')), + "bsonType": "string" + } + ] + } + } .. versionchanged:: 4.2 - Added `crypt_shared_lib_path`, `crypt_shared_lib_required`, and `bypass_query_analysis` - parameters. + Added `encrypted_fields_map`, `crypt_shared_lib_path`, `crypt_shared_lib_required`, + and `bypass_query_analysis` parameters. .. versionchanged:: 4.0 Added the `kms_tls_options` parameter and the "kmip" KMS provider. @@ -166,6 +191,10 @@ def __init__( "install a compatible version with: " "python -m pip install 'pymongo[encryption]'" ) + if encrypted_fields_map: + validate_is_mapping("encrypted_fields_map", encrypted_fields_map) + self._encrypted_fields_map = encrypted_fields_map + self._bypass_query_analysis = bypass_query_analysis self._crypt_shared_lib_path = crypt_shared_lib_path self._crypt_shared_lib_required = crypt_shared_lib_required self._kms_providers = kms_providers diff --git a/test/client-side-encryption/spec/aggregate.json b/test/client-side-encryption/spec/legacy/aggregate.json similarity index 100% rename from test/client-side-encryption/spec/aggregate.json rename to test/client-side-encryption/spec/legacy/aggregate.json diff --git a/test/client-side-encryption/spec/awsTemporary.json b/test/client-side-encryption/spec/legacy/awsTemporary.json similarity index 100% rename from test/client-side-encryption/spec/awsTemporary.json rename to test/client-side-encryption/spec/legacy/awsTemporary.json diff --git a/test/client-side-encryption/spec/azureKMS.json b/test/client-side-encryption/spec/legacy/azureKMS.json similarity index 100% rename from test/client-side-encryption/spec/azureKMS.json rename to test/client-side-encryption/spec/legacy/azureKMS.json diff --git a/test/client-side-encryption/spec/badQueries.json b/test/client-side-encryption/spec/legacy/badQueries.json similarity index 100% rename from test/client-side-encryption/spec/badQueries.json rename to test/client-side-encryption/spec/legacy/badQueries.json diff --git a/test/client-side-encryption/spec/badSchema.json b/test/client-side-encryption/spec/legacy/badSchema.json similarity index 100% rename from test/client-side-encryption/spec/badSchema.json rename to test/client-side-encryption/spec/legacy/badSchema.json diff --git a/test/client-side-encryption/spec/basic.json b/test/client-side-encryption/spec/legacy/basic.json similarity index 100% rename from test/client-side-encryption/spec/basic.json rename to test/client-side-encryption/spec/legacy/basic.json diff --git a/test/client-side-encryption/spec/bulk.json b/test/client-side-encryption/spec/legacy/bulk.json similarity index 100% rename from test/client-side-encryption/spec/bulk.json rename to test/client-side-encryption/spec/legacy/bulk.json diff --git a/test/client-side-encryption/spec/bypassAutoEncryption.json b/test/client-side-encryption/spec/legacy/bypassAutoEncryption.json similarity index 100% rename from test/client-side-encryption/spec/bypassAutoEncryption.json rename to test/client-side-encryption/spec/legacy/bypassAutoEncryption.json diff --git
a/test/client-side-encryption/spec/bypassedCommand.json b/test/client-side-encryption/spec/legacy/bypassedCommand.json similarity index 100% rename from test/client-side-encryption/spec/bypassedCommand.json rename to test/client-side-encryption/spec/legacy/bypassedCommand.json diff --git a/test/client-side-encryption/spec/count.json b/test/client-side-encryption/spec/legacy/count.json similarity index 100% rename from test/client-side-encryption/spec/count.json rename to test/client-side-encryption/spec/legacy/count.json diff --git a/test/client-side-encryption/spec/countDocuments.json b/test/client-side-encryption/spec/legacy/countDocuments.json similarity index 100% rename from test/client-side-encryption/spec/countDocuments.json rename to test/client-side-encryption/spec/legacy/countDocuments.json diff --git a/test/client-side-encryption/spec/legacy/create-and-createIndexes.json b/test/client-side-encryption/spec/legacy/create-and-createIndexes.json new file mode 100644 index 0000000000..48638a97c8 --- /dev/null +++ b/test/client-side-encryption/spec/legacy/create-and-createIndexes.json @@ -0,0 +1,115 @@ +{ + "runOn": [ + { + "minServerVersion": "4.1.10" + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "tests": [ + { + "description": "create is OK", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "dropCollection", + "object": "database", + "arguments": { + "collection": "unencryptedCollection" + } + }, + { + "name": "createCollection", + "object": "database", + "arguments": { + "collection": "unencryptedCollection", + "validator": { + "unencrypted_string": "foo" + } + } + }, + { + "name": "assertCollectionExists", + "object": "testRunner", + "arguments": { + "database": "default", + "collection": "unencryptedCollection" + } + } + ] + }, + { + "description": "createIndexes is OK", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "dropCollection", + "object": "database", + "arguments": { + "collection": "unencryptedCollection" + } + }, + { + "name": "createCollection", + "object": "database", + "arguments": { + "collection": "unencryptedCollection" + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "command": { + "createIndexes": "unencryptedCollection", + "indexes": [ + { + "name": "name", + "key": { + "name": 1 + } + } + ] + } + } + }, + { + "name": "assertIndexExists", + "object": "testRunner", + "arguments": { + "database": "default", + "collection": "unencryptedCollection", + "index": "name" + } + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/delete.json b/test/client-side-encryption/spec/legacy/delete.json similarity index 100% rename from test/client-side-encryption/spec/delete.json rename to test/client-side-encryption/spec/legacy/delete.json diff --git a/test/client-side-encryption/spec/distinct.json b/test/client-side-encryption/spec/legacy/distinct.json similarity index 100% rename from test/client-side-encryption/spec/distinct.json rename to 
test/client-side-encryption/spec/legacy/distinct.json diff --git a/test/client-side-encryption/spec/explain.json b/test/client-side-encryption/spec/legacy/explain.json similarity index 100% rename from test/client-side-encryption/spec/explain.json rename to test/client-side-encryption/spec/legacy/explain.json diff --git a/test/client-side-encryption/spec/find.json b/test/client-side-encryption/spec/legacy/find.json similarity index 100% rename from test/client-side-encryption/spec/find.json rename to test/client-side-encryption/spec/legacy/find.json diff --git a/test/client-side-encryption/spec/findOneAndDelete.json b/test/client-side-encryption/spec/legacy/findOneAndDelete.json similarity index 100% rename from test/client-side-encryption/spec/findOneAndDelete.json rename to test/client-side-encryption/spec/legacy/findOneAndDelete.json diff --git a/test/client-side-encryption/spec/findOneAndReplace.json b/test/client-side-encryption/spec/legacy/findOneAndReplace.json similarity index 100% rename from test/client-side-encryption/spec/findOneAndReplace.json rename to test/client-side-encryption/spec/legacy/findOneAndReplace.json diff --git a/test/client-side-encryption/spec/findOneAndUpdate.json b/test/client-side-encryption/spec/legacy/findOneAndUpdate.json similarity index 100% rename from test/client-side-encryption/spec/findOneAndUpdate.json rename to test/client-side-encryption/spec/legacy/findOneAndUpdate.json diff --git a/test/client-side-encryption/spec/legacy/fle2-BypassQueryAnalysis.json b/test/client-side-encryption/spec/legacy/fle2-BypassQueryAnalysis.json new file mode 100644 index 0000000000..629faf189d --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2-BypassQueryAnalysis.json @@ -0,0 +1,289 @@ +{ + "runOn": [ + { + "minServerVersion": "6.0.0", + "topology": [ + "replicaset", + "sharded" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "encrypted_fields": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedIndexed", + "bsonType": "string", + "queries": { + "queryType": "equality", + "contention": { + "$numberLong": "0" + } + } + }, + { + "keyId": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedUnindexed", + "bsonType": "string" + } + ] + }, + "key_vault_data": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + }, + { + "_id": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "HBk9BWihXExNDvTp1lUxOuxuZK2Pe2ZdVdlsxPEBkiO1bS4mG5NNDsQ7zVxJAH8BtdOYp72Ku4Y3nwc0BUpIKsvAKX4eYXtlhv5zUQxWdeNFhg9qK7qb8nqhnnLeT0f25jFSqzWJoT379hfwDeu0bebJHr35QrJ8myZdPMTEDYF08QYQ48ShRBli0S+QzBHHAQiM2iJNr4svg2WR8JSeWQ==", + "subType": "00" + } + }, + 
"creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ], + "tests": [ + { + "description": "BypassQueryAnalysis decrypts", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + }, + "bypassQueryAnalysis": true + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedIndexed": { + "$binary": { + "base64": "BHEBAAAFZAAgAAAAAHb62aV7+mqmaGcotPLdG3KP7S8diFwWMLM/5rYtqLrEBXMAIAAAAAAVJ6OWHRv3OtCozHpt3ZzfBhaxZirLv3B+G8PuaaO4EgVjACAAAAAAsZXWOWA+UiCBbrJNB6bHflB/cn7pWSvwWN2jw4FPeIUFcABQAAAAAMdD1nV2nqeI1eXEQNskDflCy8I7/HvvqDKJ6XxjhrPQWdLqjz+8GosGUsB7A8ee/uG9/guENuL25XD+Fxxkv1LLXtavHOlLF7iW0u9yabqqBXUAEAAAAAQSNFZ4EjSYdhI0EjRWeJASEHQAAgAAAAV2AE0AAAAAq83vqxI0mHYSNBI0VniQEkzZZBBDgeZh+h+gXEmOrSFtVvkUcnHWj/rfPW7iJ0G3UJ8zpuBmUM/VjOMJCY4+eDqdTiPIwX+/vNXegc8FZQAgAAAAAOuac/eRLYakKX6B0vZ1r3QodOQFfjqJD+xlGiPu4/PsAA==", + "subType": "06" + } + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "_id": 1 + } + }, + "result": [ + { + "_id": 1, + "encryptedIndexed": "value123" + } + ] + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedIndexed": { + "$binary": { + "base64": "BHEBAAAFZAAgAAAAAHb62aV7+mqmaGcotPLdG3KP7S8diFwWMLM/5rYtqLrEBXMAIAAAAAAVJ6OWHRv3OtCozHpt3ZzfBhaxZirLv3B+G8PuaaO4EgVjACAAAAAAsZXWOWA+UiCBbrJNB6bHflB/cn7pWSvwWN2jw4FPeIUFcABQAAAAAMdD1nV2nqeI1eXEQNskDflCy8I7/HvvqDKJ6XxjhrPQWdLqjz+8GosGUsB7A8ee/uG9/guENuL25XD+Fxxkv1LLXtavHOlLF7iW0u9yabqqBXUAEAAAAAQSNFZ4EjSYdhI0EjRWeJASEHQAAgAAAAV2AE0AAAAAq83vqxI0mHYSNBI0VniQEkzZZBBDgeZh+h+gXEmOrSFtVvkUcnHWj/rfPW7iJ0G3UJ8zpuBmUM/VjOMJCY4+eDqdTiPIwX+/vNXegc8FZQAgAAAAAOuac/eRLYakKX6B0vZ1r3QodOQFfjqJD+xlGiPu4/PsAA==", + "subType": "06" + } + } + } + ], + "ordered": true + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "find": "default", + "filter": { + "_id": 1 + } + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 1, + "encryptedIndexed": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "ThpoKfQ8AkOzkFfNC1+9PF0pY2nIzfXvRdxQgjkNbBw=", + "subType": "00" + } + } + ] + } + ] + } + } + } + ] +} diff 
--git a/test/client-side-encryption/spec/legacy/fle2-Compact.json b/test/client-side-encryption/spec/legacy/fle2-Compact.json new file mode 100644 index 0000000000..46da99cbfc --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2-Compact.json @@ -0,0 +1,232 @@ +{ + "runOn": [ + { + "minServerVersion": "6.0.0", + "topology": [ + "replicaset", + "sharded" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "encrypted_fields": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedIndexed", + "bsonType": "string", + "queries": { + "queryType": "equality", + "contention": { + "$numberLong": "0" + } + } + }, + { + "keyId": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedUnindexed", + "bsonType": "string" + } + ] + }, + "key_vault_data": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + }, + { + "_id": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "HBk9BWihXExNDvTp1lUxOuxuZK2Pe2ZdVdlsxPEBkiO1bS4mG5NNDsQ7zVxJAH8BtdOYp72Ku4Y3nwc0BUpIKsvAKX4eYXtlhv5zUQxWdeNFhg9qK7qb8nqhnnLeT0f25jFSqzWJoT379hfwDeu0bebJHr35QrJ8myZdPMTEDYF08QYQ48ShRBli0S+QzBHHAQiM2iJNr4svg2WR8JSeWQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ], + "tests": [ + { + "description": "Compact works", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "runCommand", + "object": "database", + "command_name": "compactStructuredEncryptionData", + "arguments": { + "command": { + "compactStructuredEncryptionData": "default" + } + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + }, + { + "command_started_event": { + 
"command": { + "compactStructuredEncryptionData": "default", + "compactionTokens": { + "encryptedIndexed": { + "$binary": { + "base64": "noN+05JsuO1oDg59yypIGj45i+eFH6HOTXOPpeZ//Mk=", + "subType": "00" + } + }, + "encryptedUnindexed": { + "$binary": { + "base64": "SWO8WEoZ2r2Kx/muQKb7+COizy85nIIUFiHh4K9kcvA=", + "subType": "00" + } + } + } + }, + "command_name": "compactStructuredEncryptionData" + } + } + ] + }, + { + "description": "Compact errors on an unencrypted client", + "operations": [ + { + "name": "runCommand", + "object": "database", + "command_name": "compactStructuredEncryptionData", + "arguments": { + "command": { + "compactStructuredEncryptionData": "default" + } + }, + "result": { + "errorContains": "'compactStructuredEncryptionData.compactionTokens' is missing" + } + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2-CreateCollection.json b/test/client-side-encryption/spec/legacy/fle2-CreateCollection.json new file mode 100644 index 0000000000..6836f40e04 --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2-CreateCollection.json @@ -0,0 +1,2239 @@ +{ + "runOn": [ + { + "minServerVersion": "6.0.0", + "topology": [ + "replicaset", + "sharded" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "tests": [ + { + "description": "state collections and index are created", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "aws": {} + }, + "encryptedFieldsMap": { + "default.encryptedCollection": { + "escCollection": "encryptedCollection.esc", + "eccCollection": "encryptedCollection.ecc", + "ecocCollection": "encryptedCollection.ecoc", + "fields": [ + { + "path": "firstName", + "bsonType": "string", + "keyId": { + "$binary": { + "subType": "04", + "base64": "AAAAAAAAAAAAAAAAAAAAAA==" + } + } + } + ] + } + } + } + }, + "operations": [ + { + "name": "dropCollection", + "object": "database", + "arguments": { + "collection": "encryptedCollection" + } + }, + { + "name": "createCollection", + "object": "database", + "arguments": { + "collection": "encryptedCollection" + } + }, + { + "name": "assertCollectionExists", + "object": "testRunner", + "arguments": { + "database": "default", + "collection": "encryptedCollection.esc" + } + }, + { + "name": "assertCollectionExists", + "object": "testRunner", + "arguments": { + "database": "default", + "collection": "encryptedCollection.ecc" + } + }, + { + "name": "assertCollectionExists", + "object": "testRunner", + "arguments": { + "database": "default", + "collection": "encryptedCollection.ecoc" + } + }, + { + "name": "assertCollectionExists", + "object": "testRunner", + "arguments": { + "database": "default", + "collection": "encryptedCollection" + } + }, + { + "name": "assertIndexExists", + "object": "testRunner", + "arguments": { + "database": "default", + "collection": "encryptedCollection", + "index": "__safeContent___1" + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "drop": "encryptedCollection.esc" + }, + "command_name": "drop", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "drop": "encryptedCollection.ecc" + }, + "command_name": "drop", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "drop": "encryptedCollection.ecoc" + }, + "command_name": "drop", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "drop": "encryptedCollection" + }, + "command_name": "drop", + "database_name": "default" + } + }, + { + 
"command_started_event": { + "command": { + "create": "encryptedCollection.esc", + "clusteredIndex": { + "key": { + "_id": 1 + }, + "unique": true + } + }, + "command_name": "create", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "create": "encryptedCollection.ecc", + "clusteredIndex": { + "key": { + "_id": 1 + }, + "unique": true + } + }, + "command_name": "create", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "create": "encryptedCollection.ecoc", + "clusteredIndex": { + "key": { + "_id": 1 + }, + "unique": true + } + }, + "command_name": "create", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "create": "encryptedCollection", + "encryptedFields": { + "escCollection": "encryptedCollection.esc", + "eccCollection": "encryptedCollection.ecc", + "ecocCollection": "encryptedCollection.ecoc", + "fields": [ + { + "path": "firstName", + "bsonType": "string", + "keyId": { + "$binary": { + "subType": "04", + "base64": "AAAAAAAAAAAAAAAAAAAAAA==" + } + } + } + ] + } + }, + "command_name": "create", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "createIndexes": "encryptedCollection", + "indexes": [ + { + "name": "__safeContent___1", + "key": { + "__safeContent__": 1 + } + } + ] + }, + "command_name": "createIndexes", + "database_name": "default" + } + } + ] + }, + { + "description": "default state collection names are applied", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "aws": {} + }, + "encryptedFieldsMap": { + "default.encryptedCollection": { + "fields": [ + { + "path": "firstName", + "bsonType": "string", + "keyId": { + "$binary": { + "subType": "04", + "base64": "AAAAAAAAAAAAAAAAAAAAAA==" + } + }, + "queries": { + "queryType": "equality", + "contention": { + "$numberLong": "0" + } + } + } + ] + } + } + } + }, + "operations": [ + { + "name": "dropCollection", + "object": "database", + "arguments": { + "collection": "encryptedCollection" + } + }, + { + "name": "createCollection", + "object": "database", + "arguments": { + "collection": "encryptedCollection" + } + }, + { + "name": "assertCollectionExists", + "object": "testRunner", + "arguments": { + "database": "default", + "collection": "enxcol_.encryptedCollection.esc" + } + }, + { + "name": "assertCollectionExists", + "object": "testRunner", + "arguments": { + "database": "default", + "collection": "enxcol_.encryptedCollection.ecc" + } + }, + { + "name": "assertCollectionExists", + "object": "testRunner", + "arguments": { + "database": "default", + "collection": "enxcol_.encryptedCollection.ecoc" + } + }, + { + "name": "assertCollectionExists", + "object": "testRunner", + "arguments": { + "database": "default", + "collection": "encryptedCollection" + } + }, + { + "name": "assertIndexExists", + "object": "testRunner", + "arguments": { + "database": "default", + "collection": "encryptedCollection", + "index": "__safeContent___1" + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "drop": "enxcol_.encryptedCollection.esc" + }, + "command_name": "drop", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "drop": "enxcol_.encryptedCollection.ecc" + }, + "command_name": "drop", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "drop": "enxcol_.encryptedCollection.ecoc" + }, + "command_name": "drop", + "database_name": "default" + } + }, + { + 
"command_started_event": { + "command": { + "drop": "encryptedCollection" + }, + "command_name": "drop", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "create": "enxcol_.encryptedCollection.esc", + "clusteredIndex": { + "key": { + "_id": 1 + }, + "unique": true + } + }, + "command_name": "create", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "create": "enxcol_.encryptedCollection.ecc", + "clusteredIndex": { + "key": { + "_id": 1 + }, + "unique": true + } + }, + "command_name": "create", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "create": "enxcol_.encryptedCollection.ecoc", + "clusteredIndex": { + "key": { + "_id": 1 + }, + "unique": true + } + }, + "command_name": "create", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "create": "encryptedCollection", + "encryptedFields": { + "fields": [ + { + "path": "firstName", + "bsonType": "string", + "keyId": { + "$binary": { + "subType": "04", + "base64": "AAAAAAAAAAAAAAAAAAAAAA==" + } + }, + "queries": { + "queryType": "equality", + "contention": { + "$numberLong": "0" + } + } + } + ] + } + }, + "command_name": "create", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "createIndexes": "encryptedCollection", + "indexes": [ + { + "name": "__safeContent___1", + "key": { + "__safeContent__": 1 + } + } + ] + }, + "command_name": "createIndexes", + "database_name": "default" + } + } + ] + }, + { + "description": "drop removes all state collections", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "aws": {} + }, + "encryptedFieldsMap": { + "default.encryptedCollection": { + "fields": [ + { + "path": "firstName", + "bsonType": "string", + "keyId": { + "$binary": { + "subType": "04", + "base64": "AAAAAAAAAAAAAAAAAAAAAA==" + } + }, + "queries": { + "queryType": "equality", + "contention": { + "$numberLong": "0" + } + } + } + ] + } + } + } + }, + "operations": [ + { + "name": "dropCollection", + "object": "database", + "arguments": { + "collection": "encryptedCollection" + } + }, + { + "name": "createCollection", + "object": "database", + "arguments": { + "collection": "encryptedCollection" + } + }, + { + "name": "assertCollectionExists", + "object": "testRunner", + "arguments": { + "database": "default", + "collection": "enxcol_.encryptedCollection.esc" + } + }, + { + "name": "assertCollectionExists", + "object": "testRunner", + "arguments": { + "database": "default", + "collection": "enxcol_.encryptedCollection.ecc" + } + }, + { + "name": "assertCollectionExists", + "object": "testRunner", + "arguments": { + "database": "default", + "collection": "enxcol_.encryptedCollection.ecoc" + } + }, + { + "name": "assertCollectionExists", + "object": "testRunner", + "arguments": { + "database": "default", + "collection": "encryptedCollection" + } + }, + { + "name": "assertIndexExists", + "object": "testRunner", + "arguments": { + "database": "default", + "collection": "encryptedCollection", + "index": "__safeContent___1" + } + }, + { + "name": "dropCollection", + "object": "database", + "arguments": { + "collection": "encryptedCollection" + } + }, + { + "name": "assertCollectionNotExists", + "object": "testRunner", + "arguments": { + "database": "default", + "collection": "enxcol_.encryptedCollection.ecc" + } + }, + { + "name": "assertCollectionNotExists", + "object": "testRunner", + "arguments": { + "database": "default", + "collection": 
"enxcol_.encryptedCollection.ecoc" + } + }, + { + "name": "assertCollectionNotExists", + "object": "testRunner", + "arguments": { + "database": "default", + "collection": "encryptedCollection" + } + }, + { + "name": "assertIndexNotExists", + "object": "testRunner", + "arguments": { + "database": "default", + "collection": "encryptedCollection", + "index": "__safeContent___1" + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "drop": "enxcol_.encryptedCollection.esc" + }, + "command_name": "drop", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "drop": "enxcol_.encryptedCollection.ecc" + }, + "command_name": "drop", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "drop": "enxcol_.encryptedCollection.ecoc" + }, + "command_name": "drop", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "drop": "encryptedCollection" + }, + "command_name": "drop", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "create": "enxcol_.encryptedCollection.esc", + "clusteredIndex": { + "key": { + "_id": 1 + }, + "unique": true + } + }, + "command_name": "create", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "create": "enxcol_.encryptedCollection.ecc", + "clusteredIndex": { + "key": { + "_id": 1 + }, + "unique": true + } + }, + "command_name": "create", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "create": "enxcol_.encryptedCollection.ecoc", + "clusteredIndex": { + "key": { + "_id": 1 + }, + "unique": true + } + }, + "command_name": "create", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "create": "encryptedCollection", + "encryptedFields": { + "fields": [ + { + "path": "firstName", + "bsonType": "string", + "keyId": { + "$binary": { + "subType": "04", + "base64": "AAAAAAAAAAAAAAAAAAAAAA==" + } + }, + "queries": { + "queryType": "equality", + "contention": { + "$numberLong": "0" + } + } + } + ] + } + }, + "command_name": "create", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "createIndexes": "encryptedCollection", + "indexes": [ + { + "name": "__safeContent___1", + "key": { + "__safeContent__": 1 + } + } + ] + }, + "command_name": "createIndexes", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "drop": "enxcol_.encryptedCollection.esc" + }, + "command_name": "drop", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "drop": "enxcol_.encryptedCollection.ecc" + }, + "command_name": "drop", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "drop": "enxcol_.encryptedCollection.ecoc" + }, + "command_name": "drop", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "drop": "encryptedCollection" + }, + "command_name": "drop", + "database_name": "default" + } + } + ] + }, + { + "description": "encryptedFieldsMap with cyclic entries does not loop", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "aws": {} + }, + "encryptedFieldsMap": { + "default.encryptedCollection": { + "escCollection": "encryptedCollection.esc", + "eccCollection": "encryptedCollection.ecc", + "ecocCollection": "encryptedCollection.ecoc", + "fields": [ + { + "path": "firstName", + "bsonType": "string", + "keyId": { + "$binary": { + 
"subType": "04", + "base64": "AAAAAAAAAAAAAAAAAAAAAA==" + } + } + } + ] + }, + "default.encryptedCollection.esc": { + "escCollection": "encryptedCollection", + "eccCollection": "encryptedCollection.ecc", + "ecocCollection": "encryptedCollection.ecoc", + "fields": [ + { + "path": "firstName", + "bsonType": "string", + "keyId": { + "$binary": { + "subType": "04", + "base64": "AAAAAAAAAAAAAAAAAAAAAA==" + } + } + } + ] + } + } + } + }, + "operations": [ + { + "name": "dropCollection", + "object": "database", + "arguments": { + "collection": "encryptedCollection" + } + }, + { + "name": "createCollection", + "object": "database", + "arguments": { + "collection": "encryptedCollection" + } + }, + { + "name": "assertCollectionExists", + "object": "testRunner", + "arguments": { + "database": "default", + "collection": "encryptedCollection.esc" + } + }, + { + "name": "assertCollectionExists", + "object": "testRunner", + "arguments": { + "database": "default", + "collection": "encryptedCollection.ecc" + } + }, + { + "name": "assertCollectionExists", + "object": "testRunner", + "arguments": { + "database": "default", + "collection": "encryptedCollection.ecoc" + } + }, + { + "name": "assertCollectionExists", + "object": "testRunner", + "arguments": { + "database": "default", + "collection": "encryptedCollection" + } + }, + { + "name": "assertIndexExists", + "object": "testRunner", + "arguments": { + "database": "default", + "collection": "encryptedCollection", + "index": "__safeContent___1" + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "drop": "encryptedCollection.esc" + }, + "command_name": "drop", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "drop": "encryptedCollection.ecc" + }, + "command_name": "drop", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "drop": "encryptedCollection.ecoc" + }, + "command_name": "drop", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "drop": "encryptedCollection" + }, + "command_name": "drop", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "create": "encryptedCollection.esc", + "clusteredIndex": { + "key": { + "_id": 1 + }, + "unique": true + } + }, + "command_name": "create", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "create": "encryptedCollection.ecc", + "clusteredIndex": { + "key": { + "_id": 1 + }, + "unique": true + } + }, + "command_name": "create", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "create": "encryptedCollection.ecoc", + "clusteredIndex": { + "key": { + "_id": 1 + }, + "unique": true + } + }, + "command_name": "create", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "create": "encryptedCollection", + "encryptedFields": { + "escCollection": "encryptedCollection.esc", + "eccCollection": "encryptedCollection.ecc", + "ecocCollection": "encryptedCollection.ecoc", + "fields": [ + { + "path": "firstName", + "bsonType": "string", + "keyId": { + "$binary": { + "subType": "04", + "base64": "AAAAAAAAAAAAAAAAAAAAAA==" + } + } + } + ] + } + }, + "command_name": "create", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "createIndexes": "encryptedCollection", + "indexes": [ + { + "name": "__safeContent___1", + "key": { + "__safeContent__": 1 + } + } + ] + }, + "command_name": "createIndexes", + 
"database_name": "default" + } + } + ] + }, + { + "description": "CreateCollection without encryptedFields.", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "aws": {} + }, + "encryptedFieldsMap": { + "default.encryptedCollection": { + "escCollection": "encryptedCollection.esc", + "eccCollection": "encryptedCollection.ecc", + "ecocCollection": "encryptedCollection.ecoc", + "fields": [ + { + "path": "firstName", + "bsonType": "string", + "keyId": { + "$binary": { + "subType": "04", + "base64": "AAAAAAAAAAAAAAAAAAAAAA==" + } + } + } + ] + } + } + } + }, + "operations": [ + { + "name": "dropCollection", + "object": "database", + "arguments": { + "collection": "plaintextCollection" + } + }, + { + "name": "createCollection", + "object": "database", + "arguments": { + "collection": "plaintextCollection" + } + }, + { + "name": "assertCollectionExists", + "object": "testRunner", + "arguments": { + "database": "default", + "collection": "plaintextCollection" + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "plaintextCollection" + } + }, + "command_name": "listCollections", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "drop": "plaintextCollection" + }, + "command_name": "drop", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "create": "plaintextCollection" + }, + "command_name": "create", + "database_name": "default" + } + } + ] + }, + { + "description": "CreateCollection from encryptedFieldsMap.", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "aws": {} + }, + "encryptedFieldsMap": { + "default.encryptedCollection": { + "escCollection": "encryptedCollection.esc", + "eccCollection": "encryptedCollection.ecc", + "ecocCollection": "encryptedCollection.ecoc", + "fields": [ + { + "path": "firstName", + "bsonType": "string", + "keyId": { + "$binary": { + "subType": "04", + "base64": "AAAAAAAAAAAAAAAAAAAAAA==" + } + } + } + ] + } + } + } + }, + "operations": [ + { + "name": "dropCollection", + "object": "database", + "arguments": { + "collection": "encryptedCollection" + } + }, + { + "name": "createCollection", + "object": "database", + "arguments": { + "collection": "encryptedCollection" + } + }, + { + "name": "assertCollectionExists", + "object": "testRunner", + "arguments": { + "database": "default", + "collection": "encryptedCollection.esc" + } + }, + { + "name": "assertCollectionExists", + "object": "testRunner", + "arguments": { + "database": "default", + "collection": "encryptedCollection.ecc" + } + }, + { + "name": "assertCollectionExists", + "object": "testRunner", + "arguments": { + "database": "default", + "collection": "encryptedCollection.ecoc" + } + }, + { + "name": "assertCollectionExists", + "object": "testRunner", + "arguments": { + "database": "default", + "collection": "encryptedCollection" + } + }, + { + "name": "assertIndexExists", + "object": "testRunner", + "arguments": { + "database": "default", + "collection": "encryptedCollection", + "index": "__safeContent___1" + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "drop": "encryptedCollection.esc" + }, + "command_name": "drop", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "drop": "encryptedCollection.ecc" + }, + "command_name": "drop", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "drop": "encryptedCollection.ecoc" + }, 
+ "command_name": "drop", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "drop": "encryptedCollection" + }, + "command_name": "drop", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "create": "encryptedCollection.esc", + "clusteredIndex": { + "key": { + "_id": 1 + }, + "unique": true + } + }, + "command_name": "create", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "create": "encryptedCollection.ecc", + "clusteredIndex": { + "key": { + "_id": 1 + }, + "unique": true + } + }, + "command_name": "create", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "create": "encryptedCollection.ecoc", + "clusteredIndex": { + "key": { + "_id": 1 + }, + "unique": true + } + }, + "command_name": "create", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "create": "encryptedCollection", + "encryptedFields": { + "escCollection": "encryptedCollection.esc", + "eccCollection": "encryptedCollection.ecc", + "ecocCollection": "encryptedCollection.ecoc", + "fields": [ + { + "path": "firstName", + "bsonType": "string", + "keyId": { + "$binary": { + "subType": "04", + "base64": "AAAAAAAAAAAAAAAAAAAAAA==" + } + } + } + ] + } + }, + "command_name": "create", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "createIndexes": "encryptedCollection", + "indexes": [ + { + "name": "__safeContent___1", + "key": { + "__safeContent__": 1 + } + } + ] + }, + "command_name": "createIndexes", + "database_name": "default" + } + } + ] + }, + { + "description": "CreateCollection from encryptedFields.", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "aws": {} + } + } + }, + "operations": [ + { + "name": "dropCollection", + "object": "database", + "arguments": { + "collection": "encryptedCollection", + "encryptedFields": { + "escCollection": "encryptedCollection.esc", + "eccCollection": "encryptedCollection.ecc", + "ecocCollection": "encryptedCollection.ecoc", + "fields": [ + { + "path": "firstName", + "bsonType": "string", + "keyId": { + "$binary": { + "subType": "04", + "base64": "AAAAAAAAAAAAAAAAAAAAAA==" + } + } + } + ] + } + } + }, + { + "name": "createCollection", + "object": "database", + "arguments": { + "collection": "encryptedCollection", + "encryptedFields": { + "escCollection": "encryptedCollection.esc", + "eccCollection": "encryptedCollection.ecc", + "ecocCollection": "encryptedCollection.ecoc", + "fields": [ + { + "path": "firstName", + "bsonType": "string", + "keyId": { + "$binary": { + "subType": "04", + "base64": "AAAAAAAAAAAAAAAAAAAAAA==" + } + } + } + ] + } + } + }, + { + "name": "assertCollectionExists", + "object": "testRunner", + "arguments": { + "database": "default", + "collection": "encryptedCollection.esc" + } + }, + { + "name": "assertCollectionExists", + "object": "testRunner", + "arguments": { + "database": "default", + "collection": "encryptedCollection.ecc" + } + }, + { + "name": "assertCollectionExists", + "object": "testRunner", + "arguments": { + "database": "default", + "collection": "encryptedCollection.ecoc" + } + }, + { + "name": "assertCollectionExists", + "object": "testRunner", + "arguments": { + "database": "default", + "collection": "encryptedCollection" + } + }, + { + "name": "assertIndexExists", + "object": "testRunner", + "arguments": { + "database": "default", + "collection": "encryptedCollection", + "index": "__safeContent___1" + } + } 
+ ], + "expectations": [ + { + "command_started_event": { + "command": { + "drop": "encryptedCollection.esc" + }, + "command_name": "drop", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "drop": "encryptedCollection.ecc" + }, + "command_name": "drop", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "drop": "encryptedCollection.ecoc" + }, + "command_name": "drop", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "drop": "encryptedCollection" + }, + "command_name": "drop", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "create": "encryptedCollection.esc", + "clusteredIndex": { + "key": { + "_id": 1 + }, + "unique": true + } + }, + "command_name": "create", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "create": "encryptedCollection.ecc", + "clusteredIndex": { + "key": { + "_id": 1 + }, + "unique": true + } + }, + "command_name": "create", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "create": "encryptedCollection.ecoc", + "clusteredIndex": { + "key": { + "_id": 1 + }, + "unique": true + } + }, + "command_name": "create", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "create": "encryptedCollection", + "encryptedFields": { + "escCollection": "encryptedCollection.esc", + "eccCollection": "encryptedCollection.ecc", + "ecocCollection": "encryptedCollection.ecoc", + "fields": [ + { + "path": "firstName", + "bsonType": "string", + "keyId": { + "$binary": { + "subType": "04", + "base64": "AAAAAAAAAAAAAAAAAAAAAA==" + } + } + } + ] + } + }, + "command_name": "create", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "encryptedCollection" + } + }, + "command_name": "listCollections", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "createIndexes": "encryptedCollection", + "indexes": [ + { + "name": "__safeContent___1", + "key": { + "__safeContent__": 1 + } + } + ] + }, + "command_name": "createIndexes", + "database_name": "default" + } + } + ] + }, + { + "description": "DropCollection from encryptedFieldsMap", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "aws": {} + }, + "encryptedFieldsMap": { + "default.encryptedCollection": { + "escCollection": "encryptedCollection.esc", + "eccCollection": "encryptedCollection.ecc", + "ecocCollection": "encryptedCollection.ecoc", + "fields": [ + { + "path": "firstName", + "bsonType": "string", + "keyId": { + "$binary": { + "subType": "04", + "base64": "AAAAAAAAAAAAAAAAAAAAAA==" + } + } + } + ] + } + } + } + }, + "operations": [ + { + "name": "dropCollection", + "object": "database", + "arguments": { + "collection": "encryptedCollection" + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "drop": "encryptedCollection.esc" + }, + "command_name": "drop", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "drop": "encryptedCollection.ecc" + }, + "command_name": "drop", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "drop": "encryptedCollection.ecoc" + }, + "command_name": "drop", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "drop": "encryptedCollection" + }, + "command_name": "drop", + 
"database_name": "default" + } + } + ] + }, + { + "description": "DropCollection from encryptedFields", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "aws": {} + }, + "encryptedFieldsMap": {} + } + }, + "operations": [ + { + "name": "dropCollection", + "object": "database", + "arguments": { + "collection": "encryptedCollection", + "encryptedFields": { + "escCollection": "encryptedCollection.esc", + "eccCollection": "encryptedCollection.ecc", + "ecocCollection": "encryptedCollection.ecoc", + "fields": [ + { + "path": "firstName", + "bsonType": "string", + "keyId": { + "$binary": { + "subType": "04", + "base64": "AAAAAAAAAAAAAAAAAAAAAA==" + } + } + } + ] + } + } + }, + { + "name": "createCollection", + "object": "database", + "arguments": { + "collection": "encryptedCollection", + "encryptedFields": { + "escCollection": "encryptedCollection.esc", + "eccCollection": "encryptedCollection.ecc", + "ecocCollection": "encryptedCollection.ecoc", + "fields": [ + { + "path": "firstName", + "bsonType": "string", + "keyId": { + "$binary": { + "subType": "04", + "base64": "AAAAAAAAAAAAAAAAAAAAAA==" + } + } + } + ] + } + } + }, + { + "name": "assertCollectionExists", + "object": "testRunner", + "arguments": { + "database": "default", + "collection": "encryptedCollection.esc" + } + }, + { + "name": "assertCollectionExists", + "object": "testRunner", + "arguments": { + "database": "default", + "collection": "encryptedCollection.ecc" + } + }, + { + "name": "assertCollectionExists", + "object": "testRunner", + "arguments": { + "database": "default", + "collection": "encryptedCollection.ecoc" + } + }, + { + "name": "assertCollectionExists", + "object": "testRunner", + "arguments": { + "database": "default", + "collection": "encryptedCollection" + } + }, + { + "name": "assertIndexExists", + "object": "testRunner", + "arguments": { + "database": "default", + "collection": "encryptedCollection", + "index": "__safeContent___1" + } + }, + { + "name": "dropCollection", + "object": "database", + "arguments": { + "collection": "encryptedCollection", + "encryptedFields": { + "escCollection": "encryptedCollection.esc", + "eccCollection": "encryptedCollection.ecc", + "ecocCollection": "encryptedCollection.ecoc", + "fields": [ + { + "path": "firstName", + "bsonType": "string", + "keyId": { + "$binary": { + "subType": "04", + "base64": "AAAAAAAAAAAAAAAAAAAAAA==" + } + } + } + ] + } + } + }, + { + "name": "assertCollectionNotExists", + "object": "testRunner", + "arguments": { + "database": "default", + "collection": "encryptedCollection.esc" + } + }, + { + "name": "assertCollectionNotExists", + "object": "testRunner", + "arguments": { + "database": "default", + "collection": "encryptedCollection.ecc" + } + }, + { + "name": "assertCollectionNotExists", + "object": "testRunner", + "arguments": { + "database": "default", + "collection": "encryptedCollection.ecoc" + } + }, + { + "name": "assertCollectionNotExists", + "object": "testRunner", + "arguments": { + "database": "default", + "collection": "encryptedCollection" + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "drop": "encryptedCollection.esc" + }, + "command_name": "drop", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "drop": "encryptedCollection.ecc" + }, + "command_name": "drop", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "drop": "encryptedCollection.ecoc" + }, + "command_name": "drop", + "database_name": "default" + } + }, + { 
+ "command_started_event": { + "command": { + "drop": "encryptedCollection" + }, + "command_name": "drop", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "create": "encryptedCollection.esc", + "clusteredIndex": { + "key": { + "_id": 1 + }, + "unique": true + } + }, + "command_name": "create", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "create": "encryptedCollection.ecc", + "clusteredIndex": { + "key": { + "_id": 1 + }, + "unique": true + } + }, + "command_name": "create", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "create": "encryptedCollection.ecoc", + "clusteredIndex": { + "key": { + "_id": 1 + }, + "unique": true + } + }, + "command_name": "create", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "create": "encryptedCollection", + "encryptedFields": { + "escCollection": "encryptedCollection.esc", + "eccCollection": "encryptedCollection.ecc", + "ecocCollection": "encryptedCollection.ecoc", + "fields": [ + { + "path": "firstName", + "bsonType": "string", + "keyId": { + "$binary": { + "subType": "04", + "base64": "AAAAAAAAAAAAAAAAAAAAAA==" + } + } + } + ] + } + }, + "command_name": "create", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "encryptedCollection" + } + }, + "command_name": "listCollections", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "createIndexes": "encryptedCollection", + "indexes": [ + { + "name": "__safeContent___1", + "key": { + "__safeContent__": 1 + } + } + ] + }, + "command_name": "createIndexes", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "drop": "encryptedCollection.esc" + }, + "command_name": "drop", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "drop": "encryptedCollection.ecc" + }, + "command_name": "drop", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "drop": "encryptedCollection.ecoc" + }, + "command_name": "drop", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "drop": "encryptedCollection" + }, + "command_name": "drop", + "database_name": "default" + } + } + ] + }, + { + "description": "DropCollection from remote encryptedFields", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "aws": {} + }, + "encryptedFieldsMap": {} + } + }, + "operations": [ + { + "name": "dropCollection", + "object": "database", + "arguments": { + "collection": "encryptedCollection", + "encryptedFields": { + "escCollection": "encryptedCollection.esc", + "eccCollection": "encryptedCollection.ecc", + "ecocCollection": "encryptedCollection.ecoc", + "fields": [ + { + "path": "firstName", + "bsonType": "string", + "keyId": { + "$binary": { + "subType": "04", + "base64": "AAAAAAAAAAAAAAAAAAAAAA==" + } + } + } + ] + } + } + }, + { + "name": "createCollection", + "object": "database", + "arguments": { + "collection": "encryptedCollection", + "encryptedFields": { + "escCollection": "encryptedCollection.esc", + "eccCollection": "encryptedCollection.ecc", + "ecocCollection": "encryptedCollection.ecoc", + "fields": [ + { + "path": "firstName", + "bsonType": "string", + "keyId": { + "$binary": { + "subType": "04", + "base64": "AAAAAAAAAAAAAAAAAAAAAA==" + } + } + } + ] + } + } + }, + { + "name": "assertCollectionExists", 
+ "object": "testRunner", + "arguments": { + "database": "default", + "collection": "encryptedCollection.esc" + } + }, + { + "name": "assertCollectionExists", + "object": "testRunner", + "arguments": { + "database": "default", + "collection": "encryptedCollection.ecc" + } + }, + { + "name": "assertCollectionExists", + "object": "testRunner", + "arguments": { + "database": "default", + "collection": "encryptedCollection.ecoc" + } + }, + { + "name": "assertCollectionExists", + "object": "testRunner", + "arguments": { + "database": "default", + "collection": "encryptedCollection" + } + }, + { + "name": "assertIndexExists", + "object": "testRunner", + "arguments": { + "database": "default", + "collection": "encryptedCollection", + "index": "__safeContent___1" + } + }, + { + "name": "dropCollection", + "object": "database", + "arguments": { + "collection": "encryptedCollection" + } + }, + { + "name": "assertCollectionNotExists", + "object": "testRunner", + "arguments": { + "database": "default", + "collection": "encryptedCollection.esc" + } + }, + { + "name": "assertCollectionNotExists", + "object": "testRunner", + "arguments": { + "database": "default", + "collection": "encryptedCollection.ecc" + } + }, + { + "name": "assertCollectionNotExists", + "object": "testRunner", + "arguments": { + "database": "default", + "collection": "encryptedCollection.ecoc" + } + }, + { + "name": "assertCollectionNotExists", + "object": "testRunner", + "arguments": { + "database": "default", + "collection": "encryptedCollection" + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "drop": "encryptedCollection.esc" + }, + "command_name": "drop", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "drop": "encryptedCollection.ecc" + }, + "command_name": "drop", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "drop": "encryptedCollection.ecoc" + }, + "command_name": "drop", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "drop": "encryptedCollection" + }, + "command_name": "drop", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "create": "encryptedCollection.esc", + "clusteredIndex": { + "key": { + "_id": 1 + }, + "unique": true + } + }, + "command_name": "create", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "create": "encryptedCollection.ecc", + "clusteredIndex": { + "key": { + "_id": 1 + }, + "unique": true + } + }, + "command_name": "create", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "create": "encryptedCollection.ecoc", + "clusteredIndex": { + "key": { + "_id": 1 + }, + "unique": true + } + }, + "command_name": "create", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "create": "encryptedCollection", + "encryptedFields": { + "escCollection": "encryptedCollection.esc", + "eccCollection": "encryptedCollection.ecc", + "ecocCollection": "encryptedCollection.ecoc", + "fields": [ + { + "path": "firstName", + "bsonType": "string", + "keyId": { + "$binary": { + "subType": "04", + "base64": "AAAAAAAAAAAAAAAAAAAAAA==" + } + } + } + ] + } + }, + "command_name": "create", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "encryptedCollection" + } + }, + "command_name": "listCollections", + "database_name": "default" + } + }, + { + 
"command_started_event": { + "command": { + "createIndexes": "encryptedCollection", + "indexes": [ + { + "name": "__safeContent___1", + "key": { + "__safeContent__": 1 + } + } + ] + }, + "command_name": "createIndexes", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "encryptedCollection" + } + }, + "command_name": "listCollections", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "drop": "encryptedCollection.esc" + }, + "command_name": "drop", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "drop": "encryptedCollection.ecc" + }, + "command_name": "drop", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "drop": "encryptedCollection.ecoc" + }, + "command_name": "drop", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "drop": "encryptedCollection" + }, + "command_name": "drop", + "database_name": "default" + } + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2-DecryptExistingData.json b/test/client-side-encryption/spec/legacy/fle2-DecryptExistingData.json new file mode 100644 index 0000000000..c6d0bca0d1 --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2-DecryptExistingData.json @@ -0,0 +1,148 @@ +{ + "runOn": [ + { + "minServerVersion": "6.0.0", + "topology": [ + "replicaset", + "sharded" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [ + { + "_id": 1, + "encryptedUnindexed": { + "$binary": { + "base64": "BqvN76sSNJh2EjQSNFZ4kBICTQaVZPWgXp41I7mPV1rLFTtw1tXzjcdSEyxpKKqujlko5TeizkB9hHQ009dVY1+fgIiDcefh+eQrm3CkhQ==", + "subType": "06" + } + } + } + ], + "key_vault_data": [ + { + "_id": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "HBk9BWihXExNDvTp1lUxOuxuZK2Pe2ZdVdlsxPEBkiO1bS4mG5NNDsQ7zVxJAH8BtdOYp72Ku4Y3nwc0BUpIKsvAKX4eYXtlhv5zUQxWdeNFhg9qK7qb8nqhnnLeT0f25jFSqzWJoT379hfwDeu0bebJHr35QrJ8myZdPMTEDYF08QYQ48ShRBli0S+QzBHHAQiM2iJNr4svg2WR8JSeWQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ], + "tests": [ + { + "description": "FLE2 decrypt of existing data succeeds", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "find", + "arguments": { + "filter": { + "_id": 1 + } + }, + "result": [ + { + "_id": 1, + "encryptedUnindexed": "value123" + } + ] + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "find": "default", + "filter": { + "_id": 1 + } + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] 
+ } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2-Delete.json b/test/client-side-encryption/spec/legacy/fle2-Delete.json new file mode 100644 index 0000000000..790e818295 --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2-Delete.json @@ -0,0 +1,305 @@ +{ + "runOn": [ + { + "minServerVersion": "6.0.0", + "topology": [ + "replicaset", + "sharded" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "encrypted_fields": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedIndexed", + "bsonType": "string", + "queries": { + "queryType": "equality", + "contention": { + "$numberLong": "0" + } + } + }, + { + "keyId": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedUnindexed", + "bsonType": "string" + } + ] + }, + "key_vault_data": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ], + "tests": [ + { + "description": "Delete can query an FLE2 indexed field", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedIndexed": "value123" + } + } + }, + { + "name": "deleteOne", + "arguments": { + "filter": { + "encryptedIndexed": "value123" + } + }, + "result": { + "deletedCount": 1 + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedIndexed": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": 
"encryptedIndexed", + "bsonType": "string", + "queries": { + "queryType": "equality", + "contention": { + "$numberLong": "0" + } + } + }, + { + "keyId": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedUnindexed", + "bsonType": "string" + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "delete": "default", + "deletes": [ + { + "q": { + "encryptedIndexed": { + "$eq": { + "$binary": { + "base64": "BYkAAAAFZAAgAAAAAPtVteJQAlgb2YMa/+7YWH00sbQPyt7L6Rb8OwBdMmL2BXMAIAAAAAAd44hgVKnEnTFlwNVC14oyc9OZOTspeymusqkRQj57nAVjACAAAAAA19X9v9NlWidu/wR5/C/7WUV54DfL5CkNmT5WYrhxdDcSY20AAAAAAAAAAAAA", + "subType": "06" + } + } + } + }, + "limit": 1 + } + ], + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedIndexed", + "bsonType": "string", + "queries": { + "queryType": "equality", + "contention": { + "$numberLong": "0" + } + } + }, + { + "keyId": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedUnindexed", + "bsonType": "string" + } + ] + } + }, + "deleteTokens": { + "default.default": { + "encryptedIndexed": { + "e": { + "$binary": { + "base64": "65pz95EthqQpfoHS9nWvdCh05AV+OokP7GUaI+7j8+w=", + "subType": "00" + } + }, + "o": { + "$binary": { + "base64": "noN+05JsuO1oDg59yypIGj45i+eFH6HOTXOPpeZ//Mk=", + "subType": "00" + } + } + } + } + } + } + }, + "command_name": "delete" + } + } + ], + "outcome": { + "collection": { + "data": [] + } + } + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2-EncryptedFields-vs-EncryptedFieldsMap.json b/test/client-side-encryption/spec/legacy/fle2-EncryptedFields-vs-EncryptedFieldsMap.json new file mode 100644 index 0000000000..ea3eb4850c --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2-EncryptedFields-vs-EncryptedFieldsMap.json @@ -0,0 +1,217 @@ +{ + "runOn": [ + { + "minServerVersion": "6.0.0", + "topology": [ + "replicaset", + "sharded" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "encrypted_fields": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedIndexed", + "bsonType": "string", + "queries": { + "queryType": "equality", + "contention": { + "$numberLong": "0" + } + } + }, + { + "keyId": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedUnindexed", + "bsonType": "string" + } + ] + }, + "key_vault_data": [ + { + "_id": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "HBk9BWihXExNDvTp1lUxOuxuZK2Pe2ZdVdlsxPEBkiO1bS4mG5NNDsQ7zVxJAH8BtdOYp72Ku4Y3nwc0BUpIKsvAKX4eYXtlhv5zUQxWdeNFhg9qK7qb8nqhnnLeT0f25jFSqzWJoT379hfwDeu0bebJHr35QrJ8myZdPMTEDYF08QYQ48ShRBli0S+QzBHHAQiM2iJNr4svg2WR8JSeWQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + 
"masterKey": { + "provider": "local" + } + } + ], + "tests": [ + { + "description": "encryptedFieldsMap is preferred over remote encryptedFields", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + }, + "encryptedFieldsMap": { + "default.default": { + "escCollection": "esc", + "eccCollection": "ecc", + "ecocCollection": "ecoc", + "fields": [] + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedUnindexed": { + "$binary": { + "base64": "BqvN76sSNJh2EjQSNFZ4kBICTQaVZPWgXp41I7mPV1rLFTtw1tXzjcdSEyxpKKqujlko5TeizkB9hHQ009dVY1+fgIiDcefh+eQrm3CkhQ==", + "subType": "06" + } + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "_id": 1 + } + }, + "result": [ + { + "_id": 1, + "encryptedUnindexed": "value123" + } + ] + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedUnindexed": { + "$binary": { + "base64": "BqvN76sSNJh2EjQSNFZ4kBICTQaVZPWgXp41I7mPV1rLFTtw1tXzjcdSEyxpKKqujlko5TeizkB9hHQ009dVY1+fgIiDcefh+eQrm3CkhQ==", + "subType": "06" + } + } + } + ], + "ordered": true + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "find": "default", + "filter": { + "_id": 1 + } + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 1, + "encryptedUnindexed": { + "$binary": { + "base64": "BqvN76sSNJh2EjQSNFZ4kBICTQaVZPWgXp41I7mPV1rLFTtw1tXzjcdSEyxpKKqujlko5TeizkB9hHQ009dVY1+fgIiDcefh+eQrm3CkhQ==", + "subType": "06" + } + } + } + ] + } + } + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2-EncryptedFields-vs-jsonSchema.json b/test/client-side-encryption/spec/legacy/fle2-EncryptedFields-vs-jsonSchema.json new file mode 100644 index 0000000000..69abfa7cfb --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2-EncryptedFields-vs-jsonSchema.json @@ -0,0 +1,304 @@ +{ + "runOn": [ + { + "minServerVersion": "6.0.0", + "topology": [ + "replicaset", + "sharded" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "json_schema": { + "properties": {}, + "bsonType": "object" + }, + "encrypted_fields": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedIndexed", + "bsonType": "string", + "queries": { + "queryType": "equality", + "contention": { + "$numberLong": "0" + } + } + }, + { + "keyId": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedUnindexed", + "bsonType": "string" + } + ] + }, + "key_vault_data": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + 
"base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ], + "tests": [ + { + "description": "encryptedFields is preferred over jsonSchema", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedIndexed": "123" + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedIndexed": "123" + } + }, + "result": [ + { + "_id": 1, + "encryptedIndexed": "123" + } + ] + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedIndexed": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedIndexed", + "bsonType": "string", + "queries": { + "queryType": "equality", + "contention": { + "$numberLong": "0" + } + } + }, + { + "keyId": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedUnindexed", + "bsonType": "string" + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "find": "default", + "filter": { + "encryptedIndexed": { + "$eq": { + "$binary": { + "base64": "BYkAAAAFZAAgAAAAAPGmZcUzdE/FPILvRSyAScGvZparGI2y9rJ/vSBxgCujBXMAIAAAAACi1RjmndKqgnXy7xb22RzUbnZl1sOZRXPOC0KcJkAxmQVjACAAAAAAWuidNu47c9A4Clic3DvFhn1AQJVC+FJtoE5bGZuz6PsSY20AAAAAAAAAAAAA", + "subType": "06" + } + } + } + }, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedIndexed", + "bsonType": "string", + "queries": { + "queryType": "equality", + "contention": { + "$numberLong": "0" + } + } + }, + { + "keyId": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedUnindexed", + 
"bsonType": "string" + } + ] + } + } + } + }, + "command_name": "find" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 1, + "encryptedIndexed": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "31eCYlbQoVboc5zwC8IoyJVSkag9PxREka8dkmbXJeY=", + "subType": "00" + } + } + ] + } + ] + } + } + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2-EncryptedFieldsMap-defaults.json b/test/client-side-encryption/spec/legacy/fle2-EncryptedFieldsMap-defaults.json new file mode 100644 index 0000000000..030952e056 --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2-EncryptedFieldsMap-defaults.json @@ -0,0 +1,105 @@ +{ + "runOn": [ + { + "minServerVersion": "6.0.0", + "topology": [ + "replicaset", + "sharded" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "key_vault_data": [], + "tests": [ + { + "description": "default state collections are applied to encryptionInformation", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + }, + "encryptedFieldsMap": { + "default.default": { + "fields": [] + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "foo": { + "$binary": { + "base64": "BYkAAAAFZAAgAAAAAE8KGPgq7h3n9nH5lfHcia8wtOTLwGkZNLBesb6PULqbBXMAIAAAAACq0558QyD3c3jkR5k0Zc9UpQK8ByhXhtn2d1xVQnuJ3AVjACAAAAAA1003zUWGwD4zVZ0KeihnZOthS3V6CEHUfnJZcIYHefISY20AAAAAAAAAAAAA", + "subType": "06" + } + } + } + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "foo": { + "$binary": { + "base64": "BYkAAAAFZAAgAAAAAE8KGPgq7h3n9nH5lfHcia8wtOTLwGkZNLBesb6PULqbBXMAIAAAAACq0558QyD3c3jkR5k0Zc9UpQK8ByhXhtn2d1xVQnuJ3AVjACAAAAAA1003zUWGwD4zVZ0KeihnZOthS3V6CEHUfnJZcIYHefISY20AAAAAAAAAAAAA", + "subType": "06" + } + } + } + ], + "encryptionInformation": { + "type": { + "$numberInt": "1" + }, + "schema": { + "default.default": { + "fields": [], + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc" + } + } + }, + "ordered": true + }, + "command_name": "insert" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 1, + "foo": { + "$binary": { + "base64": "BYkAAAAFZAAgAAAAAE8KGPgq7h3n9nH5lfHcia8wtOTLwGkZNLBesb6PULqbBXMAIAAAAACq0558QyD3c3jkR5k0Zc9UpQK8ByhXhtn2d1xVQnuJ3AVjACAAAAAA1003zUWGwD4zVZ0KeihnZOthS3V6CEHUfnJZcIYHefISY20AAAAAAAAAAAAA", + "subType": "06" + } + } + } + ] + } + } + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2-FindOneAndUpdate.json b/test/client-side-encryption/spec/legacy/fle2-FindOneAndUpdate.json new file mode 100644 index 0000000000..b8088515ca --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2-FindOneAndUpdate.json @@ -0,0 +1,602 @@ +{ + "runOn": [ + { + "minServerVersion": "6.0.0", + "topology": [ + "replicaset", + "sharded" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "encrypted_fields": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedIndexed", + 
"bsonType": "string", + "queries": { + "queryType": "equality", + "contention": { + "$numberLong": "0" + } + } + }, + { + "keyId": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedUnindexed", + "bsonType": "string" + } + ] + }, + "key_vault_data": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ], + "tests": [ + { + "description": "findOneAndUpdate can query an FLE2 indexed field", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedIndexed": "value123" + } + } + }, + { + "name": "findOneAndUpdate", + "arguments": { + "filter": { + "encryptedIndexed": "value123" + }, + "update": { + "$set": { + "foo": "bar" + } + }, + "returnDocument": "Before" + }, + "result": { + "_id": 1, + "encryptedIndexed": "value123" + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedIndexed": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedIndexed", + "bsonType": "string", + "queries": { + "queryType": "equality", + "contention": { + "$numberLong": "0" + } + } + }, + { + "keyId": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedUnindexed", + "bsonType": "string" + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "findAndModify": "default", + "query": { + "encryptedIndexed": { + "$eq": { + "$binary": { + "base64": "BYkAAAAFZAAgAAAAAPtVteJQAlgb2YMa/+7YWH00sbQPyt7L6Rb8OwBdMmL2BXMAIAAAAAAd44hgVKnEnTFlwNVC14oyc9OZOTspeymusqkRQj57nAVjACAAAAAA19X9v9NlWidu/wR5/C/7WUV54DfL5CkNmT5WYrhxdDcSY20AAAAAAAAAAAAA", + "subType": "06" + } + } + } + }, + "update": { + "$set": { + "foo": "bar" + } + }, + 
"encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedIndexed", + "bsonType": "string", + "queries": { + "queryType": "equality", + "contention": { + "$numberLong": "0" + } + } + }, + { + "keyId": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedUnindexed", + "bsonType": "string" + } + ] + } + }, + "deleteTokens": { + "default.default": { + "encryptedIndexed": { + "e": { + "$binary": { + "base64": "65pz95EthqQpfoHS9nWvdCh05AV+OokP7GUaI+7j8+w=", + "subType": "00" + } + }, + "o": { + "$binary": { + "base64": "noN+05JsuO1oDg59yypIGj45i+eFH6HOTXOPpeZ//Mk=", + "subType": "00" + } + } + } + } + } + } + }, + "command_name": "findAndModify" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 1, + "encryptedIndexed": { + "$$type": "binData" + }, + "foo": "bar", + "__safeContent__": [ + { + "$binary": { + "base64": "ThpoKfQ8AkOzkFfNC1+9PF0pY2nIzfXvRdxQgjkNbBw=", + "subType": "00" + } + } + ] + } + ] + } + } + }, + { + "description": "findOneAndUpdate can modify an FLE2 indexed field", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedIndexed": "value123" + } + } + }, + { + "name": "findOneAndUpdate", + "arguments": { + "filter": { + "encryptedIndexed": "value123" + }, + "update": { + "$set": { + "encryptedIndexed": "value456" + } + }, + "returnDocument": "Before" + }, + "result": { + "_id": 1, + "encryptedIndexed": "value123" + } + }, + { + "name": "find", + "arguments": { + "filter": { + "_id": 1 + } + }, + "result": [ + { + "encryptedIndexed": "value456" + } + ] + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedIndexed": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedIndexed", + "bsonType": "string", + "queries": { + "queryType": "equality", + "contention": { + "$numberLong": "0" + } + } + }, + { + "keyId": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedUnindexed", + "bsonType": "string" + } + 
] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "findAndModify": "default", + "query": { + "encryptedIndexed": { + "$eq": { + "$binary": { + "base64": "BYkAAAAFZAAgAAAAAPtVteJQAlgb2YMa/+7YWH00sbQPyt7L6Rb8OwBdMmL2BXMAIAAAAAAd44hgVKnEnTFlwNVC14oyc9OZOTspeymusqkRQj57nAVjACAAAAAA19X9v9NlWidu/wR5/C/7WUV54DfL5CkNmT5WYrhxdDcSY20AAAAAAAAAAAAA", + "subType": "06" + } + } + } + }, + "update": { + "$set": { + "encryptedIndexed": { + "$$type": "binData" + } + } + }, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedIndexed", + "bsonType": "string", + "queries": { + "queryType": "equality", + "contention": { + "$numberLong": "0" + } + } + }, + { + "keyId": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedUnindexed", + "bsonType": "string" + } + ] + } + }, + "deleteTokens": { + "default.default": { + "encryptedIndexed": { + "e": { + "$binary": { + "base64": "65pz95EthqQpfoHS9nWvdCh05AV+OokP7GUaI+7j8+w=", + "subType": "00" + } + }, + "o": { + "$binary": { + "base64": "noN+05JsuO1oDg59yypIGj45i+eFH6HOTXOPpeZ//Mk=", + "subType": "00" + } + } + } + } + } + } + }, + "command_name": "findAndModify" + } + }, + { + "command_started_event": { + "command": { + "find": "default", + "filter": { + "_id": { + "$eq": 1 + } + } + }, + "command_name": "find" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 1, + "encryptedIndexed": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "rhe7/w8Ob8Unl44rGr/moScx6m5VODQnscDhF4Nkn6g=", + "subType": "00" + } + } + ] + } + ] + } + } + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2-InsertFind-Indexed.json b/test/client-side-encryption/spec/legacy/fle2-InsertFind-Indexed.json new file mode 100644 index 0000000000..142cacf2fd --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2-InsertFind-Indexed.json @@ -0,0 +1,300 @@ +{ + "runOn": [ + { + "minServerVersion": "6.0.0", + "topology": [ + "replicaset", + "sharded" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "encrypted_fields": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedIndexed", + "bsonType": "string", + "queries": { + "queryType": "equality", + "contention": { + "$numberLong": "0" + } + } + }, + { + "keyId": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedUnindexed", + "bsonType": "string" + } + ] + }, + "key_vault_data": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": 
"1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ], + "tests": [ + { + "description": "Insert and find FLE2 indexed field", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedIndexed": "123" + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedIndexed": "123" + } + }, + "result": [ + { + "_id": 1, + "encryptedIndexed": "123" + } + ] + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedIndexed": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedIndexed", + "bsonType": "string", + "queries": { + "queryType": "equality", + "contention": { + "$numberLong": "0" + } + } + }, + { + "keyId": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedUnindexed", + "bsonType": "string" + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "find": "default", + "filter": { + "encryptedIndexed": { + "$eq": { + "$binary": { + "base64": "BYkAAAAFZAAgAAAAAPGmZcUzdE/FPILvRSyAScGvZparGI2y9rJ/vSBxgCujBXMAIAAAAACi1RjmndKqgnXy7xb22RzUbnZl1sOZRXPOC0KcJkAxmQVjACAAAAAAWuidNu47c9A4Clic3DvFhn1AQJVC+FJtoE5bGZuz6PsSY20AAAAAAAAAAAAA", + "subType": "06" + } + } + } + }, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedIndexed", + "bsonType": "string", + "queries": { + "queryType": "equality", + "contention": { + "$numberLong": "0" + } + } + }, + { + "keyId": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedUnindexed", + "bsonType": "string" + } + ] + } + } + } + }, + "command_name": "find" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 1, + "encryptedIndexed": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "31eCYlbQoVboc5zwC8IoyJVSkag9PxREka8dkmbXJeY=", + "subType": "00" + } + } + ] + } + ] + } + } + } + ] +} diff --git 
a/test/client-side-encryption/spec/legacy/fle2-InsertFind-Unindexed.json b/test/client-side-encryption/spec/legacy/fle2-InsertFind-Unindexed.json new file mode 100644 index 0000000000..1a75095907 --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2-InsertFind-Unindexed.json @@ -0,0 +1,250 @@ +{ + "runOn": [ + { + "minServerVersion": "6.0.0", + "topology": [ + "replicaset", + "sharded" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "encrypted_fields": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedIndexed", + "bsonType": "string", + "queries": { + "queryType": "equality", + "contention": { + "$numberLong": "0" + } + } + }, + { + "keyId": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedUnindexed", + "bsonType": "string" + } + ] + }, + "key_vault_data": [ + { + "_id": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "HBk9BWihXExNDvTp1lUxOuxuZK2Pe2ZdVdlsxPEBkiO1bS4mG5NNDsQ7zVxJAH8BtdOYp72Ku4Y3nwc0BUpIKsvAKX4eYXtlhv5zUQxWdeNFhg9qK7qb8nqhnnLeT0f25jFSqzWJoT379hfwDeu0bebJHr35QrJ8myZdPMTEDYF08QYQ48ShRBli0S+QzBHHAQiM2iJNr4svg2WR8JSeWQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ], + "tests": [ + { + "description": "Insert and find FLE2 unindexed field", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedUnindexed": "value123" + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "_id": 1 + } + }, + "result": [ + { + "_id": 1, + "encryptedUnindexed": "value123" + } + ] + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedUnindexed": { + "$$type": "binData" + } + } + ], + "ordered": true + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "find": "default", + "filter": { + "_id": { + "$eq": 1 + } + } + }, + "command_name": "find" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 1, + "encryptedUnindexed": { + "$$type": "binData" + } + } + ] + } + } + }, + { + "description": "Query with an unindexed field fails", + "clientOptions": { + "autoEncryptOpts": { + 
"kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedUnindexed": "value123" + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedUnindexed": "value123" + } + }, + "result": { + "errorContains": "Cannot query" + } + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2-MissingKey.json b/test/client-side-encryption/spec/legacy/fle2-MissingKey.json new file mode 100644 index 0000000000..2db1cd7702 --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2-MissingKey.json @@ -0,0 +1,118 @@ +{ + "runOn": [ + { + "minServerVersion": "6.0.0", + "topology": [ + "replicaset", + "sharded" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [ + { + "encryptedUnindexed": { + "$binary": { + "base64": "BqvN76sSNJh2EjQSNFZ4kBICTQaVZPWgXp41I7mPV1rLFTtw1tXzjcdSEyxpKKqujlko5TeizkB9hHQ009dVY1+fgIiDcefh+eQrm3CkhQ==", + "subType": "06" + } + } + } + ], + "encrypted_fields": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedIndexed", + "bsonType": "string", + "queries": { + "queryType": "equality", + "contention": { + "$numberLong": "0" + } + } + }, + { + "keyId": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedUnindexed", + "bsonType": "string" + } + ] + }, + "key_vault_data": [], + "tests": [ + { + "description": "FLE2 encrypt fails with mising key", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedIndexed": "123" + } + }, + "result": { + "errorContains": "not all keys requested were satisfied" + } + } + ] + }, + { + "description": "FLE2 decrypt fails with mising key", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "find", + "arguments": { + "filter": {} + }, + "result": { + "errorContains": "not all keys requested were satisfied" + } + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2-NoEncryption.json b/test/client-side-encryption/spec/legacy/fle2-NoEncryption.json new file mode 100644 index 0000000000..e9dd586c26 --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2-NoEncryption.json @@ -0,0 +1,86 @@ +{ + "runOn": [ + { + "minServerVersion": "6.0.0", + "topology": [ + "replicaset", + "sharded" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "key_vault_data": [], + "encrypted_fields": { + "fields": [] + }, + "tests": [ + { + "description": "insert with no encryption succeeds", + 
"clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "foo": "bar" + } + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "foo": "bar" + } + ], + "ordered": true + }, + "command_name": "insert" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 1, + "foo": "bar" + } + ] + } + } + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2-Update.json b/test/client-side-encryption/spec/legacy/fle2-Update.json new file mode 100644 index 0000000000..66a291902a --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2-Update.json @@ -0,0 +1,610 @@ +{ + "runOn": [ + { + "minServerVersion": "6.0.0", + "topology": [ + "replicaset", + "sharded" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "encrypted_fields": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedIndexed", + "bsonType": "string", + "queries": { + "queryType": "equality", + "contention": { + "$numberLong": "0" + } + } + }, + { + "keyId": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedUnindexed", + "bsonType": "string" + } + ] + }, + "key_vault_data": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ], + "tests": [ + { + "description": "Update can query an FLE2 indexed field", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedIndexed": "value123" + } + } + }, + { + "name": "updateOne", + "arguments": { + "filter": { + "encryptedIndexed": "value123" + }, + "update": { + "$set": { + "foo": "bar" + } + } + }, + "result": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0 + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "find": 
"datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedIndexed": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedIndexed", + "bsonType": "string", + "queries": { + "queryType": "equality", + "contention": { + "$numberLong": "0" + } + } + }, + { + "keyId": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedUnindexed", + "bsonType": "string" + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "update": "default", + "updates": [ + { + "q": { + "encryptedIndexed": { + "$eq": { + "$binary": { + "base64": "BYkAAAAFZAAgAAAAAPtVteJQAlgb2YMa/+7YWH00sbQPyt7L6Rb8OwBdMmL2BXMAIAAAAAAd44hgVKnEnTFlwNVC14oyc9OZOTspeymusqkRQj57nAVjACAAAAAA19X9v9NlWidu/wR5/C/7WUV54DfL5CkNmT5WYrhxdDcSY20AAAAAAAAAAAAA", + "subType": "06" + } + } + } + }, + "u": { + "$set": { + "foo": "bar" + } + } + } + ], + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedIndexed", + "bsonType": "string", + "queries": { + "queryType": "equality", + "contention": { + "$numberLong": "0" + } + } + }, + { + "keyId": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedUnindexed", + "bsonType": "string" + } + ] + } + }, + "deleteTokens": { + "default.default": { + "encryptedIndexed": { + "e": { + "$binary": { + "base64": "65pz95EthqQpfoHS9nWvdCh05AV+OokP7GUaI+7j8+w=", + "subType": "00" + } + }, + "o": { + "$binary": { + "base64": "noN+05JsuO1oDg59yypIGj45i+eFH6HOTXOPpeZ//Mk=", + "subType": "00" + } + } + } + } + } + } + }, + "command_name": "update" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 1, + "encryptedIndexed": { + "$$type": "binData" + }, + "foo": "bar", + "__safeContent__": [ + { + "$binary": { + "base64": "ThpoKfQ8AkOzkFfNC1+9PF0pY2nIzfXvRdxQgjkNbBw=", + "subType": "00" + } + } + ] + } + ] + } + } + }, + { + "description": "Update can modify an FLE2 indexed field", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedIndexed": "value123" + } + } + }, + { + "name": "updateOne", + "arguments": { + "filter": { + "encryptedIndexed": "value123" + }, + "update": { + "$set": { + "encryptedIndexed": "value456" + } + } + }, + 
"result": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0 + } + }, + { + "name": "find", + "arguments": { + "filter": { + "_id": 1 + } + }, + "result": [ + { + "encryptedIndexed": "value456" + } + ] + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedIndexed": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedIndexed", + "bsonType": "string", + "queries": { + "queryType": "equality", + "contention": { + "$numberLong": "0" + } + } + }, + { + "keyId": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedUnindexed", + "bsonType": "string" + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "update": "default", + "updates": [ + { + "q": { + "encryptedIndexed": { + "$eq": { + "$binary": { + "base64": "BYkAAAAFZAAgAAAAAPtVteJQAlgb2YMa/+7YWH00sbQPyt7L6Rb8OwBdMmL2BXMAIAAAAAAd44hgVKnEnTFlwNVC14oyc9OZOTspeymusqkRQj57nAVjACAAAAAA19X9v9NlWidu/wR5/C/7WUV54DfL5CkNmT5WYrhxdDcSY20AAAAAAAAAAAAA", + "subType": "06" + } + } + } + }, + "u": { + "$set": { + "encryptedIndexed": { + "$$type": "binData" + } + } + } + } + ], + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedIndexed", + "bsonType": "string", + "queries": { + "queryType": "equality", + "contention": { + "$numberLong": "0" + } + } + }, + { + "keyId": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedUnindexed", + "bsonType": "string" + } + ] + } + }, + "deleteTokens": { + "default.default": { + "encryptedIndexed": { + "e": { + "$binary": { + "base64": "65pz95EthqQpfoHS9nWvdCh05AV+OokP7GUaI+7j8+w=", + "subType": "00" + } + }, + "o": { + "$binary": { + "base64": "noN+05JsuO1oDg59yypIGj45i+eFH6HOTXOPpeZ//Mk=", + "subType": "00" + } + } + } + } + } + } + }, + "command_name": "update" + } + }, + { + "command_started_event": { + "command": { + "find": "default", + "filter": { + "_id": { + "$eq": 1 + } + } + }, + "command_name": "find" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 1, + "encryptedIndexed": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "rhe7/w8Ob8Unl44rGr/moScx6m5VODQnscDhF4Nkn6g=", + "subType": "00" + } + } + ] + } + ] + } + } + } + ] +} diff --git 
a/test/client-side-encryption/spec/legacy/fle2-validatorAndPartialFieldExpression.json b/test/client-side-encryption/spec/legacy/fle2-validatorAndPartialFieldExpression.json new file mode 100644 index 0000000000..fab36f75a1 --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2-validatorAndPartialFieldExpression.json @@ -0,0 +1,520 @@ +{ + "runOn": [ + { + "minServerVersion": "6.0.0", + "topology": [ + "replicaset", + "sharded" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "tests": [ + { + "description": "create with a validator on an unencrypted field is OK", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + }, + "encryptedFieldsMap": { + "default.encryptedCollection": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedIndexed", + "bsonType": "string", + "queries": { + "queryType": "equality", + "contention": { + "$numberLong": "0" + } + } + }, + { + "keyId": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedUnindexed", + "bsonType": "string" + } + ] + } + } + } + }, + "operations": [ + { + "name": "dropCollection", + "object": "database", + "arguments": { + "collection": "encryptedCollection" + } + }, + { + "name": "createCollection", + "object": "database", + "arguments": { + "collection": "encryptedCollection", + "validator": { + "unencrypted_string": "foo" + } + } + }, + { + "name": "assertCollectionExists", + "object": "testRunner", + "arguments": { + "database": "default", + "collection": "encryptedCollection" + } + } + ] + }, + { + "description": "create with a validator on an encrypted field is an error", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + }, + "encryptedFieldsMap": { + "default.encryptedCollection": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedIndexed", + "bsonType": "string", + "queries": { + "queryType": "equality", + "contention": { + "$numberLong": "0" + } + } + }, + { + "keyId": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedUnindexed", + "bsonType": "string" + } + ] + } + } + } + }, + "operations": [ + { + "name": "dropCollection", + "object": "database", + "arguments": { + "collection": "encryptedCollection" + } + }, + { + "name": "createCollection", + "object": "database", + "arguments": { + "collection": "encryptedCollection", + "validator": { + "encryptedIndexed": "foo" + } + }, + "result": { + "errorContains": "Comparison to encrypted fields not supported" + } + } + ] + }, + { + "description": "collMod with a validator on an unencrypted field is OK", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + 
"local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + }, + "encryptedFieldsMap": { + "default.encryptedCollection": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedIndexed", + "bsonType": "string", + "queries": { + "queryType": "equality", + "contention": { + "$numberLong": "0" + } + } + }, + { + "keyId": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedUnindexed", + "bsonType": "string" + } + ] + } + } + } + }, + "operations": [ + { + "name": "dropCollection", + "object": "database", + "arguments": { + "collection": "encryptedCollection" + } + }, + { + "name": "createCollection", + "object": "database", + "arguments": { + "collection": "encryptedCollection" + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "command": { + "collMod": "encryptedCollection", + "validator": { + "unencrypted_string": "foo" + } + } + } + } + ] + }, + { + "description": "collMod with a validator on an encrypted field is an error", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + }, + "encryptedFieldsMap": { + "default.encryptedCollection": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedIndexed", + "bsonType": "string", + "queries": { + "queryType": "equality", + "contention": { + "$numberLong": "0" + } + } + }, + { + "keyId": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedUnindexed", + "bsonType": "string" + } + ] + } + } + } + }, + "operations": [ + { + "name": "dropCollection", + "object": "database", + "arguments": { + "collection": "encryptedCollection" + } + }, + { + "name": "createCollection", + "object": "database", + "arguments": { + "collection": "encryptedCollection" + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "command": { + "collMod": "encryptedCollection", + "validator": { + "encryptedIndexed": "foo" + } + } + }, + "result": { + "errorContains": "Comparison to encrypted fields not supported" + } + } + ] + }, + { + "description": "createIndexes with a partialFilterExpression on an unencrypted field is OK", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + }, + "encryptedFieldsMap": { + "default.encryptedCollection": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedIndexed", + "bsonType": 
"string", + "queries": { + "queryType": "equality", + "contention": { + "$numberLong": "0" + } + } + }, + { + "keyId": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedUnindexed", + "bsonType": "string" + } + ] + } + } + } + }, + "operations": [ + { + "name": "dropCollection", + "object": "database", + "arguments": { + "collection": "encryptedCollection" + } + }, + { + "name": "createCollection", + "object": "database", + "arguments": { + "collection": "encryptedCollection" + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "command": { + "createIndexes": "encryptedCollection", + "indexes": [ + { + "name": "name", + "key": { + "name": 1 + }, + "partialFilterExpression": { + "unencrypted_string": "foo" + } + } + ] + } + } + }, + { + "name": "assertIndexExists", + "object": "testRunner", + "arguments": { + "database": "default", + "collection": "encryptedCollection", + "index": "name" + } + } + ] + }, + { + "description": "createIndexes with a partialFilterExpression on an encrypted field is an error", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + }, + "encryptedFieldsMap": { + "default.encryptedCollection": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedIndexed", + "bsonType": "string", + "queries": { + "queryType": "equality", + "contention": { + "$numberLong": "0" + } + } + }, + { + "keyId": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedUnindexed", + "bsonType": "string" + } + ] + } + } + } + }, + "operations": [ + { + "name": "dropCollection", + "object": "database", + "arguments": { + "collection": "encryptedCollection" + } + }, + { + "name": "createCollection", + "object": "database", + "arguments": { + "collection": "encryptedCollection" + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "command": { + "createIndexes": "encryptedCollection", + "indexes": [ + { + "name": "name", + "key": { + "name": 1 + }, + "partialFilterExpression": { + "encryptedIndexed": "foo" + } + } + ] + } + }, + "result": { + "errorContains": "Comparison to encrypted fields not supported" + } + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/gcpKMS.json b/test/client-side-encryption/spec/legacy/gcpKMS.json similarity index 100% rename from test/client-side-encryption/spec/gcpKMS.json rename to test/client-side-encryption/spec/legacy/gcpKMS.json diff --git a/test/client-side-encryption/spec/getMore.json b/test/client-side-encryption/spec/legacy/getMore.json similarity index 100% rename from test/client-side-encryption/spec/getMore.json rename to test/client-side-encryption/spec/legacy/getMore.json diff --git a/test/client-side-encryption/spec/insert.json b/test/client-side-encryption/spec/legacy/insert.json similarity index 100% rename from test/client-side-encryption/spec/insert.json rename to test/client-side-encryption/spec/legacy/insert.json diff --git a/test/client-side-encryption/spec/keyAltName.json b/test/client-side-encryption/spec/legacy/keyAltName.json similarity index 100% 
rename from test/client-side-encryption/spec/keyAltName.json
rename to test/client-side-encryption/spec/legacy/keyAltName.json
diff --git a/test/client-side-encryption/spec/kmipKMS.json b/test/client-side-encryption/spec/legacy/kmipKMS.json
similarity index 100%
rename from test/client-side-encryption/spec/kmipKMS.json
rename to test/client-side-encryption/spec/legacy/kmipKMS.json
diff --git a/test/client-side-encryption/spec/localKMS.json b/test/client-side-encryption/spec/legacy/localKMS.json
similarity index 100%
rename from test/client-side-encryption/spec/localKMS.json
rename to test/client-side-encryption/spec/legacy/localKMS.json
diff --git a/test/client-side-encryption/spec/localSchema.json b/test/client-side-encryption/spec/legacy/localSchema.json
similarity index 100%
rename from test/client-side-encryption/spec/localSchema.json
rename to test/client-side-encryption/spec/legacy/localSchema.json
diff --git a/test/client-side-encryption/spec/malformedCiphertext.json b/test/client-side-encryption/spec/legacy/malformedCiphertext.json
similarity index 100%
rename from test/client-side-encryption/spec/malformedCiphertext.json
rename to test/client-side-encryption/spec/legacy/malformedCiphertext.json
diff --git a/test/client-side-encryption/spec/maxWireVersion.json b/test/client-side-encryption/spec/legacy/maxWireVersion.json
similarity index 100%
rename from test/client-side-encryption/spec/maxWireVersion.json
rename to test/client-side-encryption/spec/legacy/maxWireVersion.json
diff --git a/test/client-side-encryption/spec/missingKey.json b/test/client-side-encryption/spec/legacy/missingKey.json
similarity index 100%
rename from test/client-side-encryption/spec/missingKey.json
rename to test/client-side-encryption/spec/legacy/missingKey.json
diff --git a/test/client-side-encryption/spec/noSchema.json b/test/client-side-encryption/spec/legacy/noSchema.json
similarity index 100%
rename from test/client-side-encryption/spec/noSchema.json
rename to test/client-side-encryption/spec/legacy/noSchema.json
diff --git a/test/client-side-encryption/spec/replaceOne.json b/test/client-side-encryption/spec/legacy/replaceOne.json
similarity index 100%
rename from test/client-side-encryption/spec/replaceOne.json
rename to test/client-side-encryption/spec/legacy/replaceOne.json
diff --git a/test/client-side-encryption/spec/types.json b/test/client-side-encryption/spec/legacy/types.json
similarity index 100%
rename from test/client-side-encryption/spec/types.json
rename to test/client-side-encryption/spec/legacy/types.json
diff --git a/test/client-side-encryption/spec/unsupportedCommand.json b/test/client-side-encryption/spec/legacy/unsupportedCommand.json
similarity index 100%
rename from test/client-side-encryption/spec/unsupportedCommand.json
rename to test/client-side-encryption/spec/legacy/unsupportedCommand.json
diff --git a/test/client-side-encryption/spec/updateMany.json b/test/client-side-encryption/spec/legacy/updateMany.json
similarity index 100%
rename from test/client-side-encryption/spec/updateMany.json
rename to test/client-side-encryption/spec/legacy/updateMany.json
diff --git a/test/client-side-encryption/spec/updateOne.json b/test/client-side-encryption/spec/legacy/updateOne.json
similarity index 100%
rename from test/client-side-encryption/spec/updateOne.json
rename to test/client-side-encryption/spec/legacy/updateOne.json
diff --git a/test/client-side-encryption/spec/legacy/validatorAndPartialFieldExpression.json
b/test/client-side-encryption/spec/legacy/validatorAndPartialFieldExpression.json new file mode 100644 index 0000000000..e07137ce15 --- /dev/null +++ b/test/client-side-encryption/spec/legacy/validatorAndPartialFieldExpression.json @@ -0,0 +1,642 @@ +{ + "runOn": [ + { + "minServerVersion": "6.0.0" + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "tests": [ + { + "description": "create with a validator on an unencrypted field is OK", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + }, + "schemaMap": { + "default.encryptedCollection": { + "properties": { + "encrypted_w_altname": { + "encrypt": { + "keyId": "/altname", + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "random": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string_equivalent": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + } + }, + "bsonType": "object" + } + } + } + }, + "operations": [ + { + "name": "dropCollection", + "object": "database", + "arguments": { + "collection": "encryptedCollection" + } + }, + { + "name": "createCollection", + "object": "database", + "arguments": { + "collection": "encryptedCollection", + "validator": { + "unencrypted_string": "foo" + } + } + }, + { + "name": "assertCollectionExists", + "object": "testRunner", + "arguments": { + "database": "default", + "collection": "encryptedCollection" + } + } + ] + }, + { + "description": "create with a validator on an encrypted field is an error", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + }, + "schemaMap": { + "default.encryptedCollection": { + "properties": { + "encrypted_w_altname": { + "encrypt": { + "keyId": "/altname", + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "random": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string_equivalent": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + } + }, + "bsonType": "object" + } + } + } + }, + "operations": [ + { 
+ "name": "dropCollection", + "object": "database", + "arguments": { + "collection": "encryptedCollection" + } + }, + { + "name": "createCollection", + "object": "database", + "arguments": { + "collection": "encryptedCollection", + "validator": { + "encrypted_string": "foo" + } + }, + "result": { + "errorContains": "Comparison to encrypted fields not supported" + } + } + ] + }, + { + "description": "collMod with a validator on an unencrypted field is OK", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + }, + "schemaMap": { + "default.encryptedCollection": { + "properties": { + "encrypted_w_altname": { + "encrypt": { + "keyId": "/altname", + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "random": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string_equivalent": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + } + }, + "bsonType": "object" + } + } + } + }, + "operations": [ + { + "name": "dropCollection", + "object": "database", + "arguments": { + "collection": "encryptedCollection" + } + }, + { + "name": "createCollection", + "object": "database", + "arguments": { + "collection": "encryptedCollection" + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "command": { + "collMod": "encryptedCollection", + "validator": { + "unencrypted_string": "foo" + } + } + } + } + ] + }, + { + "description": "collMod with a validator on an encrypted field is an error", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + }, + "schemaMap": { + "default.encryptedCollection": { + "properties": { + "encrypted_w_altname": { + "encrypt": { + "keyId": "/altname", + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "random": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string_equivalent": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + } + }, + "bsonType": "object" + } + } + } + }, + "operations": [ + { + "name": "dropCollection", + 
"object": "database", + "arguments": { + "collection": "encryptedCollection" + } + }, + { + "name": "createCollection", + "object": "database", + "arguments": { + "collection": "encryptedCollection" + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "command": { + "collMod": "encryptedCollection", + "validator": { + "encrypted_string": "foo" + } + } + }, + "result": { + "errorContains": "Comparison to encrypted fields not supported" + } + } + ] + }, + { + "description": "createIndexes with a partialFilterExpression on an unencrypted field is OK", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + }, + "schemaMap": { + "default.encryptedCollection": { + "properties": { + "encrypted_w_altname": { + "encrypt": { + "keyId": "/altname", + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "random": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string_equivalent": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + } + }, + "bsonType": "object" + } + } + } + }, + "operations": [ + { + "name": "dropCollection", + "object": "database", + "arguments": { + "collection": "encryptedCollection" + } + }, + { + "name": "createCollection", + "object": "database", + "arguments": { + "collection": "encryptedCollection" + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "command": { + "createIndexes": "encryptedCollection", + "indexes": [ + { + "name": "name", + "key": { + "name": 1 + }, + "partialFilterExpression": { + "unencrypted_string": "foo" + } + } + ] + } + } + }, + { + "name": "assertIndexExists", + "object": "testRunner", + "arguments": { + "database": "default", + "collection": "encryptedCollection", + "index": "name" + } + } + ] + }, + { + "description": "createIndexes with a partialFilterExpression on an encrypted field is an error", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + }, + "schemaMap": { + "default.encryptedCollection": { + "properties": { + "encrypted_w_altname": { + "encrypt": { + "keyId": "/altname", + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "random": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + 
"algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string_equivalent": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + } + }, + "bsonType": "object" + } + } + } + }, + "operations": [ + { + "name": "dropCollection", + "object": "database", + "arguments": { + "collection": "encryptedCollection" + } + }, + { + "name": "createCollection", + "object": "database", + "arguments": { + "collection": "encryptedCollection" + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "command": { + "createIndexes": "encryptedCollection", + "indexes": [ + { + "name": "name", + "key": { + "name": 1 + }, + "partialFilterExpression": { + "encrypted_string": "foo" + } + } + ] + } + }, + "result": { + "errorContains": "Comparison to encrypted fields not supported" + } + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/addKeyAltName.json b/test/client-side-encryption/spec/unified/addKeyAltName.json new file mode 100644 index 0000000000..7dc371143b --- /dev/null +++ b/test/client-side-encryption/spec/unified/addKeyAltName.json @@ -0,0 +1,603 @@ +{ + "description": "addKeyAltName", + "schemaVersion": "1.8", + "runOnRequirements": [ + { + "csfle": true + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "clientEncryption": { + "id": "clientEncryption0", + "clientEncryptionOpts": { + "keyVaultClient": "client0", + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "local": {} + } + } + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "keyvault" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "datakeys" + } + } + ], + "initialData": [ + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "_id": { + "$binary": { + "base64": "bG9jYWxrZXlsb2NhbGtleQ==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "ABKBldDEoDW323yejOnIRk6YQmlD9d3eQthd16scKL75nz2LjNL9fgPDZWrFFOlqlhMCFaSrNJfGrFUjYk5JFDO7soG5Syb50k1niJoKg4ilsj0L4mpimFUtTpOr2nzZOeQtvAksEXc7gsFgq8gV7t/U3lsaXPY7I0t42DfSE8EGlPdxRjFdHnxh+OR8h7U9b8Qs5K5UuhgyeyxaBZ1Hgw==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "status": 1, + "masterKey": { + "provider": "local" + } + } + ] + } + ], + "tests": [ + { + "description": "add keyAltName to non-existent data key", + "operations": [ + { + "name": "addKeyAltName", + "object": "clientEncryption0", + "arguments": { + "id": { + "$binary": { + "base64": "AAAjYWxrZXlsb2NhbGtleQ==", + "subType": "04" + } + }, + "keyAltName": "new_key_alt_name" + }, + "expectResult": null + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "databaseName": "keyvault", + "command": { + "findAndModify": "datakeys", + "query": { + "_id": { + "$binary": { + "base64": "AAAjYWxrZXlsb2NhbGtleQ==", + "subType": "04" + } + } + }, + "update": { + "$addToSet": { + "keyAltNames": "new_key_alt_name" + } + }, + "writeConcern": { + "w": "majority" + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "datakeys", + "databaseName": "keyvault", + "documents": [ + { + "_id": { + "$binary": { + "base64": 
"bG9jYWxrZXlsb2NhbGtleQ==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "ABKBldDEoDW323yejOnIRk6YQmlD9d3eQthd16scKL75nz2LjNL9fgPDZWrFFOlqlhMCFaSrNJfGrFUjYk5JFDO7soG5Syb50k1niJoKg4ilsj0L4mpimFUtTpOr2nzZOeQtvAksEXc7gsFgq8gV7t/U3lsaXPY7I0t42DfSE8EGlPdxRjFdHnxh+OR8h7U9b8Qs5K5UuhgyeyxaBZ1Hgw==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "status": 1, + "masterKey": { + "provider": "local" + } + } + ] + } + ] + }, + { + "description": "add new keyAltName to data key with no keyAltNames", + "operations": [ + { + "name": "addKeyAltName", + "object": "clientEncryption0", + "arguments": { + "id": { + "$binary": { + "base64": "bG9jYWxrZXlsb2NhbGtleQ==", + "subType": "04" + } + }, + "keyAltName": "local_key" + }, + "expectResult": { + "_id": { + "$binary": { + "base64": "bG9jYWxrZXlsb2NhbGtleQ==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "ABKBldDEoDW323yejOnIRk6YQmlD9d3eQthd16scKL75nz2LjNL9fgPDZWrFFOlqlhMCFaSrNJfGrFUjYk5JFDO7soG5Syb50k1niJoKg4ilsj0L4mpimFUtTpOr2nzZOeQtvAksEXc7gsFgq8gV7t/U3lsaXPY7I0t42DfSE8EGlPdxRjFdHnxh+OR8h7U9b8Qs5K5UuhgyeyxaBZ1Hgw==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "status": 1, + "masterKey": { + "provider": "local" + } + } + }, + { + "name": "find", + "object": "collection0", + "arguments": { + "filter": {}, + "projection": { + "_id": 0, + "keyAltNames": 1 + } + }, + "expectResult": [ + { + "keyAltNames": [ + "local_key" + ] + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "databaseName": "keyvault", + "command": { + "findAndModify": "datakeys", + "query": { + "_id": { + "$binary": { + "base64": "bG9jYWxrZXlsb2NhbGtleQ==", + "subType": "04" + } + } + }, + "update": { + "$addToSet": { + "keyAltNames": "local_key" + } + }, + "writeConcern": { + "w": "majority" + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "find" + } + } + ] + } + ] + }, + { + "description": "add existing keyAltName to existing data key", + "operations": [ + { + "name": "addKeyAltName", + "object": "clientEncryption0", + "arguments": { + "id": { + "$binary": { + "base64": "bG9jYWxrZXlsb2NhbGtleQ==", + "subType": "04" + } + }, + "keyAltName": "local_key" + }, + "expectResult": { + "_id": { + "$binary": { + "base64": "bG9jYWxrZXlsb2NhbGtleQ==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "ABKBldDEoDW323yejOnIRk6YQmlD9d3eQthd16scKL75nz2LjNL9fgPDZWrFFOlqlhMCFaSrNJfGrFUjYk5JFDO7soG5Syb50k1niJoKg4ilsj0L4mpimFUtTpOr2nzZOeQtvAksEXc7gsFgq8gV7t/U3lsaXPY7I0t42DfSE8EGlPdxRjFdHnxh+OR8h7U9b8Qs5K5UuhgyeyxaBZ1Hgw==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "status": 1, + "masterKey": { + "provider": "local" + } + } + }, + { + "name": "addKeyAltName", + "object": "clientEncryption0", + "arguments": { + "id": { + "$binary": { + "base64": "bG9jYWxrZXlsb2NhbGtleQ==", + "subType": "04" + } + }, + "keyAltName": "local_key" + }, + "expectResult": { + "_id": { + "$binary": { + "base64": "bG9jYWxrZXlsb2NhbGtleQ==", + "subType": "04" + } + }, + "keyAltNames": [ + "local_key" + ], + "keyMaterial": { + "$$type": "binData" + }, + "creationDate": { + "$$type": "date" + 
}, + "updateDate": { + "$$type": "date" + }, + "status": 1, + "masterKey": { + "provider": "local" + } + } + }, + { + "name": "find", + "object": "collection0", + "arguments": { + "filter": {}, + "projection": { + "_id": 0, + "keyAltNames": 1 + } + }, + "expectResult": [ + { + "keyAltNames": [ + "local_key" + ] + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "databaseName": "keyvault", + "command": { + "findAndModify": "datakeys", + "query": { + "_id": { + "$binary": { + "base64": "bG9jYWxrZXlsb2NhbGtleQ==", + "subType": "04" + } + } + }, + "update": { + "$addToSet": { + "keyAltNames": "local_key" + } + }, + "writeConcern": { + "w": "majority" + } + } + } + }, + { + "commandStartedEvent": { + "databaseName": "keyvault", + "command": { + "findAndModify": "datakeys", + "query": { + "_id": { + "$binary": { + "base64": "bG9jYWxrZXlsb2NhbGtleQ==", + "subType": "04" + } + } + }, + "update": { + "$addToSet": { + "keyAltNames": "local_key" + } + }, + "writeConcern": { + "w": "majority" + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "find" + } + } + ] + } + ] + }, + { + "description": "add new keyAltName to data key with keyAltNames", + "operations": [ + { + "name": "addKeyAltName", + "object": "clientEncryption0", + "arguments": { + "id": { + "$binary": { + "base64": "bG9jYWxrZXlsb2NhbGtleQ==", + "subType": "04" + } + }, + "keyAltName": "local_key" + }, + "expectResult": { + "_id": { + "$binary": { + "base64": "bG9jYWxrZXlsb2NhbGtleQ==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "ABKBldDEoDW323yejOnIRk6YQmlD9d3eQthd16scKL75nz2LjNL9fgPDZWrFFOlqlhMCFaSrNJfGrFUjYk5JFDO7soG5Syb50k1niJoKg4ilsj0L4mpimFUtTpOr2nzZOeQtvAksEXc7gsFgq8gV7t/U3lsaXPY7I0t42DfSE8EGlPdxRjFdHnxh+OR8h7U9b8Qs5K5UuhgyeyxaBZ1Hgw==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "status": 1, + "masterKey": { + "provider": "local" + } + } + }, + { + "name": "addKeyAltName", + "object": "clientEncryption0", + "arguments": { + "id": { + "$binary": { + "base64": "bG9jYWxrZXlsb2NhbGtleQ==", + "subType": "04" + } + }, + "keyAltName": "another_name" + }, + "expectResult": { + "_id": { + "$binary": { + "base64": "bG9jYWxrZXlsb2NhbGtleQ==", + "subType": "04" + } + }, + "keyAltNames": [ + "local_key" + ], + "keyMaterial": { + "$$type": "binData" + }, + "creationDate": { + "$$type": "date" + }, + "updateDate": { + "$$type": "date" + }, + "status": 1, + "masterKey": { + "provider": "local" + } + } + }, + { + "name": "aggregate", + "object": "collection0", + "arguments": { + "pipeline": [ + { + "$project": { + "_id": 0, + "keyAltNames": "$keyAltNames" + } + }, + { + "$unwind": "$keyAltNames" + }, + { + "$sort": { + "keyAltNames": 1 + } + } + ] + }, + "expectResult": [ + { + "keyAltNames": "another_name" + }, + { + "keyAltNames": "local_key" + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "databaseName": "keyvault", + "command": { + "findAndModify": "datakeys", + "query": { + "_id": { + "$binary": { + "base64": "bG9jYWxrZXlsb2NhbGtleQ==", + "subType": "04" + } + } + }, + "update": { + "$addToSet": { + "keyAltNames": "local_key" + } + }, + "writeConcern": { + "w": "majority" + } + } + } + }, + { + "commandStartedEvent": { + "databaseName": "keyvault", + "command": { + "findAndModify": "datakeys", + "query": { + "_id": { + "$binary": { + "base64": 
"bG9jYWxrZXlsb2NhbGtleQ==", + "subType": "04" + } + } + }, + "update": { + "$addToSet": { + "keyAltNames": "another_name" + } + }, + "writeConcern": { + "w": "majority" + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "aggregate" + } + } + ] + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/createKey-kms_providers-invalid.json b/test/client-side-encryption/spec/unified/createKey-kms_providers-invalid.json new file mode 100644 index 0000000000..b2c8d83e05 --- /dev/null +++ b/test/client-side-encryption/spec/unified/createKey-kms_providers-invalid.json @@ -0,0 +1,112 @@ +{ + "description": "createKey-provider-invalid", + "schemaVersion": "1.8", + "runOnRequirements": [ + { + "csfle": true + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent", + "commandSucceededEvent", + "commandFailedEvent" + ] + } + }, + { + "clientEncryption": { + "id": "clientEncryption0", + "clientEncryptionOpts": { + "keyVaultClient": "client0", + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "aws": {} + } + } + } + } + ], + "tests": [ + { + "description": "create data key without required master key fields", + "operations": [ + { + "name": "createKey", + "object": "clientEncryption0", + "arguments": { + "kmsProvider": "aws", + "opts": { + "masterKey": {} + } + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [] + } + ] + }, + { + "description": "create data key with invalid master key field", + "operations": [ + { + "name": "createKey", + "object": "clientEncryption0", + "arguments": { + "kmsProvider": "local", + "opts": { + "masterKey": { + "invalid": 1 + } + } + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [] + } + ] + }, + { + "description": "create data key with invalid master key", + "operations": [ + { + "name": "createKey", + "object": "clientEncryption0", + "arguments": { + "kmsProvider": "aws", + "opts": { + "masterKey": { + "key": "arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0", + "region": "invalid" + } + } + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [] + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/createKey.json b/test/client-side-encryption/spec/unified/createKey.json new file mode 100644 index 0000000000..adb3fff20d --- /dev/null +++ b/test/client-side-encryption/spec/unified/createKey.json @@ -0,0 +1,711 @@ +{ + "description": "createKey", + "schemaVersion": "1.8", + "runOnRequirements": [ + { + "csfle": true + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "clientEncryption": { + "id": "clientEncryption0", + "clientEncryptionOpts": { + "keyVaultClient": "client0", + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "aws": { + "accessKeyId": { + "$$placeholder": 1 + }, + "secretAccessKey": { + "$$placeholder": 1 + } + }, + "azure": { + "tenantId": { + "$$placeholder": 1 + }, + "clientId": { + "$$placeholder": 1 + }, + "clientSecret": { + "$$placeholder": 1 + } + }, + "gcp": { + "email": { + "$$placeholder": 1 + }, + "privateKey": { + "$$placeholder": 1 + } + }, + "kmip": { + "endpoint": { + "$$placeholder": 1 + } + }, + "local": { + "key": { + "$$placeholder": 1 + } + } + } + } + } + }, + { + "database": { + "id": 
"database0", + "client": "client0", + "databaseName": "keyvault" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "datakeys" + } + } + ], + "initialData": [ + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [] + } + ], + "tests": [ + { + "description": "create data key with AWS KMS provider", + "operations": [ + { + "name": "createKey", + "object": "clientEncryption0", + "arguments": { + "kmsProvider": "aws", + "opts": { + "masterKey": { + "key": "arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0", + "region": "us-east-1" + } + } + }, + "expectResult": { + "$$type": "binData" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "databaseName": "keyvault", + "command": { + "insert": "datakeys", + "documents": [ + { + "_id": { + "$$type": "binData" + }, + "keyMaterial": { + "$$type": "binData" + }, + "creationDate": { + "$$type": "date" + }, + "updateDate": { + "$$type": "date" + }, + "status": { + "$$exists": true + }, + "masterKey": { + "provider": "aws", + "key": "arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0", + "region": "us-east-1" + } + } + ], + "writeConcern": { + "w": "majority" + } + } + } + } + ] + } + ] + }, + { + "description": "create datakey with Azure KMS provider", + "operations": [ + { + "name": "createKey", + "object": "clientEncryption0", + "arguments": { + "kmsProvider": "azure", + "opts": { + "masterKey": { + "keyVaultEndpoint": "key-vault-csfle.vault.azure.net", + "keyName": "key-name-csfle" + } + } + }, + "expectResult": { + "$$type": "binData" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "databaseName": "keyvault", + "command": { + "insert": "datakeys", + "documents": [ + { + "_id": { + "$$type": "binData" + }, + "keyMaterial": { + "$$type": "binData" + }, + "creationDate": { + "$$type": "date" + }, + "updateDate": { + "$$type": "date" + }, + "status": { + "$$exists": true + }, + "masterKey": { + "provider": "azure", + "keyVaultEndpoint": "key-vault-csfle.vault.azure.net", + "keyName": "key-name-csfle" + } + } + ], + "writeConcern": { + "w": "majority" + } + } + } + } + ] + } + ] + }, + { + "description": "create datakey with GCP KMS provider", + "operations": [ + { + "name": "createKey", + "object": "clientEncryption0", + "arguments": { + "kmsProvider": "gcp", + "opts": { + "masterKey": { + "projectId": "devprod-drivers", + "location": "global", + "keyRing": "key-ring-csfle", + "keyName": "key-name-csfle" + } + } + }, + "expectResult": { + "$$type": "binData" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "databaseName": "keyvault", + "command": { + "insert": "datakeys", + "documents": [ + { + "_id": { + "$$type": "binData" + }, + "keyMaterial": { + "$$type": "binData" + }, + "creationDate": { + "$$type": "date" + }, + "updateDate": { + "$$type": "date" + }, + "status": { + "$$exists": true + }, + "masterKey": { + "provider": "gcp", + "projectId": "devprod-drivers", + "location": "global", + "keyRing": "key-ring-csfle", + "keyName": "key-name-csfle" + } + } + ], + "writeConcern": { + "w": "majority" + } + } + } + } + ] + } + ] + }, + { + "description": "create datakey with KMIP KMS provider", + "operations": [ + { + "name": "createKey", + "object": "clientEncryption0", + "arguments": { + "kmsProvider": "kmip" + }, + "expectResult": { + "$$type": "binData" + 
} + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "databaseName": "keyvault", + "command": { + "insert": "datakeys", + "documents": [ + { + "_id": { + "$$type": "binData" + }, + "keyMaterial": { + "$$type": "binData" + }, + "creationDate": { + "$$type": "date" + }, + "updateDate": { + "$$type": "date" + }, + "status": { + "$$exists": true + }, + "masterKey": { + "provider": "kmip", + "keyId": { + "$$type": "string" + } + } + } + ], + "writeConcern": { + "w": "majority" + } + } + } + } + ] + } + ] + }, + { + "description": "create datakey with local KMS provider", + "operations": [ + { + "name": "createKey", + "object": "clientEncryption0", + "arguments": { + "kmsProvider": "local" + }, + "expectResult": { + "$$type": "binData" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "databaseName": "keyvault", + "command": { + "insert": "datakeys", + "documents": [ + { + "_id": { + "$$type": "binData" + }, + "keyMaterial": { + "$$type": "binData" + }, + "creationDate": { + "$$type": "date" + }, + "updateDate": { + "$$type": "date" + }, + "status": { + "$$exists": true + }, + "masterKey": { + "provider": "local" + } + } + ], + "writeConcern": { + "w": "majority" + } + } + } + } + ] + } + ] + }, + { + "description": "create datakey with no keyAltName", + "operations": [ + { + "name": "createKey", + "object": "clientEncryption0", + "arguments": { + "kmsProvider": "local", + "opts": { + "keyAltNames": [] + } + }, + "expectResult": { + "$$type": "binData" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "databaseName": "keyvault", + "command": { + "insert": "datakeys", + "documents": [ + { + "_id": { + "$$type": "binData" + }, + "keyAltNames": { + "$$exists": false + }, + "keyMaterial": { + "$$type": "binData" + }, + "creationDate": { + "$$type": "date" + }, + "updateDate": { + "$$type": "date" + }, + "status": { + "$$type": "int" + }, + "masterKey": { + "$$type": "object" + } + } + ], + "writeConcern": { + "w": "majority" + } + } + } + } + ] + } + ] + }, + { + "description": "create datakey with single keyAltName", + "operations": [ + { + "name": "createKey", + "object": "clientEncryption0", + "arguments": { + "kmsProvider": "local", + "opts": { + "keyAltNames": [ + "local_key" + ] + } + }, + "expectResult": { + "$$type": "binData" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "databaseName": "keyvault", + "command": { + "insert": "datakeys", + "documents": [ + { + "_id": { + "$$type": "binData" + }, + "keyAltNames": [ + "local_key" + ], + "keyMaterial": { + "$$type": "binData" + }, + "creationDate": { + "$$type": "date" + }, + "updateDate": { + "$$type": "date" + }, + "status": { + "$$type": "int" + }, + "masterKey": { + "$$type": "object" + } + } + ], + "writeConcern": { + "w": "majority" + } + } + } + } + ] + } + ] + }, + { + "description": "create datakey with multiple keyAltNames", + "operations": [ + { + "name": "createKey", + "object": "clientEncryption0", + "arguments": { + "kmsProvider": "local", + "opts": { + "keyAltNames": [ + "abc", + "def" + ] + } + }, + "expectResult": { + "$$type": "binData" + } + }, + { + "name": "aggregate", + "object": "collection0", + "arguments": { + "pipeline": [ + { + "$project": { + "_id": 0, + "keyAltNames": 1 + } + }, + { + "$unwind": "$keyAltNames" + }, + { + "$sort": { + "keyAltNames": 1 + } + } + ] + }, + "expectResult": [ + { + 
"keyAltNames": "abc" + }, + { + "keyAltNames": "def" + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "databaseName": "keyvault", + "command": { + "insert": "datakeys", + "documents": [ + { + "_id": { + "$$type": "binData" + }, + "keyAltNames": { + "$$type": "array" + }, + "keyMaterial": { + "$$type": "binData" + }, + "creationDate": { + "$$type": "date" + }, + "updateDate": { + "$$type": "date" + }, + "status": { + "$$type": "int" + }, + "masterKey": { + "$$type": "object" + } + } + ], + "writeConcern": { + "w": "majority" + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "aggregate" + } + } + ] + } + ] + }, + { + "description": "create datakey with custom key material", + "operations": [ + { + "name": "createKey", + "object": "clientEncryption0", + "arguments": { + "kmsProvider": "local", + "opts": { + "keyMaterial": { + "$binary": { + "base64": "a2V5X21hdGVyaWFsa2V5X21hdGVyaWFsa2V5X21hdGVyaWFsa2V5X21hdGVyaWFsa2V5X21hdGVyaWFsa2V5X21hdGVyaWFsa2V5X21hdGVyaWFsa2V5X21hdGVyaWFs", + "subType": "00" + } + } + } + }, + "expectResult": { + "$$type": "binData" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "databaseName": "keyvault", + "command": { + "insert": "datakeys", + "documents": [ + { + "_id": { + "$$type": "binData" + }, + "keyMaterial": { + "$$type": "binData" + }, + "creationDate": { + "$$type": "date" + }, + "updateDate": { + "$$type": "date" + }, + "status": { + "$$type": "int" + }, + "masterKey": { + "$$type": "object" + } + } + ], + "writeConcern": { + "w": "majority" + } + } + } + } + ] + } + ] + }, + { + "description": "create datakey with invalid custom key material (too short)", + "operations": [ + { + "name": "createKey", + "object": "clientEncryption0", + "arguments": { + "kmsProvider": "local", + "opts": { + "keyMaterial": { + "$binary": { + "base64": "a2V5X21hdGVyaWFsa2V5X21hdGVyaWFsa2V5X21hdGVyaWFsa2V5X21hdGVyaWFsa2V5X21hdGVyaWFsa2V5X21hdGVyaWFsa2V5X21hdGVyaWFs", + "subType": "00" + } + } + } + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [] + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/deleteKey.json b/test/client-side-encryption/spec/unified/deleteKey.json new file mode 100644 index 0000000000..a3b2f98a50 --- /dev/null +++ b/test/client-side-encryption/spec/unified/deleteKey.json @@ -0,0 +1,553 @@ +{ + "description": "deleteKey", + "schemaVersion": "1.8", + "runOnRequirements": [ + { + "csfle": true + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "clientEncryption": { + "id": "clientEncryption0", + "clientEncryptionOpts": { + "keyVaultClient": "client0", + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "local": {} + } + } + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "keyvault" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "datakeys" + } + } + ], + "initialData": [ + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "_id": { + "$binary": { + "base64": "YXdzYXdzYXdzYXdzYXdzYQ==", + "subType": "04" + } + }, + "keyAltNames": [ + "aws_key" + ], + "keyMaterial": { + "$binary": { + "base64": 
"AQICAHhQNmWG2CzOm1dq3kWLM+iDUZhEqnhJwH9wZVpuZ94A8gFXJqbF0Fy872MD7xl56D/2AAAAwjCBvwYJKoZIhvcNAQcGoIGxMIGuAgEAMIGoBgkqhkiG9w0BBwEwHgYJYIZIAWUDBAEuMBEEDO7HPisPUlGzaio9vgIBEIB7/Qow46PMh/8JbEUbdXgTGhLfXPE+KIVW7T8s6YEMlGiRvMu7TV0QCIUJlSHPKZxzlJ2iwuz5yXeOag+EdY+eIQ0RKrsJ3b8UTisZYzGjfzZnxUKLzLoeXremtRCm3x47wCuHKd1dhh6FBbYt5TL2tDaj+vL2GBrKat2L", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "status": 1, + "masterKey": { + "provider": "aws", + "key": "arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0", + "region": "us-east-1" + } + }, + { + "_id": { + "$binary": { + "base64": "bG9jYWxrZXlsb2NhbGtleQ==", + "subType": "04" + } + }, + "keyAltNames": [ + "local_key" + ], + "keyMaterial": { + "$binary": { + "base64": "ABKBldDEoDW323yejOnIRk6YQmlD9d3eQthd16scKL75nz2LjNL9fgPDZWrFFOlqlhMCFaSrNJfGrFUjYk5JFDO7soG5Syb50k1niJoKg4ilsj0L4mpimFUtTpOr2nzZOeQtvAksEXc7gsFgq8gV7t/U3lsaXPY7I0t42DfSE8EGlPdxRjFdHnxh+OR8h7U9b8Qs5K5UuhgyeyxaBZ1Hgw==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "status": 1, + "masterKey": { + "provider": "local" + } + } + ] + } + ], + "tests": [ + { + "description": "delete non-existent data key", + "operations": [ + { + "name": "deleteKey", + "object": "clientEncryption0", + "arguments": { + "id": { + "$binary": { + "base64": "AAAzYXdzYXdzYXdzYXdzYQ==", + "subType": "04" + } + } + }, + "expectResult": { + "deletedCount": 0 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "databaseName": "keyvault", + "command": { + "delete": "datakeys", + "deletes": [ + { + "q": { + "_id": { + "$binary": { + "base64": "AAAzYXdzYXdzYXdzYXdzYQ==", + "subType": "04" + } + } + }, + "limit": 1 + } + ], + "writeConcern": { + "w": "majority" + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "datakeys", + "databaseName": "keyvault", + "documents": [ + { + "_id": { + "$binary": { + "base64": "YXdzYXdzYXdzYXdzYXdzYQ==", + "subType": "04" + } + }, + "keyAltNames": [ + "aws_key" + ], + "keyMaterial": { + "$binary": { + "base64": "AQICAHhQNmWG2CzOm1dq3kWLM+iDUZhEqnhJwH9wZVpuZ94A8gFXJqbF0Fy872MD7xl56D/2AAAAwjCBvwYJKoZIhvcNAQcGoIGxMIGuAgEAMIGoBgkqhkiG9w0BBwEwHgYJYIZIAWUDBAEuMBEEDO7HPisPUlGzaio9vgIBEIB7/Qow46PMh/8JbEUbdXgTGhLfXPE+KIVW7T8s6YEMlGiRvMu7TV0QCIUJlSHPKZxzlJ2iwuz5yXeOag+EdY+eIQ0RKrsJ3b8UTisZYzGjfzZnxUKLzLoeXremtRCm3x47wCuHKd1dhh6FBbYt5TL2tDaj+vL2GBrKat2L", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "status": 1, + "masterKey": { + "provider": "aws", + "key": "arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0", + "region": "us-east-1" + } + }, + { + "_id": { + "$binary": { + "base64": "bG9jYWxrZXlsb2NhbGtleQ==", + "subType": "04" + } + }, + "keyAltNames": [ + "local_key" + ], + "keyMaterial": { + "$binary": { + "base64": "ABKBldDEoDW323yejOnIRk6YQmlD9d3eQthd16scKL75nz2LjNL9fgPDZWrFFOlqlhMCFaSrNJfGrFUjYk5JFDO7soG5Syb50k1niJoKg4ilsj0L4mpimFUtTpOr2nzZOeQtvAksEXc7gsFgq8gV7t/U3lsaXPY7I0t42DfSE8EGlPdxRjFdHnxh+OR8h7U9b8Qs5K5UuhgyeyxaBZ1Hgw==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "updateDate": { + "$date": { + "$numberLong": 
"1641024000000" + } + }, + "status": 1, + "masterKey": { + "provider": "local" + } + } + ] + } + ] + }, + { + "description": "delete existing AWS data key", + "operations": [ + { + "name": "deleteKey", + "object": "clientEncryption0", + "arguments": { + "id": { + "$binary": { + "base64": "YXdzYXdzYXdzYXdzYXdzYQ==", + "subType": "04" + } + } + }, + "expectResult": { + "deletedCount": 1 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "databaseName": "keyvault", + "command": { + "delete": "datakeys", + "deletes": [ + { + "q": { + "_id": { + "$binary": { + "base64": "YXdzYXdzYXdzYXdzYXdzYQ==", + "subType": "04" + } + } + }, + "limit": 1 + } + ], + "writeConcern": { + "w": "majority" + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "datakeys", + "databaseName": "keyvault", + "documents": [ + { + "_id": { + "$binary": { + "base64": "bG9jYWxrZXlsb2NhbGtleQ==", + "subType": "04" + } + }, + "keyAltNames": [ + "local_key" + ], + "keyMaterial": { + "$binary": { + "base64": "ABKBldDEoDW323yejOnIRk6YQmlD9d3eQthd16scKL75nz2LjNL9fgPDZWrFFOlqlhMCFaSrNJfGrFUjYk5JFDO7soG5Syb50k1niJoKg4ilsj0L4mpimFUtTpOr2nzZOeQtvAksEXc7gsFgq8gV7t/U3lsaXPY7I0t42DfSE8EGlPdxRjFdHnxh+OR8h7U9b8Qs5K5UuhgyeyxaBZ1Hgw==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "status": 1, + "masterKey": { + "provider": "local" + } + } + ] + } + ] + }, + { + "description": "delete existing local data key", + "operations": [ + { + "name": "deleteKey", + "object": "clientEncryption0", + "arguments": { + "id": { + "$binary": { + "base64": "bG9jYWxrZXlsb2NhbGtleQ==", + "subType": "04" + } + } + }, + "expectResult": { + "deletedCount": 1 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "databaseName": "keyvault", + "command": { + "delete": "datakeys", + "deletes": [ + { + "q": { + "_id": { + "$binary": { + "base64": "bG9jYWxrZXlsb2NhbGtleQ==", + "subType": "04" + } + } + }, + "limit": 1 + } + ], + "writeConcern": { + "w": "majority" + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "datakeys", + "databaseName": "keyvault", + "documents": [ + { + "_id": { + "$binary": { + "base64": "YXdzYXdzYXdzYXdzYXdzYQ==", + "subType": "04" + } + }, + "keyAltNames": [ + "aws_key" + ], + "keyMaterial": { + "$binary": { + "base64": "AQICAHhQNmWG2CzOm1dq3kWLM+iDUZhEqnhJwH9wZVpuZ94A8gFXJqbF0Fy872MD7xl56D/2AAAAwjCBvwYJKoZIhvcNAQcGoIGxMIGuAgEAMIGoBgkqhkiG9w0BBwEwHgYJYIZIAWUDBAEuMBEEDO7HPisPUlGzaio9vgIBEIB7/Qow46PMh/8JbEUbdXgTGhLfXPE+KIVW7T8s6YEMlGiRvMu7TV0QCIUJlSHPKZxzlJ2iwuz5yXeOag+EdY+eIQ0RKrsJ3b8UTisZYzGjfzZnxUKLzLoeXremtRCm3x47wCuHKd1dhh6FBbYt5TL2tDaj+vL2GBrKat2L", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "status": 1, + "masterKey": { + "provider": "aws", + "key": "arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0", + "region": "us-east-1" + } + } + ] + } + ] + }, + { + "description": "delete existing data key twice", + "operations": [ + { + "name": "deleteKey", + "object": "clientEncryption0", + "arguments": { + "id": { + "$binary": { + "base64": "YXdzYXdzYXdzYXdzYXdzYQ==", + "subType": "04" + } + } + }, + "expectResult": { + "deletedCount": 1 + } + }, + { + "name": "deleteKey", + "object": "clientEncryption0", + "arguments": 
{ + "id": { + "$binary": { + "base64": "YXdzYXdzYXdzYXdzYXdzYQ==", + "subType": "04" + } + } + }, + "expectResult": { + "deletedCount": 0 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "databaseName": "keyvault", + "command": { + "delete": "datakeys", + "deletes": [ + { + "q": { + "_id": { + "$binary": { + "base64": "YXdzYXdzYXdzYXdzYXdzYQ==", + "subType": "04" + } + } + }, + "limit": 1 + } + ], + "writeConcern": { + "w": "majority" + } + } + } + }, + { + "commandStartedEvent": { + "databaseName": "keyvault", + "command": { + "delete": "datakeys", + "deletes": [ + { + "q": { + "_id": { + "$binary": { + "base64": "YXdzYXdzYXdzYXdzYXdzYQ==", + "subType": "04" + } + } + }, + "limit": 1 + } + ], + "writeConcern": { + "w": "majority" + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "datakeys", + "databaseName": "keyvault", + "documents": [ + { + "_id": { + "$binary": { + "base64": "bG9jYWxrZXlsb2NhbGtleQ==", + "subType": "04" + } + }, + "keyAltNames": [ + "local_key" + ], + "keyMaterial": { + "$binary": { + "base64": "ABKBldDEoDW323yejOnIRk6YQmlD9d3eQthd16scKL75nz2LjNL9fgPDZWrFFOlqlhMCFaSrNJfGrFUjYk5JFDO7soG5Syb50k1niJoKg4ilsj0L4mpimFUtTpOr2nzZOeQtvAksEXc7gsFgq8gV7t/U3lsaXPY7I0t42DfSE8EGlPdxRjFdHnxh+OR8h7U9b8Qs5K5UuhgyeyxaBZ1Hgw==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "status": 1, + "masterKey": { + "provider": "local" + } + } + ] + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/getKey.json b/test/client-side-encryption/spec/unified/getKey.json new file mode 100644 index 0000000000..f2f2c68113 --- /dev/null +++ b/test/client-side-encryption/spec/unified/getKey.json @@ -0,0 +1,313 @@ +{ + "description": "getKey", + "schemaVersion": "1.8", + "runOnRequirements": [ + { + "csfle": true + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "clientEncryption": { + "id": "clientEncryption0", + "clientEncryptionOpts": { + "keyVaultClient": "client0", + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "local": {} + } + } + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "keyvault" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "datakeys" + } + } + ], + "initialData": [ + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "_id": { + "$binary": { + "base64": "YXdzYXdzYXdzYXdzYXdzYQ==", + "subType": "04" + } + }, + "keyAltNames": [ + "aws_key" + ], + "keyMaterial": { + "$binary": { + "base64": "AQICAHhQNmWG2CzOm1dq3kWLM+iDUZhEqnhJwH9wZVpuZ94A8gFXJqbF0Fy872MD7xl56D/2AAAAwjCBvwYJKoZIhvcNAQcGoIGxMIGuAgEAMIGoBgkqhkiG9w0BBwEwHgYJYIZIAWUDBAEuMBEEDO7HPisPUlGzaio9vgIBEIB7/Qow46PMh/8JbEUbdXgTGhLfXPE+KIVW7T8s6YEMlGiRvMu7TV0QCIUJlSHPKZxzlJ2iwuz5yXeOag+EdY+eIQ0RKrsJ3b8UTisZYzGjfzZnxUKLzLoeXremtRCm3x47wCuHKd1dhh6FBbYt5TL2tDaj+vL2GBrKat2L", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "status": 1, + "masterKey": { + "provider": "aws", + "key": "arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0", + "region": "us-east-1" + } + }, + { + "_id": { + "$binary": { + "base64": "bG9jYWxrZXlsb2NhbGtleQ==", + 
"subType": "04" + } + }, + "keyAltNames": [ + "local_key" + ], + "keyMaterial": { + "$binary": { + "base64": "ABKBldDEoDW323yejOnIRk6YQmlD9d3eQthd16scKL75nz2LjNL9fgPDZWrFFOlqlhMCFaSrNJfGrFUjYk5JFDO7soG5Syb50k1niJoKg4ilsj0L4mpimFUtTpOr2nzZOeQtvAksEXc7gsFgq8gV7t/U3lsaXPY7I0t42DfSE8EGlPdxRjFdHnxh+OR8h7U9b8Qs5K5UuhgyeyxaBZ1Hgw==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "status": 1, + "masterKey": { + "provider": "local" + } + } + ] + } + ], + "tests": [ + { + "description": "get non-existent data key", + "operations": [ + { + "name": "getKey", + "object": "clientEncryption0", + "arguments": { + "id": { + "$binary": { + "base64": "AAAzYXdzYXdzYXdzYXdzYQ==", + "subType": "04" + } + } + }, + "expectResult": null + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "databaseName": "keyvault", + "command": { + "find": "datakeys", + "filter": { + "_id": { + "$binary": { + "base64": "AAAzYXdzYXdzYXdzYXdzYQ==", + "subType": "04" + } + } + }, + "readConcern": { + "level": "majority" + } + } + } + } + ] + } + ] + }, + { + "description": "get existing AWS data key", + "operations": [ + { + "name": "getKey", + "object": "clientEncryption0", + "arguments": { + "id": { + "$binary": { + "base64": "YXdzYXdzYXdzYXdzYXdzYQ==", + "subType": "04" + } + } + }, + "expectResult": { + "_id": { + "$binary": { + "base64": "YXdzYXdzYXdzYXdzYXdzYQ==", + "subType": "04" + } + }, + "keyAltNames": [ + "aws_key" + ], + "keyMaterial": { + "$binary": { + "base64": "AQICAHhQNmWG2CzOm1dq3kWLM+iDUZhEqnhJwH9wZVpuZ94A8gFXJqbF0Fy872MD7xl56D/2AAAAwjCBvwYJKoZIhvcNAQcGoIGxMIGuAgEAMIGoBgkqhkiG9w0BBwEwHgYJYIZIAWUDBAEuMBEEDO7HPisPUlGzaio9vgIBEIB7/Qow46PMh/8JbEUbdXgTGhLfXPE+KIVW7T8s6YEMlGiRvMu7TV0QCIUJlSHPKZxzlJ2iwuz5yXeOag+EdY+eIQ0RKrsJ3b8UTisZYzGjfzZnxUKLzLoeXremtRCm3x47wCuHKd1dhh6FBbYt5TL2tDaj+vL2GBrKat2L", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "status": 1, + "masterKey": { + "provider": "aws", + "key": "arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0", + "region": "us-east-1" + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "databaseName": "keyvault", + "command": { + "find": "datakeys", + "filter": { + "_id": { + "$binary": { + "base64": "YXdzYXdzYXdzYXdzYXdzYQ==", + "subType": "04" + } + } + }, + "readConcern": { + "level": "majority" + } + } + } + } + ] + } + ] + }, + { + "description": "get existing local data key", + "operations": [ + { + "name": "getKey", + "object": "clientEncryption0", + "arguments": { + "id": { + "$binary": { + "base64": "bG9jYWxrZXlsb2NhbGtleQ==", + "subType": "04" + } + } + }, + "expectResult": { + "_id": { + "$binary": { + "base64": "bG9jYWxrZXlsb2NhbGtleQ==", + "subType": "04" + } + }, + "keyAltNames": [ + "local_key" + ], + "keyMaterial": { + "$binary": { + "base64": "ABKBldDEoDW323yejOnIRk6YQmlD9d3eQthd16scKL75nz2LjNL9fgPDZWrFFOlqlhMCFaSrNJfGrFUjYk5JFDO7soG5Syb50k1niJoKg4ilsj0L4mpimFUtTpOr2nzZOeQtvAksEXc7gsFgq8gV7t/U3lsaXPY7I0t42DfSE8EGlPdxRjFdHnxh+OR8h7U9b8Qs5K5UuhgyeyxaBZ1Hgw==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "status": 1, + "masterKey": { + 
"provider": "local" + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "databaseName": "keyvault", + "command": { + "find": "datakeys", + "filter": { + "_id": { + "$binary": { + "base64": "bG9jYWxrZXlsb2NhbGtleQ==", + "subType": "04" + } + } + }, + "readConcern": { + "level": "majority" + } + } + } + } + ] + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/getKeyByAltName.json b/test/client-side-encryption/spec/unified/getKeyByAltName.json new file mode 100644 index 0000000000..18ed2e1943 --- /dev/null +++ b/test/client-side-encryption/spec/unified/getKeyByAltName.json @@ -0,0 +1,283 @@ +{ + "description": "getKeyByAltName", + "schemaVersion": "1.8", + "runOnRequirements": [ + { + "csfle": true + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "clientEncryption": { + "id": "clientEncryption0", + "clientEncryptionOpts": { + "keyVaultClient": "client0", + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "local": {} + } + } + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "keyvault" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "datakeys" + } + } + ], + "initialData": [ + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "_id": { + "$binary": { + "base64": "YXdzYXdzYXdzYXdzYXdzYQ==", + "subType": "04" + } + }, + "keyAltNames": [ + "aws_key" + ], + "keyMaterial": { + "$binary": { + "base64": "AQICAHhQNmWG2CzOm1dq3kWLM+iDUZhEqnhJwH9wZVpuZ94A8gFXJqbF0Fy872MD7xl56D/2AAAAwjCBvwYJKoZIhvcNAQcGoIGxMIGuAgEAMIGoBgkqhkiG9w0BBwEwHgYJYIZIAWUDBAEuMBEEDO7HPisPUlGzaio9vgIBEIB7/Qow46PMh/8JbEUbdXgTGhLfXPE+KIVW7T8s6YEMlGiRvMu7TV0QCIUJlSHPKZxzlJ2iwuz5yXeOag+EdY+eIQ0RKrsJ3b8UTisZYzGjfzZnxUKLzLoeXremtRCm3x47wCuHKd1dhh6FBbYt5TL2tDaj+vL2GBrKat2L", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "status": 1, + "masterKey": { + "provider": "aws", + "key": "arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0", + "region": "us-east-1" + } + }, + { + "_id": { + "$binary": { + "base64": "bG9jYWxrZXlsb2NhbGtleQ==", + "subType": "04" + } + }, + "keyAltNames": [ + "local_key" + ], + "keyMaterial": { + "$binary": { + "base64": "ABKBldDEoDW323yejOnIRk6YQmlD9d3eQthd16scKL75nz2LjNL9fgPDZWrFFOlqlhMCFaSrNJfGrFUjYk5JFDO7soG5Syb50k1niJoKg4ilsj0L4mpimFUtTpOr2nzZOeQtvAksEXc7gsFgq8gV7t/U3lsaXPY7I0t42DfSE8EGlPdxRjFdHnxh+OR8h7U9b8Qs5K5UuhgyeyxaBZ1Hgw==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "status": 1, + "masterKey": { + "provider": "local" + } + } + ] + } + ], + "tests": [ + { + "description": "get non-existent data key", + "operations": [ + { + "name": "getKeyByAltName", + "object": "clientEncryption0", + "arguments": { + "keyAltName": "does_not_exist" + }, + "expectResult": null + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "databaseName": "keyvault", + "command": { + "find": "datakeys", + "filter": { + "keyAltNames": "does_not_exist" + }, + "readConcern": { + "level": "majority" + } + } + } + } + ] + } + ] + }, + { + "description": "get existing AWS data key", + "operations": [ + { 
+ "name": "getKeyByAltName", + "object": "clientEncryption0", + "arguments": { + "keyAltName": "aws_key" + }, + "expectResult": { + "_id": { + "$binary": { + "base64": "YXdzYXdzYXdzYXdzYXdzYQ==", + "subType": "04" + } + }, + "keyAltNames": [ + "aws_key" + ], + "keyMaterial": { + "$binary": { + "base64": "AQICAHhQNmWG2CzOm1dq3kWLM+iDUZhEqnhJwH9wZVpuZ94A8gFXJqbF0Fy872MD7xl56D/2AAAAwjCBvwYJKoZIhvcNAQcGoIGxMIGuAgEAMIGoBgkqhkiG9w0BBwEwHgYJYIZIAWUDBAEuMBEEDO7HPisPUlGzaio9vgIBEIB7/Qow46PMh/8JbEUbdXgTGhLfXPE+KIVW7T8s6YEMlGiRvMu7TV0QCIUJlSHPKZxzlJ2iwuz5yXeOag+EdY+eIQ0RKrsJ3b8UTisZYzGjfzZnxUKLzLoeXremtRCm3x47wCuHKd1dhh6FBbYt5TL2tDaj+vL2GBrKat2L", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "status": 1, + "masterKey": { + "provider": "aws", + "key": "arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0", + "region": "us-east-1" + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "databaseName": "keyvault", + "command": { + "find": "datakeys", + "filter": { + "keyAltNames": "aws_key" + }, + "readConcern": { + "level": "majority" + } + } + } + } + ] + } + ] + }, + { + "description": "get existing local data key", + "operations": [ + { + "name": "getKeyByAltName", + "object": "clientEncryption0", + "arguments": { + "keyAltName": "local_key" + }, + "expectResult": { + "_id": { + "$binary": { + "base64": "bG9jYWxrZXlsb2NhbGtleQ==", + "subType": "04" + } + }, + "keyAltNames": [ + "local_key" + ], + "keyMaterial": { + "$binary": { + "base64": "ABKBldDEoDW323yejOnIRk6YQmlD9d3eQthd16scKL75nz2LjNL9fgPDZWrFFOlqlhMCFaSrNJfGrFUjYk5JFDO7soG5Syb50k1niJoKg4ilsj0L4mpimFUtTpOr2nzZOeQtvAksEXc7gsFgq8gV7t/U3lsaXPY7I0t42DfSE8EGlPdxRjFdHnxh+OR8h7U9b8Qs5K5UuhgyeyxaBZ1Hgw==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "status": 1, + "masterKey": { + "provider": "local" + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "databaseName": "keyvault", + "command": { + "find": "datakeys", + "filter": { + "keyAltNames": "local_key" + }, + "readConcern": { + "level": "majority" + } + } + } + } + ] + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/getKeys.json b/test/client-side-encryption/spec/unified/getKeys.json new file mode 100644 index 0000000000..bd07af3804 --- /dev/null +++ b/test/client-side-encryption/spec/unified/getKeys.json @@ -0,0 +1,260 @@ +{ + "description": "getKeys", + "schemaVersion": "1.8", + "runOnRequirements": [ + { + "csfle": true + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "clientEncryption": { + "id": "clientEncryption0", + "clientEncryptionOpts": { + "keyVaultClient": "client0", + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "local": { + "key": { + "$$placeholder": 1 + } + } + } + } + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "keyvault" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "datakeys" + } + } + ], + "initialData": [ + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [] + } + ], + "tests": [ + { + "description": "getKeys with zero key 
documents", + "operations": [ + { + "name": "getKeys", + "object": "clientEncryption0", + "expectResult": [] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "databaseName": "keyvault", + "command": { + "find": "datakeys", + "filter": {}, + "readConcern": { + "level": "majority" + } + } + } + } + ] + } + ] + }, + { + "description": "getKeys with single key documents", + "operations": [ + { + "name": "createKey", + "object": "clientEncryption0", + "arguments": { + "kmsProvider": "local", + "opts": { + "keyAltNames": [ + "abc" + ] + } + }, + "expectResult": { + "$$type": "binData" + } + }, + { + "name": "getKeys", + "object": "clientEncryption0", + "expectResult": [ + { + "_id": { + "$$type": "binData" + }, + "keyAltNames": [ + "abc" + ], + "keyMaterial": { + "$$type": "binData" + }, + "creationDate": { + "$$type": "date" + }, + "updateDate": { + "$$type": "date" + }, + "status": { + "$$type": "int" + }, + "masterKey": { + "$$type": "object" + } + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "databaseName": "keyvault", + "command": { + "find": "datakeys", + "filter": {}, + "readConcern": { + "level": "majority" + } + } + } + } + ] + } + ] + }, + { + "description": "getKeys with many key documents", + "operations": [ + { + "name": "createKey", + "object": "clientEncryption0", + "arguments": { + "kmsProvider": "local" + }, + "expectResult": { + "$$type": "binData" + } + }, + { + "name": "createKey", + "object": "clientEncryption0", + "arguments": { + "kmsProvider": "local" + }, + "expectResult": { + "$$type": "binData" + } + }, + { + "name": "getKeys", + "object": "clientEncryption0", + "expectResult": [ + { + "_id": { + "$$type": "binData" + }, + "keyMaterial": { + "$$type": "binData" + }, + "creationDate": { + "$$type": "date" + }, + "updateDate": { + "$$type": "date" + }, + "status": { + "$$type": "int" + }, + "masterKey": { + "$$type": "object" + } + }, + { + "_id": { + "$$type": "binData" + }, + "keyMaterial": { + "$$type": "binData" + }, + "creationDate": { + "$$type": "date" + }, + "updateDate": { + "$$type": "date" + }, + "status": { + "$$type": "int" + }, + "masterKey": { + "$$type": "object" + } + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "databaseName": "keyvault", + "command": { + "find": "datakeys", + "filter": {}, + "readConcern": { + "level": "majority" + } + } + } + } + ] + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/removeKeyAltName.json b/test/client-side-encryption/spec/unified/removeKeyAltName.json new file mode 100644 index 0000000000..f94d9b02dc --- /dev/null +++ b/test/client-side-encryption/spec/unified/removeKeyAltName.json @@ -0,0 +1,572 @@ +{ + "description": "removeKeyAltName", + "schemaVersion": "1.8", + "runOnRequirements": [ + { + "csfle": true + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "clientEncryption": { + "id": "clientEncryption0", + "clientEncryptionOpts": { + "keyVaultClient": "client0", + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "local": {} + } + } + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": 
"keyvault" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "datakeys" + } + } + ], + "initialData": [ + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "_id": { + "$binary": { + "base64": "bG9jYWxrZXlsb2NhbGtleQ==", + "subType": "04" + } + }, + "keyAltNames": [ + "alternate_name", + "local_key" + ], + "keyMaterial": { + "$binary": { + "base64": "ABKBldDEoDW323yejOnIRk6YQmlD9d3eQthd16scKL75nz2LjNL9fgPDZWrFFOlqlhMCFaSrNJfGrFUjYk5JFDO7soG5Syb50k1niJoKg4ilsj0L4mpimFUtTpOr2nzZOeQtvAksEXc7gsFgq8gV7t/U3lsaXPY7I0t42DfSE8EGlPdxRjFdHnxh+OR8h7U9b8Qs5K5UuhgyeyxaBZ1Hgw==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "status": 1, + "masterKey": { + "provider": "local" + } + } + ] + } + ], + "tests": [ + { + "description": "remove keyAltName from non-existent data key", + "operations": [ + { + "name": "removeKeyAltName", + "object": "clientEncryption0", + "arguments": { + "id": { + "$binary": { + "base64": "AAAjYWxrZXlsb2NhbGtleQ==", + "subType": "04" + } + }, + "keyAltName": "does_not_exist" + }, + "expectResult": null + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "databaseName": "keyvault", + "command": { + "findAndModify": "datakeys", + "query": { + "_id": { + "$binary": { + "base64": "AAAjYWxrZXlsb2NhbGtleQ==", + "subType": "04" + } + } + }, + "update": { + "$pull": { + "keyAltNames": "does_not_exist" + } + }, + "writeConcern": { + "w": "majority" + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "datakeys", + "databaseName": "keyvault", + "documents": [ + { + "_id": { + "$binary": { + "base64": "bG9jYWxrZXlsb2NhbGtleQ==", + "subType": "04" + } + }, + "keyAltNames": [ + "alternate_name", + "local_key" + ], + "keyMaterial": { + "$binary": { + "base64": "ABKBldDEoDW323yejOnIRk6YQmlD9d3eQthd16scKL75nz2LjNL9fgPDZWrFFOlqlhMCFaSrNJfGrFUjYk5JFDO7soG5Syb50k1niJoKg4ilsj0L4mpimFUtTpOr2nzZOeQtvAksEXc7gsFgq8gV7t/U3lsaXPY7I0t42DfSE8EGlPdxRjFdHnxh+OR8h7U9b8Qs5K5UuhgyeyxaBZ1Hgw==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "status": 1, + "masterKey": { + "provider": "local" + } + } + ] + } + ] + }, + { + "description": "remove non-existent keyAltName from existing data key", + "operations": [ + { + "name": "removeKeyAltName", + "object": "clientEncryption0", + "arguments": { + "id": { + "$binary": { + "base64": "bG9jYWxrZXlsb2NhbGtleQ==", + "subType": "04" + } + }, + "keyAltName": "does_not_exist" + }, + "expectResult": { + "_id": { + "$binary": { + "base64": "bG9jYWxrZXlsb2NhbGtleQ==", + "subType": "04" + } + }, + "keyAltNames": [ + "alternate_name", + "local_key" + ], + "keyMaterial": { + "$binary": { + "base64": "ABKBldDEoDW323yejOnIRk6YQmlD9d3eQthd16scKL75nz2LjNL9fgPDZWrFFOlqlhMCFaSrNJfGrFUjYk5JFDO7soG5Syb50k1niJoKg4ilsj0L4mpimFUtTpOr2nzZOeQtvAksEXc7gsFgq8gV7t/U3lsaXPY7I0t42DfSE8EGlPdxRjFdHnxh+OR8h7U9b8Qs5K5UuhgyeyxaBZ1Hgw==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "status": 1, + "masterKey": { + "provider": "local" + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "databaseName": 
"keyvault", + "command": { + "findAndModify": "datakeys", + "query": { + "_id": { + "$binary": { + "base64": "bG9jYWxrZXlsb2NhbGtleQ==", + "subType": "04" + } + } + }, + "update": { + "$pull": { + "keyAltNames": "does_not_exist" + } + }, + "writeConcern": { + "w": "majority" + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "datakeys", + "databaseName": "keyvault", + "documents": [ + { + "_id": { + "$binary": { + "base64": "bG9jYWxrZXlsb2NhbGtleQ==", + "subType": "04" + } + }, + "keyAltNames": [ + "alternate_name", + "local_key" + ], + "keyMaterial": { + "$binary": { + "base64": "ABKBldDEoDW323yejOnIRk6YQmlD9d3eQthd16scKL75nz2LjNL9fgPDZWrFFOlqlhMCFaSrNJfGrFUjYk5JFDO7soG5Syb50k1niJoKg4ilsj0L4mpimFUtTpOr2nzZOeQtvAksEXc7gsFgq8gV7t/U3lsaXPY7I0t42DfSE8EGlPdxRjFdHnxh+OR8h7U9b8Qs5K5UuhgyeyxaBZ1Hgw==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "status": 1, + "masterKey": { + "provider": "local" + } + } + ] + } + ] + }, + { + "description": "remove an existing keyAltName from an existing data key", + "operations": [ + { + "name": "removeKeyAltName", + "object": "clientEncryption0", + "arguments": { + "id": { + "$binary": { + "base64": "bG9jYWxrZXlsb2NhbGtleQ==", + "subType": "04" + } + }, + "keyAltName": "alternate_name" + }, + "expectResult": { + "_id": { + "$binary": { + "base64": "bG9jYWxrZXlsb2NhbGtleQ==", + "subType": "04" + } + }, + "keyAltNames": [ + "alternate_name", + "local_key" + ], + "keyMaterial": { + "$binary": { + "base64": "ABKBldDEoDW323yejOnIRk6YQmlD9d3eQthd16scKL75nz2LjNL9fgPDZWrFFOlqlhMCFaSrNJfGrFUjYk5JFDO7soG5Syb50k1niJoKg4ilsj0L4mpimFUtTpOr2nzZOeQtvAksEXc7gsFgq8gV7t/U3lsaXPY7I0t42DfSE8EGlPdxRjFdHnxh+OR8h7U9b8Qs5K5UuhgyeyxaBZ1Hgw==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "status": 1, + "masterKey": { + "provider": "local" + } + } + }, + { + "name": "find", + "object": "collection0", + "arguments": { + "filter": {}, + "projection": { + "_id": 0, + "keyAltNames": 1 + } + }, + "expectResult": [ + { + "keyAltNames": [ + "local_key" + ] + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "databaseName": "keyvault", + "command": { + "findAndModify": "datakeys", + "query": { + "_id": { + "$binary": { + "base64": "bG9jYWxrZXlsb2NhbGtleQ==", + "subType": "04" + } + } + }, + "update": { + "$pull": { + "keyAltNames": "alternate_name" + } + }, + "writeConcern": { + "w": "majority" + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "find" + } + } + ] + } + ] + }, + { + "description": "remove the last keyAltName from an existing data key", + "operations": [ + { + "name": "removeKeyAltName", + "object": "clientEncryption0", + "arguments": { + "id": { + "$binary": { + "base64": "bG9jYWxrZXlsb2NhbGtleQ==", + "subType": "04" + } + }, + "keyAltName": "alternate_name" + }, + "expectResult": { + "_id": { + "$binary": { + "base64": "bG9jYWxrZXlsb2NhbGtleQ==", + "subType": "04" + } + }, + "keyAltNames": [ + "alternate_name", + "local_key" + ], + "keyMaterial": { + "$binary": { + "base64": "ABKBldDEoDW323yejOnIRk6YQmlD9d3eQthd16scKL75nz2LjNL9fgPDZWrFFOlqlhMCFaSrNJfGrFUjYk5JFDO7soG5Syb50k1niJoKg4ilsj0L4mpimFUtTpOr2nzZOeQtvAksEXc7gsFgq8gV7t/U3lsaXPY7I0t42DfSE8EGlPdxRjFdHnxh+OR8h7U9b8Qs5K5UuhgyeyxaBZ1Hgw==", + "subType": "00" + } + }, + 
"creationDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "status": 1, + "masterKey": { + "provider": "local" + } + } + }, + { + "name": "removeKeyAltName", + "object": "clientEncryption0", + "arguments": { + "id": { + "$binary": { + "base64": "bG9jYWxrZXlsb2NhbGtleQ==", + "subType": "04" + } + }, + "keyAltName": "local_key" + }, + "expectResult": { + "_id": { + "$binary": { + "base64": "bG9jYWxrZXlsb2NhbGtleQ==", + "subType": "04" + } + }, + "keyAltNames": [ + "local_key" + ], + "keyMaterial": { + "$$type": "binData" + }, + "creationDate": { + "$$type": "date" + }, + "updateDate": { + "$$type": "date" + }, + "status": 1, + "masterKey": { + "provider": "local" + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "databaseName": "keyvault", + "command": { + "findAndModify": "datakeys", + "query": { + "_id": { + "$binary": { + "base64": "bG9jYWxrZXlsb2NhbGtleQ==", + "subType": "04" + } + } + }, + "update": { + "$pull": { + "keyAltNames": "alternate_name" + } + }, + "writeConcern": { + "w": "majority" + } + } + } + }, + { + "commandStartedEvent": { + "databaseName": "keyvault", + "command": { + "findAndModify": "datakeys", + "query": { + "_id": { + "$binary": { + "base64": "bG9jYWxrZXlsb2NhbGtleQ==", + "subType": "04" + } + } + }, + "update": { + "$pull": { + "keyAltNames": "local_key" + } + }, + "writeConcern": { + "w": "majority" + } + } + } + }, + { + "commandStartedEvent": { + "databaseName": "keyvault", + "command": { + "update": "datakeys", + "updates": [ + { + "q": { + "_id": { + "$binary": { + "base64": "bG9jYWxrZXlsb2NhbGtleQ==", + "subType": "04" + } + } + }, + "u": { + "$unset": { + "keyAltNames": true + } + } + } + ], + "writeConcern": { + "w": "majority" + } + } + } + } + ] + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/rewrapManyDataKey-decrypt_failure.json b/test/client-side-encryption/spec/unified/rewrapManyDataKey-decrypt_failure.json new file mode 100644 index 0000000000..4c7d4e8048 --- /dev/null +++ b/test/client-side-encryption/spec/unified/rewrapManyDataKey-decrypt_failure.json @@ -0,0 +1,162 @@ +{ + "description": "rewrapManyDataKey-decrypt_failure", + "schemaVersion": "1.8", + "runOnRequirements": [ + { + "csfle": true + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "clientEncryption": { + "id": "clientEncryption0", + "clientEncryptionOpts": { + "keyVaultClient": "client0", + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "aws": { + "accessKeyId": { + "$$placeholder": 1 + }, + "secretAccessKey": { + "$$placeholder": 1 + } + }, + "azure": { + "tenantId": { + "$$placeholder": 1 + }, + "clientId": { + "$$placeholder": 1 + }, + "clientSecret": { + "$$placeholder": 1 + } + }, + "gcp": { + "email": { + "$$placeholder": 1 + }, + "privateKey": { + "$$placeholder": 1 + } + }, + "kmip": { + "endpoint": { + "$$placeholder": 1 + } + }, + "local": { + "key": { + "$$placeholder": 1 + } + } + } + } + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "keyvault" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "datakeys" + } + } + ], + "initialData": [ + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "_id": { + "$binary": { + "base64": "YXdzYXdzYXdzYXdzYXdzYQ==", + "subType": "04" + } + 
}, + "keyAltNames": [ + "aws_key" + ], + "keyMaterial": { + "$binary": { + "base64": "AQICAHhQNmWG2CzOm1dq3kWLM+iDUZhEqnhJwH9wZVpuZ94A8gFXJqbF0Fy872MD7xl56D/2AAAAwjCBvwYJKoZIhvcNAQcGoIGxMIGuAgEAMIGoBgkqhkiG9w0BBwEwHgYJYIZIAWUDBAEuMBEEDO7HPisPUlGzaio9vgIBEIB7/Qow46PMh/8JbEUbdXgTGhLfXPE+KIVW7T8s6YEMlGiRvMu7TV0QCIUJlSHPKZxzlJ2iwuz5yXeOag+EdY+eIQ0RKrsJ3b8UTisZYzGjfzZnxUKLzLoeXremtRCm3x47wCuHKd1dhh6FBbYt5TL2tDaj+vL2GBrKat2L", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "status": 1, + "masterKey": { + "provider": "aws", + "key": "arn:aws:kms:us-east-2:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0", + "region": "us-east-2" + } + } + ] + } + ], + "tests": [ + { + "description": "rewrap data key that fails during decryption due to invalid masterKey", + "operations": [ + { + "name": "rewrapManyDataKey", + "object": "clientEncryption0", + "arguments": { + "filter": {}, + "opts": { + "provider": "local" + } + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "keyvault", + "command": { + "find": "datakeys", + "filter": {}, + "readConcern": { + "level": "majority" + } + } + } + } + ] + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/rewrapManyDataKey-encrypt_failure.json b/test/client-side-encryption/spec/unified/rewrapManyDataKey-encrypt_failure.json new file mode 100644 index 0000000000..cd2d20c255 --- /dev/null +++ b/test/client-side-encryption/spec/unified/rewrapManyDataKey-encrypt_failure.json @@ -0,0 +1,250 @@ +{ + "description": "rewrapManyDataKey-encrypt_failure", + "schemaVersion": "1.8", + "runOnRequirements": [ + { + "csfle": true + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "clientEncryption": { + "id": "clientEncryption0", + "clientEncryptionOpts": { + "keyVaultClient": "client0", + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "aws": { + "accessKeyId": { + "$$placeholder": 1 + }, + "secretAccessKey": { + "$$placeholder": 1 + } + }, + "azure": { + "tenantId": { + "$$placeholder": 1 + }, + "clientId": { + "$$placeholder": 1 + }, + "clientSecret": { + "$$placeholder": 1 + } + }, + "gcp": { + "email": { + "$$placeholder": 1 + }, + "privateKey": { + "$$placeholder": 1 + } + }, + "kmip": { + "endpoint": { + "$$placeholder": 1 + } + }, + "local": { + "key": { + "$$placeholder": 1 + } + } + } + } + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "keyvault" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "datakeys" + } + } + ], + "initialData": [ + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "_id": { + "$binary": { + "base64": "bG9jYWxrZXlsb2NhbGtleQ==", + "subType": "04" + } + }, + "keyAltNames": [ + "local_key" + ], + "keyMaterial": { + "$binary": { + "base64": "ABKBldDEoDW323yejOnIRk6YQmlD9d3eQthd16scKL75nz2LjNL9fgPDZWrFFOlqlhMCFaSrNJfGrFUjYk5JFDO7soG5Syb50k1niJoKg4ilsj0L4mpimFUtTpOr2nzZOeQtvAksEXc7gsFgq8gV7t/U3lsaXPY7I0t42DfSE8EGlPdxRjFdHnxh+OR8h7U9b8Qs5K5UuhgyeyxaBZ1Hgw==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + 
"status": 1, + "masterKey": { + "provider": "local" + } + } + ] + } + ], + "tests": [ + { + "description": "rewrap with invalid masterKey for AWS KMS provider", + "operations": [ + { + "name": "rewrapManyDataKey", + "object": "clientEncryption0", + "arguments": { + "filter": {}, + "opts": { + "provider": "aws", + "masterKey": { + "key": "arn:aws:kms:us-east-2:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0", + "region": "us-east-2" + } + } + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "keyvault", + "command": { + "find": "datakeys", + "filter": {}, + "readConcern": { + "level": "majority" + } + } + } + } + ] + } + ] + }, + { + "description": "rewrap with invalid masterKey for Azure KMS provider", + "operations": [ + { + "name": "rewrapManyDataKey", + "object": "clientEncryption0", + "arguments": { + "filter": {}, + "opts": { + "provider": "azure", + "masterKey": { + "keyVaultEndpoint": "invalid-vault-csfle.vault.azure.net", + "keyName": "invalid-name-csfle" + } + } + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "keyvault", + "command": { + "find": "datakeys", + "filter": {}, + "readConcern": { + "level": "majority" + } + } + } + } + ] + } + ] + }, + { + "description": "rewrap with invalid masterKey for GCP KMS provider", + "operations": [ + { + "name": "rewrapManyDataKey", + "object": "clientEncryption0", + "arguments": { + "filter": {}, + "opts": { + "provider": "gcp", + "masterKey": { + "projectId": "devprod-drivers", + "location": "global", + "keyRing": "invalid-ring-csfle", + "keyName": "invalid-name-csfle" + } + } + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "keyvault", + "command": { + "find": "datakeys", + "filter": {}, + "readConcern": { + "level": "majority" + } + } + } + } + ] + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/rewrapManyDataKey.json b/test/client-side-encryption/spec/unified/rewrapManyDataKey.json new file mode 100644 index 0000000000..ed7568ca4d --- /dev/null +++ b/test/client-side-encryption/spec/unified/rewrapManyDataKey.json @@ -0,0 +1,1373 @@ +{ + "description": "rewrapManyDataKey-kms_providers", + "schemaVersion": "1.8", + "runOnRequirements": [ + { + "csfle": true + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "clientEncryption": { + "id": "clientEncryption0", + "clientEncryptionOpts": { + "keyVaultClient": "client0", + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "aws": { + "accessKeyId": { + "$$placeholder": 1 + }, + "secretAccessKey": { + "$$placeholder": 1 + } + }, + "azure": { + "tenantId": { + "$$placeholder": 1 + }, + "clientId": { + "$$placeholder": 1 + }, + "clientSecret": { + "$$placeholder": 1 + } + }, + "gcp": { + "email": { + "$$placeholder": 1 + }, + "privateKey": { + "$$placeholder": 1 + } + }, + "kmip": { + "endpoint": { + "$$placeholder": 1 + } + }, + "local": { + "key": { + "$$placeholder": 1 + } + } + } + } + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "keyvault" + } + }, + { + "collection": { + "id": "collection0", + 
"database": "database0", + "collectionName": "datakeys" + } + } + ], + "initialData": [ + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "_id": { + "$binary": { + "base64": "YXdzYXdzYXdzYXdzYXdzYQ==", + "subType": "04" + } + }, + "keyAltNames": [ + "aws_key" + ], + "keyMaterial": { + "$binary": { + "base64": "AQICAHhQNmWG2CzOm1dq3kWLM+iDUZhEqnhJwH9wZVpuZ94A8gFXJqbF0Fy872MD7xl56D/2AAAAwjCBvwYJKoZIhvcNAQcGoIGxMIGuAgEAMIGoBgkqhkiG9w0BBwEwHgYJYIZIAWUDBAEuMBEEDO7HPisPUlGzaio9vgIBEIB7/Qow46PMh/8JbEUbdXgTGhLfXPE+KIVW7T8s6YEMlGiRvMu7TV0QCIUJlSHPKZxzlJ2iwuz5yXeOag+EdY+eIQ0RKrsJ3b8UTisZYzGjfzZnxUKLzLoeXremtRCm3x47wCuHKd1dhh6FBbYt5TL2tDaj+vL2GBrKat2L", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "status": 1, + "masterKey": { + "provider": "aws", + "key": "arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0", + "region": "us-east-1" + } + }, + { + "_id": { + "$binary": { + "base64": "YXp1cmVhenVyZWF6dXJlYQ==", + "subType": "04" + } + }, + "keyAltNames": [ + "azure_key" + ], + "keyMaterial": { + "$binary": { + "base64": "AQICAHhQNmWG2CzOm1dq3kWLM+iDUZhEqnhJwH9wZVpuZ94A8gEGkNTybTc7Eyif0f+qqE0lAAAAwjCBvwYJKoZIhvcNAQcGoIGxMIGuAgEAMIGoBgkqhkiG9w0BBwEwHgYJYIZIAWUDBAEuMBEEDB2j78AeuIQxcRh8cQIBEIB7vj9buHEaT7XHFIsKBJiyzZRmNnjvqMK5LSdzonKdx97jlqauvPvTDXSsdQDcspUs5oLrGmAXpbFResscxmbwZoKgUtWiuIOpeAcYuszCiMKt15s1WIMLDXUhYtfCmhRhekvgHnRAaK4HJMlGE+lKJXYI84E0b86Cd/g+", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "status": 1, + "masterKey": { + "provider": "azure", + "keyVaultEndpoint": "key-vault-csfle.vault.azure.net", + "keyName": "key-name-csfle" + } + }, + { + "_id": { + "$binary": { + "base64": "Z2NwZ2NwZ2NwZ2NwZ2NwZw==", + "subType": "04" + } + }, + "keyAltNames": [ + "gcp_key" + ], + "keyMaterial": { + "$binary": { + "base64": "CiQAIgLj0USbQtof/pYRLQO96yg/JEtZbD1UxKueaC37yzT5tTkSiQEAhClWB5ZCSgzHgxv8raWjNB4r7e8ePGdsmSuYTYmLC5oHHS/BdQisConzNKFaobEQZHamTCjyhy5NotKF8MWoo+dyfQApwI29+vAGyrUIQCXzKwRnNdNQ+lb3vJtS5bqvLTvSxKHpVca2kqyC9nhonV+u4qru5Q2bAqUgVFc8fL4pBuvlowZFTQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "status": 1, + "masterKey": { + "provider": "gcp", + "projectId": "devprod-drivers", + "location": "global", + "keyRing": "key-ring-csfle", + "keyName": "key-name-csfle" + } + }, + { + "_id": { + "$binary": { + "base64": "a21pcGttaXBrbWlwa21pcA==", + "subType": "04" + } + }, + "keyAltNames": [ + "kmip_key" + ], + "keyMaterial": { + "$binary": { + "base64": "VoI9J8HusQ3u2gT9i8Awgg/6W4/igvLwRzn3SRDGx0Dl/1ayDMubphOw0ONPVKfuvS6HL3e4gAoCJ/uEz2KLFTVsEqYCpMhfAhgXxm8Ena8vDcOkCzFX+euvN/N2ES3wpzAD18b3qIH0MbBwKJP82d5GQ4pVfGnPW8Ujp9aO1qC/s0EqNqYyzJ1SyzhV9lAjHHGIENYJx+bBrekg2EeZBA==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "status": 1, + "masterKey": { + "provider": "kmip", + "keyId": "1" + } + }, + { + "_id": { + "$binary": { + "base64": "bG9jYWxrZXlsb2NhbGtleQ==", + "subType": "04" + } + }, + "keyAltNames": [ + "local_key" + ], + "keyMaterial": { + "$binary": { + "base64": 
"ABKBldDEoDW323yejOnIRk6YQmlD9d3eQthd16scKL75nz2LjNL9fgPDZWrFFOlqlhMCFaSrNJfGrFUjYk5JFDO7soG5Syb50k1niJoKg4ilsj0L4mpimFUtTpOr2nzZOeQtvAksEXc7gsFgq8gV7t/U3lsaXPY7I0t42DfSE8EGlPdxRjFdHnxh+OR8h7U9b8Qs5K5UuhgyeyxaBZ1Hgw==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "status": 1, + "masterKey": { + "provider": "local" + } + } + ] + } + ], + "tests": [ + { + "description": "no keys to rewrap due to no filter matches", + "operations": [ + { + "name": "rewrapManyDataKey", + "object": "clientEncryption0", + "arguments": { + "filter": { + "keyAltNames": "no_matching_keys" + }, + "opts": { + "provider": "local" + } + }, + "expectResult": { + "bulkWriteResult": {} + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "databaseName": "keyvault", + "command": { + "find": "datakeys", + "filter": { + "keyAltNames": "no_matching_keys" + }, + "readConcern": { + "level": "majority" + } + } + } + } + ] + } + ] + }, + { + "description": "rewrap with new AWS KMS provider", + "operations": [ + { + "name": "rewrapManyDataKey", + "object": "clientEncryption0", + "arguments": { + "filter": { + "keyAltNames": { + "$ne": "aws_key" + } + }, + "opts": { + "provider": "aws", + "masterKey": { + "key": "arn:aws:kms:us-east-1:579766882180:key/061334ae-07a8-4ceb-a813-8135540e837d", + "region": "us-east-1" + } + } + }, + "expectResult": { + "bulkWriteResult": { + "insertedCount": 0, + "matchedCount": 4, + "modifiedCount": 4, + "deletedCount": 0, + "upsertedCount": 0, + "upsertedIds": {} + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "databaseName": "keyvault", + "command": { + "find": "datakeys", + "filter": { + "keyAltNames": { + "$ne": "aws_key" + } + }, + "readConcern": { + "level": "majority" + } + } + } + }, + { + "commandStartedEvent": { + "databaseName": "keyvault", + "command": { + "update": "datakeys", + "ordered": true, + "updates": [ + { + "q": { + "_id": { + "$$type": "binData" + } + }, + "u": { + "$set": { + "masterKey": { + "provider": "aws", + "key": "arn:aws:kms:us-east-1:579766882180:key/061334ae-07a8-4ceb-a813-8135540e837d", + "region": "us-east-1" + }, + "keyMaterial": { + "$$type": "binData" + } + }, + "$currentDate": { + "updateDate": true + } + }, + "upsert": false, + "multi": false + }, + { + "q": { + "_id": { + "$$type": "binData" + } + }, + "u": { + "$set": { + "masterKey": { + "provider": "aws", + "key": "arn:aws:kms:us-east-1:579766882180:key/061334ae-07a8-4ceb-a813-8135540e837d", + "region": "us-east-1" + }, + "keyMaterial": { + "$$type": "binData" + } + }, + "$currentDate": { + "updateDate": true + } + }, + "upsert": false, + "multi": false + }, + { + "q": { + "_id": { + "$$type": "binData" + } + }, + "u": { + "$set": { + "masterKey": { + "provider": "aws", + "key": "arn:aws:kms:us-east-1:579766882180:key/061334ae-07a8-4ceb-a813-8135540e837d", + "region": "us-east-1" + }, + "keyMaterial": { + "$$type": "binData" + } + }, + "$currentDate": { + "updateDate": true + } + }, + "upsert": false, + "multi": false + }, + { + "q": { + "_id": { + "$$type": "binData" + } + }, + "u": { + "$set": { + "masterKey": { + "provider": "aws", + "key": "arn:aws:kms:us-east-1:579766882180:key/061334ae-07a8-4ceb-a813-8135540e837d", + "region": "us-east-1" + }, + "keyMaterial": { + "$$type": "binData" + } + }, + "$currentDate": { + "updateDate": true + } + }, + "upsert": 
false, + "multi": false + } + ], + "writeConcern": { + "w": "majority" + } + } + } + } + ] + } + ] + }, + { + "description": "rewrap with new Azure KMS provider", + "operations": [ + { + "name": "rewrapManyDataKey", + "object": "clientEncryption0", + "arguments": { + "filter": { + "keyAltNames": { + "$ne": "azure_key" + } + }, + "opts": { + "provider": "azure", + "masterKey": { + "keyVaultEndpoint": "key-vault-csfle.vault.azure.net", + "keyName": "key-name-csfle" + } + } + }, + "expectResult": { + "bulkWriteResult": { + "insertedCount": 0, + "matchedCount": 4, + "modifiedCount": 4, + "deletedCount": 0, + "upsertedCount": 0, + "upsertedIds": {} + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "databaseName": "keyvault", + "command": { + "find": "datakeys", + "filter": { + "keyAltNames": { + "$ne": "azure_key" + } + }, + "readConcern": { + "level": "majority" + } + } + } + }, + { + "commandStartedEvent": { + "databaseName": "keyvault", + "command": { + "update": "datakeys", + "ordered": true, + "updates": [ + { + "q": { + "_id": { + "$$type": "binData" + } + }, + "u": { + "$set": { + "masterKey": { + "provider": "azure", + "keyVaultEndpoint": "key-vault-csfle.vault.azure.net", + "keyName": "key-name-csfle" + }, + "keyMaterial": { + "$$type": "binData" + } + }, + "$currentDate": { + "updateDate": true + } + }, + "upsert": false, + "multi": false + }, + { + "q": { + "_id": { + "$$type": "binData" + } + }, + "u": { + "$set": { + "masterKey": { + "provider": "azure", + "keyVaultEndpoint": "key-vault-csfle.vault.azure.net", + "keyName": "key-name-csfle" + }, + "keyMaterial": { + "$$type": "binData" + } + }, + "$currentDate": { + "updateDate": true + } + }, + "upsert": false, + "multi": false + }, + { + "q": { + "_id": { + "$$type": "binData" + } + }, + "u": { + "$set": { + "masterKey": { + "provider": "azure", + "keyVaultEndpoint": "key-vault-csfle.vault.azure.net", + "keyName": "key-name-csfle" + }, + "keyMaterial": { + "$$type": "binData" + } + }, + "$currentDate": { + "updateDate": true + } + }, + "upsert": false, + "multi": false + }, + { + "q": { + "_id": { + "$$type": "binData" + } + }, + "u": { + "$set": { + "masterKey": { + "provider": "azure", + "keyVaultEndpoint": "key-vault-csfle.vault.azure.net", + "keyName": "key-name-csfle" + }, + "keyMaterial": { + "$$type": "binData" + } + }, + "$currentDate": { + "updateDate": true + } + }, + "upsert": false, + "multi": false + } + ], + "writeConcern": { + "w": "majority" + } + } + } + } + ] + } + ] + }, + { + "description": "rewrap with new GCP KMS provider", + "operations": [ + { + "name": "rewrapManyDataKey", + "object": "clientEncryption0", + "arguments": { + "filter": { + "keyAltNames": { + "$ne": "gcp_key" + } + }, + "opts": { + "provider": "gcp", + "masterKey": { + "projectId": "devprod-drivers", + "location": "global", + "keyRing": "key-ring-csfle", + "keyName": "key-name-csfle" + } + } + }, + "expectResult": { + "bulkWriteResult": { + "insertedCount": 0, + "matchedCount": 4, + "modifiedCount": 4, + "deletedCount": 0, + "upsertedCount": 0, + "upsertedIds": {} + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "databaseName": "keyvault", + "command": { + "find": "datakeys", + "filter": { + "keyAltNames": { + "$ne": "gcp_key" + } + }, + "readConcern": { + "level": "majority" + } + } + } + }, + { + "commandStartedEvent": { + "databaseName": "keyvault", + "command": { + "update": "datakeys", + "ordered": true, + "updates": 
[ + { + "q": { + "_id": { + "$$type": "binData" + } + }, + "u": { + "$set": { + "masterKey": { + "provider": "gcp", + "projectId": "devprod-drivers", + "location": "global", + "keyRing": "key-ring-csfle", + "keyName": "key-name-csfle" + }, + "keyMaterial": { + "$$type": "binData" + } + }, + "$currentDate": { + "updateDate": true + } + }, + "upsert": false, + "multi": false + }, + { + "q": { + "_id": { + "$$type": "binData" + } + }, + "u": { + "$set": { + "masterKey": { + "provider": "gcp", + "projectId": "devprod-drivers", + "location": "global", + "keyRing": "key-ring-csfle", + "keyName": "key-name-csfle" + }, + "keyMaterial": { + "$$type": "binData" + } + }, + "$currentDate": { + "updateDate": true + } + }, + "upsert": false, + "multi": false + }, + { + "q": { + "_id": { + "$$type": "binData" + } + }, + "u": { + "$set": { + "masterKey": { + "provider": "gcp", + "projectId": "devprod-drivers", + "location": "global", + "keyRing": "key-ring-csfle", + "keyName": "key-name-csfle" + }, + "keyMaterial": { + "$$type": "binData" + } + }, + "$currentDate": { + "updateDate": true + } + }, + "upsert": false, + "multi": false + }, + { + "q": { + "_id": { + "$$type": "binData" + } + }, + "u": { + "$set": { + "masterKey": { + "provider": "gcp", + "projectId": "devprod-drivers", + "location": "global", + "keyRing": "key-ring-csfle", + "keyName": "key-name-csfle" + }, + "keyMaterial": { + "$$type": "binData" + } + }, + "$currentDate": { + "updateDate": true + } + }, + "upsert": false, + "multi": false + } + ], + "writeConcern": { + "w": "majority" + } + } + } + } + ] + } + ] + }, + { + "description": "rewrap with new KMIP KMS provider", + "operations": [ + { + "name": "rewrapManyDataKey", + "object": "clientEncryption0", + "arguments": { + "filter": { + "keyAltNames": { + "$ne": "kmip_key" + } + }, + "opts": { + "provider": "kmip" + } + }, + "expectResult": { + "bulkWriteResult": { + "insertedCount": 0, + "matchedCount": 4, + "modifiedCount": 4, + "deletedCount": 0, + "upsertedCount": 0, + "upsertedIds": {} + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "databaseName": "keyvault", + "command": { + "find": "datakeys", + "filter": { + "keyAltNames": { + "$ne": "kmip_key" + } + }, + "readConcern": { + "level": "majority" + } + } + } + }, + { + "commandStartedEvent": { + "databaseName": "keyvault", + "command": { + "update": "datakeys", + "ordered": true, + "updates": [ + { + "q": { + "_id": { + "$$type": "binData" + } + }, + "u": { + "$set": { + "masterKey": { + "provider": "kmip", + "keyId": { + "$$type": "string" + } + }, + "keyMaterial": { + "$$type": "binData" + } + }, + "$currentDate": { + "updateDate": true + } + }, + "upsert": false, + "multi": false + }, + { + "q": { + "_id": { + "$$type": "binData" + } + }, + "u": { + "$set": { + "masterKey": { + "provider": "kmip", + "keyId": { + "$$type": "string" + } + }, + "keyMaterial": { + "$$type": "binData" + } + }, + "$currentDate": { + "updateDate": true + } + }, + "upsert": false, + "multi": false + }, + { + "q": { + "_id": { + "$$type": "binData" + } + }, + "u": { + "$set": { + "masterKey": { + "provider": "kmip", + "keyId": { + "$$type": "string" + } + }, + "keyMaterial": { + "$$type": "binData" + } + }, + "$currentDate": { + "updateDate": true + } + }, + "upsert": false, + "multi": false + }, + { + "q": { + "_id": { + "$$type": "binData" + } + }, + "u": { + "$set": { + "masterKey": { + "provider": "kmip", + "keyId": { + "$$type": "string" + } + }, + "keyMaterial": { + "$$type": "binData" 
+ } + }, + "$currentDate": { + "updateDate": true + } + }, + "upsert": false, + "multi": false + } + ], + "writeConcern": { + "w": "majority" + } + } + } + } + ] + } + ] + }, + { + "description": "rewrap with new local KMS provider", + "operations": [ + { + "name": "rewrapManyDataKey", + "object": "clientEncryption0", + "arguments": { + "filter": { + "keyAltNames": { + "$ne": "local_key" + } + }, + "opts": { + "provider": "local" + } + }, + "expectResult": { + "bulkWriteResult": { + "insertedCount": 0, + "matchedCount": 4, + "modifiedCount": 4, + "deletedCount": 0, + "upsertedCount": 0, + "upsertedIds": {} + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "databaseName": "keyvault", + "command": { + "find": "datakeys", + "filter": { + "keyAltNames": { + "$ne": "local_key" + } + }, + "readConcern": { + "level": "majority" + } + } + } + }, + { + "commandStartedEvent": { + "databaseName": "keyvault", + "command": { + "update": "datakeys", + "ordered": true, + "updates": [ + { + "q": { + "_id": { + "$$type": "binData" + } + }, + "u": { + "$set": { + "masterKey": { + "provider": "local" + }, + "keyMaterial": { + "$$type": "binData" + } + }, + "$currentDate": { + "updateDate": true + } + }, + "upsert": false, + "multi": false + }, + { + "q": { + "_id": { + "$$type": "binData" + } + }, + "u": { + "$set": { + "masterKey": { + "provider": "local" + }, + "keyMaterial": { + "$$type": "binData" + } + }, + "$currentDate": { + "updateDate": true + } + }, + "upsert": false, + "multi": false + }, + { + "q": { + "_id": { + "$$type": "binData" + } + }, + "u": { + "$set": { + "masterKey": { + "provider": "local" + }, + "keyMaterial": { + "$$type": "binData" + } + }, + "$currentDate": { + "updateDate": true + } + }, + "upsert": false, + "multi": false + }, + { + "q": { + "_id": { + "$$type": "binData" + } + }, + "u": { + "$set": { + "masterKey": { + "provider": "local" + }, + "keyMaterial": { + "$$type": "binData" + } + }, + "$currentDate": { + "updateDate": true + } + }, + "upsert": false, + "multi": false + } + ], + "writeConcern": { + "w": "majority" + } + } + } + } + ] + } + ] + }, + { + "description": "rewrap with current KMS provider", + "operations": [ + { + "name": "rewrapManyDataKey", + "object": "clientEncryption0", + "arguments": { + "filter": {} + }, + "expectResult": { + "bulkWriteResult": { + "insertedCount": 0, + "matchedCount": 5, + "modifiedCount": 5, + "deletedCount": 0, + "upsertedCount": 0, + "upsertedIds": {} + } + } + }, + { + "name": "find", + "object": "collection0", + "arguments": { + "filter": {}, + "projection": { + "masterKey": 1 + }, + "sort": { + "keyAltNames": 1 + } + }, + "expectResult": [ + { + "_id": { + "$binary": { + "base64": "YXdzYXdzYXdzYXdzYXdzYQ==", + "subType": "04" + } + }, + "masterKey": { + "provider": "aws", + "key": "arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0", + "region": "us-east-1" + } + }, + { + "_id": { + "$binary": { + "base64": "YXp1cmVhenVyZWF6dXJlYQ==", + "subType": "04" + } + }, + "masterKey": { + "provider": "azure", + "keyVaultEndpoint": "key-vault-csfle.vault.azure.net", + "keyName": "key-name-csfle" + } + }, + { + "_id": { + "$binary": { + "base64": "Z2NwZ2NwZ2NwZ2NwZ2NwZw==", + "subType": "04" + } + }, + "masterKey": { + "provider": "gcp", + "projectId": "devprod-drivers", + "location": "global", + "keyRing": "key-ring-csfle", + "keyName": "key-name-csfle" + } + }, + { + "_id": { + "$binary": { + "base64": "a21pcGttaXBrbWlwa21pcA==", + "subType": "04" + } 
+ }, + "masterKey": { + "provider": "kmip", + "keyId": "1" + } + }, + { + "_id": { + "$binary": { + "base64": "bG9jYWxrZXlsb2NhbGtleQ==", + "subType": "04" + } + }, + "masterKey": { + "provider": "local" + } + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "databaseName": "keyvault", + "command": { + "find": "datakeys", + "filter": {}, + "readConcern": { + "level": "majority" + } + } + } + }, + { + "commandStartedEvent": { + "databaseName": "keyvault", + "command": { + "update": "datakeys", + "ordered": true, + "updates": [ + { + "q": { + "_id": { + "$$type": "binData" + } + }, + "u": { + "$set": { + "masterKey": { + "$$type": "object" + }, + "keyMaterial": { + "$$type": "binData" + } + }, + "$currentDate": { + "updateDate": true + } + }, + "upsert": false, + "multi": false + }, + { + "q": { + "_id": { + "$$type": "binData" + } + }, + "u": { + "$set": { + "masterKey": { + "$$type": "object" + }, + "keyMaterial": { + "$$type": "binData" + } + }, + "$currentDate": { + "updateDate": true + } + }, + "upsert": false, + "multi": false + }, + { + "q": { + "_id": { + "$$type": "binData" + } + }, + "u": { + "$set": { + "masterKey": { + "$$type": "object" + }, + "keyMaterial": { + "$$type": "binData" + } + }, + "$currentDate": { + "updateDate": true + } + }, + "upsert": false, + "multi": false + }, + { + "q": { + "_id": { + "$$type": "binData" + } + }, + "u": { + "$set": { + "masterKey": { + "$$type": "object" + }, + "keyMaterial": { + "$$type": "binData" + } + }, + "$currentDate": { + "updateDate": true + } + }, + "upsert": false, + "multi": false + }, + { + "q": { + "_id": { + "$$type": "binData" + } + }, + "u": { + "$set": { + "masterKey": { + "$$type": "object" + }, + "keyMaterial": { + "$$type": "binData" + } + }, + "$currentDate": { + "updateDate": true + } + }, + "upsert": false, + "multi": false + } + ], + "writeConcern": { + "w": "majority" + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "find" + } + } + ] + } + ] + } + ] +} diff --git a/test/test_encryption.py b/test/test_encryption.py index 288c137c7e..f5c6127a25 100644 --- a/test/test_encryption.py +++ b/test/test_encryption.py @@ -210,7 +210,7 @@ def assertBinaryUUID(self, val): # Location of JSON test files. 
BASE = os.path.join(os.path.dirname(os.path.realpath(__file__)), "client-side-encryption") -SPEC_PATH = os.path.join(BASE, "spec") +SPEC_PATH = os.path.join(BASE, "spec", "legacy") OPTS = CodecOptions() @@ -614,12 +614,13 @@ def parse_auto_encrypt_opts(self, opts): opts["kms_tls_options"] = KMS_TLS_OPTS if "key_vault_namespace" not in opts: opts["key_vault_namespace"] = "keyvault.datakeys" + opts = dict(opts) return AutoEncryptionOpts(**opts) def parse_client_options(self, opts): """Override clientOptions parsing to support autoEncryptOpts.""" - encrypt_opts = opts.pop("autoEncryptOpts") + encrypt_opts = opts.pop("autoEncryptOpts", None) if encrypt_opts: opts["auto_encryption_opts"] = self.parse_auto_encrypt_opts(encrypt_opts) @@ -638,18 +639,18 @@ def maybe_skip_scenario(self, test): def setup_scenario(self, scenario_def): """Override a test's setup.""" key_vault_data = scenario_def["key_vault_data"] + encrypted_fields = scenario_def["encrypted_fields"] json_schema = scenario_def["json_schema"] data = scenario_def["data"] + coll = client_context.client.get_database("keyvault", codec_options=OPTS)["datakeys"] + coll.delete_many({}) if key_vault_data: - coll = client_context.client.get_database("keyvault", codec_options=OPTS)["datakeys"] - coll.delete_many({}) coll.insert_many(key_vault_data) db_name = self.get_scenario_db_name(scenario_def) coll_name = self.get_scenario_coll_name(scenario_def) db = client_context.client.get_database(db_name, codec_options=OPTS) - coll = db[coll_name] - coll.drop() + coll = db.drop_collection(coll_name, encrypted_fields=encrypted_fields) wc = WriteConcern(w="majority") kwargs: Dict[str, Any] = {} if json_schema: @@ -657,8 +658,8 @@ def setup_scenario(self, scenario_def): kwargs["codec_options"] = OPTS if not data: kwargs["write_concern"] = wc - db.create_collection(coll_name, **kwargs) - + db.create_collection(coll_name, **kwargs, encrypted_fields=encrypted_fields) + coll = db[coll_name] if data: # Load data. 
coll.with_options(write_concern=wc).insert_many(scenario_def["data"]) diff --git a/test/utils.py b/test/utils.py index 8a79c97d93..03985772a0 100644 --- a/test/utils.py +++ b/test/utils.py @@ -174,15 +174,26 @@ def failed(self, event): class OvertCommandListener(EventListener): """A CommandListener that ignores sensitive commands.""" + ignore_list_collections = False + def started(self, event): + if self.ignore_list_collections and event.command_name.lower() == "listcollections": + self.ignore_list_collections = False + return if event.command_name.lower() not in _SENSITIVE_COMMANDS: super(OvertCommandListener, self).started(event) def succeeded(self, event): + if self.ignore_list_collections and event.command_name.lower() == "listcollections": + self.ignore_list_collections = False + return if event.command_name.lower() not in _SENSITIVE_COMMANDS: super(OvertCommandListener, self).succeeded(event) def failed(self, event): + if self.ignore_list_collections and event.command_name.lower() == "listcollections": + self.ignore_list_collections = False + return if event.command_name.lower() not in _SENSITIVE_COMMANDS: super(OvertCommandListener, self).failed(event) @@ -983,6 +994,8 @@ def parse_spec_options(opts): if "maxCommitTimeMS" in opts: opts["max_commit_time_ms"] = opts.pop("maxCommitTimeMS") + if "encryptedFields" in opts: + opts["encrypted_fields"] = opts.pop("encryptedFields") if "hint" in opts: hint = opts.pop("hint") if not isinstance(hint, str): @@ -1049,11 +1062,6 @@ def prepare_spec_arguments(spec, arguments, opname, entity_map, with_txn_callbac arguments["requests"] = requests elif arg_name == "session": arguments["session"] = entity_map[arguments["session"]] - elif opname in ("command", "run_admin_command") and arg_name == "command": - # Ensure the first key is the command name. - ordered_command = SON([(spec["command_name"], 1)]) - ordered_command.update(arguments["command"]) - arguments["command"] = ordered_command elif opname == "open_download_stream" and arg_name == "id": arguments["file_id"] = arguments.pop(arg_name) elif opname != "find" and c2s == "max_time_ms": diff --git a/test/utils_spec_runner.py b/test/utils_spec_runner.py index 4ae4d1bfb4..498a60220b 100644 --- a/test/utils_spec_runner.py +++ b/test/utils_spec_runner.py @@ -229,7 +229,19 @@ def check_result(self, expected_result, result): return True else: - self.assertEqual(result, expected_result) + + def _helper(expected_result, result): + if isinstance(expected_result, abc.Mapping): + for i in expected_result.keys(): + self.assertEqual(expected_result[i], result[i]) + + elif isinstance(expected_result, list): + for i, k in zip(expected_result, result): + _helper(i, k) + else: + self.assertEqual(expected_result, result) + + _helper(expected_result, result) def get_object_name(self, op): """Allow subclasses to override handling of 'object' @@ -294,8 +306,16 @@ def run_operation(self, sessions, collection, operation): args = {"sessions": sessions, "collection": collection} args.update(arguments) arguments = args - result = cmd(**dict(arguments)) + try: + if name == "create_collection" and ( + "encrypted" in operation["arguments"]["name"] + or "plaintext" in operation["arguments"]["name"] + ): + self.listener.ignore_list_collections = True + result = cmd(**dict(arguments)) + finally: + self.listener.ignore_list_collections = False # Cleanup open change stream cursors. 
if name == "watch": self.addCleanup(result.close) @@ -323,8 +343,7 @@ def _run_op(self, sessions, collection, op, in_with_transaction): expected_result = op.get("result") if expect_error(op): with self.assertRaises(self.allowable_errors(op), msg=op["name"]) as context: - self.run_operation(sessions, collection, op.copy()) - + out = self.run_operation(sessions, collection, op.copy()) if expect_error_message(expected_result): if isinstance(context.exception, BulkWriteError): errmsg = str(context.exception.details).lower() From 890cd26e1a2de3c0024ca7a9e35cdc8add088b34 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Mon, 6 Jun 2022 15:36:52 -0400 Subject: [PATCH 0163/1588] PYTHON-3288 Implement client side operation timeout (#954) Add timeoutMS URI option and MongoClient keyword argument. Add provisional/beta pymongo.timeout() api to set a deadline for a block of operations. --- .evergreen/resync-specs.sh | 4 + doc/api/pymongo/index.rst | 2 + doc/changelog.rst | 7 + pymongo/__init__.py | 47 +- pymongo/_csot.py | 80 + pymongo/bulk.py | 2 + pymongo/client_options.py | 11 + pymongo/client_session.py | 3 +- pymongo/collection.py | 4 + pymongo/common.py | 26 +- pymongo/database.py | 16 +- pymongo/encryption.py | 13 +- pymongo/message.py | 20 +- pymongo/mongo_client.py | 21 +- pymongo/network.py | 31 +- pymongo/ocsp_support.py | 8 +- pymongo/pool.py | 65 +- pymongo/pyopenssl_context.py | 4 +- pymongo/ssl_context.py | 3 + pymongo/ssl_support.py | 2 + pymongo/topology.py | 24 +- test/csot/bulkWrite.json | 159 + test/csot/change-streams.json | 598 ++ test/csot/close-cursors.json | 239 + test/csot/command-execution.json | 260 + test/csot/convenient-transactions.json | 191 + test/csot/cursors.json | 113 + test/csot/deprecated-options.json | 7179 +++++++++++++++++ test/csot/error-transformations.json | 181 + test/csot/global-timeoutMS.json | 5830 +++++++++++++ test/csot/gridfs-advanced.json | 370 + test/csot/gridfs-delete.json | 270 + test/csot/gridfs-download.json | 344 + test/csot/gridfs-find.json | 182 + test/csot/gridfs-upload.json | 408 + test/csot/legacy-timeouts.json | 379 + test/csot/non-tailable-cursors.json | 541 ++ test/csot/override-collection-timeoutMS.json | 3498 ++++++++ test/csot/override-database-timeoutMS.json | 4622 +++++++++++ test/csot/override-operation-timeoutMS.json | 3577 ++++++++ test/csot/retryability-legacy-timeouts.json | 3042 +++++++ test/csot/retryability-timeoutMS.json | 5439 +++++++++++++ test/csot/sessions-inherit-timeoutMS.json | 311 + ...sessions-override-operation-timeoutMS.json | 315 + test/csot/sessions-override-timeoutMS.json | 311 + test/csot/tailable-awaitData.json | 422 + test/csot/tailable-non-awaitData.json | 312 + test/test_csot.py | 32 + test/test_discovery_and_monitoring.py | 3 +- .../legacy/error-labels-blockConnection.json | 159 + .../collectionData-additionalProperties.json | 3 +- ...ollectionData-collectionName-required.json | 3 +- .../collectionData-collectionName-type.json | 3 +- .../collectionData-createOptions-type.json | 39 + .../collectionData-databaseName-required.json | 3 +- .../collectionData-databaseName-type.json | 3 +- .../collectionData-documents-items.json | 3 +- .../collectionData-documents-required.json | 3 +- .../collectionData-documents-type.json | 3 +- ...ctionOrDatabaseOptions-timeoutMS-type.json | 27 + .../expectedError-isTimeoutError-type.json | 25 + ...ventsForClient-ignoreExtraEvents-type.json | 24 + .../collectionData-createOptions.json | 68 + .../valid-pass/createEntities-operation.json | 74 + 
.../valid-pass/entity-cursor-iterateOnce.json | 108 + .../valid-pass/matches-lte-operator.json | 78 + test/unified_format.py | 114 +- test/uri_options/connection-options.json | 34 +- test/uri_options/tls-options.json | 9 - test/utils.py | 18 + 70 files changed, 40245 insertions(+), 77 deletions(-) create mode 100644 pymongo/_csot.py create mode 100644 test/csot/bulkWrite.json create mode 100644 test/csot/change-streams.json create mode 100644 test/csot/close-cursors.json create mode 100644 test/csot/command-execution.json create mode 100644 test/csot/convenient-transactions.json create mode 100644 test/csot/cursors.json create mode 100644 test/csot/deprecated-options.json create mode 100644 test/csot/error-transformations.json create mode 100644 test/csot/global-timeoutMS.json create mode 100644 test/csot/gridfs-advanced.json create mode 100644 test/csot/gridfs-delete.json create mode 100644 test/csot/gridfs-download.json create mode 100644 test/csot/gridfs-find.json create mode 100644 test/csot/gridfs-upload.json create mode 100644 test/csot/legacy-timeouts.json create mode 100644 test/csot/non-tailable-cursors.json create mode 100644 test/csot/override-collection-timeoutMS.json create mode 100644 test/csot/override-database-timeoutMS.json create mode 100644 test/csot/override-operation-timeoutMS.json create mode 100644 test/csot/retryability-legacy-timeouts.json create mode 100644 test/csot/retryability-timeoutMS.json create mode 100644 test/csot/sessions-inherit-timeoutMS.json create mode 100644 test/csot/sessions-override-operation-timeoutMS.json create mode 100644 test/csot/sessions-override-timeoutMS.json create mode 100644 test/csot/tailable-awaitData.json create mode 100644 test/csot/tailable-non-awaitData.json create mode 100644 test/test_csot.py create mode 100644 test/transactions/legacy/error-labels-blockConnection.json create mode 100644 test/unified-test-format/invalid/collectionData-createOptions-type.json create mode 100644 test/unified-test-format/invalid/collectionOrDatabaseOptions-timeoutMS-type.json create mode 100644 test/unified-test-format/invalid/expectedError-isTimeoutError-type.json create mode 100644 test/unified-test-format/invalid/expectedEventsForClient-ignoreExtraEvents-type.json create mode 100644 test/unified-test-format/valid-pass/collectionData-createOptions.json create mode 100644 test/unified-test-format/valid-pass/createEntities-operation.json create mode 100644 test/unified-test-format/valid-pass/entity-cursor-iterateOnce.json create mode 100644 test/unified-test-format/valid-pass/matches-lte-operator.json diff --git a/.evergreen/resync-specs.sh b/.evergreen/resync-specs.sh index 1177ebb04a..4f5366098b 100755 --- a/.evergreen/resync-specs.sh +++ b/.evergreen/resync-specs.sh @@ -105,6 +105,9 @@ do crud|CRUD) cpjson crud/tests/ crud ;; + csot|CSOT|client-side-operations-timeout) + cpjson client-side-operations-timeout/tests csot + ;; load-balancers|load_balancer) cpjson load-balancers/tests load_balancer ;; @@ -150,6 +153,7 @@ do ;; uri|uri-options|uri_options) cpjson uri-options/tests uri_options + cp "$SPECS"/source/uri-options/tests/*.pem $PYMONGO/test/uri_options ;; stable-api|versioned-api) cpjson versioned-api/tests versioned-api diff --git a/doc/api/pymongo/index.rst b/doc/api/pymongo/index.rst index 6e6e337950..a4e15b9878 100644 --- a/doc/api/pymongo/index.rst +++ b/doc/api/pymongo/index.rst @@ -22,6 +22,8 @@ The maximum wire protocol version PyMongo supports. + .. autofunction:: timeout + Sub-modules: .. 
toctree:: diff --git a/doc/changelog.rst b/doc/changelog.rst index f1085c4bff..5497b4f3e9 100644 --- a/doc/changelog.rst +++ b/doc/changelog.rst @@ -6,6 +6,13 @@ Changes in Version 4.2 .. warning:: PyMongo 4.2 drops support for Python 3.6: Python 3.7+ is now required. +PyMongo 4.2 brings a number of improvements including: + +- Support for MongoDB 6.0. +- Provisional (beta) support for :func:`pymongo.timeout` to apply a single timeout + to an entire block of pymongo operations. +- Beta support for Queryable Encryption with MongoDB 6.0. + Bug fixes ......... diff --git a/pymongo/__init__.py b/pymongo/__init__.py index 17c640b1fd..bdb1ec97c1 100644 --- a/pymongo/__init__.py +++ b/pymongo/__init__.py @@ -14,7 +14,7 @@ """Python driver for MongoDB.""" -from typing import Tuple, Union +from typing import ContextManager, Optional, Tuple, Union ASCENDING = 1 """Ascending sort order.""" @@ -69,6 +69,7 @@ def get_version_string() -> str: """Current version of PyMongo.""" +from pymongo import _csot from pymongo.collection import ReturnDocument # noqa: F401 from pymongo.common import ( # noqa: F401 MAX_SUPPORTED_WIRE_VERSION, @@ -97,3 +98,47 @@ def has_c() -> bool: return True except ImportError: return False + + +def timeout(seconds: Optional[float]) -> ContextManager: + """**(Provisional)** Apply the given timeout for a block of operations. + + .. note:: :func:`~pymongo.timeout` is currently provisional. Backwards + incompatible changes may occur before becoming officially supported. + + Use :func:`~pymongo.timeout` in a with-statement:: + + with pymongo.timeout(5): + client.db.coll.insert_one({}) + client.db.coll2.insert_one({}) + + When the with-statement is entered, a deadline is set for the entire + block. When that deadline is exceeded, any blocking pymongo operation + will raise a timeout exception. For example:: + + try: + with pymongo.timeout(5): + client.db.coll.insert_one({}) + time.sleep(5) + # The deadline has now expired, the next operation will raise + # a timeout exception. + client.db.coll2.insert_one({}) + except (ServerSelectionTimeoutError, ExecutionTimeout, WTimeoutError, + NetworkTimeout) as exc: + print(f"block timed out: {exc!r}") + + :Parameters: + - `seconds`: A non-negative floating point number expressing seconds, or None. + + :Raises: + - :py:class:`ValueError`: When `seconds` is negative. + + .. versionadded:: 4.2 + """ + if not isinstance(seconds, (int, float, type(None))): + raise TypeError("timeout must be None, an int, or a float") + if seconds and seconds < 0: + raise ValueError("timeout cannot be negative") + if seconds is not None: + seconds = float(seconds) + return _csot._TimeoutContext(seconds) diff --git a/pymongo/_csot.py b/pymongo/_csot.py new file mode 100644 index 0000000000..4085562ca8 --- /dev/null +++ b/pymongo/_csot.py @@ -0,0 +1,80 @@ +# Copyright 2022-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you +# may not use this file except in compliance with the License. You +# may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. See the License for the specific language governing +# permissions and limitations under the License. 
+ +"""Internal helpers for CSOT.""" + +import time +from contextvars import ContextVar +from typing import Optional + +TIMEOUT: ContextVar[Optional[float]] = ContextVar("TIMEOUT", default=None) +RTT: ContextVar[float] = ContextVar("RTT", default=0.0) +DEADLINE: ContextVar[float] = ContextVar("DEADLINE", default=float("inf")) + + +def get_timeout() -> Optional[float]: + return TIMEOUT.get(None) + + +def get_rtt() -> float: + return RTT.get() + + +def get_deadline() -> float: + return DEADLINE.get() + + +def set_rtt(rtt: float) -> None: + RTT.set(rtt) + + +def set_timeout(timeout: Optional[float]) -> None: + TIMEOUT.set(timeout) + DEADLINE.set(time.monotonic() + timeout if timeout else float("inf")) + + +def remaining() -> Optional[float]: + if not get_timeout(): + return None + return DEADLINE.get() - time.monotonic() + + +def clamp_remaining(max_timeout: float) -> float: + """Return the remaining timeout clamped to a max value.""" + timeout = remaining() + if timeout is None: + return max_timeout + return min(timeout, max_timeout) + + +class _TimeoutContext(object): + """Internal timeout context manager. + + Use :func:`pymongo.timeout` instead:: + + with client.timeout(0.5): + client.test.test.insert_one({}) + """ + + __slots__ = ("_timeout",) + + def __init__(self, timeout: Optional[float]): + self._timeout = timeout + + def __enter__(self): + set_timeout(self._timeout) + return self + + def __exit__(self, exc_type, exc_val, exc_tb): + set_timeout(None) diff --git a/pymongo/bulk.py b/pymongo/bulk.py index 44923f73df..7992383f67 100644 --- a/pymongo/bulk.py +++ b/pymongo/bulk.py @@ -330,6 +330,8 @@ def _execute_command( session._apply_to(cmd, retryable, ReadPreference.PRIMARY, sock_info) sock_info.send_cluster_time(cmd, session, client) sock_info.add_server_api(cmd) + # CSOT: apply timeout before encoding the command. + sock_info.apply_timeout(client, cmd) ops = islice(run.ops, run.idx_offset, None) # Run as many ops as possible in one command. diff --git a/pymongo/client_options.py b/pymongo/client_options.py index 4987601d5c..6784e32848 100644 --- a/pymongo/client_options.py +++ b/pymongo/client_options.py @@ -14,6 +14,8 @@ """Tools to parse mongo client options.""" +from typing import Optional + from bson.codec_options import _parse_codec_options from pymongo import common from pymongo.auth import _build_credentials_tuple @@ -195,6 +197,7 @@ def __init__(self, username, password, database, options): self.__server_selector = options.get("server_selector", any_server_selector) self.__auto_encryption_opts = options.get("auto_encryption_opts") self.__load_balanced = options.get("loadbalanced") + self.__timeout = options.get("timeoutms") @property def _options(self): @@ -260,6 +263,14 @@ def read_concern(self): """A :class:`~pymongo.read_concern.ReadConcern` instance.""" return self.__read_concern + @property + def timeout(self) -> Optional[float]: + """The timeout. 
+
+        .. versionadded:: 4.2
+        """
+        return self.__timeout
+
     @property
     def retry_writes(self):
         """If this instance should retry supported write operations."""
diff --git a/pymongo/client_session.py b/pymongo/client_session.py
index 7d70eb8f19..3ff98a579f 100644
--- a/pymongo/client_session.py
+++ b/pymongo/client_session.py
@@ -150,6 +150,7 @@
 from bson.int64 import Int64
 from bson.son import SON
 from bson.timestamp import Timestamp
+from pymongo import _csot
 from pymongo.cursor import _SocketManager
 from pymongo.errors import (
     ConfigurationError,
@@ -826,7 +827,7 @@ def _finish_transaction(self, sock_info, command_name):
         wc = opts.write_concern
         cmd = SON([(command_name, 1)])
         if command_name == "commitTransaction":
-            if opts.max_commit_time_ms:
+            if opts.max_commit_time_ms and _csot.get_timeout() is None:
                 cmd["maxTimeMS"] = opts.max_commit_time_ms
 
         # Transaction spec says that after the initial commit attempt,
diff --git a/pymongo/collection.py b/pymongo/collection.py
index ffd883e939..9f3f73198b 100644
--- a/pymongo/collection.py
+++ b/pymongo/collection.py
@@ -116,6 +116,7 @@ def __init__(
         write_concern: Optional[WriteConcern] = None,
         read_concern: Optional["ReadConcern"] = None,
         session: Optional["ClientSession"] = None,
+        timeout: Optional[float] = None,
         encrypted_fields: Optional[Mapping[str, Any]] = None,
         **kwargs: Any,
     ) -> None:
@@ -198,6 +199,7 @@ def __init__(
             read_preference or database.read_preference,
             write_concern or database.write_concern,
             read_concern or database.read_concern,
+            timeout if timeout is not None else database.timeout,
         )
         if not isinstance(name, str):
             raise TypeError("name must be an instance of str")
@@ -390,6 +392,7 @@ def with_options(
         read_preference: Optional[_ServerMode] = None,
         write_concern: Optional[WriteConcern] = None,
         read_concern: Optional["ReadConcern"] = None,
+        timeout: Optional[float] = None,
    ) -> "Collection[_DocumentType]":
         """Get a clone of this collection changing the specified settings.
 
@@ -428,6 +431,7 @@ def with_options(
             read_preference or self.read_preference,
             write_concern or self.write_concern,
             read_concern or self.read_concern,
+            timeout=timeout if timeout is not None else self.timeout,
         )
 
     def bulk_write(
diff --git a/pymongo/common.py b/pymongo/common.py
index 4376654405..858684bf05 100644
--- a/pymongo/common.py
+++ b/pymongo/common.py
@@ -339,6 +339,15 @@ def validate_timeout_or_none_or_zero(option: Any, value: Any) -> Optional[float]
     return validate_positive_float(option, value) / 1000.0
 
 
+def validate_timeoutms(option: Any, value: Any) -> Optional[float]:
+    """Validates a timeout specified in milliseconds returning
+    a value in floating point seconds.
+ """ + if value is None: + return None + return validate_positive_float_or_zero(option, value) / 1000.0 + + def validate_max_staleness(option: str, value: Any) -> int: """Validates maxStalenessSeconds according to the Max Staleness Spec.""" if value == -1 or value == "-1": @@ -658,6 +667,7 @@ def validate_auto_encryption_opts_or_none(option: Any, value: Any) -> Optional[A "zlibcompressionlevel": validate_zlib_compression_level, "srvservicename": validate_string, "srvmaxhosts": validate_non_negative_integer, + "timeoutms": validate_timeoutms, } # Dictionary where keys are the names of URI options specific to pymongo, @@ -821,8 +831,8 @@ def __init__( read_preference: _ServerMode, write_concern: WriteConcern, read_concern: ReadConcern, + timeout: Optional[float], ) -> None: - if not isinstance(codec_options, CodecOptions): raise TypeError("codec_options must be an instance of bson.codec_options.CodecOptions") self.__codec_options = codec_options @@ -845,6 +855,12 @@ def __init__( raise TypeError("read_concern must be an instance of pymongo.read_concern.ReadConcern") self.__read_concern = read_concern + if not isinstance(timeout, (int, float, type(None))): + raise TypeError("timeout must be None, an int, or a float") + if timeout and timeout < 0: + raise TypeError("timeout cannot be negative") + self.__timeout = float(timeout) if timeout else None + @property def codec_options(self) -> CodecOptions: """Read only access to the :class:`~bson.codec_options.CodecOptions` @@ -894,6 +910,14 @@ def read_concern(self) -> ReadConcern: """ return self.__read_concern + @property + def timeout(self) -> Optional[float]: + """Read only access to the timeout of this instance. + + .. versionadded:: 4.2 + """ + return self.__timeout + class _CaseInsensitiveDictionary(abc.MutableMapping): def __init__(self, *args, **kwargs): diff --git a/pymongo/database.py b/pymongo/database.py index bb91196f2e..393f63c8c8 100644 --- a/pymongo/database.py +++ b/pymongo/database.py @@ -75,6 +75,7 @@ def __init__( read_preference: Optional[_ServerMode] = None, write_concern: Optional["WriteConcern"] = None, read_concern: Optional["ReadConcern"] = None, + timeout: Optional[float] = None, ) -> None: """Get a database by client and name. @@ -127,6 +128,7 @@ def __init__( read_preference or client.read_preference, write_concern or client.write_concern, read_concern or client.read_concern, + timeout if timeout is not None else client.timeout, ) if not isinstance(name, str): @@ -154,6 +156,7 @@ def with_options( read_preference: Optional[_ServerMode] = None, write_concern: Optional["WriteConcern"] = None, read_concern: Optional["ReadConcern"] = None, + timeout: Optional[float] = None, ) -> "Database[_DocumentType]": """Get a clone of this database changing the specified settings. @@ -193,6 +196,7 @@ def with_options( read_preference or self.read_preference, write_concern or self.write_concern, read_concern or self.read_concern, + timeout if timeout is not None else self.timeout, ) def __eq__(self, other: Any) -> bool: @@ -241,6 +245,7 @@ def get_collection( read_preference: Optional[_ServerMode] = None, write_concern: Optional["WriteConcern"] = None, read_concern: Optional["ReadConcern"] = None, + timeout: Optional[float] = None, ) -> Collection[_DocumentType]: """Get a :class:`~pymongo.collection.Collection` with the given name and options. @@ -280,7 +285,14 @@ def get_collection( used. 
""" return Collection( - self, name, False, codec_options, read_preference, write_concern, read_concern + self, + name, + False, + codec_options, + read_preference, + write_concern, + read_concern, + timeout=timeout, ) def create_collection( @@ -291,6 +303,7 @@ def create_collection( write_concern: Optional["WriteConcern"] = None, read_concern: Optional["ReadConcern"] = None, session: Optional["ClientSession"] = None, + timeout: Optional[float] = None, encrypted_fields: Optional[Mapping[str, Any]] = None, **kwargs: Any, ) -> Collection[_DocumentType]: @@ -421,6 +434,7 @@ def create_collection( write_concern, read_concern, session=s, + timeout=timeout, encrypted_fields=encrypted_fields, **kwargs, ) diff --git a/pymongo/encryption.py b/pymongo/encryption.py index a7a69dbe34..a088bd2da8 100644 --- a/pymongo/encryption.py +++ b/pymongo/encryption.py @@ -16,6 +16,7 @@ import contextlib import enum +import socket import uuid import weakref from typing import Any, Mapping, Optional, Sequence @@ -38,6 +39,7 @@ from bson.errors import BSONError from bson.raw_bson import DEFAULT_RAW_BSON_OPTIONS, RawBSONDocument, _inflate_bson from bson.son import SON +from pymongo import _csot from pymongo.daemon import _spawn_daemon from pymongo.encryption_options import AutoEncryptionOpts from pymongo.errors import ( @@ -47,6 +49,7 @@ ServerSelectionTimeoutError, ) from pymongo.mongo_client import MongoClient +from pymongo.network import BLOCKING_IO_ERRORS from pymongo.pool import PoolOptions, _configured_socket from pymongo.read_concern import ReadConcern from pymongo.ssl_support import get_ssl_context @@ -119,9 +122,11 @@ def kms_request(self, kms_context): False, # allow_invalid_hostnames False, ) # disable_ocsp_endpoint_check + # CSOT: set timeout for socket creation. + connect_timeout = max(_csot.clamp_remaining(_KMS_CONNECT_TIMEOUT), 0.001) opts = PoolOptions( - connect_timeout=_KMS_CONNECT_TIMEOUT, - socket_timeout=_KMS_CONNECT_TIMEOUT, + connect_timeout=connect_timeout, + socket_timeout=connect_timeout, ssl_context=ctx, ) host, port = parse_host(endpoint, _HTTPS_PORT) @@ -129,10 +134,14 @@ def kms_request(self, kms_context): try: conn.sendall(message) while kms_context.bytes_needed > 0: + # CSOT: update timeout. + conn.settimeout(max(_csot.clamp_remaining(_KMS_CONNECT_TIMEOUT), 0)) data = conn.recv(kms_context.bytes_needed) if not data: raise OSError("KMS connection closed") kms_context.feed(data) + except BLOCKING_IO_ERRORS: + raise socket.timeout("timed out") finally: conn.close() diff --git a/pymongo/message.py b/pymongo/message.py index de43d20c97..bcdedd7b48 100644 --- a/pymongo/message.py +++ b/pymongo/message.py @@ -300,6 +300,9 @@ def __init__( self._as_command = None self.exhaust = exhaust + def reset(self): + self._as_command = None + def namespace(self): return "%s.%s" % (self.db, self.coll) @@ -320,7 +323,7 @@ def use_command(self, sock_info): sock_info.validate_session(self.client, self.session) return use_find_cmd - def as_command(self, sock_info): + def as_command(self, sock_info, apply_timeout=False): """Return a find command document for this query.""" # We use the command twice: on the wire and for command monitoring. # Generate it once, for speed and to avoid repeating side-effects. 
@@ -356,6 +359,9 @@ def as_command(self, sock_info): client = self.client if client._encrypter and not client._encrypter._bypass_auto_encryption: cmd = client._encrypter.encrypt(self.db, cmd, self.codec_options) + # Support CSOT + if apply_timeout: + sock_info.apply_timeout(client, cmd) self._as_command = cmd, self.db return self._as_command @@ -371,7 +377,7 @@ def get_message(self, read_preference, sock_info, use_cmd=False): spec = self.spec if use_cmd: - spec = self.as_command(sock_info)[0] + spec = self.as_command(sock_info, apply_timeout=True)[0] request_id, msg, size, _ = _op_msg( 0, spec, @@ -457,6 +463,9 @@ def __init__( self.exhaust = exhaust self.comment = comment + def reset(self): + self._as_command = None + def namespace(self): return "%s.%s" % (self.db, self.coll) @@ -471,7 +480,7 @@ def use_command(self, sock_info): sock_info.validate_session(self.client, self.session) return use_cmd - def as_command(self, sock_info): + def as_command(self, sock_info, apply_timeout=False): """Return a getMore command document for this query.""" # See _Query.as_command for an explanation of this caching. if self._as_command is not None: @@ -493,6 +502,9 @@ def as_command(self, sock_info): client = self.client if client._encrypter and not client._encrypter._bypass_auto_encryption: cmd = client._encrypter.encrypt(self.db, cmd, self.codec_options) + # Support CSOT + if apply_timeout: + sock_info.apply_timeout(client, cmd=None) self._as_command = cmd, self.db return self._as_command @@ -503,7 +515,7 @@ def get_message(self, dummy0, sock_info, use_cmd=False): ctx = sock_info.compression_context if use_cmd: - spec = self.as_command(sock_info)[0] + spec = self.as_command(sock_info, apply_timeout=True)[0] if self.sock_mgr: flags = _OpMsg.EXHAUST_ALLOWED else: diff --git a/pymongo/mongo_client.py b/pymongo/mongo_client.py index e1aa80e2f9..7af4b167f1 100644 --- a/pymongo/mongo_client.py +++ b/pymongo/mongo_client.py @@ -57,6 +57,7 @@ from bson.son import SON from bson.timestamp import Timestamp from pymongo import ( + _csot, client_session, common, database, @@ -260,6 +261,10 @@ def __init__( replaced. Defaults to `None` (no limit). - `maxConnecting` (optional): The maximum number of connections that each pool can establish concurrently. Defaults to `2`. + - `timeoutMS`: (integer or None) Controls how long (in + milliseconds) the driver will wait when executing an operation + (including retry attempts) before raising a timeout error. + ``0`` or ``None`` means no timeout. - `socketTimeoutMS`: (integer or None) Controls how long (in milliseconds) the driver will wait for a response after sending an ordinary (non-monitoring) database operation before concluding that @@ -540,6 +545,9 @@ def __init__( .. seealso:: The MongoDB documentation on `connections `_. + .. versionchanged:: 4.2 + Added the ``timeoutMS`` keyword argument. + .. versionchanged:: 4.0 - Removed the fsync, unlock, is_locked, database_names, and @@ -780,6 +788,7 @@ def __init__( options.read_preference, options.write_concern, options.read_concern, + options.timeout, ) self._topology_settings = TopologySettings( @@ -1273,6 +1282,7 @@ def _run_operation(self, operation, unpack_res, address=None): ) def _cmd(session, server, sock_info, read_preference): + operation.reset() # Reset op in case of retry. 
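+            # The reset above clears the cached command document so that a
+            # retried attempt re-encodes it, picking up a freshly computed
+            # maxTimeMS from the remaining CSOT budget.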
return server.run_operation( sock_info, operation, read_preference, self._event_listeners, unpack_res ) @@ -1303,6 +1313,7 @@ def _retry_internal(self, retryable, func, session, bulk): max_wire_version = 0 last_error: Optional[Exception] = None retrying = False + multiple_retries = _csot.get_timeout() is not None def is_retrying(): return bulk.retrying if bulk else retrying @@ -1350,7 +1361,7 @@ def is_retrying(): retryable_error = exc.has_error_label("RetryableWriteError") if retryable_error: session._unpin() - if is_retrying() or not retryable_error: + if not retryable_error or (is_retrying() and not multiple_retries): raise if bulk: bulk.retrying = True @@ -1371,6 +1382,7 @@ def _retryable_read(self, func, read_pref, session, address=None, retryable=True ) last_error: Optional[Exception] = None retrying = False + multiple_retries = _csot.get_timeout() is not None while True: try: @@ -1394,12 +1406,12 @@ def _retryable_read(self, func, read_pref, session, address=None, retryable=True # most likely be a waste of time. raise except ConnectionFailure as exc: - if not retryable or retrying: + if not retryable or (retrying and not multiple_retries): raise retrying = True last_error = exc except OperationFailure as exc: - if not retryable or retrying: + if not retryable or (retrying and not multiple_retries): raise if exc.code not in helpers._RETRYABLE_ERROR_CODES: raise @@ -1922,6 +1934,7 @@ def get_database( read_preference: Optional[_ServerMode] = None, write_concern: Optional[WriteConcern] = None, read_concern: Optional["ReadConcern"] = None, + timeout: Optional[float] = None, ) -> database.Database[_DocumentType]: """Get a :class:`~pymongo.database.Database` with the given name and options. @@ -1972,7 +1985,7 @@ def get_database( name = self.__default_database_name return database.Database( - self, name, codec_options, read_preference, write_concern, read_concern + self, name, codec_options, read_preference, write_concern, read_concern, timeout ) def _database_default_options(self, name): diff --git a/pymongo/network.py b/pymongo/network.py index df08158b2f..3eac0d02d3 100644 --- a/pymongo/network.py +++ b/pymongo/network.py @@ -21,7 +21,7 @@ import time from bson import _decode_all_selective -from pymongo import helpers, message +from pymongo import _csot, helpers, message, ssl_support from pymongo.common import MAX_MESSAGE_SIZE from pymongo.compression_support import _NO_COMPRESSION, decompress from pymongo.errors import ( @@ -59,6 +59,7 @@ def command( unacknowledged=False, user_fields=None, exhaust_allowed=False, + write_concern=None, ): """Execute a command over the socket, or raise socket.error. 
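Editor's note: the ``multiple_retries`` flag above changes retry semantics.
Outside a CSOT block, retryable reads and writes still make at most one retry
attempt; inside ``pymongo.timeout()``, attempts repeat until the shared
deadline expires. A simplified sketch of that loop, with illustrative names
(``run_with_retries`` is not part of the driver)::

    import time
    from typing import Any, Callable, Optional

    def run_with_retries(attempt: Callable[[], Any], deadline: Optional[float]) -> Any:
        retrying = False
        multiple_retries = deadline is not None
        while True:
            if deadline is not None and time.monotonic() >= deadline:
                raise TimeoutError("timed out")  # CSOT budget exhausted
            try:
                return attempt()
            except ConnectionError:
                if retrying and not multiple_retries:
                    # Legacy behavior: only a single retry is permitted.
                    raise
                retrying = True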
@@ -115,6 +116,12 @@ def command( if client and client._encrypter and not client._encrypter._bypass_auto_encryption: spec = orig = client._encrypter.encrypt(dbname, spec, codec_options) + # Support CSOT + if client: + sock_info.apply_timeout(client, spec, write_concern) + elif write_concern and not write_concern.is_server_default: + spec["writeConcern"] = write_concern.document + if use_op_msg: flags = _OpMsg.MORE_TO_COME if unacknowledged else 0 flags |= _OpMsg.EXHAUST_ALLOWED if exhaust_allowed else 0 @@ -198,11 +205,14 @@ def command( def receive_message(sock_info, request_id, max_message_size=MAX_MESSAGE_SIZE): """Receive a raw BSON message or raise socket.error.""" - timeout = sock_info.sock.gettimeout() - if timeout: - deadline = time.monotonic() + timeout + if _csot.get_timeout(): + deadline = _csot.get_deadline() else: - deadline = None + timeout = sock_info.sock.gettimeout() + if timeout: + deadline = time.monotonic() + timeout + else: + deadline = None # Ignore the response's request id. length, _, response_to, op_code = _UNPACK_HEADER( _receive_data_on_socket(sock_info, 16, deadline) @@ -271,6 +281,10 @@ def wait_for_read(sock_info, deadline): raise socket.timeout("timed out") +# Errors raised by sockets (and TLS sockets) when in non-blocking mode. +BLOCKING_IO_ERRORS = (BlockingIOError,) + ssl_support.BLOCKING_IO_ERRORS + + def _receive_data_on_socket(sock_info, length, deadline): buf = bytearray(length) mv = memoryview(buf) @@ -278,7 +292,14 @@ def _receive_data_on_socket(sock_info, length, deadline): while bytes_read < length: try: wait_for_read(sock_info, deadline) + # CSOT: Update timeout. When the timeout has expired perform one + # final non-blocking recv. This helps avoid spurious timeouts when + # the response is actually already buffered on the client. + if _csot.get_timeout(): + sock_info.set_socket_timeout(max(deadline - time.monotonic(), 0)) chunk_length = sock_info.sock.recv_into(mv[bytes_read:]) + except BLOCKING_IO_ERRORS: + raise socket.timeout("timed out") except (IOError, OSError) as exc: # noqa: B014 if _errno_from_exception(exc) == errno.EINTR: continue diff --git a/pymongo/ocsp_support.py b/pymongo/ocsp_support.py index 56d18a29bf..94905d9f47 100644 --- a/pymongo/ocsp_support.py +++ b/pymongo/ocsp_support.py @@ -48,6 +48,8 @@ from requests import post as _post from requests.exceptions import RequestException as _RequestException +from pymongo import _csot + # Note: the functions in this module generally return 1 or 0. The reason # is simple. The entry point, ocsp_callback, is registered as a callback # with OpenSSL through PyOpenSSL. The callback must return 1 (success) or @@ -235,12 +237,16 @@ def _get_ocsp_response(cert, issuer, uri, ocsp_response_cache): ocsp_response = ocsp_response_cache[ocsp_request] _LOGGER.debug("Using cached OCSP response.") except KeyError: + # CSOT: use the configured timeout or 5 seconds, whichever is smaller. 
+        # Note that requests' timeout works differently and does not imply an absolute
+        # deadline: https://requests.readthedocs.io/en/stable/user/quickstart/#timeouts
+        timeout = max(_csot.clamp_remaining(5), 0.001)
         try:
             response = _post(
                 uri,
                 data=ocsp_request.public_bytes(_Encoding.DER),
                 headers={"Content-Type": "application/ocsp-request"},
-                timeout=5,
+                timeout=timeout,
             )
         except _RequestException as exc:
             _LOGGER.debug("HTTP request failed: %s", exc)
diff --git a/pymongo/pool.py b/pymongo/pool.py
index d68ba238f2..8a1e72fc0d 100644
--- a/pymongo/pool.py
+++ b/pymongo/pool.py
@@ -27,7 +27,7 @@
 from bson import DEFAULT_CODEC_OPTIONS
 from bson.son import SON
-from pymongo import __version__, auth, helpers
+from pymongo import __version__, _csot, auth, helpers
 from pymongo.client_session import _validate_session_write_concern
 from pymongo.common import (
     MAX_BSON_SIZE,
@@ -46,6 +46,7 @@
     ConfigurationError,
     ConnectionFailure,
     DocumentTooLarge,
+    ExecutionTimeout,
     InvalidOperation,
     NetworkTimeout,
     NotPrimaryError,
@@ -557,6 +558,43 @@ def __init__(self, sock, pool, address, id):
         self.pinned_txn = False
         self.pinned_cursor = False
         self.active = False
+        self.last_timeout = self.opts.socket_timeout
+
+    def set_socket_timeout(self, timeout):
+        """Cache last timeout to avoid duplicate calls to sock.settimeout."""
+        if timeout == self.last_timeout:
+            return
+        self.last_timeout = timeout
+        self.sock.settimeout(timeout)
+
+    def apply_timeout(self, client, cmd, write_concern=None):
+        # CSOT: use remaining timeout when set.
+        timeout = _csot.remaining()
+        if timeout is None:
+            # Reset the socket timeout unless we're performing a streaming monitor check.
+            if not self.more_to_come:
+                self.set_socket_timeout(self.opts.socket_timeout)
+
+            if cmd and write_concern and not write_concern.is_server_default:
+                cmd["writeConcern"] = write_concern.document
+            return None
+        # RTT validation.
+        rtt = _csot.get_rtt()
+        max_time_ms = timeout - rtt
+        if max_time_ms < 0:
+            # CSOT: raise an error without running the command since we know it will time out.
+            errmsg = f"operation would exceed time limit, remaining timeout:{timeout:.5f} <= network round trip time:{rtt:.5f}"
+            raise ExecutionTimeout(
+                errmsg, 50, {"ok": 0, "errmsg": errmsg, "code": 50}, self.max_wire_version
+            )
+        if cmd is not None:
+            cmd["maxTimeMS"] = int(max_time_ms * 1000)
+            wc = write_concern.document if write_concern else {}
+            wc.pop("wtimeout", None)
+            if wc:
+                cmd["writeConcern"] = wc
+        self.set_socket_timeout(timeout)
+        return timeout
+
     def pin_txn(self):
         self.pinned_txn = True
@@ -602,7 +640,7 @@ def _hello(self, cluster_time, topology_version, heartbeat_frequency):
             awaitable = True
             # If connect_timeout is None there is no timeout.
if self.opts.connect_timeout: - self.sock.settimeout(self.opts.connect_timeout + heartbeat_frequency) + self.set_socket_timeout(self.opts.connect_timeout + heartbeat_frequency) if not performing_handshake and cluster_time is not None: cmd["$clusterTime"] = cluster_time @@ -714,8 +752,6 @@ def command( if not (write_concern is None or write_concern.acknowledged or collation is None): raise ConfigurationError("Collation is unsupported for unacknowledged writes.") - if write_concern and not write_concern.is_server_default: - spec["writeConcern"] = write_concern.document self.add_server_api(spec) if session: @@ -748,6 +784,7 @@ def command( unacknowledged=unacknowledged, user_fields=user_fields, exhaust_allowed=exhaust_allowed, + write_concern=write_concern, ) except (OperationFailure, NotPrimaryError): raise @@ -978,7 +1015,13 @@ def _create_connection(address, options): _set_non_inheritable_non_atomic(sock.fileno()) try: sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1) - sock.settimeout(options.connect_timeout) + # CSOT: apply timeout to socket connect. + timeout = _csot.remaining() + if timeout is None: + timeout = options.connect_timeout + elif timeout <= 0: + raise socket.timeout("timed out") + sock.settimeout(timeout) sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, True) _set_keepalive_times(sock) sock.connect(sa) @@ -1416,7 +1459,9 @@ def _get_socket(self): self.operation_count += 1 # Get a free socket or create one. - if self.opts.wait_queue_timeout: + if _csot.get_timeout(): + deadline = _csot.get_deadline() + elif self.opts.wait_queue_timeout: deadline = time.monotonic() + self.opts.wait_queue_timeout else: deadline = None @@ -1582,25 +1627,25 @@ def _raise_wait_queue_timeout(self) -> NoReturn: listeners.publish_connection_check_out_failed( self.address, ConnectionCheckOutFailedReason.TIMEOUT ) + timeout = _csot.get_timeout() or self.opts.wait_queue_timeout if self.opts.load_balanced: other_ops = self.active_sockets - self.ncursors - self.ntxns raise ConnectionFailure( "Timeout waiting for connection from the connection pool. " "maxPoolSize: %s, connections in use by cursors: %s, " "connections in use by transactions: %s, connections in use " - "by other operations: %s, wait_queue_timeout: %s" + "by other operations: %s, timeout: %s" % ( self.opts.max_pool_size, self.ncursors, self.ntxns, other_ops, - self.opts.wait_queue_timeout, + timeout, ) ) raise ConnectionFailure( "Timed out while checking out a connection from connection pool. " - "maxPoolSize: %s, wait_queue_timeout: %s" - % (self.opts.max_pool_size, self.opts.wait_queue_timeout) + "maxPoolSize: %s, timeout: %s" % (self.opts.max_pool_size, timeout) ) def __del__(self): diff --git a/pymongo/pyopenssl_context.py b/pymongo/pyopenssl_context.py index 1a57ff4f2b..758a741b6f 100644 --- a/pymongo/pyopenssl_context.py +++ b/pymongo/pyopenssl_context.py @@ -82,7 +82,7 @@ def _is_ip_address(address): # According to the docs for Connection.send it can raise # WantX509LookupError and should be retried. 
-_RETRY_ERRORS = (_SSL.WantReadError, _SSL.WantWriteError, _SSL.WantX509LookupError) +BLOCKING_IO_ERRORS = (_SSL.WantReadError, _SSL.WantWriteError, _SSL.WantX509LookupError) def _ragged_eof(exc): @@ -106,7 +106,7 @@ def _call(self, call, *args, **kwargs): while True: try: return call(*args, **kwargs) - except _RETRY_ERRORS as exc: + except BLOCKING_IO_ERRORS as exc: if isinstance(exc, _SSL.WantReadError): want_read = True want_write = False diff --git a/pymongo/ssl_context.py b/pymongo/ssl_context.py index 4e997a439e..63970cb5e2 100644 --- a/pymongo/ssl_context.py +++ b/pymongo/ssl_context.py @@ -27,6 +27,9 @@ HAS_SNI = getattr(_ssl, "HAS_SNI", False) IS_PYOPENSSL = False +# Errors raised by SSL sockets when in non-blocking mode. +BLOCKING_IO_ERRORS = (_ssl.SSLWantReadError, _ssl.SSLWantWriteError) + # Base Exception class SSLError = _ssl.SSLError diff --git a/pymongo/ssl_support.py b/pymongo/ssl_support.py index d1381ce0e4..13c5315eee 100644 --- a/pymongo/ssl_support.py +++ b/pymongo/ssl_support.py @@ -38,6 +38,7 @@ HAS_SNI = _ssl.HAS_SNI IPADDR_SAFE = True SSLError = _ssl.SSLError + BLOCKING_IO_ERRORS = _ssl.BLOCKING_IO_ERRORS def get_ssl_context( certfile, @@ -91,6 +92,7 @@ class SSLError(Exception): # type: ignore HAS_SNI = False IPADDR_SAFE = False + BLOCKING_IO_ERRORS = () # type: ignore def get_ssl_context(*dummy): # type: ignore """No ssl module, raise ConfigurationError.""" diff --git a/pymongo/topology.py b/pymongo/topology.py index 4b5ff87bb5..db832a8e55 100644 --- a/pymongo/topology.py +++ b/pymongo/topology.py @@ -23,7 +23,7 @@ import weakref from typing import Any -from pymongo import common, helpers, periodic_executor +from pymongo import _csot, common, helpers, periodic_executor from pymongo.client_session import _ServerSessionPool from pymongo.errors import ( ConfigurationError, @@ -191,6 +191,13 @@ def open(self): with self._lock: self._ensure_opened() + def get_server_selection_timeout(self): + # CSOT: use remaining timeout when set. + timeout = _csot.remaining() + if timeout is None: + return self._settings.server_selection_timeout + return timeout + def select_servers(self, selector, server_selection_timeout=None, address=None): """Return a list of Servers matching selector, or time out. @@ -208,7 +215,7 @@ def select_servers(self, selector, server_selection_timeout=None, address=None): `server_selection_timeout` if no matching servers are found. 
""" if server_selection_timeout is None: - server_timeout = self._settings.server_selection_timeout + server_timeout = self.get_server_selection_timeout() else: server_timeout = server_selection_timeout @@ -250,8 +257,7 @@ def _select_servers_loop(self, selector, timeout, address): self._description.check_compatible() return server_descriptions - def select_server(self, selector, server_selection_timeout=None, address=None): - """Like select_servers, but choose a random server if several match.""" + def _select_server(self, selector, server_selection_timeout=None, address=None): servers = self.select_servers(selector, server_selection_timeout, address) if len(servers) == 1: return servers[0] @@ -261,6 +267,12 @@ def select_server(self, selector, server_selection_timeout=None, address=None): else: return server2 + def select_server(self, selector, server_selection_timeout=None, address=None): + """Like select_servers, but choose a random server if several match.""" + server = self._select_server(selector, server_selection_timeout, address) + _csot.set_rtt(server.description.round_trip_time) + return server + def select_server_by_address(self, address, server_selection_timeout=None): """Return a Server for "address", reconnecting if necessary. @@ -535,11 +547,11 @@ def _check_session_support(self): if self._description.topology_type == TOPOLOGY_TYPE.Single: if not self._description.has_known_servers: self._select_servers_loop( - any_server_selector, self._settings.server_selection_timeout, None + any_server_selector, self.get_server_selection_timeout(), None ) elif not self._description.readable_servers: self._select_servers_loop( - readable_server_selector, self._settings.server_selection_timeout, None + readable_server_selector, self.get_server_selection_timeout(), None ) session_timeout = self._description.logical_session_timeout_minutes diff --git a/test/csot/bulkWrite.json b/test/csot/bulkWrite.json new file mode 100644 index 0000000000..14d5b654f6 --- /dev/null +++ b/test/csot/bulkWrite.json @@ -0,0 +1,159 @@ +{ + "description": "timeoutMS behaves correctly for bulkWrite operations", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "createEntities": [ + { + "client": { + "id": "failPointClient", + "useMultipleMongoses": false + } + }, + { + "client": { + "id": "client", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "test", + "documents": [] + } + ], + "tests": [ + { + "description": "timeoutMS applied to entire bulkWrite, not individual commands", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "insert", + "update" + ], + "blockConnection": true, + "blockTimeMS": 120 + } + } + } + }, + { + "name": "find", + "object": "collection", + "arguments": { + "filter": { + "_id": 1 + } + } + }, + { + "name": "bulkWrite", + "object": "collection", + "arguments": { + "requests": [ + { + "insertOne": { + "document": { + "_id": 1 + } + } + }, + { + "replaceOne": { + "filter": { + "_id": 1 + }, + "replacement": { + "x": 1 + } + } + } + ], + "timeoutMS": 
200 + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll" + } + } + }, + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "update", + "databaseName": "test", + "command": { + "update": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + } + ] +} diff --git a/test/csot/change-streams.json b/test/csot/change-streams.json new file mode 100644 index 0000000000..a8b2b7e170 --- /dev/null +++ b/test/csot/change-streams.json @@ -0,0 +1,598 @@ +{ + "description": "timeoutMS behaves correctly for change streams", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.4", + "topologies": [ + "replicaset", + "sharded-replicaset" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "failPointClient", + "useMultipleMongoses": false + } + }, + { + "client": { + "id": "client", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "test", + "documents": [] + } + ], + "tests": [ + { + "description": "error if maxAwaitTimeMS is greater than timeoutMS", + "operations": [ + { + "name": "createChangeStream", + "object": "collection", + "arguments": { + "pipeline": [], + "timeoutMS": 5, + "maxAwaitTimeMS": 10 + }, + "expectError": { + "isClientError": true + } + } + ] + }, + { + "description": "error if maxAwaitTimeMS is equal to timeoutMS", + "operations": [ + { + "name": "createChangeStream", + "object": "collection", + "arguments": { + "pipeline": [], + "timeoutMS": 5, + "maxAwaitTimeMS": 5 + }, + "expectError": { + "isClientError": true + } + } + ] + }, + { + "description": "timeoutMS applied to initial aggregate", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 55 + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection", + "arguments": { + "pipeline": [], + "timeoutMS": 50 + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS is refreshed for getMore if maxAwaitTimeMS is not set", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "aggregate", + "getMore" + ], + "blockConnection": true, + "blockTimeMS": 30 + } + } + } + }, + { + "name": 
"createChangeStream", + "object": "collection", + "arguments": { + "pipeline": [], + "timeoutMS": 1050 + }, + "saveResultAsEntity": "changeStream" + }, + { + "name": "iterateOnce", + "object": "changeStream" + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "getMore", + "databaseName": "test", + "command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "collection": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS is refreshed for getMore if maxAwaitTimeMS is set", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "aggregate", + "getMore" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection", + "arguments": { + "pipeline": [], + "timeoutMS": 20, + "batchSize": 2, + "maxAwaitTimeMS": 1 + }, + "saveResultAsEntity": "changeStream" + }, + { + "name": "iterateOnce", + "object": "changeStream" + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "getMore", + "databaseName": "test", + "command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "collection": "coll", + "maxTimeMS": 1 + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS applies to full resume attempt in a next call", + "operations": [ + { + "name": "createChangeStream", + "object": "collection", + "arguments": { + "pipeline": [], + "timeoutMS": 20 + }, + "saveResultAsEntity": "changeStream" + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "getMore", + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 12, + "errorCode": 7, + "errorLabels": [ + "ResumableChangeStreamError" + ] + } + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream", + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "getMore", + "databaseName": "test", + "command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "collection": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "change stream can be iterated again if previous iteration times out", + "operations": [ + { + "name": "createChangeStream", + "object": "collection", + 
"arguments": { + "pipeline": [], + "maxAwaitTimeMS": 1, + "timeoutMS": 100 + }, + "saveResultAsEntity": "changeStream" + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "getMore" + ], + "blockConnection": true, + "blockTimeMS": 150 + } + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream", + "expectError": { + "isTimeoutError": true + } + }, + { + "name": "iterateOnce", + "object": "changeStream" + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "getMore", + "databaseName": "test", + "command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "collection": "coll" + } + } + }, + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "getMore", + "databaseName": "test", + "command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "collection": "coll" + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS is refreshed for getMore - failure", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "getMore" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection", + "arguments": { + "pipeline": [], + "timeoutMS": 10 + }, + "saveResultAsEntity": "changeStream" + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream", + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "getMore", + "databaseName": "test", + "command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "collection": "coll" + } + } + } + ] + } + ] + } + ] +} diff --git a/test/csot/close-cursors.json b/test/csot/close-cursors.json new file mode 100644 index 0000000000..1361971c4c --- /dev/null +++ b/test/csot/close-cursors.json @@ -0,0 +1,239 @@ +{ + "description": "timeoutMS behaves correctly when closing cursors", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "createEntities": [ + { + "client": { + "id": "failPointClient", + "useMultipleMongoses": false + } + }, + { + "client": { + "id": "client", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent", + "commandSucceededEvent", + "commandFailedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "test", + "documents": [ 
+ { + "_id": 0 + }, + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ], + "tests": [ + { + "description": "timeoutMS is refreshed for close", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "getMore" + ], + "blockConnection": true, + "blockTimeMS": 50 + } + } + } + }, + { + "name": "createFindCursor", + "object": "collection", + "arguments": { + "filter": {}, + "batchSize": 2, + "timeoutMS": 20 + }, + "saveResultAsEntity": "cursor" + }, + { + "name": "iterateUntilDocumentOrError", + "object": "cursor" + }, + { + "name": "iterateUntilDocumentOrError", + "object": "cursor" + }, + { + "name": "iterateUntilDocumentOrError", + "object": "cursor", + "expectError": { + "isTimeoutError": true + } + }, + { + "name": "close", + "object": "cursor" + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "find" + } + }, + { + "commandSucceededEvent": { + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "commandName": "getMore" + } + }, + { + "commandFailedEvent": { + "commandName": "getMore" + } + }, + { + "commandStartedEvent": { + "command": { + "killCursors": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + }, + "commandName": "killCursors" + } + }, + { + "commandSucceededEvent": { + "commandName": "killCursors" + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be overridden for close", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "killCursors" + ], + "blockConnection": true, + "blockTimeMS": 30 + } + } + } + }, + { + "name": "createFindCursor", + "object": "collection", + "arguments": { + "filter": {}, + "batchSize": 2, + "timeoutMS": 20 + }, + "saveResultAsEntity": "cursor" + }, + { + "name": "close", + "object": "cursor", + "arguments": { + "timeoutMS": 40 + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "find" + } + }, + { + "commandSucceededEvent": { + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "killCursors": "collection", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + }, + "commandName": "killCursors" + } + }, + { + "commandSucceededEvent": { + "commandName": "killCursors" + } + } + ] + } + ] + } + ] +} diff --git a/test/csot/command-execution.json b/test/csot/command-execution.json new file mode 100644 index 0000000000..f51b09d2d7 --- /dev/null +++ b/test/csot/command-execution.json @@ -0,0 +1,260 @@ +{ + "description": "timeoutMS behaves correctly during command execution", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.9" + } + ], + "createEntities": [ + { + "client": { + "id": "failPointClient", + "useMultipleMongoses": false + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "test", + "documents": [] + }, + { + "collectionName": "timeoutColl", + "databaseName": "test", + "documents": [] + } + ], + "tests": [ + { + "description": "maxTimeMS value in the command is less than timeoutMS", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + 
"configureFailPoint": "failCommand", + "mode": "alwaysOn", + "data": { + "failCommands": [ + "hello", + "isMaster" + ], + "appName": "reduceMaxTimeMSTest", + "blockConnection": true, + "blockTimeMS": 20 + } + } + } + }, + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "useMultipleMongoses": false, + "uriOptions": { + "appName": "reduceMaxTimeMSTest", + "w": 1 + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "regularCollection", + "database": "database", + "collectionName": "coll" + } + }, + { + "collection": { + "id": "timeoutCollection", + "database": "database", + "collectionName": "timeoutColl", + "collectionOptions": { + "timeoutMS": 60 + } + } + } + ] + } + }, + { + "name": "insertOne", + "object": "regularCollection", + "arguments": { + "document": { + "_id": 1 + } + } + }, + { + "name": "insertOne", + "object": "timeoutCollection", + "arguments": { + "document": { + "_id": 2 + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "timeoutColl", + "maxTimeMS": { + "$$lte": 60 + } + } + } + } + ] + } + ] + }, + { + "description": "command is not sent if RTT is greater than timeoutMS", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": "alwaysOn", + "data": { + "failCommands": [ + "hello", + "isMaster" + ], + "appName": "rttTooHighTest", + "blockConnection": true, + "blockTimeMS": 20 + } + } + } + }, + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "useMultipleMongoses": false, + "uriOptions": { + "appName": "rttTooHighTest", + "w": 1 + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "regularCollection", + "database": "database", + "collectionName": "coll" + } + }, + { + "collection": { + "id": "timeoutCollection", + "database": "database", + "collectionName": "timeoutColl", + "collectionOptions": { + "timeoutMS": 2 + } + } + } + ] + } + }, + { + "name": "insertOne", + "object": "regularCollection", + "arguments": { + "document": { + "_id": 1 + } + } + }, + { + "name": "insertOne", + "object": "timeoutCollection", + "arguments": { + "document": { + "_id": 2 + } + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + } + ] +} diff --git a/test/csot/convenient-transactions.json b/test/csot/convenient-transactions.json new file mode 100644 index 0000000000..0c8cc6edd9 --- /dev/null +++ b/test/csot/convenient-transactions.json @@ -0,0 +1,191 @@ +{ + "description": "timeoutMS behaves correctly for the withTransaction API", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + 
"minServerVersion": "4.4", + "topologies": [ + "replicaset", + "sharded-replicaset" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "failPointClient", + "useMultipleMongoses": false + } + }, + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 50 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "test", + "documents": [] + } + ], + "tests": [ + { + "description": "withTransaction raises a client-side error if timeoutMS is overridden inside the callback", + "operations": [ + { + "name": "withTransaction", + "object": "session", + "arguments": { + "callback": [ + { + "name": "insertOne", + "object": "collection", + "arguments": { + "document": { + "_id": 1 + }, + "session": "session", + "timeoutMS": 100 + }, + "expectError": { + "isClientError": true + } + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [] + } + ] + }, + { + "description": "timeoutMS is not refreshed for each operation in the callback", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "insert" + ], + "blockConnection": true, + "blockTimeMS": 30 + } + } + } + }, + { + "name": "withTransaction", + "object": "session", + "arguments": { + "callback": [ + { + "name": "insertOne", + "object": "collection", + "arguments": { + "document": { + "_id": 1 + }, + "session": "session" + } + }, + { + "name": "insertOne", + "object": "collection", + "arguments": { + "document": { + "_id": 2 + }, + "session": "session" + }, + "expectError": { + "isTimeoutError": true + } + } + ] + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + } + ] +} diff --git a/test/csot/cursors.json b/test/csot/cursors.json new file mode 100644 index 0000000000..36949d7509 --- /dev/null +++ b/test/csot/cursors.json @@ -0,0 +1,113 @@ +{ + "description": "tests for timeoutMS behavior that applies to all cursor types", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client" + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "test", + "documents": [] + } + ], + "tests": [ + { + "description": "find errors if timeoutMode is set and timeoutMS is not", + "operations": [ + { + "name": "find", + "object": "collection", + "arguments": { + "filter": {}, + "timeoutMode": "cursorLifetime" + }, + "expectError": { + "isClientError": true + } + } + ] + 
}, + { + "description": "collection aggregate errors if timeoutMode is set and timeoutMS is not", + "operations": [ + { + "name": "aggregate", + "object": "collection", + "arguments": { + "pipeline": [], + "timeoutMode": "cursorLifetime" + }, + "expectError": { + "isClientError": true + } + } + ] + }, + { + "description": "database aggregate errors if timeoutMode is set and timeoutMS is not", + "operations": [ + { + "name": "aggregate", + "object": "database", + "arguments": { + "pipeline": [], + "timeoutMode": "cursorLifetime" + }, + "expectError": { + "isClientError": true + } + } + ] + }, + { + "description": "listCollections errors if timeoutMode is set and timeoutMS is not", + "operations": [ + { + "name": "listCollections", + "object": "database", + "arguments": { + "filter": {}, + "timeoutMode": "cursorLifetime" + }, + "expectError": { + "isClientError": true + } + } + ] + }, + { + "description": "listIndexes errors if timeoutMode is set and timeoutMS is not", + "operations": [ + { + "name": "listIndexes", + "object": "collection", + "arguments": { + "timeoutMode": "cursorLifetime" + }, + "expectError": { + "isClientError": true + } + } + ] + } + ] +} diff --git a/test/csot/deprecated-options.json b/test/csot/deprecated-options.json new file mode 100644 index 0000000000..0e2bdefd73 --- /dev/null +++ b/test/csot/deprecated-options.json @@ -0,0 +1,7179 @@ +{ + "description": "operations ignore deprected timeout options if timeoutMS is set", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.2", + "topologies": [ + "replicaset", + "sharded-replicaset" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "failPointClient", + "useMultipleMongoses": false + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "test", + "documents": [] + } + ], + "tests": [ + { + "description": "commitTransaction ignores socketTimeoutMS if timeoutMS is set", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "socketTimeoutMS": 20 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "aggregate" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "commitTransaction" + ], + "blockConnection": true, + "blockTimeMS": 5 + } + } + } + }, + { + "name": "startTransaction", + "object": "session" + }, + { + "name": "countDocuments", + "object": "collection", + "arguments": { + "filter": {}, + "session": "session" + } + }, + { + "name": "commitTransaction", + "object": "session", + "arguments": { + "timeoutMS": 10000 + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "commitTransaction", + "databaseName": "admin", + "command": { + "commitTransaction": 1, + "writeConcern": { + "$$exists": false + }, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "commitTransaction ignores wTimeoutMS if 
timeoutMS is set", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "wTimeoutMS": 1 + }, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "aggregate" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "startTransaction", + "object": "session" + }, + { + "name": "countDocuments", + "object": "collection", + "arguments": { + "filter": {}, + "session": "session" + } + }, + { + "name": "commitTransaction", + "object": "session", + "arguments": { + "timeoutMS": 10000 + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "commitTransaction", + "databaseName": "admin", + "command": { + "commitTransaction": 1, + "writeConcern": { + "$$exists": false + }, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "commitTransaction ignores maxCommitTimeMS if timeoutMS is set", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "aggregate" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "session": { + "id": "session", + "client": "client", + "sessionOptions": { + "defaultTransactionOptions": { + "maxCommitTimeMS": 5000 + } + } + } + } + ] + } + }, + { + "name": "startTransaction", + "object": "session" + }, + { + "name": "countDocuments", + "object": "collection", + "arguments": { + "filter": {}, + "session": "session" + } + }, + { + "name": "commitTransaction", + "object": "session", + "arguments": { + "timeoutMS": 1000 + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "commitTransaction", + "databaseName": "admin", + "command": { + "commitTransaction": 1, + "maxTimeMS": { + "$$lte": 1000 + } + } + } + } + ] + } + ] + }, + { + "description": "abortTransaction ignores socketTimeoutMS if timeoutMS is set", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "socketTimeoutMS": 20 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "aggregate" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "abortTransaction" + ], + "blockConnection": true, + "blockTimeMS": 5 + } + } + } + }, + { + "name": 
"startTransaction", + "object": "session" + }, + { + "name": "countDocuments", + "object": "collection", + "arguments": { + "filter": {}, + "session": "session" + } + }, + { + "name": "abortTransaction", + "object": "session", + "arguments": { + "timeoutMS": 10000 + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "abortTransaction", + "databaseName": "admin", + "command": { + "abortTransaction": 1, + "writeConcern": { + "$$exists": false + }, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "abortTransaction ignores wTimeoutMS if timeoutMS is set", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "wTimeoutMS": 1 + }, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "aggregate" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "startTransaction", + "object": "session" + }, + { + "name": "countDocuments", + "object": "collection", + "arguments": { + "filter": {}, + "session": "session" + } + }, + { + "name": "abortTransaction", + "object": "session", + "arguments": { + "timeoutMS": 10000 + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "abortTransaction", + "databaseName": "admin", + "command": { + "abortTransaction": 1, + "writeConcern": { + "$$exists": false + }, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "withTransaction ignores socketTimeoutMS if timeoutMS is set", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "socketTimeoutMS": 20 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "commitTransaction" + ], + "blockConnection": true, + "blockTimeMS": 5 + } + } + } + }, + { + "name": "withTransaction", + "object": "session", + "arguments": { + "timeoutMS": 10000, + "callback": [ + { + "name": "countDocuments", + "object": "collection", + "arguments": { + "filter": {}, + "session": "session" + } + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "commitTransaction", + "databaseName": "admin", + "command": { + "commitTransaction": 1, + "writeConcern": { + "$$exists": false + }, + "maxTimeMS": { + 
"$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "withTransaction ignores wTimeoutMS if timeoutMS is set", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "wTimeoutMS": 1 + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "withTransaction", + "object": "session", + "arguments": { + "timeoutMS": 10000, + "callback": [ + { + "name": "countDocuments", + "object": "collection", + "arguments": { + "filter": {}, + "session": "session" + } + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "writeConcern": { + "$$exists": false + }, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "commitTransaction", + "databaseName": "admin", + "command": { + "commitTransaction": 1, + "writeConcern": { + "$$exists": false + }, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "withTransaction ignores maxCommitTimeMS if timeoutMS is set", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "session": { + "id": "session", + "client": "client", + "sessionOptions": { + "defaultTransactionOptions": { + "maxCommitTimeMS": 5000 + } + } + } + } + ] + } + }, + { + "name": "withTransaction", + "object": "session", + "arguments": { + "timeoutMS": 1000, + "callback": [ + { + "name": "countDocuments", + "object": "collection", + "arguments": { + "filter": {}, + "session": "session" + } + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "writeConcern": { + "$$exists": false + }, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "commitTransaction", + "databaseName": "admin", + "command": { + "commitTransaction": 1, + "maxTimeMS": { + "$$lte": 1000 + } + } + } + } + ] + } + ] + }, + { + "description": "socketTimeoutMS is ignored if timeoutMS is set - listDatabases on client", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "socketTimeoutMS": 1 + }, + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + 
"id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listDatabases" + ], + "blockConnection": true, + "blockTimeMS": 5 + } + } + } + }, + { + "name": "listDatabases", + "object": "client", + "arguments": { + "timeoutMS": 100000, + "filter": {} + } + } + ] + }, + { + "description": "wTimeoutMS is ignored if timeoutMS is set - listDatabases on client", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "wTimeoutMS": 1 + }, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "listDatabases", + "object": "client", + "arguments": { + "timeoutMS": 100000, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listDatabases", + "databaseName": "admin", + "command": { + "listDatabases": 1, + "writeConcern": { + "$$exists": false + }, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "socketTimeoutMS is ignored if timeoutMS is set - listDatabaseNames on client", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "socketTimeoutMS": 1 + }, + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listDatabases" + ], + "blockConnection": true, + "blockTimeMS": 5 + } + } + } + }, + { + "name": "listDatabaseNames", + "object": "client", + "arguments": { + "timeoutMS": 100000 + } + } + ] + }, + { + "description": "wTimeoutMS is ignored if timeoutMS is set - listDatabaseNames on client", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "wTimeoutMS": 1 + }, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": 
"listDatabaseNames", + "object": "client", + "arguments": { + "timeoutMS": 100000 + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listDatabases", + "databaseName": "admin", + "command": { + "listDatabases": 1, + "writeConcern": { + "$$exists": false + }, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "socketTimeoutMS is ignored if timeoutMS is set - createChangeStream on client", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "socketTimeoutMS": 1 + }, + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 5 + } + } + } + }, + { + "name": "createChangeStream", + "object": "client", + "arguments": { + "timeoutMS": 100000, + "pipeline": [] + } + } + ] + }, + { + "description": "wTimeoutMS is ignored if timeoutMS is set - createChangeStream on client", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "wTimeoutMS": 1 + }, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "createChangeStream", + "object": "client", + "arguments": { + "timeoutMS": 100000, + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "admin", + "command": { + "aggregate": 1, + "writeConcern": { + "$$exists": false + }, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "socketTimeoutMS is ignored if timeoutMS is set - aggregate on database", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "socketTimeoutMS": 1 + }, + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": 
"failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 5 + } + } + } + }, + { + "name": "aggregate", + "object": "database", + "arguments": { + "timeoutMS": 100000, + "pipeline": [ + { + "$listLocalSessions": {} + }, + { + "$limit": 1 + } + ] + } + } + ] + }, + { + "description": "wTimeoutMS is ignored if timeoutMS is set - aggregate on database", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "wTimeoutMS": 1 + }, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "aggregate", + "object": "database", + "arguments": { + "timeoutMS": 100000, + "pipeline": [ + { + "$listLocalSessions": {} + }, + { + "$limit": 1 + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": 1, + "writeConcern": { + "$$exists": false + }, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "maxTimeMS is ignored if timeoutMS is set - aggregate on database", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "aggregate", + "object": "database", + "arguments": { + "timeoutMS": 1000, + "maxTimeMS": 5000, + "pipeline": [ + { + "$listLocalSessions": {} + }, + { + "$limit": 1 + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": 1, + "maxTimeMS": { + "$$lte": 1000 + } + } + } + } + ] + } + ] + }, + { + "description": "socketTimeoutMS is ignored if timeoutMS is set - listCollections on database", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "socketTimeoutMS": 1 + }, + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + 
"failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listCollections" + ], + "blockConnection": true, + "blockTimeMS": 5 + } + } + } + }, + { + "name": "listCollections", + "object": "database", + "arguments": { + "timeoutMS": 100000, + "filter": {} + } + } + ] + }, + { + "description": "wTimeoutMS is ignored if timeoutMS is set - listCollections on database", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "wTimeoutMS": 1 + }, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "listCollections", + "object": "database", + "arguments": { + "timeoutMS": 100000, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listCollections", + "databaseName": "test", + "command": { + "listCollections": 1, + "writeConcern": { + "$$exists": false + }, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "socketTimeoutMS is ignored if timeoutMS is set - listCollectionNames on database", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "socketTimeoutMS": 1 + }, + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listCollections" + ], + "blockConnection": true, + "blockTimeMS": 5 + } + } + } + }, + { + "name": "listCollectionNames", + "object": "database", + "arguments": { + "timeoutMS": 100000, + "filter": {} + } + } + ] + }, + { + "description": "wTimeoutMS is ignored if timeoutMS is set - listCollectionNames on database", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "wTimeoutMS": 1 + }, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "listCollectionNames", + "object": "database", + "arguments": { + "timeoutMS": 100000, + "filter": {} + } + } + ], + 
"expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listCollections", + "databaseName": "test", + "command": { + "listCollections": 1, + "writeConcern": { + "$$exists": false + }, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "socketTimeoutMS is ignored if timeoutMS is set - runCommand on database", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "socketTimeoutMS": 1 + }, + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "ping" + ], + "blockConnection": true, + "blockTimeMS": 5 + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "timeoutMS": 100000, + "command": { + "ping": 1 + }, + "commandName": "ping" + } + } + ] + }, + { + "description": "wTimeoutMS is ignored if timeoutMS is set - runCommand on database", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "wTimeoutMS": 1 + }, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "timeoutMS": 100000, + "command": { + "ping": 1 + }, + "commandName": "ping" + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "ping", + "databaseName": "test", + "command": { + "ping": 1, + "writeConcern": { + "$$exists": false + }, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "socketTimeoutMS is ignored if timeoutMS is set - createChangeStream on database", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "socketTimeoutMS": 1 + }, + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { 
+ "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 5 + } + } + } + }, + { + "name": "createChangeStream", + "object": "database", + "arguments": { + "timeoutMS": 100000, + "pipeline": [] + } + } + ] + }, + { + "description": "wTimeoutMS is ignored if timeoutMS is set - createChangeStream on database", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "wTimeoutMS": 1 + }, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "createChangeStream", + "object": "database", + "arguments": { + "timeoutMS": 100000, + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": 1, + "writeConcern": { + "$$exists": false + }, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "socketTimeoutMS is ignored if timeoutMS is set - aggregate on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "socketTimeoutMS": 1 + }, + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 5 + } + } + } + }, + { + "name": "aggregate", + "object": "collection", + "arguments": { + "timeoutMS": 100000, + "pipeline": [] + } + } + ] + }, + { + "description": "wTimeoutMS is ignored if timeoutMS is set - aggregate on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "wTimeoutMS": 1 + }, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "aggregate", + "object": "collection", + "arguments": { + "timeoutMS": 100000, + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", 
+ "command": { + "aggregate": "coll", + "writeConcern": { + "$$exists": false + }, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "maxTimeMS is ignored if timeoutMS is set - aggregate on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "aggregate", + "object": "collection", + "arguments": { + "timeoutMS": 1000, + "maxTimeMS": 5000, + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "maxTimeMS": { + "$$lte": 1000 + } + } + } + } + ] + } + ] + }, + { + "description": "socketTimeoutMS is ignored if timeoutMS is set - count on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "socketTimeoutMS": 1 + }, + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "count" + ], + "blockConnection": true, + "blockTimeMS": 5 + } + } + } + }, + { + "name": "count", + "object": "collection", + "arguments": { + "timeoutMS": 100000, + "filter": {} + } + } + ] + }, + { + "description": "wTimeoutMS is ignored if timeoutMS is set - count on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "wTimeoutMS": 1 + }, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "count", + "object": "collection", + "arguments": { + "timeoutMS": 100000, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "count", + "databaseName": "test", + "command": { + "count": "coll", + "writeConcern": { + "$$exists": false + }, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] 
+ }, + { + "description": "maxTimeMS is ignored if timeoutMS is set - count on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "count", + "object": "collection", + "arguments": { + "timeoutMS": 1000, + "maxTimeMS": 5000, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "count", + "databaseName": "test", + "command": { + "count": "coll", + "maxTimeMS": { + "$$lte": 1000 + } + } + } + } + ] + } + ] + }, + { + "description": "socketTimeoutMS is ignored if timeoutMS is set - countDocuments on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "socketTimeoutMS": 1 + }, + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 5 + } + } + } + }, + { + "name": "countDocuments", + "object": "collection", + "arguments": { + "timeoutMS": 100000, + "filter": {} + } + } + ] + }, + { + "description": "wTimeoutMS is ignored if timeoutMS is set - countDocuments on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "wTimeoutMS": 1 + }, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "countDocuments", + "object": "collection", + "arguments": { + "timeoutMS": 100000, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "writeConcern": { + "$$exists": false + }, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "socketTimeoutMS is ignored if timeoutMS is set - estimatedDocumentCount on collection", + "operations": [ + 
{ + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "socketTimeoutMS": 1 + }, + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "count" + ], + "blockConnection": true, + "blockTimeMS": 5 + } + } + } + }, + { + "name": "estimatedDocumentCount", + "object": "collection", + "arguments": { + "timeoutMS": 100000 + } + } + ] + }, + { + "description": "wTimeoutMS is ignored if timeoutMS is set - estimatedDocumentCount on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "wTimeoutMS": 1 + }, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "estimatedDocumentCount", + "object": "collection", + "arguments": { + "timeoutMS": 100000 + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "count", + "databaseName": "test", + "command": { + "count": "coll", + "writeConcern": { + "$$exists": false + }, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "maxTimeMS is ignored if timeoutMS is set - estimatedDocumentCount on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "estimatedDocumentCount", + "object": "collection", + "arguments": { + "timeoutMS": 1000, + "maxTimeMS": 5000 + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "count", + "databaseName": "test", + "command": { + "count": "coll", + "maxTimeMS": { + "$$lte": 1000 + } + } + } + } + ] + } + ] + }, + { + "description": "socketTimeoutMS is ignored if timeoutMS is set - distinct on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { 
+ "socketTimeoutMS": 1 + }, + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "distinct" + ], + "blockConnection": true, + "blockTimeMS": 5 + } + } + } + }, + { + "name": "distinct", + "object": "collection", + "arguments": { + "timeoutMS": 100000, + "fieldName": "x", + "filter": {} + } + } + ] + }, + { + "description": "wTimeoutMS is ignored if timeoutMS is set - distinct on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "wTimeoutMS": 1 + }, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "distinct", + "object": "collection", + "arguments": { + "timeoutMS": 100000, + "fieldName": "x", + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "distinct", + "databaseName": "test", + "command": { + "distinct": "coll", + "writeConcern": { + "$$exists": false + }, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "maxTimeMS is ignored if timeoutMS is set - distinct on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "distinct", + "object": "collection", + "arguments": { + "timeoutMS": 1000, + "maxTimeMS": 5000, + "fieldName": "x", + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "distinct", + "databaseName": "test", + "command": { + "distinct": "coll", + "maxTimeMS": { + "$$lte": 1000 + } + } + } + } + ] + } + ] + }, + { + "description": "socketTimeoutMS is ignored if timeoutMS is set - find on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "socketTimeoutMS": 1 + }, + "useMultipleMongoses": false + } + }, + { + "database": { + 
"id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "blockConnection": true, + "blockTimeMS": 5 + } + } + } + }, + { + "name": "find", + "object": "collection", + "arguments": { + "timeoutMS": 100000, + "filter": {} + } + } + ] + }, + { + "description": "wTimeoutMS is ignored if timeoutMS is set - find on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "wTimeoutMS": 1 + }, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "find", + "object": "collection", + "arguments": { + "timeoutMS": 100000, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll", + "writeConcern": { + "$$exists": false + }, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "maxTimeMS is ignored if timeoutMS is set - find on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "find", + "object": "collection", + "arguments": { + "timeoutMS": 1000, + "maxTimeMS": 5000, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll", + "maxTimeMS": { + "$$lte": 1000 + } + } + } + } + ] + } + ] + }, + { + "description": "socketTimeoutMS is ignored if timeoutMS is set - findOne on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "socketTimeoutMS": 1 + }, + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + 
"bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "blockConnection": true, + "blockTimeMS": 5 + } + } + } + }, + { + "name": "findOne", + "object": "collection", + "arguments": { + "timeoutMS": 100000, + "filter": {} + } + } + ] + }, + { + "description": "wTimeoutMS is ignored if timeoutMS is set - findOne on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "wTimeoutMS": 1 + }, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "findOne", + "object": "collection", + "arguments": { + "timeoutMS": 100000, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll", + "writeConcern": { + "$$exists": false + }, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "maxTimeMS is ignored if timeoutMS is set - findOne on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "findOne", + "object": "collection", + "arguments": { + "timeoutMS": 1000, + "maxTimeMS": 5000, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll", + "maxTimeMS": { + "$$lte": 1000 + } + } + } + } + ] + } + ] + }, + { + "description": "socketTimeoutMS is ignored if timeoutMS is set - listIndexes on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "socketTimeoutMS": 1 + }, + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "failPoint", 
+ "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listIndexes" + ], + "blockConnection": true, + "blockTimeMS": 5 + } + } + } + }, + { + "name": "listIndexes", + "object": "collection", + "arguments": { + "timeoutMS": 100000 + } + } + ] + }, + { + "description": "wTimeoutMS is ignored if timeoutMS is set - listIndexes on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "wTimeoutMS": 1 + }, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "listIndexes", + "object": "collection", + "arguments": { + "timeoutMS": 100000 + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listIndexes", + "databaseName": "test", + "command": { + "listIndexes": "coll", + "writeConcern": { + "$$exists": false + }, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "socketTimeoutMS is ignored if timeoutMS is set - listIndexNames on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "socketTimeoutMS": 1 + }, + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listIndexes" + ], + "blockConnection": true, + "blockTimeMS": 5 + } + } + } + }, + { + "name": "listIndexNames", + "object": "collection", + "arguments": { + "timeoutMS": 100000 + } + } + ] + }, + { + "description": "wTimeoutMS is ignored if timeoutMS is set - listIndexNames on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "wTimeoutMS": 1 + }, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "listIndexNames", + "object": "collection", + "arguments": { + "timeoutMS": 100000 + } + } + ], + "expectEvents": [ + { + 
"client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listIndexes", + "databaseName": "test", + "command": { + "listIndexes": "coll", + "writeConcern": { + "$$exists": false + }, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "socketTimeoutMS is ignored if timeoutMS is set - createChangeStream on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "socketTimeoutMS": 1 + }, + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 5 + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection", + "arguments": { + "timeoutMS": 100000, + "pipeline": [] + } + } + ] + }, + { + "description": "wTimeoutMS is ignored if timeoutMS is set - createChangeStream on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "wTimeoutMS": 1 + }, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "createChangeStream", + "object": "collection", + "arguments": { + "timeoutMS": 100000, + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "writeConcern": { + "$$exists": false + }, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "socketTimeoutMS is ignored if timeoutMS is set - insertOne on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "socketTimeoutMS": 1 + }, + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + 
"blockConnection": true, + "blockTimeMS": 5 + } + } + } + }, + { + "name": "insertOne", + "object": "collection", + "arguments": { + "timeoutMS": 100000, + "document": { + "x": 1 + } + } + } + ] + }, + { + "description": "wTimeoutMS is ignored if timeoutMS is set - insertOne on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "wTimeoutMS": 1 + }, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "insertOne", + "object": "collection", + "arguments": { + "timeoutMS": 100000, + "document": { + "x": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "writeConcern": { + "$$exists": false + }, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "socketTimeoutMS is ignored if timeoutMS is set - insertMany on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "socketTimeoutMS": 1 + }, + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "blockConnection": true, + "blockTimeMS": 5 + } + } + } + }, + { + "name": "insertMany", + "object": "collection", + "arguments": { + "timeoutMS": 100000, + "documents": [ + { + "x": 1 + } + ] + } + } + ] + }, + { + "description": "wTimeoutMS is ignored if timeoutMS is set - insertMany on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "wTimeoutMS": 1 + }, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "insertMany", + "object": "collection", + "arguments": { + "timeoutMS": 100000, + "documents": [ + { + "x": 1 + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", 
+ "command": { + "insert": "coll", + "writeConcern": { + "$$exists": false + }, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "socketTimeoutMS is ignored if timeoutMS is set - deleteOne on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "socketTimeoutMS": 1 + }, + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "delete" + ], + "blockConnection": true, + "blockTimeMS": 5 + } + } + } + }, + { + "name": "deleteOne", + "object": "collection", + "arguments": { + "timeoutMS": 100000, + "filter": {} + } + } + ] + }, + { + "description": "wTimeoutMS is ignored if timeoutMS is set - deleteOne on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "wTimeoutMS": 1 + }, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "deleteOne", + "object": "collection", + "arguments": { + "timeoutMS": 100000, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "delete", + "databaseName": "test", + "command": { + "delete": "coll", + "writeConcern": { + "$$exists": false + }, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "socketTimeoutMS is ignored if timeoutMS is set - deleteMany on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "socketTimeoutMS": 1 + }, + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "delete" + ], + "blockConnection": true, + "blockTimeMS": 5 + } + } + } + }, + { + "name": "deleteMany", + "object": "collection", + "arguments": { + "timeoutMS": 100000, + "filter": {} + } + } + ] + }, + { 
+ "description": "wTimeoutMS is ignored if timeoutMS is set - deleteMany on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "wTimeoutMS": 1 + }, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "deleteMany", + "object": "collection", + "arguments": { + "timeoutMS": 100000, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "delete", + "databaseName": "test", + "command": { + "delete": "coll", + "writeConcern": { + "$$exists": false + }, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "socketTimeoutMS is ignored if timeoutMS is set - replaceOne on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "socketTimeoutMS": 1 + }, + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "update" + ], + "blockConnection": true, + "blockTimeMS": 5 + } + } + } + }, + { + "name": "replaceOne", + "object": "collection", + "arguments": { + "timeoutMS": 100000, + "filter": {}, + "replacement": { + "x": 1 + } + } + } + ] + }, + { + "description": "wTimeoutMS is ignored if timeoutMS is set - replaceOne on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "wTimeoutMS": 1 + }, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "replaceOne", + "object": "collection", + "arguments": { + "timeoutMS": 100000, + "filter": {}, + "replacement": { + "x": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "update", + "databaseName": "test", + "command": { + "update": "coll", + "writeConcern": { + "$$exists": false + }, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": 
"socketTimeoutMS is ignored if timeoutMS is set - updateOne on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "socketTimeoutMS": 1 + }, + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "update" + ], + "blockConnection": true, + "blockTimeMS": 5 + } + } + } + }, + { + "name": "updateOne", + "object": "collection", + "arguments": { + "timeoutMS": 100000, + "filter": {}, + "update": { + "$set": { + "x": 1 + } + } + } + } + ] + }, + { + "description": "wTimeoutMS is ignored if timeoutMS is set - updateOne on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "wTimeoutMS": 1 + }, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "updateOne", + "object": "collection", + "arguments": { + "timeoutMS": 100000, + "filter": {}, + "update": { + "$set": { + "x": 1 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "update", + "databaseName": "test", + "command": { + "update": "coll", + "writeConcern": { + "$$exists": false + }, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "socketTimeoutMS is ignored if timeoutMS is set - updateMany on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "socketTimeoutMS": 1 + }, + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "update" + ], + "blockConnection": true, + "blockTimeMS": 5 + } + } + } + }, + { + "name": "updateMany", + "object": "collection", + "arguments": { + "timeoutMS": 100000, + "filter": {}, + "update": { + "$set": { + "x": 1 + } + } + } + } + ] + }, + { + "description": "wTimeoutMS is ignored if timeoutMS 
is set - updateMany on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "wTimeoutMS": 1 + }, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "updateMany", + "object": "collection", + "arguments": { + "timeoutMS": 100000, + "filter": {}, + "update": { + "$set": { + "x": 1 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "update", + "databaseName": "test", + "command": { + "update": "coll", + "writeConcern": { + "$$exists": false + }, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "socketTimeoutMS is ignored if timeoutMS is set - findOneAndDelete on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "socketTimeoutMS": 1 + }, + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "blockConnection": true, + "blockTimeMS": 5 + } + } + } + }, + { + "name": "findOneAndDelete", + "object": "collection", + "arguments": { + "timeoutMS": 100000, + "filter": {} + } + } + ] + }, + { + "description": "wTimeoutMS is ignored if timeoutMS is set - findOneAndDelete on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "wTimeoutMS": 1 + }, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "findOneAndDelete", + "object": "collection", + "arguments": { + "timeoutMS": 100000, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll", + "writeConcern": { + "$$exists": false + }, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "maxTimeMS is ignored if timeoutMS is 
set - findOneAndDelete on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "findOneAndDelete", + "object": "collection", + "arguments": { + "timeoutMS": 1000, + "maxTimeMS": 5000, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll", + "maxTimeMS": { + "$$lte": 1000 + } + } + } + } + ] + } + ] + }, + { + "description": "socketTimeoutMS is ignored if timeoutMS is set - findOneAndReplace on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "socketTimeoutMS": 1 + }, + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "blockConnection": true, + "blockTimeMS": 5 + } + } + } + }, + { + "name": "findOneAndReplace", + "object": "collection", + "arguments": { + "timeoutMS": 100000, + "filter": {}, + "replacement": { + "x": 1 + } + } + } + ] + }, + { + "description": "wTimeoutMS is ignored if timeoutMS is set - findOneAndReplace on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "wTimeoutMS": 1 + }, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "findOneAndReplace", + "object": "collection", + "arguments": { + "timeoutMS": 100000, + "filter": {}, + "replacement": { + "x": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll", + "writeConcern": { + "$$exists": false + }, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "maxTimeMS is ignored if timeoutMS is set - 
findOneAndReplace on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "findOneAndReplace", + "object": "collection", + "arguments": { + "timeoutMS": 1000, + "maxTimeMS": 5000, + "filter": {}, + "replacement": { + "x": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll", + "maxTimeMS": { + "$$lte": 1000 + } + } + } + } + ] + } + ] + }, + { + "description": "socketTimeoutMS is ignored if timeoutMS is set - findOneAndUpdate on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "socketTimeoutMS": 1 + }, + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "blockConnection": true, + "blockTimeMS": 5 + } + } + } + }, + { + "name": "findOneAndUpdate", + "object": "collection", + "arguments": { + "timeoutMS": 100000, + "filter": {}, + "update": { + "$set": { + "x": 1 + } + } + } + } + ] + }, + { + "description": "wTimeoutMS is ignored if timeoutMS is set - findOneAndUpdate on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "wTimeoutMS": 1 + }, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "findOneAndUpdate", + "object": "collection", + "arguments": { + "timeoutMS": 100000, + "filter": {}, + "update": { + "$set": { + "x": 1 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll", + "writeConcern": { + "$$exists": false + }, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + 
"description": "maxTimeMS is ignored if timeoutMS is set - findOneAndUpdate on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "findOneAndUpdate", + "object": "collection", + "arguments": { + "timeoutMS": 1000, + "maxTimeMS": 5000, + "filter": {}, + "update": { + "$set": { + "x": 1 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll", + "maxTimeMS": { + "$$lte": 1000 + } + } + } + } + ] + } + ] + }, + { + "description": "socketTimeoutMS is ignored if timeoutMS is set - bulkWrite on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "socketTimeoutMS": 1 + }, + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "blockConnection": true, + "blockTimeMS": 5 + } + } + } + }, + { + "name": "bulkWrite", + "object": "collection", + "arguments": { + "timeoutMS": 100000, + "requests": [ + { + "insertOne": { + "document": { + "_id": 1 + } + } + } + ] + } + } + ] + }, + { + "description": "wTimeoutMS is ignored if timeoutMS is set - bulkWrite on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "wTimeoutMS": 1 + }, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "bulkWrite", + "object": "collection", + "arguments": { + "timeoutMS": 100000, + "requests": [ + { + "insertOne": { + "document": { + "_id": 1 + } + } + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "writeConcern": { + "$$exists": false + }, + "maxTimeMS": { + "$$type": [ + "int", + 
"long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "socketTimeoutMS is ignored if timeoutMS is set - createIndex on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "socketTimeoutMS": 1 + }, + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "createIndexes" + ], + "blockConnection": true, + "blockTimeMS": 5 + } + } + } + }, + { + "name": "createIndex", + "object": "collection", + "arguments": { + "timeoutMS": 100000, + "keys": { + "x": 1 + }, + "name": "x_1" + } + } + ] + }, + { + "description": "wTimeoutMS is ignored if timeoutMS is set - createIndex on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "wTimeoutMS": 1 + }, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "createIndex", + "object": "collection", + "arguments": { + "timeoutMS": 100000, + "keys": { + "x": 1 + }, + "name": "x_1" + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "createIndexes", + "databaseName": "test", + "command": { + "createIndexes": "coll", + "writeConcern": { + "$$exists": false + }, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "maxTimeMS is ignored if timeoutMS is set - createIndex on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "createIndex", + "object": "collection", + "arguments": { + "timeoutMS": 1000, + "maxTimeMS": 5000, + "keys": { + "x": 1 + }, + "name": "x_1" + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "createIndexes", + "databaseName": "test", + "command": { + "createIndexes": "coll", + "maxTimeMS": { + "$$lte": 1000 + } + } + } + 
} + ] + } + ] + }, + { + "description": "socketTimeoutMS is ignored if timeoutMS is set - dropIndex on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "socketTimeoutMS": 1 + }, + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "dropIndexes" + ], + "blockConnection": true, + "blockTimeMS": 5 + } + } + } + }, + { + "name": "dropIndex", + "object": "collection", + "arguments": { + "timeoutMS": 100000, + "name": "x_1" + }, + "expectError": { + "isClientError": false, + "isTimeoutError": false + } + } + ] + }, + { + "description": "wTimeoutMS is ignored if timeoutMS is set - dropIndex on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "wTimeoutMS": 1 + }, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "dropIndex", + "object": "collection", + "arguments": { + "timeoutMS": 100000, + "name": "x_1" + }, + "expectError": { + "isClientError": false, + "isTimeoutError": false + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "dropIndexes", + "databaseName": "test", + "command": { + "dropIndexes": "coll", + "writeConcern": { + "$$exists": false + }, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "maxTimeMS is ignored if timeoutMS is set - dropIndex on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "dropIndex", + "object": "collection", + "arguments": { + "timeoutMS": 1000, + "maxTimeMS": 5000, + "name": "x_1" + }, + "expectError": { + "isClientError": false, + "isTimeoutError": false + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "dropIndexes", + 
"databaseName": "test", + "command": { + "dropIndexes": "coll", + "maxTimeMS": { + "$$lte": 1000 + } + } + } + } + ] + } + ] + }, + { + "description": "socketTimeoutMS is ignored if timeoutMS is set - dropIndexes on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "socketTimeoutMS": 1 + }, + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "dropIndexes" + ], + "blockConnection": true, + "blockTimeMS": 5 + } + } + } + }, + { + "name": "dropIndexes", + "object": "collection", + "arguments": { + "timeoutMS": 100000 + } + } + ] + }, + { + "description": "wTimeoutMS is ignored if timeoutMS is set - dropIndexes on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "wTimeoutMS": 1 + }, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "dropIndexes", + "object": "collection", + "arguments": { + "timeoutMS": 100000 + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "dropIndexes", + "databaseName": "test", + "command": { + "dropIndexes": "coll", + "writeConcern": { + "$$exists": false + }, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "maxTimeMS is ignored if timeoutMS is set - dropIndexes on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "dropIndexes", + "object": "collection", + "arguments": { + "timeoutMS": 1000, + "maxTimeMS": 5000 + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "dropIndexes", + "databaseName": "test", + "command": { + "dropIndexes": "coll", + "maxTimeMS": { + "$$lte": 1000 + } + } + } + } + ] + } + ] + } + ] +} diff --git 
a/test/csot/error-transformations.json b/test/csot/error-transformations.json new file mode 100644 index 0000000000..4d9e061c3b --- /dev/null +++ b/test/csot/error-transformations.json @@ -0,0 +1,181 @@ +{ + "description": "MaxTimeMSExpired server errors are transformed into a custom timeout error", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "replicaset" + ] + }, + { + "minServerVersion": "4.2", + "topologies": [ + "replicaset", + "sharded" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "failPointClient", + "useMultipleMongoses": false + } + }, + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 250 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "test", + "documents": [] + } + ], + "tests": [ + { + "description": "basic MaxTimeMSExpired error is transformed", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "errorCode": 50 + } + } + } + }, + { + "name": "insertOne", + "object": "collection", + "arguments": { + "document": { + "_id": 1 + } + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "write concern error MaxTimeMSExpired is transformed", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "writeConcernError": { + "code": 50, + "errmsg": "maxTimeMS expired" + } + } + } + } + }, + { + "name": "insertOne", + "object": "collection", + "arguments": { + "document": { + "_id": 1 + } + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + } + ] +} diff --git a/test/csot/global-timeoutMS.json b/test/csot/global-timeoutMS.json new file mode 100644 index 0000000000..34854ac155 --- /dev/null +++ b/test/csot/global-timeoutMS.json @@ -0,0 +1,5830 @@ +{ + "description": "timeoutMS can be configured on a MongoClient", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.4", + "topologies": [ + "replicaset", + "sharded-replicaset" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "failPointClient", + "useMultipleMongoses": false + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "test", + "documents": [] + } + ], + "tests": [ + { + "description": "timeoutMS can be configured on a MongoClient - listDatabases on client", + "operations": [ + { + "name": "createEntities", + 
"object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 250 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "listDatabases" + ], + "blockConnection": true, + "blockTimeMS": 350 + } + } + } + }, + { + "name": "listDatabases", + "object": "client", + "arguments": { + "filter": {} + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listDatabases", + "databaseName": "admin", + "command": { + "listDatabases": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoClient - listDatabases on client", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 0 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listDatabases" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "listDatabases", + "object": "client", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listDatabases", + "databaseName": "admin", + "command": { + "listDatabases": 1, + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoClient - listDatabaseNames on client", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 250 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "listDatabases" + ], + "blockConnection": true, + "blockTimeMS": 350 + } + } + } + }, + { + "name": "listDatabaseNames", + "object": "client", + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + 
"commandStartedEvent": { + "commandName": "listDatabases", + "databaseName": "admin", + "command": { + "listDatabases": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoClient - listDatabaseNames on client", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 0 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listDatabases" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "listDatabaseNames", + "object": "client" + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listDatabases", + "databaseName": "admin", + "command": { + "listDatabases": 1, + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoClient - createChangeStream on client", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 250 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 350 + } + } + } + }, + { + "name": "createChangeStream", + "object": "client", + "arguments": { + "pipeline": [] + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "admin", + "command": { + "aggregate": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoClient - createChangeStream on client", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 0 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + 
"failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "createChangeStream", + "object": "client", + "arguments": { + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "admin", + "command": { + "aggregate": 1, + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoClient - aggregate on database", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 250 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 350 + } + } + } + }, + { + "name": "aggregate", + "object": "database", + "arguments": { + "pipeline": [ + { + "$listLocalSessions": {} + }, + { + "$limit": 1 + } + ] + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoClient - aggregate on database", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 0 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "aggregate", + "object": "database", + "arguments": { + "pipeline": [ + { + "$listLocalSessions": {} + }, + { + "$limit": 1 + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": 1, + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoClient - listCollections on database", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 250 + }, + 
"useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "listCollections" + ], + "blockConnection": true, + "blockTimeMS": 350 + } + } + } + }, + { + "name": "listCollections", + "object": "database", + "arguments": { + "filter": {} + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listCollections", + "databaseName": "test", + "command": { + "listCollections": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoClient - listCollections on database", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 0 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listCollections" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "listCollections", + "object": "database", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listCollections", + "databaseName": "test", + "command": { + "listCollections": 1, + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoClient - listCollectionNames on database", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 250 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "listCollections" + ], + "blockConnection": true, + "blockTimeMS": 350 + } + } + } + }, + { + "name": "listCollectionNames", + "object": "database", + "arguments": { + "filter": {} + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listCollections", + 
"databaseName": "test", + "command": { + "listCollections": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoClient - listCollectionNames on database", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 0 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listCollections" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "listCollectionNames", + "object": "database", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listCollections", + "databaseName": "test", + "command": { + "listCollections": 1, + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoClient - runCommand on database", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 250 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "ping" + ], + "blockConnection": true, + "blockTimeMS": 350 + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "command": { + "ping": 1 + }, + "commandName": "ping" + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "ping", + "databaseName": "test", + "command": { + "ping": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoClient - runCommand on database", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 0 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": 
{ + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "ping" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "command": { + "ping": 1 + }, + "commandName": "ping" + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "ping", + "databaseName": "test", + "command": { + "ping": 1, + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoClient - createChangeStream on database", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 250 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 350 + } + } + } + }, + { + "name": "createChangeStream", + "object": "database", + "arguments": { + "pipeline": [] + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoClient - createChangeStream on database", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 0 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "createChangeStream", + "object": "database", + "arguments": { + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": 1, + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoClient - aggregate on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 250 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, 
+ { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 350 + } + } + } + }, + { + "name": "aggregate", + "object": "collection", + "arguments": { + "pipeline": [] + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoClient - aggregate on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 0 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "aggregate", + "object": "collection", + "arguments": { + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoClient - count on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 250 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "count" + ], + "blockConnection": true, + "blockTimeMS": 350 + } + } + } + }, + { + "name": "count", + "object": "collection", + "arguments": { + "filter": {} + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "count", + "databaseName": "test", + "command": { + "count": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS 
can be set to 0 on a MongoClient - count on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 0 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "count" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "count", + "object": "collection", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "count", + "databaseName": "test", + "command": { + "count": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoClient - countDocuments on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 250 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 350 + } + } + } + }, + { + "name": "countDocuments", + "object": "collection", + "arguments": { + "filter": {} + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoClient - countDocuments on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 0 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "countDocuments", + "object": 
"collection", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoClient - estimatedDocumentCount on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 250 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "count" + ], + "blockConnection": true, + "blockTimeMS": 350 + } + } + } + }, + { + "name": "estimatedDocumentCount", + "object": "collection", + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "count", + "databaseName": "test", + "command": { + "count": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoClient - estimatedDocumentCount on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 0 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "count" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "estimatedDocumentCount", + "object": "collection" + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "count", + "databaseName": "test", + "command": { + "count": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoClient - distinct on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 250 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": 
"failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "distinct" + ], + "blockConnection": true, + "blockTimeMS": 350 + } + } + } + }, + { + "name": "distinct", + "object": "collection", + "arguments": { + "fieldName": "x", + "filter": {} + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "distinct", + "databaseName": "test", + "command": { + "distinct": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoClient - distinct on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 0 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "distinct" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "distinct", + "object": "collection", + "arguments": { + "fieldName": "x", + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "distinct", + "databaseName": "test", + "command": { + "distinct": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoClient - find on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 250 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "find" + ], + "blockConnection": true, + "blockTimeMS": 350 + } + } + } + }, + { + "name": "find", + "object": "collection", + "arguments": { + "filter": {} + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoClient - find on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 0 + }, + "useMultipleMongoses": false, + 
"observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "find", + "object": "collection", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoClient - findOne on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 250 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "find" + ], + "blockConnection": true, + "blockTimeMS": 350 + } + } + } + }, + { + "name": "findOne", + "object": "collection", + "arguments": { + "filter": {} + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoClient - findOne on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 0 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "findOne", + "object": "collection", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": 
"timeoutMS can be configured on a MongoClient - listIndexes on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 250 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "listIndexes" + ], + "blockConnection": true, + "blockTimeMS": 350 + } + } + } + }, + { + "name": "listIndexes", + "object": "collection", + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listIndexes", + "databaseName": "test", + "command": { + "listIndexes": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoClient - listIndexes on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 0 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listIndexes" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "listIndexes", + "object": "collection" + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listIndexes", + "databaseName": "test", + "command": { + "listIndexes": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoClient - listIndexNames on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 250 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "listIndexes" + ], + "blockConnection": true, + "blockTimeMS": 350 + } + } + } + }, + { + "name": "listIndexNames", + "object": "collection", + "expectError": { + "isTimeoutError": true + } + } + ], + 
"expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listIndexes", + "databaseName": "test", + "command": { + "listIndexes": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoClient - listIndexNames on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 0 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listIndexes" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "listIndexNames", + "object": "collection" + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listIndexes", + "databaseName": "test", + "command": { + "listIndexes": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoClient - createChangeStream on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 250 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 350 + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection", + "arguments": { + "pipeline": [] + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoClient - createChangeStream on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 0 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + 
"object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection", + "arguments": { + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoClient - insertOne on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 250 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "insert" + ], + "blockConnection": true, + "blockTimeMS": 350 + } + } + } + }, + { + "name": "insertOne", + "object": "collection", + "arguments": { + "document": { + "x": 1 + } + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoClient - insertOne on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 0 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "insertOne", + "object": "collection", + "arguments": { + "document": { + "x": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoClient - insertMany on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 250 + }, + 
"useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "insert" + ], + "blockConnection": true, + "blockTimeMS": 350 + } + } + } + }, + { + "name": "insertMany", + "object": "collection", + "arguments": { + "documents": [ + { + "x": 1 + } + ] + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoClient - insertMany on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 0 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "insertMany", + "object": "collection", + "arguments": { + "documents": [ + { + "x": 1 + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoClient - deleteOne on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 250 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "delete" + ], + "blockConnection": true, + "blockTimeMS": 350 + } + } + } + }, + { + "name": "deleteOne", + "object": "collection", + "arguments": { + "filter": {} + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "delete", + "databaseName": "test", + "command": { + "delete": 
"coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoClient - deleteOne on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 0 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "delete" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "deleteOne", + "object": "collection", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "delete", + "databaseName": "test", + "command": { + "delete": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoClient - deleteMany on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 250 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "delete" + ], + "blockConnection": true, + "blockTimeMS": 350 + } + } + } + }, + { + "name": "deleteMany", + "object": "collection", + "arguments": { + "filter": {} + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "delete", + "databaseName": "test", + "command": { + "delete": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoClient - deleteMany on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 0 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "delete" + 
], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "deleteMany", + "object": "collection", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "delete", + "databaseName": "test", + "command": { + "delete": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoClient - replaceOne on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 250 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "update" + ], + "blockConnection": true, + "blockTimeMS": 350 + } + } + } + }, + { + "name": "replaceOne", + "object": "collection", + "arguments": { + "filter": {}, + "replacement": { + "x": 1 + } + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "update", + "databaseName": "test", + "command": { + "update": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoClient - replaceOne on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 0 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "update" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "replaceOne", + "object": "collection", + "arguments": { + "filter": {}, + "replacement": { + "x": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "update", + "databaseName": "test", + "command": { + "update": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoClient - updateOne on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 250 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + 
"collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "update" + ], + "blockConnection": true, + "blockTimeMS": 350 + } + } + } + }, + { + "name": "updateOne", + "object": "collection", + "arguments": { + "filter": {}, + "update": { + "$set": { + "x": 1 + } + } + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "update", + "databaseName": "test", + "command": { + "update": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoClient - updateOne on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 0 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "update" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "updateOne", + "object": "collection", + "arguments": { + "filter": {}, + "update": { + "$set": { + "x": 1 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "update", + "databaseName": "test", + "command": { + "update": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoClient - updateMany on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 250 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "update" + ], + "blockConnection": true, + "blockTimeMS": 350 + } + } + } + }, + { + "name": "updateMany", + "object": "collection", + "arguments": { + "filter": {}, + "update": { + "$set": { + "x": 1 + } + } + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "update", + "databaseName": "test", + "command": { + "update": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + 
"description": "timeoutMS can be set to 0 on a MongoClient - updateMany on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 0 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "update" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "updateMany", + "object": "collection", + "arguments": { + "filter": {}, + "update": { + "$set": { + "x": 1 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "update", + "databaseName": "test", + "command": { + "update": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoClient - findOneAndDelete on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 250 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "blockConnection": true, + "blockTimeMS": 350 + } + } + } + }, + { + "name": "findOneAndDelete", + "object": "collection", + "arguments": { + "filter": {} + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoClient - findOneAndDelete on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 0 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + 
"blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "findOneAndDelete", + "object": "collection", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoClient - findOneAndReplace on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 250 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "blockConnection": true, + "blockTimeMS": 350 + } + } + } + }, + { + "name": "findOneAndReplace", + "object": "collection", + "arguments": { + "filter": {}, + "replacement": { + "x": 1 + } + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoClient - findOneAndReplace on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 0 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "findOneAndReplace", + "object": "collection", + "arguments": { + "filter": {}, + "replacement": { + "x": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoClient - findOneAndUpdate on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 250 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + 
"id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "blockConnection": true, + "blockTimeMS": 350 + } + } + } + }, + { + "name": "findOneAndUpdate", + "object": "collection", + "arguments": { + "filter": {}, + "update": { + "$set": { + "x": 1 + } + } + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoClient - findOneAndUpdate on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 0 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "findOneAndUpdate", + "object": "collection", + "arguments": { + "filter": {}, + "update": { + "$set": { + "x": 1 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoClient - bulkWrite on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 250 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "insert" + ], + "blockConnection": true, + "blockTimeMS": 350 + } + } + } + }, + { + "name": "bulkWrite", + "object": "collection", + "arguments": { + "requests": [ + { + "insertOne": { + "document": { + "_id": 1 + } + } + } + ] + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": 
"insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoClient - bulkWrite on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 0 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "bulkWrite", + "object": "collection", + "arguments": { + "requests": [ + { + "insertOne": { + "document": { + "_id": 1 + } + } + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoClient - createIndex on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 250 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "createIndexes" + ], + "blockConnection": true, + "blockTimeMS": 350 + } + } + } + }, + { + "name": "createIndex", + "object": "collection", + "arguments": { + "keys": { + "x": 1 + }, + "name": "x_1" + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "createIndexes", + "databaseName": "test", + "command": { + "createIndexes": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoClient - createIndex on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 0 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + 
"arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "createIndexes" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "createIndex", + "object": "collection", + "arguments": { + "keys": { + "x": 1 + }, + "name": "x_1" + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "createIndexes", + "databaseName": "test", + "command": { + "createIndexes": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoClient - dropIndex on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 250 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "dropIndexes" + ], + "blockConnection": true, + "blockTimeMS": 350 + } + } + } + }, + { + "name": "dropIndex", + "object": "collection", + "arguments": { + "name": "x_1" + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "dropIndexes", + "databaseName": "test", + "command": { + "dropIndexes": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoClient - dropIndex on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 0 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "dropIndexes" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "dropIndex", + "object": "collection", + "arguments": { + "name": "x_1" + }, + "expectError": { + "isClientError": false, + "isTimeoutError": false + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "dropIndexes", + "databaseName": "test", + "command": { + "dropIndexes": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoClient - dropIndexes on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": 
{ + "id": "client", + "uriOptions": { + "timeoutMS": 250 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "dropIndexes" + ], + "blockConnection": true, + "blockTimeMS": 350 + } + } + } + }, + { + "name": "dropIndexes", + "object": "collection", + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "dropIndexes", + "databaseName": "test", + "command": { + "dropIndexes": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoClient - dropIndexes on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 0 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "dropIndexes" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "dropIndexes", + "object": "collection" + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "dropIndexes", + "databaseName": "test", + "command": { + "dropIndexes": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + } + ] +} diff --git a/test/csot/gridfs-advanced.json b/test/csot/gridfs-advanced.json new file mode 100644 index 0000000000..668b93f37a --- /dev/null +++ b/test/csot/gridfs-advanced.json @@ -0,0 +1,370 @@ +{ + "description": "timeoutMS behaves correctly for advanced GridFS API operations", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "createEntities": [ + { + "client": { + "id": "failPointClient", + "useMultipleMongoses": false + } + }, + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 50 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "collection": { + "id": "filesCollection", + "database": "database", + "collectionName": "fs.files" + } + }, + { + "collection": { + "id": "chunksCollection", + "database": "database", + "collectionName": "fs.chunks" + } + } + ], + "initialData": [ + { + "collectionName": "fs.files", + "databaseName": "test", + "documents": [ + { + "_id": { + "$oid": "000000000000000000000005" + }, + "length": 10, + 
"chunkSize": 4, + "uploadDate": { + "$date": "1970-01-01T00:00:00.000Z" + }, + "md5": "57d83cd477bfb1ccd975ab33d827a92b", + "filename": "length-10", + "contentType": "application/octet-stream", + "aliases": [], + "metadata": {} + } + ] + }, + { + "collectionName": "fs.chunks", + "databaseName": "test", + "documents": [ + { + "_id": { + "$oid": "000000000000000000000005" + }, + "files_id": { + "$oid": "000000000000000000000005" + }, + "n": 0, + "data": { + "$binary": { + "base64": "ESIzRA==", + "subType": "00" + } + } + } + ] + } + ], + "tests": [ + { + "description": "timeoutMS can be overridden for a rename", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "update" + ], + "blockConnection": true, + "blockTimeMS": 55 + } + } + } + }, + { + "name": "rename", + "object": "bucket", + "arguments": { + "id": { + "$oid": "000000000000000000000005" + }, + "newFilename": "foo", + "timeoutMS": 100 + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "update", + "databaseName": "test", + "command": { + "update": "fs.files", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS applied to update during a rename", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "update" + ], + "blockConnection": true, + "blockTimeMS": 55 + } + } + } + }, + { + "name": "rename", + "object": "bucket", + "arguments": { + "id": { + "$oid": "000000000000000000000005" + }, + "newFilename": "foo" + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "update", + "databaseName": "test", + "command": { + "update": "fs.files", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be overridden for drop", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "drop" + ], + "blockConnection": true, + "blockTimeMS": 55 + } + } + } + }, + { + "name": "drop", + "object": "bucket", + "arguments": { + "timeoutMS": 100 + } + } + ] + }, + { + "description": "timeoutMS applied to files collection drop", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "drop" + ], + "blockConnection": true, + "blockTimeMS": 55 + } + } + } + }, + { + "name": "drop", + "object": "bucket", + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "drop", + "databaseName": "test", + "command": { + "drop": "fs.files", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS applied to chunks collection drop", + "operations": [ + { + "name": 
"failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "skip": 1 + }, + "data": { + "failCommands": [ + "drop" + ], + "blockConnection": true, + "blockTimeMS": 55 + } + } + } + }, + { + "name": "drop", + "object": "bucket", + "expectError": { + "isTimeoutError": true + } + } + ] + }, + { + "description": "timeoutMS applied to drop as a whole, not individual parts", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "drop" + ], + "blockConnection": true, + "blockTimeMS": 30 + } + } + } + }, + { + "name": "drop", + "object": "bucket", + "expectError": { + "isTimeoutError": true + } + } + ] + } + ] +} diff --git a/test/csot/gridfs-delete.json b/test/csot/gridfs-delete.json new file mode 100644 index 0000000000..f458fa827c --- /dev/null +++ b/test/csot/gridfs-delete.json @@ -0,0 +1,270 @@ +{ + "description": "timeoutMS behaves correctly for GridFS delete operations", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "createEntities": [ + { + "client": { + "id": "failPointClient", + "useMultipleMongoses": false + } + }, + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 50 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "collection": { + "id": "filesCollection", + "database": "database", + "collectionName": "fs.files" + } + }, + { + "collection": { + "id": "chunksCollection", + "database": "database", + "collectionName": "fs.chunks" + } + } + ], + "initialData": [ + { + "collectionName": "fs.files", + "databaseName": "test", + "documents": [ + { + "_id": { + "$oid": "000000000000000000000005" + }, + "length": 10, + "chunkSize": 4, + "uploadDate": { + "$date": "1970-01-01T00:00:00.000Z" + }, + "md5": "57d83cd477bfb1ccd975ab33d827a92b", + "filename": "length-10", + "contentType": "application/octet-stream", + "aliases": [], + "metadata": {} + } + ] + }, + { + "collectionName": "fs.chunks", + "databaseName": "test", + "documents": [ + { + "_id": { + "$oid": "000000000000000000000005" + }, + "files_id": { + "$oid": "000000000000000000000005" + }, + "n": 0, + "data": { + "$binary": { + "base64": "ESIzRA==", + "subType": "00" + } + } + } + ] + } + ], + "tests": [ + { + "description": "timeoutMS can be overridden for delete", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "delete" + ], + "blockConnection": true, + "blockTimeMS": 55 + } + } + } + }, + { + "name": "delete", + "object": "bucket", + "arguments": { + "id": { + "$oid": "000000000000000000000005" + }, + "timeoutMS": 100 + } + } + ] + }, + { + "description": "timeoutMS applied to delete against the files collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "delete" + ], + "blockConnection": true, + 
"blockTimeMS": 55 + } + } + } + }, + { + "name": "delete", + "object": "bucket", + "arguments": { + "id": { + "$oid": "000000000000000000000005" + } + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "delete", + "databaseName": "test", + "command": { + "delete": "fs.files", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS applied to delete against the chunks collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "skip": 1 + }, + "data": { + "failCommands": [ + "delete" + ], + "blockConnection": true, + "blockTimeMS": 55 + } + } + } + }, + { + "name": "delete", + "object": "bucket", + "arguments": { + "id": { + "$oid": "000000000000000000000005" + } + }, + "expectError": { + "isTimeoutError": true + } + } + ] + }, + { + "description": "timeoutMS applied to entire delete, not individual parts", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "delete" + ], + "blockConnection": true, + "blockTimeMS": 30 + } + } + } + }, + { + "name": "delete", + "object": "bucket", + "arguments": { + "id": { + "$oid": "000000000000000000000005" + } + }, + "expectError": { + "isTimeoutError": true + } + } + ] + } + ] +} diff --git a/test/csot/gridfs-download.json b/test/csot/gridfs-download.json new file mode 100644 index 0000000000..a3044a6d81 --- /dev/null +++ b/test/csot/gridfs-download.json @@ -0,0 +1,344 @@ +{ + "description": "timeoutMS behaves correctly for GridFS download operations", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "createEntities": [ + { + "client": { + "id": "failPointClient", + "useMultipleMongoses": false + } + }, + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 50 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "collection": { + "id": "filesCollection", + "database": "database", + "collectionName": "fs.files" + } + }, + { + "collection": { + "id": "chunksCollection", + "database": "database", + "collectionName": "fs.chunks" + } + } + ], + "initialData": [ + { + "collectionName": "fs.files", + "databaseName": "test", + "documents": [ + { + "_id": { + "$oid": "000000000000000000000005" + }, + "length": 10, + "chunkSize": 4, + "uploadDate": { + "$date": "1970-01-01T00:00:00.000Z" + }, + "md5": "57d83cd477bfb1ccd975ab33d827a92b", + "filename": "length-10", + "contentType": "application/octet-stream", + "aliases": [], + "metadata": {} + } + ] + }, + { + "collectionName": "fs.chunks", + "databaseName": "test", + "documents": [ + { + "_id": { + "$oid": "000000000000000000000005" + }, + "files_id": { + "$oid": "000000000000000000000005" + }, + "n": 0, + "data": { + "$binary": { + "base64": "ESIzRA==", + "subType": "00" + } + } + } + ] + } + ], + "tests": [ + { + "description": "timeoutMS can be overridden for download", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + 
"arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "blockConnection": true, + "blockTimeMS": 55 + } + } + } + }, + { + "name": "download", + "object": "bucket", + "arguments": { + "id": { + "$oid": "000000000000000000000005" + }, + "timeoutMS": 100 + } + } + ] + }, + { + "description": "timeoutMS applied to find to get files document", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "blockConnection": true, + "blockTimeMS": 55 + } + } + } + }, + { + "name": "download", + "object": "bucket", + "arguments": { + "id": { + "$oid": "000000000000000000000005" + } + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "fs.files", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS applied to find to get chunks", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "skip": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "blockConnection": true, + "blockTimeMS": 55 + } + } + } + }, + { + "name": "download", + "object": "bucket", + "arguments": { + "id": { + "$oid": "000000000000000000000005" + } + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "fs.files", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "fs.chunks", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS applied to entire download, not individual parts", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "find" + ], + "blockConnection": true, + "blockTimeMS": 30 + } + } + } + }, + { + "name": "download", + "object": "bucket", + "arguments": { + "id": { + "$oid": "000000000000000000000005" + } + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "fs.files", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "fs.chunks", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + } + ] +} diff --git a/test/csot/gridfs-find.json b/test/csot/gridfs-find.json new file mode 100644 index 0000000000..f75a279c01 --- /dev/null +++ b/test/csot/gridfs-find.json @@ -0,0 +1,182 @@ +{ + "description": "timeoutMS behaves correctly for GridFS find operations", + "schemaVersion": 
"1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "createEntities": [ + { + "client": { + "id": "failPointClient", + "useMultipleMongoses": false + } + }, + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 50 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "collection": { + "id": "filesCollection", + "database": "database", + "collectionName": "fs.files" + } + }, + { + "collection": { + "id": "chunksCollection", + "database": "database", + "collectionName": "fs.chunks" + } + } + ], + "initialData": [ + { + "collectionName": "fs.files", + "databaseName": "test", + "documents": [] + }, + { + "collectionName": "fs.chunks", + "databaseName": "test", + "documents": [] + } + ], + "tests": [ + { + "description": "timeoutMS can be overridden for a find", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "blockConnection": true, + "blockTimeMS": 55 + } + } + } + }, + { + "name": "find", + "object": "bucket", + "arguments": { + "filter": {}, + "timeoutMS": 100 + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "fs.files", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS applied to find command", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "blockConnection": true, + "blockTimeMS": 55 + } + } + } + }, + { + "name": "find", + "object": "bucket", + "arguments": { + "filter": {} + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "fs.files", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + } + ] +} diff --git a/test/csot/gridfs-upload.json b/test/csot/gridfs-upload.json new file mode 100644 index 0000000000..b0daeb2e42 --- /dev/null +++ b/test/csot/gridfs-upload.json @@ -0,0 +1,408 @@ +{ + "description": "timeoutMS behaves correctly for GridFS upload operations", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "createEntities": [ + { + "client": { + "id": "failPointClient", + "useMultipleMongoses": false + } + }, + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 50 + }, + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "collection": { + "id": "filesCollection", + "database": "database", + "collectionName": "fs.files" + } + }, + { + "collection": { + "id": "chunksCollection", + "database": "database", + "collectionName": "fs.chunks" + } + } + ], + "initialData": [ + { + "collectionName": "fs.files", + "databaseName": 
"test", + "documents": [] + }, + { + "collectionName": "fs.chunks", + "databaseName": "test", + "documents": [] + } + ], + "tests": [ + { + "description": "timeoutMS can be overridden for upload", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "blockConnection": true, + "blockTimeMS": 55 + } + } + } + }, + { + "name": "upload", + "object": "bucket", + "arguments": { + "filename": "filename", + "source": { + "$$hexBytes": "1122334455" + }, + "timeoutMS": 1000 + } + } + ] + }, + { + "description": "timeoutMS applied to initial find on files collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "blockConnection": true, + "blockTimeMS": 55 + } + } + } + }, + { + "name": "upload", + "object": "bucket", + "arguments": { + "filename": "filename", + "source": { + "$$hexBytes": "1122334455" + } + }, + "expectError": { + "isTimeoutError": true + } + } + ] + }, + { + "description": "timeoutMS applied to listIndexes on files collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listIndexes" + ], + "blockConnection": true, + "blockTimeMS": 55 + } + } + } + }, + { + "name": "upload", + "object": "bucket", + "arguments": { + "filename": "filename", + "source": { + "$$hexBytes": "1122334455" + } + }, + "expectError": { + "isTimeoutError": true + } + } + ] + }, + { + "description": "timeoutMS applied to index creation for files collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "createIndexes" + ], + "blockConnection": true, + "blockTimeMS": 55 + } + } + } + }, + { + "name": "upload", + "object": "bucket", + "arguments": { + "filename": "filename", + "source": { + "$$hexBytes": "1122334455" + } + }, + "expectError": { + "isTimeoutError": true + } + } + ] + }, + { + "description": "timeoutMS applied to listIndexes on chunks collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "skip": 1 + }, + "data": { + "failCommands": [ + "listIndexes" + ], + "blockConnection": true, + "blockTimeMS": 55 + } + } + } + }, + { + "name": "upload", + "object": "bucket", + "arguments": { + "filename": "filename", + "source": { + "$$hexBytes": "1122334455" + } + }, + "expectError": { + "isTimeoutError": true + } + } + ] + }, + { + "description": "timeoutMS applied to index creation for chunks collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "skip": 1 + }, + "data": { + "failCommands": [ + "createIndexes" + ], + "blockConnection": true, + "blockTimeMS": 55 + } + } + } + }, + { + "name": "upload", + "object": "bucket", + 
"arguments": { + "filename": "filename", + "source": { + "$$hexBytes": "1122334455" + } + }, + "expectError": { + "isTimeoutError": true + } + } + ] + }, + { + "description": "timeoutMS applied to chunk insertion", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "blockConnection": true, + "blockTimeMS": 55 + } + } + } + }, + { + "name": "upload", + "object": "bucket", + "arguments": { + "filename": "filename", + "source": { + "$$hexBytes": "1122334455" + } + }, + "expectError": { + "isTimeoutError": true + } + } + ] + }, + { + "description": "timeoutMS applied to creation of files document", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "skip": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "blockConnection": true, + "blockTimeMS": 55 + } + } + } + }, + { + "name": "upload", + "object": "bucket", + "arguments": { + "filename": "filename", + "source": { + "$$hexBytes": "1122334455" + } + }, + "expectError": { + "isTimeoutError": true + } + } + ] + }, + { + "description": "timeoutMS applied to upload as a whole, not individual parts", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "find", + "listIndexes" + ], + "blockConnection": true, + "blockTimeMS": 30 + } + } + } + }, + { + "name": "upload", + "object": "bucket", + "arguments": { + "filename": "filename", + "source": { + "$$hexBytes": "1122334455" + } + }, + "expectError": { + "isTimeoutError": true + } + } + ] + } + ] +} diff --git a/test/csot/legacy-timeouts.json b/test/csot/legacy-timeouts.json new file mode 100644 index 0000000000..3a2d2eaefb --- /dev/null +++ b/test/csot/legacy-timeouts.json @@ -0,0 +1,379 @@ +{ + "description": "legacy timeouts continue to work if timeoutMS is not set", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "test", + "documents": [] + } + ], + "tests": [ + { + "description": "socketTimeoutMS is not used to derive a maxTimeMS command field", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "observeEvents": [ + "commandStartedEvent" + ], + "uriOptions": { + "socketTimeoutMS": 50000 + } + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "insertOne", + "object": "collection", + "arguments": { + "document": { + "x": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "waitQueueTimeoutMS is not used to derive a maxTimeMS command field", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { 
+ "client": { + "id": "client", + "observeEvents": [ + "commandStartedEvent" + ], + "uriOptions": { + "waitQueueTimeoutMS": 50000 + } + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "insertOne", + "object": "collection", + "arguments": { + "document": { + "x": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "wTimeoutMS is not used to derive a maxTimeMS command field", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "observeEvents": [ + "commandStartedEvent" + ], + "uriOptions": { + "wTimeoutMS": 50000 + } + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "insertOne", + "object": "collection", + "arguments": { + "document": { + "x": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$exists": false + }, + "writeConcern": { + "wtimeout": 50000 + } + } + } + } + ] + } + ] + }, + { + "description": "maxTimeMS option is used directly as the maxTimeMS field on a command", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "estimatedDocumentCount", + "object": "collection", + "arguments": { + "maxTimeMS": 50000 + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "count", + "databaseName": "test", + "command": { + "count": "coll", + "maxTimeMS": 50000 + } + } + } + ] + } + ] + }, + { + "description": "maxCommitTimeMS option is used directly as the maxTimeMS field on a commitTransaction command", + "runOnRequirements": [ + { + "topologies": [ + "replicaset", + "sharded-replicaset" + ] + } + ], + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "session": { + "id": "session", + "client": "client", + "sessionOptions": { + "defaultTransactionOptions": { + "maxCommitTimeMS": 1000 + } + } + } + } + ] + } + }, + { + "name": "startTransaction", + "object": "session" + }, + { + "name": "insertOne", + "object": "collection", + "arguments": { + "document": { + "_id": 1 + }, + "session": "session" + } + }, + { + "name": "commitTransaction", + 
"object": "session" + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "commitTransaction", + "databaseName": "admin", + "command": { + "commitTransaction": 1, + "maxTimeMS": 1000 + } + } + } + ] + } + ] + } + ] +} diff --git a/test/csot/non-tailable-cursors.json b/test/csot/non-tailable-cursors.json new file mode 100644 index 0000000000..0a5448a6bb --- /dev/null +++ b/test/csot/non-tailable-cursors.json @@ -0,0 +1,541 @@ +{ + "description": "timeoutMS behaves correctly for non-tailable cursors", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "createEntities": [ + { + "client": { + "id": "failPointClient", + "useMultipleMongoses": false + } + }, + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 10 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "test", + "documents": [ + { + "_id": 0 + }, + { + "_id": 1 + }, + { + "_id": 2 + } + ] + }, + { + "collectionName": "aggregateOutputColl", + "databaseName": "test", + "documents": [] + } + ], + "tests": [ + { + "description": "timeoutMS applied to find if timeoutMode is cursor_lifetime", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "find", + "object": "collection", + "arguments": { + "filter": {}, + "timeoutMode": "cursorLifetime" + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "remaining timeoutMS applied to getMore if timeoutMode is unset", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "find", + "getMore" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "find", + "object": "collection", + "arguments": { + "filter": {}, + "timeoutMS": 20, + "batchSize": 2 + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "getMore", + "databaseName": "test", + "command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "collection": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } 
+ ] + } + ] + }, + { + "description": "remaining timeoutMS applied to getMore if timeoutMode is cursor_lifetime", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "find", + "getMore" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "find", + "object": "collection", + "arguments": { + "filter": {}, + "timeoutMode": "cursorLifetime", + "timeoutMS": 20, + "batchSize": 2 + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "getMore", + "databaseName": "test", + "command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "collection": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS applied to find if timeoutMode is iteration", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "find", + "object": "collection", + "arguments": { + "filter": {}, + "timeoutMode": "iteration" + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS is refreshed for getMore if timeoutMode is iteration - success", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "find", + "getMore" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "find", + "object": "collection", + "arguments": { + "filter": {}, + "timeoutMode": "iteration", + "timeoutMS": 20, + "batchSize": 2 + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "getMore", + "databaseName": "test", + "command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "collection": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS is refreshed for getMore if timeoutMode is iteration - failure", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "getMore" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "find", + "object": "collection", + "arguments": { + "filter": {}, + "timeoutMode": "iteration", + 
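These cursor tests distinguish the spec's two timeoutMode values: cursorLifetime charges the find and every getMore against one budget (so getMore is sent without its own maxTimeMS), while iteration gives each command a fresh budget and is rejected for aggregate pipelines ending in $out or $merge. Not every driver exposes timeoutMode directly; in PyMongo, operations inside a pymongo.timeout block share its deadline, which matches the cursorLifetime behavior (sketch, same assumptions as above)::

    import pymongo
    from pymongo import MongoClient

    coll = MongoClient().test.coll
    with pymongo.timeout(0.02):
        # The find and any getMore issued while draining the cursor all
        # count against the same 20ms budget (cursorLifetime semantics).
        docs = list(coll.find({}, batch_size=2))
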
"batchSize": 2 + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "getMore", + "databaseName": "test", + "command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "collection": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "aggregate with $out errors if timeoutMode is iteration", + "operations": [ + { + "name": "aggregate", + "object": "collection", + "arguments": { + "pipeline": [ + { + "$out": "aggregateOutputColl" + } + ], + "timeoutMS": 100, + "timeoutMode": "iteration" + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [] + } + ] + }, + { + "description": "aggregate with $merge errors if timeoutMode is iteration", + "operations": [ + { + "name": "aggregate", + "object": "collection", + "arguments": { + "pipeline": [ + { + "$merge": "aggregateOutputColl" + } + ], + "timeoutMS": 100, + "timeoutMode": "iteration" + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [] + } + ] + } + ] +} diff --git a/test/csot/override-collection-timeoutMS.json b/test/csot/override-collection-timeoutMS.json new file mode 100644 index 0000000000..7d2c663fc1 --- /dev/null +++ b/test/csot/override-collection-timeoutMS.json @@ -0,0 +1,3498 @@ +{ + "description": "timeoutMS can be overridden for a MongoCollection", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.4", + "topologies": [ + "replicaset", + "sharded-replicaset" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "failPointClient", + "useMultipleMongoses": false + } + }, + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 10 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "test", + "documents": [] + } + ], + "tests": [ + { + "description": "timeoutMS can be configured on a MongoCollection - aggregate on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll", + "collectionOptions": { + "timeoutMS": 1000 + } + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "aggregate", + "object": "collection", + "arguments": { + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoCollection - aggregate on collection", + 
"operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll", + "collectionOptions": { + "timeoutMS": 0 + } + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "aggregate", + "object": "collection", + "arguments": { + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoCollection - count on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll", + "collectionOptions": { + "timeoutMS": 1000 + } + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "count" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "count", + "object": "collection", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "count", + "databaseName": "test", + "command": { + "count": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoCollection - count on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll", + "collectionOptions": { + "timeoutMS": 0 + } + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "count" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "count", + "object": "collection", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "count", + "databaseName": "test", + "command": { + "count": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoCollection - countDocuments on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll", + "collectionOptions": { + "timeoutMS": 1000 + } + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + 
"failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "countDocuments", + "object": "collection", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoCollection - countDocuments on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll", + "collectionOptions": { + "timeoutMS": 0 + } + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "countDocuments", + "object": "collection", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoCollection - estimatedDocumentCount on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll", + "collectionOptions": { + "timeoutMS": 1000 + } + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "count" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "estimatedDocumentCount", + "object": "collection" + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "count", + "databaseName": "test", + "command": { + "count": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoCollection - estimatedDocumentCount on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll", + "collectionOptions": { + "timeoutMS": 0 + } + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "count" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "estimatedDocumentCount", + "object": "collection" + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "count", + "databaseName": "test", + "command": { + "count": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + 
"description": "timeoutMS can be configured on a MongoCollection - distinct on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll", + "collectionOptions": { + "timeoutMS": 1000 + } + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "distinct" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "distinct", + "object": "collection", + "arguments": { + "fieldName": "x", + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "distinct", + "databaseName": "test", + "command": { + "distinct": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoCollection - distinct on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll", + "collectionOptions": { + "timeoutMS": 0 + } + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "distinct" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "distinct", + "object": "collection", + "arguments": { + "fieldName": "x", + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "distinct", + "databaseName": "test", + "command": { + "distinct": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoCollection - find on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll", + "collectionOptions": { + "timeoutMS": 1000 + } + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "find", + "object": "collection", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoCollection - find on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll", + "collectionOptions": { + "timeoutMS": 0 + } + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { 
+ "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "find", + "object": "collection", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoCollection - findOne on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll", + "collectionOptions": { + "timeoutMS": 1000 + } + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "findOne", + "object": "collection", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoCollection - findOne on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll", + "collectionOptions": { + "timeoutMS": 0 + } + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "findOne", + "object": "collection", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoCollection - listIndexes on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll", + "collectionOptions": { + "timeoutMS": 1000 + } + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listIndexes" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "listIndexes", + "object": "collection" + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listIndexes", + "databaseName": "test", + "command": { + "listIndexes": "coll", + "maxTimeMS": { + 
"$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoCollection - listIndexes on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll", + "collectionOptions": { + "timeoutMS": 0 + } + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listIndexes" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "listIndexes", + "object": "collection" + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listIndexes", + "databaseName": "test", + "command": { + "listIndexes": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoCollection - listIndexNames on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll", + "collectionOptions": { + "timeoutMS": 1000 + } + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listIndexes" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "listIndexNames", + "object": "collection" + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listIndexes", + "databaseName": "test", + "command": { + "listIndexes": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoCollection - listIndexNames on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll", + "collectionOptions": { + "timeoutMS": 0 + } + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listIndexes" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "listIndexNames", + "object": "collection" + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listIndexes", + "databaseName": "test", + "command": { + "listIndexes": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoCollection - createChangeStream on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll", + "collectionOptions": { + "timeoutMS": 1000 + } + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + 
"client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection", + "arguments": { + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoCollection - createChangeStream on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll", + "collectionOptions": { + "timeoutMS": 0 + } + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection", + "arguments": { + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoCollection - insertOne on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll", + "collectionOptions": { + "timeoutMS": 1000 + } + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "insertOne", + "object": "collection", + "arguments": { + "document": { + "x": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoCollection - insertOne on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll", + "collectionOptions": { + "timeoutMS": 0 + } + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "insertOne", + "object": "collection", + "arguments": { + "document": { + "x": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { 
+ "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoCollection - insertMany on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll", + "collectionOptions": { + "timeoutMS": 1000 + } + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "insertMany", + "object": "collection", + "arguments": { + "documents": [ + { + "x": 1 + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoCollection - insertMany on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll", + "collectionOptions": { + "timeoutMS": 0 + } + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "insertMany", + "object": "collection", + "arguments": { + "documents": [ + { + "x": 1 + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoCollection - deleteOne on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll", + "collectionOptions": { + "timeoutMS": 1000 + } + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "delete" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "deleteOne", + "object": "collection", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "delete", + "databaseName": "test", + "command": { + "delete": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoCollection - deleteOne on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "collection": 
{ + "id": "collection", + "database": "database", + "collectionName": "coll", + "collectionOptions": { + "timeoutMS": 0 + } + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "delete" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "deleteOne", + "object": "collection", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "delete", + "databaseName": "test", + "command": { + "delete": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoCollection - deleteMany on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll", + "collectionOptions": { + "timeoutMS": 1000 + } + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "delete" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "deleteMany", + "object": "collection", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "delete", + "databaseName": "test", + "command": { + "delete": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoCollection - deleteMany on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll", + "collectionOptions": { + "timeoutMS": 0 + } + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "delete" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "deleteMany", + "object": "collection", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "delete", + "databaseName": "test", + "command": { + "delete": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoCollection - replaceOne on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll", + "collectionOptions": { + "timeoutMS": 1000 + } + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "update" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "replaceOne", 
+ "object": "collection", + "arguments": { + "filter": {}, + "replacement": { + "x": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "update", + "databaseName": "test", + "command": { + "update": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoCollection - replaceOne on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll", + "collectionOptions": { + "timeoutMS": 0 + } + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "update" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "replaceOne", + "object": "collection", + "arguments": { + "filter": {}, + "replacement": { + "x": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "update", + "databaseName": "test", + "command": { + "update": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoCollection - updateOne on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll", + "collectionOptions": { + "timeoutMS": 1000 + } + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "update" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "updateOne", + "object": "collection", + "arguments": { + "filter": {}, + "update": { + "$set": { + "x": 1 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "update", + "databaseName": "test", + "command": { + "update": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoCollection - updateOne on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll", + "collectionOptions": { + "timeoutMS": 0 + } + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "update" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "updateOne", + "object": "collection", + "arguments": { + "filter": {}, + "update": { + "$set": { + "x": 1 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "update", + "databaseName": "test", + "command": { + "update": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } 
+ ] + }, + { + "description": "timeoutMS can be configured on a MongoCollection - updateMany on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll", + "collectionOptions": { + "timeoutMS": 1000 + } + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "update" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "updateMany", + "object": "collection", + "arguments": { + "filter": {}, + "update": { + "$set": { + "x": 1 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "update", + "databaseName": "test", + "command": { + "update": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoCollection - updateMany on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll", + "collectionOptions": { + "timeoutMS": 0 + } + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "update" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "updateMany", + "object": "collection", + "arguments": { + "filter": {}, + "update": { + "$set": { + "x": 1 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "update", + "databaseName": "test", + "command": { + "update": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoCollection - findOneAndDelete on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll", + "collectionOptions": { + "timeoutMS": 1000 + } + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "findOneAndDelete", + "object": "collection", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoCollection - findOneAndDelete on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll", + 
"collectionOptions": { + "timeoutMS": 0 + } + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "findOneAndDelete", + "object": "collection", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoCollection - findOneAndReplace on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll", + "collectionOptions": { + "timeoutMS": 1000 + } + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "findOneAndReplace", + "object": "collection", + "arguments": { + "filter": {}, + "replacement": { + "x": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoCollection - findOneAndReplace on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll", + "collectionOptions": { + "timeoutMS": 0 + } + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "findOneAndReplace", + "object": "collection", + "arguments": { + "filter": {}, + "replacement": { + "x": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoCollection - findOneAndUpdate on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll", + "collectionOptions": { + "timeoutMS": 1000 + } + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "findAndModify" 
+ ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "findOneAndUpdate", + "object": "collection", + "arguments": { + "filter": {}, + "update": { + "$set": { + "x": 1 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoCollection - findOneAndUpdate on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll", + "collectionOptions": { + "timeoutMS": 0 + } + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "findOneAndUpdate", + "object": "collection", + "arguments": { + "filter": {}, + "update": { + "$set": { + "x": 1 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoCollection - bulkWrite on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll", + "collectionOptions": { + "timeoutMS": 1000 + } + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "bulkWrite", + "object": "collection", + "arguments": { + "requests": [ + { + "insertOne": { + "document": { + "_id": 1 + } + } + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoCollection - bulkWrite on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll", + "collectionOptions": { + "timeoutMS": 0 + } + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "bulkWrite", + "object": "collection", + "arguments": { + "requests": [ + { + "insertOne": { + "document": { + "_id": 1 + } + } + } + ] + } + } + ], + "expectEvents": [ + { + 
"client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoCollection - createIndex on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll", + "collectionOptions": { + "timeoutMS": 1000 + } + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "createIndexes" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "createIndex", + "object": "collection", + "arguments": { + "keys": { + "x": 1 + }, + "name": "x_1" + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "createIndexes", + "databaseName": "test", + "command": { + "createIndexes": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoCollection - createIndex on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll", + "collectionOptions": { + "timeoutMS": 0 + } + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "createIndexes" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "createIndex", + "object": "collection", + "arguments": { + "keys": { + "x": 1 + }, + "name": "x_1" + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "createIndexes", + "databaseName": "test", + "command": { + "createIndexes": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoCollection - dropIndex on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll", + "collectionOptions": { + "timeoutMS": 1000 + } + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "dropIndexes" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "dropIndex", + "object": "collection", + "arguments": { + "name": "x_1" + }, + "expectError": { + "isClientError": false, + "isTimeoutError": false + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "dropIndexes", + "databaseName": "test", + "command": { + "dropIndexes": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set 
to 0 on a MongoCollection - dropIndex on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll", + "collectionOptions": { + "timeoutMS": 0 + } + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "dropIndexes" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "dropIndex", + "object": "collection", + "arguments": { + "name": "x_1" + }, + "expectError": { + "isClientError": false, + "isTimeoutError": false + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "dropIndexes", + "databaseName": "test", + "command": { + "dropIndexes": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoCollection - dropIndexes on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll", + "collectionOptions": { + "timeoutMS": 1000 + } + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "dropIndexes" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "dropIndexes", + "object": "collection" + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "dropIndexes", + "databaseName": "test", + "command": { + "dropIndexes": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoCollection - dropIndexes on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll", + "collectionOptions": { + "timeoutMS": 0 + } + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "dropIndexes" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "dropIndexes", + "object": "collection" + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "dropIndexes", + "databaseName": "test", + "command": { + "dropIndexes": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + } + ] +} diff --git a/test/csot/override-database-timeoutMS.json b/test/csot/override-database-timeoutMS.json new file mode 100644 index 0000000000..9c1b77f903 --- /dev/null +++ b/test/csot/override-database-timeoutMS.json @@ -0,0 +1,4622 @@ +{ + "description": "timeoutMS can be overridden for a MongoDatabase", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.4", + "topologies": [ + "replicaset", + "sharded-replicaset" + ] + } + ], + 
"createEntities": [ + { + "client": { + "id": "failPointClient", + "useMultipleMongoses": false + } + }, + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 10 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "test", + "documents": [] + } + ], + "tests": [ + { + "description": "timeoutMS can be configured on a MongoDatabase - aggregate on database", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test", + "databaseOptions": { + "timeoutMS": 1000 + } + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "aggregate", + "object": "database", + "arguments": { + "pipeline": [ + { + "$listLocalSessions": {} + }, + { + "$limit": 1 + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoDatabase - aggregate on database", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test", + "databaseOptions": { + "timeoutMS": 0 + } + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "aggregate", + "object": "database", + "arguments": { + "pipeline": [ + { + "$listLocalSessions": {} + }, + { + "$limit": 1 + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": 1, + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoDatabase - listCollections on database", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test", + "databaseOptions": { + "timeoutMS": 1000 + } + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listCollections" + ], + "blockConnection": 
true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "listCollections", + "object": "database", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listCollections", + "databaseName": "test", + "command": { + "listCollections": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoDatabase - listCollections on database", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test", + "databaseOptions": { + "timeoutMS": 0 + } + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listCollections" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "listCollections", + "object": "database", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listCollections", + "databaseName": "test", + "command": { + "listCollections": 1, + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoDatabase - listCollectionNames on database", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test", + "databaseOptions": { + "timeoutMS": 1000 + } + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listCollections" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "listCollectionNames", + "object": "database", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listCollections", + "databaseName": "test", + "command": { + "listCollections": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoDatabase - listCollectionNames on database", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test", + "databaseOptions": { + "timeoutMS": 0 + } + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listCollections" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "listCollectionNames", + 
"object": "database", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listCollections", + "databaseName": "test", + "command": { + "listCollections": 1, + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoDatabase - runCommand on database", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test", + "databaseOptions": { + "timeoutMS": 1000 + } + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "ping" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "command": { + "ping": 1 + }, + "commandName": "ping" + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "ping", + "databaseName": "test", + "command": { + "ping": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoDatabase - runCommand on database", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test", + "databaseOptions": { + "timeoutMS": 0 + } + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "ping" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "command": { + "ping": 1 + }, + "commandName": "ping" + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "ping", + "databaseName": "test", + "command": { + "ping": 1, + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoDatabase - createChangeStream on database", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test", + "databaseOptions": { + "timeoutMS": 1000 + } + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "createChangeStream", + "object": "database", + "arguments": { + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client", 
+ "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoDatabase - createChangeStream on database", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test", + "databaseOptions": { + "timeoutMS": 0 + } + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "createChangeStream", + "object": "database", + "arguments": { + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": 1, + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoDatabase - aggregate on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test", + "databaseOptions": { + "timeoutMS": 1000 + } + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "aggregate", + "object": "collection", + "arguments": { + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoDatabase - aggregate on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test", + "databaseOptions": { + "timeoutMS": 0 + } + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "aggregate", + "object": "collection", + "arguments": { + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "maxTimeMS": 
{ + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoDatabase - count on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test", + "databaseOptions": { + "timeoutMS": 1000 + } + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "count" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "count", + "object": "collection", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "count", + "databaseName": "test", + "command": { + "count": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoDatabase - count on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test", + "databaseOptions": { + "timeoutMS": 0 + } + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "count" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "count", + "object": "collection", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "count", + "databaseName": "test", + "command": { + "count": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoDatabase - countDocuments on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test", + "databaseOptions": { + "timeoutMS": 1000 + } + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "countDocuments", + "object": "collection", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoDatabase - countDocuments on collection", + "operations": [ + { + "name": 
"createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test", + "databaseOptions": { + "timeoutMS": 0 + } + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "countDocuments", + "object": "collection", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoDatabase - estimatedDocumentCount on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test", + "databaseOptions": { + "timeoutMS": 1000 + } + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "count" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "estimatedDocumentCount", + "object": "collection" + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "count", + "databaseName": "test", + "command": { + "count": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoDatabase - estimatedDocumentCount on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test", + "databaseOptions": { + "timeoutMS": 0 + } + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "count" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "estimatedDocumentCount", + "object": "collection" + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "count", + "databaseName": "test", + "command": { + "count": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoDatabase - distinct on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test", + "databaseOptions": { + "timeoutMS": 1000 + } + } + 
}, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "distinct" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "distinct", + "object": "collection", + "arguments": { + "fieldName": "x", + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "distinct", + "databaseName": "test", + "command": { + "distinct": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoDatabase - distinct on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test", + "databaseOptions": { + "timeoutMS": 0 + } + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "distinct" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "distinct", + "object": "collection", + "arguments": { + "fieldName": "x", + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "distinct", + "databaseName": "test", + "command": { + "distinct": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoDatabase - find on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test", + "databaseOptions": { + "timeoutMS": 1000 + } + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "find", + "object": "collection", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoDatabase - find on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test", + "databaseOptions": { + "timeoutMS": 0 + } + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": 
"testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "find", + "object": "collection", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoDatabase - findOne on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test", + "databaseOptions": { + "timeoutMS": 1000 + } + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "findOne", + "object": "collection", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoDatabase - findOne on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test", + "databaseOptions": { + "timeoutMS": 0 + } + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "findOne", + "object": "collection", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoDatabase - listIndexes on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test", + "databaseOptions": { + "timeoutMS": 1000 + } + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listIndexes" + ], + "blockConnection": true, + 
"blockTimeMS": 15 + } + } + } + }, + { + "name": "listIndexes", + "object": "collection" + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listIndexes", + "databaseName": "test", + "command": { + "listIndexes": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoDatabase - listIndexes on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test", + "databaseOptions": { + "timeoutMS": 0 + } + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listIndexes" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "listIndexes", + "object": "collection" + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listIndexes", + "databaseName": "test", + "command": { + "listIndexes": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoDatabase - listIndexNames on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test", + "databaseOptions": { + "timeoutMS": 1000 + } + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listIndexes" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "listIndexNames", + "object": "collection" + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listIndexes", + "databaseName": "test", + "command": { + "listIndexes": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoDatabase - listIndexNames on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test", + "databaseOptions": { + "timeoutMS": 0 + } + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listIndexes" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "listIndexNames", + "object": "collection" + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listIndexes", + 
"databaseName": "test", + "command": { + "listIndexes": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoDatabase - createChangeStream on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test", + "databaseOptions": { + "timeoutMS": 1000 + } + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection", + "arguments": { + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoDatabase - createChangeStream on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test", + "databaseOptions": { + "timeoutMS": 0 + } + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection", + "arguments": { + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoDatabase - insertOne on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test", + "databaseOptions": { + "timeoutMS": 1000 + } + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "insertOne", + "object": "collection", + "arguments": { + "document": { + "x": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } 
+ } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoDatabase - insertOne on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test", + "databaseOptions": { + "timeoutMS": 0 + } + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "insertOne", + "object": "collection", + "arguments": { + "document": { + "x": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoDatabase - insertMany on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test", + "databaseOptions": { + "timeoutMS": 1000 + } + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "insertMany", + "object": "collection", + "arguments": { + "documents": [ + { + "x": 1 + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoDatabase - insertMany on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test", + "databaseOptions": { + "timeoutMS": 0 + } + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "insertMany", + "object": "collection", + "arguments": { + "documents": [ + { + "x": 1 + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoDatabase - deleteOne on collection", + "operations": [ + { + 
"name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test", + "databaseOptions": { + "timeoutMS": 1000 + } + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "delete" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "deleteOne", + "object": "collection", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "delete", + "databaseName": "test", + "command": { + "delete": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoDatabase - deleteOne on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test", + "databaseOptions": { + "timeoutMS": 0 + } + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "delete" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "deleteOne", + "object": "collection", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "delete", + "databaseName": "test", + "command": { + "delete": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoDatabase - deleteMany on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test", + "databaseOptions": { + "timeoutMS": 1000 + } + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "delete" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "deleteMany", + "object": "collection", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "delete", + "databaseName": "test", + "command": { + "delete": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoDatabase - deleteMany on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test", + 
"databaseOptions": { + "timeoutMS": 0 + } + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "delete" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "deleteMany", + "object": "collection", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "delete", + "databaseName": "test", + "command": { + "delete": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoDatabase - replaceOne on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test", + "databaseOptions": { + "timeoutMS": 1000 + } + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "update" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "replaceOne", + "object": "collection", + "arguments": { + "filter": {}, + "replacement": { + "x": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "update", + "databaseName": "test", + "command": { + "update": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoDatabase - replaceOne on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test", + "databaseOptions": { + "timeoutMS": 0 + } + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "update" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "replaceOne", + "object": "collection", + "arguments": { + "filter": {}, + "replacement": { + "x": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "update", + "databaseName": "test", + "command": { + "update": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoDatabase - updateOne on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test", + "databaseOptions": { + "timeoutMS": 1000 + } + } + }, + { + "collection": { + "id": "collection", + "database": "database", + 
"collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "update" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "updateOne", + "object": "collection", + "arguments": { + "filter": {}, + "update": { + "$set": { + "x": 1 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "update", + "databaseName": "test", + "command": { + "update": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoDatabase - updateOne on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test", + "databaseOptions": { + "timeoutMS": 0 + } + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "update" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "updateOne", + "object": "collection", + "arguments": { + "filter": {}, + "update": { + "$set": { + "x": 1 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "update", + "databaseName": "test", + "command": { + "update": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoDatabase - updateMany on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test", + "databaseOptions": { + "timeoutMS": 1000 + } + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "update" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "updateMany", + "object": "collection", + "arguments": { + "filter": {}, + "update": { + "$set": { + "x": 1 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "update", + "databaseName": "test", + "command": { + "update": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoDatabase - updateMany on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test", + "databaseOptions": { + "timeoutMS": 0 + } + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + 
"name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "update" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "updateMany", + "object": "collection", + "arguments": { + "filter": {}, + "update": { + "$set": { + "x": 1 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "update", + "databaseName": "test", + "command": { + "update": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoDatabase - findOneAndDelete on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test", + "databaseOptions": { + "timeoutMS": 1000 + } + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "findOneAndDelete", + "object": "collection", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoDatabase - findOneAndDelete on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test", + "databaseOptions": { + "timeoutMS": 0 + } + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "findOneAndDelete", + "object": "collection", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoDatabase - findOneAndReplace on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test", + "databaseOptions": { + "timeoutMS": 1000 + } + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": 
"failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "findOneAndReplace", + "object": "collection", + "arguments": { + "filter": {}, + "replacement": { + "x": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoDatabase - findOneAndReplace on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test", + "databaseOptions": { + "timeoutMS": 0 + } + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "findOneAndReplace", + "object": "collection", + "arguments": { + "filter": {}, + "replacement": { + "x": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoDatabase - findOneAndUpdate on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test", + "databaseOptions": { + "timeoutMS": 1000 + } + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "findOneAndUpdate", + "object": "collection", + "arguments": { + "filter": {}, + "update": { + "$set": { + "x": 1 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoDatabase - findOneAndUpdate on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test", + "databaseOptions": { + "timeoutMS": 0 + } + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + 
"arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "findOneAndUpdate", + "object": "collection", + "arguments": { + "filter": {}, + "update": { + "$set": { + "x": 1 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoDatabase - bulkWrite on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test", + "databaseOptions": { + "timeoutMS": 1000 + } + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "bulkWrite", + "object": "collection", + "arguments": { + "requests": [ + { + "insertOne": { + "document": { + "_id": 1 + } + } + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoDatabase - bulkWrite on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test", + "databaseOptions": { + "timeoutMS": 0 + } + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "bulkWrite", + "object": "collection", + "arguments": { + "requests": [ + { + "insertOne": { + "document": { + "_id": 1 + } + } + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoDatabase - createIndex on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test", + "databaseOptions": { + "timeoutMS": 1000 + } + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", 
+ "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "createIndexes" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "createIndex", + "object": "collection", + "arguments": { + "keys": { + "x": 1 + }, + "name": "x_1" + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "createIndexes", + "databaseName": "test", + "command": { + "createIndexes": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoDatabase - createIndex on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test", + "databaseOptions": { + "timeoutMS": 0 + } + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "createIndexes" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "createIndex", + "object": "collection", + "arguments": { + "keys": { + "x": 1 + }, + "name": "x_1" + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "createIndexes", + "databaseName": "test", + "command": { + "createIndexes": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoDatabase - dropIndex on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test", + "databaseOptions": { + "timeoutMS": 1000 + } + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "dropIndexes" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "dropIndex", + "object": "collection", + "arguments": { + "name": "x_1" + }, + "expectError": { + "isClientError": false, + "isTimeoutError": false + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "dropIndexes", + "databaseName": "test", + "command": { + "dropIndexes": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoDatabase - dropIndex on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test", + "databaseOptions": { + "timeoutMS": 0 + } + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": 
"testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "dropIndexes" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "dropIndex", + "object": "collection", + "arguments": { + "name": "x_1" + }, + "expectError": { + "isClientError": false, + "isTimeoutError": false + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "dropIndexes", + "databaseName": "test", + "command": { + "dropIndexes": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoDatabase - dropIndexes on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test", + "databaseOptions": { + "timeoutMS": 1000 + } + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "dropIndexes" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "dropIndexes", + "object": "collection" + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "dropIndexes", + "databaseName": "test", + "command": { + "dropIndexes": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoDatabase - dropIndexes on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test", + "databaseOptions": { + "timeoutMS": 0 + } + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "dropIndexes" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "dropIndexes", + "object": "collection" + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "dropIndexes", + "databaseName": "test", + "command": { + "dropIndexes": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + } + ] +} diff --git a/test/csot/override-operation-timeoutMS.json b/test/csot/override-operation-timeoutMS.json new file mode 100644 index 0000000000..896b996ee8 --- /dev/null +++ b/test/csot/override-operation-timeoutMS.json @@ -0,0 +1,3577 @@ +{ + "description": "timeoutMS can be overridden for an operation", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.4", + "topologies": [ + "replicaset", + "sharded-replicaset" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "failPointClient", + "useMultipleMongoses": false + } + }, + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 10 + }, + 
"useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "test", + "documents": [] + } + ], + "tests": [ + { + "description": "timeoutMS can be configured for an operation - listDatabases on client", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listDatabases" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "listDatabases", + "object": "client", + "arguments": { + "timeoutMS": 1000, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listDatabases", + "databaseName": "admin", + "command": { + "listDatabases": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 for an operation - listDatabases on client", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listDatabases" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "listDatabases", + "object": "client", + "arguments": { + "timeoutMS": 0, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listDatabases", + "databaseName": "admin", + "command": { + "listDatabases": 1, + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured for an operation - listDatabaseNames on client", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listDatabases" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "listDatabaseNames", + "object": "client", + "arguments": { + "timeoutMS": 1000 + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listDatabases", + "databaseName": "admin", + "command": { + "listDatabases": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 for an operation - listDatabaseNames on client", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listDatabases" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "listDatabaseNames", + "object": "client", + "arguments": { + "timeoutMS": 0 + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listDatabases", + "databaseName": 
"admin", + "command": { + "listDatabases": 1, + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured for an operation - createChangeStream on client", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "createChangeStream", + "object": "client", + "arguments": { + "timeoutMS": 1000, + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "admin", + "command": { + "aggregate": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 for an operation - createChangeStream on client", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "createChangeStream", + "object": "client", + "arguments": { + "timeoutMS": 0, + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "admin", + "command": { + "aggregate": 1, + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured for an operation - aggregate on database", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "aggregate", + "object": "database", + "arguments": { + "timeoutMS": 1000, + "pipeline": [ + { + "$listLocalSessions": {} + }, + { + "$limit": 1 + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 for an operation - aggregate on database", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "aggregate", + "object": "database", + "arguments": { + "timeoutMS": 0, + "pipeline": [ + { + "$listLocalSessions": {} + }, + { + "$limit": 1 + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": 1, + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured for an operation - listCollections on database", + "operations": [ + { + "name": 
"failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listCollections" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "listCollections", + "object": "database", + "arguments": { + "timeoutMS": 1000, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listCollections", + "databaseName": "test", + "command": { + "listCollections": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 for an operation - listCollections on database", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listCollections" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "listCollections", + "object": "database", + "arguments": { + "timeoutMS": 0, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listCollections", + "databaseName": "test", + "command": { + "listCollections": 1, + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured for an operation - listCollectionNames on database", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listCollections" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "listCollectionNames", + "object": "database", + "arguments": { + "timeoutMS": 1000, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listCollections", + "databaseName": "test", + "command": { + "listCollections": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 for an operation - listCollectionNames on database", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listCollections" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "listCollectionNames", + "object": "database", + "arguments": { + "timeoutMS": 0, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listCollections", + "databaseName": "test", + "command": { + "listCollections": 1, + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured for an operation - runCommand on database", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "ping" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + 
}, + { + "name": "runCommand", + "object": "database", + "arguments": { + "timeoutMS": 1000, + "command": { + "ping": 1 + }, + "commandName": "ping" + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "ping", + "databaseName": "test", + "command": { + "ping": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 for an operation - runCommand on database", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "ping" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "timeoutMS": 0, + "command": { + "ping": 1 + }, + "commandName": "ping" + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "ping", + "databaseName": "test", + "command": { + "ping": 1, + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured for an operation - createChangeStream on database", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "createChangeStream", + "object": "database", + "arguments": { + "timeoutMS": 1000, + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 for an operation - createChangeStream on database", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "createChangeStream", + "object": "database", + "arguments": { + "timeoutMS": 0, + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": 1, + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured for an operation - aggregate on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "aggregate", + "object": "collection", + "arguments": { + "timeoutMS": 1000, + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + 
"maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 for an operation - aggregate on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "aggregate", + "object": "collection", + "arguments": { + "timeoutMS": 0, + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured for an operation - count on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "count" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "count", + "object": "collection", + "arguments": { + "timeoutMS": 1000, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "count", + "databaseName": "test", + "command": { + "count": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 for an operation - count on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "count" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "count", + "object": "collection", + "arguments": { + "timeoutMS": 0, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "count", + "databaseName": "test", + "command": { + "count": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured for an operation - countDocuments on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "countDocuments", + "object": "collection", + "arguments": { + "timeoutMS": 1000, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 for an operation - countDocuments on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + 
"failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "countDocuments", + "object": "collection", + "arguments": { + "timeoutMS": 0, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured for an operation - estimatedDocumentCount on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "count" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "estimatedDocumentCount", + "object": "collection", + "arguments": { + "timeoutMS": 1000 + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "count", + "databaseName": "test", + "command": { + "count": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 for an operation - estimatedDocumentCount on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "count" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "estimatedDocumentCount", + "object": "collection", + "arguments": { + "timeoutMS": 0 + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "count", + "databaseName": "test", + "command": { + "count": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured for an operation - distinct on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "distinct" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "distinct", + "object": "collection", + "arguments": { + "timeoutMS": 1000, + "fieldName": "x", + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "distinct", + "databaseName": "test", + "command": { + "distinct": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 for an operation - distinct on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "distinct" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "distinct", + "object": "collection", + "arguments": { + "timeoutMS": 0, + "fieldName": "x", + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "distinct", + 
"databaseName": "test", + "command": { + "distinct": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured for an operation - find on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "find", + "object": "collection", + "arguments": { + "timeoutMS": 1000, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 for an operation - find on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "find", + "object": "collection", + "arguments": { + "timeoutMS": 0, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured for an operation - findOne on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "findOne", + "object": "collection", + "arguments": { + "timeoutMS": 1000, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 for an operation - findOne on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "findOne", + "object": "collection", + "arguments": { + "timeoutMS": 0, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured for an operation - listIndexes on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ 
+ "listIndexes" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "listIndexes", + "object": "collection", + "arguments": { + "timeoutMS": 1000 + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listIndexes", + "databaseName": "test", + "command": { + "listIndexes": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 for an operation - listIndexes on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listIndexes" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "listIndexes", + "object": "collection", + "arguments": { + "timeoutMS": 0 + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listIndexes", + "databaseName": "test", + "command": { + "listIndexes": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured for an operation - listIndexNames on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listIndexes" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "listIndexNames", + "object": "collection", + "arguments": { + "timeoutMS": 1000 + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listIndexes", + "databaseName": "test", + "command": { + "listIndexes": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 for an operation - listIndexNames on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listIndexes" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "listIndexNames", + "object": "collection", + "arguments": { + "timeoutMS": 0 + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listIndexes", + "databaseName": "test", + "command": { + "listIndexes": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured for an operation - createChangeStream on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection", + "arguments": { + "timeoutMS": 1000, + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + 
"aggregate": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 for an operation - createChangeStream on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection", + "arguments": { + "timeoutMS": 0, + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured for an operation - insertOne on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "insertOne", + "object": "collection", + "arguments": { + "timeoutMS": 1000, + "document": { + "x": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 for an operation - insertOne on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "insertOne", + "object": "collection", + "arguments": { + "timeoutMS": 0, + "document": { + "x": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured for an operation - insertMany on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "insertMany", + "object": "collection", + "arguments": { + "timeoutMS": 1000, + "documents": [ + { + "x": 1 + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 for an operation - insertMany on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + 
"failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "insertMany", + "object": "collection", + "arguments": { + "timeoutMS": 0, + "documents": [ + { + "x": 1 + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured for an operation - deleteOne on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "delete" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "deleteOne", + "object": "collection", + "arguments": { + "timeoutMS": 1000, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "delete", + "databaseName": "test", + "command": { + "delete": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 for an operation - deleteOne on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "delete" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "deleteOne", + "object": "collection", + "arguments": { + "timeoutMS": 0, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "delete", + "databaseName": "test", + "command": { + "delete": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured for an operation - deleteMany on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "delete" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "deleteMany", + "object": "collection", + "arguments": { + "timeoutMS": 1000, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "delete", + "databaseName": "test", + "command": { + "delete": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 for an operation - deleteMany on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "delete" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "deleteMany", + "object": "collection", + "arguments": { + "timeoutMS": 0, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + 
"commandStartedEvent": { + "commandName": "delete", + "databaseName": "test", + "command": { + "delete": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured for an operation - replaceOne on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "update" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "replaceOne", + "object": "collection", + "arguments": { + "timeoutMS": 1000, + "filter": {}, + "replacement": { + "x": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "update", + "databaseName": "test", + "command": { + "update": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 for an operation - replaceOne on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "update" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "replaceOne", + "object": "collection", + "arguments": { + "timeoutMS": 0, + "filter": {}, + "replacement": { + "x": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "update", + "databaseName": "test", + "command": { + "update": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured for an operation - updateOne on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "update" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "updateOne", + "object": "collection", + "arguments": { + "timeoutMS": 1000, + "filter": {}, + "update": { + "$set": { + "x": 1 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "update", + "databaseName": "test", + "command": { + "update": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 for an operation - updateOne on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "update" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "updateOne", + "object": "collection", + "arguments": { + "timeoutMS": 0, + "filter": {}, + "update": { + "$set": { + "x": 1 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "update", + "databaseName": "test", + "command": { + "update": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured for an operation - 
updateMany on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "update" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "updateMany", + "object": "collection", + "arguments": { + "timeoutMS": 1000, + "filter": {}, + "update": { + "$set": { + "x": 1 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "update", + "databaseName": "test", + "command": { + "update": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 for an operation - updateMany on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "update" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "updateMany", + "object": "collection", + "arguments": { + "timeoutMS": 0, + "filter": {}, + "update": { + "$set": { + "x": 1 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "update", + "databaseName": "test", + "command": { + "update": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured for an operation - findOneAndDelete on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "findOneAndDelete", + "object": "collection", + "arguments": { + "timeoutMS": 1000, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 for an operation - findOneAndDelete on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "findOneAndDelete", + "object": "collection", + "arguments": { + "timeoutMS": 0, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured for an operation - findOneAndReplace on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { 
+ "failCommands": [ + "findAndModify" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "findOneAndReplace", + "object": "collection", + "arguments": { + "timeoutMS": 1000, + "filter": {}, + "replacement": { + "x": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 for an operation - findOneAndReplace on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "findOneAndReplace", + "object": "collection", + "arguments": { + "timeoutMS": 0, + "filter": {}, + "replacement": { + "x": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured for an operation - findOneAndUpdate on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "findOneAndUpdate", + "object": "collection", + "arguments": { + "timeoutMS": 1000, + "filter": {}, + "update": { + "$set": { + "x": 1 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 for an operation - findOneAndUpdate on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "findOneAndUpdate", + "object": "collection", + "arguments": { + "timeoutMS": 0, + "filter": {}, + "update": { + "$set": { + "x": 1 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured for an operation - bulkWrite on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": 
"bulkWrite", + "object": "collection", + "arguments": { + "timeoutMS": 1000, + "requests": [ + { + "insertOne": { + "document": { + "_id": 1 + } + } + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 for an operation - bulkWrite on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "bulkWrite", + "object": "collection", + "arguments": { + "timeoutMS": 0, + "requests": [ + { + "insertOne": { + "document": { + "_id": 1 + } + } + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured for an operation - createIndex on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "createIndexes" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "createIndex", + "object": "collection", + "arguments": { + "timeoutMS": 1000, + "keys": { + "x": 1 + }, + "name": "x_1" + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "createIndexes", + "databaseName": "test", + "command": { + "createIndexes": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 for an operation - createIndex on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "createIndexes" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "createIndex", + "object": "collection", + "arguments": { + "timeoutMS": 0, + "keys": { + "x": 1 + }, + "name": "x_1" + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "createIndexes", + "databaseName": "test", + "command": { + "createIndexes": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured for an operation - dropIndex on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "dropIndexes" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "dropIndex", + "object": "collection", + "arguments": { + "timeoutMS": 1000, + "name": "x_1" + }, + "expectError": { + "isTimeoutError": false + } + } + ], + "expectEvents": 
[ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "dropIndexes", + "databaseName": "test", + "command": { + "dropIndexes": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 for an operation - dropIndex on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "dropIndexes" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "dropIndex", + "object": "collection", + "arguments": { + "timeoutMS": 0, + "name": "x_1" + }, + "expectError": { + "isTimeoutError": false + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "dropIndexes", + "databaseName": "test", + "command": { + "dropIndexes": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured for an operation - dropIndexes on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "dropIndexes" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "dropIndexes", + "object": "collection", + "arguments": { + "timeoutMS": 1000 + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "dropIndexes", + "databaseName": "test", + "command": { + "dropIndexes": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 for an operation - dropIndexes on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "dropIndexes" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "dropIndexes", + "object": "collection", + "arguments": { + "timeoutMS": 0 + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "dropIndexes", + "databaseName": "test", + "command": { + "dropIndexes": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + } + ] +} diff --git a/test/csot/retryability-legacy-timeouts.json b/test/csot/retryability-legacy-timeouts.json new file mode 100644 index 0000000000..cd2af7fab6 --- /dev/null +++ b/test/csot/retryability-legacy-timeouts.json @@ -0,0 +1,3042 @@ +{ + "description": "legacy timeouts behave correctly for retryable operations", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.4", + "topologies": [ + "replicaset", + "sharded-replicaset" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "failPointClient", + "useMultipleMongoses": false + } + }, + { + "client": { + "id": "client", + "uriOptions": { + "socketTimeoutMS": 50 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + 
"databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "test", + "documents": [] + } + ], + "tests": [ + { + "description": "operation succeeds after one socket timeout - insertOne on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "blockConnection": true, + "blockTimeMS": 55 + } + } + } + }, + { + "name": "insertOne", + "object": "collection", + "arguments": { + "document": { + "x": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll" + } + } + }, + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll" + } + } + } + ] + } + ] + }, + { + "description": "operation fails after two consecutive socket timeouts - insertOne on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "insert" + ], + "blockConnection": true, + "blockTimeMS": 55 + } + } + } + }, + { + "name": "insertOne", + "object": "collection", + "arguments": { + "document": { + "x": 1 + } + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll" + } + } + }, + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll" + } + } + } + ] + } + ] + }, + { + "description": "operation succeeds after one socket timeout - insertMany on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "blockConnection": true, + "blockTimeMS": 55 + } + } + } + }, + { + "name": "insertMany", + "object": "collection", + "arguments": { + "documents": [ + { + "x": 1 + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll" + } + } + }, + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll" + } + } + } + ] + } + ] + }, + { + "description": "operation fails after two consecutive socket timeouts - insertMany on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "insert" + ], + "blockConnection": true, + "blockTimeMS": 55 + } + } + } + }, + { + "name": "insertMany", + "object": "collection", + "arguments": { + "documents": [ + { + "x": 1 + } + ] + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + 
"events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll" + } + } + }, + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll" + } + } + } + ] + } + ] + }, + { + "description": "operation succeeds after one socket timeout - deleteOne on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "delete" + ], + "blockConnection": true, + "blockTimeMS": 55 + } + } + } + }, + { + "name": "deleteOne", + "object": "collection", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "delete", + "databaseName": "test", + "command": { + "delete": "coll" + } + } + }, + { + "commandStartedEvent": { + "commandName": "delete", + "databaseName": "test", + "command": { + "delete": "coll" + } + } + } + ] + } + ] + }, + { + "description": "operation fails after two consecutive socket timeouts - deleteOne on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "delete" + ], + "blockConnection": true, + "blockTimeMS": 55 + } + } + } + }, + { + "name": "deleteOne", + "object": "collection", + "arguments": { + "filter": {} + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "delete", + "databaseName": "test", + "command": { + "delete": "coll" + } + } + }, + { + "commandStartedEvent": { + "commandName": "delete", + "databaseName": "test", + "command": { + "delete": "coll" + } + } + } + ] + } + ] + }, + { + "description": "operation succeeds after one socket timeout - replaceOne on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "update" + ], + "blockConnection": true, + "blockTimeMS": 55 + } + } + } + }, + { + "name": "replaceOne", + "object": "collection", + "arguments": { + "filter": {}, + "replacement": { + "x": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "update", + "databaseName": "test", + "command": { + "update": "coll" + } + } + }, + { + "commandStartedEvent": { + "commandName": "update", + "databaseName": "test", + "command": { + "update": "coll" + } + } + } + ] + } + ] + }, + { + "description": "operation fails after two consecutive socket timeouts - replaceOne on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "update" + ], + "blockConnection": true, + "blockTimeMS": 55 + } + } + } + }, + { + "name": "replaceOne", + "object": "collection", + "arguments": { + "filter": {}, + "replacement": { + "x": 1 + } + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + 
"client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "update", + "databaseName": "test", + "command": { + "update": "coll" + } + } + }, + { + "commandStartedEvent": { + "commandName": "update", + "databaseName": "test", + "command": { + "update": "coll" + } + } + } + ] + } + ] + }, + { + "description": "operation succeeds after one socket timeout - updateOne on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "update" + ], + "blockConnection": true, + "blockTimeMS": 55 + } + } + } + }, + { + "name": "updateOne", + "object": "collection", + "arguments": { + "filter": {}, + "update": { + "$set": { + "x": 1 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "update", + "databaseName": "test", + "command": { + "update": "coll" + } + } + }, + { + "commandStartedEvent": { + "commandName": "update", + "databaseName": "test", + "command": { + "update": "coll" + } + } + } + ] + } + ] + }, + { + "description": "operation fails after two consecutive socket timeouts - updateOne on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "update" + ], + "blockConnection": true, + "blockTimeMS": 55 + } + } + } + }, + { + "name": "updateOne", + "object": "collection", + "arguments": { + "filter": {}, + "update": { + "$set": { + "x": 1 + } + } + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "update", + "databaseName": "test", + "command": { + "update": "coll" + } + } + }, + { + "commandStartedEvent": { + "commandName": "update", + "databaseName": "test", + "command": { + "update": "coll" + } + } + } + ] + } + ] + }, + { + "description": "operation succeeds after one socket timeout - findOneAndDelete on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "blockConnection": true, + "blockTimeMS": 55 + } + } + } + }, + { + "name": "findOneAndDelete", + "object": "collection", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll" + } + } + }, + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll" + } + } + } + ] + } + ] + }, + { + "description": "operation fails after two consecutive socket timeouts - findOneAndDelete on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "blockConnection": true, + "blockTimeMS": 55 + } + } + } + }, + { + "name": "findOneAndDelete", + "object": "collection", + 
"arguments": { + "filter": {} + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll" + } + } + }, + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll" + } + } + } + ] + } + ] + }, + { + "description": "operation succeeds after one socket timeout - findOneAndReplace on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "blockConnection": true, + "blockTimeMS": 55 + } + } + } + }, + { + "name": "findOneAndReplace", + "object": "collection", + "arguments": { + "filter": {}, + "replacement": { + "x": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll" + } + } + }, + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll" + } + } + } + ] + } + ] + }, + { + "description": "operation fails after two consecutive socket timeouts - findOneAndReplace on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "blockConnection": true, + "blockTimeMS": 55 + } + } + } + }, + { + "name": "findOneAndReplace", + "object": "collection", + "arguments": { + "filter": {}, + "replacement": { + "x": 1 + } + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll" + } + } + }, + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll" + } + } + } + ] + } + ] + }, + { + "description": "operation succeeds after one socket timeout - findOneAndUpdate on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "blockConnection": true, + "blockTimeMS": 55 + } + } + } + }, + { + "name": "findOneAndUpdate", + "object": "collection", + "arguments": { + "filter": {}, + "update": { + "$set": { + "x": 1 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll" + } + } + }, + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll" + } + } + } + ] + } + ] + }, + { + "description": "operation fails after two consecutive socket timeouts - findOneAndUpdate on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + 
"failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "blockConnection": true, + "blockTimeMS": 55 + } + } + } + }, + { + "name": "findOneAndUpdate", + "object": "collection", + "arguments": { + "filter": {}, + "update": { + "$set": { + "x": 1 + } + } + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll" + } + } + }, + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll" + } + } + } + ] + } + ] + }, + { + "description": "operation succeeds after one socket timeout - bulkWrite on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "blockConnection": true, + "blockTimeMS": 55 + } + } + } + }, + { + "name": "bulkWrite", + "object": "collection", + "arguments": { + "requests": [ + { + "insertOne": { + "document": { + "_id": 1 + } + } + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll" + } + } + }, + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll" + } + } + } + ] + } + ] + }, + { + "description": "operation fails after two consecutive socket timeouts - bulkWrite on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "insert" + ], + "blockConnection": true, + "blockTimeMS": 55 + } + } + } + }, + { + "name": "bulkWrite", + "object": "collection", + "arguments": { + "requests": [ + { + "insertOne": { + "document": { + "_id": 1 + } + } + } + ] + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll" + } + } + }, + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll" + } + } + } + ] + } + ] + }, + { + "description": "operation succeeds after one socket timeout - listDatabases on client", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listDatabases" + ], + "blockConnection": true, + "blockTimeMS": 55 + } + } + } + }, + { + "name": "listDatabases", + "object": "client", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listDatabases", + "databaseName": "admin", + "command": { + "listDatabases": 1 + } + } + }, + { + "commandStartedEvent": { + "commandName": "listDatabases", + "databaseName": "admin", + "command": { + "listDatabases": 1 + } + } + } + ] + } + ] + }, + { + "description": "operation 
fails after two consecutive socket timeouts - listDatabases on client", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "listDatabases" + ], + "blockConnection": true, + "blockTimeMS": 55 + } + } + } + }, + { + "name": "listDatabases", + "object": "client", + "arguments": { + "filter": {} + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listDatabases", + "databaseName": "admin", + "command": { + "listDatabases": 1 + } + } + }, + { + "commandStartedEvent": { + "commandName": "listDatabases", + "databaseName": "admin", + "command": { + "listDatabases": 1 + } + } + } + ] + } + ] + }, + { + "description": "operation succeeds after one socket timeout - listDatabaseNames on client", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listDatabases" + ], + "blockConnection": true, + "blockTimeMS": 55 + } + } + } + }, + { + "name": "listDatabaseNames", + "object": "client" + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listDatabases", + "databaseName": "admin", + "command": { + "listDatabases": 1 + } + } + }, + { + "commandStartedEvent": { + "commandName": "listDatabases", + "databaseName": "admin", + "command": { + "listDatabases": 1 + } + } + } + ] + } + ] + }, + { + "description": "operation fails after two consecutive socket timeouts - listDatabaseNames on client", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "listDatabases" + ], + "blockConnection": true, + "blockTimeMS": 55 + } + } + } + }, + { + "name": "listDatabaseNames", + "object": "client", + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listDatabases", + "databaseName": "admin", + "command": { + "listDatabases": 1 + } + } + }, + { + "commandStartedEvent": { + "commandName": "listDatabases", + "databaseName": "admin", + "command": { + "listDatabases": 1 + } + } + } + ] + } + ] + }, + { + "description": "operation succeeds after one socket timeout - createChangeStream on client", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 55 + } + } + } + }, + { + "name": "createChangeStream", + "object": "client", + "arguments": { + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "admin", + "command": { + "aggregate": 1 + } + } + }, + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "admin", + "command": { + "aggregate": 1 + } + } + } + ] + } + ] + }, + { + "description": "operation fails 
after two consecutive socket timeouts - createChangeStream on client", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 55 + } + } + } + }, + { + "name": "createChangeStream", + "object": "client", + "arguments": { + "pipeline": [] + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "admin", + "command": { + "aggregate": 1 + } + } + }, + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "admin", + "command": { + "aggregate": 1 + } + } + } + ] + } + ] + }, + { + "description": "operation succeeds after one socket timeout - aggregate on database", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 55 + } + } + } + }, + { + "name": "aggregate", + "object": "database", + "arguments": { + "pipeline": [ + { + "$listLocalSessions": {} + }, + { + "$limit": 1 + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": 1 + } + } + }, + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": 1 + } + } + } + ] + } + ] + }, + { + "description": "operation fails after two consecutive socket timeouts - aggregate on database", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 55 + } + } + } + }, + { + "name": "aggregate", + "object": "database", + "arguments": { + "pipeline": [ + { + "$listLocalSessions": {} + }, + { + "$limit": 1 + } + ] + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": 1 + } + } + }, + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": 1 + } + } + } + ] + } + ] + }, + { + "description": "operation succeeds after one socket timeout - listCollections on database", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listCollections" + ], + "blockConnection": true, + "blockTimeMS": 55 + } + } + } + }, + { + "name": "listCollections", + "object": "database", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listCollections", + "databaseName": "test", + "command": { + "listCollections": 1 + } + } + }, + { + "commandStartedEvent": { + "commandName": 
"listCollections", + "databaseName": "test", + "command": { + "listCollections": 1 + } + } + } + ] + } + ] + }, + { + "description": "operation fails after two consecutive socket timeouts - listCollections on database", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "listCollections" + ], + "blockConnection": true, + "blockTimeMS": 55 + } + } + } + }, + { + "name": "listCollections", + "object": "database", + "arguments": { + "filter": {} + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listCollections", + "databaseName": "test", + "command": { + "listCollections": 1 + } + } + }, + { + "commandStartedEvent": { + "commandName": "listCollections", + "databaseName": "test", + "command": { + "listCollections": 1 + } + } + } + ] + } + ] + }, + { + "description": "operation succeeds after one socket timeout - listCollectionNames on database", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listCollections" + ], + "blockConnection": true, + "blockTimeMS": 55 + } + } + } + }, + { + "name": "listCollectionNames", + "object": "database", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listCollections", + "databaseName": "test", + "command": { + "listCollections": 1 + } + } + }, + { + "commandStartedEvent": { + "commandName": "listCollections", + "databaseName": "test", + "command": { + "listCollections": 1 + } + } + } + ] + } + ] + }, + { + "description": "operation fails after two consecutive socket timeouts - listCollectionNames on database", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "listCollections" + ], + "blockConnection": true, + "blockTimeMS": 55 + } + } + } + }, + { + "name": "listCollectionNames", + "object": "database", + "arguments": { + "filter": {} + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listCollections", + "databaseName": "test", + "command": { + "listCollections": 1 + } + } + }, + { + "commandStartedEvent": { + "commandName": "listCollections", + "databaseName": "test", + "command": { + "listCollections": 1 + } + } + } + ] + } + ] + }, + { + "description": "operation succeeds after one socket timeout - createChangeStream on database", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 55 + } + } + } + }, + { + "name": "createChangeStream", + "object": "database", + "arguments": { + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": 
"aggregate", + "databaseName": "test", + "command": { + "aggregate": 1 + } + } + }, + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": 1 + } + } + } + ] + } + ] + }, + { + "description": "operation fails after two consecutive socket timeouts - createChangeStream on database", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 55 + } + } + } + }, + { + "name": "createChangeStream", + "object": "database", + "arguments": { + "pipeline": [] + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": 1 + } + } + }, + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": 1 + } + } + } + ] + } + ] + }, + { + "description": "operation succeeds after one socket timeout - aggregate on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 55 + } + } + } + }, + { + "name": "aggregate", + "object": "collection", + "arguments": { + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll" + } + } + }, + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll" + } + } + } + ] + } + ] + }, + { + "description": "operation fails after two consecutive socket timeouts - aggregate on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 55 + } + } + } + }, + { + "name": "aggregate", + "object": "collection", + "arguments": { + "pipeline": [] + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll" + } + } + }, + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll" + } + } + } + ] + } + ] + }, + { + "description": "operation succeeds after one socket timeout - count on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "count" + ], + "blockConnection": true, + "blockTimeMS": 55 + } + } + } + }, + { + "name": "count", + "object": "collection", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": 
"count", + "databaseName": "test", + "command": { + "count": "coll" + } + } + }, + { + "commandStartedEvent": { + "commandName": "count", + "databaseName": "test", + "command": { + "count": "coll" + } + } + } + ] + } + ] + }, + { + "description": "operation fails after two consecutive socket timeouts - count on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "count" + ], + "blockConnection": true, + "blockTimeMS": 55 + } + } + } + }, + { + "name": "count", + "object": "collection", + "arguments": { + "filter": {} + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "count", + "databaseName": "test", + "command": { + "count": "coll" + } + } + }, + { + "commandStartedEvent": { + "commandName": "count", + "databaseName": "test", + "command": { + "count": "coll" + } + } + } + ] + } + ] + }, + { + "description": "operation succeeds after one socket timeout - countDocuments on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 55 + } + } + } + }, + { + "name": "countDocuments", + "object": "collection", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll" + } + } + }, + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll" + } + } + } + ] + } + ] + }, + { + "description": "operation fails after two consecutive socket timeouts - countDocuments on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 55 + } + } + } + }, + { + "name": "countDocuments", + "object": "collection", + "arguments": { + "filter": {} + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll" + } + } + }, + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll" + } + } + } + ] + } + ] + }, + { + "description": "operation succeeds after one socket timeout - estimatedDocumentCount on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "count" + ], + "blockConnection": true, + "blockTimeMS": 55 + } + } + } + }, + { + "name": "estimatedDocumentCount", + "object": "collection" + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "count", + "databaseName": 
"test", + "command": { + "count": "coll" + } + } + }, + { + "commandStartedEvent": { + "commandName": "count", + "databaseName": "test", + "command": { + "count": "coll" + } + } + } + ] + } + ] + }, + { + "description": "operation fails after two consecutive socket timeouts - estimatedDocumentCount on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "count" + ], + "blockConnection": true, + "blockTimeMS": 55 + } + } + } + }, + { + "name": "estimatedDocumentCount", + "object": "collection", + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "count", + "databaseName": "test", + "command": { + "count": "coll" + } + } + }, + { + "commandStartedEvent": { + "commandName": "count", + "databaseName": "test", + "command": { + "count": "coll" + } + } + } + ] + } + ] + }, + { + "description": "operation succeeds after one socket timeout - distinct on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "distinct" + ], + "blockConnection": true, + "blockTimeMS": 55 + } + } + } + }, + { + "name": "distinct", + "object": "collection", + "arguments": { + "fieldName": "x", + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "distinct", + "databaseName": "test", + "command": { + "distinct": "coll" + } + } + }, + { + "commandStartedEvent": { + "commandName": "distinct", + "databaseName": "test", + "command": { + "distinct": "coll" + } + } + } + ] + } + ] + }, + { + "description": "operation fails after two consecutive socket timeouts - distinct on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "distinct" + ], + "blockConnection": true, + "blockTimeMS": 55 + } + } + } + }, + { + "name": "distinct", + "object": "collection", + "arguments": { + "fieldName": "x", + "filter": {} + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "distinct", + "databaseName": "test", + "command": { + "distinct": "coll" + } + } + }, + { + "commandStartedEvent": { + "commandName": "distinct", + "databaseName": "test", + "command": { + "distinct": "coll" + } + } + } + ] + } + ] + }, + { + "description": "operation succeeds after one socket timeout - find on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "blockConnection": true, + "blockTimeMS": 55 + } + } + } + }, + { + "name": "find", + "object": "collection", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + 
"find": "coll" + } + } + }, + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll" + } + } + } + ] + } + ] + }, + { + "description": "operation fails after two consecutive socket timeouts - find on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "find" + ], + "blockConnection": true, + "blockTimeMS": 55 + } + } + } + }, + { + "name": "find", + "object": "collection", + "arguments": { + "filter": {} + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll" + } + } + }, + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll" + } + } + } + ] + } + ] + }, + { + "description": "operation succeeds after one socket timeout - findOne on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "blockConnection": true, + "blockTimeMS": 55 + } + } + } + }, + { + "name": "findOne", + "object": "collection", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll" + } + } + }, + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll" + } + } + } + ] + } + ] + }, + { + "description": "operation fails after two consecutive socket timeouts - findOne on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "find" + ], + "blockConnection": true, + "blockTimeMS": 55 + } + } + } + }, + { + "name": "findOne", + "object": "collection", + "arguments": { + "filter": {} + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll" + } + } + }, + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll" + } + } + } + ] + } + ] + }, + { + "description": "operation succeeds after one socket timeout - listIndexes on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listIndexes" + ], + "blockConnection": true, + "blockTimeMS": 55 + } + } + } + }, + { + "name": "listIndexes", + "object": "collection" + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listIndexes", + "databaseName": "test", + "command": { + "listIndexes": "coll" + } + } + }, + { + "commandStartedEvent": { + "commandName": "listIndexes", + "databaseName": "test", + 
"command": { + "listIndexes": "coll" + } + } + } + ] + } + ] + }, + { + "description": "operation fails after two consecutive socket timeouts - listIndexes on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "listIndexes" + ], + "blockConnection": true, + "blockTimeMS": 55 + } + } + } + }, + { + "name": "listIndexes", + "object": "collection", + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listIndexes", + "databaseName": "test", + "command": { + "listIndexes": "coll" + } + } + }, + { + "commandStartedEvent": { + "commandName": "listIndexes", + "databaseName": "test", + "command": { + "listIndexes": "coll" + } + } + } + ] + } + ] + }, + { + "description": "operation succeeds after one socket timeout - createChangeStream on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 55 + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection", + "arguments": { + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll" + } + } + }, + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll" + } + } + } + ] + } + ] + }, + { + "description": "operation fails after two consecutive socket timeouts - createChangeStream on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 55 + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection", + "arguments": { + "pipeline": [] + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll" + } + } + }, + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll" + } + } + } + ] + } + ] + } + ] +} diff --git a/test/csot/retryability-timeoutMS.json b/test/csot/retryability-timeoutMS.json new file mode 100644 index 0000000000..438ba6b8d2 --- /dev/null +++ b/test/csot/retryability-timeoutMS.json @@ -0,0 +1,5439 @@ +{ + "description": "timeoutMS behaves correctly for retryable operations", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "replicaset" + ] + }, + { + "minServerVersion": "4.2", + "topologies": [ + "replicaset", + "sharded-replicaset" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "failPointClient", + "useMultipleMongoses": false + } + }, + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 100 + }, + "useMultipleMongoses": false, + "observeEvents": [ + 
"commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "test", + "documents": [] + } + ], + "tests": [ + { + "description": "timeoutMS applies to whole operation, not individual attempts - insertOne on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 4 + }, + "data": { + "failCommands": [ + "insert" + ], + "blockConnection": true, + "blockTimeMS": 60, + "errorCode": 7, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "insertOne", + "object": "collection", + "arguments": { + "document": { + "x": 1 + } + }, + "expectError": { + "isTimeoutError": true + } + } + ] + }, + { + "description": "operation is retried multiple times for non-zero timeoutMS - insertOne on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "insert" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "insertOne", + "object": "collection", + "arguments": { + "timeoutMS": 500, + "document": { + "x": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "operation is retried multiple times if timeoutMS is zero - insertOne on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "insert" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "insertOne", + "object": "collection", + "arguments": { + "timeoutMS": 0, + "document": { + "x": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$exists": false + } 
+ } + } + } + ] + } + ] + }, + { + "description": "timeoutMS applies to whole operation, not individual attempts - insertMany on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 4 + }, + "data": { + "failCommands": [ + "insert" + ], + "blockConnection": true, + "blockTimeMS": 60, + "errorCode": 7, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "insertMany", + "object": "collection", + "arguments": { + "documents": [ + { + "x": 1 + } + ] + }, + "expectError": { + "isTimeoutError": true + } + } + ] + }, + { + "description": "operation is retried multiple times for non-zero timeoutMS - insertMany on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "insert" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "insertMany", + "object": "collection", + "arguments": { + "timeoutMS": 500, + "documents": [ + { + "x": 1 + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "operation is retried multiple times if timeoutMS is zero - insertMany on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "insert" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "insertMany", + "object": "collection", + "arguments": { + "timeoutMS": 0, + "documents": [ + { + "x": 1 + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS applies to whole operation, not individual attempts - deleteOne on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + 
"configureFailPoint": "failCommand", + "mode": { + "times": 4 + }, + "data": { + "failCommands": [ + "delete" + ], + "blockConnection": true, + "blockTimeMS": 60, + "errorCode": 7, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "deleteOne", + "object": "collection", + "arguments": { + "filter": {} + }, + "expectError": { + "isTimeoutError": true + } + } + ] + }, + { + "description": "operation is retried multiple times for non-zero timeoutMS - deleteOne on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "delete" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "deleteOne", + "object": "collection", + "arguments": { + "timeoutMS": 500, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "delete", + "databaseName": "test", + "command": { + "delete": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "delete", + "databaseName": "test", + "command": { + "delete": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "delete", + "databaseName": "test", + "command": { + "delete": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "operation is retried multiple times if timeoutMS is zero - deleteOne on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "delete" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "deleteOne", + "object": "collection", + "arguments": { + "timeoutMS": 0, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "delete", + "databaseName": "test", + "command": { + "delete": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "delete", + "databaseName": "test", + "command": { + "delete": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "delete", + "databaseName": "test", + "command": { + "delete": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS applies to whole operation, not individual attempts - replaceOne on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 4 + }, + "data": { + "failCommands": [ + "update" + ], + "blockConnection": true, + "blockTimeMS": 60, + "errorCode": 7, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "replaceOne", + "object": "collection", + "arguments": { + "filter": {}, + "replacement": { + "x": 1 + } + }, + "expectError": { + "isTimeoutError": true + } + } + ] + 
}, + { + "description": "operation is retried multiple times for non-zero timeoutMS - replaceOne on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "update" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "replaceOne", + "object": "collection", + "arguments": { + "timeoutMS": 500, + "filter": {}, + "replacement": { + "x": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "update", + "databaseName": "test", + "command": { + "update": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "update", + "databaseName": "test", + "command": { + "update": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "update", + "databaseName": "test", + "command": { + "update": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "operation is retried multiple times if timeoutMS is zero - replaceOne on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "update" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "replaceOne", + "object": "collection", + "arguments": { + "timeoutMS": 0, + "filter": {}, + "replacement": { + "x": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "update", + "databaseName": "test", + "command": { + "update": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "update", + "databaseName": "test", + "command": { + "update": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "update", + "databaseName": "test", + "command": { + "update": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS applies to whole operation, not individual attempts - updateOne on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 4 + }, + "data": { + "failCommands": [ + "update" + ], + "blockConnection": true, + "blockTimeMS": 60, + "errorCode": 7, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "updateOne", + "object": "collection", + "arguments": { + "filter": {}, + "update": { + "$set": { + "x": 1 + } + } + }, + "expectError": { + "isTimeoutError": true + } + } + ] + }, + { + "description": "operation is retried multiple times for non-zero timeoutMS - updateOne on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + 
}, + "data": { + "failCommands": [ + "update" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "updateOne", + "object": "collection", + "arguments": { + "timeoutMS": 500, + "filter": {}, + "update": { + "$set": { + "x": 1 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "update", + "databaseName": "test", + "command": { + "update": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "update", + "databaseName": "test", + "command": { + "update": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "update", + "databaseName": "test", + "command": { + "update": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "operation is retried multiple times if timeoutMS is zero - updateOne on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "update" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "updateOne", + "object": "collection", + "arguments": { + "timeoutMS": 0, + "filter": {}, + "update": { + "$set": { + "x": 1 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "update", + "databaseName": "test", + "command": { + "update": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "update", + "databaseName": "test", + "command": { + "update": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "update", + "databaseName": "test", + "command": { + "update": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS applies to whole operation, not individual attempts - findOneAndDelete on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 4 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "blockConnection": true, + "blockTimeMS": 60, + "errorCode": 7, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "findOneAndDelete", + "object": "collection", + "arguments": { + "filter": {} + }, + "expectError": { + "isTimeoutError": true + } + } + ] + }, + { + "description": "operation is retried multiple times for non-zero timeoutMS - findOneAndDelete on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "findOneAndDelete", + "object": "collection", + "arguments": { + "timeoutMS": 500, + "filter": {} + } + } + ], + "expectEvents": [ 
+ { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "operation is retried multiple times if timeoutMS is zero - findOneAndDelete on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "findOneAndDelete", + "object": "collection", + "arguments": { + "timeoutMS": 0, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS applies to whole operation, not individual attempts - findOneAndReplace on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 4 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "blockConnection": true, + "blockTimeMS": 60, + "errorCode": 7, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "findOneAndReplace", + "object": "collection", + "arguments": { + "filter": {}, + "replacement": { + "x": 1 + } + }, + "expectError": { + "isTimeoutError": true + } + } + ] + }, + { + "description": "operation is retried multiple times for non-zero timeoutMS - findOneAndReplace on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "findOneAndReplace", + "object": "collection", + "arguments": { + "timeoutMS": 500, + "filter": {}, + "replacement": { + "x": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll", + "maxTimeMS": { + "$$type": [ + 
"int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "operation is retried multiple times if timeoutMS is zero - findOneAndReplace on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "findOneAndReplace", + "object": "collection", + "arguments": { + "timeoutMS": 0, + "filter": {}, + "replacement": { + "x": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS applies to whole operation, not individual attempts - findOneAndUpdate on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 4 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "blockConnection": true, + "blockTimeMS": 60, + "errorCode": 7, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "findOneAndUpdate", + "object": "collection", + "arguments": { + "filter": {}, + "update": { + "$set": { + "x": 1 + } + } + }, + "expectError": { + "isTimeoutError": true + } + } + ] + }, + { + "description": "operation is retried multiple times for non-zero timeoutMS - findOneAndUpdate on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "findOneAndUpdate", + "object": "collection", + "arguments": { + "timeoutMS": 500, + "filter": {}, + "update": { + "$set": { + "x": 1 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + 
"findAndModify": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "operation is retried multiple times if timeoutMS is zero - findOneAndUpdate on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "findOneAndUpdate", + "object": "collection", + "arguments": { + "timeoutMS": 0, + "filter": {}, + "update": { + "$set": { + "x": 1 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS applies to whole operation, not individual attempts - bulkWrite on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 4 + }, + "data": { + "failCommands": [ + "insert" + ], + "blockConnection": true, + "blockTimeMS": 60, + "errorCode": 7, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "bulkWrite", + "object": "collection", + "arguments": { + "requests": [ + { + "insertOne": { + "document": { + "_id": 1 + } + } + } + ] + }, + "expectError": { + "isTimeoutError": true + } + } + ] + }, + { + "description": "operation is retried multiple times for non-zero timeoutMS - bulkWrite on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "insert" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "bulkWrite", + "object": "collection", + "arguments": { + "timeoutMS": 500, + "requests": [ + { + "insertOne": { + "document": { + "_id": 1 + } + } + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": 
"test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "operation is retried multiple times if timeoutMS is zero - bulkWrite on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "insert" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "bulkWrite", + "object": "collection", + "arguments": { + "timeoutMS": 0, + "requests": [ + { + "insertOne": { + "document": { + "_id": 1 + } + } + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS applies to whole operation, not individual attempts - listDatabases on client", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 4 + }, + "data": { + "failCommands": [ + "listDatabases" + ], + "blockConnection": true, + "blockTimeMS": 60, + "errorCode": 7, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "listDatabases", + "object": "client", + "arguments": { + "filter": {} + }, + "expectError": { + "isTimeoutError": true + } + } + ] + }, + { + "description": "operation is retried multiple times for non-zero timeoutMS - listDatabases on client", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "listDatabases" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "listDatabases", + "object": "client", + "arguments": { + "timeoutMS": 500, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listDatabases", + "databaseName": "admin", + "command": { + "listDatabases": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "listDatabases", + "databaseName": "admin", + "command": { + "listDatabases": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "listDatabases", + "databaseName": "admin", + "command": { + "listDatabases": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "operation is retried multiple times if timeoutMS is zero - listDatabases on client", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + 
"arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "listDatabases" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "listDatabases", + "object": "client", + "arguments": { + "timeoutMS": 0, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listDatabases", + "databaseName": "admin", + "command": { + "listDatabases": 1, + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "listDatabases", + "databaseName": "admin", + "command": { + "listDatabases": 1, + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "listDatabases", + "databaseName": "admin", + "command": { + "listDatabases": 1, + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS applies to whole operation, not individual attempts - listDatabaseNames on client", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 4 + }, + "data": { + "failCommands": [ + "listDatabases" + ], + "blockConnection": true, + "blockTimeMS": 60, + "errorCode": 7, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "listDatabaseNames", + "object": "client", + "expectError": { + "isTimeoutError": true + } + } + ] + }, + { + "description": "operation is retried multiple times for non-zero timeoutMS - listDatabaseNames on client", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "listDatabases" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "listDatabaseNames", + "object": "client", + "arguments": { + "timeoutMS": 500 + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listDatabases", + "databaseName": "admin", + "command": { + "listDatabases": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "listDatabases", + "databaseName": "admin", + "command": { + "listDatabases": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "listDatabases", + "databaseName": "admin", + "command": { + "listDatabases": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "operation is retried multiple times if timeoutMS is zero - listDatabaseNames on client", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "listDatabases" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "listDatabaseNames", + "object": "client", + "arguments": { + 
"timeoutMS": 0 + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listDatabases", + "databaseName": "admin", + "command": { + "listDatabases": 1, + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "listDatabases", + "databaseName": "admin", + "command": { + "listDatabases": 1, + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "listDatabases", + "databaseName": "admin", + "command": { + "listDatabases": 1, + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS applies to whole operation, not individual attempts - createChangeStream on client", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 4 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 60, + "errorCode": 7, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "createChangeStream", + "object": "client", + "arguments": { + "pipeline": [] + }, + "expectError": { + "isTimeoutError": true + } + } + ] + }, + { + "description": "operation is retried multiple times for non-zero timeoutMS - createChangeStream on client", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "createChangeStream", + "object": "client", + "arguments": { + "timeoutMS": 500, + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "admin", + "command": { + "aggregate": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "admin", + "command": { + "aggregate": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "admin", + "command": { + "aggregate": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "operation is retried multiple times if timeoutMS is zero - createChangeStream on client", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "createChangeStream", + "object": "client", + "arguments": { + "timeoutMS": 0, + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "admin", + "command": { + "aggregate": 1, + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": 
"admin", + "command": { + "aggregate": 1, + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "admin", + "command": { + "aggregate": 1, + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS applies to whole operation, not individual attempts - aggregate on database", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 4 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 60, + "errorCode": 7, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "aggregate", + "object": "database", + "arguments": { + "pipeline": [ + { + "$listLocalSessions": {} + }, + { + "$limit": 1 + } + ] + }, + "expectError": { + "isTimeoutError": true + } + } + ] + }, + { + "description": "operation is retried multiple times for non-zero timeoutMS - aggregate on database", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "aggregate", + "object": "database", + "arguments": { + "timeoutMS": 500, + "pipeline": [ + { + "$listLocalSessions": {} + }, + { + "$limit": 1 + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "operation is retried multiple times if timeoutMS is zero - aggregate on database", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "aggregate", + "object": "database", + "arguments": { + "timeoutMS": 0, + "pipeline": [ + { + "$listLocalSessions": {} + }, + { + "$limit": 1 + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": 1, + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": 1, + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": 1, + "maxTimeMS": { + 
"$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS applies to whole operation, not individual attempts - listCollections on database", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 4 + }, + "data": { + "failCommands": [ + "listCollections" + ], + "blockConnection": true, + "blockTimeMS": 60, + "errorCode": 7, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "listCollections", + "object": "database", + "arguments": { + "filter": {} + }, + "expectError": { + "isTimeoutError": true + } + } + ] + }, + { + "description": "operation is retried multiple times for non-zero timeoutMS - listCollections on database", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "listCollections" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "listCollections", + "object": "database", + "arguments": { + "timeoutMS": 500, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listCollections", + "databaseName": "test", + "command": { + "listCollections": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "listCollections", + "databaseName": "test", + "command": { + "listCollections": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "listCollections", + "databaseName": "test", + "command": { + "listCollections": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "operation is retried multiple times if timeoutMS is zero - listCollections on database", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "listCollections" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "listCollections", + "object": "database", + "arguments": { + "timeoutMS": 0, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listCollections", + "databaseName": "test", + "command": { + "listCollections": 1, + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "listCollections", + "databaseName": "test", + "command": { + "listCollections": 1, + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "listCollections", + "databaseName": "test", + "command": { + "listCollections": 1, + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS applies to whole operation, not individual attempts - listCollectionNames on database", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "operations": [ + { + "name": "failPoint", + "object": 
"testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 4 + }, + "data": { + "failCommands": [ + "listCollections" + ], + "blockConnection": true, + "blockTimeMS": 60, + "errorCode": 7, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "listCollectionNames", + "object": "database", + "arguments": { + "filter": {} + }, + "expectError": { + "isTimeoutError": true + } + } + ] + }, + { + "description": "operation is retried multiple times for non-zero timeoutMS - listCollectionNames on database", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "listCollections" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "listCollectionNames", + "object": "database", + "arguments": { + "timeoutMS": 500, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listCollections", + "databaseName": "test", + "command": { + "listCollections": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "listCollections", + "databaseName": "test", + "command": { + "listCollections": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "listCollections", + "databaseName": "test", + "command": { + "listCollections": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "operation is retried multiple times if timeoutMS is zero - listCollectionNames on database", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "listCollections" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "listCollectionNames", + "object": "database", + "arguments": { + "timeoutMS": 0, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listCollections", + "databaseName": "test", + "command": { + "listCollections": 1, + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "listCollections", + "databaseName": "test", + "command": { + "listCollections": 1, + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "listCollections", + "databaseName": "test", + "command": { + "listCollections": 1, + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS applies to whole operation, not individual attempts - createChangeStream on database", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 4 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 60, + "errorCode": 7, + 
"errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "createChangeStream", + "object": "database", + "arguments": { + "pipeline": [] + }, + "expectError": { + "isTimeoutError": true + } + } + ] + }, + { + "description": "operation is retried multiple times for non-zero timeoutMS - createChangeStream on database", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "createChangeStream", + "object": "database", + "arguments": { + "timeoutMS": 500, + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "operation is retried multiple times if timeoutMS is zero - createChangeStream on database", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "createChangeStream", + "object": "database", + "arguments": { + "timeoutMS": 0, + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": 1, + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": 1, + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": 1, + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS applies to whole operation, not individual attempts - aggregate on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 4 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 60, + "errorCode": 7, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "aggregate", + "object": "collection", + "arguments": { + "pipeline": [] + }, + "expectError": { + "isTimeoutError": true + } + } + ] + }, + { + "description": "operation is retried multiple times for non-zero timeoutMS - aggregate on collection", + "operations": [ + { + "name": "failPoint", + 
"object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "aggregate", + "object": "collection", + "arguments": { + "timeoutMS": 500, + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "operation is retried multiple times if timeoutMS is zero - aggregate on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "aggregate", + "object": "collection", + "arguments": { + "timeoutMS": 0, + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS applies to whole operation, not individual attempts - count on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 4 + }, + "data": { + "failCommands": [ + "count" + ], + "blockConnection": true, + "blockTimeMS": 60, + "errorCode": 7, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "count", + "object": "collection", + "arguments": { + "filter": {} + }, + "expectError": { + "isTimeoutError": true + } + } + ] + }, + { + "description": "operation is retried multiple times for non-zero timeoutMS - count on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "count" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "count", + "object": "collection", + "arguments": { + "timeoutMS": 
500, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "count", + "databaseName": "test", + "command": { + "count": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "count", + "databaseName": "test", + "command": { + "count": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "count", + "databaseName": "test", + "command": { + "count": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "operation is retried multiple times if timeoutMS is zero - count on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "count" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "count", + "object": "collection", + "arguments": { + "timeoutMS": 0, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "count", + "databaseName": "test", + "command": { + "count": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "count", + "databaseName": "test", + "command": { + "count": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "count", + "databaseName": "test", + "command": { + "count": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS applies to whole operation, not individual attempts - countDocuments on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 4 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 60, + "errorCode": 7, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "countDocuments", + "object": "collection", + "arguments": { + "filter": {} + }, + "expectError": { + "isTimeoutError": true + } + } + ] + }, + { + "description": "operation is retried multiple times for non-zero timeoutMS - countDocuments on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "countDocuments", + "object": "collection", + "arguments": { + "timeoutMS": 500, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + 
"maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "operation is retried multiple times if timeoutMS is zero - countDocuments on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "countDocuments", + "object": "collection", + "arguments": { + "timeoutMS": 0, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS applies to whole operation, not individual attempts - estimatedDocumentCount on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 4 + }, + "data": { + "failCommands": [ + "count" + ], + "blockConnection": true, + "blockTimeMS": 60, + "errorCode": 7, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "estimatedDocumentCount", + "object": "collection", + "expectError": { + "isTimeoutError": true + } + } + ] + }, + { + "description": "operation is retried multiple times for non-zero timeoutMS - estimatedDocumentCount on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "count" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "estimatedDocumentCount", + "object": "collection", + "arguments": { + "timeoutMS": 500 + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "count", + "databaseName": "test", + "command": { + "count": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "count", + "databaseName": "test", + "command": { + "count": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "count", + "databaseName": "test", + "command": { + "count": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "operation is retried multiple times if timeoutMS is zero - estimatedDocumentCount on collection", + 
"operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "count" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "estimatedDocumentCount", + "object": "collection", + "arguments": { + "timeoutMS": 0 + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "count", + "databaseName": "test", + "command": { + "count": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "count", + "databaseName": "test", + "command": { + "count": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "count", + "databaseName": "test", + "command": { + "count": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS applies to whole operation, not individual attempts - distinct on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 4 + }, + "data": { + "failCommands": [ + "distinct" + ], + "blockConnection": true, + "blockTimeMS": 60, + "errorCode": 7, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "distinct", + "object": "collection", + "arguments": { + "fieldName": "x", + "filter": {} + }, + "expectError": { + "isTimeoutError": true + } + } + ] + }, + { + "description": "operation is retried multiple times for non-zero timeoutMS - distinct on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "distinct" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "distinct", + "object": "collection", + "arguments": { + "timeoutMS": 500, + "fieldName": "x", + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "distinct", + "databaseName": "test", + "command": { + "distinct": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "distinct", + "databaseName": "test", + "command": { + "distinct": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "distinct", + "databaseName": "test", + "command": { + "distinct": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "operation is retried multiple times if timeoutMS is zero - distinct on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "distinct" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": 
"distinct", + "object": "collection", + "arguments": { + "timeoutMS": 0, + "fieldName": "x", + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "distinct", + "databaseName": "test", + "command": { + "distinct": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "distinct", + "databaseName": "test", + "command": { + "distinct": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "distinct", + "databaseName": "test", + "command": { + "distinct": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS applies to whole operation, not individual attempts - find on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 4 + }, + "data": { + "failCommands": [ + "find" + ], + "blockConnection": true, + "blockTimeMS": 60, + "errorCode": 7, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "find", + "object": "collection", + "arguments": { + "filter": {} + }, + "expectError": { + "isTimeoutError": true + } + } + ] + }, + { + "description": "operation is retried multiple times for non-zero timeoutMS - find on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "find" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "find", + "object": "collection", + "arguments": { + "timeoutMS": 500, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "operation is retried multiple times if timeoutMS is zero - find on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "find" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "find", + "object": "collection", + "arguments": { + "timeoutMS": 0, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll", 
+ "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS applies to whole operation, not individual attempts - findOne on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 4 + }, + "data": { + "failCommands": [ + "find" + ], + "blockConnection": true, + "blockTimeMS": 60, + "errorCode": 7, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "findOne", + "object": "collection", + "arguments": { + "filter": {} + }, + "expectError": { + "isTimeoutError": true + } + } + ] + }, + { + "description": "operation is retried multiple times for non-zero timeoutMS - findOne on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "find" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "findOne", + "object": "collection", + "arguments": { + "timeoutMS": 500, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "operation is retried multiple times if timeoutMS is zero - findOne on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "find" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "findOne", + "object": "collection", + "arguments": { + "timeoutMS": 0, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS applies to whole operation, not individual attempts - listIndexes on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "operations": [ + { + "name": "failPoint", + 
"object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 4 + }, + "data": { + "failCommands": [ + "listIndexes" + ], + "blockConnection": true, + "blockTimeMS": 60, + "errorCode": 7, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "listIndexes", + "object": "collection", + "expectError": { + "isTimeoutError": true + } + } + ] + }, + { + "description": "operation is retried multiple times for non-zero timeoutMS - listIndexes on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "listIndexes" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "listIndexes", + "object": "collection", + "arguments": { + "timeoutMS": 500 + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listIndexes", + "databaseName": "test", + "command": { + "listIndexes": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "listIndexes", + "databaseName": "test", + "command": { + "listIndexes": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "listIndexes", + "databaseName": "test", + "command": { + "listIndexes": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "operation is retried multiple times if timeoutMS is zero - listIndexes on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "listIndexes" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "listIndexes", + "object": "collection", + "arguments": { + "timeoutMS": 0 + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listIndexes", + "databaseName": "test", + "command": { + "listIndexes": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "listIndexes", + "databaseName": "test", + "command": { + "listIndexes": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "listIndexes", + "databaseName": "test", + "command": { + "listIndexes": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS applies to whole operation, not individual attempts - createChangeStream on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 4 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 60, + "errorCode": 7, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection", + 
"arguments": { + "pipeline": [] + }, + "expectError": { + "isTimeoutError": true + } + } + ] + }, + { + "description": "operation is retried multiple times for non-zero timeoutMS - createChangeStream on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection", + "arguments": { + "timeoutMS": 500, + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "operation is retried multiple times if timeoutMS is zero - createChangeStream on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection", + "arguments": { + "timeoutMS": 0, + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + } + ] +} diff --git a/test/csot/sessions-inherit-timeoutMS.json b/test/csot/sessions-inherit-timeoutMS.json new file mode 100644 index 0000000000..8205c086bc --- /dev/null +++ b/test/csot/sessions-inherit-timeoutMS.json @@ -0,0 +1,311 @@ +{ + "description": "sessions inherit timeoutMS from their parent MongoClient", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.4", + "topologies": [ + "replicaset", + "sharded-replicaset" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "failPointClient", + "useMultipleMongoses": false + } + }, + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 50 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent", + "commandSucceededEvent", + "commandFailedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", 
+ "collectionName": "coll" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "test", + "documents": [] + } + ], + "tests": [ + { + "description": "timeoutMS applied to commitTransaction", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "commitTransaction" + ], + "blockConnection": true, + "blockTimeMS": 60 + } + } + } + }, + { + "name": "startTransaction", + "object": "session" + }, + { + "name": "insertOne", + "object": "collection", + "arguments": { + "session": "session", + "document": { + "_id": 1 + } + } + }, + { + "name": "commitTransaction", + "object": "session", + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll" + } + } + }, + { + "commandSucceededEvent": { + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "commandName": "commitTransaction", + "databaseName": "admin", + "command": { + "commitTransaction": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandFailedEvent": { + "commandName": "commitTransaction" + } + } + ] + } + ] + }, + { + "description": "timeoutMS applied to abortTransaction", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "abortTransaction" + ], + "blockConnection": true, + "blockTimeMS": 60 + } + } + } + }, + { + "name": "startTransaction", + "object": "session" + }, + { + "name": "insertOne", + "object": "collection", + "arguments": { + "session": "session", + "document": { + "_id": 1 + } + } + }, + { + "name": "abortTransaction", + "object": "session" + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll" + } + } + }, + { + "commandSucceededEvent": { + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "commandName": "abortTransaction", + "databaseName": "admin", + "command": { + "abortTransaction": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandFailedEvent": { + "commandName": "abortTransaction" + } + } + ] + } + ] + }, + { + "description": "timeoutMS applied to withTransaction", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "blockConnection": true, + "blockTimeMS": 60 + } + } + } + }, + { + "name": "withTransaction", + "object": "session", + "arguments": { + "callback": [ + { + "name": "insertOne", + "object": "collection", + "arguments": { + "session": "session", + "document": { + "_id": 1 + } + }, + "expectError": { + "isTimeoutError": true + } + } + ] + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + 
"command": { + "insert": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandFailedEvent": { + "commandName": "insert" + } + } + ] + } + ] + } + ] +} diff --git a/test/csot/sessions-override-operation-timeoutMS.json b/test/csot/sessions-override-operation-timeoutMS.json new file mode 100644 index 0000000000..ff26de29f5 --- /dev/null +++ b/test/csot/sessions-override-operation-timeoutMS.json @@ -0,0 +1,315 @@ +{ + "description": "timeoutMS can be overridden for individual session operations", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.4", + "topologies": [ + "replicaset", + "sharded-replicaset" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "failPointClient", + "useMultipleMongoses": false + } + }, + { + "client": { + "id": "client", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent", + "commandSucceededEvent", + "commandFailedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "test", + "documents": [] + } + ], + "tests": [ + { + "description": "timeoutMS can be overridden for commitTransaction", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "commitTransaction" + ], + "blockConnection": true, + "blockTimeMS": 60 + } + } + } + }, + { + "name": "startTransaction", + "object": "session" + }, + { + "name": "insertOne", + "object": "collection", + "arguments": { + "session": "session", + "document": { + "_id": 1 + } + } + }, + { + "name": "commitTransaction", + "object": "session", + "arguments": { + "timeoutMS": 50 + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll" + } + } + }, + { + "commandSucceededEvent": { + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "commandName": "commitTransaction", + "databaseName": "admin", + "command": { + "commitTransaction": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandFailedEvent": { + "commandName": "commitTransaction" + } + } + ] + } + ] + }, + { + "description": "timeoutMS applied to abortTransaction", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "abortTransaction" + ], + "blockConnection": true, + "blockTimeMS": 60 + } + } + } + }, + { + "name": "startTransaction", + "object": "session" + }, + { + "name": "insertOne", + "object": "collection", + "arguments": { + "session": "session", + "document": { + "_id": 1 + } + } + }, + { + "name": "abortTransaction", + "object": "session", + "arguments": { + "timeoutMS": 50 + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll" + } + } + 
}, + { + "commandSucceededEvent": { + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "commandName": "abortTransaction", + "databaseName": "admin", + "command": { + "abortTransaction": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandFailedEvent": { + "commandName": "abortTransaction" + } + } + ] + } + ] + }, + { + "description": "timeoutMS applied to withTransaction", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "blockConnection": true, + "blockTimeMS": 60 + } + } + } + }, + { + "name": "withTransaction", + "object": "session", + "arguments": { + "timeoutMS": 50, + "callback": [ + { + "name": "insertOne", + "object": "collection", + "arguments": { + "session": "session", + "document": { + "_id": 1 + } + }, + "expectError": { + "isTimeoutError": true + } + } + ] + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandFailedEvent": { + "commandName": "insert" + } + } + ] + } + ] + } + ] +} diff --git a/test/csot/sessions-override-timeoutMS.json b/test/csot/sessions-override-timeoutMS.json new file mode 100644 index 0000000000..1d3b8932af --- /dev/null +++ b/test/csot/sessions-override-timeoutMS.json @@ -0,0 +1,311 @@ +{ + "description": "timeoutMS can be overridden at the level of a ClientSession", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.4", + "topologies": [ + "replicaset", + "sharded-replicaset" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "failPointClient", + "useMultipleMongoses": false + } + }, + { + "client": { + "id": "client", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent", + "commandSucceededEvent", + "commandFailedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "session": { + "id": "session", + "client": "client", + "sessionOptions": { + "defaultTimeoutMS": 50 + } + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "test", + "documents": [] + } + ], + "tests": [ + { + "description": "timeoutMS applied to commitTransaction", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "commitTransaction" + ], + "blockConnection": true, + "blockTimeMS": 60 + } + } + } + }, + { + "name": "startTransaction", + "object": "session" + }, + { + "name": "insertOne", + "object": "collection", + "arguments": { + "session": "session", + "document": { + "_id": 1 + } + } + }, + { + "name": "commitTransaction", + "object": "session", + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll" + } + } + }, + { + 
"commandSucceededEvent": { + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "commandName": "commitTransaction", + "databaseName": "admin", + "command": { + "commitTransaction": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandFailedEvent": { + "commandName": "commitTransaction" + } + } + ] + } + ] + }, + { + "description": "timeoutMS applied to abortTransaction", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "abortTransaction" + ], + "blockConnection": true, + "blockTimeMS": 60 + } + } + } + }, + { + "name": "startTransaction", + "object": "session" + }, + { + "name": "insertOne", + "object": "collection", + "arguments": { + "session": "session", + "document": { + "_id": 1 + } + } + }, + { + "name": "abortTransaction", + "object": "session" + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll" + } + } + }, + { + "commandSucceededEvent": { + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "commandName": "abortTransaction", + "databaseName": "admin", + "command": { + "abortTransaction": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandFailedEvent": { + "commandName": "abortTransaction" + } + } + ] + } + ] + }, + { + "description": "timeoutMS applied to withTransaction", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "blockConnection": true, + "blockTimeMS": 60 + } + } + } + }, + { + "name": "withTransaction", + "object": "session", + "arguments": { + "callback": [ + { + "name": "insertOne", + "object": "collection", + "arguments": { + "session": "session", + "document": { + "_id": 1 + } + }, + "expectError": { + "isTimeoutError": true + } + } + ] + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandFailedEvent": { + "commandName": "insert" + } + } + ] + } + ] + } + ] +} diff --git a/test/csot/tailable-awaitData.json b/test/csot/tailable-awaitData.json new file mode 100644 index 0000000000..6da85c7783 --- /dev/null +++ b/test/csot/tailable-awaitData.json @@ -0,0 +1,422 @@ +{ + "description": "timeoutMS behaves correctly for tailable awaitData cursors", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "createEntities": [ + { + "client": { + "id": "failPointClient", + "useMultipleMongoses": false + } + }, + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 10 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "test", + 
"createOptions": { + "capped": true, + "size": 500 + }, + "documents": [ + { + "_id": 0 + }, + { + "_id": 1 + } + ] + } + ], + "tests": [ + { + "description": "error if timeoutMode is cursor_lifetime", + "operations": [ + { + "name": "find", + "object": "collection", + "arguments": { + "filter": {}, + "timeoutMode": "cursorLifetime", + "cursorType": "tailableAwait" + }, + "expectError": { + "isClientError": true + } + } + ] + }, + { + "description": "error if maxAwaitTimeMS is greater than timeoutMS", + "operations": [ + { + "name": "find", + "object": "collection", + "arguments": { + "filter": {}, + "cursorType": "tailableAwait", + "timeoutMS": 5, + "maxAwaitTimeMS": 10 + }, + "expectError": { + "isClientError": true + } + } + ] + }, + { + "description": "error if maxAwaitTimeMS is equal to timeoutMS", + "operations": [ + { + "name": "find", + "object": "collection", + "arguments": { + "filter": {}, + "cursorType": "tailableAwait", + "timeoutMS": 5, + "maxAwaitTimeMS": 5 + }, + "expectError": { + "isClientError": true + } + } + ] + }, + { + "description": "timeoutMS applied to find", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "find", + "object": "collection", + "arguments": { + "filter": {}, + "cursorType": "tailableAwait" + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll", + "tailable": true, + "awaitData": true, + "maxTimeMS": { + "$$exists": true + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS is refreshed for getMore if maxAwaitTimeMS is not set", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "find", + "getMore" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "createFindCursor", + "object": "collection", + "arguments": { + "filter": {}, + "cursorType": "tailableAwait", + "timeoutMS": 20, + "batchSize": 1 + }, + "saveResultAsEntity": "tailableCursor" + }, + { + "name": "iterateUntilDocumentOrError", + "object": "tailableCursor" + }, + { + "name": "iterateUntilDocumentOrError", + "object": "tailableCursor" + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll", + "tailable": true, + "awaitData": true, + "maxTimeMS": { + "$$exists": true + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "getMore", + "databaseName": "test", + "command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "collection": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS is refreshed for getMore if maxAwaitTimeMS is set", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "find", + "getMore" + ], + 
"blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "createFindCursor", + "object": "collection", + "arguments": { + "filter": {}, + "cursorType": "tailableAwait", + "timeoutMS": 20, + "batchSize": 1, + "maxAwaitTimeMS": 1 + }, + "saveResultAsEntity": "tailableCursor" + }, + { + "name": "iterateUntilDocumentOrError", + "object": "tailableCursor" + }, + { + "name": "iterateUntilDocumentOrError", + "object": "tailableCursor" + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll", + "tailable": true, + "awaitData": true, + "maxTimeMS": { + "$$exists": true + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "getMore", + "databaseName": "test", + "command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "collection": "coll", + "maxTimeMS": 1 + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS is refreshed for getMore - failure", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "getMore" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "createFindCursor", + "object": "collection", + "arguments": { + "filter": {}, + "cursorType": "tailableAwait", + "batchSize": 1 + }, + "saveResultAsEntity": "tailableCursor" + }, + { + "name": "iterateUntilDocumentOrError", + "object": "tailableCursor" + }, + { + "name": "iterateUntilDocumentOrError", + "object": "tailableCursor", + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll", + "tailable": true, + "awaitData": true, + "maxTimeMS": { + "$$exists": true + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "getMore", + "databaseName": "test", + "command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "collection": "coll" + } + } + } + ] + } + ] + } + ] +} diff --git a/test/csot/tailable-non-awaitData.json b/test/csot/tailable-non-awaitData.json new file mode 100644 index 0000000000..34ee660963 --- /dev/null +++ b/test/csot/tailable-non-awaitData.json @@ -0,0 +1,312 @@ +{ + "description": "timeoutMS behaves correctly for tailable non-awaitData cursors", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "createEntities": [ + { + "client": { + "id": "failPointClient", + "useMultipleMongoses": false + } + }, + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 10 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "test", + "createOptions": { + "capped": true, + "size": 500 + }, + "documents": [ + { + "_id": 0 + }, + { + "_id": 1 + } + ] + } + ], + "tests": [ + { + "description": "error if timeoutMode is cursor_lifetime", + "operations": [ + { + "name": "find", + "object": "collection", + "arguments": { + "filter": {}, + "timeoutMode": "cursorLifetime", + "cursorType": "tailable" + }, + 
"expectError": { + "isClientError": true + } + } + ] + }, + { + "description": "timeoutMS applied to find", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "find", + "object": "collection", + "arguments": { + "filter": {}, + "cursorType": "tailable" + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll", + "tailable": true, + "awaitData": { + "$$exists": false + }, + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS is refreshed for getMore - success", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "find", + "getMore" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "createFindCursor", + "object": "collection", + "arguments": { + "filter": {}, + "cursorType": "tailable", + "timeoutMS": 20, + "batchSize": 1 + }, + "saveResultAsEntity": "tailableCursor" + }, + { + "name": "iterateUntilDocumentOrError", + "object": "tailableCursor" + }, + { + "name": "iterateUntilDocumentOrError", + "object": "tailableCursor" + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll", + "tailable": true, + "awaitData": { + "$$exists": false + }, + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "getMore", + "databaseName": "test", + "command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "collection": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS is refreshed for getMore - failure", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "getMore" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "createFindCursor", + "object": "collection", + "arguments": { + "filter": {}, + "cursorType": "tailable", + "batchSize": 1 + }, + "saveResultAsEntity": "tailableCursor" + }, + { + "name": "iterateUntilDocumentOrError", + "object": "tailableCursor" + }, + { + "name": "iterateUntilDocumentOrError", + "object": "tailableCursor", + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll", + "tailable": true, + "awaitData": { + "$$exists": false + }, + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "getMore", + "databaseName": "test", + "command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "collection": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + } + ] +} diff --git 
a/test/test_csot.py b/test/test_csot.py new file mode 100644 index 0000000000..5c7833467f --- /dev/null +++ b/test/test_csot.py @@ -0,0 +1,32 @@ +# Copyright 2022-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Test the CSOT unified spec tests.""" + +import os +import sys + +sys.path[0:0] = [""] + +from test import unittest +from test.unified_format import generate_test_classes + +# Location of JSON test specifications. +TEST_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "csot") + +# Generate unified tests. +globals().update(generate_test_classes(TEST_PATH, module=__name__)) + +if __name__ == "__main__": + unittest.main() diff --git a/test/test_discovery_and_monitoring.py b/test/test_discovery_and_monitoring.py index a97eb65432..39979c2d10 100644 --- a/test/test_discovery_and_monitoring.py +++ b/test/test_discovery_and_monitoring.py @@ -395,7 +395,7 @@ def record_primary(self): """Run the recordPrimary test operation.""" self._previous_primary = self.scenario_client.primary - def wait_for_primary_change(self, timeout_ms): + def wait_for_primary_change(self, timeout): """Run the waitForPrimaryChange test operation.""" def primary_changed(): @@ -404,7 +404,6 @@ def primary_changed(): return False return primary != self._previous_primary - timeout = timeout_ms / 1000.0 wait_until(primary_changed, "change primary", timeout=timeout) def wait(self, ms): diff --git a/test/transactions/legacy/error-labels-blockConnection.json b/test/transactions/legacy/error-labels-blockConnection.json new file mode 100644 index 0000000000..56b646f7ad --- /dev/null +++ b/test/transactions/legacy/error-labels-blockConnection.json @@ -0,0 +1,159 @@ +{ + "runOn": [ + { + "minServerVersion": "4.2", + "topology": [ + "replicaset", + "sharded" + ] + } + ], + "database_name": "transaction-tests", + "collection_name": "test", + "data": [], + "tests": [ + { + "description": "add RetryableWriteError and UnknownTransactionCommitResult labels to connection errors", + "clientOptions": { + "socketTimeoutMS": 100 + }, + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "commitTransaction" + ], + "blockConnection": true, + "blockTimeMS": 150 + } + }, + "operations": [ + { + "name": "startTransaction", + "object": "session0" + }, + { + "name": "insertOne", + "object": "collection", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "result": { + "insertedId": 1 + } + }, + { + "name": "commitTransaction", + "object": "session0", + "result": { + "errorLabelsContain": [ + "RetryableWriteError", + "UnknownTransactionCommitResult" + ], + "errorLabelsOmit": [ + "TransientTransactionError" + ] + } + }, + { + "name": "commitTransaction", + "object": "session0" + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "readConcern": null, + "lsid": "session0", + "txnNumber": { + "$numberLong": "1" + }, 
+ "startTransaction": true, + "autocommit": false, + "writeConcern": null + }, + "command_name": "insert", + "database_name": "transaction-tests" + } + }, + { + "command_started_event": { + "command": { + "commitTransaction": 1, + "lsid": "session0", + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": null, + "autocommit": false, + "writeConcern": null + }, + "command_name": "commitTransaction", + "database_name": "admin" + } + }, + { + "command_started_event": { + "command": { + "commitTransaction": 1, + "lsid": "session0", + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": null, + "autocommit": false, + "writeConcern": { + "w": "majority", + "wtimeout": 10000 + } + }, + "command_name": "commitTransaction", + "database_name": "admin" + } + }, + { + "command_started_event": { + "command": { + "commitTransaction": 1, + "lsid": "session0", + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": null, + "autocommit": false, + "writeConcern": { + "w": "majority", + "wtimeout": 10000 + } + }, + "command_name": "commitTransaction", + "database_name": "admin" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 1 + } + ] + } + } + } + ] +} diff --git a/test/unified-test-format/invalid/collectionData-additionalProperties.json b/test/unified-test-format/invalid/collectionData-additionalProperties.json index 2d85093109..1f4ed4c154 100644 --- a/test/unified-test-format/invalid/collectionData-additionalProperties.json +++ b/test/unified-test-format/invalid/collectionData-additionalProperties.json @@ -18,8 +18,7 @@ "collection": { "id": "collection0", "database": "database0", - "collectionName": "foo", - "foo": 0 + "collectionName": "foo" } } ], diff --git a/test/unified-test-format/invalid/collectionData-collectionName-required.json b/test/unified-test-format/invalid/collectionData-collectionName-required.json index 040dd86a1c..5426418c88 100644 --- a/test/unified-test-format/invalid/collectionData-collectionName-required.json +++ b/test/unified-test-format/invalid/collectionData-collectionName-required.json @@ -18,8 +18,7 @@ "collection": { "id": "collection0", "database": "database0", - "collectionName": "foo", - "foo": 0 + "collectionName": "foo" } } ], diff --git a/test/unified-test-format/invalid/collectionData-collectionName-type.json b/test/unified-test-format/invalid/collectionData-collectionName-type.json index 676d822e5e..2a922de13e 100644 --- a/test/unified-test-format/invalid/collectionData-collectionName-type.json +++ b/test/unified-test-format/invalid/collectionData-collectionName-type.json @@ -18,8 +18,7 @@ "collection": { "id": "collection0", "database": "database0", - "collectionName": "foo", - "foo": 0 + "collectionName": "foo" } } ], diff --git a/test/unified-test-format/invalid/collectionData-createOptions-type.json b/test/unified-test-format/invalid/collectionData-createOptions-type.json new file mode 100644 index 0000000000..5b78bbcbb6 --- /dev/null +++ b/test/unified-test-format/invalid/collectionData-createOptions-type.json @@ -0,0 +1,39 @@ +{ + "description": "collectionData-createOptions-type", + "schemaVersion": "1.9", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "foo" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "foo" + } + } + ], + "initialData": [ + { + "collectionName": "foo", + "databaseName": "foo", + "createOptions": 0, + "documents": [] + } + ], + "tests": 
[ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/collectionData-databaseName-required.json b/test/unified-test-format/invalid/collectionData-databaseName-required.json index 7548f9d5be..8417801390 100644 --- a/test/unified-test-format/invalid/collectionData-databaseName-required.json +++ b/test/unified-test-format/invalid/collectionData-databaseName-required.json @@ -18,8 +18,7 @@ "collection": { "id": "collection0", "database": "database0", - "collectionName": "foo", - "foo": 0 + "collectionName": "foo" } } ], diff --git a/test/unified-test-format/invalid/collectionData-databaseName-type.json b/test/unified-test-format/invalid/collectionData-databaseName-type.json index ef719bbf6a..d3480e8034 100644 --- a/test/unified-test-format/invalid/collectionData-databaseName-type.json +++ b/test/unified-test-format/invalid/collectionData-databaseName-type.json @@ -18,8 +18,7 @@ "collection": { "id": "collection0", "database": "database0", - "collectionName": "foo", - "foo": 0 + "collectionName": "foo" } } ], diff --git a/test/unified-test-format/invalid/collectionData-documents-items.json b/test/unified-test-format/invalid/collectionData-documents-items.json index 2916718d50..beb5af61c4 100644 --- a/test/unified-test-format/invalid/collectionData-documents-items.json +++ b/test/unified-test-format/invalid/collectionData-documents-items.json @@ -18,8 +18,7 @@ "collection": { "id": "collection0", "database": "database0", - "collectionName": "foo", - "foo": 0 + "collectionName": "foo" } } ], diff --git a/test/unified-test-format/invalid/collectionData-documents-required.json b/test/unified-test-format/invalid/collectionData-documents-required.json index 7b8a7ead2a..4aadf9b159 100644 --- a/test/unified-test-format/invalid/collectionData-documents-required.json +++ b/test/unified-test-format/invalid/collectionData-documents-required.json @@ -18,8 +18,7 @@ "collection": { "id": "collection0", "database": "database0", - "collectionName": "foo", - "foo": 0 + "collectionName": "foo" } } ], diff --git a/test/unified-test-format/invalid/collectionData-documents-type.json b/test/unified-test-format/invalid/collectionData-documents-type.json index 953cabae6e..9cbd3c164c 100644 --- a/test/unified-test-format/invalid/collectionData-documents-type.json +++ b/test/unified-test-format/invalid/collectionData-documents-type.json @@ -18,8 +18,7 @@ "collection": { "id": "collection0", "database": "database0", - "collectionName": "foo", - "foo": 0 + "collectionName": "foo" } } ], diff --git a/test/unified-test-format/invalid/collectionOrDatabaseOptions-timeoutMS-type.json b/test/unified-test-format/invalid/collectionOrDatabaseOptions-timeoutMS-type.json new file mode 100644 index 0000000000..088e9d1eb2 --- /dev/null +++ b/test/unified-test-format/invalid/collectionOrDatabaseOptions-timeoutMS-type.json @@ -0,0 +1,27 @@ +{ + "description": "collectionOrDatabaseOptions-timeoutMS-type", + "schemaVersion": "1.9", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "foo", + "databaseOptions": { + "timeoutMS": 4.5 + } + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/expectedError-isTimeoutError-type.json b/test/unified-test-format/invalid/expectedError-isTimeoutError-type.json new file mode 100644 index 0000000000..5683911d0d --- /dev/null +++ 
b/test/unified-test-format/invalid/expectedError-isTimeoutError-type.json @@ -0,0 +1,25 @@ +{ + "description": "expectedError-isTimeoutError-type", + "schemaVersion": "1.9", + "createEntities": [ + { + "client": { + "id": "client0" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [ + { + "name": "foo", + "object": "client0", + "expectError": { + "isTimeoutError": 0 + } + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/expectedEventsForClient-ignoreExtraEvents-type.json b/test/unified-test-format/invalid/expectedEventsForClient-ignoreExtraEvents-type.json new file mode 100644 index 0000000000..965190664e --- /dev/null +++ b/test/unified-test-format/invalid/expectedEventsForClient-ignoreExtraEvents-type.json @@ -0,0 +1,24 @@ +{ + "description": "expectedEventsForClient-ignoreExtraEvents-type", + "schemaVersion": "1.7", + "createEntities": [ + { + "client": { + "id": "client0" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [], + "expectEvents": [ + { + "client": "client0", + "events": [], + "ignoreExtraEvents": 0 + } + ] + } + ] +} diff --git a/test/unified-test-format/valid-pass/collectionData-createOptions.json b/test/unified-test-format/valid-pass/collectionData-createOptions.json new file mode 100644 index 0000000000..07ab66baa0 --- /dev/null +++ b/test/unified-test-format/valid-pass/collectionData-createOptions.json @@ -0,0 +1,68 @@ +{ + "description": "collectionData-createOptions", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "3.6" + } + ], + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "database0" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "database0", + "createOptions": { + "capped": true, + "size": 512 + }, + "documents": [ + { + "_id": 1, + "x": 11 + } + ] + } + ], + "tests": [ + { + "description": "collection is created with the correct options", + "operations": [ + { + "name": "runCommand", + "object": "database0", + "arguments": { + "commandName": "collStats", + "command": { + "collStats": "coll0", + "scale": 1 + } + }, + "expectResult": { + "capped": true, + "maxSize": 512 + } + } + ] + } + ] +} diff --git a/test/unified-test-format/valid-pass/createEntities-operation.json b/test/unified-test-format/valid-pass/createEntities-operation.json new file mode 100644 index 0000000000..3fde42919d --- /dev/null +++ b/test/unified-test-format/valid-pass/createEntities-operation.json @@ -0,0 +1,74 @@ +{ + "description": "createEntities-operation", + "schemaVersion": "1.9", + "tests": [ + { + "description": "createEntities operation", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client1", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database1", + "client": "client1", + "databaseName": "database1" + } + }, + { + "collection": { + "id": "collection1", + "database": "database1", + "collectionName": "coll1" + } + } + ] + } + }, + { + "name": "deleteOne", + "object": "collection1", + "arguments": { + "filter": { + "_id": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client1", + "events": [ + { + "commandStartedEvent": { + "command": { + "delete": "coll1", + "deletes": [ + { + "q": { + "_id": 1 + }, + "limit": 1 + } + ] 
+ }, + "commandName": "delete", + "databaseName": "database1" + } + } + ] + } + ] + } + ] +} diff --git a/test/unified-test-format/valid-pass/entity-cursor-iterateOnce.json b/test/unified-test-format/valid-pass/entity-cursor-iterateOnce.json new file mode 100644 index 0000000000..88fc28e34e --- /dev/null +++ b/test/unified-test-format/valid-pass/entity-cursor-iterateOnce.json @@ -0,0 +1,108 @@ +{ + "description": "entity-cursor-iterateOnce", + "schemaVersion": "1.9", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "database0" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "databaseName": "database0", + "collectionName": "coll0", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 3 + } + ] + } + ], + "tests": [ + { + "description": "iterateOnce", + "operations": [ + { + "name": "createFindCursor", + "object": "collection0", + "arguments": { + "filter": {}, + "batchSize": 2 + }, + "saveResultAsEntity": "cursor0" + }, + { + "name": "iterateUntilDocumentOrError", + "object": "cursor0", + "expectResult": { + "_id": 1 + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "cursor0", + "expectResult": { + "_id": 2 + } + }, + { + "name": "iterateOnce", + "object": "cursor0" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "coll0", + "filter": {}, + "batchSize": 2 + }, + "commandName": "find", + "databaseName": "database0" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$type": "long" + }, + "collection": "coll0" + }, + "commandName": "getMore" + } + } + ] + } + ] + } + ] +} diff --git a/test/unified-test-format/valid-pass/matches-lte-operator.json b/test/unified-test-format/valid-pass/matches-lte-operator.json new file mode 100644 index 0000000000..4de65c5838 --- /dev/null +++ b/test/unified-test-format/valid-pass/matches-lte-operator.json @@ -0,0 +1,78 @@ +{ + "description": "matches-lte-operator", + "schemaVersion": "1.9", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "database0Name" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "database0Name", + "documents": [] + } + ], + "tests": [ + { + "description": "special lte matching operator", + "operations": [ + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "_id": 1, + "y": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "coll0", + "documents": [ + { + "_id": { + "$$lte": 1 + }, + "y": { + "$$lte": 2 + } + } + ] + }, + "commandName": "insert", + "databaseName": "database0Name" + } + } + ] + } + ] + } + ] +} diff --git a/test/unified_format.py b/test/unified_format.py index 61c96d6021..cdba80c23e 100644 --- a/test/unified_format.py +++ b/test/unified_format.py @@ -42,6 +42,7 @@ from test.version import Version from typing import Any +import pymongo from bson import SON, Code, DBRef, Decimal128, Int64, MaxKey, MinKey, json_util from bson.binary import 
Binary from bson.objectid import ObjectId @@ -56,9 +57,13 @@ BulkWriteError, ConfigurationError, ConnectionFailure, + ExecutionTimeout, InvalidOperation, + NetworkTimeout, NotPrimaryError, PyMongoError, + ServerSelectionTimeoutError, + WriteConcernError, ) from pymongo.monitoring import ( _SENSITIVE_COMMANDS, @@ -198,11 +203,16 @@ def parse_bulk_write_error_result(error): class NonLazyCursor(object): """A find cursor proxy that creates the remote cursor when initialized.""" - def __init__(self, find_cursor): + def __init__(self, find_cursor, client): + self.client = client self.find_cursor = find_cursor # Create the server side cursor. self.first_result = next(find_cursor, None) + @property + def alive(self): + return self.first_result is not None or self.find_cursor.alive + def __next__(self): if self.first_result is not None: first = self.first_result @@ -210,8 +220,12 @@ def __next__(self): return first return next(self.find_cursor) + # Added to support the iterateOnce operation. + try_next = __next__ + def close(self): self.find_cursor.close() + self.client = None class EventListenerUtil(CMAPListener, CommandListener): @@ -520,6 +534,11 @@ def _operation_sessionLsid(self, spec, actual, key_to_compare): expected_lsid = self.test.entity_map.get_lsid_for_session(spec) self.test.assertEqual(expected_lsid, actual[key_to_compare]) + def _operation_lte(self, spec, actual, key_to_compare): + if key_to_compare not in actual: + self.test.fail(f"Actual command is missing the {key_to_compare} field: {spec}") + self.test.assertLessEqual(actual[key_to_compare], spec) + def _evaluate_special_operation(self, opname, spec, actual, key_to_compare): method_name = "_operation_%s" % (opname.strip("$"),) try: @@ -710,7 +729,7 @@ class UnifiedSpecTestMixinV1(IntegrationTest): a class attribute ``TEST_SPEC``. """ - SCHEMA_VERSION = Version.from_string("1.7") + SCHEMA_VERSION = Version.from_string("1.9") RUN_ON_LOAD_BALANCER = True RUN_ON_SERVERLESS = True TEST_SPEC: Any @@ -730,6 +749,7 @@ def insert_initial_data(self, initial_data): for i, collection_data in enumerate(initial_data): coll_name = collection_data["collectionName"] db_name = collection_data["databaseName"] + opts = collection_data.get("createOptions", {}) documents = collection_data["documents"] # Setup the collection with as few majority writes as possible. 
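For context on the ``createOptions`` plumbing above: PyMongo's ``create_collection`` forwards unrecognized keyword arguments to the server's ``create`` command, so a spec test's ``createOptions`` document maps directly onto keyword arguments. A minimal sketch of the equivalent direct call (the collection name and options here are illustrative, mirroring the capped-collection fixtures used by the CSOT tailable-cursor tests)::

    from pymongo import MongoClient

    db = MongoClient().test
    # Equivalent of an initialData entry with
    # "createOptions": {"capped": true, "size": 512}:
    db.create_collection("coll0", capped=True, size=512)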
@@ -741,10 +761,12 @@ def insert_initial_data(self, initial_data): else: wc = WriteConcern(w=1) if documents: + if opts: + db.create_collection(coll_name, **opts) db.get_collection(coll_name, write_concern=wc).insert_many(documents) else: # Ensure collection exists - db.create_collection(coll_name, write_concern=wc) + db.create_collection(coll_name, write_concern=wc, **opts) @classmethod def setUpClass(cls): @@ -782,9 +804,26 @@ def maybe_skip_test(self, spec): "Dirty explicit session is discarded" in spec["description"] or "Dirty implicit session is discarded" in spec["description"] ): - raise unittest.SkipTest("MMAPv1 does not support retryWrites=True") + self.skipTest("MMAPv1 does not support retryWrites=True") elif "Client side error in command starting transaction" in spec["description"]: - raise unittest.SkipTest("Implement PYTHON-1894") + self.skipTest("Implement PYTHON-1894") + class_name = self.__class__.__name__.lower() + description = spec["description"].lower() + if "csot" in class_name: + if "change" in description or "change" in class_name: + self.skipTest("CSOT not implemented for watch()") + if "cursors" in class_name: + self.skipTest("CSOT not implemented for cursors") + if "tailable" in class_name: + self.skipTest("CSOT not implemented for tailable cursors") + if "sessions" in class_name: + self.skipTest("CSOT not implemented for sessions") + if "withtransaction" in description: + self.skipTest("CSOT not implemented for with_transaction") + if "transaction" in class_name or "transaction" in description: + self.skipTest("CSOT not implemented for transactions") + if "socket timeout" in description: + self.skipTest("CSOT not implemented for socket timeouts") # Some tests need to be skipped based on the operations they try to run. for op in spec["operations"]: @@ -801,10 +840,21 @@ def maybe_skip_test(self, spec): if not client_context.test_commands_enabled: if name == "failPoint" or name == "targetedFailPoint": self.skipTest("Test commands must be enabled to use fail points") + if "timeoutMode" in op.get("arguments", {}): + self.skipTest("PyMongo does not support timeoutMode") + if name == "createEntities": + self.maybe_skip_entity(op.get("arguments", {}).get("entities", [])) + + def maybe_skip_entity(self, entities): + for entity in entities: + entity_type = next(iter(entity)) + if entity_type == "bucket": + self.skipTest("GridFS is not currently supported (PYTHON-2459)") def process_error(self, exception, spec): is_error = spec.get("isError") is_client_error = spec.get("isClientError") + is_timeout_error = spec.get("isTimeoutError") error_contains = spec.get("errorContains") error_code = spec.get("errorCode") error_code_name = spec.get("errorCodeName") @@ -825,6 +875,15 @@ def process_error(self, exception, spec): else: self.assertNotIsInstance(exception, PyMongoError) + if is_timeout_error: + # TODO: PYTHON-3291 Implement error transformation. 
+ if isinstance(exception, WriteConcernError): + self.assertEqual(exception.code, 50) + else: + self.assertIsInstance( + exception, (NetworkTimeout, ExecutionTimeout, ServerSelectionTimeoutError) + ) + if error_contains: if isinstance(exception, BulkWriteError): errmsg = str(exception.details).lower() @@ -925,15 +984,21 @@ def _collectionOperation_createFindCursor(self, target, *args, **kwargs): self.__raise_if_unsupported("find", target, Collection) if "filter" not in kwargs: self.fail('createFindCursor requires a "filter" argument') - cursor = NonLazyCursor(target.find(*args, **kwargs)) + cursor = NonLazyCursor(target.find(*args, **kwargs), target.database.client) self.addCleanup(cursor.close) return cursor + def _collectionOperation_count(self, target, *args, **kwargs): + self.skipTest("PyMongo does not support collection.count()") + def _collectionOperation_listIndexes(self, target, *args, **kwargs): if "batch_size" in kwargs: self.skipTest("PyMongo does not support batch_size for list_indexes") return target.list_indexes(*args, **kwargs) + def _collectionOperation_listIndexNames(self, target, *args, **kwargs): + self.skipTest("PyMongo does not support list_index_names") + def _sessionOperation_withTransaction(self, target, *args, **kwargs): if client_context.storage_engine == "mmapv1": self.skipTest("MMAPv1 does not support document-level locking") @@ -946,13 +1011,21 @@ def _sessionOperation_startTransaction(self, target, *args, **kwargs): self.__raise_if_unsupported("startTransaction", target, ClientSession) return target.start_transaction(*args, **kwargs) + def _cursor_iterateOnce(self, target, *args, **kwargs): + self.__raise_if_unsupported("iterateOnce", target, NonLazyCursor, ChangeStream) + return target.try_next() + def _changeStreamOperation_iterateUntilDocumentOrError(self, target, *args, **kwargs): self.__raise_if_unsupported("iterateUntilDocumentOrError", target, ChangeStream) return next(target) def _cursor_iterateUntilDocumentOrError(self, target, *args, **kwargs): self.__raise_if_unsupported("iterateUntilDocumentOrError", target, NonLazyCursor) - return next(target) + while target.alive: + try: + return next(target) + except StopIteration: + pass def _cursor_close(self, target, *args, **kwargs): self.__raise_if_unsupported("close", target, NonLazyCursor) @@ -960,6 +1033,7 @@ def _cursor_close(self, target, *args, **kwargs): def run_entity_operation(self, spec): target = self.entity_map[spec["object"]] + client = target opname = spec["name"] opargs = spec.get("arguments") expect_error = spec.get("expectError") @@ -977,20 +1051,26 @@ def run_entity_operation(self, spec): spec, arguments, camel_to_snake(opname), self.entity_map, self.run_operations ) else: - arguments = tuple() + arguments = {} if isinstance(target, MongoClient): method_name = "_clientOperation_%s" % (opname,) + client = target elif isinstance(target, Database): method_name = "_databaseOperation_%s" % (opname,) + client = target.client elif isinstance(target, Collection): method_name = "_collectionOperation_%s" % (opname,) + client = target.database.client elif isinstance(target, ChangeStream): method_name = "_changeStreamOperation_%s" % (opname,) + client = target._client elif isinstance(target, NonLazyCursor): method_name = "_cursor_%s" % (opname,) + client = target.client elif isinstance(target, ClientSession): method_name = "_sessionOperation_%s" % (opname,) + client = target._client elif isinstance(target, GridFSBucket): raise NotImplementedError else: @@ -1007,7 +1087,17 @@ def 
run_entity_operation(self, spec): cmd = functools.partial(method, target) try: - result = cmd(**dict(arguments)) + # TODO: PYTHON-3289 apply inherited timeout by default. + inherit_timeout = getattr(target, "timeout", None) + # CSOT: Translate the spec test "timeout" arg into pymongo's context timeout API. + if "timeout" in arguments or inherit_timeout is not None: + timeout = arguments.pop("timeout", None) + if timeout is None: + timeout = inherit_timeout + with pymongo.timeout(timeout): + result = cmd(**dict(arguments)) + else: + result = cmd(**dict(arguments)) except Exception as exc: # Ignore all operation errors but to avoid masking bugs don't # ignore things like TypeError and ValueError. @@ -1057,6 +1147,9 @@ def _testOperation_targetedFailPoint(self, spec): self.addCleanup(client.close) self.__set_fail_point(client=client, command_args=spec["failPoint"]) + def _testOperation_createEntities(self, spec): + self.entity_map.create_entities_from_spec(spec["entities"], uri=self._uri) + def _testOperation_assertSessionTransactionState(self, spec): session = self.entity_map[spec["session"]] expected_state = getattr(_TxnState, spec["state"].upper()) @@ -1245,6 +1338,7 @@ def run_scenario(self, spec, uri=None): raise unittest.SkipTest("%s" % (skip_reason,)) # process createEntities + self._uri = uri self.entity_map = EntityMapUtil(self) self.entity_map.create_entities_from_spec(self.TEST_SPEC.get("createEntities", []), uri=uri) # process initialData @@ -1309,7 +1403,7 @@ def generate_test_classes( class_name_prefix="", expected_failures=[], # noqa: B006 bypass_test_generation_errors=False, - **kwargs + **kwargs, ): """Method for generating test classes. Returns a dictionary where keys are the names of test classes and values are the test class objects.""" diff --git a/test/uri_options/connection-options.json b/test/uri_options/connection-options.json index 8bb05cc721..b2669b6cf1 100644 --- a/test/uri_options/connection-options.json +++ b/test/uri_options/connection-options.json @@ -2,7 +2,7 @@ "tests": [ { "description": "Valid connection and timeout options are parsed correctly", - "uri": "mongodb://example.com/?appname=URI-OPTIONS-SPEC-TEST&connectTimeoutMS=20000&heartbeatFrequencyMS=5000&localThresholdMS=3000&maxIdleTimeMS=50000&replicaSet=uri-options-spec&retryWrites=true&serverSelectionTimeoutMS=15000&socketTimeoutMS=7500", + "uri": "mongodb://example.com/?appname=URI-OPTIONS-SPEC-TEST&connectTimeoutMS=20000&heartbeatFrequencyMS=5000&localThresholdMS=3000&maxIdleTimeMS=50000&replicaSet=uri-options-spec&retryWrites=true&serverSelectionTimeoutMS=15000&socketTimeoutMS=7500&timeoutMS=100", "valid": true, "warning": false, "hosts": null, @@ -16,7 +16,8 @@ "replicaSet": "uri-options-spec", "retryWrites": true, "serverSelectionTimeoutMS": 15000, - "socketTimeoutMS": 7500 + "socketTimeoutMS": 7500, + "timeoutMS": 100 } }, { @@ -238,6 +239,35 @@ "hosts": null, "auth": null, "options": {} + }, + { + "description": "timeoutMS=0", + "uri": "mongodb://example.com/?timeoutMS=0", + "valid": true, + "warning": false, + "hosts": null, + "auth": null, + "options": { + "timeoutMS": 0 + } + }, + { + "description": "Non-numeric timeoutMS causes a warning", + "uri": "mongodb://example.com/?timeoutMS=invalid", + "valid": true, + "warning": true, + "hosts": null, + "auth": null, + "options": {} + }, + { + "description": "Too low timeoutMS causes a warning", + "uri": "mongodb://example.com/?timeoutMS=-2", + "valid": true, + "warning": true, + "hosts": null, + "auth": null, + "options": {} } ] } diff --git 
a/test/uri_options/tls-options.json b/test/uri_options/tls-options.json index edf6042943..8beaaddd86 100644 --- a/test/uri_options/tls-options.json +++ b/test/uri_options/tls-options.json @@ -44,15 +44,6 @@ "tlsAllowInvalidCertificates": true } }, - { - "description": "Invalid tlsAllowInvalidCertificates causes a warning", - "uri": "mongodb://example.com/?tlsAllowInvalidCertificates=invalid", - "valid": true, - "warning": true, - "hosts": null, - "auth": null, - "options": {} - }, { "description": "tlsAllowInvalidHostnames is parsed correctly", "uri": "mongodb://example.com/?tlsAllowInvalidHostnames=true", diff --git a/test/utils.py b/test/utils.py index 03985772a0..1aeb7571ab 100644 --- a/test/utils.py +++ b/test/utils.py @@ -35,6 +35,7 @@ from bson.son import SON from pymongo import MongoClient, monitoring, operations, read_preferences from pymongo.collection import ReturnDocument +from pymongo.cursor import CursorType from pymongo.errors import ConfigurationError, OperationFailure from pymongo.hello import HelloCompat from pymongo.monitoring import _SENSITIVE_COMMANDS @@ -651,6 +652,9 @@ def parse_collection_options(opts): if "readConcern" in opts: opts["read_concern"] = ReadConcern(**dict(opts.pop("readConcern"))) + + if "timeoutMS" in opts: + opts["timeout"] = int(opts.pop("timeoutMS")) / 1000.0 return opts @@ -988,6 +992,10 @@ def parse_spec_options(opts): if "readConcern" in opts: opts["read_concern"] = ReadConcern(**dict(opts.pop("readConcern"))) + if "timeoutMS" in opts: + assert isinstance(opts["timeoutMS"], int) + opts["timeout"] = int(opts.pop("timeoutMS")) / 1000.0 + if "maxTimeMS" in opts: opts["max_time_ms"] = opts.pop("maxTimeMS") @@ -1041,6 +1049,8 @@ def prepare_spec_arguments(spec, arguments, opname, entity_map, with_txn_callbac # Aggregate uses "batchSize", while find uses batch_size. elif (arg_name == "batchSize" or arg_name == "allowDiskUse") and opname == "aggregate": continue + elif arg_name == "timeoutMode": + raise unittest.SkipTest("PyMongo does not support timeoutMode") # Requires boolean returnDocument. 
elif arg_name == "returnDocument": arguments[c2s] = getattr(ReturnDocument, arguments.pop(arg_name).upper()) @@ -1090,5 +1100,13 @@ def prepare_spec_arguments(spec, arguments, opname, entity_map, with_txn_callbac arguments["index_or_name"] = arguments.pop(arg_name) elif opname == "rename" and arg_name == "to": arguments["new_name"] = arguments.pop(arg_name) + elif arg_name == "cursorType": + cursor_type = arguments.pop(arg_name) + if cursor_type == "tailable": + arguments["cursor_type"] = CursorType.TAILABLE + elif cursor_type == "tailableAwait": + arguments["cursor_type"] = CursorType.TAILABLE + else: + assert False, f"Unsupported cursorType: {cursor_type}" else: arguments[c2s] = arguments.pop(arg_name) From 70cfe460639e66b00395567a2f441db705c7a1a9 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Tue, 7 Jun 2022 14:29:07 -0400 Subject: [PATCH 0164/1588] PYTHON-3290 Support nested pymongo.timeout() calls (#962) --- pymongo/__init__.py | 12 ++++++++++++ pymongo/_csot.py | 25 +++++++++++++++---------- pymongo/topology.py | 3 ++- test/test_csot.py | 43 ++++++++++++++++++++++++++++++++++++++++++- 4 files changed, 71 insertions(+), 12 deletions(-) diff --git a/pymongo/__init__.py b/pymongo/__init__.py index bdb1ec97c1..9e877e9551 100644 --- a/pymongo/__init__.py +++ b/pymongo/__init__.py @@ -127,6 +127,18 @@ def timeout(seconds: Optional[float]) -> ContextManager: NetworkTimeout) as exc: print(f"block timed out: {exc!r}") + When nesting :func:`~pymongo.timeout`, the nested block overrides the + timeout. When exiting the block, the previous deadline is restored:: + + with pymongo.timeout(5): + coll.find_one() # Uses the 5 second deadline. + with pymongo.timeout(3): + coll.find_one() # Uses the 3 second deadline. + coll.find_one() # Uses the original 5 second deadline. + with pymongo.timeout(10): + coll.find_one() # Uses the 10 second deadline. + coll.find_one() # Uses the original 5 second deadline. + :Parameters: - `seconds`: A non-negative floating point number expressing seconds, or None. 
diff --git a/pymongo/_csot.py b/pymongo/_csot.py index 4085562ca8..f1601f75d2 100644 --- a/pymongo/_csot.py +++ b/pymongo/_csot.py @@ -15,8 +15,8 @@ """Internal helpers for CSOT.""" import time -from contextvars import ContextVar -from typing import Optional +from contextvars import ContextVar, Token +from typing import Optional, Tuple TIMEOUT: ContextVar[Optional[float]] = ContextVar("TIMEOUT", default=None) RTT: ContextVar[float] = ContextVar("RTT", default=0.0) @@ -39,11 +39,6 @@ def set_rtt(rtt: float) -> None: RTT.set(rtt) -def set_timeout(timeout: Optional[float]) -> None: - TIMEOUT.set(timeout) - DEADLINE.set(time.monotonic() + timeout if timeout else float("inf")) - - def remaining() -> Optional[float]: if not get_timeout(): return None @@ -67,14 +62,24 @@ class _TimeoutContext(object): client.test.test.insert_one({}) """ - __slots__ = ("_timeout",) + __slots__ = ("_timeout", "_tokens") def __init__(self, timeout: Optional[float]): self._timeout = timeout + self._tokens: Optional[Tuple[Token, Token, Token]] = None def __enter__(self): - set_timeout(self._timeout) + timeout_token = TIMEOUT.set(self._timeout) + deadline_token = DEADLINE.set( + time.monotonic() + self._timeout if self._timeout else float("inf") + ) + rtt_token = RTT.set(0.0) + self._tokens = (timeout_token, deadline_token, rtt_token) return self def __exit__(self, exc_type, exc_val, exc_tb): - set_timeout(None) + if self._tokens: + timeout_token, deadline_token, rtt_token = self._tokens + TIMEOUT.reset(timeout_token) + DEADLINE.reset(deadline_token) + RTT.reset(rtt_token) diff --git a/pymongo/topology.py b/pymongo/topology.py index db832a8e55..4e82a41228 100644 --- a/pymongo/topology.py +++ b/pymongo/topology.py @@ -270,7 +270,8 @@ def _select_server(self, selector, server_selection_timeout=None, address=None): def select_server(self, selector, server_selection_timeout=None, address=None): """Like select_servers, but choose a random server if several match.""" server = self._select_server(selector, server_selection_timeout, address) - _csot.set_rtt(server.description.round_trip_time) + if _csot.get_timeout(): + _csot.set_rtt(server.description.round_trip_time) return server def select_server_by_address(self, address, server_selection_timeout=None): diff --git a/test/test_csot.py b/test/test_csot.py index 5c7833467f..d00f8c2916 100644 --- a/test/test_csot.py +++ b/test/test_csot.py @@ -19,14 +19,55 @@ sys.path[0:0] = [""] -from test import unittest +from test import IntegrationTest, unittest from test.unified_format import generate_test_classes +import pymongo +from pymongo import _csot + # Location of JSON test specifications. TEST_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "csot") # Generate unified tests. 
globals().update(generate_test_classes(TEST_PATH, module=__name__)) + +class TestCSOT(IntegrationTest): + def test_timeout_nested(self): + coll = self.db.coll + self.assertEqual(_csot.get_timeout(), None) + self.assertEqual(_csot.get_deadline(), float("inf")) + self.assertEqual(_csot.get_rtt(), 0.0) + with pymongo.timeout(10): + coll.find_one() + self.assertEqual(_csot.get_timeout(), 10) + deadline_10 = _csot.get_deadline() + + with pymongo.timeout(15): + coll.find_one() + self.assertEqual(_csot.get_timeout(), 15) + self.assertGreater(_csot.get_deadline(), deadline_10) + + # Should be reset to previous values + self.assertEqual(_csot.get_timeout(), 10) + self.assertEqual(_csot.get_deadline(), deadline_10) + coll.find_one() + + with pymongo.timeout(5): + coll.find_one() + self.assertEqual(_csot.get_timeout(), 5) + self.assertLess(_csot.get_deadline(), deadline_10) + + # Should be reset to previous values + self.assertEqual(_csot.get_timeout(), 10) + self.assertEqual(_csot.get_deadline(), deadline_10) + coll.find_one() + + # Should be reset to previous values + self.assertEqual(_csot.get_timeout(), None) + self.assertEqual(_csot.get_deadline(), float("inf")) + self.assertEqual(_csot.get_rtt(), 0.0) + + if __name__ == "__main__": unittest.main() From 09b18244ccb80c58dc203208cf1ca04f7381f8f7 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Tue, 7 Jun 2022 15:46:13 -0400 Subject: [PATCH 0165/1588] PYTHON-3293 Document Queryable Encryption API is in beta (#965) --- doc/changelog.rst | 3 ++- pymongo/collection.py | 15 ++++++++++++++- pymongo/database.py | 16 ++++++++++------ pymongo/encryption.py | 21 +++++++++++++++++---- pymongo/encryption_options.py | 12 ++++++++---- 5 files changed, 51 insertions(+), 16 deletions(-) diff --git a/doc/changelog.rst b/doc/changelog.rst index 5497b4f3e9..c53ec2201a 100644 --- a/doc/changelog.rst +++ b/doc/changelog.rst @@ -9,9 +9,10 @@ Changes in Version 4.2 PyMongo 4.2 brings a number of improvements including: - Support for MongoDB 6.0. +- Support for the Queryable Encryption beta with MongoDB 6.0. Note that backwards-breaking + changes may be made before the final release. - Provisional (beta) support for :func:`pymongo.timeout` to apply a single timeout to an entire block of pymongo operations. -- Beta support for Queryable Encryption with MongoDB 6.0. Bug fixes ......... diff --git a/pymongo/collection.py b/pymongo/collection.py index 9f3f73198b..27550e0fb3 100644 --- a/pymongo/collection.py +++ b/pymongo/collection.py @@ -159,9 +159,14 @@ def __init__( - `session` (optional): a :class:`~pymongo.client_session.ClientSession` that is used with the create collection command + - `encrypted_fields`: **(BETA)** Document that describes the encrypted fields for + Queryable Encryption. If provided it will be passed to the create collection command. - `**kwargs` (optional): additional keyword arguments will be passed as options for the create collection command + .. versionchanged:: 4.2 + Added ``encrypted_fields`` parameter. + .. versionchanged:: 4.0 Removed the reindex, map_reduce, inline_map_reduce, parallel_scan, initialize_unordered_bulk_op, @@ -1156,6 +1161,7 @@ def drop( self, session: Optional["ClientSession"] = None, comment: Optional[Any] = None, + encrypted_fields: Optional[Mapping[str, Any]] = None, ) -> None: """Alias for :meth:`~pymongo.database.Database.drop_collection`. @@ -1164,12 +1170,17 @@ def drop( :class:`~pymongo.client_session.ClientSession`. - `comment` (optional): A user-provided comment to attach to this command. 
+ - `encrypted_fields`: **(BETA)** Document that describes the encrypted fields for + Queryable Encryption. The following two calls are equivalent: >>> db.foo.drop() >>> db.drop_collection("foo") + .. versionchanged:: 4.2 + Added ``encrypted_fields`` parameter. + .. versionchanged:: 4.1 Added ``comment`` parameter. @@ -1186,7 +1197,9 @@ def drop( self.write_concern, self.read_concern, ) - dbo.drop_collection(self.__name, session=session, comment=comment) + dbo.drop_collection( + self.__name, session=session, comment=comment, encrypted_fields=encrypted_fields + ) def _delete( self, diff --git a/pymongo/database.py b/pymongo/database.py index 393f63c8c8..c9447c1a77 100644 --- a/pymongo/database.py +++ b/pymongo/database.py @@ -336,9 +336,8 @@ def create_collection( :class:`~pymongo.collation.Collation`. - `session` (optional): a :class:`~pymongo.client_session.ClientSession`. - - `encrypted_fields`: Document that describes the encrypted fields for Queryable - Encryption. - For example:: + - `encrypted_fields`: **(BETA)** Document that describes the encrypted fields for + Queryable Encryption. For example:: { "escCollection": "enxcol_.encryptedCollection.esc", @@ -391,6 +390,9 @@ def create_collection( - ``comment`` (str): a user-provided comment to attach to this command. This option is only supported on MongoDB >= 4.4. + .. versionchanged:: 4.2 + Added ``encrypted_fields`` parameter. + .. versionchanged:: 3.11 This method is now supported inside multi-document transactions with MongoDB 4.4+. @@ -955,9 +957,8 @@ def drop_collection( :class:`~pymongo.client_session.ClientSession`. - `comment` (optional): A user-provided comment to attach to this command. - - `encrypted_fields`: Document that describes the encrypted fields for Queryable - Encryption. - For example:: + - `encrypted_fields`: **(BETA)** Document that describes the encrypted fields for + Queryable Encryption. For example:: { "escCollection": "enxcol_.encryptedCollection.esc", @@ -983,6 +984,9 @@ def drop_collection( .. note:: The :attr:`~pymongo.database.Database.write_concern` of this database is automatically applied to this operation. + .. versionchanged:: 4.2 + Added ``encrypted_fields`` parameter. + .. versionchanged:: 4.1 Added ``comment`` parameter. diff --git a/pymongo/encryption.py b/pymongo/encryption.py index a088bd2da8..0a8bf69a38 100644 --- a/pymongo/encryption.py +++ b/pymongo/encryption.py @@ -379,17 +379,26 @@ class Algorithm(str, enum.Enum): INDEXED = "Indexed" """Indexed. + .. note:: Support for Queryable Encryption is in beta. + Backwards-breaking changes may be made before the final release. + .. versionadded:: 4.2 """ UNINDEXED = "Unindexed" """Unindexed. + .. note:: Support for Queryable Encryption is in beta. + Backwards-breaking changes may be made before the final release. + .. versionadded:: 4.2 """ class QueryType(enum.IntEnum): - """An enum that defines the supported values for explicit encryption query_type. + """**(BETA)** An enum that defines the supported values for explicit encryption query_type. + + .. note:: Support for Queryable Encryption is in beta. + Backwards-breaking changes may be made before the final release. .. versionadded:: 4.2 """ @@ -606,13 +615,17 @@ def encrypt( :class:`~bson.binary.Binary` with subtype 4 ( :attr:`~bson.binary.UUID_SUBTYPE`). - `key_alt_name`: Identifies a key vault document by 'keyAltName'. - - `index_key_id`: The index key id to use for Queryable Encryption. Must be + - `index_key_id`: **(BETA)** The index key id to use for Queryable Encryption. 
Must be a :class:`~bson.binary.Binary` with subtype 4 (:attr:`~bson.binary.UUID_SUBTYPE`). - - `query_type` (int): The query type to execute. See + - `query_type` (int): **(BETA)** The query type to execute. See :class:`QueryType` for valid options. - - `contention_factor` (int): The contention factor to use + - `contention_factor` (int): **(BETA)** The contention factor to use when the algorithm is :attr:`Algorithm.INDEXED`. + .. note:: `index_key_id`, `query_type`, and `contention_factor` are part of the + Queryable Encryption beta. Backwards-breaking changes may be made before the + final release. + :Returns: The encrypted value, a :class:`~bson.binary.Binary` with subtype 6. diff --git a/pymongo/encryption_options.py b/pymongo/encryption_options.py index eedc2ee23c..c5e6f47837 100644 --- a/pymongo/encryption_options.py +++ b/pymongo/encryption_options.py @@ -148,12 +148,12 @@ def __init__( - `crypt_shared_lib_path` (optional): Override the path to load the crypt_shared library. - `crypt_shared_lib_required` (optional): If True, raise an error if libmongocrypt is unable to load the crypt_shared library. - - `bypass_query_analysis` (optional): If ``True``, disable automatic analysis of - outgoing commands. Set `bypass_query_analysis` to use explicit + - `bypass_query_analysis` (optional): **(BETA)** If ``True``, disable automatic analysis + of outgoing commands. Set `bypass_query_analysis` to use explicit encryption on indexed fields without the MongoDB Enterprise Advanced licensed crypt_shared library. - - `encrypted_fields_map`: Map of collection namespace ("db.coll") to documents that - described the encrypted fields for Queryable Encryption. For example:: + - `encrypted_fields_map`: **(BETA)** Map of collection namespace ("db.coll") to documents + that described the encrypted fields for Queryable Encryption. For example:: { "db.encryptedCollection": { @@ -176,6 +176,10 @@ def __init__( } } + .. note:: `bypass_query_analysis` and `encrypted_fields_map` are part of the + Queryable Encryption beta. Backwards-breaking changes may be made before the + final release. + .. versionchanged:: 4.2 Added `encrypted_fields_map` `crypt_shared_lib_path`, `crypt_shared_lib_required`, and `bypass_query_analysis` parameters. 
From 3e8487826a05ff9d891d57e62ac35206a9bb622e Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Tue, 7 Jun 2022 17:06:54 -0400 Subject: [PATCH 0166/1588] PYTHON-3294 Depend on PyMongoCrypt 1.3.0b0 tag for beta (#963) --- setup.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/setup.py b/setup.py index 40fb484ad1..655cc5ea0c 100755 --- a/setup.py +++ b/setup.py @@ -281,7 +281,9 @@ def build_extension(self, ext): pyopenssl_reqs.append("certifi") extras_require = { - "encryption": ["pymongocrypt>=1.2.0,<2.0.0"], + "encryption": [ + "pymongocrypt@git+ssh://git@github.com/mongodb/libmongocrypt.git@pymongocrypt-1.3.0b0#subdirectory=bindings/python" + ], "ocsp": pyopenssl_reqs, "snappy": ["python-snappy"], "zstd": ["zstandard"], From 77ace9a988051a1967671e8ca4baa93c06ca98c6 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Tue, 7 Jun 2022 17:29:51 -0400 Subject: [PATCH 0167/1588] PYTHON-3299 Add Automatic Queryable Encryption Example to Docs (#964) --- doc/changelog.rst | 3 +- doc/conf.py | 1 + doc/examples/encryption.rst | 73 +++++++++++++++++++++++++++++++++++++ 3 files changed, 76 insertions(+), 1 deletion(-) diff --git a/doc/changelog.rst b/doc/changelog.rst index c53ec2201a..b2fcb7fa24 100644 --- a/doc/changelog.rst +++ b/doc/changelog.rst @@ -10,7 +10,7 @@ PyMongo 4.2 brings a number of improvements including: - Support for MongoDB 6.0. - Support for the Queryable Encryption beta with MongoDB 6.0. Note that backwards-breaking - changes may be made before the final release. + changes may be made before the final release. See :ref:`automatic-queryable-client-side-encryption` for example usage. - Provisional (beta) support for :func:`pymongo.timeout` to apply a single timeout to an entire block of pymongo operations. @@ -41,6 +41,7 @@ in this release. .. _PYTHON-2885: https://jira.mongodb.org/browse/PYTHON-2885 .. _PYTHON-3167: https://jira.mongodb.org/browse/PYTHON-3167 .. _PyMongo 4.2 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=33196 +.. _Queryable Encryption: automatic-queryable-client-side-encryption Changes in Version 4.1.1 ------------------------- diff --git a/doc/conf.py b/doc/conf.py index 7b1580de32..ff330b59a4 100644 --- a/doc/conf.py +++ b/doc/conf.py @@ -85,6 +85,7 @@ # so this link results in a 404. linkcheck_ignore = [ "https://github.com/mongodb/specifications/blob/master/source/server-discovery-and-monitoring/server-monitoring.rst#requesting-an-immediate-check", + "https://github.com/mongodb/libmongocrypt/blob/master/bindings/python/README.rst#installing-from-source", r"https://wiki.centos.org/[\w/]*", ] diff --git a/doc/examples/encryption.rst b/doc/examples/encryption.rst index e86eb7733d..5568b0d741 100644 --- a/doc/examples/encryption.rst +++ b/doc/examples/encryption.rst @@ -336,6 +336,79 @@ data key and create a collection with the if __name__ == "__main__": main() +.. _automatic-queryable-client-side-encryption: + +Automatic Queryable Encryption (Beta) +````````````````````````````````````` + +PyMongo 4.2 brings beta support for Queryable Encryption with MongoDB 6.0. + +Queryable Encryption is the second version of Client-Side Field Level Encryption. +Data is encrypted client-side. Queryable Encryption supports indexed encrypted fields, +which are further processed server-side. + +You must have MongoDB 6.0rc8+ Enterprise to preview the capability. 
+ Until the PyMongo 4.2 release is finalized, it can be installed using:: + + pip install "pymongo@git+ssh://git@github.com/mongodb/mongo-python-driver.git@4.2.0b0#egg=pymongo[encryption]" + + Additionally, ``libmongocrypt`` must be installed from `source <https://github.com/mongodb/libmongocrypt/blob/master/bindings/python/README.rst#installing-from-source>`_. + + Automatic encryption in Queryable Encryption is configured with an ``encrypted_fields`` mapping, as demonstrated by the following example:: + + import os + from bson.codec_options import CodecOptions + from pymongo import MongoClient + from pymongo.encryption import Algorithm, ClientEncryption, QueryType + from pymongo.encryption_options import AutoEncryptionOpts + + + local_master_key = os.urandom(96) + kms_providers = {"local": {"key": local_master_key}} + key_vault_namespace = "keyvault.datakeys" + key_vault_client = MongoClient() + client_encryption = ClientEncryption( + kms_providers, key_vault_namespace, key_vault_client, CodecOptions() + ) + key_vault = key_vault_client["keyvault"]["datakeys"] + key_vault.drop() + key1_id = client_encryption.create_data_key("local", key_alt_names=["firstName"]) + key2_id = client_encryption.create_data_key("local", key_alt_names=["lastName"]) + + encrypted_fields_map = { + "default.encryptedCollection": { + "escCollection": "encryptedCollection.esc", + "eccCollection": "encryptedCollection.ecc", + "ecocCollection": "encryptedCollection.ecoc", + "fields": [ + { + "path": "firstName", + "bsonType": "string", + "keyId": key1_id, + "queries": [{"queryType": "equality"}], + }, + { + "path": "lastName", + "bsonType": "string", + "keyId": key2_id, + } + ] + } + } + + auto_encryption_opts = AutoEncryptionOpts( + kms_providers, key_vault_namespace, encrypted_fields_map=encrypted_fields_map) + client = MongoClient(auto_encryption_opts=auto_encryption_opts) + client.default.drop_collection('encryptedCollection') + coll = client.default.create_collection('encryptedCollection') + coll.insert_one({ "_id": 1, "firstName": "Jane", "lastName": "Doe" }) + docs = list(coll.find({"firstName": "Jane"})) + print(docs) + +In the above example, the ``firstName`` and ``lastName`` fields are +automatically encrypted and decrypted. .. _explicit-client-side-encryption: Explicit Encryption From a6ae852c364ed373392b4e7cae2994d83b4514e4 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Tue, 7 Jun 2022 17:40:46 -0400 Subject: [PATCH 0168/1588] PYTHON-3290 Nested pymongo.timeout() calls only shorten the deadline (#966) --- pymongo/__init__.py | 7 ++++--- pymongo/_csot.py | 6 +++--- test/test_csot.py | 3 ++- 3 files changed, 9 insertions(+), 7 deletions(-) diff --git a/pymongo/__init__.py b/pymongo/__init__.py index 9e877e9551..801d466c2e 100644 --- a/pymongo/__init__.py +++ b/pymongo/__init__.py @@ -127,8 +127,9 @@ def timeout(seconds: Optional[float]) -> ContextManager: NetworkTimeout) as exc: print(f"block timed out: {exc!r}") - When nesting :func:`~pymongo.timeout`, the nested block overrides the - timeout. When exiting the block, the previous deadline is restored:: + When nesting :func:`~pymongo.timeout`, the newly computed deadline is capped to at most + the existing deadline. The deadline can only be shortened, not extended. + When exiting the block, the previous deadline is restored:: with pymongo.timeout(5): coll.find_one() # Uses the 5 second deadline. with pymongo.timeout(3): coll.find_one() # Uses the 3 second deadline. coll.find_one() # Uses the original 5 second deadline. with pymongo.timeout(10): - coll.find_one() # Uses the 10 second deadline.
+ coll.find_one() # Still uses the original 5 second deadline. coll.find_one() # Uses the original 5 second deadline. :Parameters: diff --git a/pymongo/_csot.py b/pymongo/_csot.py index f1601f75d2..ddd4e9233f 100644 --- a/pymongo/_csot.py +++ b/pymongo/_csot.py @@ -70,9 +70,9 @@ def __init__(self, timeout: Optional[float]): def __enter__(self): timeout_token = TIMEOUT.set(self._timeout) - deadline_token = DEADLINE.set( - time.monotonic() + self._timeout if self._timeout else float("inf") - ) + prev_deadline = DEADLINE.get() + next_deadline = time.monotonic() + self._timeout if self._timeout else float("inf") + deadline_token = DEADLINE.set(min(prev_deadline, next_deadline)) rtt_token = RTT.set(0.0) self._tokens = (timeout_token, deadline_token, rtt_token) return self diff --git a/test/test_csot.py b/test/test_csot.py index d00f8c2916..290851159d 100644 --- a/test/test_csot.py +++ b/test/test_csot.py @@ -43,10 +43,11 @@ def test_timeout_nested(self): self.assertEqual(_csot.get_timeout(), 10) deadline_10 = _csot.get_deadline() + # Capped at the original 10 deadline. with pymongo.timeout(15): coll.find_one() self.assertEqual(_csot.get_timeout(), 15) - self.assertGreater(_csot.get_deadline(), deadline_10) + self.assertEqual(_csot.get_deadline(), deadline_10) # Should be reset to previous values self.assertEqual(_csot.get_timeout(), 10) From 83ade52b1b7eef0ed526a3180b507338dd6c74b2 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Tue, 7 Jun 2022 20:24:12 -0400 Subject: [PATCH 0169/1588] bump to 4.2.0b0 --- pymongo/__init__.py | 2 +- setup.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/pymongo/__init__.py b/pymongo/__init__.py index 801d466c2e..62139dac11 100644 --- a/pymongo/__init__.py +++ b/pymongo/__init__.py @@ -55,7 +55,7 @@ .. _text index: http://mongodb.com/docs/manual/core/index-text/ """ -version_tuple: Tuple[Union[int, str], ...] = (4, 2, 0, ".dev1") +version_tuple: Tuple[Union[int, str], ...] = (4, 2, 0, "b0") def get_version_string() -> str: diff --git a/setup.py b/setup.py index 655cc5ea0c..0d77c7c720 100755 --- a/setup.py +++ b/setup.py @@ -34,7 +34,7 @@ except ImportError: _HAVE_SPHINX = False -version = "4.2.0.dev1" +version = "4.2.0b0" f = open("README.rst") try: From e59a11ef4bceaab4da2e81bfca0713e4a333c296 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Tue, 7 Jun 2022 20:24:59 -0400 Subject: [PATCH 0170/1588] back to dev version --- pymongo/__init__.py | 2 +- setup.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/pymongo/__init__.py b/pymongo/__init__.py index 62139dac11..30bfc2bdf7 100644 --- a/pymongo/__init__.py +++ b/pymongo/__init__.py @@ -55,7 +55,7 @@ .. _text index: http://mongodb.com/docs/manual/core/index-text/ """ -version_tuple: Tuple[Union[int, str], ...] = (4, 2, 0, "b0") +version_tuple: Tuple[Union[int, str], ...] 
= (4, 2, 0, ".dev2") def get_version_string() -> str: diff --git a/setup.py b/setup.py index 0d77c7c720..a2df4fac67 100755 --- a/setup.py +++ b/setup.py @@ -34,7 +34,7 @@ except ImportError: _HAVE_SPHINX = False -version = "4.2.0b0" +version = "4.2.0.dev2" f = open("README.rst") try: From b8653b018d0f16f2f60640a633c832939fffccbe Mon Sep 17 00:00:00 2001 From: Atiab Bin Zakaria <61742543+atiabbz@users.noreply.github.com> Date: Thu, 9 Jun 2022 23:10:03 +0800 Subject: [PATCH 0171/1588] Remove extra period in `is_mongos` documentation (#967) --- pymongo/mongo_client.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pymongo/mongo_client.py b/pymongo/mongo_client.py index 7af4b167f1..5e4cf0d754 100644 --- a/pymongo/mongo_client.py +++ b/pymongo/mongo_client.py @@ -1080,7 +1080,7 @@ def is_primary(self) -> bool: def is_mongos(self) -> bool: """If this client is connected to mongos. If the client is not connected, this will block until a connection is established or raise - ServerSelectionTimeoutError if no server is available.. + ServerSelectionTimeoutError if no server is available. """ return self._server_property("server_type") == SERVER_TYPE.Mongos From be3008aa11f51c692ab903e744d305e8e230d5df Mon Sep 17 00:00:00 2001 From: Ben Warner Date: Mon, 13 Jun 2022 11:42:41 -0700 Subject: [PATCH 0172/1588] PYTHON-2110 Refactored some C to avoid symbol conflicts (#968) * Refactored to avoid symbol conflicts * Forgot a replacement * Found a symbol * Undid symbol replacement for PyInit__cmessage * Changed cbson too Co-authored-by: Ben Warner --- bson/_cbsonmodule.c | 90 +++++++++++++++---------------- bson/buffer.c | 14 ++--- bson/buffer.h | 14 ++--- bson/encoding_helpers.c | 2 +- bson/encoding_helpers.h | 2 +- bson/time64.c | 64 +++++++++++----------- bson/time64.h | 16 +++--- doc/contributors.rst | 1 + pymongo/_cmessagemodule.c | 108 +++++++++++++++++++------------------- 9 files changed, 156 insertions(+), 155 deletions(-) diff --git a/bson/_cbsonmodule.c b/bson/_cbsonmodule.c index 1a296db527..191ce9886f 100644 --- a/bson/_cbsonmodule.c +++ b/bson/_cbsonmodule.c @@ -153,7 +153,7 @@ static PyObject* datetime_from_millis(long long millis) { int microseconds = diff * 1000; Time64_T seconds = (millis - diff) / 1000; struct TM timeinfo; - gmtime64_r(&seconds, &timeinfo); + cbson_gmtime64_r(&seconds, &timeinfo); return PyDateTime_FromDateAndTime(timeinfo.tm_year + 1900, timeinfo.tm_mon + 1, @@ -175,14 +175,14 @@ static long long millis_from_datetime(PyObject* datetime) { timeinfo.tm_min = PyDateTime_DATE_GET_MINUTE(datetime); timeinfo.tm_sec = PyDateTime_DATE_GET_SECOND(datetime); - millis = timegm64(&timeinfo) * 1000; + millis = cbson_timegm64(&timeinfo) * 1000; millis += PyDateTime_DATE_GET_MICROSECOND(datetime) / 1000; return millis; } /* Just make this compatible w/ the old API. */ int buffer_write_bytes(buffer_t buffer, const char* data, int size) { - if (buffer_write(buffer, data, size)) { + if (pymongo_buffer_write(buffer, data, size)) { return 0; } return 1; @@ -207,7 +207,7 @@ void buffer_write_int32_at_position(buffer_t buffer, int position, int32_t data) { uint32_t data_le = BSON_UINT32_TO_LE(data); - memcpy(buffer_get_buffer(buffer) + position, &data_le, 4); + memcpy(pymongo_buffer_get_buffer(buffer) + position, &data_le, 4); } static int write_unicode(buffer_t buffer, PyObject* py_string) { @@ -419,7 +419,7 @@ static long _type_marker(PyObject* object) { * Return 1 on success. options->document_class is a new reference. * Return 0 on failure. 
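 * (As elsewhere in this module, a Python exception is set on the
 * failure path before 0 is returned.)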
*/ -int convert_type_registry(PyObject* registry_obj, type_registry_t* registry) { +int cbson_convert_type_registry(PyObject* registry_obj, type_registry_t* registry) { registry->encoder_map = NULL; registry->decoder_map = NULL; registry->fallback_encoder = NULL; @@ -481,7 +481,7 @@ int convert_codec_options(PyObject* options_obj, void* p) { return 0; } - if (!convert_type_registry(type_registry_obj, + if (!cbson_convert_type_registry(type_registry_obj, &options->type_registry)) { return 0; } @@ -597,7 +597,7 @@ static int _write_regex_to_buffer( Py_DECREF(encoded_pattern); return 0; } - status = check_string((const unsigned char*)pattern_data, + status = cbson_check_string((const unsigned char*)pattern_data, pattern_length, check_utf8, 1); if (status == NOT_UTF_8) { PyObject* InvalidStringData = _error("InvalidStringData"); @@ -649,7 +649,7 @@ static int _write_regex_to_buffer( if (!buffer_write_bytes(buffer, flags, flags_length)) { return 0; } - *(buffer_get_buffer(buffer) + type_byte) = 0x0B; + *(pymongo_buffer_get_buffer(buffer) + type_byte) = 0x0B; return 1; } @@ -687,7 +687,7 @@ static int _write_element_to_buffer(PyObject* self, buffer_t buffer, const char* data; int size; - *(buffer_get_buffer(buffer) + type_byte) = 0x05; + *(pymongo_buffer_get_buffer(buffer) + type_byte) = 0x05; subtype_object = PyObject_GetAttrString(value, "subtype"); if (!subtype_object) { return 0; @@ -750,7 +750,7 @@ static int _write_element_to_buffer(PyObject* self, buffer_t buffer, return 0; } Py_DECREF(pystring); - *(buffer_get_buffer(buffer) + type_byte) = 0x07; + *(pymongo_buffer_get_buffer(buffer) + type_byte) = 0x07; return 1; } case 11: @@ -772,15 +772,15 @@ static int _write_element_to_buffer(PyObject* self, buffer_t buffer, if (scope == Py_None) { Py_DECREF(scope); - *(buffer_get_buffer(buffer) + type_byte) = 0x0D; + *(pymongo_buffer_get_buffer(buffer) + type_byte) = 0x0D; return write_string(buffer, value); } - *(buffer_get_buffer(buffer) + type_byte) = 0x0F; + *(pymongo_buffer_get_buffer(buffer) + type_byte) = 0x0F; - start_position = buffer_get_position(buffer); + start_position = pymongo_buffer_get_position(buffer); /* save space for length */ - length_location = buffer_save_space(buffer, 4); + length_location = pymongo_buffer_save_space(buffer, 4); if (length_location == -1) { Py_DECREF(scope); return 0; @@ -797,7 +797,7 @@ static int _write_element_to_buffer(PyObject* self, buffer_t buffer, } Py_DECREF(scope); - length = buffer_get_position(buffer) - start_position; + length = pymongo_buffer_get_position(buffer) - start_position; buffer_write_int32_at_position( buffer, length_location, (int32_t)length); return 1; @@ -834,7 +834,7 @@ static int _write_element_to_buffer(PyObject* self, buffer_t buffer, return 0; } - *(buffer_get_buffer(buffer) + type_byte) = 0x11; + *(pymongo_buffer_get_buffer(buffer) + type_byte) = 0x11; return 1; } case 18: @@ -849,7 +849,7 @@ static int _write_element_to_buffer(PyObject* self, buffer_t buffer, if (!buffer_write_int64(buffer, (int64_t)ll)) { return 0; } - *(buffer_get_buffer(buffer) + type_byte) = 0x12; + *(pymongo_buffer_get_buffer(buffer) + type_byte) = 0x12; return 1; } case 19: @@ -870,7 +870,7 @@ static int _write_element_to_buffer(PyObject* self, buffer_t buffer, return 0; } Py_DECREF(pystring); - *(buffer_get_buffer(buffer) + type_byte) = 0x13; + *(pymongo_buffer_get_buffer(buffer) + type_byte) = 0x13; return 1; } case 100: @@ -885,7 +885,7 @@ static int _write_element_to_buffer(PyObject* self, buffer_t buffer, return 0; } Py_DECREF(as_doc); - 
*(buffer_get_buffer(buffer) + type_byte) = 0x03; + *(pymongo_buffer_get_buffer(buffer) + type_byte) = 0x03; return 1; } case 101: @@ -894,19 +894,19 @@ static int _write_element_to_buffer(PyObject* self, buffer_t buffer, if (!write_raw_doc(buffer, value)) { return 0; } - *(buffer_get_buffer(buffer) + type_byte) = 0x03; + *(pymongo_buffer_get_buffer(buffer) + type_byte) = 0x03; return 1; } case 255: { /* MinKey */ - *(buffer_get_buffer(buffer) + type_byte) = 0xFF; + *(pymongo_buffer_get_buffer(buffer) + type_byte) = 0xFF; return 1; } case 127: { /* MaxKey */ - *(buffer_get_buffer(buffer) + type_byte) = 0x7F; + *(pymongo_buffer_get_buffer(buffer) + type_byte) = 0x7F; return 1; } } @@ -915,7 +915,7 @@ static int _write_element_to_buffer(PyObject* self, buffer_t buffer, if (PyBool_Check(value)) { const char c = (value == Py_True) ? 0x01 : 0x00; - *(buffer_get_buffer(buffer) + type_byte) = 0x08; + *(pymongo_buffer_get_buffer(buffer) + type_byte) = 0x08; return buffer_write_bytes(buffer, &c, 1); } else if (PyLong_Check(value)) { @@ -931,20 +931,20 @@ static int _write_element_to_buffer(PyObject* self, buffer_t buffer, "MongoDB can only handle up to 8-byte ints"); return 0; } - *(buffer_get_buffer(buffer) + type_byte) = 0x12; + *(pymongo_buffer_get_buffer(buffer) + type_byte) = 0x12; return buffer_write_int64(buffer, (int64_t)long_long_value); } - *(buffer_get_buffer(buffer) + type_byte) = 0x10; + *(pymongo_buffer_get_buffer(buffer) + type_byte) = 0x10; return buffer_write_int32(buffer, (int32_t)int_value); } else if (PyFloat_Check(value)) { const double d = PyFloat_AsDouble(value); - *(buffer_get_buffer(buffer) + type_byte) = 0x01; + *(pymongo_buffer_get_buffer(buffer) + type_byte) = 0x01; return buffer_write_double(buffer, d); } else if (value == Py_None) { - *(buffer_get_buffer(buffer) + type_byte) = 0x0A; + *(pymongo_buffer_get_buffer(buffer) + type_byte) = 0x0A; return 1; } else if (PyDict_Check(value)) { - *(buffer_get_buffer(buffer) + type_byte) = 0x03; + *(pymongo_buffer_get_buffer(buffer) + type_byte) = 0x03; return write_dict(self, buffer, value, check_keys, options, 0); } else if (PyList_Check(value) || PyTuple_Check(value)) { Py_ssize_t items, i; @@ -953,11 +953,11 @@ static int _write_element_to_buffer(PyObject* self, buffer_t buffer, length; char zero = 0; - *(buffer_get_buffer(buffer) + type_byte) = 0x04; - start_position = buffer_get_position(buffer); + *(pymongo_buffer_get_buffer(buffer) + type_byte) = 0x04; + start_position = pymongo_buffer_get_position(buffer); /* save space for length */ - length_location = buffer_save_space(buffer, 4); + length_location = pymongo_buffer_save_space(buffer, 4); if (length_location == -1) { return 0; } @@ -972,7 +972,7 @@ static int _write_element_to_buffer(PyObject* self, buffer_t buffer, return 0; } for(i = 0; i < items; i++) { - int list_type_byte = buffer_save_space(buffer, 1); + int list_type_byte = pymongo_buffer_save_space(buffer, 1); char name[16]; PyObject* item_value; @@ -999,7 +999,7 @@ static int _write_element_to_buffer(PyObject* self, buffer_t buffer, if (!buffer_write_bytes(buffer, &zero, 1)) { return 0; } - length = buffer_get_position(buffer) - start_position; + length = pymongo_buffer_get_position(buffer) - start_position; buffer_write_int32_at_position( buffer, length_location, (int32_t)length); return 1; @@ -1012,7 +1012,7 @@ static int _write_element_to_buffer(PyObject* self, buffer_t buffer, return 0; if ((size = _downcast_and_check(PyBytes_GET_SIZE(value), 0)) == -1) return 0; - *(buffer_get_buffer(buffer) + type_byte) = 
0x05; + *(pymongo_buffer_get_buffer(buffer) + type_byte) = 0x05; if (!buffer_write_int32(buffer, (int32_t)size)) { return 0; } @@ -1024,7 +1024,7 @@ static int _write_element_to_buffer(PyObject* self, buffer_t buffer, } return 1; } else if (PyUnicode_Check(value)) { - *(buffer_get_buffer(buffer) + type_byte) = 0x02; + *(pymongo_buffer_get_buffer(buffer) + type_byte) = 0x02; return write_unicode(buffer, value); } else if (PyDateTime_Check(value)) { long long millis; @@ -1042,7 +1042,7 @@ static int _write_element_to_buffer(PyObject* self, buffer_t buffer, } else { millis = millis_from_datetime(value); } - *(buffer_get_buffer(buffer) + type_byte) = 0x09; + *(pymongo_buffer_get_buffer(buffer) + type_byte) = 0x09; return buffer_write_int64(buffer, (int64_t)millis); } else if (PyObject_TypeCheck(value, state->REType)) { return _write_regex_to_buffer(buffer, type_byte, value); @@ -1059,7 +1059,7 @@ static int _write_element_to_buffer(PyObject* self, buffer_t buffer, if (PyErr_Occurred()) { return 0; } - *(buffer_get_buffer(buffer) + type_byte) = 0x03; + *(pymongo_buffer_get_buffer(buffer) + type_byte) = 0x03; return write_dict(self, buffer, value, check_keys, options, 0); } @@ -1189,7 +1189,7 @@ int write_pair(PyObject* self, buffer_t buffer, const char* name, int name_lengt return 1; } - type_byte = buffer_save_space(buffer, 1); + type_byte = pymongo_buffer_save_space(buffer, 1); if (type_byte == -1) { return 0; } @@ -1362,7 +1362,7 @@ int write_dict(PyObject* self, buffer_t buffer, } } - length_location = buffer_save_space(buffer, 4); + length_location = pymongo_buffer_save_space(buffer, 4); if (length_location == -1) { return 0; } @@ -1429,7 +1429,7 @@ int write_dict(PyObject* self, buffer_t buffer, if (!buffer_write_bytes(buffer, &zero, 1)) { return 0; } - length = buffer_get_position(buffer) - length_location; + length = pymongo_buffer_get_position(buffer) - length_location; buffer_write_int32_at_position( buffer, length_location, (int32_t)length); return length; @@ -1464,7 +1464,7 @@ static PyObject* _cbson_dict_to_bson(PyObject* self, PyObject* args) { return raw_bson_document_bytes_obj; } - buffer = buffer_new(); + buffer = pymongo_buffer_new(); if (!buffer) { destroy_codec_options(&options); return NULL; @@ -1472,15 +1472,15 @@ static PyObject* _cbson_dict_to_bson(PyObject* self, PyObject* args) { if (!write_dict(self, buffer, dict, check_keys, &options, top_level)) { destroy_codec_options(&options); - buffer_free(buffer); + pymongo_buffer_free(buffer); return NULL; } /* objectify buffer */ - result = Py_BuildValue("y#", buffer_get_buffer(buffer), - (Py_ssize_t)buffer_get_position(buffer)); + result = Py_BuildValue("y#", pymongo_buffer_get_buffer(buffer), + (Py_ssize_t)pymongo_buffer_get_position(buffer)); destroy_codec_options(&options); - buffer_free(buffer); + pymongo_buffer_free(buffer); return result; } diff --git a/bson/buffer.c b/bson/buffer.c index bb92ab3ee5..cc75202746 100644 --- a/bson/buffer.c +++ b/bson/buffer.c @@ -39,7 +39,7 @@ static void set_memory_error(void) { /* Allocate and return a new buffer. * Return NULL and sets MemoryError on allocation failure. */ -buffer_t buffer_new(void) { +buffer_t pymongo_buffer_new(void) { buffer_t buffer; buffer = (buffer_t)malloc(sizeof(struct buffer)); if (buffer == NULL) { @@ -61,7 +61,7 @@ buffer_t buffer_new(void) { /* Free the memory allocated for `buffer`. * Return non-zero on failure. 
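 * (In the implementation below, being passed a NULL buffer is the only
 * failure mode.)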
*/ -int buffer_free(buffer_t buffer) { +int pymongo_buffer_free(buffer_t buffer) { if (buffer == NULL) { return 1; } @@ -122,7 +122,7 @@ static int buffer_assure_space(buffer_t buffer, int size) { /* Save `size` bytes from the current position in `buffer` (and grow if needed). * Return offset for writing, or -1 on failure. * Sets MemoryError or ValueError on failure. */ -buffer_position buffer_save_space(buffer_t buffer, int size) { +buffer_position pymongo_buffer_save_space(buffer_t buffer, int size) { int position = buffer->position; if (buffer_assure_space(buffer, size) != 0) { return -1; @@ -134,7 +134,7 @@ buffer_position buffer_save_space(buffer_t buffer, int size) { /* Write `size` bytes from `data` to `buffer` (and grow if needed). * Return non-zero on failure. * Sets MemoryError or ValueError on failure. */ -int buffer_write(buffer_t buffer, const char* data, int size) { +int pymongo_buffer_write(buffer_t buffer, const char* data, int size) { if (buffer_assure_space(buffer, size) != 0) { return 1; } @@ -144,14 +144,14 @@ int buffer_write(buffer_t buffer, const char* data, int size) { return 0; } -int buffer_get_position(buffer_t buffer) { +int pymongo_buffer_get_position(buffer_t buffer) { return buffer->position; } -char* buffer_get_buffer(buffer_t buffer) { +char* pymongo_buffer_get_buffer(buffer_t buffer) { return buffer->buffer; } -void buffer_update_position(buffer_t buffer, buffer_position new_position) { +void pymongo_buffer_update_position(buffer_t buffer, buffer_position new_position) { buffer->position = new_position; } diff --git a/bson/buffer.h b/bson/buffer.h index 1485082d95..a78e34e4de 100644 --- a/bson/buffer.h +++ b/bson/buffer.h @@ -27,25 +27,25 @@ typedef int buffer_position; /* Allocate and return a new buffer. * Return NULL on allocation failure. */ -buffer_t buffer_new(void); +buffer_t pymongo_buffer_new(void); /* Free the memory allocated for `buffer`. * Return non-zero on failure. */ -int buffer_free(buffer_t buffer); +int pymongo_buffer_free(buffer_t buffer); /* Save `size` bytes from the current position in `buffer` (and grow if needed). * Return offset for writing, or -1 on allocation failure. */ -buffer_position buffer_save_space(buffer_t buffer, int size); +buffer_position pymongo_buffer_save_space(buffer_t buffer, int size); /* Write `size` bytes from `data` to `buffer` (and grow if needed). * Return non-zero on allocation failure. */ -int buffer_write(buffer_t buffer, const char* data, int size); +int pymongo_buffer_write(buffer_t buffer, const char* data, int size); /* Getters for the internals of a buffer_t. * Should try to avoid using these as much as possible * since they break the abstraction. 
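 * (Outside of these getters, callers treat buffer_t as an opaque handle.)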
*/ -buffer_position buffer_get_position(buffer_t buffer); -char* buffer_get_buffer(buffer_t buffer); -void buffer_update_position(buffer_t buffer, buffer_position new_position); +buffer_position pymongo_buffer_get_position(buffer_t buffer); +char* pymongo_buffer_get_buffer(buffer_t buffer); +void pymongo_buffer_update_position(buffer_t buffer, buffer_position new_position); #endif diff --git a/bson/encoding_helpers.c b/bson/encoding_helpers.c index ea96810878..187ce6f3bd 100644 --- a/bson/encoding_helpers.c +++ b/bson/encoding_helpers.c @@ -87,7 +87,7 @@ static unsigned char isLegalUTF8(const unsigned char* source, int length) { return 1; } -result_t check_string(const unsigned char* string, const int length, +result_t cbson_check_string(const unsigned char* string, const int length, const char check_utf8, const char check_null) { int position = 0; /* By default we go character by character. Will be different for checking diff --git a/bson/encoding_helpers.h b/bson/encoding_helpers.h index b1a90fa510..a5fb75860f 100644 --- a/bson/encoding_helpers.h +++ b/bson/encoding_helpers.h @@ -23,7 +23,7 @@ typedef enum { HAS_NULL } result_t; -result_t check_string(const unsigned char* string, const int length, +result_t cbson_check_string(const unsigned char* string, const int length, const char check_utf8, const char check_null); #endif diff --git a/bson/time64.c b/bson/time64.c index bad6b51dc1..8d2886592e 100644 --- a/bson/time64.c +++ b/bson/time64.c @@ -29,13 +29,13 @@ THE SOFTWARE. /* Programmers who have available to them 64-bit time values as a 'long -long' type can use localtime64_r() and gmtime64_r() which correctly +long' type can use cbson_localtime64_r() and cbson_gmtime64_r() which correctly converts the time even on 32-bit systems. Whether you have 64-bit time values will depend on the operating system. -localtime64_r() is a 64-bit equivalent of localtime_r(). +cbson_localtime64_r() is a 64-bit equivalent of localtime_r(). -gmtime64_r() is a 64-bit equivalent of gmtime_r(). +cbson_gmtime64_r() is a 64-bit equivalent of gmtime_r(). */ @@ -158,7 +158,7 @@ static int is_exception_century(Year year) The result is like cmp. Ignores things like gmtoffset and dst */ -int cmp_date( const struct TM* left, const struct tm* right ) { +int cbson_cmp_date( const struct TM* left, const struct tm* right ) { if( left->tm_year > right->tm_year ) return 1; else if( left->tm_year < right->tm_year ) @@ -196,11 +196,11 @@ int cmp_date( const struct TM* left, const struct tm* right ) { /* Check if a date is safely inside a range. The intention is to check if its a few days inside. */ -int date_in_safe_range( const struct TM* date, const struct tm* min, const struct tm* max ) { - if( cmp_date(date, min) == -1 ) +int cbson_date_in_safe_range( const struct TM* date, const struct tm* min, const struct tm* max ) { + if( cbson_cmp_date(date, min) == -1 ) return 0; - if( cmp_date(date, max) == 1 ) + if( cbson_cmp_date(date, max) == 1 ) return 0; return 1; @@ -209,9 +209,9 @@ int date_in_safe_range( const struct TM* date, const struct tm* min, const struc /* timegm() is not in the C or POSIX spec, but it is such a useful extension I would be remiss in leaving it out. 
Also I need it - for localtime64() + for cbson_localtime64() */ -Time64_T timegm64(const struct TM *date) { +Time64_T cbson_timegm64(const struct TM *date) { Time64_T days = 0; Time64_T seconds = 0; Year year; @@ -376,7 +376,7 @@ static int safe_year(const Year year) } -void copy_tm_to_TM64(const struct tm *src, struct TM *dest) { +void pymongo_copy_tm_to_TM64(const struct tm *src, struct TM *dest) { if( src == NULL ) { memset(dest, 0, sizeof(*dest)); } @@ -408,7 +408,7 @@ void copy_tm_to_TM64(const struct tm *src, struct TM *dest) { } -void copy_TM64_to_tm(const struct TM *src, struct tm *dest) { +void cbson_copy_TM64_to_tm(const struct TM *src, struct tm *dest) { if( src == NULL ) { memset(dest, 0, sizeof(*dest)); } @@ -441,7 +441,7 @@ void copy_TM64_to_tm(const struct TM *src, struct tm *dest) { /* Simulate localtime_r() to the best of our ability */ -struct tm * fake_localtime_r(const time_t *time, struct tm *result) { +struct tm * cbson_fake_localtime_r(const time_t *time, struct tm *result) { const struct tm *static_result = localtime(time); assert(result != NULL); @@ -458,7 +458,7 @@ struct tm * fake_localtime_r(const time_t *time, struct tm *result) { /* Simulate gmtime_r() to the best of our ability */ -struct tm * fake_gmtime_r(const time_t *time, struct tm *result) { +struct tm * cbson_fake_gmtime_r(const time_t *time, struct tm *result) { const struct tm *static_result = gmtime(time); assert(result != NULL); @@ -499,22 +499,22 @@ static Time64_T seconds_between_years(Year left_year, Year right_year) { } -Time64_T mktime64(const struct TM *input_date) { +Time64_T cbson_mktime64(const struct TM *input_date) { struct tm safe_date; struct TM date; Time64_T time; Year year = input_date->tm_year + 1900; - if( date_in_safe_range(input_date, &SYSTEM_MKTIME_MIN, &SYSTEM_MKTIME_MAX) ) + if( cbson_date_in_safe_range(input_date, &SYSTEM_MKTIME_MIN, &SYSTEM_MKTIME_MAX) ) { - copy_TM64_to_tm(input_date, &safe_date); + cbson_copy_TM64_to_tm(input_date, &safe_date); return (Time64_T)mktime(&safe_date); } /* Have to make the year safe in date else it won't fit in safe_date */ date = *input_date; date.tm_year = safe_year(year) - 1900; - copy_TM64_to_tm(&date, &safe_date); + cbson_copy_TM64_to_tm(&date, &safe_date); time = (Time64_T)mktime(&safe_date); @@ -526,11 +526,11 @@ Time64_T mktime64(const struct TM *input_date) { /* Because I think mktime() is a crappy name */ Time64_T timelocal64(const struct TM *date) { - return mktime64(date); + return cbson_mktime64(date); } -struct TM *gmtime64_r (const Time64_T *in_time, struct TM *p) +struct TM *cbson_gmtime64_r (const Time64_T *in_time, struct TM *p) { int v_tm_sec, v_tm_min, v_tm_hour, v_tm_mon, v_tm_wday; Time64_T v_tm_tday; @@ -549,7 +549,7 @@ struct TM *gmtime64_r (const Time64_T *in_time, struct TM *p) struct tm safe_date; GMTIME_R(&safe_time, &safe_date); - copy_tm_to_TM64(&safe_date, p); + pymongo_copy_tm_to_TM64(&safe_date, p); assert(check_tm(p)); return p; @@ -659,7 +659,7 @@ struct TM *gmtime64_r (const Time64_T *in_time, struct TM *p) } -struct TM *localtime64_r (const Time64_T *time, struct TM *local_tm) +struct TM *cbson_localtime64_r (const Time64_T *time, struct TM *local_tm) { time_t safe_time; struct tm safe_date; @@ -678,15 +678,15 @@ struct TM *localtime64_r (const Time64_T *time, struct TM *local_tm) LOCALTIME_R(&safe_time, &safe_date); - copy_tm_to_TM64(&safe_date, local_tm); + pymongo_copy_tm_to_TM64(&safe_date, local_tm); assert(check_tm(local_tm)); return local_tm; } #endif - if( gmtime64_r(time, &gm_tm) == NULL ) { - 
TIME64_TRACE1("gmtime64_r returned null for %lld\n", *time); + if( cbson_gmtime64_r(time, &gm_tm) == NULL ) { + TIME64_TRACE1("cbson_gmtime64_r returned null for %lld\n", *time); return NULL; } @@ -700,13 +700,13 @@ struct TM *localtime64_r (const Time64_T *time, struct TM *local_tm) gm_tm.tm_year = safe_year((Year)(gm_tm.tm_year + 1900)) - 1900; } - safe_time = (time_t)timegm64(&gm_tm); + safe_time = (time_t)cbson_timegm64(&gm_tm); if( LOCALTIME_R(&safe_time, &safe_date) == NULL ) { TIME64_TRACE1("localtime_r(%d) returned NULL\n", (int)safe_time); return NULL; } - copy_tm_to_TM64(&safe_date, local_tm); + pymongo_copy_tm_to_TM64(&safe_date, local_tm); local_tm->tm_year = (int)orig_year; if( local_tm->tm_year != orig_year ) { @@ -751,14 +751,14 @@ struct TM *localtime64_r (const Time64_T *time, struct TM *local_tm) } -int valid_tm_wday( const struct TM* date ) { +int cbson_valid_tm_wday( const struct TM* date ) { if( 0 <= date->tm_wday && date->tm_wday <= 6 ) return 1; else return 0; } -int valid_tm_mon( const struct TM* date ) { +int cbson_valid_tm_mon( const struct TM* date ) { if( 0 <= date->tm_mon && date->tm_mon <= 11 ) return 1; else @@ -767,15 +767,15 @@ int valid_tm_mon( const struct TM* date ) { /* Non-thread safe versions of the above */ -struct TM *localtime64(const Time64_T *time) { +struct TM *cbson_localtime64(const Time64_T *time) { #ifdef _MSC_VER _tzset(); #else tzset(); #endif - return localtime64_r(time, &Static_Return_Date); + return cbson_localtime64_r(time, &Static_Return_Date); } -struct TM *gmtime64(const Time64_T *time) { - return gmtime64_r(time, &Static_Return_Date); +struct TM *cbson_gmtime64(const Time64_T *time) { + return cbson_gmtime64_r(time, &Static_Return_Date); } diff --git a/bson/time64.h b/bson/time64.h index 61d9776926..6321eb307e 100644 --- a/bson/time64.h +++ b/bson/time64.h @@ -41,13 +41,13 @@ struct TM64 { /* Declare public functions */ -struct TM *gmtime64_r (const Time64_T *, struct TM *); -struct TM *localtime64_r (const Time64_T *, struct TM *); -struct TM *gmtime64 (const Time64_T *); -struct TM *localtime64 (const Time64_T *); +struct TM *cbson_gmtime64_r (const Time64_T *, struct TM *); +struct TM *cbson_localtime64_r (const Time64_T *, struct TM *); +struct TM *cbson_gmtime64 (const Time64_T *); +struct TM *cbson_localtime64 (const Time64_T *); -Time64_T timegm64 (const struct TM *); -Time64_T mktime64 (const struct TM *); +Time64_T cbson_timegm64 (const struct TM *); +Time64_T cbson_mktime64 (const struct TM *); Time64_T timelocal64 (const struct TM *); @@ -55,12 +55,12 @@ Time64_T timelocal64 (const struct TM *); #ifdef HAS_LOCALTIME_R # define LOCALTIME_R(clock, result) localtime_r(clock, result) #else -# define LOCALTIME_R(clock, result) fake_localtime_r(clock, result) +# define LOCALTIME_R(clock, result) cbson_fake_localtime_r(clock, result) #endif #ifdef HAS_GMTIME_R # define GMTIME_R(clock, result) gmtime_r(clock, result) #else -# define GMTIME_R(clock, result) fake_gmtime_r(clock, result) +# define GMTIME_R(clock, result) cbson_fake_gmtime_r(clock, result) #endif diff --git a/doc/contributors.rst b/doc/contributors.rst index 4275209781..7ab87f7790 100644 --- a/doc/contributors.rst +++ b/doc/contributors.rst @@ -92,3 +92,4 @@ The following is a list of people who have contributed to - Henri Froese (henrifroese) - Ishmum Jawad Khan (ishmum123) - Arie Bovenberg (ariebovenberg) +- Ben Warner (bcwarner) diff --git a/pymongo/_cmessagemodule.c b/pymongo/_cmessagemodule.c index 517c0fb798..2f03ce73e0 100644 --- a/pymongo/_cmessagemodule.c 
+++ b/pymongo/_cmessagemodule.c @@ -90,13 +90,13 @@ static PyObject* _cbson_query_message(PyObject* self, PyObject* args) { convert_codec_options, &options)) { return NULL; } - buffer = buffer_new(); + buffer = pymongo_buffer_new(); if (!buffer) { goto fail; } // save space for message length - length_location = buffer_save_space(buffer, 4); + length_location = pymongo_buffer_save_space(buffer, 4); if (length_location == -1) { goto fail; } @@ -111,37 +111,37 @@ static PyObject* _cbson_query_message(PyObject* self, PyObject* args) { goto fail; } - begin = buffer_get_position(buffer); + begin = pymongo_buffer_get_position(buffer); if (!write_dict(state->_cbson, buffer, query, 0, &options, 1)) { goto fail; } - max_size = buffer_get_position(buffer) - begin; + max_size = pymongo_buffer_get_position(buffer) - begin; if (field_selector != Py_None) { - begin = buffer_get_position(buffer); + begin = pymongo_buffer_get_position(buffer); if (!write_dict(state->_cbson, buffer, field_selector, 0, &options, 1)) { goto fail; } - cur_size = buffer_get_position(buffer) - begin; + cur_size = pymongo_buffer_get_position(buffer) - begin; max_size = (cur_size > max_size) ? cur_size : max_size; } - message_length = buffer_get_position(buffer) - length_location; + message_length = pymongo_buffer_get_position(buffer) - length_location; buffer_write_int32_at_position( buffer, length_location, (int32_t)message_length); /* objectify buffer */ result = Py_BuildValue("iy#i", request_id, - buffer_get_buffer(buffer), - (Py_ssize_t)buffer_get_position(buffer), + pymongo_buffer_get_buffer(buffer), + (Py_ssize_t)pymongo_buffer_get_position(buffer), max_size); fail: PyMem_Free(collection_name); destroy_codec_options(&options); if (buffer) { - buffer_free(buffer); + pymongo_buffer_free(buffer); } return result; } @@ -165,13 +165,13 @@ static PyObject* _cbson_get_more_message(PyObject* self, PyObject* args) { &cursor_id)) { return NULL; } - buffer = buffer_new(); + buffer = pymongo_buffer_new(); if (!buffer) { goto fail; } // save space for message length - length_location = buffer_save_space(buffer, 4); + length_location = pymongo_buffer_save_space(buffer, 4); if (length_location == -1) { goto fail; } @@ -188,18 +188,18 @@ static PyObject* _cbson_get_more_message(PyObject* self, PyObject* args) { goto fail; } - message_length = buffer_get_position(buffer) - length_location; + message_length = pymongo_buffer_get_position(buffer) - length_location; buffer_write_int32_at_position( buffer, length_location, (int32_t)message_length); /* objectify buffer */ result = Py_BuildValue("iy#", request_id, - buffer_get_buffer(buffer), - (Py_ssize_t)buffer_get_position(buffer)); + pymongo_buffer_get_buffer(buffer), + (Py_ssize_t)pymongo_buffer_get_position(buffer)); fail: PyMem_Free(collection_name); if (buffer) { - buffer_free(buffer); + pymongo_buffer_free(buffer); } return result; } @@ -239,13 +239,13 @@ static PyObject* _cbson_op_msg(PyObject* self, PyObject* args) { convert_codec_options, &options)) { return NULL; } - buffer = buffer_new(); + buffer = pymongo_buffer_new(); if (!buffer) { goto fail; } // save space for message length - length_location = buffer_save_space(buffer, 4); + length_location = pymongo_buffer_save_space(buffer, 4); if (length_location == -1) { goto fail; } @@ -273,7 +273,7 @@ static PyObject* _cbson_op_msg(PyObject* self, PyObject* args) { goto fail; } /* save space for payload 0 length */ - payload_one_length_location = buffer_save_space(buffer, 4); + payload_one_length_location = 
pymongo_buffer_save_space(buffer, 4); /* C string identifier */ if (!buffer_write_bytes_ssize_t(buffer, identifier, identifier_length + 1)) { goto fail; @@ -295,26 +295,26 @@ static PyObject* _cbson_op_msg(PyObject* self, PyObject* args) { Py_CLEAR(doc); } - payload_length = buffer_get_position(buffer) - payload_one_length_location; + payload_length = pymongo_buffer_get_position(buffer) - payload_one_length_location; buffer_write_int32_at_position( buffer, payload_one_length_location, (int32_t)payload_length); total_size += payload_length; } - message_length = buffer_get_position(buffer) - length_location; + message_length = pymongo_buffer_get_position(buffer) - length_location; buffer_write_int32_at_position( buffer, length_location, (int32_t)message_length); /* objectify buffer */ result = Py_BuildValue("iy#ii", request_id, - buffer_get_buffer(buffer), - (Py_ssize_t)buffer_get_position(buffer), + pymongo_buffer_get_buffer(buffer), + (Py_ssize_t)pymongo_buffer_get_position(buffer), total_size, max_doc_size); fail: Py_XDECREF(iterator); if (buffer) { - buffer_free(buffer); + pymongo_buffer_free(buffer); } PyMem_Free(identifier); destroy_codec_options(&options); @@ -400,7 +400,7 @@ _batched_op_msg( return 0; } /* Save space for size */ - size_location = buffer_save_space(buffer, 4); + size_location = pymongo_buffer_save_space(buffer, 4); if (size_location == -1) { return 0; } @@ -445,17 +445,17 @@ _batched_op_msg( return 0; } while ((doc = PyIter_Next(iterator)) != NULL) { - int cur_doc_begin = buffer_get_position(buffer); + int cur_doc_begin = pymongo_buffer_get_position(buffer); int cur_size; int doc_too_large = 0; int unacked_doc_too_large = 0; if (!write_dict(state->_cbson, buffer, doc, 0, &options, 1)) { goto fail; } - cur_size = buffer_get_position(buffer) - cur_doc_begin; + cur_size = pymongo_buffer_get_position(buffer) - cur_doc_begin; /* Does the first document exceed max_message_size? */ - doc_too_large = (idx == 0 && (buffer_get_position(buffer) > max_message_size)); + doc_too_large = (idx == 0 && (pymongo_buffer_get_position(buffer) > max_message_size)); /* When OP_MSG is used unacknowledged we have to check * document size client side or applications won't be notified. * Otherwise we let the server deal with documents that are too large @@ -483,12 +483,12 @@ _batched_op_msg( goto fail; } /* We have enough data, return this batch. */ - if (buffer_get_position(buffer) > max_message_size) { + if (pymongo_buffer_get_position(buffer) > max_message_size) { /* * Roll the existing buffer back to the beginning * of the last document encoded. 
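             * Bytes past the restored position are simply ignored when the
             * final message length is computed.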
*/ - buffer_update_position(buffer, cur_doc_begin); + pymongo_buffer_update_position(buffer, cur_doc_begin); Py_CLEAR(doc); break; } @@ -508,7 +508,7 @@ _batched_op_msg( goto fail; } - position = buffer_get_position(buffer); + position = pymongo_buffer_get_position(buffer); length = position - size_location; buffer_write_int32_at_position(buffer, size_location, (int32_t)length); return 1; @@ -538,7 +538,7 @@ _cbson_encode_batched_op_msg(PyObject* self, PyObject* args) { &ctx)) { return NULL; } - if (!(buffer = buffer_new())) { + if (!(buffer = pymongo_buffer_new())) { destroy_codec_options(&options); return NULL; } @@ -560,12 +560,12 @@ _cbson_encode_batched_op_msg(PyObject* self, PyObject* args) { } result = Py_BuildValue("y#O", - buffer_get_buffer(buffer), - (Py_ssize_t)buffer_get_position(buffer), + pymongo_buffer_get_buffer(buffer), + (Py_ssize_t)pymongo_buffer_get_position(buffer), to_publish); fail: destroy_codec_options(&options); - buffer_free(buffer); + pymongo_buffer_free(buffer); Py_XDECREF(to_publish); return result; } @@ -591,12 +591,12 @@ _cbson_batched_op_msg(PyObject* self, PyObject* args) { &ctx)) { return NULL; } - if (!(buffer = buffer_new())) { + if (!(buffer = pymongo_buffer_new())) { destroy_codec_options(&options); return NULL; } /* Save space for message length and request id */ - if ((buffer_save_space(buffer, 8)) == -1) { + if ((pymongo_buffer_save_space(buffer, 8)) == -1) { goto fail; } if (!buffer_write_bytes(buffer, @@ -623,16 +623,16 @@ _cbson_batched_op_msg(PyObject* self, PyObject* args) { } request_id = rand(); - position = buffer_get_position(buffer); + position = pymongo_buffer_get_position(buffer); buffer_write_int32_at_position(buffer, 0, (int32_t)position); buffer_write_int32_at_position(buffer, 4, (int32_t)request_id); result = Py_BuildValue("iy#O", request_id, - buffer_get_buffer(buffer), - (Py_ssize_t)buffer_get_position(buffer), + pymongo_buffer_get_buffer(buffer), + (Py_ssize_t)pymongo_buffer_get_position(buffer), to_publish); fail: destroy_codec_options(&options); - buffer_free(buffer); + pymongo_buffer_free(buffer); Py_XDECREF(to_publish); return result; } @@ -702,14 +702,14 @@ _batched_write_command( } /* Position of command document length */ - cmd_len_loc = buffer_get_position(buffer); + cmd_len_loc = pymongo_buffer_get_position(buffer); if (!write_dict(state->_cbson, buffer, command, 0, &options, 0)) { return 0; } /* Write type byte for array */ - *(buffer_get_buffer(buffer) + (buffer_get_position(buffer) - 1)) = 0x4; + *(pymongo_buffer_get_buffer(buffer) + (pymongo_buffer_get_position(buffer) - 1)) = 0x4; switch (op) { case _INSERT: @@ -742,7 +742,7 @@ _batched_write_command( } /* Save space for list document */ - lst_len_loc = buffer_save_space(buffer, 4); + lst_len_loc = pymongo_buffer_save_space(buffer, 4); if (lst_len_loc == -1) { return 0; } @@ -757,7 +757,7 @@ _batched_write_command( return 0; } while ((doc = PyIter_Next(iterator)) != NULL) { - int sub_doc_begin = buffer_get_position(buffer); + int sub_doc_begin = pymongo_buffer_get_position(buffer); int cur_doc_begin; int cur_size; int enough_data = 0; @@ -767,7 +767,7 @@ _batched_write_command( !buffer_write_bytes(buffer, key, (int)strlen(key) + 1)) { goto fail; } - cur_doc_begin = buffer_get_position(buffer); + cur_doc_begin = pymongo_buffer_get_position(buffer); if (!write_dict(state->_cbson, buffer, doc, 0, &options, 1)) { goto fail; } @@ -775,7 +775,7 @@ _batched_write_command( /* We have enough data, return this batch. * max_cmd_size accounts for the two trailing null bytes. 
*/ - cur_size = buffer_get_position(buffer) - cur_doc_begin; + cur_size = pymongo_buffer_get_position(buffer) - cur_doc_begin; /* This single document is too large for the command. */ if (cur_size > max_cmd_size) { if (op == _INSERT) { @@ -797,13 +797,13 @@ _batched_write_command( goto fail; } enough_data = (idx >= 1 && - (buffer_get_position(buffer) > max_split_size)); + (pymongo_buffer_get_position(buffer) > max_split_size)); if (enough_data) { /* * Roll the existing buffer back to the beginning * of the last document encoded. */ - buffer_update_position(buffer, sub_doc_begin); + pymongo_buffer_update_position(buffer, sub_doc_begin); Py_CLEAR(doc); break; } @@ -827,7 +827,7 @@ _batched_write_command( goto fail; } - position = buffer_get_position(buffer); + position = pymongo_buffer_get_position(buffer); length = position - lst_len_loc - 1; buffer_write_int32_at_position(buffer, lst_len_loc, (int32_t)length); length = position - cmd_len_loc; @@ -860,7 +860,7 @@ _cbson_encode_batched_write_command(PyObject* self, PyObject* args) { &ctx)) { return NULL; } - if (!(buffer = buffer_new())) { + if (!(buffer = pymongo_buffer_new())) { PyMem_Free(ns); destroy_codec_options(&options); return NULL; @@ -884,13 +884,13 @@ _cbson_encode_batched_write_command(PyObject* self, PyObject* args) { } result = Py_BuildValue("y#O", - buffer_get_buffer(buffer), - (Py_ssize_t)buffer_get_position(buffer), + pymongo_buffer_get_buffer(buffer), + (Py_ssize_t)pymongo_buffer_get_position(buffer), to_publish); fail: PyMem_Free(ns); destroy_codec_options(&options); - buffer_free(buffer); + pymongo_buffer_free(buffer); Py_XDECREF(to_publish); return result; } From 3f7231a1a2668c80b1dd82d18f079b387d881fa3 Mon Sep 17 00:00:00 2001 From: Ben Warner Date: Mon, 13 Jun 2022 16:04:30 -0700 Subject: [PATCH 0173/1588] PYTHON-3048 Fixed bug with incorrect validation of UTF-8 regex patterns (#970) --- THIRD-PARTY-NOTICES | 23 -------- bson/_cbsonmodule.c | 33 ++++++----- bson/encoding_helpers.c | 118 ---------------------------------------- bson/encoding_helpers.h | 29 ---------- doc/changelog.rst | 3 + setup.py | 7 +-- 6 files changed, 23 insertions(+), 190 deletions(-) delete mode 100644 bson/encoding_helpers.c delete mode 100644 bson/encoding_helpers.h diff --git a/THIRD-PARTY-NOTICES b/THIRD-PARTY-NOTICES index a307b30432..0b9fc738ed 100644 --- a/THIRD-PARTY-NOTICES +++ b/THIRD-PARTY-NOTICES @@ -71,26 +71,3 @@ OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -3) License Notice for encoding_helpers.c ----------------------------------------- - -Portions Copyright 2001 Unicode, Inc. - -Disclaimer - -This source code is provided as is by Unicode, Inc. No claims are -made as to fitness for any particular purpose. No warranties of any -kind are expressed or implied. The recipient agrees to determine -applicability of information provided. If this file has been -purchased on magnetic or optical media from Unicode, Inc., the -sole remedy for any claim will be exchange of defective media -within 90 days of receipt. - -Limitations on Rights to Redistribute This Code - -Unicode, Inc. 
hereby grants the right to freely use the information -supplied in this file in the creation of products supporting the -Unicode Standard, and to make copies of this file in any form -for internal or external distribution as long as this notice -remains attached. diff --git a/bson/_cbsonmodule.c b/bson/_cbsonmodule.c index 191ce9886f..da6a5cbda7 100644 --- a/bson/_cbsonmodule.c +++ b/bson/_cbsonmodule.c @@ -26,7 +26,6 @@ #include "buffer.h" #include "time64.h" -#include "encoding_helpers.h" #define _CBSON_MODULE #include "_cbsonmodule.h" @@ -553,12 +552,12 @@ static int _write_regex_to_buffer( PyObject* py_flags; PyObject* py_pattern; PyObject* encoded_pattern; + PyObject* decoded_pattern; long int_flags; char flags[FLAGS_SIZE]; char check_utf8 = 0; const char* pattern_data; int pattern_length, flags_length; - result_t status; /* * Both the builtin re type and our Regex class have attributes @@ -597,18 +596,8 @@ static int _write_regex_to_buffer( Py_DECREF(encoded_pattern); return 0; } - status = cbson_check_string((const unsigned char*)pattern_data, - pattern_length, check_utf8, 1); - if (status == NOT_UTF_8) { - PyObject* InvalidStringData = _error("InvalidStringData"); - if (InvalidStringData) { - PyErr_SetString(InvalidStringData, - "regex patterns must be valid UTF-8"); - Py_DECREF(InvalidStringData); - } - Py_DECREF(encoded_pattern); - return 0; - } else if (status == HAS_NULL) { + + if (strlen(pattern_data) != (size_t) pattern_length){ PyObject* InvalidDocument = _error("InvalidDocument"); if (InvalidDocument) { PyErr_SetString(InvalidDocument, @@ -619,6 +608,22 @@ static int _write_regex_to_buffer( return 0; } + if (check_utf8) { + decoded_pattern = PyUnicode_DecodeUTF8(pattern_data, (Py_ssize_t) pattern_length, NULL); + if (decoded_pattern == NULL) { + PyErr_Clear(); + PyObject* InvalidStringData = _error("InvalidStringData"); + if (InvalidStringData) { + PyErr_SetString(InvalidStringData, + "regex patterns must be valid UTF-8"); + Py_DECREF(InvalidStringData); + } + Py_DECREF(encoded_pattern); + return 0; + } + Py_DECREF(decoded_pattern); + } + if (!buffer_write_bytes(buffer, pattern_data, pattern_length + 1)) { Py_DECREF(encoded_pattern); return 0; diff --git a/bson/encoding_helpers.c b/bson/encoding_helpers.c deleted file mode 100644 index 187ce6f3bd..0000000000 --- a/bson/encoding_helpers.c +++ /dev/null @@ -1,118 +0,0 @@ -/* - * Copyright 2009-2015 MongoDB, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "encoding_helpers.h" - -/* - * Portions Copyright 2001 Unicode, Inc. - * - * Disclaimer - * - * This source code is provided as is by Unicode, Inc. No claims are - * made as to fitness for any particular purpose. No warranties of any - * kind are expressed or implied. The recipient agrees to determine - * applicability of information provided. If this file has been - * purchased on magnetic or optical media from Unicode, Inc., the - * sole remedy for any claim will be exchange of defective media - * within 90 days of receipt. 
- * - * Limitations on Rights to Redistribute This Code - * - * Unicode, Inc. hereby grants the right to freely use the information - * supplied in this file in the creation of products supporting the - * Unicode Standard, and to make copies of this file in any form - * for internal or external distribution as long as this notice - * remains attached. - */ - -/* - * Index into the table below with the first byte of a UTF-8 sequence to - * get the number of trailing bytes that are supposed to follow it. - */ -static const char trailingBytesForUTF8[256] = { - 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, - 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, - 2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2, 3,3,3,3,3,3,3,3,4,4,4,4,5,5,5,5 -}; - -/* --------------------------------------------------------------------- */ - -/* - * Utility routine to tell whether a sequence of bytes is legal UTF-8. - * This must be called with the length pre-determined by the first byte. - * The length can be set by: - * length = trailingBytesForUTF8[*source]+1; - * and the sequence is illegal right away if there aren't that many bytes - * available. - * If presented with a length > 4, this returns 0. The Unicode - * definition of UTF-8 goes up to 4-byte sequences. - */ -static unsigned char isLegalUTF8(const unsigned char* source, int length) { - unsigned char a; - const unsigned char* srcptr = source + length; - switch (length) { - default: return 0; - /* Everything else falls through when "true"... */ - case 4: if ((a = (*--srcptr)) < 0x80 || a > 0xBF) return 0; - case 3: if ((a = (*--srcptr)) < 0x80 || a > 0xBF) return 0; - case 2: if ((a = (*--srcptr)) > 0xBF) return 0; - switch (*source) { - /* no fall-through in this inner switch */ - case 0xE0: if (a < 0xA0) return 0; break; - case 0xF0: if (a < 0x90) return 0; break; - case 0xF4: if ((a > 0x8F) || (a < 0x80)) return 0; break; - default: if (a < 0x80) return 0; - } - case 1: if (*source >= 0x80 && *source < 0xC2) return 0; - if (*source > 0xF4) return 0; - } - return 1; -} - -result_t cbson_check_string(const unsigned char* string, const int length, - const char check_utf8, const char check_null) { - int position = 0; - /* By default we go character by character. Will be different for checking - * UTF-8 */ - int sequence_length = 1; - - if (!check_utf8 && !check_null) { - return VALID; - } - - while (position < length) { - if (check_null && *(string + position) == 0) { - return HAS_NULL; - } - if (check_utf8) { - sequence_length = trailingBytesForUTF8[*(string + position)] + 1; - if ((position + sequence_length) > length) { - return NOT_UTF_8; - } - if (!isLegalUTF8(string + position, sequence_length)) { - return NOT_UTF_8; - } - } - position += sequence_length; - } - - return VALID; -} diff --git a/bson/encoding_helpers.h b/bson/encoding_helpers.h deleted file mode 100644 index a5fb75860f..0000000000 --- a/bson/encoding_helpers.h +++ /dev/null @@ -1,29 +0,0 @@ -/* - * Copyright 2009-2015 MongoDB, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef ENCODING_HELPERS_H -#define ENCODING_HELPERS_H - -typedef enum { - VALID, - NOT_UTF_8, - HAS_NULL -} result_t; - -result_t cbson_check_string(const unsigned char* string, const int length, - const char check_utf8, const char check_null); - -#endif diff --git a/doc/changelog.rst b/doc/changelog.rst index b2fcb7fa24..f074a9d464 100644 --- a/doc/changelog.rst +++ b/doc/changelog.rst @@ -19,6 +19,8 @@ Bug fixes - Fixed a bug where :meth:`~pymongo.collection.Collection.estimated_document_count` would fail with a "CommandNotSupportedOnView" error on views (`PYTHON-2885`_). +- Fixed a bug where invalid UTF-8 strings could be passed as patterns for :class:`~bson.regex.Regex` + objects (`PYTHON-3048`_). :func:`bson.encode` now correctly raises :class:`bson.errors.InvalidStringData`. Unavoidable breaking changes ............................ @@ -38,6 +40,7 @@ Issues Resolved See the `PyMongo 4.2 release notes in JIRA`_ for the list of resolved issues in this release. +.. _PYTHON-3048: https://jira.mongodb.org/browse/PYTHON-3048 .. _PYTHON-2885: https://jira.mongodb.org/browse/PYTHON-2885 .. _PYTHON-3167: https://jira.mongodb.org/browse/PYTHON-3167 .. _PyMongo 4.2 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=33196 diff --git a/setup.py b/setup.py index a2df4fac67..c6b32b9fba 100755 --- a/setup.py +++ b/setup.py @@ -255,12 +255,7 @@ def build_extension(self, ext): Extension( "bson._cbson", include_dirs=["bson"], - sources=[ - "bson/_cbsonmodule.c", - "bson/time64.c", - "bson/buffer.c", - "bson/encoding_helpers.c", - ], + sources=["bson/_cbsonmodule.c", "bson/time64.c", "bson/buffer.c"], ), Extension( "pymongo._cmessage", From 98d393336411b7cd5ad4e184ca45192f76fb48e8 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Mon, 13 Jun 2022 19:54:36 -0500 Subject: [PATCH 0174/1588] PYTHON-3253 Provide FLE 2.0 API example for docs team (#969) --- test/test_encryption.py | 92 +++++++++++++++++++++++++++++++++++++++++ 1 file changed, 92 insertions(+) diff --git a/test/test_encryption.py b/test/test_encryption.py index f5c6127a25..e5a9666d2c 100644 --- a/test/test_encryption.py +++ b/test/test_encryption.py @@ -2099,5 +2099,97 @@ def test_05_roundtrip_encrypted_unindexed(self): self.assertEqual(decrypted, val) +class TestQueryableEncryptionDocsExample(EncryptionIntegrationTest): + # Queryable Encryption is not supported on Standalone topology. + @client_context.require_no_standalone + @client_context.require_version_min(6, 0, -1) + def setUp(self): + super().setUp() + + def test_queryable_encryption(self): + # MongoClient to use in testing that handles auth/tls/etc, + # and cleanup. + def MongoClient(**kwargs): + c = rs_or_single_client(**kwargs) + self.addCleanup(c.close) + return c + + # Drop data from prior test runs. + self.client.keyvault.datakeys.drop() + self.client.drop_database("docs_examples") + + kms_providers_map = {"local": {"key": LOCAL_MASTER_KEY}} + + # Create two data keys. 
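+        # (The two key _ids created below are referenced as keyId values in
+        # the encrypted_fields_map that follows.)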
+ key_vault_client = MongoClient() + client_encryption = ClientEncryption( + kms_providers_map, "keyvault.datakeys", key_vault_client, CodecOptions() + ) + key1_id = client_encryption.create_data_key("local") + key2_id = client_encryption.create_data_key("local") + + # Create an encryptedFieldsMap. + encrypted_fields_map = { + "docs_examples.encrypted": { + "fields": [ + { + "path": "encrypted_indexed", + "bsonType": "string", + "keyId": key1_id, + "queries": [ + { + "queryType": "equality", + }, + ], + }, + { + "path": "encrypted_unindexed", + "bsonType": "string", + "keyId": key2_id, + }, + ], + }, + } + + # Create an Queryable Encryption collection. + opts = AutoEncryptionOpts( + kms_providers_map, "keyvault.datakeys", encrypted_fields_map=encrypted_fields_map + ) + encrypted_client = MongoClient(auto_encryption_opts=opts) + + # Create a Queryable Encryption collection "docs_examples.encrypted". + # Because docs_examples.encrypted is in encrypted_fields_map, it is + # created with Queryable Encryption support. + db = encrypted_client.docs_examples + encrypted_coll = db.create_collection("encrypted") + + # Auto encrypt an insert and find. + + # Encrypt an insert. + encrypted_coll.insert_one( + { + "_id": 1, + "encrypted_indexed": "indexed_value", + "encrypted_unindexed": "unindexed_value", + } + ) + + # Encrypt a find. + res = encrypted_coll.find_one({"encrypted_indexed": "indexed_value"}) + assert res is not None + assert res["encrypted_indexed"] == "indexed_value" + assert res["encrypted_unindexed"] == "unindexed_value" + + # Find documents without decryption. + unencrypted_client = MongoClient() + unencrypted_coll = unencrypted_client.docs_examples.encrypted + res = unencrypted_coll.find_one({"_id": 1}) + assert res is not None + assert isinstance(res["encrypted_indexed"], Binary) + assert isinstance(res["encrypted_unindexed"], Binary) + + client_encryption.close() + + if __name__ == "__main__": unittest.main() From 43c2062305d25a7c81fee27109ea30de57379690 Mon Sep 17 00:00:00 2001 From: Ben Warner Date: Wed, 15 Jun 2022 11:22:55 -0700 Subject: [PATCH 0175/1588] PYTHON-3093 Change streams support for user-facing PIT pre- and post-images (#972) --- pymongo/change_stream.py | 5 + pymongo/collection.py | 8 +- pymongo/database.py | 13 +- pymongo/mongo_client.py | 9 +- .../change-streams-pre_and_post_images.json | 827 ++++++++++++++++++ .../unified/change-streams.json | 63 +- .../createCollection-pre_and_post_images.json | 92 ++ .../modifyCollection-pre_and_post_images.json | 111 +++ test/unified_format.py | 2 + 9 files changed, 1076 insertions(+), 54 deletions(-) create mode 100644 test/change_streams/unified/change-streams-pre_and_post_images.json create mode 100644 test/collection_management/createCollection-pre_and_post_images.json create mode 100644 test/collection_management/modifyCollection-pre_and_post_images.json diff --git a/pymongo/change_stream.py b/pymongo/change_stream.py index b4bce8da59..d2d60e25a4 100644 --- a/pymongo/change_stream.py +++ b/pymongo/change_stream.py @@ -96,6 +96,7 @@ def __init__( session: Optional["ClientSession"], start_after: Optional[Mapping[str, Any]], comment: Optional[Any] = None, + full_document_before_change: Optional[str] = None, ) -> None: if pipeline is None: pipeline = [] @@ -118,6 +119,7 @@ def __init__( self._pipeline = copy.deepcopy(pipeline) self._full_document = full_document + self._full_document_before_change = full_document_before_change self._uses_start_after = start_after is not None self._uses_resume_after = resume_after is not None 
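        # The "or" below prefers start_after over resume_after when both
        # resume options were supplied.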
self._resume_token = copy.deepcopy(start_after or resume_after) @@ -147,6 +149,9 @@ def _change_stream_options(self): if self._full_document is not None: options["fullDocument"] = self._full_document + if self._full_document_before_change is not None: + options["fullDocumentBeforeChange"] = self._full_document_before_change + resume_token = self.resume_token if resume_token is not None: if self._uses_start_after: diff --git a/pymongo/collection.py b/pymongo/collection.py index 27550e0fb3..b43e06c2a4 100644 --- a/pymongo/collection.py +++ b/pymongo/collection.py @@ -2501,6 +2501,7 @@ def watch( session: Optional["ClientSession"] = None, start_after: Optional[Mapping[str, Any]] = None, comment: Optional[Any] = None, + full_document_before_change: Optional[str] = None, ) -> CollectionChangeStream[_DocumentType]: """Watch changes on this collection. @@ -2559,6 +2560,8 @@ def watch( updates will include both a delta describing the changes to the document, as well as a copy of the entire document that was changed from some time after the change occurred. + - `full_document_before_change`: Allowed values: `whenAvailable` and `required`. Change events + may now result in a `fullDocumentBeforeChange` response field. - `resume_after` (optional): A resume token. If provided, the change stream will start returning changes that occur directly after the operation specified in the resume token. A resume token @@ -2585,6 +2588,8 @@ def watch( :Returns: A :class:`~pymongo.change_stream.CollectionChangeStream` cursor. + .. versionchanged:: 4.2 + Added ``full_document_before_change`` parameter. .. versionchanged:: 4.1 Added ``comment`` parameter. @@ -2613,7 +2618,8 @@ def watch( start_at_operation_time, session, start_after, - comment=comment, + comment, + full_document_before_change, ) def rename( diff --git a/pymongo/database.py b/pymongo/database.py index c9447c1a77..f764ade522 100644 --- a/pymongo/database.py +++ b/pymongo/database.py @@ -389,6 +389,8 @@ def create_collection( - ``pipeline`` (list): a list of aggregation pipeline stages - ``comment`` (str): a user-provided comment to attach to this command. This option is only supported on MongoDB >= 4.4. + - ``changeStreamPreAndPostImages`` (dict): a document with a boolean field ``enabled`` for + enabling pre- and post-images. .. versionchanged:: 4.2 Added ``encrypted_fields`` parameter. @@ -530,6 +532,7 @@ def watch( session: Optional["ClientSession"] = None, start_after: Optional[Mapping[str, Any]] = None, comment: Optional[Any] = None, + full_document_before_change: Optional[str] = None, ) -> DatabaseChangeStream[_DocumentType]: """Watch changes on this database. @@ -576,11 +579,13 @@ def watch( pipeline stages are valid after a ``$changeStream`` stage, see the MongoDB documentation on change streams for the supported stages. - `full_document` (optional): The fullDocument to pass as an option - to the ``$changeStream`` stage. Allowed values: 'updateLookup'. + to the ``$changeStream`` stage. Allowed values: 'updateLookup', 'whenAvailable', 'required'. When set to 'updateLookup', the change notification for partial updates will include both a delta describing the changes to the document, as well as a copy of the entire document that was changed from some time after the change occurred. + - `full_document_before_change`: Allowed values: `whenAvailable` and `required`. Change events + may now result in a `fullDocumentBeforeChange` response field. - `resume_after` (optional): A resume token. 
If provided, the change stream will start returning changes that occur directly after the operation specified in the resume token. A resume token @@ -607,6 +612,9 @@ def watch( :Returns: A :class:`~pymongo.change_stream.DatabaseChangeStream` cursor. + .. versionchanged:: 4.2 + Added ``full_document_before_change`` parameter. + .. versionchanged:: 4.1 Added ``comment`` parameter. @@ -631,7 +639,8 @@ def watch( start_at_operation_time, session, start_after, - comment=comment, + comment, + full_document_before_change, ) def _command( diff --git a/pymongo/mongo_client.py b/pymongo/mongo_client.py index 5e4cf0d754..2a4b8a1d90 100644 --- a/pymongo/mongo_client.py +++ b/pymongo/mongo_client.py @@ -871,6 +871,7 @@ def watch( session: Optional[client_session.ClientSession] = None, start_after: Optional[Mapping[str, Any]] = None, comment: Optional[Any] = None, + full_document_before_change: Optional[str] = None, ) -> ChangeStream[_DocumentType]: """Watch changes on this cluster. @@ -922,6 +923,8 @@ def watch( updates will include both a delta describing the changes to the document, as well as a copy of the entire document that was changed from some time after the change occurred. + - `full_document_before_change`: Allowed values: `whenAvailable` and `required`. Change events + may now result in a `fullDocumentBeforeChange` response field. - `resume_after` (optional): A resume token. If provided, the change stream will start returning changes that occur directly after the operation specified in the resume token. A resume token @@ -948,6 +951,9 @@ def watch( :Returns: A :class:`~pymongo.change_stream.ClusterChangeStream` cursor. + .. versionchanged:: 4.2 + Added ``full_document_before_change`` parameter. + .. versionchanged:: 4.1 Added ``comment`` parameter. @@ -972,7 +978,8 @@ def watch( start_at_operation_time, session, start_after, - comment=comment, + comment, + full_document_before_change, ) @property diff --git a/test/change_streams/unified/change-streams-pre_and_post_images.json b/test/change_streams/unified/change-streams-pre_and_post_images.json new file mode 100644 index 0000000000..8beefb2bc8 --- /dev/null +++ b/test/change_streams/unified/change-streams-pre_and_post_images.json @@ -0,0 +1,827 @@ +{ + "description": "change-streams-pre_and_post_images", + "schemaVersion": "1.4", + "runOnRequirements": [ + { + "minServerVersion": "6.0.0", + "topologies": [ + "replicaset", + "sharded-replicaset", + "load-balanced" + ], + "serverless": "forbid" + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "collMod", + "insert", + "update", + "getMore", + "killCursors" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "change-stream-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "test" + } + } + ], + "initialData": [ + { + "collectionName": "test", + "databaseName": "change-stream-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ], + "tests": [ + { + "description": "fullDocument:whenAvailable with changeStreamPreAndPostImages enabled", + "operations": [ + { + "name": "runCommand", + "object": "database0", + "arguments": { + "commandName": "collMod", + "command": { + "collMod": "test", + "changeStreamPreAndPostImages": { + "enabled": true + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [], + "fullDocument": "whenAvailable" + }, + 
"saveResultAsEntity": "changeStream0" + }, + { + "name": "updateOne", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "update": { + "$set": { + "x": 1 + } + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "operationType": "update", + "ns": { + "db": "change-stream-tests", + "coll": "test" + }, + "updateDescription": { + "$$type": "object" + }, + "fullDocument": { + "_id": 1, + "x": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "test", + "pipeline": [ + { + "$changeStream": { + "fullDocument": "whenAvailable" + } + } + ] + } + } + } + ] + } + ] + }, + { + "description": "fullDocument:whenAvailable with changeStreamPreAndPostImages disabled", + "operations": [ + { + "name": "runCommand", + "object": "database0", + "arguments": { + "commandName": "collMod", + "command": { + "collMod": "test", + "changeStreamPreAndPostImages": { + "enabled": false + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [], + "fullDocument": "whenAvailable" + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "updateOne", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "update": { + "$set": { + "x": 1 + } + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "operationType": "update", + "ns": { + "db": "change-stream-tests", + "coll": "test" + }, + "updateDescription": { + "$$type": "object" + }, + "fullDocument": null + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "test", + "pipeline": [ + { + "$changeStream": { + "fullDocument": "whenAvailable" + } + } + ] + } + } + } + ] + } + ] + }, + { + "description": "fullDocument:required with changeStreamPreAndPostImages enabled", + "operations": [ + { + "name": "runCommand", + "object": "database0", + "arguments": { + "commandName": "collMod", + "command": { + "collMod": "test", + "changeStreamPreAndPostImages": { + "enabled": true + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [], + "fullDocument": "required" + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "updateOne", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "update": { + "$set": { + "x": 1 + } + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "operationType": "update", + "ns": { + "db": "change-stream-tests", + "coll": "test" + }, + "updateDescription": { + "$$type": "object" + }, + "fullDocument": { + "_id": 1, + "x": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "test", + "pipeline": [ + { + "$changeStream": { + "fullDocument": "required" + } + } + ] + } + } + } + ] + } + ] + }, + { + "description": "fullDocument:required with changeStreamPreAndPostImages disabled", + "operations": [ + { + "name": "runCommand", + "object": "database0", + "arguments": { + "commandName": "collMod", + "command": { + "collMod": "test", + "changeStreamPreAndPostImages": { + "enabled": false + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [], + "fullDocument": "required" + }, + "saveResultAsEntity": 
"changeStream0" + }, + { + "name": "updateOne", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "update": { + "$set": { + "x": 1 + } + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectError": { + "isClientError": false + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "test", + "pipeline": [ + { + "$changeStream": { + "fullDocument": "required" + } + } + ] + } + } + } + ] + } + ] + }, + { + "description": "fullDocumentBeforeChange:whenAvailable with changeStreamPreAndPostImages enabled", + "operations": [ + { + "name": "runCommand", + "object": "database0", + "arguments": { + "commandName": "collMod", + "command": { + "collMod": "test", + "changeStreamPreAndPostImages": { + "enabled": true + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [], + "fullDocumentBeforeChange": "whenAvailable" + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "updateOne", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "update": { + "$set": { + "x": 1 + } + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "operationType": "update", + "ns": { + "db": "change-stream-tests", + "coll": "test" + }, + "updateDescription": { + "$$type": "object" + }, + "fullDocumentBeforeChange": { + "_id": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "test", + "pipeline": [ + { + "$changeStream": { + "fullDocumentBeforeChange": "whenAvailable" + } + } + ] + } + } + } + ] + } + ] + }, + { + "description": "fullDocumentBeforeChange:whenAvailable with changeStreamPreAndPostImages disabled", + "operations": [ + { + "name": "runCommand", + "object": "database0", + "arguments": { + "commandName": "collMod", + "command": { + "collMod": "test", + "changeStreamPreAndPostImages": { + "enabled": false + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [], + "fullDocumentBeforeChange": "whenAvailable" + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "updateOne", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "update": { + "$set": { + "x": 1 + } + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "operationType": "update", + "ns": { + "db": "change-stream-tests", + "coll": "test" + }, + "updateDescription": { + "$$type": "object" + }, + "fullDocumentBeforeChange": null + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "test", + "pipeline": [ + { + "$changeStream": { + "fullDocumentBeforeChange": "whenAvailable" + } + } + ] + } + } + } + ] + } + ] + }, + { + "description": "fullDocumentBeforeChange:required with changeStreamPreAndPostImages enabled", + "operations": [ + { + "name": "runCommand", + "object": "database0", + "arguments": { + "commandName": "collMod", + "command": { + "collMod": "test", + "changeStreamPreAndPostImages": { + "enabled": true + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [], + "fullDocumentBeforeChange": "required" + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "updateOne", + "object": 
"collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "update": { + "$set": { + "x": 1 + } + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "operationType": "update", + "ns": { + "db": "change-stream-tests", + "coll": "test" + }, + "updateDescription": { + "$$type": "object" + }, + "fullDocumentBeforeChange": { + "_id": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "test", + "pipeline": [ + { + "$changeStream": { + "fullDocumentBeforeChange": "required" + } + } + ] + } + } + } + ] + } + ] + }, + { + "description": "fullDocumentBeforeChange:required with changeStreamPreAndPostImages disabled", + "operations": [ + { + "name": "runCommand", + "object": "database0", + "arguments": { + "commandName": "collMod", + "command": { + "collMod": "test", + "changeStreamPreAndPostImages": { + "enabled": false + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [], + "fullDocumentBeforeChange": "required" + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "updateOne", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "update": { + "$set": { + "x": 1 + } + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectError": { + "isClientError": false + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "test", + "pipeline": [ + { + "$changeStream": { + "fullDocumentBeforeChange": "required" + } + } + ] + } + } + } + ] + } + ] + }, + { + "description": "fullDocumentBeforeChange:off with changeStreamPreAndPostImages enabled", + "operations": [ + { + "name": "runCommand", + "object": "database0", + "arguments": { + "commandName": "collMod", + "command": { + "collMod": "test", + "changeStreamPreAndPostImages": { + "enabled": true + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [], + "fullDocumentBeforeChange": "off" + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "updateOne", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "update": { + "$set": { + "x": 1 + } + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "operationType": "update", + "ns": { + "db": "change-stream-tests", + "coll": "test" + }, + "updateDescription": { + "$$type": "object" + }, + "fullDocumentBeforeChange": { + "$$exists": false + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "test", + "pipeline": [ + { + "$changeStream": { + "fullDocumentBeforeChange": "off" + } + } + ] + } + } + } + ] + } + ] + }, + { + "description": "fullDocumentBeforeChange:off with changeStreamPreAndPostImages disabled", + "operations": [ + { + "name": "runCommand", + "object": "database0", + "arguments": { + "commandName": "collMod", + "command": { + "collMod": "test", + "changeStreamPreAndPostImages": { + "enabled": false + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [], + "fullDocumentBeforeChange": "off" + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "updateOne", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "update": { + "$set": { 
+ "x": 1 + } + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "operationType": "update", + "ns": { + "db": "change-stream-tests", + "coll": "test" + }, + "updateDescription": { + "$$type": "object" + }, + "fullDocumentBeforeChange": { + "$$exists": false + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "test", + "pipeline": [ + { + "$changeStream": { + "fullDocumentBeforeChange": "off" + } + } + ] + } + } + } + ] + } + ] + } + ] +} diff --git a/test/change_streams/unified/change-streams.json b/test/change_streams/unified/change-streams.json index 572d2d6e97..c8b60ed4e2 100644 --- a/test/change_streams/unified/change-streams.json +++ b/test/change_streams/unified/change-streams.json @@ -5,8 +5,7 @@ { "minServerVersion": "3.6", "topologies": [ - "replicaset", - "sharded-replicaset" + "replicaset" ], "serverless": "forbid" } @@ -314,10 +313,7 @@ "description": "Test that comment is set on getMore", "runOnRequirements": [ { - "minServerVersion": "4.4.0", - "topologies": [ - "replicaset" - ] + "minServerVersion": "4.4.0" } ], "operations": [ @@ -405,10 +401,7 @@ "description": "Test that comment is not set on getMore - pre 4.4", "runOnRequirements": [ { - "maxServerVersion": "4.3.99", - "topologies": [ - "replicaset" - ] + "maxServerVersion": "4.3.99" } ], "operations": [ @@ -806,10 +799,7 @@ "description": "$changeStream must be the first stage in a change stream pipeline sent to the server", "runOnRequirements": [ { - "minServerVersion": "3.6.0", - "topologies": [ - "replicaset" - ] + "minServerVersion": "3.6.0" } ], "operations": [ @@ -882,10 +872,7 @@ "description": "The server returns change stream responses in the specified server response format", "runOnRequirements": [ { - "minServerVersion": "3.6.0", - "topologies": [ - "replicaset" - ] + "minServerVersion": "3.6.0" } ], "operations": [ @@ -935,10 +922,7 @@ "description": "Executing a watch helper on a Collection results in notifications for changes to the specified collection", "runOnRequirements": [ { - "minServerVersion": "3.6.0", - "topologies": [ - "replicaset" - ] + "minServerVersion": "3.6.0" } ], "operations": [ @@ -1023,10 +1007,7 @@ "description": "Change Stream should allow valid aggregate pipeline stages", "runOnRequirements": [ { - "minServerVersion": "3.6.0", - "topologies": [ - "replicaset" - ] + "minServerVersion": "3.6.0" } ], "operations": [ @@ -1113,10 +1094,7 @@ "description": "Executing a watch helper on a Database results in notifications for changes to all collections in the specified database.", "runOnRequirements": [ { - "minServerVersion": "3.8.0", - "topologies": [ - "replicaset" - ] + "minServerVersion": "3.8.0" } ], "operations": [ @@ -1218,10 +1196,7 @@ "description": "Executing a watch helper on a MongoClient results in notifications for changes to all collections in all databases in the cluster.", "runOnRequirements": [ { - "minServerVersion": "3.8.0", - "topologies": [ - "replicaset" - ] + "minServerVersion": "3.8.0" } ], "operations": [ @@ -1342,10 +1317,7 @@ "description": "Test insert, update, replace, and delete event types", "runOnRequirements": [ { - "minServerVersion": "3.6.0", - "topologies": [ - "replicaset" - ] + "minServerVersion": "3.6.0" } ], "operations": [ @@ -1497,10 +1469,7 @@ "description": "Test rename and invalidate event types", "runOnRequirements": [ { - "minServerVersion": "4.0.1", - "topologies": [ - "replicaset" - ] + 
"minServerVersion": "4.0.1" } ], "operations": [ @@ -1577,10 +1546,7 @@ "description": "Test drop and invalidate event types", "runOnRequirements": [ { - "minServerVersion": "4.0.1", - "topologies": [ - "replicaset" - ] + "minServerVersion": "4.0.1" } ], "operations": [ @@ -1646,10 +1612,7 @@ "description": "Test consecutive resume", "runOnRequirements": [ { - "minServerVersion": "4.1.7", - "topologies": [ - "replicaset" - ] + "minServerVersion": "4.1.7" } ], "operations": [ diff --git a/test/collection_management/createCollection-pre_and_post_images.json b/test/collection_management/createCollection-pre_and_post_images.json new file mode 100644 index 0000000000..f488deacd8 --- /dev/null +++ b/test/collection_management/createCollection-pre_and_post_images.json @@ -0,0 +1,92 @@ +{ + "description": "createCollection-pre_and_post_images", + "schemaVersion": "1.4", + "runOnRequirements": [ + { + "minServerVersion": "6.0", + "serverless": "forbid" + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "papi-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "test" + } + } + ], + "tests": [ + { + "description": "createCollection with changeStreamPreAndPostImages enabled", + "operations": [ + { + "name": "dropCollection", + "object": "database0", + "arguments": { + "collection": "test" + } + }, + { + "name": "createCollection", + "object": "database0", + "arguments": { + "collection": "test", + "changeStreamPreAndPostImages": { + "enabled": true + } + } + }, + { + "name": "assertCollectionExists", + "object": "testRunner", + "arguments": { + "databaseName": "papi-tests", + "collectionName": "test" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "drop": "test" + }, + "databaseName": "papi-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "create": "test", + "changeStreamPreAndPostImages": { + "enabled": true + } + }, + "databaseName": "papi-tests" + } + } + ] + } + ] + } + ] +} diff --git a/test/collection_management/modifyCollection-pre_and_post_images.json b/test/collection_management/modifyCollection-pre_and_post_images.json new file mode 100644 index 0000000000..8026faeb17 --- /dev/null +++ b/test/collection_management/modifyCollection-pre_and_post_images.json @@ -0,0 +1,111 @@ +{ + "description": "modifyCollection-pre_and_post_images", + "schemaVersion": "1.4", + "runOnRequirements": [ + { + "minServerVersion": "6.0", + "serverless": "forbid" + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "papi-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "test" + } + } + ], + "tests": [ + { + "description": "modifyCollection to changeStreamPreAndPostImages enabled", + "operations": [ + { + "name": "dropCollection", + "object": "database0", + "arguments": { + "collection": "test" + } + }, + { + "name": "createCollection", + "object": "database0", + "arguments": { + "collection": "test", + "changeStreamPreAndPostImages": { + "enabled": false + } + } + }, + { + "name": "assertCollectionExists", + "object": "testRunner", + "arguments": { + "databaseName": "papi-tests", + "collectionName": "test" + } 
+        },
+        {
+          "name": "modifyCollection",
+          "object": "database0",
+          "arguments": {
+            "collection": "test",
+            "changeStreamPreAndPostImages": {
+              "enabled": true
+            }
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client0",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "command": {
+                  "drop": "test"
+                },
+                "databaseName": "papi-tests"
+              }
+            },
+            {
+              "commandStartedEvent": {
+                "command": {
+                  "create": "test",
+                  "changeStreamPreAndPostImages": {
+                    "enabled": false
+                  }
+                }
+              }
+            },
+            {
+              "commandStartedEvent": {
+                "command": {
+                  "collMod": "test",
+                  "changeStreamPreAndPostImages": {
+                    "enabled": true
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    }
+  ]
+}
diff --git a/test/unified_format.py b/test/unified_format.py
index cdba80c23e..cb69882b2c 100644
--- a/test/unified_format.py
+++ b/test/unified_format.py
@@ -844,6 +844,8 @@ def maybe_skip_test(self, spec):
             self.skipTest("PyMongo does not support timeoutMode")
         if name == "createEntities":
             self.maybe_skip_entity(op.get("arguments", {}).get("entities", []))
+        if name == "modifyCollection":
+            self.skipTest("PyMongo does not support modifyCollection")
 
     def maybe_skip_entity(self, entities):
         for entity in entities:

From dc21a083f47e23a953032610448f3caaaa34f496 Mon Sep 17 00:00:00 2001
From: Julius Park
Date: Wed, 15 Jun 2022 12:25:11 -0700
Subject: [PATCH 0176/1588] PYTHON-3300 Add Explicit Queryable Encryption
 Example to Docs (#973)

---
 doc/examples/encryption.rst | 136 ++++++++++++++++++++++++++++++++++++
 1 file changed, 136 insertions(+)

diff --git a/doc/examples/encryption.rst b/doc/examples/encryption.rst
index 5568b0d741..941e1bd029 100644
--- a/doc/examples/encryption.rst
+++ b/doc/examples/encryption.rst
@@ -409,6 +409,142 @@ Automatic encryption in Queryable Encryption is configured with an ``encrypted_f
 In the above example, the ``firstName`` and ``lastName`` fields are automatically
 encrypted and decrypted.
 
+Explicit Queryable Encryption (Beta)
+````````````````````````````````````
+
+PyMongo 4.2 brings beta support for Queryable Encryption with MongoDB 6.0.
+
+Queryable Encryption is the second version of Client-Side Field Level Encryption.
+Data is encrypted client-side. Queryable Encryption supports indexed encrypted fields,
+which are further processed server-side.
+
+You must have MongoDB 6.0rc8+ to preview the capability.
+
+Until the PyMongo 4.2 release is finalized, it can be installed using::
+
+  pip install "pymongo@git+ssh://git@github.com/mongodb/mongo-python-driver.git@4.2.0b0#egg=pymongo[encryption]"
+
+Additionally, ``libmongocrypt`` must be installed from `source `_.
+
+Explicit encryption in Queryable Encryption is performed using the ``encrypt`` and ``decrypt``
+methods. Automatic encryption (to allow ``find_one`` to automatically decrypt) is configured
+using an ``encrypted_fields`` mapping, as demonstrated by the following example::
+
+    import os
+
+    from pymongo import MongoClient
+    from pymongo.encryption import (Algorithm, AutoEncryptionOpts,
+                                    ClientEncryption, QueryType)
+
+
+    def main():
+        # This must be the same master key that was used to create
+        # the encryption key.
+        local_master_key = os.urandom(96)
+        kms_providers = {"local": {"key": local_master_key}}
+
+        # The MongoDB namespace (db.collection) used to store
+        # the encryption data keys.
+        key_vault_namespace = "encryption.__pymongoTestKeyVault"
+        key_vault_db_name, key_vault_coll_name = key_vault_namespace.split(".", 1)
+
+        # Set up the key vault (key_vault_namespace) for this example.
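+        # The key vault may live on the same cluster as the application data;
+        # production deployments often keep it on a separate, access-controlled
+        # cluster instead.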
+ client = MongoClient() + key_vault = client[key_vault_db_name][key_vault_coll_name] + + # Ensure that two data keys cannot share the same keyAltName. + key_vault.drop() + key_vault.create_index( + "keyAltNames", + unique=True, + partialFilterExpression={"keyAltNames": {"$exists": True}}) + + client_encryption = ClientEncryption( + kms_providers, + key_vault_namespace, + # The MongoClient to use for reading/writing to the key vault. + # This can be the same MongoClient used by the main application. + client, + # The CodecOptions class used for encrypting and decrypting. + # This should be the same CodecOptions instance you have configured + # on MongoClient, Database, or Collection. + client.codec_options) + + # Create a new data key for the encryptedField. + indexed_key_id = client_encryption.create_data_key( + 'local') + unindexed_key_id = client_encryption.create_data_key( + 'local') + + encrypted_fields = { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": indexed_key_id, + "path": "encryptedIndexed", + "bsonType": "string", + "queries": { + "queryType": "equality" + } + }, + { + "keyId": unindexed_key_id, + "path": "encryptedUnindexed", + "bsonType": "string", + } + ] + } + + opts = AutoEncryptionOpts( + {"local": {"key": local_master_key}}, + key_vault.full_name, + bypass_query_analysis=True, + key_vault_client=client, + ) + + # The MongoClient used to read/write application data. + encrypted_client = MongoClient(auto_encryption_opts=opts) + encrypted_client.drop_database("test") + db = encrypted_client.test + + # Create the collection with encrypted fields. + coll = db.create_collection("coll", encrypted_fields=encrypted_fields) + + # Create and encrypt an indexed and unindexed value. + val = "encrypted indexed value" + unindexed_val = "encrypted unindexed value" + insert_payload_indexed = client_encryption.encrypt(val, Algorithm.INDEXED, indexed_key_id) + insert_payload_unindexed = client_encryption.encrypt(unindexed_val, Algorithm.UNINDEXED, + unindexed_key_id) + + # Insert the payloads. + coll.insert_one({ + "encryptedIndexed": insert_payload_indexed, + "encryptedUnindexed": insert_payload_unindexed + }) + + # Encrypt our find payload using QueryType.EQUALITY. + # The value of "data_key_id" must be the same as used to encrypt the values + # above. + find_payload = client_encryption.encrypt( + val, Algorithm.INDEXED, indexed_key_id, query_type=QueryType.EQUALITY + ) + + # Find the document we inserted using the encrypted payload. + # The returned document is automatically decrypted. + doc = coll.find_one({"encryptedIndexed": find_payload}) + print('Returned document: %s' % (doc,)) + + # Cleanup resources. + client_encryption.close() + encrypted_client.close() + + + if __name__ == "__main__": + main() + .. _explicit-client-side-encryption: Explicit Encryption From f45f00b4e53fd92702b50757b9cbcf9a4458f6cb Mon Sep 17 00:00:00 2001 From: Julius Park Date: Wed, 15 Jun 2022 13:16:07 -0700 Subject: [PATCH 0177/1588] PYTHON-3300 Fix Explicit Queryable Encryption Example (#975) --- doc/examples/encryption.rst | 1 + 1 file changed, 1 insertion(+) diff --git a/doc/examples/encryption.rst b/doc/examples/encryption.rst index 941e1bd029..0e349f48da 100644 --- a/doc/examples/encryption.rst +++ b/doc/examples/encryption.rst @@ -540,6 +540,7 @@ using an ``encrypted_fields`` mapping, as demonstrated by the following example: # Cleanup resources. 
client_encryption.close() encrypted_client.close() + client.close() if __name__ == "__main__": From 02a9df69f66cf34e3d3858e49152af2ad2c88bfd Mon Sep 17 00:00:00 2001 From: Julius Park Date: Wed, 15 Jun 2022 13:16:22 -0700 Subject: [PATCH 0178/1588] PYTHON-3227 Clustered Indexes for all Collections (#971) --- pymongo/collection.py | 6 +- pymongo/database.py | 66 ++++--- .../clustered-indexes.json | 177 ++++++++++++++++++ test/test_encryption.py | 4 +- test/unified_format.py | 2 +- test/utils.py | 2 - 6 files changed, 223 insertions(+), 34 deletions(-) create mode 100644 test/collection_management/clustered-indexes.json diff --git a/pymongo/collection.py b/pymongo/collection.py index b43e06c2a4..afaef480cc 100644 --- a/pymongo/collection.py +++ b/pymongo/collection.py @@ -117,7 +117,6 @@ def __init__( read_concern: Optional["ReadConcern"] = None, session: Optional["ClientSession"] = None, timeout: Optional[float] = None, - encrypted_fields: Optional[Mapping[str, Any]] = None, **kwargs: Any, ) -> None: """Get / create a Mongo collection. @@ -159,13 +158,11 @@ def __init__( - `session` (optional): a :class:`~pymongo.client_session.ClientSession` that is used with the create collection command - - `encrypted_fields`: **(BETA)** Document that describes the encrypted fields for - Queryable Encryption. If provided it will be passed to the create collection command. - `**kwargs` (optional): additional keyword arguments will be passed as options for the create collection command .. versionchanged:: 4.2 - Added ``encrypted_fields`` parameter. + Added the ``clusteredIndex`` and ``encryptedFields`` parameters. .. versionchanged:: 4.0 Removed the reindex, map_reduce, inline_map_reduce, @@ -222,6 +219,7 @@ def __init__( self.__database: Database[_DocumentType] = database self.__name = name self.__full_name = "%s.%s" % (self.__database.name, self.__name) + encrypted_fields = kwargs.pop("encryptedFields", None) if create or kwargs or collation: if encrypted_fields: common.validate_is_mapping("encrypted_fields", encrypted_fields) diff --git a/pymongo/database.py b/pymongo/database.py index f764ade522..d3746b0c55 100644 --- a/pymongo/database.py +++ b/pymongo/database.py @@ -304,7 +304,6 @@ def create_collection( read_concern: Optional["ReadConcern"] = None, session: Optional["ClientSession"] = None, timeout: Optional[float] = None, - encrypted_fields: Optional[Mapping[str, Any]] = None, **kwargs: Any, ) -> Collection[_DocumentType]: """Create a new :class:`~pymongo.collection.Collection` in this @@ -336,28 +335,6 @@ def create_collection( :class:`~pymongo.collation.Collation`. - `session` (optional): a :class:`~pymongo.client_session.ClientSession`. - - `encrypted_fields`: **(BETA)** Document that describes the encrypted fields for - Queryable Encryption. 
For example::
-
-            {
-              "escCollection": "enxcol_.encryptedCollection.esc",
-              "eccCollection": "enxcol_.encryptedCollection.ecc",
-              "ecocCollection": "enxcol_.encryptedCollection.ecoc",
-              "fields": [
-                  {
-                      "path": "firstName",
-                      "keyId": Binary.from_uuid(UUID('00000000-0000-0000-0000-000000000000')),
-                      "bsonType": "string",
-                      "queries": {"queryType": "equality"}
-                  },
-                  {
-                      "path": "ssn",
-                      "keyId": Binary.from_uuid(UUID('04104104-1041-0410-4104-104104104104')),
-                      "bsonType": "string"
-                  }
-              ]
-
-          }
 }
         - `**kwargs` (optional): additional keyword arguments will be
           passed as options for the `create collection command`_
@@ -389,11 +366,42 @@ def create_collection(
         - ``pipeline`` (list): a list of aggregation pipeline stages
         - ``comment`` (str): a user-provided comment to attach to this
           command. This option is only supported on MongoDB >= 4.4.
+        - ``encryptedFields`` (dict): **(BETA)** Document that describes the encrypted fields for
+          Queryable Encryption. For example::
+
+            {
+              "escCollection": "enxcol_.encryptedCollection.esc",
+              "eccCollection": "enxcol_.encryptedCollection.ecc",
+              "ecocCollection": "enxcol_.encryptedCollection.ecoc",
+              "fields": [
+                  {
+                      "path": "firstName",
+                      "keyId": Binary.from_uuid(UUID('00000000-0000-0000-0000-000000000000')),
+                      "bsonType": "string",
+                      "queries": {"queryType": "equality"}
+                  },
+                  {
+                      "path": "ssn",
+                      "keyId": Binary.from_uuid(UUID('04104104-1041-0410-4104-104104104104')),
+                      "bsonType": "string"
+                  }
+              ]
+            }
+        - ``clusteredIndex`` (dict): Document that specifies the clustered index
+          configuration. It must have the following form::
+
+            {
+                // key pattern must be {_id: 1}
+                key: <key pattern>, // required
+                unique: <bool>, // required, must be ‘true’
+                name: <string>, // optional, otherwise automatically generated
+                v: <int>, // optional, must be ‘2’ if provided
+            }
         - ``changeStreamPreAndPostImages`` (dict): a document with a boolean field ``enabled`` for
           enabling pre- and post-images.
 
         .. versionchanged:: 4.2
-           Added ``encrypted_fields`` parameter.
+           Added the ``clusteredIndex`` and ``encryptedFields`` parameters.
 
         .. versionchanged:: 3.11
            This method is now supported inside multi-document transactions
 
         ..
_create collection command: https://mongodb.com/docs/manual/reference/command/create """ + encrypted_fields = kwargs.get("encryptedFields") if ( not encrypted_fields and self.client.options.auto_encryption_opts @@ -419,8 +428,14 @@ def create_collection( encrypted_fields = self.client.options.auto_encryption_opts._encrypted_fields_map.get( "%s.%s" % (self.name, name) ) + kwargs["encryptedFields"] = encrypted_fields + if encrypted_fields: - common.validate_is_mapping("encrypted_fields", encrypted_fields) + common.validate_is_mapping("encryptedFields", encrypted_fields) + + clustered_index = kwargs.get("clusteredIndex") + if clustered_index: + common.validate_is_mapping("clusteredIndex", clustered_index) with self.__client._tmp_session(session) as s: # Skip this check in a transaction where listCollections is not @@ -439,7 +454,6 @@ def create_collection( read_concern, session=s, timeout=timeout, - encrypted_fields=encrypted_fields, **kwargs, ) diff --git a/test/collection_management/clustered-indexes.json b/test/collection_management/clustered-indexes.json new file mode 100644 index 0000000000..739d0fd8b6 --- /dev/null +++ b/test/collection_management/clustered-indexes.json @@ -0,0 +1,177 @@ +{ + "description": "clustered-indexes", + "schemaVersion": "1.4", + "runOnRequirements": [ + { + "minServerVersion": "5.3", + "serverless": "forbid" + } + ], + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "ts-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "test" + } + } + ], + "initialData": [ + { + "collectionName": "test", + "databaseName": "ts-tests", + "documents": [] + } + ], + "tests": [ + { + "description": "createCollection with clusteredIndex", + "operations": [ + { + "name": "dropCollection", + "object": "database0", + "arguments": { + "collection": "test" + } + }, + { + "name": "createCollection", + "object": "database0", + "arguments": { + "collection": "test", + "clusteredIndex": { + "key": { + "_id": 1 + }, + "unique": true, + "name": "test index" + } + } + }, + { + "name": "assertCollectionExists", + "object": "testRunner", + "arguments": { + "databaseName": "ts-tests", + "collectionName": "test" + } + } + ] + }, + { + "description": "listCollections includes clusteredIndex", + "operations": [ + { + "name": "dropCollection", + "object": "database0", + "arguments": { + "collection": "test" + } + }, + { + "name": "createCollection", + "object": "database0", + "arguments": { + "collection": "test", + "clusteredIndex": { + "key": { + "_id": 1 + }, + "unique": true, + "name": "test index" + } + } + }, + { + "name": "listCollections", + "object": "database0", + "arguments": { + "filter": { + "name": { + "$eq": "test" + } + } + }, + "expectResult": [ + { + "name": "test", + "options": { + "clusteredIndex": { + "key": { + "_id": 1 + }, + "unique": true, + "name": "test index", + "v": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "listIndexes returns the index", + "operations": [ + { + "name": "dropCollection", + "object": "database0", + "arguments": { + "collection": "test" + } + }, + { + "name": "createCollection", + "object": "database0", + "arguments": { + "collection": "test", + "clusteredIndex": { + "key": { + "_id": 1 + }, + "unique": true, + "name": "test index" + } + } + }, + { + "name": "listIndexes", + "object": "collection0", + "expectResult": [ + { + "key": { + "_id": 1 + }, + 
"name": "test index", + "clustered": true, + "unique": true, + "v": { + "$$type": [ + "int", + "long" + ] + } + } + ] + } + ] + } + ] +} diff --git a/test/test_encryption.py b/test/test_encryption.py index e5a9666d2c..209308aba6 100644 --- a/test/test_encryption.py +++ b/test/test_encryption.py @@ -658,7 +658,9 @@ def setup_scenario(self, scenario_def): kwargs["codec_options"] = OPTS if not data: kwargs["write_concern"] = wc - db.create_collection(coll_name, **kwargs, encrypted_fields=encrypted_fields) + if encrypted_fields: + kwargs["encryptedFields"] = encrypted_fields + db.create_collection(coll_name, **kwargs) coll = db[coll_name] if data: # Load data. diff --git a/test/unified_format.py b/test/unified_format.py index cb69882b2c..001af4434c 100644 --- a/test/unified_format.py +++ b/test/unified_format.py @@ -996,7 +996,7 @@ def _collectionOperation_count(self, target, *args, **kwargs): def _collectionOperation_listIndexes(self, target, *args, **kwargs): if "batch_size" in kwargs: self.skipTest("PyMongo does not support batch_size for list_indexes") - return target.list_indexes(*args, **kwargs) + return list(target.list_indexes(*args, **kwargs)) def _collectionOperation_listIndexNames(self, target, *args, **kwargs): self.skipTest("PyMongo does not support list_index_names") diff --git a/test/utils.py b/test/utils.py index 1aeb7571ab..7071764b15 100644 --- a/test/utils.py +++ b/test/utils.py @@ -1002,8 +1002,6 @@ def parse_spec_options(opts): if "maxCommitTimeMS" in opts: opts["max_commit_time_ms"] = opts.pop("maxCommitTimeMS") - if "encryptedFields" in opts: - opts["encrypted_fields"] = opts.pop("encryptedFields") if "hint" in opts: hint = opts.pop("hint") if not isinstance(hint, str): From 922e63d6e0b235d13ee81739aceecf0cb92a4dd3 Mon Sep 17 00:00:00 2001 From: Ben Warner Date: Thu, 16 Jun 2022 11:40:09 -0700 Subject: [PATCH 0179/1588] PYTHON-3093 Continuation of #972 (#976) --- pymongo/collection.py | 16 +++++++++------- pymongo/database.py | 16 +++++++++------- pymongo/mongo_client.py | 16 +++++++++------- 3 files changed, 27 insertions(+), 21 deletions(-) diff --git a/pymongo/collection.py b/pymongo/collection.py index afaef480cc..0088388624 100644 --- a/pymongo/collection.py +++ b/pymongo/collection.py @@ -2553,13 +2553,15 @@ def watch( pipeline stages are valid after a ``$changeStream`` stage, see the MongoDB documentation on change streams for the supported stages. - `full_document` (optional): The fullDocument to pass as an option - to the ``$changeStream`` stage. Allowed values: 'updateLookup'. - When set to 'updateLookup', the change notification for partial - updates will include both a delta describing the changes to the - document, as well as a copy of the entire document that was - changed from some time after the change occurred. - - `full_document_before_change`: Allowed values: `whenAvailable` and `required`. Change events - may now result in a `fullDocumentBeforeChange` response field. + to the ``$changeStream`` stage. Allowed values: 'updateLookup', + 'whenAvailable', 'required'. When set to 'updateLookup', the + change notification for partial updates will include both a delta + describing the changes to the document, as well as a copy of the + entire document that was changed from some time after the change + occurred. + - `full_document_before_change`: Allowed values: 'whenAvailable' + and 'required'. Change events may now result in a + 'fullDocumentBeforeChange' response field. - `resume_after` (optional): A resume token. 
If provided, the change stream will start returning changes that occur directly after the operation specified in the resume token. A resume token diff --git a/pymongo/database.py b/pymongo/database.py index d3746b0c55..fcf1f3e36c 100644 --- a/pymongo/database.py +++ b/pymongo/database.py @@ -593,13 +593,15 @@ def watch( pipeline stages are valid after a ``$changeStream`` stage, see the MongoDB documentation on change streams for the supported stages. - `full_document` (optional): The fullDocument to pass as an option - to the ``$changeStream`` stage. Allowed values: 'updateLookup', 'whenAvailable', 'required'. - When set to 'updateLookup', the change notification for partial - updates will include both a delta describing the changes to the - document, as well as a copy of the entire document that was - changed from some time after the change occurred. - - `full_document_before_change`: Allowed values: `whenAvailable` and `required`. Change events - may now result in a `fullDocumentBeforeChange` response field. + to the ``$changeStream`` stage. Allowed values: 'updateLookup', + 'whenAvailable', 'required'. When set to 'updateLookup', the + change notification for partial updates will include both a delta + describing the changes to the document, as well as a copy of the + entire document that was changed from some time after the change + occurred. + - `full_document_before_change`: Allowed values: 'whenAvailable' + and 'required'. Change events may now result in a + 'fullDocumentBeforeChange' response field. - `resume_after` (optional): A resume token. If provided, the change stream will start returning changes that occur directly after the operation specified in the resume token. A resume token diff --git a/pymongo/mongo_client.py b/pymongo/mongo_client.py index 2a4b8a1d90..4b20c2e5b7 100644 --- a/pymongo/mongo_client.py +++ b/pymongo/mongo_client.py @@ -918,13 +918,15 @@ def watch( pipeline stages are valid after a ``$changeStream`` stage, see the MongoDB documentation on change streams for the supported stages. - `full_document` (optional): The fullDocument to pass as an option - to the ``$changeStream`` stage. Allowed values: 'updateLookup'. - When set to 'updateLookup', the change notification for partial - updates will include both a delta describing the changes to the - document, as well as a copy of the entire document that was - changed from some time after the change occurred. - - `full_document_before_change`: Allowed values: `whenAvailable` and `required`. Change events - may now result in a `fullDocumentBeforeChange` response field. + to the ``$changeStream`` stage. Allowed values: 'updateLookup', + 'whenAvailable', 'required'. When set to 'updateLookup', the + change notification for partial updates will include both a delta + describing the changes to the document, as well as a copy of the + entire document that was changed from some time after the change + occurred. + - `full_document_before_change`: Allowed values: 'whenAvailable' + and 'required'. Change events may now result in a + 'fullDocumentBeforeChange' response field. - `resume_after` (optional): A resume token. If provided, the change stream will start returning changes that occur directly after the operation specified in the resume token. 
A resume token From 4ae93c49378d73e4af127ca65030a517fd814f34 Mon Sep 17 00:00:00 2001 From: Ben Warner Date: Thu, 16 Jun 2022 15:26:27 -0700 Subject: [PATCH 0180/1588] PYTHON-1552 Prevent uploading partial or corrupt GridFS files after an error occurs --- doc/migrate-to-pymongo4.rst | 11 +++++------ gridfs/__init__.py | 30 ++++++++++++------------------ gridfs/grid_file.py | 9 +++++++-- test/test_grid_file.py | 16 ++++++++++++++++ test/test_gridfs.py | 2 +- 5 files changed, 41 insertions(+), 27 deletions(-) diff --git a/doc/migrate-to-pymongo4.rst b/doc/migrate-to-pymongo4.rst index eca479c7c7..5843a2261b 100644 --- a/doc/migrate-to-pymongo4.rst +++ b/doc/migrate-to-pymongo4.rst @@ -879,12 +879,11 @@ and store it with other file metadata. For example:: import hashlib my_db = MongoClient().test fs = GridFSBucket(my_db) - grid_in = fs.open_upload_stream("test_file") - file_data = b'...' - sha356 = hashlib.sha256(file_data).hexdigest() - grid_in.write(file_data) - grid_in.sha356 = sha356 # Set the custom 'sha356' field - grid_in.close() + with fs.open_upload_stream("test_file") as grid_in: + file_data = b'...' + sha356 = hashlib.sha256(file_data).hexdigest() + grid_in.write(file_data) + grid_in.sha356 = sha356 # Set the custom 'sha356' field Note that for large files, the checksum may need to be computed in chunks to avoid the excessive memory needed to load the entire file at once. diff --git a/gridfs/__init__.py b/gridfs/__init__.py index 5675e8f937..29d582cd21 100644 --- a/gridfs/__init__.py +++ b/gridfs/__init__.py @@ -109,11 +109,8 @@ def put(self, data: Any, **kwargs: Any) -> Any: Equivalent to doing:: - try: - f = new_file(**kwargs) + with fs.new_file(**kwargs) as f: f.write(data) - finally: - f.close() `data` can be either an instance of :class:`bytes` or a file-like object providing a :meth:`read` method. If an `encoding` keyword @@ -134,13 +131,10 @@ def put(self, data: Any, **kwargs: Any) -> Any: .. versionchanged:: 3.0 w=0 writes to GridFS are now prohibited. """ - grid_file = GridIn(self.__collection, **kwargs) - try: - grid_file.write(data) - finally: - grid_file.close() - return grid_file._id + with GridIn(self.__collection, **kwargs) as grid_file: + grid_file.write(data) + return grid_file._id def get(self, file_id: Any, session: Optional[ClientSession] = None) -> GridOut: """Get a file from GridFS by ``"_id"``. @@ -528,11 +522,11 @@ def open_upload_stream( my_db = MongoClient().test fs = GridFSBucket(my_db) - grid_in = fs.open_upload_stream( + with fs.open_upload_stream( "test_file", chunk_size_bytes=4, - metadata={"contentType": "text/plain"}) - grid_in.write("data I want to store!") - grid_in.close() # uploaded on close + metadata={"contentType": "text/plain"}) as grid_in: + grid_in.write("data I want to store!") + # uploaded on close Returns an instance of :class:`~gridfs.grid_file.GridIn`. @@ -584,13 +578,13 @@ def open_upload_stream_with_id( my_db = MongoClient().test fs = GridFSBucket(my_db) - grid_in = fs.open_upload_stream_with_id( + with fs.open_upload_stream_with_id( ObjectId(), "test_file", chunk_size_bytes=4, - metadata={"contentType": "text/plain"}) - grid_in.write("data I want to store!") - grid_in.close() # uploaded on close + metadata={"contentType": "text/plain"}) as grid_in: + grid_in.write("data I want to store!") + # uploaded on close Returns an instance of :class:`~gridfs.grid_file.GridIn`. 
diff --git a/gridfs/grid_file.py b/gridfs/grid_file.py index 5d63d5c653..cec7d57a22 100644 --- a/gridfs/grid_file.py +++ b/gridfs/grid_file.py @@ -396,9 +396,14 @@ def __enter__(self) -> "GridIn": def __exit__(self, exc_type: Any, exc_val: Any, exc_tb: Any) -> Any: """Support for the context manager protocol. - Close the file and allow exceptions to propagate. + Close the file if no exceptions occur and allow exceptions to propagate. """ - self.close() + if exc_type is None: + # No exceptions happened. + self.close() + else: + # Something happened, at minimum mark as closed. + object.__setattr__(self, "_closed", True) # propagate exceptions return False diff --git a/test/test_grid_file.py b/test/test_grid_file.py index b9fdeacef7..8b46133a60 100644 --- a/test/test_grid_file.py +++ b/test/test_grid_file.py @@ -675,6 +675,22 @@ def test_context_manager(self): with GridOut(self.db.fs, infile._id) as outfile: self.assertEqual(contents, outfile.read()) + def test_exception_file_non_existence(self): + contents = b"Imagine this is some important data..." + + with self.assertRaises(ConnectionError): + with GridIn(self.db.fs, filename="important") as infile: + infile.write(contents) + raise ConnectionError("Test exception") + + # Expectation: File chunks are written, entry in files doesn't appear. + self.assertEqual( + self.db.fs.chunks.count_documents({"files_id": infile._id}), infile._chunk_number + ) + + self.assertIsNone(self.db.fs.files.find_one({"_id": infile._id})) + self.assertTrue(infile.closed) + def test_prechunked_string(self): def write_me(s, chunk_size): buf = BytesIO(s) diff --git a/test/test_gridfs.py b/test/test_gridfs.py index ec88dcd488..35a574a1d9 100644 --- a/test/test_gridfs.py +++ b/test/test_gridfs.py @@ -540,7 +540,7 @@ def test_gridfs_secondary_lazy(self): # Connects, doesn't create index. self.assertRaises(NoFile, fs.get_last_version) - self.assertRaises(NotPrimaryError, fs.put, "data") + self.assertRaises(NotPrimaryError, fs.put, "data", encoding="utf-8") if __name__ == "__main__": From 3169f1fe314e448dea7126ba85ac0d2cd7ea836e Mon Sep 17 00:00:00 2001 From: Julius Park Date: Tue, 21 Jun 2022 10:46:49 -0700 Subject: [PATCH 0181/1588] PYTHON-3310 Test Failure - query_type must be str or None, not: (#978) --- pymongo/encryption.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/pymongo/encryption.py b/pymongo/encryption.py index 0a8bf69a38..a49cf7df5a 100644 --- a/pymongo/encryption.py +++ b/pymongo/encryption.py @@ -394,7 +394,7 @@ class Algorithm(str, enum.Enum): """ -class QueryType(enum.IntEnum): +class QueryType(str, enum.Enum): """**(BETA)** An enum that defines the supported values for explicit encryption query_type. .. note:: Support for Queryable Encryption is in beta. @@ -403,7 +403,7 @@ class QueryType(enum.IntEnum): .. versionadded:: 4.2 """ - EQUALITY = 1 + EQUALITY = "equality" """Used to encrypt a value for an equality query.""" @@ -599,7 +599,7 @@ def encrypt( key_id: Optional[Binary] = None, key_alt_name: Optional[str] = None, index_key_id: Optional[Binary] = None, - query_type: Optional[int] = None, + query_type: Optional[str] = None, contention_factor: Optional[int] = None, ) -> Binary: """Encrypt a BSON value with a given key and algorithm. @@ -617,7 +617,7 @@ def encrypt( - `key_alt_name`: Identifies a key vault document by 'keyAltName'. - `index_key_id`: **(BETA)** The index key id to use for Queryable Encryption. Must be a :class:`~bson.binary.Binary` with subtype 4 (:attr:`~bson.binary.UUID_SUBTYPE`). 
- - `query_type` (int): **(BETA)** The query type to execute. See + - `query_type` (str): **(BETA)** The query type to execute. See :class:`QueryType` for valid options. - `contention_factor` (int): **(BETA)** The contention factor to use when the algorithm is :attr:`Algorithm.INDEXED`. From 1f7f46faa2d5a20f2f175499b161155f8bf9ae50 Mon Sep 17 00:00:00 2001 From: Julius Park Date: Tue, 21 Jun 2022 15:30:17 -0700 Subject: [PATCH 0182/1588] PYTHON-3282 Add comment option tests for distinct helper (#979) --- test/crud/unified/distinct-comment.json | 178 ++++++++++++++++++++++++ 1 file changed, 178 insertions(+) create mode 100644 test/crud/unified/distinct-comment.json diff --git a/test/crud/unified/distinct-comment.json b/test/crud/unified/distinct-comment.json new file mode 100644 index 0000000000..0669d4f30a --- /dev/null +++ b/test/crud/unified/distinct-comment.json @@ -0,0 +1,178 @@ +{ + "description": "distinct-comment", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "distinct-comment-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "distinct-comment-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ], + "tests": [ + { + "description": "distinct with document comment", + "runOnRequirements": [ + { + "minServerVersion": "4.4.14" + } + ], + "operations": [ + { + "name": "distinct", + "object": "collection0", + "arguments": { + "fieldName": "x", + "filter": {}, + "comment": { + "key": "value" + } + }, + "expectResult": [ 11, 22, 33 ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "distinct": "coll0", + "key": "x", + "query": {}, + "comment": { + "key": "value" + } + }, + "commandName": "distinct", + "databaseName": "distinct-comment-tests" + } + } + ] + } + ] + }, + { + "description": "distinct with string comment", + "runOnRequirements": [ + { + "minServerVersion": "4.4.0" + } + ], + "operations": [ + { + "name": "distinct", + "object": "collection0", + "arguments": { + "fieldName": "x", + "filter": {}, + "comment": "comment" + }, + "expectResult": [ 11, 22, 33 ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "distinct": "coll0", + "key": "x", + "query": {}, + "comment": "comment" + }, + "commandName": "distinct", + "databaseName": "distinct-comment-tests" + } + } + ] + } + ] + }, + { + "description": "distinct with document comment - pre 4.4, server error", + "runOnRequirements": [ + { + "minServerVersion": "3.6.0", + "maxServerVersion": "4.4.13" + } + ], + "operations": [ + { + "name": "distinct", + "object": "collection0", + "arguments": { + "fieldName": "x", + "filter": {}, + "comment": { + "key": "value" + } + }, + "expectError": { + "isClientError": false + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "distinct": "coll0", + "key": "x", + "query": {}, + "comment": { + "key": "value" + } + }, + "commandName": "distinct", + "databaseName": "distinct-comment-tests" + } + } + ] + } + ] + } + ] +} From ae71872fa97b50fd85ef8efe4b8d0b2a362bd6f7 Mon Sep 17 00:00:00 2001 From: Julius 
Park Date: Fri, 24 Jun 2022 10:52:09 -0700 Subject: [PATCH 0183/1588] PYTHON-3297 Test auto decryption occurs after CommandSucceeded events (#980) --- test/test_encryption.py | 78 +++++++++++++++++++++++++++++++++++++++++ 1 file changed, 78 insertions(+) diff --git a/test/test_encryption.py b/test/test_encryption.py index 209308aba6..f2a02780b3 100644 --- a/test/test_encryption.py +++ b/test/test_encryption.py @@ -61,6 +61,7 @@ from pymongo.encryption import Algorithm, ClientEncryption, QueryType from pymongo.encryption_options import _HAVE_PYMONGOCRYPT, AutoEncryptionOpts from pymongo.errors import ( + AutoReconnect, BulkWriteError, ConfigurationError, EncryptionError, @@ -1769,6 +1770,83 @@ def test_case_8(self): self.assertEqual(len(self.topology_listener.results["opened"]), 1) +# https://github.com/mongodb/specifications/blob/master/source/client-side-encryption/tests/README.rst#14-decryption-events +class TestDecryptProse(EncryptionIntegrationTest): + def setUp(self): + self.client = client_context.client + self.client.db.drop_collection("decryption_events") + self.client.keyvault.drop_collection("datakeys") + self.client.keyvault.datakeys.create_index( + "keyAltNames", unique=True, partialFilterExpression={"keyAltNames": {"$exists": True}} + ) + kms_providers_map = {"local": {"key": LOCAL_MASTER_KEY}} + + self.client_encryption = ClientEncryption( + kms_providers_map, "keyvault.datakeys", self.client, CodecOptions() + ) + keyID = self.client_encryption.create_data_key("local") + self.cipher_text = self.client_encryption.encrypt( + "hello", key_id=keyID, algorithm=Algorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic + ) + if self.cipher_text[-1] == 0: + self.malformed_cipher_text = self.cipher_text[:-1] + b"1" + else: + self.malformed_cipher_text = self.cipher_text[:-1] + b"0" + self.malformed_cipher_text = Binary(self.malformed_cipher_text, 6) + opts = AutoEncryptionOpts( + key_vault_namespace="keyvault.datakeys", kms_providers=kms_providers_map + ) + self.listener = AllowListEventListener("aggregate") + self.encrypted_client = MongoClient( + auto_encryption_opts=opts, retryReads=False, event_listeners=[self.listener] + ) + self.addCleanup(self.encrypted_client.close) + + def test_01_command_error(self): + with self.fail_point( + { + "mode": {"times": 1}, + "data": {"errorCode": 123, "failCommands": ["aggregate"]}, + } + ): + with self.assertRaises(OperationFailure): + self.encrypted_client.db.decryption_events.aggregate([]) + self.assertEqual(len(self.listener.results["failed"]), 1) + for event in self.listener.results["failed"]: + self.assertEqual(event.failure["code"], 123) + + def test_02_network_error(self): + with self.fail_point( + { + "mode": {"times": 1}, + "data": {"errorCode": 123, "closeConnection": True, "failCommands": ["aggregate"]}, + } + ): + with self.assertRaises(AutoReconnect): + self.encrypted_client.db.decryption_events.aggregate([]) + self.assertEqual(len(self.listener.results["failed"]), 1) + self.assertEqual(self.listener.results["failed"][0].command_name, "aggregate") + + def test_03_decrypt_error(self): + self.encrypted_client.db.decryption_events.insert_one( + {"encrypted": self.malformed_cipher_text} + ) + with self.assertRaises(EncryptionError): + next(self.encrypted_client.db.decryption_events.aggregate([])) + event = self.listener.results["succeeded"][0] + self.assertEqual(len(self.listener.results["failed"]), 0) + self.assertEqual( + event.reply["cursor"]["firstBatch"][0]["encrypted"], self.malformed_cipher_text + ) + + def 
test_04_decrypt_success(self): + self.encrypted_client.db.decryption_events.insert_one({"encrypted": self.cipher_text}) + next(self.encrypted_client.db.decryption_events.aggregate([])) + event = self.listener.results["succeeded"][0] + self.assertEqual(len(self.listener.results["failed"]), 0) + self.assertEqual(event.reply["cursor"]["firstBatch"][0]["encrypted"], self.cipher_text) + + # https://github.com/mongodb/specifications/blob/master/source/client-side-encryption/tests/README.rst#bypass-spawning-mongocryptd class TestBypassSpawningMongocryptdProse(EncryptionIntegrationTest): @unittest.skipIf( From f2902902613b708c071a7c4a6d78cf23fb0f030b Mon Sep 17 00:00:00 2001 From: Julius Park Date: Wed, 29 Jun 2022 12:18:52 -0700 Subject: [PATCH 0184/1588] PYTHON-3097 Language specific examples for AWS Lambda (#984) --- test/auth_aws/test_auth_aws.py | 32 ++++++++++++++++++++++++++++++++ 1 file changed, 32 insertions(+) diff --git a/test/auth_aws/test_auth_aws.py b/test/auth_aws/test_auth_aws.py index 750d18c4fe..a63e60718c 100644 --- a/test/auth_aws/test_auth_aws.py +++ b/test/auth_aws/test_auth_aws.py @@ -54,5 +54,37 @@ def test_connect_uri(self): client.get_database().test.find_one() +class TestAWSLambdaExamples(unittest.TestCase): + def test_shared_client(self): + # Start AWS Lambda Example 1 + import os + + from pymongo import MongoClient + + client = MongoClient(host=os.environ["MONGODB_URI"]) + + def lambda_handler(event, context): + return client.db.command("ping") + + # End AWS Lambda Example 1 + + def test_IAM_auth(self): + # Start AWS Lambda Example 2 + import os + + from pymongo import MongoClient + + client = MongoClient( + host=os.environ["MONGODB_URI"], + authSource="$external", + authMechanism="MONGODB-AWS", + ) + + def lambda_handler(event, context): + return client.db.command("ping") + + # End AWS Lambda Example 2 + + if __name__ == "__main__": unittest.main() From 6ed38529e81df9cc39693a269574baeca9a35f4b Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Wed, 29 Jun 2022 15:11:13 -0500 Subject: [PATCH 0185/1588] PYTHON-3286 Update expected FLE 2 find payloads in tests (#988) --- test/client-side-encryption/spec/legacy/fle2-Delete.json | 2 +- .../spec/legacy/fle2-EncryptedFields-vs-jsonSchema.json | 2 +- .../spec/legacy/fle2-FindOneAndUpdate.json | 4 ++-- .../spec/legacy/fle2-InsertFind-Indexed.json | 2 +- test/client-side-encryption/spec/legacy/fle2-Update.json | 4 ++-- 5 files changed, 7 insertions(+), 7 deletions(-) diff --git a/test/client-side-encryption/spec/legacy/fle2-Delete.json b/test/client-side-encryption/spec/legacy/fle2-Delete.json index 790e818295..0e3e06396e 100644 --- a/test/client-side-encryption/spec/legacy/fle2-Delete.json +++ b/test/client-side-encryption/spec/legacy/fle2-Delete.json @@ -225,7 +225,7 @@ "encryptedIndexed": { "$eq": { "$binary": { - "base64": "BYkAAAAFZAAgAAAAAPtVteJQAlgb2YMa/+7YWH00sbQPyt7L6Rb8OwBdMmL2BXMAIAAAAAAd44hgVKnEnTFlwNVC14oyc9OZOTspeymusqkRQj57nAVjACAAAAAA19X9v9NlWidu/wR5/C/7WUV54DfL5CkNmT5WYrhxdDcSY20AAAAAAAAAAAAA", + "base64": "BbEAAAAFZAAgAAAAAPtVteJQAlgb2YMa/+7YWH00sbQPyt7L6Rb8OwBdMmL2BXMAIAAAAAAd44hgVKnEnTFlwNVC14oyc9OZOTspeymusqkRQj57nAVjACAAAAAA19X9v9NlWidu/wR5/C/7WUV54DfL5CkNmT5WYrhxdDcFZQAgAAAAAOuac/eRLYakKX6B0vZ1r3QodOQFfjqJD+xlGiPu4/PsEmNtAAAAAAAAAAAAAA==", "subType": "06" } } diff --git a/test/client-side-encryption/spec/legacy/fle2-EncryptedFields-vs-jsonSchema.json b/test/client-side-encryption/spec/legacy/fle2-EncryptedFields-vs-jsonSchema.json index 69abfa7cfb..1d3227ee7f 100644 --- 
a/test/client-side-encryption/spec/legacy/fle2-EncryptedFields-vs-jsonSchema.json +++ b/test/client-side-encryption/spec/legacy/fle2-EncryptedFields-vs-jsonSchema.json @@ -230,7 +230,7 @@ "encryptedIndexed": { "$eq": { "$binary": { - "base64": "BYkAAAAFZAAgAAAAAPGmZcUzdE/FPILvRSyAScGvZparGI2y9rJ/vSBxgCujBXMAIAAAAACi1RjmndKqgnXy7xb22RzUbnZl1sOZRXPOC0KcJkAxmQVjACAAAAAAWuidNu47c9A4Clic3DvFhn1AQJVC+FJtoE5bGZuz6PsSY20AAAAAAAAAAAAA", + "base64": "BbEAAAAFZAAgAAAAAPGmZcUzdE/FPILvRSyAScGvZparGI2y9rJ/vSBxgCujBXMAIAAAAACi1RjmndKqgnXy7xb22RzUbnZl1sOZRXPOC0KcJkAxmQVjACAAAAAAWuidNu47c9A4Clic3DvFhn1AQJVC+FJtoE5bGZuz6PsFZQAgAAAAAOuac/eRLYakKX6B0vZ1r3QodOQFfjqJD+xlGiPu4/PsEmNtAAAAAAAAAAAAAA==", "subType": "06" } } diff --git a/test/client-side-encryption/spec/legacy/fle2-FindOneAndUpdate.json b/test/client-side-encryption/spec/legacy/fle2-FindOneAndUpdate.json index b8088515ca..b31438876f 100644 --- a/test/client-side-encryption/spec/legacy/fle2-FindOneAndUpdate.json +++ b/test/client-side-encryption/spec/legacy/fle2-FindOneAndUpdate.json @@ -230,7 +230,7 @@ "encryptedIndexed": { "$eq": { "$binary": { - "base64": "BYkAAAAFZAAgAAAAAPtVteJQAlgb2YMa/+7YWH00sbQPyt7L6Rb8OwBdMmL2BXMAIAAAAAAd44hgVKnEnTFlwNVC14oyc9OZOTspeymusqkRQj57nAVjACAAAAAA19X9v9NlWidu/wR5/C/7WUV54DfL5CkNmT5WYrhxdDcSY20AAAAAAAAAAAAA", + "base64": "BbEAAAAFZAAgAAAAAPtVteJQAlgb2YMa/+7YWH00sbQPyt7L6Rb8OwBdMmL2BXMAIAAAAAAd44hgVKnEnTFlwNVC14oyc9OZOTspeymusqkRQj57nAVjACAAAAAA19X9v9NlWidu/wR5/C/7WUV54DfL5CkNmT5WYrhxdDcFZQAgAAAAAOuac/eRLYakKX6B0vZ1r3QodOQFfjqJD+xlGiPu4/PsEmNtAAAAAAAAAAAAAA==", "subType": "06" } } @@ -490,7 +490,7 @@ "encryptedIndexed": { "$eq": { "$binary": { - "base64": "BYkAAAAFZAAgAAAAAPtVteJQAlgb2YMa/+7YWH00sbQPyt7L6Rb8OwBdMmL2BXMAIAAAAAAd44hgVKnEnTFlwNVC14oyc9OZOTspeymusqkRQj57nAVjACAAAAAA19X9v9NlWidu/wR5/C/7WUV54DfL5CkNmT5WYrhxdDcSY20AAAAAAAAAAAAA", + "base64": "BbEAAAAFZAAgAAAAAPtVteJQAlgb2YMa/+7YWH00sbQPyt7L6Rb8OwBdMmL2BXMAIAAAAAAd44hgVKnEnTFlwNVC14oyc9OZOTspeymusqkRQj57nAVjACAAAAAA19X9v9NlWidu/wR5/C/7WUV54DfL5CkNmT5WYrhxdDcFZQAgAAAAAOuac/eRLYakKX6B0vZ1r3QodOQFfjqJD+xlGiPu4/PsEmNtAAAAAAAAAAAAAA==", "subType": "06" } } diff --git a/test/client-side-encryption/spec/legacy/fle2-InsertFind-Indexed.json b/test/client-side-encryption/spec/legacy/fle2-InsertFind-Indexed.json index 142cacf2fd..81a549590e 100644 --- a/test/client-side-encryption/spec/legacy/fle2-InsertFind-Indexed.json +++ b/test/client-side-encryption/spec/legacy/fle2-InsertFind-Indexed.json @@ -226,7 +226,7 @@ "encryptedIndexed": { "$eq": { "$binary": { - "base64": "BYkAAAAFZAAgAAAAAPGmZcUzdE/FPILvRSyAScGvZparGI2y9rJ/vSBxgCujBXMAIAAAAACi1RjmndKqgnXy7xb22RzUbnZl1sOZRXPOC0KcJkAxmQVjACAAAAAAWuidNu47c9A4Clic3DvFhn1AQJVC+FJtoE5bGZuz6PsSY20AAAAAAAAAAAAA", + "base64": "BbEAAAAFZAAgAAAAAPGmZcUzdE/FPILvRSyAScGvZparGI2y9rJ/vSBxgCujBXMAIAAAAACi1RjmndKqgnXy7xb22RzUbnZl1sOZRXPOC0KcJkAxmQVjACAAAAAAWuidNu47c9A4Clic3DvFhn1AQJVC+FJtoE5bGZuz6PsFZQAgAAAAAOuac/eRLYakKX6B0vZ1r3QodOQFfjqJD+xlGiPu4/PsEmNtAAAAAAAAAAAAAA==", "subType": "06" } } diff --git a/test/client-side-encryption/spec/legacy/fle2-Update.json b/test/client-side-encryption/spec/legacy/fle2-Update.json index 66a291902a..87830af32d 100644 --- a/test/client-side-encryption/spec/legacy/fle2-Update.json +++ b/test/client-side-encryption/spec/legacy/fle2-Update.json @@ -232,7 +232,7 @@ "encryptedIndexed": { "$eq": { "$binary": { - "base64": "BYkAAAAFZAAgAAAAAPtVteJQAlgb2YMa/+7YWH00sbQPyt7L6Rb8OwBdMmL2BXMAIAAAAAAd44hgVKnEnTFlwNVC14oyc9OZOTspeymusqkRQj57nAVjACAAAAAA19X9v9NlWidu/wR5/C/7WUV54DfL5CkNmT5WYrhxdDcSY20AAAAAAAAAAAAA", + 
"base64": "BbEAAAAFZAAgAAAAAPtVteJQAlgb2YMa/+7YWH00sbQPyt7L6Rb8OwBdMmL2BXMAIAAAAAAd44hgVKnEnTFlwNVC14oyc9OZOTspeymusqkRQj57nAVjACAAAAAA19X9v9NlWidu/wR5/C/7WUV54DfL5CkNmT5WYrhxdDcFZQAgAAAAAOuac/eRLYakKX6B0vZ1r3QodOQFfjqJD+xlGiPu4/PsEmNtAAAAAAAAAAAAAA==", "subType": "06" } } @@ -496,7 +496,7 @@ "encryptedIndexed": { "$eq": { "$binary": { - "base64": "BYkAAAAFZAAgAAAAAPtVteJQAlgb2YMa/+7YWH00sbQPyt7L6Rb8OwBdMmL2BXMAIAAAAAAd44hgVKnEnTFlwNVC14oyc9OZOTspeymusqkRQj57nAVjACAAAAAA19X9v9NlWidu/wR5/C/7WUV54DfL5CkNmT5WYrhxdDcSY20AAAAAAAAAAAAA", + "base64": "BbEAAAAFZAAgAAAAAPtVteJQAlgb2YMa/+7YWH00sbQPyt7L6Rb8OwBdMmL2BXMAIAAAAAAd44hgVKnEnTFlwNVC14oyc9OZOTspeymusqkRQj57nAVjACAAAAAA19X9v9NlWidu/wR5/C/7WUV54DfL5CkNmT5WYrhxdDcFZQAgAAAAAOuac/eRLYakKX6B0vZ1r3QodOQFfjqJD+xlGiPu4/PsEmNtAAAAAAAAAAAAAA==", "subType": "06" } } From bacaf7fa50479b79ceda0ad6f32cbe3ae0d2dec8 Mon Sep 17 00:00:00 2001 From: Julius Park Date: Wed, 29 Jun 2022 13:11:25 -0700 Subject: [PATCH 0186/1588] PYTHON-3309 Explicit Queryable Encryption doc example needs to utilize index_key_Id (#986) --- doc/examples/encryption.rst | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/doc/examples/encryption.rst b/doc/examples/encryption.rst index 0e349f48da..5c3dc0864b 100644 --- a/doc/examples/encryption.rst +++ b/doc/examples/encryption.rst @@ -510,12 +510,12 @@ using an ``encrypted_fields`` mapping, as demonstrated by the following example: db = encrypted_client.test # Create the collection with encrypted fields. - coll = db.create_collection("coll", encrypted_fields=encrypted_fields) + coll = db.create_collection("coll", encryptedFields=encrypted_fields) # Create and encrypt an indexed and unindexed value. val = "encrypted indexed value" unindexed_val = "encrypted unindexed value" - insert_payload_indexed = client_encryption.encrypt(val, Algorithm.INDEXED, indexed_key_id) + insert_payload_indexed = client_encryption.encrypt(val, Algorithm.INDEXED, indexed_key_id, contention_factor=1) insert_payload_unindexed = client_encryption.encrypt(unindexed_val, Algorithm.UNINDEXED, unindexed_key_id) @@ -529,7 +529,7 @@ using an ``encrypted_fields`` mapping, as demonstrated by the following example: # The value of "data_key_id" must be the same as used to encrypt the values # above. find_payload = client_encryption.encrypt( - val, Algorithm.INDEXED, indexed_key_id, query_type=QueryType.EQUALITY + val, Algorithm.INDEXED, indexed_key_id, query_type=QueryType.EQUALITY, contention_factor=1 ) # Find the document we inserted using the encrypted payload. From 06310391185cad2af5310fa75e68d17b1f46522a Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Wed, 29 Jun 2022 16:08:38 -0500 Subject: [PATCH 0187/1588] PYTHON-3319 Require contentionFactor for "Indexed" explicit encryption (#987) --- pymongo/encryption.py | 4 +++- test/test_encryption.py | 13 ++++++++----- 2 files changed, 11 insertions(+), 6 deletions(-) diff --git a/pymongo/encryption.py b/pymongo/encryption.py index a49cf7df5a..096090e4af 100644 --- a/pymongo/encryption.py +++ b/pymongo/encryption.py @@ -620,7 +620,9 @@ def encrypt( - `query_type` (str): **(BETA)** The query type to execute. See :class:`QueryType` for valid options. - `contention_factor` (int): **(BETA)** The contention factor to use - when the algorithm is :attr:`Algorithm.INDEXED`. + when the algorithm is :attr:`Algorithm.INDEXED`. An integer value + *must* be given when the :attr:`Algorithm.INDEXED` algorithm is + used. .. 
note:: `index_key_id`, `query_type`, and `contention_factor` are part of the Queryable Encryption beta. Backwards-breaking changes may be made before the diff --git a/test/test_encryption.py b/test/test_encryption.py index f2a02780b3..458dd68f32 100644 --- a/test/test_encryption.py +++ b/test/test_encryption.py @@ -2098,13 +2098,15 @@ def setUp(self): def test_01_insert_encrypted_indexed_and_find(self): val = "encrypted indexed value" - insert_payload = self.client_encryption.encrypt(val, Algorithm.INDEXED, self.key1_id) + insert_payload = self.client_encryption.encrypt( + val, Algorithm.INDEXED, self.key1_id, contention_factor=0 + ) self.encrypted_client[self.db.name].explicit_encryption.insert_one( {"encryptedIndexed": insert_payload} ) find_payload = self.client_encryption.encrypt( - val, Algorithm.INDEXED, self.key1_id, query_type=QueryType.EQUALITY + val, Algorithm.INDEXED, self.key1_id, query_type=QueryType.EQUALITY, contention_factor=0 ) docs = list( self.encrypted_client[self.db.name].explicit_encryption.find( @@ -2125,9 +2127,8 @@ def test_02_insert_encrypted_indexed_and_find_contention(self): {"encryptedIndexed": insert_payload} ) - # Find without contention_factor non-deterministically returns 0-9 documents. find_payload = self.client_encryption.encrypt( - val, Algorithm.INDEXED, self.key1_id, query_type=QueryType.EQUALITY + val, Algorithm.INDEXED, self.key1_id, query_type=QueryType.EQUALITY, contention_factor=0 ) docs = list( self.encrypted_client[self.db.name].explicit_encryption.find( @@ -2168,7 +2169,9 @@ def test_03_insert_encrypted_unindexed(self): def test_04_roundtrip_encrypted_indexed(self): val = "encrypted indexed value" - payload = self.client_encryption.encrypt(val, Algorithm.INDEXED, self.key1_id) + payload = self.client_encryption.encrypt( + val, Algorithm.INDEXED, self.key1_id, contention_factor=0 + ) decrypted = self.client_encryption.decrypt(payload) self.assertEqual(decrypted, val) From b37b146ac88fc9647c3effd6e031dbf5cbee3cf5 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Thu, 30 Jun 2022 12:35:29 -0500 Subject: [PATCH 0188/1588] PYTHON-3053 Key Management API (#958) --- pymongo/encryption.py | 189 +++++++++++++++- setup.py | 2 +- test/__init__.py | 22 ++ .../spec/unified/addKeyAltName.json | 6 +- ... 
createDataKey-kms_providers-invalid.json} | 17 +- .../{createKey.json => createDataKey.json} | 22 +- .../spec/unified/deleteKey.json | 6 +- .../spec/unified/getKey.json | 6 +- .../spec/unified/getKeyByAltName.json | 6 +- .../spec/unified/getKeys.json | 6 +- .../spec/unified/removeKeyAltName.json | 196 ++++++++++++----- .../spec/unified/rewrapManyDataKey.json | 204 +++++++++++++----- test/test_encryption.py | 92 +++++--- ...ntEncryptionOpts-additionalProperties.json | 30 +++ ...ncryptionOpts-keyVaultClient-required.json | 23 ++ ...entEncryptionOpts-keyVaultClient-type.json | 29 +++ ...yptionOpts-keyVaultNamespace-required.json | 28 +++ ...EncryptionOpts-keyVaultNamespace-type.json | 29 +++ ...pts-kmsProviders-additionalProperties.json | 29 +++ ...kmsProviders-aws-additionalProperties.json | 31 +++ ...tEncryptionOpts-kmsProviders-aws-type.json | 29 +++ ...sProviders-azure-additionalProperties.json | 31 +++ ...ncryptionOpts-kmsProviders-azure-type.json | 29 +++ ...kmsProviders-gcp-additionalProperties.json | 31 +++ ...tEncryptionOpts-kmsProviders-gcp-type.json | 29 +++ ...msProviders-kmip-additionalProperties.json | 31 +++ ...EncryptionOpts-kmsProviders-kmip-type.json | 29 +++ ...sProviders-local-additionalProperties.json | 31 +++ ...ncryptionOpts-kmsProviders-local-type.json | 29 +++ ...tEncryptionOpts-kmsProviders-required.json | 26 +++ ...lientEncryptionOpts-kmsProviders-type.json | 27 +++ ...cryptionOpts-tlsOptions_not_supported.json | 30 +++ ...clientEncryption-additionalProperties.json | 30 +++ ...ryption-clientEncryptionOpts-required.json | 17 ++ ...tEncryption-clientEncryptionOpts-type.json | 18 ++ .../entity-clientEncryption-id-required.json | 28 +++ .../entity-clientEncryption-id-type.json | 29 +++ .../invalid/runOnRequirement-csfle-type.json | 15 ++ ...Providers-missing_aws_kms_credentials.json | 36 ++++ ...oviders-missing_azure_kms_credentials.json | 36 ++++ ...Providers-missing_gcp_kms_credentials.json | 36 ++++ .../valid-fail/kmsProviders-no_kms.json | 32 +++ .../valid-fail/operation-unsupported.json | 22 ++ .../collectionData-createOptions.json | 3 +- ...kmsProviders-explicit_kms_credentials.json | 52 +++++ ...Providers-mixed_kms_credential_fields.json | 54 +++++ ...Providers-placeholder_kms_credentials.json | 70 ++++++ .../kmsProviders-unconfigured_kms.json | 39 ++++ test/unified_format.py | 103 ++++++++- 49 files changed, 1780 insertions(+), 165 deletions(-) rename test/client-side-encryption/spec/unified/{createKey-kms_providers-invalid.json => createDataKey-kms_providers-invalid.json} (86%) rename test/client-side-encryption/spec/unified/{createKey.json => createDataKey.json} (97%) create mode 100644 test/unified-test-format/invalid/clientEncryptionOpts-additionalProperties.json create mode 100644 test/unified-test-format/invalid/clientEncryptionOpts-keyVaultClient-required.json create mode 100644 test/unified-test-format/invalid/clientEncryptionOpts-keyVaultClient-type.json create mode 100644 test/unified-test-format/invalid/clientEncryptionOpts-keyVaultNamespace-required.json create mode 100644 test/unified-test-format/invalid/clientEncryptionOpts-keyVaultNamespace-type.json create mode 100644 test/unified-test-format/invalid/clientEncryptionOpts-kmsProviders-additionalProperties.json create mode 100644 test/unified-test-format/invalid/clientEncryptionOpts-kmsProviders-aws-additionalProperties.json create mode 100644 test/unified-test-format/invalid/clientEncryptionOpts-kmsProviders-aws-type.json create mode 100644 
test/unified-test-format/invalid/clientEncryptionOpts-kmsProviders-azure-additionalProperties.json create mode 100644 test/unified-test-format/invalid/clientEncryptionOpts-kmsProviders-azure-type.json create mode 100644 test/unified-test-format/invalid/clientEncryptionOpts-kmsProviders-gcp-additionalProperties.json create mode 100644 test/unified-test-format/invalid/clientEncryptionOpts-kmsProviders-gcp-type.json create mode 100644 test/unified-test-format/invalid/clientEncryptionOpts-kmsProviders-kmip-additionalProperties.json create mode 100644 test/unified-test-format/invalid/clientEncryptionOpts-kmsProviders-kmip-type.json create mode 100644 test/unified-test-format/invalid/clientEncryptionOpts-kmsProviders-local-additionalProperties.json create mode 100644 test/unified-test-format/invalid/clientEncryptionOpts-kmsProviders-local-type.json create mode 100644 test/unified-test-format/invalid/clientEncryptionOpts-kmsProviders-required.json create mode 100644 test/unified-test-format/invalid/clientEncryptionOpts-kmsProviders-type.json create mode 100644 test/unified-test-format/invalid/clientEncryptionOpts-tlsOptions_not_supported.json create mode 100644 test/unified-test-format/invalid/entity-clientEncryption-additionalProperties.json create mode 100644 test/unified-test-format/invalid/entity-clientEncryption-clientEncryptionOpts-required.json create mode 100644 test/unified-test-format/invalid/entity-clientEncryption-clientEncryptionOpts-type.json create mode 100644 test/unified-test-format/invalid/entity-clientEncryption-id-required.json create mode 100644 test/unified-test-format/invalid/entity-clientEncryption-id-type.json create mode 100644 test/unified-test-format/invalid/runOnRequirement-csfle-type.json create mode 100644 test/unified-test-format/valid-fail/kmsProviders-missing_aws_kms_credentials.json create mode 100644 test/unified-test-format/valid-fail/kmsProviders-missing_azure_kms_credentials.json create mode 100644 test/unified-test-format/valid-fail/kmsProviders-missing_gcp_kms_credentials.json create mode 100644 test/unified-test-format/valid-fail/kmsProviders-no_kms.json create mode 100644 test/unified-test-format/valid-fail/operation-unsupported.json create mode 100644 test/unified-test-format/valid-pass/kmsProviders-explicit_kms_credentials.json create mode 100644 test/unified-test-format/valid-pass/kmsProviders-mixed_kms_credential_fields.json create mode 100644 test/unified-test-format/valid-pass/kmsProviders-placeholder_kms_credentials.json create mode 100644 test/unified-test-format/valid-pass/kmsProviders-unconfigured_kms.json diff --git a/pymongo/encryption.py b/pymongo/encryption.py index 096090e4af..b792a4487e 100644 --- a/pymongo/encryption.py +++ b/pymongo/encryption.py @@ -17,7 +17,6 @@ import contextlib import enum import socket -import uuid import weakref from typing import Any, Mapping, Optional, Sequence @@ -40,6 +39,7 @@ from bson.raw_bson import DEFAULT_RAW_BSON_OPTIONS, RawBSONDocument, _inflate_bson from bson.son import SON from pymongo import _csot +from pymongo.cursor import Cursor from pymongo.daemon import _spawn_daemon from pymongo.encryption_options import AutoEncryptionOpts from pymongo.errors import ( @@ -50,8 +50,10 @@ ) from pymongo.mongo_client import MongoClient from pymongo.network import BLOCKING_IO_ERRORS +from pymongo.operations import UpdateOne from pymongo.pool import PoolOptions, _configured_socket from pymongo.read_concern import ReadConcern +from pymongo.results import BulkWriteResult, DeleteResult from pymongo.ssl_support import 
get_ssl_context from pymongo.uri_parser import parse_host from pymongo.write_concern import WriteConcern @@ -60,10 +62,11 @@ _KMS_CONNECT_TIMEOUT = 10 # TODO: CDRIVER-3262 will define this value. _MONGOCRYPTD_TIMEOUT_MS = 10000 + _DATA_KEY_OPTS: CodecOptions = CodecOptions(document_class=SON, uuid_representation=STANDARD) # Use RawBSONDocument codec options to avoid needlessly decoding # documents from the key vault. -_KEY_VAULT_OPTS = CodecOptions(document_class=RawBSONDocument, uuid_representation=STANDARD) +_KEY_VAULT_OPTS = CodecOptions(document_class=RawBSONDocument) @contextlib.contextmanager @@ -225,11 +228,11 @@ def insert_data_key(self, data_key): """ raw_doc = RawBSONDocument(data_key, _KEY_VAULT_OPTS) data_key_id = raw_doc.get("_id") - if not isinstance(data_key_id, uuid.UUID): - raise TypeError("data_key _id must be a UUID") + if not isinstance(data_key_id, Binary) or data_key_id.subtype != UUID_SUBTYPE: + raise TypeError("data_key _id must be Binary with a UUID subtype") self.key_vault_coll.insert_one(raw_doc) - return Binary(data_key_id.bytes, subtype=UUID_SUBTYPE) + return data_key_id def bson_encode(self, doc): """Encode a document to BSON. @@ -256,6 +259,30 @@ def close(self): self.mongocryptd_client = None +class RewrapManyDataKeyResult(object): + def __init__(self, bulk_write_result: Optional[BulkWriteResult] = None) -> None: + """Result object returned by a ``rewrap_many_data_key`` operation. + + :Parameters: + - `bulk_write_result`: The result of the bulk write operation used to + update the key vault collection with one or more rewrapped data keys. + If ``rewrap_many_data_key()`` does not find any matching keys to + rewrap, no bulk write operation will be executed and this field will + be ``None``. + """ + self._bulk_write_result = bulk_write_result + + @property + def bulk_write_result(self) -> Optional[BulkWriteResult]: + """The result of the bulk write operation used to update the key vault + collection with one or more rewrapped data keys. If + ``rewrap_many_data_key()`` does not find any matching keys to rewrap, + no bulk write operation will be executed and this field will be + ``None``. + """ + return self._bulk_write_result + + class _Encrypter(object): """Encrypts and decrypts MongoDB commands. @@ -514,12 +541,15 @@ def __init__( self._encryption = ExplicitEncrypter( self._io_callbacks, MongoCryptOptions(kms_providers, None) ) + # Use the same key vault collection as the callback. + self._key_vault_coll = self._io_callbacks.key_vault_coll def create_data_key( self, kms_provider: str, master_key: Optional[Mapping[str, Any]] = None, key_alt_names: Optional[Sequence[str]] = None, + key_material: Optional[bytes] = None, ) -> Binary: """Create and insert a new data key into the key vault collection. @@ -580,16 +610,24 @@ def create_data_key( # reference the key with the alternate name client_encryption.encrypt("457-55-5462", keyAltName="name1", algorithm=Algorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Random) + - `key_material` (optional): Sets the custom key material to be used + by the data key for encryption and decryption. :Returns: The ``_id`` of the created data key document as a :class:`~bson.binary.Binary` with subtype :data:`~bson.binary.UUID_SUBTYPE`. + + .. versionchanged:: 4.2 + Added the `key_material` parameter. 
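+ + For example, a minimal sketch (assuming ``client_encryption`` is a :class:`ClientEncryption` instance, and assuming the custom key material must be exactly 96 bytes):: + + import os + + key_material = os.urandom(96) + key_id = client_encryption.create_data_key( + "local", key_material=key_material)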
""" self._check_closed() with _wrap_encryption_errors(): return self._encryption.create_data_key( - kms_provider, master_key=master_key, key_alt_names=key_alt_names + kms_provider, + master_key=master_key, + key_alt_names=key_alt_names, + key_material=key_material, ) def encrypt( @@ -676,6 +714,145 @@ def decrypt(self, value: Binary) -> Any: decrypted_doc = self._encryption.decrypt(doc) return decode(decrypted_doc, codec_options=self._codec_options)["v"] + def get_key(self, id: Binary) -> Optional[RawBSONDocument]: + """Get a data key by id. + + :Parameters: + - `id` (Binary): The UUID of a key a which must be a + :class:`~bson.binary.Binary` with subtype 4 ( + :attr:`~bson.binary.UUID_SUBTYPE`). + + :Returns: + The key document. + """ + self._check_closed() + return self._key_vault_coll.find_one({"_id": id}) + + def get_keys(self) -> Cursor[RawBSONDocument]: + """Get all of the data keys. + + :Returns: + An instance of :class:`~pymongo.cursor.Cursor` over the data key + documents. + """ + self._check_closed() + return self._key_vault_coll.find({}) + + def delete_key(self, id: Binary) -> DeleteResult: + """Delete a key document in the key vault collection that has the given ``key_id``. + + :Parameters: + - `id` (Binary): The UUID of a key a which must be a + :class:`~bson.binary.Binary` with subtype 4 ( + :attr:`~bson.binary.UUID_SUBTYPE`). + + :Returns: + The delete result. + """ + self._check_closed() + return self._key_vault_coll.delete_one({"_id": id}) + + def add_key_alt_name(self, id: Binary, key_alt_name: str) -> Any: + """Add ``key_alt_name`` to the set of alternate names in the key document with UUID ``key_id``. + + :Parameters: + - ``id``: The UUID of a key a which must be a + :class:`~bson.binary.Binary` with subtype 4 ( + :attr:`~bson.binary.UUID_SUBTYPE`). + - ``key_alt_name``: The key alternate name to add. + + :Returns: + The previous version of the key document. + """ + self._check_closed() + update = {"$addToSet": {"keyAltNames": key_alt_name}} + return self._key_vault_coll.find_one_and_update({"_id": id}, update) + + def get_key_by_alt_name(self, key_alt_name: str) -> Optional[RawBSONDocument]: + """Get a key document in the key vault collection that has the given ``key_alt_name``. + + :Parameters: + - `key_alt_name`: (str): The key alternate name of the key to get. + + :Returns: + The key document. + """ + self._check_closed() + return self._key_vault_coll.find_one({"keyAltNames": key_alt_name}) + + def remove_key_alt_name(self, id: Binary, key_alt_name: str) -> Optional[RawBSONDocument]: + """Remove ``key_alt_name`` from the set of keyAltNames in the key document with UUID ``id``. + + Also removes the ``keyAltNames`` field from the key document if it would otherwise be empty. + + :Parameters: + - ``id``: The UUID of a key a which must be a + :class:`~bson.binary.Binary` with subtype 4 ( + :attr:`~bson.binary.UUID_SUBTYPE`). + - ``key_alt_name``: The key alternate name to remove. + + :Returns: + Returns the previous version of the key document. 
+ """ + self._check_closed() + pipeline = [ + { + "$set": { + "keyAltNames": { + "$cond": [ + {"$eq": ["$keyAltNames", [key_alt_name]]}, + "$$REMOVE", + { + "$filter": { + "input": "$keyAltNames", + "cond": {"$ne": ["$$this", key_alt_name]}, + } + }, + ] + } + } + } + ] + return self._key_vault_coll.find_one_and_update({"_id": id}, pipeline) + + def rewrap_many_data_key( + self, + filter: Mapping[str, Any], + provider: Optional[str] = None, + master_key: Optional[Mapping[str, Any]] = None, + ) -> RewrapManyDataKeyResult: + """Decrypts and encrypts all matching data keys in the key vault with a possibly new `master_key` value. + + :Parameters: + - `filter`: A document used to filter the data keys. + - `provider`: The new KMS provider to use to encrypt the data keys, + or ``None`` to use the current KMS provider(s). + - ``master_key``: The master key fields corresponding to the new KMS + provider when ``provider`` is not ``None``. + + :Returns: + A :class:`RewrapManyDataKeyResult`. + """ + self._check_closed() + with _wrap_encryption_errors(): + raw_result = self._encryption.rewrap_many_data_key(filter, provider, master_key) + if raw_result is None: + return RewrapManyDataKeyResult() + + raw_doc = RawBSONDocument(raw_result, DEFAULT_RAW_BSON_OPTIONS) + replacements = [] + for key in raw_doc["v"]: + update_model = { + "$set": {"keyMaterial": key["keyMaterial"], "masterKey": key["masterKey"]}, + "$currentDate": {"updateDate": True}, + } + op = UpdateOne({"_id": key["_id"]}, update_model) + replacements.append(op) + if not replacements: + return RewrapManyDataKeyResult() + result = self._key_vault_coll.bulk_write(replacements) + return RewrapManyDataKeyResult(result) + def __enter__(self) -> "ClientEncryption": return self diff --git a/setup.py b/setup.py index c6b32b9fba..a61f56c3f6 100755 --- a/setup.py +++ b/setup.py @@ -277,7 +277,7 @@ def build_extension(self, ext): extras_require = { "encryption": [ - "pymongocrypt@git+ssh://git@github.com/mongodb/libmongocrypt.git@pymongocrypt-1.3.0b0#subdirectory=bindings/python" + "pymongocrypt@git+ssh://git@github.com/mongodb/libmongocrypt.git@161dbc8ae#subdirectory=bindings/python" ], "ocsp": pyopenssl_reqs, "snappy": ["python-snappy"], diff --git a/test/__init__.py b/test/__init__.py index 64c812c112..4ecc3c9e9e 100644 --- a/test/__init__.py +++ b/test/__init__.py @@ -15,6 +15,7 @@ """Test suite for pymongo, bson, and gridfs. """ +import base64 import gc import os import socket @@ -116,6 +117,27 @@ COMPRESSORS = COMPRESSORS or "zlib" +# Shared KMS data. 
+LOCAL_MASTER_KEY = base64.b64decode( + b"Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ" + b"5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk" +) +AWS_CREDS = { + "accessKeyId": os.environ.get("FLE_AWS_KEY", ""), + "secretAccessKey": os.environ.get("FLE_AWS_SECRET", ""), +} +AZURE_CREDS = { + "tenantId": os.environ.get("FLE_AZURE_TENANTID", ""), + "clientId": os.environ.get("FLE_AZURE_CLIENTID", ""), + "clientSecret": os.environ.get("FLE_AZURE_CLIENTSECRET", ""), +} +GCP_CREDS = { + "email": os.environ.get("FLE_GCP_EMAIL", ""), + "privateKey": os.environ.get("FLE_GCP_PRIVATEKEY", ""), +} +KMIP_CREDS = {"endpoint": os.environ.get("FLE_KMIP_ENDPOINT", "localhost:5698")} + + def is_server_resolvable(): """Returns True if 'server' is resolvable.""" socket_timeout = socket.getdefaulttimeout() diff --git a/test/client-side-encryption/spec/unified/addKeyAltName.json b/test/client-side-encryption/spec/unified/addKeyAltName.json index 7dc371143b..8b6c174cbc 100644 --- a/test/client-side-encryption/spec/unified/addKeyAltName.json +++ b/test/client-side-encryption/spec/unified/addKeyAltName.json @@ -22,7 +22,11 @@ "keyVaultClient": "client0", "keyVaultNamespace": "keyvault.datakeys", "kmsProviders": { - "local": {} + "local": { + "key": { + "$$placeholder": 1 + } + } } } } diff --git a/test/client-side-encryption/spec/unified/createKey-kms_providers-invalid.json b/test/client-side-encryption/spec/unified/createDataKey-kms_providers-invalid.json similarity index 86% rename from test/client-side-encryption/spec/unified/createKey-kms_providers-invalid.json rename to test/client-side-encryption/spec/unified/createDataKey-kms_providers-invalid.json index b2c8d83e05..16cf6ca70d 100644 --- a/test/client-side-encryption/spec/unified/createKey-kms_providers-invalid.json +++ b/test/client-side-encryption/spec/unified/createDataKey-kms_providers-invalid.json @@ -1,5 +1,5 @@ { - "description": "createKey-provider-invalid", + "description": "createDataKey-provider-invalid", "schemaVersion": "1.8", "runOnRequirements": [ { @@ -24,7 +24,14 @@ "keyVaultClient": "client0", "keyVaultNamespace": "keyvault.datakeys", "kmsProviders": { - "aws": {} + "aws": { + "accessKeyId": { + "$$placeholder": 1 + }, + "secretAccessKey": { + "$$placeholder": 1 + } + } } } } @@ -35,7 +42,7 @@ "description": "create data key without required master key fields", "operations": [ { - "name": "createKey", + "name": "createDataKey", "object": "clientEncryption0", "arguments": { "kmsProvider": "aws", @@ -59,7 +66,7 @@ "description": "create data key with invalid master key field", "operations": [ { - "name": "createKey", + "name": "createDataKey", "object": "clientEncryption0", "arguments": { "kmsProvider": "local", @@ -85,7 +92,7 @@ "description": "create data key with invalid master key", "operations": [ { - "name": "createKey", + "name": "createDataKey", "object": "clientEncryption0", "arguments": { "kmsProvider": "aws", diff --git a/test/client-side-encryption/spec/unified/createKey.json b/test/client-side-encryption/spec/unified/createDataKey.json similarity index 97% rename from test/client-side-encryption/spec/unified/createKey.json rename to test/client-side-encryption/spec/unified/createDataKey.json index adb3fff20d..110c726f9a 100644 --- a/test/client-side-encryption/spec/unified/createKey.json +++ b/test/client-side-encryption/spec/unified/createDataKey.json @@ -1,5 +1,5 @@ { - "description": "createKey", + "description": "createDataKey", "schemaVersion": "1.8", "runOnRequirements": [ { @@ -90,7 
+90,7 @@ "description": "create data key with AWS KMS provider", "operations": [ { - "name": "createKey", + "name": "createDataKey", "object": "clientEncryption0", "arguments": { "kmsProvider": "aws", @@ -153,7 +153,7 @@ "description": "create datakey with Azure KMS provider", "operations": [ { - "name": "createKey", + "name": "createDataKey", "object": "clientEncryption0", "arguments": { "kmsProvider": "azure", @@ -216,7 +216,7 @@ "description": "create datakey with GCP KMS provider", "operations": [ { - "name": "createKey", + "name": "createDataKey", "object": "clientEncryption0", "arguments": { "kmsProvider": "gcp", @@ -283,7 +283,7 @@ "description": "create datakey with KMIP KMS provider", "operations": [ { - "name": "createKey", + "name": "createDataKey", "object": "clientEncryption0", "arguments": { "kmsProvider": "kmip" @@ -341,7 +341,7 @@ "description": "create datakey with local KMS provider", "operations": [ { - "name": "createKey", + "name": "createDataKey", "object": "clientEncryption0", "arguments": { "kmsProvider": "local" @@ -396,7 +396,7 @@ "description": "create datakey with no keyAltName", "operations": [ { - "name": "createKey", + "name": "createDataKey", "object": "clientEncryption0", "arguments": { "kmsProvider": "local", @@ -457,7 +457,7 @@ "description": "create datakey with single keyAltName", "operations": [ { - "name": "createKey", + "name": "createDataKey", "object": "clientEncryption0", "arguments": { "kmsProvider": "local", @@ -520,7 +520,7 @@ "description": "create datakey with multiple keyAltNames", "operations": [ { - "name": "createKey", + "name": "createDataKey", "object": "clientEncryption0", "arguments": { "kmsProvider": "local", @@ -619,7 +619,7 @@ "description": "create datakey with custom key material", "operations": [ { - "name": "createKey", + "name": "createDataKey", "object": "clientEncryption0", "arguments": { "kmsProvider": "local", @@ -682,7 +682,7 @@ "description": "create datakey with invalid custom key material (too short)", "operations": [ { - "name": "createKey", + "name": "createDataKey", "object": "clientEncryption0", "arguments": { "kmsProvider": "local", diff --git a/test/client-side-encryption/spec/unified/deleteKey.json b/test/client-side-encryption/spec/unified/deleteKey.json index a3b2f98a50..3a10fb082f 100644 --- a/test/client-side-encryption/spec/unified/deleteKey.json +++ b/test/client-side-encryption/spec/unified/deleteKey.json @@ -22,7 +22,11 @@ "keyVaultClient": "client0", "keyVaultNamespace": "keyvault.datakeys", "kmsProviders": { - "local": {} + "local": { + "key": { + "$$placeholder": 1 + } + } } } } diff --git a/test/client-side-encryption/spec/unified/getKey.json b/test/client-side-encryption/spec/unified/getKey.json index f2f2c68113..6a7269b2ca 100644 --- a/test/client-side-encryption/spec/unified/getKey.json +++ b/test/client-side-encryption/spec/unified/getKey.json @@ -22,7 +22,11 @@ "keyVaultClient": "client0", "keyVaultNamespace": "keyvault.datakeys", "kmsProviders": { - "local": {} + "local": { + "key": { + "$$placeholder": 1 + } + } } } } diff --git a/test/client-side-encryption/spec/unified/getKeyByAltName.json b/test/client-side-encryption/spec/unified/getKeyByAltName.json index 18ed2e1943..f94459bbd8 100644 --- a/test/client-side-encryption/spec/unified/getKeyByAltName.json +++ b/test/client-side-encryption/spec/unified/getKeyByAltName.json @@ -22,7 +22,11 @@ "keyVaultClient": "client0", "keyVaultNamespace": "keyvault.datakeys", "kmsProviders": { - "local": {} + "local": { + "key": { + "$$placeholder": 1 + } + 
} } } } diff --git a/test/client-side-encryption/spec/unified/getKeys.json b/test/client-side-encryption/spec/unified/getKeys.json index bd07af3804..d944712357 100644 --- a/test/client-side-encryption/spec/unified/getKeys.json +++ b/test/client-side-encryption/spec/unified/getKeys.json @@ -87,7 +87,7 @@ "description": "getKeys with single key documents", "operations": [ { - "name": "createKey", + "name": "createDataKey", "object": "clientEncryption0", "arguments": { "kmsProvider": "local", @@ -160,7 +160,7 @@ "description": "getKeys with many key documents", "operations": [ { - "name": "createKey", + "name": "createDataKey", "object": "clientEncryption0", "arguments": { "kmsProvider": "local" @@ -170,7 +170,7 @@ } }, { - "name": "createKey", + "name": "createDataKey", "object": "clientEncryption0", "arguments": { "kmsProvider": "local" diff --git a/test/client-side-encryption/spec/unified/removeKeyAltName.json b/test/client-side-encryption/spec/unified/removeKeyAltName.json index f94d9b02dc..bef13c87de 100644 --- a/test/client-side-encryption/spec/unified/removeKeyAltName.json +++ b/test/client-side-encryption/spec/unified/removeKeyAltName.json @@ -22,7 +22,11 @@ "keyVaultClient": "client0", "keyVaultNamespace": "keyvault.datakeys", "kmsProviders": { - "local": {} + "local": { + "key": { + "$$placeholder": 1 + } + } } } } @@ -118,11 +122,36 @@ } } }, - "update": { - "$pull": { - "keyAltNames": "does_not_exist" + "update": [ + { + "$set": { + "keyAltNames": { + "$cond": [ + { + "$eq": [ + "$keyAltNames", + [ + "does_not_exist" + ] + ] + }, + "$$REMOVE", + { + "$filter": { + "input": "$keyAltNames", + "cond": { + "$ne": [ + "$$this", + "does_not_exist" + ] + } + } + } + ] + } + } } - }, + ], "writeConcern": { "w": "majority" } @@ -239,11 +268,36 @@ } } }, - "update": { - "$pull": { - "keyAltNames": "does_not_exist" + "update": [ + { + "$set": { + "keyAltNames": { + "$cond": [ + { + "$eq": [ + "$keyAltNames", + [ + "does_not_exist" + ] + ] + }, + "$$REMOVE", + { + "$filter": { + "input": "$keyAltNames", + "cond": { + "$ne": [ + "$$this", + "does_not_exist" + ] + } + } + } + ] + } + } } - }, + ], "writeConcern": { "w": "majority" } @@ -378,11 +432,36 @@ } } }, - "update": { - "$pull": { - "keyAltNames": "alternate_name" + "update": [ + { + "$set": { + "keyAltNames": { + "$cond": [ + { + "$eq": [ + "$keyAltNames", + [ + "alternate_name" + ] + ] + }, + "$$REMOVE", + { + "$filter": { + "input": "$keyAltNames", + "cond": { + "$ne": [ + "$$this", + "alternate_name" + ] + } + } + } + ] + } + } } - }, + ], "writeConcern": { "w": "majority" } @@ -501,11 +580,36 @@ } } }, - "update": { - "$pull": { - "keyAltNames": "alternate_name" + "update": [ + { + "$set": { + "keyAltNames": { + "$cond": [ + { + "$eq": [ + "$keyAltNames", + [ + "alternate_name" + ] + ] + }, + "$$REMOVE", + { + "$filter": { + "input": "$keyAltNames", + "cond": { + "$ne": [ + "$$this", + "alternate_name" + ] + } + } + } + ] + } + } } - }, + ], "writeConcern": { "w": "majority" } @@ -525,42 +629,36 @@ } } }, - "update": { - "$pull": { - "keyAltNames": "local_key" - } - }, - "writeConcern": { - "w": "majority" - } - } - } - }, - { - "commandStartedEvent": { - "databaseName": "keyvault", - "command": { - "update": "datakeys", - "updates": [ + "update": [ { - "q": { - "_id": { - "$binary": { - "base64": "bG9jYWxrZXlsb2NhbGtleQ==", - "subType": "04" - } - } - }, - "u": { - "$unset": { - "keyAltNames": true + "$set": { + "keyAltNames": { + "$cond": [ + { + "$eq": [ + "$keyAltNames", + [ + "local_key" + ] + ] + }, + "$$REMOVE", + { + 
"$filter": { + "input": "$keyAltNames", + "cond": { + "$ne": [ + "$$this", + "local_key" + ] + } + } + } + ] } } } - ], - "writeConcern": { - "w": "majority" - } + ] } } } diff --git a/test/client-side-encryption/spec/unified/rewrapManyDataKey.json b/test/client-side-encryption/spec/unified/rewrapManyDataKey.json index ed7568ca4d..7e3abb1274 100644 --- a/test/client-side-encryption/spec/unified/rewrapManyDataKey.json +++ b/test/client-side-encryption/spec/unified/rewrapManyDataKey.json @@ -266,7 +266,9 @@ } }, "expectResult": { - "bulkWriteResult": {} + "bulkWriteResult": { + "$$exists": false + } } } ], @@ -372,8 +374,12 @@ "updateDate": true } }, - "upsert": false, - "multi": false + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } }, { "q": { @@ -396,8 +402,12 @@ "updateDate": true } }, - "upsert": false, - "multi": false + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } }, { "q": { @@ -420,8 +430,12 @@ "updateDate": true } }, - "upsert": false, - "multi": false + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } }, { "q": { @@ -444,8 +458,12 @@ "updateDate": true } }, - "upsert": false, - "multi": false + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } } ], "writeConcern": { @@ -538,8 +556,12 @@ "updateDate": true } }, - "upsert": false, - "multi": false + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } }, { "q": { @@ -562,8 +584,12 @@ "updateDate": true } }, - "upsert": false, - "multi": false + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } }, { "q": { @@ -586,8 +612,12 @@ "updateDate": true } }, - "upsert": false, - "multi": false + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } }, { "q": { @@ -610,8 +640,12 @@ "updateDate": true } }, - "upsert": false, - "multi": false + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } } ], "writeConcern": { @@ -708,8 +742,12 @@ "updateDate": true } }, - "upsert": false, - "multi": false + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } }, { "q": { @@ -734,8 +772,12 @@ "updateDate": true } }, - "upsert": false, - "multi": false + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } }, { "q": { @@ -760,8 +802,12 @@ "updateDate": true } }, - "upsert": false, - "multi": false + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } }, { "q": { @@ -786,8 +832,12 @@ "updateDate": true } }, - "upsert": false, - "multi": false + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } } ], "writeConcern": { @@ -877,8 +927,12 @@ "updateDate": true } }, - "upsert": false, - "multi": false + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } }, { "q": { @@ -902,8 +956,12 @@ "updateDate": true } }, - "upsert": false, - "multi": false + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } }, { "q": { @@ -927,8 +985,12 @@ "updateDate": true } }, - "upsert": false, - "multi": false + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } }, { "q": { @@ -952,8 +1014,12 @@ "updateDate": true } }, - "upsert": false, - "multi": false + "multi": { + "$$unsetOrMatches": false + }, + 
"upsert": { + "$$unsetOrMatches": false + } } ], "writeConcern": { @@ -1040,8 +1106,12 @@ "updateDate": true } }, - "upsert": false, - "multi": false + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } }, { "q": { @@ -1062,8 +1132,12 @@ "updateDate": true } }, - "upsert": false, - "multi": false + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } }, { "q": { @@ -1084,8 +1158,12 @@ "updateDate": true } }, - "upsert": false, - "multi": false + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } }, { "q": { @@ -1106,8 +1184,12 @@ "updateDate": true } }, - "upsert": false, - "multi": false + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } } ], "writeConcern": { @@ -1262,8 +1344,12 @@ "updateDate": true } }, - "upsert": false, - "multi": false + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } }, { "q": { @@ -1284,8 +1370,12 @@ "updateDate": true } }, - "upsert": false, - "multi": false + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } }, { "q": { @@ -1306,8 +1396,12 @@ "updateDate": true } }, - "upsert": false, - "multi": false + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } }, { "q": { @@ -1328,8 +1422,12 @@ "updateDate": true } }, - "upsert": false, - "multi": false + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } }, { "q": { @@ -1350,8 +1448,12 @@ "updateDate": true } }, - "upsert": false, - "multi": false + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } } ], "writeConcern": { diff --git a/test/test_encryption.py b/test/test_encryption.py index 458dd68f32..c3ba61d6e4 100644 --- a/test/test_encryption.py +++ b/test/test_encryption.py @@ -31,14 +31,20 @@ sys.path[0:0] = [""] from test import ( + AWS_CREDS, + AZURE_CREDS, CA_PEM, CLIENT_PEM, + GCP_CREDS, + KMIP_CREDS, + LOCAL_MASTER_KEY, IntegrationTest, PyMongoTestCase, client_context, unittest, ) from test.test_bulk import BulkTestBase +from test.unified_format import generate_test_classes from test.utils import ( AllowListEventListener, OvertCommandListener, @@ -64,6 +70,7 @@ AutoReconnect, BulkWriteError, ConfigurationError, + DuplicateKeyError, EncryptionError, InvalidOperation, OperationFailure, @@ -74,14 +81,13 @@ from pymongo.operations import InsertOne, ReplaceOne, UpdateOne from pymongo.write_concern import WriteConcern +KMS_PROVIDERS = {"local": {"key": b"\x00" * 96}} + def get_client_opts(client): return client._MongoClient__options -KMS_PROVIDERS = {"local": {"key": b"\x00" * 96}} - - class TestAutoEncryptionOpts(PyMongoTestCase): @unittest.skipUnless(_HAVE_PYMONGOCRYPT, "pymongocrypt is not installed") @unittest.skipUnless(os.environ.get("TEST_CRYPT_SHARED"), "crypt_shared lib is not installed") @@ -211,7 +217,7 @@ def assertBinaryUUID(self, val): # Location of JSON test files. 
BASE = os.path.join(os.path.dirname(os.path.realpath(__file__)), "client-side-encryption") -SPEC_PATH = os.path.join(BASE, "spec", "legacy") +SPEC_PATH = os.path.join(BASE, "spec") OPTS = CodecOptions() @@ -547,11 +553,6 @@ def test_with_statement(self): # Spec tests -AWS_CREDS = { - "accessKeyId": os.environ.get("FLE_AWS_KEY", ""), - "secretAccessKey": os.environ.get("FLE_AWS_SECRET", ""), -} - AWS_TEMP_CREDS = { "accessKeyId": os.environ.get("CSFLE_AWS_TEMP_ACCESS_KEY_ID", ""), "secretAccessKey": os.environ.get("CSFLE_AWS_TEMP_SECRET_ACCESS_KEY", ""), @@ -562,19 +563,6 @@ def test_with_statement(self): "accessKeyId": os.environ.get("CSFLE_AWS_TEMP_ACCESS_KEY_ID", ""), "secretAccessKey": os.environ.get("CSFLE_AWS_TEMP_SECRET_ACCESS_KEY", ""), } - -AZURE_CREDS = { - "tenantId": os.environ.get("FLE_AZURE_TENANTID", ""), - "clientId": os.environ.get("FLE_AZURE_CLIENTID", ""), - "clientSecret": os.environ.get("FLE_AZURE_CLIENTSECRET", ""), -} - -GCP_CREDS = { - "email": os.environ.get("FLE_GCP_EMAIL", ""), - "privateKey": os.environ.get("FLE_GCP_PRIVATEKEY", ""), -} - -KMIP = {"endpoint": os.environ.get("FLE_KMIP_ENDPOINT", "localhost:5698")} KMS_TLS_OPTS = {"kmip": {"tlsCAFile": CA_PEM, "tlsCertificateKeyFile": CLIENT_PEM}} @@ -611,7 +599,7 @@ def parse_auto_encrypt_opts(self, opts): if not any(AZURE_CREDS.values()): self.skipTest("GCP environment credentials are not set") if "kmip" in kms_providers: - kms_providers["kmip"] = KMIP + kms_providers["kmip"] = KMIP_CREDS opts["kms_tls_options"] = KMS_TLS_OPTS if "key_vault_namespace" not in opts: opts["key_vault_namespace"] = "keyvault.datakeys" @@ -685,21 +673,24 @@ def run_scenario(self): return run_scenario -test_creator = TestCreator(create_test, TestSpec, SPEC_PATH) +test_creator = TestCreator(create_test, TestSpec, os.path.join(SPEC_PATH, "legacy")) test_creator.create_tests() -# Prose Tests -LOCAL_MASTER_KEY = base64.b64decode( - b"Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ" - b"5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk" -) +if _HAVE_PYMONGOCRYPT: + globals().update( + generate_test_classes( + os.path.join(SPEC_PATH, "unified"), + module=__name__, + ) + ) +# Prose Tests ALL_KMS_PROVIDERS = { "aws": AWS_CREDS, "azure": AZURE_CREDS, "gcp": GCP_CREDS, - "kmip": KMIP, + "kmip": KMIP_CREDS, "local": {"key": LOCAL_MASTER_KEY}, } @@ -1232,7 +1223,12 @@ def setUpClass(cls): super(TestCustomEndpoint, cls).setUpClass() def setUp(self): - kms_providers = {"aws": AWS_CREDS, "azure": AZURE_CREDS, "gcp": GCP_CREDS, "kmip": KMIP} + kms_providers = { + "aws": AWS_CREDS, + "azure": AZURE_CREDS, + "gcp": GCP_CREDS, + "kmip": KMIP_CREDS, + } self.client_encryption = ClientEncryption( kms_providers=kms_providers, key_vault_namespace="keyvault.datakeys", @@ -1409,7 +1405,7 @@ def test_10_kmip_invalid_endpoint(self): self.client_encryption_invalid.create_data_key("kmip", key) def test_11_kmip_master_key_endpoint(self): - key = {"keyId": "1", "endpoint": KMIP["endpoint"]} + key = {"keyId": "1", "endpoint": KMIP_CREDS["endpoint"]} self.run_test_expected_success("kmip", key) # Override invalid endpoint: data_key_id = self.client_encryption_invalid.create_data_key("kmip", master_key=key) @@ -2066,6 +2062,38 @@ def test_04_kmip(self): self.client_encryption_invalid_hostname.create_data_key("kmip") +# https://github.com/mongodb/specifications/blob/50e26fe/source/client-side-encryption/tests/README.rst#unique-index-on-keyaltnames +class TestUniqueIndexOnKeyAltNamesProse(EncryptionIntegrationTest): + def 
setUp(self): + self.client = client_context.client + self.client.keyvault.drop_collection("datakeys") + self.client.keyvault.datakeys.create_index( + "keyAltNames", unique=True, partialFilterExpression={"keyAltNames": {"$exists": True}} + ) + kms_providers_map = {"local": {"key": LOCAL_MASTER_KEY}} + self.client_encryption = ClientEncryption( + kms_providers_map, "keyvault.datakeys", self.client, CodecOptions() + ) + self.def_key_id = self.client_encryption.create_data_key("local", key_alt_names=["def"]) + + def test_01_create_key(self): + self.client_encryption.create_data_key("local", key_alt_names=["abc"]) + with self.assertRaisesRegex(EncryptionError, "E11000 duplicate key error collection"): + self.client_encryption.create_data_key("local", key_alt_names=["abc"]) + with self.assertRaisesRegex(EncryptionError, "E11000 duplicate key error collection"): + self.client_encryption.create_data_key("local", key_alt_names=["def"]) + + def test_02_add_key_alt_name(self): + key_id = self.client_encryption.create_data_key("local") + self.client_encryption.add_key_alt_name(key_id, "abc") + key_doc = self.client_encryption.add_key_alt_name(key_id, "abc") + assert key_doc["keyAltNames"] == ["abc"] + with self.assertRaisesRegex(DuplicateKeyError, "E11000 duplicate key error collection"): + self.client_encryption.add_key_alt_name(key_id, "def") + key_doc = self.client_encryption.add_key_alt_name(self.def_key_id, "def") + assert key_doc["keyAltNames"] == ["def"] + + # https://github.com/mongodb/specifications/blob/d4c9432/source/client-side-encryption/tests/README.rst#explicit-encryption class TestExplicitQueryableEncryption(EncryptionIntegrationTest): @client_context.require_no_standalone diff --git a/test/unified-test-format/invalid/clientEncryptionOpts-additionalProperties.json b/test/unified-test-format/invalid/clientEncryptionOpts-additionalProperties.json new file mode 100644 index 0000000000..26d14051a7 --- /dev/null +++ b/test/unified-test-format/invalid/clientEncryptionOpts-additionalProperties.json @@ -0,0 +1,30 @@ +{ + "description": "clientEncryptionOpts-additionalProperties", + "schemaVersion": "1.8", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "clientEncryption": { + "id": "clientEncryption0", + "clientEncryptionOpts": { + "keyVaultClient": "client0", + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "aws": {} + }, + "invalid": {} + } + } + } + ], + "tests": [ + { + "description": "", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/clientEncryptionOpts-keyVaultClient-required.json b/test/unified-test-format/invalid/clientEncryptionOpts-keyVaultClient-required.json new file mode 100644 index 0000000000..c43a2a9125 --- /dev/null +++ b/test/unified-test-format/invalid/clientEncryptionOpts-keyVaultClient-required.json @@ -0,0 +1,23 @@ +{ + "description": "clientEncryptionOpts-keyVaultClient-required", + "schemaVersion": "1.8", + "createEntities": [ + { + "clientEncryption": { + "id": "clientEncryption0", + "clientEncryptionOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "aws": {} + } + } + } + } + ], + "tests": [ + { + "description": "", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/clientEncryptionOpts-keyVaultClient-type.json b/test/unified-test-format/invalid/clientEncryptionOpts-keyVaultClient-type.json new file mode 100644 index 0000000000..1be9167a40 --- /dev/null +++ 
b/test/unified-test-format/invalid/clientEncryptionOpts-keyVaultClient-type.json @@ -0,0 +1,29 @@ +{ + "description": "clientEncryptionOpts-keyVaultClient-type", + "schemaVersion": "1.8", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "clientEncryption": { + "id": "clientEncryption0", + "clientEncryptionOpts": { + "keyVaultClient": 0, + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "aws": {} + } + } + } + } + ], + "tests": [ + { + "description": "", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/clientEncryptionOpts-keyVaultNamespace-required.json b/test/unified-test-format/invalid/clientEncryptionOpts-keyVaultNamespace-required.json new file mode 100644 index 0000000000..3f54d89aa7 --- /dev/null +++ b/test/unified-test-format/invalid/clientEncryptionOpts-keyVaultNamespace-required.json @@ -0,0 +1,28 @@ +{ + "description": "clientEncryptionOpts-keyVaultNamespace-required", + "schemaVersion": "1.8", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "clientEncryption": { + "id": "clientEncryption0", + "clientEncryptionOpts": { + "keyVaultClient": "client0", + "kmsProviders": { + "aws": {} + } + } + } + } + ], + "tests": [ + { + "description": "", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/clientEncryptionOpts-keyVaultNamespace-type.json b/test/unified-test-format/invalid/clientEncryptionOpts-keyVaultNamespace-type.json new file mode 100644 index 0000000000..53f2f5f086 --- /dev/null +++ b/test/unified-test-format/invalid/clientEncryptionOpts-keyVaultNamespace-type.json @@ -0,0 +1,29 @@ +{ + "description": "clientEncryptionOpts-keyVaultNamespace-type", + "schemaVersion": "1.8", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "clientEncryption": { + "id": "clientEncryption0", + "clientEncryptionOpts": { + "keyVaultClient": "client0", + "keyVaultNamespace": 0, + "kmsProviders": { + "aws": {} + } + } + } + } + ], + "tests": [ + { + "description": "", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/clientEncryptionOpts-kmsProviders-additionalProperties.json b/test/unified-test-format/invalid/clientEncryptionOpts-kmsProviders-additionalProperties.json new file mode 100644 index 0000000000..cfd979e2b2 --- /dev/null +++ b/test/unified-test-format/invalid/clientEncryptionOpts-kmsProviders-additionalProperties.json @@ -0,0 +1,29 @@ +{ + "description": "clientEncryptionOpts-kmsProviders-additionalProperties", + "schemaVersion": "1.8", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "clientEncryption": { + "id": "clientEncryption0", + "clientEncryptionOpts": { + "keyVaultClient": "client0", + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "invalid": {} + } + } + } + } + ], + "tests": [ + { + "description": "", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/clientEncryptionOpts-kmsProviders-aws-additionalProperties.json b/test/unified-test-format/invalid/clientEncryptionOpts-kmsProviders-aws-additionalProperties.json new file mode 100644 index 0000000000..59b273487d --- /dev/null +++ b/test/unified-test-format/invalid/clientEncryptionOpts-kmsProviders-aws-additionalProperties.json @@ -0,0 +1,31 @@ +{ + "description": "clientEncryptionOpts-kmsProviders-aws-additionalProperties", + "schemaVersion": "1.8", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "clientEncryption": { + "id": "clientEncryption0", + "clientEncryptionOpts": { + 
"keyVaultClient": "client0", + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "aws": { + "invalid": {} + } + } + } + } + } + ], + "tests": [ + { + "description": "", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/clientEncryptionOpts-kmsProviders-aws-type.json b/test/unified-test-format/invalid/clientEncryptionOpts-kmsProviders-aws-type.json new file mode 100644 index 0000000000..ffcc85bfcf --- /dev/null +++ b/test/unified-test-format/invalid/clientEncryptionOpts-kmsProviders-aws-type.json @@ -0,0 +1,29 @@ +{ + "description": "clientEncryptionOpts-kmsProviders-aws-type", + "schemaVersion": "1.8", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "clientEncryption": { + "id": "clientEncryption0", + "clientEncryptionOpts": { + "keyVaultClient": "client0", + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "aws": 0 + } + } + } + } + ], + "tests": [ + { + "description": "", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/clientEncryptionOpts-kmsProviders-azure-additionalProperties.json b/test/unified-test-format/invalid/clientEncryptionOpts-kmsProviders-azure-additionalProperties.json new file mode 100644 index 0000000000..1664b79097 --- /dev/null +++ b/test/unified-test-format/invalid/clientEncryptionOpts-kmsProviders-azure-additionalProperties.json @@ -0,0 +1,31 @@ +{ + "description": "clientEncryptionOpts-kmsProviders-azure-additionalProperties", + "schemaVersion": "1.8", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "clientEncryption": { + "id": "clientEncryption0", + "clientEncryptionOpts": { + "keyVaultClient": "client0", + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "azure": { + "invalid": {} + } + } + } + } + } + ], + "tests": [ + { + "description": "", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/clientEncryptionOpts-kmsProviders-azure-type.json b/test/unified-test-format/invalid/clientEncryptionOpts-kmsProviders-azure-type.json new file mode 100644 index 0000000000..5bd50c8078 --- /dev/null +++ b/test/unified-test-format/invalid/clientEncryptionOpts-kmsProviders-azure-type.json @@ -0,0 +1,29 @@ +{ + "description": "clientEncryptionOpts-kmsProviders-azure-type", + "schemaVersion": "1.8", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "clientEncryption": { + "id": "clientEncryption0", + "clientEncryptionOpts": { + "keyVaultClient": "client0", + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "azure": 0 + } + } + } + } + ], + "tests": [ + { + "description": "", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/clientEncryptionOpts-kmsProviders-gcp-additionalProperties.json b/test/unified-test-format/invalid/clientEncryptionOpts-kmsProviders-gcp-additionalProperties.json new file mode 100644 index 0000000000..120c088b00 --- /dev/null +++ b/test/unified-test-format/invalid/clientEncryptionOpts-kmsProviders-gcp-additionalProperties.json @@ -0,0 +1,31 @@ +{ + "description": "clientEncryptionOpts-kmsProviders-gcp-additionalProperties", + "schemaVersion": "1.8", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "clientEncryption": { + "id": "clientEncryption0", + "clientEncryptionOpts": { + "keyVaultClient": "client0", + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "gcp": { + "invalid": {} + } + } + } + } + } + ], + "tests": [ + { + "description": "", + "operations": [] + } + ] +} diff --git 
a/test/unified-test-format/invalid/clientEncryptionOpts-kmsProviders-gcp-type.json b/test/unified-test-format/invalid/clientEncryptionOpts-kmsProviders-gcp-type.json new file mode 100644 index 0000000000..1dd1c8a2a3 --- /dev/null +++ b/test/unified-test-format/invalid/clientEncryptionOpts-kmsProviders-gcp-type.json @@ -0,0 +1,29 @@ +{ + "description": "clientEncryptionOpts-kmsProviders-gcp-type", + "schemaVersion": "1.8", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "clientEncryption": { + "id": "clientEncryption0", + "clientEncryptionOpts": { + "keyVaultClient": "client0", + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "gcp": 0 + } + } + } + } + ], + "tests": [ + { + "description": "", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/clientEncryptionOpts-kmsProviders-kmip-additionalProperties.json b/test/unified-test-format/invalid/clientEncryptionOpts-kmsProviders-kmip-additionalProperties.json new file mode 100644 index 0000000000..22ded20440 --- /dev/null +++ b/test/unified-test-format/invalid/clientEncryptionOpts-kmsProviders-kmip-additionalProperties.json @@ -0,0 +1,31 @@ +{ + "description": "clientEncryptionOpts-kmsProviders-kmip-additionalProperties", + "schemaVersion": "1.8", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "clientEncryption": { + "id": "clientEncryption0", + "clientEncryptionOpts": { + "keyVaultClient": "client0", + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "kmip": { + "invalid": {} + } + } + } + } + } + ], + "tests": [ + { + "description": "", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/clientEncryptionOpts-kmsProviders-kmip-type.json b/test/unified-test-format/invalid/clientEncryptionOpts-kmsProviders-kmip-type.json new file mode 100644 index 0000000000..9b9e74be37 --- /dev/null +++ b/test/unified-test-format/invalid/clientEncryptionOpts-kmsProviders-kmip-type.json @@ -0,0 +1,29 @@ +{ + "description": "clientEncryptionOpts-kmsProviders-kmip-type", + "schemaVersion": "1.8", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "clientEncryption": { + "id": "clientEncryption0", + "clientEncryptionOpts": { + "keyVaultClient": "client0", + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "kmip": 0 + } + } + } + } + ], + "tests": [ + { + "description": "", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/clientEncryptionOpts-kmsProviders-local-additionalProperties.json b/test/unified-test-format/invalid/clientEncryptionOpts-kmsProviders-local-additionalProperties.json new file mode 100644 index 0000000000..b93cfe00d1 --- /dev/null +++ b/test/unified-test-format/invalid/clientEncryptionOpts-kmsProviders-local-additionalProperties.json @@ -0,0 +1,31 @@ +{ + "description": "clientEncryptionOpts-kmsProviders-local-additionalProperties", + "schemaVersion": "1.8", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "clientEncryption": { + "id": "clientEncryption0", + "clientEncryptionOpts": { + "keyVaultClient": "client0", + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "local": { + "invalid": {} + } + } + } + } + } + ], + "tests": [ + { + "description": "", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/clientEncryptionOpts-kmsProviders-local-type.json b/test/unified-test-format/invalid/clientEncryptionOpts-kmsProviders-local-type.json new file mode 100644 index 0000000000..526ea24831 --- 
/dev/null +++ b/test/unified-test-format/invalid/clientEncryptionOpts-kmsProviders-local-type.json @@ -0,0 +1,29 @@ +{ + "description": "clientEncryptionOpts-kmsProviders-local-type", + "schemaVersion": "1.8", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "clientEncryption": { + "id": "clientEncryption0", + "clientEncryptionOpts": { + "keyVaultClient": "client0", + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "local": 0 + } + } + } + } + ], + "tests": [ + { + "description": "", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/clientEncryptionOpts-kmsProviders-required.json b/test/unified-test-format/invalid/clientEncryptionOpts-kmsProviders-required.json new file mode 100644 index 0000000000..b823a67baf --- /dev/null +++ b/test/unified-test-format/invalid/clientEncryptionOpts-kmsProviders-required.json @@ -0,0 +1,26 @@ +{ + "description": "clientEncryptionOpts-kmsProviders-required", + "schemaVersion": "1.8", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "clientEncryption": { + "id": "clientEncryption0", + "clientEncryptionOpts": { + "keyVaultClient": "client0", + "keyVaultNamespace": "keyvault.datakeys" + } + } + } + ], + "tests": [ + { + "description": "", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/clientEncryptionOpts-kmsProviders-type.json b/test/unified-test-format/invalid/clientEncryptionOpts-kmsProviders-type.json new file mode 100644 index 0000000000..e7a6190b68 --- /dev/null +++ b/test/unified-test-format/invalid/clientEncryptionOpts-kmsProviders-type.json @@ -0,0 +1,27 @@ +{ + "description": "clientEncryptionOpts-kmsProviders-type", + "schemaVersion": "1.8", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "clientEncryption": { + "id": "clientEncryption0", + "clientEncryptionOpts": { + "keyVaultClient": "client0", + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": 0 + } + } + } + ], + "tests": [ + { + "description": "", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/clientEncryptionOpts-tlsOptions_not_supported.json b/test/unified-test-format/invalid/clientEncryptionOpts-tlsOptions_not_supported.json new file mode 100644 index 0000000000..3b4972f23d --- /dev/null +++ b/test/unified-test-format/invalid/clientEncryptionOpts-tlsOptions_not_supported.json @@ -0,0 +1,30 @@ +{ + "description": "clientEncryptionOpts-tlsOptions_not_supported", + "schemaVersion": "1.8", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "clientEncryption": { + "id": "clientEncryption0", + "clientEncryptionOpts": { + "keyVaultClient": "client0", + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "aws": {} + }, + "tlsOptions": {} + } + } + } + ], + "tests": [ + { + "description": "", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/entity-clientEncryption-additionalProperties.json b/test/unified-test-format/invalid/entity-clientEncryption-additionalProperties.json new file mode 100644 index 0000000000..77c0a91434 --- /dev/null +++ b/test/unified-test-format/invalid/entity-clientEncryption-additionalProperties.json @@ -0,0 +1,30 @@ +{ + "description": "entity-clientEncryption-additionalProperties", + "schemaVersion": "1.8", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "clientEncryption": { + "id": "clientEncryption0", + "clientEncryptionOpts": { + "keyVaultClient": "client0", + "keyVaultNamespace": "keyvault.datakeys", + 
"kmsProviders": { + "aws": {} + } + }, + "invalid": {} + } + } + ], + "tests": [ + { + "description": "", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/entity-clientEncryption-clientEncryptionOpts-required.json b/test/unified-test-format/invalid/entity-clientEncryption-clientEncryptionOpts-required.json new file mode 100644 index 0000000000..88e852342a --- /dev/null +++ b/test/unified-test-format/invalid/entity-clientEncryption-clientEncryptionOpts-required.json @@ -0,0 +1,17 @@ +{ + "description": "entity-clientEncryption-clientEncryptionOpts-required", + "schemaVersion": "1.8", + "createEntities": [ + { + "clientEncryption": { + "id": "clientEncryption0" + } + } + ], + "tests": [ + { + "description": "", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/entity-clientEncryption-clientEncryptionOpts-type.json b/test/unified-test-format/invalid/entity-clientEncryption-clientEncryptionOpts-type.json new file mode 100644 index 0000000000..77fb6a362a --- /dev/null +++ b/test/unified-test-format/invalid/entity-clientEncryption-clientEncryptionOpts-type.json @@ -0,0 +1,18 @@ +{ + "description": "entity-clientEncryption-clientEncryptionOpts-type", + "schemaVersion": "1.8", + "createEntities": [ + { + "clientEncryption": { + "id": "clientEncryption0", + "clientEncryptionOpts": 0 + } + } + ], + "tests": [ + { + "description": "", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/entity-clientEncryption-id-required.json b/test/unified-test-format/invalid/entity-clientEncryption-id-required.json new file mode 100644 index 0000000000..464ba7159a --- /dev/null +++ b/test/unified-test-format/invalid/entity-clientEncryption-id-required.json @@ -0,0 +1,28 @@ +{ + "description": "entity-clientEncryption-id-required", + "schemaVersion": "1.8", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "clientEncryption": { + "clientEncryptionOpts": { + "keyVaultClient": "client0", + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "aws": {} + } + } + } + } + ], + "tests": [ + { + "description": "", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/entity-clientEncryption-id-type.json b/test/unified-test-format/invalid/entity-clientEncryption-id-type.json new file mode 100644 index 0000000000..a7746657fc --- /dev/null +++ b/test/unified-test-format/invalid/entity-clientEncryption-id-type.json @@ -0,0 +1,29 @@ +{ + "description": "entity-clientEncryption-id-type", + "schemaVersion": "1.8", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "clientEncryption": { + "id": 0, + "clientEncryptionOpts": { + "keyVaultClient": "client0", + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "aws": {} + } + } + } + } + ], + "tests": [ + { + "description": "", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/runOnRequirement-csfle-type.json b/test/unified-test-format/invalid/runOnRequirement-csfle-type.json new file mode 100644 index 0000000000..b48c850d14 --- /dev/null +++ b/test/unified-test-format/invalid/runOnRequirement-csfle-type.json @@ -0,0 +1,15 @@ +{ + "description": "runOnRequirement-csfle-type", + "schemaVersion": "1.8", + "runOnRequirements": [ + { + "csfle": "foo" + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/valid-fail/kmsProviders-missing_aws_kms_credentials.json 
b/test/unified-test-format/valid-fail/kmsProviders-missing_aws_kms_credentials.json new file mode 100644 index 0000000000..e62de80033 --- /dev/null +++ b/test/unified-test-format/valid-fail/kmsProviders-missing_aws_kms_credentials.json @@ -0,0 +1,36 @@ +{ + "description": "kmsProviders-missing_aws_kms_credentials", + "schemaVersion": "1.8", + "runOnRequirements": [ + { + "csfle": true + } + ], + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "clientEncryption": { + "id": "clientEncryption0", + "clientEncryptionOpts": { + "keyVaultClient": "client0", + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "aws": { + "accessKeyId": "accessKeyId" + } + } + } + } + } + ], + "tests": [ + { + "description": "", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/valid-fail/kmsProviders-missing_azure_kms_credentials.json b/test/unified-test-format/valid-fail/kmsProviders-missing_azure_kms_credentials.json new file mode 100644 index 0000000000..8ef805d0fa --- /dev/null +++ b/test/unified-test-format/valid-fail/kmsProviders-missing_azure_kms_credentials.json @@ -0,0 +1,36 @@ +{ + "description": "kmsProviders-missing_azure_kms_credentials", + "schemaVersion": "1.8", + "runOnRequirements": [ + { + "csfle": true + } + ], + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "clientEncryption": { + "id": "clientEncryption0", + "clientEncryptionOpts": { + "keyVaultClient": "client0", + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "azure": { + "tenantId": "tenantId" + } + } + } + } + } + ], + "tests": [ + { + "description": "", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/valid-fail/kmsProviders-missing_gcp_kms_credentials.json b/test/unified-test-format/valid-fail/kmsProviders-missing_gcp_kms_credentials.json new file mode 100644 index 0000000000..c6da1ce58c --- /dev/null +++ b/test/unified-test-format/valid-fail/kmsProviders-missing_gcp_kms_credentials.json @@ -0,0 +1,36 @@ +{ + "description": "kmsProviders-missing_gcp_kms_credentials", + "schemaVersion": "1.8", + "runOnRequirements": [ + { + "csfle": true + } + ], + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "clientEncryption": { + "id": "clientEncryption0", + "clientEncryptionOpts": { + "keyVaultClient": "client0", + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "gcp": { + "email": "email" + } + } + } + } + } + ], + "tests": [ + { + "description": "", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/valid-fail/kmsProviders-no_kms.json b/test/unified-test-format/valid-fail/kmsProviders-no_kms.json new file mode 100644 index 0000000000..57499b4eaf --- /dev/null +++ b/test/unified-test-format/valid-fail/kmsProviders-no_kms.json @@ -0,0 +1,32 @@ +{ + "description": "clientEncryptionOpts-no_kms", + "schemaVersion": "1.8", + "runOnRequirements": [ + { + "csfle": true + } + ], + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "clientEncryption": { + "id": "clientEncryption0", + "clientEncryptionOpts": { + "keyVaultClient": "client0", + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": {} + } + } + } + ], + "tests": [ + { + "description": "", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/valid-fail/operation-unsupported.json b/test/unified-test-format/valid-fail/operation-unsupported.json new file mode 100644 index 0000000000..d8ef5ab1c8 --- /dev/null +++ b/test/unified-test-format/valid-fail/operation-unsupported.json @@ -0,0 
+1,22 @@ +{ + "description": "operation-unsupported", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + } + ], + "tests": [ + { + "description": "Unsupported operation", + "operations": [ + { + "name": "unsupportedOperation", + "object": "client0" + } + ] + } + ] +} diff --git a/test/unified-test-format/valid-pass/collectionData-createOptions.json b/test/unified-test-format/valid-pass/collectionData-createOptions.json index 07ab66baa0..df3321a55b 100644 --- a/test/unified-test-format/valid-pass/collectionData-createOptions.json +++ b/test/unified-test-format/valid-pass/collectionData-createOptions.json @@ -3,7 +3,8 @@ "schemaVersion": "1.9", "runOnRequirements": [ { - "minServerVersion": "3.6" + "minServerVersion": "3.6", + "serverless": "forbid" } ], "createEntities": [ diff --git a/test/unified-test-format/valid-pass/kmsProviders-explicit_kms_credentials.json b/test/unified-test-format/valid-pass/kmsProviders-explicit_kms_credentials.json new file mode 100644 index 0000000000..7cc74939eb --- /dev/null +++ b/test/unified-test-format/valid-pass/kmsProviders-explicit_kms_credentials.json @@ -0,0 +1,52 @@ +{ + "description": "kmsProviders-explicit_kms_credentials", + "schemaVersion": "1.8", + "runOnRequirements": [ + { + "csfle": true + } + ], + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "clientEncryption": { + "id": "clientEncryption0", + "clientEncryptionOpts": { + "keyVaultClient": "client0", + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "aws": { + "accessKeyId": "accessKeyId", + "secretAccessKey": "secretAccessKey" + }, + "azure": { + "tenantId": "tenantId", + "clientId": "clientId", + "clientSecret": "clientSecret" + }, + "gcp": { + "email": "email", + "privateKey": "cHJpdmF0ZUtleQo=" + }, + "kmip": { + "endpoint": "endpoint" + }, + "local": { + "key": "a2V5a2V5a2V5a2V5a2V5a2V5a2V5a2V5a2V5a2V5a2V5a2V5a2V5a2V5a2V5a2V5a2V5a2V5a2V5a2V5a2V5a2V5a2V5a2V5a2V5a2V5a2V5a2V5a2V5a2V5a2V5a2V5" + } + } + } + } + } + ], + "tests": [ + { + "description": "", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/valid-pass/kmsProviders-mixed_kms_credential_fields.json b/test/unified-test-format/valid-pass/kmsProviders-mixed_kms_credential_fields.json new file mode 100644 index 0000000000..363f2a4576 --- /dev/null +++ b/test/unified-test-format/valid-pass/kmsProviders-mixed_kms_credential_fields.json @@ -0,0 +1,54 @@ +{ + "description": "kmsProviders-mixed_kms_credential_fields", + "schemaVersion": "1.8", + "runOnRequirements": [ + { + "csfle": true + } + ], + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "clientEncryption": { + "id": "clientEncryption0", + "clientEncryptionOpts": { + "keyVaultClient": "client0", + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "aws": { + "accessKeyId": "accessKeyId", + "secretAccessKey": { + "$$placeholder": 1 + } + }, + "azure": { + "tenantId": "tenantId", + "clientId": { + "$$placeholder": 1 + }, + "clientSecret": { + "$$placeholder": 1 + } + }, + "gcp": { + "email": "email", + "privateKey": { + "$$placeholder": 1 + } + } + } + } + } + } + ], + "tests": [ + { + "description": "", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/valid-pass/kmsProviders-placeholder_kms_credentials.json b/test/unified-test-format/valid-pass/kmsProviders-placeholder_kms_credentials.json new file mode 100644 index 0000000000..3f7721f01d --- /dev/null +++ 
b/test/unified-test-format/valid-pass/kmsProviders-placeholder_kms_credentials.json @@ -0,0 +1,70 @@ +{ + "description": "kmsProviders-placeholder_kms_credentials", + "schemaVersion": "1.8", + "runOnRequirements": [ + { + "csfle": true + } + ], + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "clientEncryption": { + "id": "clientEncryption0", + "clientEncryptionOpts": { + "keyVaultClient": "client0", + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "aws": { + "accessKeyId": { + "$$placeholder": 1 + }, + "secretAccessKey": { + "$$placeholder": 1 + } + }, + "azure": { + "tenantId": { + "$$placeholder": 1 + }, + "clientId": { + "$$placeholder": 1 + }, + "clientSecret": { + "$$placeholder": 1 + } + }, + "gcp": { + "email": { + "$$placeholder": 1 + }, + "privateKey": { + "$$placeholder": 1 + } + }, + "kmip": { + "endpoint": { + "$$placeholder": 1 + } + }, + "local": { + "key": { + "$$placeholder": 1 + } + } + } + } + } + } + ], + "tests": [ + { + "description": "", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/valid-pass/kmsProviders-unconfigured_kms.json b/test/unified-test-format/valid-pass/kmsProviders-unconfigured_kms.json new file mode 100644 index 0000000000..12ca580941 --- /dev/null +++ b/test/unified-test-format/valid-pass/kmsProviders-unconfigured_kms.json @@ -0,0 +1,39 @@ +{ + "description": "kmsProviders-unconfigured_kms", + "schemaVersion": "1.8", + "runOnRequirements": [ + { + "csfle": true + } + ], + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "clientEncryption": { + "id": "clientEncryption0", + "clientEncryptionOpts": { + "keyVaultClient": "client0", + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "aws": {}, + "azure": {}, + "gcp": {}, + "kmip": {}, + "local": {} + } + } + } + } + ], + "tests": [ + { + "description": "", + "skipReason": "DRIVERS-2280: waiting on driver support for on-demand credentials", + "operations": [] + } + ] +} diff --git a/test/unified_format.py b/test/unified_format.py index 001af4434c..a7d8b533dd 100644 --- a/test/unified_format.py +++ b/test/unified_format.py @@ -26,7 +26,18 @@ import time import types from collections import abc -from test import IntegrationTest, client_context, unittest +from test import ( + AWS_CREDS, + AZURE_CREDS, + CA_PEM, + CLIENT_PEM, + GCP_CREDS, + KMIP_CREDS, + LOCAL_MASTER_KEY, + IntegrationTest, + client_context, + unittest, +) from test.utils import ( CMAPListener, camel_to_snake, @@ -45,6 +56,7 @@ import pymongo from bson import SON, Code, DBRef, Decimal128, Int64, MaxKey, MinKey, json_util from bson.binary import Binary +from bson.codec_options import DEFAULT_CODEC_OPTIONS from bson.objectid import ObjectId from bson.regex import RE_TYPE, Regex from gridfs import GridFSBucket @@ -53,10 +65,13 @@ from pymongo.client_session import ClientSession, TransactionOptions, _TxnState from pymongo.collection import Collection from pymongo.database import Database +from pymongo.encryption import ClientEncryption +from pymongo.encryption_options import _HAVE_PYMONGOCRYPT from pymongo.errors import ( BulkWriteError, ConfigurationError, ConnectionFailure, + EncryptionError, ExecutionTimeout, InvalidOperation, NetworkTimeout, @@ -93,6 +108,27 @@ IS_INTERRUPTED = False +KMS_TLS_OPTS = { + "kmip": { + "tlsCAFile": CA_PEM, + "tlsCertificateKeyFile": CLIENT_PEM, + } +} + + +# Build up a placeholder map. 
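# Keys are JSON-pointer-style paths (e.g.
# /clientEncryptionOpts/kmsProviders/aws/accessKeyId) and values are the live
# credentials for those fields, so "$$placeholder" markers in the JSON specs
# above can be swapped for real values when the entities are created.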
+PLACEHOLDER_MAP = dict() +for (provider_name, provider_data) in [ + ("local", {"key": LOCAL_MASTER_KEY}), + ("aws", AWS_CREDS), + ("azure", AZURE_CREDS), + ("gcp", GCP_CREDS), + ("kmip", KMIP_CREDS), +]: + for (key, value) in provider_data.items(): + placeholder = f"/clientEncryptionOpts/kmsProviders/{provider_name}/{key}" + PLACEHOLDER_MAP[placeholder] = value + def interrupt_loop(): global IS_INTERRUPTED @@ -169,6 +205,12 @@ def is_run_on_requirement_satisfied(requirement): else: auth_satisfied = not client_context.auth_enabled + csfle_satisfied = True + req_csfle = requirement.get("csfle") + if req_csfle is True: + min_version_satisfied = Version.from_string("4.2") <= server_version + csfle_satisfied = _HAVE_PYMONGOCRYPT and min_version_satisfied + return ( topology_satisfied and min_version_satisfied @@ -176,6 +218,7 @@ def is_run_on_requirement_satisfied(requirement): and serverless_satisfied and params_satisfied and auth_satisfied + and csfle_satisfied ) @@ -328,6 +371,19 @@ def __setitem__(self, key, value): self._entities[key] = value + def _handle_placeholders(self, spec: dict, current: dict, path: str) -> Any: + if "$$placeholder" in current: + if path not in PLACEHOLDER_MAP: + raise ValueError(f"Could not find a placeholder value for {path}") + return PLACEHOLDER_MAP[path] + + for key in list(current): + value = current[key] + if isinstance(value, dict): + subpath = f"{path}/{key}" + current[key] = self._handle_placeholders(spec, value, subpath) + return current + def _create_entity(self, entity_spec, uri=None): if len(entity_spec) != 1: self.test.fail( @@ -335,6 +391,7 @@ def _create_entity(self, entity_spec, uri=None): ) entity_type, spec = next(iter(entity_spec.items())) + spec = self._handle_placeholders(spec, spec, "") if entity_type == "client": kwargs: dict = {} observe_events = spec.get("observeEvents", []) @@ -410,6 +467,19 @@ def _create_entity(self, entity_spec, uri=None): elif entity_type == "bucket": # TODO: implement the 'bucket' entity type self.test.skipTest("GridFS is not currently supported (PYTHON-2459)") + elif entity_type == "clientEncryption": + opts = camel_to_snake_args(spec["clientEncryptionOpts"].copy()) + if isinstance(opts["key_vault_client"], str): + opts["key_vault_client"] = self[opts["key_vault_client"]] + self[spec["id"]] = ClientEncryption( + opts["kms_providers"], + opts["key_vault_namespace"], + opts["key_vault_client"], + DEFAULT_CODEC_OPTIONS, + opts.get("kms_tls_options", KMS_TLS_OPTS), + ) + return + self.test.fail("Unable to create entity of unknown type %s" % (entity_type,)) def create_entities_from_spec(self, entity_spec, uri=None): @@ -872,7 +942,7 @@ def process_error(self, exception, spec): # Connection errors are considered client errors. 
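# ClientEncryption operations report KMS and key vault failures as
# EncryptionError, which is why it is accepted below alongside
# InvalidOperation and ConfigurationError as a client-side error.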
if isinstance(exception, ConnectionFailure): self.assertNotIsInstance(exception, NotPrimaryError) - elif isinstance(exception, (InvalidOperation, ConfigurationError)): + elif isinstance(exception, (InvalidOperation, ConfigurationError, EncryptionError)): pass else: self.assertNotIsInstance(exception, PyMongoError) @@ -1033,6 +1103,33 @@ def _cursor_close(self, target, *args, **kwargs): self.__raise_if_unsupported("close", target, NonLazyCursor) return target.close() + def _clientEncryptionOperation_createDataKey(self, target, *args, **kwargs): + if "opts" in kwargs: + opts = kwargs.pop("opts") + kwargs["master_key"] = opts.get("masterKey") + kwargs["key_alt_names"] = opts.get("keyAltNames") + kwargs["key_material"] = opts.get("keyMaterial") + return target.create_data_key(*args, **kwargs) + + def _clientEncryptionOperation_getKeys(self, target, *args, **kwargs): + return list(target.get_keys(*args, **kwargs)) + + def _clientEncryptionOperation_deleteKey(self, target, *args, **kwargs): + result = target.delete_key(*args, **kwargs) + response = result.raw_result + response["deletedCount"] = result.deleted_count + return response + + def _clientEncryptionOperation_rewrapManyDataKey(self, target, *args, **kwargs): + if "opts" in kwargs: + opts = kwargs.pop("opts") + kwargs["provider"] = opts.get("provider") + kwargs["master_key"] = opts.get("masterKey") + data = target.rewrap_many_data_key(*args, **kwargs) + if data.bulk_write_result: + return dict(bulkWriteResult=parse_bulk_write_result(data.bulk_write_result)) + return dict() + def run_entity_operation(self, spec): target = self.entity_map[spec["object"]] client = target @@ -1075,6 +1172,8 @@ def run_entity_operation(self, spec): client = target._client elif isinstance(target, GridFSBucket): raise NotImplementedError + elif isinstance(target, ClientEncryption): + method_name = "_clientEncryptionOperation_%s" % (opname,) else: method_name = "doesNotExist" From 6d916d68c2db341847b46fabf961f3ad4ba045e4 Mon Sep 17 00:00:00 2001 From: Julius Park Date: Fri, 1 Jul 2022 12:36:12 -0700 Subject: [PATCH 0189/1588] PYTHON-3315 Remove index_key_id option from ClientEncryption encrypt method (#989) --- pymongo/encryption.py | 13 +++---------- test/test_encryption.py | 7 ------- 2 files changed, 3 insertions(+), 17 deletions(-) diff --git a/pymongo/encryption.py b/pymongo/encryption.py index b792a4487e..adbdeb9d9f 100644 --- a/pymongo/encryption.py +++ b/pymongo/encryption.py @@ -636,7 +636,6 @@ def encrypt( algorithm: str, key_id: Optional[Binary] = None, key_alt_name: Optional[str] = None, - index_key_id: Optional[Binary] = None, query_type: Optional[str] = None, contention_factor: Optional[int] = None, ) -> Binary: @@ -653,8 +652,6 @@ def encrypt( :class:`~bson.binary.Binary` with subtype 4 ( :attr:`~bson.binary.UUID_SUBTYPE`). - `key_alt_name`: Identifies a key vault document by 'keyAltName'. - - `index_key_id`: **(BETA)** The index key id to use for Queryable Encryption. Must be - a :class:`~bson.binary.Binary` with subtype 4 (:attr:`~bson.binary.UUID_SUBTYPE`). - `query_type` (str): **(BETA)** The query type to execute. See :class:`QueryType` for valid options. - `contention_factor` (int): **(BETA)** The contention factor to use @@ -662,7 +659,7 @@ def encrypt( *must* be given when the :attr:`Algorithm.INDEXED` algorithm is used. - .. note:: `index_key_id`, `query_type`, and `contention_factor` are part of the + .. note:: `query_type` and `contention_factor` are part of the Queryable Encryption beta. 
Backwards-breaking changes may be made before the final release. @@ -670,17 +667,14 @@ def encrypt( The encrypted value, a :class:`~bson.binary.Binary` with subtype 6. .. versionchanged:: 4.2 - Added the `index_key_id`, `query_type`, and `contention_factor` parameters. + Added the `query_type` and `contention_factor` parameters. + """ self._check_closed() if key_id is not None and not ( isinstance(key_id, Binary) and key_id.subtype == UUID_SUBTYPE ): raise TypeError("key_id must be a bson.binary.Binary with subtype 4") - if index_key_id is not None and not ( - isinstance(index_key_id, Binary) and index_key_id.subtype == UUID_SUBTYPE - ): - raise TypeError("index_key_id must be a bson.binary.Binary with subtype 4") doc = encode({"v": value}, codec_options=self._codec_options) with _wrap_encryption_errors(): @@ -689,7 +683,6 @@ def encrypt( algorithm, key_id=key_id, key_alt_name=key_alt_name, - index_key_id=index_key_id, query_type=query_type, contention_factor=contention_factor, ) diff --git a/test/test_encryption.py b/test/test_encryption.py index c3ba61d6e4..45e78d427a 100644 --- a/test/test_encryption.py +++ b/test/test_encryption.py @@ -462,13 +462,6 @@ def test_validation(self): with self.assertRaisesRegex(TypeError, msg): client_encryption.encrypt("str", algo, key_id=Binary(b"123")) - msg = "index_key_id must be a bson.binary.Binary with subtype 4" - algo = Algorithm.INDEXED - with self.assertRaisesRegex(TypeError, msg): - client_encryption.encrypt("str", algo, index_key_id=uid) # type: ignore[arg-type] - with self.assertRaisesRegex(TypeError, msg): - client_encryption.encrypt("str", algo, index_key_id=Binary(b"123")) - def test_bson_errors(self): client_encryption = ClientEncryption( KMS_PROVIDERS, "keyvault.datakeys", client_context.client, OPTS From 02de2c93e0b15d635c55321680fe637818017170 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Wed, 6 Jul 2022 11:39:07 -0700 Subject: [PATCH 0190/1588] PYTHON-3337 Fix capped collection test on MMAPv1 (#990) --- .../valid-pass/collectionData-createOptions.json | 4 ++-- test/unified_format.py | 4 ++++ 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/test/unified-test-format/valid-pass/collectionData-createOptions.json b/test/unified-test-format/valid-pass/collectionData-createOptions.json index df3321a55b..64f8fb02ff 100644 --- a/test/unified-test-format/valid-pass/collectionData-createOptions.json +++ b/test/unified-test-format/valid-pass/collectionData-createOptions.json @@ -34,7 +34,7 @@ "databaseName": "database0", "createOptions": { "capped": true, - "size": 512 + "size": 4096 }, "documents": [ { @@ -60,7 +60,7 @@ }, "expectResult": { "capped": true, - "maxSize": 512 + "maxSize": 4096 } } ] diff --git a/test/unified_format.py b/test/unified_format.py index a7d8b533dd..2d223d26d2 100644 --- a/test/unified_format.py +++ b/test/unified_format.py @@ -880,6 +880,10 @@ def maybe_skip_test(self, spec): class_name = self.__class__.__name__.lower() description = spec["description"].lower() if "csot" in class_name: + if client_context.storage_engine == "mmapv1": + self.skipTest( + "MMAPv1 does not support retryable writes which is required for CSOT tests" + ) if "change" in description or "change" in class_name: self.skipTest("CSOT not implemented for watch()") if "cursors" in class_name: From 6acc9f64cff1db85e41796ff7eefce5bec594848 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Wed, 6 Jul 2022 11:39:41 -0700 Subject: [PATCH 0191/1588] PYTHON-3333 Fix bug where non-cursor read operations fail in a transaction with 
directConnection=True on primary (#991) --- pymongo/message.py | 2 ++ pymongo/mongo_client.py | 2 +- test/test_transactions.py | 38 ++++++++++++++++++++++++++++++++++++++ 3 files changed, 41 insertions(+), 1 deletion(-) diff --git a/pymongo/message.py b/pymongo/message.py index bcdedd7b48..8f37fdc062 100644 --- a/pymongo/message.py +++ b/pymongo/message.py @@ -367,6 +367,8 @@ def as_command(self, sock_info, apply_timeout=False): def get_message(self, read_preference, sock_info, use_cmd=False): """Get a query message, possibly setting the secondaryOk bit.""" + # Use the read_preference decided by _socket_from_server. + self.read_preference = read_preference if read_preference.mode: # Set the secondaryOk bit. flags = self.flags | 4 diff --git a/pymongo/mongo_client.py b/pymongo/mongo_client.py index 4b20c2e5b7..bfa22f5458 100644 --- a/pymongo/mongo_client.py +++ b/pymongo/mongo_client.py @@ -1251,7 +1251,7 @@ def _socket_from_server(self, read_preference, server, session): with self._get_socket(server, session) as sock_info: if single: - if sock_info.is_repl: + if sock_info.is_repl and not (session and session.in_transaction): # Use primary preferred to ensure any repl set member # can handle the request. read_preference = ReadPreference.PRIMARY_PREFERRED diff --git a/test/test_transactions.py b/test/test_transactions.py index 136a19baaa..4cee3fa236 100644 --- a/test/test_transactions.py +++ b/test/test_transactions.py @@ -35,6 +35,8 @@ from gridfs import GridFS, GridFSBucket from pymongo import WriteConcern, client_session from pymongo.client_session import TransactionOptions +from pymongo.command_cursor import CommandCursor +from pymongo.cursor import Cursor from pymongo.errors import ( CollectionInvalid, ConfigurationError, @@ -351,6 +353,42 @@ def test_transaction_starts_with_batched_write(self): self.assertEqual(txn_number, event.command["txnNumber"]) self.assertEqual(48, coll.count_documents({})) + @client_context.require_transactions + def test_transaction_direct_connection(self): + client = single_client() + self.addCleanup(client.close) + coll = client.pymongo_test.test + + # Make sure the collection exists. 
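# Servers older than 4.4 cannot implicitly create a collection inside a
# transaction, so the insert below runs before any transaction starts.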
+ coll.insert_one({}) + self.assertEqual(client.topology_description.topology_type_name, "Single") + ops = [ + (coll.bulk_write, [[InsertOne({})]]), + (coll.insert_one, [{}]), + (coll.insert_many, [[{}, {}]]), + (coll.replace_one, [{}, {}]), + (coll.update_one, [{}, {"$set": {"a": 1}}]), + (coll.update_many, [{}, {"$set": {"a": 1}}]), + (coll.delete_one, [{}]), + (coll.delete_many, [{}]), + (coll.find_one_and_replace, [{}, {}]), + (coll.find_one_and_update, [{}, {"$set": {"a": 1}}]), + (coll.find_one_and_delete, [{}, {}]), + (coll.find_one, [{}]), + (coll.count_documents, [{}]), + (coll.distinct, ["foo"]), + (coll.aggregate, [[]]), + (coll.find, [{}]), + (coll.aggregate_raw_batches, [[]]), + (coll.find_raw_batches, [{}]), + (coll.database.command, ["find", coll.name]), + ] + for f, args in ops: + with client.start_session() as s, s.start_transaction(): + res = f(*args, session=s) + if isinstance(res, (CommandCursor, Cursor)): + list(res) + class PatchSessionTimeout(object): """Patches the client_session's with_transaction timeout for testing.""" From 256cd002d671d998ccc3e9e594146f706cd8fb56 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Wed, 6 Jul 2022 14:23:14 -0500 Subject: [PATCH 0192/1588] PYTHON-3339 Ignore Sourceforge link that is giving 403 Error (#993) --- doc/conf.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/doc/conf.py index ff330b59a4..1e18eb29bf 100644 --- a/doc/conf.py +++ b/doc/conf.py @@ -82,11 +82,14 @@ # Options for link checking # The anchors on the rendered markdown page are created after the fact, -# so this link results in a 404. +# so those links result in a 404. +# wiki.centos.org has been flaky. +# sourceforge.net is giving a 403 error, but is still accessible from the browser.
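# Each entry below is a regular expression matched against outgoing link
# URLs; links that match are skipped by the linkcheck builder.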
linkcheck_ignore = [ "https://github.com/mongodb/specifications/blob/master/source/server-discovery-and-monitoring/server-monitoring.rst#requesting-an-immediate-check", "https://github.com/mongodb/libmongocrypt/blob/master/bindings/python/README.rst#installing-from-source", r"https://wiki.centos.org/[\w/]*", + r"http://sourceforge.net/", ] # -- Options for extensions ---------------------------------------------------- From 751949a22a5174fb3d08806d4722f07082952adb Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Wed, 6 Jul 2022 14:24:06 -0500 Subject: [PATCH 0193/1588] PYTHON-3316 Add Type Check Test for Transactions (#995) --- test/test_mypy.py | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/test/test_mypy.py b/test/test_mypy.py index dfdcefbdb3..b320d5d139 100644 --- a/test/test_mypy.py +++ b/test/test_mypy.py @@ -47,6 +47,7 @@ class Movie(TypedDict): # type: ignore[misc] from pymongo.collection import Collection from pymongo.mongo_client import MongoClient from pymongo.operations import InsertOne +from pymongo.read_preferences import ReadPreference TEST_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "mypy_fails") @@ -163,6 +164,15 @@ class mydict(Dict[str, Any]): ) self.assertTrue(len(list(result))) + def test_with_transaction(self) -> None: + def execute_transaction(session): + pass + + with self.client.start_session() as session: + return session.with_transaction( + execute_transaction, read_preference=ReadPreference.PRIMARY + ) + class TestDecode(unittest.TestCase): def test_bson_decode(self) -> None: From b8f857d19e69f4c78959909e149c9943068aac01 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Wed, 6 Jul 2022 16:59:46 -0500 Subject: [PATCH 0194/1588] PYTHON-3292 Remove ElectionInProgress (216) from ResumableChangeStreamError (#996) --- pymongo/change_stream.py | 1 - test/change_streams/unified/change-streams-errors.json | 2 +- 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/pymongo/change_stream.py b/pymongo/change_stream.py index d2d60e25a4..ef3573022d 100644 --- a/pymongo/change_stream.py +++ b/pymongo/change_stream.py @@ -57,7 +57,6 @@ 13388, # StaleConfig 234, # RetryChangeStream 133, # FailedToSatisfyReadPreference - 216, # ElectionInProgress ] ) diff --git a/test/change_streams/unified/change-streams-errors.json b/test/change_streams/unified/change-streams-errors.json index 4a413fce84..04fe8f04f3 100644 --- a/test/change_streams/unified/change-streams-errors.json +++ b/test/change_streams/unified/change-streams-errors.json @@ -187,7 +187,7 @@ "description": "change stream errors on ElectionInProgress", "runOnRequirements": [ { - "minServerVersion": "4.4", + "minServerVersion": "4.2", "topologies": [ "replicaset", "sharded-replicaset", From ff1efd1ab28c56e70a101768bd285055b5e4fd9a Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Wed, 6 Jul 2022 17:03:58 -0500 Subject: [PATCH 0195/1588] PYTHON-2986 Update serverless testing for load balancer fronting single proxy (#997) --- .evergreen/config.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.evergreen/config.yml b/.evergreen/config.yml index 721de7cc61..653515279a 100644 --- a/.evergreen/config.yml +++ b/.evergreen/config.yml @@ -467,11 +467,11 @@ functions: fi if [ -n "${test_serverless}" ]; then export TEST_SERVERLESS=1 - export MONGODB_URI="${SINGLE_ATLASPROXY_SERVERLESS_URI}" export SERVERLESS_ATLAS_USER="${SERVERLESS_ATLAS_USER}" export SERVERLESS_ATLAS_PASSWORD="${SERVERLESS_ATLAS_PASSWORD}" - export 
SINGLE_MONGOS_LB_URI="${SINGLE_ATLASPROXY_SERVERLESS_URI}" - export MULTI_MONGOS_LB_URI="${MULTI_ATLASPROXY_SERVERLESS_URI}" + export MONGODB_URI="${SERVERLESS_URI}" + export SINGLE_MONGOS_LB_URI="${MONGODB_URI}" + export MULTI_MONGOS_LB_URI="${MONGODB_URI}" fi PYTHON_BINARY=${PYTHON_BINARY} \ From c09af5876dcc6325e54116ca4af9377f2676e0ba Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Wed, 6 Jul 2022 18:29:28 -0500 Subject: [PATCH 0196/1588] PYTHON-3338 Add versionadded to docs for key management APIs (#992) --- pymongo/encryption.py | 30 ++++++++++++++++++++---------- 1 file changed, 20 insertions(+), 10 deletions(-) diff --git a/pymongo/encryption.py b/pymongo/encryption.py index adbdeb9d9f..9fef5963a6 100644 --- a/pymongo/encryption.py +++ b/pymongo/encryption.py @@ -260,23 +260,19 @@ def close(self): class RewrapManyDataKeyResult(object): - def __init__(self, bulk_write_result: Optional[BulkWriteResult] = None) -> None: - """Result object returned by a ``rewrap_many_data_key`` operation. + """Result object returned by a :meth:`~ClientEncryption.rewrap_many_data_key` operation. - :Parameters: - - `bulk_write_result`: The result of the bulk write operation used to - update the key vault collection with one or more rewrapped data keys. - If ``rewrap_many_data_key()`` does not find any matching keys to - rewrap, no bulk write operation will be executed and this field will - be ``None``. - """ + .. versionadded:: 4.2 + """ + + def __init__(self, bulk_write_result: Optional[BulkWriteResult] = None) -> None: self._bulk_write_result = bulk_write_result @property def bulk_write_result(self) -> Optional[BulkWriteResult]: """The result of the bulk write operation used to update the key vault collection with one or more rewrapped data keys. If - ``rewrap_many_data_key()`` does not find any matching keys to rewrap, + :meth:`~ClientEncryption.rewrap_many_data_key` does not find any matching keys to rewrap, no bulk write operation will be executed and this field will be ``None``. """ @@ -717,6 +713,8 @@ def get_key(self, id: Binary) -> Optional[RawBSONDocument]: :Returns: The key document. + + .. versionadded:: 4.2 """ self._check_closed() return self._key_vault_coll.find_one({"_id": id}) @@ -727,6 +725,8 @@ def get_keys(self) -> Cursor[RawBSONDocument]: :Returns: An instance of :class:`~pymongo.cursor.Cursor` over the data key documents. + + .. versionadded:: 4.2 """ self._check_closed() return self._key_vault_coll.find({}) @@ -741,6 +741,8 @@ def delete_key(self, id: Binary) -> DeleteResult: :Returns: The delete result. + + .. versionadded:: 4.2 """ self._check_closed() return self._key_vault_coll.delete_one({"_id": id}) @@ -756,6 +758,8 @@ def add_key_alt_name(self, id: Binary, key_alt_name: str) -> Any: :Returns: The previous version of the key document. + + .. versionadded:: 4.2 """ self._check_closed() update = {"$addToSet": {"keyAltNames": key_alt_name}} @@ -769,6 +773,8 @@ def get_key_by_alt_name(self, key_alt_name: str) -> Optional[RawBSONDocument]: :Returns: The key document. + + .. versionadded:: 4.2 """ self._check_closed() return self._key_vault_coll.find_one({"keyAltNames": key_alt_name}) @@ -786,6 +792,8 @@ def remove_key_alt_name(self, id: Binary, key_alt_name: str) -> Optional[RawBSON :Returns: Returns the previous version of the key document. + + .. versionadded:: 4.2 """ self._check_closed() pipeline = [ @@ -825,6 +833,8 @@ def rewrap_many_data_key( :Returns: A :class:`RewrapManyDataKeyResult`. + + .. 
versionadded:: 4.2 """ self._check_closed() with _wrap_encryption_errors(): From b40f13bf7d09d08ab7398c5da7b9f36420206f02 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Wed, 6 Jul 2022 18:30:00 -0500 Subject: [PATCH 0197/1588] PYTHON-3311 Module "pymongo" does not explicitly export attribute "MongoClient"; implicit reexport disabled (#994) --- bson/__init__.py | 62 +++++++++++++++++++++++++++++++- bson/objectid.py | 5 ++- gridfs/__init__.py | 10 ++++++ pymongo/__init__.py | 46 ++++++++++++++++++------ pymongo/database.py | 7 ++-- test/test_default_exports.py | 70 ++++++++++++++++++++++++++++++++++++ test/test_mypy.py | 3 +- 7 files changed, 184 insertions(+), 19 deletions(-) create mode 100644 test/test_default_exports.py diff --git a/bson/__init__.py b/bson/__init__.py index 70aa6ae86c..cc0850709e 100644 --- a/bson/__init__.py +++ b/bson/__init__.py @@ -86,7 +86,7 @@ cast, ) -from bson.binary import ( # noqa: F401 +from bson.binary import ( ALL_UUID_SUBTYPES, CSHARP_LEGACY, JAVA_LEGACY, @@ -128,6 +128,66 @@ except ImportError: _USE_C = False +__all__ = [ + "ALL_UUID_SUBTYPES", + "CSHARP_LEGACY", + "JAVA_LEGACY", + "OLD_UUID_SUBTYPE", + "STANDARD", + "UUID_SUBTYPE", + "Binary", + "UuidRepresentation", + "Code", + "DEFAULT_CODEC_OPTIONS", + "CodecOptions", + "DBRef", + "Decimal128", + "InvalidBSON", + "InvalidDocument", + "InvalidStringData", + "Int64", + "MaxKey", + "MinKey", + "ObjectId", + "Regex", + "RE_TYPE", + "SON", + "Timestamp", + "utc", + "EPOCH_AWARE", + "EPOCH_NAIVE", + "BSONNUM", + "BSONSTR", + "BSONOBJ", + "BSONARR", + "BSONBIN", + "BSONUND", + "BSONOID", + "BSONBOO", + "BSONDAT", + "BSONNUL", + "BSONRGX", + "BSONREF", + "BSONCOD", + "BSONSYM", + "BSONCWS", + "BSONINT", + "BSONTIM", + "BSONLON", + "BSONDEC", + "BSONMIN", + "BSONMAX", + "get_data_and_view", + "gen_list_name", + "encode", + "decode", + "decode_all", + "decode_iter", + "decode_file_iter", + "is_valid", + "BSON", + "has_c", +] EPOCH_AWARE = datetime.datetime.fromtimestamp(0, utc) EPOCH_NAIVE = datetime.datetime.utcfromtimestamp(0) diff --git a/bson/objectid.py b/bson/objectid.py index c174b47327..4bc0243532 100644 --- a/bson/objectid.py +++ b/bson/objectid.py @@ -12,8 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -"""Tools for working with MongoDB `ObjectIds -`_. +"""Tools for working with MongoDB ObjectIds. """ import binascii @@ -88,7 +87,7 @@ def __init__(self, oid: Optional[Union[str, "ObjectId", bytes]] = None) -> None: :Parameters: - `oid` (optional): a valid ObjectId. - .. seealso:: The MongoDB documentation on `ObjectIds`_. + .. seealso:: The MongoDB documentation on `ObjectIds `_. .. 
versionchanged:: 3.8 :class:`~bson.objectid.ObjectId` now implements the `ObjectID diff --git a/gridfs/__init__.py b/gridfs/__init__.py index 29d582cd21..08c7e1d2cd 100644 --- a/gridfs/__init__.py +++ b/gridfs/__init__.py @@ -42,6 +42,16 @@ from pymongo.read_preferences import _ServerMode from pymongo.write_concern import WriteConcern +__all__ = [ + "GridFS", + "GridFSBucket", + "NoFile", + "DEFAULT_CHUNK_SIZE", + "GridIn", + "GridOut", + "GridOutCursor", +] + class GridFS(object): """An instance of GridFS on top of a single Database.""" diff --git a/pymongo/__init__.py b/pymongo/__init__.py index 30bfc2bdf7..32e8f0f82e 100644 --- a/pymongo/__init__.py +++ b/pymongo/__init__.py @@ -16,6 +16,35 @@ from typing import ContextManager, Optional, Tuple, Union +__all__ = [ + "ASCENDING", + "DESCENDING", + "GEO2D", + "GEOSPHERE", + "HASHED", + "TEXT", + "version_tuple", + "get_version_string", + "__version__", + "version", + "ReturnDocument", + "MAX_SUPPORTED_WIRE_VERSION", + "MIN_SUPPORTED_WIRE_VERSION", + "CursorType", + "MongoClient", + "DeleteMany", + "DeleteOne", + "IndexModel", + "InsertOne", + "ReplaceOne", + "UpdateMany", + "UpdateOne", + "ReadPreference", + "WriteConcern", + "has_c", + "timeout", +] + ASCENDING = 1 """Ascending sort order.""" DESCENDING = -1 @@ -70,14 +99,11 @@ def get_version_string() -> str: """Current version of PyMongo.""" from pymongo import _csot -from pymongo.collection import ReturnDocument # noqa: F401 -from pymongo.common import ( # noqa: F401 - MAX_SUPPORTED_WIRE_VERSION, - MIN_SUPPORTED_WIRE_VERSION, -) -from pymongo.cursor import CursorType # noqa: F401 -from pymongo.mongo_client import MongoClient # noqa: F401 -from pymongo.operations import ( # noqa: F401 +from pymongo.collection import ReturnDocument +from pymongo.common import MAX_SUPPORTED_WIRE_VERSION, MIN_SUPPORTED_WIRE_VERSION +from pymongo.cursor import CursorType +from pymongo.mongo_client import MongoClient +from pymongo.operations import ( DeleteMany, DeleteOne, IndexModel, @@ -86,8 +112,8 @@ def get_version_string() -> str: UpdateMany, UpdateOne, ) -from pymongo.read_preferences import ReadPreference # noqa: F401 -from pymongo.write_concern import WriteConcern # noqa: F401 +from pymongo.read_preferences import ReadPreference +from pymongo.write_concern import WriteConcern def has_c() -> bool: diff --git a/pymongo/database.py b/pymongo/database.py index fcf1f3e36c..d182012cd4 100644 --- a/pymongo/database.py +++ b/pymongo/database.py @@ -55,6 +55,7 @@ def _check_name(name): if TYPE_CHECKING: + import bson.codec_options from pymongo.client_session import ClientSession from pymongo.mongo_client import MongoClient from pymongo.read_concern import ReadConcern @@ -699,7 +700,7 @@ def command( check: bool = True, allowable_errors: Optional[Sequence[Union[str, int]]] = None, read_preference: Optional[_ServerMode] = None, - codec_options: "Optional[CodecOptions[_CodecDocumentType]]" = None, + codec_options: "Optional[bson.codec_options.CodecOptions[_CodecDocumentType]]" = None, session: Optional["ClientSession"] = None, comment: Optional[Any] = None, **kwargs: Any, @@ -764,7 +765,7 @@ def command( .. note:: :meth:`command` does **not** obey this Database's :attr:`read_preference` or :attr:`codec_options`. You must use the - `read_preference` and `codec_options` parameters instead. + ``read_preference`` and ``codec_options`` parameters instead. .. note:: :meth:`command` does **not** apply any custom TypeDecoders when decoding the command response. 
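A minimal sketch of overriding both parameters (the ``ping`` command, URI, and read preference here are illustrative, not part of this patch)::

    from bson.codec_options import CodecOptions
    from bson.son import SON
    from pymongo import MongoClient, ReadPreference

    client = MongoClient("mongodb://localhost:27017")
    # command() ignores the Database's configured read_preference and
    # codec_options, so pass both explicitly when the defaults are unwanted.
    reply = client.admin.command(
        "ping",
        read_preference=ReadPreference.PRIMARY_PREFERRED,
        codec_options=CodecOptions(document_class=SON),
    )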
@@ -785,7 +786,7 @@ regular expressions as :class:`~bson.regex.Regex` objects. Use :meth:`~bson.regex.Regex.try_compile` to attempt to convert from a BSON regular expression to a Python regular expression object. - Added the `codec_options` parameter. + Added the ``codec_options`` parameter. .. seealso:: The MongoDB documentation on `commands `_. """ diff --git a/test/test_default_exports.py new file mode 100644 index 0000000000..42e5831646 --- /dev/null +++ b/test/test_default_exports.py @@ -0,0 +1,70 @@ +# Copyright 2022-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Test the default exports of the top level packages.""" +import inspect +import unittest + +import bson +import gridfs +import pymongo + +BSON_IGNORE = [] +GRIDFS_IGNORE = [ + "ASCENDING", + "DESCENDING", + "ClientSession", + "Collection", + "ObjectId", + "validate_string", + "Database", + "ConfigurationError", + "WriteConcern", +] +PYMONGO_IGNORE = [] +GLOBAL_IGNORE = ["TYPE_CHECKING"] + + +class TestDefaultExports(unittest.TestCase): + def check_module(self, mod, ignores): + names = dir(mod) + names.remove("__all__") + for name in mod.__all__: + if name not in names and name not in ignores: + self.fail(f"{name} was included in {mod}.__all__ but is not a valid symbol") + + for name in names: + if name not in mod.__all__ and name not in ignores: + if name in GLOBAL_IGNORE: + continue + value = getattr(mod, name) + if inspect.ismodule(value): + continue + if getattr(value, "__module__", None) == "typing": + continue + if not name.startswith("_"): + self.fail(f"{name} was not included in {mod}.__all__") + + def test_pymongo(self): + self.check_module(pymongo, PYMONGO_IGNORE) + + def test_gridfs(self): + self.check_module(gridfs, GRIDFS_IGNORE) + + def test_bson(self): + self.check_module(bson, BSON_IGNORE) + + +if __name__ == "__main__": + unittest.main() diff --git a/test/test_mypy.py index b320d5d139..c692c70789 100644 --- a/test/test_mypy.py +++ b/test/test_mypy.py @@ -43,9 +43,8 @@ class Movie(TypedDict): # type: ignore[misc] from bson import CodecOptions, decode, decode_all, decode_file_iter, decode_iter, encode from bson.raw_bson import RawBSONDocument from bson.son import SON -from pymongo import ASCENDING +from pymongo import ASCENDING, MongoClient from pymongo.collection import Collection -from pymongo.mongo_client import MongoClient from pymongo.operations import InsertOne from pymongo.read_preferences import ReadPreference From b16533951ca40d0b30a28ad4c781b256901ab151 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Thu, 7 Jul 2022 16:55:16 -0700 Subject: [PATCH 0198/1588] PYTHON-3345 CSOT use connection handshake RTT for load balanced mode (#998) --- pymongo/mongo_client.py | 10 ++++++++++ pymongo/pool.py | 7 +++++++ test/test_csot.py | 3 +++ 3 files changed, 20 insertions(+) diff --git a/pymongo/mongo_client.py index bfa22f5458..6d139a238a 100644 --- a/pymongo/mongo_client.py +++
b/pymongo/mongo_client.py @@ -1336,6 +1336,11 @@ def is_retrying(): bulk.started_retryable_write = True while True: + if is_retrying(): + remaining = _csot.remaining() + if remaining is not None and remaining <= 0: + assert last_error is not None + raise last_error try: server = self._select_server(writable_server_selector, session) supports_session = ( @@ -1394,6 +1399,11 @@ def _retryable_read(self, func, read_pref, session, address=None, retryable=True multiple_retries = _csot.get_timeout() is not None while True: + if retrying: + remaining = _csot.remaining() + if remaining is not None and remaining <= 0: + assert last_error is not None + raise last_error try: server = self._select_server(read_pref, session, address=address) with self._socket_from_server(read_pref, server, session) as (sock_info, read_pref): diff --git a/pymongo/pool.py b/pymongo/pool.py index 8a1e72fc0d..f8cc60329b 100644 --- a/pymongo/pool.py +++ b/pymongo/pool.py @@ -559,6 +559,7 @@ def __init__(self, sock, pool, address, id): self.pinned_cursor = False self.active = False self.last_timeout = self.opts.socket_timeout + self.connect_rtt = 0.0 def set_socket_timeout(self, timeout): """Cache last timeout to avoid duplicate calls to sock.settimeout.""" @@ -580,6 +581,8 @@ def apply_timeout(self, client, cmd, write_concern=None): return None # RTT validation. rtt = _csot.get_rtt() + if rtt is None: + rtt = self.connect_rtt max_time_ms = timeout - rtt if max_time_ms < 0: # CSOT: raise an error without running the command since we know it will time out. @@ -655,7 +658,11 @@ def _hello(self, cluster_time, topology_version, heartbeat_frequency): else: auth_ctx = None + if performing_handshake: + start = time.monotonic() doc = self.command("admin", cmd, publish_events=False, exhaust_allowed=awaitable) + if performing_handshake: + self.connect_rtt = time.monotonic() - start hello = Hello(doc, awaitable=awaitable) self.is_writable = hello.is_writable self.max_wire_version = hello.max_wire_version diff --git a/test/test_csot.py b/test/test_csot.py index 290851159d..4d71973320 100644 --- a/test/test_csot.py +++ b/test/test_csot.py @@ -33,6 +33,9 @@ class TestCSOT(IntegrationTest): + RUN_ON_SERVERLESS = True + RUN_ON_LOAD_BALANCER = True + def test_timeout_nested(self): coll = self.db.coll self.assertEqual(_csot.get_timeout(), None) From b9884f34a963d85d0da7d6dbeaa84f76e32f353a Mon Sep 17 00:00:00 2001 From: Julius Park Date: Fri, 8 Jul 2022 12:30:39 -0700 Subject: [PATCH 0199/1588] Test Failure - crypt_shared FLE tests fail on Windows/macos (#999) --- test/test_encryption.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/test_encryption.py b/test/test_encryption.py index 45e78d427a..c75b5f3ebd 100644 --- a/test/test_encryption.py +++ b/test/test_encryption.py @@ -1786,7 +1786,7 @@ def setUp(self): key_vault_namespace="keyvault.datakeys", kms_providers=kms_providers_map ) self.listener = AllowListEventListener("aggregate") - self.encrypted_client = MongoClient( + self.encrypted_client = rs_or_single_client( auto_encryption_opts=opts, retryReads=False, event_listeners=[self.listener] ) self.addCleanup(self.encrypted_client.close) From d2b95d1bf027c17ee1f049c3077354a0ecdcf947 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Fri, 8 Jul 2022 19:40:25 -0500 Subject: [PATCH 0200/1588] PYTHON-3336 Test Failure - test_load_balancer failing (#1000) --- .pre-commit-config.yaml | 2 +- CONTRIBUTING.rst | 17 +++++++++++++++++ pymongo/errors.py | 9 +++++++++ pymongo/mongo_client.py | 8 ++++++-- pymongo/monitoring.py 
| 2 +- pymongo/pool.py | 5 +++-- test/test_cmap.py | 9 +++++++-- test/utils.py | 26 +++++++++++++++++++++++++- 8 files changed, 69 insertions(+), 9 deletions(-) diff --git a/.pre-commit-config.yaml index 1fd86e0926..d72d51971c 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -56,7 +56,7 @@ repos: rev: 0.11.1 hooks: - id: doc8 - args: [--max-line-length=200] + args: ["--ignore=D001"] # ignore line length stages: [manual] - repo: https://github.com/sirosen/check-jsonschema diff --git a/CONTRIBUTING.rst index b8bbad93f6..f44e746888 100644 --- a/CONTRIBUTING.rst +++ b/CONTRIBUTING.rst @@ -70,6 +70,23 @@ branch and submit a `pull request `_ button. +Running Tests Locally +--------------------- +- Ensure you have started the appropriate Mongo Server(s). +- Run ``python setup.py test`` to run all of the tests. +- Run ``python setup.py test -s test.<module_name>.<class_name>.<test_name>`` to + run specific tests. You can omit the ``<test_name>`` to test a full class + and the ``<class_name>`` to test a full module. For example: + ``python setup.py test -s test.test_change_stream.TestUnifiedChangeStreamsErrors.test_change_stream_errors_on_ElectionInProgress``. + +Running Load Balancer Tests Locally +----------------------------------- +- Install ``haproxy`` (available as ``brew install haproxy`` on macOS). +- Clone ``drivers-evergreen-tools``: ``git clone git@github.com:mongodb-labs/drivers-evergreen-tools.git``. +- Start the servers using ``LOAD_BALANCER=true TOPOLOGY=sharded_cluster AUTH=noauth SSL=nossl MONGODB_VERSION=6.0 DRIVERS_TOOLS=./drivers-evergreen-tools MONGO_ORCHESTRATION_HOME=./drivers-evergreen-tools/.evergreen/orchestration ./drivers-evergreen-tools/.evergreen/run-orchestration.sh``. +- Start the load balancer using: ``MONGODB_URI='mongodb://localhost:27017,localhost:27018/' .evergreen/run-load-balancer.sh start``. +- Run the tests using: ``LOADBALANCER=1 TEST_LOADBALANCER=1 SINGLE_MONGOS_LB_URI='mongodb://127.0.0.1:8000/?loadBalanced=true' MULTI_MONGOS_LB_URI='mongodb://127.0.0.1:8001/?loadBalanced=true' MONGODB_URI='mongodb://localhost:27017,localhost:27018/' python setup.py test -s test.test_load_balancer``. + Re-sync Spec Tests ------------------ diff --git a/pymongo/errors.py index 4a167383ca..a01911c7eb 100644 --- a/pymongo/errors.py +++ b/pymongo/errors.py @@ -61,6 +61,15 @@ class ConnectionFailure(PyMongoError): """Raised when a connection to the database cannot be made or is lost.""" +class WaitQueueTimeoutError(ConnectionFailure): + """Raised when an operation times out waiting to check out a connection from the pool. + + Subclass of :exc:`~pymongo.errors.ConnectionFailure`. + + .. versionadded:: 4.2 + """ + + class AutoReconnect(ConnectionFailure): """Raised when a connection to the database is lost and an attempt to auto-reconnect will be made. diff --git a/pymongo/mongo_client.py index 6d139a238a..1defe32536 100644 --- a/pymongo/mongo_client.py +++ b/pymongo/mongo_client.py @@ -80,6 +80,7 @@ OperationFailure, PyMongoError, ServerSelectionTimeoutError, + WaitQueueTimeoutError, ) from pymongo.pool import ConnectionClosedReason from pymongo.read_preferences import ReadPreference, _ServerMode @@ -1182,6 +1183,7 @@ def _get_socket(self, server, session): with _MongoClientErrorHandler(self, server, session) as err_handler: # Reuse the pinned connection, if it exists.
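# A transaction pinned to a load-balanced connection must keep using that
# connection, so the pinned socket is also registered with the error
# handler to ensure a failure on it is attributed to the right connection.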
            if in_txn and session._pinned_connection:
+                err_handler.contribute_socket(session._pinned_connection)
                 yield session._pinned_connection
                 return
             with server.get_socket(handler=err_handler) as sock_info:
@@ -2064,9 +2066,11 @@ def _add_retryable_write_error(exc, max_wire_version):
         if code in helpers._RETRYABLE_ERROR_CODES:
             exc._add_error_label("RetryableWriteError")

-    # Connection errors are always retryable except NotPrimaryError which is
+    # Connection errors are always retryable except NotPrimaryError and WaitQueueTimeoutError, which are
     # handled above.
-    if isinstance(exc, ConnectionFailure) and not isinstance(exc, NotPrimaryError):
+    if isinstance(exc, ConnectionFailure) and not isinstance(
+        exc, (NotPrimaryError, WaitQueueTimeoutError)
+    ):
         exc._add_error_label("RetryableWriteError")


diff --git a/pymongo/monitoring.py b/pymongo/monitoring.py
index ad604f3f16..f3f773fbbd 100644
--- a/pymongo/monitoring.py
+++ b/pymongo/monitoring.py
@@ -1774,7 +1774,7 @@ def publish_connection_check_out_failed(self, address, reason):
         event = ConnectionCheckOutFailedEvent(address, reason)
         for subscriber in self.__cmap_listeners:
             try:
-                subscriber.connection_check_out_started(event)
+                subscriber.connection_check_out_failed(event)
             except Exception:
                 _handle_exception()

diff --git a/pymongo/pool.py b/pymongo/pool.py
index f8cc60329b..493a544d01 100644
--- a/pymongo/pool.py
+++ b/pymongo/pool.py
@@ -52,6 +52,7 @@
     NotPrimaryError,
     OperationFailure,
     PyMongoError,
+    WaitQueueTimeoutError,
     _CertificateError,
 )
 from pymongo.hello import Hello, HelloCompat
@@ -1637,7 +1638,7 @@ def _raise_wait_queue_timeout(self) -> NoReturn:
         timeout = _csot.get_timeout() or self.opts.wait_queue_timeout
         if self.opts.load_balanced:
             other_ops = self.active_sockets - self.ncursors - self.ntxns
-            raise ConnectionFailure(
+            raise WaitQueueTimeoutError(
                 "Timeout waiting for connection from the connection pool. "
                 "maxPoolSize: %s, connections in use by cursors: %s, "
                 "connections in use by transactions: %s, connections in use "
@@ -1650,7 +1651,7 @@ def _raise_wait_queue_timeout(self) -> NoReturn:
                     timeout,
                 )
             )
-        raise ConnectionFailure(
+        raise WaitQueueTimeoutError(
             "Timed out while checking out a connection from connection pool. "
             "maxPoolSize: %s, timeout: %s" % (self.opts.max_pool_size, timeout)
         )

diff --git a/test/test_cmap.py b/test/test_cmap.py
index a2a1d8d214..360edef0e8 100644
--- a/test/test_cmap.py
+++ b/test/test_cmap.py
@@ -38,7 +38,12 @@
 from bson.objectid import ObjectId
 from bson.son import SON

-from pymongo.errors import ConnectionFailure, OperationFailure, PyMongoError
+from pymongo.errors import (
+    ConnectionFailure,
+    OperationFailure,
+    PyMongoError,
+    WaitQueueTimeoutError,
+)
 from pymongo.monitoring import (
     ConnectionCheckedInEvent,
     ConnectionCheckedOutEvent,
@@ -73,7 +78,7 @@
     "ConnectionPoolClosed": PoolClosedEvent,
     # Error types.
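    # The mapping below ties the spec tests' "WaitQueueTimeoutError" name to the
    # new concrete class. A minimal application-level sketch of catching it (the
    # pool options here are illustrative and assume a reachable server):
    #
    #     from pymongo import MongoClient
    #     from pymongo.errors import WaitQueueTimeoutError
    #
    #     client = MongoClient(maxPoolSize=1, waitQueueTimeoutMS=100)
    #     try:
    #         client.db.coll.find_one()
    #     except WaitQueueTimeoutError:
    #         # The checkout timed out; per the change above it never carries
    #         # the RetryableWriteError label, so PyMongo will not retry it.
    #         ...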
"PoolClosedError": _PoolClosedError, - "WaitQueueTimeoutError": ConnectionFailure, + "WaitQueueTimeoutError": WaitQueueTimeoutError, } diff --git a/test/utils.py b/test/utils.py index 7071764b15..d80bf551df 100644 --- a/test/utils.py +++ b/test/utils.py @@ -38,7 +38,20 @@ from pymongo.cursor import CursorType from pymongo.errors import ConfigurationError, OperationFailure from pymongo.hello import HelloCompat -from pymongo.monitoring import _SENSITIVE_COMMANDS +from pymongo.monitoring import ( + _SENSITIVE_COMMANDS, + ConnectionCheckedInEvent, + ConnectionCheckedOutEvent, + ConnectionCheckOutFailedEvent, + ConnectionCheckOutStartedEvent, + ConnectionClosedEvent, + ConnectionCreatedEvent, + ConnectionReadyEvent, + PoolClearedEvent, + PoolClosedEvent, + PoolCreatedEvent, + PoolReadyEvent, +) from pymongo.pool import _CancellationContext, _PoolGeneration from pymongo.read_concern import ReadConcern from pymongo.read_preferences import ReadPreference @@ -81,36 +94,47 @@ def wait_for_event(self, event, count): class CMAPListener(BaseListener, monitoring.ConnectionPoolListener): def connection_created(self, event): + assert isinstance(event, ConnectionCreatedEvent) self.add_event(event) def connection_ready(self, event): + assert isinstance(event, ConnectionReadyEvent) self.add_event(event) def connection_closed(self, event): + assert isinstance(event, ConnectionClosedEvent) self.add_event(event) def connection_check_out_started(self, event): + assert isinstance(event, ConnectionCheckOutStartedEvent) self.add_event(event) def connection_check_out_failed(self, event): + assert isinstance(event, ConnectionCheckOutFailedEvent) self.add_event(event) def connection_checked_out(self, event): + assert isinstance(event, ConnectionCheckedOutEvent) self.add_event(event) def connection_checked_in(self, event): + assert isinstance(event, ConnectionCheckedInEvent) self.add_event(event) def pool_created(self, event): + assert isinstance(event, PoolCreatedEvent) self.add_event(event) def pool_ready(self, event): + assert isinstance(event, PoolReadyEvent) self.add_event(event) def pool_cleared(self, event): + assert isinstance(event, PoolClearedEvent) self.add_event(event) def pool_closed(self, event): + assert isinstance(event, PoolClosedEvent) self.add_event(event) From 34f3a1585c31b51c31395958ee6eab84b7bc3967 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Mon, 11 Jul 2022 16:11:32 -0500 Subject: [PATCH 0201/1588] PYTHON-3349 Don't clear entire load balanced pool when serviceId is unknown (#1001) --- CONTRIBUTING.rst | 6 +++--- pymongo/topology.py | 8 ++++++++ 2 files changed, 11 insertions(+), 3 deletions(-) diff --git a/CONTRIBUTING.rst b/CONTRIBUTING.rst index f44e746888..a457b3e4c3 100644 --- a/CONTRIBUTING.rst +++ b/CONTRIBUTING.rst @@ -83,9 +83,9 @@ Running Load Balancer Tests Locally ----------------------------------- - Install ``haproxy`` (available as ``brew install haproxy`` on macOS). - Clone ``drivers-evergreen-tools``: ``git clone git@github.com:mongodb-labs/drivers-evergreen-tools.git``. -- Start the servers using ``LOAD_BALANCER=true TOPOLOGY=sharded_cluster AUTH=noauth SSL=nossl MONGODB_VERSION=6.0 DRIVERS_TOOLS=./drivers-evergreen-tools MONGO_ORCHESTRATION_HOME=./drivers-evergreen-tools/.evergreen/orchestration ./drivers-evergreen-tools/.evergreen/run-orchestration.sh``. -- Start the load balancer using: ``MONGODB_URI='mongodb://localhost:27017,localhost:27018/' .evergreen/run-load-balancer.sh start``. 
-- Run the tests using: ``LOADBALANCER=1 TEST_LOADBALANCER=1 SINGLE_MONGOS_LB_URI='mongodb://127.0.0.1:8000/?loadBalanced=true' MULTI_MONGOS_LB_URI='mongodb://127.0.0.1:8001/?loadBalanced=true' MONGODB_URI='mongodb://localhost:27017,localhost:27018/' python setup.py test -s test.test_load_balancer``. +- Start the servers using ``LOAD_BALANCER=true TOPOLOGY=sharded_cluster AUTH=noauth SSL=nossl MONGODB_VERSION=6.0 DRIVERS_TOOLS=$PWD/drivers-evergreen-tools MONGO_ORCHESTRATION_HOME=$PWD/drivers-evergreen-tools/.evergreen/orchestration $PWD/drivers-evergreen-tools/.evergreen/run-orchestration.sh``. +- Start the load balancer using: ``MONGODB_URI='mongodb://localhost:27017,localhost:27018/' $PWD/drivers-evergreen-tools/.evergreen/run-load-balancer.sh start``. +- Run the tests from the ``pymongo`` checkout directory using: ``LOADBALANCER=1 TEST_LOADBALANCER=1 SINGLE_MONGOS_LB_URI='mongodb://127.0.0.1:8000/?loadBalanced=true' MULTI_MONGOS_LB_URI='mongodb://127.0.0.1:8001/?loadBalanced=true' MONGODB_URI='mongodb://localhost:27017,localhost:27018/' python setup.py test -s test.test_load_balancer``. Re-sync Spec Tests ------------------ diff --git a/pymongo/topology.py b/pymongo/topology.py index 4e82a41228..6781a9e549 100644 --- a/pymongo/topology.py +++ b/pymongo/topology.py @@ -644,6 +644,14 @@ def _handle_error(self, address, err_ctx): error = err_ctx.error exc_type = type(error) service_id = err_ctx.service_id + + # Ignore a handshake error if the server is behind a load balancer but + # the service ID is unknown. This indicates that the error happened + # when dialing the connection or during the MongoDB handshake, so we + # don't know the service ID to use for clearing the pool. + if self._settings.load_balanced and not service_id and not err_ctx.completed_handshake: + return + if issubclass(exc_type, NetworkTimeout) and err_ctx.completed_handshake: # The socket has been closed. Don't reset the server. # Server Discovery And Monitoring Spec: "When an application From 418130d9239c12fb10f2f1f6c6957bcc9cd0df48 Mon Sep 17 00:00:00 2001 From: Tim Gates Date: Wed, 13 Jul 2022 03:00:39 +1000 Subject: [PATCH 0202/1588] docs: Fix a few typos (#1003) --- pymongo/database.py | 2 +- pymongo/ocsp_support.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/pymongo/database.py b/pymongo/database.py index d182012cd4..0047568199 100644 --- a/pymongo/database.py +++ b/pymongo/database.py @@ -772,7 +772,7 @@ def command( .. note:: If this client has been configured to use MongoDB Stable API (see :ref:`versioned-api-ref`), then :meth:`command` will - automactically add API versioning options to the given command. + automatically add API versioning options to the given command. Explicitly adding API versioning options in the command and declaring an API version on the client is not supported. diff --git a/pymongo/ocsp_support.py b/pymongo/ocsp_support.py index 94905d9f47..3a201f1f5e 100644 --- a/pymongo/ocsp_support.py +++ b/pymongo/ocsp_support.py @@ -217,7 +217,7 @@ def _verify_response(issuer, response): if not res: return 0 - # Note that we are not using a "tolerence period" as discussed in + # Note that we are not using a "tolerance period" as discussed in # https://tools.ietf.org/rfc/rfc5019.txt? 
now = _datetime.utcnow() # RFC6960, Section 3.2, Number 5 From 135efdd23fd75dbce827d9af6d9cbbf2f84d236e Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Thu, 14 Jul 2022 14:57:52 -0500 Subject: [PATCH 0203/1588] PYTHON-3153 Update initial DNS seedlist discovery tests to support dedicated load balancer port (#1002) --- pymongo/mongo_client.py | 4 ++-- pymongo/pool.py | 11 +++++++---- .../load-balanced/loadBalanced-directConnection.json | 6 +++--- .../load-balanced/loadBalanced-replicaSet-errors.json | 2 +- .../load-balanced/loadBalanced-true-txt.json | 6 +++--- ...MaxHosts-conflicts_with_loadBalanced-true-txt.json | 2 +- .../load-balanced/srvMaxHosts-zero-txt.json | 6 +++--- test/srv_seedlist/load-balanced/srvMaxHosts-zero.json | 6 +++--- 8 files changed, 23 insertions(+), 20 deletions(-) diff --git a/pymongo/mongo_client.py b/pymongo/mongo_client.py index 1defe32536..82fab2891c 100644 --- a/pymongo/mongo_client.py +++ b/pymongo/mongo_client.py @@ -2102,12 +2102,12 @@ def __init__(self, client, server, session): self.service_id = None self.handled = False - def contribute_socket(self, sock_info): + def contribute_socket(self, sock_info, completed_handshake=True): """Provide socket information to the error handler.""" self.max_wire_version = sock_info.max_wire_version self.sock_generation = sock_info.generation self.service_id = sock_info.service_id - self.completed_handshake = True + self.completed_handshake = completed_handshake def handle(self, exc_type, exc_val): if self.handled or exc_type is None: diff --git a/pymongo/pool.py b/pymongo/pool.py index 493a544d01..ed9feac918 100644 --- a/pymongo/pool.py +++ b/pymongo/pool.py @@ -1344,7 +1344,7 @@ def remove_stale_sockets(self, reference_generation): self.requests -= 1 self.size_cond.notify() - def connect(self): + def connect(self, handler=None): """Connect to Mongo and return a new SocketInfo. Can raise ConnectionFailure. @@ -1378,6 +1378,8 @@ def connect(self): if self.handshake: sock_info.hello() self.is_writable = sock_info.is_writable + if handler: + handler.contribute_socket(sock_info, completed_handshake=False) sock_info.authenticate() except BaseException: @@ -1408,7 +1410,8 @@ def get_socket(self, handler=None): if self.enabled_for_cmap: listeners.publish_connection_check_out_started(self.address) - sock_info = self._get_socket() + sock_info = self._get_socket(handler=handler) + if self.enabled_for_cmap: listeners.publish_connection_checked_out(self.address, sock_info.id) try: @@ -1446,7 +1449,7 @@ def _raise_if_not_ready(self, emit_event): ) _raise_connection_failure(self.address, AutoReconnect("connection pool paused")) - def _get_socket(self): + def _get_socket(self, handler=None): """Get or create a SocketInfo. Can raise ConnectionFailure.""" # We use the pid here to avoid issues with fork / multiprocessing. 
# See test.test_client:TestClient.test_fork for an example of @@ -1520,7 +1523,7 @@ def _get_socket(self): continue else: # We need to create a new connection try: - sock_info = self.connect() + sock_info = self.connect(handler=handler) finally: with self._max_connecting_cond: self._pending -= 1 diff --git a/test/srv_seedlist/load-balanced/loadBalanced-directConnection.json b/test/srv_seedlist/load-balanced/loadBalanced-directConnection.json index 7f41932bb2..3f500acdc6 100644 --- a/test/srv_seedlist/load-balanced/loadBalanced-directConnection.json +++ b/test/srv_seedlist/load-balanced/loadBalanced-directConnection.json @@ -1,10 +1,10 @@ { - "uri": "mongodb+srv://test20.test.build.10gen.cc/?directConnection=false", + "uri": "mongodb+srv://test24.test.build.10gen.cc/?directConnection=false", "seeds": [ - "localhost.test.build.10gen.cc:27017" + "localhost.test.build.10gen.cc:8000" ], "hosts": [ - "localhost.test.build.10gen.cc:27017" + "localhost.test.build.10gen.cc:8000" ], "options": { "loadBalanced": true, diff --git a/test/srv_seedlist/load-balanced/loadBalanced-replicaSet-errors.json b/test/srv_seedlist/load-balanced/loadBalanced-replicaSet-errors.json index 9ed5ff22c2..2133dee532 100644 --- a/test/srv_seedlist/load-balanced/loadBalanced-replicaSet-errors.json +++ b/test/srv_seedlist/load-balanced/loadBalanced-replicaSet-errors.json @@ -1,5 +1,5 @@ { - "uri": "mongodb+srv://test20.test.build.10gen.cc/?replicaSet=replset", + "uri": "mongodb+srv://test24.test.build.10gen.cc/?replicaSet=replset", "seeds": [], "hosts": [], "error": true, diff --git a/test/srv_seedlist/load-balanced/loadBalanced-true-txt.json b/test/srv_seedlist/load-balanced/loadBalanced-true-txt.json index 0117b3e9cb..f9719e760d 100644 --- a/test/srv_seedlist/load-balanced/loadBalanced-true-txt.json +++ b/test/srv_seedlist/load-balanced/loadBalanced-true-txt.json @@ -1,10 +1,10 @@ { - "uri": "mongodb+srv://test20.test.build.10gen.cc/", + "uri": "mongodb+srv://test24.test.build.10gen.cc/", "seeds": [ - "localhost.test.build.10gen.cc:27017" + "localhost.test.build.10gen.cc:8000" ], "hosts": [ - "localhost.test.build.10gen.cc:27017" + "localhost.test.build.10gen.cc:8000" ], "options": { "loadBalanced": true, diff --git a/test/srv_seedlist/load-balanced/srvMaxHosts-conflicts_with_loadBalanced-true-txt.json b/test/srv_seedlist/load-balanced/srvMaxHosts-conflicts_with_loadBalanced-true-txt.json index a7600a8a7b..593a521c26 100644 --- a/test/srv_seedlist/load-balanced/srvMaxHosts-conflicts_with_loadBalanced-true-txt.json +++ b/test/srv_seedlist/load-balanced/srvMaxHosts-conflicts_with_loadBalanced-true-txt.json @@ -1,5 +1,5 @@ { - "uri": "mongodb+srv://test20.test.build.10gen.cc/?srvMaxHosts=1", + "uri": "mongodb+srv://test24.test.build.10gen.cc/?srvMaxHosts=1", "seeds": [], "hosts": [], "error": true, diff --git a/test/srv_seedlist/load-balanced/srvMaxHosts-zero-txt.json b/test/srv_seedlist/load-balanced/srvMaxHosts-zero-txt.json index 8d48b5bbb9..a18360ea64 100644 --- a/test/srv_seedlist/load-balanced/srvMaxHosts-zero-txt.json +++ b/test/srv_seedlist/load-balanced/srvMaxHosts-zero-txt.json @@ -1,10 +1,10 @@ { - "uri": "mongodb+srv://test20.test.build.10gen.cc/?srvMaxHosts=0", + "uri": "mongodb+srv://test24.test.build.10gen.cc/?srvMaxHosts=0", "seeds": [ - "localhost.test.build.10gen.cc:27017" + "localhost.test.build.10gen.cc:8000" ], "hosts": [ - "localhost.test.build.10gen.cc:27017" + "localhost.test.build.10gen.cc:8000" ], "options": { "loadBalanced": true, diff --git a/test/srv_seedlist/load-balanced/srvMaxHosts-zero.json 
b/test/srv_seedlist/load-balanced/srvMaxHosts-zero.json index 2382fccf85..bd85418117 100644 --- a/test/srv_seedlist/load-balanced/srvMaxHosts-zero.json +++ b/test/srv_seedlist/load-balanced/srvMaxHosts-zero.json @@ -1,10 +1,10 @@ { - "uri": "mongodb+srv://test3.test.build.10gen.cc/?loadBalanced=true&srvMaxHosts=0", + "uri": "mongodb+srv://test23.test.build.10gen.cc/?loadBalanced=true&srvMaxHosts=0", "seeds": [ - "localhost.test.build.10gen.cc:27017" + "localhost.test.build.10gen.cc:8000" ], "hosts": [ - "localhost.test.build.10gen.cc:27017" + "localhost.test.build.10gen.cc:8000" ], "options": { "loadBalanced": true, From 309a7e0b3d8c21fc32ff8af76644d9c01ba916d0 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Thu, 14 Jul 2022 17:09:42 -0500 Subject: [PATCH 0204/1588] PYTHON-3353 Improve reliability of SDAM heartbeat error spec tests (#1005) --- .../hello-command-error.json | 18 +----------------- .../hello-network-error.json | 2 +- .../hello-timeout.json | 18 +----------------- 3 files changed, 3 insertions(+), 35 deletions(-) diff --git a/test/discovery_and_monitoring_integration/hello-command-error.json b/test/discovery_and_monitoring_integration/hello-command-error.json index 05a93e751c..d3bccd3900 100644 --- a/test/discovery_and_monitoring_integration/hello-command-error.json +++ b/test/discovery_and_monitoring_integration/hello-command-error.json @@ -117,7 +117,7 @@ "failPoint": { "configureFailPoint": "failCommand", "mode": { - "times": 2 + "times": 4 }, "data": { "failCommands": [ @@ -162,22 +162,6 @@ } ] } - }, - { - "name": "assertEventCount", - "object": "testRunner", - "arguments": { - "event": "ServerMarkedUnknownEvent", - "count": 1 - } - }, - { - "name": "assertEventCount", - "object": "testRunner", - "arguments": { - "event": "PoolClearedEvent", - "count": 1 - } } ], "expectations": [ diff --git a/test/discovery_and_monitoring_integration/hello-network-error.json b/test/discovery_and_monitoring_integration/hello-network-error.json index b699363923..f9761d7556 100644 --- a/test/discovery_and_monitoring_integration/hello-network-error.json +++ b/test/discovery_and_monitoring_integration/hello-network-error.json @@ -116,7 +116,7 @@ "failPoint": { "configureFailPoint": "failCommand", "mode": { - "times": 2 + "times": 4 }, "data": { "failCommands": [ diff --git a/test/discovery_and_monitoring_integration/hello-timeout.json b/test/discovery_and_monitoring_integration/hello-timeout.json index 7bdc61a912..004f8f449d 100644 --- a/test/discovery_and_monitoring_integration/hello-timeout.json +++ b/test/discovery_and_monitoring_integration/hello-timeout.json @@ -117,7 +117,7 @@ "failPoint": { "configureFailPoint": "failCommand", "mode": { - "times": 2 + "times": 4 }, "data": { "failCommands": [ @@ -160,22 +160,6 @@ } ] } - }, - { - "name": "assertEventCount", - "object": "testRunner", - "arguments": { - "event": "ServerMarkedUnknownEvent", - "count": 1 - } - }, - { - "name": "assertEventCount", - "object": "testRunner", - "arguments": { - "event": "PoolClearedEvent", - "count": 1 - } } ], "expectations": [ From 61add4a1cfe0cb56d3790c63e332456366ad0b3a Mon Sep 17 00:00:00 2001 From: Julius Park Date: Thu, 14 Jul 2022 16:30:52 -0700 Subject: [PATCH 0205/1588] PYTHON-3303 Upgrade encryption testing to macos 10.15+ (#1004) --- .evergreen/config.yml | 10 +++++++++- test/test_encryption.py | 7 +++---- 2 files changed, 12 insertions(+), 5 deletions(-) diff --git a/.evergreen/config.yml b/.evergreen/config.yml index 653515279a..ac7f97f6fa 100644 --- a/.evergreen/config.yml +++ 
b/.evergreen/config.yml @@ -1843,6 +1843,14 @@ axes: skip_ECS_auth_test: true python3_binary: /Library/Frameworks/Python.framework/Versions/3.8/bin/python3 libmongocrypt_url: https://s3.amazonaws.com/mciuploads/libmongocrypt/macos/master/latest/libmongocrypt.tar.gz + - id: macos-1100 + display_name: "macOS 11.00" + run_on: macos-1100 + variables: + skip_EC2_auth_test: true + skip_ECS_auth_test: true + python3_binary: /Library/Frameworks/Python.framework/Versions/3.8/bin/python3 + libmongocrypt_url: https://s3.amazonaws.com/mciuploads/libmongocrypt/macos/master/latest/libmongocrypt.tar.gz - id: rhel62 display_name: "RHEL 6.2 (x86_64)" run_on: rhel62-small @@ -2287,7 +2295,7 @@ buildvariants: - matrix_name: "test-macos-encryption" matrix_spec: platform: - - macos-1014 + - macos-1100 auth: "auth" ssl: "nossl" encryption: "*" diff --git a/test/test_encryption.py b/test/test_encryption.py index c75b5f3ebd..94a588bd6a 100644 --- a/test/test_encryption.py +++ b/test/test_encryption.py @@ -1777,10 +1777,9 @@ def setUp(self): self.cipher_text = self.client_encryption.encrypt( "hello", key_id=keyID, algorithm=Algorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic ) - if self.cipher_text[-1] == 0: - self.malformed_cipher_text = self.cipher_text[:-1] + b"1" - else: - self.malformed_cipher_text = self.cipher_text[:-1] + b"0" + self.malformed_cipher_text = self.cipher_text[:-1] + (self.cipher_text[-1] ^ 1).to_bytes( + 1, "big" + ) self.malformed_cipher_text = Binary(self.malformed_cipher_text, 6) opts = AutoEncryptionOpts( key_vault_namespace="keyvault.datakeys", kms_providers=kms_providers_map From bbe364fea84ce181d98755fd2445123243493c61 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Mon, 18 Jul 2022 13:06:08 -0500 Subject: [PATCH 0206/1588] PYTHON-3294 Bump minimum pymongocrypt version to 1.3.0 (#1007) --- doc/changelog.rst | 6 ++++-- setup.py | 4 +--- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/doc/changelog.rst b/doc/changelog.rst index f074a9d464..4ab1348078 100644 --- a/doc/changelog.rst +++ b/doc/changelog.rst @@ -25,6 +25,8 @@ Bug fixes Unavoidable breaking changes ............................ +- pymongocrypt 1.3.0 or later is now required for client side field level + encryption support. - :meth:`~pymongo.collection.Collection.estimated_document_count` now always uses the `count`_ command. Due to an oversight in versions 5.0.0-5.0.8 of MongoDB, the count command was not included in V1 of the :ref:`versioned-api-ref`. @@ -317,7 +319,7 @@ Breaking Changes in 4.0 :attr:`~pymongo.mongo_client.MongoClient.address` which can change. - Removed the `disable_md5` parameter for :class:`~gridfs.GridFSBucket` and :class:`~gridfs.GridFS`. See :ref:`removed-gridfs-checksum` for details. -- PyMongoCrypt 1.2.0 or later is now required for client side field level +- pymongocrypt 1.2.0 or later is now required for client side field level encryption support. Notable improvements @@ -356,7 +358,7 @@ Changes in Version 3.12.0 .. warning:: PyMongo now allows insertion of documents with keys that include dots ('.') or start with dollar signs ('$'). -- PyMongoCrypt 1.1.0 or later is now required for client side field level +- pymongocrypt 1.1.0 or later is now required for client side field level encryption support. 
- Iterating over :class:`gridfs.grid_file.GridOut` now moves through the file line by line instead of chunk by chunk, and does not diff --git a/setup.py b/setup.py index a61f56c3f6..ce6cce712e 100755 --- a/setup.py +++ b/setup.py @@ -276,9 +276,7 @@ def build_extension(self, ext): pyopenssl_reqs.append("certifi") extras_require = { - "encryption": [ - "pymongocrypt@git+ssh://git@github.com/mongodb/libmongocrypt.git@161dbc8ae#subdirectory=bindings/python" - ], + "encryption": ["pymongocrypt>=1.3.0,<2.0.0"], "ocsp": pyopenssl_reqs, "snappy": ["python-snappy"], "zstd": ["zstandard"], From 484374eb3f9f5a44c22a529a28c6bd9b99d93869 Mon Sep 17 00:00:00 2001 From: Julius Park Date: Mon, 18 Jul 2022 13:40:16 -0700 Subject: [PATCH 0207/1588] PYTHON-3298 Add flag to create_collection to skip listCollections pre-check (#1006) --- doc/changelog.rst | 3 +++ pymongo/database.py | 11 ++++++++--- test/test_database.py | 15 +++++++++++++++ test/unified_format.py | 13 ++----------- test/utils.py | 10 +--------- test/utils_spec_runner.py | 10 +--------- 6 files changed, 30 insertions(+), 32 deletions(-) diff --git a/doc/changelog.rst b/doc/changelog.rst index 4ab1348078..5594dd4f74 100644 --- a/doc/changelog.rst +++ b/doc/changelog.rst @@ -13,6 +13,9 @@ PyMongo 4.2 brings a number of improvements including: changes may be made before the final release. See :ref:`automatic-queryable-client-side-encryption` for example usage. - Provisional (beta) support for :func:`pymongo.timeout` to apply a single timeout to an entire block of pymongo operations. +- Added ``check_exists`` option to :meth:`~pymongo.database.Database.create_collection` + that when True (the default) runs an additional ``listCollections`` command to verify that the + collection does not exist already. Bug fixes ......... diff --git a/pymongo/database.py b/pymongo/database.py index 0047568199..665b94cad1 100644 --- a/pymongo/database.py +++ b/pymongo/database.py @@ -305,6 +305,7 @@ def create_collection( read_concern: Optional["ReadConcern"] = None, session: Optional["ClientSession"] = None, timeout: Optional[float] = None, + check_exists: Optional[bool] = True, **kwargs: Any, ) -> Collection[_DocumentType]: """Create a new :class:`~pymongo.collection.Collection` in this @@ -336,6 +337,8 @@ def create_collection( :class:`~pymongo.collation.Collation`. - `session` (optional): a :class:`~pymongo.client_session.ClientSession`. + - ``check_exists`` (optional): if True (the default), send a listCollections command to + check if the collection already exists before creation. - `**kwargs` (optional): additional keyword arguments will be passed as options for the `create collection command`_ @@ -402,7 +405,7 @@ def create_collection( enabling pre- and post-images. .. versionchanged:: 4.2 - Added the ``clusteredIndex`` and ``encryptedFields`` parameters. + Added the ``check_exists``, ``clusteredIndex``, and ``encryptedFields`` parameters. .. versionchanged:: 3.11 This method is now supported inside multi-document transactions @@ -441,8 +444,10 @@ def create_collection( with self.__client._tmp_session(session) as s: # Skip this check in a transaction where listCollections is not # supported. 
- if (not s or not s.in_transaction) and name in self.list_collection_names( - filter={"name": name}, session=s + if ( + check_exists + and (not s or not s.in_transaction) + and name in self.list_collection_names(filter={"name": name}, session=s) ): raise CollectionInvalid("collection %s already exists" % name) return Collection( diff --git a/test/test_database.py b/test/test_database.py index 58cbe54335..d49ac8324f 100644 --- a/test/test_database.py +++ b/test/test_database.py @@ -220,6 +220,21 @@ def test_list_collection_names_filter(self): self.assertIn("nameOnly", command) self.assertTrue(command["nameOnly"]) + def test_check_exists(self): + listener = OvertCommandListener() + results = listener.results + client = rs_or_single_client(event_listeners=[listener]) + self.addCleanup(client.close) + db = client[self.db.name] + db.drop_collection("unique") + db.create_collection("unique", check_exists=True) + self.assertIn("listCollections", listener.started_command_names()) + listener.reset() + db.drop_collection("unique") + db.create_collection("unique", check_exists=False) + self.assertTrue(len(results["started"]) > 0) + self.assertNotIn("listCollections", listener.started_command_names()) + def test_list_collections(self): self.client.drop_database("pymongo_test") db = Database(self.client, "pymongo_test") diff --git a/test/unified_format.py b/test/unified_format.py index 2d223d26d2..200040353d 100644 --- a/test/unified_format.py +++ b/test/unified_format.py @@ -283,7 +283,6 @@ def __init__( self._observe_sensitive_commands = False self._ignore_commands = _SENSITIVE_COMMANDS | set(ignore_commands) self._ignore_commands.add("configurefailpoint") - self.ignore_list_collections = False self._event_mapping = collections.defaultdict(list) self.entity_map = entity_map if store_events: @@ -314,10 +313,7 @@ def add_event(self, event): ) def _command_event(self, event): - if not ( - event.command_name.lower() in self._ignore_commands - or (self.ignore_list_collections and event.command_name == "listCollections") - ): + if not event.command_name.lower() in self._ignore_commands: self.add_event(event) def started(self, event): @@ -1032,13 +1028,8 @@ def _databaseOperation_listCollections(self, target, *args, **kwargs): def _databaseOperation_createCollection(self, target, *args, **kwargs): # PYTHON-1936 Ignore the listCollections event from create_collection. 
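        # With check_exists=False, Database.create_collection skips the
        # listCollections pre-check and sends only the create command, e.g.
        # (names illustrative):
        #
        #     db.create_collection("events", check_exists=False)
        #
        # An already-existing collection then surfaces as a server-side error
        # rather than a client-side CollectionInvalid.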
- for listener in target.client.options.event_listeners: - if isinstance(listener, EventListenerUtil): - listener.ignore_list_collections = True + kwargs["check_exists"] = False ret = target.create_collection(*args, **kwargs) - for listener in target.client.options.event_listeners: - if isinstance(listener, EventListenerUtil): - listener.ignore_list_collections = False return ret def __entityOperation_aggregate(self, target, *args, **kwargs): diff --git a/test/utils.py b/test/utils.py index d80bf551df..5421d584d1 100644 --- a/test/utils.py +++ b/test/utils.py @@ -202,23 +202,14 @@ class OvertCommandListener(EventListener): ignore_list_collections = False def started(self, event): - if self.ignore_list_collections and event.command_name.lower() == "listcollections": - self.ignore_list_collections = False - return if event.command_name.lower() not in _SENSITIVE_COMMANDS: super(OvertCommandListener, self).started(event) def succeeded(self, event): - if self.ignore_list_collections and event.command_name.lower() == "listcollections": - self.ignore_list_collections = False - return if event.command_name.lower() not in _SENSITIVE_COMMANDS: super(OvertCommandListener, self).succeeded(event) def failed(self, event): - if self.ignore_list_collections and event.command_name.lower() == "listcollections": - self.ignore_list_collections = False - return if event.command_name.lower() not in _SENSITIVE_COMMANDS: super(OvertCommandListener, self).failed(event) @@ -1114,6 +1105,7 @@ def prepare_spec_arguments(spec, arguments, opname, entity_map, with_txn_callbac elif opname == "create_collection": if arg_name == "collection": arguments["name"] = arguments.pop(arg_name) + arguments["check_exists"] = False # Any other arguments to create_collection are passed through # **kwargs. elif opname == "create_index" and arg_name == "keys": diff --git a/test/utils_spec_runner.py b/test/utils_spec_runner.py index 498a60220b..f8ad26efe7 100644 --- a/test/utils_spec_runner.py +++ b/test/utils_spec_runner.py @@ -307,15 +307,7 @@ def run_operation(self, sessions, collection, operation): args.update(arguments) arguments = args - try: - if name == "create_collection" and ( - "encrypted" in operation["arguments"]["name"] - or "plaintext" in operation["arguments"]["name"] - ): - self.listener.ignore_list_collections = True - result = cmd(**dict(arguments)) - finally: - self.listener.ignore_list_collections = False + result = cmd(**dict(arguments)) # Cleanup open change stream cursors. if name == "watch": self.addCleanup(result.close) From c43486101fb37ccfd590499aad04f57943bf75d3 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Mon, 18 Jul 2022 15:58:20 -0500 Subject: [PATCH 0208/1588] PYTHON-3291 Add PyMongoError.timeout to identify timeout related errors (#1008) --- doc/changelog.rst | 2 ++ pymongo/__init__.py | 8 +++--- pymongo/errors.py | 56 ++++++++++++++++++++++++++++++++++++++++++ pymongo/helpers.py | 3 ++- test/unified_format.py | 13 ++-------- 5 files changed, 67 insertions(+), 15 deletions(-) diff --git a/doc/changelog.rst b/doc/changelog.rst index 5594dd4f74..b6b099fd31 100644 --- a/doc/changelog.rst +++ b/doc/changelog.rst @@ -13,6 +13,8 @@ PyMongo 4.2 brings a number of improvements including: changes may be made before the final release. See :ref:`automatic-queryable-client-side-encryption` for example usage. - Provisional (beta) support for :func:`pymongo.timeout` to apply a single timeout to an entire block of pymongo operations. 
+- Added the :attr:`pymongo.errors.PyMongoError.timeout` property which is ``True`` when
+  the error was caused by a timeout.
 - Added ``check_exists`` option to :meth:`~pymongo.database.Database.create_collection`
   that when True (the default) runs an additional ``listCollections`` command to verify that the
   collection does not exist already.

diff --git a/pymongo/__init__.py b/pymongo/__init__.py
index 32e8f0f82e..7eaa793648 100644
--- a/pymongo/__init__.py
+++ b/pymongo/__init__.py
@@ -149,9 +149,11 @@ def timeout(seconds: Optional[float]) -> ContextManager:
                 # The deadline has now expired, the next operation will raise
                 # a timeout exception.
                 client.db.coll2.insert_one({})
-        except (ServerSelectionTimeoutError, ExecutionTimeout, WTimeoutError,
-                NetworkTimeout) as exc:
-            print(f"block timed out: {exc!r}")
+        except PyMongoError as exc:
+            if exc.timeout:
+                print(f"block timed out: {exc!r}")
+            else:
+                print(f"failed with non-timeout error: {exc!r}")

     When nesting :func:`~pymongo.timeout`, the newly computed deadline is capped to at
     most the existing deadline. The deadline can only be shortened, not extended.

diff --git a/pymongo/errors.py b/pymongo/errors.py
index a01911c7eb..efc7e2eca0 100644
--- a/pymongo/errors.py
+++ b/pymongo/errors.py
@@ -52,6 +52,14 @@ def _remove_error_label(self, label):
         """Remove the given label from this error."""
         self._error_labels.discard(label)

+    @property
+    def timeout(self) -> bool:
+        """True if this error was caused by a timeout.
+
+        .. versionadded:: 4.2
+        """
+        return False
+

 class ProtocolError(PyMongoError):
     """Raised for failures related to the wire protocol."""
@@ -69,6 +77,10 @@ class WaitQueueTimeoutError(ConnectionFailure):
     .. versionadded:: 4.2
     """

+    @property
+    def timeout(self) -> bool:
+        return True
+

 class AutoReconnect(ConnectionFailure):
     """Raised when a connection to the database is lost and an attempt to
@@ -106,6 +118,10 @@ class NetworkTimeout(AutoReconnect):
     Subclass of :exc:`~pymongo.errors.AutoReconnect`.
     """

+    @property
+    def timeout(self) -> bool:
+        return True
+

 def _format_detailed_error(message, details):
     if details is not None:
@@ -149,6 +165,10 @@ class ServerSelectionTimeoutError(AutoReconnect):
     Preference that the replica set cannot satisfy.
     """

+    @property
+    def timeout(self) -> bool:
+        return True
+

 class ConfigurationError(PyMongoError):
     """Raised when something is incorrectly configured."""
@@ -199,6 +219,10 @@ def details(self) -> Optional[Mapping[str, Any]]:
         """
         return self.__details

+    @property
+    def timeout(self) -> bool:
+        return self.__code in (50,)
+

 class CursorNotFound(OperationFailure):
     """Raised while iterating query results if the cursor is
@@ -217,6 +241,10 @@ class ExecutionTimeout(OperationFailure):
     .. versionadded:: 2.7
     """

+    @property
+    def timeout(self) -> bool:
+        return True
+

 class WriteConcernError(OperationFailure):
     """Base exception type for errors raised due to write concern.
@@ -242,11 +270,20 @@ class WTimeoutError(WriteConcernError):
     .. versionadded:: 2.7
     """

+    @property
+    def timeout(self) -> bool:
+        return True
+

 class DuplicateKeyError(WriteError):
     """Raised when an insert or update fails due to a duplicate key error."""


+def _wtimeout_error(error: Any) -> bool:
+    """Return True if this writeConcernError doc is caused by a timeout."""
+    return error.get("code") == 50 or ("errInfo" in error and error["errInfo"].get("wtimeout"))
+
+
 class BulkWriteError(OperationFailure):
     """Exception class for bulk write errors.
@@ -261,6 +298,19 @@ def __init__(self, results: Mapping[str, Any]) -> None: def __reduce__(self) -> Tuple[Any, Any]: return self.__class__, (self.details,) + @property + def timeout(self) -> bool: + # Check the last writeConcernError and last writeError to determine if this + # BulkWriteError was caused by a timeout. + wces = self.details.get("writeConcernErrors", []) + if wces and _wtimeout_error(wces[-1]): + return True + + werrs = self.details.get("writeErrors", []) + if werrs and werrs[-1].get("code") == 50: + return True + return False + class InvalidOperation(PyMongoError): """Raised when a client attempts to perform an invalid operation.""" @@ -302,6 +352,12 @@ def cause(self) -> Exception: """The exception that caused this encryption or decryption error.""" return self.__cause + @property + def timeout(self) -> bool: + if isinstance(self.__cause, PyMongoError): + return self.__cause.timeout + return False + class _OperationCancelled(AutoReconnect): """Internal error raised when a socket operation is cancelled.""" diff --git a/pymongo/helpers.py b/pymongo/helpers.py index 60b69424a2..4df8ab8e7a 100644 --- a/pymongo/helpers.py +++ b/pymongo/helpers.py @@ -30,6 +30,7 @@ WriteConcernError, WriteError, WTimeoutError, + _wtimeout_error, ) from pymongo.hello import HelloCompat @@ -190,7 +191,7 @@ def _raise_last_write_error(write_errors: List[Any]) -> NoReturn: def _raise_write_concern_error(error: Any) -> NoReturn: - if "errInfo" in error and error["errInfo"].get("wtimeout"): + if _wtimeout_error(error): # Make sure we raise WTimeoutError raise WTimeoutError(error.get("errmsg"), error.get("code"), error) raise WriteConcernError(error.get("errmsg"), error.get("code"), error) diff --git a/test/unified_format.py b/test/unified_format.py index 200040353d..ebeb62ceaa 100644 --- a/test/unified_format.py +++ b/test/unified_format.py @@ -72,13 +72,9 @@ ConfigurationError, ConnectionFailure, EncryptionError, - ExecutionTimeout, InvalidOperation, - NetworkTimeout, NotPrimaryError, PyMongoError, - ServerSelectionTimeoutError, - WriteConcernError, ) from pymongo.monitoring import ( _SENSITIVE_COMMANDS, @@ -948,13 +944,8 @@ def process_error(self, exception, spec): self.assertNotIsInstance(exception, PyMongoError) if is_timeout_error: - # TODO: PYTHON-3291 Implement error transformation. - if isinstance(exception, WriteConcernError): - self.assertEqual(exception.code, 50) - else: - self.assertIsInstance( - exception, (NetworkTimeout, ExecutionTimeout, ServerSelectionTimeoutError) - ) + self.assertIsInstance(exception, PyMongoError) + self.assertTrue(exception.timeout, msg=exception) if error_contains: if isinstance(exception, BulkWriteError): From 5c38676d531432004be9f4ec0fe38df508b67b1f Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Mon, 18 Jul 2022 19:54:45 -0500 Subject: [PATCH 0209/1588] PYTHON-3359 Remove Database and Collection timeout override (#1009) Remove MongoClient.timeout in favor of client.options.timeout. 
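With the per-Database/Collection override gone, the client-wide option and the
block-scoped helper are the remaining ways to set a deadline; a minimal sketch
(URI and namespace are illustrative)::

    import pymongo
    from pymongo import MongoClient

    client = MongoClient("mongodb://localhost:27017", timeoutMS=5000)
    print(client.options.timeout)  # 5.0 -- timeoutMS surfaced in seconds

    with pymongo.timeout(0.5):
        # Everything in this block shares a single 500ms deadline.
        client.db.coll.insert_one({})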
--- pymongo/_csot.py | 2 +- pymongo/client_options.py | 2 +- pymongo/collection.py | 4 - pymongo/common.py | 15 - pymongo/database.py | 8 - pymongo/mongo_client.py | 4 +- test/csot/command-execution.json | 99 +- test/csot/override-collection-timeoutMS.json | 3498 ------------- test/csot/override-database-timeoutMS.json | 4622 ------------------ test/unified_format.py | 3 +- 10 files changed, 42 insertions(+), 8215 deletions(-) delete mode 100644 test/csot/override-collection-timeoutMS.json delete mode 100644 test/csot/override-database-timeoutMS.json diff --git a/pymongo/_csot.py b/pymongo/_csot.py index ddd4e9233f..6d3cd3c0f9 100644 --- a/pymongo/_csot.py +++ b/pymongo/_csot.py @@ -58,7 +58,7 @@ class _TimeoutContext(object): Use :func:`pymongo.timeout` instead:: - with client.timeout(0.5): + with pymongo.timeout(0.5): client.test.test.insert_one({}) """ diff --git a/pymongo/client_options.py b/pymongo/client_options.py index 6784e32848..882474e258 100644 --- a/pymongo/client_options.py +++ b/pymongo/client_options.py @@ -265,7 +265,7 @@ def read_concern(self): @property def timeout(self) -> Optional[float]: - """The timeout. + """The configured timeoutMS converted to seconds, or None. ..versionadded: 4.2 """ diff --git a/pymongo/collection.py b/pymongo/collection.py index 0088388624..4aff5c1784 100644 --- a/pymongo/collection.py +++ b/pymongo/collection.py @@ -116,7 +116,6 @@ def __init__( write_concern: Optional[WriteConcern] = None, read_concern: Optional["ReadConcern"] = None, session: Optional["ClientSession"] = None, - timeout: Optional[float] = None, **kwargs: Any, ) -> None: """Get / create a Mongo collection. @@ -201,7 +200,6 @@ def __init__( read_preference or database.read_preference, write_concern or database.write_concern, read_concern or database.read_concern, - timeout if timeout is not None else database.timeout, ) if not isinstance(name, str): raise TypeError("name must be an instance of str") @@ -395,7 +393,6 @@ def with_options( read_preference: Optional[_ServerMode] = None, write_concern: Optional[WriteConcern] = None, read_concern: Optional["ReadConcern"] = None, - timeout: Optional[float] = None, ) -> "Collection[_DocumentType]": """Get a clone of this collection changing the specified settings. 
@@ -434,7 +431,6 @@ def with_options( read_preference or self.read_preference, write_concern or self.write_concern, read_concern or self.read_concern, - timeout=timeout if timeout is not None else self.timeout, ) def bulk_write( diff --git a/pymongo/common.py b/pymongo/common.py index 858684bf05..6ffc97f2a8 100644 --- a/pymongo/common.py +++ b/pymongo/common.py @@ -831,7 +831,6 @@ def __init__( read_preference: _ServerMode, write_concern: WriteConcern, read_concern: ReadConcern, - timeout: Optional[float], ) -> None: if not isinstance(codec_options, CodecOptions): raise TypeError("codec_options must be an instance of bson.codec_options.CodecOptions") @@ -855,12 +854,6 @@ def __init__( raise TypeError("read_concern must be an instance of pymongo.read_concern.ReadConcern") self.__read_concern = read_concern - if not isinstance(timeout, (int, float, type(None))): - raise TypeError("timeout must be None, an int, or a float") - if timeout and timeout < 0: - raise TypeError("timeout cannot be negative") - self.__timeout = float(timeout) if timeout else None - @property def codec_options(self) -> CodecOptions: """Read only access to the :class:`~bson.codec_options.CodecOptions` @@ -910,14 +903,6 @@ def read_concern(self) -> ReadConcern: """ return self.__read_concern - @property - def timeout(self) -> Optional[float]: - """Read only access to the timeout of this instance. - - .. versionadded:: 4.2 - """ - return self.__timeout - class _CaseInsensitiveDictionary(abc.MutableMapping): def __init__(self, *args, **kwargs): diff --git a/pymongo/database.py b/pymongo/database.py index 665b94cad1..9b9d512014 100644 --- a/pymongo/database.py +++ b/pymongo/database.py @@ -76,7 +76,6 @@ def __init__( read_preference: Optional[_ServerMode] = None, write_concern: Optional["WriteConcern"] = None, read_concern: Optional["ReadConcern"] = None, - timeout: Optional[float] = None, ) -> None: """Get a database by client and name. @@ -129,7 +128,6 @@ def __init__( read_preference or client.read_preference, write_concern or client.write_concern, read_concern or client.read_concern, - timeout if timeout is not None else client.timeout, ) if not isinstance(name, str): @@ -157,7 +155,6 @@ def with_options( read_preference: Optional[_ServerMode] = None, write_concern: Optional["WriteConcern"] = None, read_concern: Optional["ReadConcern"] = None, - timeout: Optional[float] = None, ) -> "Database[_DocumentType]": """Get a clone of this database changing the specified settings. @@ -197,7 +194,6 @@ def with_options( read_preference or self.read_preference, write_concern or self.write_concern, read_concern or self.read_concern, - timeout if timeout is not None else self.timeout, ) def __eq__(self, other: Any) -> bool: @@ -246,7 +242,6 @@ def get_collection( read_preference: Optional[_ServerMode] = None, write_concern: Optional["WriteConcern"] = None, read_concern: Optional["ReadConcern"] = None, - timeout: Optional[float] = None, ) -> Collection[_DocumentType]: """Get a :class:`~pymongo.collection.Collection` with the given name and options. 
@@ -293,7 +288,6 @@ def get_collection( read_preference, write_concern, read_concern, - timeout=timeout, ) def create_collection( @@ -304,7 +298,6 @@ def create_collection( write_concern: Optional["WriteConcern"] = None, read_concern: Optional["ReadConcern"] = None, session: Optional["ClientSession"] = None, - timeout: Optional[float] = None, check_exists: Optional[bool] = True, **kwargs: Any, ) -> Collection[_DocumentType]: @@ -459,7 +452,6 @@ def create_collection( write_concern, read_concern, session=s, - timeout=timeout, **kwargs, ) diff --git a/pymongo/mongo_client.py b/pymongo/mongo_client.py index 82fab2891c..e949ba5cd5 100644 --- a/pymongo/mongo_client.py +++ b/pymongo/mongo_client.py @@ -789,7 +789,6 @@ def __init__( options.read_preference, options.write_concern, options.read_concern, - options.timeout, ) self._topology_settings = TopologySettings( @@ -1955,7 +1954,6 @@ def get_database( read_preference: Optional[_ServerMode] = None, write_concern: Optional[WriteConcern] = None, read_concern: Optional["ReadConcern"] = None, - timeout: Optional[float] = None, ) -> database.Database[_DocumentType]: """Get a :class:`~pymongo.database.Database` with the given name and options. @@ -2006,7 +2004,7 @@ def get_database( name = self.__default_database_name return database.Database( - self, name, codec_options, read_preference, write_concern, read_concern, timeout + self, name, codec_options, read_preference, write_concern, read_concern ) def _database_default_options(self, name): diff --git a/test/csot/command-execution.json b/test/csot/command-execution.json index f51b09d2d7..92358f2184 100644 --- a/test/csot/command-execution.json +++ b/test/csot/command-execution.json @@ -61,7 +61,8 @@ "useMultipleMongoses": false, "uriOptions": { "appName": "reduceMaxTimeMSTest", - "w": 1 + "w": 1, + "timeoutMS": 500 }, "observeEvents": [ "commandStartedEvent" @@ -75,35 +76,16 @@ "databaseName": "test" } }, - { - "collection": { - "id": "regularCollection", - "database": "database", - "collectionName": "coll" - } - }, { "collection": { "id": "timeoutCollection", "database": "database", - "collectionName": "timeoutColl", - "collectionOptions": { - "timeoutMS": 60 - } + "collectionName": "timeoutColl" } } ] } }, - { - "name": "insertOne", - "object": "regularCollection", - "arguments": { - "document": { - "_id": 1 - } - } - }, { "name": "insertOne", "object": "timeoutCollection", @@ -118,18 +100,6 @@ { "client": "client", "events": [ - { - "commandStartedEvent": { - "commandName": "insert", - "databaseName": "test", - "command": { - "insert": "coll", - "maxTimeMS": { - "$$exists": false - } - } - } - }, { "commandStartedEvent": { "commandName": "insert", @@ -137,7 +107,7 @@ "command": { "insert": "timeoutColl", "maxTimeMS": { - "$$lte": 60 + "$$lte": 500 } } } @@ -180,7 +150,8 @@ "useMultipleMongoses": false, "uriOptions": { "appName": "rttTooHighTest", - "w": 1 + "w": 1, + "timeoutMS": 10 }, "observeEvents": [ "commandStartedEvent" @@ -194,21 +165,11 @@ "databaseName": "test" } }, - { - "collection": { - "id": "regularCollection", - "database": "database", - "collectionName": "coll" - } - }, { "collection": { "id": "timeoutCollection", "database": "database", - "collectionName": "timeoutColl", - "collectionOptions": { - "timeoutMS": 2 - } + "collectionName": "timeoutColl" } } ] @@ -216,11 +177,38 @@ }, { "name": "insertOne", - "object": "regularCollection", + "object": "timeoutCollection", "arguments": { "document": { - "_id": 1 + "_id": 2 } + }, + "expectError": { + "isTimeoutError": true + } + }, + { 
+ "name": "insertOne", + "object": "timeoutCollection", + "arguments": { + "document": { + "_id": 2 + } + }, + "expectError": { + "isTimeoutError": true + } + }, + { + "name": "insertOne", + "object": "timeoutCollection", + "arguments": { + "document": { + "_id": 2 + } + }, + "expectError": { + "isTimeoutError": true } }, { @@ -239,20 +227,7 @@ "expectEvents": [ { "client": "client", - "events": [ - { - "commandStartedEvent": { - "commandName": "insert", - "databaseName": "test", - "command": { - "insert": "coll", - "maxTimeMS": { - "$$exists": false - } - } - } - } - ] + "events": [] } ] } diff --git a/test/csot/override-collection-timeoutMS.json b/test/csot/override-collection-timeoutMS.json deleted file mode 100644 index 7d2c663fc1..0000000000 --- a/test/csot/override-collection-timeoutMS.json +++ /dev/null @@ -1,3498 +0,0 @@ -{ - "description": "timeoutMS can be overridden for a MongoCollection", - "schemaVersion": "1.9", - "runOnRequirements": [ - { - "minServerVersion": "4.4", - "topologies": [ - "replicaset", - "sharded-replicaset" - ] - } - ], - "createEntities": [ - { - "client": { - "id": "failPointClient", - "useMultipleMongoses": false - } - }, - { - "client": { - "id": "client", - "uriOptions": { - "timeoutMS": 10 - }, - "useMultipleMongoses": false, - "observeEvents": [ - "commandStartedEvent" - ], - "ignoreCommandMonitoringEvents": [ - "killCursors" - ] - } - }, - { - "database": { - "id": "database", - "client": "client", - "databaseName": "test" - } - } - ], - "initialData": [ - { - "collectionName": "coll", - "databaseName": "test", - "documents": [] - } - ], - "tests": [ - { - "description": "timeoutMS can be configured on a MongoCollection - aggregate on collection", - "operations": [ - { - "name": "createEntities", - "object": "testRunner", - "arguments": { - "entities": [ - { - "collection": { - "id": "collection", - "database": "database", - "collectionName": "coll", - "collectionOptions": { - "timeoutMS": 1000 - } - } - } - ] - } - }, - { - "name": "failPoint", - "object": "testRunner", - "arguments": { - "client": "failPointClient", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "aggregate" - ], - "blockConnection": true, - "blockTimeMS": 15 - } - } - } - }, - { - "name": "aggregate", - "object": "collection", - "arguments": { - "pipeline": [] - } - } - ], - "expectEvents": [ - { - "client": "client", - "events": [ - { - "commandStartedEvent": { - "commandName": "aggregate", - "databaseName": "test", - "command": { - "aggregate": "coll", - "maxTimeMS": { - "$$type": [ - "int", - "long" - ] - } - } - } - } - ] - } - ] - }, - { - "description": "timeoutMS can be set to 0 on a MongoCollection - aggregate on collection", - "operations": [ - { - "name": "createEntities", - "object": "testRunner", - "arguments": { - "entities": [ - { - "collection": { - "id": "collection", - "database": "database", - "collectionName": "coll", - "collectionOptions": { - "timeoutMS": 0 - } - } - } - ] - } - }, - { - "name": "failPoint", - "object": "testRunner", - "arguments": { - "client": "failPointClient", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "aggregate" - ], - "blockConnection": true, - "blockTimeMS": 15 - } - } - } - }, - { - "name": "aggregate", - "object": "collection", - "arguments": { - "pipeline": [] - } - } - ], - "expectEvents": [ - { - "client": "client", - "events": [ - { - "commandStartedEvent": { - "commandName": 
"aggregate", - "databaseName": "test", - "command": { - "aggregate": "coll", - "maxTimeMS": { - "$$exists": false - } - } - } - } - ] - } - ] - }, - { - "description": "timeoutMS can be configured on a MongoCollection - count on collection", - "operations": [ - { - "name": "createEntities", - "object": "testRunner", - "arguments": { - "entities": [ - { - "collection": { - "id": "collection", - "database": "database", - "collectionName": "coll", - "collectionOptions": { - "timeoutMS": 1000 - } - } - } - ] - } - }, - { - "name": "failPoint", - "object": "testRunner", - "arguments": { - "client": "failPointClient", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "count" - ], - "blockConnection": true, - "blockTimeMS": 15 - } - } - } - }, - { - "name": "count", - "object": "collection", - "arguments": { - "filter": {} - } - } - ], - "expectEvents": [ - { - "client": "client", - "events": [ - { - "commandStartedEvent": { - "commandName": "count", - "databaseName": "test", - "command": { - "count": "coll", - "maxTimeMS": { - "$$type": [ - "int", - "long" - ] - } - } - } - } - ] - } - ] - }, - { - "description": "timeoutMS can be set to 0 on a MongoCollection - count on collection", - "operations": [ - { - "name": "createEntities", - "object": "testRunner", - "arguments": { - "entities": [ - { - "collection": { - "id": "collection", - "database": "database", - "collectionName": "coll", - "collectionOptions": { - "timeoutMS": 0 - } - } - } - ] - } - }, - { - "name": "failPoint", - "object": "testRunner", - "arguments": { - "client": "failPointClient", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "count" - ], - "blockConnection": true, - "blockTimeMS": 15 - } - } - } - }, - { - "name": "count", - "object": "collection", - "arguments": { - "filter": {} - } - } - ], - "expectEvents": [ - { - "client": "client", - "events": [ - { - "commandStartedEvent": { - "commandName": "count", - "databaseName": "test", - "command": { - "count": "coll", - "maxTimeMS": { - "$$exists": false - } - } - } - } - ] - } - ] - }, - { - "description": "timeoutMS can be configured on a MongoCollection - countDocuments on collection", - "operations": [ - { - "name": "createEntities", - "object": "testRunner", - "arguments": { - "entities": [ - { - "collection": { - "id": "collection", - "database": "database", - "collectionName": "coll", - "collectionOptions": { - "timeoutMS": 1000 - } - } - } - ] - } - }, - { - "name": "failPoint", - "object": "testRunner", - "arguments": { - "client": "failPointClient", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "aggregate" - ], - "blockConnection": true, - "blockTimeMS": 15 - } - } - } - }, - { - "name": "countDocuments", - "object": "collection", - "arguments": { - "filter": {} - } - } - ], - "expectEvents": [ - { - "client": "client", - "events": [ - { - "commandStartedEvent": { - "commandName": "aggregate", - "databaseName": "test", - "command": { - "aggregate": "coll", - "maxTimeMS": { - "$$type": [ - "int", - "long" - ] - } - } - } - } - ] - } - ] - }, - { - "description": "timeoutMS can be set to 0 on a MongoCollection - countDocuments on collection", - "operations": [ - { - "name": "createEntities", - "object": "testRunner", - "arguments": { - "entities": [ - { - "collection": { - "id": "collection", - "database": "database", - "collectionName": "coll", - 
"collectionOptions": { - "timeoutMS": 0 - } - } - } - ] - } - }, - { - "name": "failPoint", - "object": "testRunner", - "arguments": { - "client": "failPointClient", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "aggregate" - ], - "blockConnection": true, - "blockTimeMS": 15 - } - } - } - }, - { - "name": "countDocuments", - "object": "collection", - "arguments": { - "filter": {} - } - } - ], - "expectEvents": [ - { - "client": "client", - "events": [ - { - "commandStartedEvent": { - "commandName": "aggregate", - "databaseName": "test", - "command": { - "aggregate": "coll", - "maxTimeMS": { - "$$exists": false - } - } - } - } - ] - } - ] - }, - { - "description": "timeoutMS can be configured on a MongoCollection - estimatedDocumentCount on collection", - "operations": [ - { - "name": "createEntities", - "object": "testRunner", - "arguments": { - "entities": [ - { - "collection": { - "id": "collection", - "database": "database", - "collectionName": "coll", - "collectionOptions": { - "timeoutMS": 1000 - } - } - } - ] - } - }, - { - "name": "failPoint", - "object": "testRunner", - "arguments": { - "client": "failPointClient", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "count" - ], - "blockConnection": true, - "blockTimeMS": 15 - } - } - } - }, - { - "name": "estimatedDocumentCount", - "object": "collection" - } - ], - "expectEvents": [ - { - "client": "client", - "events": [ - { - "commandStartedEvent": { - "commandName": "count", - "databaseName": "test", - "command": { - "count": "coll", - "maxTimeMS": { - "$$type": [ - "int", - "long" - ] - } - } - } - } - ] - } - ] - }, - { - "description": "timeoutMS can be set to 0 on a MongoCollection - estimatedDocumentCount on collection", - "operations": [ - { - "name": "createEntities", - "object": "testRunner", - "arguments": { - "entities": [ - { - "collection": { - "id": "collection", - "database": "database", - "collectionName": "coll", - "collectionOptions": { - "timeoutMS": 0 - } - } - } - ] - } - }, - { - "name": "failPoint", - "object": "testRunner", - "arguments": { - "client": "failPointClient", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "count" - ], - "blockConnection": true, - "blockTimeMS": 15 - } - } - } - }, - { - "name": "estimatedDocumentCount", - "object": "collection" - } - ], - "expectEvents": [ - { - "client": "client", - "events": [ - { - "commandStartedEvent": { - "commandName": "count", - "databaseName": "test", - "command": { - "count": "coll", - "maxTimeMS": { - "$$exists": false - } - } - } - } - ] - } - ] - }, - { - "description": "timeoutMS can be configured on a MongoCollection - distinct on collection", - "operations": [ - { - "name": "createEntities", - "object": "testRunner", - "arguments": { - "entities": [ - { - "collection": { - "id": "collection", - "database": "database", - "collectionName": "coll", - "collectionOptions": { - "timeoutMS": 1000 - } - } - } - ] - } - }, - { - "name": "failPoint", - "object": "testRunner", - "arguments": { - "client": "failPointClient", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "distinct" - ], - "blockConnection": true, - "blockTimeMS": 15 - } - } - } - }, - { - "name": "distinct", - "object": "collection", - "arguments": { - "fieldName": "x", - "filter": {} - } - } - ], - 
"expectEvents": [ - { - "client": "client", - "events": [ - { - "commandStartedEvent": { - "commandName": "distinct", - "databaseName": "test", - "command": { - "distinct": "coll", - "maxTimeMS": { - "$$type": [ - "int", - "long" - ] - } - } - } - } - ] - } - ] - }, - { - "description": "timeoutMS can be set to 0 on a MongoCollection - distinct on collection", - "operations": [ - { - "name": "createEntities", - "object": "testRunner", - "arguments": { - "entities": [ - { - "collection": { - "id": "collection", - "database": "database", - "collectionName": "coll", - "collectionOptions": { - "timeoutMS": 0 - } - } - } - ] - } - }, - { - "name": "failPoint", - "object": "testRunner", - "arguments": { - "client": "failPointClient", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "distinct" - ], - "blockConnection": true, - "blockTimeMS": 15 - } - } - } - }, - { - "name": "distinct", - "object": "collection", - "arguments": { - "fieldName": "x", - "filter": {} - } - } - ], - "expectEvents": [ - { - "client": "client", - "events": [ - { - "commandStartedEvent": { - "commandName": "distinct", - "databaseName": "test", - "command": { - "distinct": "coll", - "maxTimeMS": { - "$$exists": false - } - } - } - } - ] - } - ] - }, - { - "description": "timeoutMS can be configured on a MongoCollection - find on collection", - "operations": [ - { - "name": "createEntities", - "object": "testRunner", - "arguments": { - "entities": [ - { - "collection": { - "id": "collection", - "database": "database", - "collectionName": "coll", - "collectionOptions": { - "timeoutMS": 1000 - } - } - } - ] - } - }, - { - "name": "failPoint", - "object": "testRunner", - "arguments": { - "client": "failPointClient", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "find" - ], - "blockConnection": true, - "blockTimeMS": 15 - } - } - } - }, - { - "name": "find", - "object": "collection", - "arguments": { - "filter": {} - } - } - ], - "expectEvents": [ - { - "client": "client", - "events": [ - { - "commandStartedEvent": { - "commandName": "find", - "databaseName": "test", - "command": { - "find": "coll", - "maxTimeMS": { - "$$type": [ - "int", - "long" - ] - } - } - } - } - ] - } - ] - }, - { - "description": "timeoutMS can be set to 0 on a MongoCollection - find on collection", - "operations": [ - { - "name": "createEntities", - "object": "testRunner", - "arguments": { - "entities": [ - { - "collection": { - "id": "collection", - "database": "database", - "collectionName": "coll", - "collectionOptions": { - "timeoutMS": 0 - } - } - } - ] - } - }, - { - "name": "failPoint", - "object": "testRunner", - "arguments": { - "client": "failPointClient", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "find" - ], - "blockConnection": true, - "blockTimeMS": 15 - } - } - } - }, - { - "name": "find", - "object": "collection", - "arguments": { - "filter": {} - } - } - ], - "expectEvents": [ - { - "client": "client", - "events": [ - { - "commandStartedEvent": { - "commandName": "find", - "databaseName": "test", - "command": { - "find": "coll", - "maxTimeMS": { - "$$exists": false - } - } - } - } - ] - } - ] - }, - { - "description": "timeoutMS can be configured on a MongoCollection - findOne on collection", - "operations": [ - { - "name": "createEntities", - "object": "testRunner", - "arguments": { - "entities": [ - { - 
"collection": { - "id": "collection", - "database": "database", - "collectionName": "coll", - "collectionOptions": { - "timeoutMS": 1000 - } - } - } - ] - } - }, - { - "name": "failPoint", - "object": "testRunner", - "arguments": { - "client": "failPointClient", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "find" - ], - "blockConnection": true, - "blockTimeMS": 15 - } - } - } - }, - { - "name": "findOne", - "object": "collection", - "arguments": { - "filter": {} - } - } - ], - "expectEvents": [ - { - "client": "client", - "events": [ - { - "commandStartedEvent": { - "commandName": "find", - "databaseName": "test", - "command": { - "find": "coll", - "maxTimeMS": { - "$$type": [ - "int", - "long" - ] - } - } - } - } - ] - } - ] - }, - { - "description": "timeoutMS can be set to 0 on a MongoCollection - findOne on collection", - "operations": [ - { - "name": "createEntities", - "object": "testRunner", - "arguments": { - "entities": [ - { - "collection": { - "id": "collection", - "database": "database", - "collectionName": "coll", - "collectionOptions": { - "timeoutMS": 0 - } - } - } - ] - } - }, - { - "name": "failPoint", - "object": "testRunner", - "arguments": { - "client": "failPointClient", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "find" - ], - "blockConnection": true, - "blockTimeMS": 15 - } - } - } - }, - { - "name": "findOne", - "object": "collection", - "arguments": { - "filter": {} - } - } - ], - "expectEvents": [ - { - "client": "client", - "events": [ - { - "commandStartedEvent": { - "commandName": "find", - "databaseName": "test", - "command": { - "find": "coll", - "maxTimeMS": { - "$$exists": false - } - } - } - } - ] - } - ] - }, - { - "description": "timeoutMS can be configured on a MongoCollection - listIndexes on collection", - "operations": [ - { - "name": "createEntities", - "object": "testRunner", - "arguments": { - "entities": [ - { - "collection": { - "id": "collection", - "database": "database", - "collectionName": "coll", - "collectionOptions": { - "timeoutMS": 1000 - } - } - } - ] - } - }, - { - "name": "failPoint", - "object": "testRunner", - "arguments": { - "client": "failPointClient", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "listIndexes" - ], - "blockConnection": true, - "blockTimeMS": 15 - } - } - } - }, - { - "name": "listIndexes", - "object": "collection" - } - ], - "expectEvents": [ - { - "client": "client", - "events": [ - { - "commandStartedEvent": { - "commandName": "listIndexes", - "databaseName": "test", - "command": { - "listIndexes": "coll", - "maxTimeMS": { - "$$type": [ - "int", - "long" - ] - } - } - } - } - ] - } - ] - }, - { - "description": "timeoutMS can be set to 0 on a MongoCollection - listIndexes on collection", - "operations": [ - { - "name": "createEntities", - "object": "testRunner", - "arguments": { - "entities": [ - { - "collection": { - "id": "collection", - "database": "database", - "collectionName": "coll", - "collectionOptions": { - "timeoutMS": 0 - } - } - } - ] - } - }, - { - "name": "failPoint", - "object": "testRunner", - "arguments": { - "client": "failPointClient", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "listIndexes" - ], - "blockConnection": true, - "blockTimeMS": 15 - } - } - } - }, - { - "name": "listIndexes", - 
"object": "collection" - } - ], - "expectEvents": [ - { - "client": "client", - "events": [ - { - "commandStartedEvent": { - "commandName": "listIndexes", - "databaseName": "test", - "command": { - "listIndexes": "coll", - "maxTimeMS": { - "$$exists": false - } - } - } - } - ] - } - ] - }, - { - "description": "timeoutMS can be configured on a MongoCollection - listIndexNames on collection", - "operations": [ - { - "name": "createEntities", - "object": "testRunner", - "arguments": { - "entities": [ - { - "collection": { - "id": "collection", - "database": "database", - "collectionName": "coll", - "collectionOptions": { - "timeoutMS": 1000 - } - } - } - ] - } - }, - { - "name": "failPoint", - "object": "testRunner", - "arguments": { - "client": "failPointClient", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "listIndexes" - ], - "blockConnection": true, - "blockTimeMS": 15 - } - } - } - }, - { - "name": "listIndexNames", - "object": "collection" - } - ], - "expectEvents": [ - { - "client": "client", - "events": [ - { - "commandStartedEvent": { - "commandName": "listIndexes", - "databaseName": "test", - "command": { - "listIndexes": "coll", - "maxTimeMS": { - "$$type": [ - "int", - "long" - ] - } - } - } - } - ] - } - ] - }, - { - "description": "timeoutMS can be set to 0 on a MongoCollection - listIndexNames on collection", - "operations": [ - { - "name": "createEntities", - "object": "testRunner", - "arguments": { - "entities": [ - { - "collection": { - "id": "collection", - "database": "database", - "collectionName": "coll", - "collectionOptions": { - "timeoutMS": 0 - } - } - } - ] - } - }, - { - "name": "failPoint", - "object": "testRunner", - "arguments": { - "client": "failPointClient", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "listIndexes" - ], - "blockConnection": true, - "blockTimeMS": 15 - } - } - } - }, - { - "name": "listIndexNames", - "object": "collection" - } - ], - "expectEvents": [ - { - "client": "client", - "events": [ - { - "commandStartedEvent": { - "commandName": "listIndexes", - "databaseName": "test", - "command": { - "listIndexes": "coll", - "maxTimeMS": { - "$$exists": false - } - } - } - } - ] - } - ] - }, - { - "description": "timeoutMS can be configured on a MongoCollection - createChangeStream on collection", - "operations": [ - { - "name": "createEntities", - "object": "testRunner", - "arguments": { - "entities": [ - { - "collection": { - "id": "collection", - "database": "database", - "collectionName": "coll", - "collectionOptions": { - "timeoutMS": 1000 - } - } - } - ] - } - }, - { - "name": "failPoint", - "object": "testRunner", - "arguments": { - "client": "failPointClient", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "aggregate" - ], - "blockConnection": true, - "blockTimeMS": 15 - } - } - } - }, - { - "name": "createChangeStream", - "object": "collection", - "arguments": { - "pipeline": [] - } - } - ], - "expectEvents": [ - { - "client": "client", - "events": [ - { - "commandStartedEvent": { - "commandName": "aggregate", - "databaseName": "test", - "command": { - "aggregate": "coll", - "maxTimeMS": { - "$$type": [ - "int", - "long" - ] - } - } - } - } - ] - } - ] - }, - { - "description": "timeoutMS can be set to 0 on a MongoCollection - createChangeStream on collection", - "operations": [ - { - "name": "createEntities", - 
"object": "testRunner", - "arguments": { - "entities": [ - { - "collection": { - "id": "collection", - "database": "database", - "collectionName": "coll", - "collectionOptions": { - "timeoutMS": 0 - } - } - } - ] - } - }, - { - "name": "failPoint", - "object": "testRunner", - "arguments": { - "client": "failPointClient", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "aggregate" - ], - "blockConnection": true, - "blockTimeMS": 15 - } - } - } - }, - { - "name": "createChangeStream", - "object": "collection", - "arguments": { - "pipeline": [] - } - } - ], - "expectEvents": [ - { - "client": "client", - "events": [ - { - "commandStartedEvent": { - "commandName": "aggregate", - "databaseName": "test", - "command": { - "aggregate": "coll", - "maxTimeMS": { - "$$exists": false - } - } - } - } - ] - } - ] - }, - { - "description": "timeoutMS can be configured on a MongoCollection - insertOne on collection", - "operations": [ - { - "name": "createEntities", - "object": "testRunner", - "arguments": { - "entities": [ - { - "collection": { - "id": "collection", - "database": "database", - "collectionName": "coll", - "collectionOptions": { - "timeoutMS": 1000 - } - } - } - ] - } - }, - { - "name": "failPoint", - "object": "testRunner", - "arguments": { - "client": "failPointClient", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "insert" - ], - "blockConnection": true, - "blockTimeMS": 15 - } - } - } - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "document": { - "x": 1 - } - } - } - ], - "expectEvents": [ - { - "client": "client", - "events": [ - { - "commandStartedEvent": { - "commandName": "insert", - "databaseName": "test", - "command": { - "insert": "coll", - "maxTimeMS": { - "$$type": [ - "int", - "long" - ] - } - } - } - } - ] - } - ] - }, - { - "description": "timeoutMS can be set to 0 on a MongoCollection - insertOne on collection", - "operations": [ - { - "name": "createEntities", - "object": "testRunner", - "arguments": { - "entities": [ - { - "collection": { - "id": "collection", - "database": "database", - "collectionName": "coll", - "collectionOptions": { - "timeoutMS": 0 - } - } - } - ] - } - }, - { - "name": "failPoint", - "object": "testRunner", - "arguments": { - "client": "failPointClient", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "insert" - ], - "blockConnection": true, - "blockTimeMS": 15 - } - } - } - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "document": { - "x": 1 - } - } - } - ], - "expectEvents": [ - { - "client": "client", - "events": [ - { - "commandStartedEvent": { - "commandName": "insert", - "databaseName": "test", - "command": { - "insert": "coll", - "maxTimeMS": { - "$$exists": false - } - } - } - } - ] - } - ] - }, - { - "description": "timeoutMS can be configured on a MongoCollection - insertMany on collection", - "operations": [ - { - "name": "createEntities", - "object": "testRunner", - "arguments": { - "entities": [ - { - "collection": { - "id": "collection", - "database": "database", - "collectionName": "coll", - "collectionOptions": { - "timeoutMS": 1000 - } - } - } - ] - } - }, - { - "name": "failPoint", - "object": "testRunner", - "arguments": { - "client": "failPointClient", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - 
"failCommands": [ - "insert" - ], - "blockConnection": true, - "blockTimeMS": 15 - } - } - } - }, - { - "name": "insertMany", - "object": "collection", - "arguments": { - "documents": [ - { - "x": 1 - } - ] - } - } - ], - "expectEvents": [ - { - "client": "client", - "events": [ - { - "commandStartedEvent": { - "commandName": "insert", - "databaseName": "test", - "command": { - "insert": "coll", - "maxTimeMS": { - "$$type": [ - "int", - "long" - ] - } - } - } - } - ] - } - ] - }, - { - "description": "timeoutMS can be set to 0 on a MongoCollection - insertMany on collection", - "operations": [ - { - "name": "createEntities", - "object": "testRunner", - "arguments": { - "entities": [ - { - "collection": { - "id": "collection", - "database": "database", - "collectionName": "coll", - "collectionOptions": { - "timeoutMS": 0 - } - } - } - ] - } - }, - { - "name": "failPoint", - "object": "testRunner", - "arguments": { - "client": "failPointClient", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "insert" - ], - "blockConnection": true, - "blockTimeMS": 15 - } - } - } - }, - { - "name": "insertMany", - "object": "collection", - "arguments": { - "documents": [ - { - "x": 1 - } - ] - } - } - ], - "expectEvents": [ - { - "client": "client", - "events": [ - { - "commandStartedEvent": { - "commandName": "insert", - "databaseName": "test", - "command": { - "insert": "coll", - "maxTimeMS": { - "$$exists": false - } - } - } - } - ] - } - ] - }, - { - "description": "timeoutMS can be configured on a MongoCollection - deleteOne on collection", - "operations": [ - { - "name": "createEntities", - "object": "testRunner", - "arguments": { - "entities": [ - { - "collection": { - "id": "collection", - "database": "database", - "collectionName": "coll", - "collectionOptions": { - "timeoutMS": 1000 - } - } - } - ] - } - }, - { - "name": "failPoint", - "object": "testRunner", - "arguments": { - "client": "failPointClient", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "delete" - ], - "blockConnection": true, - "blockTimeMS": 15 - } - } - } - }, - { - "name": "deleteOne", - "object": "collection", - "arguments": { - "filter": {} - } - } - ], - "expectEvents": [ - { - "client": "client", - "events": [ - { - "commandStartedEvent": { - "commandName": "delete", - "databaseName": "test", - "command": { - "delete": "coll", - "maxTimeMS": { - "$$type": [ - "int", - "long" - ] - } - } - } - } - ] - } - ] - }, - { - "description": "timeoutMS can be set to 0 on a MongoCollection - deleteOne on collection", - "operations": [ - { - "name": "createEntities", - "object": "testRunner", - "arguments": { - "entities": [ - { - "collection": { - "id": "collection", - "database": "database", - "collectionName": "coll", - "collectionOptions": { - "timeoutMS": 0 - } - } - } - ] - } - }, - { - "name": "failPoint", - "object": "testRunner", - "arguments": { - "client": "failPointClient", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "delete" - ], - "blockConnection": true, - "blockTimeMS": 15 - } - } - } - }, - { - "name": "deleteOne", - "object": "collection", - "arguments": { - "filter": {} - } - } - ], - "expectEvents": [ - { - "client": "client", - "events": [ - { - "commandStartedEvent": { - "commandName": "delete", - "databaseName": "test", - "command": { - "delete": "coll", - "maxTimeMS": { - "$$exists": false - } - 
} - } - } - ] - } - ] - }, - { - "description": "timeoutMS can be configured on a MongoCollection - deleteMany on collection", - "operations": [ - { - "name": "createEntities", - "object": "testRunner", - "arguments": { - "entities": [ - { - "collection": { - "id": "collection", - "database": "database", - "collectionName": "coll", - "collectionOptions": { - "timeoutMS": 1000 - } - } - } - ] - } - }, - { - "name": "failPoint", - "object": "testRunner", - "arguments": { - "client": "failPointClient", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "delete" - ], - "blockConnection": true, - "blockTimeMS": 15 - } - } - } - }, - { - "name": "deleteMany", - "object": "collection", - "arguments": { - "filter": {} - } - } - ], - "expectEvents": [ - { - "client": "client", - "events": [ - { - "commandStartedEvent": { - "commandName": "delete", - "databaseName": "test", - "command": { - "delete": "coll", - "maxTimeMS": { - "$$type": [ - "int", - "long" - ] - } - } - } - } - ] - } - ] - }, - { - "description": "timeoutMS can be set to 0 on a MongoCollection - deleteMany on collection", - "operations": [ - { - "name": "createEntities", - "object": "testRunner", - "arguments": { - "entities": [ - { - "collection": { - "id": "collection", - "database": "database", - "collectionName": "coll", - "collectionOptions": { - "timeoutMS": 0 - } - } - } - ] - } - }, - { - "name": "failPoint", - "object": "testRunner", - "arguments": { - "client": "failPointClient", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "delete" - ], - "blockConnection": true, - "blockTimeMS": 15 - } - } - } - }, - { - "name": "deleteMany", - "object": "collection", - "arguments": { - "filter": {} - } - } - ], - "expectEvents": [ - { - "client": "client", - "events": [ - { - "commandStartedEvent": { - "commandName": "delete", - "databaseName": "test", - "command": { - "delete": "coll", - "maxTimeMS": { - "$$exists": false - } - } - } - } - ] - } - ] - }, - { - "description": "timeoutMS can be configured on a MongoCollection - replaceOne on collection", - "operations": [ - { - "name": "createEntities", - "object": "testRunner", - "arguments": { - "entities": [ - { - "collection": { - "id": "collection", - "database": "database", - "collectionName": "coll", - "collectionOptions": { - "timeoutMS": 1000 - } - } - } - ] - } - }, - { - "name": "failPoint", - "object": "testRunner", - "arguments": { - "client": "failPointClient", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "update" - ], - "blockConnection": true, - "blockTimeMS": 15 - } - } - } - }, - { - "name": "replaceOne", - "object": "collection", - "arguments": { - "filter": {}, - "replacement": { - "x": 1 - } - } - } - ], - "expectEvents": [ - { - "client": "client", - "events": [ - { - "commandStartedEvent": { - "commandName": "update", - "databaseName": "test", - "command": { - "update": "coll", - "maxTimeMS": { - "$$type": [ - "int", - "long" - ] - } - } - } - } - ] - } - ] - }, - { - "description": "timeoutMS can be set to 0 on a MongoCollection - replaceOne on collection", - "operations": [ - { - "name": "createEntities", - "object": "testRunner", - "arguments": { - "entities": [ - { - "collection": { - "id": "collection", - "database": "database", - "collectionName": "coll", - "collectionOptions": { - "timeoutMS": 0 - } - } - } - ] - } - }, - { - "name": 
"failPoint", - "object": "testRunner", - "arguments": { - "client": "failPointClient", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "update" - ], - "blockConnection": true, - "blockTimeMS": 15 - } - } - } - }, - { - "name": "replaceOne", - "object": "collection", - "arguments": { - "filter": {}, - "replacement": { - "x": 1 - } - } - } - ], - "expectEvents": [ - { - "client": "client", - "events": [ - { - "commandStartedEvent": { - "commandName": "update", - "databaseName": "test", - "command": { - "update": "coll", - "maxTimeMS": { - "$$exists": false - } - } - } - } - ] - } - ] - }, - { - "description": "timeoutMS can be configured on a MongoCollection - updateOne on collection", - "operations": [ - { - "name": "createEntities", - "object": "testRunner", - "arguments": { - "entities": [ - { - "collection": { - "id": "collection", - "database": "database", - "collectionName": "coll", - "collectionOptions": { - "timeoutMS": 1000 - } - } - } - ] - } - }, - { - "name": "failPoint", - "object": "testRunner", - "arguments": { - "client": "failPointClient", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "update" - ], - "blockConnection": true, - "blockTimeMS": 15 - } - } - } - }, - { - "name": "updateOne", - "object": "collection", - "arguments": { - "filter": {}, - "update": { - "$set": { - "x": 1 - } - } - } - } - ], - "expectEvents": [ - { - "client": "client", - "events": [ - { - "commandStartedEvent": { - "commandName": "update", - "databaseName": "test", - "command": { - "update": "coll", - "maxTimeMS": { - "$$type": [ - "int", - "long" - ] - } - } - } - } - ] - } - ] - }, - { - "description": "timeoutMS can be set to 0 on a MongoCollection - updateOne on collection", - "operations": [ - { - "name": "createEntities", - "object": "testRunner", - "arguments": { - "entities": [ - { - "collection": { - "id": "collection", - "database": "database", - "collectionName": "coll", - "collectionOptions": { - "timeoutMS": 0 - } - } - } - ] - } - }, - { - "name": "failPoint", - "object": "testRunner", - "arguments": { - "client": "failPointClient", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "update" - ], - "blockConnection": true, - "blockTimeMS": 15 - } - } - } - }, - { - "name": "updateOne", - "object": "collection", - "arguments": { - "filter": {}, - "update": { - "$set": { - "x": 1 - } - } - } - } - ], - "expectEvents": [ - { - "client": "client", - "events": [ - { - "commandStartedEvent": { - "commandName": "update", - "databaseName": "test", - "command": { - "update": "coll", - "maxTimeMS": { - "$$exists": false - } - } - } - } - ] - } - ] - }, - { - "description": "timeoutMS can be configured on a MongoCollection - updateMany on collection", - "operations": [ - { - "name": "createEntities", - "object": "testRunner", - "arguments": { - "entities": [ - { - "collection": { - "id": "collection", - "database": "database", - "collectionName": "coll", - "collectionOptions": { - "timeoutMS": 1000 - } - } - } - ] - } - }, - { - "name": "failPoint", - "object": "testRunner", - "arguments": { - "client": "failPointClient", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "update" - ], - "blockConnection": true, - "blockTimeMS": 15 - } - } - } - }, - { - "name": "updateMany", - "object": "collection", - "arguments": 
{ - "filter": {}, - "update": { - "$set": { - "x": 1 - } - } - } - } - ], - "expectEvents": [ - { - "client": "client", - "events": [ - { - "commandStartedEvent": { - "commandName": "update", - "databaseName": "test", - "command": { - "update": "coll", - "maxTimeMS": { - "$$type": [ - "int", - "long" - ] - } - } - } - } - ] - } - ] - }, - { - "description": "timeoutMS can be set to 0 on a MongoCollection - updateMany on collection", - "operations": [ - { - "name": "createEntities", - "object": "testRunner", - "arguments": { - "entities": [ - { - "collection": { - "id": "collection", - "database": "database", - "collectionName": "coll", - "collectionOptions": { - "timeoutMS": 0 - } - } - } - ] - } - }, - { - "name": "failPoint", - "object": "testRunner", - "arguments": { - "client": "failPointClient", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "update" - ], - "blockConnection": true, - "blockTimeMS": 15 - } - } - } - }, - { - "name": "updateMany", - "object": "collection", - "arguments": { - "filter": {}, - "update": { - "$set": { - "x": 1 - } - } - } - } - ], - "expectEvents": [ - { - "client": "client", - "events": [ - { - "commandStartedEvent": { - "commandName": "update", - "databaseName": "test", - "command": { - "update": "coll", - "maxTimeMS": { - "$$exists": false - } - } - } - } - ] - } - ] - }, - { - "description": "timeoutMS can be configured on a MongoCollection - findOneAndDelete on collection", - "operations": [ - { - "name": "createEntities", - "object": "testRunner", - "arguments": { - "entities": [ - { - "collection": { - "id": "collection", - "database": "database", - "collectionName": "coll", - "collectionOptions": { - "timeoutMS": 1000 - } - } - } - ] - } - }, - { - "name": "failPoint", - "object": "testRunner", - "arguments": { - "client": "failPointClient", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "findAndModify" - ], - "blockConnection": true, - "blockTimeMS": 15 - } - } - } - }, - { - "name": "findOneAndDelete", - "object": "collection", - "arguments": { - "filter": {} - } - } - ], - "expectEvents": [ - { - "client": "client", - "events": [ - { - "commandStartedEvent": { - "commandName": "findAndModify", - "databaseName": "test", - "command": { - "findAndModify": "coll", - "maxTimeMS": { - "$$type": [ - "int", - "long" - ] - } - } - } - } - ] - } - ] - }, - { - "description": "timeoutMS can be set to 0 on a MongoCollection - findOneAndDelete on collection", - "operations": [ - { - "name": "createEntities", - "object": "testRunner", - "arguments": { - "entities": [ - { - "collection": { - "id": "collection", - "database": "database", - "collectionName": "coll", - "collectionOptions": { - "timeoutMS": 0 - } - } - } - ] - } - }, - { - "name": "failPoint", - "object": "testRunner", - "arguments": { - "client": "failPointClient", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "findAndModify" - ], - "blockConnection": true, - "blockTimeMS": 15 - } - } - } - }, - { - "name": "findOneAndDelete", - "object": "collection", - "arguments": { - "filter": {} - } - } - ], - "expectEvents": [ - { - "client": "client", - "events": [ - { - "commandStartedEvent": { - "commandName": "findAndModify", - "databaseName": "test", - "command": { - "findAndModify": "coll", - "maxTimeMS": { - "$$exists": false - } - } - } - } - ] - } - ] - }, - { - "description": 
"timeoutMS can be configured on a MongoCollection - findOneAndReplace on collection", - "operations": [ - { - "name": "createEntities", - "object": "testRunner", - "arguments": { - "entities": [ - { - "collection": { - "id": "collection", - "database": "database", - "collectionName": "coll", - "collectionOptions": { - "timeoutMS": 1000 - } - } - } - ] - } - }, - { - "name": "failPoint", - "object": "testRunner", - "arguments": { - "client": "failPointClient", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "findAndModify" - ], - "blockConnection": true, - "blockTimeMS": 15 - } - } - } - }, - { - "name": "findOneAndReplace", - "object": "collection", - "arguments": { - "filter": {}, - "replacement": { - "x": 1 - } - } - } - ], - "expectEvents": [ - { - "client": "client", - "events": [ - { - "commandStartedEvent": { - "commandName": "findAndModify", - "databaseName": "test", - "command": { - "findAndModify": "coll", - "maxTimeMS": { - "$$type": [ - "int", - "long" - ] - } - } - } - } - ] - } - ] - }, - { - "description": "timeoutMS can be set to 0 on a MongoCollection - findOneAndReplace on collection", - "operations": [ - { - "name": "createEntities", - "object": "testRunner", - "arguments": { - "entities": [ - { - "collection": { - "id": "collection", - "database": "database", - "collectionName": "coll", - "collectionOptions": { - "timeoutMS": 0 - } - } - } - ] - } - }, - { - "name": "failPoint", - "object": "testRunner", - "arguments": { - "client": "failPointClient", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "findAndModify" - ], - "blockConnection": true, - "blockTimeMS": 15 - } - } - } - }, - { - "name": "findOneAndReplace", - "object": "collection", - "arguments": { - "filter": {}, - "replacement": { - "x": 1 - } - } - } - ], - "expectEvents": [ - { - "client": "client", - "events": [ - { - "commandStartedEvent": { - "commandName": "findAndModify", - "databaseName": "test", - "command": { - "findAndModify": "coll", - "maxTimeMS": { - "$$exists": false - } - } - } - } - ] - } - ] - }, - { - "description": "timeoutMS can be configured on a MongoCollection - findOneAndUpdate on collection", - "operations": [ - { - "name": "createEntities", - "object": "testRunner", - "arguments": { - "entities": [ - { - "collection": { - "id": "collection", - "database": "database", - "collectionName": "coll", - "collectionOptions": { - "timeoutMS": 1000 - } - } - } - ] - } - }, - { - "name": "failPoint", - "object": "testRunner", - "arguments": { - "client": "failPointClient", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "findAndModify" - ], - "blockConnection": true, - "blockTimeMS": 15 - } - } - } - }, - { - "name": "findOneAndUpdate", - "object": "collection", - "arguments": { - "filter": {}, - "update": { - "$set": { - "x": 1 - } - } - } - } - ], - "expectEvents": [ - { - "client": "client", - "events": [ - { - "commandStartedEvent": { - "commandName": "findAndModify", - "databaseName": "test", - "command": { - "findAndModify": "coll", - "maxTimeMS": { - "$$type": [ - "int", - "long" - ] - } - } - } - } - ] - } - ] - }, - { - "description": "timeoutMS can be set to 0 on a MongoCollection - findOneAndUpdate on collection", - "operations": [ - { - "name": "createEntities", - "object": "testRunner", - "arguments": { - "entities": [ - { - "collection": { - "id": "collection", - 
"database": "database", - "collectionName": "coll", - "collectionOptions": { - "timeoutMS": 0 - } - } - } - ] - } - }, - { - "name": "failPoint", - "object": "testRunner", - "arguments": { - "client": "failPointClient", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "findAndModify" - ], - "blockConnection": true, - "blockTimeMS": 15 - } - } - } - }, - { - "name": "findOneAndUpdate", - "object": "collection", - "arguments": { - "filter": {}, - "update": { - "$set": { - "x": 1 - } - } - } - } - ], - "expectEvents": [ - { - "client": "client", - "events": [ - { - "commandStartedEvent": { - "commandName": "findAndModify", - "databaseName": "test", - "command": { - "findAndModify": "coll", - "maxTimeMS": { - "$$exists": false - } - } - } - } - ] - } - ] - }, - { - "description": "timeoutMS can be configured on a MongoCollection - bulkWrite on collection", - "operations": [ - { - "name": "createEntities", - "object": "testRunner", - "arguments": { - "entities": [ - { - "collection": { - "id": "collection", - "database": "database", - "collectionName": "coll", - "collectionOptions": { - "timeoutMS": 1000 - } - } - } - ] - } - }, - { - "name": "failPoint", - "object": "testRunner", - "arguments": { - "client": "failPointClient", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "insert" - ], - "blockConnection": true, - "blockTimeMS": 15 - } - } - } - }, - { - "name": "bulkWrite", - "object": "collection", - "arguments": { - "requests": [ - { - "insertOne": { - "document": { - "_id": 1 - } - } - } - ] - } - } - ], - "expectEvents": [ - { - "client": "client", - "events": [ - { - "commandStartedEvent": { - "commandName": "insert", - "databaseName": "test", - "command": { - "insert": "coll", - "maxTimeMS": { - "$$type": [ - "int", - "long" - ] - } - } - } - } - ] - } - ] - }, - { - "description": "timeoutMS can be set to 0 on a MongoCollection - bulkWrite on collection", - "operations": [ - { - "name": "createEntities", - "object": "testRunner", - "arguments": { - "entities": [ - { - "collection": { - "id": "collection", - "database": "database", - "collectionName": "coll", - "collectionOptions": { - "timeoutMS": 0 - } - } - } - ] - } - }, - { - "name": "failPoint", - "object": "testRunner", - "arguments": { - "client": "failPointClient", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "insert" - ], - "blockConnection": true, - "blockTimeMS": 15 - } - } - } - }, - { - "name": "bulkWrite", - "object": "collection", - "arguments": { - "requests": [ - { - "insertOne": { - "document": { - "_id": 1 - } - } - } - ] - } - } - ], - "expectEvents": [ - { - "client": "client", - "events": [ - { - "commandStartedEvent": { - "commandName": "insert", - "databaseName": "test", - "command": { - "insert": "coll", - "maxTimeMS": { - "$$exists": false - } - } - } - } - ] - } - ] - }, - { - "description": "timeoutMS can be configured on a MongoCollection - createIndex on collection", - "operations": [ - { - "name": "createEntities", - "object": "testRunner", - "arguments": { - "entities": [ - { - "collection": { - "id": "collection", - "database": "database", - "collectionName": "coll", - "collectionOptions": { - "timeoutMS": 1000 - } - } - } - ] - } - }, - { - "name": "failPoint", - "object": "testRunner", - "arguments": { - "client": "failPointClient", - "failPoint": { - "configureFailPoint": 
"failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "createIndexes" - ], - "blockConnection": true, - "blockTimeMS": 15 - } - } - } - }, - { - "name": "createIndex", - "object": "collection", - "arguments": { - "keys": { - "x": 1 - }, - "name": "x_1" - } - } - ], - "expectEvents": [ - { - "client": "client", - "events": [ - { - "commandStartedEvent": { - "commandName": "createIndexes", - "databaseName": "test", - "command": { - "createIndexes": "coll", - "maxTimeMS": { - "$$type": [ - "int", - "long" - ] - } - } - } - } - ] - } - ] - }, - { - "description": "timeoutMS can be set to 0 on a MongoCollection - createIndex on collection", - "operations": [ - { - "name": "createEntities", - "object": "testRunner", - "arguments": { - "entities": [ - { - "collection": { - "id": "collection", - "database": "database", - "collectionName": "coll", - "collectionOptions": { - "timeoutMS": 0 - } - } - } - ] - } - }, - { - "name": "failPoint", - "object": "testRunner", - "arguments": { - "client": "failPointClient", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "createIndexes" - ], - "blockConnection": true, - "blockTimeMS": 15 - } - } - } - }, - { - "name": "createIndex", - "object": "collection", - "arguments": { - "keys": { - "x": 1 - }, - "name": "x_1" - } - } - ], - "expectEvents": [ - { - "client": "client", - "events": [ - { - "commandStartedEvent": { - "commandName": "createIndexes", - "databaseName": "test", - "command": { - "createIndexes": "coll", - "maxTimeMS": { - "$$exists": false - } - } - } - } - ] - } - ] - }, - { - "description": "timeoutMS can be configured on a MongoCollection - dropIndex on collection", - "operations": [ - { - "name": "createEntities", - "object": "testRunner", - "arguments": { - "entities": [ - { - "collection": { - "id": "collection", - "database": "database", - "collectionName": "coll", - "collectionOptions": { - "timeoutMS": 1000 - } - } - } - ] - } - }, - { - "name": "failPoint", - "object": "testRunner", - "arguments": { - "client": "failPointClient", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "dropIndexes" - ], - "blockConnection": true, - "blockTimeMS": 15 - } - } - } - }, - { - "name": "dropIndex", - "object": "collection", - "arguments": { - "name": "x_1" - }, - "expectError": { - "isClientError": false, - "isTimeoutError": false - } - } - ], - "expectEvents": [ - { - "client": "client", - "events": [ - { - "commandStartedEvent": { - "commandName": "dropIndexes", - "databaseName": "test", - "command": { - "dropIndexes": "coll", - "maxTimeMS": { - "$$type": [ - "int", - "long" - ] - } - } - } - } - ] - } - ] - }, - { - "description": "timeoutMS can be set to 0 on a MongoCollection - dropIndex on collection", - "operations": [ - { - "name": "createEntities", - "object": "testRunner", - "arguments": { - "entities": [ - { - "collection": { - "id": "collection", - "database": "database", - "collectionName": "coll", - "collectionOptions": { - "timeoutMS": 0 - } - } - } - ] - } - }, - { - "name": "failPoint", - "object": "testRunner", - "arguments": { - "client": "failPointClient", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "dropIndexes" - ], - "blockConnection": true, - "blockTimeMS": 15 - } - } - } - }, - { - "name": "dropIndex", - "object": "collection", - "arguments": { - "name": "x_1" - }, - "expectError": { 
- "isClientError": false, - "isTimeoutError": false - } - } - ], - "expectEvents": [ - { - "client": "client", - "events": [ - { - "commandStartedEvent": { - "commandName": "dropIndexes", - "databaseName": "test", - "command": { - "dropIndexes": "coll", - "maxTimeMS": { - "$$exists": false - } - } - } - } - ] - } - ] - }, - { - "description": "timeoutMS can be configured on a MongoCollection - dropIndexes on collection", - "operations": [ - { - "name": "createEntities", - "object": "testRunner", - "arguments": { - "entities": [ - { - "collection": { - "id": "collection", - "database": "database", - "collectionName": "coll", - "collectionOptions": { - "timeoutMS": 1000 - } - } - } - ] - } - }, - { - "name": "failPoint", - "object": "testRunner", - "arguments": { - "client": "failPointClient", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "dropIndexes" - ], - "blockConnection": true, - "blockTimeMS": 15 - } - } - } - }, - { - "name": "dropIndexes", - "object": "collection" - } - ], - "expectEvents": [ - { - "client": "client", - "events": [ - { - "commandStartedEvent": { - "commandName": "dropIndexes", - "databaseName": "test", - "command": { - "dropIndexes": "coll", - "maxTimeMS": { - "$$type": [ - "int", - "long" - ] - } - } - } - } - ] - } - ] - }, - { - "description": "timeoutMS can be set to 0 on a MongoCollection - dropIndexes on collection", - "operations": [ - { - "name": "createEntities", - "object": "testRunner", - "arguments": { - "entities": [ - { - "collection": { - "id": "collection", - "database": "database", - "collectionName": "coll", - "collectionOptions": { - "timeoutMS": 0 - } - } - } - ] - } - }, - { - "name": "failPoint", - "object": "testRunner", - "arguments": { - "client": "failPointClient", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "dropIndexes" - ], - "blockConnection": true, - "blockTimeMS": 15 - } - } - } - }, - { - "name": "dropIndexes", - "object": "collection" - } - ], - "expectEvents": [ - { - "client": "client", - "events": [ - { - "commandStartedEvent": { - "commandName": "dropIndexes", - "databaseName": "test", - "command": { - "dropIndexes": "coll", - "maxTimeMS": { - "$$exists": false - } - } - } - } - ] - } - ] - } - ] -} diff --git a/test/csot/override-database-timeoutMS.json b/test/csot/override-database-timeoutMS.json deleted file mode 100644 index 9c1b77f903..0000000000 --- a/test/csot/override-database-timeoutMS.json +++ /dev/null @@ -1,4622 +0,0 @@ -{ - "description": "timeoutMS can be overridden for a MongoDatabase", - "schemaVersion": "1.9", - "runOnRequirements": [ - { - "minServerVersion": "4.4", - "topologies": [ - "replicaset", - "sharded-replicaset" - ] - } - ], - "createEntities": [ - { - "client": { - "id": "failPointClient", - "useMultipleMongoses": false - } - }, - { - "client": { - "id": "client", - "uriOptions": { - "timeoutMS": 10 - }, - "useMultipleMongoses": false, - "observeEvents": [ - "commandStartedEvent" - ], - "ignoreCommandMonitoringEvents": [ - "killCursors" - ] - } - } - ], - "initialData": [ - { - "collectionName": "coll", - "databaseName": "test", - "documents": [] - } - ], - "tests": [ - { - "description": "timeoutMS can be configured on a MongoDatabase - aggregate on database", - "operations": [ - { - "name": "createEntities", - "object": "testRunner", - "arguments": { - "entities": [ - { - "database": { - "id": "database", - "client": "client", - "databaseName": 
"test", - "databaseOptions": { - "timeoutMS": 1000 - } - } - }, - { - "collection": { - "id": "collection", - "database": "database", - "collectionName": "coll" - } - } - ] - } - }, - { - "name": "failPoint", - "object": "testRunner", - "arguments": { - "client": "failPointClient", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "aggregate" - ], - "blockConnection": true, - "blockTimeMS": 15 - } - } - } - }, - { - "name": "aggregate", - "object": "database", - "arguments": { - "pipeline": [ - { - "$listLocalSessions": {} - }, - { - "$limit": 1 - } - ] - } - } - ], - "expectEvents": [ - { - "client": "client", - "events": [ - { - "commandStartedEvent": { - "commandName": "aggregate", - "databaseName": "test", - "command": { - "aggregate": 1, - "maxTimeMS": { - "$$type": [ - "int", - "long" - ] - } - } - } - } - ] - } - ] - }, - { - "description": "timeoutMS can be set to 0 on a MongoDatabase - aggregate on database", - "operations": [ - { - "name": "createEntities", - "object": "testRunner", - "arguments": { - "entities": [ - { - "database": { - "id": "database", - "client": "client", - "databaseName": "test", - "databaseOptions": { - "timeoutMS": 0 - } - } - }, - { - "collection": { - "id": "collection", - "database": "database", - "collectionName": "coll" - } - } - ] - } - }, - { - "name": "failPoint", - "object": "testRunner", - "arguments": { - "client": "failPointClient", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "aggregate" - ], - "blockConnection": true, - "blockTimeMS": 15 - } - } - } - }, - { - "name": "aggregate", - "object": "database", - "arguments": { - "pipeline": [ - { - "$listLocalSessions": {} - }, - { - "$limit": 1 - } - ] - } - } - ], - "expectEvents": [ - { - "client": "client", - "events": [ - { - "commandStartedEvent": { - "commandName": "aggregate", - "databaseName": "test", - "command": { - "aggregate": 1, - "maxTimeMS": { - "$$exists": false - } - } - } - } - ] - } - ] - }, - { - "description": "timeoutMS can be configured on a MongoDatabase - listCollections on database", - "operations": [ - { - "name": "createEntities", - "object": "testRunner", - "arguments": { - "entities": [ - { - "database": { - "id": "database", - "client": "client", - "databaseName": "test", - "databaseOptions": { - "timeoutMS": 1000 - } - } - }, - { - "collection": { - "id": "collection", - "database": "database", - "collectionName": "coll" - } - } - ] - } - }, - { - "name": "failPoint", - "object": "testRunner", - "arguments": { - "client": "failPointClient", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "listCollections" - ], - "blockConnection": true, - "blockTimeMS": 15 - } - } - } - }, - { - "name": "listCollections", - "object": "database", - "arguments": { - "filter": {} - } - } - ], - "expectEvents": [ - { - "client": "client", - "events": [ - { - "commandStartedEvent": { - "commandName": "listCollections", - "databaseName": "test", - "command": { - "listCollections": 1, - "maxTimeMS": { - "$$type": [ - "int", - "long" - ] - } - } - } - } - ] - } - ] - }, - { - "description": "timeoutMS can be set to 0 on a MongoDatabase - listCollections on database", - "operations": [ - { - "name": "createEntities", - "object": "testRunner", - "arguments": { - "entities": [ - { - "database": { - "id": "database", - "client": "client", - "databaseName": "test", - 
"databaseOptions": { - "timeoutMS": 0 - } - } - }, - { - "collection": { - "id": "collection", - "database": "database", - "collectionName": "coll" - } - } - ] - } - }, - { - "name": "failPoint", - "object": "testRunner", - "arguments": { - "client": "failPointClient", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "listCollections" - ], - "blockConnection": true, - "blockTimeMS": 15 - } - } - } - }, - { - "name": "listCollections", - "object": "database", - "arguments": { - "filter": {} - } - } - ], - "expectEvents": [ - { - "client": "client", - "events": [ - { - "commandStartedEvent": { - "commandName": "listCollections", - "databaseName": "test", - "command": { - "listCollections": 1, - "maxTimeMS": { - "$$exists": false - } - } - } - } - ] - } - ] - }, - { - "description": "timeoutMS can be configured on a MongoDatabase - listCollectionNames on database", - "operations": [ - { - "name": "createEntities", - "object": "testRunner", - "arguments": { - "entities": [ - { - "database": { - "id": "database", - "client": "client", - "databaseName": "test", - "databaseOptions": { - "timeoutMS": 1000 - } - } - }, - { - "collection": { - "id": "collection", - "database": "database", - "collectionName": "coll" - } - } - ] - } - }, - { - "name": "failPoint", - "object": "testRunner", - "arguments": { - "client": "failPointClient", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "listCollections" - ], - "blockConnection": true, - "blockTimeMS": 15 - } - } - } - }, - { - "name": "listCollectionNames", - "object": "database", - "arguments": { - "filter": {} - } - } - ], - "expectEvents": [ - { - "client": "client", - "events": [ - { - "commandStartedEvent": { - "commandName": "listCollections", - "databaseName": "test", - "command": { - "listCollections": 1, - "maxTimeMS": { - "$$type": [ - "int", - "long" - ] - } - } - } - } - ] - } - ] - }, - { - "description": "timeoutMS can be set to 0 on a MongoDatabase - listCollectionNames on database", - "operations": [ - { - "name": "createEntities", - "object": "testRunner", - "arguments": { - "entities": [ - { - "database": { - "id": "database", - "client": "client", - "databaseName": "test", - "databaseOptions": { - "timeoutMS": 0 - } - } - }, - { - "collection": { - "id": "collection", - "database": "database", - "collectionName": "coll" - } - } - ] - } - }, - { - "name": "failPoint", - "object": "testRunner", - "arguments": { - "client": "failPointClient", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "listCollections" - ], - "blockConnection": true, - "blockTimeMS": 15 - } - } - } - }, - { - "name": "listCollectionNames", - "object": "database", - "arguments": { - "filter": {} - } - } - ], - "expectEvents": [ - { - "client": "client", - "events": [ - { - "commandStartedEvent": { - "commandName": "listCollections", - "databaseName": "test", - "command": { - "listCollections": 1, - "maxTimeMS": { - "$$exists": false - } - } - } - } - ] - } - ] - }, - { - "description": "timeoutMS can be configured on a MongoDatabase - runCommand on database", - "operations": [ - { - "name": "createEntities", - "object": "testRunner", - "arguments": { - "entities": [ - { - "database": { - "id": "database", - "client": "client", - "databaseName": "test", - "databaseOptions": { - "timeoutMS": 1000 - } - } - }, - { - "collection": { - "id": "collection", - 
"database": "database", - "collectionName": "coll" - } - } - ] - } - }, - { - "name": "failPoint", - "object": "testRunner", - "arguments": { - "client": "failPointClient", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "ping" - ], - "blockConnection": true, - "blockTimeMS": 15 - } - } - } - }, - { - "name": "runCommand", - "object": "database", - "arguments": { - "command": { - "ping": 1 - }, - "commandName": "ping" - } - } - ], - "expectEvents": [ - { - "client": "client", - "events": [ - { - "commandStartedEvent": { - "commandName": "ping", - "databaseName": "test", - "command": { - "ping": 1, - "maxTimeMS": { - "$$type": [ - "int", - "long" - ] - } - } - } - } - ] - } - ] - }, - { - "description": "timeoutMS can be set to 0 on a MongoDatabase - runCommand on database", - "operations": [ - { - "name": "createEntities", - "object": "testRunner", - "arguments": { - "entities": [ - { - "database": { - "id": "database", - "client": "client", - "databaseName": "test", - "databaseOptions": { - "timeoutMS": 0 - } - } - }, - { - "collection": { - "id": "collection", - "database": "database", - "collectionName": "coll" - } - } - ] - } - }, - { - "name": "failPoint", - "object": "testRunner", - "arguments": { - "client": "failPointClient", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "ping" - ], - "blockConnection": true, - "blockTimeMS": 15 - } - } - } - }, - { - "name": "runCommand", - "object": "database", - "arguments": { - "command": { - "ping": 1 - }, - "commandName": "ping" - } - } - ], - "expectEvents": [ - { - "client": "client", - "events": [ - { - "commandStartedEvent": { - "commandName": "ping", - "databaseName": "test", - "command": { - "ping": 1, - "maxTimeMS": { - "$$exists": false - } - } - } - } - ] - } - ] - }, - { - "description": "timeoutMS can be configured on a MongoDatabase - createChangeStream on database", - "operations": [ - { - "name": "createEntities", - "object": "testRunner", - "arguments": { - "entities": [ - { - "database": { - "id": "database", - "client": "client", - "databaseName": "test", - "databaseOptions": { - "timeoutMS": 1000 - } - } - }, - { - "collection": { - "id": "collection", - "database": "database", - "collectionName": "coll" - } - } - ] - } - }, - { - "name": "failPoint", - "object": "testRunner", - "arguments": { - "client": "failPointClient", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "aggregate" - ], - "blockConnection": true, - "blockTimeMS": 15 - } - } - } - }, - { - "name": "createChangeStream", - "object": "database", - "arguments": { - "pipeline": [] - } - } - ], - "expectEvents": [ - { - "client": "client", - "events": [ - { - "commandStartedEvent": { - "commandName": "aggregate", - "databaseName": "test", - "command": { - "aggregate": 1, - "maxTimeMS": { - "$$type": [ - "int", - "long" - ] - } - } - } - } - ] - } - ] - }, - { - "description": "timeoutMS can be set to 0 on a MongoDatabase - createChangeStream on database", - "operations": [ - { - "name": "createEntities", - "object": "testRunner", - "arguments": { - "entities": [ - { - "database": { - "id": "database", - "client": "client", - "databaseName": "test", - "databaseOptions": { - "timeoutMS": 0 - } - } - }, - { - "collection": { - "id": "collection", - "database": "database", - "collectionName": "coll" - } - } - ] - } - }, - { - "name": "failPoint", - 
"object": "testRunner", - "arguments": { - "client": "failPointClient", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "aggregate" - ], - "blockConnection": true, - "blockTimeMS": 15 - } - } - } - }, - { - "name": "createChangeStream", - "object": "database", - "arguments": { - "pipeline": [] - } - } - ], - "expectEvents": [ - { - "client": "client", - "events": [ - { - "commandStartedEvent": { - "commandName": "aggregate", - "databaseName": "test", - "command": { - "aggregate": 1, - "maxTimeMS": { - "$$exists": false - } - } - } - } - ] - } - ] - }, - { - "description": "timeoutMS can be configured on a MongoDatabase - aggregate on collection", - "operations": [ - { - "name": "createEntities", - "object": "testRunner", - "arguments": { - "entities": [ - { - "database": { - "id": "database", - "client": "client", - "databaseName": "test", - "databaseOptions": { - "timeoutMS": 1000 - } - } - }, - { - "collection": { - "id": "collection", - "database": "database", - "collectionName": "coll" - } - } - ] - } - }, - { - "name": "failPoint", - "object": "testRunner", - "arguments": { - "client": "failPointClient", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "aggregate" - ], - "blockConnection": true, - "blockTimeMS": 15 - } - } - } - }, - { - "name": "aggregate", - "object": "collection", - "arguments": { - "pipeline": [] - } - } - ], - "expectEvents": [ - { - "client": "client", - "events": [ - { - "commandStartedEvent": { - "commandName": "aggregate", - "databaseName": "test", - "command": { - "aggregate": "coll", - "maxTimeMS": { - "$$type": [ - "int", - "long" - ] - } - } - } - } - ] - } - ] - }, - { - "description": "timeoutMS can be set to 0 on a MongoDatabase - aggregate on collection", - "operations": [ - { - "name": "createEntities", - "object": "testRunner", - "arguments": { - "entities": [ - { - "database": { - "id": "database", - "client": "client", - "databaseName": "test", - "databaseOptions": { - "timeoutMS": 0 - } - } - }, - { - "collection": { - "id": "collection", - "database": "database", - "collectionName": "coll" - } - } - ] - } - }, - { - "name": "failPoint", - "object": "testRunner", - "arguments": { - "client": "failPointClient", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "aggregate" - ], - "blockConnection": true, - "blockTimeMS": 15 - } - } - } - }, - { - "name": "aggregate", - "object": "collection", - "arguments": { - "pipeline": [] - } - } - ], - "expectEvents": [ - { - "client": "client", - "events": [ - { - "commandStartedEvent": { - "commandName": "aggregate", - "databaseName": "test", - "command": { - "aggregate": "coll", - "maxTimeMS": { - "$$exists": false - } - } - } - } - ] - } - ] - }, - { - "description": "timeoutMS can be configured on a MongoDatabase - count on collection", - "operations": [ - { - "name": "createEntities", - "object": "testRunner", - "arguments": { - "entities": [ - { - "database": { - "id": "database", - "client": "client", - "databaseName": "test", - "databaseOptions": { - "timeoutMS": 1000 - } - } - }, - { - "collection": { - "id": "collection", - "database": "database", - "collectionName": "coll" - } - } - ] - } - }, - { - "name": "failPoint", - "object": "testRunner", - "arguments": { - "client": "failPointClient", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { 
- "failCommands": [ - "count" - ], - "blockConnection": true, - "blockTimeMS": 15 - } - } - } - }, - { - "name": "count", - "object": "collection", - "arguments": { - "filter": {} - } - } - ], - "expectEvents": [ - { - "client": "client", - "events": [ - { - "commandStartedEvent": { - "commandName": "count", - "databaseName": "test", - "command": { - "count": "coll", - "maxTimeMS": { - "$$type": [ - "int", - "long" - ] - } - } - } - } - ] - } - ] - }, - { - "description": "timeoutMS can be set to 0 on a MongoDatabase - count on collection", - "operations": [ - { - "name": "createEntities", - "object": "testRunner", - "arguments": { - "entities": [ - { - "database": { - "id": "database", - "client": "client", - "databaseName": "test", - "databaseOptions": { - "timeoutMS": 0 - } - } - }, - { - "collection": { - "id": "collection", - "database": "database", - "collectionName": "coll" - } - } - ] - } - }, - { - "name": "failPoint", - "object": "testRunner", - "arguments": { - "client": "failPointClient", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "count" - ], - "blockConnection": true, - "blockTimeMS": 15 - } - } - } - }, - { - "name": "count", - "object": "collection", - "arguments": { - "filter": {} - } - } - ], - "expectEvents": [ - { - "client": "client", - "events": [ - { - "commandStartedEvent": { - "commandName": "count", - "databaseName": "test", - "command": { - "count": "coll", - "maxTimeMS": { - "$$exists": false - } - } - } - } - ] - } - ] - }, - { - "description": "timeoutMS can be configured on a MongoDatabase - countDocuments on collection", - "operations": [ - { - "name": "createEntities", - "object": "testRunner", - "arguments": { - "entities": [ - { - "database": { - "id": "database", - "client": "client", - "databaseName": "test", - "databaseOptions": { - "timeoutMS": 1000 - } - } - }, - { - "collection": { - "id": "collection", - "database": "database", - "collectionName": "coll" - } - } - ] - } - }, - { - "name": "failPoint", - "object": "testRunner", - "arguments": { - "client": "failPointClient", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "aggregate" - ], - "blockConnection": true, - "blockTimeMS": 15 - } - } - } - }, - { - "name": "countDocuments", - "object": "collection", - "arguments": { - "filter": {} - } - } - ], - "expectEvents": [ - { - "client": "client", - "events": [ - { - "commandStartedEvent": { - "commandName": "aggregate", - "databaseName": "test", - "command": { - "aggregate": "coll", - "maxTimeMS": { - "$$type": [ - "int", - "long" - ] - } - } - } - } - ] - } - ] - }, - { - "description": "timeoutMS can be set to 0 on a MongoDatabase - countDocuments on collection", - "operations": [ - { - "name": "createEntities", - "object": "testRunner", - "arguments": { - "entities": [ - { - "database": { - "id": "database", - "client": "client", - "databaseName": "test", - "databaseOptions": { - "timeoutMS": 0 - } - } - }, - { - "collection": { - "id": "collection", - "database": "database", - "collectionName": "coll" - } - } - ] - } - }, - { - "name": "failPoint", - "object": "testRunner", - "arguments": { - "client": "failPointClient", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "aggregate" - ], - "blockConnection": true, - "blockTimeMS": 15 - } - } - } - }, - { - "name": "countDocuments", - "object": "collection", - "arguments": { - 
"filter": {} - } - } - ], - "expectEvents": [ - { - "client": "client", - "events": [ - { - "commandStartedEvent": { - "commandName": "aggregate", - "databaseName": "test", - "command": { - "aggregate": "coll", - "maxTimeMS": { - "$$exists": false - } - } - } - } - ] - } - ] - }, - { - "description": "timeoutMS can be configured on a MongoDatabase - estimatedDocumentCount on collection", - "operations": [ - { - "name": "createEntities", - "object": "testRunner", - "arguments": { - "entities": [ - { - "database": { - "id": "database", - "client": "client", - "databaseName": "test", - "databaseOptions": { - "timeoutMS": 1000 - } - } - }, - { - "collection": { - "id": "collection", - "database": "database", - "collectionName": "coll" - } - } - ] - } - }, - { - "name": "failPoint", - "object": "testRunner", - "arguments": { - "client": "failPointClient", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "count" - ], - "blockConnection": true, - "blockTimeMS": 15 - } - } - } - }, - { - "name": "estimatedDocumentCount", - "object": "collection" - } - ], - "expectEvents": [ - { - "client": "client", - "events": [ - { - "commandStartedEvent": { - "commandName": "count", - "databaseName": "test", - "command": { - "count": "coll", - "maxTimeMS": { - "$$type": [ - "int", - "long" - ] - } - } - } - } - ] - } - ] - }, - { - "description": "timeoutMS can be set to 0 on a MongoDatabase - estimatedDocumentCount on collection", - "operations": [ - { - "name": "createEntities", - "object": "testRunner", - "arguments": { - "entities": [ - { - "database": { - "id": "database", - "client": "client", - "databaseName": "test", - "databaseOptions": { - "timeoutMS": 0 - } - } - }, - { - "collection": { - "id": "collection", - "database": "database", - "collectionName": "coll" - } - } - ] - } - }, - { - "name": "failPoint", - "object": "testRunner", - "arguments": { - "client": "failPointClient", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "count" - ], - "blockConnection": true, - "blockTimeMS": 15 - } - } - } - }, - { - "name": "estimatedDocumentCount", - "object": "collection" - } - ], - "expectEvents": [ - { - "client": "client", - "events": [ - { - "commandStartedEvent": { - "commandName": "count", - "databaseName": "test", - "command": { - "count": "coll", - "maxTimeMS": { - "$$exists": false - } - } - } - } - ] - } - ] - }, - { - "description": "timeoutMS can be configured on a MongoDatabase - distinct on collection", - "operations": [ - { - "name": "createEntities", - "object": "testRunner", - "arguments": { - "entities": [ - { - "database": { - "id": "database", - "client": "client", - "databaseName": "test", - "databaseOptions": { - "timeoutMS": 1000 - } - } - }, - { - "collection": { - "id": "collection", - "database": "database", - "collectionName": "coll" - } - } - ] - } - }, - { - "name": "failPoint", - "object": "testRunner", - "arguments": { - "client": "failPointClient", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "distinct" - ], - "blockConnection": true, - "blockTimeMS": 15 - } - } - } - }, - { - "name": "distinct", - "object": "collection", - "arguments": { - "fieldName": "x", - "filter": {} - } - } - ], - "expectEvents": [ - { - "client": "client", - "events": [ - { - "commandStartedEvent": { - "commandName": "distinct", - "databaseName": "test", - "command": { - "distinct": 
"coll", - "maxTimeMS": { - "$$type": [ - "int", - "long" - ] - } - } - } - } - ] - } - ] - }, - { - "description": "timeoutMS can be set to 0 on a MongoDatabase - distinct on collection", - "operations": [ - { - "name": "createEntities", - "object": "testRunner", - "arguments": { - "entities": [ - { - "database": { - "id": "database", - "client": "client", - "databaseName": "test", - "databaseOptions": { - "timeoutMS": 0 - } - } - }, - { - "collection": { - "id": "collection", - "database": "database", - "collectionName": "coll" - } - } - ] - } - }, - { - "name": "failPoint", - "object": "testRunner", - "arguments": { - "client": "failPointClient", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "distinct" - ], - "blockConnection": true, - "blockTimeMS": 15 - } - } - } - }, - { - "name": "distinct", - "object": "collection", - "arguments": { - "fieldName": "x", - "filter": {} - } - } - ], - "expectEvents": [ - { - "client": "client", - "events": [ - { - "commandStartedEvent": { - "commandName": "distinct", - "databaseName": "test", - "command": { - "distinct": "coll", - "maxTimeMS": { - "$$exists": false - } - } - } - } - ] - } - ] - }, - { - "description": "timeoutMS can be configured on a MongoDatabase - find on collection", - "operations": [ - { - "name": "createEntities", - "object": "testRunner", - "arguments": { - "entities": [ - { - "database": { - "id": "database", - "client": "client", - "databaseName": "test", - "databaseOptions": { - "timeoutMS": 1000 - } - } - }, - { - "collection": { - "id": "collection", - "database": "database", - "collectionName": "coll" - } - } - ] - } - }, - { - "name": "failPoint", - "object": "testRunner", - "arguments": { - "client": "failPointClient", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "find" - ], - "blockConnection": true, - "blockTimeMS": 15 - } - } - } - }, - { - "name": "find", - "object": "collection", - "arguments": { - "filter": {} - } - } - ], - "expectEvents": [ - { - "client": "client", - "events": [ - { - "commandStartedEvent": { - "commandName": "find", - "databaseName": "test", - "command": { - "find": "coll", - "maxTimeMS": { - "$$type": [ - "int", - "long" - ] - } - } - } - } - ] - } - ] - }, - { - "description": "timeoutMS can be set to 0 on a MongoDatabase - find on collection", - "operations": [ - { - "name": "createEntities", - "object": "testRunner", - "arguments": { - "entities": [ - { - "database": { - "id": "database", - "client": "client", - "databaseName": "test", - "databaseOptions": { - "timeoutMS": 0 - } - } - }, - { - "collection": { - "id": "collection", - "database": "database", - "collectionName": "coll" - } - } - ] - } - }, - { - "name": "failPoint", - "object": "testRunner", - "arguments": { - "client": "failPointClient", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "find" - ], - "blockConnection": true, - "blockTimeMS": 15 - } - } - } - }, - { - "name": "find", - "object": "collection", - "arguments": { - "filter": {} - } - } - ], - "expectEvents": [ - { - "client": "client", - "events": [ - { - "commandStartedEvent": { - "commandName": "find", - "databaseName": "test", - "command": { - "find": "coll", - "maxTimeMS": { - "$$exists": false - } - } - } - } - ] - } - ] - }, - { - "description": "timeoutMS can be configured on a MongoDatabase - findOne on collection", - "operations": [ - { - 
"name": "createEntities", - "object": "testRunner", - "arguments": { - "entities": [ - { - "database": { - "id": "database", - "client": "client", - "databaseName": "test", - "databaseOptions": { - "timeoutMS": 1000 - } - } - }, - { - "collection": { - "id": "collection", - "database": "database", - "collectionName": "coll" - } - } - ] - } - }, - { - "name": "failPoint", - "object": "testRunner", - "arguments": { - "client": "failPointClient", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "find" - ], - "blockConnection": true, - "blockTimeMS": 15 - } - } - } - }, - { - "name": "findOne", - "object": "collection", - "arguments": { - "filter": {} - } - } - ], - "expectEvents": [ - { - "client": "client", - "events": [ - { - "commandStartedEvent": { - "commandName": "find", - "databaseName": "test", - "command": { - "find": "coll", - "maxTimeMS": { - "$$type": [ - "int", - "long" - ] - } - } - } - } - ] - } - ] - }, - { - "description": "timeoutMS can be set to 0 on a MongoDatabase - findOne on collection", - "operations": [ - { - "name": "createEntities", - "object": "testRunner", - "arguments": { - "entities": [ - { - "database": { - "id": "database", - "client": "client", - "databaseName": "test", - "databaseOptions": { - "timeoutMS": 0 - } - } - }, - { - "collection": { - "id": "collection", - "database": "database", - "collectionName": "coll" - } - } - ] - } - }, - { - "name": "failPoint", - "object": "testRunner", - "arguments": { - "client": "failPointClient", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "find" - ], - "blockConnection": true, - "blockTimeMS": 15 - } - } - } - }, - { - "name": "findOne", - "object": "collection", - "arguments": { - "filter": {} - } - } - ], - "expectEvents": [ - { - "client": "client", - "events": [ - { - "commandStartedEvent": { - "commandName": "find", - "databaseName": "test", - "command": { - "find": "coll", - "maxTimeMS": { - "$$exists": false - } - } - } - } - ] - } - ] - }, - { - "description": "timeoutMS can be configured on a MongoDatabase - listIndexes on collection", - "operations": [ - { - "name": "createEntities", - "object": "testRunner", - "arguments": { - "entities": [ - { - "database": { - "id": "database", - "client": "client", - "databaseName": "test", - "databaseOptions": { - "timeoutMS": 1000 - } - } - }, - { - "collection": { - "id": "collection", - "database": "database", - "collectionName": "coll" - } - } - ] - } - }, - { - "name": "failPoint", - "object": "testRunner", - "arguments": { - "client": "failPointClient", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "listIndexes" - ], - "blockConnection": true, - "blockTimeMS": 15 - } - } - } - }, - { - "name": "listIndexes", - "object": "collection" - } - ], - "expectEvents": [ - { - "client": "client", - "events": [ - { - "commandStartedEvent": { - "commandName": "listIndexes", - "databaseName": "test", - "command": { - "listIndexes": "coll", - "maxTimeMS": { - "$$type": [ - "int", - "long" - ] - } - } - } - } - ] - } - ] - }, - { - "description": "timeoutMS can be set to 0 on a MongoDatabase - listIndexes on collection", - "operations": [ - { - "name": "createEntities", - "object": "testRunner", - "arguments": { - "entities": [ - { - "database": { - "id": "database", - "client": "client", - "databaseName": "test", - "databaseOptions": { - "timeoutMS": 0 - } - } 
- }, - { - "collection": { - "id": "collection", - "database": "database", - "collectionName": "coll" - } - } - ] - } - }, - { - "name": "failPoint", - "object": "testRunner", - "arguments": { - "client": "failPointClient", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "listIndexes" - ], - "blockConnection": true, - "blockTimeMS": 15 - } - } - } - }, - { - "name": "listIndexes", - "object": "collection" - } - ], - "expectEvents": [ - { - "client": "client", - "events": [ - { - "commandStartedEvent": { - "commandName": "listIndexes", - "databaseName": "test", - "command": { - "listIndexes": "coll", - "maxTimeMS": { - "$$exists": false - } - } - } - } - ] - } - ] - }, - { - "description": "timeoutMS can be configured on a MongoDatabase - listIndexNames on collection", - "operations": [ - { - "name": "createEntities", - "object": "testRunner", - "arguments": { - "entities": [ - { - "database": { - "id": "database", - "client": "client", - "databaseName": "test", - "databaseOptions": { - "timeoutMS": 1000 - } - } - }, - { - "collection": { - "id": "collection", - "database": "database", - "collectionName": "coll" - } - } - ] - } - }, - { - "name": "failPoint", - "object": "testRunner", - "arguments": { - "client": "failPointClient", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "listIndexes" - ], - "blockConnection": true, - "blockTimeMS": 15 - } - } - } - }, - { - "name": "listIndexNames", - "object": "collection" - } - ], - "expectEvents": [ - { - "client": "client", - "events": [ - { - "commandStartedEvent": { - "commandName": "listIndexes", - "databaseName": "test", - "command": { - "listIndexes": "coll", - "maxTimeMS": { - "$$type": [ - "int", - "long" - ] - } - } - } - } - ] - } - ] - }, - { - "description": "timeoutMS can be set to 0 on a MongoDatabase - listIndexNames on collection", - "operations": [ - { - "name": "createEntities", - "object": "testRunner", - "arguments": { - "entities": [ - { - "database": { - "id": "database", - "client": "client", - "databaseName": "test", - "databaseOptions": { - "timeoutMS": 0 - } - } - }, - { - "collection": { - "id": "collection", - "database": "database", - "collectionName": "coll" - } - } - ] - } - }, - { - "name": "failPoint", - "object": "testRunner", - "arguments": { - "client": "failPointClient", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "listIndexes" - ], - "blockConnection": true, - "blockTimeMS": 15 - } - } - } - }, - { - "name": "listIndexNames", - "object": "collection" - } - ], - "expectEvents": [ - { - "client": "client", - "events": [ - { - "commandStartedEvent": { - "commandName": "listIndexes", - "databaseName": "test", - "command": { - "listIndexes": "coll", - "maxTimeMS": { - "$$exists": false - } - } - } - } - ] - } - ] - }, - { - "description": "timeoutMS can be configured on a MongoDatabase - createChangeStream on collection", - "operations": [ - { - "name": "createEntities", - "object": "testRunner", - "arguments": { - "entities": [ - { - "database": { - "id": "database", - "client": "client", - "databaseName": "test", - "databaseOptions": { - "timeoutMS": 1000 - } - } - }, - { - "collection": { - "id": "collection", - "database": "database", - "collectionName": "coll" - } - } - ] - } - }, - { - "name": "failPoint", - "object": "testRunner", - "arguments": { - "client": "failPointClient", - 
"failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "aggregate" - ], - "blockConnection": true, - "blockTimeMS": 15 - } - } - } - }, - { - "name": "createChangeStream", - "object": "collection", - "arguments": { - "pipeline": [] - } - } - ], - "expectEvents": [ - { - "client": "client", - "events": [ - { - "commandStartedEvent": { - "commandName": "aggregate", - "databaseName": "test", - "command": { - "aggregate": "coll", - "maxTimeMS": { - "$$type": [ - "int", - "long" - ] - } - } - } - } - ] - } - ] - }, - { - "description": "timeoutMS can be set to 0 on a MongoDatabase - createChangeStream on collection", - "operations": [ - { - "name": "createEntities", - "object": "testRunner", - "arguments": { - "entities": [ - { - "database": { - "id": "database", - "client": "client", - "databaseName": "test", - "databaseOptions": { - "timeoutMS": 0 - } - } - }, - { - "collection": { - "id": "collection", - "database": "database", - "collectionName": "coll" - } - } - ] - } - }, - { - "name": "failPoint", - "object": "testRunner", - "arguments": { - "client": "failPointClient", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "aggregate" - ], - "blockConnection": true, - "blockTimeMS": 15 - } - } - } - }, - { - "name": "createChangeStream", - "object": "collection", - "arguments": { - "pipeline": [] - } - } - ], - "expectEvents": [ - { - "client": "client", - "events": [ - { - "commandStartedEvent": { - "commandName": "aggregate", - "databaseName": "test", - "command": { - "aggregate": "coll", - "maxTimeMS": { - "$$exists": false - } - } - } - } - ] - } - ] - }, - { - "description": "timeoutMS can be configured on a MongoDatabase - insertOne on collection", - "operations": [ - { - "name": "createEntities", - "object": "testRunner", - "arguments": { - "entities": [ - { - "database": { - "id": "database", - "client": "client", - "databaseName": "test", - "databaseOptions": { - "timeoutMS": 1000 - } - } - }, - { - "collection": { - "id": "collection", - "database": "database", - "collectionName": "coll" - } - } - ] - } - }, - { - "name": "failPoint", - "object": "testRunner", - "arguments": { - "client": "failPointClient", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "insert" - ], - "blockConnection": true, - "blockTimeMS": 15 - } - } - } - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "document": { - "x": 1 - } - } - } - ], - "expectEvents": [ - { - "client": "client", - "events": [ - { - "commandStartedEvent": { - "commandName": "insert", - "databaseName": "test", - "command": { - "insert": "coll", - "maxTimeMS": { - "$$type": [ - "int", - "long" - ] - } - } - } - } - ] - } - ] - }, - { - "description": "timeoutMS can be set to 0 on a MongoDatabase - insertOne on collection", - "operations": [ - { - "name": "createEntities", - "object": "testRunner", - "arguments": { - "entities": [ - { - "database": { - "id": "database", - "client": "client", - "databaseName": "test", - "databaseOptions": { - "timeoutMS": 0 - } - } - }, - { - "collection": { - "id": "collection", - "database": "database", - "collectionName": "coll" - } - } - ] - } - }, - { - "name": "failPoint", - "object": "testRunner", - "arguments": { - "client": "failPointClient", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "insert" 
- ], - "blockConnection": true, - "blockTimeMS": 15 - } - } - } - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "document": { - "x": 1 - } - } - } - ], - "expectEvents": [ - { - "client": "client", - "events": [ - { - "commandStartedEvent": { - "commandName": "insert", - "databaseName": "test", - "command": { - "insert": "coll", - "maxTimeMS": { - "$$exists": false - } - } - } - } - ] - } - ] - }, - { - "description": "timeoutMS can be configured on a MongoDatabase - insertMany on collection", - "operations": [ - { - "name": "createEntities", - "object": "testRunner", - "arguments": { - "entities": [ - { - "database": { - "id": "database", - "client": "client", - "databaseName": "test", - "databaseOptions": { - "timeoutMS": 1000 - } - } - }, - { - "collection": { - "id": "collection", - "database": "database", - "collectionName": "coll" - } - } - ] - } - }, - { - "name": "failPoint", - "object": "testRunner", - "arguments": { - "client": "failPointClient", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "insert" - ], - "blockConnection": true, - "blockTimeMS": 15 - } - } - } - }, - { - "name": "insertMany", - "object": "collection", - "arguments": { - "documents": [ - { - "x": 1 - } - ] - } - } - ], - "expectEvents": [ - { - "client": "client", - "events": [ - { - "commandStartedEvent": { - "commandName": "insert", - "databaseName": "test", - "command": { - "insert": "coll", - "maxTimeMS": { - "$$type": [ - "int", - "long" - ] - } - } - } - } - ] - } - ] - }, - { - "description": "timeoutMS can be set to 0 on a MongoDatabase - insertMany on collection", - "operations": [ - { - "name": "createEntities", - "object": "testRunner", - "arguments": { - "entities": [ - { - "database": { - "id": "database", - "client": "client", - "databaseName": "test", - "databaseOptions": { - "timeoutMS": 0 - } - } - }, - { - "collection": { - "id": "collection", - "database": "database", - "collectionName": "coll" - } - } - ] - } - }, - { - "name": "failPoint", - "object": "testRunner", - "arguments": { - "client": "failPointClient", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "insert" - ], - "blockConnection": true, - "blockTimeMS": 15 - } - } - } - }, - { - "name": "insertMany", - "object": "collection", - "arguments": { - "documents": [ - { - "x": 1 - } - ] - } - } - ], - "expectEvents": [ - { - "client": "client", - "events": [ - { - "commandStartedEvent": { - "commandName": "insert", - "databaseName": "test", - "command": { - "insert": "coll", - "maxTimeMS": { - "$$exists": false - } - } - } - } - ] - } - ] - }, - { - "description": "timeoutMS can be configured on a MongoDatabase - deleteOne on collection", - "operations": [ - { - "name": "createEntities", - "object": "testRunner", - "arguments": { - "entities": [ - { - "database": { - "id": "database", - "client": "client", - "databaseName": "test", - "databaseOptions": { - "timeoutMS": 1000 - } - } - }, - { - "collection": { - "id": "collection", - "database": "database", - "collectionName": "coll" - } - } - ] - } - }, - { - "name": "failPoint", - "object": "testRunner", - "arguments": { - "client": "failPointClient", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "delete" - ], - "blockConnection": true, - "blockTimeMS": 15 - } - } - } - }, - { - "name": "deleteOne", - "object": "collection", - "arguments": 
{ - "filter": {} - } - } - ], - "expectEvents": [ - { - "client": "client", - "events": [ - { - "commandStartedEvent": { - "commandName": "delete", - "databaseName": "test", - "command": { - "delete": "coll", - "maxTimeMS": { - "$$type": [ - "int", - "long" - ] - } - } - } - } - ] - } - ] - }, - { - "description": "timeoutMS can be set to 0 on a MongoDatabase - deleteOne on collection", - "operations": [ - { - "name": "createEntities", - "object": "testRunner", - "arguments": { - "entities": [ - { - "database": { - "id": "database", - "client": "client", - "databaseName": "test", - "databaseOptions": { - "timeoutMS": 0 - } - } - }, - { - "collection": { - "id": "collection", - "database": "database", - "collectionName": "coll" - } - } - ] - } - }, - { - "name": "failPoint", - "object": "testRunner", - "arguments": { - "client": "failPointClient", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "delete" - ], - "blockConnection": true, - "blockTimeMS": 15 - } - } - } - }, - { - "name": "deleteOne", - "object": "collection", - "arguments": { - "filter": {} - } - } - ], - "expectEvents": [ - { - "client": "client", - "events": [ - { - "commandStartedEvent": { - "commandName": "delete", - "databaseName": "test", - "command": { - "delete": "coll", - "maxTimeMS": { - "$$exists": false - } - } - } - } - ] - } - ] - }, - { - "description": "timeoutMS can be configured on a MongoDatabase - deleteMany on collection", - "operations": [ - { - "name": "createEntities", - "object": "testRunner", - "arguments": { - "entities": [ - { - "database": { - "id": "database", - "client": "client", - "databaseName": "test", - "databaseOptions": { - "timeoutMS": 1000 - } - } - }, - { - "collection": { - "id": "collection", - "database": "database", - "collectionName": "coll" - } - } - ] - } - }, - { - "name": "failPoint", - "object": "testRunner", - "arguments": { - "client": "failPointClient", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "delete" - ], - "blockConnection": true, - "blockTimeMS": 15 - } - } - } - }, - { - "name": "deleteMany", - "object": "collection", - "arguments": { - "filter": {} - } - } - ], - "expectEvents": [ - { - "client": "client", - "events": [ - { - "commandStartedEvent": { - "commandName": "delete", - "databaseName": "test", - "command": { - "delete": "coll", - "maxTimeMS": { - "$$type": [ - "int", - "long" - ] - } - } - } - } - ] - } - ] - }, - { - "description": "timeoutMS can be set to 0 on a MongoDatabase - deleteMany on collection", - "operations": [ - { - "name": "createEntities", - "object": "testRunner", - "arguments": { - "entities": [ - { - "database": { - "id": "database", - "client": "client", - "databaseName": "test", - "databaseOptions": { - "timeoutMS": 0 - } - } - }, - { - "collection": { - "id": "collection", - "database": "database", - "collectionName": "coll" - } - } - ] - } - }, - { - "name": "failPoint", - "object": "testRunner", - "arguments": { - "client": "failPointClient", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "delete" - ], - "blockConnection": true, - "blockTimeMS": 15 - } - } - } - }, - { - "name": "deleteMany", - "object": "collection", - "arguments": { - "filter": {} - } - } - ], - "expectEvents": [ - { - "client": "client", - "events": [ - { - "commandStartedEvent": { - "commandName": "delete", - "databaseName": "test", - "command": 
{ - "delete": "coll", - "maxTimeMS": { - "$$exists": false - } - } - } - } - ] - } - ] - }, - { - "description": "timeoutMS can be configured on a MongoDatabase - replaceOne on collection", - "operations": [ - { - "name": "createEntities", - "object": "testRunner", - "arguments": { - "entities": [ - { - "database": { - "id": "database", - "client": "client", - "databaseName": "test", - "databaseOptions": { - "timeoutMS": 1000 - } - } - }, - { - "collection": { - "id": "collection", - "database": "database", - "collectionName": "coll" - } - } - ] - } - }, - { - "name": "failPoint", - "object": "testRunner", - "arguments": { - "client": "failPointClient", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "update" - ], - "blockConnection": true, - "blockTimeMS": 15 - } - } - } - }, - { - "name": "replaceOne", - "object": "collection", - "arguments": { - "filter": {}, - "replacement": { - "x": 1 - } - } - } - ], - "expectEvents": [ - { - "client": "client", - "events": [ - { - "commandStartedEvent": { - "commandName": "update", - "databaseName": "test", - "command": { - "update": "coll", - "maxTimeMS": { - "$$type": [ - "int", - "long" - ] - } - } - } - } - ] - } - ] - }, - { - "description": "timeoutMS can be set to 0 on a MongoDatabase - replaceOne on collection", - "operations": [ - { - "name": "createEntities", - "object": "testRunner", - "arguments": { - "entities": [ - { - "database": { - "id": "database", - "client": "client", - "databaseName": "test", - "databaseOptions": { - "timeoutMS": 0 - } - } - }, - { - "collection": { - "id": "collection", - "database": "database", - "collectionName": "coll" - } - } - ] - } - }, - { - "name": "failPoint", - "object": "testRunner", - "arguments": { - "client": "failPointClient", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "update" - ], - "blockConnection": true, - "blockTimeMS": 15 - } - } - } - }, - { - "name": "replaceOne", - "object": "collection", - "arguments": { - "filter": {}, - "replacement": { - "x": 1 - } - } - } - ], - "expectEvents": [ - { - "client": "client", - "events": [ - { - "commandStartedEvent": { - "commandName": "update", - "databaseName": "test", - "command": { - "update": "coll", - "maxTimeMS": { - "$$exists": false - } - } - } - } - ] - } - ] - }, - { - "description": "timeoutMS can be configured on a MongoDatabase - updateOne on collection", - "operations": [ - { - "name": "createEntities", - "object": "testRunner", - "arguments": { - "entities": [ - { - "database": { - "id": "database", - "client": "client", - "databaseName": "test", - "databaseOptions": { - "timeoutMS": 1000 - } - } - }, - { - "collection": { - "id": "collection", - "database": "database", - "collectionName": "coll" - } - } - ] - } - }, - { - "name": "failPoint", - "object": "testRunner", - "arguments": { - "client": "failPointClient", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "update" - ], - "blockConnection": true, - "blockTimeMS": 15 - } - } - } - }, - { - "name": "updateOne", - "object": "collection", - "arguments": { - "filter": {}, - "update": { - "$set": { - "x": 1 - } - } - } - } - ], - "expectEvents": [ - { - "client": "client", - "events": [ - { - "commandStartedEvent": { - "commandName": "update", - "databaseName": "test", - "command": { - "update": "coll", - "maxTimeMS": { - "$$type": [ - "int", - "long" - ] - } - } - } 
- } - ] - } - ] - }, - { - "description": "timeoutMS can be set to 0 on a MongoDatabase - updateOne on collection", - "operations": [ - { - "name": "createEntities", - "object": "testRunner", - "arguments": { - "entities": [ - { - "database": { - "id": "database", - "client": "client", - "databaseName": "test", - "databaseOptions": { - "timeoutMS": 0 - } - } - }, - { - "collection": { - "id": "collection", - "database": "database", - "collectionName": "coll" - } - } - ] - } - }, - { - "name": "failPoint", - "object": "testRunner", - "arguments": { - "client": "failPointClient", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "update" - ], - "blockConnection": true, - "blockTimeMS": 15 - } - } - } - }, - { - "name": "updateOne", - "object": "collection", - "arguments": { - "filter": {}, - "update": { - "$set": { - "x": 1 - } - } - } - } - ], - "expectEvents": [ - { - "client": "client", - "events": [ - { - "commandStartedEvent": { - "commandName": "update", - "databaseName": "test", - "command": { - "update": "coll", - "maxTimeMS": { - "$$exists": false - } - } - } - } - ] - } - ] - }, - { - "description": "timeoutMS can be configured on a MongoDatabase - updateMany on collection", - "operations": [ - { - "name": "createEntities", - "object": "testRunner", - "arguments": { - "entities": [ - { - "database": { - "id": "database", - "client": "client", - "databaseName": "test", - "databaseOptions": { - "timeoutMS": 1000 - } - } - }, - { - "collection": { - "id": "collection", - "database": "database", - "collectionName": "coll" - } - } - ] - } - }, - { - "name": "failPoint", - "object": "testRunner", - "arguments": { - "client": "failPointClient", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "update" - ], - "blockConnection": true, - "blockTimeMS": 15 - } - } - } - }, - { - "name": "updateMany", - "object": "collection", - "arguments": { - "filter": {}, - "update": { - "$set": { - "x": 1 - } - } - } - } - ], - "expectEvents": [ - { - "client": "client", - "events": [ - { - "commandStartedEvent": { - "commandName": "update", - "databaseName": "test", - "command": { - "update": "coll", - "maxTimeMS": { - "$$type": [ - "int", - "long" - ] - } - } - } - } - ] - } - ] - }, - { - "description": "timeoutMS can be set to 0 on a MongoDatabase - updateMany on collection", - "operations": [ - { - "name": "createEntities", - "object": "testRunner", - "arguments": { - "entities": [ - { - "database": { - "id": "database", - "client": "client", - "databaseName": "test", - "databaseOptions": { - "timeoutMS": 0 - } - } - }, - { - "collection": { - "id": "collection", - "database": "database", - "collectionName": "coll" - } - } - ] - } - }, - { - "name": "failPoint", - "object": "testRunner", - "arguments": { - "client": "failPointClient", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "update" - ], - "blockConnection": true, - "blockTimeMS": 15 - } - } - } - }, - { - "name": "updateMany", - "object": "collection", - "arguments": { - "filter": {}, - "update": { - "$set": { - "x": 1 - } - } - } - } - ], - "expectEvents": [ - { - "client": "client", - "events": [ - { - "commandStartedEvent": { - "commandName": "update", - "databaseName": "test", - "command": { - "update": "coll", - "maxTimeMS": { - "$$exists": false - } - } - } - } - ] - } - ] - }, - { - "description": "timeoutMS can be configured 
on a MongoDatabase - findOneAndDelete on collection", - "operations": [ - { - "name": "createEntities", - "object": "testRunner", - "arguments": { - "entities": [ - { - "database": { - "id": "database", - "client": "client", - "databaseName": "test", - "databaseOptions": { - "timeoutMS": 1000 - } - } - }, - { - "collection": { - "id": "collection", - "database": "database", - "collectionName": "coll" - } - } - ] - } - }, - { - "name": "failPoint", - "object": "testRunner", - "arguments": { - "client": "failPointClient", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "findAndModify" - ], - "blockConnection": true, - "blockTimeMS": 15 - } - } - } - }, - { - "name": "findOneAndDelete", - "object": "collection", - "arguments": { - "filter": {} - } - } - ], - "expectEvents": [ - { - "client": "client", - "events": [ - { - "commandStartedEvent": { - "commandName": "findAndModify", - "databaseName": "test", - "command": { - "findAndModify": "coll", - "maxTimeMS": { - "$$type": [ - "int", - "long" - ] - } - } - } - } - ] - } - ] - }, - { - "description": "timeoutMS can be set to 0 on a MongoDatabase - findOneAndDelete on collection", - "operations": [ - { - "name": "createEntities", - "object": "testRunner", - "arguments": { - "entities": [ - { - "database": { - "id": "database", - "client": "client", - "databaseName": "test", - "databaseOptions": { - "timeoutMS": 0 - } - } - }, - { - "collection": { - "id": "collection", - "database": "database", - "collectionName": "coll" - } - } - ] - } - }, - { - "name": "failPoint", - "object": "testRunner", - "arguments": { - "client": "failPointClient", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "findAndModify" - ], - "blockConnection": true, - "blockTimeMS": 15 - } - } - } - }, - { - "name": "findOneAndDelete", - "object": "collection", - "arguments": { - "filter": {} - } - } - ], - "expectEvents": [ - { - "client": "client", - "events": [ - { - "commandStartedEvent": { - "commandName": "findAndModify", - "databaseName": "test", - "command": { - "findAndModify": "coll", - "maxTimeMS": { - "$$exists": false - } - } - } - } - ] - } - ] - }, - { - "description": "timeoutMS can be configured on a MongoDatabase - findOneAndReplace on collection", - "operations": [ - { - "name": "createEntities", - "object": "testRunner", - "arguments": { - "entities": [ - { - "database": { - "id": "database", - "client": "client", - "databaseName": "test", - "databaseOptions": { - "timeoutMS": 1000 - } - } - }, - { - "collection": { - "id": "collection", - "database": "database", - "collectionName": "coll" - } - } - ] - } - }, - { - "name": "failPoint", - "object": "testRunner", - "arguments": { - "client": "failPointClient", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "findAndModify" - ], - "blockConnection": true, - "blockTimeMS": 15 - } - } - } - }, - { - "name": "findOneAndReplace", - "object": "collection", - "arguments": { - "filter": {}, - "replacement": { - "x": 1 - } - } - } - ], - "expectEvents": [ - { - "client": "client", - "events": [ - { - "commandStartedEvent": { - "commandName": "findAndModify", - "databaseName": "test", - "command": { - "findAndModify": "coll", - "maxTimeMS": { - "$$type": [ - "int", - "long" - ] - } - } - } - } - ] - } - ] - }, - { - "description": "timeoutMS can be set to 0 on a MongoDatabase - findOneAndReplace on 
collection", - "operations": [ - { - "name": "createEntities", - "object": "testRunner", - "arguments": { - "entities": [ - { - "database": { - "id": "database", - "client": "client", - "databaseName": "test", - "databaseOptions": { - "timeoutMS": 0 - } - } - }, - { - "collection": { - "id": "collection", - "database": "database", - "collectionName": "coll" - } - } - ] - } - }, - { - "name": "failPoint", - "object": "testRunner", - "arguments": { - "client": "failPointClient", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "findAndModify" - ], - "blockConnection": true, - "blockTimeMS": 15 - } - } - } - }, - { - "name": "findOneAndReplace", - "object": "collection", - "arguments": { - "filter": {}, - "replacement": { - "x": 1 - } - } - } - ], - "expectEvents": [ - { - "client": "client", - "events": [ - { - "commandStartedEvent": { - "commandName": "findAndModify", - "databaseName": "test", - "command": { - "findAndModify": "coll", - "maxTimeMS": { - "$$exists": false - } - } - } - } - ] - } - ] - }, - { - "description": "timeoutMS can be configured on a MongoDatabase - findOneAndUpdate on collection", - "operations": [ - { - "name": "createEntities", - "object": "testRunner", - "arguments": { - "entities": [ - { - "database": { - "id": "database", - "client": "client", - "databaseName": "test", - "databaseOptions": { - "timeoutMS": 1000 - } - } - }, - { - "collection": { - "id": "collection", - "database": "database", - "collectionName": "coll" - } - } - ] - } - }, - { - "name": "failPoint", - "object": "testRunner", - "arguments": { - "client": "failPointClient", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "findAndModify" - ], - "blockConnection": true, - "blockTimeMS": 15 - } - } - } - }, - { - "name": "findOneAndUpdate", - "object": "collection", - "arguments": { - "filter": {}, - "update": { - "$set": { - "x": 1 - } - } - } - } - ], - "expectEvents": [ - { - "client": "client", - "events": [ - { - "commandStartedEvent": { - "commandName": "findAndModify", - "databaseName": "test", - "command": { - "findAndModify": "coll", - "maxTimeMS": { - "$$type": [ - "int", - "long" - ] - } - } - } - } - ] - } - ] - }, - { - "description": "timeoutMS can be set to 0 on a MongoDatabase - findOneAndUpdate on collection", - "operations": [ - { - "name": "createEntities", - "object": "testRunner", - "arguments": { - "entities": [ - { - "database": { - "id": "database", - "client": "client", - "databaseName": "test", - "databaseOptions": { - "timeoutMS": 0 - } - } - }, - { - "collection": { - "id": "collection", - "database": "database", - "collectionName": "coll" - } - } - ] - } - }, - { - "name": "failPoint", - "object": "testRunner", - "arguments": { - "client": "failPointClient", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "findAndModify" - ], - "blockConnection": true, - "blockTimeMS": 15 - } - } - } - }, - { - "name": "findOneAndUpdate", - "object": "collection", - "arguments": { - "filter": {}, - "update": { - "$set": { - "x": 1 - } - } - } - } - ], - "expectEvents": [ - { - "client": "client", - "events": [ - { - "commandStartedEvent": { - "commandName": "findAndModify", - "databaseName": "test", - "command": { - "findAndModify": "coll", - "maxTimeMS": { - "$$exists": false - } - } - } - } - ] - } - ] - }, - { - "description": "timeoutMS can be configured on a MongoDatabase 
- bulkWrite on collection", - "operations": [ - { - "name": "createEntities", - "object": "testRunner", - "arguments": { - "entities": [ - { - "database": { - "id": "database", - "client": "client", - "databaseName": "test", - "databaseOptions": { - "timeoutMS": 1000 - } - } - }, - { - "collection": { - "id": "collection", - "database": "database", - "collectionName": "coll" - } - } - ] - } - }, - { - "name": "failPoint", - "object": "testRunner", - "arguments": { - "client": "failPointClient", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "insert" - ], - "blockConnection": true, - "blockTimeMS": 15 - } - } - } - }, - { - "name": "bulkWrite", - "object": "collection", - "arguments": { - "requests": [ - { - "insertOne": { - "document": { - "_id": 1 - } - } - } - ] - } - } - ], - "expectEvents": [ - { - "client": "client", - "events": [ - { - "commandStartedEvent": { - "commandName": "insert", - "databaseName": "test", - "command": { - "insert": "coll", - "maxTimeMS": { - "$$type": [ - "int", - "long" - ] - } - } - } - } - ] - } - ] - }, - { - "description": "timeoutMS can be set to 0 on a MongoDatabase - bulkWrite on collection", - "operations": [ - { - "name": "createEntities", - "object": "testRunner", - "arguments": { - "entities": [ - { - "database": { - "id": "database", - "client": "client", - "databaseName": "test", - "databaseOptions": { - "timeoutMS": 0 - } - } - }, - { - "collection": { - "id": "collection", - "database": "database", - "collectionName": "coll" - } - } - ] - } - }, - { - "name": "failPoint", - "object": "testRunner", - "arguments": { - "client": "failPointClient", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "insert" - ], - "blockConnection": true, - "blockTimeMS": 15 - } - } - } - }, - { - "name": "bulkWrite", - "object": "collection", - "arguments": { - "requests": [ - { - "insertOne": { - "document": { - "_id": 1 - } - } - } - ] - } - } - ], - "expectEvents": [ - { - "client": "client", - "events": [ - { - "commandStartedEvent": { - "commandName": "insert", - "databaseName": "test", - "command": { - "insert": "coll", - "maxTimeMS": { - "$$exists": false - } - } - } - } - ] - } - ] - }, - { - "description": "timeoutMS can be configured on a MongoDatabase - createIndex on collection", - "operations": [ - { - "name": "createEntities", - "object": "testRunner", - "arguments": { - "entities": [ - { - "database": { - "id": "database", - "client": "client", - "databaseName": "test", - "databaseOptions": { - "timeoutMS": 1000 - } - } - }, - { - "collection": { - "id": "collection", - "database": "database", - "collectionName": "coll" - } - } - ] - } - }, - { - "name": "failPoint", - "object": "testRunner", - "arguments": { - "client": "failPointClient", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "createIndexes" - ], - "blockConnection": true, - "blockTimeMS": 15 - } - } - } - }, - { - "name": "createIndex", - "object": "collection", - "arguments": { - "keys": { - "x": 1 - }, - "name": "x_1" - } - } - ], - "expectEvents": [ - { - "client": "client", - "events": [ - { - "commandStartedEvent": { - "commandName": "createIndexes", - "databaseName": "test", - "command": { - "createIndexes": "coll", - "maxTimeMS": { - "$$type": [ - "int", - "long" - ] - } - } - } - } - ] - } - ] - }, - { - "description": "timeoutMS can be set to 0 on a MongoDatabase - 
createIndex on collection", - "operations": [ - { - "name": "createEntities", - "object": "testRunner", - "arguments": { - "entities": [ - { - "database": { - "id": "database", - "client": "client", - "databaseName": "test", - "databaseOptions": { - "timeoutMS": 0 - } - } - }, - { - "collection": { - "id": "collection", - "database": "database", - "collectionName": "coll" - } - } - ] - } - }, - { - "name": "failPoint", - "object": "testRunner", - "arguments": { - "client": "failPointClient", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "createIndexes" - ], - "blockConnection": true, - "blockTimeMS": 15 - } - } - } - }, - { - "name": "createIndex", - "object": "collection", - "arguments": { - "keys": { - "x": 1 - }, - "name": "x_1" - } - } - ], - "expectEvents": [ - { - "client": "client", - "events": [ - { - "commandStartedEvent": { - "commandName": "createIndexes", - "databaseName": "test", - "command": { - "createIndexes": "coll", - "maxTimeMS": { - "$$exists": false - } - } - } - } - ] - } - ] - }, - { - "description": "timeoutMS can be configured on a MongoDatabase - dropIndex on collection", - "operations": [ - { - "name": "createEntities", - "object": "testRunner", - "arguments": { - "entities": [ - { - "database": { - "id": "database", - "client": "client", - "databaseName": "test", - "databaseOptions": { - "timeoutMS": 1000 - } - } - }, - { - "collection": { - "id": "collection", - "database": "database", - "collectionName": "coll" - } - } - ] - } - }, - { - "name": "failPoint", - "object": "testRunner", - "arguments": { - "client": "failPointClient", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "dropIndexes" - ], - "blockConnection": true, - "blockTimeMS": 15 - } - } - } - }, - { - "name": "dropIndex", - "object": "collection", - "arguments": { - "name": "x_1" - }, - "expectError": { - "isClientError": false, - "isTimeoutError": false - } - } - ], - "expectEvents": [ - { - "client": "client", - "events": [ - { - "commandStartedEvent": { - "commandName": "dropIndexes", - "databaseName": "test", - "command": { - "dropIndexes": "coll", - "maxTimeMS": { - "$$type": [ - "int", - "long" - ] - } - } - } - } - ] - } - ] - }, - { - "description": "timeoutMS can be set to 0 on a MongoDatabase - dropIndex on collection", - "operations": [ - { - "name": "createEntities", - "object": "testRunner", - "arguments": { - "entities": [ - { - "database": { - "id": "database", - "client": "client", - "databaseName": "test", - "databaseOptions": { - "timeoutMS": 0 - } - } - }, - { - "collection": { - "id": "collection", - "database": "database", - "collectionName": "coll" - } - } - ] - } - }, - { - "name": "failPoint", - "object": "testRunner", - "arguments": { - "client": "failPointClient", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "dropIndexes" - ], - "blockConnection": true, - "blockTimeMS": 15 - } - } - } - }, - { - "name": "dropIndex", - "object": "collection", - "arguments": { - "name": "x_1" - }, - "expectError": { - "isClientError": false, - "isTimeoutError": false - } - } - ], - "expectEvents": [ - { - "client": "client", - "events": [ - { - "commandStartedEvent": { - "commandName": "dropIndexes", - "databaseName": "test", - "command": { - "dropIndexes": "coll", - "maxTimeMS": { - "$$exists": false - } - } - } - } - ] - } - ] - }, - { - "description": "timeoutMS can be 
configured on a MongoDatabase - dropIndexes on collection", - "operations": [ - { - "name": "createEntities", - "object": "testRunner", - "arguments": { - "entities": [ - { - "database": { - "id": "database", - "client": "client", - "databaseName": "test", - "databaseOptions": { - "timeoutMS": 1000 - } - } - }, - { - "collection": { - "id": "collection", - "database": "database", - "collectionName": "coll" - } - } - ] - } - }, - { - "name": "failPoint", - "object": "testRunner", - "arguments": { - "client": "failPointClient", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "dropIndexes" - ], - "blockConnection": true, - "blockTimeMS": 15 - } - } - } - }, - { - "name": "dropIndexes", - "object": "collection" - } - ], - "expectEvents": [ - { - "client": "client", - "events": [ - { - "commandStartedEvent": { - "commandName": "dropIndexes", - "databaseName": "test", - "command": { - "dropIndexes": "coll", - "maxTimeMS": { - "$$type": [ - "int", - "long" - ] - } - } - } - } - ] - } - ] - }, - { - "description": "timeoutMS can be set to 0 on a MongoDatabase - dropIndexes on collection", - "operations": [ - { - "name": "createEntities", - "object": "testRunner", - "arguments": { - "entities": [ - { - "database": { - "id": "database", - "client": "client", - "databaseName": "test", - "databaseOptions": { - "timeoutMS": 0 - } - } - }, - { - "collection": { - "id": "collection", - "database": "database", - "collectionName": "coll" - } - } - ] - } - }, - { - "name": "failPoint", - "object": "testRunner", - "arguments": { - "client": "failPointClient", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "dropIndexes" - ], - "blockConnection": true, - "blockTimeMS": 15 - } - } - } - }, - { - "name": "dropIndexes", - "object": "collection" - } - ], - "expectEvents": [ - { - "client": "client", - "events": [ - { - "commandStartedEvent": { - "commandName": "dropIndexes", - "databaseName": "test", - "command": { - "dropIndexes": "coll", - "maxTimeMS": { - "$$exists": false - } - } - } - } - ] - } - ] - } - ] -} diff --git a/test/unified_format.py b/test/unified_format.py index ebeb62ceaa..7e6c09023b 100644 --- a/test/unified_format.py +++ b/test/unified_format.py @@ -1160,6 +1160,7 @@ def run_entity_operation(self, spec): raise NotImplementedError elif isinstance(target, ClientEncryption): method_name = "_clientEncryptionOperation_%s" % (opname,) + client = target._key_vault_client else: method_name = "doesNotExist" @@ -1175,7 +1176,7 @@ def run_entity_operation(self, spec): try: # TODO: PYTHON-3289 apply inherited timeout by default. - inherit_timeout = getattr(target, "timeout", None) + inherit_timeout = client.options.timeout # CSOT: Translate the spec test "timeout" arg into pymongo's context timeout API. 
if "timeout" in arguments or inherit_timeout is not None: timeout = arguments.pop("timeout", None) From 667046129a54e5f9a4e29334470bdef235e891dd Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Tue, 19 Jul 2022 01:22:43 -0500 Subject: [PATCH 0210/1588] PYTHON-3289 Apply client timeoutMS to every operation (#1011) --- pymongo/_csot.py | 22 +++++++++++++++++++++- pymongo/collection.py | 16 +++++++++++----- pymongo/database.py | 6 +++++- pymongo/mongo_client.py | 5 +++++ test/unified_format.py | 15 ++------------- 5 files changed, 44 insertions(+), 20 deletions(-) diff --git a/pymongo/_csot.py b/pymongo/_csot.py index 6d3cd3c0f9..e25bba108f 100644 --- a/pymongo/_csot.py +++ b/pymongo/_csot.py @@ -14,9 +14,10 @@ """Internal helpers for CSOT.""" +import functools import time from contextvars import ContextVar, Token -from typing import Optional, Tuple +from typing import Any, Callable, Optional, Tuple, TypeVar, cast TIMEOUT: ContextVar[Optional[float]] = ContextVar("TIMEOUT", default=None) RTT: ContextVar[float] = ContextVar("RTT", default=0.0) @@ -83,3 +84,22 @@ def __exit__(self, exc_type, exc_val, exc_tb): TIMEOUT.reset(timeout_token) DEADLINE.reset(deadline_token) RTT.reset(rtt_token) + + +# See https://mypy.readthedocs.io/en/stable/generics.html?#decorator-factories +F = TypeVar("F", bound=Callable[..., Any]) + + +def apply(func: F) -> F: + """Apply the client's timeoutMS to this operation.""" + + @functools.wraps(func) + def csot_wrapper(self, *args, **kwargs): + if get_timeout() is None: + timeout = self._timeout + if timeout is not None: + with _TimeoutContext(timeout): + return func(self, *args, **kwargs) + return func(self, *args, **kwargs) + + return cast(F, csot_wrapper) diff --git a/pymongo/collection.py b/pymongo/collection.py index 4aff5c1784..22af5a6426 100644 --- a/pymongo/collection.py +++ b/pymongo/collection.py @@ -35,7 +35,7 @@ from bson.raw_bson import RawBSONDocument from bson.son import SON from bson.timestamp import Timestamp -from pymongo import ASCENDING, common, helpers, message +from pymongo import ASCENDING, _csot, common, helpers, message from pymongo.aggregation import ( _CollectionAggregationCommand, _CollectionRawAggregationCommand, @@ -217,6 +217,10 @@ def __init__( self.__database: Database[_DocumentType] = database self.__name = name self.__full_name = "%s.%s" % (self.__database.name, self.__name) + self.__write_response_codec_options = self.codec_options._replace( + unicode_decode_error_handler="replace", document_class=dict + ) + self._timeout = database.client.options.timeout encrypted_fields = kwargs.pop("encryptedFields", None) if create or kwargs or collation: if encrypted_fields: @@ -230,10 +234,6 @@ def __init__( else: self.__create(name, kwargs, collation, session) - self.__write_response_codec_options = self.codec_options._replace( - unicode_decode_error_handler="replace", document_class=dict - ) - def _socket_for_reads(self, session): return self.__database.client._socket_for_reads(self._read_preference_for(session), session) @@ -433,6 +433,7 @@ def with_options( read_concern or self.read_concern, ) + @_csot.apply def bulk_write( self, requests: Sequence[_WriteOp], @@ -631,6 +632,7 @@ def insert_one( write_concern.acknowledged, ) + @_csot.apply def insert_many( self, documents: Iterable[_DocumentIn], @@ -1892,6 +1894,7 @@ def create_indexes( kwargs["comment"] = comment return self.__create_indexes(indexes, session, **kwargs) + @_csot.apply def __create_indexes(self, indexes, session, **kwargs): """Internal createIndexes helper. 
@@ -2088,6 +2091,7 @@ def drop_indexes(
             kwargs["comment"] = comment
         self.drop_index("*", session=session, **kwargs)

+    @_csot.apply
     def drop_index(
         self,
         index_or_name: _IndexKeyHint,
@@ -2311,6 +2315,7 @@ def options(

         return options

+    @_csot.apply
     def _aggregate(
         self,
         aggregation_command,
@@ -2618,6 +2623,7 @@ def watch(
             full_document_before_change,
         )

+    @_csot.apply
     def rename(
         self,
         new_name: str,
diff --git a/pymongo/database.py b/pymongo/database.py
index 9b9d512014..4f87a58dda 100644
--- a/pymongo/database.py
+++ b/pymongo/database.py
@@ -33,7 +33,7 @@
 from bson.dbref import DBRef
 from bson.son import SON
 from bson.timestamp import Timestamp
-from pymongo import common
+from pymongo import _csot, common
 from pymongo.aggregation import _DatabaseAggregationCommand
 from pymongo.change_stream import DatabaseChangeStream
 from pymongo.collection import Collection
@@ -138,6 +138,7 @@ def __init__(

         self.__name = name
         self.__client: MongoClient[_DocumentType] = client
+        self._timeout = client.options.timeout

     @property
     def client(self) -> "MongoClient[_DocumentType]":
@@ -290,6 +291,7 @@ def get_collection(
             read_concern,
         )

+    @_csot.apply
     def create_collection(
         self,
         name: str,
@@ -690,6 +692,7 @@ def _command(
             client=self.__client,
         )

+    @_csot.apply
     def command(
         self,
         command: Union[str, MutableMapping[str, Any]],
@@ -964,6 +967,7 @@ def _drop_helper(self, name, session=None, comment=None):
             session=session,
         )

+    @_csot.apply
     def drop_collection(
         self,
         name_or_collection: Union[str, Collection],
diff --git a/pymongo/mongo_client.py b/pymongo/mongo_client.py
index e949ba5cd5..080ae8757c 100644
--- a/pymongo/mongo_client.py
+++ b/pymongo/mongo_client.py
@@ -838,6 +838,7 @@ def target():
             from pymongo.encryption import _Encrypter

             self._encrypter = _Encrypter(self, self.__options.auto_encryption_opts)
+        self._timeout = options.timeout

     def _duplicate(self, **kwargs):
         args = self.__init_kwargs.copy()
@@ -1270,6 +1271,7 @@ def _socket_for_reads(self, read_preference, session):
     def _should_pin_cursor(self, session):
         return self.__options.load_balanced and not (session and session.in_transaction)

+    @_csot.apply
     def _run_operation(self, operation, unpack_res, address=None):
         """Run a _Query/_GetMore operation and return a Response.
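
Decorating the internal helpers as well (`_run_operation` above, and
`_retry_internal`/`_retryable_read` below) is what makes retries share a single
deadline: the active timeout lives in a ContextVar, and `apply` only opens a new
context when none is already active, so a retried attempt inherits the outer
deadline instead of restarting the clock. A self-contained sketch of that
pattern, using illustrative names (`apply_default`, `_TIMEOUT`) rather than
PyMongo's internals:

    import functools
    from contextvars import ContextVar
    from typing import Optional

    _TIMEOUT: ContextVar[Optional[float]] = ContextVar("_TIMEOUT", default=None)

    def apply_default(default: Optional[float]):
        """Start a timeout context only if none is already active."""
        def decorator(func):
            @functools.wraps(func)
            def wrapper(*args, **kwargs):
                if _TIMEOUT.get() is None and default is not None:
                    token = _TIMEOUT.set(default)
                    try:
                        return func(*args, **kwargs)
                    finally:
                        _TIMEOUT.reset(token)
                # An outer context is active: inherit its deadline.
                return func(*args, **kwargs)
            return wrapper
        return decorator

    @apply_default(1.0)
    def attempt():
        return _TIMEOUT.get()

    @apply_default(5.0)
    def operation():
        # Both attempts see 5.0, not 1.0: the outer deadline is shared.
        return [attempt(), attempt()]

    print(operation())  # [5.0, 5.0]
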
@@ -1318,6 +1320,7 @@ def _retry_with_session(retryable, func, session, bulk):
         )
         return self._retry_internal(retryable, func, session, bulk)

+    @_csot.apply
     def _retry_internal(self, retryable, func, session, bulk):
         """Internal retryable write helper."""
         max_wire_version = 0
@@ -1384,6 +1387,7 @@ def is_retrying():
                 retrying = True
                 last_error = exc

+    @_csot.apply
     def _retryable_read(self, func, read_pref, session, address=None, retryable=True):
         """Execute an operation with at most one consecutive retries

@@ -1834,6 +1838,7 @@ def list_database_names(
         """
         return [doc["name"] for doc in self.list_databases(session, nameOnly=True, comment=comment)]

+    @_csot.apply
     def drop_database(
         self,
         name_or_database: Union[str, database.Database],
diff --git a/test/unified_format.py b/test/unified_format.py
index 7e6c09023b..e37bc1bb6d 100644
--- a/test/unified_format.py
+++ b/test/unified_format.py
@@ -1140,27 +1140,20 @@ def run_entity_operation(self, spec):

         if isinstance(target, MongoClient):
             method_name = "_clientOperation_%s" % (opname,)
-            client = target
         elif isinstance(target, Database):
             method_name = "_databaseOperation_%s" % (opname,)
-            client = target.client
         elif isinstance(target, Collection):
             method_name = "_collectionOperation_%s" % (opname,)
-            client = target.database.client
         elif isinstance(target, ChangeStream):
             method_name = "_changeStreamOperation_%s" % (opname,)
-            client = target._client
         elif isinstance(target, NonLazyCursor):
             method_name = "_cursor_%s" % (opname,)
-            client = target.client
         elif isinstance(target, ClientSession):
             method_name = "_sessionOperation_%s" % (opname,)
-            client = target._client
         elif isinstance(target, GridFSBucket):
             raise NotImplementedError
         elif isinstance(target, ClientEncryption):
             method_name = "_clientEncryptionOperation_%s" % (opname,)
-            client = target._key_vault_client
         else:
             method_name = "doesNotExist"
@@ -1175,13 +1168,9 @@ def run_entity_operation(self, spec):
         cmd = functools.partial(method, target)

         try:
-            # TODO: PYTHON-3289 apply inherited timeout by default.
-            inherit_timeout = client.options.timeout
             # CSOT: Translate the spec test "timeout" arg into pymongo's context timeout API.
- if "timeout" in arguments or inherit_timeout is not None: - timeout = arguments.pop("timeout", None) - if timeout is None: - timeout = inherit_timeout + if "timeout" in arguments: + timeout = arguments.pop("timeout") with pymongo.timeout(timeout): result = cmd(**dict(arguments)) else: From db3f2dca05485118c90ff1904fe0400818fb212d Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Tue, 19 Jul 2022 15:01:41 -0700 Subject: [PATCH 0211/1588] PYTHON-2459 Implement unified GridFS tests (#1012) --- .evergreen/resync-specs.sh | 3 + test/gridfs/delete.json | 1019 +++++++++++++++++++++------- test/gridfs/download.json | 861 ++++++++++++----------- test/gridfs/downloadByName.json | 330 +++++++++ test/gridfs/download_by_name.json | 240 ------- test/gridfs/upload-disableMD5.json | 172 +++++ test/gridfs/upload.json | 891 +++++++++++++++--------- test/test_gridfs_spec.py | 214 +----- test/unified_format.py | 62 +- test/utils.py | 2 +- 10 files changed, 2362 insertions(+), 1432 deletions(-) create mode 100644 test/gridfs/downloadByName.json delete mode 100644 test/gridfs/download_by_name.json create mode 100644 test/gridfs/upload-disableMD5.json diff --git a/.evergreen/resync-specs.sh b/.evergreen/resync-specs.sh index 4f5366098b..b64868c5a9 100755 --- a/.evergreen/resync-specs.sh +++ b/.evergreen/resync-specs.sh @@ -108,6 +108,9 @@ do csot|CSOT|client-side-operations-timeout) cpjson client-side-operations-timeout/tests csot ;; + gridfs) + cpjson gridfs/tests gridfs + ;; load-balancers|load_balancer) cpjson load-balancers/tests load_balancer ;; diff --git a/test/gridfs/delete.json b/test/gridfs/delete.json index fb5de861f1..7a4ec27f88 100644 --- a/test/gridfs/delete.json +++ b/test/gridfs/delete.json @@ -1,304 +1,799 @@ { - "data": { - "files": [ - { - "_id": { - "$oid": "000000000000000000000001" - }, - "length": 0, - "chunkSize": 4, - "uploadDate": { - "$date": "1970-01-01T00:00:00.000Z" - }, - "md5": "d41d8cd98f00b204e9800998ecf8427e", - "filename": "length-0", - "contentType": "application/octet-stream", - "aliases": [], - "metadata": {} - }, - { - "_id": { - "$oid": "000000000000000000000002" - }, - "length": 0, - "chunkSize": 4, - "uploadDate": { - "$date": "1970-01-01T00:00:00.000Z" - }, - "md5": "d41d8cd98f00b204e9800998ecf8427e", - "filename": "length-0-with-empty-chunk", - "contentType": "application/octet-stream", - "aliases": [], - "metadata": {} - }, - { - "_id": { - "$oid": "000000000000000000000003" - }, - "length": 2, - "chunkSize": 4, - "uploadDate": { - "$date": "1970-01-01T00:00:00.000Z" - }, - "md5": "c700ed4fdb1d27055aa3faa2c2432283", - "filename": "length-2", - "contentType": "application/octet-stream", - "aliases": [], - "metadata": {} - }, - { - "_id": { - "$oid": "000000000000000000000004" - }, - "length": 8, - "chunkSize": 4, - "uploadDate": { - "$date": "1970-01-01T00:00:00.000Z" - }, - "md5": "dd254cdc958e53abaa67da9f797125f5", - "filename": "length-8", - "contentType": "application/octet-stream", - "aliases": [], - "metadata": {} + "description": "gridfs-delete", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" } - ], - "chunks": [ - { - "_id": { - "$oid": "000000000000000000000001" - }, - "files_id": { - "$oid": "000000000000000000000002" + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "gridfs-tests" + } + }, + { + "bucket": { + "id": "bucket0", + "database": "database0" + } + }, + { + "collection": { + "id": "bucket0_files_collection", + "database": "database0", + "collectionName": "fs.files" + } + }, 
+ { + "collection": { + "id": "bucket0_chunks_collection", + "database": "database0", + "collectionName": "fs.chunks" + } + } + ], + "initialData": [ + { + "collectionName": "fs.files", + "databaseName": "gridfs-tests", + "documents": [ + { + "_id": { + "$oid": "000000000000000000000001" + }, + "length": 0, + "chunkSize": 4, + "uploadDate": { + "$date": "1970-01-01T00:00:00.000Z" + }, + "md5": "d41d8cd98f00b204e9800998ecf8427e", + "filename": "length-0", + "contentType": "application/octet-stream", + "aliases": [], + "metadata": {} }, - "n": 0, - "data": { - "$hex": "" - } - }, - { - "_id": { - "$oid": "000000000000000000000002" + { + "_id": { + "$oid": "000000000000000000000002" + }, + "length": 0, + "chunkSize": 4, + "uploadDate": { + "$date": "1970-01-01T00:00:00.000Z" + }, + "md5": "d41d8cd98f00b204e9800998ecf8427e", + "filename": "length-0-with-empty-chunk", + "contentType": "application/octet-stream", + "aliases": [], + "metadata": {} }, - "files_id": { - "$oid": "000000000000000000000003" + { + "_id": { + "$oid": "000000000000000000000003" + }, + "length": 2, + "chunkSize": 4, + "uploadDate": { + "$date": "1970-01-01T00:00:00.000Z" + }, + "md5": "c700ed4fdb1d27055aa3faa2c2432283", + "filename": "length-2", + "contentType": "application/octet-stream", + "aliases": [], + "metadata": {} }, - "n": 0, - "data": { - "$hex": "1122" + { + "_id": { + "$oid": "000000000000000000000004" + }, + "length": 8, + "chunkSize": 4, + "uploadDate": { + "$date": "1970-01-01T00:00:00.000Z" + }, + "md5": "dd254cdc958e53abaa67da9f797125f5", + "filename": "length-8", + "contentType": "application/octet-stream", + "aliases": [], + "metadata": {} } - }, - { - "_id": { - "$oid": "000000000000000000000003" - }, - "files_id": { - "$oid": "000000000000000000000004" + ] + }, + { + "collectionName": "fs.chunks", + "databaseName": "gridfs-tests", + "documents": [ + { + "_id": { + "$oid": "000000000000000000000001" + }, + "files_id": { + "$oid": "000000000000000000000002" + }, + "n": 0, + "data": { + "$binary": { + "base64": "", + "subType": "00" + } + } }, - "n": 0, - "data": { - "$hex": "11223344" - } - }, - { - "_id": { - "$oid": "000000000000000000000004" + { + "_id": { + "$oid": "000000000000000000000002" + }, + "files_id": { + "$oid": "000000000000000000000003" + }, + "n": 0, + "data": { + "$binary": { + "base64": "ESI=", + "subType": "00" + } + } }, - "files_id": { - "$oid": "000000000000000000000004" + { + "_id": { + "$oid": "000000000000000000000003" + }, + "files_id": { + "$oid": "000000000000000000000004" + }, + "n": 0, + "data": { + "$binary": { + "base64": "ESIzRA==", + "subType": "00" + } + } }, - "n": 1, - "data": { - "$hex": "55667788" + { + "_id": { + "$oid": "000000000000000000000004" + }, + "files_id": { + "$oid": "000000000000000000000004" + }, + "n": 1, + "data": { + "$binary": { + "base64": "VWZ3iA==", + "subType": "00" + } + } } - } - ] - }, + ] + } + ], "tests": [ { - "description": "Delete when length is 0", - "act": { - "operation": "delete", - "arguments": { - "id": { - "$oid": "000000000000000000000001" + "description": "delete when length is 0", + "operations": [ + { + "name": "delete", + "object": "bucket0", + "arguments": { + "id": { + "$oid": "000000000000000000000001" + } } } - }, - "assert": { - "result": "void", - "data": [ - { - "delete": "expected.files", - "deletes": [ - { - "q": { - "_id": { - "$oid": "000000000000000000000001" - } - }, - "limit": 1 + ], + "outcome": [ + { + "collectionName": "fs.files", + "databaseName": "gridfs-tests", + "documents": [ + { + "_id": { + "$oid": 
"000000000000000000000002" + }, + "length": 0, + "chunkSize": 4, + "uploadDate": { + "$date": "1970-01-01T00:00:00.000Z" + }, + "md5": "d41d8cd98f00b204e9800998ecf8427e", + "filename": "length-0-with-empty-chunk", + "contentType": "application/octet-stream", + "aliases": [], + "metadata": {} + }, + { + "_id": { + "$oid": "000000000000000000000003" + }, + "length": 2, + "chunkSize": 4, + "uploadDate": { + "$date": "1970-01-01T00:00:00.000Z" + }, + "md5": "c700ed4fdb1d27055aa3faa2c2432283", + "filename": "length-2", + "contentType": "application/octet-stream", + "aliases": [], + "metadata": {} + }, + { + "_id": { + "$oid": "000000000000000000000004" + }, + "length": 8, + "chunkSize": 4, + "uploadDate": { + "$date": "1970-01-01T00:00:00.000Z" + }, + "md5": "dd254cdc958e53abaa67da9f797125f5", + "filename": "length-8", + "contentType": "application/octet-stream", + "aliases": [], + "metadata": {} + } + ] + }, + { + "collectionName": "fs.chunks", + "databaseName": "gridfs-tests", + "documents": [ + { + "_id": { + "$oid": "000000000000000000000001" + }, + "files_id": { + "$oid": "000000000000000000000002" + }, + "n": 0, + "data": { + "$binary": { + "base64": "", + "subType": "00" + } } - ] - } - ] - } + }, + { + "_id": { + "$oid": "000000000000000000000002" + }, + "files_id": { + "$oid": "000000000000000000000003" + }, + "n": 0, + "data": { + "$binary": { + "base64": "ESI=", + "subType": "00" + } + } + }, + { + "_id": { + "$oid": "000000000000000000000003" + }, + "files_id": { + "$oid": "000000000000000000000004" + }, + "n": 0, + "data": { + "$binary": { + "base64": "ESIzRA==", + "subType": "00" + } + } + }, + { + "_id": { + "$oid": "000000000000000000000004" + }, + "files_id": { + "$oid": "000000000000000000000004" + }, + "n": 1, + "data": { + "$binary": { + "base64": "VWZ3iA==", + "subType": "00" + } + } + } + ] + } + ] }, { - "description": "Delete when length is 0 and there is one extra empty chunk", - "act": { - "operation": "delete", - "arguments": { - "id": { - "$oid": "000000000000000000000002" + "description": "delete when length is 0 and there is one extra empty chunk", + "operations": [ + { + "name": "delete", + "object": "bucket0", + "arguments": { + "id": { + "$oid": "000000000000000000000002" + } } } - }, - "assert": { - "result": "void", - "data": [ - { - "delete": "expected.files", - "deletes": [ - { - "q": { - "_id": { - "$oid": "000000000000000000000002" - } - }, - "limit": 1 + ], + "outcome": [ + { + "collectionName": "fs.files", + "databaseName": "gridfs-tests", + "documents": [ + { + "_id": { + "$oid": "000000000000000000000001" + }, + "length": 0, + "chunkSize": 4, + "uploadDate": { + "$date": "1970-01-01T00:00:00.000Z" + }, + "md5": "d41d8cd98f00b204e9800998ecf8427e", + "filename": "length-0", + "contentType": "application/octet-stream", + "aliases": [], + "metadata": {} + }, + { + "_id": { + "$oid": "000000000000000000000003" + }, + "length": 2, + "chunkSize": 4, + "uploadDate": { + "$date": "1970-01-01T00:00:00.000Z" + }, + "md5": "c700ed4fdb1d27055aa3faa2c2432283", + "filename": "length-2", + "contentType": "application/octet-stream", + "aliases": [], + "metadata": {} + }, + { + "_id": { + "$oid": "000000000000000000000004" + }, + "length": 8, + "chunkSize": 4, + "uploadDate": { + "$date": "1970-01-01T00:00:00.000Z" + }, + "md5": "dd254cdc958e53abaa67da9f797125f5", + "filename": "length-8", + "contentType": "application/octet-stream", + "aliases": [], + "metadata": {} + } + ] + }, + { + "collectionName": "fs.chunks", + "databaseName": "gridfs-tests", + "documents": [ + { 
+ "_id": { + "$oid": "000000000000000000000002" + }, + "files_id": { + "$oid": "000000000000000000000003" + }, + "n": 0, + "data": { + "$binary": { + "base64": "ESI=", + "subType": "00" + } } - ] - }, - { - "delete": "expected.chunks", - "deletes": [ - { - "q": { - "files_id": { - "$oid": "000000000000000000000002" - } - }, - "limit": 0 + }, + { + "_id": { + "$oid": "000000000000000000000003" + }, + "files_id": { + "$oid": "000000000000000000000004" + }, + "n": 0, + "data": { + "$binary": { + "base64": "ESIzRA==", + "subType": "00" + } } - ] - } - ] - } + }, + { + "_id": { + "$oid": "000000000000000000000004" + }, + "files_id": { + "$oid": "000000000000000000000004" + }, + "n": 1, + "data": { + "$binary": { + "base64": "VWZ3iA==", + "subType": "00" + } + } + } + ] + } + ] }, { - "description": "Delete when length is 8", - "act": { - "operation": "delete", - "arguments": { - "id": { - "$oid": "000000000000000000000004" + "description": "delete when length is 8", + "operations": [ + { + "name": "delete", + "object": "bucket0", + "arguments": { + "id": { + "$oid": "000000000000000000000004" + } } } - }, - "assert": { - "result": "void", - "data": [ - { - "delete": "expected.files", - "deletes": [ - { - "q": { - "_id": { - "$oid": "000000000000000000000004" - } - }, - "limit": 1 + ], + "outcome": [ + { + "collectionName": "fs.files", + "databaseName": "gridfs-tests", + "documents": [ + { + "_id": { + "$oid": "000000000000000000000001" + }, + "length": 0, + "chunkSize": 4, + "uploadDate": { + "$date": "1970-01-01T00:00:00.000Z" + }, + "md5": "d41d8cd98f00b204e9800998ecf8427e", + "filename": "length-0", + "contentType": "application/octet-stream", + "aliases": [], + "metadata": {} + }, + { + "_id": { + "$oid": "000000000000000000000002" + }, + "length": 0, + "chunkSize": 4, + "uploadDate": { + "$date": "1970-01-01T00:00:00.000Z" + }, + "md5": "d41d8cd98f00b204e9800998ecf8427e", + "filename": "length-0-with-empty-chunk", + "contentType": "application/octet-stream", + "aliases": [], + "metadata": {} + }, + { + "_id": { + "$oid": "000000000000000000000003" + }, + "length": 2, + "chunkSize": 4, + "uploadDate": { + "$date": "1970-01-01T00:00:00.000Z" + }, + "md5": "c700ed4fdb1d27055aa3faa2c2432283", + "filename": "length-2", + "contentType": "application/octet-stream", + "aliases": [], + "metadata": {} + } + ] + }, + { + "collectionName": "fs.chunks", + "databaseName": "gridfs-tests", + "documents": [ + { + "_id": { + "$oid": "000000000000000000000001" + }, + "files_id": { + "$oid": "000000000000000000000002" + }, + "n": 0, + "data": { + "$binary": { + "base64": "", + "subType": "00" + } } - ] - }, - { - "delete": "expected.chunks", - "deletes": [ - { - "q": { - "files_id": { - "$oid": "000000000000000000000004" - } - }, - "limit": 0 + }, + { + "_id": { + "$oid": "000000000000000000000002" + }, + "files_id": { + "$oid": "000000000000000000000003" + }, + "n": 0, + "data": { + "$binary": { + "base64": "ESI=", + "subType": "00" + } } - ] - } - ] - } + } + ] + } + ] }, { - "description": "Delete when files entry does not exist", - "act": { - "operation": "delete", - "arguments": { - "id": { - "$oid": "000000000000000000000000" + "description": "delete when files entry does not exist", + "operations": [ + { + "name": "delete", + "object": "bucket0", + "arguments": { + "id": { + "$oid": "000000000000000000000000" + } + }, + "expectError": { + "isError": true } } - }, - "assert": { - "error": "FileNotFound" - } + ], + "outcome": [ + { + "collectionName": "fs.files", + "databaseName": "gridfs-tests", + 
"documents": [ + { + "_id": { + "$oid": "000000000000000000000001" + }, + "length": 0, + "chunkSize": 4, + "uploadDate": { + "$date": "1970-01-01T00:00:00.000Z" + }, + "md5": "d41d8cd98f00b204e9800998ecf8427e", + "filename": "length-0", + "contentType": "application/octet-stream", + "aliases": [], + "metadata": {} + }, + { + "_id": { + "$oid": "000000000000000000000002" + }, + "length": 0, + "chunkSize": 4, + "uploadDate": { + "$date": "1970-01-01T00:00:00.000Z" + }, + "md5": "d41d8cd98f00b204e9800998ecf8427e", + "filename": "length-0-with-empty-chunk", + "contentType": "application/octet-stream", + "aliases": [], + "metadata": {} + }, + { + "_id": { + "$oid": "000000000000000000000003" + }, + "length": 2, + "chunkSize": 4, + "uploadDate": { + "$date": "1970-01-01T00:00:00.000Z" + }, + "md5": "c700ed4fdb1d27055aa3faa2c2432283", + "filename": "length-2", + "contentType": "application/octet-stream", + "aliases": [], + "metadata": {} + }, + { + "_id": { + "$oid": "000000000000000000000004" + }, + "length": 8, + "chunkSize": 4, + "uploadDate": { + "$date": "1970-01-01T00:00:00.000Z" + }, + "md5": "dd254cdc958e53abaa67da9f797125f5", + "filename": "length-8", + "contentType": "application/octet-stream", + "aliases": [], + "metadata": {} + } + ] + }, + { + "collectionName": "fs.chunks", + "databaseName": "gridfs-tests", + "documents": [ + { + "_id": { + "$oid": "000000000000000000000001" + }, + "files_id": { + "$oid": "000000000000000000000002" + }, + "n": 0, + "data": { + "$binary": { + "base64": "", + "subType": "00" + } + } + }, + { + "_id": { + "$oid": "000000000000000000000002" + }, + "files_id": { + "$oid": "000000000000000000000003" + }, + "n": 0, + "data": { + "$binary": { + "base64": "ESI=", + "subType": "00" + } + } + }, + { + "_id": { + "$oid": "000000000000000000000003" + }, + "files_id": { + "$oid": "000000000000000000000004" + }, + "n": 0, + "data": { + "$binary": { + "base64": "ESIzRA==", + "subType": "00" + } + } + }, + { + "_id": { + "$oid": "000000000000000000000004" + }, + "files_id": { + "$oid": "000000000000000000000004" + }, + "n": 1, + "data": { + "$binary": { + "base64": "VWZ3iA==", + "subType": "00" + } + } + } + ] + } + ] }, { - "description": "Delete when files entry does not exist and there are orphaned chunks", - "arrange": { - "data": [ - { - "delete": "fs.files", - "deletes": [ - { - "q": { - "_id": { - "$oid": "000000000000000000000004" - } - }, - "limit": 1 + "description": "delete when files entry does not exist and there are orphaned chunks", + "operations": [ + { + "name": "deleteOne", + "object": "bucket0_files_collection", + "arguments": { + "filter": { + "_id": { + "$oid": "000000000000000000000004" } - ] + } + }, + "expectResult": { + "deletedCount": 1 } - ] - }, - "act": { - "operation": "delete", - "arguments": { - "id": { - "$oid": "000000000000000000000004" + }, + { + "name": "delete", + "object": "bucket0", + "arguments": { + "id": { + "$oid": "000000000000000000000004" + } + }, + "expectError": { + "isError": true } } - }, - "assert": { - "error": "FileNotFound", - "data": [ - { - "delete": "expected.files", - "deletes": [ - { - "q": { - "_id": { - "$oid": "000000000000000000000004" - } - }, - "limit": 1 + ], + "outcome": [ + { + "collectionName": "fs.files", + "databaseName": "gridfs-tests", + "documents": [ + { + "_id": { + "$oid": "000000000000000000000001" + }, + "length": 0, + "chunkSize": 4, + "uploadDate": { + "$date": "1970-01-01T00:00:00.000Z" + }, + "md5": "d41d8cd98f00b204e9800998ecf8427e", + "filename": "length-0", + "contentType": 
"application/octet-stream", + "aliases": [], + "metadata": {} + }, + { + "_id": { + "$oid": "000000000000000000000002" + }, + "length": 0, + "chunkSize": 4, + "uploadDate": { + "$date": "1970-01-01T00:00:00.000Z" + }, + "md5": "d41d8cd98f00b204e9800998ecf8427e", + "filename": "length-0-with-empty-chunk", + "contentType": "application/octet-stream", + "aliases": [], + "metadata": {} + }, + { + "_id": { + "$oid": "000000000000000000000003" + }, + "length": 2, + "chunkSize": 4, + "uploadDate": { + "$date": "1970-01-01T00:00:00.000Z" + }, + "md5": "c700ed4fdb1d27055aa3faa2c2432283", + "filename": "length-2", + "contentType": "application/octet-stream", + "aliases": [], + "metadata": {} + } + ] + }, + { + "collectionName": "fs.chunks", + "databaseName": "gridfs-tests", + "documents": [ + { + "_id": { + "$oid": "000000000000000000000001" + }, + "files_id": { + "$oid": "000000000000000000000002" + }, + "n": 0, + "data": { + "$binary": { + "base64": "", + "subType": "00" + } } - ] - }, - { - "delete": "expected.chunks", - "deletes": [ - { - "q": { - "files_id": { - "$oid": "000000000000000000000004" - } - }, - "limit": 0 + }, + { + "_id": { + "$oid": "000000000000000000000002" + }, + "files_id": { + "$oid": "000000000000000000000003" + }, + "n": 0, + "data": { + "$binary": { + "base64": "ESI=", + "subType": "00" + } } - ] - } - ] - } + } + ] + } + ] } ] } diff --git a/test/gridfs/download.json b/test/gridfs/download.json index 5092fba981..48d3246218 100644 --- a/test/gridfs/download.json +++ b/test/gridfs/download.json @@ -1,467 +1,558 @@ { - "data": { - "files": [ - { - "_id": { - "$oid": "000000000000000000000001" - }, - "length": 0, - "chunkSize": 4, - "uploadDate": { - "$date": "1970-01-01T00:00:00.000Z" - }, - "md5": "d41d8cd98f00b204e9800998ecf8427e", - "filename": "length-0", - "contentType": "application/octet-stream", - "aliases": [], - "metadata": {} - }, - { - "_id": { - "$oid": "000000000000000000000002" - }, - "length": 0, - "chunkSize": 4, - "uploadDate": { - "$date": "1970-01-01T00:00:00.000Z" - }, - "md5": "d41d8cd98f00b204e9800998ecf8427e", - "filename": "length-0-with-empty-chunk", - "contentType": "application/octet-stream", - "aliases": [], - "metadata": {} - }, - { - "_id": { - "$oid": "000000000000000000000003" - }, - "length": 2, - "chunkSize": 4, - "uploadDate": { - "$date": "1970-01-01T00:00:00.000Z" - }, - "md5": "c700ed4fdb1d27055aa3faa2c2432283", - "filename": "length-2", - "contentType": "application/octet-stream", - "aliases": [], - "metadata": {} - }, - { - "_id": { - "$oid": "000000000000000000000004" - }, - "length": 8, - "chunkSize": 4, - "uploadDate": { - "$date": "1970-01-01T00:00:00.000Z" - }, - "md5": "dd254cdc958e53abaa67da9f797125f5", - "filename": "length-8", - "contentType": "application/octet-stream", - "aliases": [], - "metadata": {} - }, - { - "_id": { - "$oid": "000000000000000000000005" - }, - "length": 10, - "chunkSize": 4, - "uploadDate": { - "$date": "1970-01-01T00:00:00.000Z" - }, - "md5": "57d83cd477bfb1ccd975ab33d827a92b", - "filename": "length-10", - "contentType": "application/octet-stream", - "aliases": [], - "metadata": {} - }, - { - "_id": { - "$oid": "000000000000000000000006" - }, - "length": 2, - "chunkSize": 4, - "uploadDate": { - "$date": "1970-01-01T00:00:00.000Z" - }, - "md5": "c700ed4fdb1d27055aa3faa2c2432283", - "contentType": "application/octet-stream", - "aliases": [], - "metadata": {} + "description": "gridfs-download", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" } - ], - "chunks": [ - { - 
"_id": { - "$oid": "000000000000000000000001" - }, - "files_id": { - "$oid": "000000000000000000000002" - }, - "n": 0, - "data": { - "$hex": "" - } - }, - { - "_id": { - "$oid": "000000000000000000000002" - }, - "files_id": { - "$oid": "000000000000000000000003" + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "gridfs-tests" + } + }, + { + "bucket": { + "id": "bucket0", + "database": "database0" + } + }, + { + "collection": { + "id": "bucket0_files_collection", + "database": "database0", + "collectionName": "fs.files" + } + }, + { + "collection": { + "id": "bucket0_chunks_collection", + "database": "database0", + "collectionName": "fs.chunks" + } + } + ], + "initialData": [ + { + "collectionName": "fs.files", + "databaseName": "gridfs-tests", + "documents": [ + { + "_id": { + "$oid": "000000000000000000000001" + }, + "length": 0, + "chunkSize": 4, + "uploadDate": { + "$date": "1970-01-01T00:00:00.000Z" + }, + "md5": "d41d8cd98f00b204e9800998ecf8427e", + "filename": "length-0", + "contentType": "application/octet-stream", + "aliases": [], + "metadata": {} }, - "n": 0, - "data": { - "$hex": "1122" - } - }, - { - "_id": { - "$oid": "000000000000000000000003" + { + "_id": { + "$oid": "000000000000000000000002" + }, + "length": 0, + "chunkSize": 4, + "uploadDate": { + "$date": "1970-01-01T00:00:00.000Z" + }, + "md5": "d41d8cd98f00b204e9800998ecf8427e", + "filename": "length-0-with-empty-chunk", + "contentType": "application/octet-stream", + "aliases": [], + "metadata": {} }, - "files_id": { - "$oid": "000000000000000000000004" + { + "_id": { + "$oid": "000000000000000000000003" + }, + "length": 2, + "chunkSize": 4, + "uploadDate": { + "$date": "1970-01-01T00:00:00.000Z" + }, + "md5": "c700ed4fdb1d27055aa3faa2c2432283", + "filename": "length-2", + "contentType": "application/octet-stream", + "aliases": [], + "metadata": {} }, - "n": 0, - "data": { - "$hex": "11223344" - } - }, - { - "_id": { - "$oid": "000000000000000000000004" + { + "_id": { + "$oid": "000000000000000000000004" + }, + "length": 8, + "chunkSize": 4, + "uploadDate": { + "$date": "1970-01-01T00:00:00.000Z" + }, + "md5": "dd254cdc958e53abaa67da9f797125f5", + "filename": "length-8", + "contentType": "application/octet-stream", + "aliases": [], + "metadata": {} }, - "files_id": { - "$oid": "000000000000000000000004" + { + "_id": { + "$oid": "000000000000000000000005" + }, + "length": 10, + "chunkSize": 4, + "uploadDate": { + "$date": "1970-01-01T00:00:00.000Z" + }, + "md5": "57d83cd477bfb1ccd975ab33d827a92b", + "filename": "length-10", + "contentType": "application/octet-stream", + "aliases": [], + "metadata": {} }, - "n": 1, - "data": { - "$hex": "55667788" + { + "_id": { + "$oid": "000000000000000000000006" + }, + "length": 2, + "chunkSize": 4, + "uploadDate": { + "$date": "1970-01-01T00:00:00.000Z" + }, + "md5": "c700ed4fdb1d27055aa3faa2c2432283", + "contentType": "application/octet-stream", + "aliases": [], + "metadata": {} } - }, - { - "_id": { - "$oid": "000000000000000000000005" - }, - "files_id": { - "$oid": "000000000000000000000005" + ] + }, + { + "collectionName": "fs.chunks", + "databaseName": "gridfs-tests", + "documents": [ + { + "_id": { + "$oid": "000000000000000000000001" + }, + "files_id": { + "$oid": "000000000000000000000002" + }, + "n": 0, + "data": { + "$binary": { + "base64": "", + "subType": "00" + } + } }, - "n": 0, - "data": { - "$hex": "11223344" - } - }, - { - "_id": { - "$oid": "000000000000000000000006" + { + "_id": { + "$oid": "000000000000000000000002" + }, + 
"files_id": { + "$oid": "000000000000000000000003" + }, + "n": 0, + "data": { + "$binary": { + "base64": "ESI=", + "subType": "00" + } + } }, - "files_id": { - "$oid": "000000000000000000000005" + { + "_id": { + "$oid": "000000000000000000000003" + }, + "files_id": { + "$oid": "000000000000000000000004" + }, + "n": 0, + "data": { + "$binary": { + "base64": "ESIzRA==", + "subType": "00" + } + } }, - "n": 1, - "data": { - "$hex": "55667788" - } - }, - { - "_id": { - "$oid": "000000000000000000000007" + { + "_id": { + "$oid": "000000000000000000000004" + }, + "files_id": { + "$oid": "000000000000000000000004" + }, + "n": 1, + "data": { + "$binary": { + "base64": "VWZ3iA==", + "subType": "00" + } + } }, - "files_id": { - "$oid": "000000000000000000000005" + { + "_id": { + "$oid": "000000000000000000000005" + }, + "files_id": { + "$oid": "000000000000000000000005" + }, + "n": 0, + "data": { + "$binary": { + "base64": "ESIzRA==", + "subType": "00" + } + } }, - "n": 2, - "data": { - "$hex": "99aa" - } - }, - { - "_id": { - "$oid": "000000000000000000000008" + { + "_id": { + "$oid": "000000000000000000000006" + }, + "files_id": { + "$oid": "000000000000000000000005" + }, + "n": 1, + "data": { + "$binary": { + "base64": "VWZ3iA==", + "subType": "00" + } + } }, - "files_id": { - "$oid": "000000000000000000000006" + { + "_id": { + "$oid": "000000000000000000000007" + }, + "files_id": { + "$oid": "000000000000000000000005" + }, + "n": 2, + "data": { + "$binary": { + "base64": "mao=", + "subType": "00" + } + } }, - "n": 0, - "data": { - "$hex": "1122" + { + "_id": { + "$oid": "000000000000000000000008" + }, + "files_id": { + "$oid": "000000000000000000000006" + }, + "n": 0, + "data": { + "$binary": { + "base64": "ESI=", + "subType": "00" + } + } } - } - ] - }, + ] + } + ], "tests": [ { - "description": "Download when length is zero", - "act": { - "operation": "download", - "arguments": { - "id": { - "$oid": "000000000000000000000001" + "description": "download when length is zero", + "operations": [ + { + "name": "download", + "object": "bucket0", + "arguments": { + "id": { + "$oid": "000000000000000000000001" + } }, - "options": {} - } - }, - "assert": { - "result": { - "$hex": "" + "expectResult": { + "$$matchesHexBytes": "" + } } - } + ] }, { - "description": "Download when length is zero and there is one empty chunk", - "act": { - "operation": "download", - "arguments": { - "id": { - "$oid": "000000000000000000000002" + "description": "download when length is zero and there is one empty chunk", + "operations": [ + { + "name": "download", + "object": "bucket0", + "arguments": { + "id": { + "$oid": "000000000000000000000002" + } }, - "options": {} - } - }, - "assert": { - "result": { - "$hex": "" + "expectResult": { + "$$matchesHexBytes": "" + } } - } + ] }, { - "description": "Download when there is one chunk", - "act": { - "operation": "download", - "arguments": { - "id": { - "$oid": "000000000000000000000003" + "description": "download when there is one chunk", + "operations": [ + { + "name": "download", + "object": "bucket0", + "arguments": { + "id": { + "$oid": "000000000000000000000003" + } }, - "options": {} - } - }, - "assert": { - "result": { - "$hex": "1122" + "expectResult": { + "$$matchesHexBytes": "1122" + } } - } + ] }, { - "description": "Download when there are two chunks", - "act": { - "operation": "download", - "arguments": { - "id": { - "$oid": "000000000000000000000004" + "description": "download when there are two chunks", + "operations": [ + { + "name": "download", + "object": 
"bucket0", + "arguments": { + "id": { + "$oid": "000000000000000000000004" + } }, - "options": {} - } - }, - "assert": { - "result": { - "$hex": "1122334455667788" + "expectResult": { + "$$matchesHexBytes": "1122334455667788" + } } - } + ] }, { - "description": "Download when there are three chunks", - "act": { - "operation": "download", - "arguments": { - "id": { - "$oid": "000000000000000000000005" + "description": "download when there are three chunks", + "operations": [ + { + "name": "download", + "object": "bucket0", + "arguments": { + "id": { + "$oid": "000000000000000000000005" + } }, - "options": {} - } - }, - "assert": { - "result": { - "$hex": "112233445566778899aa" + "expectResult": { + "$$matchesHexBytes": "112233445566778899aa" + } } - } + ] }, { - "description": "Download when files entry does not exist", - "act": { - "operation": "download", - "arguments": { - "id": { - "$oid": "000000000000000000000000" - }, - "options": {} + "description": "download when files entry does not exist", + "operations": [ + { + "name": "download", + "object": "bucket0", + "arguments": { + "id": { + "$oid": "000000000000000000000000" + } + }, + "expectError": { + "isError": true + } } - }, - "assert": { - "error": "FileNotFound" - } + ] }, { - "description": "Download when an intermediate chunk is missing", - "arrange": { - "data": [ - { - "delete": "fs.chunks", - "deletes": [ - { - "q": { - "files_id": { - "$oid": "000000000000000000000005" - }, - "n": 1 - }, - "limit": 1 - } - ] + "description": "download when an intermediate chunk is missing", + "operations": [ + { + "name": "deleteOne", + "object": "bucket0_chunks_collection", + "arguments": { + "filter": { + "files_id": { + "$oid": "000000000000000000000005" + }, + "n": 1 + } + }, + "expectResult": { + "deletedCount": 1 } - ] - }, - "act": { - "operation": "download", - "arguments": { - "id": { - "$oid": "000000000000000000000005" + }, + { + "name": "download", + "object": "bucket0", + "arguments": { + "id": { + "$oid": "000000000000000000000005" + } + }, + "expectError": { + "isError": true } } - }, - "assert": { - "error": "ChunkIsMissing" - } + ] }, { - "description": "Download when final chunk is missing", - "arrange": { - "data": [ - { - "delete": "fs.chunks", - "deletes": [ - { - "q": { - "files_id": { - "$oid": "000000000000000000000005" - }, - "n": 1 - }, - "limit": 1 - } - ] + "description": "download when final chunk is missing", + "operations": [ + { + "name": "deleteOne", + "object": "bucket0_chunks_collection", + "arguments": { + "filter": { + "files_id": { + "$oid": "000000000000000000000005" + }, + "n": 2 + } + }, + "expectResult": { + "deletedCount": 1 } - ] - }, - "act": { - "operation": "download", - "arguments": { - "id": { - "$oid": "000000000000000000000005" + }, + { + "name": "download", + "object": "bucket0", + "arguments": { + "id": { + "$oid": "000000000000000000000005" + } + }, + "expectError": { + "isError": true } } - }, - "assert": { - "error": "ChunkIsMissing" - } + ] }, { - "description": "Download when an intermediate chunk is the wrong size", - "arrange": { - "data": [ - { - "update": "fs.chunks", - "updates": [ + "description": "download when an intermediate chunk is the wrong size", + "operations": [ + { + "name": "bulkWrite", + "object": "bucket0_chunks_collection", + "arguments": { + "requests": [ { - "q": { - "files_id": { - "$oid": "000000000000000000000005" + "updateOne": { + "filter": { + "files_id": { + "$oid": "000000000000000000000005" + }, + "n": 1 }, - "n": 1 - }, - "u": { - "$set": { - "data": 
{ - "$hex": "556677" + "update": { + "$set": { + "data": { + "$binary": { + "base64": "VWZ3", + "subType": "00" + } + } } } } }, { - "q": { - "files_id": { - "$oid": "000000000000000000000005" + "updateOne": { + "filter": { + "files_id": { + "$oid": "000000000000000000000005" + }, + "n": 2 }, - "n": 2 - }, - "u": { - "$set": { - "data": { - "$hex": "8899aa" + "update": { + "$set": { + "data": { + "$binary": { + "base64": "iJmq", + "subType": "00" + } + } } } } } ] + }, + "expectResult": { + "matchedCount": 2, + "modifiedCount": 2 } - ] - }, - "act": { - "operation": "download", - "arguments": { - "id": { - "$oid": "000000000000000000000005" + }, + { + "name": "download", + "object": "bucket0", + "arguments": { + "id": { + "$oid": "000000000000000000000005" + } + }, + "expectError": { + "isError": true } } - }, - "assert": { - "error": "ChunkIsWrongSize" - } + ] }, { - "description": "Download when final chunk is the wrong size", - "arrange": { - "data": [ - { - "update": "fs.chunks", - "updates": [ - { - "q": { - "files_id": { - "$oid": "000000000000000000000005" - }, - "n": 2 - }, - "u": { - "$set": { - "data": { - "$hex": "99" - } + "description": "download when final chunk is the wrong size", + "operations": [ + { + "name": "updateOne", + "object": "bucket0_chunks_collection", + "arguments": { + "filter": { + "files_id": { + "$oid": "000000000000000000000005" + }, + "n": 2 + }, + "update": { + "$set": { + "data": { + "$binary": { + "base64": "mQ==", + "subType": "00" } } } - ] + } + }, + "expectResult": { + "matchedCount": 1, + "modifiedCount": 1 } - ] - }, - "act": { - "operation": "download", - "arguments": { - "id": { - "$oid": "000000000000000000000005" + }, + { + "name": "download", + "object": "bucket0", + "arguments": { + "id": { + "$oid": "000000000000000000000005" + } + }, + "expectError": { + "isError": true } } - }, - "assert": { - "error": "ChunkIsWrongSize" - } + ] }, { - "description": "Download legacy file with no name", - "act": { - "operation": "download", - "arguments": { - "id": { - "$oid": "000000000000000000000006" + "description": "download legacy file with no name", + "operations": [ + { + "name": "download", + "object": "bucket0", + "arguments": { + "id": { + "$oid": "000000000000000000000006" + } }, - "options": {} - } - }, - "assert": { - "result": { - "$hex": "1122" + "expectResult": { + "$$matchesHexBytes": "1122" + } } - } + ] } ] } diff --git a/test/gridfs/downloadByName.json b/test/gridfs/downloadByName.json new file mode 100644 index 0000000000..cd44663957 --- /dev/null +++ b/test/gridfs/downloadByName.json @@ -0,0 +1,330 @@ +{ + "description": "gridfs-downloadByName", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "gridfs-tests" + } + }, + { + "bucket": { + "id": "bucket0", + "database": "database0" + } + }, + { + "collection": { + "id": "bucket0_files_collection", + "database": "database0", + "collectionName": "fs.files" + } + }, + { + "collection": { + "id": "bucket0_chunks_collection", + "database": "database0", + "collectionName": "fs.chunks" + } + } + ], + "initialData": [ + { + "collectionName": "fs.files", + "databaseName": "gridfs-tests", + "documents": [ + { + "_id": { + "$oid": "000000000000000000000001" + }, + "length": 1, + "chunkSize": 4, + "uploadDate": { + "$date": "1970-01-01T00:00:00.000Z" + }, + "md5": "47ed733b8d10be225eceba344d533586", + "filename": "abc", + "contentType": "application/octet-stream", + 
"aliases": [], + "metadata": {} + }, + { + "_id": { + "$oid": "000000000000000000000002" + }, + "length": 1, + "chunkSize": 4, + "uploadDate": { + "$date": "1970-01-02T00:00:00.000Z" + }, + "md5": "b15835f133ff2e27c7cb28117bfae8f4", + "filename": "abc", + "contentType": "application/octet-stream", + "aliases": [], + "metadata": {} + }, + { + "_id": { + "$oid": "000000000000000000000003" + }, + "length": 1, + "chunkSize": 4, + "uploadDate": { + "$date": "1970-01-03T00:00:00.000Z" + }, + "md5": "eccbc87e4b5ce2fe28308fd9f2a7baf3", + "filename": "abc", + "contentType": "application/octet-stream", + "aliases": [], + "metadata": {} + }, + { + "_id": { + "$oid": "000000000000000000000004" + }, + "length": 1, + "chunkSize": 4, + "uploadDate": { + "$date": "1970-01-04T00:00:00.000Z" + }, + "md5": "f623e75af30e62bbd73d6df5b50bb7b5", + "filename": "abc", + "contentType": "application/octet-stream", + "aliases": [], + "metadata": {} + }, + { + "_id": { + "$oid": "000000000000000000000005" + }, + "length": 1, + "chunkSize": 4, + "uploadDate": { + "$date": "1970-01-05T00:00:00.000Z" + }, + "md5": "4c614360da93c0a041b22e537de151eb", + "filename": "abc", + "contentType": "application/octet-stream", + "aliases": [], + "metadata": {} + } + ] + }, + { + "collectionName": "fs.chunks", + "databaseName": "gridfs-tests", + "documents": [ + { + "_id": { + "$oid": "000000000000000000000001" + }, + "files_id": { + "$oid": "000000000000000000000001" + }, + "n": 0, + "data": { + "$binary": { + "base64": "EQ==", + "subType": "00" + } + } + }, + { + "_id": { + "$oid": "000000000000000000000002" + }, + "files_id": { + "$oid": "000000000000000000000002" + }, + "n": 0, + "data": { + "$binary": { + "base64": "Ig==", + "subType": "00" + } + } + }, + { + "_id": { + "$oid": "000000000000000000000003" + }, + "files_id": { + "$oid": "000000000000000000000003" + }, + "n": 0, + "data": { + "$binary": { + "base64": "Mw==", + "subType": "00" + } + } + }, + { + "_id": { + "$oid": "000000000000000000000004" + }, + "files_id": { + "$oid": "000000000000000000000004" + }, + "n": 0, + "data": { + "$binary": { + "base64": "RA==", + "subType": "00" + } + } + }, + { + "_id": { + "$oid": "000000000000000000000005" + }, + "files_id": { + "$oid": "000000000000000000000005" + }, + "n": 0, + "data": { + "$binary": { + "base64": "VQ==", + "subType": "00" + } + } + } + ] + } + ], + "tests": [ + { + "description": "downloadByName defaults to latest revision (-1)", + "operations": [ + { + "name": "downloadByName", + "object": "bucket0", + "arguments": { + "filename": "abc" + }, + "expectResult": { + "$$matchesHexBytes": "55" + } + } + ] + }, + { + "description": "downloadByName when revision is 0", + "operations": [ + { + "name": "downloadByName", + "object": "bucket0", + "arguments": { + "filename": "abc", + "revision": 0 + }, + "expectResult": { + "$$matchesHexBytes": "11" + } + } + ] + }, + { + "description": "downloadByName when revision is 1", + "operations": [ + { + "name": "downloadByName", + "object": "bucket0", + "arguments": { + "filename": "abc", + "revision": 1 + }, + "expectResult": { + "$$matchesHexBytes": "22" + } + } + ] + }, + { + "description": "downloadByName when revision is 2", + "operations": [ + { + "name": "downloadByName", + "object": "bucket0", + "arguments": { + "filename": "abc", + "revision": 2 + }, + "expectResult": { + "$$matchesHexBytes": "33" + } + } + ] + }, + { + "description": "downloadByName when revision is -2", + "operations": [ + { + "name": "downloadByName", + "object": "bucket0", + "arguments": { + 
"filename": "abc", + "revision": -2 + }, + "expectResult": { + "$$matchesHexBytes": "44" + } + } + ] + }, + { + "description": "downloadByName when revision is -1", + "operations": [ + { + "name": "downloadByName", + "object": "bucket0", + "arguments": { + "filename": "abc", + "revision": -1 + }, + "expectResult": { + "$$matchesHexBytes": "55" + } + } + ] + }, + { + "description": "downloadByName when files entry does not exist", + "operations": [ + { + "name": "downloadByName", + "object": "bucket0", + "arguments": { + "filename": "xyz" + }, + "expectError": { + "isError": true + } + } + ] + }, + { + "description": "downloadByName when revision does not exist", + "operations": [ + { + "name": "downloadByName", + "object": "bucket0", + "arguments": { + "filename": "abc", + "revision": 999 + }, + "expectError": { + "isError": true + } + } + ] + } + ] +} diff --git a/test/gridfs/download_by_name.json b/test/gridfs/download_by_name.json deleted file mode 100644 index ecc8c9e2cc..0000000000 --- a/test/gridfs/download_by_name.json +++ /dev/null @@ -1,240 +0,0 @@ -{ - "data": { - "files": [ - { - "_id": { - "$oid": "000000000000000000000001" - }, - "length": 1, - "chunkSize": 4, - "uploadDate": { - "$date": "1970-01-01T00:00:00.000Z" - }, - "md5": "47ed733b8d10be225eceba344d533586", - "filename": "abc", - "contentType": "application/octet-stream", - "aliases": [], - "metadata": {} - }, - { - "_id": { - "$oid": "000000000000000000000002" - }, - "length": 1, - "chunkSize": 4, - "uploadDate": { - "$date": "1970-01-02T00:00:00.000Z" - }, - "md5": "b15835f133ff2e27c7cb28117bfae8f4", - "filename": "abc", - "contentType": "application/octet-stream", - "aliases": [], - "metadata": {} - }, - { - "_id": { - "$oid": "000000000000000000000003" - }, - "length": 1, - "chunkSize": 4, - "uploadDate": { - "$date": "1970-01-03T00:00:00.000Z" - }, - "md5": "eccbc87e4b5ce2fe28308fd9f2a7baf3", - "filename": "abc", - "contentType": "application/octet-stream", - "aliases": [], - "metadata": {} - }, - { - "_id": { - "$oid": "000000000000000000000004" - }, - "length": 1, - "chunkSize": 4, - "uploadDate": { - "$date": "1970-01-04T00:00:00.000Z" - }, - "md5": "f623e75af30e62bbd73d6df5b50bb7b5", - "filename": "abc", - "contentType": "application/octet-stream", - "aliases": [], - "metadata": {} - }, - { - "_id": { - "$oid": "000000000000000000000005" - }, - "length": 1, - "chunkSize": 4, - "uploadDate": { - "$date": "1970-01-05T00:00:00.000Z" - }, - "md5": "4c614360da93c0a041b22e537de151eb", - "filename": "abc", - "contentType": "application/octet-stream", - "aliases": [], - "metadata": {} - } - ], - "chunks": [ - { - "_id": { - "$oid": "000000000000000000000001" - }, - "files_id": { - "$oid": "000000000000000000000001" - }, - "n": 0, - "data": { - "$hex": "11" - } - }, - { - "_id": { - "$oid": "000000000000000000000002" - }, - "files_id": { - "$oid": "000000000000000000000002" - }, - "n": 0, - "data": { - "$hex": "22" - } - }, - { - "_id": { - "$oid": "000000000000000000000003" - }, - "files_id": { - "$oid": "000000000000000000000003" - }, - "n": 0, - "data": { - "$hex": "33" - } - }, - { - "_id": { - "$oid": "000000000000000000000004" - }, - "files_id": { - "$oid": "000000000000000000000004" - }, - "n": 0, - "data": { - "$hex": "44" - } - }, - { - "_id": { - "$oid": "000000000000000000000005" - }, - "files_id": { - "$oid": "000000000000000000000005" - }, - "n": 0, - "data": { - "$hex": "55" - } - } - ] - }, - "tests": [ - { - "description": "Download_by_name when revision is 0", - "act": { - "operation": 
"download_by_name", - "arguments": { - "filename": "abc", - "options": { - "revision": 0 - } - } - }, - "assert": { - "result": { - "$hex": "11" - } - } - }, - { - "description": "Download_by_name when revision is 1", - "act": { - "operation": "download_by_name", - "arguments": { - "filename": "abc", - "options": { - "revision": 1 - } - } - }, - "assert": { - "result": { - "$hex": "22" - } - } - }, - { - "description": "Download_by_name when revision is -2", - "act": { - "operation": "download_by_name", - "arguments": { - "filename": "abc", - "options": { - "revision": -2 - } - } - }, - "assert": { - "result": { - "$hex": "44" - } - } - }, - { - "description": "Download_by_name when revision is -1", - "act": { - "operation": "download_by_name", - "arguments": { - "filename": "abc", - "options": { - "revision": -1 - } - } - }, - "assert": { - "result": { - "$hex": "55" - } - } - }, - { - "description": "Download_by_name when files entry does not exist", - "act": { - "operation": "download_by_name", - "arguments": { - "filename": "xyz" - } - }, - "assert": { - "error": "FileNotFound" - } - }, - { - "description": "Download_by_name when revision does not exist", - "act": { - "operation": "download_by_name", - "arguments": { - "filename": "abc", - "options": { - "revision": 999 - } - } - }, - "assert": { - "error": "RevisionNotFound" - } - } - ] -} diff --git a/test/gridfs/upload-disableMD5.json b/test/gridfs/upload-disableMD5.json new file mode 100644 index 0000000000..d5a9d6f4ab --- /dev/null +++ b/test/gridfs/upload-disableMD5.json @@ -0,0 +1,172 @@ +{ + "description": "gridfs-upload-disableMD5", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "gridfs-tests" + } + }, + { + "bucket": { + "id": "bucket0", + "database": "database0" + } + }, + { + "collection": { + "id": "bucket0_files_collection", + "database": "database0", + "collectionName": "fs.files" + } + }, + { + "collection": { + "id": "bucket0_chunks_collection", + "database": "database0", + "collectionName": "fs.chunks" + } + } + ], + "initialData": [ + { + "collectionName": "fs.files", + "databaseName": "gridfs-tests", + "documents": [] + }, + { + "collectionName": "fs.chunks", + "databaseName": "gridfs-tests", + "documents": [] + } + ], + "tests": [ + { + "description": "upload when length is 0 sans MD5", + "operations": [ + { + "name": "upload", + "object": "bucket0", + "arguments": { + "filename": "filename", + "source": { + "$$hexBytes": "" + }, + "chunkSizeBytes": 4, + "disableMD5": true + }, + "expectResult": { + "$$type": "objectId" + }, + "saveResultAsEntity": "uploadedObjectId" + }, + { + "name": "find", + "object": "bucket0_files_collection", + "arguments": { + "filter": {} + }, + "expectResult": [ + { + "_id": { + "$$matchesEntity": "uploadedObjectId" + }, + "length": 0, + "chunkSize": 4, + "uploadDate": { + "$$type": "date" + }, + "md5": { + "$$exists": false + }, + "filename": "filename" + } + ] + }, + { + "name": "find", + "object": "bucket0_chunks_collection", + "arguments": { + "filter": {} + }, + "expectResult": [] + } + ] + }, + { + "description": "upload when length is 1 sans MD5", + "operations": [ + { + "name": "upload", + "object": "bucket0", + "arguments": { + "filename": "filename", + "source": { + "$$hexBytes": "11" + }, + "chunkSizeBytes": 4, + "disableMD5": true + }, + "expectResult": { + "$$type": "objectId" + }, + "saveResultAsEntity": "uploadedObjectId" + }, + { + "name": "find", 
+ "object": "bucket0_files_collection", + "arguments": { + "filter": {} + }, + "expectResult": [ + { + "_id": { + "$$matchesEntity": "uploadedObjectId" + }, + "length": 1, + "chunkSize": 4, + "uploadDate": { + "$$type": "date" + }, + "md5": { + "$$exists": false + }, + "filename": "filename" + } + ] + }, + { + "name": "find", + "object": "bucket0_chunks_collection", + "arguments": { + "filter": {} + }, + "expectResult": [ + { + "_id": { + "$$type": "objectId" + }, + "files_id": { + "$$matchesEntity": "uploadedObjectId" + }, + "n": 0, + "data": { + "$binary": { + "base64": "EQ==", + "subType": "00" + } + } + } + ] + } + ] + } + ] +} diff --git a/test/gridfs/upload.json b/test/gridfs/upload.json index 7d4adec1d8..97e18d2bc2 100644 --- a/test/gridfs/upload.json +++ b/test/gridfs/upload.json @@ -1,379 +1,616 @@ { - "data": { - "files": [], - "chunks": [] - }, + "description": "gridfs-upload", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "gridfs-tests" + } + }, + { + "bucket": { + "id": "bucket0", + "database": "database0" + } + }, + { + "collection": { + "id": "bucket0_files_collection", + "database": "database0", + "collectionName": "fs.files" + } + }, + { + "collection": { + "id": "bucket0_chunks_collection", + "database": "database0", + "collectionName": "fs.chunks" + } + } + ], + "initialData": [ + { + "collectionName": "fs.files", + "databaseName": "gridfs-tests", + "documents": [] + }, + { + "collectionName": "fs.chunks", + "databaseName": "gridfs-tests", + "documents": [] + } + ], "tests": [ { - "description": "Upload when length is 0", - "act": { - "operation": "upload", - "arguments": { - "filename": "filename", - "source": { - "$hex": "" - }, - "options": { + "description": "upload when length is 0", + "operations": [ + { + "name": "upload", + "object": "bucket0", + "arguments": { + "filename": "filename", + "source": { + "$$hexBytes": "" + }, "chunkSizeBytes": 4 - } + }, + "expectResult": { + "$$type": "objectId" + }, + "saveResultAsEntity": "uploadedObjectId" + }, + { + "name": "find", + "object": "bucket0_files_collection", + "arguments": { + "filter": {} + }, + "expectResult": [ + { + "_id": { + "$$matchesEntity": "uploadedObjectId" + }, + "length": 0, + "chunkSize": 4, + "uploadDate": { + "$$type": "date" + }, + "md5": { + "$$unsetOrMatches": "d41d8cd98f00b204e9800998ecf8427e" + }, + "filename": "filename" + } + ] + }, + { + "name": "find", + "object": "bucket0_chunks_collection", + "arguments": { + "filter": {} + }, + "expectResult": [] } - }, - "assert": { - "result": "&result", - "data": [ - { - "insert": "expected.files", - "documents": [ - { - "_id": "*result", - "length": 0, - "chunkSize": 4, - "uploadDate": "*actual", - "filename": "filename" - } - ] - } - ] - } + ] }, { - "description": "Upload when length is 1", - "act": { - "operation": "upload", - "arguments": { - "filename": "filename", - "source": { - "$hex": "11" - }, - "options": { + "description": "upload when length is 1", + "operations": [ + { + "name": "upload", + "object": "bucket0", + "arguments": { + "filename": "filename", + "source": { + "$$hexBytes": "11" + }, "chunkSizeBytes": 4 - } - } - }, - "assert": { - "result": "&result", - "data": [ - { - "insert": "expected.files", - "documents": [ - { - "_id": "*result", - "length": 1, - "chunkSize": 4, - "uploadDate": "*actual", - "filename": "filename" - } - ] - }, - { - "insert": "expected.chunks", - "documents": [ - { - "_id": 
"*actual", - "files_id": "*result", - "n": 0, - "data": { - "$hex": "11" + }, + "expectResult": { + "$$type": "objectId" + }, + "saveResultAsEntity": "uploadedObjectId" + }, + { + "name": "find", + "object": "bucket0_files_collection", + "arguments": { + "filter": {} + }, + "expectResult": [ + { + "_id": { + "$$matchesEntity": "uploadedObjectId" + }, + "length": 1, + "chunkSize": 4, + "uploadDate": { + "$$type": "date" + }, + "md5": { + "$$unsetOrMatches": "47ed733b8d10be225eceba344d533586" + }, + "filename": "filename" + } + ] + }, + { + "name": "find", + "object": "bucket0_chunks_collection", + "arguments": { + "filter": {} + }, + "expectResult": [ + { + "_id": { + "$$type": "objectId" + }, + "files_id": { + "$$matchesEntity": "uploadedObjectId" + }, + "n": 0, + "data": { + "$binary": { + "base64": "EQ==", + "subType": "00" } } - ] - } - ] - } + } + ] + } + ] }, { - "description": "Upload when length is 3", - "act": { - "operation": "upload", - "arguments": { - "filename": "filename", - "source": { - "$hex": "112233" - }, - "options": { + "description": "upload when length is 3", + "operations": [ + { + "name": "upload", + "object": "bucket0", + "arguments": { + "filename": "filename", + "source": { + "$$hexBytes": "112233" + }, "chunkSizeBytes": 4 - } - } - }, - "assert": { - "result": "&result", - "data": [ - { - "insert": "expected.files", - "documents": [ - { - "_id": "*result", - "length": 3, - "chunkSize": 4, - "uploadDate": "*actual", - "filename": "filename" - } - ] - }, - { - "insert": "expected.chunks", - "documents": [ - { - "_id": "*actual", - "files_id": "*result", - "n": 0, - "data": { - "$hex": "112233" + }, + "expectResult": { + "$$type": "objectId" + }, + "saveResultAsEntity": "uploadedObjectId" + }, + { + "name": "find", + "object": "bucket0_files_collection", + "arguments": { + "filter": {} + }, + "expectResult": [ + { + "_id": { + "$$matchesEntity": "uploadedObjectId" + }, + "length": 3, + "chunkSize": 4, + "uploadDate": { + "$$type": "date" + }, + "md5": { + "$$unsetOrMatches": "bafae3a174ab91fc70db7a6aa50f4f52" + }, + "filename": "filename" + } + ] + }, + { + "name": "find", + "object": "bucket0_chunks_collection", + "arguments": { + "filter": {} + }, + "expectResult": [ + { + "_id": { + "$$type": "objectId" + }, + "files_id": { + "$$matchesEntity": "uploadedObjectId" + }, + "n": 0, + "data": { + "$binary": { + "base64": "ESIz", + "subType": "00" } } - ] - } - ] - } + } + ] + } + ] }, { - "description": "Upload when length is 4", - "act": { - "operation": "upload", - "arguments": { - "filename": "filename", - "source": { - "$hex": "11223344" - }, - "options": { + "description": "upload when length is 4", + "operations": [ + { + "name": "upload", + "object": "bucket0", + "arguments": { + "filename": "filename", + "source": { + "$$hexBytes": "11223344" + }, "chunkSizeBytes": 4 - } - } - }, - "assert": { - "result": "&result", - "data": [ - { - "insert": "expected.files", - "documents": [ - { - "_id": "*result", - "length": 4, - "chunkSize": 4, - "uploadDate": "*actual", - "filename": "filename" - } - ] - }, - { - "insert": "expected.chunks", - "documents": [ - { - "_id": "*actual", - "files_id": "*result", - "n": 0, - "data": { - "$hex": "11223344" + }, + "expectResult": { + "$$type": "objectId" + }, + "saveResultAsEntity": "uploadedObjectId" + }, + { + "name": "find", + "object": "bucket0_files_collection", + "arguments": { + "filter": {} + }, + "expectResult": [ + { + "_id": { + "$$matchesEntity": "uploadedObjectId" + }, + "length": 4, + "chunkSize": 4, + 
"uploadDate": { + "$$type": "date" + }, + "md5": { + "$$unsetOrMatches": "7e7c77cff5705d1f7574a25ef6662117" + }, + "filename": "filename" + } + ] + }, + { + "name": "find", + "object": "bucket0_chunks_collection", + "arguments": { + "filter": {} + }, + "expectResult": [ + { + "_id": { + "$$type": "objectId" + }, + "files_id": { + "$$matchesEntity": "uploadedObjectId" + }, + "n": 0, + "data": { + "$binary": { + "base64": "ESIzRA==", + "subType": "00" } } - ] - } - ] - } + } + ] + } + ] }, { - "description": "Upload when length is 5", - "act": { - "operation": "upload", - "arguments": { - "filename": "filename", - "source": { - "$hex": "1122334455" - }, - "options": { + "description": "upload when length is 5", + "operations": [ + { + "name": "upload", + "object": "bucket0", + "arguments": { + "filename": "filename", + "source": { + "$$hexBytes": "1122334455" + }, "chunkSizeBytes": 4 - } - } - }, - "assert": { - "result": "&result", - "data": [ - { - "insert": "expected.files", - "documents": [ - { - "_id": "*result", - "length": 5, - "chunkSize": 4, - "uploadDate": "*actual", - "filename": "filename" - } - ] - }, - { - "insert": "expected.chunks", - "documents": [ - { - "_id": "*actual", - "files_id": "*result", - "n": 0, - "data": { - "$hex": "11223344" + }, + "expectResult": { + "$$type": "objectId" + }, + "saveResultAsEntity": "uploadedObjectId" + }, + { + "name": "find", + "object": "bucket0_files_collection", + "arguments": { + "filter": {} + }, + "expectResult": [ + { + "_id": { + "$$matchesEntity": "uploadedObjectId" + }, + "length": 5, + "chunkSize": 4, + "uploadDate": { + "$$type": "date" + }, + "md5": { + "$$unsetOrMatches": "283d4fea5dded59cf837d3047328f5af" + }, + "filename": "filename" + } + ] + }, + { + "name": "find", + "object": "bucket0_chunks_collection", + "arguments": { + "filter": {}, + "sort": { + "n": 1 + } + }, + "expectResult": [ + { + "_id": { + "$$type": "objectId" + }, + "files_id": { + "$$matchesEntity": "uploadedObjectId" + }, + "n": 0, + "data": { + "$binary": { + "base64": "ESIzRA==", + "subType": "00" } + } + }, + { + "_id": { + "$$type": "objectId" }, - { - "_id": "*actual", - "files_id": "*result", - "n": 1, - "data": { - "$hex": "55" + "files_id": { + "$$matchesEntity": "uploadedObjectId" + }, + "n": 1, + "data": { + "$binary": { + "base64": "VQ==", + "subType": "00" } } - ] - } - ] - } + } + ] + } + ] }, { - "description": "Upload when length is 8", - "act": { - "operation": "upload", - "arguments": { - "filename": "filename", - "source": { - "$hex": "1122334455667788" - }, - "options": { + "description": "upload when length is 8", + "operations": [ + { + "name": "upload", + "object": "bucket0", + "arguments": { + "filename": "filename", + "source": { + "$$hexBytes": "1122334455667788" + }, "chunkSizeBytes": 4 - } - } - }, - "assert": { - "result": "&result", - "data": [ - { - "insert": "expected.files", - "documents": [ - { - "_id": "*result", - "length": 8, - "chunkSize": 4, - "uploadDate": "*actual", - "filename": "filename" - } - ] - }, - { - "insert": "expected.chunks", - "documents": [ - { - "_id": "*actual", - "files_id": "*result", - "n": 0, - "data": { - "$hex": "11223344" + }, + "expectResult": { + "$$type": "objectId" + }, + "saveResultAsEntity": "uploadedObjectId" + }, + { + "name": "find", + "object": "bucket0_files_collection", + "arguments": { + "filter": {} + }, + "expectResult": [ + { + "_id": { + "$$matchesEntity": "uploadedObjectId" + }, + "length": 8, + "chunkSize": 4, + "uploadDate": { + "$$type": "date" + }, + "md5": { + 
"$$unsetOrMatches": "dd254cdc958e53abaa67da9f797125f5" + }, + "filename": "filename" + } + ] + }, + { + "name": "find", + "object": "bucket0_chunks_collection", + "arguments": { + "filter": {}, + "sort": { + "n": 1 + } + }, + "expectResult": [ + { + "_id": { + "$$type": "objectId" + }, + "files_id": { + "$$matchesEntity": "uploadedObjectId" + }, + "n": 0, + "data": { + "$binary": { + "base64": "ESIzRA==", + "subType": "00" } + } + }, + { + "_id": { + "$$type": "objectId" + }, + "files_id": { + "$$matchesEntity": "uploadedObjectId" }, - { - "_id": "*actual", - "files_id": "*result", - "n": 1, - "data": { - "$hex": "55667788" + "n": 1, + "data": { + "$binary": { + "base64": "VWZ3iA==", + "subType": "00" } } - ] - } - ] - } + } + ] + } + ] }, { - "description": "Upload when contentType is provided", - "act": { - "operation": "upload", - "arguments": { - "filename": "filename", - "source": { - "$hex": "11" - }, - "options": { + "description": "upload when contentType is provided", + "operations": [ + { + "name": "upload", + "object": "bucket0", + "arguments": { + "filename": "filename", + "source": { + "$$hexBytes": "11" + }, "chunkSizeBytes": 4, "contentType": "image/jpeg" - } - } - }, - "assert": { - "result": "&result", - "data": [ - { - "insert": "expected.files", - "documents": [ - { - "_id": "*result", - "length": 1, - "chunkSize": 4, - "uploadDate": "*actual", - "filename": "filename", - "contentType": "image/jpeg" - } - ] - }, - { - "insert": "expected.chunks", - "documents": [ - { - "_id": "*actual", - "files_id": "*result", - "n": 0, - "data": { - "$hex": "11" + }, + "expectResult": { + "$$type": "objectId" + }, + "saveResultAsEntity": "uploadedObjectId" + }, + { + "name": "find", + "object": "bucket0_files_collection", + "arguments": { + "filter": {} + }, + "expectResult": [ + { + "_id": { + "$$matchesEntity": "uploadedObjectId" + }, + "length": 1, + "chunkSize": 4, + "uploadDate": { + "$$type": "date" + }, + "md5": { + "$$unsetOrMatches": "47ed733b8d10be225eceba344d533586" + }, + "filename": "filename", + "contentType": "image/jpeg" + } + ] + }, + { + "name": "find", + "object": "bucket0_chunks_collection", + "arguments": { + "filter": {} + }, + "expectResult": [ + { + "_id": { + "$$type": "objectId" + }, + "files_id": { + "$$matchesEntity": "uploadedObjectId" + }, + "n": 0, + "data": { + "$binary": { + "base64": "EQ==", + "subType": "00" } } - ] - } - ] - } + } + ] + } + ] }, { - "description": "Upload when metadata is provided", - "act": { - "operation": "upload", - "arguments": { - "filename": "filename", - "source": { - "$hex": "11" - }, - "options": { + "description": "upload when metadata is provided", + "operations": [ + { + "name": "upload", + "object": "bucket0", + "arguments": { + "filename": "filename", + "source": { + "$$hexBytes": "11" + }, "chunkSizeBytes": 4, "metadata": { "x": 1 } - } - } - }, - "assert": { - "result": "&result", - "data": [ - { - "insert": "expected.files", - "documents": [ - { - "_id": "*result", - "length": 1, - "chunkSize": 4, - "uploadDate": "*actual", - "filename": "filename", - "metadata": { - "x": 1 - } + }, + "expectResult": { + "$$type": "objectId" + }, + "saveResultAsEntity": "uploadedObjectId" + }, + { + "name": "find", + "object": "bucket0_files_collection", + "arguments": { + "filter": {} + }, + "expectResult": [ + { + "_id": { + "$$matchesEntity": "uploadedObjectId" + }, + "length": 1, + "chunkSize": 4, + "uploadDate": { + "$$type": "date" + }, + "md5": { + "$$unsetOrMatches": "47ed733b8d10be225eceba344d533586" + }, + "filename": 
"filename", + "metadata": { + "x": 1 } - ] - }, - { - "insert": "expected.chunks", - "documents": [ - { - "_id": "*actual", - "files_id": "*result", - "n": 0, - "data": { - "$hex": "11" + } + ] + }, + { + "name": "find", + "object": "bucket0_chunks_collection", + "arguments": { + "filter": {} + }, + "expectResult": [ + { + "_id": { + "$$type": "objectId" + }, + "files_id": { + "$$matchesEntity": "uploadedObjectId" + }, + "n": 0, + "data": { + "$binary": { + "base64": "EQ==", + "subType": "00" } } - ] - } - ] - } + } + ] + } + ] } ] } diff --git a/test/test_gridfs_spec.py b/test/test_gridfs_spec.py index 3c6f6b76c4..d080c05c4d 100644 --- a/test/test_gridfs_spec.py +++ b/test/test_gridfs_spec.py @@ -1,4 +1,4 @@ -# Copyright 2015 MongoDB, Inc. +# Copyright 2015-present MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -12,221 +12,21 @@ # See the License for the specific language governing permissions and # limitations under the License. -"""Test GridFSBucket class.""" +"""Test the GridFS unified spec tests.""" -import copy -import datetime import os -import re import sys -from json import loads sys.path[0:0] = [""] -from test import IntegrationTest, unittest - -import gridfs -from bson import Binary -from bson.int64 import Int64 -from bson.json_util import object_hook -from gridfs.errors import CorruptGridFile, NoFile - -# Commands. -_COMMANDS = { - "delete": lambda coll, doc: [coll.delete_many(d["q"]) for d in doc["deletes"]], - "insert": lambda coll, doc: coll.insert_many(doc["documents"]), - "update": lambda coll, doc: [coll.update_many(u["q"], u["u"]) for u in doc["updates"]], -} +from test import unittest +from test.unified_format import generate_test_classes # Location of JSON test specifications. -_TEST_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "gridfs") - - -def camel_to_snake(camel): - # Regex to convert CamelCase to snake_case. Special case for _id. - if camel == "id": - return "file_id" - snake = re.sub("(.)([A-Z][a-z]+)", r"\1_\2", camel) - return re.sub("([a-z0-9])([A-Z])", r"\1_\2", snake).lower() - - -class TestAllScenarios(IntegrationTest): - fs: gridfs.GridFSBucket - str_to_cmd: dict - - @classmethod - def setUpClass(cls): - super(TestAllScenarios, cls).setUpClass() - cls.fs = gridfs.GridFSBucket(cls.db) - cls.str_to_cmd = { - "upload": cls.fs.upload_from_stream, - "download": cls.fs.open_download_stream, - "delete": cls.fs.delete, - "download_by_name": cls.fs.open_download_stream_by_name, - } - - def init_db(self, data, test): - self.cleanup_colls( - self.db.fs.files, self.db.fs.chunks, self.db.expected.files, self.db.expected.chunks - ) - - # Read in data. - if data["files"]: - self.db.fs.files.insert_many(data["files"]) - self.db.expected.files.insert_many(data["files"]) - if data["chunks"]: - self.db.fs.chunks.insert_many(data["chunks"]) - self.db.expected.chunks.insert_many(data["chunks"]) - - # Make initial modifications. - if "arrange" in test: - for cmd in test["arrange"].get("data", []): - for key in cmd.keys(): - if key in _COMMANDS: - coll = self.db.get_collection(cmd[key]) - _COMMANDS[key](coll, cmd) - - def init_expected_db(self, test, result): - # Modify outcome DB. - for cmd in test["assert"].get("data", []): - for key in cmd.keys(): - if key in _COMMANDS: - # Replace wildcards in inserts. 
- for doc in cmd.get("documents", []): - keylist = doc.keys() - for dockey in copy.deepcopy(list(keylist)): - if "result" in str(doc[dockey]): - doc[dockey] = result - if "actual" in str(doc[dockey]): # Avoid duplicate - doc.pop(dockey) - # Move contentType to metadata. - if dockey == "contentType": - doc["metadata"] = {dockey: doc.pop(dockey)} - coll = self.db.get_collection(cmd[key]) - _COMMANDS[key](coll, cmd) - - if test["assert"].get("result") == "&result": - test["assert"]["result"] = result - - def sorted_list(self, coll, ignore_id): - to_sort = [] - for doc in coll.find(): - docstr = "{" - if ignore_id: # Cannot compare _id in chunks collection. - doc.pop("_id") - for k in sorted(doc.keys()): - if k == "uploadDate": # Can't compare datetime. - self.assertTrue(isinstance(doc[k], datetime.datetime)) - else: - docstr += "%s:%s " % (k, repr(doc[k])) - to_sort.append(docstr + "}") - return to_sort - - -def create_test(scenario_def): - def run_scenario(self): - - # Run tests. - self.assertTrue(scenario_def["tests"], "tests cannot be empty") - for test in scenario_def["tests"]: - self.init_db(scenario_def["data"], test) - - # Run GridFs Operation. - operation = self.str_to_cmd[test["act"]["operation"]] - args = test["act"]["arguments"] - extra_opts = args.pop("options", {}) - if "contentType" in extra_opts: - extra_opts["metadata"] = {"contentType": extra_opts.pop("contentType")} - - args.update(extra_opts) - - converted_args = dict((camel_to_snake(c), v) for c, v in args.items()) - - expect_error = test["assert"].get("error", False) - result = None - error = None - try: - result = operation(**converted_args) - - if "download" in test["act"]["operation"]: - result = Binary(result.read()) - except Exception as exc: - if not expect_error: - raise - error = exc - - self.init_expected_db(test, result) - - # Asserts. - errors = { - "FileNotFound": NoFile, - "ChunkIsMissing": CorruptGridFile, - "ExtraChunk": CorruptGridFile, - "ChunkIsWrongSize": CorruptGridFile, - "RevisionNotFound": NoFile, - } - - if expect_error: - self.assertIsNotNone(error) - self.assertIsInstance(error, errors[test["assert"]["error"]], test["description"]) - else: - self.assertIsNone(error) - - if "result" in test["assert"]: - if test["assert"]["result"] == "void": - test["assert"]["result"] = None - self.assertEqual(result, test["assert"].get("result")) - - if "data" in test["assert"]: - # Create alphabetized list - self.assertEqual( - set(self.sorted_list(self.db.fs.chunks, True)), - set(self.sorted_list(self.db.expected.chunks, True)), - ) - - self.assertEqual( - set(self.sorted_list(self.db.fs.files, False)), - set(self.sorted_list(self.db.expected.files, False)), - ) - - return run_scenario - - -def _object_hook(dct): - if "length" in dct: - dct["length"] = Int64(dct["length"]) - return object_hook(dct) - - -def create_tests(): - for dirpath, _, filenames in os.walk(_TEST_PATH): - for filename in filenames: - with open(os.path.join(dirpath, filename)) as scenario_stream: - scenario_def = loads(scenario_stream.read(), object_hook=_object_hook) - - # Because object_hook is already defined by bson.json_util, - # and everything is named 'data' - def str2hex(jsn): - for key, val in jsn.items(): - if key in ("data", "source", "result"): - if "$hex" in val: - jsn[key] = Binary(bytes.fromhex(val["$hex"])) - if isinstance(jsn[key], dict): - str2hex(jsn[key]) - if isinstance(jsn[key], list): - for k in jsn[key]: - str2hex(k) - - str2hex(scenario_def) - - # Construct test from scenario. 
- new_test = create_test(scenario_def) - test_name = "test_%s" % (os.path.splitext(filename)[0]) - new_test.__name__ = test_name - setattr(TestAllScenarios, new_test.__name__, new_test) - +TEST_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "gridfs") -create_tests() +# Generate unified tests. +globals().update(generate_test_classes(TEST_PATH, module=__name__)) if __name__ == "__main__": unittest.main() diff --git a/test/unified_format.py b/test/unified_format.py index e37bc1bb6d..d36b5d0a48 100644 --- a/test/unified_format.py +++ b/test/unified_format.py @@ -16,6 +16,7 @@ https://github.com/mongodb/specifications/blob/master/source/unified-test-format/unified-test-format.rst """ +import binascii import collections import copy import datetime @@ -457,8 +458,10 @@ def _create_entity(self, entity_spec, uri=None): self.test.addCleanup(session.end_session) return elif entity_type == "bucket": - # TODO: implement the 'bucket' entity type - self.test.skipTest("GridFS is not currently supported (PYTHON-2459)") + db = self[spec["database"]] + kwargs = parse_spec_options(spec.get("bucketOptions", {}).copy()) + self[spec["id"]] = GridFSBucket(db, **kwargs) + return elif entity_type == "clientEncryption": opts = camel_to_snake_args(spec["clientEncryptionOpts"].copy()) if isinstance(opts["key_vault_client"], str): @@ -575,11 +578,12 @@ def _operation_type(self, spec, actual, key_to_compare): def _operation_matchesEntity(self, spec, actual, key_to_compare): expected_entity = self.test.entity_map[spec] - self.test.assertIsInstance(expected_entity, abc.Mapping) self.test.assertEqual(expected_entity, actual[key_to_compare]) def _operation_matchesHexBytes(self, spec, actual, key_to_compare): - raise NotImplementedError + expected = binascii.unhexlify(spec) + value = actual[key_to_compare] if key_to_compare else actual + self.test.assertEqual(value, expected) def _operation_unsetOrMatches(self, spec, actual, key_to_compare): if key_to_compare is None and not actual: @@ -906,12 +910,15 @@ def maybe_skip_test(self, spec): if not client_context.test_commands_enabled: if name == "failPoint" or name == "targetedFailPoint": self.skipTest("Test commands must be enabled to use fail points") - if "timeoutMode" in op.get("arguments", {}): - self.skipTest("PyMongo does not support timeoutMode") - if name == "createEntities": - self.maybe_skip_entity(op.get("arguments", {}).get("entities", [])) if name == "modifyCollection": self.skipTest("PyMongo does not support modifyCollection") + if "timeoutMode" in op.get("arguments", {}): + self.skipTest("PyMongo does not support timeoutMode") + if "csot" in class_name: + if "bucket" in op["object"]: + self.skipTest("CSOT not implemented for GridFS") + if name == "createEntities": + self.maybe_skip_entity(op.get("arguments", {}).get("entities", [])) def maybe_skip_entity(self, entities): for entity in entities: @@ -1116,9 +1123,35 @@ def _clientEncryptionOperation_rewrapManyDataKey(self, target, *args, **kwargs): return dict(bulkWriteResult=parse_bulk_write_result(data.bulk_write_result)) return dict() + def _bucketOperation_download(self, target: GridFSBucket, *args: Any, **kwargs: Any) -> bytes: + with target.open_download_stream(*args, **kwargs) as gout: + return gout.read() + + def _bucketOperation_downloadByName( + self, target: GridFSBucket, *args: Any, **kwargs: Any + ) -> bytes: + with target.open_download_stream_by_name(*args, **kwargs) as gout: + return gout.read() + + def _bucketOperation_upload(self, target: GridFSBucket, *args: Any, 
**kwargs: Any) -> ObjectId: + kwargs["source"] = binascii.unhexlify(kwargs.pop("source")["$$hexBytes"]) + if "content_type" in kwargs: + kwargs.setdefault("metadata", {})["contentType"] = kwargs.pop("content_type") + return target.upload_from_stream(*args, **kwargs) + + def _bucketOperation_uploadWithId(self, target: GridFSBucket, *args: Any, **kwargs: Any) -> Any: + kwargs["source"] = binascii.unhexlify(kwargs.pop("source")["$$hexBytes"]) + if "content_type" in kwargs: + kwargs.setdefault("metadata", {})["contentType"] = kwargs.pop("content_type") + return target.upload_from_stream_with_id(*args, **kwargs) + + def _bucketOperation_drop(self, target: GridFSBucket, *args: Any, **kwargs: Any) -> None: + # PyMongo does not support GridFSBucket.drop(), emulate it. + target._files.drop(*args, **kwargs) + target._chunks.drop(*args, **kwargs) + def run_entity_operation(self, spec): target = self.entity_map[spec["object"]] - client = target opname = spec["name"] opargs = spec.get("arguments") expect_error = spec.get("expectError") @@ -1144,6 +1177,11 @@ def run_entity_operation(self, spec): method_name = "_databaseOperation_%s" % (opname,) elif isinstance(target, Collection): method_name = "_collectionOperation_%s" % (opname,) + # contentType is always stored in metadata in pymongo. + if target.name.endswith(".files") and opname == "find": + for doc in spec.get("expectResult", []): + if "contentType" in doc: + doc.setdefault("metadata", {})["contentType"] = doc.pop("contentType") elif isinstance(target, ChangeStream): method_name = "_changeStreamOperation_%s" % (opname,) elif isinstance(target, NonLazyCursor): @@ -1151,7 +1189,11 @@ def run_entity_operation(self, spec): elif isinstance(target, ClientSession): method_name = "_sessionOperation_%s" % (opname,) elif isinstance(target, GridFSBucket): - raise NotImplementedError + method_name = "_bucketOperation_%s" % (opname,) + if "id" in arguments: + arguments["file_id"] = arguments.pop("id") + # MD5 is always disabled in pymongo. + arguments.pop("disable_md5", None) elif isinstance(target, ClientEncryption): method_name = "_clientEncryptionOperation_%s" % (opname,) else: diff --git a/test/utils.py b/test/utils.py index 5421d584d1..29ee1ca477 100644 --- a/test/utils.py +++ b/test/utils.py @@ -1087,7 +1087,7 @@ def prepare_spec_arguments(spec, arguments, opname, entity_map, with_txn_callbac arguments["session"] = entity_map[arguments["session"]] elif opname == "open_download_stream" and arg_name == "id": arguments["file_id"] = arguments.pop(arg_name) - elif opname != "find" and c2s == "max_time_ms": + elif opname not in ("find", "find_one") and c2s == "max_time_ms": # find is the only method that accepts snake_case max_time_ms. # All other methods take kwargs which must use the server's # camelCase maxTimeMS. See PYTHON-1855. From 935f926bd9bf556cadba5d7bda344371b4da24ef Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Tue, 19 Jul 2022 17:46:09 -0700 Subject: [PATCH 0212/1588] PYTHON-3362 Ignore wtimeout when timeoutMS or timeout() is configured (#1013) Apply client timeoutMS to gridfs operations. 
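A minimal usage sketch (editor's illustration, not part of the patch) of the new _csot.apply_write_concern() helper added below. The command document and write concern values are hypothetical; the behavior shown is that an active client-wide timeout causes wtimeout to be dropped so the two timeouts cannot conflict:

    import pymongo
    from pymongo import _csot
    from pymongo.write_concern import WriteConcern

    cmd = {"insert": "coll"}  # hypothetical command document
    wc = WriteConcern(w="majority", wtimeout=5000)

    with pymongo.timeout(10):  # timeoutMS/timeout() is configured...
        _csot.apply_write_concern(cmd, wc)

    # ...so wtimeout is ignored, but the rest of the write concern applies:
    assert cmd["writeConcern"] == {"w": "majority"}

Without an active timeout, the full write concern (including wtimeout) is attached as before.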
--- gridfs/__init__.py | 8 ++++++- pymongo/_csot.py | 15 +++++++++++- pymongo/bulk.py | 5 ++-- pymongo/collection.py | 8 ------- pymongo/network.py | 5 ++-- pymongo/pool.py | 9 +------- test/csot/gridfs-advanced.json | 36 ++++++++++++++++++++--------- test/csot/gridfs-delete.json | 32 ++++++++++++++++++-------- test/csot/gridfs-download.json | 32 ++++++++++++++++++-------- test/csot/gridfs-find.json | 8 +++---- test/csot/gridfs-upload.json | 20 ++++++++-------- test/unified_format.py | 42 ++++++++++++++++++---------------- 12 files changed, 133 insertions(+), 87 deletions(-) diff --git a/gridfs/__init__.py b/gridfs/__init__.py index 08c7e1d2cd..6ab843a85e 100644 --- a/gridfs/__init__.py +++ b/gridfs/__init__.py @@ -33,7 +33,7 @@ _clear_entity_type_registry, _disallow_transactions, ) -from pymongo import ASCENDING, DESCENDING +from pymongo import ASCENDING, DESCENDING, _csot from pymongo.client_session import ClientSession from pymongo.collection import Collection from pymongo.common import validate_string @@ -514,6 +514,7 @@ def __init__( ) self._chunk_size_bytes = chunk_size_bytes + self._timeout = db.client.options.timeout def open_upload_stream( self, @@ -631,6 +632,7 @@ def open_upload_stream_with_id( return GridIn(self._collection, session=session, **opts) + @_csot.apply def upload_from_stream( self, filename: str, @@ -679,6 +681,7 @@ def upload_from_stream( return cast(ObjectId, gin._id) + @_csot.apply def upload_from_stream_with_id( self, file_id: Any, @@ -762,6 +765,7 @@ def open_download_stream( gout._ensure_file() return gout + @_csot.apply def download_to_stream( self, file_id: Any, destination: Any, session: Optional[ClientSession] = None ) -> None: @@ -795,6 +799,7 @@ def download_to_stream( for chunk in gout: destination.write(chunk) + @_csot.apply def delete(self, file_id: Any, session: Optional[ClientSession] = None) -> None: """Given an file_id, delete this stored file's files collection document and associated chunks from a GridFS bucket. 
@@ -926,6 +931,7 @@ def open_download_stream_by_name( except StopIteration: raise NoFile("no version %d for filename %r" % (revision, filename)) + @_csot.apply def download_to_stream_by_name( self, filename: str, diff --git a/pymongo/_csot.py b/pymongo/_csot.py index e25bba108f..5170c0d8ca 100644 --- a/pymongo/_csot.py +++ b/pymongo/_csot.py @@ -17,7 +17,9 @@ import functools import time from contextvars import ContextVar, Token -from typing import Any, Callable, Optional, Tuple, TypeVar, cast +from typing import Any, Callable, MutableMapping, Optional, Tuple, TypeVar, cast + +from pymongo.write_concern import WriteConcern TIMEOUT: ContextVar[Optional[float]] = ContextVar("TIMEOUT", default=None) RTT: ContextVar[float] = ContextVar("RTT", default=0.0) @@ -103,3 +105,14 @@ def csot_wrapper(self, *args, **kwargs): return func(self, *args, **kwargs) return cast(F, csot_wrapper) + + +def apply_write_concern(cmd: MutableMapping, write_concern: Optional[WriteConcern]) -> None: + """Apply the given write concern to a command.""" + if not write_concern or write_concern.is_server_default: + return + wc = write_concern.document + if get_timeout() is not None: + wc.pop("wtimeout", None) + if wc: + cmd["writeConcern"] = wc diff --git a/pymongo/bulk.py b/pymongo/bulk.py index 7992383f67..b21b576aa5 100644 --- a/pymongo/bulk.py +++ b/pymongo/bulk.py @@ -23,7 +23,7 @@ from bson.objectid import ObjectId from bson.raw_bson import RawBSONDocument from bson.son import SON -from pymongo import common +from pymongo import _csot, common from pymongo.client_session import _validate_session_write_concern from pymongo.collation import validate_collation_or_none from pymongo.common import ( @@ -315,8 +315,7 @@ def _execute_command( cmd = SON([(cmd_name, self.collection.name), ("ordered", self.ordered)]) if self.comment: cmd["comment"] = self.comment - if not write_concern.is_server_default: - cmd["writeConcern"] = write_concern.document + _csot.apply_write_concern(cmd, write_concern) if self.bypass_doc_val: cmd["bypassDocumentValidation"] = True if self.let is not None and run.op_type in (_DELETE, _UPDATE): diff --git a/pymongo/collection.py b/pymongo/collection.py index 22af5a6426..9a9ba56618 100644 --- a/pymongo/collection.py +++ b/pymongo/collection.py @@ -542,8 +542,6 @@ def _insert_one( command = SON([("insert", self.name), ("ordered", ordered), ("documents", [doc])]) if comment is not None: command["comment"] = comment - if not write_concern.is_server_default: - command["writeConcern"] = write_concern.document def _insert_command(session, sock_info, retryable_write): if bypass_doc_val: @@ -756,8 +754,6 @@ def _update( if let is not None: common.validate_is_mapping("let", let) command["let"] = let - if not write_concern.is_server_default: - command["writeConcern"] = write_concern.document if comment is not None: command["comment"] = comment @@ -1232,8 +1228,6 @@ def _delete( hint = helpers._index_document(hint) delete_doc["hint"] = hint command = SON([("delete", self.name), ("ordered", ordered), ("deletes", [delete_doc])]) - if not write_concern.is_server_default: - command["writeConcern"] = write_concern.document if let is not None: common.validate_is_document_type("let", let) @@ -2820,8 +2814,6 @@ def _find_and_modify(session, sock_info, retryable_write): "Must be connected to MongoDB 4.4+ to use hint on unacknowledged find and modify commands." 
) cmd["hint"] = hint - if not write_concern.is_server_default: - cmd["writeConcern"] = write_concern.document out = self._command( sock_info, cmd, diff --git a/pymongo/network.py b/pymongo/network.py index 3eac0d02d3..a5c5459e14 100644 --- a/pymongo/network.py +++ b/pymongo/network.py @@ -118,9 +118,8 @@ def command( # Support CSOT if client: - sock_info.apply_timeout(client, spec, write_concern) - elif write_concern and not write_concern.is_server_default: - spec["writeConcern"] = write_concern.document + sock_info.apply_timeout(client, spec) + _csot.apply_write_concern(spec, write_concern) if use_op_msg: flags = _OpMsg.MORE_TO_COME if unacknowledged else 0 diff --git a/pymongo/pool.py b/pymongo/pool.py index ed9feac918..1fab98209f 100644 --- a/pymongo/pool.py +++ b/pymongo/pool.py @@ -569,16 +569,13 @@ def set_socket_timeout(self, timeout): self.last_timeout = timeout self.sock.settimeout(timeout) - def apply_timeout(self, client, cmd, write_concern=None): + def apply_timeout(self, client, cmd): # CSOT: use remaining timeout when set. timeout = _csot.remaining() if timeout is None: # Reset the socket timeout unless we're performing a streaming monitor check. if not self.more_to_come: self.set_socket_timeout(self.opts.socket_timeout) - - if cmd and write_concern and not write_concern.is_server_default: - cmd["writeConcern"] = write_concern.document return None # RTT validation. rtt = _csot.get_rtt() @@ -593,10 +590,6 @@ def apply_timeout(self, client, cmd, write_concern=None): ) if cmd is not None: cmd["maxTimeMS"] = int(max_time_ms * 1000) - wc = write_concern.document if write_concern else {} - wc.pop("wtimeout", None) - if wc: - cmd["writeConcern"] = wc self.set_socket_timeout(timeout) return timeout diff --git a/test/csot/gridfs-advanced.json b/test/csot/gridfs-advanced.json index 668b93f37a..0b09684fc7 100644 --- a/test/csot/gridfs-advanced.json +++ b/test/csot/gridfs-advanced.json @@ -17,7 +17,7 @@ "client": { "id": "client", "uriOptions": { - "timeoutMS": 50 + "timeoutMS": 75 }, "useMultipleMongoses": false, "observeEvents": [ @@ -62,13 +62,12 @@ "_id": { "$oid": "000000000000000000000005" }, - "length": 10, + "length": 8, "chunkSize": 4, "uploadDate": { "$date": "1970-01-01T00:00:00.000Z" }, - "md5": "57d83cd477bfb1ccd975ab33d827a92b", - "filename": "length-10", + "filename": "length-8", "contentType": "application/octet-stream", "aliases": [], "metadata": {} @@ -93,6 +92,21 @@ "subType": "00" } } + }, + { + "_id": { + "$oid": "000000000000000000000006" + }, + "files_id": { + "$oid": "000000000000000000000005" + }, + "n": 1, + "data": { + "$binary": { + "base64": "ESIzRA==", + "subType": "00" + } + } } ] } @@ -116,7 +130,7 @@ "update" ], "blockConnection": true, - "blockTimeMS": 55 + "blockTimeMS": 100 } } } @@ -129,7 +143,7 @@ "$oid": "000000000000000000000005" }, "newFilename": "foo", - "timeoutMS": 100 + "timeoutMS": 2000 } } ], @@ -174,7 +188,7 @@ "update" ], "blockConnection": true, - "blockTimeMS": 55 + "blockTimeMS": 100 } } } @@ -234,7 +248,7 @@ "drop" ], "blockConnection": true, - "blockTimeMS": 55 + "blockTimeMS": 100 } } } @@ -243,7 +257,7 @@ "name": "drop", "object": "bucket", "arguments": { - "timeoutMS": 100 + "timeoutMS": 2000 } } ] @@ -266,7 +280,7 @@ "drop" ], "blockConnection": true, - "blockTimeMS": 55 + "blockTimeMS": 100 } } } @@ -320,7 +334,7 @@ "drop" ], "blockConnection": true, - "blockTimeMS": 55 + "blockTimeMS": 100 } } } diff --git a/test/csot/gridfs-delete.json b/test/csot/gridfs-delete.json index f458fa827c..8701929ff3 100644 --- 
a/test/csot/gridfs-delete.json +++ b/test/csot/gridfs-delete.json @@ -17,7 +17,7 @@ "client": { "id": "client", "uriOptions": { - "timeoutMS": 50 + "timeoutMS": 75 }, "useMultipleMongoses": false, "observeEvents": [ @@ -62,13 +62,12 @@ "_id": { "$oid": "000000000000000000000005" }, - "length": 10, + "length": 8, "chunkSize": 4, "uploadDate": { "$date": "1970-01-01T00:00:00.000Z" }, - "md5": "57d83cd477bfb1ccd975ab33d827a92b", - "filename": "length-10", + "filename": "length-8", "contentType": "application/octet-stream", "aliases": [], "metadata": {} @@ -93,6 +92,21 @@ "subType": "00" } } + }, + { + "_id": { + "$oid": "000000000000000000000006" + }, + "files_id": { + "$oid": "000000000000000000000005" + }, + "n": 1, + "data": { + "$binary": { + "base64": "ESIzRA==", + "subType": "00" + } + } } ] } @@ -116,7 +130,7 @@ "delete" ], "blockConnection": true, - "blockTimeMS": 55 + "blockTimeMS": 100 } } } @@ -128,7 +142,7 @@ "id": { "$oid": "000000000000000000000005" }, - "timeoutMS": 100 + "timeoutMS": 1000 } } ] @@ -151,7 +165,7 @@ "delete" ], "blockConnection": true, - "blockTimeMS": 55 + "blockTimeMS": 100 } } } @@ -210,7 +224,7 @@ "delete" ], "blockConnection": true, - "blockTimeMS": 55 + "blockTimeMS": 100 } } } @@ -247,7 +261,7 @@ "delete" ], "blockConnection": true, - "blockTimeMS": 30 + "blockTimeMS": 50 } } } diff --git a/test/csot/gridfs-download.json b/test/csot/gridfs-download.json index a3044a6d81..2ab64010f8 100644 --- a/test/csot/gridfs-download.json +++ b/test/csot/gridfs-download.json @@ -17,7 +17,7 @@ "client": { "id": "client", "uriOptions": { - "timeoutMS": 50 + "timeoutMS": 75 }, "useMultipleMongoses": false, "observeEvents": [ @@ -62,13 +62,12 @@ "_id": { "$oid": "000000000000000000000005" }, - "length": 10, + "length": 8, "chunkSize": 4, "uploadDate": { "$date": "1970-01-01T00:00:00.000Z" }, - "md5": "57d83cd477bfb1ccd975ab33d827a92b", - "filename": "length-10", + "filename": "length-8", "contentType": "application/octet-stream", "aliases": [], "metadata": {} @@ -93,6 +92,21 @@ "subType": "00" } } + }, + { + "_id": { + "$oid": "000000000000000000000006" + }, + "files_id": { + "$oid": "000000000000000000000005" + }, + "n": 1, + "data": { + "$binary": { + "base64": "ESIzRA==", + "subType": "00" + } + } } ] } @@ -116,7 +130,7 @@ "find" ], "blockConnection": true, - "blockTimeMS": 55 + "blockTimeMS": 100 } } } @@ -128,7 +142,7 @@ "id": { "$oid": "000000000000000000000005" }, - "timeoutMS": 100 + "timeoutMS": 1000 } } ] @@ -151,7 +165,7 @@ "find" ], "blockConnection": true, - "blockTimeMS": 55 + "blockTimeMS": 100 } } } @@ -210,7 +224,7 @@ "find" ], "blockConnection": true, - "blockTimeMS": 55 + "blockTimeMS": 100 } } } @@ -284,7 +298,7 @@ "find" ], "blockConnection": true, - "blockTimeMS": 30 + "blockTimeMS": 50 } } } diff --git a/test/csot/gridfs-find.json b/test/csot/gridfs-find.json index f75a279c01..45bb7066d6 100644 --- a/test/csot/gridfs-find.json +++ b/test/csot/gridfs-find.json @@ -17,7 +17,7 @@ "client": { "id": "client", "uriOptions": { - "timeoutMS": 50 + "timeoutMS": 75 }, "useMultipleMongoses": false, "observeEvents": [ @@ -84,7 +84,7 @@ "find" ], "blockConnection": true, - "blockTimeMS": 55 + "blockTimeMS": 100 } } } @@ -94,7 +94,7 @@ "object": "bucket", "arguments": { "filter": {}, - "timeoutMS": 100 + "timeoutMS": 1000 } } ], @@ -139,7 +139,7 @@ "find" ], "blockConnection": true, - "blockTimeMS": 55 + "blockTimeMS": 100 } } } diff --git a/test/csot/gridfs-upload.json b/test/csot/gridfs-upload.json index b0daeb2e42..690fdda77f 100644 --- 
a/test/csot/gridfs-upload.json +++ b/test/csot/gridfs-upload.json @@ -17,7 +17,7 @@ "client": { "id": "client", "uriOptions": { - "timeoutMS": 50 + "timeoutMS": 75 }, "useMultipleMongoses": false } @@ -81,7 +81,7 @@ "find" ], "blockConnection": true, - "blockTimeMS": 55 + "blockTimeMS": 100 } } } @@ -117,7 +117,7 @@ "find" ], "blockConnection": true, - "blockTimeMS": 55 + "blockTimeMS": 100 } } } @@ -155,7 +155,7 @@ "listIndexes" ], "blockConnection": true, - "blockTimeMS": 55 + "blockTimeMS": 100 } } } @@ -193,7 +193,7 @@ "createIndexes" ], "blockConnection": true, - "blockTimeMS": 55 + "blockTimeMS": 100 } } } @@ -231,7 +231,7 @@ "listIndexes" ], "blockConnection": true, - "blockTimeMS": 55 + "blockTimeMS": 100 } } } @@ -269,7 +269,7 @@ "createIndexes" ], "blockConnection": true, - "blockTimeMS": 55 + "blockTimeMS": 100 } } } @@ -307,7 +307,7 @@ "insert" ], "blockConnection": true, - "blockTimeMS": 55 + "blockTimeMS": 100 } } } @@ -345,7 +345,7 @@ "insert" ], "blockConnection": true, - "blockTimeMS": 55 + "blockTimeMS": 100 } } } @@ -384,7 +384,7 @@ "listIndexes" ], "blockConnection": true, - "blockTimeMS": 30 + "blockTimeMS": 50 } } } diff --git a/test/unified_format.py b/test/unified_format.py index d36b5d0a48..d81238c0ee 100644 --- a/test/unified_format.py +++ b/test/unified_format.py @@ -52,7 +52,7 @@ snake_to_camel, ) from test.version import Version -from typing import Any +from typing import Any, List import pymongo from bson import SON, Code, DBRef, Decimal128, Int64, MaxKey, MinKey, json_util @@ -60,8 +60,8 @@ from bson.codec_options import DEFAULT_CODEC_OPTIONS from bson.objectid import ObjectId from bson.regex import RE_TYPE, Regex -from gridfs import GridFSBucket -from pymongo import ASCENDING, MongoClient +from gridfs import GridFSBucket, GridOut +from pymongo import ASCENDING, MongoClient, _csot from pymongo.change_stream import ChangeStream from pymongo.client_session import ClientSession, TransactionOptions, _TxnState from pymongo.collection import Collection @@ -460,7 +460,17 @@ def _create_entity(self, entity_spec, uri=None): elif entity_type == "bucket": db = self[spec["database"]] kwargs = parse_spec_options(spec.get("bucketOptions", {}).copy()) - self[spec["id"]] = GridFSBucket(db, **kwargs) + bucket = GridFSBucket(db, **kwargs) + + # PyMongo does not support GridFSBucket.drop(), emulate it. 
+ @_csot.apply + def drop(self: GridFSBucket, *args: Any, **kwargs: Any) -> None: + self._files.drop(*args, **kwargs) + self._chunks.drop(*args, **kwargs) + + if not hasattr(bucket, "drop"): + bucket.drop = drop.__get__(bucket) + self[spec["id"]] = bucket return elif entity_type == "clientEncryption": opts = camel_to_snake_args(spec["clientEncryptionOpts"].copy()) @@ -871,8 +881,11 @@ def maybe_skip_test(self, spec): or "Dirty implicit session is discarded" in spec["description"] ): self.skipTest("MMAPv1 does not support retryWrites=True") - elif "Client side error in command starting transaction" in spec["description"]: + if "Client side error in command starting transaction" in spec["description"]: self.skipTest("Implement PYTHON-1894") + if "timeoutMS applied to entire download" in spec["description"]: + self.skipTest("PyMongo's open_download_stream does not cap the stream's lifetime") + class_name = self.__class__.__name__.lower() description = spec["description"].lower() if "csot" in class_name: @@ -914,17 +927,6 @@ def maybe_skip_test(self, spec): self.skipTest("PyMongo does not support modifyCollection") if "timeoutMode" in op.get("arguments", {}): self.skipTest("PyMongo does not support timeoutMode") - if "csot" in class_name: - if "bucket" in op["object"]: - self.skipTest("CSOT not implemented for GridFS") - if name == "createEntities": - self.maybe_skip_entity(op.get("arguments", {}).get("entities", [])) - - def maybe_skip_entity(self, entities): - for entity in entities: - entity_type = next(iter(entity)) - if entity_type == "bucket": - self.skipTest("GridFS is not currently supported (PYTHON-2459)") def process_error(self, exception, spec): is_error = spec.get("isError") @@ -1145,10 +1147,10 @@ def _bucketOperation_uploadWithId(self, target: GridFSBucket, *args: Any, **kwar kwargs.setdefault("metadata", {})["contentType"] = kwargs.pop("content_type") return target.upload_from_stream_with_id(*args, **kwargs) - def _bucketOperation_drop(self, target: GridFSBucket, *args: Any, **kwargs: Any) -> None: - # PyMongo does not support GridFSBucket.drop(), emulate it. - target._files.drop(*args, **kwargs) - target._chunks.drop(*args, **kwargs) + def _bucketOperation_find( + self, target: GridFSBucket, *args: Any, **kwargs: Any + ) -> List[GridOut]: + return list(target.find(*args, **kwargs)) def run_entity_operation(self, spec): target = self.entity_map[spec["object"]] From 4d4fddaf699d16af6e082da5b5c3303cbafc2818 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Tue, 19 Jul 2022 18:17:12 -0700 Subject: [PATCH 0213/1588] PYTHON-3363 Allow change stream to be resumed after a timeout (#1014) Apply client timeoutMS to ChangeStream iteration. 
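A rough end-to-end sketch (editor's illustration; assumes a replica set and hypothetical database/collection names) of what this change enables: a change stream that hits a timeout stays alive and can simply be retried, instead of having to be recreated:

    import pymongo
    from pymongo.errors import PyMongoError

    client = pymongo.MongoClient()
    with client.db.coll.watch() as stream:
        while stream.alive:
            try:
                with pymongo.timeout(0.5):
                    change = stream.try_next()
            except PyMongoError as exc:
                if not exc.timeout:
                    raise  # only timeout errors are retried here
                continue  # timed out; resume on the next attempt
            if change is not None:
                print(change)

try_next() now transparently resumes the server-side cursor when the previous attempt was interrupted by a timeout.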
--- pymongo/change_stream.py | 16 ++++++++++++++-- test/test_change_stream.py | 14 ++++++-------- test/test_csot.py | 33 ++++++++++++++++++++++++++++++++- test/unified_format.py | 9 ++++----- 4 files changed, 56 insertions(+), 16 deletions(-) diff --git a/pymongo/change_stream.py b/pymongo/change_stream.py index ef3573022d..80820dff91 100644 --- a/pymongo/change_stream.py +++ b/pymongo/change_stream.py @@ -20,7 +20,7 @@ from bson import _bson_to_dict from bson.raw_bson import RawBSONDocument from bson.timestamp import Timestamp -from pymongo import common +from pymongo import _csot, common from pymongo.aggregation import ( _CollectionAggregationCommand, _DatabaseAggregationCommand, @@ -128,6 +128,8 @@ def __init__( self._start_at_operation_time = start_at_operation_time self._session = session self._comment = comment + self._closed = False + self._timeout = self._target._timeout # Initialize cursor. self._cursor = self._create_cursor() @@ -234,6 +236,7 @@ def _resume(self): def close(self) -> None: """Close this ChangeStream.""" + self._closed = True self._cursor.close() def __iter__(self) -> "ChangeStream[_DocumentType]": @@ -248,6 +251,7 @@ def resume_token(self) -> Optional[Mapping[str, Any]]: """ return copy.deepcopy(self._resume_token) + @_csot.apply def next(self) -> _DocumentType: """Advance the cursor. @@ -298,8 +302,9 @@ def alive(self) -> bool: .. versionadded:: 3.8 """ - return self._cursor.alive + return not self._closed + @_csot.apply def try_next(self) -> Optional[_DocumentType]: """Advance the cursor without blocking indefinitely. @@ -332,6 +337,9 @@ def try_next(self) -> Optional[_DocumentType]: .. versionadded:: 3.8 """ + if not self._closed and not self._cursor.alive: + self._resume() + # Attempt to get the next change with at most one getMore and at most # one resume attempt. try: @@ -350,6 +358,10 @@ def try_next(self) -> Optional[_DocumentType]: self._resume() change = self._cursor._try_next(False) + # Check if the cursor was invalidated. + if not self._cursor.alive: + self._closed = True + # If no changes are available. if change is None: # We have either iterated over all documents in the cursor, diff --git a/test/test_change_stream.py b/test/test_change_stream.py index f3f206d965..11ed2895ac 100644 --- a/test/test_change_stream.py +++ b/test/test_change_stream.py @@ -486,7 +486,7 @@ def _get_expected_resume_token(self, stream, listener, previous_change=None): return response["cursor"]["postBatchResumeToken"] @no_type_check - def _test_raises_error_on_missing_id(self, expected_exception): + def _test_raises_error_on_missing_id(self, expected_exception, expected_exception2): """ChangeStream will raise an exception if the server response is missing the resume token. """ @@ -494,8 +494,7 @@ def _test_raises_error_on_missing_id(self, expected_exception): self.watched_collection().insert_one({}) with self.assertRaises(expected_exception): next(change_stream) - # The cursor should now be closed. - with self.assertRaises(StopIteration): + with self.assertRaises(expected_exception2): next(change_stream) @no_type_check @@ -525,17 +524,16 @@ def test_update_resume_token_legacy(self): self._test_update_resume_token(self._get_expected_resume_token_legacy) # Prose test no. 
2 - @client_context.require_version_max(4, 3, 3) # PYTHON-2120 @client_context.require_version_min(4, 1, 8) def test_raises_error_on_missing_id_418plus(self): - # Server returns an error on 4.1.8+ - self._test_raises_error_on_missing_id(OperationFailure) + # Server returns an error on 4.1.8+, subsequent next() resumes and gets the same error. + self._test_raises_error_on_missing_id(OperationFailure, OperationFailure) # Prose test no. 2 @client_context.require_version_max(4, 1, 8) def test_raises_error_on_missing_id_418minus(self): - # PyMongo raises an error - self._test_raises_error_on_missing_id(InvalidOperation) + # PyMongo raises an error, closes the cursor, subsequent next() raises StopIteration. + self._test_raises_error_on_missing_id(InvalidOperation, StopIteration) # Prose test no. 3 @no_type_check diff --git a/test/test_csot.py b/test/test_csot.py index 4d71973320..7b82a49caf 100644 --- a/test/test_csot.py +++ b/test/test_csot.py @@ -19,11 +19,12 @@ sys.path[0:0] = [""] -from test import IntegrationTest, unittest +from test import IntegrationTest, client_context, unittest from test.unified_format import generate_test_classes import pymongo from pymongo import _csot +from pymongo.errors import PyMongoError # Location of JSON test specifications. TEST_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "csot") @@ -72,6 +73,36 @@ def test_timeout_nested(self): self.assertEqual(_csot.get_deadline(), float("inf")) self.assertEqual(_csot.get_rtt(), 0.0) + @client_context.require_version_min(3, 6) + @client_context.require_no_mmap + @client_context.require_no_standalone + def test_change_stream_can_resume_after_timeouts(self): + coll = self.db.test + with coll.watch(max_await_time_ms=150) as stream: + with pymongo.timeout(0.1): + with self.assertRaises(PyMongoError) as ctx: + stream.try_next() + self.assertTrue(ctx.exception.timeout) + self.assertTrue(stream.alive) + with self.assertRaises(PyMongoError) as ctx: + stream.try_next() + self.assertTrue(ctx.exception.timeout) + self.assertTrue(stream.alive) + # Resume before the insert on 3.6 because 4.0 is required to avoid skipping documents + if client_context.version < (4, 0): + stream.try_next() + coll.insert_one({}) + with pymongo.timeout(10): + self.assertTrue(stream.next()) + self.assertTrue(stream.alive) + # Timeout applies to entire next() call, not only individual commands. 
+ with pymongo.timeout(0.5): + with self.assertRaises(PyMongoError) as ctx: + stream.next() + self.assertTrue(ctx.exception.timeout) + self.assertTrue(stream.alive) + self.assertFalse(stream.alive) + if __name__ == "__main__": unittest.main() diff --git a/test/unified_format.py b/test/unified_format.py index d81238c0ee..ee64915202 100644 --- a/test/unified_format.py +++ b/test/unified_format.py @@ -1078,10 +1078,6 @@ def _sessionOperation_startTransaction(self, target, *args, **kwargs): self.__raise_if_unsupported("startTransaction", target, ClientSession) return target.start_transaction(*args, **kwargs) - def _cursor_iterateOnce(self, target, *args, **kwargs): - self.__raise_if_unsupported("iterateOnce", target, NonLazyCursor, ChangeStream) - return target.try_next() - def _changeStreamOperation_iterateUntilDocumentOrError(self, target, *args, **kwargs): self.__raise_if_unsupported("iterateUntilDocumentOrError", target, ChangeStream) return next(target) @@ -1204,8 +1200,11 @@ def run_entity_operation(self, spec): try: method = getattr(self, method_name) except AttributeError: + target_opname = camel_to_snake(opname) + if target_opname == "iterate_once": + target_opname = "try_next" try: - cmd = getattr(target, camel_to_snake(opname)) + cmd = getattr(target, target_opname) except AttributeError: self.fail("Unsupported operation %s on entity %s" % (opname, target)) else: From 6172c00dbe1b53152293560a3c2272b44776fa9d Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Tue, 19 Jul 2022 19:08:54 -0700 Subject: [PATCH 0214/1588] PYTHON-3362 Fix CSOT gridfs test (#1015) --- test/csot/gridfs-advanced.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/csot/gridfs-advanced.json b/test/csot/gridfs-advanced.json index 0b09684fc7..6bf0229a04 100644 --- a/test/csot/gridfs-advanced.json +++ b/test/csot/gridfs-advanced.json @@ -366,7 +366,7 @@ "drop" ], "blockConnection": true, - "blockTimeMS": 30 + "blockTimeMS": 50 } } } From 9bc134cf612fc29675a0388b6d30840c05fc1475 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Wed, 20 Jul 2022 08:33:41 -0700 Subject: [PATCH 0215/1588] BUMP 4.2 (#1016) --- doc/changelog.rst | 42 ++++++++++++++++++++++++++++++++++++++++-- pymongo/__init__.py | 2 +- setup.py | 2 +- 3 files changed, 42 insertions(+), 4 deletions(-) diff --git a/doc/changelog.rst b/doc/changelog.rst index b6b099fd31..7afaca22a1 100644 --- a/doc/changelog.rst +++ b/doc/changelog.rst @@ -13,11 +13,26 @@ PyMongo 4.2 brings a number of improvements including: changes may be made before the final release. See :ref:`automatic-queryable-client-side-encryption` for example usage. - Provisional (beta) support for :func:`pymongo.timeout` to apply a single timeout to an entire block of pymongo operations. +- Added the ``timeoutMS`` URI and keyword argument to :class:`~pymongo.mongo_client.MongoClient`. - Added the :attr:`pymongo.errors.PyMongoError.timeout` property which is ``True`` when the error was caused by a timeout. -- Added ``check_exists`` option to :meth:`~pymongo.database.Database.create_collection` +- Added the ``check_exists`` argument to :meth:`~pymongo.database.Database.create_collection` that when True (the default) runs an additional ``listCollections`` command to verify that the collection does not exist already. 
+- Added the following key management APIs to :class:`~pymongo.encryption.ClientEncryption`: + + - :meth:`~pymongo.encryption.ClientEncryption.get_key` + - :meth:`~pymongo.encryption.ClientEncryption.get_keys` + - :meth:`~pymongo.encryption.ClientEncryption.delete_key` + - :meth:`~pymongo.encryption.ClientEncryption.add_key_alt_name` + - :meth:`~pymongo.encryption.ClientEncryption.get_key_by_alt_name` + - :meth:`~pymongo.encryption.ClientEncryption.remove_key_alt_name` + - :meth:`~pymongo.encryption.ClientEncryption.rewrap_many_data_key` + - :class:`~pymongo.encryption.RewrapManyDataKeyResult` + +- Support for the ``crypt_shared`` library to replace ``mongocryptd`` using the new + ``crypt_shared_lib_path`` and ``crypt_shared_lib_required`` arguments to + :class:`~pymongo.encryption_options.AutoEncryptionOpts`. Bug fixes ......... @@ -25,7 +40,18 @@ Bug fixes - Fixed a bug where :meth:`~pymongo.collection.Collection.estimated_document_count` would fail with a "CommandNotSupportedOnView" error on views (`PYTHON-2885`_). - Fixed a bug where invalid UTF-8 strings could be passed as patterns for :class:`~bson.regex.Regex` - objects (`PYTHON-3048`_). :func:`bson.encode` now correctly raises :class:`bson.errors.InvalidStringData`. + objects. :func:`bson.encode` now correctly raises :class:`bson.errors.InvalidStringData` (`PYTHON-3048`_). +- Fixed a bug that caused ``AutoReconnect("connection pool paused")`` errors in the child + process after fork (`PYTHON-3257`_). +- Fixed a bug where :meth:`~pymongo.collection.Collection.count_documents` and + :meth:`~pymongo.collection.Collection.distinct` would fail in a transaction with + ``directConnection=True`` (`PYTHON-3333`_). +- GridFS no longer uploads an incomplete files collection document after encountering an + error in the middle of an upload. This results in fewer + :class:`~gridfs.errors.CorruptGridFile` errors (`PYTHON-1552`_). +- Renamed PyMongo's internal C extension methods to avoid crashing due to name conflicts + with mpi4py and other shared libraries (`PYTHON-2110`_). +- Fixed tight CPU loop for network I/O when using PyOpenSSL (`PYTHON-3187`_). Unavoidable breaking changes ............................ @@ -38,6 +64,11 @@ Unavoidable breaking changes Users of the Stable API with estimated_document_count are recommended to upgrade their server version to 5.0.9+ or set :attr:`pymongo.server_api.ServerApi.strict` to ``False`` to avoid encountering errors (`PYTHON-3167`_). +- Removed generic typing from :class:`~pymongo.client_session.ClientSession` to improve + support for Pyright (`PYTHON-3283`_). +- Added ``__all__`` to the bson, pymongo, and gridfs packages. This could be a breaking + change for apps that relied on ``from bson import *`` to import APIs not present in + ``__all__`` (`PYTHON-3311`_). .. _count: https://mongodb.com/docs/manual/reference/command/count/ @@ -50,6 +81,13 @@ in this release. .. _PYTHON-3048: https://jira.mongodb.org/browse/PYTHON-3048 .. _PYTHON-2885: https://jira.mongodb.org/browse/PYTHON-2885 .. _PYTHON-3167: https://jira.mongodb.org/browse/PYTHON-3167 +.. _PYTHON-3257: https://jira.mongodb.org/browse/PYTHON-3257 +.. _PYTHON-3333: https://jira.mongodb.org/browse/PYTHON-3333 +.. _PYTHON-1552: https://jira.mongodb.org/browse/PYTHON-1552 +.. _PYTHON-2110: https://jira.mongodb.org/browse/PYTHON-2110 +.. _PYTHON-3283: https://jira.mongodb.org/browse/PYTHON-3283 +.. _PYTHON-3311: https://jira.mongodb.org/browse/PYTHON-3311 +.. _PYTHON-3187: https://jira.mongodb.org/browse/PYTHON-3187 ..
_PyMongo 4.2 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=33196 .. _Queryable Encryption: automatic-queryable-client-side-encryption diff --git a/pymongo/__init__.py b/pymongo/__init__.py index 7eaa793648..ee246d25a9 100644 --- a/pymongo/__init__.py +++ b/pymongo/__init__.py @@ -84,7 +84,7 @@ .. _text index: http://mongodb.com/docs/manual/core/index-text/ """ -version_tuple: Tuple[Union[int, str], ...] = (4, 2, 0, ".dev2") +version_tuple: Tuple[Union[int, str], ...] = (4, 2, 0) def get_version_string() -> str: diff --git a/setup.py b/setup.py index ce6cce712e..94889d7261 100755 --- a/setup.py +++ b/setup.py @@ -34,7 +34,7 @@ except ImportError: _HAVE_SPHINX = False -version = "4.2.0.dev2" +version = "4.2.0" f = open("README.rst") try: From e192c7f85ec627e09cf934e15bc7c009b64c51a6 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Wed, 20 Jul 2022 08:37:16 -0700 Subject: [PATCH 0216/1588] BUMP 4.2.1.dev0 --- pymongo/__init__.py | 2 +- setup.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/pymongo/__init__.py b/pymongo/__init__.py index ee246d25a9..257c1dbac1 100644 --- a/pymongo/__init__.py +++ b/pymongo/__init__.py @@ -84,7 +84,7 @@ .. _text index: http://mongodb.com/docs/manual/core/index-text/ """ -version_tuple: Tuple[Union[int, str], ...] = (4, 2, 0) +version_tuple: Tuple[Union[int, str], ...] = (4, 2, 1, ".dev0") def get_version_string() -> str: diff --git a/setup.py b/setup.py index 94889d7261..0e983e4642 100755 --- a/setup.py +++ b/setup.py @@ -34,7 +34,7 @@ except ImportError: _HAVE_SPHINX = False -version = "4.2.0" +version = "4.2.1.dev0" f = open("README.rst") try: From c131ad8cc13de32aca23d1b1d352d1a9892896b0 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Wed, 20 Jul 2022 13:04:49 -0700 Subject: [PATCH 0217/1588] Update readme for 6.0 support (#1017) --- README.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.rst b/README.rst index c301932643..f60b8da680 100644 --- a/README.rst +++ b/README.rst @@ -16,7 +16,7 @@ is a `gridfs `_ implementation on top of ``pymongo``. -PyMongo supports MongoDB 3.6, 4.0, 4.2, 4.4, and 5.0. +PyMongo supports MongoDB 3.6, 4.0, 4.2, 4.4, 5.0, and 6.0. 
Support / Feedback ================== From 065b02bcb3ff6d8c088e4934105b9158f48d7074 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Thu, 21 Jul 2022 11:47:02 -0700 Subject: [PATCH 0218/1588] PYTHON-3358 Skip obsolete StaleShardVersion test on 6.1.0+ (#1018) --- .../unified/change-streams-resume-errorLabels.json | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/test/change_streams/unified/change-streams-resume-errorLabels.json b/test/change_streams/unified/change-streams-resume-errorLabels.json index c156b550ce..f5f4505a9f 100644 --- a/test/change_streams/unified/change-streams-resume-errorLabels.json +++ b/test/change_streams/unified/change-streams-resume-errorLabels.json @@ -1478,6 +1478,11 @@ }, { "description": "change stream resumes after StaleShardVersion", + "runOnRequirements": [ + { + "maxServerVersion": "6.0.99" + } + ], "operations": [ { "name": "failPoint", From 925537575b63029931d54d3abf250e4ecdbcae75 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Mon, 25 Jul 2022 14:22:51 -0700 Subject: [PATCH 0219/1588] PYTHON-3284 Fix test_snapshot_query by waiting for documents to be committed to the snapshot (#1019) --- test/test_examples.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/test/test_examples.py b/test/test_examples.py index b7b70463ac..e23abe104f 100644 --- a/test/test_examples.py +++ b/test/test_examples.py @@ -1372,9 +1372,9 @@ def check_for_snapshot(self, collection): """ with self.client.start_session(snapshot=True) as s: try: - with collection.aggregate([], session=s): - pass - return True + if collection.find_one(session=s): + return True + return False except OperationFailure as e: # Retry them as the server demands... if e.code == 246: # SnapshotUnavailable From f5ac946020609569e0eb7ca51a6316ad03a54fb1 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Mon, 25 Jul 2022 15:19:22 -0700 Subject: [PATCH 0220/1588] PYTHON-3368 Add test that reads are not retried in a transaction (#1020) --- .../do-not-retry-read-in-transaction.json | 115 ++++++++++++++++++ 1 file changed, 115 insertions(+) create mode 100644 test/transactions/unified/do-not-retry-read-in-transaction.json diff --git a/test/transactions/unified/do-not-retry-read-in-transaction.json b/test/transactions/unified/do-not-retry-read-in-transaction.json new file mode 100644 index 0000000000..6d9dc704b8 --- /dev/null +++ b/test/transactions/unified/do-not-retry-read-in-transaction.json @@ -0,0 +1,115 @@ +{ + "description": "do not retry read in a transaction", + "schemaVersion": "1.4", + "runOnRequirements": [ + { + "minServerVersion": "4.0.0", + "topologies": [ + "replicaset" + ] + }, + { + "minServerVersion": "4.2.0", + "topologies": [ + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "uriOptions": { + "retryReads": true + } + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "retryable-read-in-transaction-test" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll" + } + }, + { + "session": { + "id": "session0", + "client": "client0" + } + } + ], + "tests": [ + { + "description": "find does not retry in a transaction", + "operations": [ + { + "name": "startTransaction", + "object": "session0" + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": 
{ + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "closeConnection": true + } + } + } + }, + { + "name": "find", + "object": "collection0", + "arguments": { + "filter": {}, + "session": "session0" + }, + "expectError": { + "isError": true, + "errorLabelsContain": [ + "TransientTransactionError" + ] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "coll", + "filter": {}, + "startTransaction": true + }, + "commandName": "find", + "databaseName": "retryable-read-in-transaction-test" + } + } + ] + } + ] + } + ] +} From 864812d40093fd1502626b9cc45d62f97a29025a Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Mon, 25 Jul 2022 15:25:41 -0700 Subject: [PATCH 0221/1588] PYTHON-3366 Support mypy 0.971 and test with latest version (#1021) PYTHON-3369 Use https://www.gevent.org --- .github/workflows/test-python.yml | 4 ++-- bson/__init__.py | 12 ++++++------ doc/conf.py | 2 +- pymongo/pyopenssl_context.py | 9 +++------ test/test_auth.py | 4 ++-- test/test_bson.py | 21 ++++++++++++++++----- test/test_change_stream.py | 3 ++- test/test_collection.py | 2 +- test/test_database.py | 5 +++-- 9 files changed, 36 insertions(+), 26 deletions(-) diff --git a/.github/workflows/test-python.yml b/.github/workflows/test-python.yml index 89d9830e82..6d5f26c503 100644 --- a/.github/workflows/test-python.yml +++ b/.github/workflows/test-python.yml @@ -59,8 +59,8 @@ jobs: cache-dependency-path: 'setup.py' - name: Install dependencies run: | - python -m pip install -U pip mypy==0.942 - pip install -e ".[zstd, srv]" + python -m pip install -U pip mypy + pip install -e ".[zstd, srv, encryption, ocsp]" - name: Run mypy run: | mypy --install-types --non-interactive bson gridfs tools pymongo diff --git a/bson/__init__.py b/bson/__init__.py index cc0850709e..2db1fb5d0b 100644 --- a/bson/__init__.py +++ b/bson/__init__.py @@ -61,8 +61,8 @@ import struct import sys import uuid -from codecs import utf_8_decode as _utf_8_decode # type: ignore[attr-defined] -from codecs import utf_8_encode as _utf_8_encode # type: ignore[attr-defined] +from codecs import utf_8_decode as _utf_8_decode +from codecs import utf_8_encode as _utf_8_encode from collections import abc as _abc from typing import ( IO, @@ -621,7 +621,7 @@ def _make_c_string_check(string: Union[str, bytes]) -> bytes: else: if "\x00" in string: raise InvalidDocument("BSON keys / regex patterns must not contain a NUL character") - return cast(bytes, _utf_8_encode(string)[0]) + b"\x00" + return _utf_8_encode(string)[0] + b"\x00" def _make_c_string(string: Union[str, bytes]) -> bytes: @@ -633,7 +633,7 @@ def _make_c_string(string: Union[str, bytes]) -> bytes: except UnicodeError: raise InvalidStringData("strings in documents must be valid UTF-8: %r" % string) else: - return cast(bytes, _utf_8_encode(string)[0]) + b"\x00" + return _utf_8_encode(string)[0] + b"\x00" def _make_name(string: str) -> bytes: @@ -641,7 +641,7 @@ def _make_name(string: str) -> bytes: # Keys can only be text in python 3. 
if "\x00" in string: raise InvalidDocument("BSON keys / regex patterns must not contain a NUL character") - return cast(bytes, _utf_8_encode(string)[0]) + b"\x00" + return _utf_8_encode(string)[0] + b"\x00" def _encode_float(name: bytes, value: float, dummy0: Any, dummy1: Any) -> bytes: @@ -1308,7 +1308,7 @@ def encode( """ return cls(encode(document, check_keys, codec_options)) - def decode(self, codec_options: CodecOptions = DEFAULT_CODEC_OPTIONS) -> _DocumentType: # type: ignore[override] + def decode(self, codec_options: "CodecOptions[_DocumentType]" = DEFAULT_CODEC_OPTIONS) -> _DocumentType: # type: ignore[override,assignment] """Decode this BSON data. By default, returns a BSON document represented as a Python diff --git a/doc/conf.py b/doc/conf.py index 1e18eb29bf..f66de3868a 100644 --- a/doc/conf.py +++ b/doc/conf.py @@ -192,6 +192,6 @@ intersphinx_mapping = { - "gevent": ("http://www.gevent.org/", None), + "gevent": ("https://www.gevent.org/", None), "py": ("https://docs.python.org/3/", None), } diff --git a/pymongo/pyopenssl_context.py b/pymongo/pyopenssl_context.py index 758a741b6f..2d9c904bb3 100644 --- a/pymongo/pyopenssl_context.py +++ b/pymongo/pyopenssl_context.py @@ -135,7 +135,7 @@ def recv(self, *args, **kwargs): def recv_into(self, *args, **kwargs): try: - return self._call(super(_sslConn, self).recv_into, *args, **kwargs) # type: ignore + return self._call(super(_sslConn, self).recv_into, *args, **kwargs) except _SSL.SysCallError as exc: # Suppress ragged EOFs to match the stdlib. if self.suppress_ragged_eofs and _ragged_eof(exc): @@ -146,12 +146,9 @@ def sendall(self, buf, flags=0): view = memoryview(buf) total_length = len(buf) total_sent = 0 - sent = 0 while total_sent < total_length: try: - sent = self._call( - super(_sslConn, self).send, view[total_sent:], flags # type: ignore - ) + sent = self._call(super(_sslConn, self).send, view[total_sent:], flags) # XXX: It's not clear if this can actually happen. PyOpenSSL # doesn't appear to have any interrupt handling, nor any interrupt # errors for OpenSSL connections. @@ -162,7 +159,7 @@ def sendall(self, buf, flags=0): # https://github.com/pyca/pyopenssl/blob/19.1.0/src/OpenSSL/SSL.py#L1756 # https://www.openssl.org/docs/man1.0.2/man3/SSL_write.html if sent <= 0: - raise Exception("Connection closed") + raise OSError("connection closed") total_sent += sent diff --git a/test/test_auth.py b/test/test_auth.py index 69ed27bda0..20d53ef24b 100644 --- a/test/test_auth.py +++ b/test/test_auth.py @@ -329,8 +329,8 @@ def auth_string(user, password): bad_user = MongoClient(auth_string("not-user", SASL_PASS)) bad_pwd = MongoClient(auth_string(SASL_USER, "not-pwd")) # OperationFailure raised upon connecting. 
- self.assertRaises(OperationFailure, bad_user.admin.command, "ping") - self.assertRaises(OperationFailure, bad_pwd.admin.command, "ping") + self.assertRaises(OperationFailure, bad_user.admin.command, "ping") # type: ignore[arg-type] + self.assertRaises(OperationFailure, bad_pwd.admin.command, "ping") # type: ignore[arg-type] class TestSCRAMSHA1(IntegrationTest): diff --git a/test/test_bson.py b/test/test_bson.py index 8ad65f3412..aa77954fa2 100644 --- a/test/test_bson.py +++ b/test/test_bson.py @@ -117,7 +117,8 @@ def tzname(self, dt): class TestBSON(unittest.TestCase): def assertInvalid(self, data): - self.assertRaises(InvalidBSON, decode, data) + # Remove type ignore after: https://github.com/python/mypy/issues/13220 + self.assertRaises(InvalidBSON, decode, data) # type: ignore[arg-type] def check_encode_then_decode(self, doc_class=dict, decoder=decode, encoder=encode): @@ -1025,11 +1026,17 @@ def test_unicode_decode_error_handler(self): # Ensure that strict mode raises an error. for invalid in [invalid_key, invalid_val, invalid_both]: + # Remove type ignore after: https://github.com/python/mypy/issues/13220 self.assertRaises( - InvalidBSON, decode, invalid, CodecOptions(unicode_decode_error_handler="strict") + InvalidBSON, + decode, # type: ignore[arg-type] + invalid, + CodecOptions(unicode_decode_error_handler="strict"), ) - self.assertRaises(InvalidBSON, decode, invalid, CodecOptions()) - self.assertRaises(InvalidBSON, decode, invalid) + self.assertRaises( + InvalidBSON, decode, invalid, CodecOptions() # type: ignore[arg-type] + ) + self.assertRaises(InvalidBSON, decode, invalid) # type: ignore[arg-type] # Test all other error handlers. for handler in ["replace", "backslashreplace", "surrogateescape", "ignore"]: @@ -1046,8 +1053,12 @@ def test_unicode_decode_error_handler(self): dec = decode(enc, CodecOptions(unicode_decode_error_handler="junk")) self.assertEqual(dec, {"keystr": "foobar"}) + # Remove type ignore after: https://github.com/python/mypy/issues/13220 self.assertRaises( - InvalidBSON, decode, invalid_both, CodecOptions(unicode_decode_error_handler="junk") + InvalidBSON, + decode, # type: ignore[arg-type] + invalid_both, + CodecOptions(unicode_decode_error_handler="junk"), ) def round_trip_pickle(self, obj, pickled_with_older): diff --git a/test/test_change_stream.py b/test/test_change_stream.py index 11ed2895ac..b5b260086d 100644 --- a/test/test_change_stream.py +++ b/test/test_change_stream.py @@ -1084,8 +1084,9 @@ def setFailPoint(self, scenario_dict): fail_cmd = SON([("configureFailPoint", "failCommand")]) fail_cmd.update(fail_point) client_context.client.admin.command(fail_cmd) + # Remove type ignore after: https://github.com/python/mypy/issues/13220 self.addCleanup( - client_context.client.admin.command, + client_context.client.admin.command, # type: ignore[arg-type] "configureFailPoint", fail_cmd["configureFailPoint"], mode="off", diff --git a/test/test_collection.py b/test/test_collection.py index bea2ed6ca6..37f1b1eae2 100644 --- a/test/test_collection.py +++ b/test/test_collection.py @@ -19,7 +19,7 @@ import contextlib import re import sys -from codecs import utf_8_decode # type: ignore +from codecs import utf_8_decode from collections import defaultdict from typing import Iterable, no_type_check diff --git a/test/test_database.py b/test/test_database.py index d49ac8324f..a1c0439089 100644 --- a/test/test_database.py +++ b/test/test_database.py @@ -604,13 +604,14 @@ def test_command_max_time_ms(self): try: db 
= self.client.pymongo_test db.command("count", "test") - self.assertRaises(ExecutionTimeout, db.command, "count", "test", maxTimeMS=1) + # Remove type ignore after: https://github.com/python/mypy/issues/13220 + self.assertRaises(ExecutionTimeout, db.command, "count", "test", maxTimeMS=1) # type: ignore[arg-type] pipeline = [{"$project": {"name": 1, "count": 1}}] # Database command helper. db.command("aggregate", "test", pipeline=pipeline, cursor={}) self.assertRaises( ExecutionTimeout, - db.command, + db.command, # type: ignore[arg-type] "aggregate", "test", pipeline=pipeline, From e96f112d84c0a62ae601d1c65835c7319ea91255 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Wed, 27 Jul 2022 18:28:23 -0500 Subject: [PATCH 0222/1588] PYTHON-3274 Add commandStartedEvent assertions to clustered index spec tests (#1022) --- .../clustered-indexes.json | 122 +++++++++++++++++- 1 file changed, 118 insertions(+), 4 deletions(-) diff --git a/test/collection_management/clustered-indexes.json b/test/collection_management/clustered-indexes.json index 739d0fd8b6..9db5ff06d7 100644 --- a/test/collection_management/clustered-indexes.json +++ b/test/collection_management/clustered-indexes.json @@ -10,14 +10,17 @@ "createEntities": [ { "client": { - "id": "client0" + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] } }, { "database": { "id": "database0", "client": "client0", - "databaseName": "ts-tests" + "databaseName": "ci-tests" } }, { @@ -31,7 +34,7 @@ "initialData": [ { "collectionName": "test", - "databaseName": "ts-tests", + "databaseName": "ci-tests", "documents": [] } ], @@ -64,10 +67,40 @@ "name": "assertCollectionExists", "object": "testRunner", "arguments": { - "databaseName": "ts-tests", + "databaseName": "ci-tests", "collectionName": "test" } } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "drop": "test" + }, + "databaseName": "ci-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "create": "test", + "clusteredIndex": { + "key": { + "_id": 1 + }, + "unique": true, + "name": "test index" + } + }, + "databaseName": "ci-tests" + } + } + ] + } ] }, { @@ -125,6 +158,49 @@ } ] } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "drop": "test" + }, + "databaseName": "ci-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "create": "test", + "clusteredIndex": { + "key": { + "_id": 1 + }, + "unique": true, + "name": "test index" + } + }, + "databaseName": "ci-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": { + "$eq": "test" + } + } + }, + "databaseName": "ci-tests" + } + } + ] + } ] }, { @@ -171,6 +247,44 @@ } ] } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "drop": "test" + }, + "databaseName": "ci-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "create": "test", + "clusteredIndex": { + "key": { + "_id": 1 + }, + "unique": true, + "name": "test index" + } + }, + "databaseName": "ci-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "listIndexes": "test" + }, + "databaseName": "ci-tests" + } + } + ] + } ] } ] From 14002a5a0d294cae8c2c5349e1a92364381dfd4d Mon Sep 17 00:00:00 2001 From: Ben Warner Date: Wed, 27 Jul 2022 16:53:52 -0700 Subject: [PATCH 0223/1588] PYTHON-1824 Allow encoding/decoding out-of-range datetimes via DatetimeMS and datetime_conversion 
(#981) https://jira.mongodb.org/browse/PYTHON-1824 Co-authored-by: Ben Warner --- bson/__init__.py | 47 ++++------ bson/_cbsonmodule.c | 138 +++++++++++++++++++++++++++- bson/_cbsonmodule.h | 1 + bson/codec_options.py | 27 +++++- bson/codec_options.pyi | 8 ++ bson/datetime_ms.py | 157 ++++++++++++++++++++++++++++++++ bson/json_util.py | 43 +++++++-- doc/api/bson/datetime_ms.rst | 4 + doc/api/bson/index.rst | 1 + doc/examples/datetimes.rst | 54 +++++++++++ pymongo/common.py | 18 +++- pymongo/mongo_client.py | 8 ++ test/test_bson.py | 170 ++++++++++++++++++++++++++++++++++- test/test_client.py | 23 ++++- test/test_json_util.py | 68 +++++++++++++- 15 files changed, 721 insertions(+), 46 deletions(-) create mode 100644 bson/datetime_ms.py create mode 100644 doc/api/bson/datetime_ms.rst diff --git a/bson/__init__.py b/bson/__init__.py index 2db1fb5d0b..4283faf7dc 100644 --- a/bson/__init__.py +++ b/bson/__init__.py @@ -54,7 +54,6 @@ subtype 0. It will be decoded back to bytes. """ -import calendar import datetime import itertools import re @@ -100,9 +99,18 @@ from bson.codec_options import ( DEFAULT_CODEC_OPTIONS, CodecOptions, + DatetimeConversionOpts, _DocumentType, _raw_document_class, ) +from bson.datetime_ms import ( + EPOCH_AWARE, + EPOCH_NAIVE, + DatetimeMS, + _datetime_to_millis, + _millis_to_datetime, + utc, +) from bson.dbref import DBRef from bson.decimal128 import Decimal128 from bson.errors import InvalidBSON, InvalidDocument, InvalidStringData @@ -113,7 +121,6 @@ from bson.regex import Regex from bson.son import RE_TYPE, SON from bson.timestamp import Timestamp -from bson.tz_util import utc # Import some modules for type-checking only. if TYPE_CHECKING: @@ -187,12 +194,10 @@ "is_valid", "BSON", "has_c", + "DatetimeConversionOpts", + "DatetimeMS", ] -EPOCH_AWARE = datetime.datetime.fromtimestamp(0, utc) -EPOCH_NAIVE = datetime.datetime.utcfromtimestamp(0) - - BSONNUM = b"\x01" # Floating point BSONSTR = b"\x02" # UTF-8 string BSONOBJ = b"\x03" # Embedded document @@ -413,7 +418,7 @@ def _get_boolean( def _get_date( data: Any, view: Any, position: int, dummy0: int, opts: CodecOptions, dummy1: Any -) -> Tuple[datetime.datetime, int]: +) -> Tuple[Union[datetime.datetime, DatetimeMS], int]: """Decode a BSON datetime to python datetime.datetime.""" return _millis_to_datetime(_UNPACK_LONG_FROM(data, position)[0], opts), position + 8 @@ -724,6 +729,12 @@ def _encode_datetime(name: bytes, value: datetime.datetime, dummy0: Any, dummy1: return b"\x09" + name + _PACK_LONG(millis) +def _encode_datetime_ms(name: bytes, value: DatetimeMS, dummy0: Any, dummy1: Any) -> bytes: + """Encode datetime.datetime.""" + millis = int(value) + return b"\x09" + name + _PACK_LONG(millis) + + def _encode_none(name: bytes, dummy0: Any, dummy1: Any, dummy2: Any) -> bytes: """Encode python None.""" return b"\x0A" + name @@ -814,6 +825,7 @@ def _encode_maxkey(name: bytes, dummy0: Any, dummy1: Any, dummy2: Any) -> bytes: bool: _encode_bool, bytes: _encode_bytes, datetime.datetime: _encode_datetime, + DatetimeMS: _encode_datetime_ms, dict: _encode_mapping, float: _encode_float, int: _encode_int, @@ -948,27 +960,6 @@ def _dict_to_bson(doc: Any, check_keys: bool, opts: CodecOptions, top_level: boo _dict_to_bson = _cbson._dict_to_bson # noqa: F811 -def _millis_to_datetime(millis: int, opts: CodecOptions) -> datetime.datetime: - """Convert milliseconds since epoch UTC to datetime.""" - diff = ((millis % 1000) + 1000) % 1000 - seconds = (millis - diff) // 1000 - micros = diff * 1000 - if opts.tz_aware: - dt = EPOCH_AWARE 
+ datetime.timedelta(seconds=seconds, microseconds=micros) - if opts.tzinfo: - dt = dt.astimezone(opts.tzinfo) - return dt - else: - return EPOCH_NAIVE + datetime.timedelta(seconds=seconds, microseconds=micros) - - -def _datetime_to_millis(dtm: datetime.datetime) -> int: - """Convert datetime to milliseconds since epoch UTC.""" - if dtm.utcoffset() is not None: - dtm = dtm - dtm.utcoffset() # type: ignore - return int(calendar.timegm(dtm.timetuple()) * 1000 + dtm.microsecond // 1000) - - _CODEC_OPTIONS_TYPE_ERROR = TypeError("codec_options must be an instance of CodecOptions") diff --git a/bson/_cbsonmodule.c b/bson/_cbsonmodule.c index da6a5cbda7..019f049bb5 100644 --- a/bson/_cbsonmodule.c +++ b/bson/_cbsonmodule.c @@ -52,6 +52,9 @@ struct module_state { PyObject* BSONInt64; PyObject* Decimal128; PyObject* Mapping; + PyObject* DatetimeMS; + PyObject* _min_datetime_ms; + PyObject* _max_datetime_ms; }; #define GETSTATE(m) ((struct module_state*)PyModule_GetState(m)) @@ -72,6 +75,12 @@ struct module_state { /* The smallest possible BSON document, i.e. "{}" */ #define BSON_MIN_SIZE 5 +/* Datetime codec options */ +#define DATETIME 1 +#define DATETIME_CLAMP 2 +#define DATETIME_MS 3 +#define DATETIME_AUTO 4 + /* Get an error class from the bson.errors module. * * Returns a new ref */ @@ -179,6 +188,45 @@ static long long millis_from_datetime(PyObject* datetime) { return millis; } +/* Extended-range datetime, returns a DatetimeMS object with millis */ +static PyObject* datetime_ms_from_millis(PyObject* self, long long millis){ + // Allocate a new DatetimeMS object. + struct module_state *state = GETSTATE(self); + + PyObject* dt; + PyObject* ll_millis; + + if (!(ll_millis = PyLong_FromLongLong(millis))){ + return NULL; + } + dt = PyObject_CallFunctionObjArgs(state->DatetimeMS, ll_millis, NULL); + Py_DECREF(ll_millis); + return dt; +} + +/* Extended-range datetime, takes a DatetimeMS object and extracts the long long value. */ +static int millis_from_datetime_ms(PyObject* dt, long long* out){ + PyObject* ll_millis; + long long millis; + + if (!(ll_millis = PyNumber_Long(dt))){ + if (PyErr_Occurred()) { // TypeError + return 0; + } + } + + if ((millis = PyLong_AsLongLong(ll_millis)) == -1){ + if (PyErr_Occurred()) { /* Overflow */ + PyErr_SetString(PyExc_OverflowError, + "MongoDB datetimes can only handle up to 8-byte ints"); + return 0; + } + } + Py_DECREF(ll_millis); + *out = millis; + return 1; +} + /* Just make this compatible w/ the old API. */ int buffer_write_bytes(buffer_t buffer, const char* data, int size) { if (pymongo_buffer_write(buffer, data, size)) { @@ -342,7 +390,10 @@ static int _load_python_objects(PyObject* module) { _load_object(&state->BSONInt64, "bson.int64", "Int64") || _load_object(&state->Decimal128, "bson.decimal128", "Decimal128") || _load_object(&state->UUID, "uuid", "UUID") || - _load_object(&state->Mapping, "collections.abc", "Mapping")) { + _load_object(&state->Mapping, "collections.abc", "Mapping") || + _load_object(&state->DatetimeMS, "bson.datetime_ms", "DatetimeMS") || + _load_object(&state->_min_datetime_ms, "bson.datetime_ms", "_min_datetime_ms") || + _load_object(&state->_max_datetime_ms, "bson.datetime_ms", "_max_datetime_ms")) { return 1; } /* Reload our REType hack too. 
*/ @@ -466,13 +517,14 @@ int convert_codec_options(PyObject* options_obj, void* p) { options->unicode_decode_error_handler = NULL; - if (!PyArg_ParseTuple(options_obj, "ObbzOO", + if (!PyArg_ParseTuple(options_obj, "ObbzOOb", &options->document_class, &options->tz_aware, &options->uuid_rep, &options->unicode_decode_error_handler, &options->tzinfo, - &type_registry_obj)) + &type_registry_obj, + &options->datetime_conversion)) return 0; type_marker = _type_marker(options->document_class); @@ -1049,6 +1101,13 @@ static int _write_element_to_buffer(PyObject* self, buffer_t buffer, } *(pymongo_buffer_get_buffer(buffer) + type_byte) = 0x09; return buffer_write_int64(buffer, (int64_t)millis); + } else if (PyObject_TypeCheck(value, (PyTypeObject *) state->DatetimeMS)) { + long long millis; + if (!millis_from_datetime_ms(value, &millis)) { + return 0; + } + *(pymongo_buffer_get_buffer(buffer) + type_byte) = 0x09; + return buffer_write_int64(buffer, (int64_t)millis); } else if (PyObject_TypeCheck(value, state->REType)) { return _write_regex_to_buffer(buffer, type_byte, value); } @@ -1854,8 +1913,79 @@ static PyObject* get_value(PyObject* self, PyObject* name, const char* buffer, } memcpy(&millis, buffer + *position, 8); millis = (int64_t)BSON_UINT64_FROM_LE(millis); - naive = datetime_from_millis(millis); *position += 8; + + if (options->datetime_conversion == DATETIME_MS){ + value = datetime_ms_from_millis(self, millis); + break; + } + + int dt_clamp = options->datetime_conversion == DATETIME_CLAMP; + int dt_auto = options->datetime_conversion == DATETIME_AUTO; + + + if (dt_clamp || dt_auto){ + PyObject *min_millis_fn = _get_object(state->_min_datetime_ms, "bson.datetime_ms", "_min_datetime_ms"); + PyObject *max_millis_fn = _get_object(state->_max_datetime_ms, "bson.datetime_ms", "_max_datetime_ms"); + PyObject *min_millis_fn_res; + PyObject *max_millis_fn_res; + int64_t min_millis; + int64_t max_millis; + + if (min_millis_fn == NULL || max_millis_fn == NULL) { + Py_XDECREF(min_millis_fn); + Py_XDECREF(max_millis_fn); + goto invalid; + } + + if (options->tz_aware){ + PyObject* tzinfo = options->tzinfo; + if (tzinfo == Py_None) { + // Default to UTC. + utc_type = _get_object(state->UTC, "bson.tz_util", "utc"); + tzinfo = utc_type; + } + min_millis_fn_res = PyObject_CallFunctionObjArgs(min_millis_fn, tzinfo, NULL); + max_millis_fn_res = PyObject_CallFunctionObjArgs(max_millis_fn, tzinfo, NULL); + } else { + min_millis_fn_res = PyObject_CallObject(min_millis_fn, NULL); + max_millis_fn_res = PyObject_CallObject(max_millis_fn, NULL); + } + + Py_DECREF(min_millis_fn); + Py_DECREF(max_millis_fn); + + if (!min_millis_fn_res || !max_millis_fn_res){ + Py_XDECREF(min_millis_fn_res); + Py_XDECREF(max_millis_fn_res); + goto invalid; + } + + min_millis = PyLong_AsLongLong(min_millis_fn_res); + max_millis = PyLong_AsLongLong(max_millis_fn_res); + + if ((min_millis == -1 || max_millis == -1) && PyErr_Occurred()) + { + // min/max_millis check + goto invalid; + } + + if (dt_clamp) { + if (millis < min_millis) { + millis = min_millis; + } else if (millis > max_millis) { + millis = max_millis; + } + // Continues from here to return a datetime. + } else if (dt_auto) { + if (millis < min_millis || millis > max_millis){ + value = datetime_ms_from_millis(self, millis); + break; // Out-of-range so done. + } + } + } + + naive = datetime_from_millis(millis); if (!options->tz_aware) { /* In the naive case, we're done here. 
*/ value = naive; break; diff --git a/bson/_cbsonmodule.h b/bson/_cbsonmodule.h index 12a2c8ac67..6ff453b8ff 100644 --- a/bson/_cbsonmodule.h +++ b/bson/_cbsonmodule.h @@ -62,6 +62,7 @@ typedef struct codec_options_t { char* unicode_decode_error_handler; PyObject* tzinfo; type_registry_t type_registry; + unsigned char datetime_conversion; PyObject* options_obj; unsigned char is_raw_bson; } codec_options_t; diff --git a/bson/codec_options.py b/bson/codec_options.py index 4eaff59ea7..a29c878929 100644 --- a/bson/codec_options.py +++ b/bson/codec_options.py @@ -16,6 +16,7 @@ import abc import datetime +import enum from collections.abc import MutableMapping as _MutableMapping from typing import ( Any, @@ -198,6 +199,16 @@ def __eq__(self, other: Any) -> Any: ) +class DatetimeConversionOpts(enum.IntEnum): + DATETIME = 1 + DATETIME_CLAMP = 2 + DATETIME_MS = 3 + DATETIME_AUTO = 4 + + def __repr__(self): + return f"{self.value}" + + class _BaseCodecOptions(NamedTuple): document_class: Type[Mapping[str, Any]] tz_aware: bool @@ -205,6 +216,7 @@ class _BaseCodecOptions(NamedTuple): unicode_decode_error_handler: str tzinfo: Optional[datetime.tzinfo] type_registry: TypeRegistry + datetime_conversion: Optional[DatetimeConversionOpts] class CodecOptions(_BaseCodecOptions): @@ -268,7 +280,13 @@ class CodecOptions(_BaseCodecOptions): encoded/decoded. - `type_registry`: Instance of :class:`TypeRegistry` used to customize encoding and decoding behavior. - + - `datetime_conversion`: Specifies how UTC datetimes should be decoded + within BSON. Valid options include 'datetime_ms' to return as a + DatetimeMS, 'datetime' to return as a datetime.datetime and + raising a ValueError for out-of-range values, 'datetime_auto' to + return DatetimeMS objects when the underlying datetime is + out-of-range and 'datetime_clamp' to clamp to the minimum and + maximum possible datetimes. Defaults to 'datetime'. .. versionchanged:: 4.0 The default for `uuid_representation` was changed from :const:`~bson.binary.UuidRepresentation.PYTHON_LEGACY` to @@ -292,6 +310,7 @@ def __new__( unicode_decode_error_handler: str = "strict", tzinfo: Optional[datetime.tzinfo] = None, type_registry: Optional[TypeRegistry] = None, + datetime_conversion: Optional[DatetimeConversionOpts] = DatetimeConversionOpts.DATETIME, ) -> "CodecOptions": doc_class = document_class or dict # issubclass can raise TypeError for generic aliases like SON[str, Any]. 
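Taken together, the hunk above and the one below thread ``datetime_conversion`` through as a seventh ``CodecOptions`` field. A minimal sketch of the resulting surface, assuming a build of this branch (the names are the ones added in this patch):

    >>> from bson.codec_options import CodecOptions, DatetimeConversionOpts
    >>> opts = CodecOptions(datetime_conversion=DatetimeConversionOpts.DATETIME_MS)
    >>> opts.datetime_conversion is DatetimeConversionOpts.DATETIME_MS
    True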
@@ -336,6 +355,7 @@ def __new__( unicode_decode_error_handler, tzinfo, type_registry, + datetime_conversion, ), ) @@ -350,7 +370,7 @@ def _arguments_repr(self) -> str: return ( "document_class=%s, tz_aware=%r, uuid_representation=%s, " "unicode_decode_error_handler=%r, tzinfo=%r, " - "type_registry=%r" + "type_registry=%r, datetime_conversion=%r" % ( document_class_repr, self.tz_aware, @@ -358,6 +378,7 @@ def _arguments_repr(self) -> str: self.unicode_decode_error_handler, self.tzinfo, self.type_registry, + self.datetime_conversion, ) ) @@ -371,6 +392,7 @@ def _options_dict(self) -> Dict[str, Any]: "unicode_decode_error_handler": self.unicode_decode_error_handler, "tzinfo": self.tzinfo, "type_registry": self.type_registry, + "datetime_conversion": self.datetime_conversion, } def __repr__(self): @@ -406,6 +428,7 @@ def _parse_codec_options(options: Any) -> CodecOptions: "unicode_decode_error_handler", "tzinfo", "type_registry", + "datetime_conversion", }: if k == "uuidrepresentation": kwargs["uuid_representation"] = options[k] diff --git a/bson/codec_options.pyi b/bson/codec_options.pyi index 9d5f5c2656..260407524f 100644 --- a/bson/codec_options.pyi +++ b/bson/codec_options.pyi @@ -21,6 +21,7 @@ you get the error: "TypeError: 'type' object is not subscriptable". import datetime import abc +import enum from typing import Tuple, Generic, Optional, Mapping, Any, TypeVar, Type, Dict, Iterable, Tuple, MutableMapping, Callable, Union @@ -54,6 +55,11 @@ class TypeRegistry: _DocumentType = TypeVar("_DocumentType", bound=Mapping[str, Any]) +class DatetimeConversionOpts(int, enum.Enum): + DATETIME = ... + DATETIME_CLAMP = ... + DATETIME_MS = ... + DATETIME_AUTO = ... class CodecOptions(Tuple, Generic[_DocumentType]): document_class: Type[_DocumentType] @@ -62,6 +68,7 @@ class CodecOptions(Tuple, Generic[_DocumentType]): unicode_decode_error_handler: Optional[str] tzinfo: Optional[datetime.tzinfo] type_registry: TypeRegistry + datetime_conversion: Optional[int] def __new__( cls: Type[CodecOptions], @@ -71,6 +78,7 @@ class CodecOptions(Tuple, Generic[_DocumentType]): unicode_decode_error_handler: Optional[str] = ..., tzinfo: Optional[datetime.tzinfo] = ..., type_registry: Optional[TypeRegistry] = ..., + datetime_conversion: Optional[int] = ..., ) -> CodecOptions[_DocumentType]: ... # CodecOptions API diff --git a/bson/datetime_ms.py b/bson/datetime_ms.py new file mode 100644 index 0000000000..f3e25ed05a --- /dev/null +++ b/bson/datetime_ms.py @@ -0,0 +1,157 @@ +# Copyright 2022-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you +# may not use this file except in compliance with the License. You +# may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. See the License for the specific language governing +# permissions and limitations under the License. 
+ +"""Tools for representing the BSON datetime type.""" + +import calendar +import datetime +import functools +from typing import Any, Union, cast + +from bson.codec_options import ( + DEFAULT_CODEC_OPTIONS, + CodecOptions, + DatetimeConversionOpts, +) +from bson.tz_util import utc + +EPOCH_AWARE = datetime.datetime.fromtimestamp(0, utc) +EPOCH_NAIVE = datetime.datetime.utcfromtimestamp(0) + + +class DatetimeMS: + __slots__ = ("_value",) + + def __init__(self, value: Union[int, datetime.datetime]): + """Represents a BSON UTC datetime. + + BSON UTC datetimes are defined as an int64 of milliseconds since the Unix + epoch. The principal use of DatetimeMS is to represent datetimes outside + the range of the Python builtin :class:`~datetime.datetime` class when + encoding/decoding BSON. + + To decode UTC datetimes as a ``DatetimeMS``,`datetime_conversion` in + :class:`~bson.CodecOptions` must be set to 'datetime_ms' or + 'datetime_auto'. See :ref:`handling-out-of-range-datetimes` for details. + + :Parameters: + - `value`: An instance of :class:`datetime.datetime` to be + represented as milliseconds since the Unix epoch, or int of + milliseconds since the Unix epoch. + + .. versionadded:: 4.3 + """ + if isinstance(value, int): + if not (-(2**63) <= value <= 2**63 - 1): + raise OverflowError("Must be a 64-bit integer of milliseconds") + self._value = value + elif isinstance(value, datetime.datetime): + self._value = _datetime_to_millis(value) + else: + raise TypeError(f"{type(value)} is not a valid type for DatetimeMS") + + def __hash__(self) -> int: + return hash(self._value) + + def __repr__(self) -> str: + return type(self).__name__ + "(" + str(self._value) + ")" + + def __lt__(self, other: Union["DatetimeMS", int]) -> bool: + return self._value < other + + def __le__(self, other: Union["DatetimeMS", int]) -> bool: + return self._value <= other + + def __eq__(self, other: Any) -> bool: + if isinstance(other, DatetimeMS): + return self._value == other._value + return False + + def __ne__(self, other: Any) -> bool: + if isinstance(other, DatetimeMS): + return self._value != other._value + return True + + def __gt__(self, other: Union["DatetimeMS", int]) -> bool: + return self._value > other + + def __ge__(self, other: Union["DatetimeMS", int]) -> bool: + return self._value >= other + + _type_marker = 9 + + def as_datetime(self, codec_options: CodecOptions = DEFAULT_CODEC_OPTIONS) -> datetime.datetime: + """Create a Python :class:`~datetime.datetime` from this DatetimeMS object. + + :Parameters: + - `codec_options`: A CodecOptions instance for specifying how the + resulting DatetimeMS object will be formatted using ``tz_aware`` + and ``tz_info``. Defaults to + :const:`~bson.codec_options.DEFAULT_CODEC_OPTIONS`. + """ + return cast(datetime.datetime, _millis_to_datetime(self._value, codec_options)) + + def __int__(self) -> int: + return self._value + + +# Inclusive and exclusive min and max for timezones. +# Timezones are hashed by their offset, which is a timedelta +# and therefore there are more than 24 possible timezones. 
+@functools.lru_cache(maxsize=None) +def _min_datetime_ms(tz=datetime.timezone.utc): + return _datetime_to_millis(datetime.datetime.min.replace(tzinfo=tz)) + + +@functools.lru_cache(maxsize=None) +def _max_datetime_ms(tz=datetime.timezone.utc): + return _datetime_to_millis(datetime.datetime.max.replace(tzinfo=tz)) + + +def _millis_to_datetime(millis: int, opts: CodecOptions) -> Union[datetime.datetime, DatetimeMS]: + """Convert milliseconds since epoch UTC to datetime.""" + if ( + opts.datetime_conversion == DatetimeConversionOpts.DATETIME + or opts.datetime_conversion == DatetimeConversionOpts.DATETIME_CLAMP + or opts.datetime_conversion == DatetimeConversionOpts.DATETIME_AUTO + ): + tz = opts.tzinfo or datetime.timezone.utc + if opts.datetime_conversion == DatetimeConversionOpts.DATETIME_CLAMP: + millis = max(_min_datetime_ms(tz), min(millis, _max_datetime_ms(tz))) + elif opts.datetime_conversion == DatetimeConversionOpts.DATETIME_AUTO: + if not (_min_datetime_ms(tz) <= millis <= _max_datetime_ms(tz)): + return DatetimeMS(millis) + + diff = ((millis % 1000) + 1000) % 1000 + seconds = (millis - diff) // 1000 + micros = diff * 1000 + + if opts.tz_aware: + dt = EPOCH_AWARE + datetime.timedelta(seconds=seconds, microseconds=micros) + if opts.tzinfo: + dt = dt.astimezone(tz) + return dt + else: + return EPOCH_NAIVE + datetime.timedelta(seconds=seconds, microseconds=micros) + elif opts.datetime_conversion == DatetimeConversionOpts.DATETIME_MS: + return DatetimeMS(millis) + else: + raise ValueError("datetime_conversion must be an element of DatetimeConversionOpts") + + +def _datetime_to_millis(dtm: datetime.datetime) -> int: + """Convert datetime to milliseconds since epoch UTC.""" + if dtm.utcoffset() is not None: + dtm = dtm - dtm.utcoffset() # type: ignore + return int(calendar.timegm(dtm.timetuple()) * 1000 + dtm.microsecond // 1000) diff --git a/bson/json_util.py b/bson/json_util.py index 369c3d5f4a..0b5494e85c 100644 --- a/bson/json_util.py +++ b/bson/json_util.py @@ -94,11 +94,16 @@ import uuid from typing import Any, Dict, Mapping, Optional, Sequence, Tuple, Type, Union, cast -import bson -from bson import EPOCH_AWARE from bson.binary import ALL_UUID_SUBTYPES, UUID_SUBTYPE, Binary, UuidRepresentation from bson.code import Code -from bson.codec_options import CodecOptions +from bson.codec_options import CodecOptions, DatetimeConversionOpts +from bson.datetime_ms import ( + EPOCH_AWARE, + DatetimeMS, + _datetime_to_millis, + _max_datetime_ms, + _millis_to_datetime, +) from bson.dbref import DBRef from bson.decimal128 import Decimal128 from bson.int64 import Int64 @@ -228,6 +233,14 @@ class JSONOptions(CodecOptions): - `tzinfo`: A :class:`datetime.tzinfo` subclass that specifies the timezone from which :class:`~datetime.datetime` objects should be decoded. Defaults to :const:`~bson.tz_util.utc`. + - `datetime_conversion`: Specifies how UTC datetimes should be decoded + within BSON. Valid options include 'datetime_ms' to return as a + DatetimeMS, 'datetime' to return as a datetime.datetime and + raising a ValueError for out-of-range values, 'datetime_auto' to + return DatetimeMS objects when the underlying datetime is + out-of-range and 'datetime_clamp' to clamp to the minimum and + maximum possible datetimes. Defaults to 'datetime'. See + :ref:`handling-out-of-range-datetimes` for details. 
- `args`: arguments to :class:`~bson.codec_options.CodecOptions` - `kwargs`: arguments to :class:`~bson.codec_options.CodecOptions` @@ -594,7 +607,9 @@ def _parse_canonical_binary(doc: Any, json_options: JSONOptions) -> Union[Binary return _binary_or_uuid(data, int(subtype, 16), json_options) -def _parse_canonical_datetime(doc: Any, json_options: JSONOptions) -> datetime.datetime: +def _parse_canonical_datetime( + doc: Any, json_options: JSONOptions +) -> Union[datetime.datetime, DatetimeMS]: """Decode a JSON datetime to python datetime.datetime.""" dtm = doc["$date"] if len(doc) != 1: @@ -647,10 +662,15 @@ def _parse_canonical_datetime(doc: Any, json_options: JSONOptions) -> datetime.d if json_options.tz_aware: if json_options.tzinfo: aware = aware.astimezone(json_options.tzinfo) + if json_options.datetime_conversion == DatetimeConversionOpts.DATETIME_MS: + return DatetimeMS(aware) return aware else: - return aware.replace(tzinfo=None) - return bson._millis_to_datetime(int(dtm), json_options) + aware_tzinfo_none = aware.replace(tzinfo=None) + if json_options.datetime_conversion == DatetimeConversionOpts.DATETIME_MS: + return DatetimeMS(aware_tzinfo_none) + return aware_tzinfo_none + return _millis_to_datetime(int(dtm), json_options) def _parse_canonical_oid(doc: Any) -> ObjectId: @@ -806,10 +826,19 @@ def default(obj: Any, json_options: JSONOptions = DEFAULT_JSON_OPTIONS) -> Any: "$date": "%s%s%s" % (obj.strftime("%Y-%m-%dT%H:%M:%S"), fracsecs, tz_string) } - millis = bson._datetime_to_millis(obj) + millis = _datetime_to_millis(obj) if json_options.datetime_representation == DatetimeRepresentation.LEGACY: return {"$date": millis} return {"$date": {"$numberLong": str(millis)}} + if isinstance(obj, DatetimeMS): + if ( + json_options.datetime_representation == DatetimeRepresentation.ISO8601 + and 0 <= int(obj) <= _max_datetime_ms() + ): + return default(obj.as_datetime(), json_options) + elif json_options.datetime_representation == DatetimeRepresentation.LEGACY: + return {"$date": str(int(obj))} + return {"$date": {"$numberLong": str(int(obj))}} if json_options.strict_number_long and isinstance(obj, Int64): return {"$numberLong": str(obj)} if isinstance(obj, (RE_TYPE, Regex)): diff --git a/doc/api/bson/datetime_ms.rst b/doc/api/bson/datetime_ms.rst new file mode 100644 index 0000000000..254f115eb8 --- /dev/null +++ b/doc/api/bson/datetime_ms.rst @@ -0,0 +1,4 @@ +:mod:`datetime_ms` -- Support for BSON UTC Datetime +=================================================== +.. automodule:: bson.datetime_ms + :members: diff --git a/doc/api/bson/index.rst b/doc/api/bson/index.rst index 5f15ed99eb..72baae68a6 100644 --- a/doc/api/bson/index.rst +++ b/doc/api/bson/index.rst @@ -13,6 +13,7 @@ Sub-modules: binary code codec_options + datetime_ms dbref decimal128 errors diff --git a/doc/examples/datetimes.rst b/doc/examples/datetimes.rst index d712ce6138..b9c509e075 100644 --- a/doc/examples/datetimes.rst +++ b/doc/examples/datetimes.rst @@ -102,3 +102,57 @@ out of MongoDB in US/Pacific time: >>> result = aware_times.find_one() datetime.datetime(2002, 10, 27, 6, 0, # doctest: +NORMALIZE_WHITESPACE tzinfo=) + +.. _handling-out-of-range-datetimes: + +Handling out of range datetimes +------------------------------- + +Python's :class:`~datetime.datetime` can only represent datetimes within the +range allowed by +:attr:`~datetime.datetime.min` and :attr:`~datetime.datetime.max`, whereas +the range of datetimes allowed in BSON can represent any 64-bit number +of milliseconds from the Unix epoch. 
To deal with this, we can use the +:class:`bson.datetime_ms.DatetimeMS` object, which is a wrapper for the +:class:`int` built-in. + +To decode UTC datetime values as :class:`~bson.datetime_ms.DatetimeMS`, +:class:`~bson.codec_options.CodecOptions` should have its +``datetime_conversion`` parameter set to one of the options available in +:class:`bson.datetime_ms.DatetimeConversionOpts`. These include +:attr:`~bson.datetime_ms.DatetimeConversionOpts.DATETIME`, +:attr:`~bson.datetime_ms.DatetimeConversionOpts.DATETIME_MS`, +:attr:`~bson.datetime_ms.DatetimeConversionOpts.DATETIME_AUTO`, and +:attr:`~bson.datetime_ms.DatetimeConversionOpts.DATETIME_CLAMP`. +:attr:`~bson.datetime_ms.DatetimeConversionOpts.DATETIME` is the default +option and raises an exception when attempting to +decode an out-of-range date. +:attr:`~bson.datetime_ms.DatetimeConversionOpts.DATETIME_MS` will only return +:class:`~bson.datetime_ms.DatetimeMS` objects, regardless of whether the +represented datetime is in- or out-of-range. +:attr:`~bson.datetime_ms.DatetimeConversionOpts.DATETIME_AUTO` will return +:class:`~datetime.datetime` if the underlying UTC datetime is within range, +or :class:`~bson.datetime_ms.DatetimeMS` if the underlying datetime +cannot be represented using the builtin Python :class:`~datetime.datetime`. +:attr:`~bson.datetime_ms.DatetimeConversionOpts.DATETIME_CLAMP` will clamp +resulting :class:`~datetime.datetime` objects to be within +:attr:`~datetime.datetime.min` and :attr:`~datetime.datetime.max` +(trimmed to `999000` microseconds). + +An example of encoding and decoding using `DATETIME_MS` is as follows: + +.. doctest:: + + >>> from datetime import datetime + >>> from bson import encode, decode + >>> from bson.datetime_ms import DatetimeMS + >>> from bson.codec_options import CodecOptions, DatetimeConversionOpts + >>> x = encode({"x": datetime(1970, 1, 1)}) + >>> x + b'\x10\x00\x00\x00\tx\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + >>> decode(x, codec_options=CodecOptions(datetime_conversion=DatetimeConversionOpts.DATETIME_MS)) + {'x': DatetimeMS(0)} + +:class:`~bson.datetime_ms.DatetimeMS` objects support rich comparison +methods against other instances of :class:`~bson.datetime_ms.DatetimeMS`. +They can also be converted to :class:`~datetime.datetime` objects with +:meth:`~bson.datetime_ms.DatetimeMS.as_datetime()`.
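The doctest above covers ``DATETIME_MS``; a companion sketch of the two other new modes on an out-of-range value follows (a minimal sketch, assuming a build of this branch; ``2**62`` is just an arbitrary out-of-range millisecond count):

    >>> from bson import encode, decode
    >>> from bson.codec_options import CodecOptions, DatetimeConversionOpts
    >>> from bson.datetime_ms import DatetimeMS
    >>> payload = encode({"x": DatetimeMS(2**62)})
    >>> decode(payload, CodecOptions(datetime_conversion=DatetimeConversionOpts.DATETIME_AUTO))
    {'x': DatetimeMS(4611686018427387904)}
    >>> clamped = decode(payload, CodecOptions(datetime_conversion=DatetimeConversionOpts.DATETIME_CLAMP))
    >>> clamped["x"].year  # clamped to datetime.datetime.max
    9999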
diff --git a/pymongo/common.py b/pymongo/common.py index 6ffc97f2a8..319b07193c 100644 --- a/pymongo/common.py +++ b/pymongo/common.py @@ -36,7 +36,7 @@ from bson import SON from bson.binary import UuidRepresentation -from bson.codec_options import CodecOptions, TypeRegistry +from bson.codec_options import CodecOptions, DatetimeConversionOpts, TypeRegistry from bson.raw_bson import RawBSONDocument from pymongo.auth import MECHANISMS from pymongo.compression_support import ( @@ -620,6 +620,21 @@ def validate_auto_encryption_opts_or_none(option: Any, value: Any) -> Optional[A return value +def validate_datetime_conversion(option: Any, value: Any) -> Optional[DatetimeConversionOpts]: + """Validate a DatetimeConversionOpts string.""" + if value is None: + return DatetimeConversionOpts.DATETIME + + if isinstance(value, str): + if value.isdigit(): + return DatetimeConversionOpts(int(value)) + return DatetimeConversionOpts[value] + elif isinstance(value, int): + return DatetimeConversionOpts(value) + + raise TypeError("%s must be a str or int representing DatetimeConversionOpts" % (option,)) + + # Dictionary where keys are the names of public URI options, and values # are lists of aliases for that option. URI_OPTIONS_ALIAS_MAP: Dict[str, List[str]] = { @@ -684,6 +699,7 @@ def validate_auto_encryption_opts_or_none(option: Any, value: Any) -> Optional[A "uuidrepresentation": validate_uuid_representation, "waitqueuemultiple": validate_non_negative_integer_or_none, "waitqueuetimeoutms": validate_timeout_or_none, + "datetime_conversion": validate_datetime_conversion, } # Dictionary where keys are the names of keyword-only options for the diff --git a/pymongo/mongo_client.py b/pymongo/mongo_client.py index 080ae8757c..fd4c0e84bc 100644 --- a/pymongo/mongo_client.py +++ b/pymongo/mongo_client.py @@ -239,6 +239,14 @@ def __init__( - `type_registry` (optional): instance of :class:`~bson.codec_options.TypeRegistry` to enable encoding and decoding of custom types. + - `datetime_conversion`: Specifies how UTC datetimes should be decoded + within BSON. Valid options include 'datetime_ms' to return as a + DatetimeMS, 'datetime' to return as a datetime.datetime and + raising a ValueError for out-of-range values, 'datetime_auto' to + return DatetimeMS objects when the underlying datetime is + out-of-range and 'datetime_clamp' to clamp to the minimum and + maximum possible datetimes. Defaults to 'datetime'. See + :ref:`handling-out-of-range-datetimes` for details. 
| **Other optional parameters can be passed as keyword arguments:** diff --git a/test/test_bson.py b/test/test_bson.py index aa77954fa2..0893000c0c 100644 --- a/test/test_bson.py +++ b/test/test_bson.py @@ -38,7 +38,9 @@ from bson import ( BSON, EPOCH_AWARE, + DatetimeMS, Regex, + _datetime_to_millis, decode, decode_all, decode_file_iter, @@ -48,7 +50,7 @@ ) from bson.binary import Binary, UuidRepresentation from bson.code import Code -from bson.codec_options import CodecOptions +from bson.codec_options import CodecOptions, DatetimeConversionOpts from bson.dbref import DBRef from bson.errors import InvalidBSON, InvalidDocument from bson.int64 import Int64 @@ -978,7 +980,7 @@ def test_codec_options_repr(self): "uuid_representation=UuidRepresentation.UNSPECIFIED, " "unicode_decode_error_handler='strict', " "tzinfo=None, type_registry=TypeRegistry(type_codecs=[], " - "fallback_encoder=None))" + "fallback_encoder=None), datetime_conversion=1)" ) self.assertEqual(r, repr(CodecOptions())) @@ -1153,5 +1155,169 @@ def test_bson_encode_decode(self) -> None: self.assertTrue(decoded["_id"].generation_time) + +class TestDatetimeConversion(unittest.TestCase): + def test_comps(self): + # Tests other timestamp formats. + # Test each of the rich comparison methods. + pairs = [ + (DatetimeMS(-1), DatetimeMS(1)), + (DatetimeMS(0), DatetimeMS(0)), + (DatetimeMS(1), DatetimeMS(-1)), + ] + + comp_ops = ["__lt__", "__le__", "__eq__", "__ne__", "__gt__", "__ge__"] + for lh, rh in pairs: + for op in comp_ops: + self.assertEqual(getattr(lh, op)(rh), getattr(lh._value, op)(rh._value)) + + def test_class_conversions(self): + # Test class conversions. + dtr1 = DatetimeMS(1234) + dt1 = dtr1.as_datetime() + self.assertEqual(dtr1, DatetimeMS(dt1)) + + dt2 = datetime.datetime(1969, 1, 1) + dtr2 = DatetimeMS(dt2) + self.assertEqual(dtr2.as_datetime(), dt2) + + # Test encode and decode without codec options. Expect: DatetimeMS => datetime + dtr1 = DatetimeMS(0) + enc1 = encode({"x": dtr1}) + dec1 = decode(enc1) + self.assertEqual(dec1["x"], datetime.datetime(1970, 1, 1)) + self.assertNotEqual(type(dtr1), type(dec1["x"])) + + # Test encode and decode with codec options. Expect: DatetimeMS => DatetimeMS + opts1 = CodecOptions(datetime_conversion=DatetimeConversionOpts.DATETIME_MS) + enc1 = encode({"x": dtr1}) + dec1 = decode(enc1, opts1) + self.assertEqual(type(dtr1), type(dec1["x"])) + self.assertEqual(dtr1, dec1["x"]) + + # Expect: datetime => DatetimeMS + opts1 = CodecOptions(datetime_conversion=DatetimeConversionOpts.DATETIME_MS) + dt1 = datetime.datetime(1970, 1, 1, tzinfo=datetime.timezone.utc) + enc1 = encode({"x": dt1}) + dec1 = decode(enc1, opts1) + self.assertEqual(dec1["x"], DatetimeMS(0)) + self.assertNotEqual(dt1, type(dec1["x"])) + + def test_clamping(self): + # Test clamping from below and above. + opts1 = CodecOptions( + datetime_conversion=DatetimeConversionOpts.DATETIME_CLAMP, + tz_aware=True, + tzinfo=datetime.timezone.utc, + ) + below = encode({"x": DatetimeMS(_datetime_to_millis(datetime.datetime.min) - 1)}) + dec_below = decode(below, opts1) + self.assertEqual( + dec_below["x"], datetime.datetime.min.replace(tzinfo=datetime.timezone.utc) + ) + + above = encode({"x": DatetimeMS(_datetime_to_millis(datetime.datetime.max) + 1)}) + dec_above = decode(above, opts1) + self.assertEqual( + dec_above["x"], + datetime.datetime.max.replace(tzinfo=datetime.timezone.utc, microsecond=999000), + ) + + def test_tz_clamping(self): + # Naive clamping to local tz.
+ opts1 = CodecOptions( + datetime_conversion=DatetimeConversionOpts.DATETIME_CLAMP, tz_aware=False + ) + below = encode({"x": DatetimeMS(_datetime_to_millis(datetime.datetime.min) - 24 * 60 * 60)}) + + dec_below = decode(below, opts1) + self.assertEqual(dec_below["x"], datetime.datetime.min) + + above = encode({"x": DatetimeMS(_datetime_to_millis(datetime.datetime.max) + 24 * 60 * 60)}) + dec_above = decode(above, opts1) + self.assertEqual( + dec_above["x"], + datetime.datetime.max.replace(microsecond=999000), + ) + + # Aware clamping. + opts2 = CodecOptions( + datetime_conversion=DatetimeConversionOpts.DATETIME_CLAMP, tz_aware=True + ) + below = encode({"x": DatetimeMS(_datetime_to_millis(datetime.datetime.min) - 24 * 60 * 60)}) + dec_below = decode(below, opts2) + self.assertEqual( + dec_below["x"], datetime.datetime.min.replace(tzinfo=datetime.timezone.utc) + ) + + above = encode({"x": DatetimeMS(_datetime_to_millis(datetime.datetime.max) + 24 * 60 * 60)}) + dec_above = decode(above, opts2) + self.assertEqual( + dec_above["x"], + datetime.datetime.max.replace(tzinfo=datetime.timezone.utc, microsecond=999000), + ) + + def test_datetime_auto(self): + # Naive auto, in range. + opts1 = CodecOptions(datetime_conversion=DatetimeConversionOpts.DATETIME_AUTO) + inr = encode({"x": datetime.datetime(1970, 1, 1)}, codec_options=opts1) + dec_inr = decode(inr) + self.assertEqual(dec_inr["x"], datetime.datetime(1970, 1, 1)) + + # Naive auto, below range. + below = encode({"x": DatetimeMS(_datetime_to_millis(datetime.datetime.min) - 24 * 60 * 60)}) + dec_below = decode(below, opts1) + self.assertEqual( + dec_below["x"], DatetimeMS(_datetime_to_millis(datetime.datetime.min) - 24 * 60 * 60) + ) + + # Naive auto, above range. + above = encode({"x": DatetimeMS(_datetime_to_millis(datetime.datetime.max) + 24 * 60 * 60)}) + dec_above = decode(above, opts1) + self.assertEqual( + dec_above["x"], + DatetimeMS(_datetime_to_millis(datetime.datetime.max) + 24 * 60 * 60), + ) + + # Aware auto, in range. + opts2 = CodecOptions( + datetime_conversion=DatetimeConversionOpts.DATETIME_AUTO, + tz_aware=True, + tzinfo=datetime.timezone.utc, + ) + inr = encode({"x": datetime.datetime(1970, 1, 1)}, codec_options=opts2) + dec_inr = decode(inr) + self.assertEqual(dec_inr["x"], datetime.datetime(1970, 1, 1)) + + # Aware auto, below range. + below = encode({"x": DatetimeMS(_datetime_to_millis(datetime.datetime.min) - 24 * 60 * 60)}) + dec_below = decode(below, opts2) + self.assertEqual( + dec_below["x"], DatetimeMS(_datetime_to_millis(datetime.datetime.min) - 24 * 60 * 60) + ) + + # Aware auto, above range. + above = encode({"x": DatetimeMS(_datetime_to_millis(datetime.datetime.max) + 24 * 60 * 60)}) + dec_above = decode(above, opts2) + self.assertEqual( + dec_above["x"], + DatetimeMS(_datetime_to_millis(datetime.datetime.max) + 24 * 60 * 60), + ) + + def test_millis_from_datetime_ms(self): + # Test 65+ bit integer conversion, expect OverflowError. + big_ms = 2**65 + with self.assertRaises(OverflowError): + encode({"x": DatetimeMS(big_ms)}) + + # Subclass of DatetimeMS w/ __int__ override, expect an Error. 
+ class DatetimeMSOverride(DatetimeMS): + def __int__(self): + return float(self._value) + + float_ms = DatetimeMSOverride(2) + with self.assertRaises(TypeError): + encode({"x": float_ms}) + + if __name__ == "__main__": unittest.main() diff --git a/test/test_client.py b/test/test_client.py index 3630cec06c..f520043ecf 100644 --- a/test/test_client.py +++ b/test/test_client.py @@ -65,7 +65,12 @@ import pymongo from bson import encode -from bson.codec_options import CodecOptions, TypeEncoder, TypeRegistry +from bson.codec_options import ( + CodecOptions, + DatetimeConversionOpts, + TypeEncoder, + TypeRegistry, +) from bson.son import SON from bson.tz_util import utc from pymongo import event_loggers, message, monitoring @@ -386,14 +391,17 @@ def test_uri_codec_options(self): # Ensure codec options are passed in correctly uuid_representation_label = "javaLegacy" unicode_decode_error_handler = "ignore" + datetime_conversion = "DATETIME_CLAMP" uri = ( "mongodb://%s:%d/foo?tz_aware=true&uuidrepresentation=" "%s&unicode_decode_error_handler=%s" + "&datetime_conversion=%s" % ( client_context.host, client_context.port, uuid_representation_label, unicode_decode_error_handler, + datetime_conversion, ) ) c = MongoClient(uri, connect=False) @@ -403,6 +411,19 @@ def test_uri_codec_options(self): c.codec_options.uuid_representation, _UUID_REPRESENTATIONS[uuid_representation_label] ) self.assertEqual(c.codec_options.unicode_decode_error_handler, unicode_decode_error_handler) + self.assertEqual( + c.codec_options.datetime_conversion, DatetimeConversionOpts[datetime_conversion] + ) + + # Change the passed datetime_conversion to a number and re-assert. + uri = uri.replace( + datetime_conversion, f"{int(DatetimeConversionOpts[datetime_conversion])}" + ) + c = MongoClient(uri, connect=False) + + self.assertEqual( + c.codec_options.datetime_conversion, DatetimeConversionOpts[datetime_conversion] + ) def test_uri_option_precedence(self): # Ensure kwarg options override connection string options. 
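As exercised by test_uri_codec_options above, the option is also reachable from a connection string. A minimal sketch, assuming a build of this branch (``localhost:27017`` is a placeholder; ``connect=False`` avoids needing a live server):

    >>> from pymongo import MongoClient
    >>> from bson.codec_options import DatetimeConversionOpts
    >>> c = MongoClient("mongodb://localhost:27017/?datetime_conversion=DATETIME_CLAMP", connect=False)
    >>> c.codec_options.datetime_conversion == DatetimeConversionOpts.DATETIME_CLAMP
    True
    >>> c2 = MongoClient("mongodb://localhost:27017/?datetime_conversion=2", connect=False)
    >>> c2.codec_options.datetime_conversion == DatetimeConversionOpts.DATETIME_CLAMP
    True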
diff --git a/test/test_json_util.py b/test/test_json_util.py index ee5b7abb49..576746e865 100644 --- a/test/test_json_util.py +++ b/test/test_json_util.py @@ -21,11 +21,13 @@ import uuid from typing import Any, List, MutableMapping +from bson.codec_options import CodecOptions, DatetimeConversionOpts + sys.path[0:0] = [""] from test import IntegrationTest, unittest -from bson import EPOCH_AWARE, EPOCH_NAIVE, SON, json_util +from bson import EPOCH_AWARE, EPOCH_NAIVE, SON, DatetimeMS, json_util from bson.binary import ( ALL_UUID_REPRESENTATIONS, MD5_SUBTYPE, @@ -35,6 +37,7 @@ UuidRepresentation, ) from bson.code import Code +from bson.datetime_ms import _max_datetime_ms from bson.dbref import DBRef from bson.int64 import Int64 from bson.json_util import ( @@ -241,6 +244,69 @@ def test_datetime(self): ), ) + def test_datetime_ms(self): + # Test ISO8601 in-range + dat_min = {"x": DatetimeMS(0)} + dat_max = {"x": DatetimeMS(_max_datetime_ms())} + opts = JSONOptions(datetime_representation=DatetimeRepresentation.ISO8601) + + self.assertEqual( + dat_min["x"].as_datetime(CodecOptions(tz_aware=False)), + json_util.loads(json_util.dumps(dat_min))["x"], + ) + self.assertEqual( + dat_max["x"].as_datetime(CodecOptions(tz_aware=False)), + json_util.loads(json_util.dumps(dat_max))["x"], + ) + + # Test ISO8601 out-of-range + dat_min = {"x": DatetimeMS(-1)} + dat_max = {"x": DatetimeMS(_max_datetime_ms() + 1)} + + self.assertEqual('{"x": {"$date": {"$numberLong": "-1"}}}', json_util.dumps(dat_min)) + self.assertEqual( + '{"x": {"$date": {"$numberLong": "' + str(int(dat_max["x"])) + '"}}}', + json_util.dumps(dat_max), + ) + # Test legacy. + opts = JSONOptions( + datetime_representation=DatetimeRepresentation.LEGACY, json_mode=JSONMode.LEGACY + ) + self.assertEqual('{"x": {"$date": "-1"}}', json_util.dumps(dat_min, json_options=opts)) + self.assertEqual( + '{"x": {"$date": "' + str(int(dat_max["x"])) + '"}}', + json_util.dumps(dat_max, json_options=opts), + ) + + # Test regular. + opts = JSONOptions( + datetime_representation=DatetimeRepresentation.NUMBERLONG, json_mode=JSONMode.LEGACY + ) + self.assertEqual( + '{"x": {"$date": {"$numberLong": "-1"}}}', json_util.dumps(dat_min, json_options=opts) + ) + self.assertEqual( + '{"x": {"$date": {"$numberLong": "' + str(int(dat_max["x"])) + '"}}}', + json_util.dumps(dat_max, json_options=opts), + ) + + # Test decode from datetime.datetime to DatetimeMS + dat_min = {"x": datetime.datetime.min} + dat_max = {"x": DatetimeMS(_max_datetime_ms()).as_datetime(CodecOptions(tz_aware=False))} + opts = JSONOptions( + datetime_representation=DatetimeRepresentation.ISO8601, + datetime_conversion=DatetimeConversionOpts.DATETIME_MS, + ) + + self.assertEqual( + DatetimeMS(dat_min["x"]), + json_util.loads(json_util.dumps(dat_min), json_options=opts)["x"], + ) + self.assertEqual( + DatetimeMS(dat_max["x"]), + json_util.loads(json_util.dumps(dat_max), json_options=opts)["x"], + ) + def test_regex_object_hook(self): # Extended JSON format regular expression. 
pat = "a*b" From 0c56d5665811df65aa25602b61d289cf605e3647 Mon Sep 17 00:00:00 2001 From: Ben Warner Date: Thu, 28 Jul 2022 15:55:34 -0700 Subject: [PATCH 0224/1588] PYTHON-3371 Remove DatetimeConversionOpts.__repr__ (#1023) * Removed __repr__ and adjusted repr string * Changed to %s Co-authored-by: Ben Warner --- bson/codec_options.py | 5 +---- test/test_bson.py | 3 ++- 2 files changed, 3 insertions(+), 5 deletions(-) diff --git a/bson/codec_options.py b/bson/codec_options.py index a29c878929..afffa2f120 100644 --- a/bson/codec_options.py +++ b/bson/codec_options.py @@ -205,9 +205,6 @@ class DatetimeConversionOpts(enum.IntEnum): DATETIME_MS = 3 DATETIME_AUTO = 4 - def __repr__(self): - return f"{self.value}" - class _BaseCodecOptions(NamedTuple): document_class: Type[Mapping[str, Any]] @@ -370,7 +367,7 @@ def _arguments_repr(self) -> str: return ( "document_class=%s, tz_aware=%r, uuid_representation=%s, " "unicode_decode_error_handler=%r, tzinfo=%r, " - "type_registry=%r, datetime_conversion=%r" + "type_registry=%r, datetime_conversion=%s" % ( document_class_repr, self.tz_aware, diff --git a/test/test_bson.py b/test/test_bson.py index 0893000c0c..7fe0c168c6 100644 --- a/test/test_bson.py +++ b/test/test_bson.py @@ -980,7 +980,8 @@ def test_codec_options_repr(self): "uuid_representation=UuidRepresentation.UNSPECIFIED, " "unicode_decode_error_handler='strict', " "tzinfo=None, type_registry=TypeRegistry(type_codecs=[], " - "fallback_encoder=None), datetime_conversion=1)" + "fallback_encoder=None), " + "datetime_conversion=DatetimeConversionOpts.DATETIME)" ) self.assertEqual(r, repr(CodecOptions())) From 3c18c2079524d322d24b5dcc515ddc91c59e3fbd Mon Sep 17 00:00:00 2001 From: Ben Warner Date: Fri, 29 Jul 2022 12:07:04 -0700 Subject: [PATCH 0225/1588] PYTHON-3377 datetime_ms documentation page is empty (#1026) Co-authored-by: Ben Warner --- bson/datetime_ms.py | 25 +++++++++++++++---------- doc/api/bson/datetime_ms.rst | 2 ++ 2 files changed, 17 insertions(+), 10 deletions(-) diff --git a/bson/datetime_ms.py b/bson/datetime_ms.py index f3e25ed05a..925087a5aa 100644 --- a/bson/datetime_ms.py +++ b/bson/datetime_ms.py @@ -12,7 +12,10 @@ # implied. See the License for the specific language governing # permissions and limitations under the License. -"""Tools for representing the BSON datetime type.""" +"""Tools for representing the BSON datetime type. + +.. versionadded:: 4.3 +""" import calendar import datetime @@ -31,26 +34,28 @@ class DatetimeMS: + """Represents a BSON UTC datetime.""" + __slots__ = ("_value",) def __init__(self, value: Union[int, datetime.datetime]): """Represents a BSON UTC datetime. - BSON UTC datetimes are defined as an int64 of milliseconds since the Unix - epoch. The principal use of DatetimeMS is to represent datetimes outside - the range of the Python builtin :class:`~datetime.datetime` class when + BSON UTC datetimes are defined as an int64 of milliseconds since the + Unix epoch. The principal use of DatetimeMS is to represent + datetimes outside the range of the Python builtin + :class:`~datetime.datetime` class when encoding/decoding BSON. - To decode UTC datetimes as a ``DatetimeMS``,`datetime_conversion` in + To decode UTC datetimes as a ``DatetimeMS``, `datetime_conversion` in :class:`~bson.CodecOptions` must be set to 'datetime_ms' or - 'datetime_auto'. See :ref:`handling-out-of-range-datetimes` for details. + 'datetime_auto'. See :ref:`handling-out-of-range-datetimes` for + details. 
:Parameters: - `value`: An instance of :class:`datetime.datetime` to be - represented as milliseconds since the Unix epoch, or int of - milliseconds since the Unix epoch. - - .. versionadded:: 4.3 + represented as milliseconds since the Unix epoch, or int of + milliseconds since the Unix epoch. """ if isinstance(value, int): if not (-(2**63) <= value <= 2**63 - 1): diff --git a/doc/api/bson/datetime_ms.rst b/doc/api/bson/datetime_ms.rst index 254f115eb8..1afaad69fc 100644 --- a/doc/api/bson/datetime_ms.rst +++ b/doc/api/bson/datetime_ms.rst @@ -1,4 +1,6 @@ :mod:`datetime_ms` -- Support for BSON UTC Datetime =================================================== + .. automodule:: bson.datetime_ms + :synopsis: Support for BSON UTC datetimes. :members: From 1166bb96cd538e90c65dc9a6674791b5accb16dc Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Fri, 29 Jul 2022 15:39:11 -0700 Subject: [PATCH 0226/1588] PYTHON-3382 Resync csfle tests (#1027) --- test/client-side-encryption/spec/unified/addKeyAltName.json | 4 +++- .../spec/unified/createDataKey-kms_providers-invalid.json | 2 +- test/client-side-encryption/spec/unified/getKey.json | 4 +++- .../spec/unified/getKeyByAltName.json | 4 +++- .../spec/unified/removeKeyAltName.json | 4 +++- .../spec/unified/rewrapManyDataKey.json | 6 +++--- 6 files changed, 16 insertions(+), 8 deletions(-) diff --git a/test/client-side-encryption/spec/unified/addKeyAltName.json b/test/client-side-encryption/spec/unified/addKeyAltName.json index 8b6c174cbc..f70bc572a8 100644 --- a/test/client-side-encryption/spec/unified/addKeyAltName.json +++ b/test/client-side-encryption/spec/unified/addKeyAltName.json @@ -98,7 +98,9 @@ }, "keyAltName": "new_key_alt_name" }, - "expectResult": null + "expectResult": { + "$$unsetOrMatches": null + } } ], "expectEvents": [ diff --git a/test/client-side-encryption/spec/unified/createDataKey-kms_providers-invalid.json b/test/client-side-encryption/spec/unified/createDataKey-kms_providers-invalid.json index 16cf6ca70d..2344a61a95 100644 --- a/test/client-side-encryption/spec/unified/createDataKey-kms_providers-invalid.json +++ b/test/client-side-encryption/spec/unified/createDataKey-kms_providers-invalid.json @@ -1,5 +1,5 @@ { - "description": "createDataKey-provider-invalid", + "description": "createDataKey-kms_providers-invalid", "schemaVersion": "1.8", "runOnRequirements": [ { diff --git a/test/client-side-encryption/spec/unified/getKey.json b/test/client-side-encryption/spec/unified/getKey.json index 6a7269b2ca..2ea3fe7358 100644 --- a/test/client-side-encryption/spec/unified/getKey.json +++ b/test/client-side-encryption/spec/unified/getKey.json @@ -133,7 +133,9 @@ } } }, - "expectResult": null + "expectResult": { + "$$unsetOrMatches": null + } } ], "expectEvents": [ diff --git a/test/client-side-encryption/spec/unified/getKeyByAltName.json b/test/client-side-encryption/spec/unified/getKeyByAltName.json index f94459bbd8..2505abc16e 100644 --- a/test/client-side-encryption/spec/unified/getKeyByAltName.json +++ b/test/client-side-encryption/spec/unified/getKeyByAltName.json @@ -128,7 +128,9 @@ "arguments": { "keyAltName": "does_not_exist" }, - "expectResult": null + "expectResult": { + "$$unsetOrMatches": null + } } ], "expectEvents": [ diff --git a/test/client-side-encryption/spec/unified/removeKeyAltName.json b/test/client-side-encryption/spec/unified/removeKeyAltName.json index bef13c87de..1b7077077a 100644 --- a/test/client-side-encryption/spec/unified/removeKeyAltName.json +++ 
b/test/client-side-encryption/spec/unified/removeKeyAltName.json @@ -102,7 +102,9 @@ }, "keyAltName": "does_not_exist" }, - "expectResult": null + "expectResult": { + "$$unsetOrMatches": null + } } ], "expectEvents": [ diff --git a/test/client-side-encryption/spec/unified/rewrapManyDataKey.json b/test/client-side-encryption/spec/unified/rewrapManyDataKey.json index 7e3abb1274..89860de0c0 100644 --- a/test/client-side-encryption/spec/unified/rewrapManyDataKey.json +++ b/test/client-side-encryption/spec/unified/rewrapManyDataKey.json @@ -1,5 +1,5 @@ { - "description": "rewrapManyDataKey-kms_providers", + "description": "rewrapManyDataKey", "schemaVersion": "1.8", "runOnRequirements": [ { @@ -128,7 +128,7 @@ ], "keyMaterial": { "$binary": { - "base64": "AQICAHhQNmWG2CzOm1dq3kWLM+iDUZhEqnhJwH9wZVpuZ94A8gEGkNTybTc7Eyif0f+qqE0lAAAAwjCBvwYJKoZIhvcNAQcGoIGxMIGuAgEAMIGoBgkqhkiG9w0BBwEwHgYJYIZIAWUDBAEuMBEEDB2j78AeuIQxcRh8cQIBEIB7vj9buHEaT7XHFIsKBJiyzZRmNnjvqMK5LSdzonKdx97jlqauvPvTDXSsdQDcspUs5oLrGmAXpbFResscxmbwZoKgUtWiuIOpeAcYuszCiMKt15s1WIMLDXUhYtfCmhRhekvgHnRAaK4HJMlGE+lKJXYI84E0b86Cd/g+", + "base64": "pr01l7qDygUkFE/0peFwpnNlv3iIy8zrQK38Q9i12UCN2jwZHDmfyx8wokiIKMb9kAleeY+vnt3Cf1MKu9kcDmI+KxbNDd+V3ytAAGzOVLDJr77CiWjF9f8ntkXRHrAY9WwnVDANYkDwXlyU0Y2GQFTiW65jiQhUtYLYH63Tk48SsJuQvnWw1Q+PzY8ga+QeVec8wbcThwtm+r2IHsCFnc72Gv73qq7weISw+O4mN08z3wOp5FOS2ZM3MK7tBGmPdBcktW7F8ODGsOQ1FU53OrWUnyX2aTi2ftFFFMWVHqQo7EYuBZHru8RRODNKMyQk0BFfKovAeTAVRv9WH9QU7g==", "subType": "00" } }, @@ -196,7 +196,7 @@ ], "keyMaterial": { "$binary": { - "base64": "VoI9J8HusQ3u2gT9i8Awgg/6W4/igvLwRzn3SRDGx0Dl/1ayDMubphOw0ONPVKfuvS6HL3e4gAoCJ/uEz2KLFTVsEqYCpMhfAhgXxm8Ena8vDcOkCzFX+euvN/N2ES3wpzAD18b3qIH0MbBwKJP82d5GQ4pVfGnPW8Ujp9aO1qC/s0EqNqYyzJ1SyzhV9lAjHHGIENYJx+bBrekg2EeZBA==", + "base64": "CklVctHzke4mcytd0TxGqvepkdkQN8NUF4+jV7aZQITAKdz6WjdDpq3lMt9nSzWGG2vAEfvRb3mFEVjV57qqGqxjq2751gmiMRHXz0btStbIK3mQ5xbY9kdye4tsixlCryEwQONr96gwlwKKI9Nubl9/8+uRF6tgYjje7Q7OjauEf1SrJwKcoQ3WwnjZmEqAug0kImCpJ/irhdqPzivRiA==", "subType": "00" } }, From fbb8dde826f5c32ef3db5c046eb81a73c902241a Mon Sep 17 00:00:00 2001 From: Ben Warner Date: Fri, 29 Jul 2022 15:53:38 -0700 Subject: [PATCH 0227/1588] PYTHON-3375 Added docstrings to DatetimeConversionOpts (#1024) * Added docstrings * Fixed detail * Fixed punctuation and links Co-authored-by: Ben Warner --- bson/codec_options.py | 28 ++++++++++++++++++++++++++++ 1 file changed, 28 insertions(+) diff --git a/bson/codec_options.py b/bson/codec_options.py index afffa2f120..bceab5e003 100644 --- a/bson/codec_options.py +++ b/bson/codec_options.py @@ -200,10 +200,38 @@ def __eq__(self, other: Any) -> Any: class DatetimeConversionOpts(enum.IntEnum): + """Options for decoding BSON datetimes.""" + DATETIME = 1 + """Decode a BSON UTC datetime as a :class:`datetime.datetime`. + + BSON UTC datetimes that cannot be represented as a + :class:`~datetime.datetime` will raise an :class:`OverflowError` + or a :class:`ValueError`. + + .. versionadded 4.3 + """ + DATETIME_CLAMP = 2 + """Decode a BSON UTC datetime as a :class:`datetime.datetime`, clamping + to :attr:`~datetime.datetime.min` and :attr:`~datetime.datetime.max`. + + .. versionadded 4.3 + """ + DATETIME_MS = 3 + """Decode a BSON UTC datetime as a :class:`~bson.datetime_ms.DatetimeMS` + object. + + .. versionadded 4.3 + """ + DATETIME_AUTO = 4 + """Decode a BSON UTC datetime as a :class:`datetime.datetime` if possible, + and a :class:`~bson.datetime_ms.DatetimeMS` if not. + + .. 
versionadded 4.3 + """ class _BaseCodecOptions(NamedTuple): From 7c19ff7f7ac7199e8ec9026110daa7cefc0a3a7a Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Wed, 3 Aug 2022 12:23:50 -0700 Subject: [PATCH 0228/1588] PYTHON-3389 Close ChangeStream after non-resumable non-timeout errors (#1029) --- pymongo/change_stream.py | 42 +++++++++++++++++++++++++------------- test/test_change_stream.py | 13 ++++++------ 2 files changed, 35 insertions(+), 20 deletions(-) diff --git a/pymongo/change_stream.py b/pymongo/change_stream.py index 80820dff91..0edf513a3c 100644 --- a/pymongo/change_stream.py +++ b/pymongo/change_stream.py @@ -68,6 +68,19 @@ from pymongo.mongo_client import MongoClient +def _resumable(exc: PyMongoError) -> bool: + """Return True if given a resumable change stream error.""" + if isinstance(exc, (ConnectionFailure, CursorNotFound)): + return True + if isinstance(exc, OperationFailure): + if exc._max_wire_version is None: + return False + return ( + exc._max_wire_version >= 9 and exc.has_error_label("ResumableChangeStreamError") + ) or (exc._max_wire_version < 9 and exc.code in _RESUMABLE_GETMORE_ERRORS) + return False + + class ChangeStream(Generic[_DocumentType]): """The internal abstract base class for change stream cursors. @@ -343,20 +356,21 @@ def try_next(self) -> Optional[_DocumentType]: # Attempt to get the next change with at most one getMore and at most # one resume attempt. try: - change = self._cursor._try_next(True) - except (ConnectionFailure, CursorNotFound): - self._resume() - change = self._cursor._try_next(False) - except OperationFailure as exc: - if exc._max_wire_version is None: - raise - is_resumable = ( - exc._max_wire_version >= 9 and exc.has_error_label("ResumableChangeStreamError") - ) or (exc._max_wire_version < 9 and exc.code in _RESUMABLE_GETMORE_ERRORS) - if not is_resumable: - raise - self._resume() - change = self._cursor._try_next(False) + try: + change = self._cursor._try_next(True) + except PyMongoError as exc: + if not _resumable(exc): + raise + self._resume() + change = self._cursor._try_next(False) + except PyMongoError as exc: + # Close the stream after a fatal error. + if not _resumable(exc) and not exc.timeout: + self.close() + raise + except Exception: + self.close() + raise # Check if the cursor was invalidated. if not self._cursor.alive: diff --git a/test/test_change_stream.py b/test/test_change_stream.py index b5b260086d..a8b793333e 100644 --- a/test/test_change_stream.py +++ b/test/test_change_stream.py @@ -486,7 +486,7 @@ def _get_expected_resume_token(self, stream, listener, previous_change=None): return response["cursor"]["postBatchResumeToken"] @no_type_check - def _test_raises_error_on_missing_id(self, expected_exception, expected_exception2): + def _test_raises_error_on_missing_id(self, expected_exception): """ChangeStream will raise an exception if the server response is missing the resume token. """ @@ -494,7 +494,8 @@ def _test_raises_error_on_missing_id(self, expected_exception, expected_exceptio self.watched_collection().insert_one({}) with self.assertRaises(expected_exception): next(change_stream) - with self.assertRaises(expected_exception2): + # The cursor should now be closed. + with self.assertRaises(StopIteration): next(change_stream) @no_type_check @@ -526,14 +527,14 @@ def test_update_resume_token_legacy(self): # Prose test no. 2 @client_context.require_version_min(4, 1, 8) def test_raises_error_on_missing_id_418plus(self): - # Server returns an error on 4.1.8+, subsequent next() resumes and gets the same error. 
From 5b85ad2bcf5f1a996f8b288b27a849e8e75b4779 Mon Sep 17 00:00:00 2001
From: Shane Harvey
Date: Wed, 3 Aug 2022 13:30:41 -0700
Subject: [PATCH 0229/1588] PYTHON-3391 Skip unsupported CSOT tests on
 serverless (#1030)

---
 test/__init__.py               | 10 ++++++++++
 test/csot/gridfs-advanced.json |  3 ++-
 test/csot/gridfs-delete.json   |  3 ++-
 test/csot/gridfs-download.json |  3 ++-
 test/csot/gridfs-find.json     |  3 ++-
 test/csot/gridfs-upload.json   |  3 ++-
 test/test_change_stream.py     | 13 ++++---------
 test/test_csot.py              |  4 +---
 test/test_custom_types.py      |  9 +++------
 test/test_examples.py          |  3 +--
 10 files changed, 29 insertions(+), 25 deletions(-)

diff --git a/test/__init__.py b/test/__init__.py
index 4ecc3c9e9e..2a3e59adf9 100644
--- a/test/__init__.py
+++ b/test/__init__.py
@@ -759,6 +759,16 @@ def require_no_load_balancer(self, func):
             lambda: not self.load_balancer, "Must not be connected to a load balancer", func=func
         )
 
+    def require_no_serverless(self, func):
+        """Run a test only if the client is not connected to serverless."""
+        return self._require(
+            lambda: not self.serverless, "Must not be connected to serverless", func=func
+        )
+
+    def require_change_streams(self, func):
+        """Run a test only if the server supports change streams."""
+        return self.require_no_mmap(self.require_no_standalone(self.require_no_serverless(func)))
+
     def is_topology_type(self, topologies):
         unknown = set(topologies) - {
             "single",
diff --git a/test/csot/gridfs-advanced.json b/test/csot/gridfs-advanced.json
index 6bf0229a04..c6c0944d2f 100644
--- a/test/csot/gridfs-advanced.json
+++ b/test/csot/gridfs-advanced.json
@@ -3,7 +3,8 @@
   "schemaVersion": "1.9",
   "runOnRequirements": [
     {
-      "minServerVersion": "4.4"
+      "minServerVersion": "4.4",
+      "serverless": "forbid"
     }
   ],
   "createEntities": [
diff --git a/test/csot/gridfs-delete.json b/test/csot/gridfs-delete.json
index 8701929ff3..9f4980114b 100644
--- a/test/csot/gridfs-delete.json
+++ b/test/csot/gridfs-delete.json
@@ -3,7 +3,8 @@
   "schemaVersion": "1.9",
   "runOnRequirements": [
     {
-      "minServerVersion": "4.4"
+      "minServerVersion": "4.4",
+      "serverless": "forbid"
     }
   ],
   "createEntities": [
diff --git a/test/csot/gridfs-download.json b/test/csot/gridfs-download.json
index 2ab64010f8..8542f69e89 100644
--- a/test/csot/gridfs-download.json
+++ b/test/csot/gridfs-download.json
@@ -3,7 +3,8 @@
   "schemaVersion": "1.9",
   "runOnRequirements": [
     {
-      "minServerVersion": "4.4"
+      "minServerVersion": "4.4",
+      "serverless": "forbid"
     }
   ],
   "createEntities": [
diff --git a/test/csot/gridfs-find.json b/test/csot/gridfs-find.json
index 45bb7066d6..7409036284 100644
--- a/test/csot/gridfs-find.json
+++ b/test/csot/gridfs-find.json
@@ -3,7 +3,8 @@
   "schemaVersion": "1.9",
   "runOnRequirements": [
     {
-      "minServerVersion": "4.4"
+      "minServerVersion": "4.4",
+      "serverless": "forbid"
     }
   ],
   "createEntities": [
diff --git a/test/csot/gridfs-upload.json b/test/csot/gridfs-upload.json
index 690fdda77f..b3f174973d 100644
--- a/test/csot/gridfs-upload.json
+++ 
b/test/csot/gridfs-upload.json @@ -3,7 +3,8 @@ "schemaVersion": "1.9", "runOnRequirements": [ { - "minServerVersion": "4.4" + "minServerVersion": "4.4", + "serverless": "forbid" } ], "createEntities": [ diff --git a/test/test_change_stream.py b/test/test_change_stream.py index a8b793333e..18a0ec84c4 100644 --- a/test/test_change_stream.py +++ b/test/test_change_stream.py @@ -430,8 +430,7 @@ def test_start_after_resume_process_with_changes(self): self.assertEqual(change["fullDocument"], {"_id": 3}) @no_type_check - @client_context.require_no_mongos # Remove after SERVER-41196 - @client_context.require_version_min(4, 1, 1) + @client_context.require_version_min(4, 2) def test_start_after_resume_process_without_changes(self): resume_token = self.get_resume_token(invalidate=True) @@ -767,8 +766,7 @@ class TestClusterChangeStream(TestChangeStreamBase, APITestsMixin): @classmethod @client_context.require_version_min(4, 0, 0, -1) - @client_context.require_no_mmap - @client_context.require_no_standalone + @client_context.require_change_streams def setUpClass(cls): super(TestClusterChangeStream, cls).setUpClass() cls.dbs = [cls.db, cls.client.pymongo_test_2] @@ -829,8 +827,7 @@ def test_full_pipeline(self): class TestDatabaseChangeStream(TestChangeStreamBase, APITestsMixin): @classmethod @client_context.require_version_min(4, 0, 0, -1) - @client_context.require_no_mmap - @client_context.require_no_standalone + @client_context.require_change_streams def setUpClass(cls): super(TestDatabaseChangeStream, cls).setUpClass() @@ -915,9 +912,7 @@ def test_isolation(self): class TestCollectionChangeStream(TestChangeStreamBase, APITestsMixin, ProseSpecTestsMixin): @classmethod - @client_context.require_version_min(3, 5, 11) - @client_context.require_no_mmap - @client_context.require_no_standalone + @client_context.require_change_streams def setUpClass(cls): super(TestCollectionChangeStream, cls).setUpClass() diff --git a/test/test_csot.py b/test/test_csot.py index 7b82a49caf..a9cf7a0124 100644 --- a/test/test_csot.py +++ b/test/test_csot.py @@ -73,9 +73,7 @@ def test_timeout_nested(self): self.assertEqual(_csot.get_deadline(), float("inf")) self.assertEqual(_csot.get_rtt(), 0.0) - @client_context.require_version_min(3, 6) - @client_context.require_no_mmap - @client_context.require_no_standalone + @client_context.require_change_streams def test_change_stream_can_resume_after_timeouts(self): coll = self.db.test with coll.watch(max_await_time_ms=150) as stream: diff --git a/test/test_custom_types.py b/test/test_custom_types.py index e11b5ebe00..868756c67d 100644 --- a/test/test_custom_types.py +++ b/test/test_custom_types.py @@ -896,8 +896,7 @@ def run_test(doc_cls): class TestCollectionChangeStreamsWCustomTypes(IntegrationTest, ChangeStreamsWCustomTypesTestMixin): @classmethod - @client_context.require_no_mmap - @client_context.require_no_standalone + @client_context.require_change_streams def setUpClass(cls): super(TestCollectionChangeStreamsWCustomTypes, cls).setUpClass() cls.db.test.delete_many({}) @@ -916,8 +915,7 @@ def create_targets(self, *args, **kwargs): class TestDatabaseChangeStreamsWCustomTypes(IntegrationTest, ChangeStreamsWCustomTypesTestMixin): @classmethod @client_context.require_version_min(4, 0, 0) - @client_context.require_no_mmap - @client_context.require_no_standalone + @client_context.require_change_streams def setUpClass(cls): super(TestDatabaseChangeStreamsWCustomTypes, cls).setUpClass() cls.db.test.delete_many({}) @@ -936,8 +934,7 @@ def create_targets(self, *args, **kwargs): class 
TestClusterChangeStreamsWCustomTypes(IntegrationTest, ChangeStreamsWCustomTypesTestMixin): @classmethod @client_context.require_version_min(4, 0, 0) - @client_context.require_no_mmap - @client_context.require_no_standalone + @client_context.require_change_streams def setUpClass(cls): super(TestClusterChangeStreamsWCustomTypes, cls).setUpClass() cls.db.test.delete_many({}) diff --git a/test/test_examples.py b/test/test_examples.py index e23abe104f..9c1adda69c 100644 --- a/test/test_examples.py +++ b/test/test_examples.py @@ -740,8 +740,7 @@ def test_delete(self): self.assertEqual(db.inventory.count_documents({}), 0) - @client_context.require_replica_set - @client_context.require_no_mmap + @client_context.require_change_streams def test_change_streams(self): db = self.db done = False From 13e2715af0eb2f8fdcb5fae470db1120112202a3 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Wed, 3 Aug 2022 15:34:16 -0700 Subject: [PATCH 0230/1588] PYTHON-3312 Convert SDAM integration tests to unified (#1028) --- .evergreen/resync-specs.sh | 4 +- pymongo/monitoring.py | 64 +-- .../unified/auth-error.json | 230 ++++++++ .../unified/auth-misc-command-error.json | 230 ++++++++ .../unified/auth-network-error.json | 230 ++++++++ .../unified/auth-network-timeout-error.json | 233 ++++++++ .../unified/auth-shutdown-error.json | 230 ++++++++ .../unified/cancel-server-check.json | 201 +++++++ .../unified/connectTimeoutMS.json | 221 ++++++++ .../unified/find-network-error.json | 234 ++++++++ .../unified/find-network-timeout-error.json | 199 +++++++ .../unified/find-shutdown-error.json | 251 +++++++++ .../unified/hello-command-error.json | 376 +++++++++++++ .../unified/hello-network-error.json | 346 ++++++++++++ .../unified/hello-timeout.json | 514 ++++++++++++++++++ .../unified/insert-network-error.json | 246 +++++++++ .../unified/insert-shutdown-error.json | 250 +++++++++ .../unified/minPoolSize-error.json | 177 ++++++ .../unified}/pool-cleared-error.json | 204 ++++--- .../rediscover-quickly-after-step-down.json | 242 +++++++++ .../auth-error.json | 140 ----- .../auth-misc-command-error.json | 140 ----- .../auth-network-error.json | 140 ----- .../auth-network-timeout-error.json | 143 ----- .../auth-shutdown-error.json | 140 ----- .../cancel-server-check.json | 130 ----- .../connectTimeoutMS.json | 149 ----- .../find-network-error.json | 144 ----- .../find-network-timeout-error.json | 119 ---- .../find-shutdown-error.json | 168 ------ .../hello-command-error.json | 223 -------- .../hello-network-error.json | 219 -------- .../hello-timeout.json | 337 ------------ .../insert-network-error.json | 156 ------ .../insert-shutdown-error.json | 167 ------ .../minPoolSize-error.json | 102 ---- .../rediscover-quickly-after-step-down.json | 165 ------ test/test_discovery_and_monitoring.py | 114 +--- .../entity-thread-additionalProperties.json | 18 + .../invalid/entity-thread-id-required.json | 15 + .../invalid/entity-thread-id-type.json | 17 + ...tionChangedEvent-additionalProperties.json | 23 + ...erverDescription-additionalProperties.json | 25 + ...ngedEvent-serverDescription-type-enum.json | 25 + ...ngedEvent-serverDescription-type-type.json | 25 + test/unified_format.py | 165 +++++- 46 files changed, 4881 insertions(+), 3010 deletions(-) create mode 100644 test/discovery_and_monitoring/unified/auth-error.json create mode 100644 test/discovery_and_monitoring/unified/auth-misc-command-error.json create mode 100644 test/discovery_and_monitoring/unified/auth-network-error.json create mode 100644 
test/discovery_and_monitoring/unified/auth-network-timeout-error.json create mode 100644 test/discovery_and_monitoring/unified/auth-shutdown-error.json create mode 100644 test/discovery_and_monitoring/unified/cancel-server-check.json create mode 100644 test/discovery_and_monitoring/unified/connectTimeoutMS.json create mode 100644 test/discovery_and_monitoring/unified/find-network-error.json create mode 100644 test/discovery_and_monitoring/unified/find-network-timeout-error.json create mode 100644 test/discovery_and_monitoring/unified/find-shutdown-error.json create mode 100644 test/discovery_and_monitoring/unified/hello-command-error.json create mode 100644 test/discovery_and_monitoring/unified/hello-network-error.json create mode 100644 test/discovery_and_monitoring/unified/hello-timeout.json create mode 100644 test/discovery_and_monitoring/unified/insert-network-error.json create mode 100644 test/discovery_and_monitoring/unified/insert-shutdown-error.json create mode 100644 test/discovery_and_monitoring/unified/minPoolSize-error.json rename test/{discovery_and_monitoring_integration => discovery_and_monitoring/unified}/pool-cleared-error.json (60%) create mode 100644 test/discovery_and_monitoring/unified/rediscover-quickly-after-step-down.json delete mode 100644 test/discovery_and_monitoring_integration/auth-error.json delete mode 100644 test/discovery_and_monitoring_integration/auth-misc-command-error.json delete mode 100644 test/discovery_and_monitoring_integration/auth-network-error.json delete mode 100644 test/discovery_and_monitoring_integration/auth-network-timeout-error.json delete mode 100644 test/discovery_and_monitoring_integration/auth-shutdown-error.json delete mode 100644 test/discovery_and_monitoring_integration/cancel-server-check.json delete mode 100644 test/discovery_and_monitoring_integration/connectTimeoutMS.json delete mode 100644 test/discovery_and_monitoring_integration/find-network-error.json delete mode 100644 test/discovery_and_monitoring_integration/find-network-timeout-error.json delete mode 100644 test/discovery_and_monitoring_integration/find-shutdown-error.json delete mode 100644 test/discovery_and_monitoring_integration/hello-command-error.json delete mode 100644 test/discovery_and_monitoring_integration/hello-network-error.json delete mode 100644 test/discovery_and_monitoring_integration/hello-timeout.json delete mode 100644 test/discovery_and_monitoring_integration/insert-network-error.json delete mode 100644 test/discovery_and_monitoring_integration/insert-shutdown-error.json delete mode 100644 test/discovery_and_monitoring_integration/minPoolSize-error.json delete mode 100644 test/discovery_and_monitoring_integration/rediscover-quickly-after-step-down.json create mode 100644 test/unified-test-format/invalid/entity-thread-additionalProperties.json create mode 100644 test/unified-test-format/invalid/entity-thread-id-required.json create mode 100644 test/unified-test-format/invalid/entity-thread-id-type.json create mode 100644 test/unified-test-format/invalid/expectedSdamEvent-serverDescriptionChangedEvent-additionalProperties.json create mode 100644 test/unified-test-format/invalid/expectedSdamEvent-serverDescriptionChangedEvent-serverDescription-additionalProperties.json create mode 100644 test/unified-test-format/invalid/expectedSdamEvent-serverDescriptionChangedEvent-serverDescription-type-enum.json create mode 100644 test/unified-test-format/invalid/expectedSdamEvent-serverDescriptionChangedEvent-serverDescription-type-type.json diff --git 
a/.evergreen/resync-specs.sh b/.evergreen/resync-specs.sh index b64868c5a9..817fa4b730 100755 --- a/.evergreen/resync-specs.sh +++ b/.evergreen/resync-specs.sh @@ -132,8 +132,8 @@ do discovery_and_monitoring/sharded cpjson server-discovery-and-monitoring/tests/single \ discovery_and_monitoring/single - cpjson server-discovery-and-monitoring/tests/integration \ - discovery_and_monitoring_integration + cpjson server-discovery-and-monitoring/tests/unified \ + discovery_and_monitoring/unified cpjson server-discovery-and-monitoring/tests/load-balanced \ discovery_and_monitoring/load-balanced ;; diff --git a/pymongo/monitoring.py b/pymongo/monitoring.py index f3f773fbbd..90b8c1a3eb 100644 --- a/pymongo/monitoring.py +++ b/pymongo/monitoring.py @@ -914,13 +914,12 @@ class ConnectionCheckOutFailedReason(object): class _ConnectionEvent(object): - """Private base class for some connection events.""" + """Private base class for connection events.""" - __slots__ = ("__address", "__connection_id") + __slots__ = ("__address",) - def __init__(self, address: _Address, connection_id: int) -> None: + def __init__(self, address: _Address) -> None: self.__address = address - self.__connection_id = connection_id @property def address(self) -> _Address: @@ -929,16 +928,29 @@ def address(self) -> _Address: """ return self.__address + def __repr__(self): + return "%s(%r)" % (self.__class__.__name__, self.__address) + + +class _ConnectionIdEvent(_ConnectionEvent): + """Private base class for connection events with an id.""" + + __slots__ = ("__connection_id",) + + def __init__(self, address: _Address, connection_id: int) -> None: + super().__init__(address) + self.__connection_id = connection_id + @property def connection_id(self) -> int: """The ID of the Connection.""" return self.__connection_id def __repr__(self): - return "%s(%r, %r)" % (self.__class__.__name__, self.__address, self.__connection_id) + return "%s(%r, %r)" % (self.__class__.__name__, self.address, self.__connection_id) -class ConnectionCreatedEvent(_ConnectionEvent): +class ConnectionCreatedEvent(_ConnectionIdEvent): """Published when a Connection Pool creates a Connection object. NOTE: This connection is not ready for use until the @@ -955,7 +967,7 @@ class ConnectionCreatedEvent(_ConnectionEvent): __slots__ = () -class ConnectionReadyEvent(_ConnectionEvent): +class ConnectionReadyEvent(_ConnectionIdEvent): """Published when a Connection has finished its setup, and is ready to use. :Parameters: @@ -969,7 +981,7 @@ class ConnectionReadyEvent(_ConnectionEvent): __slots__ = () -class ConnectionClosedEvent(_ConnectionEvent): +class ConnectionClosedEvent(_ConnectionIdEvent): """Published when a Connection is closed. :Parameters: @@ -1005,7 +1017,7 @@ def __repr__(self): ) -class ConnectionCheckOutStartedEvent(object): +class ConnectionCheckOutStartedEvent(_ConnectionEvent): """Published when the driver starts attempting to check out a connection. :Parameters: @@ -1015,23 +1027,10 @@ class ConnectionCheckOutStartedEvent(object): .. versionadded:: 3.9 """ - __slots__ = ("__address",) - - def __init__(self, address): - self.__address = address - - @property - def address(self): - """The address (host, port) pair of the server this connection is - attempting to connect to. 
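For reference, these CMAP events reach application code through pymongo.monitoring.ConnectionPoolListener, and the refactoring in this patch is purely internal: every event still exposes an address property, while only the created, ready, closed, checked-out, and checked-in events expose connection_id (now via the shared _ConnectionIdEvent base). A hedged sketch of a listener exercising that surface; the standard monitoring API is used, and the ping command is just a convenient trigger.

    from pymongo import MongoClient, monitoring

    class PoolEventLogger(monitoring.ConnectionPoolListener):
        def pool_created(self, event):
            print("pool created for", event.address)
        def pool_ready(self, event):
            print("pool ready for", event.address)
        def pool_cleared(self, event):
            print("pool cleared for", event.address)
        def pool_closed(self, event):
            print("pool closed for", event.address)
        def connection_created(self, event):
            print("conn", event.connection_id, "created at", event.address)
        def connection_ready(self, event):
            print("conn", event.connection_id, "ready")
        def connection_closed(self, event):
            print("conn", event.connection_id, "closed:", event.reason)
        def connection_check_out_started(self, event):
            print("checkout started at", event.address)  # address only, no id yet
        def connection_check_out_failed(self, event):
            print("checkout failed:", event.reason)
        def connection_checked_out(self, event):
            print("conn", event.connection_id, "checked out")
        def connection_checked_in(self, event):
            print("conn", event.connection_id, "checked in")

    client = MongoClient(event_listeners=[PoolEventLogger()])
    client.admin.command("ping")  # triggers pool and connection events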
- """ - return self.__address - - def __repr__(self): - return "%s(%r)" % (self.__class__.__name__, self.__address) + __slots__ = () -class ConnectionCheckOutFailedEvent(object): +class ConnectionCheckOutFailedEvent(_ConnectionEvent): """Published when the driver's attempt to check out a connection fails. :Parameters: @@ -1042,19 +1041,12 @@ class ConnectionCheckOutFailedEvent(object): .. versionadded:: 3.9 """ - __slots__ = ("__address", "__reason") + __slots__ = ("__reason",) def __init__(self, address: _Address, reason: str) -> None: - self.__address = address + super().__init__(address) self.__reason = reason - @property - def address(self) -> _Address: - """The address (host, port) pair of the server this connection is - attempting to connect to. - """ - return self.__address - @property def reason(self) -> str: """A reason explaining why connection check out failed. @@ -1065,10 +1057,10 @@ def reason(self) -> str: return self.__reason def __repr__(self): - return "%s(%r, %r)" % (self.__class__.__name__, self.__address, self.__reason) + return "%s(%r, %r)" % (self.__class__.__name__, self.address, self.__reason) -class ConnectionCheckedOutEvent(_ConnectionEvent): +class ConnectionCheckedOutEvent(_ConnectionIdEvent): """Published when the driver successfully checks out a Connection. :Parameters: @@ -1082,7 +1074,7 @@ class ConnectionCheckedOutEvent(_ConnectionEvent): __slots__ = () -class ConnectionCheckedInEvent(_ConnectionEvent): +class ConnectionCheckedInEvent(_ConnectionIdEvent): """Published when the driver checks in a Connection into the Pool. :Parameters: diff --git a/test/discovery_and_monitoring/unified/auth-error.json b/test/discovery_and_monitoring/unified/auth-error.json new file mode 100644 index 0000000000..5c78ecfe50 --- /dev/null +++ b/test/discovery_and_monitoring/unified/auth-error.json @@ -0,0 +1,230 @@ +{ + "description": "auth-error", + "schemaVersion": "1.10", + "runOnRequirements": [ + { + "minServerVersion": "4.4", + "auth": true, + "serverless": "forbid", + "topologies": [ + "single", + "replicaset", + "sharded" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "setupClient", + "useMultipleMongoses": false + } + } + ], + "initialData": [ + { + "collectionName": "auth-error", + "databaseName": "sdam-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ], + "tests": [ + { + "description": "Reset server and pool after AuthenticationFailure error", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "setupClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "saslContinue" + ], + "appName": "authErrorTest", + "errorCode": 18 + } + } + } + }, + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent", + "serverDescriptionChangedEvent", + "poolClearedEvent" + ], + "uriOptions": { + "retryWrites": false, + "appname": "authErrorTest" + } + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "sdam-tests" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "auth-error" + } + } + ] + } + }, + { + "name": "insertMany", + "object": "collection", + "arguments": { + "documents": [ + { + "_id": 3 + }, + { + "_id": 4 + } + ] + }, + "expectError": { + "isError": true + } + }, + { + "name": 
"waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "serverDescriptionChangedEvent": { + "newDescription": { + "type": "Unknown" + } + } + }, + "count": 1 + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "poolClearedEvent": {} + }, + "count": 1 + } + }, + { + "name": "insertMany", + "object": "collection", + "arguments": { + "documents": [ + { + "_id": 5 + }, + { + "_id": 6 + } + ] + } + }, + { + "name": "assertEventCount", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "serverDescriptionChangedEvent": { + "newDescription": { + "type": "Unknown" + } + } + }, + "count": 1 + } + }, + { + "name": "assertEventCount", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "poolClearedEvent": {} + }, + "count": 1 + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "command", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "auth-error", + "documents": [ + { + "_id": 5 + }, + { + "_id": 6 + } + ] + }, + "commandName": "insert", + "databaseName": "sdam-tests" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "auth-error", + "databaseName": "sdam-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 5 + }, + { + "_id": 6 + } + ] + } + ] + } + ] +} diff --git a/test/discovery_and_monitoring/unified/auth-misc-command-error.json b/test/discovery_and_monitoring/unified/auth-misc-command-error.json new file mode 100644 index 0000000000..6e1b645461 --- /dev/null +++ b/test/discovery_and_monitoring/unified/auth-misc-command-error.json @@ -0,0 +1,230 @@ +{ + "description": "auth-misc-command-error", + "schemaVersion": "1.10", + "runOnRequirements": [ + { + "minServerVersion": "4.4", + "auth": true, + "serverless": "forbid", + "topologies": [ + "single", + "replicaset", + "sharded" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "setupClient", + "useMultipleMongoses": false + } + } + ], + "initialData": [ + { + "collectionName": "auth-misc-error", + "databaseName": "sdam-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ], + "tests": [ + { + "description": "Reset server and pool after misc command error", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "setupClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "saslContinue" + ], + "appName": "authMiscErrorTest", + "errorCode": 1 + } + } + } + }, + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent", + "serverDescriptionChangedEvent", + "poolClearedEvent" + ], + "uriOptions": { + "retryWrites": false, + "appname": "authMiscErrorTest" + } + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "sdam-tests" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "auth-misc-error" + } + } + ] + } + }, + { + "name": "insertMany", + "object": "collection", + "arguments": { + "documents": [ + { + "_id": 3 + }, + { + "_id": 4 + } + ] + }, + "expectError": { + "isError": true + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "serverDescriptionChangedEvent": { + 
"newDescription": { + "type": "Unknown" + } + } + }, + "count": 1 + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "poolClearedEvent": {} + }, + "count": 1 + } + }, + { + "name": "insertMany", + "object": "collection", + "arguments": { + "documents": [ + { + "_id": 5 + }, + { + "_id": 6 + } + ] + } + }, + { + "name": "assertEventCount", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "serverDescriptionChangedEvent": { + "newDescription": { + "type": "Unknown" + } + } + }, + "count": 1 + } + }, + { + "name": "assertEventCount", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "poolClearedEvent": {} + }, + "count": 1 + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "command", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "auth-misc-error", + "documents": [ + { + "_id": 5 + }, + { + "_id": 6 + } + ] + }, + "commandName": "insert", + "databaseName": "sdam-tests" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "auth-misc-error", + "databaseName": "sdam-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 5 + }, + { + "_id": 6 + } + ] + } + ] + } + ] +} diff --git a/test/discovery_and_monitoring/unified/auth-network-error.json b/test/discovery_and_monitoring/unified/auth-network-error.json new file mode 100644 index 0000000000..7606d2db7a --- /dev/null +++ b/test/discovery_and_monitoring/unified/auth-network-error.json @@ -0,0 +1,230 @@ +{ + "description": "auth-network-error", + "schemaVersion": "1.10", + "runOnRequirements": [ + { + "minServerVersion": "4.4", + "auth": true, + "serverless": "forbid", + "topologies": [ + "single", + "replicaset", + "sharded" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "setupClient", + "useMultipleMongoses": false + } + } + ], + "initialData": [ + { + "collectionName": "auth-network-error", + "databaseName": "sdam-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ], + "tests": [ + { + "description": "Reset server and pool after network error during authentication", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "setupClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "saslContinue" + ], + "closeConnection": true, + "appName": "authNetworkErrorTest" + } + } + } + }, + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent", + "serverDescriptionChangedEvent", + "poolClearedEvent" + ], + "uriOptions": { + "retryWrites": false, + "appname": "authNetworkErrorTest" + } + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "sdam-tests" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "auth-network-error" + } + } + ] + } + }, + { + "name": "insertMany", + "object": "collection", + "arguments": { + "documents": [ + { + "_id": 3 + }, + { + "_id": 4 + } + ] + }, + "expectError": { + "isError": true + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "serverDescriptionChangedEvent": { + "newDescription": { + "type": "Unknown" + } + } + }, + "count": 1 + } + }, + { + "name": "waitForEvent", + 
"object": "testRunner", + "arguments": { + "client": "client", + "event": { + "poolClearedEvent": {} + }, + "count": 1 + } + }, + { + "name": "insertMany", + "object": "collection", + "arguments": { + "documents": [ + { + "_id": 5 + }, + { + "_id": 6 + } + ] + } + }, + { + "name": "assertEventCount", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "serverDescriptionChangedEvent": { + "newDescription": { + "type": "Unknown" + } + } + }, + "count": 1 + } + }, + { + "name": "assertEventCount", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "poolClearedEvent": {} + }, + "count": 1 + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "command", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "auth-network-error", + "documents": [ + { + "_id": 5 + }, + { + "_id": 6 + } + ] + }, + "commandName": "insert", + "databaseName": "sdam-tests" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "auth-network-error", + "databaseName": "sdam-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 5 + }, + { + "_id": 6 + } + ] + } + ] + } + ] +} diff --git a/test/discovery_and_monitoring/unified/auth-network-timeout-error.json b/test/discovery_and_monitoring/unified/auth-network-timeout-error.json new file mode 100644 index 0000000000..22066e8bae --- /dev/null +++ b/test/discovery_and_monitoring/unified/auth-network-timeout-error.json @@ -0,0 +1,233 @@ +{ + "description": "auth-network-timeout-error", + "schemaVersion": "1.10", + "runOnRequirements": [ + { + "minServerVersion": "4.4", + "auth": true, + "serverless": "forbid", + "topologies": [ + "single", + "replicaset", + "sharded" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "setupClient", + "useMultipleMongoses": false + } + } + ], + "initialData": [ + { + "collectionName": "auth-network-timeout-error", + "databaseName": "sdam-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ], + "tests": [ + { + "description": "Reset server and pool after network timeout error during authentication", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "setupClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "saslContinue" + ], + "blockConnection": true, + "blockTimeMS": 500, + "appName": "authNetworkTimeoutErrorTest" + } + } + } + }, + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent", + "serverDescriptionChangedEvent", + "poolClearedEvent" + ], + "uriOptions": { + "retryWrites": false, + "appname": "authNetworkTimeoutErrorTest", + "connectTimeoutMS": 250, + "socketTimeoutMS": 250 + } + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "sdam-tests" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "auth-network-timeout-error" + } + } + ] + } + }, + { + "name": "insertMany", + "object": "collection", + "arguments": { + "documents": [ + { + "_id": 3 + }, + { + "_id": 4 + } + ] + }, + "expectError": { + "isError": true + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "serverDescriptionChangedEvent": { + "newDescription": { + "type": "Unknown" + } + } + }, + "count": 1 + 
} + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "poolClearedEvent": {} + }, + "count": 1 + } + }, + { + "name": "insertMany", + "object": "collection", + "arguments": { + "documents": [ + { + "_id": 5 + }, + { + "_id": 6 + } + ] + } + }, + { + "name": "assertEventCount", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "serverDescriptionChangedEvent": { + "newDescription": { + "type": "Unknown" + } + } + }, + "count": 1 + } + }, + { + "name": "assertEventCount", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "poolClearedEvent": {} + }, + "count": 1 + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "command", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "auth-network-timeout-error", + "documents": [ + { + "_id": 5 + }, + { + "_id": 6 + } + ] + }, + "commandName": "insert", + "databaseName": "sdam-tests" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "auth-network-timeout-error", + "databaseName": "sdam-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 5 + }, + { + "_id": 6 + } + ] + } + ] + } + ] +} diff --git a/test/discovery_and_monitoring/unified/auth-shutdown-error.json b/test/discovery_and_monitoring/unified/auth-shutdown-error.json new file mode 100644 index 0000000000..5dd7b5bb6f --- /dev/null +++ b/test/discovery_and_monitoring/unified/auth-shutdown-error.json @@ -0,0 +1,230 @@ +{ + "description": "auth-shutdown-error", + "schemaVersion": "1.10", + "runOnRequirements": [ + { + "minServerVersion": "4.4", + "auth": true, + "serverless": "forbid", + "topologies": [ + "single", + "replicaset", + "sharded" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "setupClient", + "useMultipleMongoses": false + } + } + ], + "initialData": [ + { + "collectionName": "auth-shutdown-error", + "databaseName": "sdam-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ], + "tests": [ + { + "description": "Reset server and pool after shutdown error during authentication", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "setupClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "saslContinue" + ], + "appName": "authShutdownErrorTest", + "errorCode": 91 + } + } + } + }, + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent", + "serverDescriptionChangedEvent", + "poolClearedEvent" + ], + "uriOptions": { + "retryWrites": false, + "appname": "authShutdownErrorTest" + } + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "sdam-tests" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "auth-shutdown-error" + } + } + ] + } + }, + { + "name": "insertMany", + "object": "collection", + "arguments": { + "documents": [ + { + "_id": 3 + }, + { + "_id": 4 + } + ] + }, + "expectError": { + "isError": true + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "serverDescriptionChangedEvent": { + "newDescription": { + "type": "Unknown" + } + } + }, + "count": 1 + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + 
"client": "client", + "event": { + "poolClearedEvent": {} + }, + "count": 1 + } + }, + { + "name": "insertMany", + "object": "collection", + "arguments": { + "documents": [ + { + "_id": 5 + }, + { + "_id": 6 + } + ] + } + }, + { + "name": "assertEventCount", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "serverDescriptionChangedEvent": { + "newDescription": { + "type": "Unknown" + } + } + }, + "count": 1 + } + }, + { + "name": "assertEventCount", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "poolClearedEvent": {} + }, + "count": 1 + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "command", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "auth-shutdown-error", + "documents": [ + { + "_id": 5 + }, + { + "_id": 6 + } + ] + }, + "commandName": "insert", + "databaseName": "sdam-tests" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "auth-shutdown-error", + "databaseName": "sdam-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 5 + }, + { + "_id": 6 + } + ] + } + ] + } + ] +} diff --git a/test/discovery_and_monitoring/unified/cancel-server-check.json b/test/discovery_and_monitoring/unified/cancel-server-check.json new file mode 100644 index 0000000000..896cc8d087 --- /dev/null +++ b/test/discovery_and_monitoring/unified/cancel-server-check.json @@ -0,0 +1,201 @@ +{ + "description": "cancel-server-check", + "schemaVersion": "1.10", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "replicaset" + ], + "serverless": "forbid" + }, + { + "minServerVersion": "4.2", + "topologies": [ + "sharded" + ], + "serverless": "forbid" + } + ], + "createEntities": [ + { + "client": { + "id": "setupClient", + "useMultipleMongoses": false + } + } + ], + "initialData": [ + { + "collectionName": "cancel-server-check", + "databaseName": "sdam-tests", + "documents": [] + } + ], + "tests": [ + { + "description": "Cancel server check", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "useMultipleMongoses": false, + "observeEvents": [ + "serverDescriptionChangedEvent", + "poolClearedEvent" + ], + "uriOptions": { + "retryWrites": true, + "heartbeatFrequencyMS": 10000, + "serverSelectionTimeoutMS": 5000, + "appname": "cancelServerCheckTest" + } + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "sdam-tests" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "cancel-server-check" + } + } + ] + } + }, + { + "name": "insertOne", + "object": "collection", + "arguments": { + "document": { + "_id": 1 + } + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "closeConnection": true + } + }, + "client": "setupClient" + } + }, + { + "name": "insertOne", + "object": "collection", + "arguments": { + "document": { + "_id": 2 + } + }, + "expectResult": { + "insertedId": 2 + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "serverDescriptionChangedEvent": { + "newDescription": { + "type": "Unknown" + } + } + }, + "count": 1 + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + 
"poolClearedEvent": {} + }, + "count": 1 + } + }, + { + "name": "insertOne", + "object": "collection", + "arguments": { + "document": { + "_id": 3 + } + }, + "expectResult": { + "insertedId": 3 + } + }, + { + "name": "assertEventCount", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "serverDescriptionChangedEvent": { + "newDescription": { + "type": "Unknown" + } + } + }, + "count": 1 + } + }, + { + "name": "assertEventCount", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "poolClearedEvent": {} + }, + "count": 1 + } + } + ], + "outcome": [ + { + "collectionName": "cancel-server-check", + "databaseName": "sdam-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 3 + } + ] + } + ] + } + ] +} diff --git a/test/discovery_and_monitoring/unified/connectTimeoutMS.json b/test/discovery_and_monitoring/unified/connectTimeoutMS.json new file mode 100644 index 0000000000..67a4d9da1d --- /dev/null +++ b/test/discovery_and_monitoring/unified/connectTimeoutMS.json @@ -0,0 +1,221 @@ +{ + "description": "connectTimeoutMS", + "schemaVersion": "1.10", + "runOnRequirements": [ + { + "minServerVersion": "4.4", + "serverless": "forbid", + "topologies": [ + "single", + "replicaset", + "sharded" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "setupClient", + "useMultipleMongoses": false + } + } + ], + "initialData": [ + { + "collectionName": "connectTimeoutMS", + "databaseName": "sdam-tests", + "documents": [] + } + ], + "tests": [ + { + "description": "connectTimeoutMS=0", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "observeEvents": [ + "commandStartedEvent", + "serverDescriptionChangedEvent", + "poolClearedEvent" + ], + "uriOptions": { + "retryWrites": false, + "connectTimeoutMS": 0, + "heartbeatFrequencyMS": 500, + "appname": "connectTimeoutMS=0" + }, + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "sdam-tests" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "connectTimeoutMS" + } + } + ] + } + }, + { + "name": "insertMany", + "object": "collection", + "arguments": { + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "hello", + "isMaster" + ], + "appName": "connectTimeoutMS=0", + "blockConnection": true, + "blockTimeMS": 550 + } + }, + "client": "setupClient" + } + }, + { + "name": "wait", + "object": "testRunner", + "arguments": { + "ms": 750 + } + }, + { + "name": "insertMany", + "object": "collection", + "arguments": { + "documents": [ + { + "_id": 3 + }, + { + "_id": 4 + } + ] + } + }, + { + "name": "assertEventCount", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "serverDescriptionChangedEvent": { + "newDescription": { + "type": "Unknown" + } + } + }, + "count": 0 + } + }, + { + "name": "assertEventCount", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "poolClearedEvent": {} + }, + "count": 0 + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "command", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "connectTimeoutMS", + "documents": [ + { + "_id": 1 + }, + { + 
"_id": 2 + } + ] + }, + "commandName": "insert", + "databaseName": "sdam-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "connectTimeoutMS", + "documents": [ + { + "_id": 3 + }, + { + "_id": 4 + } + ] + }, + "commandName": "insert", + "databaseName": "sdam-tests" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "connectTimeoutMS", + "databaseName": "sdam-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 3 + }, + { + "_id": 4 + } + ] + } + ] + } + ] +} diff --git a/test/discovery_and_monitoring/unified/find-network-error.json b/test/discovery_and_monitoring/unified/find-network-error.json new file mode 100644 index 0000000000..651466bfa6 --- /dev/null +++ b/test/discovery_and_monitoring/unified/find-network-error.json @@ -0,0 +1,234 @@ +{ + "description": "find-network-error", + "schemaVersion": "1.10", + "runOnRequirements": [ + { + "minServerVersion": "4.4", + "serverless": "forbid", + "topologies": [ + "single", + "replicaset", + "sharded" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "setupClient", + "useMultipleMongoses": false + } + } + ], + "initialData": [ + { + "collectionName": "find-network-error", + "databaseName": "sdam-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ], + "tests": [ + { + "description": "Reset server and pool after network error on find", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "setupClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "closeConnection": true, + "appName": "findNetworkErrorTest" + } + } + } + }, + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent", + "serverDescriptionChangedEvent", + "poolClearedEvent" + ], + "uriOptions": { + "retryWrites": false, + "retryReads": false, + "appname": "findNetworkErrorTest" + } + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "sdam-tests" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "find-network-error" + } + } + ] + } + }, + { + "name": "find", + "object": "collection", + "arguments": { + "filter": { + "_id": 1 + } + }, + "expectError": { + "isError": true + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "serverDescriptionChangedEvent": { + "newDescription": { + "type": "Unknown" + } + } + }, + "count": 1 + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "poolClearedEvent": {} + }, + "count": 1 + } + }, + { + "name": "insertMany", + "object": "collection", + "arguments": { + "documents": [ + { + "_id": 5 + }, + { + "_id": 6 + } + ] + } + }, + { + "name": "assertEventCount", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "serverDescriptionChangedEvent": { + "newDescription": { + "type": "Unknown" + } + } + }, + "count": 1 + } + }, + { + "name": "assertEventCount", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "poolClearedEvent": {} + }, + "count": 1 + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "command", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": 
"find-network-error" + }, + "commandName": "find", + "databaseName": "sdam-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "find-network-error", + "documents": [ + { + "_id": 5 + }, + { + "_id": 6 + } + ] + }, + "commandName": "insert", + "databaseName": "sdam-tests" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "find-network-error", + "databaseName": "sdam-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 5 + }, + { + "_id": 6 + } + ] + } + ] + } + ] +} diff --git a/test/discovery_and_monitoring/unified/find-network-timeout-error.json b/test/discovery_and_monitoring/unified/find-network-timeout-error.json new file mode 100644 index 0000000000..2bde6daa5d --- /dev/null +++ b/test/discovery_and_monitoring/unified/find-network-timeout-error.json @@ -0,0 +1,199 @@ +{ + "description": "find-network-timeout-error", + "schemaVersion": "1.10", + "runOnRequirements": [ + { + "minServerVersion": "4.4", + "serverless": "forbid", + "topologies": [ + "single", + "replicaset", + "sharded" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "setupClient", + "useMultipleMongoses": false + } + } + ], + "initialData": [ + { + "collectionName": "find-network-timeout-error", + "databaseName": "sdam-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ], + "tests": [ + { + "description": "Ignore network timeout error on find", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "setupClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "blockConnection": true, + "blockTimeMS": 500, + "appName": "findNetworkTimeoutErrorTest" + } + } + } + }, + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent", + "serverDescriptionChangedEvent", + "poolClearedEvent" + ], + "uriOptions": { + "retryWrites": false, + "retryReads": false, + "appname": "findNetworkTimeoutErrorTest", + "socketTimeoutMS": 250 + } + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "sdam-tests" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "find-network-timeout-error" + } + } + ] + } + }, + { + "name": "find", + "object": "collection", + "arguments": { + "filter": { + "_id": 1 + } + }, + "expectError": { + "isError": true + } + }, + { + "name": "insertOne", + "object": "collection", + "arguments": { + "document": { + "_id": 3 + } + } + }, + { + "name": "assertEventCount", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "serverDescriptionChangedEvent": { + "newDescription": { + "type": "Unknown" + } + } + }, + "count": 0 + } + }, + { + "name": "assertEventCount", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "poolClearedEvent": {} + }, + "count": 0 + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "command", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "find-network-timeout-error" + }, + "commandName": "find", + "databaseName": "sdam-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "find-network-timeout-error", + "documents": [ + { + "_id": 3 + } + ] + }, + "commandName": "insert", + "databaseName": "sdam-tests" + } + } + ] + } + ], + "outcome": [ 
+ { + "collectionName": "find-network-timeout-error", + "databaseName": "sdam-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 3 + } + ] + } + ] + } + ] +} diff --git a/test/discovery_and_monitoring/unified/find-shutdown-error.json b/test/discovery_and_monitoring/unified/find-shutdown-error.json new file mode 100644 index 0000000000..624ad352fc --- /dev/null +++ b/test/discovery_and_monitoring/unified/find-shutdown-error.json @@ -0,0 +1,251 @@ +{ + "description": "find-shutdown-error", + "schemaVersion": "1.10", + "runOnRequirements": [ + { + "minServerVersion": "4.4", + "serverless": "forbid", + "topologies": [ + "single", + "replicaset", + "sharded" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "setupClient", + "useMultipleMongoses": false + } + } + ], + "initialData": [ + { + "collectionName": "find-shutdown-error", + "databaseName": "sdam-tests", + "documents": [] + } + ], + "tests": [ + { + "description": "Concurrent shutdown error on find", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "useMultipleMongoses": false, + "uriOptions": { + "retryWrites": false, + "retryReads": false, + "heartbeatFrequencyMS": 500, + "appname": "shutdownErrorFindTest" + }, + "observeEvents": [ + "serverDescriptionChangedEvent", + "poolClearedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "sdam-tests" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "find-shutdown-error" + } + } + ] + } + }, + { + "name": "insertOne", + "object": "collection", + "arguments": { + "document": { + "_id": 1 + } + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "find" + ], + "appName": "shutdownErrorFindTest", + "errorCode": 91, + "blockConnection": true, + "blockTimeMS": 500 + } + }, + "client": "setupClient" + } + }, + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "thread": { + "id": "thread0" + } + }, + { + "thread": { + "id": "thread1" + } + } + ] + } + }, + { + "name": "runOnThread", + "object": "testRunner", + "arguments": { + "thread": "thread0", + "operation": { + "name": "find", + "object": "collection", + "arguments": { + "filter": { + "_id": 1 + } + }, + "expectError": { + "isError": true + } + } + } + }, + { + "name": "runOnThread", + "object": "testRunner", + "arguments": { + "thread": "thread1", + "operation": { + "name": "find", + "object": "collection", + "arguments": { + "filter": { + "_id": 1 + } + }, + "expectError": { + "isError": true + } + } + } + }, + { + "name": "waitForThread", + "object": "testRunner", + "arguments": { + "thread": "thread0" + } + }, + { + "name": "waitForThread", + "object": "testRunner", + "arguments": { + "thread": "thread1" + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "serverDescriptionChangedEvent": { + "newDescription": { + "type": "Unknown" + } + } + }, + "count": 1 + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "poolClearedEvent": {} + }, + "count": 1 + } + }, + { + "name": "insertOne", + "object": "collection", + "arguments": { + "document": { + "_id": 4 + } + } + }, + { + "name": 
"assertEventCount", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "serverDescriptionChangedEvent": { + "newDescription": { + "type": "Unknown" + } + } + }, + "count": 1 + } + }, + { + "name": "assertEventCount", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "poolClearedEvent": {} + }, + "count": 1 + } + } + ], + "outcome": [ + { + "collectionName": "find-shutdown-error", + "databaseName": "sdam-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 4 + } + ] + } + ] + } + ] +} diff --git a/test/discovery_and_monitoring/unified/hello-command-error.json b/test/discovery_and_monitoring/unified/hello-command-error.json new file mode 100644 index 0000000000..7d6046b76f --- /dev/null +++ b/test/discovery_and_monitoring/unified/hello-command-error.json @@ -0,0 +1,376 @@ +{ + "description": "hello-command-error", + "schemaVersion": "1.10", + "runOnRequirements": [ + { + "minServerVersion": "4.9", + "serverless": "forbid", + "topologies": [ + "single", + "replicaset", + "sharded" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "setupClient", + "useMultipleMongoses": false + } + } + ], + "initialData": [ + { + "collectionName": "hello-command-error", + "databaseName": "sdam-tests", + "documents": [] + } + ], + "tests": [ + { + "description": "Command error on Monitor handshake", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "setupClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 4 + }, + "data": { + "failCommands": [ + "hello", + "isMaster" + ], + "appName": "commandErrorHandshakeTest", + "closeConnection": false, + "errorCode": 91 + } + } + } + }, + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "useMultipleMongoses": false, + "observeEvents": [ + "serverDescriptionChangedEvent", + "poolClearedEvent", + "commandStartedEvent" + ], + "uriOptions": { + "retryWrites": false, + "connectTimeoutMS": 250, + "heartbeatFrequencyMS": 500, + "appname": "commandErrorHandshakeTest" + } + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "sdam-tests" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "hello-command-error" + } + } + ] + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "serverDescriptionChangedEvent": { + "newDescription": { + "type": "Unknown" + } + } + }, + "count": 1 + } + }, + { + "name": "insertMany", + "object": "collection", + "arguments": { + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "command", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "hello-command-error", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + }, + "commandName": "insert", + "databaseName": "sdam-tests" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "hello-command-error", + "databaseName": "sdam-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ] + }, + { + "description": "Command error on Monitor check", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent", + "serverDescriptionChangedEvent", + 
"poolClearedEvent" + ], + "uriOptions": { + "retryWrites": false, + "connectTimeoutMS": 1000, + "heartbeatFrequencyMS": 500, + "appname": "commandErrorCheckTest" + } + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "sdam-tests" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "hello-command-error" + } + } + ] + } + }, + { + "name": "insertMany", + "object": "collection", + "arguments": { + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "hello", + "isMaster" + ], + "appName": "commandErrorCheckTest", + "closeConnection": false, + "blockConnection": true, + "blockTimeMS": 750, + "errorCode": 91 + } + }, + "client": "setupClient" + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "serverDescriptionChangedEvent": { + "newDescription": { + "type": "Unknown" + } + } + }, + "count": 1 + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "poolClearedEvent": {} + }, + "count": 1 + } + }, + { + "name": "insertMany", + "object": "collection", + "arguments": { + "documents": [ + { + "_id": 3 + }, + { + "_id": 4 + } + ] + } + }, + { + "name": "assertEventCount", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "serverDescriptionChangedEvent": { + "newDescription": { + "type": "Unknown" + } + } + }, + "count": 1 + } + }, + { + "name": "assertEventCount", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "poolClearedEvent": {} + }, + "count": 1 + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "command", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "hello-command-error", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + }, + "commandName": "insert", + "databaseName": "sdam-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "hello-command-error", + "documents": [ + { + "_id": 3 + }, + { + "_id": 4 + } + ] + }, + "commandName": "insert", + "databaseName": "sdam-tests" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "hello-command-error", + "databaseName": "sdam-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 3 + }, + { + "_id": 4 + } + ] + } + ] + } + ] +} diff --git a/test/discovery_and_monitoring/unified/hello-network-error.json b/test/discovery_and_monitoring/unified/hello-network-error.json new file mode 100644 index 0000000000..f44b26a9f9 --- /dev/null +++ b/test/discovery_and_monitoring/unified/hello-network-error.json @@ -0,0 +1,346 @@ +{ + "description": "hello-network-error", + "schemaVersion": "1.10", + "runOnRequirements": [ + { + "minServerVersion": "4.9", + "serverless": "forbid", + "topologies": [ + "single", + "replicaset", + "sharded" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "setupClient", + "useMultipleMongoses": false + } + } + ], + "initialData": [ + { + "collectionName": "hello-network-error", + "databaseName": "sdam-tests", + "documents": [] + } + ], + "tests": [ + { + "description": "Network error on Monitor handshake", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "setupClient", + "failPoint": { + 
"configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "hello", + "isMaster" + ], + "appName": "networkErrorHandshakeTest", + "closeConnection": true + } + } + } + }, + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent", + "serverDescriptionChangedEvent", + "poolClearedEvent" + ], + "uriOptions": { + "retryWrites": false, + "connectTimeoutMS": 250, + "heartbeatFrequencyMS": 500, + "appname": "networkErrorHandshakeTest" + } + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "sdam-tests" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "hello-network-error" + } + } + ] + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "serverDescriptionChangedEvent": { + "newDescription": { + "type": "Unknown" + } + } + }, + "count": 1 + } + }, + { + "name": "insertMany", + "object": "collection", + "arguments": { + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "command", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "hello-network-error", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + }, + "commandName": "insert", + "databaseName": "sdam-tests" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "hello-network-error", + "databaseName": "sdam-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ] + }, + { + "description": "Network error on Monitor check", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent", + "serverDescriptionChangedEvent", + "poolClearedEvent" + ], + "uriOptions": { + "retryWrites": false, + "connectTimeoutMS": 250, + "heartbeatFrequencyMS": 500, + "appname": "networkErrorCheckTest" + } + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "sdam-tests" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "hello-network-error" + } + } + ] + } + }, + { + "name": "insertMany", + "object": "collection", + "arguments": { + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 4 + }, + "data": { + "failCommands": [ + "hello", + "isMaster" + ], + "appName": "networkErrorCheckTest", + "closeConnection": true + } + }, + "client": "setupClient" + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "serverDescriptionChangedEvent": { + "newDescription": { + "type": "Unknown" + } + } + }, + "count": 1 + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "poolClearedEvent": {} + }, + "count": 1 + } + }, + { + "name": "insertMany", + "object": "collection", + "arguments": { + "documents": [ + { + "_id": 3 + }, + { + "_id": 4 + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "command", + "events": [ + { + "commandStartedEvent": { + 
"command": { + "insert": "hello-network-error", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + }, + "commandName": "insert", + "databaseName": "sdam-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "hello-network-error", + "documents": [ + { + "_id": 3 + }, + { + "_id": 4 + } + ] + }, + "commandName": "insert", + "databaseName": "sdam-tests" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "hello-network-error", + "databaseName": "sdam-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 3 + }, + { + "_id": 4 + } + ] + } + ] + } + ] +} diff --git a/test/discovery_and_monitoring/unified/hello-timeout.json b/test/discovery_and_monitoring/unified/hello-timeout.json new file mode 100644 index 0000000000..dfa6b48d66 --- /dev/null +++ b/test/discovery_and_monitoring/unified/hello-timeout.json @@ -0,0 +1,514 @@ +{ + "description": "hello-timeout", + "schemaVersion": "1.10", + "runOnRequirements": [ + { + "minServerVersion": "4.4", + "serverless": "forbid", + "topologies": [ + "single", + "replicaset", + "sharded" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "setupClient", + "useMultipleMongoses": false + } + } + ], + "initialData": [ + { + "collectionName": "hello-timeout", + "databaseName": "sdam-tests", + "documents": [] + } + ], + "tests": [ + { + "description": "Network timeout on Monitor handshake", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "setupClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "hello", + "isMaster" + ], + "appName": "timeoutMonitorHandshakeTest", + "blockConnection": true, + "blockTimeMS": 1000 + } + } + } + }, + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent", + "serverDescriptionChangedEvent", + "poolClearedEvent" + ], + "uriOptions": { + "retryWrites": false, + "connectTimeoutMS": 250, + "heartbeatFrequencyMS": 500, + "appname": "timeoutMonitorHandshakeTest" + } + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "sdam-tests" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "hello-timeout" + } + } + ] + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "serverDescriptionChangedEvent": { + "newDescription": { + "type": "Unknown" + } + } + }, + "count": 1 + } + }, + { + "name": "insertMany", + "object": "collection", + "arguments": { + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "command", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "hello-timeout", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + }, + "commandName": "insert", + "databaseName": "sdam-tests" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "hello-timeout", + "databaseName": "sdam-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ] + }, + { + "description": "Network timeout on Monitor check", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent", + 
"serverDescriptionChangedEvent", + "poolClearedEvent" + ], + "uriOptions": { + "retryWrites": false, + "connectTimeoutMS": 750, + "heartbeatFrequencyMS": 500, + "appname": "timeoutMonitorCheckTest" + } + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "sdam-tests" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "hello-timeout" + } + } + ] + } + }, + { + "name": "insertMany", + "object": "collection", + "arguments": { + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 4 + }, + "data": { + "failCommands": [ + "hello", + "isMaster" + ], + "appName": "timeoutMonitorCheckTest", + "blockConnection": true, + "blockTimeMS": 1000 + } + }, + "client": "setupClient" + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "serverDescriptionChangedEvent": { + "newDescription": { + "type": "Unknown" + } + } + }, + "count": 1 + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "poolClearedEvent": {} + }, + "count": 1 + } + }, + { + "name": "insertMany", + "object": "collection", + "arguments": { + "documents": [ + { + "_id": 3 + }, + { + "_id": 4 + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "command", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "hello-timeout", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + }, + "commandName": "insert", + "databaseName": "sdam-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "hello-timeout", + "documents": [ + { + "_id": 3 + }, + { + "_id": 4 + } + ] + }, + "commandName": "insert", + "databaseName": "sdam-tests" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "hello-timeout", + "databaseName": "sdam-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 3 + }, + { + "_id": 4 + } + ] + } + ] + }, + { + "description": "Driver extends timeout while streaming", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "observeEvents": [ + "commandStartedEvent", + "serverDescriptionChangedEvent", + "poolClearedEvent" + ], + "uriOptions": { + "retryWrites": false, + "connectTimeoutMS": 250, + "heartbeatFrequencyMS": 500, + "appname": "extendsTimeoutTest" + } + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "sdam-tests" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "hello-timeout" + } + } + ] + } + }, + { + "name": "insertMany", + "object": "collection", + "arguments": { + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + }, + { + "name": "wait", + "object": "testRunner", + "arguments": { + "ms": 2000 + } + }, + { + "name": "insertMany", + "object": "collection", + "arguments": { + "documents": [ + { + "_id": 3 + }, + { + "_id": 4 + } + ] + } + }, + { + "name": "assertEventCount", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "serverDescriptionChangedEvent": { + "newDescription": { + "type": "Unknown" + } + } + }, + "count": 0 + } + }, + { + "name": "assertEventCount", + "object": "testRunner", + "arguments": { + "client": "client", + 
"event": { + "poolClearedEvent": {} + }, + "count": 0 + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "command", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "hello-timeout", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + }, + "commandName": "insert", + "databaseName": "sdam-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "hello-timeout", + "documents": [ + { + "_id": 3 + }, + { + "_id": 4 + } + ] + }, + "commandName": "insert", + "databaseName": "sdam-tests" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "hello-timeout", + "databaseName": "sdam-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 3 + }, + { + "_id": 4 + } + ] + } + ] + } + ] +} diff --git a/test/discovery_and_monitoring/unified/insert-network-error.json b/test/discovery_and_monitoring/unified/insert-network-error.json new file mode 100644 index 0000000000..e4ba6684ae --- /dev/null +++ b/test/discovery_and_monitoring/unified/insert-network-error.json @@ -0,0 +1,246 @@ +{ + "description": "insert-network-error", + "schemaVersion": "1.10", + "runOnRequirements": [ + { + "minServerVersion": "4.4", + "serverless": "forbid", + "topologies": [ + "single", + "replicaset", + "sharded" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "setupClient", + "useMultipleMongoses": false + } + } + ], + "initialData": [ + { + "collectionName": "insert-network-error", + "databaseName": "sdam-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ], + "tests": [ + { + "description": "Reset server and pool after network error on insert", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "setupClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "closeConnection": true, + "appName": "insertNetworkErrorTest" + } + } + } + }, + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "observeEvents": [ + "commandStartedEvent", + "serverDescriptionChangedEvent", + "poolClearedEvent" + ], + "uriOptions": { + "retryWrites": false, + "appname": "insertNetworkErrorTest" + }, + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "sdam-tests" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "insert-network-error" + } + } + ] + } + }, + { + "name": "insertMany", + "object": "collection", + "arguments": { + "documents": [ + { + "_id": 3 + }, + { + "_id": 4 + } + ] + }, + "expectError": { + "isError": true + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "serverDescriptionChangedEvent": { + "newDescription": { + "type": "Unknown" + } + } + }, + "count": 1 + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "poolClearedEvent": {} + }, + "count": 1 + } + }, + { + "name": "insertMany", + "object": "collection", + "arguments": { + "documents": [ + { + "_id": 5 + }, + { + "_id": 6 + } + ] + } + }, + { + "name": "assertEventCount", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "serverDescriptionChangedEvent": { + "newDescription": { + "type": "Unknown" + } + } + }, + "count": 1 + } + }, + { + "name": 
"assertEventCount", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "poolClearedEvent": {} + }, + "count": 1 + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "command", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "insert-network-error", + "documents": [ + { + "_id": 3 + }, + { + "_id": 4 + } + ] + }, + "commandName": "insert", + "databaseName": "sdam-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "insert-network-error", + "documents": [ + { + "_id": 5 + }, + { + "_id": 6 + } + ] + }, + "commandName": "insert", + "databaseName": "sdam-tests" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "insert-network-error", + "databaseName": "sdam-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 5 + }, + { + "_id": 6 + } + ] + } + ] + } + ] +} diff --git a/test/discovery_and_monitoring/unified/insert-shutdown-error.json b/test/discovery_and_monitoring/unified/insert-shutdown-error.json new file mode 100644 index 0000000000..3c724fa5e4 --- /dev/null +++ b/test/discovery_and_monitoring/unified/insert-shutdown-error.json @@ -0,0 +1,250 @@ +{ + "description": "insert-shutdown-error", + "schemaVersion": "1.10", + "runOnRequirements": [ + { + "minServerVersion": "4.4", + "serverless": "forbid", + "topologies": [ + "single", + "replicaset", + "sharded" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "setupClient", + "useMultipleMongoses": false + } + } + ], + "initialData": [ + { + "collectionName": "insert-shutdown-error", + "databaseName": "sdam-tests", + "documents": [] + } + ], + "tests": [ + { + "description": "Concurrent shutdown error on insert", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "useMultipleMongoses": false, + "uriOptions": { + "retryWrites": false, + "heartbeatFrequencyMS": 500, + "appname": "shutdownErrorInsertTest" + }, + "observeEvents": [ + "serverDescriptionChangedEvent", + "poolClearedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "sdam-tests" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "insert-shutdown-error" + } + } + ] + } + }, + { + "name": "insertOne", + "object": "collection", + "arguments": { + "document": { + "_id": 1 + } + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "insert" + ], + "appName": "shutdownErrorInsertTest", + "errorCode": 91, + "blockConnection": true, + "blockTimeMS": 500 + } + }, + "client": "setupClient" + } + }, + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "thread": { + "id": "thread0" + } + }, + { + "thread": { + "id": "thread1" + } + } + ] + } + }, + { + "name": "runOnThread", + "object": "testRunner", + "arguments": { + "thread": "thread0", + "operation": { + "name": "insertOne", + "object": "collection", + "arguments": { + "document": { + "_id": 2 + } + }, + "expectError": { + "isError": true + } + } + } + }, + { + "name": "runOnThread", + "object": "testRunner", + "arguments": { + "thread": "thread1", + "operation": { + "name": "insertOne", + "object": "collection", + "arguments": { + "document": { + "_id": 3 + } + }, + "expectError": { + "isError": true + } + } + } + }, + { + "name": 
"waitForThread", + "object": "testRunner", + "arguments": { + "thread": "thread0" + } + }, + { + "name": "waitForThread", + "object": "testRunner", + "arguments": { + "thread": "thread1" + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "serverDescriptionChangedEvent": { + "newDescription": { + "type": "Unknown" + } + } + }, + "count": 1 + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "poolClearedEvent": {} + }, + "count": 1 + } + }, + { + "name": "insertOne", + "object": "collection", + "arguments": { + "document": { + "_id": 4 + } + } + }, + { + "name": "assertEventCount", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "serverDescriptionChangedEvent": { + "newDescription": { + "type": "Unknown" + } + } + }, + "count": 1 + } + }, + { + "name": "assertEventCount", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "poolClearedEvent": {} + }, + "count": 1 + } + } + ], + "outcome": [ + { + "collectionName": "insert-shutdown-error", + "databaseName": "sdam-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 4 + } + ] + } + ] + } + ] +} diff --git a/test/discovery_and_monitoring/unified/minPoolSize-error.json b/test/discovery_and_monitoring/unified/minPoolSize-error.json new file mode 100644 index 0000000000..0234ac9929 --- /dev/null +++ b/test/discovery_and_monitoring/unified/minPoolSize-error.json @@ -0,0 +1,177 @@ +{ + "description": "minPoolSize-error", + "schemaVersion": "1.10", + "runOnRequirements": [ + { + "minServerVersion": "4.9", + "serverless": "forbid", + "topologies": [ + "single" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "setupClient", + "useMultipleMongoses": false + } + } + ], + "initialData": [ + { + "collectionName": "sdam-minPoolSize-error", + "databaseName": "sdam-tests", + "documents": [] + } + ], + "tests": [ + { + "description": "Network error on minPoolSize background creation", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "setupClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "skip": 3 + }, + "data": { + "failCommands": [ + "hello", + "isMaster" + ], + "appName": "SDAMminPoolSizeError", + "closeConnection": true + } + } + } + }, + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "useMultipleMongoses": false, + "observeEvents": [ + "serverDescriptionChangedEvent", + "poolClearedEvent", + "poolReadyEvent" + ], + "uriOptions": { + "heartbeatFrequencyMS": 10000, + "appname": "SDAMminPoolSizeError", + "minPoolSize": 10, + "serverSelectionTimeoutMS": 1000 + } + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "sdam-tests" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "sdam-minPoolSize-error" + } + } + ] + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "poolReadyEvent": {} + }, + "count": 1 + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "poolClearedEvent": {} + }, + "count": 1 + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "serverDescriptionChangedEvent": { + "newDescription": { + "type": 
"Unknown" + } + } + }, + "count": 1 + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "command": { + "ping": {} + }, + "commandName": "ping" + }, + "expectError": { + "isError": true + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "failPoint": { + "configureFailPoint": "failCommand", + "mode": "off" + }, + "client": "setupClient" + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "command": { + "ping": 1 + }, + "commandName": "ping" + } + }, + { + "name": "assertEventCount", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "poolReadyEvent": {} + }, + "count": 2 + } + } + ] + } + ] +} diff --git a/test/discovery_and_monitoring_integration/pool-cleared-error.json b/test/discovery_and_monitoring/unified/pool-cleared-error.json similarity index 60% rename from test/discovery_and_monitoring_integration/pool-cleared-error.json rename to test/discovery_and_monitoring/unified/pool-cleared-error.json index 52456f9e13..9a7dfd901c 100644 --- a/test/discovery_and_monitoring_integration/pool-cleared-error.json +++ b/test/discovery_and_monitoring/unified/pool-cleared-error.json @@ -1,25 +1,72 @@ { - "runOn": [ + "description": "pool-cleared-error", + "schemaVersion": "1.10", + "runOnRequirements": [ { "minServerVersion": "4.9", - "topology": [ + "serverless": "forbid", + "topologies": [ "replicaset", "sharded" ] } ], - "database_name": "sdam-tests", - "collection_name": "pool-cleared-error", - "data": [], + "createEntities": [ + { + "client": { + "id": "setupClient", + "useMultipleMongoses": false + } + } + ], + "initialData": [ + { + "collectionName": "pool-cleared-error", + "databaseName": "sdam-tests", + "documents": [] + } + ], "tests": [ { "description": "PoolClearedError does not mark server unknown", - "clientOptions": { - "retryWrites": true, - "maxPoolSize": 1, - "appname": "poolClearedErrorTest" - }, "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "useMultipleMongoses": false, + "observeEvents": [ + "serverDescriptionChangedEvent", + "poolClearedEvent" + ], + "uriOptions": { + "retryWrites": true, + "maxPoolSize": 1, + "appname": "poolClearedErrorTest" + } + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "sdam-tests" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "pool-cleared-error" + } + } + ] + } + }, { "name": "insertOne", "object": "collection", @@ -30,7 +77,7 @@ } }, { - "name": "configureFailPoint", + "name": "failPoint", "object": "testRunner", "arguments": { "failPoint": { @@ -47,56 +94,53 @@ "closeConnection": true, "appName": "poolClearedErrorTest" } - } - } - }, - { - "name": "startThread", - "object": "testRunner", - "arguments": { - "name": "thread1" - } - }, - { - "name": "startThread", - "object": "testRunner", - "arguments": { - "name": "thread2" - } - }, - { - "name": "startThread", - "object": "testRunner", - "arguments": { - "name": "thread3" - } - }, - { - "name": "startThread", - "object": "testRunner", - "arguments": { - "name": "thread4" - } - }, - { - "name": "startThread", - "object": "testRunner", - "arguments": { - "name": "thread5" + }, + "client": "setupClient" } }, { - "name": "startThread", + "name": "createEntities", "object": "testRunner", "arguments": { - "name": "thread6" + "entities": [ + { + "thread": { + "id": "thread0" + } + }, + { + "thread": { + 
"id": "thread1" + } + }, + { + "thread": { + "id": "thread2" + } + }, + { + "thread": { + "id": "thread3" + } + }, + { + "thread": { + "id": "thread4" + } + }, + { + "thread": { + "id": "thread5" + } + } + ] } }, { "name": "runOnThread", "object": "testRunner", "arguments": { - "name": "thread1", + "thread": "thread0", "operation": { "name": "insertOne", "object": "collection", @@ -112,7 +156,7 @@ "name": "runOnThread", "object": "testRunner", "arguments": { - "name": "thread2", + "thread": "thread1", "operation": { "name": "insertOne", "object": "collection", @@ -128,7 +172,7 @@ "name": "runOnThread", "object": "testRunner", "arguments": { - "name": "thread3", + "thread": "thread2", "operation": { "name": "insertOne", "object": "collection", @@ -144,7 +188,7 @@ "name": "runOnThread", "object": "testRunner", "arguments": { - "name": "thread4", + "thread": "thread3", "operation": { "name": "insertOne", "object": "collection", @@ -160,7 +204,7 @@ "name": "runOnThread", "object": "testRunner", "arguments": { - "name": "thread5", + "thread": "thread4", "operation": { "name": "insertOne", "object": "collection", @@ -176,7 +220,7 @@ "name": "runOnThread", "object": "testRunner", "arguments": { - "name": "thread6", + "thread": "thread5", "operation": { "name": "insertOne", "object": "collection", @@ -192,49 +236,56 @@ "name": "waitForThread", "object": "testRunner", "arguments": { - "name": "thread1" + "thread": "thread0" } }, { "name": "waitForThread", "object": "testRunner", "arguments": { - "name": "thread2" + "thread": "thread1" } }, { "name": "waitForThread", "object": "testRunner", "arguments": { - "name": "thread3" + "thread": "thread2" } }, { "name": "waitForThread", "object": "testRunner", "arguments": { - "name": "thread4" + "thread": "thread3" } }, { "name": "waitForThread", "object": "testRunner", "arguments": { - "name": "thread5" + "thread": "thread4" } }, { "name": "waitForThread", "object": "testRunner", "arguments": { - "name": "thread6" + "thread": "thread5" } }, { "name": "waitForEvent", "object": "testRunner", "arguments": { - "event": "ServerMarkedUnknownEvent", + "client": "client", + "event": { + "serverDescriptionChangedEvent": { + "newDescription": { + "type": "Unknown" + } + } + }, "count": 1 } }, @@ -242,7 +293,10 @@ "name": "waitForEvent", "object": "testRunner", "arguments": { - "event": "PoolClearedEvent", + "client": "client", + "event": { + "poolClearedEvent": {} + }, "count": 1 } }, @@ -259,7 +313,14 @@ "name": "assertEventCount", "object": "testRunner", "arguments": { - "event": "ServerMarkedUnknownEvent", + "client": "client", + "event": { + "serverDescriptionChangedEvent": { + "newDescription": { + "type": "Unknown" + } + } + }, "count": 1 } }, @@ -267,14 +328,19 @@ "name": "assertEventCount", "object": "testRunner", "arguments": { - "event": "PoolClearedEvent", + "client": "client", + "event": { + "poolClearedEvent": {} + }, "count": 1 } } ], - "outcome": { - "collection": { - "data": [ + "outcome": [ + { + "collectionName": "pool-cleared-error", + "databaseName": "sdam-tests", + "documents": [ { "_id": 1 }, @@ -301,7 +367,7 @@ } ] } - } + ] } ] } diff --git a/test/discovery_and_monitoring/unified/rediscover-quickly-after-step-down.json b/test/discovery_and_monitoring/unified/rediscover-quickly-after-step-down.json new file mode 100644 index 0000000000..0ad575cc9d --- /dev/null +++ b/test/discovery_and_monitoring/unified/rediscover-quickly-after-step-down.json @@ -0,0 +1,242 @@ +{ + "description": "rediscover-quickly-after-step-down", + "schemaVersion": 
"1.10", + "runOnRequirements": [ + { + "minServerVersion": "4.4", + "serverless": "forbid", + "topologies": [ + "replicaset" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "setupClient" + } + }, + { + "database": { + "id": "adminDatabase", + "client": "setupClient", + "databaseName": "admin" + } + } + ], + "initialData": [ + { + "collectionName": "test-replSetStepDown", + "databaseName": "sdam-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ], + "tests": [ + { + "description": "Rediscover quickly after replSetStepDown", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "observeEvents": [ + "poolClearedEvent", + "commandStartedEvent" + ], + "uriOptions": { + "appname": "replSetStepDownTest", + "heartbeatFrequencyMS": 60000, + "serverSelectionTimeoutMS": 5000, + "w": "majority" + } + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "sdam-tests" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "test-replSetStepDown" + } + } + ] + } + }, + { + "name": "insertMany", + "object": "collection", + "arguments": { + "documents": [ + { + "_id": 3 + }, + { + "_id": 4 + } + ] + } + }, + { + "name": "recordTopologyDescription", + "object": "testRunner", + "arguments": { + "client": "client", + "id": "topologyDescription" + } + }, + { + "name": "assertTopologyType", + "object": "testRunner", + "arguments": { + "topologyDescription": "topologyDescription", + "topologyType": "ReplicaSetWithPrimary" + } + }, + { + "name": "runCommand", + "object": "adminDatabase", + "arguments": { + "command": { + "replSetFreeze": 0 + }, + "readPreference": { + "mode": "Secondary" + }, + "commandName": "replSetFreeze" + } + }, + { + "name": "runCommand", + "object": "adminDatabase", + "arguments": { + "command": { + "replSetStepDown": 30, + "secondaryCatchUpPeriodSecs": 30, + "force": false + }, + "commandName": "replSetStepDown" + } + }, + { + "name": "waitForPrimaryChange", + "object": "testRunner", + "arguments": { + "client": "client", + "priorTopologyDescription": "topologyDescription", + "timeoutMS": 15000 + } + }, + { + "name": "insertMany", + "object": "collection", + "arguments": { + "documents": [ + { + "_id": 5 + }, + { + "_id": 6 + } + ] + } + }, + { + "name": "assertEventCount", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "poolClearedEvent": {} + }, + "count": 0 + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "command", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test-replSetStepDown", + "documents": [ + { + "_id": 3 + }, + { + "_id": 4 + } + ] + }, + "commandName": "insert", + "databaseName": "sdam-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "test-replSetStepDown", + "documents": [ + { + "_id": 5 + }, + { + "_id": 6 + } + ] + }, + "commandName": "insert", + "databaseName": "sdam-tests" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test-replSetStepDown", + "databaseName": "sdam-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 3 + }, + { + "_id": 4 + }, + { + "_id": 5 + }, + { + "_id": 6 + } + ] + } + ] + } + ] +} diff --git a/test/discovery_and_monitoring_integration/auth-error.json b/test/discovery_and_monitoring_integration/auth-error.json deleted file mode 100644 index 064d660e32..0000000000 --- 
a/test/discovery_and_monitoring_integration/auth-error.json +++ /dev/null @@ -1,140 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "4.4", - "authEnabled": true - } - ], - "database_name": "sdam-tests", - "collection_name": "auth-error", - "data": [ - { - "_id": 1 - }, - { - "_id": 2 - } - ], - "tests": [ - { - "description": "Reset server and pool after AuthenticationFailure error", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "saslContinue" - ], - "appName": "authErrorTest", - "errorCode": 18 - } - }, - "clientOptions": { - "retryWrites": false, - "appname": "authErrorTest" - }, - "operations": [ - { - "name": "insertMany", - "object": "collection", - "arguments": { - "documents": [ - { - "_id": 3 - }, - { - "_id": 4 - } - ] - }, - "error": true - }, - { - "name": "waitForEvent", - "object": "testRunner", - "arguments": { - "event": "ServerMarkedUnknownEvent", - "count": 1 - } - }, - { - "name": "waitForEvent", - "object": "testRunner", - "arguments": { - "event": "PoolClearedEvent", - "count": 1 - } - }, - { - "name": "insertMany", - "object": "collection", - "arguments": { - "documents": [ - { - "_id": 5 - }, - { - "_id": 6 - } - ] - } - }, - { - "name": "assertEventCount", - "object": "testRunner", - "arguments": { - "event": "ServerMarkedUnknownEvent", - "count": 1 - } - }, - { - "name": "assertEventCount", - "object": "testRunner", - "arguments": { - "event": "PoolClearedEvent", - "count": 1 - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "auth-error", - "documents": [ - { - "_id": 5 - }, - { - "_id": 6 - } - ] - }, - "command_name": "insert", - "database_name": "sdam-tests" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - }, - { - "_id": 2 - }, - { - "_id": 5 - }, - { - "_id": 6 - } - ] - } - } - } - ] -} diff --git a/test/discovery_and_monitoring_integration/auth-misc-command-error.json b/test/discovery_and_monitoring_integration/auth-misc-command-error.json deleted file mode 100644 index 70dd59251d..0000000000 --- a/test/discovery_and_monitoring_integration/auth-misc-command-error.json +++ /dev/null @@ -1,140 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "4.4", - "authEnabled": true - } - ], - "database_name": "sdam-tests", - "collection_name": "auth-misc-error", - "data": [ - { - "_id": 1 - }, - { - "_id": 2 - } - ], - "tests": [ - { - "description": "Reset server and pool after misc command error", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "saslContinue" - ], - "appName": "authMiscErrorTest", - "errorCode": 1 - } - }, - "clientOptions": { - "retryWrites": false, - "appname": "authMiscErrorTest" - }, - "operations": [ - { - "name": "insertMany", - "object": "collection", - "arguments": { - "documents": [ - { - "_id": 3 - }, - { - "_id": 4 - } - ] - }, - "error": true - }, - { - "name": "waitForEvent", - "object": "testRunner", - "arguments": { - "event": "ServerMarkedUnknownEvent", - "count": 1 - } - }, - { - "name": "waitForEvent", - "object": "testRunner", - "arguments": { - "event": "PoolClearedEvent", - "count": 1 - } - }, - { - "name": "insertMany", - "object": "collection", - "arguments": { - "documents": [ - { - "_id": 5 - }, - { - "_id": 6 - } - ] - } - }, - { - "name": "assertEventCount", - "object": "testRunner", - "arguments": { - "event": "ServerMarkedUnknownEvent", - "count": 1 - } - }, - { - "name": "assertEventCount", - "object": 
"testRunner", - "arguments": { - "event": "PoolClearedEvent", - "count": 1 - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "auth-misc-error", - "documents": [ - { - "_id": 5 - }, - { - "_id": 6 - } - ] - }, - "command_name": "insert", - "database_name": "sdam-tests" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - }, - { - "_id": 2 - }, - { - "_id": 5 - }, - { - "_id": 6 - } - ] - } - } - } - ] -} diff --git a/test/discovery_and_monitoring_integration/auth-network-error.json b/test/discovery_and_monitoring_integration/auth-network-error.json deleted file mode 100644 index a75a398c5e..0000000000 --- a/test/discovery_and_monitoring_integration/auth-network-error.json +++ /dev/null @@ -1,140 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "4.4", - "authEnabled": true - } - ], - "database_name": "sdam-tests", - "collection_name": "auth-network-error", - "data": [ - { - "_id": 1 - }, - { - "_id": 2 - } - ], - "tests": [ - { - "description": "Reset server and pool after network error during authentication", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "saslContinue" - ], - "closeConnection": true, - "appName": "authNetworkErrorTest" - } - }, - "clientOptions": { - "retryWrites": false, - "appname": "authNetworkErrorTest" - }, - "operations": [ - { - "name": "insertMany", - "object": "collection", - "arguments": { - "documents": [ - { - "_id": 3 - }, - { - "_id": 4 - } - ] - }, - "error": true - }, - { - "name": "waitForEvent", - "object": "testRunner", - "arguments": { - "event": "ServerMarkedUnknownEvent", - "count": 1 - } - }, - { - "name": "waitForEvent", - "object": "testRunner", - "arguments": { - "event": "PoolClearedEvent", - "count": 1 - } - }, - { - "name": "insertMany", - "object": "collection", - "arguments": { - "documents": [ - { - "_id": 5 - }, - { - "_id": 6 - } - ] - } - }, - { - "name": "assertEventCount", - "object": "testRunner", - "arguments": { - "event": "ServerMarkedUnknownEvent", - "count": 1 - } - }, - { - "name": "assertEventCount", - "object": "testRunner", - "arguments": { - "event": "PoolClearedEvent", - "count": 1 - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "auth-network-error", - "documents": [ - { - "_id": 5 - }, - { - "_id": 6 - } - ] - }, - "command_name": "insert", - "database_name": "sdam-tests" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - }, - { - "_id": 2 - }, - { - "_id": 5 - }, - { - "_id": 6 - } - ] - } - } - } - ] -} diff --git a/test/discovery_and_monitoring_integration/auth-network-timeout-error.json b/test/discovery_and_monitoring_integration/auth-network-timeout-error.json deleted file mode 100644 index a4ee7d9eff..0000000000 --- a/test/discovery_and_monitoring_integration/auth-network-timeout-error.json +++ /dev/null @@ -1,143 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "4.4", - "authEnabled": true - } - ], - "database_name": "sdam-tests", - "collection_name": "auth-network-timeout-error", - "data": [ - { - "_id": 1 - }, - { - "_id": 2 - } - ], - "tests": [ - { - "description": "Reset server and pool after network timeout error during authentication", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "saslContinue" - ], - "blockConnection": true, - "blockTimeMS": 500, - "appName": "authNetworkTimeoutErrorTest" - } - }, - "clientOptions": { - 
"retryWrites": false, - "appname": "authNetworkTimeoutErrorTest", - "connectTimeoutMS": 250, - "socketTimeoutMS": 250 - }, - "operations": [ - { - "name": "insertMany", - "object": "collection", - "arguments": { - "documents": [ - { - "_id": 3 - }, - { - "_id": 4 - } - ] - }, - "error": true - }, - { - "name": "waitForEvent", - "object": "testRunner", - "arguments": { - "event": "ServerMarkedUnknownEvent", - "count": 1 - } - }, - { - "name": "waitForEvent", - "object": "testRunner", - "arguments": { - "event": "PoolClearedEvent", - "count": 1 - } - }, - { - "name": "insertMany", - "object": "collection", - "arguments": { - "documents": [ - { - "_id": 5 - }, - { - "_id": 6 - } - ] - } - }, - { - "name": "assertEventCount", - "object": "testRunner", - "arguments": { - "event": "ServerMarkedUnknownEvent", - "count": 1 - } - }, - { - "name": "assertEventCount", - "object": "testRunner", - "arguments": { - "event": "PoolClearedEvent", - "count": 1 - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "auth-network-timeout-error", - "documents": [ - { - "_id": 5 - }, - { - "_id": 6 - } - ] - }, - "command_name": "insert", - "database_name": "sdam-tests" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - }, - { - "_id": 2 - }, - { - "_id": 5 - }, - { - "_id": 6 - } - ] - } - } - } - ] -} diff --git a/test/discovery_and_monitoring_integration/auth-shutdown-error.json b/test/discovery_and_monitoring_integration/auth-shutdown-error.json deleted file mode 100644 index 2dab90e1c5..0000000000 --- a/test/discovery_and_monitoring_integration/auth-shutdown-error.json +++ /dev/null @@ -1,140 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "4.4", - "authEnabled": true - } - ], - "database_name": "sdam-tests", - "collection_name": "auth-shutdown-error", - "data": [ - { - "_id": 1 - }, - { - "_id": 2 - } - ], - "tests": [ - { - "description": "Reset server and pool after shutdown error during authentication", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "saslContinue" - ], - "appName": "authShutdownErrorTest", - "errorCode": 91 - } - }, - "clientOptions": { - "retryWrites": false, - "appname": "authShutdownErrorTest" - }, - "operations": [ - { - "name": "insertMany", - "object": "collection", - "arguments": { - "documents": [ - { - "_id": 3 - }, - { - "_id": 4 - } - ] - }, - "error": true - }, - { - "name": "waitForEvent", - "object": "testRunner", - "arguments": { - "event": "ServerMarkedUnknownEvent", - "count": 1 - } - }, - { - "name": "waitForEvent", - "object": "testRunner", - "arguments": { - "event": "PoolClearedEvent", - "count": 1 - } - }, - { - "name": "insertMany", - "object": "collection", - "arguments": { - "documents": [ - { - "_id": 5 - }, - { - "_id": 6 - } - ] - } - }, - { - "name": "assertEventCount", - "object": "testRunner", - "arguments": { - "event": "ServerMarkedUnknownEvent", - "count": 1 - } - }, - { - "name": "assertEventCount", - "object": "testRunner", - "arguments": { - "event": "PoolClearedEvent", - "count": 1 - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "auth-shutdown-error", - "documents": [ - { - "_id": 5 - }, - { - "_id": 6 - } - ] - }, - "command_name": "insert", - "database_name": "sdam-tests" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - }, - { - "_id": 2 - }, - { - "_id": 5 - }, - { - "_id": 6 - } - ] - } - } - } - ] -} diff --git 
a/test/discovery_and_monitoring_integration/cancel-server-check.json b/test/discovery_and_monitoring_integration/cancel-server-check.json deleted file mode 100644 index 9586350959..0000000000 --- a/test/discovery_and_monitoring_integration/cancel-server-check.json +++ /dev/null @@ -1,130 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "4.0", - "topology": [ - "replicaset" - ] - }, - { - "minServerVersion": "4.2", - "topology": [ - "sharded" - ] - } - ], - "database_name": "sdam-tests", - "collection_name": "cancel-server-check", - "data": [], - "tests": [ - { - "description": "Cancel server check", - "clientOptions": { - "retryWrites": true, - "heartbeatFrequencyMS": 10000, - "serverSelectionTimeoutMS": 5000, - "appname": "cancelServerCheckTest" - }, - "operations": [ - { - "name": "insertOne", - "object": "collection", - "arguments": { - "document": { - "_id": 1 - } - } - }, - { - "name": "configureFailPoint", - "object": "testRunner", - "arguments": { - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "insert" - ], - "closeConnection": true - } - } - } - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "document": { - "_id": 2 - } - }, - "result": { - "insertedId": 2 - } - }, - { - "name": "waitForEvent", - "object": "testRunner", - "arguments": { - "event": "ServerMarkedUnknownEvent", - "count": 1 - } - }, - { - "name": "waitForEvent", - "object": "testRunner", - "arguments": { - "event": "PoolClearedEvent", - "count": 1 - } - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "document": { - "_id": 3 - } - }, - "result": { - "insertedId": 3 - } - }, - { - "name": "assertEventCount", - "object": "testRunner", - "arguments": { - "event": "ServerMarkedUnknownEvent", - "count": 1 - } - }, - { - "name": "assertEventCount", - "object": "testRunner", - "arguments": { - "event": "PoolClearedEvent", - "count": 1 - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - }, - { - "_id": 2 - }, - { - "_id": 3 - } - ] - } - } - } - ] -} diff --git a/test/discovery_and_monitoring_integration/connectTimeoutMS.json b/test/discovery_and_monitoring_integration/connectTimeoutMS.json deleted file mode 100644 index 36a6dc4507..0000000000 --- a/test/discovery_and_monitoring_integration/connectTimeoutMS.json +++ /dev/null @@ -1,149 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "4.4" - } - ], - "database_name": "sdam-tests", - "collection_name": "connectTimeoutMS", - "data": [], - "tests": [ - { - "description": "connectTimeoutMS=0", - "clientOptions": { - "retryWrites": false, - "connectTimeoutMS": 0, - "heartbeatFrequencyMS": 500, - "appname": "connectTimeoutMS=0" - }, - "operations": [ - { - "name": "insertMany", - "object": "collection", - "arguments": { - "documents": [ - { - "_id": 1 - }, - { - "_id": 2 - } - ] - } - }, - { - "name": "configureFailPoint", - "object": "testRunner", - "arguments": { - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 2 - }, - "data": { - "failCommands": [ - "hello", - "isMaster" - ], - "appName": "connectTimeoutMS=0", - "blockConnection": true, - "blockTimeMS": 550 - } - } - } - }, - { - "name": "wait", - "object": "testRunner", - "arguments": { - "ms": 750 - } - }, - { - "name": "insertMany", - "object": "collection", - "arguments": { - "documents": [ - { - "_id": 3 - }, - { - "_id": 4 - } - ] - } - }, - { - "name": "assertEventCount", - "object": "testRunner", - "arguments": { - "event": 
"ServerMarkedUnknownEvent", - "count": 0 - } - }, - { - "name": "assertEventCount", - "object": "testRunner", - "arguments": { - "event": "PoolClearedEvent", - "count": 0 - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "connectTimeoutMS", - "documents": [ - { - "_id": 1 - }, - { - "_id": 2 - } - ] - }, - "command_name": "insert", - "database_name": "sdam-tests" - } - }, - { - "command_started_event": { - "command": { - "insert": "connectTimeoutMS", - "documents": [ - { - "_id": 3 - }, - { - "_id": 4 - } - ] - }, - "command_name": "insert", - "database_name": "sdam-tests" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - }, - { - "_id": 2 - }, - { - "_id": 3 - }, - { - "_id": 4 - } - ] - } - } - } - ] -} diff --git a/test/discovery_and_monitoring_integration/find-network-error.json b/test/discovery_and_monitoring_integration/find-network-error.json deleted file mode 100644 index 4db2634cd6..0000000000 --- a/test/discovery_and_monitoring_integration/find-network-error.json +++ /dev/null @@ -1,144 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "4.4" - } - ], - "database_name": "sdam-tests", - "collection_name": "find-network-error", - "data": [ - { - "_id": 1 - }, - { - "_id": 2 - } - ], - "tests": [ - { - "description": "Reset server and pool after network error on find", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "find" - ], - "closeConnection": true, - "appName": "findNetworkErrorTest" - } - }, - "clientOptions": { - "retryWrites": false, - "retryReads": false, - "appname": "findNetworkErrorTest" - }, - "operations": [ - { - "name": "find", - "object": "collection", - "arguments": { - "filter": { - "_id": 1 - } - }, - "error": true - }, - { - "name": "waitForEvent", - "object": "testRunner", - "arguments": { - "event": "ServerMarkedUnknownEvent", - "count": 1 - } - }, - { - "name": "waitForEvent", - "object": "testRunner", - "arguments": { - "event": "PoolClearedEvent", - "count": 1 - } - }, - { - "name": "insertMany", - "object": "collection", - "arguments": { - "documents": [ - { - "_id": 5 - }, - { - "_id": 6 - } - ] - } - }, - { - "name": "assertEventCount", - "object": "testRunner", - "arguments": { - "event": "ServerMarkedUnknownEvent", - "count": 1 - } - }, - { - "name": "assertEventCount", - "object": "testRunner", - "arguments": { - "event": "PoolClearedEvent", - "count": 1 - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "find": "find-network-error" - }, - "command_name": "find", - "database_name": "sdam-tests" - } - }, - { - "command_started_event": { - "command": { - "insert": "find-network-error", - "documents": [ - { - "_id": 5 - }, - { - "_id": 6 - } - ] - }, - "command_name": "insert", - "database_name": "sdam-tests" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - }, - { - "_id": 2 - }, - { - "_id": 5 - }, - { - "_id": 6 - } - ] - } - } - } - ] -} diff --git a/test/discovery_and_monitoring_integration/find-network-timeout-error.json b/test/discovery_and_monitoring_integration/find-network-timeout-error.json deleted file mode 100644 index c4e10b3a76..0000000000 --- a/test/discovery_and_monitoring_integration/find-network-timeout-error.json +++ /dev/null @@ -1,119 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "4.4" - } - ], - "database_name": "sdam-tests", - "collection_name": "find-network-timeout-error", - "data": [ - { - "_id": 1 - }, - { - "_id": 2 - } - ], - 
"tests": [ - { - "description": "Ignore network timeout error on find", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "find" - ], - "blockConnection": true, - "blockTimeMS": 500, - "appName": "findNetworkTimeoutErrorTest" - } - }, - "clientOptions": { - "retryWrites": false, - "retryReads": false, - "appname": "findNetworkTimeoutErrorTest", - "socketTimeoutMS": 250 - }, - "operations": [ - { - "name": "find", - "object": "collection", - "arguments": { - "filter": { - "_id": 1 - } - }, - "error": true - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "document": { - "_id": 3 - } - } - }, - { - "name": "assertEventCount", - "object": "testRunner", - "arguments": { - "event": "ServerMarkedUnknownEvent", - "count": 0 - } - }, - { - "name": "assertEventCount", - "object": "testRunner", - "arguments": { - "event": "PoolClearedEvent", - "count": 0 - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "find": "find-network-timeout-error" - }, - "command_name": "find", - "database_name": "sdam-tests" - } - }, - { - "command_started_event": { - "command": { - "insert": "find-network-timeout-error", - "documents": [ - { - "_id": 3 - } - ] - }, - "command_name": "insert", - "database_name": "sdam-tests" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - }, - { - "_id": 2 - }, - { - "_id": 3 - } - ] - } - } - } - ] -} diff --git a/test/discovery_and_monitoring_integration/find-shutdown-error.json b/test/discovery_and_monitoring_integration/find-shutdown-error.json deleted file mode 100644 index 65de8398b1..0000000000 --- a/test/discovery_and_monitoring_integration/find-shutdown-error.json +++ /dev/null @@ -1,168 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "4.4" - } - ], - "database_name": "sdam-tests", - "collection_name": "find-shutdown-error", - "data": [], - "tests": [ - { - "description": "Concurrent shutdown error on find", - "clientOptions": { - "retryWrites": false, - "retryReads": false, - "heartbeatFrequencyMS": 500, - "appname": "shutdownErrorFindTest" - }, - "operations": [ - { - "name": "insertOne", - "object": "collection", - "arguments": { - "document": { - "_id": 1 - } - } - }, - { - "name": "configureFailPoint", - "object": "testRunner", - "arguments": { - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 2 - }, - "data": { - "failCommands": [ - "find" - ], - "appName": "shutdownErrorFindTest", - "errorCode": 91, - "blockConnection": true, - "blockTimeMS": 500 - } - } - } - }, - { - "name": "startThread", - "object": "testRunner", - "arguments": { - "name": "thread1" - } - }, - { - "name": "startThread", - "object": "testRunner", - "arguments": { - "name": "thread2" - } - }, - { - "name": "runOnThread", - "object": "testRunner", - "arguments": { - "name": "thread1", - "operation": { - "name": "find", - "object": "collection", - "arguments": { - "filter": { - "_id": 1 - } - }, - "error": true - } - } - }, - { - "name": "runOnThread", - "object": "testRunner", - "arguments": { - "name": "thread2", - "operation": { - "name": "find", - "object": "collection", - "arguments": { - "filter": { - "_id": 1 - } - }, - "error": true - } - } - }, - { - "name": "waitForThread", - "object": "testRunner", - "arguments": { - "name": "thread1" - } - }, - { - "name": "waitForThread", - "object": "testRunner", - "arguments": { - "name": "thread2" - } - }, - { - "name": "waitForEvent", - "object": "testRunner", - 
"arguments": { - "event": "ServerMarkedUnknownEvent", - "count": 1 - } - }, - { - "name": "waitForEvent", - "object": "testRunner", - "arguments": { - "event": "PoolClearedEvent", - "count": 1 - } - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "document": { - "_id": 4 - } - } - }, - { - "name": "assertEventCount", - "object": "testRunner", - "arguments": { - "event": "ServerMarkedUnknownEvent", - "count": 1 - } - }, - { - "name": "assertEventCount", - "object": "testRunner", - "arguments": { - "event": "PoolClearedEvent", - "count": 1 - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - }, - { - "_id": 4 - } - ] - } - } - } - ] -} diff --git a/test/discovery_and_monitoring_integration/hello-command-error.json b/test/discovery_and_monitoring_integration/hello-command-error.json deleted file mode 100644 index d3bccd3900..0000000000 --- a/test/discovery_and_monitoring_integration/hello-command-error.json +++ /dev/null @@ -1,223 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "4.9" - } - ], - "database_name": "sdam-tests", - "collection_name": "hello-command-error", - "data": [], - "tests": [ - { - "description": "Command error on Monitor handshake", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 2 - }, - "data": { - "failCommands": [ - "hello", - "isMaster" - ], - "appName": "commandErrorHandshakeTest", - "closeConnection": false, - "errorCode": 91 - } - }, - "clientOptions": { - "retryWrites": false, - "connectTimeoutMS": 250, - "heartbeatFrequencyMS": 500, - "appname": "commandErrorHandshakeTest" - }, - "operations": [ - { - "name": "waitForEvent", - "object": "testRunner", - "arguments": { - "event": "ServerMarkedUnknownEvent", - "count": 1 - } - }, - { - "name": "insertMany", - "object": "collection", - "arguments": { - "documents": [ - { - "_id": 1 - }, - { - "_id": 2 - } - ] - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "hello-command-error", - "documents": [ - { - "_id": 1 - }, - { - "_id": 2 - } - ] - }, - "command_name": "insert", - "database_name": "sdam-tests" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - }, - { - "_id": 2 - } - ] - } - } - }, - { - "description": "Command error on Monitor check", - "clientOptions": { - "retryWrites": false, - "connectTimeoutMS": 1000, - "heartbeatFrequencyMS": 500, - "appname": "commandErrorCheckTest" - }, - "operations": [ - { - "name": "insertMany", - "object": "collection", - "arguments": { - "documents": [ - { - "_id": 1 - }, - { - "_id": 2 - } - ] - } - }, - { - "name": "configureFailPoint", - "object": "testRunner", - "arguments": { - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 4 - }, - "data": { - "failCommands": [ - "hello", - "isMaster" - ], - "appName": "commandErrorCheckTest", - "closeConnection": false, - "blockConnection": true, - "blockTimeMS": 750, - "errorCode": 91 - } - } - } - }, - { - "name": "waitForEvent", - "object": "testRunner", - "arguments": { - "event": "ServerMarkedUnknownEvent", - "count": 1 - } - }, - { - "name": "waitForEvent", - "object": "testRunner", - "arguments": { - "event": "PoolClearedEvent", - "count": 1 - } - }, - { - "name": "insertMany", - "object": "collection", - "arguments": { - "documents": [ - { - "_id": 3 - }, - { - "_id": 4 - } - ] - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "hello-command-error", - "documents": [ - { - "_id": 1 - }, - { - "_id": 2 - } - ] - 
}, - "command_name": "insert", - "database_name": "sdam-tests" - } - }, - { - "command_started_event": { - "command": { - "insert": "hello-command-error", - "documents": [ - { - "_id": 3 - }, - { - "_id": 4 - } - ] - }, - "command_name": "insert", - "database_name": "sdam-tests" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - }, - { - "_id": 2 - }, - { - "_id": 3 - }, - { - "_id": 4 - } - ] - } - } - } - ] -} diff --git a/test/discovery_and_monitoring_integration/hello-network-error.json b/test/discovery_and_monitoring_integration/hello-network-error.json deleted file mode 100644 index f9761d7556..0000000000 --- a/test/discovery_and_monitoring_integration/hello-network-error.json +++ /dev/null @@ -1,219 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "4.9" - } - ], - "database_name": "sdam-tests", - "collection_name": "hello-network-error", - "data": [], - "tests": [ - { - "description": "Network error on Monitor handshake", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 2 - }, - "data": { - "failCommands": [ - "hello", - "isMaster" - ], - "appName": "networkErrorHandshakeTest", - "closeConnection": true - } - }, - "clientOptions": { - "retryWrites": false, - "connectTimeoutMS": 250, - "heartbeatFrequencyMS": 500, - "appname": "networkErrorHandshakeTest" - }, - "operations": [ - { - "name": "waitForEvent", - "object": "testRunner", - "arguments": { - "event": "ServerMarkedUnknownEvent", - "count": 1 - } - }, - { - "name": "insertMany", - "object": "collection", - "arguments": { - "documents": [ - { - "_id": 1 - }, - { - "_id": 2 - } - ] - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "hello-network-error", - "documents": [ - { - "_id": 1 - }, - { - "_id": 2 - } - ] - }, - "command_name": "insert", - "database_name": "sdam-tests" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - }, - { - "_id": 2 - } - ] - } - } - }, - { - "description": "Network error on Monitor check", - "clientOptions": { - "retryWrites": false, - "connectTimeoutMS": 250, - "heartbeatFrequencyMS": 500, - "appname": "networkErrorCheckTest" - }, - "operations": [ - { - "name": "insertMany", - "object": "collection", - "arguments": { - "documents": [ - { - "_id": 1 - }, - { - "_id": 2 - } - ] - } - }, - { - "name": "configureFailPoint", - "object": "testRunner", - "arguments": { - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 4 - }, - "data": { - "failCommands": [ - "hello", - "isMaster" - ], - "appName": "networkErrorCheckTest", - "closeConnection": true - } - } - } - }, - { - "name": "waitForEvent", - "object": "testRunner", - "arguments": { - "event": "ServerMarkedUnknownEvent", - "count": 1 - } - }, - { - "name": "waitForEvent", - "object": "testRunner", - "arguments": { - "event": "PoolClearedEvent", - "count": 1 - } - }, - { - "name": "insertMany", - "object": "collection", - "arguments": { - "documents": [ - { - "_id": 3 - }, - { - "_id": 4 - } - ] - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "hello-network-error", - "documents": [ - { - "_id": 1 - }, - { - "_id": 2 - } - ] - }, - "command_name": "insert", - "database_name": "sdam-tests" - } - }, - { - "command_started_event": { - "command": { - "insert": "hello-network-error", - "documents": [ - { - "_id": 3 - }, - { - "_id": 4 - } - ] - }, - "command_name": "insert", - "database_name": "sdam-tests" - } - } - ], - "outcome": { - "collection": { - "data": [ - { 
- "_id": 1 - }, - { - "_id": 2 - }, - { - "_id": 3 - }, - { - "_id": 4 - } - ] - } - } - } - ] -} diff --git a/test/discovery_and_monitoring_integration/hello-timeout.json b/test/discovery_and_monitoring_integration/hello-timeout.json deleted file mode 100644 index 004f8f449d..0000000000 --- a/test/discovery_and_monitoring_integration/hello-timeout.json +++ /dev/null @@ -1,337 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "4.4" - } - ], - "database_name": "sdam-tests", - "collection_name": "hello-timeout", - "data": [], - "tests": [ - { - "description": "Network timeout on Monitor handshake", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 2 - }, - "data": { - "failCommands": [ - "hello", - "isMaster" - ], - "appName": "timeoutMonitorHandshakeTest", - "blockConnection": true, - "blockTimeMS": 1000 - } - }, - "clientOptions": { - "retryWrites": false, - "connectTimeoutMS": 250, - "heartbeatFrequencyMS": 500, - "appname": "timeoutMonitorHandshakeTest" - }, - "operations": [ - { - "name": "waitForEvent", - "object": "testRunner", - "arguments": { - "event": "ServerMarkedUnknownEvent", - "count": 1 - } - }, - { - "name": "insertMany", - "object": "collection", - "arguments": { - "documents": [ - { - "_id": 1 - }, - { - "_id": 2 - } - ] - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "hello-timeout", - "documents": [ - { - "_id": 1 - }, - { - "_id": 2 - } - ] - }, - "command_name": "insert", - "database_name": "sdam-tests" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - }, - { - "_id": 2 - } - ] - } - } - }, - { - "description": "Network timeout on Monitor check", - "clientOptions": { - "retryWrites": false, - "connectTimeoutMS": 750, - "heartbeatFrequencyMS": 500, - "appname": "timeoutMonitorCheckTest" - }, - "operations": [ - { - "name": "insertMany", - "object": "collection", - "arguments": { - "documents": [ - { - "_id": 1 - }, - { - "_id": 2 - } - ] - } - }, - { - "name": "configureFailPoint", - "object": "testRunner", - "arguments": { - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 4 - }, - "data": { - "failCommands": [ - "hello", - "isMaster" - ], - "appName": "timeoutMonitorCheckTest", - "blockConnection": true, - "blockTimeMS": 1000 - } - } - } - }, - { - "name": "waitForEvent", - "object": "testRunner", - "arguments": { - "event": "ServerMarkedUnknownEvent", - "count": 1 - } - }, - { - "name": "waitForEvent", - "object": "testRunner", - "arguments": { - "event": "PoolClearedEvent", - "count": 1 - } - }, - { - "name": "insertMany", - "object": "collection", - "arguments": { - "documents": [ - { - "_id": 3 - }, - { - "_id": 4 - } - ] - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "hello-timeout", - "documents": [ - { - "_id": 1 - }, - { - "_id": 2 - } - ] - }, - "command_name": "insert", - "database_name": "sdam-tests" - } - }, - { - "command_started_event": { - "command": { - "insert": "hello-timeout", - "documents": [ - { - "_id": 3 - }, - { - "_id": 4 - } - ] - }, - "command_name": "insert", - "database_name": "sdam-tests" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - }, - { - "_id": 2 - }, - { - "_id": 3 - }, - { - "_id": 4 - } - ] - } - } - }, - { - "description": "Driver extends timeout while streaming", - "clientOptions": { - "retryWrites": false, - "connectTimeoutMS": 250, - "heartbeatFrequencyMS": 500, - "appname": "extendsTimeoutTest" - }, - "operations": [ - { - 
"name": "insertMany", - "object": "collection", - "arguments": { - "documents": [ - { - "_id": 1 - }, - { - "_id": 2 - } - ] - } - }, - { - "name": "wait", - "object": "testRunner", - "arguments": { - "ms": 2000 - } - }, - { - "name": "insertMany", - "object": "collection", - "arguments": { - "documents": [ - { - "_id": 3 - }, - { - "_id": 4 - } - ] - } - }, - { - "name": "assertEventCount", - "object": "testRunner", - "arguments": { - "event": "ServerMarkedUnknownEvent", - "count": 0 - } - }, - { - "name": "assertEventCount", - "object": "testRunner", - "arguments": { - "event": "PoolClearedEvent", - "count": 0 - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "hello-timeout", - "documents": [ - { - "_id": 1 - }, - { - "_id": 2 - } - ] - }, - "command_name": "insert", - "database_name": "sdam-tests" - } - }, - { - "command_started_event": { - "command": { - "insert": "hello-timeout", - "documents": [ - { - "_id": 3 - }, - { - "_id": 4 - } - ] - }, - "command_name": "insert", - "database_name": "sdam-tests" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - }, - { - "_id": 2 - }, - { - "_id": 3 - }, - { - "_id": 4 - } - ] - } - } - } - ] -} diff --git a/test/discovery_and_monitoring_integration/insert-network-error.json b/test/discovery_and_monitoring_integration/insert-network-error.json deleted file mode 100644 index fa8bb253e1..0000000000 --- a/test/discovery_and_monitoring_integration/insert-network-error.json +++ /dev/null @@ -1,156 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "4.4" - } - ], - "database_name": "sdam-tests", - "collection_name": "insert-network-error", - "data": [ - { - "_id": 1 - }, - { - "_id": 2 - } - ], - "tests": [ - { - "description": "Reset server and pool after network error on insert", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "insert" - ], - "closeConnection": true, - "appName": "insertNetworkErrorTest" - } - }, - "clientOptions": { - "retryWrites": false, - "appname": "insertNetworkErrorTest" - }, - "operations": [ - { - "name": "insertMany", - "object": "collection", - "arguments": { - "documents": [ - { - "_id": 3 - }, - { - "_id": 4 - } - ] - }, - "error": true - }, - { - "name": "waitForEvent", - "object": "testRunner", - "arguments": { - "event": "ServerMarkedUnknownEvent", - "count": 1 - } - }, - { - "name": "waitForEvent", - "object": "testRunner", - "arguments": { - "event": "PoolClearedEvent", - "count": 1 - } - }, - { - "name": "insertMany", - "object": "collection", - "arguments": { - "documents": [ - { - "_id": 5 - }, - { - "_id": 6 - } - ] - } - }, - { - "name": "assertEventCount", - "object": "testRunner", - "arguments": { - "event": "ServerMarkedUnknownEvent", - "count": 1 - } - }, - { - "name": "assertEventCount", - "object": "testRunner", - "arguments": { - "event": "PoolClearedEvent", - "count": 1 - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "insert-network-error", - "documents": [ - { - "_id": 3 - }, - { - "_id": 4 - } - ] - }, - "command_name": "insert", - "database_name": "sdam-tests" - } - }, - { - "command_started_event": { - "command": { - "insert": "insert-network-error", - "documents": [ - { - "_id": 5 - }, - { - "_id": 6 - } - ] - }, - "command_name": "insert", - "database_name": "sdam-tests" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - }, - { - "_id": 2 - }, - { - "_id": 5 - }, - { - "_id": 6 - } - ] - } - 
} - } - ] -} diff --git a/test/discovery_and_monitoring_integration/insert-shutdown-error.json b/test/discovery_and_monitoring_integration/insert-shutdown-error.json deleted file mode 100644 index edde149a91..0000000000 --- a/test/discovery_and_monitoring_integration/insert-shutdown-error.json +++ /dev/null @@ -1,167 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "4.4" - } - ], - "database_name": "sdam-tests", - "collection_name": "insert-shutdown-error", - "data": [], - "tests": [ - { - "description": "Concurrent shutdown error on insert", - "clientOptions": { - "retryWrites": false, - "heartbeatFrequencyMS": 500, - "appname": "shutdownErrorInsertTest" - }, - "operations": [ - { - "name": "insertOne", - "object": "collection", - "arguments": { - "document": { - "_id": 1 - } - } - }, - { - "name": "configureFailPoint", - "object": "testRunner", - "arguments": { - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 2 - }, - "data": { - "failCommands": [ - "insert" - ], - "appName": "shutdownErrorInsertTest", - "errorCode": 91, - "blockConnection": true, - "blockTimeMS": 500 - } - } - } - }, - { - "name": "startThread", - "object": "testRunner", - "arguments": { - "name": "thread1" - } - }, - { - "name": "startThread", - "object": "testRunner", - "arguments": { - "name": "thread2" - } - }, - { - "name": "runOnThread", - "object": "testRunner", - "arguments": { - "name": "thread1", - "operation": { - "name": "insertOne", - "object": "collection", - "arguments": { - "document": { - "_id": 2 - } - }, - "error": true - } - } - }, - { - "name": "runOnThread", - "object": "testRunner", - "arguments": { - "name": "thread2", - "operation": { - "name": "insertOne", - "object": "collection", - "arguments": { - "document": { - "_id": 3 - } - }, - "error": true - } - } - }, - { - "name": "waitForThread", - "object": "testRunner", - "arguments": { - "name": "thread1" - } - }, - { - "name": "waitForThread", - "object": "testRunner", - "arguments": { - "name": "thread2" - } - }, - { - "name": "waitForEvent", - "object": "testRunner", - "arguments": { - "event": "ServerMarkedUnknownEvent", - "count": 1 - } - }, - { - "name": "waitForEvent", - "object": "testRunner", - "arguments": { - "event": "PoolClearedEvent", - "count": 1 - } - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "document": { - "_id": 4 - } - } - }, - { - "name": "assertEventCount", - "object": "testRunner", - "arguments": { - "event": "ServerMarkedUnknownEvent", - "count": 1 - } - }, - { - "name": "assertEventCount", - "object": "testRunner", - "arguments": { - "event": "PoolClearedEvent", - "count": 1 - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - }, - { - "_id": 4 - } - ] - } - } - } - ] -} diff --git a/test/discovery_and_monitoring_integration/minPoolSize-error.json b/test/discovery_and_monitoring_integration/minPoolSize-error.json deleted file mode 100644 index 9f8e4f6f8b..0000000000 --- a/test/discovery_and_monitoring_integration/minPoolSize-error.json +++ /dev/null @@ -1,102 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "4.9" - } - ], - "database_name": "sdam-tests", - "collection_name": "sdam-minPoolSize-error", - "data": [], - "tests": [ - { - "description": "Network error on minPoolSize background creation", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "skip": 3 - }, - "data": { - "failCommands": [ - "hello", - "isMaster" - ], - "appName": "SDAMminPoolSizeError", - "closeConnection": true - } - }, - "clientOptions": { - 
"heartbeatFrequencyMS": 10000, - "appname": "SDAMminPoolSizeError", - "minPoolSize": 10, - "serverSelectionTimeoutMS": 1000, - "directConnection": true - }, - "operations": [ - { - "name": "waitForEvent", - "object": "testRunner", - "arguments": { - "event": "PoolReadyEvent", - "count": 1 - } - }, - { - "name": "waitForEvent", - "object": "testRunner", - "arguments": { - "event": "PoolClearedEvent", - "count": 1 - } - }, - { - "name": "waitForEvent", - "object": "testRunner", - "arguments": { - "event": "ServerMarkedUnknownEvent", - "count": 1 - } - }, - { - "name": "runCommand", - "object": "database", - "command_name": "ping", - "arguments": { - "command": { - "ping": {} - } - }, - "error": true - }, - { - "name": "configureFailPoint", - "object": "testRunner", - "arguments": { - "failPoint": { - "configureFailPoint": "failCommand", - "mode": "off" - } - } - }, - { - "name": "runCommand", - "object": "database", - "command_name": "ping", - "arguments": { - "command": { - "ping": 1 - } - }, - "error": false - }, - { - "name": "assertEventCount", - "object": "testRunner", - "arguments": { - "event": "PoolReadyEvent", - "count": 2 - } - } - ] - } - ] -} diff --git a/test/discovery_and_monitoring_integration/rediscover-quickly-after-step-down.json b/test/discovery_and_monitoring_integration/rediscover-quickly-after-step-down.json deleted file mode 100644 index 41fbdc695c..0000000000 --- a/test/discovery_and_monitoring_integration/rediscover-quickly-after-step-down.json +++ /dev/null @@ -1,165 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "4.4", - "topology": [ - "replicaset" - ] - } - ], - "database_name": "sdam-tests", - "collection_name": "test-replSetStepDown", - "data": [ - { - "_id": 1 - }, - { - "_id": 2 - } - ], - "tests": [ - { - "description": "Rediscover quickly after replSetStepDown", - "clientOptions": { - "appname": "replSetStepDownTest", - "heartbeatFrequencyMS": 60000, - "serverSelectionTimeoutMS": 5000, - "w": "majority" - }, - "operations": [ - { - "name": "insertMany", - "object": "collection", - "arguments": { - "documents": [ - { - "_id": 3 - }, - { - "_id": 4 - } - ] - } - }, - { - "name": "recordPrimary", - "object": "testRunner" - }, - { - "name": "runAdminCommand", - "object": "testRunner", - "command_name": "replSetFreeze", - "arguments": { - "command": { - "replSetFreeze": 0 - }, - "readPreference": { - "mode": "Secondary" - } - } - }, - { - "name": "runAdminCommand", - "object": "testRunner", - "command_name": "replSetStepDown", - "arguments": { - "command": { - "replSetStepDown": 30, - "secondaryCatchUpPeriodSecs": 30, - "force": false - } - } - }, - { - "name": "waitForPrimaryChange", - "object": "testRunner", - "arguments": { - "timeoutMS": 15000 - } - }, - { - "name": "insertMany", - "object": "collection", - "arguments": { - "documents": [ - { - "_id": 5 - }, - { - "_id": 6 - } - ] - } - }, - { - "name": "assertEventCount", - "object": "testRunner", - "arguments": { - "event": "PoolClearedEvent", - "count": 0 - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test-replSetStepDown", - "documents": [ - { - "_id": 3 - }, - { - "_id": 4 - } - ] - }, - "command_name": "insert", - "database_name": "sdam-tests" - } - }, - { - "command_started_event": { - "command": { - "insert": "test-replSetStepDown", - "documents": [ - { - "_id": 5 - }, - { - "_id": 6 - } - ] - }, - "command_name": "insert", - "database_name": "sdam-tests" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - }, - { - "_id": 2 - }, 
- { - "_id": 3 - }, - { - "_id": 4 - }, - { - "_id": 5 - }, - { - "_id": 6 - } - ] - } - } - } - ] -} diff --git a/test/test_discovery_and_monitoring.py b/test/test_discovery_and_monitoring.py index 39979c2d10..9af8185ab5 100644 --- a/test/test_discovery_and_monitoring.py +++ b/test/test_discovery_and_monitoring.py @@ -17,16 +17,15 @@ import os import sys import threading -import time sys.path[0:0] = [""] from test import IntegrationTest, unittest from test.pymongo_mocks import DummyMonitor +from test.unified_format import generate_test_classes from test.utils import ( CMAPListener, HeartbeatEventListener, - TestCreator, assertion_context, client_context, get_pool, @@ -35,7 +34,6 @@ single_client, wait_until, ) -from test.utils_spec_runner import SpecRunner, SpecRunnerThread from bson import Timestamp, json_util from pymongo import common, monitoring @@ -55,7 +53,7 @@ from pymongo.uri_parser import parse_uri # Location of JSON test specifications. -_TEST_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "discovery_and_monitoring") +SDAM_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "discovery_and_monitoring") def create_mock_topology(uri, monitor_class=DummyMonitor): @@ -216,8 +214,11 @@ def run_scenario(self): def create_tests(): - for dirpath, _, filenames in os.walk(_TEST_PATH): + for dirpath, _, filenames in os.walk(SDAM_PATH): dirname = os.path.split(dirpath)[-1] + # SDAM unified tests are handled separately. + if dirname == "unified": + continue for filename in filenames: if os.path.splitext(filename)[1] != ".json": @@ -340,107 +341,8 @@ def test_pool_unpause(self): listener.wait_for_event(monitoring.PoolReadyEvent, 1) -class TestIntegration(SpecRunner): - # Location of JSON test specifications. - TEST_PATH = os.path.join( - os.path.dirname(os.path.realpath(__file__)), "discovery_and_monitoring_integration" - ) - - def _event_count(self, event): - if event == "ServerMarkedUnknownEvent": - - def marked_unknown(e): - return ( - isinstance(e, monitoring.ServerDescriptionChangedEvent) - and not e.new_description.is_server_type_known - ) - - assert self.server_listener is not None - return len(self.server_listener.matching(marked_unknown)) - # Only support CMAP events for now. - self.assertTrue(event.startswith("Pool") or event.startswith("Conn")) - event_type = getattr(monitoring, event) - assert self.pool_listener is not None - return self.pool_listener.event_count(event_type) - - def assert_event_count(self, event, count): - """Run the assertEventCount test operation. - - Assert the given event was published exactly `count` times. - """ - self.assertEqual(self._event_count(event), count, "expected %s not %r" % (count, event)) - - def wait_for_event(self, event, count): - """Run the waitForEvent test operation. - - Wait for a number of events to be published, or fail. 
- """ - wait_until( - lambda: self._event_count(event) >= count, "find %s %s event(s)" % (count, event) - ) - - def configure_fail_point(self, fail_point): - """Run the configureFailPoint test operation.""" - self.set_fail_point(fail_point) - self.addCleanup( - self.set_fail_point, - {"configureFailPoint": fail_point["configureFailPoint"], "mode": "off"}, - ) - - def run_admin_command(self, command, **kwargs): - """Run the runAdminCommand test operation.""" - self.client.admin.command(command, **kwargs) - - def record_primary(self): - """Run the recordPrimary test operation.""" - self._previous_primary = self.scenario_client.primary - - def wait_for_primary_change(self, timeout): - """Run the waitForPrimaryChange test operation.""" - - def primary_changed(): - primary = self.scenario_client.primary - if primary is None: - return False - return primary != self._previous_primary - - wait_until(primary_changed, "change primary", timeout=timeout) - - def wait(self, ms): - """Run the "wait" test operation.""" - time.sleep(ms / 1000.0) - - def start_thread(self, name): - """Run the 'startThread' thread operation.""" - thread = SpecRunnerThread(name) - thread.start() - self.targets[name] = thread - - def run_on_thread(self, sessions, collection, name, operation): - """Run the 'runOnThread' operation.""" - thread = self.targets[name] - thread.schedule(lambda: self._run_op(sessions, collection, operation, False)) - - def wait_for_thread(self, name): - """Run the 'waitForThread' operation.""" - thread = self.targets[name] - thread.stop() - thread.join(60) - if thread.exc: - raise thread.exc - self.assertFalse(thread.is_alive(), "Thread %s is still running" % (name,)) - - -def create_spec_test(scenario_def, test, name): - @client_context.require_test_commands - def run_scenario(self): - self.run_scenario(scenario_def, test) - - return run_scenario - - -test_creator = TestCreator(create_spec_test, TestIntegration, TestIntegration.TEST_PATH) -test_creator.create_tests() +# Generate unified tests. 
+globals().update(generate_test_classes(os.path.join(SDAM_PATH, "unified"), module=__name__)) if __name__ == "__main__": diff --git a/test/unified-test-format/invalid/entity-thread-additionalProperties.json b/test/unified-test-format/invalid/entity-thread-additionalProperties.json new file mode 100644 index 0000000000..b296719f13 --- /dev/null +++ b/test/unified-test-format/invalid/entity-thread-additionalProperties.json @@ -0,0 +1,18 @@ +{ + "description": "entity-thread-additionalProperties", + "schemaVersion": "1.10", + "createEntities": [ + { + "thread": { + "id": "thread0", + "foo": "bar" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/entity-thread-id-required.json b/test/unified-test-format/invalid/entity-thread-id-required.json new file mode 100644 index 0000000000..3b197e3d6b --- /dev/null +++ b/test/unified-test-format/invalid/entity-thread-id-required.json @@ -0,0 +1,15 @@ +{ + "description": "entity-thread-id-required", + "schemaVersion": "1.10", + "createEntities": [ + { + "thread": {} + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/entity-thread-id-type.json b/test/unified-test-format/invalid/entity-thread-id-type.json new file mode 100644 index 0000000000..8f281ef6f4 --- /dev/null +++ b/test/unified-test-format/invalid/entity-thread-id-type.json @@ -0,0 +1,17 @@ +{ + "description": "entity-thread-id-type", + "schemaVersion": "1.10", + "createEntities": [ + { + "thread": { + "id": 0 + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/expectedSdamEvent-serverDescriptionChangedEvent-additionalProperties.json b/test/unified-test-format/invalid/expectedSdamEvent-serverDescriptionChangedEvent-additionalProperties.json new file mode 100644 index 0000000000..1c6ec460b7 --- /dev/null +++ b/test/unified-test-format/invalid/expectedSdamEvent-serverDescriptionChangedEvent-additionalProperties.json @@ -0,0 +1,23 @@ +{ + "description": "expectedSdamEvent-serverDescriptionChangedEvent-additionalProperties", + "schemaVersion": "1.10", + "tests": [ + { + "description": "foo", + "operations": [], + "expectEvents": [ + { + "client": "client0", + "eventType": "sdam", + "events": [ + { + "serverDescriptionChangedEvent": { + "foo": "bar" + } + } + ] + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/expectedSdamEvent-serverDescriptionChangedEvent-serverDescription-additionalProperties.json b/test/unified-test-format/invalid/expectedSdamEvent-serverDescriptionChangedEvent-serverDescription-additionalProperties.json new file mode 100644 index 0000000000..58f686739a --- /dev/null +++ b/test/unified-test-format/invalid/expectedSdamEvent-serverDescriptionChangedEvent-serverDescription-additionalProperties.json @@ -0,0 +1,25 @@ +{ + "description": "expectedSdamEvent-serverDescriptionChangedEvent-serverDescription-additionalProperties", + "schemaVersion": "1.10", + "tests": [ + { + "description": "foo", + "operations": [], + "expectEvents": [ + { + "client": "client0", + "eventType": "sdam", + "events": [ + { + "serverDescriptionChangedEvent": { + "previousDescription": { + "foo": "bar" + } + } + } + ] + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/expectedSdamEvent-serverDescriptionChangedEvent-serverDescription-type-enum.json 
b/test/unified-test-format/invalid/expectedSdamEvent-serverDescriptionChangedEvent-serverDescription-type-enum.json new file mode 100644 index 0000000000..1b4a7e2e70 --- /dev/null +++ b/test/unified-test-format/invalid/expectedSdamEvent-serverDescriptionChangedEvent-serverDescription-type-enum.json @@ -0,0 +1,25 @@ +{ + "description": "expectedSdamEvent-serverDescriptionChangedEvent-serverDescription-type-enum", + "schemaVersion": "1.10", + "tests": [ + { + "description": "foo", + "operations": [], + "expectEvents": [ + { + "client": "client0", + "eventType": "sdam", + "events": [ + { + "serverDescriptionChangedEvent": { + "previousDescription": { + "type": "not a server type" + } + } + } + ] + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/expectedSdamEvent-serverDescriptionChangedEvent-serverDescription-type-type.json b/test/unified-test-format/invalid/expectedSdamEvent-serverDescriptionChangedEvent-serverDescription-type-type.json new file mode 100644 index 0000000000..c7ea9cc9be --- /dev/null +++ b/test/unified-test-format/invalid/expectedSdamEvent-serverDescriptionChangedEvent-serverDescription-type-type.json @@ -0,0 +1,25 @@ +{ + "description": "expectedSdamEvent-serverDescriptionChangedEvent-serverDescription-type-type", + "schemaVersion": "1.10", + "tests": [ + { + "description": "foo", + "operations": [], + "expectEvents": [ + { + "client": "client0", + "eventType": "sdam", + "events": [ + { + "serverDescriptionChangedEvent": { + "previousDescription": { + "type": 12 + } + } + } + ] + } + ] + } + ] +} diff --git a/test/unified_format.py b/test/unified_format.py index ee64915202..dbf4ef988f 100644 --- a/test/unified_format.py +++ b/test/unified_format.py @@ -50,9 +50,11 @@ rs_or_single_client, single_client, snake_to_camel, + wait_until, ) +from test.utils_spec_runner import SpecRunnerThread from test.version import Version -from typing import Any, List +from typing import Any, Dict, List, Mapping, Optional import pymongo from bson import SON, Code, DBRef, Decimal128, Int64, MaxKey, MinKey, json_util @@ -94,11 +96,24 @@ PoolClosedEvent, PoolCreatedEvent, PoolReadyEvent, + ServerClosedEvent, + ServerDescriptionChangedEvent, + ServerListener, + ServerOpeningEvent, + TopologyEvent, + _CommandEvent, + _ConnectionEvent, + _PoolEvent, + _ServerEvent, ) from pymongo.read_concern import ReadConcern from pymongo.read_preferences import ReadPreference from pymongo.results import BulkWriteResult from pymongo.server_api import ServerApi +from pymongo.server_description import ServerDescription +from pymongo.server_selectors import Selection, writable_server_selector +from pymongo.server_type import SERVER_TYPE +from pymongo.topology_description import TopologyDescription from pymongo.write_concern import WriteConcern JSON_OPTS = json_util.JSONOptions(tz_aware=False) @@ -268,7 +283,7 @@ def close(self): self.client = None -class EventListenerUtil(CMAPListener, CommandListener): +class EventListenerUtil(CMAPListener, CommandListener, ServerListener): def __init__( self, observe_events, ignore_commands, observe_sensitive_commands, store_events, entity_map ): @@ -292,9 +307,14 @@ def __init__( super(EventListenerUtil, self).__init__() def get_events(self, event_type): + assert event_type in ("command", "cmap", "sdam", "all"), event_type + if event_type == "all": + return list(self.events) if event_type == "command": - return [e for e in self.events if "Command" in type(e).__name__] - return [e for e in self.events if "Command" not in type(e).__name__] + return [e for e in 
self.events if isinstance(e, _CommandEvent)] + if event_type == "cmap": + return [e for e in self.events if isinstance(e, (_ConnectionEvent, _PoolEvent))] + return [e for e in self.events if isinstance(e, (_ServerEvent, TopologyEvent))] def add_event(self, event): event_name = type(event).__name__.lower() @@ -332,16 +352,25 @@ def succeeded(self, event): def failed(self, event): self._command_event(event) + def opened(self, event: ServerOpeningEvent) -> None: + self.add_event(event) + + def description_changed(self, event: ServerDescriptionChangedEvent) -> None: + self.add_event(event) + + def closed(self, event: ServerClosedEvent) -> None: + self.add_event(event) + class EntityMapUtil(object): """Utility class that implements an entity map as per the unified test format specification.""" def __init__(self, test_class): - self._entities = {} - self._listeners = {} - self._session_lsids = {} - self.test = test_class + self._entities: Dict[str, Any] = {} + self._listeners: Dict[str, EventListenerUtil] = {} + self._session_lsids: Dict[str, Mapping[str, Any]] = {} + self.test: UnifiedSpecTestMixinV1 = test_class def __contains__(self, item): return item in self._entities @@ -484,6 +513,12 @@ def drop(self: GridFSBucket, *args: Any, **kwargs: Any) -> None: opts.get("kms_tls_options", KMS_TLS_OPTS), ) return + elif entity_type == "thread": + name = spec["id"] + thread = SpecRunnerThread(name) + thread.start() + self[name] = thread + return self.test.fail("Unable to create entity of unknown type %s" % (entity_type,)) @@ -491,7 +526,7 @@ def create_entities_from_spec(self, entity_spec, uri=None): for spec in entity_spec: self._create_entity(spec, uri=uri) - def get_listener_for_client(self, client_name): + def get_listener_for_client(self, client_name: str) -> EventListenerUtil: client = self[client_name] if not isinstance(client, MongoClient): self.test.fail( @@ -710,6 +745,18 @@ def assertHasServiceId(self, spec, actual): else: self.test.assertIsNone(actual.service_id) + def match_server_description(self, actual: ServerDescription, spec: dict) -> None: + if "type" in spec: + self.test.assertEqual(actual.server_type_name, spec["type"]) + if "error" in spec: + self.test.process_error(actual.error, spec["error"]) + if "minWireVersion" in spec: + self.test.assertEqual(actual.min_wire_version, spec["minWireVersion"]) + if "maxWireVersion" in spec: + self.test.assertEqual(actual.max_wire_version, spec["maxWireVersion"]) + if "topologyVersion" in spec: + self.test.assertEqual(actual.topology_version, spec["topologyVersion"]) + def match_event(self, event_type, expectation, actual): name, spec = next(iter(expectation.items())) @@ -770,8 +817,16 @@ def match_event(self, event_type, expectation, actual): self.test.assertIsInstance(actual, ConnectionCheckedOutEvent) elif name == "connectionCheckedInEvent": self.test.assertIsInstance(actual, ConnectionCheckedInEvent) + elif name == "serverDescriptionChangedEvent": + self.test.assertIsInstance(actual, ServerDescriptionChangedEvent) + if "previousDescription" in spec: + self.match_server_description( + actual.previous_description, spec["previousDescription"] + ) + if "newDescription" in spec: + self.match_server_description(actual.new_description, spec["newDescription"]) else: - self.test.fail("Unsupported event type %s" % (name,)) + raise Exception("Unsupported event type %s" % (name,)) def coerce_result(opname, result): @@ -805,7 +860,7 @@ class UnifiedSpecTestMixinV1(IntegrationTest): a class attribute ``TEST_SPEC``. 
""" - SCHEMA_VERSION = Version.from_string("1.9") + SCHEMA_VERSION = Version.from_string("1.10") RUN_ON_LOAD_BALANCER = True RUN_ON_SERVERLESS = True TEST_SPEC: Any @@ -1339,6 +1394,90 @@ def _testOperation_assertNumberConnectionsCheckedOut(self, spec): pool = get_pool(client) self.assertEqual(spec["connections"], pool.active_sockets) + def _event_count(self, client_name, event): + listener = self.entity_map.get_listener_for_client(client_name) + actual_events = listener.get_events("all") + count = 0 + for actual in actual_events: + try: + self.match_evaluator.match_event("all", event, actual) + except AssertionError: + continue + else: + count += 1 + return count + + def _testOperation_assertEventCount(self, spec): + """Run the assertEventCount test operation. + + Assert the given event was published exactly `count` times. + """ + client, event, count = spec["client"], spec["event"], spec["count"] + self.assertEqual( + self._event_count(client, event), count, "expected %s not %r" % (count, event) + ) + + def _testOperation_waitForEvent(self, spec): + """Run the waitForEvent test operation. + + Wait for a number of events to be published, or fail. + """ + client, event, count = spec["client"], spec["event"], spec["count"] + wait_until( + lambda: self._event_count(client, event) >= count, + "find %s %s event(s)" % (count, event), + ) + + def _testOperation_wait(self, spec): + """Run the "wait" test operation.""" + time.sleep(spec["ms"] / 1000.0) + + def _testOperation_recordTopologyDescription(self, spec): + """Run the recordTopologyDescription test operation.""" + self.entity_map[spec["id"]] = self.entity_map[spec["client"]].topology_description + + def _testOperation_assertTopologyType(self, spec): + """Run the assertTopologyType test operation.""" + description = self.entity_map[spec["topologyDescription"]] + self.assertIsInstance(description, TopologyDescription) + self.assertEqual(description.topology_type_name, spec["topologyType"]) + + def _testOperation_waitForPrimaryChange(self, spec): + """Run the waitForPrimaryChange test operation.""" + client = self.entity_map[spec["client"]] + old_description: TopologyDescription = self.entity_map[spec["priorTopologyDescription"]] + timeout = spec["timeoutMS"] / 1000.0 + + def get_primary(td: TopologyDescription) -> Optional[ServerDescription]: + servers = writable_server_selector(Selection.from_topology_description(td)) + if servers and servers[0].server_type == SERVER_TYPE.RSPrimary: + return servers[0] + return None + + old_primary = get_primary(old_description) + + def primary_changed(): + primary = client.primary + if primary is None: + return False + return primary != old_primary + + wait_until(primary_changed, "change primary", timeout=timeout) + + def _testOperation_runOnThread(self, spec): + """Run the 'runOnThread' operation.""" + thread = self.entity_map[spec["thread"]] + thread.schedule(lambda: self.run_entity_operation(spec["operation"])) + + def _testOperation_waitForThread(self, spec): + """Run the 'waitForThread' operation.""" + thread = self.entity_map[spec["thread"]] + thread.stop() + thread.join(10) + if thread.exc: + raise thread.exc + self.assertFalse(thread.is_alive(), "Thread %s is still running" % (spec["thread"],)) + def _testOperation_loop(self, spec): failure_key = spec.get("storeFailuresAsEntity") error_key = spec.get("storeErrorsAsEntity") @@ -1398,14 +1537,10 @@ def check_events(self, spec): for event_spec in spec: client_name = event_spec["client"] events = event_spec["events"] - # Valid types: 'command', 'cmap' 
event_type = event_spec.get("eventType", "command") ignore_extra_events = event_spec.get("ignoreExtraEvents", False) server_connection_id = event_spec.get("serverConnectionId") has_server_connection_id = event_spec.get("hasServerConnectionId", False) - - assert event_type in ("command", "cmap") - listener = self.entity_map.get_listener_for_client(client_name) actual_events = listener.get_events(event_type) if ignore_extra_events: From 92a6fa79b66ae1f91691c7540d3f40331195278f Mon Sep 17 00:00:00 2001 From: Ben Warner Date: Wed, 3 Aug 2022 16:53:50 -0700 Subject: [PATCH 0231/1588] PYTHON-3376/PYTHON-3378 Update FAQ about OverflowError when decoding out of range datetimes (#1025) --- doc/examples/datetimes.rst | 52 ++++++++++++++++++++++++++------------ doc/faq.rst | 41 +++++++++++++++++++++++++++--- 2 files changed, 73 insertions(+), 20 deletions(-) diff --git a/doc/examples/datetimes.rst b/doc/examples/datetimes.rst index b9c509e075..f965b9f58c 100644 --- a/doc/examples/datetimes.rst +++ b/doc/examples/datetimes.rst @@ -125,32 +125,52 @@ To decode UTC datetime values as :class:`~bson.datetime_ms.DatetimeMS`, :attr:`~bson.datetime_ms.DatetimeConversionOpts.DATETIME_AUTO`, :attr:`~bson.datetime_ms.DatetimeConversionOpts.DATETIME_CLAMP`. :attr:`~bson.datetime_ms.DatetimeConversionOpts.DATETIME` is the default -option and has the behavior of raising an exception upon attempting to -decode an out-of-range date. +option and has the behavior of raising an :class:`~builtin.OverflowError` upon +attempting to decode an out-of-range date. :attr:`~bson.datetime_ms.DatetimeConversionOpts.DATETIME_MS` will only return :class:`~bson.datetime_ms.DatetimeMS` objects, regardless of whether the -represented datetime is in- or out-of-range. +represented datetime is in- or out-of-range: + +.. doctest:: + + >>> from datetime import datetime + >>> from bson import encode, decode + >>> from bson.datetime_ms import DatetimeMS + >>> from bson.codec_options import CodecOptions, DatetimeConversionOpts + >>> x = encode({"x": datetime(1970, 1, 1)}) + >>> codec_ms = CodecOptions(datetime_conversion=DatetimeConversionOpts.DATETIME_MS) + >>> decode(x, codec_options=codec_ms) + {'x': DatetimeMS(0)} + :attr:`~bson.datetime_ms.DatetimeConversionOpts.DATETIME_AUTO` will return :class:`~datetime.datetime` if the underlying UTC datetime is within range, or :class:`~bson.datetime_ms.DatetimeMS` if the underlying datetime -cannot be represented using the builtin Python :class:`~datetime.datetime`. +cannot be represented using the builtin Python :class:`~datetime.datetime`: + +.. doctest:: + + >>> x = encode({"x": datetime(1970, 1, 1)}) + >>> y = encode({"x": DatetimeMS(-2**62)}) + >>> codec_auto = CodecOptions(datetime_conversion=DatetimeConversionOpts.DATETIME_AUTO) + >>> decode(x, codec_options=codec_auto) + {'x': datetime.datetime(1970, 1, 1, 0, 0)} + >>> decode(y, codec_options=codec_auto) + {'x': DatetimeMS(-4611686018427387904)} + :attr:`~bson.datetime_ms.DatetimeConversionOpts.DATETIME_CLAMP` will clamp resulting :class:`~datetime.datetime` objects to be within :attr:`~datetime.datetime.min` and :attr:`~datetime.datetime.max` -(trimmed to `999000` microseconds). - -An example of encoding and decoding using `DATETIME_MS` is as follows: +(trimmed to `999000` microseconds): .. 
doctest:: - >>> from datetime import datetime - >>> from bson import encode, decode - >>> from bson.datetime_ms import DatetimeMS - >>> from bson.codec_options import CodecOptions,DatetimeConversionOpts - >>> x = encode({"x": datetime(1970, 1, 1)}) - >>> x - b'\x10\x00\x00\x00\tx\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' - >>> decode(x, codec_options=CodecOptions(datetime_conversion=DatetimeConversionOpts.DATETIME_MS)) - {'x': DatetimeMS(0)} + + >>> x = encode({"x": DatetimeMS(2**62)}) + >>> y = encode({"x": DatetimeMS(-2**62)}) + >>> codec_clamp = CodecOptions(datetime_conversion=DatetimeConversionOpts.DATETIME_CLAMP) + >>> decode(x, codec_options=codec_clamp) + {'x': datetime.datetime(9999, 12, 31, 23, 59, 59, 999000)} + >>> decode(y, codec_options=codec_clamp) + {'x': datetime.datetime(1, 1, 1, 0, 0)} :class:`~bson.datetime_ms.DatetimeMS` objects have support for rich comparison methods against other instances of :class:`~bson.datetime_ms.DatetimeMS`. diff --git a/doc/faq.rst b/doc/faq.rst index ca83f5de4c..5eb39c4276 100644 --- a/doc/faq.rst +++ b/doc/faq.rst @@ -264,7 +264,7 @@ collection, configured to use :class:`~bson.son.SON` instead of dict: >>> from bson import CodecOptions, SON >>> opts = CodecOptions(document_class=SON) >>> opts - CodecOptions(document_class=...SON..., tz_aware=False, uuid_representation=UuidRepresentation.UNSPECIFIED, unicode_decode_error_handler='strict', tzinfo=None, type_registry=TypeRegistry(type_codecs=[], fallback_encoder=None)) + CodecOptions(document_class=...SON..., tz_aware=False, uuid_representation=UuidRepresentation.UNSPECIFIED, unicode_decode_error_handler='strict', tzinfo=None, type_registry=TypeRegistry(type_codecs=[], fallback_encoder=None), datetime_conversion=DatetimeConversionOpts.DATETIME) >>> collection_son = collection.with_options(codec_options=opts) Now, documents and subdocuments in query results are represented with @@ -489,9 +489,42 @@ limited to years between :data:`datetime.MINYEAR` (usually 1) and driver) can store BSON datetimes with year values far outside those supported by :class:`datetime.datetime`. -There are a few ways to work around this issue. One option is to filter -out documents with values outside of the range supported by -:class:`datetime.datetime`:: +There are a few ways to work around this issue. Starting with PyMongo 4.3, +:func:`bson.decode` can decode BSON datetimes in one of four ways, and can +be specified using the ``datetime_conversion`` parameter of +:class:`~bson.codec_options.CodecOptions`. + +The default option is +:attr:`~bson.codec_options.DatetimeConversionOpts.DATETIME`, which will +attempt to decode as a :class:`datetime.datetime`, allowing +:class:`~builtin.OverflowError` to occur upon out-of-range dates. +:attr:`~bson.codec_options.DatetimeConversionOpts.DATETIME_AUTO` alters +this behavior to instead return :class:`~bson.datetime_ms.DatetimeMS` when +representations are out-of-range, while returning :class:`~datetime.datetime` +objects as before: + +.. doctest:: + + >>> from datetime import datetime + >>> from bson.datetime_ms import DatetimeMS + >>> from bson.codec_options import DatetimeConversionOpts + >>> from pymongo import MongoClient + >>> client = MongoClient(datetime_conversion=DatetimeConversionOpts.DATETIME_AUTO) + >>> client.db.collection.insert_one({"x": datetime(1970, 1, 1)}) + + >>> client.db.collection.insert_one({"x": DatetimeMS(2**62)}) + + >>> for x in client.db.collection.find(): + ... 
print(x)
+   {'_id': ObjectId('...'), 'x': datetime.datetime(1970, 1, 1, 0, 0)}
+   {'_id': ObjectId('...'), 'x': DatetimeMS(4611686018427387904)}
+
+For other options, please refer to
+:class:`~bson.codec_options.DatetimeConversionOpts`.
+
+Another option that does not involve setting `datetime_conversion` is to
+filter out documents with values outside of the range supported by
+:class:`~datetime.datetime`:
 
     >>> from datetime import datetime
     >>> coll = client.test.dates

From 46673c370521330f2705ae83c2b74db2a34fe7e5 Mon Sep 17 00:00:00 2001
From: Ben Warner
Date: Thu, 4 Aug 2022 12:53:57 -0700
Subject: [PATCH 0232/1588] PYTHON-3379 Refactored DatetimeConversionOpts to DatetimeConversion (#1031)

---
 bson/__init__.py           |  4 ++--
 bson/codec_options.py      |  6 +++---
 bson/codec_options.pyi     |  2 +-
 bson/datetime_ms.py        | 20 ++++++++------------
 bson/json_util.py          |  6 +++---
 doc/examples/datetimes.rst | 26 +++++++++++++------------
 doc/faq.rst                | 12 ++++++------
 pymongo/common.py          | 16 ++++++++--------
 test/test_bson.py          | 22 +++++++++------------
 test/test_client.py        | 10 ++++------
 test/test_json_util.py     |  4 ++--
 11 files changed, 59 insertions(+), 69 deletions(-)

diff --git a/bson/__init__.py b/bson/__init__.py
index 4283faf7dc..b43c686de8 100644
--- a/bson/__init__.py
+++ b/bson/__init__.py
@@ -99,7 +99,7 @@
 from bson.codec_options import (
     DEFAULT_CODEC_OPTIONS,
     CodecOptions,
-    DatetimeConversionOpts,
+    DatetimeConversion,
     _DocumentType,
     _raw_document_class,
 )
@@ -194,7 +194,7 @@
     "is_valid",
     "BSON",
     "has_c",
-    "DatetimeConversionOpts",
+    "DatetimeConversion",
     "DatetimeMS",
 ]

diff --git a/bson/codec_options.py b/bson/codec_options.py
index bceab5e003..efba8af78d 100644
--- a/bson/codec_options.py
+++ b/bson/codec_options.py
@@ -199,7 +199,7 @@ def __eq__(self, other: Any) -> Any:
         )
 
 
-class DatetimeConversionOpts(enum.IntEnum):
+class DatetimeConversion(enum.IntEnum):
     """Options for decoding BSON datetimes."""
 
     DATETIME = 1
@@ -241,7 +241,7 @@ class _BaseCodecOptions(NamedTuple):
     unicode_decode_error_handler: str
     tzinfo: Optional[datetime.tzinfo]
     type_registry: TypeRegistry
-    datetime_conversion: Optional[DatetimeConversionOpts]
+    datetime_conversion: Optional[DatetimeConversion]
 
 
 class CodecOptions(_BaseCodecOptions):
@@ -335,7 +335,7 @@ def __new__(
         unicode_decode_error_handler: str = "strict",
         tzinfo: Optional[datetime.tzinfo] = None,
         type_registry: Optional[TypeRegistry] = None,
-        datetime_conversion: Optional[DatetimeConversionOpts] = DatetimeConversionOpts.DATETIME,
+        datetime_conversion: Optional[DatetimeConversion] = DatetimeConversion.DATETIME,
     ) -> "CodecOptions":
         doc_class = document_class or dict
         # issubclass can raise TypeError for generic aliases like SON[str, Any].
diff --git a/bson/codec_options.pyi b/bson/codec_options.pyi
index 260407524f..2424516f08 100644
--- a/bson/codec_options.pyi
+++ b/bson/codec_options.pyi
@@ -55,7 +55,7 @@ class TypeRegistry:
 
 _DocumentType = TypeVar("_DocumentType", bound=Mapping[str, Any])
 
-class DatetimeConversionOpts(int, enum.Enum):
+class DatetimeConversion(int, enum.Enum):
     DATETIME = ...
     DATETIME_CLAMP = ...
     DATETIME_MS = ...
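To make the rename above concrete, the following is a minimal sketch of the new
spelling in use. It assumes a ``bson`` build with this patch applied (after this
commit the old ``DatetimeConversionOpts`` name no longer exists), and the expected
output mirrors the ``DATETIME_MS`` doctest that this patch updates in
``doc/examples/datetimes.rst``:

    >>> from datetime import datetime
    >>> from bson import encode, decode
    >>> from bson.codec_options import CodecOptions, DatetimeConversion
    >>> # Decode every BSON datetime as a DatetimeMS wrapper, in- or out-of-range.
    >>> opts = CodecOptions(datetime_conversion=DatetimeConversion.DATETIME_MS)
    >>> decode(encode({"x": datetime(1970, 1, 1)}), codec_options=opts)
    {'x': DatetimeMS(0)}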
diff --git a/bson/datetime_ms.py b/bson/datetime_ms.py index 925087a5aa..c64a0cce87 100644 --- a/bson/datetime_ms.py +++ b/bson/datetime_ms.py @@ -22,11 +22,7 @@ import functools from typing import Any, Union, cast -from bson.codec_options import ( - DEFAULT_CODEC_OPTIONS, - CodecOptions, - DatetimeConversionOpts, -) +from bson.codec_options import DEFAULT_CODEC_OPTIONS, CodecOptions, DatetimeConversion from bson.tz_util import utc EPOCH_AWARE = datetime.datetime.fromtimestamp(0, utc) @@ -127,14 +123,14 @@ def _max_datetime_ms(tz=datetime.timezone.utc): def _millis_to_datetime(millis: int, opts: CodecOptions) -> Union[datetime.datetime, DatetimeMS]: """Convert milliseconds since epoch UTC to datetime.""" if ( - opts.datetime_conversion == DatetimeConversionOpts.DATETIME - or opts.datetime_conversion == DatetimeConversionOpts.DATETIME_CLAMP - or opts.datetime_conversion == DatetimeConversionOpts.DATETIME_AUTO + opts.datetime_conversion == DatetimeConversion.DATETIME + or opts.datetime_conversion == DatetimeConversion.DATETIME_CLAMP + or opts.datetime_conversion == DatetimeConversion.DATETIME_AUTO ): tz = opts.tzinfo or datetime.timezone.utc - if opts.datetime_conversion == DatetimeConversionOpts.DATETIME_CLAMP: + if opts.datetime_conversion == DatetimeConversion.DATETIME_CLAMP: millis = max(_min_datetime_ms(tz), min(millis, _max_datetime_ms(tz))) - elif opts.datetime_conversion == DatetimeConversionOpts.DATETIME_AUTO: + elif opts.datetime_conversion == DatetimeConversion.DATETIME_AUTO: if not (_min_datetime_ms(tz) <= millis <= _max_datetime_ms(tz)): return DatetimeMS(millis) @@ -149,10 +145,10 @@ def _millis_to_datetime(millis: int, opts: CodecOptions) -> Union[datetime.datet return dt else: return EPOCH_NAIVE + datetime.timedelta(seconds=seconds, microseconds=micros) - elif opts.datetime_conversion == DatetimeConversionOpts.DATETIME_MS: + elif opts.datetime_conversion == DatetimeConversion.DATETIME_MS: return DatetimeMS(millis) else: - raise ValueError("datetime_conversion must be an element of DatetimeConversionOpts") + raise ValueError("datetime_conversion must be an element of DatetimeConversion") def _datetime_to_millis(dtm: datetime.datetime) -> int: diff --git a/bson/json_util.py b/bson/json_util.py index 0b5494e85c..517adff4e0 100644 --- a/bson/json_util.py +++ b/bson/json_util.py @@ -96,7 +96,7 @@ from bson.binary import ALL_UUID_SUBTYPES, UUID_SUBTYPE, Binary, UuidRepresentation from bson.code import Code -from bson.codec_options import CodecOptions, DatetimeConversionOpts +from bson.codec_options import CodecOptions, DatetimeConversion from bson.datetime_ms import ( EPOCH_AWARE, DatetimeMS, @@ -662,12 +662,12 @@ def _parse_canonical_datetime( if json_options.tz_aware: if json_options.tzinfo: aware = aware.astimezone(json_options.tzinfo) - if json_options.datetime_conversion == DatetimeConversionOpts.DATETIME_MS: + if json_options.datetime_conversion == DatetimeConversion.DATETIME_MS: return DatetimeMS(aware) return aware else: aware_tzinfo_none = aware.replace(tzinfo=None) - if json_options.datetime_conversion == DatetimeConversionOpts.DATETIME_MS: + if json_options.datetime_conversion == DatetimeConversion.DATETIME_MS: return DatetimeMS(aware_tzinfo_none) return aware_tzinfo_none return _millis_to_datetime(int(dtm), json_options) diff --git a/doc/examples/datetimes.rst b/doc/examples/datetimes.rst index f965b9f58c..3b30000ffc 100644 --- a/doc/examples/datetimes.rst +++ b/doc/examples/datetimes.rst @@ -119,15 +119,15 @@ of milliseconds from the Unix epoch. 
To deal with this, we can use the To decode UTC datetime values as :class:`~bson.datetime_ms.DatetimeMS`, :class:`~bson.codec_options.CodecOptions` should have its ``datetime_conversion`` parameter set to one of the options available in -:class:`bson.datetime_ms.DatetimeConversionOpts`. These include -:attr:`~bson.datetime_ms.DatetimeConversionOpts.DATETIME`, -:attr:`~bson.datetime_ms.DatetimeConversionOpts.DATETIME_MS`, -:attr:`~bson.datetime_ms.DatetimeConversionOpts.DATETIME_AUTO`, -:attr:`~bson.datetime_ms.DatetimeConversionOpts.DATETIME_CLAMP`. -:attr:`~bson.datetime_ms.DatetimeConversionOpts.DATETIME` is the default +:class:`bson.datetime_ms.DatetimeConversion`. These include +:attr:`~bson.datetime_ms.DatetimeConversion.DATETIME`, +:attr:`~bson.datetime_ms.DatetimeConversion.DATETIME_MS`, +:attr:`~bson.datetime_ms.DatetimeConversion.DATETIME_AUTO`, +:attr:`~bson.datetime_ms.DatetimeConversion.DATETIME_CLAMP`. +:attr:`~bson.datetime_ms.DatetimeConversion.DATETIME` is the default option and has the behavior of raising an :class:`~builtin.OverflowError` upon attempting to decode an out-of-range date. -:attr:`~bson.datetime_ms.DatetimeConversionOpts.DATETIME_MS` will only return +:attr:`~bson.datetime_ms.DatetimeConversion.DATETIME_MS` will only return :class:`~bson.datetime_ms.DatetimeMS` objects, regardless of whether the represented datetime is in- or out-of-range: @@ -136,13 +136,13 @@ represented datetime is in- or out-of-range: >>> from datetime import datetime >>> from bson import encode, decode >>> from bson.datetime_ms import DatetimeMS - >>> from bson.codec_options import CodecOptions, DatetimeConversionOpts + >>> from bson.codec_options import CodecOptions, DatetimeConversion >>> x = encode({"x": datetime(1970, 1, 1)}) - >>> codec_ms = CodecOptions(datetime_conversion=DatetimeConversionOpts.DATETIME_MS) + >>> codec_ms = CodecOptions(datetime_conversion=DatetimeConversion.DATETIME_MS) >>> decode(x, codec_options=codec_ms) {'x': DatetimeMS(0)} -:attr:`~bson.datetime_ms.DatetimeConversionOpts.DATETIME_AUTO` will return +:attr:`~bson.datetime_ms.DatetimeConversion.DATETIME_AUTO` will return :class:`~datetime.datetime` if the underlying UTC datetime is within range, or :class:`~bson.datetime_ms.DatetimeMS` if the underlying datetime cannot be represented using the builtin Python :class:`~datetime.datetime`: @@ -151,13 +151,13 @@ cannot be represented using the builtin Python :class:`~datetime.datetime`: >>> x = encode({"x": datetime(1970, 1, 1)}) >>> y = encode({"x": DatetimeMS(-2**62)}) - >>> codec_auto = CodecOptions(datetime_conversion=DatetimeConversionOpts.DATETIME_AUTO) + >>> codec_auto = CodecOptions(datetime_conversion=DatetimeConversion.DATETIME_AUTO) >>> decode(x, codec_options=codec_auto) {'x': datetime.datetime(1970, 1, 1, 0, 0)} >>> decode(y, codec_options=codec_auto) {'x': DatetimeMS(-4611686018427387904)} -:attr:`~bson.datetime_ms.DatetimeConversionOpts.DATETIME_CLAMP` will clamp +:attr:`~bson.datetime_ms.DatetimeConversion.DATETIME_CLAMP` will clamp resulting :class:`~datetime.datetime` objects to be within :attr:`~datetime.datetime.min` and :attr:`~datetime.datetime.max` (trimmed to `999000` microseconds): @@ -166,7 +166,7 @@ resulting :class:`~datetime.datetime` objects to be within >>> x = encode({"x": DatetimeMS(2**62)}) >>> y = encode({"x": DatetimeMS(-2**62)}) - >>> codec_clamp = CodecOptions(datetime_conversion=DatetimeConversionOpts.DATETIME_CLAMP) + >>> codec_clamp = CodecOptions(datetime_conversion=DatetimeConversion.DATETIME_CLAMP) >>> decode(x, 
codec_options=codec_clamp) {'x': datetime.datetime(9999, 12, 31, 23, 59, 59, 999000)} >>> decode(y, codec_options=codec_clamp) diff --git a/doc/faq.rst b/doc/faq.rst index 5eb39c4276..c48dd316e5 100644 --- a/doc/faq.rst +++ b/doc/faq.rst @@ -264,7 +264,7 @@ collection, configured to use :class:`~bson.son.SON` instead of dict: >>> from bson import CodecOptions, SON >>> opts = CodecOptions(document_class=SON) >>> opts - CodecOptions(document_class=...SON..., tz_aware=False, uuid_representation=UuidRepresentation.UNSPECIFIED, unicode_decode_error_handler='strict', tzinfo=None, type_registry=TypeRegistry(type_codecs=[], fallback_encoder=None), datetime_conversion=DatetimeConversionOpts.DATETIME) + CodecOptions(document_class=...SON..., tz_aware=False, uuid_representation=UuidRepresentation.UNSPECIFIED, unicode_decode_error_handler='strict', tzinfo=None, type_registry=TypeRegistry(type_codecs=[], fallback_encoder=None), datetime_conversion=DatetimeConversion.DATETIME) >>> collection_son = collection.with_options(codec_options=opts) Now, documents and subdocuments in query results are represented with @@ -495,10 +495,10 @@ be specified using the ``datetime_conversion`` parameter of :class:`~bson.codec_options.CodecOptions`. The default option is -:attr:`~bson.codec_options.DatetimeConversionOpts.DATETIME`, which will +:attr:`~bson.codec_options.DatetimeConversion.DATETIME`, which will attempt to decode as a :class:`datetime.datetime`, allowing :class:`~builtin.OverflowError` to occur upon out-of-range dates. -:attr:`~bson.codec_options.DatetimeConversionOpts.DATETIME_AUTO` alters +:attr:`~bson.codec_options.DatetimeConversion.DATETIME_AUTO` alters this behavior to instead return :class:`~bson.datetime_ms.DatetimeMS` when representations are out-of-range, while returning :class:`~datetime.datetime` objects as before: @@ -507,9 +507,9 @@ objects as before: >>> from datetime import datetime >>> from bson.datetime_ms import DatetimeMS - >>> from bson.codec_options import DatetimeConversionOpts + >>> from bson.codec_options import DatetimeConversion >>> from pymongo import MongoClient - >>> client = MongoClient(datetime_conversion=DatetimeConversionOpts.DATETIME_AUTO) + >>> client = MongoClient(datetime_conversion=DatetimeConversion.DATETIME_AUTO) >>> client.db.collection.insert_one({"x": datetime(1970, 1, 1)}) >>> client.db.collection.insert_one({"x": DatetimeMS(2**62)}) @@ -520,7 +520,7 @@ objects as before: {'_id': ObjectId('...'), 'x': DatetimeMS(4611686018427387904)} For other options, please refer to -:class:`~bson.codec_options.DatetimeConversionOpts`. +:class:`~bson.codec_options.DatetimeConversion`. 
Another option that does not involve setting `datetime_conversion` is to
 filter out documents with values outside of the range supported by
 :class:`~datetime.datetime`:
 
     >>> from datetime import datetime
     >>> coll = client.test.dates
diff --git a/pymongo/common.py b/pymongo/common.py
index 319b07193c..add70cfb5f 100644
--- a/pymongo/common.py
+++ b/pymongo/common.py
@@ -36,7 +36,7 @@
 from bson import SON
 from bson.binary import UuidRepresentation
-from bson.codec_options import CodecOptions, DatetimeConversionOpts, TypeRegistry
+from bson.codec_options import CodecOptions, DatetimeConversion, TypeRegistry
 from bson.raw_bson import RawBSONDocument
 from pymongo.auth import MECHANISMS
 from pymongo.compression_support import (
@@ -620,19 +620,19 @@ def validate_auto_encryption_opts_or_none(option: Any, value: Any) -> Optional[A
     return value
 
 
-def validate_datetime_conversion(option: Any, value: Any) -> Optional[DatetimeConversionOpts]:
-    """Validate a DatetimeConversionOpts string."""
+def validate_datetime_conversion(option: Any, value: Any) -> Optional[DatetimeConversion]:
+    """Validate a DatetimeConversion string."""
     if value is None:
-        return DatetimeConversionOpts.DATETIME
+        return DatetimeConversion.DATETIME
 
     if isinstance(value, str):
         if value.isdigit():
-            return DatetimeConversionOpts(int(value))
-        return DatetimeConversionOpts[value]
+            return DatetimeConversion(int(value))
+        return DatetimeConversion[value]
     elif isinstance(value, int):
-        return DatetimeConversionOpts(value)
+        return DatetimeConversion(value)
 
-    raise TypeError("%s must be a str or int representing DatetimeConversionOpts" % (option,))
+    raise TypeError("%s must be a str or int representing DatetimeConversion" % (option,))
 
 
 # Dictionary where keys are the names of public URI options, and values
diff --git a/test/test_bson.py b/test/test_bson.py
index 7fe0c168c6..e3c4a3a028 100644
--- a/test/test_bson.py
+++ b/test/test_bson.py
@@ -50,7 +50,7 @@
 )
 from bson.binary import Binary, UuidRepresentation
 from bson.code import Code
-from bson.codec_options import CodecOptions, DatetimeConversionOpts
+from bson.codec_options import CodecOptions, DatetimeConversion
 from bson.dbref import DBRef
 from bson.errors import InvalidBSON, InvalidDocument
 from bson.int64 import Int64
@@ -981,7 +981,7 @@ def test_codec_options_repr(self):
             "unicode_decode_error_handler='strict', "
             "tzinfo=None, type_registry=TypeRegistry(type_codecs=[], "
             "fallback_encoder=None), "
-            "datetime_conversion=DatetimeConversionOpts.DATETIME)"
+            "datetime_conversion=DatetimeConversion.DATETIME)"
         )
         self.assertEqual(r, repr(CodecOptions()))
 
@@ -1189,14 +1189,14 @@ def test_class_conversions(self):
         self.assertNotEqual(type(dtr1), type(dec1["x"]))
 
         # Test encode and decode with codec options. Expect: UTCDateimteRaw => DatetimeMS
-        opts1 = CodecOptions(datetime_conversion=DatetimeConversionOpts.DATETIME_MS)
+        opts1 = CodecOptions(datetime_conversion=DatetimeConversion.DATETIME_MS)
         enc1 = encode({"x": dtr1})
         dec1 = decode(enc1, opts1)
         self.assertEqual(type(dtr1), type(dec1["x"]))
         self.assertEqual(dtr1, dec1["x"])
 
         # Expect: datetime => DatetimeMS
-        opts1 = CodecOptions(datetime_conversion=DatetimeConversionOpts.DATETIME_MS)
+        opts1 = CodecOptions(datetime_conversion=DatetimeConversion.DATETIME_MS)
         dt1 = datetime.datetime(1970, 1, 1, tzinfo=datetime.timezone.utc)
         enc1 = encode({"x": dt1})
         dec1 = decode(enc1, opts1)
@@ -1206,7 +1206,7 @@ def test_class_conversions(self):
 
     def test_clamping(self):
         # Test clamping from below and above.
opts1 = CodecOptions( - datetime_conversion=DatetimeConversionOpts.DATETIME_CLAMP, + datetime_conversion=DatetimeConversion.DATETIME_CLAMP, tz_aware=True, tzinfo=datetime.timezone.utc, ) @@ -1225,9 +1225,7 @@ def test_clamping(self): def test_tz_clamping(self): # Naive clamping to local tz. - opts1 = CodecOptions( - datetime_conversion=DatetimeConversionOpts.DATETIME_CLAMP, tz_aware=False - ) + opts1 = CodecOptions(datetime_conversion=DatetimeConversion.DATETIME_CLAMP, tz_aware=False) below = encode({"x": DatetimeMS(_datetime_to_millis(datetime.datetime.min) - 24 * 60 * 60)}) dec_below = decode(below, opts1) @@ -1241,9 +1239,7 @@ def test_tz_clamping(self): ) # Aware clamping. - opts2 = CodecOptions( - datetime_conversion=DatetimeConversionOpts.DATETIME_CLAMP, tz_aware=True - ) + opts2 = CodecOptions(datetime_conversion=DatetimeConversion.DATETIME_CLAMP, tz_aware=True) below = encode({"x": DatetimeMS(_datetime_to_millis(datetime.datetime.min) - 24 * 60 * 60)}) dec_below = decode(below, opts2) self.assertEqual( @@ -1259,7 +1255,7 @@ def test_tz_clamping(self): def test_datetime_auto(self): # Naive auto, in range. - opts1 = CodecOptions(datetime_conversion=DatetimeConversionOpts.DATETIME_AUTO) + opts1 = CodecOptions(datetime_conversion=DatetimeConversion.DATETIME_AUTO) inr = encode({"x": datetime.datetime(1970, 1, 1)}, codec_options=opts1) dec_inr = decode(inr) self.assertEqual(dec_inr["x"], datetime.datetime(1970, 1, 1)) @@ -1281,7 +1277,7 @@ def test_datetime_auto(self): # Aware auto, in range. opts2 = CodecOptions( - datetime_conversion=DatetimeConversionOpts.DATETIME_AUTO, + datetime_conversion=DatetimeConversion.DATETIME_AUTO, tz_aware=True, tzinfo=datetime.timezone.utc, ) diff --git a/test/test_client.py b/test/test_client.py index f520043ecf..7e7e14c0e5 100644 --- a/test/test_client.py +++ b/test/test_client.py @@ -67,7 +67,7 @@ from bson import encode from bson.codec_options import ( CodecOptions, - DatetimeConversionOpts, + DatetimeConversion, TypeEncoder, TypeRegistry, ) @@ -412,17 +412,15 @@ def test_uri_codec_options(self): ) self.assertEqual(c.codec_options.unicode_decode_error_handler, unicode_decode_error_handler) self.assertEqual( - c.codec_options.datetime_conversion, DatetimeConversionOpts[datetime_conversion] + c.codec_options.datetime_conversion, DatetimeConversion[datetime_conversion] ) # Change the passed datetime_conversion to a number and re-assert. 
- uri = uri.replace( - datetime_conversion, f"{int(DatetimeConversionOpts[datetime_conversion])}" - ) + uri = uri.replace(datetime_conversion, f"{int(DatetimeConversion[datetime_conversion])}") c = MongoClient(uri, connect=False) self.assertEqual( - c.codec_options.datetime_conversion, DatetimeConversionOpts[datetime_conversion] + c.codec_options.datetime_conversion, DatetimeConversion[datetime_conversion] ) def test_uri_option_precedence(self): diff --git a/test/test_json_util.py b/test/test_json_util.py index 576746e865..08ee63618f 100644 --- a/test/test_json_util.py +++ b/test/test_json_util.py @@ -21,7 +21,7 @@ import uuid from typing import Any, List, MutableMapping -from bson.codec_options import CodecOptions, DatetimeConversionOpts +from bson.codec_options import CodecOptions, DatetimeConversion sys.path[0:0] = [""] @@ -295,7 +295,7 @@ def test_datetime_ms(self): dat_max = {"x": DatetimeMS(_max_datetime_ms()).as_datetime(CodecOptions(tz_aware=False))} opts = JSONOptions( datetime_representation=DatetimeRepresentation.ISO8601, - datetime_conversion=DatetimeConversionOpts.DATETIME_MS, + datetime_conversion=DatetimeConversion.DATETIME_MS, ) self.assertEqual( From 3204290e93594f8ffe537ca5e7ed071d4c13056e Mon Sep 17 00:00:00 2001 From: Ben Warner Date: Thu, 4 Aug 2022 16:58:56 -0700 Subject: [PATCH 0233/1588] PYTHON-2484 Added lock sanitization for MongoClient and ObjectId (#985) --- bson/__init__.py | 14 +++++ pymongo/cursor.py | 4 +- pymongo/lock.py | 39 ++++++++++++++ pymongo/mongo_client.py | 54 +++++++++++++++---- pymongo/monitor.py | 4 +- pymongo/ocsp_cache.py | 5 +- pymongo/periodic_executor.py | 5 +- pymongo/pool.py | 3 +- pymongo/topology.py | 17 +++--- test/test_fork.py | 100 +++++++++++++++++++++++++++++++++++ test/utils.py | 3 +- 11 files changed, 219 insertions(+), 29 deletions(-) create mode 100644 pymongo/lock.py create mode 100644 test/test_fork.py diff --git a/bson/__init__.py b/bson/__init__.py index b43c686de8..dc2e29238a 100644 --- a/bson/__init__.py +++ b/bson/__init__.py @@ -56,6 +56,7 @@ import datetime import itertools +import os import re import struct import sys @@ -1336,3 +1337,16 @@ def decode(self, codec_options: "CodecOptions[_DocumentType]" = DEFAULT_CODEC_OP def has_c() -> bool: """Is the C extension installed?""" return _USE_C + + +def _after_fork(): + """Releases the ObjectID lock child.""" + if ObjectId._inc_lock.locked(): + ObjectId._inc_lock.release() + + +if hasattr(os, "register_at_fork"): + # This will run in the same thread as the fork was called. + # If we fork in a critical region on the same thread, it should break. + # This is fine since we would never call fork directly from a critical region. 
+ os.register_at_fork(after_in_child=_after_fork) diff --git a/pymongo/cursor.py b/pymongo/cursor.py index 2a85f1d82a..658c4276ef 100644 --- a/pymongo/cursor.py +++ b/pymongo/cursor.py @@ -14,7 +14,6 @@ """Cursor class to iterate over Mongo query results.""" import copy -import threading import warnings from collections import deque from typing import ( @@ -45,6 +44,7 @@ validate_is_mapping, ) from pymongo.errors import ConnectionFailure, InvalidOperation, OperationFailure +from pymongo.lock import _create_lock from pymongo.message import ( _CursorAddress, _GetMore, @@ -133,7 +133,7 @@ def __init__(self, sock, more_to_come): self.sock = sock self.more_to_come = more_to_come self.closed = False - self.lock = threading.Lock() + self.lock = _create_lock() def update_exhaust(self, more_to_come): self.more_to_come = more_to_come diff --git a/pymongo/lock.py b/pymongo/lock.py new file mode 100644 index 0000000000..b7c01f56b7 --- /dev/null +++ b/pymongo/lock.py @@ -0,0 +1,39 @@ +# Copyright 2022-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import os +import threading +import weakref + +_HAS_REGISTER_AT_FORK = hasattr(os, "register_at_fork") + +# References to instances of _create_lock +_forkable_locks: weakref.WeakSet = weakref.WeakSet() + + +def _create_lock(): + """Represents a lock that is tracked upon instantiation using a WeakSet and + reset by pymongo upon forking. + """ + lock = threading.Lock() + if _HAS_REGISTER_AT_FORK: + _forkable_locks.add(lock) + return lock + + +def _release_locks() -> None: + # Completed the fork, reset all the locks in the child. + for lock in _forkable_locks: + if lock.locked(): + lock.release() diff --git a/pymongo/mongo_client.py b/pymongo/mongo_client.py index fd4c0e84bc..c8330f32d0 100644 --- a/pymongo/mongo_client.py +++ b/pymongo/mongo_client.py @@ -32,7 +32,7 @@ """ import contextlib -import threading +import os import weakref from collections import defaultdict from typing import ( @@ -82,6 +82,7 @@ ServerSelectionTimeoutError, WaitQueueTimeoutError, ) +from pymongo.lock import _create_lock, _release_locks from pymongo.pool import ConnectionClosedReason from pymongo.read_preferences import ReadPreference, _ServerMode from pymongo.server_selectors import writable_server_selector @@ -126,6 +127,7 @@ class MongoClient(common.BaseObject, Generic[_DocumentType]): # Define order to retrieve options from ClientOptions for __repr__. # No host/port; these are retrieved from TopologySettings. 
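Taken together, lock.py gives PyMongo one choke point for lock creation, and
the hook this same patch registers in mongo_client.py below releases
everything in the child. A small demonstration of the effect, leaning on the
internals shown above (assumes PyMongo 4.3+ on a POSIX platform where
os.register_at_fork exists):

    import os

    import pymongo  # Importing pymongo registers the after-fork hook.
    from pymongo.lock import _create_lock

    lock = _create_lock()
    lock.acquire()
    pid = os.fork()
    if pid == 0:
        # Child: the after_in_child hook has already released the lock.
        assert not lock.locked()
        os._exit(0)
    os.waitpid(pid, 0)
    lock.release()  # The parent still holds it.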
_constructor_args = ("document_class", "tz_aware", "connect") + _clients: weakref.WeakValueDictionary = weakref.WeakValueDictionary() def __init__( self, @@ -788,7 +790,7 @@ def __init__( self.__options = options = ClientOptions(username, password, dbase, opts) self.__default_database_name = dbase - self.__lock = threading.Lock() + self.__lock = _create_lock() self.__kill_cursors_queue: List = [] self._event_listeners = options.pool_options._event_listeners @@ -817,6 +819,23 @@ def __init__( srv_max_hosts=srv_max_hosts, ) + self._init_background() + + if connect: + self._get_topology() + + self._encrypter = None + if self.__options.auto_encryption_opts: + from pymongo.encryption import _Encrypter + + self._encrypter = _Encrypter(self, self.__options.auto_encryption_opts) + self._timeout = self.__options.timeout + + # Add this client to the list of weakly referenced items. + # This will be used later if we fork. + MongoClient._clients[self._topology._topology_id] = self + + def _init_background(self): self._topology = Topology(self._topology_settings) def target(): @@ -838,15 +857,9 @@ def target(): self_ref: Any = weakref.ref(self, executor.close) self._kill_cursors_executor = executor - if connect: - self._get_topology() - - self._encrypter = None - if self.__options.auto_encryption_opts: - from pymongo.encryption import _Encrypter - - self._encrypter = _Encrypter(self, self.__options.auto_encryption_opts) - self._timeout = options.timeout + def _after_fork(self): + """Resets topology in a child after successfully forking.""" + self._init_background() def _duplicate(self, **kwargs): args = self.__init_kwargs.copy() @@ -2150,3 +2163,22 @@ def __enter__(self): def __exit__(self, exc_type, exc_val, exc_tb): return self.handle(exc_type, exc_val) + + +def _after_fork_child(): + """Releases the locks in child process and resets the + topologies in all MongoClients. + """ + # Reinitialize locks + _release_locks() + + # Perform cleanup in clients (i.e. get rid of topology) + for _, client in MongoClient._clients.items(): + client._after_fork() + + +if hasattr(os, "register_at_fork"): + # This will run in the same thread as the fork was called. + # If we fork in a critical region on the same thread, it should break. + # This is fine since we would never call fork directly from a critical region. 
+ os.register_at_fork(after_in_child=_after_fork_child) diff --git a/pymongo/monitor.py b/pymongo/monitor.py index 844ad02262..b7d2b19118 100644 --- a/pymongo/monitor.py +++ b/pymongo/monitor.py @@ -15,7 +15,6 @@ """Class to monitor a MongoDB server on a background thread.""" import atexit -import threading import time import weakref from typing import Any, Mapping, cast @@ -23,6 +22,7 @@ from pymongo import common, periodic_executor from pymongo.errors import NotPrimaryError, OperationFailure, _OperationCancelled from pymongo.hello import Hello +from pymongo.lock import _create_lock from pymongo.periodic_executor import _shutdown_executors from pymongo.read_preferences import MovingAverage from pymongo.server_description import ServerDescription @@ -350,7 +350,7 @@ def __init__(self, topology, topology_settings, pool): self._pool = pool self._moving_average = MovingAverage() - self._lock = threading.Lock() + self._lock = _create_lock() def close(self): self.gc_safe_close() diff --git a/pymongo/ocsp_cache.py b/pymongo/ocsp_cache.py index 24507260ed..389ee09ce7 100644 --- a/pymongo/ocsp_cache.py +++ b/pymongo/ocsp_cache.py @@ -16,7 +16,8 @@ from collections import namedtuple from datetime import datetime as _datetime -from threading import Lock + +from pymongo.lock import _create_lock class _OCSPCache(object): @@ -30,7 +31,7 @@ class _OCSPCache(object): def __init__(self): self._data = {} # Hold this lock when accessing _data. - self._lock = Lock() + self._lock = _create_lock() def _get_cache_key(self, ocsp_request): return self.CACHE_KEY_TYPE( diff --git a/pymongo/periodic_executor.py b/pymongo/periodic_executor.py index 2c3727a7a3..95e7830674 100644 --- a/pymongo/periodic_executor.py +++ b/pymongo/periodic_executor.py @@ -19,6 +19,8 @@ import weakref from typing import Any, Optional +from pymongo.lock import _create_lock + class PeriodicExecutor(object): def __init__(self, interval, min_interval, target, name=None): @@ -45,9 +47,8 @@ def __init__(self, interval, min_interval, target, name=None): self._thread: Optional[threading.Thread] = None self._name = name self._skip_sleep = False - self._thread_will_exit = False - self._lock = threading.Lock() + self._lock = _create_lock() def __repr__(self): return "<%s(name=%s) object at 0x%x>" % (self.__class__.__name__, self._name, id(self)) diff --git a/pymongo/pool.py b/pymongo/pool.py index 1fab98209f..6355692ac9 100644 --- a/pymongo/pool.py +++ b/pymongo/pool.py @@ -56,6 +56,7 @@ _CertificateError, ) from pymongo.hello import Hello, HelloCompat +from pymongo.lock import _create_lock from pymongo.monitoring import ConnectionCheckOutFailedReason, ConnectionClosedReason from pymongo.network import command, receive_message from pymongo.read_preferences import ReadPreference @@ -1152,7 +1153,7 @@ def __init__(self, address, options, handshake=True): # and returned to pool from the left side. Stale sockets removed # from the right side. self.sockets: collections.deque = collections.deque() - self.lock = threading.Lock() + self.lock = _create_lock() self.active_sockets = 0 # Monotonically increasing connection ID required for CMAP Events. 
self.next_connection_id = 1 diff --git a/pymongo/topology.py b/pymongo/topology.py index 6781a9e549..84975ca076 100644 --- a/pymongo/topology.py +++ b/pymongo/topology.py @@ -17,7 +17,6 @@ import os import queue import random -import threading import time import warnings import weakref @@ -37,6 +36,7 @@ WriteError, ) from pymongo.hello import Hello +from pymongo.lock import _HAS_REGISTER_AT_FORK, _create_lock from pymongo.monitor import SrvMonitor from pymongo.pool import PoolOptions from pymongo.server import Server @@ -127,7 +127,7 @@ def __init__(self, topology_settings): self._seed_addresses = list(topology_description.server_descriptions()) self._opened = False self._closed = False - self._lock = threading.Lock() + self._lock = _create_lock() self._condition = self._settings.condition_class(self._lock) self._servers = {} self._pid = None @@ -174,12 +174,13 @@ def open(self): self._pid = pid elif pid != self._pid: self._pid = pid - warnings.warn( - "MongoClient opened before fork. Create MongoClient only " - "after forking. See PyMongo's documentation for details: " - "https://pymongo.readthedocs.io/en/stable/faq.html#" - "is-pymongo-fork-safe" - ) + if not _HAS_REGISTER_AT_FORK: + warnings.warn( + "MongoClient opened before fork. May not be entirely fork-safe, " + "proceed with caution. See PyMongo's documentation for details: " + "https://pymongo.readthedocs.io/en/stable/faq.html#" + "is-pymongo-fork-safe" + ) with self._lock: # Close servers and clear the pools. for server in self._servers.values(): diff --git a/test/test_fork.py b/test/test_fork.py new file mode 100644 index 0000000000..7180e1a239 --- /dev/null +++ b/test/test_fork.py @@ -0,0 +1,100 @@ +# Copyright 2022-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Test that pymongo is fork safe.""" + +import os +from multiprocessing import Pipe +from test import IntegrationTest, client_context +from unittest import skipIf + +from bson.objectid import ObjectId + + +@client_context.require_connection +def setUpModule(): + pass + + +# Not available for versions of Python without "register_at_fork" +@skipIf( + not hasattr(os, "register_at_fork"), "register_at_fork not available in this version of Python" +) +class TestFork(IntegrationTest): + def test_lock_client(self): + """ + Forks the client with some items locked. + Parent => All locks should be as before the fork. + Child => All locks should be reset. + """ + + def exit_cond(): + self.client.admin.command("ping") + return 0 + + with self.client._MongoClient__lock: + # Call _get_topology, will launch a thread to fork upon __enter__ing + # the with region. + lock_pid = os.fork() + # The POSIX standard states only the forking thread is cloned. + # In the parent, it'll return here. + # In the child, it'll end with the calling thread. + if lock_pid == 0: + os._exit(exit_cond()) + else: + self.assertEqual(0, os.waitpid(lock_pid, 0)[1] >> 8) + + def test_lock_object_id(self): + """ + Forks the client with ObjectId's _inc_lock locked. 
+ Parent => _inc_lock should remain locked. + Child => _inc_lock should be unlocked. + """ + with ObjectId._inc_lock: + lock_pid: int = os.fork() + + if lock_pid == 0: + os._exit(int(ObjectId._inc_lock.locked())) + else: + self.assertEqual(0, os.waitpid(lock_pid, 0)[1] >> 8) + + def test_topology_reset(self): + """ + Tests that topologies are different from each other. + Cannot use ID because virtual memory addresses may be the same. + Cannot reinstantiate ObjectId in the topology settings. + Relies on difference in PID when opened again. + """ + parent_conn, child_conn = Pipe() + init_id = self.client._topology._pid + parent_cursor_exc = self.client._kill_cursors_executor + lock_pid: int = os.fork() + + if lock_pid == 0: # Child + self.client.admin.command("ping") + child_conn.send(self.client._topology._pid) + child_conn.send( + ( + parent_cursor_exc != self.client._kill_cursors_executor, + "client._kill_cursors_executor was not reinitialized", + ) + ) + os._exit(0) + else: # Parent + self.assertEqual(0, os.waitpid(lock_pid, 0)[1] >> 8) + self.assertEqual(self.client._topology._pid, init_id) + child_id = parent_conn.recv() + self.assertNotEqual(child_id, init_id) + passed, msg = parent_conn.recv() + self.assertTrue(passed, msg) diff --git a/test/utils.py b/test/utils.py index 29ee1ca477..73003585c3 100644 --- a/test/utils.py +++ b/test/utils.py @@ -38,6 +38,7 @@ from pymongo.cursor import CursorType from pymongo.errors import ConfigurationError, OperationFailure from pymongo.hello import HelloCompat +from pymongo.lock import _create_lock from pymongo.monitoring import ( _SENSITIVE_COMMANDS, ConnectionCheckedInEvent, @@ -279,7 +280,7 @@ def __exit__(self, exc_type, exc_val, exc_tb): class MockPool(object): def __init__(self, address, options, handshake=True): self.gen = _PoolGeneration() - self._lock = threading.Lock() + self._lock = _create_lock() self.opts = options self.operation_count = 0 From c0dadcb6ca77177db7120c0c5712f251ea637d84 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Fri, 12 Aug 2022 13:53:07 -0500 Subject: [PATCH 0234/1588] PYTHON-3385 Add prose test for RewrapManyDataKey (#1034) --- test/test_encryption.py | 82 ++++++++++++++++++++++++++++++++++++++++- 1 file changed, 81 insertions(+), 1 deletion(-) diff --git a/test/test_encryption.py b/test/test_encryption.py index 94a588bd6a..00f76b7c95 100644 --- a/test/test_encryption.py +++ b/test/test_encryption.py @@ -24,7 +24,7 @@ import textwrap import traceback import uuid -from typing import Any, Dict +from typing import Any, Dict, Mapping from pymongo.collection import Collection @@ -2202,6 +2202,86 @@ def test_05_roundtrip_encrypted_unindexed(self): self.assertEqual(decrypted, val) +# https://github.com/mongodb/specifications/blob/072601/source/client-side-encryption/tests/README.rst#rewrap +class TestRewrapWithSeparateClientEncryption(EncryptionIntegrationTest): + + MASTER_KEYS: Mapping[str, Mapping[str, Any]] = { + "aws": { + "region": "us-east-1", + "key": "arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0", + }, + "azure": { + "keyVaultEndpoint": "key-vault-csfle.vault.azure.net", + "keyName": "key-name-csfle", + }, + "gcp": { + "projectId": "devprod-drivers", + "location": "global", + "keyRing": "key-ring-csfle", + "keyName": "key-name-csfle", + }, + "kmip": {}, + "local": {}, + } + + def test_rewrap(self): + for src_provider in self.MASTER_KEYS: + for dst_provider in self.MASTER_KEYS: + with self.subTest(src_provider=src_provider, dst_provider=dst_provider): + 
                    self.run_test(src_provider, dst_provider)
+
+    def run_test(self, src_provider, dst_provider):
+        # Step 1. Drop the collection ``keyvault.datakeys``.
+        self.client.keyvault.drop_collection("datakeys")
+
+        # Step 2. Create a ``ClientEncryption`` object named ``client_encryption1``
+        client_encryption1 = ClientEncryption(
+            key_vault_client=self.client,
+            key_vault_namespace="keyvault.datakeys",
+            kms_providers=ALL_KMS_PROVIDERS,
+            kms_tls_options=KMS_TLS_OPTS,
+            codec_options=OPTS,
+        )
+        self.addCleanup(client_encryption1.close)
+
+        # Step 3. Call ``client_encryption1.create_data_key`` with ``src_provider``.
+        key_id = client_encryption1.create_data_key(
+            master_key=self.MASTER_KEYS[src_provider], kms_provider=src_provider
+        )
+
+        # Step 4. Call ``client_encryption1.encrypt`` with the value "test"
+        cipher_text = client_encryption1.encrypt(
+            "test", key_id=key_id, algorithm=Algorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic
+        )
+
+        # Step 5. Create a ``ClientEncryption`` object named ``client_encryption2``
+        client2 = MongoClient()
+        self.addCleanup(client2.close)
+        client_encryption2 = ClientEncryption(
+            key_vault_client=client2,
+            key_vault_namespace="keyvault.datakeys",
+            kms_providers=ALL_KMS_PROVIDERS,
+            kms_tls_options=KMS_TLS_OPTS,
+            codec_options=OPTS,
+        )
+        self.addCleanup(client_encryption2.close)
+
+        # Step 6. Call ``client_encryption2.rewrap_many_data_key`` with an empty ``filter``.
+        rewrap_many_data_key_result = client_encryption2.rewrap_many_data_key(
+            {}, provider=dst_provider, master_key=self.MASTER_KEYS[dst_provider]
+        )
+
+        self.assertEqual(rewrap_many_data_key_result.bulk_write_result.modified_count, 1)
+
+        # Step 7. Call ``client_encryption1.decrypt`` with the ``cipher_text``. Assert the return value is "test".
+        decrypt_result1 = client_encryption1.decrypt(cipher_text)
+        self.assertEqual(decrypt_result1, "test")
+
+        # Step 8. Call ``client_encryption2.decrypt`` with the ``cipher_text``. Assert the return value is "test".
+        decrypt_result2 = client_encryption2.decrypt(cipher_text)
+        self.assertEqual(decrypt_result2, "test")
+
+
 class TestQueryableEncryptionDocsExample(EncryptionIntegrationTest):
     # Queryable Encryption is not supported on Standalone topology.
@client_context.require_no_standalone From a20ff68d51734d272542d03329b657101a093806 Mon Sep 17 00:00:00 2001 From: Ben Warner Date: Mon, 15 Aug 2022 12:07:49 -0700 Subject: [PATCH 0235/1588] PYTHON-3390 Test for encrypted client post-fork (#1037) --- test/test_encryption.py | 19 +++++++++++++++++++ test/test_fork.py | 6 +++--- 2 files changed, 22 insertions(+), 3 deletions(-) diff --git a/test/test_encryption.py b/test/test_encryption.py index 00f76b7c95..e4372d7e5a 100644 --- a/test/test_encryption.py +++ b/test/test_encryption.py @@ -329,6 +329,25 @@ def test_use_after_close(self): with self.assertRaisesRegex(InvalidOperation, "Cannot use MongoClient after close"): client.admin.command("ping") + # Not available for versions of Python without "register_at_fork" + @unittest.skipIf( + not hasattr(os, "register_at_fork"), + "register_at_fork not available in this version of Python", + ) + def test_fork(self): + opts = AutoEncryptionOpts(KMS_PROVIDERS, "keyvault.datakeys") + client = rs_or_single_client(auto_encryption_opts=opts) + + lock_pid = os.fork() + if lock_pid == 0: + client.admin.command("ping") + client.close() + os._exit(0) + else: + self.assertEqual(0, os.waitpid(lock_pid, 0)[1]) + client.admin.command("ping") + client.close() + class TestEncryptedBulkWrite(BulkTestBase, EncryptionIntegrationTest): def test_upsert_uuid_standard_encrypt(self): diff --git a/test/test_fork.py b/test/test_fork.py index 7180e1a239..b1c98a26f1 100644 --- a/test/test_fork.py +++ b/test/test_fork.py @@ -53,7 +53,7 @@ def exit_cond(): if lock_pid == 0: os._exit(exit_cond()) else: - self.assertEqual(0, os.waitpid(lock_pid, 0)[1] >> 8) + self.assertEqual(0, os.waitpid(lock_pid, 0)[1]) def test_lock_object_id(self): """ @@ -67,7 +67,7 @@ def test_lock_object_id(self): if lock_pid == 0: os._exit(int(ObjectId._inc_lock.locked())) else: - self.assertEqual(0, os.waitpid(lock_pid, 0)[1] >> 8) + self.assertEqual(0, os.waitpid(lock_pid, 0)[1]) def test_topology_reset(self): """ @@ -92,7 +92,7 @@ def test_topology_reset(self): ) os._exit(0) else: # Parent - self.assertEqual(0, os.waitpid(lock_pid, 0)[1] >> 8) + self.assertEqual(0, os.waitpid(lock_pid, 0)[1]) self.assertEqual(self.client._topology._pid, init_id) child_id = parent_conn.recv() self.assertNotEqual(child_id, init_id) From 6d2e27a1b743e4c3747975e26b8b587c2faae428 Mon Sep 17 00:00:00 2001 From: Julius Park Date: Mon, 15 Aug 2022 21:18:44 -0700 Subject: [PATCH 0236/1588] PYTHON-3355 Test with consistent versions of crypt_shared and server (#1033) --- .evergreen/config.yml | 6 +++++- .evergreen/run-tests.sh | 8 +++++++- .../spec/legacy/fle2-InsertFind-Unindexed.json | 2 +- 3 files changed, 13 insertions(+), 3 deletions(-) diff --git a/.evergreen/config.yml b/.evergreen/config.yml index ac7f97f6fa..6acb6e3b74 100644 --- a/.evergreen/config.yml +++ b/.evergreen/config.yml @@ -474,7 +474,8 @@ functions: export MULTI_MONGOS_LB_URI="${MONGODB_URI}" fi - PYTHON_BINARY=${PYTHON_BINARY} \ + MONGODB_VERSION=${VERSION} \ + PYTHON_BINARY=${PYTHON_BINARY} \ GREEN_FRAMEWORK=${GREEN_FRAMEWORK} \ C_EXTENSIONS=${C_EXTENSIONS} \ COVERAGE=${COVERAGE} \ @@ -2316,6 +2317,7 @@ buildvariants: encryption: [ "encryption_crypt_shared" ] then: remove_tasks: + - ".rapid" - ".5.0" - ".4.4" - ".4.2" @@ -2410,6 +2412,7 @@ buildvariants: encryption: [ "encryption_crypt_shared" ] then: remove_tasks: + - ".rapid" - ".5.0" - ".4.4" - ".4.2" @@ -2519,6 +2522,7 @@ buildvariants: encryption: [ "encryption_crypt_shared" ] then: remove_tasks: + - ".rapid" - ".5.0" - ".4.4" - ".4.2" diff --git 
a/.evergreen/run-tests.sh b/.evergreen/run-tests.sh index 4367bad246..9a0eb25e00 100755 --- a/.evergreen/run-tests.sh +++ b/.evergreen/run-tests.sh @@ -26,6 +26,7 @@ GREEN_FRAMEWORK=${GREEN_FRAMEWORK:-} C_EXTENSIONS=${C_EXTENSIONS:-} COVERAGE=${COVERAGE:-} COMPRESSORS=${COMPRESSORS:-} +MONGODB_VERSION=${MONGODB_VERSION:-} MONGODB_API_VERSION=${MONGODB_API_VERSION:-} TEST_ENCRYPTION=${TEST_ENCRYPTION:-} TEST_CRYPT_SHARED=${TEST_CRYPT_SHARED:-} @@ -151,9 +152,14 @@ if [ -n "$TEST_ENCRYPTION" ]; then . $DRIVERS_TOOLS/.evergreen/csfle/set-temp-creds.sh if [ -n "$TEST_CRYPT_SHARED" ]; then + REAL_VERSION=$(mongod --version | head -n1 | cut -d v -f3 | tr -d "\r") + if [ "$MONGODB_VERSION" = "latest" ]; then + REAL_VERSION="latest" + fi echo "Testing CSFLE with crypt_shared lib" $PYTHON $DRIVERS_TOOLS/.evergreen/mongodl.py --component crypt_shared \ - --version latest --out ../crypt_shared/ + --version "$REAL_VERSION" \ + --out ../crypt_shared/ export DYLD_FALLBACK_LIBRARY_PATH=../crypt_shared/lib/:$DYLD_FALLBACK_LIBRARY_PATH export LD_LIBRARY_PATH=../crypt_shared/lib:$LD_LIBRARY_PATH export PATH=../crypt_shared/bin:$PATH diff --git a/test/client-side-encryption/spec/legacy/fle2-InsertFind-Unindexed.json b/test/client-side-encryption/spec/legacy/fle2-InsertFind-Unindexed.json index 1a75095907..c1bdc90760 100644 --- a/test/client-side-encryption/spec/legacy/fle2-InsertFind-Unindexed.json +++ b/test/client-side-encryption/spec/legacy/fle2-InsertFind-Unindexed.json @@ -241,7 +241,7 @@ } }, "result": { - "errorContains": "Cannot query" + "errorContains": "encrypt" } } ] From 4170dc958e2ac1a43d92fe0ea3bb8f22674cff0a Mon Sep 17 00:00:00 2001 From: Ben Warner Date: Tue, 16 Aug 2022 10:40:28 -0700 Subject: [PATCH 0237/1588] PYTHON-3393 Added fork-safety stress test. (#1036) --- test/test_fork.py | 64 +++++++++++++++++++++++++++++++++++++++++++++-- 1 file changed, 62 insertions(+), 2 deletions(-) diff --git a/test/test_fork.py b/test/test_fork.py index b1c98a26f1..41ce162492 100644 --- a/test/test_fork.py +++ b/test/test_fork.py @@ -17,6 +17,7 @@ import os from multiprocessing import Pipe from test import IntegrationTest, client_context +from test.utils import ExceptionCatchingThread, rs_or_single_client from unittest import skipIf from bson.objectid import ObjectId @@ -51,7 +52,11 @@ def exit_cond(): # In the parent, it'll return here. # In the child, it'll end with the calling thread. if lock_pid == 0: - os._exit(exit_cond()) + code = -1 + try: + code = exit_cond() + finally: + os._exit(code) else: self.assertEqual(0, os.waitpid(lock_pid, 0)[1]) @@ -65,7 +70,11 @@ def test_lock_object_id(self): lock_pid: int = os.fork() if lock_pid == 0: - os._exit(int(ObjectId._inc_lock.locked())) + code = -1 + try: + code = int(ObjectId._inc_lock.locked()) + finally: + os._exit(code) else: self.assertEqual(0, os.waitpid(lock_pid, 0)[1]) @@ -98,3 +107,54 @@ def test_topology_reset(self): self.assertNotEqual(child_id, init_id) passed, msg = parent_conn.recv() self.assertTrue(passed, msg) + + def test_many_threaded(self): + # Fork randomly while doing operations. 
+ + clients = [] + for _ in range(10): + c = rs_or_single_client() + clients.append(c) + self.addCleanup(c.close) + + class ForkThread(ExceptionCatchingThread): + def __init__(self, runner, clients): + self.runner = runner + self.clients = clients + self.fork = False + + super().__init__(target=self.fork_behavior) + + def fork_behavior(self) -> None: + def action(client): + client.admin.command("ping") + return 0 + + for i in range(200): + # Pick a random client. + rc = self.clients[i % len(self.clients)] + if i % 50 == 0 and self.fork: + # Fork + pid = os.fork() + if pid == 0: + code = -1 + try: + for c in self.clients: + action(c) + code = 0 + finally: + os._exit(code) + else: + self.runner.assertEqual(0, os.waitpid(pid, 0)[1]) + action(rc) + + threads = [ForkThread(self, clients) for _ in range(10)] + threads[-1].fork = True + for t in threads: + t.start() + + for t in threads: + t.join() + + for c in clients: + c.close() From dd3b4b11d2ce4e08ed27d0ea1d9c95c527c0aec0 Mon Sep 17 00:00:00 2001 From: Ben Warner Date: Thu, 18 Aug 2022 12:15:44 -0700 Subject: [PATCH 0238/1588] PYTHON-3403 Skips unit test if eventlent or gevent is imported (#1039) --- test/test_encryption.py | 5 +++++ test/test_fork.py | 13 ++++++++++++- 2 files changed, 17 insertions(+), 1 deletion(-) diff --git a/test/test_encryption.py b/test/test_encryption.py index e4372d7e5a..cf34ca61a0 100644 --- a/test/test_encryption.py +++ b/test/test_encryption.py @@ -51,6 +51,7 @@ TestCreator, TopologyEventListener, camel_to_snake_args, + is_greenthread_patched, rs_or_single_client, wait_until, ) @@ -334,6 +335,10 @@ def test_use_after_close(self): not hasattr(os, "register_at_fork"), "register_at_fork not available in this version of Python", ) + @unittest.skipIf( + is_greenthread_patched(), + "gevent and eventlet do not support POSIX-style forking.", + ) def test_fork(self): opts = AutoEncryptionOpts(KMS_PROVIDERS, "keyvault.datakeys") client = rs_or_single_client(auto_encryption_opts=opts) diff --git a/test/test_fork.py b/test/test_fork.py index 41ce162492..df1f009e21 100644 --- a/test/test_fork.py +++ b/test/test_fork.py @@ -17,7 +17,11 @@ import os from multiprocessing import Pipe from test import IntegrationTest, client_context -from test.utils import ExceptionCatchingThread, rs_or_single_client +from test.utils import ( + ExceptionCatchingThread, + is_greenthread_patched, + rs_or_single_client, +) from unittest import skipIf from bson.objectid import ObjectId @@ -32,6 +36,10 @@ def setUpModule(): @skipIf( not hasattr(os, "register_at_fork"), "register_at_fork not available in this version of Python" ) +@skipIf( + is_greenthread_patched(), + "gevent and eventlet do not support POSIX-style forking.", +) class TestFork(IntegrationTest): def test_lock_client(self): """ @@ -156,5 +164,8 @@ def action(client): for t in threads: t.join() + for t in threads: + self.assertIsNone(t.exc) + for c in clients: c.close() From cfc99c82f34ddc402bac6010db1ca54bd093dd8c Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Thu, 18 Aug 2022 14:01:35 -0700 Subject: [PATCH 0239/1588] PYTHON-3402 Fix TestRewrapWithSeparateClientEncryption (#1040) --- test/test_encryption.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/test_encryption.py b/test/test_encryption.py index cf34ca61a0..4ed415d4d5 100644 --- a/test/test_encryption.py +++ b/test/test_encryption.py @@ -2279,7 +2279,7 @@ def run_test(self, src_provider, dst_provider): ) # Step 5. 
Create a ``ClientEncryption`` object named ``client_encryption2`` - client2 = MongoClient() + client2 = rs_or_single_client() self.addCleanup(client2.close) client_encryption2 = ClientEncryption( key_vault_client=client2, From 09aeef0f9d7118f3a1faaed78dcf88ba01180dc2 Mon Sep 17 00:00:00 2001 From: Ben Warner Date: Thu, 18 Aug 2022 15:30:45 -0700 Subject: [PATCH 0240/1588] Changelog 4.3 (#1038) --- doc/changelog.rst | 33 +++++++++++++++++++++++++++++++++ doc/faq.rst | 24 +++++++++++++++--------- 2 files changed, 48 insertions(+), 9 deletions(-) diff --git a/doc/changelog.rst b/doc/changelog.rst index 7afaca22a1..a83df179c1 100644 --- a/doc/changelog.rst +++ b/doc/changelog.rst @@ -1,6 +1,39 @@ Changelog ========= +Changes in Version 4.3 +---------------------- + +PyMongo 4.3 brings a number of improvements including: + +- Added support for decoding BSON datetimes outside of the range supported + by Python's :class:`~datetime.datetime` builtin. See + :ref:`handling-out-of-range-datetimes` for examples, as well as + :class:`bson.datetime_ms.DatetimeMS`, + :class:`bson.codec_options.DatetimeConversion`, and + :class:`bson.codec_options.CodecOptions`'s ``datetime_conversion`` + parameter for more details (`PYTHON-1824`_). +- Added support for using a :class:`~pymongo.mongo_client.MongoClient` after + an :py:func:`os.fork` (`PYTHON-2484`_). + +Bug fixes +......... + +- Fixed a bug where :class:`~pymongo.change_stream.ChangeStream` + would allow an app to retry calling ``next()`` or ``try_next()`` even + after non-resumable errors (`PYTHON-3389`_). + +Issues Resolved +............... + +See the `PyMongo 4.3 release notes in JIRA`_ for the list of resolved issues +in this release. + +.. _PYTHON-1824: https://jira.mongodb.org/browse/PYTHON-1824 +.. _PYTHON-2484: https://jira.mongodb.org/browse/PYTHON-2484 +.. _PYTHON-3389: https://jira.mongodb.org/browse/PYTHON-3389 +.. _PyMongo 4.3 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=33425 + Changes in Version 4.2 ---------------------- diff --git a/doc/faq.rst b/doc/faq.rst index c48dd316e5..a04f761f84 100644 --- a/doc/faq.rst +++ b/doc/faq.rst @@ -14,15 +14,21 @@ for threaded applications. Is PyMongo fork-safe? --------------------- -PyMongo is not fork-safe. Care must be taken when using instances of -:class:`~pymongo.mongo_client.MongoClient` with ``fork()``. Specifically, -instances of MongoClient must not be copied from a parent process to -a child process. Instead, the parent process and each child process must -create their own instances of MongoClient. Instances of MongoClient copied from -the parent process have a high probability of deadlock in the child process due -to the inherent incompatibilities between ``fork()``, threads, and locks -described :ref:`below `. PyMongo will attempt to -issue a warning if there is a chance of this deadlock occurring. +Starting in PyMongo 4.3, forking on a compatible Python interpreter while +using a client will result in all locks held by :class:`~bson.objectid +.ObjectId` and :class:`~pymongo.mongo_client.MongoClient` being released in +the child, as well as state shared between child and parent processes being +reset. + +If greenlet has been imported (usually with a library like gevent or +Eventlet), care must be taken when using instances of :class:`~pymongo +.mongo_client.MongoClient` with ``fork()``. Specifically, instances of +MongoClient must not be copied from a parent process to a child process. 
+Instead, the parent process and each child process must create their own +instances of MongoClient. Instances of MongoClient copied from the parent +process have a high probability of deadlock in the child process due to the +inherent incompatibilities between ``fork()``, threads, and locks described +:ref:`below`. .. _pymongo-fork-safe-details: From a0a5c7194de2b0f8b7e814a6beb56a2e15517bb9 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Thu, 18 Aug 2022 15:38:09 -0700 Subject: [PATCH 0241/1588] PYTHON-3405/PYTHON-2531 Fix tests for primary step down (#1041) --- test/unified_format.py | 9 +++++---- test/utils.py | 24 ++++++++++++++++++------ 2 files changed, 23 insertions(+), 10 deletions(-) diff --git a/test/unified_format.py b/test/unified_format.py index dbf4ef988f..173b4dcb97 100644 --- a/test/unified_format.py +++ b/test/unified_format.py @@ -114,6 +114,7 @@ from pymongo.server_selectors import Selection, writable_server_selector from pymongo.server_type import SERVER_TYPE from pymongo.topology_description import TopologyDescription +from pymongo.typings import _Address from pymongo.write_concern import WriteConcern JSON_OPTS = json_util.JSONOptions(tz_aware=False) @@ -1442,21 +1443,21 @@ def _testOperation_assertTopologyType(self, spec): self.assertIsInstance(description, TopologyDescription) self.assertEqual(description.topology_type_name, spec["topologyType"]) - def _testOperation_waitForPrimaryChange(self, spec): + def _testOperation_waitForPrimaryChange(self, spec: dict) -> None: """Run the waitForPrimaryChange test operation.""" client = self.entity_map[spec["client"]] old_description: TopologyDescription = self.entity_map[spec["priorTopologyDescription"]] timeout = spec["timeoutMS"] / 1000.0 - def get_primary(td: TopologyDescription) -> Optional[ServerDescription]: + def get_primary(td: TopologyDescription) -> Optional[_Address]: servers = writable_server_selector(Selection.from_topology_description(td)) if servers and servers[0].server_type == SERVER_TYPE.RSPrimary: - return servers[0] + return servers[0].address return None old_primary = get_primary(old_description) - def primary_changed(): + def primary_changed() -> bool: primary = client.primary if primary is None: return False diff --git a/test/utils.py b/test/utils.py index 73003585c3..1ac726d2d4 100644 --- a/test/utils.py +++ b/test/utils.py @@ -593,7 +593,7 @@ def rs_or_single_client(h=None, p=None, **kwargs): return _mongo_client(h, p, **kwargs) -def ensure_all_connected(client): +def ensure_all_connected(client: MongoClient) -> None: """Ensure that the client's connection pool has socket connections to all members of a replica set. Raises ConfigurationError when called with a non-replica set client. @@ -605,14 +605,26 @@ def ensure_all_connected(client): if "setName" not in hello: raise ConfigurationError("cluster is not a replica set") - target_host_list = set(hello["hosts"]) + target_host_list = set(hello["hosts"] + hello.get("passives", [])) connected_host_list = set([hello["me"]]) - admindb = client.get_database("admin") # Run hello until we have connected to each host at least once. 
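The rewritten loop below leans on the test suite's wait_until helper. For
context, its contract is roughly the following (an illustrative
reimplementation, not the actual test/utils.py code):

    import time

    def wait_until(predicate, success_description, timeout=10):
        # Poll until the predicate returns a truthy value or time runs out.
        start = time.time()
        while True:
            retval = predicate()
            if retval:
                return retval
            if time.time() - start > timeout:
                raise AssertionError("Didn't ever %s" % success_description)
            time.sleep(0.1)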
- while connected_host_list != target_host_list: - hello = admindb.command(HelloCompat.LEGACY_CMD, read_preference=ReadPreference.SECONDARY) - connected_host_list.update([hello["me"]]) + def discover(): + i = 0 + while i < 100 and connected_host_list != target_host_list: + hello = client.admin.command( + HelloCompat.LEGACY_CMD, read_preference=ReadPreference.SECONDARY + ) + connected_host_list.update([hello["me"]]) + i += 1 + return connected_host_list + + try: + wait_until(lambda: target_host_list == discover(), "connected to all hosts") + except AssertionError as exc: + raise AssertionError( + f"{exc}, {connected_host_list} != {target_host_list}, {client.topology_description}" + ) def one(s): From 7f19186cacbcf3e2bcb42dba46997f5ff68c5378 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Thu, 18 Aug 2022 17:06:02 -0700 Subject: [PATCH 0242/1588] PYTHON-3406 Refactor fork tests to print traceback on failure (#1042) --- test/__init__.py | 29 +++++++++- test/test_encryption.py | 10 +--- test/test_fork.py | 116 +++++++++++++--------------------------- 3 files changed, 65 insertions(+), 90 deletions(-) diff --git a/test/__init__.py b/test/__init__.py index 2a3e59adf9..a3e1ca7342 100644 --- a/test/__init__.py +++ b/test/__init__.py @@ -43,7 +43,7 @@ from contextlib import contextmanager from functools import wraps from test.version import Version -from typing import Dict, no_type_check +from typing import Dict, Generator, no_type_check from unittest import SkipTest from urllib.parse import quote_plus @@ -998,6 +998,33 @@ def fail_point(self, command_args): "configureFailPoint", cmd_on["configureFailPoint"], mode="off" ) + @contextmanager + def fork(self) -> Generator[int, None, None]: + """Helper for tests that use os.fork() + + Use in a with statement: + + with self.fork() as pid: + if pid == 0: # Child + pass + else: # Parent + pass + """ + pid = os.fork() + in_child = pid == 0 + try: + yield pid + except: + if in_child: + traceback.print_exc() + os._exit(1) + raise + finally: + if in_child: + os._exit(0) + # In parent, assert child succeeded. 
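The parent-side assertion that follows, like the bare os.waitpid(...)[1]
comparisons that replaced the >> 8 shifts earlier in this series, reads the
raw 16-bit wait status: the exit code sits in the high byte and the
terminating signal in the low byte, so a status of 0 means a clean exit. A
tiny illustration:

    import os

    pid = os.fork()
    if pid == 0:
        os._exit(7)
    _, status = os.waitpid(pid, 0)
    # Equivalent views of the same status word.
    assert status >> 8 == 7
    assert os.WEXITSTATUS(status) == 7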
+ self.assertEqual(0, os.waitpid(pid, 0)[1]) + class IntegrationTest(PyMongoTestCase): """Base class for TestCases that need a connection to MongoDB to pass.""" diff --git a/test/test_encryption.py b/test/test_encryption.py index 4ed415d4d5..4146695707 100644 --- a/test/test_encryption.py +++ b/test/test_encryption.py @@ -330,7 +330,6 @@ def test_use_after_close(self): with self.assertRaisesRegex(InvalidOperation, "Cannot use MongoClient after close"): client.admin.command("ping") - # Not available for versions of Python without "register_at_fork" @unittest.skipIf( not hasattr(os, "register_at_fork"), "register_at_fork not available in this version of Python", @@ -342,14 +341,7 @@ def test_use_after_close(self): def test_fork(self): opts = AutoEncryptionOpts(KMS_PROVIDERS, "keyvault.datakeys") client = rs_or_single_client(auto_encryption_opts=opts) - - lock_pid = os.fork() - if lock_pid == 0: - client.admin.command("ping") - client.close() - os._exit(0) - else: - self.assertEqual(0, os.waitpid(lock_pid, 0)[1]) + with self.fork(): client.admin.command("ping") client.close() diff --git a/test/test_fork.py b/test/test_fork.py index df1f009e21..092ac434a0 100644 --- a/test/test_fork.py +++ b/test/test_fork.py @@ -16,7 +16,7 @@ import os from multiprocessing import Pipe -from test import IntegrationTest, client_context +from test import IntegrationTest from test.utils import ( ExceptionCatchingThread, is_greenthread_patched, @@ -27,12 +27,6 @@ from bson.objectid import ObjectId -@client_context.require_connection -def setUpModule(): - pass - - -# Not available for versions of Python without "register_at_fork" @skipIf( not hasattr(os, "register_at_fork"), "register_at_fork not available in this version of Python" ) @@ -42,83 +36,52 @@ def setUpModule(): ) class TestFork(IntegrationTest): def test_lock_client(self): - """ - Forks the client with some items locked. - Parent => All locks should be as before the fork. - Child => All locks should be reset. - """ - - def exit_cond(): - self.client.admin.command("ping") - return 0 - + # Forks the client with some items locked. + # Parent => All locks should be as before the fork. + # Child => All locks should be reset. with self.client._MongoClient__lock: - # Call _get_topology, will launch a thread to fork upon __enter__ing - # the with region. - lock_pid = os.fork() - # The POSIX standard states only the forking thread is cloned. - # In the parent, it'll return here. - # In the child, it'll end with the calling thread. - if lock_pid == 0: - code = -1 - try: - code = exit_cond() - finally: - os._exit(code) - else: - self.assertEqual(0, os.waitpid(lock_pid, 0)[1]) + with self.fork() as pid: + if pid == 0: # Child + self.client.admin.command("ping") + self.client.admin.command("ping") def test_lock_object_id(self): - """ - Forks the client with ObjectId's _inc_lock locked. - Parent => _inc_lock should remain locked. - Child => _inc_lock should be unlocked. - """ + # Forks the client with ObjectId's _inc_lock locked. + # Parent => _inc_lock should remain locked. + # Child => _inc_lock should be unlocked. with ObjectId._inc_lock: - lock_pid: int = os.fork() - - if lock_pid == 0: - code = -1 - try: - code = int(ObjectId._inc_lock.locked()) - finally: - os._exit(code) - else: - self.assertEqual(0, os.waitpid(lock_pid, 0)[1]) + with self.fork() as pid: + if pid == 0: # Child + self.assertFalse(ObjectId._inc_lock.locked()) + self.assertTrue(ObjectId()) def test_topology_reset(self): - """ - Tests that topologies are different from each other. 
- Cannot use ID because virtual memory addresses may be the same. - Cannot reinstantiate ObjectId in the topology settings. - Relies on difference in PID when opened again. - """ + # Tests that topologies are different from each other. + # Cannot use ID because virtual memory addresses may be the same. + # Cannot reinstantiate ObjectId in the topology settings. + # Relies on difference in PID when opened again. parent_conn, child_conn = Pipe() init_id = self.client._topology._pid parent_cursor_exc = self.client._kill_cursors_executor - lock_pid: int = os.fork() - - if lock_pid == 0: # Child - self.client.admin.command("ping") - child_conn.send(self.client._topology._pid) - child_conn.send( - ( - parent_cursor_exc != self.client._kill_cursors_executor, - "client._kill_cursors_executor was not reinitialized", + with self.fork() as pid: + if pid == 0: # Child + self.client.admin.command("ping") + child_conn.send(self.client._topology._pid) + child_conn.send( + ( + parent_cursor_exc != self.client._kill_cursors_executor, + "client._kill_cursors_executor was not reinitialized", + ) ) - ) - os._exit(0) - else: # Parent - self.assertEqual(0, os.waitpid(lock_pid, 0)[1]) - self.assertEqual(self.client._topology._pid, init_id) - child_id = parent_conn.recv() - self.assertNotEqual(child_id, init_id) - passed, msg = parent_conn.recv() - self.assertTrue(passed, msg) + else: # Parent + self.assertEqual(self.client._topology._pid, init_id) + child_id = parent_conn.recv() + self.assertNotEqual(child_id, init_id) + passed, msg = parent_conn.recv() + self.assertTrue(passed, msg) def test_many_threaded(self): # Fork randomly while doing operations. - clients = [] for _ in range(10): c = rs_or_single_client() @@ -143,17 +106,10 @@ def action(client): rc = self.clients[i % len(self.clients)] if i % 50 == 0 and self.fork: # Fork - pid = os.fork() - if pid == 0: - code = -1 - try: + with self.runner.fork() as pid: + if pid == 0: # Child for c in self.clients: action(c) - code = 0 - finally: - os._exit(code) - else: - self.runner.assertEqual(0, os.waitpid(pid, 0)[1]) action(rc) threads = [ForkThread(self, clients) for _ in range(10)] From 1e6b4a48d45b502986e1dd5033b3b6b06a59d3d6 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Mon, 22 Aug 2022 14:16:27 -0700 Subject: [PATCH 0243/1588] PYTHON-3406 Log traceback when fork() test encounters a deadlock (#1045) Co-authored-by: Ben Warner --- test/__init__.py | 42 ++++++++++++---------- test/test_encryption.py | 8 +++-- test/test_fork.py | 80 +++++++++++++++++++++++++---------------- 3 files changed, 78 insertions(+), 52 deletions(-) diff --git a/test/__init__.py b/test/__init__.py index a3e1ca7342..ada09db55e 100644 --- a/test/__init__.py +++ b/test/__init__.py @@ -17,7 +17,9 @@ import base64 import gc +import multiprocessing import os +import signal import socket import sys import threading @@ -43,7 +45,7 @@ from contextlib import contextmanager from functools import wraps from test.version import Version -from typing import Dict, Generator, no_type_check +from typing import Callable, Dict, Generator, no_type_check from unittest import SkipTest from urllib.parse import quote_plus @@ -999,31 +1001,33 @@ def fail_point(self, command_args): ) @contextmanager - def fork(self) -> Generator[int, None, None]: + def fork( + self, target: Callable, timeout: float = 60 + ) -> Generator[multiprocessing.Process, None, None]: """Helper for tests that use os.fork() Use in a with statement: - with self.fork() as pid: - if pid == 0: # Child - pass - else: # Parent - pass + with 
self.fork(target=lambda: print('in child')) as proc: + self.assertTrue(proc.pid) # Child process was started """ - pid = os.fork() - in_child = pid == 0 + ctx = multiprocessing.get_context("fork") + proc = ctx.Process(target=target) + proc.start() try: - yield pid - except: - if in_child: - traceback.print_exc() - os._exit(1) - raise + yield proc # type: ignore finally: - if in_child: - os._exit(0) - # In parent, assert child succeeded. - self.assertEqual(0, os.waitpid(pid, 0)[1]) + proc.join(timeout) + pid = proc.pid + assert pid + if proc.exitcode is None: + # If it failed, SIGINT to get traceback and wait 10s. + os.kill(pid, signal.SIGINT) + proc.join(10) + proc.kill() + proc.join(1) + self.fail(f"child timed out after {timeout}s (see traceback in logs): deadlock?") + self.assertEqual(proc.exitcode, 0) class IntegrationTest(PyMongoTestCase): diff --git a/test/test_encryption.py b/test/test_encryption.py index 4146695707..8e8814a421 100644 --- a/test/test_encryption.py +++ b/test/test_encryption.py @@ -341,9 +341,13 @@ def test_use_after_close(self): def test_fork(self): opts = AutoEncryptionOpts(KMS_PROVIDERS, "keyvault.datakeys") client = rs_or_single_client(auto_encryption_opts=opts) - with self.fork(): + self.addCleanup(client.close) + + def target(): client.admin.command("ping") - client.close() + + with self.fork(target): + target() class TestEncryptedBulkWrite(BulkTestBase, EncryptionIntegrationTest): diff --git a/test/test_fork.py b/test/test_fork.py index 092ac434a0..ac103af385 100644 --- a/test/test_fork.py +++ b/test/test_fork.py @@ -15,22 +15,26 @@ """Test that pymongo is fork safe.""" import os +import sys +import unittest from multiprocessing import Pipe + +from bson.objectid import ObjectId + +sys.path[0:0] = [""] + from test import IntegrationTest from test.utils import ( ExceptionCatchingThread, is_greenthread_patched, rs_or_single_client, ) -from unittest import skipIf -from bson.objectid import ObjectId - -@skipIf( +@unittest.skipIf( not hasattr(os, "register_at_fork"), "register_at_fork not available in this version of Python" ) -@skipIf( +@unittest.skipIf( is_greenthread_patched(), "gevent and eventlet do not support POSIX-style forking.", ) @@ -40,9 +44,12 @@ def test_lock_client(self): # Parent => All locks should be as before the fork. # Child => All locks should be reset. with self.client._MongoClient__lock: - with self.fork() as pid: - if pid == 0: # Child - self.client.admin.command("ping") + + def target(): + self.client.admin.command("ping") + + with self.fork(target): + pass self.client.admin.command("ping") def test_lock_object_id(self): @@ -50,10 +57,13 @@ def test_lock_object_id(self): # Parent => _inc_lock should remain locked. # Child => _inc_lock should be unlocked. with ObjectId._inc_lock: - with self.fork() as pid: - if pid == 0: # Child - self.assertFalse(ObjectId._inc_lock.locked()) - self.assertTrue(ObjectId()) + + def target(): + self.assertFalse(ObjectId._inc_lock.locked()) + self.assertTrue(ObjectId()) + + with self.fork(target): + pass def test_topology_reset(self): # Tests that topologies are different from each other. 
@@ -63,22 +73,23 @@ def test_topology_reset(self): parent_conn, child_conn = Pipe() init_id = self.client._topology._pid parent_cursor_exc = self.client._kill_cursors_executor - with self.fork() as pid: - if pid == 0: # Child - self.client.admin.command("ping") - child_conn.send(self.client._topology._pid) - child_conn.send( - ( - parent_cursor_exc != self.client._kill_cursors_executor, - "client._kill_cursors_executor was not reinitialized", - ) + + def target(): + self.client.admin.command("ping") + child_conn.send(self.client._topology._pid) + child_conn.send( + ( + parent_cursor_exc != self.client._kill_cursors_executor, + "client._kill_cursors_executor was not reinitialized", ) - else: # Parent - self.assertEqual(self.client._topology._pid, init_id) - child_id = parent_conn.recv() - self.assertNotEqual(child_id, init_id) - passed, msg = parent_conn.recv() - self.assertTrue(passed, msg) + ) + + with self.fork(target): + self.assertEqual(self.client._topology._pid, init_id) + child_id = parent_conn.recv() + self.assertNotEqual(child_id, init_id) + passed, msg = parent_conn.recv() + self.assertTrue(passed, msg) def test_many_threaded(self): # Fork randomly while doing operations. @@ -106,10 +117,13 @@ def action(client): rc = self.clients[i % len(self.clients)] if i % 50 == 0 and self.fork: # Fork - with self.runner.fork() as pid: - if pid == 0: # Child - for c in self.clients: - action(c) + def target(): + for c_ in self.clients: + action(c_) + c_.close() + + with self.runner.fork(target=target) as proc: + self.runner.assertTrue(proc.pid) action(rc) threads = [ForkThread(self, clients) for _ in range(10)] @@ -125,3 +139,7 @@ def action(client): for c in clients: c.close() + + +if __name__ == "__main__": + unittest.main() From 9ff0ac8a62f1ef1a334c01af88a9c853cd7dec18 Mon Sep 17 00:00:00 2001 From: Julius Park Date: Mon, 22 Aug 2022 15:05:39 -0700 Subject: [PATCH 0244/1588] PYTHON-3407 macos release failing on Python 3.8 AttributeError: 'Distribution' object has no attribute 'convert_2to3_doctests' (#1044) --- .evergreen/build-mac.sh | 14 ++++---------- 1 file changed, 4 insertions(+), 10 deletions(-) diff --git a/.evergreen/build-mac.sh b/.evergreen/build-mac.sh index 09950a592f..2dd02a0fbe 100755 --- a/.evergreen/build-mac.sh +++ b/.evergreen/build-mac.sh @@ -12,16 +12,10 @@ for VERSION in 3.7 3.8 3.9 3.10; do PYTHON=/Library/Frameworks/Python.framework/Versions/$VERSION/bin/python3 rm -rf build - # Install wheel if not already there. - if ! 
$PYTHON -m wheel version; then - createvirtualenv $PYTHON releasevenv - WHEELPYTHON=python - python -m pip install --upgrade wheel - else - WHEELPYTHON=$PYTHON - fi - - $WHEELPYTHON setup.py bdist_wheel + createvirtualenv $PYTHON releasevenv + python -m pip install --upgrade wheel + python -m pip install setuptools==63.2.0 + python setup.py bdist_wheel deactivate || true rm -rf releasevenv From 1575e53ef739008951a92717b190b8ec290165c7 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Mon, 22 Aug 2022 16:19:30 -0700 Subject: [PATCH 0245/1588] PYTHON-3409 Retry flakey CSOT tests twice (#1046) --- test/test_retryable_writes.py | 1 + test/unified_format.py | 20 ++++++++++++++++++++ 2 files changed, 21 insertions(+) diff --git a/test/test_retryable_writes.py b/test/test_retryable_writes.py index 0eb863f4cf..8d556b90ae 100644 --- a/test/test_retryable_writes.py +++ b/test/test_retryable_writes.py @@ -471,6 +471,7 @@ def setUpClass(cls): def test_RetryableWriteError_error_label(self): listener = OvertCommandListener() client = rs_or_single_client(retryWrites=True, event_listeners=[listener]) + self.addCleanup(client.close) # Ensure collection exists. client.pymongo_test.testcoll.insert_one({}) diff --git a/test/unified_format.py b/test/unified_format.py index 173b4dcb97..aec7763272 100644 --- a/test/unified_format.py +++ b/test/unified_format.py @@ -25,6 +25,7 @@ import re import sys import time +import traceback import types from collections import abc from test import ( @@ -1580,6 +1581,25 @@ def verify_outcome(self, spec): self.assertListEqual(sorted_expected_documents, actual_documents) def run_scenario(self, spec, uri=None): + if "csot" in self.id().lower(): + # Retry CSOT tests up to 2 times to deal with flakey tests. + attempts = 3 + for i in range(attempts): + try: + return self._run_scenario(spec, uri) + except AssertionError: + if i < attempts - 1: + print( + f"Retrying after attempt {i+1} of {self.id()} failed with:\n" + f"{traceback.format_exc()}" + ) + self.setUp() + continue + raise + else: + self._run_scenario(spec, uri) + + def _run_scenario(self, spec, uri=None): # maybe skip test manually self.maybe_skip_test(spec) From 0f135a157e2fa6ae66d4091186bdf0c40113ef77 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Thu, 25 Aug 2022 20:16:39 -0500 Subject: [PATCH 0246/1588] PYTHON-3413 Ensure AWS EC2 Credential Test is Running Properly (#1048) --- .evergreen/config.yml | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/.evergreen/config.yml b/.evergreen/config.yml index 6acb6e3b74..621542226d 100644 --- a/.evergreen/config.yml +++ b/.evergreen/config.yml @@ -643,6 +643,13 @@ functions: working_dir: "src" script: | ${PREPARE_SHELL} + if [ "${skip_EC2_auth_test}" = "true" ]; then + echo "This platform does not support the EC2 auth test, skipping..." + exit 0 + fi + # Write an empty prepare_mongodb_aws so no auth environment variables + # are set. 
+ echo "" > "${PROJECT_DIRECTORY}/prepare_mongodb_aws.sh" PYTHON_BINARY=${PYTHON_BINARY} ASSERT_NO_URI_CREDS=true .evergreen/run-mongodb-aws-test.sh "run aws auth test with aws credentials as environment variables": From 78256368c7073e5c71007a7dc4bb9f12db7746b7 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Mon, 29 Aug 2022 17:33:00 -0500 Subject: [PATCH 0247/1588] PYTHON-3411 Stop testing MongoDB 6.0 on Amazon1 2018 (#1049) --- .evergreen/config.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.evergreen/config.yml b/.evergreen/config.yml index 621542226d..a487c264a0 100644 --- a/.evergreen/config.yml +++ b/.evergreen/config.yml @@ -2242,8 +2242,8 @@ buildvariants: - matrix_name: "tests-all" matrix_spec: platform: - # OSes that support versions of MongoDB>=2.6 with SSL. - - awslinux + # OSes that support versions of MongoDB>=3.6 with SSL. + - ubuntu-18.04 auth-ssl: "*" display_name: "${platform} ${auth-ssl}" tasks: From e3ff041b474835f007faaead470bb12dcc9dc22c Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Fri, 9 Sep 2022 16:28:15 -0500 Subject: [PATCH 0248/1588] PYTHON-3433 Failure: test.test_encryption.TestSpec.test_legacy_maxWireVersion_operation_fails_with_maxWireVersion___8 (#1052) --- test/client-side-encryption/spec/legacy/maxWireVersion.json | 2 +- test/test_encryption.py | 2 ++ 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/test/client-side-encryption/spec/legacy/maxWireVersion.json b/test/client-side-encryption/spec/legacy/maxWireVersion.json index c1088a0ecf..f04f58dffd 100644 --- a/test/client-side-encryption/spec/legacy/maxWireVersion.json +++ b/test/client-side-encryption/spec/legacy/maxWireVersion.json @@ -1,7 +1,7 @@ { "runOn": [ { - "maxServerVersion": "4.0" + "maxServerVersion": "4.0.99" } ], "database_name": "default", diff --git a/test/test_encryption.py b/test/test_encryption.py index 8e8814a421..567d606893 100644 --- a/test/test_encryption.py +++ b/test/test_encryption.py @@ -616,6 +616,8 @@ def parse_auto_encrypt_opts(self, opts): opts["kms_tls_options"] = KMS_TLS_OPTS if "key_vault_namespace" not in opts: opts["key_vault_namespace"] = "keyvault.datakeys" + if "extra_options" in opts: + opts.update(camel_to_snake_args(opts.pop("extra_options"))) opts = dict(opts) return AutoEncryptionOpts(**opts) From 1019c91bf67fbed09f6ce26df16614092676ab54 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Mon, 12 Sep 2022 19:14:50 -0500 Subject: [PATCH 0249/1588] PYTHON-3424 PyMongo Universal Wheels Are Improperly Compiled (#1051) --- .evergreen/build-mac.sh | 27 +++++++++++++-------------- .evergreen/config.yml | 23 +++++++++++++++++++++-- tools/fail_if_no_c.py | 13 +++++++++++++ 3 files changed, 47 insertions(+), 16 deletions(-) diff --git a/.evergreen/build-mac.sh b/.evergreen/build-mac.sh index 2dd02a0fbe..270c92b59a 100755 --- a/.evergreen/build-mac.sh +++ b/.evergreen/build-mac.sh @@ -8,22 +8,21 @@ rm -rf validdist mkdir -p validdist mv dist/* validdist || true -for VERSION in 3.7 3.8 3.9 3.10; do - PYTHON=/Library/Frameworks/Python.framework/Versions/$VERSION/bin/python3 - rm -rf build +VERSION=${VERSION:-3.10} +PYTHON=/Library/Frameworks/Python.framework/Versions/$VERSION/bin/python3 +rm -rf build - createvirtualenv $PYTHON releasevenv - python -m pip install --upgrade wheel - python -m pip install setuptools==63.2.0 - python setup.py bdist_wheel - deactivate || true - rm -rf releasevenv +createvirtualenv $PYTHON releasevenv +python -m pip install --upgrade wheel +python -m pip install setuptools==63.2.0 +python 
setup.py bdist_wheel +deactivate || true +rm -rf releasevenv - # Test that each wheel is installable. - for release in dist/*; do - testinstall $PYTHON $release - mv $release validdist/ - done +# Test that each wheel is installable. +for release in dist/*; do + testinstall $PYTHON $release + mv $release validdist/ done mv validdist/* dist diff --git a/.evergreen/config.yml b/.evergreen/config.yml index a487c264a0..0808cc11be 100644 --- a/.evergreen/config.yml +++ b/.evergreen/config.yml @@ -891,7 +891,7 @@ functions: script: | set -o xtrace ${PREPARE_SHELL} - .evergreen/release.sh + VERSION=${VERSION} ENSURE_UNIVERSAL2=${ENSURE_UNIVERSAL2} .evergreen/release.sh "upload release": - command: archive.targz_pack @@ -1046,11 +1046,30 @@ tasks: genhtml --version || true valgrind --version || true - - name: "release-mac" + - name: "release-mac-1100" + tags: ["release_tag"] + run_on: macos-1100 + commands: + - func: "build release" + vars: + VERSION: "3.10" + ENSURE_UNIVERSAL2: "1" + - func: "build release" + vars: + VERSION: "3.9" + ENSURE_UNIVERSAL2: "1" + - func: "upload release" + + - name: "release-mac-1014" tags: ["release_tag"] run_on: macos-1014 commands: - func: "build release" + vars: + VERSION: "3.7" + - func: "build release" + vars: + VERSION: "3.8" - func: "upload release" - name: "release-windows" diff --git a/tools/fail_if_no_c.py b/tools/fail_if_no_c.py index 6cb82eed57..e2e9c52527 100644 --- a/tools/fail_if_no_c.py +++ b/tools/fail_if_no_c.py @@ -17,6 +17,9 @@ Only really intended to be used by internal build scripts. """ +import glob +import os +import subprocess import sys sys.path[0:0] = [""] @@ -26,3 +29,13 @@ if not pymongo.has_c() or not bson.has_c(): sys.exit("could not load C extensions") + +if os.environ.get("ENSURE_UNIVERSAL2") == "1": + parent_dir = os.path.dirname(pymongo.__path__[0]) + for so_file in glob.glob(f"{parent_dir}/**/*.so"): + print(f"Checking universal2 compatibility in {so_file}...") + output = subprocess.check_output(["file", so_file]) + if "arm64" not in output.decode("utf-8"): + sys.exit("Universal wheel was not compiled with arm64 support") + if "x86_64" not in output.decode("utf-8"): + sys.exit("Universal wheel was not compiled with x86_64 support") From b8cb1c1cf06623ff52faf4813afee3bca420c995 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Tue, 13 Sep 2022 15:30:56 -0500 Subject: [PATCH 0250/1588] PYTHON-3413 Skip EC2 test on Windows (#1054) --- .evergreen/config.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.evergreen/config.yml b/.evergreen/config.yml index 0808cc11be..9d016f4d8a 100644 --- a/.evergreen/config.yml +++ b/.evergreen/config.yml @@ -1938,6 +1938,7 @@ axes: run_on: windows-64-vsMulti-small batchtime: 10080 # 7 days variables: + skip_EC2_auth_test: true skip_ECS_auth_test: true python3_binary: "C:/python/Python38/python.exe" venv_bin_dir: "Scripts" From 179efda31200b495beab6c2e94f365f5713aadc4 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Tue, 13 Sep 2022 14:14:53 -0700 Subject: [PATCH 0251/1588] PYTHON-3406 Reinstate warning and docs that PyMongo is not fork safe (#1050) Log child process C-level stacks when fork tests deadlock. Encode hostname to bytes to avoid getaddrinfo importlib deadlock. 
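The safe pattern the updated FAQ prescribes, as a minimal sketch (POSIX-only;
the client usage and names are illustrative)::

    import os

    from pymongo import MongoClient

    parent_client = MongoClient()
    pid = os.fork()
    if pid == 0:
        # Child: never reuse parent_client; construct a fresh client instead.
        child_client = MongoClient()
        child_client.admin.command("ping")
        child_client.close()
        os._exit(0)
    os.waitpid(pid, 0)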
---
 doc/changelog.rst       |  7 +++--
 doc/faq.rst             | 47 +++++++++++++++++++----------
 pymongo/mongo_client.py | 11 +++----
 pymongo/pool.py         |  6 ++--
 pymongo/topology.py     | 15 +++++-----
 test/__init__.py        | 66 +++++++++++++++++++++++++++++++++++++----
 test/test_fork.py       | 61 +++----------------------------------
 test/unified_format.py  |  3 +-
 8 files changed, 120 insertions(+), 96 deletions(-)

diff --git a/doc/changelog.rst b/doc/changelog.rst
index a83df179c1..7107e57333 100644
--- a/doc/changelog.rst
+++ b/doc/changelog.rst
@@ -13,8 +13,11 @@ PyMongo 4.3 brings a number of improvements including:
   :class:`bson.codec_options.DatetimeConversion`, and
   :class:`bson.codec_options.CodecOptions`'s ``datetime_conversion``
   parameter for more details (`PYTHON-1824`_).
-- Added support for using a :class:`~pymongo.mongo_client.MongoClient` after
-  an :py:func:`os.fork` (`PYTHON-2484`_).
+- PyMongo now resets its locks and other shared state in the child process
+  after an :py:func:`os.fork` to reduce the frequency of deadlocks. Note that
+  deadlocks are still possible because libraries that PyMongo depends on, like
+  OpenSSL, cannot be made fork() safe in multithreaded applications
+  (`PYTHON-2484`_). For more information, see :ref:`pymongo-fork-safe`.
 
 Bug fixes
 .........
diff --git a/doc/faq.rst b/doc/faq.rst
index a04f761f84..acf557a81b 100644
--- a/doc/faq.rst
+++ b/doc/faq.rst
@@ -14,21 +14,15 @@ for threaded applications.
 
 Is PyMongo fork-safe?
 ---------------------
 
-Starting in PyMongo 4.3, forking on a compatible Python interpreter while
-using a client will result in all locks held by :class:`~bson.objectid
-.ObjectId` and :class:`~pymongo.mongo_client.MongoClient` being released in
-the child, as well as state shared between child and parent processes being
-reset.
-
-If greenlet has been imported (usually with a library like gevent or
-Eventlet), care must be taken when using instances of :class:`~pymongo
-.mongo_client.MongoClient` with ``fork()``. Specifically, instances of
-MongoClient must not be copied from a parent process to a child process.
-Instead, the parent process and each child process must create their own
-instances of MongoClient. Instances of MongoClient copied from the parent
-process have a high probability of deadlock in the child process due to the
-inherent incompatibilities between ``fork()``, threads, and locks described
-:ref:`below`.
+PyMongo is not fork-safe. Care must be taken when using instances of
+:class:`~pymongo.mongo_client.MongoClient` with ``fork()``. Specifically,
+instances of MongoClient must not be copied from a parent process to
+a child process. Instead, the parent process and each child process must
+create their own instances of MongoClient. Instances of MongoClient copied from
+the parent process have a high probability of deadlock in the child process due
+to the inherent incompatibilities between ``fork()``, threads, and locks
+described :ref:`below <pymongo-fork-safe-details>`. PyMongo will attempt to
+issue a warning if there is a chance of this deadlock occurring.
 
 .. _pymongo-fork-safe-details:
 
@@ -44,10 +38,33 @@ created by ``fork()`` only has one thread, so any locks that were taken out
 by other threads in the parent will never be released in the child. The next
 time the child process attempts to acquire one of these locks, deadlock
 occurs.
 
+Starting in version 4.3, PyMongo utilizes :py:func:`os.register_at_fork` to
+reset its locks and other shared state in the child process after an
+:py:func:`os.fork` to reduce the frequency of deadlocks. 
However deadlocks +are still possible because libraries that PyMongo depends on, like `OpenSSL`_ +and `getaddrinfo(3)`_ (on some platforms), are not fork() safe in a +multithreaded application. Linux also imposes the restriction that: + + After a `fork()`_ in a multithreaded program, the child can + safely call only async-signal-safe functions (see + `signal-safety(7)`_) until such time as it calls `execve(2)`_. + +PyMongo relies on functions that are *not* `async-signal-safe`_ and hence the +child process can experience deadlocks or crashes when attempting to call +a non `async-signal-safe`_ function. For examples of deadlocks or crashes +that could occur see `PYTHON-3406`_. + For a long but interesting read about the problems of Python locks in multithreaded contexts with ``fork()``, see http://bugs.python.org/issue6721. .. _not fork-safe: http://bugs.python.org/issue6721 +.. _OpenSSL: https://github.com/openssl/openssl/issues/19066 +.. _fork(): https://man7.org/linux/man-pages/man2/fork.2.html +.. _signal-safety(7): https://man7.org/linux/man-pages/man7/signal-safety.7.html +.. _async-signal-safe: https://man7.org/linux/man-pages/man7/signal-safety.7.html +.. _execve(2): https://man7.org/linux/man-pages/man2/execve.2.html +.. _getaddrinfo(3): https://man7.org/linux/man-pages/man3/gai_strerror.3.html +.. _PYTHON-3406: https://jira.mongodb.org/browse/PYTHON-3406 .. _connection-pooling: diff --git a/pymongo/mongo_client.py b/pymongo/mongo_client.py index c8330f32d0..7e4e4f10ca 100644 --- a/pymongo/mongo_client.py +++ b/pymongo/mongo_client.py @@ -82,7 +82,7 @@ ServerSelectionTimeoutError, WaitQueueTimeoutError, ) -from pymongo.lock import _create_lock, _release_locks +from pymongo.lock import _HAS_REGISTER_AT_FORK, _create_lock, _release_locks from pymongo.pool import ConnectionClosedReason from pymongo.read_preferences import ReadPreference, _ServerMode from pymongo.server_selectors import writable_server_selector @@ -831,9 +831,10 @@ def __init__( self._encrypter = _Encrypter(self, self.__options.auto_encryption_opts) self._timeout = self.__options.timeout - # Add this client to the list of weakly referenced items. - # This will be used later if we fork. - MongoClient._clients[self._topology._topology_id] = self + if _HAS_REGISTER_AT_FORK: + # Add this client to the list of weakly referenced items. + # This will be used later if we fork. + MongoClient._clients[self._topology._topology_id] = self def _init_background(self): self._topology = Topology(self._topology_settings) @@ -2177,7 +2178,7 @@ def _after_fork_child(): client._after_fork() -if hasattr(os, "register_at_fork"): +if _HAS_REGISTER_AT_FORK: # This will run in the same thread as the fork was called. # If we fork in a critical region on the same thread, it should break. # This is fine since we would never call fork directly from a critical region. diff --git a/pymongo/pool.py b/pymongo/pool.py index 6355692ac9..88f56b16e5 100644 --- a/pymongo/pool.py +++ b/pymongo/pool.py @@ -979,9 +979,11 @@ def _create_connection(address, options): This is a modified version of create_connection from CPython >= 2.7. """ host, port = address + # Avoid the getaddrinfo importlib deadlock on fork() described in PYTHON-3406. 
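+        # Encoding up front (rather than letting getaddrinfo encode the str
+        # host itself) sidesteps the lazy ``encodings.idna`` import inside
+        # getaddrinfo, which can deadlock on the import lock in a forked child.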
+ host = host.encode("idna") # Check if dealing with a unix domain socket - if host.endswith(".sock"): + if host.endswith(b".sock"): if not hasattr(socket, "AF_UNIX"): raise ConnectionFailure("UNIX-sockets are not supported on this system") sock = socket.socket(socket.AF_UNIX) @@ -998,7 +1000,7 @@ def _create_connection(address, options): # is 'localhost' (::1 is fine). Avoids slow connect issues # like PYTHON-356. family = socket.AF_INET - if socket.has_ipv6 and host != "localhost": + if socket.has_ipv6 and host != b"localhost": family = socket.AF_UNSPEC err = None diff --git a/pymongo/topology.py b/pymongo/topology.py index 84975ca076..87a566fa6e 100644 --- a/pymongo/topology.py +++ b/pymongo/topology.py @@ -36,7 +36,7 @@ WriteError, ) from pymongo.hello import Hello -from pymongo.lock import _HAS_REGISTER_AT_FORK, _create_lock +from pymongo.lock import _create_lock from pymongo.monitor import SrvMonitor from pymongo.pool import PoolOptions from pymongo.server import Server @@ -174,13 +174,12 @@ def open(self): self._pid = pid elif pid != self._pid: self._pid = pid - if not _HAS_REGISTER_AT_FORK: - warnings.warn( - "MongoClient opened before fork. May not be entirely fork-safe, " - "proceed with caution. See PyMongo's documentation for details: " - "https://pymongo.readthedocs.io/en/stable/faq.html#" - "is-pymongo-fork-safe" - ) + warnings.warn( + "MongoClient opened before fork. May not be entirely fork-safe, " + "proceed with caution. See PyMongo's documentation for details: " + "https://pymongo.readthedocs.io/en/stable/faq.html#" + "is-pymongo-fork-safe" + ) with self._lock: # Close servers and clear the pools. for server in self._servers.values(): diff --git a/test/__init__.py b/test/__init__.py index ada09db55e..b89cd88d26 100644 --- a/test/__init__.py +++ b/test/__init__.py @@ -21,8 +21,10 @@ import os import signal import socket +import subprocess import sys import threading +import time import traceback import unittest import warnings @@ -1011,8 +1013,28 @@ def fork( with self.fork(target=lambda: print('in child')) as proc: self.assertTrue(proc.pid) # Child process was started """ + + def _print_threads(*args: object) -> None: + if _print_threads.called: + return + _print_threads.called = True + print_thread_tracebacks() + + _print_threads.called = False + + def _target() -> None: + signal.signal(signal.SIGUSR1, _print_threads) + try: + target() + except Exception as exc: + sys.stderr.write(f"Child process failed with: {exc}\n") + _print_threads() + # Sleep for a while to let the parent attach via GDB. + time.sleep(2 * timeout) + raise + ctx = multiprocessing.get_context("fork") - proc = ctx.Process(target=target) + proc = ctx.Process(target=_target) proc.start() try: yield proc # type: ignore @@ -1021,15 +1043,47 @@ def fork( pid = proc.pid assert pid if proc.exitcode is None: - # If it failed, SIGINT to get traceback and wait 10s. - os.kill(pid, signal.SIGINT) - proc.join(10) - proc.kill() - proc.join(1) + # gdb to get C-level tracebacks + print_thread_stacks(pid) + # If it failed, SIGUSR1 to get thread tracebacks. + os.kill(pid, signal.SIGUSR1) + proc.join(5) + if proc.exitcode is None: + # SIGINT to get main thread traceback in case SIGUSR1 didn't work. + os.kill(pid, signal.SIGINT) + proc.join(5) + if proc.exitcode is None: + # SIGKILL in case SIGINT didn't work. 
+ proc.kill() + proc.join(1) self.fail(f"child timed out after {timeout}s (see traceback in logs): deadlock?") self.assertEqual(proc.exitcode, 0) +def print_thread_tracebacks() -> None: + """Print all Python thread tracebacks.""" + for thread_id, frame in sys._current_frames().items(): + sys.stderr.write(f"\n--- Traceback for thread {thread_id} ---\n") + traceback.print_stack(frame, file=sys.stderr) + + +def print_thread_stacks(pid: int) -> None: + """Print all C-level thread stacks for a given process id.""" + if sys.platform == "darwin": + cmd = ["lldb", "--attach-pid", f"{pid}", "--batch", "--one-line", '"thread backtrace all"'] + else: + cmd = ["gdb", f"--pid={pid}", "--batch", '--eval-command="thread apply all bt"'] + + try: + res = subprocess.run( + cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, encoding="utf-8" + ) + except Exception as exc: + sys.stderr.write(f"Could not print C-level thread stacks because {cmd[0]} failed: {exc}") + else: + sys.stderr.write(res.stdout) + + class IntegrationTest(PyMongoTestCase): """Base class for TestCases that need a connection to MongoDB to pass.""" diff --git a/test/test_fork.py b/test/test_fork.py index ac103af385..422cd89f28 100644 --- a/test/test_fork.py +++ b/test/test_fork.py @@ -12,23 +12,19 @@ # See the License for the specific language governing permissions and # limitations under the License. -"""Test that pymongo is fork safe.""" +"""Test that pymongo resets its own locks after a fork.""" import os import sys import unittest from multiprocessing import Pipe -from bson.objectid import ObjectId - sys.path[0:0] = [""] from test import IntegrationTest -from test.utils import ( - ExceptionCatchingThread, - is_greenthread_patched, - rs_or_single_client, -) +from test.utils import is_greenthread_patched + +from bson.objectid import ObjectId @unittest.skipIf( @@ -91,55 +87,6 @@ def target(): passed, msg = parent_conn.recv() self.assertTrue(passed, msg) - def test_many_threaded(self): - # Fork randomly while doing operations. - clients = [] - for _ in range(10): - c = rs_or_single_client() - clients.append(c) - self.addCleanup(c.close) - - class ForkThread(ExceptionCatchingThread): - def __init__(self, runner, clients): - self.runner = runner - self.clients = clients - self.fork = False - - super().__init__(target=self.fork_behavior) - - def fork_behavior(self) -> None: - def action(client): - client.admin.command("ping") - return 0 - - for i in range(200): - # Pick a random client. 
- rc = self.clients[i % len(self.clients)] - if i % 50 == 0 and self.fork: - # Fork - def target(): - for c_ in self.clients: - action(c_) - c_.close() - - with self.runner.fork(target=target) as proc: - self.runner.assertTrue(proc.pid) - action(rc) - - threads = [ForkThread(self, clients) for _ in range(10)] - threads[-1].fork = True - for t in threads: - t.start() - - for t in threads: - t.join() - - for t in threads: - self.assertIsNone(t.exc) - - for c in clients: - c.close() - if __name__ == "__main__": unittest.main() diff --git a/test/unified_format.py b/test/unified_format.py index aec7763272..3f51c335eb 100644 --- a/test/unified_format.py +++ b/test/unified_format.py @@ -1591,7 +1591,8 @@ def run_scenario(self, spec, uri=None): if i < attempts - 1: print( f"Retrying after attempt {i+1} of {self.id()} failed with:\n" - f"{traceback.format_exc()}" + f"{traceback.format_exc()}", + file=sys.stderr, ) self.setUp() continue From dcb1327395c96b6401492737db9e923d8577b35e Mon Sep 17 00:00:00 2001 From: Julius Park Date: Thu, 15 Sep 2022 13:31:45 -0700 Subject: [PATCH 0252/1588] PYTHON-3423 Make dnspython a required dependency (#1055) --- README.rst | 9 +++++---- doc/changelog.rst | 4 ++++ doc/installation.rst | 9 ++++++--- pymongo/uri_parser.py | 4 ++-- setup.py | 4 ++-- 5 files changed, 19 insertions(+), 11 deletions(-) diff --git a/README.rst b/README.rst index f60b8da680..576facb5b5 100644 --- a/README.rst +++ b/README.rst @@ -90,6 +90,11 @@ Dependencies PyMongo supports CPython 3.7+ and PyPy3.7+. +Required dependencies: + +Support for mongodb+srv:// URIs requires `dnspython +`_ + Optional dependencies: GSSAPI authentication requires `pykerberos @@ -104,10 +109,6 @@ MONGODB-AWS authentication requires `pymongo-auth-aws $ python -m pip install "pymongo[aws]" -Support for mongodb+srv:// URIs requires `dnspython -`_:: - - $ python -m pip install "pymongo[srv]" OCSP (Online Certificate Status Protocol) requires `PyOpenSSL `_, `requests diff --git a/doc/changelog.rst b/doc/changelog.rst index 7107e57333..24c80efa2e 100644 --- a/doc/changelog.rst +++ b/doc/changelog.rst @@ -4,6 +4,10 @@ Changelog Changes in Version 4.3 ---------------------- +`dnspython `_ is now a required +dependency. This change makes PyMongo easier to install for use with "mongodb+srv://" +connection strings and `MongoDB Atlas `_. + PyMongo 4.3 brings a number of improvements including: - Added support for decoding BSON datetimes outside of the range supported diff --git a/doc/installation.rst b/doc/installation.rst index 788faf46cc..4355f771eb 100644 --- a/doc/installation.rst +++ b/doc/installation.rst @@ -30,6 +30,12 @@ Dependencies PyMongo supports CPython 3.7+ and PyPy3.7+. +Required dependencies: + +Support for mongodb+srv:// URIs requires `dnspython +`_ + + Optional dependencies: GSSAPI authentication requires `pykerberos @@ -44,10 +50,7 @@ dependency can be installed automatically along with PyMongo:: $ python3 -m pip install "pymongo[aws]" -Support for mongodb+srv:// URIs requires `dnspython -`_:: - $ python3 -m pip install "pymongo[srv]" :ref:`OCSP` requires `PyOpenSSL `_, `requests diff --git a/pymongo/uri_parser.py b/pymongo/uri_parser.py index cd18c067e7..f59af2e74c 100644 --- a/pymongo/uri_parser.py +++ b/pymongo/uri_parser.py @@ -469,8 +469,8 @@ def parse_uri( raise ConfigurationError( 'The "dnspython" module must be ' "installed to use mongodb+srv:// URIs. 
" - "To fix this error install pymongo with the srv extra:\n " - '%s -m pip install "pymongo[srv]"' % (python_path) + "To fix this error install pymongo again:\n " + "%s -m pip install pymongo>=4.3" % (python_path) ) is_srv = True scheme_free = uri[SRV_SCHEME_LEN:] diff --git a/setup.py b/setup.py index 0e983e4642..524c1303e6 100755 --- a/setup.py +++ b/setup.py @@ -281,7 +281,7 @@ def build_extension(self, ext): "snappy": ["python-snappy"], "zstd": ["zstandard"], "aws": ["pymongo-auth-aws<2.0.0"], - "srv": ["dnspython>=1.16.0,<3.0.0"], + "srv": [], } # GSSAPI extras @@ -314,7 +314,7 @@ def build_extension(self, ext): author="The MongoDB Python Team", url="http://github.com/mongodb/mongo-python-driver", keywords=["mongo", "mongodb", "pymongo", "gridfs", "bson"], - install_requires=[], + install_requires=["dnspython>=1.16.0,<3.0.0"], license="Apache License, Version 2.0", python_requires=">=3.7", classifiers=[ From eb028d0195ebfb21fa290688202251f275131709 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Fri, 16 Sep 2022 13:41:06 -0500 Subject: [PATCH 0253/1588] PYTHON-3420 Update ChangeStreamEvent type definition to include clusterTime (#1057) --- .../unified/change-streams-clusterTime.json | 82 +++++++++++++++++++ 1 file changed, 82 insertions(+) create mode 100644 test/change_streams/unified/change-streams-clusterTime.json diff --git a/test/change_streams/unified/change-streams-clusterTime.json b/test/change_streams/unified/change-streams-clusterTime.json new file mode 100644 index 0000000000..55b4ae3fbc --- /dev/null +++ b/test/change_streams/unified/change-streams-clusterTime.json @@ -0,0 +1,82 @@ +{ + "description": "change-streams-clusterTime", + "schemaVersion": "1.4", + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "database0" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "collection0" + } + } + ], + "runOnRequirements": [ + { + "minServerVersion": "4.0.0", + "topologies": [ + "replicaset", + "sharded-replicaset", + "load-balanced", + "sharded" + ], + "serverless": "forbid" + } + ], + "initialData": [ + { + "collectionName": "collection0", + "databaseName": "database0", + "documents": [] + } + ], + "tests": [ + { + "description": "clusterTime is present", + "operations": [ + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [] + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "_id": 1 + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "ns": { + "db": "database0", + "coll": "collection0" + }, + "clusterTime": { + "$$exists": true + } + } + } + ] + } + ] +} From 0143881f0261a54a9d0ca99ad98ecc825dd89d56 Mon Sep 17 00:00:00 2001 From: Max Zhenzhera <59729293+maxzhenzhera@users.noreply.github.com> Date: Mon, 19 Sep 2022 23:19:14 +0300 Subject: [PATCH 0254/1588] PYTHON-3441 Add missing pool_ready method in monitoring docs example (#1060) --- pymongo/monitoring.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/pymongo/monitoring.py b/pymongo/monitoring.py index 90b8c1a3eb..c53e7e5727 100644 --- a/pymongo/monitoring.py +++ b/pymongo/monitoring.py @@ -125,6 +125,9 @@ class ConnectionPoolLogger(ConnectionPoolListener): def pool_created(self, event): logging.info("[pool {0.address}] 
pool created".format(event)) + def pool_ready(self, event): + logging.info("[pool {0.address}] pool is ready".format(event)) + def pool_cleared(self, event): logging.info("[pool {0.address}] pool cleared".format(event)) From 449cb8fb0fc596ce7d453aa3a48bad7f275d480e Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Thu, 22 Sep 2022 15:14:40 -0500 Subject: [PATCH 0255/1588] PYTHON-2722 Improve performance of find/aggregate_raw_batches (#1047) --- bson/__init__.py | 80 +++++++++++++++++++----- bson/_cbsonmodule.c | 147 +++++++++++++++++++++++++++++++++++++++++--- bson/raw_bson.py | 47 ++++++++++---- pymongo/message.py | 9 ++- 4 files changed, 245 insertions(+), 38 deletions(-) diff --git a/bson/__init__.py b/bson/__init__.py index dc2e29238a..c6a81d97ec 100644 --- a/bson/__init__.py +++ b/bson/__init__.py @@ -128,7 +128,6 @@ from array import array from mmap import mmap - try: from bson import _cbson # type: ignore[attr-defined] @@ -520,19 +519,32 @@ def _get_decimal128( if _USE_C: def _element_to_dict( - data: Any, view: Any, position: int, obj_end: int, opts: CodecOptions + data: Any, + view: Any, + position: int, + obj_end: int, + opts: CodecOptions, + raw_array: bool = False, ) -> Any: - return _cbson._element_to_dict(data, position, obj_end, opts) + return _cbson._element_to_dict(data, position, obj_end, opts, raw_array) else: def _element_to_dict( - data: Any, view: Any, position: int, obj_end: int, opts: CodecOptions + data: Any, + view: Any, + position: int, + obj_end: int, + opts: CodecOptions, + raw_array: bool = False, ) -> Any: """Decode a single key, value pair.""" element_type = data[position] position += 1 element_name, position = _get_c_string(data, view, position, opts) + if raw_array and element_type == ord(BSONARR): + _, end = _get_object_size(data, position, len(data)) + return element_name, view[position : end + 1], end + 1 try: value, position = _ELEMENT_GETTER[element_type]( data, view, position, obj_end, opts, element_name @@ -551,20 +563,30 @@ def _element_to_dict( _T = TypeVar("_T", bound=MutableMapping[Any, Any]) -def _raw_to_dict(data: Any, position: int, obj_end: int, opts: CodecOptions, result: _T) -> _T: +def _raw_to_dict( + data: Any, position: int, obj_end: int, opts: CodecOptions, result: _T, raw_array: bool = False +) -> _T: data, view = get_data_and_view(data) - return _elements_to_dict(data, view, position, obj_end, opts, result) + return _elements_to_dict(data, view, position, obj_end, opts, result, raw_array=raw_array) def _elements_to_dict( - data: Any, view: Any, position: int, obj_end: int, opts: CodecOptions, result: Any = None + data: Any, + view: Any, + position: int, + obj_end: int, + opts: CodecOptions, + result: Any = None, + raw_array: bool = False, ) -> Any: """Decode a BSON document into result.""" if result is None: result = opts.document_class() end = obj_end - 1 while position < end: - key, value, position = _element_to_dict(data, view, position, obj_end, opts) + key, value, position = _element_to_dict( + data, view, position, obj_end, opts, raw_array=raw_array + ) result[key] = value if position != obj_end: raise InvalidBSON("bad object or element length") @@ -1119,14 +1141,44 @@ def _decode_selective(rawdoc: Any, fields: Any, codec_options: Any) -> Mapping[A return doc +def _array_of_documents_to_buffer(view: memoryview) -> bytes: + # Extract the raw bytes of each document. 
+ position = 0 + _, end = _get_object_size(view, position, len(view)) + position += 4 + buffers: List[memoryview] = [] + append = buffers.append + while position < end - 1: + # Just skip the keys. + while view[position] != 0: + position += 1 + position += 1 + obj_size, _ = _get_object_size(view, position, end) + append(view[position : position + obj_size]) + position += obj_size + if position != end: + raise InvalidBSON("bad object or element length") + return b"".join(buffers) + + +if _USE_C: + _array_of_documents_to_buffer = _cbson._array_of_documents_to_buffer # noqa: F811 + + def _convert_raw_document_lists_to_streams(document: Any) -> None: + """Convert raw array of documents to a stream of BSON documents.""" cursor = document.get("cursor") - if cursor: - for key in ("firstBatch", "nextBatch"): - batch = cursor.get(key) - if batch: - stream = b"".join(doc.raw for doc in batch) - cursor[key] = [stream] + if not cursor: + return + for key in ("firstBatch", "nextBatch"): + batch = cursor.get(key) + if not batch: + continue + data = _array_of_documents_to_buffer(batch) + if data: + cursor[key] = [data] + else: + cursor[key] = [] def _decode_all_selective(data: Any, codec_options: CodecOptions, fields: Any) -> List[Any]: diff --git a/bson/_cbsonmodule.c b/bson/_cbsonmodule.c index 019f049bb5..8678e8050b 100644 --- a/bson/_cbsonmodule.c +++ b/bson/_cbsonmodule.c @@ -1615,7 +1615,7 @@ static PyObject *_dbref_hook(PyObject* self, PyObject* value) { static PyObject* get_value(PyObject* self, PyObject* name, const char* buffer, unsigned* position, unsigned char type, - unsigned max, const codec_options_t* options) { + unsigned max, const codec_options_t* options, int raw_array) { struct module_state *state = GETSTATE(self); PyObject* value = NULL; switch (type) { @@ -1712,11 +1712,20 @@ static PyObject* get_value(PyObject* self, PyObject* name, const char* buffer, if (size < BSON_MIN_SIZE || max < size) { goto invalid; } + end = *position + size - 1; /* Check for bad eoo */ if (buffer[end]) { goto invalid; } + + if (raw_array != 0) { + // Treat it as a binary buffer. 
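+            // In raw_array mode the array's BSON bytes are returned verbatim
+            // so callers (e.g. raw batch cursors) can re-stream the documents
+            // without building intermediate Python objects.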
+ value = PyBytes_FromStringAndSize(buffer + *position, size); + *position += size; + break; + } + *position += 4; value = PyList_New(0); @@ -1740,7 +1749,7 @@ static PyObject* get_value(PyObject* self, PyObject* name, const char* buffer, goto invalid; } to_append = get_value(self, name, buffer, position, bson_type, - max - (unsigned)key_size, options); + max - (unsigned)key_size, options, raw_array); Py_LeaveRecursiveCall(); if (!to_append) { Py_DECREF(value); @@ -2464,6 +2473,7 @@ static PyObject* get_value(PyObject* self, PyObject* name, const char* buffer, static int _element_to_dict(PyObject* self, const char* string, unsigned position, unsigned max, const codec_options_t* options, + int raw_array, PyObject** name, PyObject** value) { unsigned char type = (unsigned char)string[position++]; size_t name_length = strlen(string + position); @@ -2504,7 +2514,7 @@ static int _element_to_dict(PyObject* self, const char* string, } position += (unsigned)name_length + 1; *value = get_value(self, *name, string, &position, type, - max - position, options); + max - position, options, raw_array); if (!*value) { Py_DECREF(*name); return -1; @@ -2520,12 +2530,13 @@ static PyObject* _cbson_element_to_dict(PyObject* self, PyObject* args) { unsigned position; unsigned max; int new_position; + int raw_array = 0; PyObject* name; PyObject* value; PyObject* result_tuple; - if (!PyArg_ParseTuple(args, "OIIO&", &bson, &position, &max, - convert_codec_options, &options)) { + if (!PyArg_ParseTuple(args, "OIIO&p", &bson, &position, &max, + convert_codec_options, &options, &raw_array)) { return NULL; } @@ -2535,8 +2546,7 @@ static PyObject* _cbson_element_to_dict(PyObject* self, PyObject* args) { } string = PyBytes_AS_STRING(bson); - new_position = _element_to_dict(self, string, position, max, &options, - &name, &value); + new_position = _element_to_dict(self, string, position, max, &options, raw_array, &name, &value); if (new_position < 0) { return NULL; } @@ -2560,13 +2570,14 @@ static PyObject* _elements_to_dict(PyObject* self, const char* string, if (!dict) { return NULL; } + int raw_array = 0; while (position < max) { PyObject* name = NULL; PyObject* value = NULL; int new_position; new_position = _element_to_dict( - self, string, position, max, options, &name, &value); + self, string, position, max, options, raw_array, &name, &value); if (new_position < 0) { Py_DECREF(dict); return NULL; @@ -2649,7 +2660,6 @@ static PyObject* _cbson_bson_to_dict(PyObject* self, PyObject* args) { } string = (char*)view.buf; - memcpy(&size, string, 4); size = (int32_t)BSON_UINT32_FROM_LE(size); if (size < BSON_MIN_SIZE) { @@ -2797,6 +2807,124 @@ static PyObject* _cbson_decode_all(PyObject* self, PyObject* args) { return result; } + +static PyObject* _cbson_array_of_documents_to_buffer(PyObject* self, PyObject* args) { + uint32_t size; + uint32_t value_length; + uint32_t position = 0; + buffer_t buffer; + const char* string; + PyObject* arr; + PyObject* result = NULL; + Py_buffer view = {0}; + + if (!PyArg_ParseTuple(args, "O", &arr)) { + return NULL; + } + + if (!_get_buffer(arr, &view)) { + return NULL; + } + + buffer = pymongo_buffer_new(); + if (!buffer) { + PyBuffer_Release(&view); + return NULL; + } + + string = (char*)view.buf; + + if (view.len < BSON_MIN_SIZE) { + PyObject* InvalidBSON = _error("InvalidBSON"); + if (InvalidBSON) { + PyErr_SetString(InvalidBSON, + "not enough data for a BSON document"); + Py_DECREF(InvalidBSON); + } + goto done; + } + + memcpy(&size, string, 4); + size = BSON_UINT32_FROM_LE(size); + /* 
save space for length */ + if (pymongo_buffer_save_space(buffer, size) == -1) { + goto fail; + } + pymongo_buffer_update_position(buffer, 0); + + position += 4; + while (position < size - 1) { + // Verify the value is an object. + unsigned char type = (unsigned char)string[position]; + if (type != 3) { + PyObject* InvalidBSON = _error("InvalidBSON"); + if (InvalidBSON) { + PyErr_SetString(InvalidBSON, "array element was not an object"); + Py_DECREF(InvalidBSON); + } + goto fail; + } + + // Just skip the keys. + position = position + strlen(string + position) + 1; + + if (position >= size || (size - position) < BSON_MIN_SIZE) { + PyObject* InvalidBSON = _error("InvalidBSON"); + if (InvalidBSON) { + PyErr_SetString(InvalidBSON, "invalid array content"); + Py_DECREF(InvalidBSON); + } + goto fail; + } + + memcpy(&value_length, string + position, 4); + value_length = BSON_UINT32_FROM_LE(value_length); + if (value_length < BSON_MIN_SIZE) { + PyObject* InvalidBSON = _error("InvalidBSON"); + if (InvalidBSON) { + PyErr_SetString(InvalidBSON, "invalid message size"); + Py_DECREF(InvalidBSON); + } + goto fail; + } + + if (view.len < size) { + PyObject* InvalidBSON = _error("InvalidBSON"); + if (InvalidBSON) { + PyErr_SetString(InvalidBSON, "objsize too large"); + Py_DECREF(InvalidBSON); + } + goto fail; + } + + if (string[size - 1]) { + PyObject* InvalidBSON = _error("InvalidBSON"); + if (InvalidBSON) { + PyErr_SetString(InvalidBSON, "bad eoo"); + Py_DECREF(InvalidBSON); + } + goto fail; + } + + if (pymongo_buffer_write(buffer, string + position, value_length) == 1) { + goto fail; + } + position += value_length; + } + + /* objectify buffer */ + result = Py_BuildValue("y#", pymongo_buffer_get_buffer(buffer), + (Py_ssize_t)pymongo_buffer_get_position(buffer)); + goto done; +fail: + result = NULL; +done: + PyBuffer_Release(&view); + pymongo_buffer_free(buffer); + return result; +} + + static PyMethodDef _CBSONMethods[] = { {"_dict_to_bson", _cbson_dict_to_bson, METH_VARARGS, "convert a dictionary to a string containing its BSON representation."}, @@ -2806,6 +2934,7 @@ static PyMethodDef _CBSONMethods[] = { "convert binary data to a sequence of documents."}, {"_element_to_dict", _cbson_element_to_dict, METH_VARARGS, "Decode a single key, value pair."}, + {"_array_of_documents_to_buffer", _cbson_array_of_documents_to_buffer, METH_VARARGS, "Convert raw array of documents to a stream of BSON documents"}, {NULL, NULL, 0, NULL} }; diff --git a/bson/raw_bson.py b/bson/raw_bson.py index ca7207f0a2..6a80ea70ca 100644 --- a/bson/raw_bson.py +++ b/bson/raw_bson.py @@ -60,6 +60,23 @@ from bson.son import SON +def _inflate_bson( + bson_bytes: bytes, codec_options: CodecOptions, raw_array: bool = False +) -> Mapping[Any, Any]: + """Inflates the top level fields of a BSON document. + + :Parameters: + - `bson_bytes`: the BSON bytes that compose this document + - `codec_options`: An instance of + :class:`~bson.codec_options.CodecOptions` whose ``document_class`` + must be :class:`RawBSONDocument`. + """ + # Use SON to preserve ordering of elements. + return _raw_to_dict( + bson_bytes, 4, len(bson_bytes) - 1, codec_options, SON(), raw_array=raw_array + ) + + class RawBSONDocument(Mapping[str, Any]): """Representation for a MongoDB document that provides access to the raw BSON bytes that compose it. @@ -111,7 +128,7 @@ class from the standard library so it can be used like a read-only # it refers to this class RawBSONDocument. 
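+        # Checking issubclass rather than the exact class admits subclasses
+        # such as _RawArrayBSONDocument, added below for raw batch decoding.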
if codec_options is None: codec_options = DEFAULT_RAW_BSON_OPTIONS - elif codec_options.document_class is not RawBSONDocument: + elif not issubclass(codec_options.document_class, RawBSONDocument): raise TypeError( "RawBSONDocument cannot use CodecOptions with document " "class %s" % (codec_options.document_class,) @@ -135,9 +152,13 @@ def __inflated(self) -> Mapping[str, Any]: # We already validated the object's size when this document was # created, so no need to do that again. # Use SON to preserve ordering of elements. - self.__inflated_doc = _inflate_bson(self.__raw, self.__codec_options) + self.__inflated_doc = self._inflate_bson(self.__raw, self.__codec_options) return self.__inflated_doc + @staticmethod + def _inflate_bson(bson_bytes: bytes, codec_options: CodecOptions) -> Mapping[Any, Any]: + return _inflate_bson(bson_bytes, codec_options) + def __getitem__(self, item: str) -> Any: return self.__inflated[item] @@ -153,23 +174,23 @@ def __eq__(self, other: Any) -> bool: return NotImplemented def __repr__(self): - return "RawBSONDocument(%r, codec_options=%r)" % (self.raw, self.__codec_options) + return "%s(%r, codec_options=%r)" % ( + self.__class__.__name__, + self.raw, + self.__codec_options, + ) -def _inflate_bson(bson_bytes: bytes, codec_options: CodecOptions) -> Mapping[Any, Any]: - """Inflates the top level fields of a BSON document. +class _RawArrayBSONDocument(RawBSONDocument): + """A RawBSONDocument that only expands sub-documents and arrays when accessed.""" - :Parameters: - - `bson_bytes`: the BSON bytes that compose this document - - `codec_options`: An instance of - :class:`~bson.codec_options.CodecOptions` whose ``document_class`` - must be :class:`RawBSONDocument`. - """ - # Use SON to preserve ordering of elements. - return _raw_to_dict(bson_bytes, 4, len(bson_bytes) - 1, codec_options, SON()) + @staticmethod + def _inflate_bson(bson_bytes: bytes, codec_options: CodecOptions) -> Mapping[Any, Any]: + return _inflate_bson(bson_bytes, codec_options, raw_array=True) DEFAULT_RAW_BSON_OPTIONS: CodecOptions = DEFAULT.with_options(document_class=RawBSONDocument) +_RAW_ARRAY_BSON_OPTIONS: CodecOptions = DEFAULT.with_options(document_class=_RawArrayBSONDocument) """The default :class:`~bson.codec_options.CodecOptions` for :class:`RawBSONDocument`. 
""" diff --git a/pymongo/message.py b/pymongo/message.py index 8f37fdc062..960832cb9e 100644 --- a/pymongo/message.py +++ b/pymongo/message.py @@ -29,7 +29,12 @@ import bson from bson import CodecOptions, _decode_selective, _dict_to_bson, _make_c_string, encode from bson.int64 import Int64 -from bson.raw_bson import DEFAULT_RAW_BSON_OPTIONS, RawBSONDocument, _inflate_bson +from bson.raw_bson import ( + _RAW_ARRAY_BSON_OPTIONS, + DEFAULT_RAW_BSON_OPTIONS, + RawBSONDocument, + _inflate_bson, +) from bson.son import SON try: @@ -1379,7 +1384,7 @@ def raw_response(self, cursor_id=None, user_fields={}): # noqa: B006 user_fields is used to determine which fields must not be decoded """ inflated_response = _decode_selective( - RawBSONDocument(self.payload_document), user_fields, DEFAULT_RAW_BSON_OPTIONS + RawBSONDocument(self.payload_document), user_fields, _RAW_ARRAY_BSON_OPTIONS ) return [inflated_response] From 2af12e64639b55c2d8c5d52892d42fab89355220 Mon Sep 17 00:00:00 2001 From: Julius Park Date: Mon, 26 Sep 2022 15:48:48 -0700 Subject: [PATCH 0256/1588] PYTHON-3444 MyPy Errors With Version 0.981 (#1063) --- test/test_auth.py | 4 ++-- test/test_bson.py | 15 +++++---------- test/test_change_stream.py | 3 +-- test/test_database.py | 5 ++--- 4 files changed, 10 insertions(+), 17 deletions(-) diff --git a/test/test_auth.py b/test/test_auth.py index 20d53ef24b..69ed27bda0 100644 --- a/test/test_auth.py +++ b/test/test_auth.py @@ -329,8 +329,8 @@ def auth_string(user, password): bad_user = MongoClient(auth_string("not-user", SASL_PASS)) bad_pwd = MongoClient(auth_string(SASL_USER, "not-pwd")) # OperationFailure raised upon connecting. - self.assertRaises(OperationFailure, bad_user.admin.command, "ping") # type: ignore[arg-type] - self.assertRaises(OperationFailure, bad_pwd.admin.command, "ping") # type: ignore[arg-type] + self.assertRaises(OperationFailure, bad_user.admin.command, "ping") + self.assertRaises(OperationFailure, bad_pwd.admin.command, "ping") class TestSCRAMSHA1(IntegrationTest): diff --git a/test/test_bson.py b/test/test_bson.py index e3c4a3a028..a8fd1fef45 100644 --- a/test/test_bson.py +++ b/test/test_bson.py @@ -119,8 +119,7 @@ def tzname(self, dt): class TestBSON(unittest.TestCase): def assertInvalid(self, data): - # Remove type ignore after: https://github.com/python/mypy/issues/13220 - self.assertRaises(InvalidBSON, decode, data) # type: ignore[arg-type] + self.assertRaises(InvalidBSON, decode, data) def check_encode_then_decode(self, doc_class=dict, decoder=decode, encoder=encode): @@ -1029,17 +1028,14 @@ def test_unicode_decode_error_handler(self): # Ensure that strict mode raises an error. for invalid in [invalid_key, invalid_val, invalid_both]: - # Remove type ignore after: https://github.com/python/mypy/issues/13220 self.assertRaises( InvalidBSON, - decode, # type: ignore[arg-type] + decode, invalid, CodecOptions(unicode_decode_error_handler="strict"), ) - self.assertRaises( - InvalidBSON, decode, invalid, CodecOptions() # type: ignore[arg-type] - ) - self.assertRaises(InvalidBSON, decode, invalid) # type: ignore[arg-type] + self.assertRaises(InvalidBSON, decode, invalid, CodecOptions()) + self.assertRaises(InvalidBSON, decode, invalid) # Test all other error handlers. 
for handler in ["replace", "backslashreplace", "surrogateescape", "ignore"]: @@ -1056,10 +1052,9 @@ def test_unicode_decode_error_handler(self): dec = decode(enc, CodecOptions(unicode_decode_error_handler="junk")) self.assertEqual(dec, {"keystr": "foobar"}) - # Remove type ignore after: https://github.com/python/mypy/issues/13220 self.assertRaises( InvalidBSON, - decode, # type: ignore[arg-type] + decode, invalid_both, CodecOptions(unicode_decode_error_handler="junk"), ) diff --git a/test/test_change_stream.py b/test/test_change_stream.py index 18a0ec84c4..62d7abee62 100644 --- a/test/test_change_stream.py +++ b/test/test_change_stream.py @@ -1080,9 +1080,8 @@ def setFailPoint(self, scenario_dict): fail_cmd = SON([("configureFailPoint", "failCommand")]) fail_cmd.update(fail_point) client_context.client.admin.command(fail_cmd) - # Remove type ignore after: https://github.com/python/mypy/issues/13220 self.addCleanup( - client_context.client.admin.command, # type: ignore[arg-type] + client_context.client.admin.command, "configureFailPoint", fail_cmd["configureFailPoint"], mode="off", diff --git a/test/test_database.py b/test/test_database.py index a1c0439089..d49ac8324f 100644 --- a/test/test_database.py +++ b/test/test_database.py @@ -604,14 +604,13 @@ def test_command_max_time_ms(self): try: db = self.client.pymongo_test db.command("count", "test") - # Remove type ignore after: https://github.com/python/mypy/issues/13220 - self.assertRaises(ExecutionTimeout, db.command, "count", "test", maxTimeMS=1) # type: ignore[arg-type] + self.assertRaises(ExecutionTimeout, db.command, "count", "test", maxTimeMS=1) pipeline = [{"$project": {"name": 1, "count": 1}}] # Database command helper. db.command("aggregate", "test", pipeline=pipeline, cursor={}) self.assertRaises( ExecutionTimeout, - db.command, # type: ignore[arg-type] + db.command, "aggregate", "test", pipeline=pipeline, From c874c96e29e1b2e19b6bc456eb476e8512e22683 Mon Sep 17 00:00:00 2001 From: Julius Park Date: Tue, 27 Sep 2022 15:31:20 -0700 Subject: [PATCH 0257/1588] PYTHON-3232 Improved change stream event visibility for C2C Replication (#1062) --- doc/changelog.rst | 4 + pymongo/aggregation.py | 2 + pymongo/change_stream.py | 7 + pymongo/collection.py | 8 +- pymongo/database.py | 6 + pymongo/mongo_client.py | 6 + .../change-streams-disambiguatedPaths.json | 252 +++++++++ .../change-streams-showExpandedEvents.json | 517 ++++++++++++++++++ test/utils.py | 2 + 9 files changed, 803 insertions(+), 1 deletion(-) create mode 100644 test/change_streams/unified/change-streams-disambiguatedPaths.json create mode 100644 test/change_streams/unified/change-streams-showExpandedEvents.json diff --git a/doc/changelog.rst b/doc/changelog.rst index 24c80efa2e..b8f346e571 100644 --- a/doc/changelog.rst +++ b/doc/changelog.rst @@ -22,6 +22,10 @@ PyMongo 4.3 brings a number of improvements including: deadlocks are still possible because libraries that PyMongo depends like OpenSSL cannot be made fork() safe in multithreaded applications. (`PYTHON-2484`_). For more info see :ref:`pymongo-fork-safe`. +- When used with MongoDB 6.0+, :class:`~pymongo.change_stream.ChangeStream` s + now allow for new types of events (such as DDL and C2C replication events) + to be recorded with the new parameter ``show_expanded_events`` + that can be passed to methods such as :meth:`~pymongo.collection.Collection.watch`. Bug fixes ......... 
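A minimal usage sketch of the option described in the changelog entry above
(client, database, and collection names are illustrative)::

    with client.db.coll.watch([], show_expanded_events=True) as stream:
        for event in stream:
            # With MongoDB 6.0+, DDL events such as "createIndexes" and
            # "rename" are surfaced alongside the usual CRUD events.
            print(event["operationType"])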
diff --git a/pymongo/aggregation.py b/pymongo/aggregation.py index 62fe4bd055..a13f164f53 100644 --- a/pymongo/aggregation.py +++ b/pymongo/aggregation.py @@ -40,6 +40,7 @@ def __init__( user_fields=None, result_processor=None, comment=None, + show_expanded_events=None, ): if "explain" in options: raise ConfigurationError( @@ -60,6 +61,7 @@ def __init__( options["let"] = let if comment is not None: options["comment"] = comment + self._options = options # This is the batchSize that will be used for setting the initial diff --git a/pymongo/change_stream.py b/pymongo/change_stream.py index 0edf513a3c..775f93c79a 100644 --- a/pymongo/change_stream.py +++ b/pymongo/change_stream.py @@ -109,6 +109,7 @@ def __init__( start_after: Optional[Mapping[str, Any]], comment: Optional[Any] = None, full_document_before_change: Optional[str] = None, + show_expanded_events: Optional[bool] = None, ) -> None: if pipeline is None: pipeline = [] @@ -143,6 +144,7 @@ def __init__( self._comment = comment self._closed = False self._timeout = self._target._timeout + self._show_expanded_events = show_expanded_events # Initialize cursor. self._cursor = self._create_cursor() @@ -175,6 +177,10 @@ def _change_stream_options(self): if self._start_at_operation_time is not None: options["startAtOperationTime"] = self._start_at_operation_time + + if self._show_expanded_events: + options["showExpandedEvents"] = self._show_expanded_events + return options def _command_options(self): @@ -230,6 +236,7 @@ def _run_aggregation_cmd(self, session, explicit_session): explicit_session, result_processor=self._process_result, comment=self._comment, + show_expanded_events=self._show_expanded_events, ) return self._client._retryable_read( cmd.get_cursor, self._target._read_preference_for(session), session diff --git a/pymongo/collection.py b/pymongo/collection.py index 9a9ba56618..8f1afc575d 100644 --- a/pymongo/collection.py +++ b/pymongo/collection.py @@ -2495,6 +2495,7 @@ def watch( start_after: Optional[Mapping[str, Any]] = None, comment: Optional[Any] = None, full_document_before_change: Optional[str] = None, + show_expanded_events: Optional[bool] = None, ) -> CollectionChangeStream[_DocumentType]: """Watch changes on this collection. @@ -2579,12 +2580,16 @@ def watch( This option and `resume_after` are mutually exclusive. - `comment` (optional): A user-provided comment to attach to this command. + - `show_expanded_events` (optional): Include expanded events such as DDL events like `dropIndexes`. :Returns: A :class:`~pymongo.change_stream.CollectionChangeStream` cursor. + .. versionchanged:: 4.3 + Added `show_expanded_events` parameter. + .. versionchanged:: 4.2 - Added ``full_document_before_change`` parameter. + Added ``full_document_before_change`` parameter. .. versionchanged:: 4.1 Added ``comment`` parameter. @@ -2615,6 +2620,7 @@ def watch( start_after, comment, full_document_before_change, + show_expanded_events, ) @_csot.apply diff --git a/pymongo/database.py b/pymongo/database.py index 4f87a58dda..59328a1b53 100644 --- a/pymongo/database.py +++ b/pymongo/database.py @@ -547,6 +547,7 @@ def watch( start_after: Optional[Mapping[str, Any]] = None, comment: Optional[Any] = None, full_document_before_change: Optional[str] = None, + show_expanded_events: Optional[bool] = None, ) -> DatabaseChangeStream[_DocumentType]: """Watch changes on this database. @@ -624,10 +625,14 @@ def watch( This option and `resume_after` are mutually exclusive. - `comment` (optional): A user-provided comment to attach to this command. 
+ - `show_expanded_events` (optional): Include expanded events such as DDL events like `dropIndexes`. :Returns: A :class:`~pymongo.change_stream.DatabaseChangeStream` cursor. + .. versionchanged:: 4.3 + Added `show_expanded_events` parameter. + .. versionchanged:: 4.2 Added ``full_document_before_change`` parameter. @@ -657,6 +662,7 @@ def watch( start_after, comment, full_document_before_change, + show_expanded_events=show_expanded_events, ) def _command( diff --git a/pymongo/mongo_client.py b/pymongo/mongo_client.py index 7e4e4f10ca..7d16e58777 100644 --- a/pymongo/mongo_client.py +++ b/pymongo/mongo_client.py @@ -895,6 +895,7 @@ def watch( start_after: Optional[Mapping[str, Any]] = None, comment: Optional[Any] = None, full_document_before_change: Optional[str] = None, + show_expanded_events: Optional[bool] = None, ) -> ChangeStream[_DocumentType]: """Watch changes on this cluster. @@ -972,10 +973,14 @@ def watch( This option and `resume_after` are mutually exclusive. - `comment` (optional): A user-provided comment to attach to this command. + - `show_expanded_events` (optional): Include expanded events such as DDL events like `dropIndexes`. :Returns: A :class:`~pymongo.change_stream.ClusterChangeStream` cursor. + .. versionchanged:: 4.3 + Added `show_expanded_events` parameter. + .. versionchanged:: 4.2 Added ``full_document_before_change`` parameter. @@ -1005,6 +1010,7 @@ def watch( start_after, comment, full_document_before_change, + show_expanded_events=show_expanded_events, ) @property diff --git a/test/change_streams/unified/change-streams-disambiguatedPaths.json b/test/change_streams/unified/change-streams-disambiguatedPaths.json new file mode 100644 index 0000000000..91d8e66da2 --- /dev/null +++ b/test/change_streams/unified/change-streams-disambiguatedPaths.json @@ -0,0 +1,252 @@ +{ + "description": "disambiguatedPaths", + "schemaVersion": "1.4", + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "database0" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "collection0" + } + } + ], + "runOnRequirements": [ + { + "minServerVersion": "6.1.0", + "topologies": [ + "replicaset", + "sharded-replicaset", + "load-balanced", + "sharded" + ], + "serverless": "forbid" + } + ], + "initialData": [ + { + "collectionName": "collection0", + "databaseName": "database0", + "documents": [] + } + ], + "tests": [ + { + "description": "disambiguatedPaths is not present when showExpandedEvents is false/unset", + "operations": [ + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "_id": 1, + "a": { + "1": 1 + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [] + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "updateOne", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "update": { + "$set": { + "a.1": 2 + } + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "operationType": "update", + "ns": { + "db": "database0", + "coll": "collection0" + }, + "updateDescription": { + "updatedFields": { + "$$exists": true + }, + "removedFields": { + "$$exists": true + }, + "truncatedArrays": { + "$$exists": true + }, + "disambiguatedPaths": { + "$$exists": false + } + } + } + } + ] + }, + { + "description": "disambiguatedPaths is present on 
updateDescription when an ambiguous path is present", + "operations": [ + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "_id": 1, + "a": { + "1": 1 + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [], + "showExpandedEvents": true + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "updateOne", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "update": { + "$set": { + "a.1": 2 + } + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "operationType": "update", + "ns": { + "db": "database0", + "coll": "collection0" + }, + "updateDescription": { + "updatedFields": { + "$$exists": true + }, + "removedFields": { + "$$exists": true + }, + "truncatedArrays": { + "$$exists": true + }, + "disambiguatedPaths": { + "a.1": [ + "a", + "1" + ] + } + } + } + } + ] + }, + { + "description": "disambiguatedPaths returns array indices as integers", + "operations": [ + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "_id": 1, + "a": [ + { + "1": 1 + } + ] + } + } + }, + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [], + "showExpandedEvents": true + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "updateOne", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "update": { + "$set": { + "a.0.1": 2 + } + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "operationType": "update", + "ns": { + "db": "database0", + "coll": "collection0" + }, + "updateDescription": { + "updatedFields": { + "$$exists": true + }, + "removedFields": { + "$$exists": true + }, + "truncatedArrays": { + "$$exists": true + }, + "disambiguatedPaths": { + "a.0.1": [ + "a", + { + "$$type": "int" + }, + "1" + ] + } + } + } + } + ] + } + ] +} diff --git a/test/change_streams/unified/change-streams-showExpandedEvents.json b/test/change_streams/unified/change-streams-showExpandedEvents.json new file mode 100644 index 0000000000..3eed2f534a --- /dev/null +++ b/test/change_streams/unified/change-streams-showExpandedEvents.json @@ -0,0 +1,517 @@ +{ + "description": "change-streams-showExpandedEvents", + "schemaVersion": "1.7", + "runOnRequirements": [ + { + "minServerVersion": "6.0.0", + "topologies": [ + "replicaset", + "sharded-replicaset", + "sharded" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ], + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "database0" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "collection0" + } + }, + { + "database": { + "id": "database1", + "client": "client0", + "databaseName": "database1" + } + }, + { + "collection": { + "id": "collection1", + "database": "database1", + "collectionName": "collection1" + } + }, + { + "database": { + "id": "shardedDb", + "client": "client0", + "databaseName": "shardedDb" + } + }, + { + "database": { + "id": "adminDb", + "client": "client0", + "databaseName": "admin" + } + }, + { + "collection": { + "id": "shardedCollection", + "database": "shardedDb", + "collectionName": "shardedCollection" + } + } + ], + "initialData": [ + { + "collectionName": "collection0", 
+ "databaseName": "database0", + "documents": [] + } + ], + "tests": [ + { + "description": "when provided, showExpandedEvents is sent as a part of the aggregate command", + "operations": [ + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [], + "showExpandedEvents": true + }, + "saveResultAsEntity": "changeStream0" + } + ], + "expectEvents": [ + { + "client": "client0", + "ignoreExtraEvents": true, + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": { + "showExpandedEvents": true + } + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + } + ] + } + ] + }, + { + "description": "when omitted, showExpandedEvents is not sent as a part of the aggregate command", + "operations": [ + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [] + }, + "saveResultAsEntity": "changeStream0" + } + ], + "expectEvents": [ + { + "client": "client0", + "ignoreExtraEvents": true, + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": { + "showExpandedEvents": { + "$$exists": false + } + } + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + } + ] + } + ] + }, + { + "description": "when showExpandedEvents is true, new fields on change stream events are handled appropriately", + "operations": [ + { + "name": "dropCollection", + "object": "database0", + "arguments": { + "collection": "foo" + } + }, + { + "name": "createCollection", + "object": "database0", + "arguments": { + "collection": "foo" + } + }, + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [], + "showExpandedEvents": true + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "a": 1 + } + } + }, + { + "name": "createIndex", + "object": "collection0", + "arguments": { + "keys": { + "x": 1 + }, + "name": "x_1" + } + }, + { + "name": "rename", + "object": "collection0", + "arguments": { + "to": "foo", + "dropTarget": true + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "operationType": "insert", + "ns": { + "db": "database0", + "coll": "collection0" + }, + "collectionUUID": { + "$$exists": true + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "operationType": "createIndexes", + "ns": { + "db": "database0", + "coll": "collection0" + }, + "operationDescription": { + "$$exists": true + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "operationType": "rename", + "ns": { + "db": "database0", + "coll": "collection0" + }, + "to": { + "db": "database0", + "coll": "foo" + }, + "operationDescription": { + "dropTarget": { + "$$exists": true + }, + "to": { + "db": "database0", + "coll": "foo" + } + } + } + } + ] + }, + { + "description": "when showExpandedEvents is true, createIndex events are reported", + "operations": [ + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [ + { + "$match": { + "operationType": { + "$ne": "create" + } + } + } + ], + "showExpandedEvents": true + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "createIndex", + "object": "collection0", + "arguments": { + "keys": { + 
"x": 1 + }, + "name": "x_1" + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "operationType": "createIndexes" + } + } + ] + }, + { + "description": "when showExpandedEvents is true, dropIndexes events are reported", + "operations": [ + { + "name": "createIndex", + "object": "collection0", + "arguments": { + "keys": { + "x": 1 + }, + "name": "x_1" + } + }, + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [], + "showExpandedEvents": true + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "dropIndex", + "object": "collection0", + "arguments": { + "name": "x_1" + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "operationType": "dropIndexes" + } + } + ] + }, + { + "description": "when showExpandedEvents is true, create events are reported", + "operations": [ + { + "name": "dropCollection", + "object": "database0", + "arguments": { + "collection": "foo" + } + }, + { + "name": "createChangeStream", + "object": "database0", + "arguments": { + "pipeline": [], + "showExpandedEvents": true + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "createCollection", + "object": "database0", + "arguments": { + "collection": "foo" + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "operationType": "create" + } + } + ] + }, + { + "description": "when showExpandedEvents is true, create events on views are reported", + "operations": [ + { + "name": "dropCollection", + "object": "database0", + "arguments": { + "collection": "foo" + } + }, + { + "name": "createChangeStream", + "object": "database0", + "arguments": { + "pipeline": [], + "showExpandedEvents": true + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "createCollection", + "object": "database0", + "arguments": { + "collection": "foo", + "viewOn": "testName" + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "operationType": "create" + } + } + ] + }, + { + "description": "when showExpandedEvents is true, modify events are reported", + "operations": [ + { + "name": "createIndex", + "object": "collection0", + "arguments": { + "keys": { + "x": 1 + }, + "name": "x_2" + } + }, + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [], + "showExpandedEvents": true + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "runCommand", + "object": "database0", + "arguments": { + "command": { + "collMod": "collection0" + }, + "commandName": "collMod" + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "operationType": "modify" + } + } + ] + }, + { + "description": "when showExpandedEvents is true, shardCollection events are reported", + "runOnRequirements": [ + { + "topologies": [ + "sharded-replicaset", + "sharded" + ] + } + ], + "operations": [ + { + "name": "dropCollection", + "object": "shardedDb", + "arguments": { + "collection": "shardedCollection" + } + }, + { + "name": "createCollection", + "object": "shardedDb", + "arguments": { + "collection": "shardedCollection" + } + }, + { + "name": "createChangeStream", + "object": "shardedCollection", + "arguments": { + "pipeline": [], + "showExpandedEvents": true + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "runCommand", + "object": "adminDb", + "arguments": { + "command": { + "shardCollection": 
"shardedDb.shardedCollection", + "key": { + "_id": 1 + } + }, + "commandName": "shardCollection" + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "operationType": "shardCollection" + } + } + ] + } + ] +} diff --git a/test/utils.py b/test/utils.py index 1ac726d2d4..33a594d15a 100644 --- a/test/utils.py +++ b/test/utils.py @@ -1127,6 +1127,8 @@ def prepare_spec_arguments(spec, arguments, opname, entity_map, with_txn_callbac arguments["index_or_name"] = arguments.pop(arg_name) elif opname == "rename" and arg_name == "to": arguments["new_name"] = arguments.pop(arg_name) + elif opname == "rename" and arg_name == "dropTarget": + arguments["dropTarget"] = arguments.pop(arg_name) elif arg_name == "cursorType": cursor_type = arguments.pop(arg_name) if cursor_type == "tailable": From c9ac5a5cf8816893376912ca9d5fc024fbf28e03 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Wed, 28 Sep 2022 16:00:15 -0700 Subject: [PATCH 0258/1588] PYTHON-3447 Add back empty tls extra to avoid pip warnings (#1065) --- .github/workflows/test-python.yml | 2 +- README.rst | 2 +- doc/installation.rst | 2 +- setup.py | 3 ++- 4 files changed, 5 insertions(+), 4 deletions(-) diff --git a/.github/workflows/test-python.yml b/.github/workflows/test-python.yml index 6d5f26c503..d451197e4e 100644 --- a/.github/workflows/test-python.yml +++ b/.github/workflows/test-python.yml @@ -60,7 +60,7 @@ jobs: - name: Install dependencies run: | python -m pip install -U pip mypy - pip install -e ".[zstd, srv, encryption, ocsp]" + pip install -e ".[zstd, encryption, ocsp]" - name: Run mypy run: | mypy --install-types --non-interactive bson gridfs tools pymongo diff --git a/README.rst b/README.rst index 576facb5b5..f15ac48098 100644 --- a/README.rst +++ b/README.rst @@ -137,7 +137,7 @@ Client-Side Field Level Encryption requires `pymongocrypt You can install all dependencies automatically with the following command:: - $ python -m pip install "pymongo[gssapi,aws,ocsp,snappy,srv,tls,zstd,encryption]" + $ python -m pip install "pymongo[gssapi,aws,ocsp,snappy,zstd,encryption]" Additional dependencies are: diff --git a/doc/installation.rst b/doc/installation.rst index 4355f771eb..b02949335b 100644 --- a/doc/installation.rst +++ b/doc/installation.rst @@ -77,7 +77,7 @@ Wire protocol compression with zstandard requires `zstandard You can install all dependencies automatically with the following command:: - $ python3 -m pip install "pymongo[gssapi,aws,ocsp,snappy,srv,zstd,encryption]" + $ python3 -m pip install "pymongo[gssapi,aws,ocsp,snappy,zstd,encryption]" Installing from source ---------------------- diff --git a/setup.py b/setup.py index 524c1303e6..2706facf90 100755 --- a/setup.py +++ b/setup.py @@ -281,7 +281,8 @@ def build_extension(self, ext): "snappy": ["python-snappy"], "zstd": ["zstandard"], "aws": ["pymongo-auth-aws<2.0.0"], - "srv": [], + "srv": [], # PYTHON-3423 Removed in 4.3 but kept here to avoid pip warnings. + "tls": [], # PYTHON-2133 Removed in 4.0 but kept here to avoid pip warnings. 
} # GSSAPI extras From 64d7d6da8af8bf10a8d1a58482a610fe64507d38 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Wed, 9 Mar 2022 11:13:18 -0800 Subject: [PATCH 0259/1588] PYTHON-2970 Prioritize electionId over setVersion for stale primary check (#845) --- doc/changelog.rst | 3 + pymongo/topology_description.py | 29 ++-- .../rs/electionId_precedence_setVersion.json | 92 +++++++++++ .../rs/null_election_id.json | 30 ++-- .../rs/secondary_ignore_ok_0.json | 2 +- .../rs/set_version_can_rollback.json | 149 ++++++++++++++++++ ...tversion_equal_max_without_electionid.json | 84 ++++++++++ ...on_greaterthan_max_without_electionid.json | 84 ++++++++++ .../rs/setversion_without_electionid.json | 12 +- .../rs/use_setversion_without_electionid.json | 32 ++-- 10 files changed, 467 insertions(+), 50 deletions(-) create mode 100644 test/discovery_and_monitoring/rs/electionId_precedence_setVersion.json create mode 100644 test/discovery_and_monitoring/rs/set_version_can_rollback.json create mode 100644 test/discovery_and_monitoring/rs/setversion_equal_max_without_electionid.json create mode 100644 test/discovery_and_monitoring/rs/setversion_greaterthan_max_without_electionid.json diff --git a/doc/changelog.rst b/doc/changelog.rst index b8f346e571..eb9d1233bb 100644 --- a/doc/changelog.rst +++ b/doc/changelog.rst @@ -33,6 +33,8 @@ Bug fixes - Fixed a bug where :class:`~pymongo.change_stream.ChangeStream` would allow an app to retry calling ``next()`` or ``try_next()`` even after non-resumable errors (`PYTHON-3389`_). +- Fixed a bug where the client could be unable to discover the new primary + after a simultaneous replica set election and reconfig (`PYTHON-2970`_). Issues Resolved ............... @@ -42,6 +44,7 @@ in this release. .. _PYTHON-1824: https://jira.mongodb.org/browse/PYTHON-1824 .. _PYTHON-2484: https://jira.mongodb.org/browse/PYTHON-2484 +.. _PYTHON-2970: https://jira.mongodb.org/browse/PYTHON-2970 .. _PYTHON-3389: https://jira.mongodb.org/browse/PYTHON-3389 .. _PyMongo 4.3 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=33425 diff --git a/pymongo/topology_description.py b/pymongo/topology_description.py index b32a86e2d7..552d8f719a 100644 --- a/pymongo/topology_description.py +++ b/pymongo/topology_description.py @@ -17,6 +17,7 @@ from random import sample from typing import Any, Callable, Dict, List, NamedTuple, Optional, Tuple +from bson.min_key import MinKey from bson.objectid import ObjectId from pymongo import common from pymongo.errors import ConfigurationError @@ -532,24 +533,16 @@ def _update_rs_from_primary( sds.pop(server_description.address) return (_check_has_primary(sds), replica_set_name, max_set_version, max_election_id) - max_election_tuple = max_set_version, max_election_id - if None not in server_description.election_tuple: - if ( - None not in max_election_tuple - and max_election_tuple > server_description.election_tuple - ): - - # Stale primary, set to type Unknown. 
- sds[server_description.address] = server_description.to_unknown() - return (_check_has_primary(sds), replica_set_name, max_set_version, max_election_id) - - max_election_id = server_description.election_id - - if server_description.set_version is not None and ( - max_set_version is None or server_description.set_version > max_set_version - ): - - max_set_version = server_description.set_version + new_election_tuple = server_description.election_id, server_description.set_version + max_election_tuple = max_election_id, max_set_version + new_election_safe = tuple(MinKey() if i is None else i for i in new_election_tuple) + max_election_safe = tuple(MinKey() if i is None else i for i in max_election_tuple) + if new_election_safe >= max_election_safe: + max_election_id, max_set_version = new_election_tuple + else: + # Stale primary, set to type Unknown. + sds[server_description.address] = server_description.to_unknown() + return _check_has_primary(sds), replica_set_name, max_set_version, max_election_id # We've heard from the primary. Is it the same primary as before? for server in sds.values(): diff --git a/test/discovery_and_monitoring/rs/electionId_precedence_setVersion.json b/test/discovery_and_monitoring/rs/electionId_precedence_setVersion.json new file mode 100644 index 0000000000..a7b49e2b97 --- /dev/null +++ b/test/discovery_and_monitoring/rs/electionId_precedence_setVersion.json @@ -0,0 +1,92 @@ +{ + "description": "ElectionId is considered higher precedence than setVersion", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017", + "b:27017" + ], + "setName": "rs", + "setVersion": 1, + "electionId": { + "$oid": "000000000000000000000001" + }, + "minWireVersion": 0, + "maxWireVersion": 6 + } + ], + [ + "b:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017", + "b:27017" + ], + "setName": "rs", + "setVersion": 2, + "electionId": { + "$oid": "000000000000000000000001" + }, + "minWireVersion": 0, + "maxWireVersion": 6 + } + ], + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017", + "b:27017" + ], + "setName": "rs", + "setVersion": 1, + "electionId": { + "$oid": "000000000000000000000002" + }, + "minWireVersion": 0, + "maxWireVersion": 6 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "setVersion": 1, + "electionId": { + "$oid": "000000000000000000000002" + } + }, + "b:27017": { + "type": "Unknown", + "setName": null, + "setVersion": null, + "electionId": null + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs", + "maxSetVersion": 1, + "maxElectionId": { + "$oid": "000000000000000000000002" + } + } + } + ] +} diff --git a/test/discovery_and_monitoring/rs/null_election_id.json b/test/discovery_and_monitoring/rs/null_election_id.json index 62120e8448..8eb519595a 100644 --- a/test/discovery_and_monitoring/rs/null_election_id.json +++ b/test/discovery_and_monitoring/rs/null_election_id.json @@ -123,16 +123,19 @@ "outcome": { "servers": { "a:27017": { - "type": "RSPrimary", - "setName": "rs", - "setVersion": 1, - "electionId": null - }, - "b:27017": { "type": "Unknown", "setName": null, + "setVersion": null, "electionId": null }, + "b:27017": { + "type": "RSPrimary", + "setName": "rs", + "setVersion": 1, + "electionId": { + "$oid": "000000000000000000000002" + } + }, 
"c:27017": { "type": "Unknown", "setName": null, @@ -174,16 +177,19 @@ "outcome": { "servers": { "a:27017": { - "type": "RSPrimary", - "setName": "rs", - "setVersion": 1, - "electionId": null - }, - "b:27017": { "type": "Unknown", "setName": null, + "setVersion": null, "electionId": null }, + "b:27017": { + "type": "RSPrimary", + "setName": "rs", + "setVersion": 1, + "electionId": { + "$oid": "000000000000000000000002" + } + }, "c:27017": { "type": "Unknown", "setName": null, diff --git a/test/discovery_and_monitoring/rs/secondary_ignore_ok_0.json b/test/discovery_and_monitoring/rs/secondary_ignore_ok_0.json index 4c1cb011a5..ee9519930b 100644 --- a/test/discovery_and_monitoring/rs/secondary_ignore_ok_0.json +++ b/test/discovery_and_monitoring/rs/secondary_ignore_ok_0.json @@ -1,5 +1,5 @@ { - "description": "New primary", + "description": "Secondary ignored when ok is zero", "uri": "mongodb://a,b/?replicaSet=rs", "phases": [ { diff --git a/test/discovery_and_monitoring/rs/set_version_can_rollback.json b/test/discovery_and_monitoring/rs/set_version_can_rollback.json new file mode 100644 index 0000000000..28ecbeefca --- /dev/null +++ b/test/discovery_and_monitoring/rs/set_version_can_rollback.json @@ -0,0 +1,149 @@ +{ + "description": "Set version rolls back after new primary with higher election Id", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "responses": [ + [ + "a:27017", + { + "ok": 1, + "hello": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017", + "b:27017" + ], + "setName": "rs", + "setVersion": 2, + "electionId": { + "$oid": "000000000000000000000001" + }, + "minWireVersion": 0, + "maxWireVersion": 6 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "setVersion": 2, + "electionId": { + "$oid": "000000000000000000000001" + } + }, + "b:27017": { + "type": "Unknown", + "setName": null, + "electionId": null + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs", + "maxSetVersion": 2, + "maxElectionId": { + "$oid": "000000000000000000000001" + } + } + }, + { + "_comment": "Response from new primary with newer election Id", + "responses": [ + [ + "b:27017", + { + "ok": 1, + "hello": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017", + "b:27017" + ], + "setName": "rs", + "setVersion": 1, + "electionId": { + "$oid": "000000000000000000000002" + }, + "minWireVersion": 0, + "maxWireVersion": 6 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + "setName": null, + "electionId": null + }, + "b:27017": { + "type": "RSPrimary", + "setName": "rs", + "setVersion": 1, + "electionId": { + "$oid": "000000000000000000000002" + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs", + "maxSetVersion": 1, + "maxElectionId": { + "$oid": "000000000000000000000002" + } + } + }, + { + "_comment": "Response from stale primary", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "hello": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017", + "b:27017" + ], + "setName": "rs", + "setVersion": 2, + "electionId": { + "$oid": "000000000000000000000001" + }, + "minWireVersion": 0, + "maxWireVersion": 6 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + "setName": null, + "electionId": null + }, + "b:27017": { + "type": "RSPrimary", + "setName": "rs", + "setVersion": 1, + "electionId": { + "$oid": "000000000000000000000002" + } + } + }, + "topologyType": 
"ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs", + "maxSetVersion": 1, + "maxElectionId": { + "$oid": "000000000000000000000002" + } + } + } + ] +} diff --git a/test/discovery_and_monitoring/rs/setversion_equal_max_without_electionid.json b/test/discovery_and_monitoring/rs/setversion_equal_max_without_electionid.json new file mode 100644 index 0000000000..91e84d4fa0 --- /dev/null +++ b/test/discovery_and_monitoring/rs/setversion_equal_max_without_electionid.json @@ -0,0 +1,84 @@ +{ + "description": "setVersion version that is equal is treated the same as greater than if there is no electionId", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017", + "b:27017" + ], + "setName": "rs", + "setVersion": 1, + "minWireVersion": 0, + "maxWireVersion": 6 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "setVersion": 1, + "electionId": null + }, + "b:27017": { + "type": "Unknown", + "setName": null, + "electionId": null + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs", + "maxSetVersion": 1 + } + }, + { + "responses": [ + [ + "b:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017", + "b:27017" + ], + "setName": "rs", + "setVersion": 1, + "minWireVersion": 0, + "maxWireVersion": 6 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + "setName": null, + "electionId": null + }, + "b:27017": { + "type": "RSPrimary", + "setName": "rs", + "setVersion": 1, + "electionId": null + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs", + "maxSetVersion": 1 + } + } + ] +} diff --git a/test/discovery_and_monitoring/rs/setversion_greaterthan_max_without_electionid.json b/test/discovery_and_monitoring/rs/setversion_greaterthan_max_without_electionid.json new file mode 100644 index 0000000000..b15fd5c1a7 --- /dev/null +++ b/test/discovery_and_monitoring/rs/setversion_greaterthan_max_without_electionid.json @@ -0,0 +1,84 @@ +{ + "description": "setVersion that is greater than maxSetVersion is used if there is no electionId", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017", + "b:27017" + ], + "setName": "rs", + "setVersion": 1, + "minWireVersion": 0, + "maxWireVersion": 6 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "setVersion": 1, + "electionId": null + }, + "b:27017": { + "type": "Unknown", + "setName": null, + "electionId": null + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs", + "maxSetVersion": 1 + } + }, + { + "responses": [ + [ + "b:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017", + "b:27017" + ], + "setName": "rs", + "setVersion": 2, + "minWireVersion": 0, + "maxWireVersion": 6 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + "setName": null, + "electionId": null + }, + "b:27017": { + "type": "RSPrimary", + "setName": "rs", + "setVersion": 2, + "electionId": null + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs", + "maxSetVersion": 2 + 
} + } + ] +} diff --git a/test/discovery_and_monitoring/rs/setversion_without_electionid.json b/test/discovery_and_monitoring/rs/setversion_without_electionid.json index 2f68287f1d..f59c162ae1 100644 --- a/test/discovery_and_monitoring/rs/setversion_without_electionid.json +++ b/test/discovery_and_monitoring/rs/setversion_without_electionid.json @@ -1,5 +1,5 @@ { - "description": "setVersion is ignored if there is no electionId", + "description": "setVersion that is less than maxSetVersion is ignored if there is no electionId", "uri": "mongodb://a/?replicaSet=rs", "phases": [ { @@ -63,14 +63,14 @@ "outcome": { "servers": { "a:27017": { - "type": "Unknown", - "setName": null, + "type": "RSPrimary", + "setName": "rs", + "setVersion": 2, "electionId": null }, "b:27017": { - "type": "RSPrimary", - "setName": "rs", - "setVersion": 1, + "type": "Unknown", + "setName": null, "electionId": null } }, diff --git a/test/discovery_and_monitoring/rs/use_setversion_without_electionid.json b/test/discovery_and_monitoring/rs/use_setversion_without_electionid.json index 421ff57c8d..6dd753d5d8 100644 --- a/test/discovery_and_monitoring/rs/use_setversion_without_electionid.json +++ b/test/discovery_and_monitoring/rs/use_setversion_without_electionid.json @@ -71,20 +71,23 @@ "outcome": { "servers": { "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "setVersion": 1, + "electionId": { + "$oid": "000000000000000000000001" + } + }, + "b:27017": { "type": "Unknown", "setName": null, "electionId": null - }, - "b:27017": { - "type": "RSPrimary", - "setName": "rs", - "setVersion": 2 } }, "topologyType": "ReplicaSetWithPrimary", "logicalSessionTimeoutMinutes": null, "setName": "rs", - "maxSetVersion": 2, + "maxSetVersion": 1, "maxElectionId": { "$oid": "000000000000000000000001" } @@ -115,22 +118,25 @@ "outcome": { "servers": { "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "setVersion": 1, + "electionId": { + "$oid": "000000000000000000000002" + } + }, + "b:27017": { "type": "Unknown", "setName": null, "electionId": null - }, - "b:27017": { - "type": "RSPrimary", - "setName": "rs", - "setVersion": 2 } }, "topologyType": "ReplicaSetWithPrimary", "logicalSessionTimeoutMinutes": null, "setName": "rs", - "maxSetVersion": 2, + "maxSetVersion": 1, "maxElectionId": { - "$oid": "000000000000000000000001" + "$oid": "000000000000000000000002" } } } From 85f0987e1d6609ea71d5d6840add033608d22877 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Thu, 29 Sep 2022 14:31:47 -0700 Subject: [PATCH 0260/1588] PYTHON-3400 Only use new electionId/setVersion logic on 6.0+ --- pymongo/topology_description.py | 38 +++- .../rs/electionId_precedence_setVersion.json | 6 +- .../rs/null_election_id-pre-6.0.json | 203 ++++++++++++++++++ .../rs/null_election_id.json | 8 +- .../rs/secondary_ignore_ok_0-pre-6.0.json | 83 +++++++ .../rs/set_version_can_rollback.json | 14 +- ...tversion_equal_max_without_electionid.json | 4 +- ...on_greaterthan_max_without_electionid.json | 4 +- ...setversion_without_electionid-pre-6.0.json | 84 ++++++++ .../rs/setversion_without_electionid.json | 4 +- ...setversion_without_electionid-pre-6.0.json | 138 ++++++++++++ .../rs/use_setversion_without_electionid.json | 6 +- .../rediscover-quickly-after-step-down.json | 2 +- test/utils.py | 5 +- 14 files changed, 559 insertions(+), 40 deletions(-) create mode 100644 test/discovery_and_monitoring/rs/null_election_id-pre-6.0.json create mode 100644 test/discovery_and_monitoring/rs/secondary_ignore_ok_0-pre-6.0.json create mode 100644 
test/discovery_and_monitoring/rs/setversion_without_electionid-pre-6.0.json create mode 100644 test/discovery_and_monitoring/rs/use_setversion_without_electionid-pre-6.0.json diff --git a/pymongo/topology_description.py b/pymongo/topology_description.py index 552d8f719a..df11a6ec75 100644 --- a/pymongo/topology_description.py +++ b/pymongo/topology_description.py @@ -531,19 +531,35 @@ def _update_rs_from_primary( # We found a primary but it doesn't have the replica_set_name # provided by the user. sds.pop(server_description.address) - return (_check_has_primary(sds), replica_set_name, max_set_version, max_election_id) - - new_election_tuple = server_description.election_id, server_description.set_version - max_election_tuple = max_election_id, max_set_version - new_election_safe = tuple(MinKey() if i is None else i for i in new_election_tuple) - max_election_safe = tuple(MinKey() if i is None else i for i in max_election_tuple) - if new_election_safe >= max_election_safe: - max_election_id, max_set_version = new_election_tuple - else: - # Stale primary, set to type Unknown. - sds[server_description.address] = server_description.to_unknown() return _check_has_primary(sds), replica_set_name, max_set_version, max_election_id + if server_description.max_wire_version is None or server_description.max_wire_version < 17: + new_election_tuple = server_description.set_version, server_description.election_id + max_election_tuple = max_set_version, max_election_id + if None not in new_election_tuple: + if None not in max_election_tuple and new_election_tuple < max_election_tuple: + # Stale primary, set to type Unknown. + sds[server_description.address] = server_description.to_unknown() + return _check_has_primary(sds), replica_set_name, max_set_version, max_election_id + max_election_id = server_description.election_id + + if server_description.set_version is not None and ( + max_set_version is None or server_description.set_version > max_set_version + ): + max_set_version = server_description.set_version + else: + new_election_tuple = server_description.election_id, server_description.set_version + max_election_tuple = max_election_id, max_set_version + new_election_safe = tuple(MinKey() if i is None else i for i in new_election_tuple) + max_election_safe = tuple(MinKey() if i is None else i for i in max_election_tuple) + if new_election_safe < max_election_safe: + # Stale primary, set to type Unknown. + sds[server_description.address] = server_description.to_unknown() + return _check_has_primary(sds), replica_set_name, max_set_version, max_election_id + else: + max_election_id = server_description.election_id + max_set_version = server_description.set_version + # We've heard from the primary. Is it the same primary as before? 
for server in sds.values(): if ( diff --git a/test/discovery_and_monitoring/rs/electionId_precedence_setVersion.json b/test/discovery_and_monitoring/rs/electionId_precedence_setVersion.json index a7b49e2b97..2fcea2bf66 100644 --- a/test/discovery_and_monitoring/rs/electionId_precedence_setVersion.json +++ b/test/discovery_and_monitoring/rs/electionId_precedence_setVersion.json @@ -20,7 +20,7 @@ "$oid": "000000000000000000000001" }, "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 17 } ], [ @@ -39,7 +39,7 @@ "$oid": "000000000000000000000001" }, "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 17 } ], [ @@ -58,7 +58,7 @@ "$oid": "000000000000000000000002" }, "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 17 } ] ], diff --git a/test/discovery_and_monitoring/rs/null_election_id-pre-6.0.json b/test/discovery_and_monitoring/rs/null_election_id-pre-6.0.json new file mode 100644 index 0000000000..f1fa2e252e --- /dev/null +++ b/test/discovery_and_monitoring/rs/null_election_id-pre-6.0.json @@ -0,0 +1,203 @@ +{ + "description": "Pre 6.0 Primaries with and without electionIds", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017", + "b:27017", + "c:27017" + ], + "setVersion": 1, + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 6 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "setVersion": 1, + "electionId": null + }, + "b:27017": { + "type": "Unknown", + "setName": null, + "electionId": null + }, + "c:27017": { + "type": "Unknown", + "setName": null, + "electionId": null + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs", + "maxSetVersion": 1 + } + }, + { + "responses": [ + [ + "b:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017", + "b:27017", + "c:27017" + ], + "setName": "rs", + "setVersion": 1, + "electionId": { + "$oid": "000000000000000000000002" + }, + "minWireVersion": 0, + "maxWireVersion": 6 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + "setName": null, + "electionId": null + }, + "b:27017": { + "type": "RSPrimary", + "setName": "rs", + "setVersion": 1, + "electionId": { + "$oid": "000000000000000000000002" + } + }, + "c:27017": { + "type": "Unknown", + "setName": null, + "electionId": null + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs", + "maxSetVersion": 1, + "maxElectionId": { + "$oid": "000000000000000000000002" + } + } + }, + { + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017", + "b:27017", + "c:27017" + ], + "setVersion": 1, + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 6 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "setVersion": 1, + "electionId": null + }, + "b:27017": { + "type": "Unknown", + "setName": null, + "electionId": null + }, + "c:27017": { + "type": "Unknown", + "setName": null, + "electionId": null + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs", + "maxSetVersion": 1, + "maxElectionId": { + "$oid": "000000000000000000000002" + } + } + }, + { + "responses": [ + [ + "c:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": 
true, + "hosts": [ + "a:27017", + "b:27017", + "c:27017" + ], + "setName": "rs", + "setVersion": 1, + "electionId": { + "$oid": "000000000000000000000001" + }, + "minWireVersion": 0, + "maxWireVersion": 6 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "setVersion": 1, + "electionId": null + }, + "b:27017": { + "type": "Unknown", + "setName": null, + "electionId": null + }, + "c:27017": { + "type": "Unknown", + "setName": null, + "electionId": null + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs", + "maxSetVersion": 1, + "maxElectionId": { + "$oid": "000000000000000000000002" + } + } + } + ] +} diff --git a/test/discovery_and_monitoring/rs/null_election_id.json b/test/discovery_and_monitoring/rs/null_election_id.json index 8eb519595a..8a99a78475 100644 --- a/test/discovery_and_monitoring/rs/null_election_id.json +++ b/test/discovery_and_monitoring/rs/null_election_id.json @@ -18,7 +18,7 @@ "setVersion": 1, "setName": "rs", "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 17 } ] ], @@ -66,7 +66,7 @@ "$oid": "000000000000000000000002" }, "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 17 } ] ], @@ -116,7 +116,7 @@ "setVersion": 1, "setName": "rs", "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 17 } ] ], @@ -170,7 +170,7 @@ "$oid": "000000000000000000000001" }, "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 17 } ] ], diff --git a/test/discovery_and_monitoring/rs/secondary_ignore_ok_0-pre-6.0.json b/test/discovery_and_monitoring/rs/secondary_ignore_ok_0-pre-6.0.json new file mode 100644 index 0000000000..054425c84c --- /dev/null +++ b/test/discovery_and_monitoring/rs/secondary_ignore_ok_0-pre-6.0.json @@ -0,0 +1,83 @@ +{ + "description": "Pre 6.0 New primary", + "uri": "mongodb://a,b/?replicaSet=rs", + "phases": [ + { + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "setName": "rs", + "hosts": [ + "a:27017", + "b:27017" + ], + "minWireVersion": 0, + "maxWireVersion": 6 + } + ], + [ + "b:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": false, + "secondary": true, + "setName": "rs", + "hosts": [ + "a:27017", + "b:27017" + ], + "minWireVersion": 0, + "maxWireVersion": 6 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs" + }, + "b:27017": { + "type": "RSSecondary", + "setName": "rs" + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "responses": [ + [ + "b:27017", + { + "ok": 0, + "minWireVersion": 0, + "maxWireVersion": 6 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs" + }, + "b:27017": { + "type": "Unknown", + "setName": null + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/rs/set_version_can_rollback.json b/test/discovery_and_monitoring/rs/set_version_can_rollback.json index 28ecbeefca..1cc608a344 100644 --- a/test/discovery_and_monitoring/rs/set_version_can_rollback.json +++ b/test/discovery_and_monitoring/rs/set_version_can_rollback.json @@ -8,7 +8,7 @@ "a:27017", { "ok": 1, - "hello": true, + "helloOk": true, "isWritablePrimary": true, "hosts": [ "a:27017", @@ -20,7 +20,7 @@ "$oid": "000000000000000000000001" }, "minWireVersion": 0, - "maxWireVersion": 6 + 
"maxWireVersion": 17 } ] ], @@ -50,13 +50,12 @@ } }, { - "_comment": "Response from new primary with newer election Id", "responses": [ [ "b:27017", { "ok": 1, - "hello": true, + "helloOk": true, "isWritablePrimary": true, "hosts": [ "a:27017", @@ -68,7 +67,7 @@ "$oid": "000000000000000000000002" }, "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 17 } ] ], @@ -98,13 +97,12 @@ } }, { - "_comment": "Response from stale primary", "responses": [ [ "a:27017", { "ok": 1, - "hello": true, + "helloOk": true, "isWritablePrimary": true, "hosts": [ "a:27017", @@ -116,7 +114,7 @@ "$oid": "000000000000000000000001" }, "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 17 } ] ], diff --git a/test/discovery_and_monitoring/rs/setversion_equal_max_without_electionid.json b/test/discovery_and_monitoring/rs/setversion_equal_max_without_electionid.json index 91e84d4fa0..3669511c5a 100644 --- a/test/discovery_and_monitoring/rs/setversion_equal_max_without_electionid.json +++ b/test/discovery_and_monitoring/rs/setversion_equal_max_without_electionid.json @@ -17,7 +17,7 @@ "setName": "rs", "setVersion": 1, "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 17 } ] ], @@ -56,7 +56,7 @@ "setName": "rs", "setVersion": 1, "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 17 } ] ], diff --git a/test/discovery_and_monitoring/rs/setversion_greaterthan_max_without_electionid.json b/test/discovery_and_monitoring/rs/setversion_greaterthan_max_without_electionid.json index b15fd5c1a7..97870d71d5 100644 --- a/test/discovery_and_monitoring/rs/setversion_greaterthan_max_without_electionid.json +++ b/test/discovery_and_monitoring/rs/setversion_greaterthan_max_without_electionid.json @@ -17,7 +17,7 @@ "setName": "rs", "setVersion": 1, "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 17 } ] ], @@ -56,7 +56,7 @@ "setName": "rs", "setVersion": 2, "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 17 } ] ], diff --git a/test/discovery_and_monitoring/rs/setversion_without_electionid-pre-6.0.json b/test/discovery_and_monitoring/rs/setversion_without_electionid-pre-6.0.json new file mode 100644 index 0000000000..c2e2fe5b9b --- /dev/null +++ b/test/discovery_and_monitoring/rs/setversion_without_electionid-pre-6.0.json @@ -0,0 +1,84 @@ +{ + "description": "Pre 6.0 setVersion is ignored if there is no electionId", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017", + "b:27017" + ], + "setName": "rs", + "setVersion": 2, + "minWireVersion": 0, + "maxWireVersion": 6 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "setVersion": 2, + "electionId": null + }, + "b:27017": { + "type": "Unknown", + "setName": null, + "electionId": null + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs", + "maxSetVersion": 2 + } + }, + { + "responses": [ + [ + "b:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017", + "b:27017" + ], + "setName": "rs", + "setVersion": 1, + "minWireVersion": 0, + "maxWireVersion": 6 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + "setName": null, + "electionId": null + }, + "b:27017": { + "type": "RSPrimary", + "setName": "rs", + "setVersion": 1, + "electionId": null + } + }, + "topologyType": "ReplicaSetWithPrimary", + 
"logicalSessionTimeoutMinutes": null, + "setName": "rs", + "maxSetVersion": 2 + } + } + ] +} diff --git a/test/discovery_and_monitoring/rs/setversion_without_electionid.json b/test/discovery_and_monitoring/rs/setversion_without_electionid.json index f59c162ae1..256fafe108 100644 --- a/test/discovery_and_monitoring/rs/setversion_without_electionid.json +++ b/test/discovery_and_monitoring/rs/setversion_without_electionid.json @@ -17,7 +17,7 @@ "setName": "rs", "setVersion": 2, "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 17 } ] ], @@ -56,7 +56,7 @@ "setName": "rs", "setVersion": 1, "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 17 } ] ], diff --git a/test/discovery_and_monitoring/rs/use_setversion_without_electionid-pre-6.0.json b/test/discovery_and_monitoring/rs/use_setversion_without_electionid-pre-6.0.json new file mode 100644 index 0000000000..5c58b65614 --- /dev/null +++ b/test/discovery_and_monitoring/rs/use_setversion_without_electionid-pre-6.0.json @@ -0,0 +1,138 @@ +{ + "description": "Pre 6.0 Record max setVersion, even from primary without electionId", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017", + "b:27017" + ], + "setName": "rs", + "setVersion": 1, + "electionId": { + "$oid": "000000000000000000000001" + }, + "minWireVersion": 0, + "maxWireVersion": 6 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "setVersion": 1, + "electionId": { + "$oid": "000000000000000000000001" + } + }, + "b:27017": { + "type": "Unknown", + "setName": null, + "electionId": null + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs", + "maxSetVersion": 1, + "maxElectionId": { + "$oid": "000000000000000000000001" + } + } + }, + { + "responses": [ + [ + "b:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017", + "b:27017" + ], + "setName": "rs", + "setVersion": 2, + "minWireVersion": 0, + "maxWireVersion": 6 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + "setName": null, + "electionId": null + }, + "b:27017": { + "type": "RSPrimary", + "setName": "rs", + "setVersion": 2 + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs", + "maxSetVersion": 2, + "maxElectionId": { + "$oid": "000000000000000000000001" + } + } + }, + { + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017", + "b:27017" + ], + "setName": "rs", + "setVersion": 1, + "electionId": { + "$oid": "000000000000000000000002" + }, + "minWireVersion": 0, + "maxWireVersion": 6 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + "setName": null, + "electionId": null + }, + "b:27017": { + "type": "RSPrimary", + "setName": "rs", + "setVersion": 2 + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs", + "maxSetVersion": 2, + "maxElectionId": { + "$oid": "000000000000000000000001" + } + } + } + ] +} diff --git a/test/discovery_and_monitoring/rs/use_setversion_without_electionid.json b/test/discovery_and_monitoring/rs/use_setversion_without_electionid.json index 6dd753d5d8..551f3e12c2 100644 --- a/test/discovery_and_monitoring/rs/use_setversion_without_electionid.json +++ 
b/test/discovery_and_monitoring/rs/use_setversion_without_electionid.json @@ -20,7 +20,7 @@ "$oid": "000000000000000000000001" }, "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 17 } ] ], @@ -64,7 +64,7 @@ "setName": "rs", "setVersion": 2, "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 17 } ] ], @@ -111,7 +111,7 @@ "$oid": "000000000000000000000002" }, "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 17 } ] ], diff --git a/test/discovery_and_monitoring/unified/rediscover-quickly-after-step-down.json b/test/discovery_and_monitoring/unified/rediscover-quickly-after-step-down.json index 0ad575cc9d..c7c2494857 100644 --- a/test/discovery_and_monitoring/unified/rediscover-quickly-after-step-down.json +++ b/test/discovery_and_monitoring/unified/rediscover-quickly-after-step-down.json @@ -117,7 +117,7 @@ "replSetFreeze": 0 }, "readPreference": { - "mode": "Secondary" + "mode": "secondary" }, "commandName": "replSetFreeze" } diff --git a/test/utils.py b/test/utils.py index 33a594d15a..6f35b48538 100644 --- a/test/utils.py +++ b/test/utils.py @@ -1004,10 +1004,7 @@ def assertion_context(msg): try: yield except AssertionError as exc: - msg = "%s (%s)" % (exc, msg) - exc_type, exc_val, exc_tb = sys.exc_info() - assert exc_type is not None - raise exc_type(exc_val).with_traceback(exc_tb) + raise AssertionError(f"{msg}: {exc}") def parse_spec_options(opts): From 774154e934509dd0c7d854f6491304a295845dd6 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Wed, 5 Oct 2022 14:34:59 -0700 Subject: [PATCH 0261/1588] PYTHON-3451 Stop passing bytes to getaddrinfo to fix eventlet support (#1066) --- pymongo/pool.py | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/pymongo/pool.py b/pymongo/pool.py index 88f56b16e5..6355692ac9 100644 --- a/pymongo/pool.py +++ b/pymongo/pool.py @@ -979,11 +979,9 @@ def _create_connection(address, options): This is a modified version of create_connection from CPython >= 2.7. """ host, port = address - # Avoid the getaddrinfo importlib deadlock on fork() described in PYTHON-3406. - host = host.encode("idna") # Check if dealing with a unix domain socket - if host.endswith(b".sock"): + if host.endswith(".sock"): if not hasattr(socket, "AF_UNIX"): raise ConnectionFailure("UNIX-sockets are not supported on this system") sock = socket.socket(socket.AF_UNIX) @@ -1000,7 +998,7 @@ def _create_connection(address, options): # is 'localhost' (::1 is fine). Avoids slow connect issues # like PYTHON-356. family = socket.AF_INET - if socket.has_ipv6 and host != b"localhost": + if socket.has_ipv6 and host != "localhost": family = socket.AF_UNSPEC err = None From eaf0e6d84f41136ce32f1aa61a8cb48b5895cfaa Mon Sep 17 00:00:00 2001 From: Julius Park Date: Fri, 7 Oct 2022 12:05:20 -0700 Subject: [PATCH 0262/1588] PYTHON-3445 Improve documentation for custom readPreference tags (#1068) --- README.rst | 2 +- RELEASE.rst | 2 +- pymongo/read_preferences.py | 13 ++++++++++++- 3 files changed, 14 insertions(+), 3 deletions(-) diff --git a/README.rst b/README.rst index f15ac48098..115085ac13 100644 --- a/README.rst +++ b/README.rst @@ -25,7 +25,7 @@ For issues with, questions about, or feedback for PyMongo, please look into our `support channels `_. Please do not email any of the PyMongo developers directly with issues or questions - you're more likely to get an answer on the `MongoDB Community -Forums `_. +Forums `_. 
Bugs / Feature Requests ======================= diff --git a/RELEASE.rst b/RELEASE.rst index ad18446a0f..83c6c0f1d4 100644 --- a/RELEASE.rst +++ b/RELEASE.rst @@ -84,7 +84,7 @@ Doing a Release 13. Publish the release version in Jira. 14. Announce the release on: - https://developer.mongodb.com/community/forums/c/community/release-notes/ + https://www.mongodb.com/community/forums/c/announcements/driver-releases/110 15. File a ticket for DOCSP highlighting changes in server version and Python version compatibility or the lack thereof, for example: diff --git a/pymongo/read_preferences.py b/pymongo/read_preferences.py index ccb635bec0..46f029ed31 100644 --- a/pymongo/read_preferences.py +++ b/pymongo/read_preferences.py @@ -151,6 +151,13 @@ def tag_sets(self) -> _TagSets: set, ``{}``, means "read from any member that matches the mode, ignoring tags." MongoClient tries each set of tags in turn until it finds a set of tags with at least one matching member. + For example, to only send a query to an analytic node:: + + Nearest(tag_sets=[{"node":"analytics"}]) + + Or using :class:`SecondaryPreferred`:: + + SecondaryPreferred(tag_sets=[{"node":"analytics"}]) .. seealso:: `Data-Center Awareness `_ @@ -518,7 +525,11 @@ def make_read_preference( class ReadPreference(object): - """An enum that defines the read preference modes supported by PyMongo. + """An enum that defines some commonly used read preference modes. + + Apps can also create a custom read preference, for example:: + + Nearest(tag_sets=[{"node":"analytics"}]) See :doc:`/examples/high_availability` for code examples. From 8abeb882b4ebd6f72ba2cf032ac545d70407a3b7 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Mon, 10 Oct 2022 12:48:42 -0700 Subject: [PATCH 0263/1588] PYTHON-3452 Skip SDAM test that relies on retryWrites on MMAPv1 (#1071) --- test/unified_format.py | 1 + 1 file changed, 1 insertion(+) diff --git a/test/unified_format.py b/test/unified_format.py index 3f51c335eb..005e91f6b6 100644 --- a/test/unified_format.py +++ b/test/unified_format.py @@ -936,6 +936,7 @@ def maybe_skip_test(self, spec): if ( "Dirty explicit session is discarded" in spec["description"] or "Dirty implicit session is discarded" in spec["description"] + or "Cancel server check" in spec["description"] ): self.skipTest("MMAPv1 does not support retryWrites=True") if "Client side error in command starting transaction" in spec["description"]: From 24a343b830e7382576691712873893902159ee4f Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Tue, 11 Oct 2022 14:14:59 -0500 Subject: [PATCH 0264/1588] PYTHON-3468 Test failures in test_srv_polling.TestSrvPolling (#1073) --- test/test_srv_polling.py | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/test/test_srv_polling.py b/test/test_srv_polling.py index 0b54171dc9..7a6c61ad21 100644 --- a/test/test_srv_polling.py +++ b/test/test_srv_polling.py @@ -130,7 +130,14 @@ def assert_nodelist_nochange(self, expected_nodelist, client): (WAIT_TIME * 10) seconds. Also check that the resolver is called at least once. 
""" - sleep(WAIT_TIME * 10) + + def predicate(): + if set(expected_nodelist) == set(self.get_nodelist(client)): + return pymongo.srv_resolver._SrvResolver.get_hosts_and_min_ttl.call_count >= 1 + return False + + wait_until(predicate, "Node list equals expected nodelist", timeout=100 * WAIT_TIME) + nodelist = self.get_nodelist(client) if set(expected_nodelist) != set(nodelist): msg = "Client nodelist %s changed unexpectedly (expected %s)" From 775c0203ca0df215b19b6d2e352e128dea14582d Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Tue, 11 Oct 2022 14:16:08 -0500 Subject: [PATCH 0265/1588] PYTHON-3453 Test failure - Enterprise Auth Windows 64 Python 3.7 (#1072) --- .evergreen/config.yml | 23 +++++++++++++++++++++-- 1 file changed, 21 insertions(+), 2 deletions(-) diff --git a/.evergreen/config.yml b/.evergreen/config.yml index 9d016f4d8a..3047ab475c 100644 --- a/.evergreen/config.yml +++ b/.evergreen/config.yml @@ -493,8 +493,27 @@ functions: silent: true working_dir: "src" script: | - # DO NOT ECHO WITH XTRACE (which PREPARE_SHELL does) - PYTHON_BINARY=${PYTHON_BINARY} SASL_HOST=${sasl_host} SASL_PORT=${sasl_port} SASL_USER=${sasl_user} SASL_PASS=${sasl_pass} SASL_DB=${sasl_db} PRINCIPAL=${principal} GSSAPI_DB=${gssapi_db} KEYTAB_BASE64=${keytab_base64} PROJECT_DIRECTORY=${PROJECT_DIRECTORY} bash ${PROJECT_DIRECTORY}/.evergreen/run-enterprise-auth-tests.sh + cat < prepare_enterprise_auth.sh + export SASL_HOST=${sasl_host} + export SASL_PORT=${sasl_port} + export SASL_USER=${sasl_user} + export SASL_PASS=${sasl_pass} + export SASL_DB=${sasl_db} + export PRINCIPAL=${principal} + export GSSAPI_DB=${gssapi_db} + export KEYTAB_BASE64=${keytab_base64} + EOT + - command: shell.exec + type: test + params: + working_dir: "src" + script: | + # Disable xtrace (just in case it was accidentally set). + set +x + . ./prepare_enterprise_auth.sh + rm -f ./prepare_enterprise_auth.sh + + PYTHON_BINARY=${PYTHON_BINARY} PROJECT_DIRECTORY=${PROJECT_DIRECTORY} bash ${PROJECT_DIRECTORY}/.evergreen/run-enterprise-auth-tests.sh "run atlas tests": - command: shell.exec From 4a5e0f6655c2ecf0e807a9614dd2b166c0e4e4f2 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Wed, 12 Oct 2022 10:21:06 -0500 Subject: [PATCH 0266/1588] PYTHON-3313 Cache AWS Credentials Where Possible (#982) --- .evergreen/run-mongodb-aws-ecs-test.sh | 2 +- .evergreen/run-mongodb-aws-test.sh | 2 +- doc/changelog.rst | 3 ++ pymongo/auth_aws.py | 17 ++++++++ test/auth_aws/test_auth_aws.py | 58 ++++++++++++++++++++++++++ 5 files changed, 80 insertions(+), 2 deletions(-) diff --git a/.evergreen/run-mongodb-aws-ecs-test.sh b/.evergreen/run-mongodb-aws-ecs-test.sh index 83f3975e9e..fcadea208c 100755 --- a/.evergreen/run-mongodb-aws-ecs-test.sh +++ b/.evergreen/run-mongodb-aws-ecs-test.sh @@ -30,7 +30,7 @@ authtest () { $PYTHON -m pip install --upgrade wheel setuptools pip cd src $PYTHON -m pip install '.[aws]' - $PYTHON test/auth_aws/test_auth_aws.py + $PYTHON test/auth_aws/test_auth_aws.py -v cd - } diff --git a/.evergreen/run-mongodb-aws-test.sh b/.evergreen/run-mongodb-aws-test.sh index 9a33507cc8..b2a4fd146a 100755 --- a/.evergreen/run-mongodb-aws-test.sh +++ b/.evergreen/run-mongodb-aws-test.sh @@ -61,7 +61,7 @@ authtest () { . 
venvaws/bin/activate fi python -m pip install '.[aws]' - python test/auth_aws/test_auth_aws.py + python test/auth_aws/test_auth_aws.py -v deactivate rm -rf venvaws } diff --git a/doc/changelog.rst b/doc/changelog.rst index eb9d1233bb..c11ac95888 100644 --- a/doc/changelog.rst +++ b/doc/changelog.rst @@ -26,6 +26,9 @@ PyMongo 4.3 brings a number of improvements including: now allow for new types of events (such as DDL and C2C replication events) to be recorded with the new parameter ``show_expanded_events`` that can be passed to methods such as :meth:`~pymongo.collection.Collection.watch`. +- PyMongo now internally caches AWS credentials that it fetches from AWS + endpoints, to avoid rate limitations. The cache is cleared when the + credentials expire or an error is encountered. Bug fixes ......... diff --git a/pymongo/auth_aws.py b/pymongo/auth_aws.py index 4b2af35ea4..e84465ea66 100644 --- a/pymongo/auth_aws.py +++ b/pymongo/auth_aws.py @@ -27,6 +27,17 @@ def __init__(self, credentials): _HAVE_MONGODB_AWS = False +try: + from pymongo_auth_aws.auth import set_cached_credentials, set_use_cached_credentials + + # Enable credential caching. + set_use_cached_credentials(True) +except ImportError: + + def set_cached_credentials(creds): + pass + + import bson from bson.binary import Binary from bson.son import SON @@ -88,7 +99,13 @@ def _authenticate_aws(credentials, sock_info): # SASL complete. break except PyMongoAuthAwsError as exc: + # Clear the cached credentials if we hit a failure in auth. + set_cached_credentials(None) # Convert to OperationFailure and include pymongo-auth-aws version. raise OperationFailure( "%s (pymongo-auth-aws version %s)" % (exc, pymongo_auth_aws.__version__) ) + except Exception: + # Clear the cached credentials if we hit a failure in auth. + set_cached_credentials(None) + raise diff --git a/test/auth_aws/test_auth_aws.py b/test/auth_aws/test_auth_aws.py index a63e60718c..372806bd24 100644 --- a/test/auth_aws/test_auth_aws.py +++ b/test/auth_aws/test_auth_aws.py @@ -20,6 +20,8 @@ sys.path[0:0] = [""] +from pymongo_auth_aws import AwsCredential, auth + from pymongo import MongoClient from pymongo.errors import OperationFailure from pymongo.uri_parser import parse_uri @@ -53,6 +55,62 @@ def test_connect_uri(self): with MongoClient(self.uri) as client: client.get_database().test.find_one() + def setup_cache(self): + if os.environ.get("AWS_ACCESS_KEY_ID", None) or "@" in self.uri: + self.skipTest("Not testing cached credentials") + if not hasattr(auth, "set_cached_credentials"): + self.skipTest("Cached credentials not available") + + # Ensure cleared credentials. + auth.set_cached_credentials(None) + self.assertEqual(auth.get_cached_credentials(), None) + + client = MongoClient(self.uri) + client.get_database().test.find_one() + client.close() + return auth.get_cached_credentials() + + def test_cache_credentials(self): + creds = self.setup_cache() + self.assertIsNotNone(creds) + + def test_cache_about_to_expire(self): + creds = self.setup_cache() + client = MongoClient(self.uri) + self.addCleanup(client.close) + + # Make the creds about to expire. 
+ creds = auth.get_cached_credentials() + assert creds is not None + + creds = AwsCredential(creds.username, creds.password, creds.token, lambda x: True) + auth.set_cached_credentials(creds) + + client.get_database().test.find_one() + new_creds = auth.get_cached_credentials() + self.assertNotEqual(creds, new_creds) + + def test_poisoned_cache(self): + creds = self.setup_cache() + + client = MongoClient(self.uri) + self.addCleanup(client.close) + + # Poison the creds with invalid password. + assert creds is not None + creds = AwsCredential("a" * 24, "b" * 24, "c" * 24) + auth.set_cached_credentials(creds) + + with self.assertRaises(OperationFailure): + client.get_database().test.find_one() + + # Make sure the cache was cleared. + self.assertEqual(auth.get_cached_credentials(), None) + + # The next attempt should generate a new cred and succeed. + client.get_database().test.find_one() + self.assertNotEqual(auth.get_cached_credentials(), None) + class TestAWSLambdaExamples(unittest.TestCase): def test_shared_client(self): From 438539eaa2db753d6f5ebee3cf3cba3a6d530e21 Mon Sep 17 00:00:00 2001 From: Julius Park Date: Wed, 12 Oct 2022 13:59:51 -0700 Subject: [PATCH 0267/1588] PYTHON-3445 Improve documentation for with_options (#1074) --- pymongo/database.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pymongo/database.py b/pymongo/database.py index 59328a1b53..d28578b4dd 100644 --- a/pymongo/database.py +++ b/pymongo/database.py @@ -162,11 +162,11 @@ def with_options( >>> db1.read_preference Primary() >>> from pymongo import ReadPreference - >>> db2 = db1.with_options(read_preference=ReadPreference.SECONDARY) + >>> db2 = db1.with_options(read_preference=Secondary([{'node': 'analytics'}])) >>> db1.read_preference Primary() >>> db2.read_preference - Secondary(tag_sets=None) + Secondary(tag_sets=[{'node': 'analytics'}], max_staleness=-1, hedge=None) :Parameters: - `codec_options` (optional): An instance of From 4e11bdaa3e27c2130db004a58751feb43fe9fa14 Mon Sep 17 00:00:00 2001 From: Julius Park Date: Wed, 12 Oct 2022 15:30:37 -0700 Subject: [PATCH 0268/1588] PYTHON-3445 Fix documentation for with_options (#1075) --- pymongo/database.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pymongo/database.py b/pymongo/database.py index d28578b4dd..259c22d558 100644 --- a/pymongo/database.py +++ b/pymongo/database.py @@ -161,7 +161,7 @@ def with_options( >>> db1.read_preference Primary() - >>> from pymongo import ReadPreference + >>> from pymongo.read_preferences import Secondary >>> db2 = db1.with_options(read_preference=Secondary([{'node': 'analytics'}])) >>> db1.read_preference Primary() From df77653ccc6b1bbfa18f8fca41cd4a1bfecc25c6 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Thu, 13 Oct 2022 06:09:23 -0500 Subject: [PATCH 0269/1588] PYTHON-3347 Test against Python 3.11 prerelease (#1069) --- .evergreen/build-mac.sh | 1 + .evergreen/build-manylinux-internal.sh | 2 +- .evergreen/build-manylinux.sh | 3 ++- .evergreen/build-windows.sh | 2 +- .evergreen/config.yml | 12 ++++++++++++ bson/codec_options.py | 2 +- pymongo/topology_description.py | 2 +- setup.py | 1 + test/test_client.py | 3 ++- 9 files changed, 22 insertions(+), 6 deletions(-) diff --git a/.evergreen/build-mac.sh b/.evergreen/build-mac.sh index 270c92b59a..60846ae92a 100755 --- a/.evergreen/build-mac.sh +++ b/.evergreen/build-mac.sh @@ -9,6 +9,7 @@ mkdir -p validdist mv dist/* validdist || true VERSION=${VERSION:-3.10} + 
PYTHON=/Library/Frameworks/Python.framework/Versions/$VERSION/bin/python3 rm -rf build diff --git a/.evergreen/build-manylinux-internal.sh b/.evergreen/build-manylinux-internal.sh index 4fd43a67a3..7c3747f4e2 100755 --- a/.evergreen/build-manylinux-internal.sh +++ b/.evergreen/build-manylinux-internal.sh @@ -11,7 +11,7 @@ mv dist/* validdist || true # Compile wheels for PYTHON in /opt/python/*/bin/python; do - if [[ ! $PYTHON =~ (cp37|cp38|cp39|cp310) ]]; then + if [[ ! $PYTHON =~ (cp37|cp38|cp39|cp310|cp311) ]]; then continue fi # https://github.com/pypa/manylinux/issues/49 diff --git a/.evergreen/build-manylinux.sh b/.evergreen/build-manylinux.sh index cac435fb11..871151a5f3 100755 --- a/.evergreen/build-manylinux.sh +++ b/.evergreen/build-manylinux.sh @@ -37,7 +37,8 @@ unexpected=$(find dist \! \( -iname dist -or \ -iname '*cp37*' -or \ -iname '*cp38*' -or \ -iname '*cp39*' -or \ - -iname '*cp310*' \)) + -iname '*cp310*' -or \ + -iname '*cp311*' \)) if [ -n "$unexpected" ]; then echo "Unexpected files:" $unexpected exit 1 diff --git a/.evergreen/build-windows.sh b/.evergreen/build-windows.sh index 09f5e7f0b4..aeb16892b1 100755 --- a/.evergreen/build-windows.sh +++ b/.evergreen/build-windows.sh @@ -8,7 +8,7 @@ rm -rf validdist mkdir -p validdist mv dist/* validdist || true -for VERSION in 37 38 39 310; do +for VERSION in 37 38 39 310 311; do _pythons=("C:/Python/Python${VERSION}/python.exe" \ "C:/Python/32/Python${VERSION}/python.exe") for PYTHON in "${_pythons[@]}"; do diff --git a/.evergreen/config.yml b/.evergreen/config.yml index 3047ab475c..d824b68f5c 100644 --- a/.evergreen/config.yml +++ b/.evergreen/config.yml @@ -2080,6 +2080,10 @@ axes: display_name: "Python 3.10" variables: PYTHON_BINARY: "/opt/python/3.10/bin/python3" + - id: "3.11" + display_name: "Python 3.11" + variables: + PYTHON_BINARY: "/opt/python/3.11/bin/python3" - id: "pypy3.7" display_name: "PyPy 3.7" variables: @@ -2116,6 +2120,10 @@ axes: display_name: "Python 3.10" variables: PYTHON_BINARY: "C:/python/Python310/python.exe" + - id: "3.11" + display_name: "Python 3.11" + variables: + PYTHON_BINARY: "C:/python/Python311/python.exe" - id: python-version-windows-32 display_name: "Python" @@ -2136,6 +2144,10 @@ axes: display_name: "32-bit Python 3.10" variables: PYTHON_BINARY: "C:/python/32/Python310/python.exe" + - id: "3.11" + display_name: "32-bit Python 3.11" + variables: + PYTHON_BINARY: "C:/python/32/Python311/python.exe" # Choice of mod_wsgi version - id: mod-wsgi-version diff --git a/bson/codec_options.py b/bson/codec_options.py index efba8af78d..3c0a976a1b 100644 --- a/bson/codec_options.py +++ b/bson/codec_options.py @@ -199,7 +199,7 @@ def __eq__(self, other: Any) -> Any: ) -class DatetimeConversion(enum.IntEnum): +class DatetimeConversion(int, enum.Enum): """Options for decoding BSON datetimes.""" DATETIME = 1 diff --git a/pymongo/topology_description.py b/pymongo/topology_description.py index df11a6ec75..7503a72704 100644 --- a/pymongo/topology_description.py +++ b/pymongo/topology_description.py @@ -495,7 +495,7 @@ def _updated_topology_description_srv_polling(topology_description, seedlist): new_hosts = set(seedlist) - set(sds.keys()) n_to_add = topology_description.srv_max_hosts - len(sds) if n_to_add > 0: - seedlist = sample(new_hosts, min(n_to_add, len(new_hosts))) + seedlist = sample(sorted(new_hosts), min(n_to_add, len(new_hosts))) else: seedlist = [] # Add SDs corresponding to servers recently added to the SRV record. 
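
The ``sample(sorted(new_hosts), ...)`` change in the SRV-polling hunk above is what keeps host selection working on Python 3.11, where ``random.sample`` no longer accepts a set (deprecated for sets in 3.9, removed in 3.11). A minimal illustration, separate from the patch itself::

    import random

    new_hosts = {("a.example.com", 27017), ("b.example.com", 27017)}

    # On Python 3.11+ this raises:
    # TypeError: Population must be a sequence. For dicts or sets, use sorted(d).
    try:
        random.sample(new_hosts, 1)
    except TypeError as exc:
        print(exc)

    # Sorting first yields a sequence, so sampling works on every supported
    # Python version (and is deterministic under a fixed seed).
    random.seed(42)
    print(random.sample(sorted(new_hosts), 1))
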
diff --git a/setup.py b/setup.py index 2706facf90..d895bf7dec 100755 --- a/setup.py +++ b/setup.py @@ -331,6 +331,7 @@ def build_extension(self, ext): "Programming Language :: Python :: 3.8", "Programming Language :: Python :: 3.9", "Programming Language :: Python :: 3.10", + "Programming Language :: Python :: 3.11", "Programming Language :: Python :: Implementation :: CPython", "Programming Language :: Python :: Implementation :: PyPy", "Topic :: Database", diff --git a/test/test_client.py b/test/test_client.py index 7e7e14c0e5..5bb116dbda 100644 --- a/test/test_client.py +++ b/test/test_client.py @@ -1641,7 +1641,8 @@ def server_description_count(): # If a bug like PYTHON-2433 is reintroduced then too many # ServerDescriptions will be kept alive and this test will fail: # AssertionError: 19 != 46 within 15 delta (27 difference) - self.assertAlmostEqual(initial_count, final_count, delta=15) + # On Python 3.11 we seem to get more of a delta. + self.assertAlmostEqual(initial_count, final_count, delta=20) @client_context.require_failCommand_fail_point def test_network_error_message(self): From f3fc409e2476684d8efec84a15717c4467bf466a Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Thu, 13 Oct 2022 16:44:43 -0500 Subject: [PATCH 0270/1588] PYTHON-3474 Document changes to AWS Credential Handling (#1077) --- doc/changelog.rst | 4 ++++ doc/examples/authentication.rst | 19 +++++++++++++------ 2 files changed, 17 insertions(+), 6 deletions(-) diff --git a/doc/changelog.rst b/doc/changelog.rst index c11ac95888..279c535180 100644 --- a/doc/changelog.rst +++ b/doc/changelog.rst @@ -29,6 +29,10 @@ PyMongo 4.3 brings a number of improvements including: - PyMongo now internally caches AWS credentials that it fetches from AWS endpoints, to avoid rate limitations. The cache is cleared when the credentials expire or an error is encountered. +- When using the ``MONGODB-AWS`` authentication mechanism with the + ``aws`` extra, the behavior of credential fetching has changed with + ``pymongo_auth_aws>=1.1.0``. Please see :doc:`examples/authentication` for + more information. Bug fixes ......... diff --git a/doc/examples/authentication.rst b/doc/examples/authentication.rst index db2dbd3d1f..9512b23e4b 100644 --- a/doc/examples/authentication.rst +++ b/doc/examples/authentication.rst @@ -264,16 +264,23 @@ security (or session) token. Credentials can be configured through the MongoDB URI, environment variables, or the local EC2 or ECS endpoint. The order in which the client searches for -credentials is: - -#. Credentials passed through the URI -#. Environment variables -#. ECS endpoint if and only if ``AWS_CONTAINER_CREDENTIALS_RELATIVE_URI`` is set. -#. EC2 endpoint +`credentials`_ is the same as the one used by the AWS ``boto3`` library +when using ``pymongo_auth_aws>=1.1.0``. + +Because we are now using ``boto3`` to handle credentials, the order and +locations of credentials are slightly different from previous versions. +Particularly, if you have a shared AWS credentials or config file, +then those credentials will be used by default if AWS auth environment +variables are not set. To override this behavior, set ``AWS_PROFILE=""`` in +your shell or add ``os.environ["AWS_PROFILE"] = ""`` to your script or +application. Alternatively, you can create an AWS profile specifically for +your MongoDB credentials and set ``AWS_PROFILE`` to that profile name. MONGODB-AWS authenticates against the "$external" virtual database, so none of the URIs in this section need to include the ``authSource`` URI option. +.. 
_credentials: https://boto3.amazonaws.com/v1/documentation/api/latest/guide/credentials.html + AWS IAM credentials ~~~~~~~~~~~~~~~~~~~ From f79b90992e0f764ddec55ffa7748e8a81b236abb Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Thu, 13 Oct 2022 17:01:52 -0500 Subject: [PATCH 0271/1588] PYTHON-3453 Fix handling of enterprise auth vars (#1076) --- .evergreen/config.yml | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/.evergreen/config.yml b/.evergreen/config.yml index d824b68f5c..8b37663878 100644 --- a/.evergreen/config.yml +++ b/.evergreen/config.yml @@ -494,14 +494,14 @@ functions: working_dir: "src" script: | cat < prepare_enterprise_auth.sh - export SASL_HOST=${sasl_host} - export SASL_PORT=${sasl_port} - export SASL_USER=${sasl_user} - export SASL_PASS=${sasl_pass} - export SASL_DB=${sasl_db} - export PRINCIPAL=${principal} - export GSSAPI_DB=${gssapi_db} - export KEYTAB_BASE64=${keytab_base64} + export SASL_HOST='${sasl_host}' + export SASL_PORT='${sasl_port}' + export SASL_USER='${sasl_user}' + export SASL_PASS='${sasl_pass}' + export SASL_DB='${sasl_db}' + export PRINCIPAL='${principal}' + export GSSAPI_DB='${gssapi_db}' + export KEYTAB_BASE64='${keytab_base64}' EOT - command: shell.exec type: test From 3eb316ed3042d7b9690afbf04acb86b318b82658 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Thu, 13 Oct 2022 17:06:55 -0500 Subject: [PATCH 0272/1588] BUMP 4.3.0 --- pymongo/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pymongo/__init__.py b/pymongo/__init__.py index 257c1dbac1..1b5d8abfce 100644 --- a/pymongo/__init__.py +++ b/pymongo/__init__.py @@ -84,7 +84,7 @@ .. _text index: http://mongodb.com/docs/manual/core/index-text/ """ -version_tuple: Tuple[Union[int, str], ...] = (4, 2, 1, ".dev0") +version_tuple: Tuple[Union[int, str], ...] = (4, 3, 0) def get_version_string() -> str: From 1c9193f226c02d3396e5658ce0e89c254c030ed1 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Mon, 17 Oct 2022 09:46:25 -0500 Subject: [PATCH 0273/1588] BUMP 4.3.1 --- pymongo/__init__.py | 2 +- setup.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/pymongo/__init__.py b/pymongo/__init__.py index 1b5d8abfce..a1624a49b0 100644 --- a/pymongo/__init__.py +++ b/pymongo/__init__.py @@ -84,7 +84,7 @@ .. _text index: http://mongodb.com/docs/manual/core/index-text/ """ -version_tuple: Tuple[Union[int, str], ...] = (4, 3, 0) +version_tuple: Tuple[Union[int, str], ...] = (4, 3, 1) def get_version_string() -> str: diff --git a/setup.py b/setup.py index d895bf7dec..6b23775664 100755 --- a/setup.py +++ b/setup.py @@ -34,7 +34,7 @@ except ImportError: _HAVE_SPHINX = False -version = "4.2.1.dev0" +version = "4.3.1" f = open("README.rst") try: From 942e28170ade3fa86950c06a22384e1ae781f7a8 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Mon, 17 Oct 2022 14:49:02 -0500 Subject: [PATCH 0274/1588] PYTHON-1889 Single-source the version tuple/string (#1079) --- RELEASE.rst | 27 ++++++++++++--------------- pymongo/__init__.py | 17 ++--------------- pymongo/_version.py | 28 ++++++++++++++++++++++++++++ setup.py | 5 ++++- 4 files changed, 46 insertions(+), 31 deletions(-) create mode 100644 pymongo/_version.py diff --git a/RELEASE.rst b/RELEASE.rst index 83c6c0f1d4..4150126f22 100644 --- a/RELEASE.rst +++ b/RELEASE.rst @@ -43,18 +43,15 @@ Doing a Release 3. Add release notes to doc/changelog.rst. Generally just summarize/clarify the git log, but you might add some more long form notes for big changes. -4. 
Search and replace the "devN" version number w/ the new version number (see - note above in `Versioning`_). +4. Make sure version number is updated in ``pymongo/_version.py`` -5. Make sure version number is updated in setup.py and pymongo/__init__.py +5. Commit with a BUMP version_number message, eg ``git commit -m 'BUMP 3.11.0'``. -6. Commit with a BUMP version_number message, eg ``git commit -m 'BUMP 3.11.0'``. +6. Tag w/ version_number, eg, ``git tag -a '3.11.0' -m 'BUMP 3.11.0' ``. -7. Tag w/ version_number, eg, ``git tag -a '3.11.0' -m '3.11.0' ``. +7. Push commit / tag, eg ``git push && git push --tags``. -8. Push commit / tag, eg ``git push && git push --tags``. - -9. Pushing a tag will trigger a release process in Evergreen which builds +8. Pushing a tag will trigger a release process in Evergreen which builds wheels for manylinux, macOS, and Windows. Wait for the "release-combine" task to complete and then download the "Release files all" archive. See: https://evergreen.mongodb.com/waterfall/mongo-python-driver?bv_filter=release @@ -70,27 +67,27 @@ Doing a Release ... pymongo-.tar.gz -10. Upload all the release packages to PyPI with twine:: +9. Upload all the release packages to PyPI with twine:: $ python3 -m twine upload path/to/archive/* -11. Make sure the new version appears on https://pymongo.readthedocs.io/. If the +10. Make sure the new version appears on https://pymongo.readthedocs.io/. If the new version does not show up automatically, trigger a rebuild of "latest": https://readthedocs.org/projects/pymongo/builds/ -12. Bump the version number to .dev0 in setup.py/__init__.py, +11. Bump the version number to .dev0 in ``pymongo/_version.py``, commit, push. -13. Publish the release version in Jira. +12. Publish the release version in Jira. -14. Announce the release on: +13. Announce the release on: https://www.mongodb.com/community/forums/c/announcements/driver-releases/110 -15. File a ticket for DOCSP highlighting changes in server version and Python +14. File a ticket for DOCSP highlighting changes in server version and Python version compatibility or the lack thereof, for example: https://jira.mongodb.org/browse/DOCSP-13536 -16. Create a GitHub Release for the tag using +15. Create a GitHub Release for the tag using https://github.com/mongodb/mongo-python-driver/releases/new. The title should be "PyMongo X.Y.Z", and the description should contain a link to the release notes on the the community forum, e.g. diff --git a/pymongo/__init__.py b/pymongo/__init__.py index a1624a49b0..6394e8250e 100644 --- a/pymongo/__init__.py +++ b/pymongo/__init__.py @@ -14,7 +14,7 @@ """Python driver for MongoDB.""" -from typing import ContextManager, Optional, Tuple, Union +from typing import ContextManager, Optional __all__ = [ "ASCENDING", @@ -84,21 +84,8 @@ .. _text index: http://mongodb.com/docs/manual/core/index-text/ """ -version_tuple: Tuple[Union[int, str], ...] 
= (4, 3, 1) - - -def get_version_string() -> str: - if isinstance(version_tuple[-1], str): - return ".".join(map(str, version_tuple[:-1])) + version_tuple[-1] - return ".".join(map(str, version_tuple)) - - -__version__: str = get_version_string() -version = __version__ - -"""Current version of PyMongo.""" - from pymongo import _csot +from pymongo._version import __version__, get_version_string, version, version_tuple from pymongo.collection import ReturnDocument from pymongo.common import MAX_SUPPORTED_WIRE_VERSION, MIN_SUPPORTED_WIRE_VERSION from pymongo.cursor import CursorType diff --git a/pymongo/_version.py b/pymongo/_version.py new file mode 100644 index 0000000000..99b25d7dcd --- /dev/null +++ b/pymongo/_version.py @@ -0,0 +1,28 @@ +# Copyright 2022-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Current version of PyMongo.""" +from typing import Tuple, Union + +version_tuple: Tuple[Union[int, str], ...] = (4, 3, 1) + + +def get_version_string() -> str: + if isinstance(version_tuple[-1], str): + return ".".join(map(str, version_tuple[:-1])) + version_tuple[-1] + return ".".join(map(str, version_tuple)) + + +__version__: str = get_version_string() +version = __version__ diff --git a/setup.py b/setup.py index 6b23775664..52892e8507 100755 --- a/setup.py +++ b/setup.py @@ -34,7 +34,10 @@ except ImportError: _HAVE_SPHINX = False -version = "4.3.1" +version_ns = {} +with open("pymongo/_version.py") as fp: + exec(fp.read(), version_ns) +version = version_ns["__version__"] f = open("README.rst") try: From 5dec36195a73c4dc841388576a8e4b56a39c2cb2 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Mon, 17 Oct 2022 19:12:41 -0500 Subject: [PATCH 0275/1588] PYTHON-3474 Improve documentation about credential handling (#1080) --- doc/changelog.rst | 7 +++++-- doc/examples/authentication.rst | 9 +++++---- 2 files changed, 10 insertions(+), 6 deletions(-) diff --git a/doc/changelog.rst b/doc/changelog.rst index 279c535180..4688a8fb65 100644 --- a/doc/changelog.rst +++ b/doc/changelog.rst @@ -1,8 +1,11 @@ Changelog ========= -Changes in Version 4.3 ----------------------- +Changes in Version 4.3 (4.3.2) +------------------------------ + +Note: We withheld uploading tags 4.3.0 and 4.3.1 to PyPI due to a +version handling error and a necessary documentation update. `dnspython `_ is now a required dependency. This change makes PyMongo easier to install for use with "mongodb+srv://" diff --git a/doc/examples/authentication.rst b/doc/examples/authentication.rst index 9512b23e4b..862ac40db2 100644 --- a/doc/examples/authentication.rst +++ b/doc/examples/authentication.rst @@ -268,11 +268,12 @@ or the local EC2 or ECS endpoint. The order in which the client searches for when using ``pymongo_auth_aws>=1.1.0``. Because we are now using ``boto3`` to handle credentials, the order and -locations of credentials are slightly different from previous versions. -Particularly, if you have a shared AWS credentials or config file, +locations of credentials are slightly different from before. 
Particularly, +if you have a shared AWS credentials or config file, then those credentials will be used by default if AWS auth environment -variables are not set. To override this behavior, set ``AWS_PROFILE=""`` in -your shell or add ``os.environ["AWS_PROFILE"] = ""`` to your script or +variables are not set. To override this behavior, set +``AWS_SHARED_CREDENTIALS_FILE=""`` in your shell or add +``os.environ["AWS_SHARED_CREDENTIALS_FILE"] = ""`` to your script or application. Alternatively, you can create an AWS profile specifically for your MongoDB credentials and set ``AWS_PROFILE`` to that profile name. From bed75044e8116abdbf4e5610db2bf7760ff07566 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Tue, 18 Oct 2022 09:09:45 -0500 Subject: [PATCH 0276/1588] BUMP 4.3.2 --- pymongo/_version.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pymongo/_version.py b/pymongo/_version.py index 99b25d7dcd..2df9b484d9 100644 --- a/pymongo/_version.py +++ b/pymongo/_version.py @@ -15,7 +15,7 @@ """Current version of PyMongo.""" from typing import Tuple, Union -version_tuple: Tuple[Union[int, str], ...] = (4, 3, 1) +version_tuple: Tuple[Union[int, str], ...] = (4, 3, 2) def get_version_string() -> str: From 520b26fba346efefc7d38adb02e255146db7ea79 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Tue, 18 Oct 2022 09:50:46 -0500 Subject: [PATCH 0277/1588] BUMP 4.4.0.dev0 --- pymongo/_version.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pymongo/_version.py b/pymongo/_version.py index 2df9b484d9..7331d6ff25 100644 --- a/pymongo/_version.py +++ b/pymongo/_version.py @@ -15,7 +15,7 @@ """Current version of PyMongo.""" from typing import Tuple, Union -version_tuple: Tuple[Union[int, str], ...] = (4, 3, 2) +version_tuple: Tuple[Union[int, str], ...] = (4, 4, 0, '.dev0') def get_version_string() -> str: From 1d117c1f39983ec88376816c7e3fd73fb140b863 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Wed, 19 Oct 2022 12:09:58 -0700 Subject: [PATCH 0278/1588] Fix pre-commit for _version.py --- pymongo/_version.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pymongo/_version.py b/pymongo/_version.py index 7331d6ff25..78c325a23c 100644 --- a/pymongo/_version.py +++ b/pymongo/_version.py @@ -15,7 +15,7 @@ """Current version of PyMongo.""" from typing import Tuple, Union -version_tuple: Tuple[Union[int, str], ...] = (4, 4, 0, '.dev0') +version_tuple: Tuple[Union[int, str], ...] 
= (4, 4, 0, ".dev0") def get_version_string() -> str: From 614e22c46c57deca443a6ebfa2123d5ce383ef8f Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Wed, 19 Oct 2022 15:14:46 -0700 Subject: [PATCH 0279/1588] PYTHON-3478 Improve test_change_stream_can_resume_after_timeouts (#1083) --- test/test_csot.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/test/test_csot.py b/test/test_csot.py index a9cf7a0124..c2a62aa7f2 100644 --- a/test/test_csot.py +++ b/test/test_csot.py @@ -76,10 +76,10 @@ def test_timeout_nested(self): @client_context.require_change_streams def test_change_stream_can_resume_after_timeouts(self): coll = self.db.test - with coll.watch(max_await_time_ms=150) as stream: + with coll.watch() as stream: with pymongo.timeout(0.1): with self.assertRaises(PyMongoError) as ctx: - stream.try_next() + stream.next() self.assertTrue(ctx.exception.timeout) self.assertTrue(stream.alive) with self.assertRaises(PyMongoError) as ctx: From 3d3ffaf6f3db3cdba3883ca34ba0d97cc376c49c Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Thu, 20 Oct 2022 13:06:34 -0700 Subject: [PATCH 0280/1588] PYTHON-3410 Resync CSOT spec tests to be less flaky (#1087) --- test/csot/bulkWrite.json | 25 ++--- test/csot/retryability-legacy-timeouts.json | 102 ++++++++++---------- test/csot/retryability-timeoutMS.json | 50 +++++----- test/unified_format.py | 2 - 4 files changed, 89 insertions(+), 90 deletions(-) diff --git a/test/csot/bulkWrite.json b/test/csot/bulkWrite.json index 14d5b654f6..9a05809f77 100644 --- a/test/csot/bulkWrite.json +++ b/test/csot/bulkWrite.json @@ -19,7 +19,10 @@ "useMultipleMongoses": false, "observeEvents": [ "commandStartedEvent" - ] + ], + "uriOptions": { + "w": 1 + } } }, { @@ -48,6 +51,13 @@ { "description": "timeoutMS applied to entire bulkWrite, not individual commands", "operations": [ + { + "name": "insertOne", + "object": "collection", + "arguments": { + "document": {} + } + }, { "name": "failPoint", "object": "testRunner", @@ -69,15 +79,6 @@ } } }, - { - "name": "find", - "object": "collection", - "arguments": { - "filter": { - "_id": 1 - } - } - }, { "name": "bulkWrite", "object": "collection", @@ -114,10 +115,10 @@ "events": [ { "commandStartedEvent": { - "commandName": "find", + "commandName": "insert", "databaseName": "test", "command": { - "find": "coll" + "insert": "coll" } } }, diff --git a/test/csot/retryability-legacy-timeouts.json b/test/csot/retryability-legacy-timeouts.json index cd2af7fab6..63e8efccfc 100644 --- a/test/csot/retryability-legacy-timeouts.json +++ b/test/csot/retryability-legacy-timeouts.json @@ -21,7 +21,7 @@ "client": { "id": "client", "uriOptions": { - "socketTimeoutMS": 50 + "socketTimeoutMS": 100 }, "useMultipleMongoses": false, "observeEvents": [ @@ -73,7 +73,7 @@ "insert" ], "blockConnection": true, - "blockTimeMS": 55 + "blockTimeMS": 125 } } } @@ -132,7 +132,7 @@ "insert" ], "blockConnection": true, - "blockTimeMS": 55 + "blockTimeMS": 125 } } } @@ -194,7 +194,7 @@ "insert" ], "blockConnection": true, - "blockTimeMS": 55 + "blockTimeMS": 125 } } } @@ -255,7 +255,7 @@ "insert" ], "blockConnection": true, - "blockTimeMS": 55 + "blockTimeMS": 125 } } } @@ -319,7 +319,7 @@ "delete" ], "blockConnection": true, - "blockTimeMS": 55 + "blockTimeMS": 125 } } } @@ -376,7 +376,7 @@ "delete" ], "blockConnection": true, - "blockTimeMS": 55 + "blockTimeMS": 125 } } } @@ -436,7 +436,7 @@ "update" ], "blockConnection": true, - "blockTimeMS": 55 + "blockTimeMS": 125 } } } @@ -496,7 +496,7 @@ "update" ], "blockConnection": true, - 
"blockTimeMS": 55 + "blockTimeMS": 125 } } } @@ -559,7 +559,7 @@ "update" ], "blockConnection": true, - "blockTimeMS": 55 + "blockTimeMS": 125 } } } @@ -621,7 +621,7 @@ "update" ], "blockConnection": true, - "blockTimeMS": 55 + "blockTimeMS": 125 } } } @@ -686,7 +686,7 @@ "findAndModify" ], "blockConnection": true, - "blockTimeMS": 55 + "blockTimeMS": 125 } } } @@ -743,7 +743,7 @@ "findAndModify" ], "blockConnection": true, - "blockTimeMS": 55 + "blockTimeMS": 125 } } } @@ -803,7 +803,7 @@ "findAndModify" ], "blockConnection": true, - "blockTimeMS": 55 + "blockTimeMS": 125 } } } @@ -863,7 +863,7 @@ "findAndModify" ], "blockConnection": true, - "blockTimeMS": 55 + "blockTimeMS": 125 } } } @@ -926,7 +926,7 @@ "findAndModify" ], "blockConnection": true, - "blockTimeMS": 55 + "blockTimeMS": 125 } } } @@ -988,7 +988,7 @@ "findAndModify" ], "blockConnection": true, - "blockTimeMS": 55 + "blockTimeMS": 125 } } } @@ -1053,7 +1053,7 @@ "insert" ], "blockConnection": true, - "blockTimeMS": 55 + "blockTimeMS": 125 } } } @@ -1118,7 +1118,7 @@ "insert" ], "blockConnection": true, - "blockTimeMS": 55 + "blockTimeMS": 125 } } } @@ -1186,7 +1186,7 @@ "listDatabases" ], "blockConnection": true, - "blockTimeMS": 55 + "blockTimeMS": 125 } } } @@ -1243,7 +1243,7 @@ "listDatabases" ], "blockConnection": true, - "blockTimeMS": 55 + "blockTimeMS": 125 } } } @@ -1303,7 +1303,7 @@ "listDatabases" ], "blockConnection": true, - "blockTimeMS": 55 + "blockTimeMS": 125 } } } @@ -1357,7 +1357,7 @@ "listDatabases" ], "blockConnection": true, - "blockTimeMS": 55 + "blockTimeMS": 125 } } } @@ -1414,7 +1414,7 @@ "aggregate" ], "blockConnection": true, - "blockTimeMS": 55 + "blockTimeMS": 125 } } } @@ -1471,7 +1471,7 @@ "aggregate" ], "blockConnection": true, - "blockTimeMS": 55 + "blockTimeMS": 125 } } } @@ -1531,7 +1531,7 @@ "aggregate" ], "blockConnection": true, - "blockTimeMS": 55 + "blockTimeMS": 125 } } } @@ -1595,7 +1595,7 @@ "aggregate" ], "blockConnection": true, - "blockTimeMS": 55 + "blockTimeMS": 125 } } } @@ -1662,7 +1662,7 @@ "listCollections" ], "blockConnection": true, - "blockTimeMS": 55 + "blockTimeMS": 125 } } } @@ -1719,7 +1719,7 @@ "listCollections" ], "blockConnection": true, - "blockTimeMS": 55 + "blockTimeMS": 125 } } } @@ -1779,7 +1779,7 @@ "listCollections" ], "blockConnection": true, - "blockTimeMS": 55 + "blockTimeMS": 125 } } } @@ -1836,7 +1836,7 @@ "listCollections" ], "blockConnection": true, - "blockTimeMS": 55 + "blockTimeMS": 125 } } } @@ -1896,7 +1896,7 @@ "aggregate" ], "blockConnection": true, - "blockTimeMS": 55 + "blockTimeMS": 125 } } } @@ -1953,7 +1953,7 @@ "aggregate" ], "blockConnection": true, - "blockTimeMS": 55 + "blockTimeMS": 125 } } } @@ -2013,7 +2013,7 @@ "aggregate" ], "blockConnection": true, - "blockTimeMS": 55 + "blockTimeMS": 125 } } } @@ -2070,7 +2070,7 @@ "aggregate" ], "blockConnection": true, - "blockTimeMS": 55 + "blockTimeMS": 125 } } } @@ -2130,7 +2130,7 @@ "count" ], "blockConnection": true, - "blockTimeMS": 55 + "blockTimeMS": 125 } } } @@ -2187,7 +2187,7 @@ "count" ], "blockConnection": true, - "blockTimeMS": 55 + "blockTimeMS": 125 } } } @@ -2247,7 +2247,7 @@ "aggregate" ], "blockConnection": true, - "blockTimeMS": 55 + "blockTimeMS": 125 } } } @@ -2304,7 +2304,7 @@ "aggregate" ], "blockConnection": true, - "blockTimeMS": 55 + "blockTimeMS": 125 } } } @@ -2364,7 +2364,7 @@ "count" ], "blockConnection": true, - "blockTimeMS": 55 + "blockTimeMS": 125 } } } @@ -2418,7 +2418,7 @@ "count" ], "blockConnection": true, - "blockTimeMS": 55 + "blockTimeMS": 125 } } } 
@@ -2475,7 +2475,7 @@ "distinct" ], "blockConnection": true, - "blockTimeMS": 55 + "blockTimeMS": 125 } } } @@ -2533,7 +2533,7 @@ "distinct" ], "blockConnection": true, - "blockTimeMS": 55 + "blockTimeMS": 125 } } } @@ -2594,7 +2594,7 @@ "find" ], "blockConnection": true, - "blockTimeMS": 55 + "blockTimeMS": 125 } } } @@ -2651,7 +2651,7 @@ "find" ], "blockConnection": true, - "blockTimeMS": 55 + "blockTimeMS": 125 } } } @@ -2711,7 +2711,7 @@ "find" ], "blockConnection": true, - "blockTimeMS": 55 + "blockTimeMS": 125 } } } @@ -2768,7 +2768,7 @@ "find" ], "blockConnection": true, - "blockTimeMS": 55 + "blockTimeMS": 125 } } } @@ -2828,7 +2828,7 @@ "listIndexes" ], "blockConnection": true, - "blockTimeMS": 55 + "blockTimeMS": 125 } } } @@ -2882,7 +2882,7 @@ "listIndexes" ], "blockConnection": true, - "blockTimeMS": 55 + "blockTimeMS": 125 } } } @@ -2939,7 +2939,7 @@ "aggregate" ], "blockConnection": true, - "blockTimeMS": 55 + "blockTimeMS": 125 } } } @@ -2996,7 +2996,7 @@ "aggregate" ], "blockConnection": true, - "blockTimeMS": 55 + "blockTimeMS": 125 } } } diff --git a/test/csot/retryability-timeoutMS.json b/test/csot/retryability-timeoutMS.json index 438ba6b8d2..642eca0ee9 100644 --- a/test/csot/retryability-timeoutMS.json +++ b/test/csot/retryability-timeoutMS.json @@ -137,7 +137,7 @@ "name": "insertOne", "object": "collection", "arguments": { - "timeoutMS": 500, + "timeoutMS": 1000, "document": { "x": 1 } @@ -356,7 +356,7 @@ "name": "insertMany", "object": "collection", "arguments": { - "timeoutMS": 500, + "timeoutMS": 1000, "documents": [ { "x": 1 @@ -575,7 +575,7 @@ "name": "deleteOne", "object": "collection", "arguments": { - "timeoutMS": 500, + "timeoutMS": 1000, "filter": {} } } @@ -789,7 +789,7 @@ "name": "replaceOne", "object": "collection", "arguments": { - "timeoutMS": 500, + "timeoutMS": 1000, "filter": {}, "replacement": { "x": 1 @@ -1011,7 +1011,7 @@ "name": "updateOne", "object": "collection", "arguments": { - "timeoutMS": 500, + "timeoutMS": 1000, "filter": {}, "update": { "$set": { @@ -1232,7 +1232,7 @@ "name": "findOneAndDelete", "object": "collection", "arguments": { - "timeoutMS": 500, + "timeoutMS": 1000, "filter": {} } } @@ -1446,7 +1446,7 @@ "name": "findOneAndReplace", "object": "collection", "arguments": { - "timeoutMS": 500, + "timeoutMS": 1000, "filter": {}, "replacement": { "x": 1 @@ -1668,7 +1668,7 @@ "name": "findOneAndUpdate", "object": "collection", "arguments": { - "timeoutMS": 500, + "timeoutMS": 1000, "filter": {}, "update": { "$set": { @@ -1897,7 +1897,7 @@ "name": "bulkWrite", "object": "collection", "arguments": { - "timeoutMS": 500, + "timeoutMS": 1000, "requests": [ { "insertOne": { @@ -2124,7 +2124,7 @@ "name": "listDatabases", "object": "client", "arguments": { - "timeoutMS": 500, + "timeoutMS": 1000, "filter": {} } } @@ -2332,7 +2332,7 @@ "name": "listDatabaseNames", "object": "client", "arguments": { - "timeoutMS": 500 + "timeoutMS": 1000 } } ], @@ -2541,7 +2541,7 @@ "name": "createChangeStream", "object": "client", "arguments": { - "timeoutMS": 500, + "timeoutMS": 1000, "pipeline": [] } } @@ -2759,7 +2759,7 @@ "name": "aggregate", "object": "database", "arguments": { - "timeoutMS": 500, + "timeoutMS": 1000, "pipeline": [ { "$listLocalSessions": {} @@ -2984,7 +2984,7 @@ "name": "listCollections", "object": "database", "arguments": { - "timeoutMS": 500, + "timeoutMS": 1000, "filter": {} } } @@ -3195,7 +3195,7 @@ "name": "listCollectionNames", "object": "database", "arguments": { - "timeoutMS": 500, + "timeoutMS": 1000, "filter": {} } } @@ -3406,7 
+3406,7 @@ "name": "createChangeStream", "object": "database", "arguments": { - "timeoutMS": 500, + "timeoutMS": 1000, "pipeline": [] } } @@ -3617,7 +3617,7 @@ "name": "aggregate", "object": "collection", "arguments": { - "timeoutMS": 500, + "timeoutMS": 1000, "pipeline": [] } } @@ -3828,7 +3828,7 @@ "name": "count", "object": "collection", "arguments": { - "timeoutMS": 500, + "timeoutMS": 1000, "filter": {} } } @@ -4039,7 +4039,7 @@ "name": "countDocuments", "object": "collection", "arguments": { - "timeoutMS": 500, + "timeoutMS": 1000, "filter": {} } } @@ -4247,7 +4247,7 @@ "name": "estimatedDocumentCount", "object": "collection", "arguments": { - "timeoutMS": 500 + "timeoutMS": 1000 } } ], @@ -4457,7 +4457,7 @@ "name": "distinct", "object": "collection", "arguments": { - "timeoutMS": 500, + "timeoutMS": 1000, "fieldName": "x", "filter": {} } @@ -4670,7 +4670,7 @@ "name": "find", "object": "collection", "arguments": { - "timeoutMS": 500, + "timeoutMS": 1000, "filter": {} } } @@ -4881,7 +4881,7 @@ "name": "findOne", "object": "collection", "arguments": { - "timeoutMS": 500, + "timeoutMS": 1000, "filter": {} } } @@ -5089,7 +5089,7 @@ "name": "listIndexes", "object": "collection", "arguments": { - "timeoutMS": 500 + "timeoutMS": 1000 } } ], @@ -5298,7 +5298,7 @@ "name": "createChangeStream", "object": "collection", "arguments": { - "timeoutMS": 500, + "timeoutMS": 1000, "pipeline": [] } } diff --git a/test/unified_format.py b/test/unified_format.py index 005e91f6b6..12eaceed35 100644 --- a/test/unified_format.py +++ b/test/unified_format.py @@ -963,8 +963,6 @@ def maybe_skip_test(self, spec): self.skipTest("CSOT not implemented for with_transaction") if "transaction" in class_name or "transaction" in description: self.skipTest("CSOT not implemented for transactions") - if "socket timeout" in description: - self.skipTest("CSOT not implemented for socket timeouts") # Some tests need to be skipped based on the operations they try to run. for op in spec["operations"]: From 45b809e41c0064676bdb2e327920ce9e497ebd06 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Thu, 20 Oct 2022 16:07:39 -0700 Subject: [PATCH 0281/1588] Mention crypt_shared in encryption examples pages and fix formatting (#1088) --- doc/examples/encryption.rst | 46 ++++++++++++++++++++++--------------- 1 file changed, 27 insertions(+), 19 deletions(-) diff --git a/doc/examples/encryption.rst b/doc/examples/encryption.rst index 5c3dc0864b..d7341b3ef4 100644 --- a/doc/examples/encryption.rst +++ b/doc/examples/encryption.rst @@ -19,7 +19,7 @@ encrypted data. .. seealso:: The MongoDB documentation on `Client Side Field Level Encryption `_. Dependencies ------------- +~~~~~~~~~~~~ To get started using client-side field level encryption in your project, you will need to install the @@ -34,8 +34,30 @@ support. For more information about installing pymongocrypt see `the installation instructions on the project's PyPI page `_. +Additionally, either `crypt_shared`_ or `mongocryptd`_ are required in order +to use *automatic* client-side encryption. + +crypt_shared +```````````` + +The Automatic Encryption Shared Library (crypt_shared) provides the same +functionality as `mongocryptd`_, but does not require you to spawn another +process to perform automatic encryption. + +By default, pymongo attempts to load crypt_shared from the system and if +found uses it automatically. To load crypt_shared from another location, +use the ``crypt_shared_lib_path`` argument to +:class:`~pymongo.encryption_options.AutoEncryptionOpts`. 
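
A short sketch of that override, with an illustrative (not real) library path and a throwaway local KMS key::

    from pymongo import MongoClient
    from pymongo.encryption_options import AutoEncryptionOpts

    kms_providers = {"local": {"key": b"\x00" * 96}}  # demo key only
    opts = AutoEncryptionOpts(
        kms_providers,
        "keyvault.datakeys",
        crypt_shared_lib_path="/path/to/mongo_crypt_v1.so",  # hypothetical path
        # Optionally fail fast instead of falling back to mongocryptd:
        crypt_shared_lib_required=True,
    )
    client = MongoClient(auto_encryption_opts=opts)
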
+If pymongo cannot load crypt_shared it will attempt to fallback to using +`mongocryptd`_ by default. Set ``crypt_shared_lib_required=True`` to make +the app always use crypt_shared and fail if it could not be loaded. + +For detailed installation instructions see +`the MongoDB documentation on Automatic Encryption Shared Library +`_. + mongocryptd ------------ +``````````` The ``mongocryptd`` binary is required for automatic client-side encryption and is included as a component in the `MongoDB Enterprise Server package @@ -341,19 +363,13 @@ data key and create a collection with the Automatic Queryable Encryption (Beta) ````````````````````````````````````` -PyMongo 4.2 brings beta support for Queryable Encryption with MongoDB 6.0. +PyMongo 4.2 brings beta support for Queryable Encryption with MongoDB >=6.0. Queryable Encryption is the second version of Client-Side Field Level Encryption. Data is encrypted client-side. Queryable Encryption supports indexed encrypted fields, which are further processed server-side. -You must have MongoDB 6.0rc8+ Enterprise to preview the capability. - -Until PyMongo 4.2 release is finalized, it can be installed using:: - - pip install "pymongo@git+ssh://git@github.com/mongodb/mongo-python-driver.git@4.2.0b0#egg=pymongo[encryption]" - -Additionally, ``libmongocrypt`` must be installed from `source `_. +You must have MongoDB 6.0 Enterprise to preview the capability. Automatic encryption in Queryable Encryption is configured with an ``encrypted_fields`` mapping, as demonstrated by the following example:: @@ -412,20 +428,12 @@ automatically encrypted and decrypted. Explicit Queryable Encryption (Beta) ```````````````````````````````````` -PyMongo 4.2 brings beta support for Queryable Encryption with MongoDB 6.0. +PyMongo 4.2 brings beta support for Queryable Encryption with MongoDB >=6.0. Queryable Encryption is the second version of Client-Side Field Level Encryption. Data is encrypted client-side. Queryable Encryption supports indexed encrypted fields, which are further processed server-side. -You must have MongoDB 6.0rc8+ to preview the capability. - -Until PyMongo 4.2 release is finalized, it can be installed using:: - - pip install "pymongo@git+ssh://git@github.com/mongodb/mongo-python-driver.git@4.2.0b0#egg=pymongo[encryption]" - -Additionally, ``libmongocrypt`` must be installed from `source `_. - Explicit encryption in Queryable Encryption is performed using the ``encrypt`` and ``decrypt`` methods. 
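
In isolation, those two calls look roughly like this (``client_encryption`` and ``key_id`` are assumed to have been created as in the earlier explicit-encryption sections, and this beta API may change)::

    from pymongo.encryption import Algorithm

    ciphertext = client_encryption.encrypt(
        "encrypted indexed value",
        Algorithm.INDEXED,
        key_id=key_id,
        contention_factor=0,  # required with Algorithm.INDEXED
    )
    assert client_encryption.decrypt(ciphertext) == "encrypted indexed value"
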
Automatic encryption (to allow the ``find_one`` to automatically decrypt) is configured using an ``encrypted_fields`` mapping, as demonstrated by the following example:: From 84fbc1f3197012700bc94a1fc7972ce60a1456e5 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Fri, 21 Oct 2022 05:22:39 -0500 Subject: [PATCH 0282/1588] PYTHON-3367 Add support for GCP attached service accounts when using GCP KMS (#1064) --- .evergreen/config.yml | 91 ++++++++++++++++++++++++++ .evergreen/run-mongodb-fle-gcp-auto.sh | 35 ++++++++++ test/test_on_demand_csfle.py | 67 +++++++++++++++++++ 3 files changed, 193 insertions(+) create mode 100644 .evergreen/run-mongodb-fle-gcp-auto.sh create mode 100644 test/test_on_demand_csfle.py diff --git a/.evergreen/config.yml b/.evergreen/config.yml index 8b37663878..4d3024589f 100644 --- a/.evergreen/config.yml +++ b/.evergreen/config.yml @@ -1035,6 +1035,43 @@ task_groups: tasks: - ".serverless" + - name: testgcpkms_task_group + setup_group_can_fail_task: true + setup_group_timeout_secs: 1800 # 30 minutes + setup_group: + - func: fetch source + - func: prepare resources + - func: fix absolute paths + - func: make files executable + - command: shell.exec + params: + shell: "bash" + script: | + ${PREPARE_SHELL} + echo '${testgcpkms_key_file}' > /tmp/testgcpkms_key_file.json + export GCPKMS_KEYFILE=/tmp/testgcpkms_key_file.json + export GCPKMS_DRIVERS_TOOLS=$DRIVERS_TOOLS + export GCPKMS_SERVICEACCOUNT="${testgcpkms_service_account}" + export GCPKMS_MACHINETYPE="e2-standard-4" + $DRIVERS_TOOLS/.evergreen/csfle/gcpkms/create-and-setup-instance.sh + # Load the GCPKMS_GCLOUD, GCPKMS_INSTANCE, GCPKMS_REGION, and GCPKMS_ZONE expansions. + - command: expansions.update + params: + file: testgcpkms-expansions.yml + teardown_group: + - command: shell.exec + params: + shell: "bash" + script: | + ${PREPARE_SHELL} + export GCPKMS_GCLOUD=${GCPKMS_GCLOUD} + export GCPKMS_PROJECT=${GCPKMS_PROJECT} + export GCPKMS_ZONE=${GCPKMS_ZONE} + export GCPKMS_INSTANCENAME=${GCPKMS_INSTANCENAME} + $DRIVERS_TOOLS/.evergreen/csfle/gcpkms/delete-instance.sh + tasks: + - testgcpkms-task + tasks: # Wildcard task. Do you need to find out what tools are available and where? # Throw it here, and execute this task on all buildvariants @@ -1857,6 +1894,51 @@ tasks: commands: - func: "download and merge coverage" + - name: "testgcpkms-task" + commands: + - command: shell.exec + type: setup + params: + working_dir: "src" + shell: "bash" + script: | + ${PREPARE_SHELL} + echo "Copying files ... begin" + export GCPKMS_GCLOUD=${GCPKMS_GCLOUD} + export GCPKMS_PROJECT=${GCPKMS_PROJECT} + export GCPKMS_ZONE=${GCPKMS_ZONE} + export GCPKMS_INSTANCENAME=${GCPKMS_INSTANCENAME} + tar czf /tmp/mongo-python-driver.tgz . + GCPKMS_SRC=/tmp/mongo-python-driver.tgz GCPKMS_DST=$GCPKMS_INSTANCENAME: $DRIVERS_TOOLS/.evergreen/csfle/gcpkms/copy-file.sh + echo "Copying files ... end" + echo "Untarring file ... begin" + GCPKMS_CMD="tar xf mongo-python-driver.tgz" $DRIVERS_TOOLS/.evergreen/csfle/gcpkms/run-command.sh + echo "Untarring file ... 
end" + - command: shell.exec + type: test + params: + working_dir: "src" + shell: "bash" + script: | + ${PREPARE_SHELL} + export GCPKMS_GCLOUD=${GCPKMS_GCLOUD} + export GCPKMS_PROJECT=${GCPKMS_PROJECT} + export GCPKMS_ZONE=${GCPKMS_ZONE} + export GCPKMS_INSTANCENAME=${GCPKMS_INSTANCENAME} + GCPKMS_CMD="SUCCESS=true ./.evergreen/run-mongodb-fle-gcp-auto.sh mongodb://localhost:27017" $DRIVERS_TOOLS/.evergreen/csfle/gcpkms/run-command.sh + + - name: "testgcpkms-fail-task" + # testgcpkms-fail-task runs in a non-GCE environment. + # It is expected to fail to obtain GCE credentials. + commands: + - command: shell.exec + type: test + params: + working_dir: "src" + shell: "bash" + script: | + ${PREPARE_SHELL} + SUCCESS=false ./.evergreen/run-mongodb-fle-gcp-auto.sh mongodb://localhost:27017 axes: # Choice of distro @@ -2821,6 +2903,15 @@ buildvariants: tasks: - name: "load-balancer-test" +- name: testgcpkms-variant + display_name: "GCP KMS" + run_on: + - debian11-small + tasks: + - name: testgcpkms_task_group + batchtime: 20160 # Use a batchtime of 14 days as suggested by the CSFLE test README + - testgcpkms-fail-task + - name: Release display_name: Release batchtime: 20160 # 14 days diff --git a/.evergreen/run-mongodb-fle-gcp-auto.sh b/.evergreen/run-mongodb-fle-gcp-auto.sh new file mode 100644 index 0000000000..81c4660275 --- /dev/null +++ b/.evergreen/run-mongodb-fle-gcp-auto.sh @@ -0,0 +1,35 @@ +#!/bin/bash + +set -o xtrace +set -o errexit # Exit the script with error if any of the commands fail + +# Supported/used environment variables: +# MONGODB_URI Set the URI, including an optional username/password to use to connect to the server +# SUCCESS Whether the authentication is expected to succeed or fail. One of "true" or "false" +############################################ +# Main Program # +############################################ + +if [[ -z "$1" ]]; then + echo "usage: $0 " + exit 1 +fi +export MONGODB_URI="$1" + +if echo "$MONGODB_URI" | grep -q "@"; then + echo "MONGODB_URI unexpectedly contains user credentials in FLE GCP test!"; + exit 1 +fi +# Now we can safely enable xtrace +set -o xtrace + +authtest () { + echo "Running GCP Credential Acquisition Test with $PYTHON" + $PYTHON --version + $PYTHON -m pip install --upgrade wheel setuptools pip + $PYTHON -m pip install '.[encryption]' + $PYTHON -m pip install https://github.com/mongodb/libmongocrypt#subdirectory=bindings/python + TEST_FLE_GCP_AUTO=1 $PYTHON test/test_on_demand_csfle.py +} + +PYTHON="python3" authtest diff --git a/test/test_on_demand_csfle.py b/test/test_on_demand_csfle.py new file mode 100644 index 0000000000..408c942cc7 --- /dev/null +++ b/test/test_on_demand_csfle.py @@ -0,0 +1,67 @@ +# Copyright 2022-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""Test client side encryption with on demand credentials.""" +import os +import sys +import unittest + +sys.path[0:0] = [""] + +from test import IntegrationTest, client_context + +from bson.codec_options import CodecOptions +from pymongo.encryption import _HAVE_PYMONGOCRYPT, ClientEncryption, EncryptionError + + +class TestonDemandGCPCredentials(IntegrationTest): + @classmethod + @unittest.skipUnless(_HAVE_PYMONGOCRYPT, "pymongocrypt is not installed") + @client_context.require_version_min(4, 2, -1) + def setUpClass(cls): + super(TestonDemandGCPCredentials, cls).setUpClass() + + def setUp(self): + super(TestonDemandGCPCredentials, self).setUp() + self.master_key = { + "projectId": "devprod-drivers", + "location": "global", + "keyRing": "key-ring-csfle", + "keyName": "key-name-csfle", + } + + @unittest.skipIf(not os.getenv("TEST_FLE_GCP_AUTO"), "Not testing FLE GCP auto") + def test_01_failure(self): + if os.environ["SUCCESS"].lower() == "true": + self.skipTest("Expecting success") + self.client_encryption = ClientEncryption( + kms_providers={"gcp": {}}, + key_vault_namespace="keyvault.datakeys", + key_vault_client=client_context.client, + codec_options=CodecOptions(), + ) + with self.assertRaises(EncryptionError): + self.client_encryption.create_data_key("gcp", self.master_key) + + @unittest.skipIf(not os.getenv("TEST_FLE_GCP_AUTO"), "Not testing FLE GCP auto") + def test_02_success(self): + if os.environ["SUCCESS"].lower() == "false": + self.skipTest("Expecting failure") + self.client_encryption = ClientEncryption( + kms_providers={"gcp": {}}, + key_vault_namespace="keyvault.datakeys", + key_vault_client=client_context.client, + codec_options=CodecOptions(), + ) + self.client_encryption.create_data_key("gcp", self.master_key) From 228edd21f858fe20c3dd0bffdc6aff054a5726a0 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Mon, 24 Oct 2022 12:10:22 -0500 Subject: [PATCH 0283/1588] PYTHON-3471 Test Support for Gevent in Python 3.11 (#1091) --- .evergreen/config.yml | 16 +++++++++++++++- 1 file changed, 15 insertions(+), 1 deletion(-) diff --git a/.evergreen/config.yml b/.evergreen/config.yml index 4d3024589f..f0514681db 100644 --- a/.evergreen/config.yml +++ b/.evergreen/config.yml @@ -2614,12 +2614,26 @@ buildvariants: exclude_spec: # Don't test green frameworks on these Python versions. 
- platform: ubuntu-18.04 - python-version: ["pypy3.7", "pypy3.8"] + python-version: ["pypy3.7", "pypy3.8", "3.11"] green-framework: "*" auth-ssl: "*" display_name: "${green-framework} ${python-version} ${platform} ${auth-ssl}" tasks: *all-server-versions +- matrix_name: "tests-python-version-green-framework-ubuntu20" + matrix_spec: + platform: ubuntu-20.04 + python-version: ["3.11"] + green-framework: "*" + auth-ssl: "*" + display_name: "${green-framework} ${python-version} ${platform} ${auth-ssl}" + tasks: + - ".rapid" + - ".latest" + - ".6.0" + - ".5.0" + - ".4.4" + - matrix_name: "tests-windows-python-version" matrix_spec: platform: windows-64-vsMulti-small From 3fc301cd22b2b235cc0e56800c507b7c798bfdca Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Mon, 24 Oct 2022 14:55:58 -0500 Subject: [PATCH 0284/1588] PYTHON-3256 Obtain AWS credentials for CSFLE in the same way as for MONGODB-AWS (#1035) --- .evergreen/run-tests.sh | 3 +++ README.rst | 3 ++- doc/examples/encryption.rst | 5 +++-- doc/installation.rst | 3 ++- setup.py | 6 ++++-- test/test_encryption.py | 31 +++++++++++++++++++++++++++++++ 6 files changed, 45 insertions(+), 6 deletions(-) diff --git a/.evergreen/run-tests.sh b/.evergreen/run-tests.sh index 9a0eb25e00..db20c9111e 100755 --- a/.evergreen/run-tests.sh +++ b/.evergreen/run-tests.sh @@ -147,6 +147,9 @@ if [ -n "$TEST_ENCRYPTION" ]; then python -c "import pymongocrypt; print('libmongocrypt version: '+pymongocrypt.libmongocrypt_version())" # PATH is updated by PREPARE_SHELL for access to mongocryptd. + # Need aws dependency for On-Demand KMS Credentials. + python -m pip install '.[aws]' + # Get access to the AWS temporary credentials: # CSFLE_AWS_TEMP_ACCESS_KEY_ID, CSFLE_AWS_TEMP_SECRET_ACCESS_KEY, CSFLE_AWS_TEMP_SESSION_TOKEN . $DRIVERS_TOOLS/.evergreen/csfle/set-temp-creds.sh diff --git a/README.rst b/README.rst index 115085ac13..530829f957 100644 --- a/README.rst +++ b/README.rst @@ -130,7 +130,8 @@ Wire protocol compression with zstandard requires `zstandard $ python -m pip install "pymongo[zstd]" Client-Side Field Level Encryption requires `pymongocrypt -`_:: +`_ and +`pymongo-auth-aws `_:: $ python -m pip install "pymongo[encryption]" diff --git a/doc/examples/encryption.rst b/doc/examples/encryption.rst index d7341b3ef4..72205ad119 100644 --- a/doc/examples/encryption.rst +++ b/doc/examples/encryption.rst @@ -23,9 +23,10 @@ Dependencies To get started using client-side field level encryption in your project, you will need to install the -`pymongocrypt `_ library +`pymongocrypt `_ and +`pymongo-auth-aws `_ libraries as well as the driver itself. 
Install both the driver and a compatible -version of pymongocrypt like this:: +version of the dependencies like this:: $ python -m pip install 'pymongo[encryption]' diff --git a/doc/installation.rst b/doc/installation.rst index b02949335b..4810353f98 100644 --- a/doc/installation.rst +++ b/doc/installation.rst @@ -70,7 +70,8 @@ Wire protocol compression with zstandard requires `zstandard $ python3 -m pip install "pymongo[zstd]" :ref:`Client-Side Field Level Encryption` requires `pymongocrypt -`_:: +`_ and +`pymongo-auth-aws `_:: $ python3 -m pip install "pymongo[encryption]" diff --git a/setup.py b/setup.py index 52892e8507..6d1a711708 100755 --- a/setup.py +++ b/setup.py @@ -278,12 +278,14 @@ def build_extension(self, ext): # https://www.pyopenssl.org/en/stable/api/ssl.html#OpenSSL.SSL.Context.set_default_verify_paths pyopenssl_reqs.append("certifi") +aws_reqs = ["pymongo-auth-aws<2.0.0"] + extras_require = { - "encryption": ["pymongocrypt>=1.3.0,<2.0.0"], + "encryption": ["pymongocrypt>=1.3.0,<2.0.0"] + aws_reqs, "ocsp": pyopenssl_reqs, "snappy": ["python-snappy"], "zstd": ["zstandard"], - "aws": ["pymongo-auth-aws<2.0.0"], + "aws": aws_reqs, "srv": [], # PYTHON-3423 Removed in 4.3 but kept here to avoid pip warnings. "tls": [], # PYTHON-2133 Removed in 4.0 but kept here to avoid pip warnings. } diff --git a/test/test_encryption.py b/test/test_encryption.py index 567d606893..6c54a90f7a 100644 --- a/test/test_encryption.py +++ b/test/test_encryption.py @@ -2304,6 +2304,37 @@ def run_test(self, src_provider, dst_provider): self.assertEqual(decrypt_result2, "test") +# https://github.com/mongodb/specifications/blob/5cf3ed/source/client-side-encryption/tests/README.rst#on-demand-aws-credentials +class TestOnDemandAWSCredentials(EncryptionIntegrationTest): + def setUp(self): + super(TestOnDemandAWSCredentials, self).setUp() + self.master_key = { + "region": "us-east-1", + "key": ("arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0"), + } + + @unittest.skipIf(any(AWS_CREDS.values()), "AWS environment credentials are set") + def test_01_failure(self): + self.client_encryption = ClientEncryption( + kms_providers={"aws": {}}, + key_vault_namespace="keyvault.datakeys", + key_vault_client=client_context.client, + codec_options=OPTS, + ) + with self.assertRaises(EncryptionError): + self.client_encryption.create_data_key("aws", self.master_key) + + @unittest.skipUnless(any(AWS_CREDS.values()), "AWS environment credentials are not set") + def test_02_success(self): + self.client_encryption = ClientEncryption( + kms_providers={"aws": {}}, + key_vault_namespace="keyvault.datakeys", + key_vault_client=client_context.client, + codec_options=OPTS, + ) + self.client_encryption.create_data_key("aws", self.master_key) + + class TestQueryableEncryptionDocsExample(EncryptionIntegrationTest): # Queryable Encryption is not supported on Standalone topology. 
@client_context.require_no_standalone From f08776c5222fc691219b2fa54a147354cf0be2e9 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Mon, 24 Oct 2022 14:57:01 -0500 Subject: [PATCH 0285/1588] PYTHON-3367 Use zip url for install (#1093) --- .evergreen/run-mongodb-fle-gcp-auto.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.evergreen/run-mongodb-fle-gcp-auto.sh b/.evergreen/run-mongodb-fle-gcp-auto.sh index 81c4660275..8b92551c10 100644 --- a/.evergreen/run-mongodb-fle-gcp-auto.sh +++ b/.evergreen/run-mongodb-fle-gcp-auto.sh @@ -28,7 +28,7 @@ authtest () { $PYTHON --version $PYTHON -m pip install --upgrade wheel setuptools pip $PYTHON -m pip install '.[encryption]' - $PYTHON -m pip install https://github.com/mongodb/libmongocrypt#subdirectory=bindings/python + $PYTHON -m pip install https://github.com/mongodb/libmongocrypt/archive/refs/heads/master.zip#subdirectory=bindings/python TEST_FLE_GCP_AUTO=1 $PYTHON test/test_on_demand_csfle.py } From 908382130045f7dabd2dd17914b600f5dcde2f26 Mon Sep 17 00:00:00 2001 From: Julius Park Date: Tue, 1 Nov 2022 14:33:21 -0700 Subject: [PATCH 0286/1588] PYTHON-3454 Specifying a generic type for a collection does not correctly enforce type safety when inserting data (#1081) --- .github/workflows/test-python.yml | 2 ++ doc/examples/type_hints.rst | 9 +++++++-- pymongo/client_session.py | 2 +- pymongo/collection.py | 10 +++++----- pymongo/helpers.py | 4 ++-- pymongo/monitoring.py | 2 +- test/__init__.py | 23 +++++++++++++---------- test/test_collection.py | 2 +- test/test_mypy.py | 28 ++++++++++++++++++++++++---- test/utils.py | 4 ++-- 10 files changed, 58 insertions(+), 28 deletions(-) diff --git a/.github/workflows/test-python.yml b/.github/workflows/test-python.yml index d451197e4e..cbebc94e6f 100644 --- a/.github/workflows/test-python.yml +++ b/.github/workflows/test-python.yml @@ -67,6 +67,8 @@ jobs: # Test overshadowed codec_options.py file mypy --install-types --non-interactive bson/codec_options.py mypy --install-types --non-interactive --disable-error-code var-annotated --disable-error-code attr-defined --disable-error-code union-attr --disable-error-code assignment --disable-error-code no-redef --disable-error-code index --allow-redefinition --allow-untyped-globals --exclude "test/mypy_fails/*.*" test + python -m pip install -U typing_extensions + mypy --install-types --non-interactive test/test_mypy.py linkcheck: name: Check Links diff --git a/doc/examples/type_hints.rst b/doc/examples/type_hints.rst index 6858e95290..e829441976 100644 --- a/doc/examples/type_hints.rst +++ b/doc/examples/type_hints.rst @@ -92,7 +92,9 @@ Note that when using :class:`~bson.son.SON`, the key and value types must be giv Typed Collection ---------------- -You can use :py:class:`~typing.TypedDict` (Python 3.8+) when using a well-defined schema for the data in a :class:`~pymongo.collection.Collection`: +You can use :py:class:`~typing.TypedDict` (Python 3.8+) when using a well-defined schema for the data in a +:class:`~pymongo.collection.Collection`. Note that all `schema validation`_ for inserts and updates is done on the server. +These methods automatically add an "_id" field. .. doctest:: @@ -105,10 +107,12 @@ You can use :py:class:`~typing.TypedDict` (Python 3.8+) when using a well-define ... 
>>> client: MongoClient = MongoClient() >>> collection: Collection[Movie] = client.test.test - >>> inserted = collection.insert_one({"name": "Jurassic Park", "year": 1993 }) + >>> inserted = collection.insert_one(Movie(name="Jurassic Park", year=1993)) >>> result = collection.find_one({"name": "Jurassic Park"}) >>> assert result is not None >>> assert result["year"] == 1993 + >>> # This will not be type checked, despite being present, because it is added by PyMongo. + >>> assert type(result["_id"]) == ObjectId Typed Database -------------- @@ -243,3 +247,4 @@ Another example is trying to set a value on a :class:`~bson.raw_bson.RawBSONDocu .. _limitations in mypy: https://github.com/python/mypy/issues/3737 .. _mypy config: https://mypy.readthedocs.io/en/stable/config_file.html .. _test_mypy module: https://github.com/mongodb/mongo-python-driver/blob/master/test/test_mypy.py +.. _schema validation: https://www.mongodb.com/docs/manual/core/schema-validation/#when-to-use-schema-validation diff --git a/pymongo/client_session.py b/pymongo/client_session.py index 3ff98a579f..d2479942e4 100644 --- a/pymongo/client_session.py +++ b/pymongo/client_session.py @@ -435,7 +435,7 @@ def _max_time_expired_error(exc): # From the transactions spec, all the retryable writes errors plus # WriteConcernFailed. -_UNKNOWN_COMMIT_ERROR_CODES = _RETRYABLE_ERROR_CODES | frozenset( +_UNKNOWN_COMMIT_ERROR_CODES: frozenset = _RETRYABLE_ERROR_CODES | frozenset( [ 64, # WriteConcernFailed 50, # MaxTimeMSExpired diff --git a/pymongo/collection.py b/pymongo/collection.py index 8f1afc575d..23efe8fd35 100644 --- a/pymongo/collection.py +++ b/pymongo/collection.py @@ -71,7 +71,7 @@ InsertOneResult, UpdateResult, ) -from pymongo.typings import _CollationIn, _DocumentIn, _DocumentType, _Pipeline +from pymongo.typings import _CollationIn, _DocumentType, _Pipeline from pymongo.write_concern import WriteConcern _FIND_AND_MODIFY_DOC_FIELDS = {"value": 1} @@ -566,7 +566,7 @@ def _insert_command(session, sock_info, retryable_write): def insert_one( self, - document: _DocumentIn, + document: Union[_DocumentType, RawBSONDocument], bypass_document_validation: bool = False, session: Optional["ClientSession"] = None, comment: Optional[Any] = None, @@ -614,7 +614,7 @@ def insert_one( """ common.validate_is_document_type("document", document) if not (isinstance(document, RawBSONDocument) or "_id" in document): - document["_id"] = ObjectId() + document["_id"] = ObjectId() # type: ignore[index] write_concern = self._write_concern_for(session) return InsertOneResult( @@ -633,7 +633,7 @@ def insert_one( @_csot.apply def insert_many( self, - documents: Iterable[_DocumentIn], + documents: Iterable[Union[_DocumentType, RawBSONDocument]], ordered: bool = True, bypass_document_validation: bool = False, session: Optional["ClientSession"] = None, @@ -697,7 +697,7 @@ def gen(): common.validate_is_document_type("document", document) if not isinstance(document, RawBSONDocument): if "_id" not in document: - document["_id"] = ObjectId() + document["_id"] = ObjectId() # type: ignore[index] inserted_ids.append(document["_id"]) yield (message._INSERT, document) diff --git a/pymongo/helpers.py b/pymongo/helpers.py index 4df8ab8e7a..dd210db188 100644 --- a/pymongo/helpers.py +++ b/pymongo/helpers.py @@ -44,7 +44,7 @@ # From the SDAM spec, the "not primary" error codes are combined with the # "node is recovering" error codes (of which the "node is shutting down" # errors are a subset). 
-_NOT_PRIMARY_CODES = ( +_NOT_PRIMARY_CODES: frozenset = ( frozenset( [ 10058, # LegacyNotPrimary <=3.2 "not primary" error code @@ -58,7 +58,7 @@ | _SHUTDOWN_CODES ) # From the retryable writes spec. -_RETRYABLE_ERROR_CODES = _NOT_PRIMARY_CODES | frozenset( +_RETRYABLE_ERROR_CODES: frozenset = _NOT_PRIMARY_CODES | frozenset( [ 7, # HostNotFound 6, # HostUnreachable diff --git a/pymongo/monitoring.py b/pymongo/monitoring.py index c53e7e5727..5b729652ad 100644 --- a/pymongo/monitoring.py +++ b/pymongo/monitoring.py @@ -528,7 +528,7 @@ def register(listener: _EventListener) -> None: # Note - to avoid bugs from forgetting which if these is all lowercase and # which are camelCase, and at the same time avoid having to add a test for # every command, use all lowercase here and test against command_name.lower(). -_SENSITIVE_COMMANDS = set( +_SENSITIVE_COMMANDS: set = set( [ "authenticate", "saslstart", diff --git a/test/__init__.py b/test/__init__.py index b89cd88d26..eb66e45667 100644 --- a/test/__init__.py +++ b/test/__init__.py @@ -43,11 +43,10 @@ HAVE_IPADDRESS = True except ImportError: HAVE_IPADDRESS = False - from contextlib import contextmanager from functools import wraps from test.version import Version -from typing import Callable, Dict, Generator, no_type_check +from typing import Any, Callable, Dict, Generator, no_type_check from unittest import SkipTest from urllib.parse import quote_plus @@ -331,7 +330,9 @@ def hello(self): def _connect(self, host, port, **kwargs): kwargs.update(self.default_client_options) - client = pymongo.MongoClient(host, port, serverSelectionTimeoutMS=5000, **kwargs) + client: MongoClient = pymongo.MongoClient( + host, port, serverSelectionTimeoutMS=5000, **kwargs + ) try: try: client.admin.command(HelloCompat.LEGACY_CMD) # Can we connect? @@ -356,7 +357,7 @@ def _init_client(self): if self.client is not None: # Return early when connected to dataLake as mongohoused does not # support the getCmdLineOpts command and is tested without TLS. - build_info = self.client.admin.command("buildInfo") + build_info: Any = self.client.admin.command("buildInfo") if "dataLake" in build_info: self.is_data_lake = True self.auth_enabled = True @@ -521,14 +522,16 @@ def has_secondaries(self): @property def storage_engine(self): try: - return self.server_status.get("storageEngine", {}).get("name") + return self.server_status.get("storageEngine", {}).get( # type:ignore[union-attr] + "name" + ) except AttributeError: # Raised if self.server_status is None. 
return None def _check_user_provided(self): """Return True if db_user/db_password is already an admin user.""" - client = pymongo.MongoClient( + client: MongoClient = pymongo.MongoClient( host, port, username=db_user, @@ -694,7 +697,7 @@ def supports_secondary_read_pref(self): if self.has_secondaries: return True if self.is_mongos: - shard = self.client.config.shards.find_one()["host"] + shard = self.client.config.shards.find_one()["host"] # type:ignore[index] num_members = shard.count(",") + 1 return num_members > 1 return False @@ -1015,12 +1018,12 @@ def fork( """ def _print_threads(*args: object) -> None: - if _print_threads.called: + if _print_threads.called: # type:ignore[attr-defined] return - _print_threads.called = True + _print_threads.called = True # type:ignore[attr-defined] print_thread_tracebacks() - _print_threads.called = False + _print_threads.called = False # type:ignore[attr-defined] def _target() -> None: signal.signal(signal.SIGUSR1, _print_threads) diff --git a/test/test_collection.py b/test/test_collection.py index 37f1b1eae2..e7ac248124 100644 --- a/test/test_collection.py +++ b/test/test_collection.py @@ -785,7 +785,7 @@ def test_insert_many_invalid(self): db.test.insert_many(1) # type: ignore[arg-type] with self.assertRaisesRegex(TypeError, "documents must be a non-empty list"): - db.test.insert_many(RawBSONDocument(encode({"_id": 2}))) # type: ignore[arg-type] + db.test.insert_many(RawBSONDocument(encode({"_id": 2}))) def test_delete_one(self): self.db.test.drop() diff --git a/test/test_mypy.py b/test/test_mypy.py index c692c70789..a1e94937b2 100644 --- a/test/test_mypy.py +++ b/test/test_mypy.py @@ -14,22 +14,20 @@ """Test that each file in mypy_fails/ actually fails mypy, and test some sample client code that uses PyMongo typings.""" - import os import tempfile import unittest from typing import TYPE_CHECKING, Any, Dict, Iterable, Iterator, List try: - from typing import TypedDict # type: ignore[attr-defined] + from typing_extensions import TypedDict - # Not available in Python 3.7 class Movie(TypedDict): # type: ignore[misc] name: str year: int except ImportError: - TypeDict = None + TypedDict = None try: @@ -304,6 +302,28 @@ def test_typeddict_document_type(self) -> None: assert retreived["year"] == 1 assert retreived["name"] == "a" + @only_type_check + def test_typeddict_document_type_insertion(self) -> None: + client: MongoClient[Movie] = MongoClient() + coll = client.test.test + mov = {"name": "THX-1138", "year": 1971} + movie = Movie(name="THX-1138", year=1971) + coll.insert_one(mov) # type: ignore[arg-type] + coll.insert_one({"name": "THX-1138", "year": 1971}) # This will work because it is in-line. 
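+        # (mypy infers a pre-built plain dict like ``mov`` as ``Dict[str, object]``,
+        # hence the ``arg-type`` ignore above; in-line literals and ``Movie(...)``
+        # calls are checked structurally against the TypedDict.)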
+ coll.insert_one(movie) + coll.insert_many([mov]) # type: ignore[list-item] + coll.insert_many([movie]) + bad_mov = {"name": "THX-1138", "year": "WRONG TYPE"} + bad_movie = Movie(name="THX-1138", year="WRONG TYPE") # type: ignore[typeddict-item] + coll.insert_one(bad_mov) # type:ignore[arg-type] + coll.insert_one({"name": "THX-1138", "year": "WRONG TYPE"}) # type: ignore[typeddict-item] + coll.insert_one(bad_movie) + coll.insert_many([bad_mov]) # type: ignore[list-item] + coll.insert_many( + [{"name": "THX-1138", "year": "WRONG TYPE"}] # type: ignore[typeddict-item] + ) + coll.insert_many([bad_movie]) + @only_type_check def test_raw_bson_document_type(self) -> None: client = MongoClient(document_class=RawBSONDocument) diff --git a/test/utils.py b/test/utils.py index 6f35b48538..59349f4fdc 100644 --- a/test/utils.py +++ b/test/utils.py @@ -601,7 +601,7 @@ def ensure_all_connected(client: MongoClient) -> None: Depending on the use-case, the caller may need to clear any event listeners that are configured on the client. """ - hello = client.admin.command(HelloCompat.LEGACY_CMD) + hello: dict = client.admin.command(HelloCompat.LEGACY_CMD) if "setName" not in hello: raise ConfigurationError("cluster is not a replica set") @@ -612,7 +612,7 @@ def ensure_all_connected(client: MongoClient) -> None: def discover(): i = 0 while i < 100 and connected_host_list != target_host_list: - hello = client.admin.command( + hello: dict = client.admin.command( HelloCompat.LEGACY_CMD, read_preference=ReadPreference.SECONDARY ) connected_host_list.update([hello["me"]]) From 04356b0ffda966feed19057e21a02526312c08ad Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Tue, 1 Nov 2022 17:37:41 -0500 Subject: [PATCH 0287/1588] PYTHON-3498 Error installing virtual environment on zseries hosts (#1101) (#1103) --- .evergreen/utils.sh | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/.evergreen/utils.sh b/.evergreen/utils.sh index 67fa272683..30013ed06b 100755 --- a/.evergreen/utils.sh +++ b/.evergreen/utils.sh @@ -18,7 +18,8 @@ createvirtualenv () { echo "Cannot test without virtualenv" exit 1 fi - $VIRTUALENV $VENVPATH + # Workaround for bug in older versions of virtualenv. + $VIRTUALENV $VENVPATH || $PYTHON -m venv $VENVPATH if [ "Windows_NT" = "$OS" ]; then # Workaround https://bugs.python.org/issue32451: # mongovenv/Scripts/activate: line 3: $'\r': command not found From a00aabfa0d88ef239484cde8535c13ad77017955 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Fri, 4 Nov 2022 13:47:32 -0500 Subject: [PATCH 0288/1588] PYTHON-3502 GridFSBucket.download_to_stream slow (#1108) --- doc/changelog.rst | 8 ++++++++ gridfs/__init__.py | 10 ++++++++-- 2 files changed, 16 insertions(+), 2 deletions(-) diff --git a/doc/changelog.rst b/doc/changelog.rst index 4688a8fb65..4f4e5ace71 100644 --- a/doc/changelog.rst +++ b/doc/changelog.rst @@ -1,6 +1,14 @@ Changelog ========= +Changes in Version 4.3.3 +------------------------ + +- Fixed a performance regression in :meth:`~gridfs.GridOut.download_to_stream` + and :meth:`~gridfs.GridOut.download_to_stream_by_name` by reading in chunks + instead of line by line. + + Changes in Version 4.3 (4.3.2) ------------------------------ diff --git a/gridfs/__init__.py b/gridfs/__init__.py index 6ab843a85e..692567b2de 100644 --- a/gridfs/__init__.py +++ b/gridfs/__init__.py @@ -796,7 +796,10 @@ def download_to_stream( Added ``session`` parameter. 
""" with self.open_download_stream(file_id, session=session) as gout: - for chunk in gout: + while True: + chunk = gout.readchunk() + if not len(chunk): + break destination.write(chunk) @_csot.apply @@ -977,7 +980,10 @@ def download_to_stream_by_name( Added ``session`` parameter. """ with self.open_download_stream_by_name(filename, revision, session=session) as gout: - for chunk in gout: + while True: + chunk = gout.readchunk() + if not len(chunk): + break destination.write(chunk) def rename( From ff94b0e3094f6bf08645ff0a491ec9b51f504b53 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Fri, 4 Nov 2022 14:25:36 -0500 Subject: [PATCH 0289/1588] PYTHON-3501 Ensure Auth Environment Variables are Always Dynamic (#1107) --- test/auth_aws/test_auth_aws.py | 58 ++++++++++++++++++++++++++++++++++ 1 file changed, 58 insertions(+) diff --git a/test/auth_aws/test_auth_aws.py b/test/auth_aws/test_auth_aws.py index 372806bd24..e0329a783e 100644 --- a/test/auth_aws/test_auth_aws.py +++ b/test/auth_aws/test_auth_aws.py @@ -17,6 +17,7 @@ import os import sys import unittest +from unittest.mock import patch sys.path[0:0] = [""] @@ -111,6 +112,63 @@ def test_poisoned_cache(self): client.get_database().test.find_one() self.assertNotEqual(auth.get_cached_credentials(), None) + def test_environment_variables_ignored(self): + creds = self.setup_cache() + self.assertIsNotNone(creds) + prev = os.environ.copy() + + client = MongoClient(self.uri) + self.addCleanup(client.close) + + client.get_database().test.find_one() + + self.assertIsNotNone(auth.get_cached_credentials()) + + mock_env = dict( + AWS_ACCESS_KEY_ID="foo", AWS_SECRET_ACCESS_KEY="bar", AWS_SESSION_TOKEN="baz" + ) + + with patch.dict("os.environ", mock_env): + self.assertEqual(os.environ["AWS_ACCESS_KEY_ID"], "foo") + client.get_database().test.find_one() + + auth.set_cached_credentials(None) + + client2 = MongoClient(self.uri) + self.addCleanup(client2.close) + + with patch.dict("os.environ", mock_env): + self.assertEqual(os.environ["AWS_ACCESS_KEY_ID"], "foo") + with self.assertRaises(OperationFailure): + client2.get_database().test.find_one() + + def test_no_cache_environment_variables(self): + creds = self.setup_cache() + self.assertIsNotNone(creds) + auth.set_cached_credentials(None) + + mock_env = dict(AWS_ACCESS_KEY_ID=creds.username, AWS_SECRET_ACCESS_KEY=creds.password) + if creds.token: + mock_env["AWS_SESSION_TOKEN"] = creds.token + + client = MongoClient(self.uri) + self.addCleanup(client.close) + + with patch.dict(os.environ, mock_env): + self.assertEqual(os.environ["AWS_ACCESS_KEY_ID"], creds.username) + client.get_database().test.find_one() + + self.assertIsNone(auth.get_cached_credentials()) + + mock_env["AWS_ACCESS_KEY_ID"] = "foo" + + client2 = MongoClient(self.uri) + self.addCleanup(client2.close) + + with patch.dict("os.environ", mock_env), self.assertRaises(OperationFailure): + self.assertEqual(os.environ["AWS_ACCESS_KEY_ID"], "foo") + client2.get_database().test.find_one() + class TestAWSLambdaExamples(unittest.TestCase): def test_shared_client(self): From da4df7955529ce6edcebc65e10dbaab977a132fc Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Mon, 7 Nov 2022 10:37:33 -0800 Subject: [PATCH 0290/1588] PYTHON-3508 Improve the performance of GridOut.readline and GridOut.read (#1109) --- doc/changelog.rst | 19 ++++++-- gridfs/grid_file.py | 109 ++++++++++++++++++++++---------------------- 2 files changed, 71 insertions(+), 57 deletions(-) diff --git a/doc/changelog.rst b/doc/changelog.rst index 4f4e5ace71..b3587e04ca 100644 --- 
a/doc/changelog.rst +++ b/doc/changelog.rst @@ -4,10 +4,23 @@ Changelog Changes in Version 4.3.3 ------------------------ -- Fixed a performance regression in :meth:`~gridfs.GridOut.download_to_stream` - and :meth:`~gridfs.GridOut.download_to_stream_by_name` by reading in chunks - instead of line by line. +Version 4.3.3 fixes a number of bugs: +- Fixed a performance regression in :meth:`~gridfs.GridFSBucket.download_to_stream` + and :meth:`~gridfs.GridFSBucket.download_to_stream_by_name` by reading in chunks + instead of line by line (`PYTHON-3502`_). +- Improved performance of :meth:`gridfs.grid_file.GridOut.read` and + :meth:`gridfs.grid_file.GridOut.readline` (`PYTHON-3508`_). + +Issues Resolved +............... + +See the `PyMongo 4.3.3 release notes in JIRA`_ for the list of resolved issues +in this release. + +.. _PYTHON-3502: https://jira.mongodb.org/browse/PYTHON-3502 +.. _PYTHON-3508: https://jira.mongodb.org/browse/PYTHON-3508 +.. _PyMongo 4.3.3 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=34709 Changes in Version 4.3 (4.3.2) ------------------------------ diff --git a/gridfs/grid_file.py b/gridfs/grid_file.py index cec7d57a22..50efc0cd23 100644 --- a/gridfs/grid_file.py +++ b/gridfs/grid_file.py @@ -463,7 +463,10 @@ def __init__( self.__files = root_collection.files self.__file_id = file_id self.__buffer = EMPTY + # Start position within the current buffered chunk. + self.__buffer_pos = 0 self.__chunk_iter = None + # Position within the total file. self.__position = 0 self._file = file_document self._session = session @@ -510,12 +513,12 @@ def readchunk(self) -> bytes: """Reads a chunk at a time. If the current position is within a chunk the remainder of the chunk is returned. """ - received = len(self.__buffer) + received = len(self.__buffer) - self.__buffer_pos chunk_data = EMPTY chunk_size = int(self.chunk_size) if received > 0: - chunk_data = self.__buffer + chunk_data = self.__buffer[self.__buffer_pos :] elif self.__position < int(self.length): chunk_number = int((received + self.__position) / chunk_size) if self.__chunk_iter is None: @@ -531,25 +534,12 @@ def readchunk(self) -> bytes: self.__position += len(chunk_data) self.__buffer = EMPTY + self.__buffer_pos = 0 return chunk_data - def read(self, size: int = -1) -> bytes: - """Read at most `size` bytes from the file (less if there - isn't enough data). - - The bytes are returned as an instance of :class:`str` (:class:`bytes` - in python 3). If `size` is negative or omitted all data is read. - - :Parameters: - - `size` (optional): the number of bytes to read - - .. versionchanged:: 3.8 - This method now only checks for extra chunks after reading the - entire file. Previously, this method would check for extra chunks - on every call. - """ + def _read_size_or_line(self, size: int = -1, line: bool = False) -> bytes: + """Internal read() and readline() helper.""" self._ensure_file() - remainder = int(self.length) - self.__position if size < 0 or size > remainder: size = remainder @@ -558,11 +548,36 @@ def read(self, size: int = -1) -> bytes: return EMPTY received = 0 - data = io.BytesIO() + data = [] while received < size: - chunk_data = self.readchunk() + needed = size - received + if self.__buffer: + # Optimization: Read the buffer with zero byte copies. 
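+                # (The memoryview slices taken here and in the ``else`` branch are
+                # views into the underlying bytes, not copies; the data is copied
+                # only once, by the final ``b"".join(data)``.)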
+ buf = self.__buffer + chunk_start = self.__buffer_pos + chunk_data = memoryview(buf)[self.__buffer_pos :] + self.__buffer = EMPTY + self.__buffer_pos = 0 + self.__position += len(chunk_data) + else: + buf = self.readchunk() + chunk_start = 0 + chunk_data = memoryview(buf) + if line: + pos = buf.find(NEWLN, chunk_start, chunk_start + needed) - chunk_start + if pos >= 0: + # Decrease size to exit the loop. + size = received + pos + 1 + needed = pos + 1 + if len(chunk_data) > needed: + data.append(chunk_data[:needed]) + # Optimization: Save the buffer with zero byte copies. + self.__buffer = buf + self.__buffer_pos = chunk_start + needed + self.__position -= len(self.__buffer) - self.__buffer_pos + else: + data.append(chunk_data) received += len(chunk_data) - data.write(chunk_data) # Detect extra chunks after reading the entire file. if size == remainder and self.__chunk_iter: @@ -571,13 +586,24 @@ def read(self, size: int = -1) -> bytes: except StopIteration: pass - self.__position -= received - size + return b"".join(data) + + def read(self, size: int = -1) -> bytes: + """Read at most `size` bytes from the file (less if there + isn't enough data). + + The bytes are returned as an instance of :class:`str` (:class:`bytes` + in python 3). If `size` is negative or omitted all data is read. + + :Parameters: + - `size` (optional): the number of bytes to read - # Return 'size' bytes and store the rest. - data.seek(size) - self.__buffer = data.read() - data.seek(0) - return data.read(size) + .. versionchanged:: 3.8 + This method now only checks for extra chunks after reading the + entire file. Previously, this method would check for extra chunks + on every call. + """ + return self._read_size_or_line(size=size) def readline(self, size: int = -1) -> bytes: # type: ignore[override] """Read one line or up to `size` bytes from the file. @@ -585,33 +611,7 @@ def readline(self, size: int = -1) -> bytes: # type: ignore[override] :Parameters: - `size` (optional): the maximum number of bytes to read """ - remainder = int(self.length) - self.__position - if size < 0 or size > remainder: - size = remainder - - if size == 0: - return EMPTY - - received = 0 - data = io.BytesIO() - while received < size: - chunk_data = self.readchunk() - pos = chunk_data.find(NEWLN, 0, size) - if pos != -1: - size = received + pos + 1 - - received += len(chunk_data) - data.write(chunk_data) - if pos != -1: - break - - self.__position -= received - size - - # Return 'size' bytes and store the rest. 
- data.seek(size) - self.__buffer = data.read() - data.seek(0) - return data.read(size) + return self._read_size_or_line(size=size, line=True) def tell(self) -> int: """Return the current position of this file.""" @@ -651,6 +651,7 @@ def seek(self, pos: int, whence: int = _SEEK_SET) -> int: self.__position = new_pos self.__buffer = EMPTY + self.__buffer_pos = 0 if self.__chunk_iter: self.__chunk_iter.close() self.__chunk_iter = None From 1abcd3fc0c3f20c051cda36cdd01cc553dce5c53 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Mon, 7 Nov 2022 13:01:56 -0800 Subject: [PATCH 0291/1588] PYTHON-3513 Correctly pin to mypy==0.990 (#1110) --- .github/workflows/test-python.yml | 3 +-- pymongo/monitor.py | 4 ++-- test/mockupdb/test_mixed_version_sharded.py | 3 ++- test/mockupdb/test_mongos_command_read_mode.py | 3 ++- test/mockupdb/test_op_msg_read_preference.py | 3 ++- test/mockupdb/test_reset_and_request_check.py | 3 ++- test/mockupdb/test_slave_okay_rs.py | 3 ++- test/mockupdb/test_slave_okay_sharded.py | 3 ++- test/mockupdb/test_slave_okay_single.py | 3 ++- 9 files changed, 17 insertions(+), 11 deletions(-) diff --git a/.github/workflows/test-python.yml b/.github/workflows/test-python.yml index cbebc94e6f..414eef7a1b 100644 --- a/.github/workflows/test-python.yml +++ b/.github/workflows/test-python.yml @@ -39,7 +39,6 @@ jobs: mongodb-version: 4.4 - name: Run tests run: | - pip install mypy==0.942 python setup.py test mypytest: @@ -59,7 +58,7 @@ jobs: cache-dependency-path: 'setup.py' - name: Install dependencies run: | - python -m pip install -U pip mypy + python -m pip install -U pip mypy==0.990 pip install -e ".[zstd, encryption, ocsp]" - name: Run mypy run: | diff --git a/pymongo/monitor.py b/pymongo/monitor.py index b7d2b19118..44390e9180 100644 --- a/pymongo/monitor.py +++ b/pymongo/monitor.py @@ -430,10 +430,10 @@ def _shutdown_monitors(): def _shutdown_resources(): # _shutdown_monitors/_shutdown_executors may already be GC'd at shutdown. 
shutdown = _shutdown_monitors - if shutdown: + if shutdown: # type:ignore[truthy-function] shutdown() shutdown = _shutdown_executors - if shutdown: + if shutdown: # type:ignore[truthy-function] shutdown() diff --git a/test/mockupdb/test_mixed_version_sharded.py b/test/mockupdb/test_mixed_version_sharded.py index d5fb9913cc..7e12fcab35 100644 --- a/test/mockupdb/test_mixed_version_sharded.py +++ b/test/mockupdb/test_mixed_version_sharded.py @@ -19,10 +19,11 @@ from queue import Queue from mockupdb import MockupDB, go -from operations import upgrades from pymongo import MongoClient +from .operations import upgrades + class TestMixedVersionSharded(unittest.TestCase): def setup_server(self, upgrade): diff --git a/test/mockupdb/test_mongos_command_read_mode.py b/test/mockupdb/test_mongos_command_read_mode.py index b7f8532e38..a84907d8cf 100644 --- a/test/mockupdb/test_mongos_command_read_mode.py +++ b/test/mockupdb/test_mongos_command_read_mode.py @@ -16,7 +16,6 @@ import unittest from mockupdb import MockupDB, OpMsg, going -from operations import operations from pymongo import MongoClient, ReadPreference from pymongo.read_preferences import ( @@ -25,6 +24,8 @@ read_pref_mode_from_name, ) +from .operations import operations + class TestMongosCommandReadMode(unittest.TestCase): def test_aggregate(self): diff --git a/test/mockupdb/test_op_msg_read_preference.py b/test/mockupdb/test_op_msg_read_preference.py index b8d1348b97..37882912bb 100644 --- a/test/mockupdb/test_op_msg_read_preference.py +++ b/test/mockupdb/test_op_msg_read_preference.py @@ -18,7 +18,6 @@ from typing import Any from mockupdb import CommandBase, MockupDB, going -from operations import operations from pymongo import MongoClient, ReadPreference from pymongo.read_preferences import ( @@ -27,6 +26,8 @@ read_pref_mode_from_name, ) +from .operations import operations + class OpMsgReadPrefBase(unittest.TestCase): single_mongod = False diff --git a/test/mockupdb/test_reset_and_request_check.py b/test/mockupdb/test_reset_and_request_check.py index 778be3d5ca..bc00e38a09 100755 --- a/test/mockupdb/test_reset_and_request_check.py +++ b/test/mockupdb/test_reset_and_request_check.py @@ -17,12 +17,13 @@ import unittest from mockupdb import MockupDB, going, wait_until -from operations import operations from pymongo import MongoClient from pymongo.errors import ConnectionFailure from pymongo.server_type import SERVER_TYPE +from .operations import operations + class TestResetAndRequestCheck(unittest.TestCase): def __init__(self, *args, **kwargs): diff --git a/test/mockupdb/test_slave_okay_rs.py b/test/mockupdb/test_slave_okay_rs.py index 5a162c08e3..7ac489117a 100644 --- a/test/mockupdb/test_slave_okay_rs.py +++ b/test/mockupdb/test_slave_okay_rs.py @@ -20,10 +20,11 @@ import unittest from mockupdb import MockupDB, going -from operations import operations from pymongo import MongoClient +from .operations import operations + class TestSlaveOkayRS(unittest.TestCase): def setup_server(self): diff --git a/test/mockupdb/test_slave_okay_sharded.py b/test/mockupdb/test_slave_okay_sharded.py index 52c643b417..51e422595e 100644 --- a/test/mockupdb/test_slave_okay_sharded.py +++ b/test/mockupdb/test_slave_okay_sharded.py @@ -23,11 +23,12 @@ from queue import Queue from mockupdb import MockupDB, going -from operations import operations from pymongo import MongoClient from pymongo.read_preferences import make_read_preference, read_pref_mode_from_name +from .operations import operations + class TestSlaveOkaySharded(unittest.TestCase): def 
setup_server(self): diff --git a/test/mockupdb/test_slave_okay_single.py b/test/mockupdb/test_slave_okay_single.py index 07cd6c7448..bd36c77a04 100644 --- a/test/mockupdb/test_slave_okay_single.py +++ b/test/mockupdb/test_slave_okay_single.py @@ -23,12 +23,13 @@ import unittest from mockupdb import MockupDB, going -from operations import operations from pymongo import MongoClient from pymongo.read_preferences import make_read_preference, read_pref_mode_from_name from pymongo.topology_description import TOPOLOGY_TYPE +from .operations import operations + def topology_type_name(client): topology_type = client._topology._description.topology_type From bcb0ac0170a712391b353531d865f6cb43d83bc1 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Tue, 8 Nov 2022 12:10:44 -0600 Subject: [PATCH 0292/1588] PYTHON-3396 Support the Azure VM-assigned Managed Identity for Automatic KMS Credentials (#1105) --- .evergreen/config.yml | 121 ++++++++++++++++++++++++- .evergreen/run-mongodb-fle-gcp-auto.sh | 35 ------- .evergreen/run-tests.sh | 19 +++- test/test_on_demand_csfle.py | 44 +++++++++ 4 files changed, 180 insertions(+), 39 deletions(-) delete mode 100644 .evergreen/run-mongodb-fle-gcp-auto.sh diff --git a/.evergreen/config.yml b/.evergreen/config.yml index f0514681db..28e54e2ded 100644 --- a/.evergreen/config.yml +++ b/.evergreen/config.yml @@ -1072,6 +1072,50 @@ task_groups: tasks: - testgcpkms-task + - name: testazurekms_task_group + setup_group: + - func: fetch source + - func: prepare resources + - func: fix absolute paths + - func: make files executable + - command: shell.exec + params: + silent: true + shell: bash + script: |- + set -o errexit + ${PREPARE_SHELL} + echo '${testazurekms_publickey}' > /tmp/testazurekms_publickey + echo '${testazurekms_privatekey}' > /tmp/testazurekms_privatekey + # Set 600 permissions on private key file. Otherwise ssh / scp may error with permissions "are too open". + chmod 600 /tmp/testazurekms_privatekey + export AZUREKMS_CLIENTID="${testazurekms_clientid}" + export AZUREKMS_TENANTID="${testazurekms_tenantid}" + export AZUREKMS_SECRET="${testazurekms_secret}" + export AZUREKMS_DRIVERS_TOOLS="$DRIVERS_TOOLS" + export AZUREKMS_RESOURCEGROUP="${testazurekms_resourcegroup}" + export AZUREKMS_PUBLICKEYPATH="/tmp/testazurekms_publickey" + export AZUREKMS_PRIVATEKEYPATH="/tmp/testazurekms_privatekey" + export AZUREKMS_SCOPE="${testazurekms_scope}" + export AZUREKMS_VMNAME_PREFIX="PYTHON_DRIVER" + $DRIVERS_TOOLS/.evergreen/csfle/azurekms/create-and-setup-vm.sh + - command: expansions.update + params: + file: testazurekms-expansions.yml + teardown_group: + - command: shell.exec + params: + shell: bash + script: |- + ${PREPARE_SHELL} + export AZUREKMS_VMNAME=${AZUREKMS_VMNAME} + export AZUREKMS_RESOURCEGROUP=${testazurekms_resourcegroup} + $DRIVERS_TOOLS/.evergreen/csfle/azurekms/delete-vm.sh + setup_group_can_fail_task: true + setup_group_timeout_secs: 1800 + tasks: + - testazurekms-task + tasks: # Wildcard task. Do you need to find out what tools are available and where? 
# Throw it here, and execute this task on all buildvariants @@ -1925,12 +1969,16 @@ tasks: export GCPKMS_PROJECT=${GCPKMS_PROJECT} export GCPKMS_ZONE=${GCPKMS_ZONE} export GCPKMS_INSTANCENAME=${GCPKMS_INSTANCENAME} - GCPKMS_CMD="SUCCESS=true ./.evergreen/run-mongodb-fle-gcp-auto.sh mongodb://localhost:27017" $DRIVERS_TOOLS/.evergreen/csfle/gcpkms/run-command.sh + GCPKMS_CMD="SUCCESS=true TEST_FLE_GCP_AUTO=1 LIBMONGOCRYPT_URL=https://s3.amazonaws.com/mciuploads/libmongocrypt/debian10/master/latest/libmongocrypt.tar.gz ./.evergreen/run-tests.sh" $DRIVERS_TOOLS/.evergreen/csfle/gcpkms/run-command.sh - name: "testgcpkms-fail-task" # testgcpkms-fail-task runs in a non-GCE environment. # It is expected to fail to obtain GCE credentials. commands: + - func: "bootstrap mongo-orchestration" + vars: + VERSION: "latest" + TOPOLOGY: "server" - command: shell.exec type: test params: @@ -1938,7 +1986,66 @@ tasks: shell: "bash" script: | ${PREPARE_SHELL} - SUCCESS=false ./.evergreen/run-mongodb-fle-gcp-auto.sh mongodb://localhost:27017 + export LIBMONGOCRYPT_URL=https://s3.amazonaws.com/mciuploads/libmongocrypt/ubuntu1804-64/master/latest/libmongocrypt.tar.gz + SUCCESS=false TEST_FLE_GCP_AUTO=1 ./.evergreen/run-tests.sh + + - name: testazurekms-task + commands: + - command: shell.exec + params: + shell: bash + script: |- + set -o errexit + ${PREPARE_SHELL} + cd src + echo "Copying files ... begin" + export AZUREKMS_RESOURCEGROUP=${testazurekms_resourcegroup} + export AZUREKMS_VMNAME=${AZUREKMS_VMNAME} + export AZUREKMS_PRIVATEKEYPATH=/tmp/testazurekms_privatekey + tar czf /tmp/mongo-python-driver.tgz . + AZUREKMS_SRC="/tmp/mongo-python-driver.tgz" \ + AZUREKMS_DST="~/" \ + $DRIVERS_TOOLS/.evergreen/csfle/azurekms/copy-file.sh + echo "Copying files ... end" + echo "Untarring file ... begin" + AZUREKMS_CMD="tar xf mongo-python-driver.tgz" \ + $DRIVERS_TOOLS/.evergreen/csfle/azurekms/run-command.sh + echo "Untarring file ... 
end" + - command: shell.exec + type: test + params: + shell: bash + script: |- + set -o errexit + ${PREPARE_SHELL} + export AZUREKMS_RESOURCEGROUP=${testazurekms_resourcegroup} + export AZUREKMS_VMNAME=${AZUREKMS_VMNAME} + export AZUREKMS_PRIVATEKEYPATH=/tmp/testazurekms_privatekey + AZUREKMS_CMD="KEY_NAME='${testazurekms_keyname}' KEY_VAULT_ENDPOINT='${testazurekms_keyvaultendpoint}' LIBMONGOCRYPT_URL=https://s3.amazonaws.com/mciuploads/libmongocrypt/debian10/master/latest/libmongocrypt.tar.gz SUCCESS=true TEST_FLE_AZURE_AUTO=1 ./.evergreen/run-tests.sh" \ + $DRIVERS_TOOLS/.evergreen/csfle/azurekms/run-command.sh + + - name: testazurekms-fail-task + commands: + - func: fetch source + - func: make files executable + - func: "bootstrap mongo-orchestration" + vars: + VERSION: "latest" + TOPOLOGY: "server" + - command: shell.exec + type: test + params: + shell: bash + script: |- + set -o errexit + ${PREPARE_SHELL} + cd src + PYTHON_BINARY= + KEY_NAME='${testazurekms_keyname}' \ + KEY_VAULT_ENDPOINT='${testazurekms_keyvaultendpoint}' \ + LIBMONGOCRYPT_URL=https://s3.amazonaws.com/mciuploads/libmongocrypt/ubuntu1804-64/master/latest/libmongocrypt.tar.gz \ + SUCCESS=false TEST_FLE_AZURE_AUTO=1 \ + ./.evergreen/run-tests.sh axes: # Choice of distro @@ -2920,12 +3027,20 @@ buildvariants: - name: testgcpkms-variant display_name: "GCP KMS" run_on: - - debian11-small + - ubuntu1804-test tasks: - name: testgcpkms_task_group batchtime: 20160 # Use a batchtime of 14 days as suggested by the CSFLE test README - testgcpkms-fail-task +- name: testazurekms-variant + display_name: "Azure KMS" + run_on: ubuntu1804-test + tasks: + - name: testazurekms_task_group + batchtime: 20160 # Use a batchtime of 14 days as suggested by the CSFLE test README + - testazurekms-fail-task + - name: Release display_name: Release batchtime: 20160 # 14 days diff --git a/.evergreen/run-mongodb-fle-gcp-auto.sh b/.evergreen/run-mongodb-fle-gcp-auto.sh deleted file mode 100644 index 8b92551c10..0000000000 --- a/.evergreen/run-mongodb-fle-gcp-auto.sh +++ /dev/null @@ -1,35 +0,0 @@ -#!/bin/bash - -set -o xtrace -set -o errexit # Exit the script with error if any of the commands fail - -# Supported/used environment variables: -# MONGODB_URI Set the URI, including an optional username/password to use to connect to the server -# SUCCESS Whether the authentication is expected to succeed or fail. 
One of "true" or "false" -############################################ -# Main Program # -############################################ - -if [[ -z "$1" ]]; then - echo "usage: $0 " - exit 1 -fi -export MONGODB_URI="$1" - -if echo "$MONGODB_URI" | grep -q "@"; then - echo "MONGODB_URI unexpectedly contains user credentials in FLE GCP test!"; - exit 1 -fi -# Now we can safely enable xtrace -set -o xtrace - -authtest () { - echo "Running GCP Credential Acquisition Test with $PYTHON" - $PYTHON --version - $PYTHON -m pip install --upgrade wheel setuptools pip - $PYTHON -m pip install '.[encryption]' - $PYTHON -m pip install https://github.com/mongodb/libmongocrypt/archive/refs/heads/master.zip#subdirectory=bindings/python - TEST_FLE_GCP_AUTO=1 $PYTHON test/test_on_demand_csfle.py -} - -PYTHON="python3" authtest diff --git a/.evergreen/run-tests.sh b/.evergreen/run-tests.sh index db20c9111e..959ad901ad 100755 --- a/.evergreen/run-tests.sh +++ b/.evergreen/run-tests.sh @@ -101,7 +101,8 @@ if [ -n "$TEST_PYOPENSSL" ]; then python -m pip install --prefer-binary pyopenssl requests service_identity fi -if [ -n "$TEST_ENCRYPTION" ]; then +if [ -n "$TEST_ENCRYPTION" ] || [ -n "$TEST_FLE_AZURE_AUTO" ] || [ -n "$TEST_FLE_GCP_AUTO" ]; then + createvirtualenv $PYTHON venv-encryption trap "deactivate; rm -rf venv-encryption" EXIT HUP PYTHON=python @@ -146,7 +147,9 @@ if [ -n "$TEST_ENCRYPTION" ]; then python -c "import pymongocrypt; print('pymongocrypt version: '+pymongocrypt.__version__)" python -c "import pymongocrypt; print('libmongocrypt version: '+pymongocrypt.libmongocrypt_version())" # PATH is updated by PREPARE_SHELL for access to mongocryptd. +fi +if [ -n "$TEST_ENCRYPTION" ]; then # Need aws dependency for On-Demand KMS Credentials. python -m pip install '.[aws]' @@ -171,6 +174,20 @@ if [ -n "$TEST_ENCRYPTION" ]; then TEST_ARGS="-s test.test_encryption" fi +if [ -n "$TEST_FLE_AZURE_AUTO" ] || [ -n "$TEST_FLE_GCP_AUTO" ]; then + if [[ -z "$SUCCESS" ]]; then + echo "Must define SUCCESS" + exit 1 + fi + + if echo "$MONGODB_URI" | grep -q "@"; then + echo "MONGODB_URI unexpectedly contains user credentials in FLE test!"; + exit 1 + fi + + TEST_ARGS="-s test.test_on_demand_csfle" +fi + if [ -n "$DATA_LAKE" ]; then TEST_ARGS="-s test.test_data_lake" fi diff --git a/test/test_on_demand_csfle.py b/test/test_on_demand_csfle.py index 408c942cc7..d5668199a3 100644 --- a/test/test_on_demand_csfle.py +++ b/test/test_on_demand_csfle.py @@ -65,3 +65,47 @@ def test_02_success(self): codec_options=CodecOptions(), ) self.client_encryption.create_data_key("gcp", self.master_key) + + +class TestonDemandAzureCredentials(IntegrationTest): + @classmethod + @unittest.skipUnless(_HAVE_PYMONGOCRYPT, "pymongocrypt is not installed") + @client_context.require_version_min(4, 2, -1) + def setUpClass(cls): + super(TestonDemandAzureCredentials, cls).setUpClass() + + def setUp(self): + super(TestonDemandAzureCredentials, self).setUp() + self.master_key = { + "keyVaultEndpoint": "https://keyvault-drivers-2411.vault.azure.net/keys/", + "keyName": "KEY-NAME", + } + + @unittest.skipIf(not os.getenv("TEST_FLE_AZURE_AUTO"), "Not testing FLE Azure auto") + def test_01_failure(self): + if os.environ["SUCCESS"].lower() == "true": + self.skipTest("Expecting success") + self.client_encryption = ClientEncryption( + kms_providers={"azure": {}}, + key_vault_namespace="keyvault.datakeys", + key_vault_client=client_context.client, + codec_options=CodecOptions(), + ) + with self.assertRaises(EncryptionError): + 
self.client_encryption.create_data_key("azure", self.master_key) + + @unittest.skipIf(not os.getenv("TEST_FLE_AZURE_AUTO"), "Not testing FLE Azure auto") + def test_02_success(self): + if os.environ["SUCCESS"].lower() == "false": + self.skipTest("Expecting failure") + self.client_encryption = ClientEncryption( + kms_providers={"azure": {}}, + key_vault_namespace="keyvault.datakeys", + key_vault_client=client_context.client, + codec_options=CodecOptions(), + ) + self.client_encryption.create_data_key("azure", self.master_key) + + +if __name__ == "__main__": + unittest.main(verbosity=2) From c106c08c1d504866ca1a40043cf7e93c675d4ccf Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Tue, 8 Nov 2022 11:13:34 -0800 Subject: [PATCH 0293/1588] PYTHON-3295 Add CSOT docs page (#1111) --- doc/changelog.rst | 4 +- doc/examples/index.rst | 1 + doc/examples/timeouts.rst | 162 ++++++++++++++++++++++++++++++++++++ doc/examples/type_hints.rst | 2 +- pymongo/__init__.py | 2 + 5 files changed, 169 insertions(+), 2 deletions(-) create mode 100644 doc/examples/timeouts.rst diff --git a/doc/changelog.rst b/doc/changelog.rst index b3587e04ca..8220b6897a 100644 --- a/doc/changelog.rst +++ b/doc/changelog.rst @@ -11,6 +11,8 @@ Version 4.3.3 fixes a number of bugs: instead of line by line (`PYTHON-3502`_). - Improved performance of :meth:`gridfs.grid_file.GridOut.read` and :meth:`gridfs.grid_file.GridOut.readline` (`PYTHON-3508`_). +- Added the :ref:`timeout-example` example page to improve the documentation + for :func:`pymongo.timeout`. Issues Resolved ............... @@ -90,7 +92,7 @@ PyMongo 4.2 brings a number of improvements including: - Support for the Queryable Encryption beta with MongoDB 6.0. Note that backwards-breaking changes may be made before the final release. See :ref:`automatic-queryable-client-side-encryption` for example usage. - Provisional (beta) support for :func:`pymongo.timeout` to apply a single timeout - to an entire block of pymongo operations. + to an entire block of pymongo operations. See :ref:`timeout-example` for examples. - Added the ``timeoutMS`` URI and keyword argument to :class:`~pymongo.mongo_client.MongoClient`. - Added the :attr:`pymongo.errors.PyMongoError.timeout` property which is ``True`` when the error was caused by a timeout. diff --git a/doc/examples/index.rst b/doc/examples/index.rst index 6cdeafc201..ee4aa27284 100644 --- a/doc/examples/index.rst +++ b/doc/examples/index.rst @@ -30,6 +30,7 @@ MongoDB, you can start it like so: mod_wsgi server_selection tailable + timeouts tls type_hints encryption diff --git a/doc/examples/timeouts.rst b/doc/examples/timeouts.rst new file mode 100644 index 0000000000..73095e5af5 --- /dev/null +++ b/doc/examples/timeouts.rst @@ -0,0 +1,162 @@ + +.. _timeout-example: + +Client Side Operation Timeout +============================= + +PyMongo 4.2 introduced :meth:`~pymongo.timeout` and the ``timeoutMS`` +URI and keyword argument to :class:`~pymongo.mongo_client.MongoClient`. +These features allow applications to more easily limit the amount of time that +one or more operations can execute before control is returned to the app. This +timeout applies to all of the work done to execute the operation, including +but not limited to server selection, connection checkout, serialization, and +server-side execution. 
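+
+By contrast, longstanding options such as ``serverSelectionTimeoutMS``,
+``connectTimeoutMS``, and ``socketTimeoutMS`` each bound only a single phase of
+an operation. A rough sketch of that older style (the option names are real,
+the values merely illustrative)::
+
+    # Each option below limits one phase only; none bounds an operation
+    # end to end the way timeoutMS / pymongo.timeout do.
+    client = MongoClient(
+        serverSelectionTimeoutMS=5000,
+        connectTimeoutMS=5000,
+        socketTimeoutMS=5000,
+    )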
+ +Basic Usage +----------- + +The following example uses :meth:`~pymongo.timeout` to configure a 10-second +timeout for an :meth:`~pymongo.collection.Collection.insert_one` operation:: + + import pymongo + with pymongo.timeout(10): + coll.insert_one({"name": "Nunu"}) + +The :meth:`~pymongo.timeout` applies to all pymongo operations within the block. +The following example ensures that both the `insert` and the `find` complete +within 10 seconds total, or raise a timeout error:: + + with pymongo.timeout(10): + coll.insert_one({"name": "Nunu"}) + coll.find_one({"name": "Nunu"}) + +When nesting :func:`~pymongo.timeout`, the nested deadline is capped by the outer +deadline. The deadline can only be shortened, not extended. +When exiting the block, the previous deadline is restored:: + + with pymongo.timeout(5): + coll.find_one() # Uses the 5 second deadline. + with pymongo.timeout(3): + coll.find_one() # Uses the 3 second deadline. + coll.find_one() # Uses the original 5 second deadline. + with pymongo.timeout(10): + coll.find_one() # Still uses the original 5 second deadline. + coll.find_one() # Uses the original 5 second deadline. + +Timeout errors +-------------- + +When the :meth:`~pymongo.timeout` with-statement is entered, a deadline is set +for the entire block. When that deadline is exceeded, any blocking pymongo operation +will raise a timeout exception. For example:: + + try: + with pymongo.timeout(10): + coll.insert_one({"name": "Nunu"}) + time.sleep(10) + # The deadline has now expired, the next operation will raise + # a timeout exception. + coll.find_one({"name": "Nunu"}) + except PyMongoError as exc: + if exc.timeout: + print(f"block timed out: {exc!r}") + else: + print(f"failed with non-timeout error: {exc!r}") + +The :attr:`pymongo.errors.PyMongoError.timeout` property (added in PyMongo 4.2) +will be ``True`` when the error was caused by a timeout and ``False`` otherwise. + +The timeoutMS URI option +------------------------ + +PyMongo 4.2 also added support for the ``timeoutMS`` URI and keyword argument to +:class:`~pymongo.mongo_client.MongoClient`. When this option is configured, the +client will automatically apply the timeout to each API call. For example:: + + client = MongoClient("mongodb://localhost/?timeoutMS=10000") + coll = client.test.test + coll.insert_one({"name": "Nunu"}) # Uses a 10-second timeout. + coll.find_one({"name": "Nunu"}) # Also uses a 10-second timeout. + +The above is roughly equivalent to:: + + client = MongoClient() + coll = client.test.test + with pymongo.timeout(10): + coll.insert_one({"name": "Nunu"}) + with pymongo.timeout(10): + coll.find_one({"name": "Nunu"}) + +pymongo.timeout overrides timeoutMS +----------------------------------- + +:meth:`~pymongo.timeout` overrides ``timeoutMS``; within a +:meth:`~pymongo.timeout` block a client's ``timeoutMS`` option is ignored:: + + client = MongoClient("mongodb://localhost/?timeoutMS=10000") + coll = client.test.test + coll.insert_one({"name": "Nunu"}) # Uses the client's 10-second timeout. + # pymongo.timeout overrides the client's timeoutMS. + with pymongo.timeout(20): + coll.insert_one({"name": "Nunu"}) # Uses the 20-second timeout. + with pymongo.timeout(5): + coll.find_one({"name": "Nunu"}) # Uses the 5-second timeout. + +pymongo.timeout is thread safe +------------------------------ + +:meth:`~pymongo.timeout` is thread safe; the timeout only applies to current +thread and multiple threads can configure different timeouts in parallel. 
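+
+For example (a minimal sketch, assuming the ``coll`` collection from the
+examples above), two threads can run with independent deadlines in parallel::
+
+    import threading
+
+    def worker(seconds):
+        # Each thread tracks its own deadline.
+        with pymongo.timeout(seconds):
+            coll.find_one({"name": "Nunu"})
+
+    threads = [threading.Thread(target=worker, args=(s,)) for s in (5, 1)]
+    for t in threads:
+        t.start()
+    for t in threads:
+        t.join()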
+ +pymongo.timeout is asyncio safe +------------------------------- + +:meth:`~pymongo.timeout` is asyncio safe; the timeout only applies to current +Task and multiple Tasks can configure different timeouts concurrently. +:meth:`~pymongo.timeout` can be used identically in +`Motor `_, for example:: + + import motor.motor_asyncio + client = motor.motor_asyncio.AsyncIOMotorClient() + coll = client.test.test + with pymongo.timeout(10): + await coll.insert_one({"name": "Nunu"}) + await coll.find_one({"name": "Nunu"}) + +Troubleshooting +--------------- + +There are many timeout errors that can be raised depending on when the timeout +expires. In code, these can be identified with the :attr:`pymongo.errors.PyMongoError.timeout` +property. Some specific timeout errors examples are described below. + +When the client was unable to find an available server to run the operation +within the given timeout:: + + pymongo.errors.ServerSelectionTimeoutError: No servers found yet, Timeout: -0.00202266700216569s, Topology Description: ]> + +When either the client was unable to establish a connection within the given +timeout or the operation was sent but the server was not able to respond in time:: + + pymongo.errors.NetworkTimeout: localhost:27017: timed out + +When the server cancelled the operation because it exceeded the given timeout. +Note that the operation may have partially completed on the server (depending +on the operation):: + + pymongo.errors.ExecutionTimeout: operation exceeded time limit, full error: {'ok': 0.0, 'errmsg': 'operation exceeded time limit', 'code': 50, 'codeName': 'MaxTimeMSExpired'} + +When the client cancelled the operation because it was not possible to complete +within the given timeout:: + + pymongo.errors.ExecutionTimeout: operation would exceed time limit, remaining timeout:0.00196 <= network round trip time:0.00427 + +When the client attempted a write operation but the server could not replicate +that write (according to the configured write concern) within the given timeout:: + + pymongo.errors.WTimeoutError: operation exceeded time limit, full error: {'code': 50, 'codeName': 'MaxTimeMSExpired', 'errmsg': 'operation exceeded time limit', 'errInfo': {'writeConcern': {'w': 1, 'wtimeout': 0}}} + +The same error as above but for :meth:`~pymongo.collection.Collection.insert_many` +or :meth:`~pymongo.collection.Collection.bulk_write`:: + + pymongo.errors.BulkWriteError: batch op errors occurred, full error: {'writeErrors': [], 'writeConcernErrors': [{'code': 50, 'codeName': 'MaxTimeMSExpired', 'errmsg': 'operation exceeded time limit', 'errInfo': {'writeConcern': {'w': 1, 'wtimeout': 0}}}], 'nInserted': 2, 'nUpserted': 0, 'nMatched': 0, 'nModified': 0, 'nRemoved': 0, 'upserted': []} diff --git a/doc/examples/type_hints.rst b/doc/examples/type_hints.rst index e829441976..cd178038ad 100644 --- a/doc/examples/type_hints.rst +++ b/doc/examples/type_hints.rst @@ -2,7 +2,7 @@ .. _type_hints-example: Type Hints -=========== +========== As of version 4.1, PyMongo ships with `type hints`_. With type hints, Python type checkers can easily find bugs before they reveal themselves in your code. diff --git a/pymongo/__init__.py b/pymongo/__init__.py index 6394e8250e..12b62fe9f5 100644 --- a/pymongo/__init__.py +++ b/pymongo/__init__.py @@ -161,6 +161,8 @@ def timeout(seconds: Optional[float]) -> ContextManager: :Raises: - :py:class:`ValueError`: When `seconds` is negative. + See :ref:`timeout-example` for more examples. + .. 
versionadded:: 4.2 """ if not isinstance(seconds, (int, float, type(None))): From 0d301f13c51791c52a57e5c1c07abcabe19d0fd5 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Tue, 8 Nov 2022 12:46:52 -0800 Subject: [PATCH 0294/1588] PYTHON-3295 Improve description of nested timeout() calls --- pymongo/__init__.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pymongo/__init__.py b/pymongo/__init__.py index 12b62fe9f5..789df62071 100644 --- a/pymongo/__init__.py +++ b/pymongo/__init__.py @@ -142,8 +142,8 @@ def timeout(seconds: Optional[float]) -> ContextManager: else: print(f"failed with non-timeout error: {exc!r}") - When nesting :func:`~pymongo.timeout`, the newly computed deadline is capped to at most - the existing deadline. The deadline can only be shortened, not extended. + When nesting :func:`~pymongo.timeout`, the nested deadline is capped by + the outer deadline. The deadline can only be shortened, not extended. When exiting the block, the previous deadline is restored:: with pymongo.timeout(5): From 87b09847a476fdc24ed7e847dbb94076c4eb9c83 Mon Sep 17 00:00:00 2001 From: Julius Park Date: Thu, 10 Nov 2022 09:53:19 -0800 Subject: [PATCH 0295/1588] PYTHON-3494 Improve Documentation Surrounding Type-Checking "_id" (#1104) --- doc/examples/type_hints.rst | 66 +++++++++++++++++++++++++++++++++++-- test/test_mypy.py | 61 +++++++++++++++++++++++++++++++--- 2 files changed, 120 insertions(+), 7 deletions(-) diff --git a/doc/examples/type_hints.rst b/doc/examples/type_hints.rst index cd178038ad..38349038b1 100644 --- a/doc/examples/type_hints.rst +++ b/doc/examples/type_hints.rst @@ -97,6 +97,7 @@ You can use :py:class:`~typing.TypedDict` (Python 3.8+) when using a well-define These methods automatically add an "_id" field. .. doctest:: + :pyversion: >= 3.8 >>> from typing import TypedDict >>> from pymongo import MongoClient @@ -111,14 +112,73 @@ These methods automatically add an "_id" field. >>> result = collection.find_one({"name": "Jurassic Park"}) >>> assert result is not None >>> assert result["year"] == 1993 - >>> # This will not be type checked, despite being present, because it is added by PyMongo. - >>> assert type(result["_id"]) == ObjectId + >>> # This will raise a type-checking error, despite being present, because it is added by PyMongo. + >>> assert result["_id"] # type:ignore[typeddict-item] + +Modeling Document Types with TypedDict +-------------------------------------- + +You can use :py:class:`~typing.TypedDict` (Python 3.8+) to model structured data. +As noted above, PyMongo will automatically add an `_id` field if it is not present. This also applies to TypedDict. +There are three approaches to this: + + 1. Do not specify `_id` at all. It will be inserted automatically, and can be retrieved at run-time, but will yield a type-checking error unless explicitly ignored. + + 2. Specify `_id` explicitly. This will mean that every instance of your custom TypedDict class will have to pass a value for `_id`. + + 3. Make use of :py:class:`~typing.NotRequired`. This has the flexibility of option 1, but with the ability to access the `_id` field without causing a type-checking error. + +Note: to use :py:class:`~typing.TypedDict` and :py:class:`~typing.NotRequired` in earlier versions of Python (<3.8, <3.11), use the `typing_extensions` package. + +.. 
doctest:: typed-dict-example + :pyversion: >= 3.11 + + >>> from typing import TypedDict, NotRequired + >>> from pymongo import MongoClient + >>> from pymongo.collection import Collection + >>> from bson import ObjectId + >>> class Movie(TypedDict): + ... name: str + ... year: int + ... + >>> class ExplicitMovie(TypedDict): + ... _id: ObjectId + ... name: str + ... year: int + ... + >>> class NotRequiredMovie(TypedDict): + ... _id: NotRequired[ObjectId] + ... name: str + ... year: int + ... + >>> client: MongoClient = MongoClient() + >>> collection: Collection[Movie] = client.test.test + >>> inserted = collection.insert_one(Movie(name="Jurassic Park", year=1993)) + >>> result = collection.find_one({"name": "Jurassic Park"}) + >>> assert result is not None + >>> # This will yield a type-checking error, despite being present, because it is added by PyMongo. + >>> assert result["_id"] # type:ignore[typeddict-item] + >>> collection: Collection[ExplicitMovie] = client.test.test + >>> # Note that the _id keyword argument must be supplied + >>> inserted = collection.insert_one(ExplicitMovie(_id=ObjectId(), name="Jurassic Park", year=1993)) + >>> result = collection.find_one({"name": "Jurassic Park"}) + >>> assert result is not None + >>> # This will not raise a type-checking error. + >>> assert result["_id"] + >>> collection: Collection[NotRequiredMovie] = client.test.test + >>> # Note the lack of _id, similar to the first example + >>> inserted = collection.insert_one(NotRequiredMovie(name="Jurassic Park", year=1993)) + >>> result = collection.find_one({"name": "Jurassic Park"}) + >>> assert result is not None + >>> # This will not raise a type-checking error, despite not being provided explicitly. + >>> assert result["_id"] + Typed Database -------------- While less common, you could specify that the documents in an entire database -match a well-defined shema using :py:class:`~typing.TypedDict` (Python 3.8+). +match a well-defined schema using :py:class:`~typing.TypedDict` (Python 3.8+). .. doctest:: diff --git a/test/test_mypy.py b/test/test_mypy.py index a1e94937b2..807f0e8ef3 100644 --- a/test/test_mypy.py +++ b/test/test_mypy.py @@ -20,14 +20,30 @@ from typing import TYPE_CHECKING, Any, Dict, Iterable, Iterator, List try: - from typing_extensions import TypedDict + from typing_extensions import NotRequired, TypedDict - class Movie(TypedDict): # type: ignore[misc] + from bson import ObjectId + + class Movie(TypedDict): name: str year: int -except ImportError: - TypedDict = None + class MovieWithId(TypedDict): + _id: ObjectId + name: str + year: int + + class ImplicitMovie(TypedDict): + _id: NotRequired[ObjectId] + name: str + year: int + +except ImportError as exc: + Movie = dict # type:ignore[misc,assignment] + ImplicitMovie = dict # type: ignore[assignment,misc] + MovieWithId = dict # type: ignore[assignment,misc] + TypedDict = None # type: ignore[assignment] + NotRequired = None # type: ignore[assignment] try: @@ -324,6 +340,43 @@ def test_typeddict_document_type_insertion(self) -> None: ) coll.insert_many([bad_movie]) + @only_type_check + def test_typeddict_explicit_document_type(self) -> None: + out = MovieWithId(_id=ObjectId(), name="THX-1138", year=1971) + assert out is not None + # This should fail because the output is a Movie. + assert out["foo"] # type:ignore[typeddict-item] + assert out["_id"] + + # This should work the same as the test above, but this time using NotRequired to allow + # automatic insertion of the _id field by insert_one. 
+ @only_type_check + def test_typeddict_not_required_document_type(self) -> None: + out = ImplicitMovie(name="THX-1138", year=1971) + assert out is not None + # This should fail because the output is a Movie. + assert out["foo"] # type:ignore[typeddict-item] + assert out["_id"] + + @only_type_check + def test_typeddict_empty_document_type(self) -> None: + out = Movie(name="THX-1138", year=1971) + assert out is not None + # This should fail because the output is a Movie. + assert out["foo"] # type:ignore[typeddict-item] + # This should fail because _id is not included in our TypedDict definition. + assert out["_id"] # type:ignore[typeddict-item] + + def test_typeddict_find_notrequired(self): + if NotRequired is None or ImplicitMovie is None: + raise unittest.SkipTest("Python 3.11+ is required to use NotRequired.") + client: MongoClient[ImplicitMovie] = rs_or_single_client() + coll = client.test.test + coll.insert_one(ImplicitMovie(name="THX-1138", year=1971)) + out = coll.find_one({}) + assert out is not None + assert out["_id"] + @only_type_check def test_raw_bson_document_type(self) -> None: client = MongoClient(document_class=RawBSONDocument) From 133c55d8cb8ca87beb44f22932c4391803c34694 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Thu, 10 Nov 2022 13:31:14 -0800 Subject: [PATCH 0296/1588] PYTHON-3500 Improve test coverage for retryable handshake errors (#1112) --- .../unified/handshakeError.json | 2888 ++++++++++++++++- .../unified/handshakeError.json | 1648 +++++++++- 2 files changed, 4418 insertions(+), 118 deletions(-) diff --git a/test/retryable_reads/unified/handshakeError.json b/test/retryable_reads/unified/handshakeError.json index 2cf1d173f8..58bbce66a8 100644 --- a/test/retryable_reads/unified/handshakeError.json +++ b/test/retryable_reads/unified/handshakeError.json @@ -15,25 +15,27 @@ "createEntities": [ { "client": { - "id": "client0", + "id": "client", "useMultipleMongoses": false, "observeEvents": [ + "connectionCheckOutStartedEvent", "commandStartedEvent", - "connectionCheckOutStartedEvent" + "commandSucceededEvent", + "commandFailedEvent" ] } }, { "database": { - "id": "database0", - "client": "client0", - "databaseName": "retryable-handshake-tests" + "id": "database", + "client": "client", + "databaseName": "retryable-reads-handshake-tests" } }, { "collection": { - "id": "collection0", - "database": "database0", + "id": "collection", + "database": "database", "collectionName": "coll" } } @@ -41,7 +43,7 @@ "initialData": [ { "collectionName": "coll", - "databaseName": "retryable-handshake-tests", + "databaseName": "retryable-reads-handshake-tests", "documents": [ { "_id": 1, @@ -59,6 +61,2060 @@ } ], "tests": [ + { + "description": "listDatabases succeeds after retryable handshake network error", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "ping", + "saslContinue" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectError": { + "isError": true + } + }, + { + "name": "listDatabases", + "object": "client", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "cmap", + "events": [ + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + 
"connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + } + ] + }, + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1 + }, + "databaseName": "retryable-reads-handshake-tests" + } + }, + { + "commandFailedEvent": { + "commandName": "ping" + } + }, + { + "commandStartedEvent": { + "commandName": "listDatabases" + } + }, + { + "commandSucceededEvent": { + "commandName": "listDatabases" + } + } + ] + } + ] + }, + { + "description": "listDatabases succeeds after retryable handshake server error (ShutdownInProgress)", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "ping", + "saslContinue" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectError": { + "isError": true + } + }, + { + "name": "listDatabases", + "object": "client", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "cmap", + "events": [ + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + } + ] + }, + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1 + }, + "databaseName": "retryable-reads-handshake-tests" + } + }, + { + "commandFailedEvent": { + "commandName": "ping" + } + }, + { + "commandStartedEvent": { + "commandName": "listDatabases" + } + }, + { + "commandSucceededEvent": { + "commandName": "listDatabases" + } + } + ] + } + ] + }, + { + "description": "listDatabaseNames succeeds after retryable handshake network error", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "ping", + "saslContinue" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectError": { + "isError": true + } + }, + { + "name": "listDatabaseNames", + "object": "client" + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "cmap", + "events": [ + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + } + ] + }, + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1 + }, + "databaseName": "retryable-reads-handshake-tests" + } + }, + { + "commandFailedEvent": { + "commandName": "ping" + } + }, + { + "commandStartedEvent": { + "commandName": "listDatabases" + } + }, + { + "commandSucceededEvent": { + "commandName": "listDatabases" + } + } + ] + } + ] + }, + { + "description": "listDatabaseNames succeeds after retryable handshake server error (ShutdownInProgress)", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "ping", + "saslContinue" + ], + "closeConnection": true + 
} + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectError": { + "isError": true + } + }, + { + "name": "listDatabaseNames", + "object": "client" + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "cmap", + "events": [ + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + } + ] + }, + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1 + }, + "databaseName": "retryable-reads-handshake-tests" + } + }, + { + "commandFailedEvent": { + "commandName": "ping" + } + }, + { + "commandStartedEvent": { + "commandName": "listDatabases" + } + }, + { + "commandSucceededEvent": { + "commandName": "listDatabases" + } + } + ] + } + ] + }, + { + "description": "createChangeStream succeeds after retryable handshake network error", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "ping", + "saslContinue" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectError": { + "isError": true + } + }, + { + "name": "createChangeStream", + "object": "client", + "arguments": { + "pipeline": [] + }, + "saveResultAsEntity": "changeStream" + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "cmap", + "events": [ + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + } + ] + }, + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1 + }, + "databaseName": "retryable-reads-handshake-tests" + } + }, + { + "commandFailedEvent": { + "commandName": "ping" + } + }, + { + "commandStartedEvent": { + "commandName": "aggregate" + } + }, + { + "commandSucceededEvent": { + "commandName": "aggregate" + } + } + ] + } + ] + }, + { + "description": "createChangeStream succeeds after retryable handshake server error (ShutdownInProgress)", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "ping", + "saslContinue" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectError": { + "isError": true + } + }, + { + "name": "createChangeStream", + "object": "client", + "arguments": { + "pipeline": [] + }, + "saveResultAsEntity": "changeStream" + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "cmap", + "events": [ + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + } + ] + }, + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1 + }, + "databaseName": "retryable-reads-handshake-tests" + } + }, + { + "commandFailedEvent": { + "commandName": "ping" + } + 
}, + { + "commandStartedEvent": { + "commandName": "aggregate" + } + }, + { + "commandSucceededEvent": { + "commandName": "aggregate" + } + } + ] + } + ] + }, + { + "description": "aggregate succeeds after retryable handshake network error", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "ping", + "saslContinue" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectError": { + "isError": true + } + }, + { + "name": "aggregate", + "object": "database", + "arguments": { + "pipeline": [ + { + "$listLocalSessions": {} + }, + { + "$limit": 1 + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "cmap", + "events": [ + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + } + ] + }, + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1 + }, + "databaseName": "retryable-reads-handshake-tests" + } + }, + { + "commandFailedEvent": { + "commandName": "ping" + } + }, + { + "commandStartedEvent": { + "commandName": "aggregate" + } + }, + { + "commandSucceededEvent": { + "commandName": "aggregate" + } + } + ] + } + ] + }, + { + "description": "aggregate succeeds after retryable handshake server error (ShutdownInProgress)", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "ping", + "saslContinue" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectError": { + "isError": true + } + }, + { + "name": "aggregate", + "object": "database", + "arguments": { + "pipeline": [ + { + "$listLocalSessions": {} + }, + { + "$limit": 1 + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "cmap", + "events": [ + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + } + ] + }, + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1 + }, + "databaseName": "retryable-reads-handshake-tests" + } + }, + { + "commandFailedEvent": { + "commandName": "ping" + } + }, + { + "commandStartedEvent": { + "commandName": "aggregate" + } + }, + { + "commandSucceededEvent": { + "commandName": "aggregate" + } + } + ] + } + ] + }, + { + "description": "listCollections succeeds after retryable handshake network error", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "ping", + "saslContinue" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectError": { + "isError": true + } + }, + { + "name": "listCollections", 
+ "object": "database", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "cmap", + "events": [ + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + } + ] + }, + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1 + }, + "databaseName": "retryable-reads-handshake-tests" + } + }, + { + "commandFailedEvent": { + "commandName": "ping" + } + }, + { + "commandStartedEvent": { + "commandName": "listCollections" + } + }, + { + "commandSucceededEvent": { + "commandName": "listCollections" + } + } + ] + } + ] + }, + { + "description": "listCollections succeeds after retryable handshake server error (ShutdownInProgress)", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "ping", + "saslContinue" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectError": { + "isError": true + } + }, + { + "name": "listCollections", + "object": "database", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "cmap", + "events": [ + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + } + ] + }, + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1 + }, + "databaseName": "retryable-reads-handshake-tests" + } + }, + { + "commandFailedEvent": { + "commandName": "ping" + } + }, + { + "commandStartedEvent": { + "commandName": "listCollections" + } + }, + { + "commandSucceededEvent": { + "commandName": "listCollections" + } + } + ] + } + ] + }, + { + "description": "listCollectionNames succeeds after retryable handshake network error", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "ping", + "saslContinue" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectError": { + "isError": true + } + }, + { + "name": "listCollectionNames", + "object": "database", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "cmap", + "events": [ + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + } + ] + }, + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1 + }, + "databaseName": "retryable-reads-handshake-tests" + } + }, + { + "commandFailedEvent": { + "commandName": "ping" + } + }, + { + "commandStartedEvent": { + "commandName": "listCollections" + } + }, + { + "commandSucceededEvent": { + "commandName": "listCollections" + } + } + ] + } + ] + }, + { + "description": "listCollectionNames succeeds after retryable handshake server 
error (ShutdownInProgress)", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "ping", + "saslContinue" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectError": { + "isError": true + } + }, + { + "name": "listCollectionNames", + "object": "database", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "cmap", + "events": [ + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + } + ] + }, + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1 + }, + "databaseName": "retryable-reads-handshake-tests" + } + }, + { + "commandFailedEvent": { + "commandName": "ping" + } + }, + { + "commandStartedEvent": { + "commandName": "listCollections" + } + }, + { + "commandSucceededEvent": { + "commandName": "listCollections" + } + } + ] + } + ] + }, + { + "description": "createChangeStream succeeds after retryable handshake network error", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "ping", + "saslContinue" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectError": { + "isError": true + } + }, + { + "name": "createChangeStream", + "object": "database", + "arguments": { + "pipeline": [] + }, + "saveResultAsEntity": "changeStream" + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "cmap", + "events": [ + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + } + ] + }, + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1 + }, + "databaseName": "retryable-reads-handshake-tests" + } + }, + { + "commandFailedEvent": { + "commandName": "ping" + } + }, + { + "commandStartedEvent": { + "commandName": "aggregate" + } + }, + { + "commandSucceededEvent": { + "commandName": "aggregate" + } + } + ] + } + ] + }, + { + "description": "createChangeStream succeeds after retryable handshake server error (ShutdownInProgress)", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "ping", + "saslContinue" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectError": { + "isError": true + } + }, + { + "name": "createChangeStream", + "object": "database", + "arguments": { + "pipeline": [] + }, + "saveResultAsEntity": "changeStream" + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "cmap", + "events": [ + { + "connectionCheckOutStartedEvent": {} + }, + { + 
"connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + } + ] + }, + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1 + }, + "databaseName": "retryable-reads-handshake-tests" + } + }, + { + "commandFailedEvent": { + "commandName": "ping" + } + }, + { + "commandStartedEvent": { + "commandName": "aggregate" + } + }, + { + "commandSucceededEvent": { + "commandName": "aggregate" + } + } + ] + } + ] + }, + { + "description": "aggregate succeeds after retryable handshake network error", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "ping", + "saslContinue" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectError": { + "isError": true + } + }, + { + "name": "aggregate", + "object": "collection", + "arguments": { + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "cmap", + "events": [ + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + } + ] + }, + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1 + }, + "databaseName": "retryable-reads-handshake-tests" + } + }, + { + "commandFailedEvent": { + "commandName": "ping" + } + }, + { + "commandStartedEvent": { + "commandName": "aggregate" + } + }, + { + "commandSucceededEvent": { + "commandName": "aggregate" + } + } + ] + } + ] + }, + { + "description": "aggregate succeeds after retryable handshake server error (ShutdownInProgress)", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "ping", + "saslContinue" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectError": { + "isError": true + } + }, + { + "name": "aggregate", + "object": "collection", + "arguments": { + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "cmap", + "events": [ + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + } + ] + }, + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1 + }, + "databaseName": "retryable-reads-handshake-tests" + } + }, + { + "commandFailedEvent": { + "commandName": "ping" + } + }, + { + "commandStartedEvent": { + "commandName": "aggregate" + } + }, + { + "commandSucceededEvent": { + "commandName": "aggregate" + } + } + ] + } + ] + }, + { + "description": "countDocuments succeeds after retryable handshake network error", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "ping", + "saslContinue" + ], + 
"closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectError": { + "isError": true + } + }, + { + "name": "countDocuments", + "object": "collection", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "cmap", + "events": [ + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + } + ] + }, + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1 + }, + "databaseName": "retryable-reads-handshake-tests" + } + }, + { + "commandFailedEvent": { + "commandName": "ping" + } + }, + { + "commandStartedEvent": { + "commandName": "aggregate" + } + }, + { + "commandSucceededEvent": { + "commandName": "aggregate" + } + } + ] + } + ] + }, + { + "description": "countDocuments succeeds after retryable handshake server error (ShutdownInProgress)", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "ping", + "saslContinue" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectError": { + "isError": true + } + }, + { + "name": "countDocuments", + "object": "collection", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "cmap", + "events": [ + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + } + ] + }, + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1 + }, + "databaseName": "retryable-reads-handshake-tests" + } + }, + { + "commandFailedEvent": { + "commandName": "ping" + } + }, + { + "commandStartedEvent": { + "commandName": "aggregate" + } + }, + { + "commandSucceededEvent": { + "commandName": "aggregate" + } + } + ] + } + ] + }, + { + "description": "estimatedDocumentCount succeeds after retryable handshake network error", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "ping", + "saslContinue" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectError": { + "isError": true + } + }, + { + "name": "estimatedDocumentCount", + "object": "collection" + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "cmap", + "events": [ + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + } + ] + }, + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1 + }, + "databaseName": "retryable-reads-handshake-tests" + } + }, + { + "commandFailedEvent": { + "commandName": "ping" + } + }, + { + "commandStartedEvent": { + "commandName": 
"count" + } + }, + { + "commandSucceededEvent": { + "commandName": "count" + } + } + ] + } + ] + }, + { + "description": "estimatedDocumentCount succeeds after retryable handshake server error (ShutdownInProgress)", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "ping", + "saslContinue" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectError": { + "isError": true + } + }, + { + "name": "estimatedDocumentCount", + "object": "collection" + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "cmap", + "events": [ + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + } + ] + }, + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1 + }, + "databaseName": "retryable-reads-handshake-tests" + } + }, + { + "commandFailedEvent": { + "commandName": "ping" + } + }, + { + "commandStartedEvent": { + "commandName": "count" + } + }, + { + "commandSucceededEvent": { + "commandName": "count" + } + } + ] + } + ] + }, + { + "description": "distinct succeeds after retryable handshake network error", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "ping", + "saslContinue" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectError": { + "isError": true + } + }, + { + "name": "distinct", + "object": "collection", + "arguments": { + "fieldName": "x", + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "cmap", + "events": [ + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + } + ] + }, + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1 + }, + "databaseName": "retryable-reads-handshake-tests" + } + }, + { + "commandFailedEvent": { + "commandName": "ping" + } + }, + { + "commandStartedEvent": { + "commandName": "distinct" + } + }, + { + "commandSucceededEvent": { + "commandName": "distinct" + } + } + ] + } + ] + }, + { + "description": "distinct succeeds after retryable handshake server error (ShutdownInProgress)", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "ping", + "saslContinue" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectError": { + "isError": true + } + }, + { + "name": "distinct", + "object": "collection", + "arguments": { + "fieldName": "x", + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "cmap", + "events": [ + { 
+ "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + } + ] + }, + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1 + }, + "databaseName": "retryable-reads-handshake-tests" + } + }, + { + "commandFailedEvent": { + "commandName": "ping" + } + }, + { + "commandStartedEvent": { + "commandName": "distinct" + } + }, + { + "commandSucceededEvent": { + "commandName": "distinct" + } + } + ] + } + ] + }, { "description": "find succeeds after retryable handshake network error", "operations": [ @@ -66,7 +2122,469 @@ "name": "failPoint", "object": "testRunner", "arguments": { - "client": "client0", + "client": "client", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "ping", + "saslContinue" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectError": { + "isError": true + } + }, + { + "name": "find", + "object": "collection", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "cmap", + "events": [ + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + } + ] + }, + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1 + }, + "databaseName": "retryable-reads-handshake-tests" + } + }, + { + "commandFailedEvent": { + "commandName": "ping" + } + }, + { + "commandStartedEvent": { + "commandName": "find" + } + }, + { + "commandSucceededEvent": { + "commandName": "find" + } + } + ] + } + ] + }, + { + "description": "find succeeds after retryable handshake server error (ShutdownInProgress)", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "ping", + "saslContinue" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectError": { + "isError": true + } + }, + { + "name": "find", + "object": "collection", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "cmap", + "events": [ + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + } + ] + }, + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1 + }, + "databaseName": "retryable-reads-handshake-tests" + } + }, + { + "commandFailedEvent": { + "commandName": "ping" + } + }, + { + "commandStartedEvent": { + "commandName": "find" + } + }, + { + "commandSucceededEvent": { + "commandName": "find" + } + } + ] + } + ] + }, + { + "description": "findOne succeeds after retryable handshake network error", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + 
"ping", + "saslContinue" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectError": { + "isError": true + } + }, + { + "name": "findOne", + "object": "collection", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "cmap", + "events": [ + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + } + ] + }, + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1 + }, + "databaseName": "retryable-reads-handshake-tests" + } + }, + { + "commandFailedEvent": { + "commandName": "ping" + } + }, + { + "commandStartedEvent": { + "commandName": "find" + } + }, + { + "commandSucceededEvent": { + "commandName": "find" + } + } + ] + } + ] + }, + { + "description": "findOne succeeds after retryable handshake server error (ShutdownInProgress)", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "ping", + "saslContinue" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectError": { + "isError": true + } + }, + { + "name": "findOne", + "object": "collection", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "cmap", + "events": [ + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + } + ] + }, + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1 + }, + "databaseName": "retryable-reads-handshake-tests" + } + }, + { + "commandFailedEvent": { + "commandName": "ping" + } + }, + { + "commandStartedEvent": { + "commandName": "find" + } + }, + { + "commandSucceededEvent": { + "commandName": "find" + } + } + ] + } + ] + }, + { + "description": "listIndexes succeeds after retryable handshake network error", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "ping", + "saslContinue" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectError": { + "isError": true + } + }, + { + "name": "listIndexes", + "object": "collection" + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "cmap", + "events": [ + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + } + ] + }, + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1 + }, + "databaseName": "retryable-reads-handshake-tests" + } + }, + { + "commandFailedEvent": { + "commandName": "ping" + } + }, + { + "commandStartedEvent": { + "commandName": "listIndexes" + } + }, + { + 
"commandSucceededEvent": { + "commandName": "listIndexes" + } + } + ] + } + ] + }, + { + "description": "listIndexes succeeds after retryable handshake server error (ShutdownInProgress)", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client", "failPoint": { "configureFailPoint": "failCommand", "mode": { @@ -74,8 +2592,8 @@ }, "data": { "failCommands": [ - "saslContinue", - "ping" + "ping", + "saslContinue" ], "closeConnection": true } @@ -84,7 +2602,7 @@ }, { "name": "runCommand", - "object": "database0", + "object": "database", "arguments": { "commandName": "ping", "command": { @@ -96,24 +2614,103 @@ } }, { - "name": "find", - "object": "collection0", - "arguments": { - "filter": { - "_id": 2 + "name": "listIndexes", + "object": "collection" + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "cmap", + "events": [ + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} } - }, - "expectResult": [ + ] + }, + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1 + }, + "databaseName": "retryable-reads-handshake-tests" + } + }, + { + "commandFailedEvent": { + "commandName": "ping" + } + }, + { + "commandStartedEvent": { + "commandName": "listIndexes" + } + }, { - "_id": 2, - "x": 22 + "commandSucceededEvent": { + "commandName": "listIndexes" + } } ] } + ] + }, + { + "description": "listIndexNames succeeds after retryable handshake network error", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "ping", + "saslContinue" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectError": { + "isError": true + } + }, + { + "name": "listIndexNames", + "object": "collection" + } ], "expectEvents": [ { - "client": "client0", + "client": "client", "eventType": "cmap", "events": [ { @@ -131,25 +2728,119 @@ ] }, { - "client": "client0", + "client": "client", "events": [ { "commandStartedEvent": { "command": { "ping": 1 }, - "databaseName": "retryable-handshake-tests" + "databaseName": "retryable-reads-handshake-tests" + } + }, + { + "commandFailedEvent": { + "commandName": "ping" + } + }, + { + "commandStartedEvent": { + "commandName": "listIndexes" + } + }, + { + "commandSucceededEvent": { + "commandName": "listIndexes" + } + } + ] + } + ] + }, + { + "description": "listIndexNames succeeds after retryable handshake server error (ShutdownInProgress)", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "ping", + "saslContinue" + ], + "closeConnection": true } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectError": { + "isError": true + } + }, + { + "name": "listIndexNames", + "object": "collection" + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "cmap", + "events": [ + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + 
}, + { + "connectionCheckOutStartedEvent": {} }, + { + "connectionCheckOutStartedEvent": {} + } + ] + }, + { + "client": "client", + "events": [ { "commandStartedEvent": { "command": { - "find": "coll", - "filter": { - "_id": 2 - } + "ping": 1 }, - "databaseName": "retryable-handshake-tests" + "databaseName": "retryable-reads-handshake-tests" + } + }, + { + "commandFailedEvent": { + "commandName": "ping" + } + }, + { + "commandStartedEvent": { + "commandName": "listIndexes" + } + }, + { + "commandSucceededEvent": { + "commandName": "listIndexes" } } ] @@ -157,13 +2848,13 @@ ] }, { - "description": "find succeeds after retryable handshake network error (ShutdownInProgress)", + "description": "createChangeStream succeeds after retryable handshake network error", "operations": [ { "name": "failPoint", "object": "testRunner", "arguments": { - "client": "client0", + "client": "client", "failPoint": { "configureFailPoint": "failCommand", "mode": { @@ -171,17 +2862,17 @@ }, "data": { "failCommands": [ - "saslContinue", - "ping" + "ping", + "saslContinue" ], - "errorCode": 91 + "closeConnection": true } } } }, { "name": "runCommand", - "object": "database0", + "object": "database", "arguments": { "commandName": "ping", "command": { @@ -193,24 +2884,111 @@ } }, { - "name": "find", - "object": "collection0", + "name": "createChangeStream", + "object": "collection", "arguments": { - "filter": { - "_id": 2 - } + "pipeline": [] }, - "expectResult": [ + "saveResultAsEntity": "changeStream" + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "cmap", + "events": [ + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + } + ] + }, + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1 + }, + "databaseName": "retryable-reads-handshake-tests" + } + }, + { + "commandFailedEvent": { + "commandName": "ping" + } + }, + { + "commandStartedEvent": { + "commandName": "aggregate" + } + }, { - "_id": 2, - "x": 22 + "commandSucceededEvent": { + "commandName": "aggregate" + } } ] } + ] + }, + { + "description": "createChangeStream succeeds after retryable handshake server error (ShutdownInProgress)", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "ping", + "saslContinue" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectError": { + "isError": true + } + }, + { + "name": "createChangeStream", + "object": "collection", + "arguments": { + "pipeline": [] + }, + "saveResultAsEntity": "changeStream" + } ], "expectEvents": [ { - "client": "client0", + "client": "client", "eventType": "cmap", "events": [ { @@ -228,25 +3006,29 @@ ] }, { - "client": "client0", + "client": "client", "events": [ { "commandStartedEvent": { "command": { "ping": 1 }, - "databaseName": "retryable-handshake-tests" + "databaseName": "retryable-reads-handshake-tests" + } + }, + { + "commandFailedEvent": { + "commandName": "ping" } }, { "commandStartedEvent": { - "command": { - "find": "coll", - "filter": { - "_id": 2 - } - }, - "databaseName": "retryable-handshake-tests" + "commandName": "aggregate" + } + }, + { + "commandSucceededEvent": { + 
"commandName": "aggregate" } } ] diff --git a/test/retryable_writes/unified/handshakeError.json b/test/retryable_writes/unified/handshakeError.json index 6d6b4ac491..e07e5412b2 100644 --- a/test/retryable_writes/unified/handshakeError.json +++ b/test/retryable_writes/unified/handshakeError.json @@ -15,25 +15,27 @@ "createEntities": [ { "client": { - "id": "client0", + "id": "client", "useMultipleMongoses": false, "observeEvents": [ + "connectionCheckOutStartedEvent", "commandStartedEvent", - "connectionCheckOutStartedEvent" + "commandSucceededEvent", + "commandFailedEvent" ] } }, { "database": { - "id": "database0", - "client": "client0", - "databaseName": "retryable-handshake-tests" + "id": "database", + "client": "client", + "databaseName": "retryable-writes-handshake-tests" } }, { "collection": { - "id": "collection0", - "database": "database0", + "id": "collection", + "database": "database", "collectionName": "coll" } } @@ -41,7 +43,7 @@ "initialData": [ { "collectionName": "coll", - "databaseName": "retryable-handshake-tests", + "databaseName": "retryable-writes-handshake-tests", "documents": [ { "_id": 1, @@ -52,13 +54,13 @@ ], "tests": [ { - "description": "InsertOne succeeds after retryable handshake error", + "description": "insertOne succeeds after retryable handshake network error", "operations": [ { "name": "failPoint", "object": "testRunner", "arguments": { - "client": "client0", + "client": "client", "failPoint": { "configureFailPoint": "failCommand", "mode": { @@ -66,8 +68,8 @@ }, "data": { "failCommands": [ - "saslContinue", - "ping" + "ping", + "saslContinue" ], "closeConnection": true } @@ -76,7 +78,7 @@ }, { "name": "runCommand", - "object": "database0", + "object": "database", "arguments": { "commandName": "ping", "command": { @@ -89,7 +91,7 @@ }, { "name": "insertOne", - "object": "collection0", + "object": "collection", "arguments": { "document": { "_id": 2, @@ -100,7 +102,7 @@ ], "expectEvents": [ { - "client": "client0", + "client": "client", "eventType": "cmap", "events": [ { @@ -118,59 +120,1385 @@ ] }, { - "client": "client0", + "client": "client", "events": [ { "commandStartedEvent": { "command": { "ping": 1 }, - "databaseName": "retryable-handshake-tests" + "databaseName": "retryable-writes-handshake-tests" } }, + { + "commandFailedEvent": { + "commandName": "ping" + } + }, + { + "commandStartedEvent": { + "commandName": "insert" + } + }, + { + "commandSucceededEvent": { + "commandName": "insert" + } + } + ] + } + ] + }, + { + "description": "insertOne succeeds after retryable handshake server error (ShutdownInProgress)", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "ping", + "saslContinue" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectError": { + "isError": true + } + }, + { + "name": "insertOne", + "object": "collection", + "arguments": { + "document": { + "_id": 2, + "x": 22 + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "cmap", + "events": [ + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + } + ] + }, + { + "client": "client", + "events": [ + { + "commandStartedEvent": { 
+ "command": { + "ping": 1 + }, + "databaseName": "retryable-writes-handshake-tests" + } + }, + { + "commandFailedEvent": { + "commandName": "ping" + } + }, + { + "commandStartedEvent": { + "commandName": "insert" + } + }, + { + "commandSucceededEvent": { + "commandName": "insert" + } + } + ] + } + ] + }, + { + "description": "insertMany succeeds after retryable handshake network error", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "ping", + "saslContinue" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectError": { + "isError": true + } + }, + { + "name": "insertMany", + "object": "collection", + "arguments": { + "documents": [ + { + "_id": 2, + "x": 22 + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "cmap", + "events": [ + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + } + ] + }, + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1 + }, + "databaseName": "retryable-writes-handshake-tests" + } + }, + { + "commandFailedEvent": { + "commandName": "ping" + } + }, + { + "commandStartedEvent": { + "commandName": "insert" + } + }, + { + "commandSucceededEvent": { + "commandName": "insert" + } + } + ] + } + ] + }, + { + "description": "insertMany succeeds after retryable handshake server error (ShutdownInProgress)", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "ping", + "saslContinue" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectError": { + "isError": true + } + }, + { + "name": "insertMany", + "object": "collection", + "arguments": { + "documents": [ + { + "_id": 2, + "x": 22 + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "cmap", + "events": [ + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + } + ] + }, + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1 + }, + "databaseName": "retryable-writes-handshake-tests" + } + }, + { + "commandFailedEvent": { + "commandName": "ping" + } + }, + { + "commandStartedEvent": { + "commandName": "insert" + } + }, + { + "commandSucceededEvent": { + "commandName": "insert" + } + } + ] + } + ] + }, + { + "description": "deleteOne succeeds after retryable handshake network error", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "ping", + "saslContinue" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + 
} + }, + "expectError": { + "isError": true + } + }, + { + "name": "deleteOne", + "object": "collection", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "cmap", + "events": [ + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + } + ] + }, + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1 + }, + "databaseName": "retryable-writes-handshake-tests" + } + }, + { + "commandFailedEvent": { + "commandName": "ping" + } + }, + { + "commandStartedEvent": { + "commandName": "delete" + } + }, + { + "commandSucceededEvent": { + "commandName": "delete" + } + } + ] + } + ] + }, + { + "description": "deleteOne succeeds after retryable handshake server error (ShutdownInProgress)", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "ping", + "saslContinue" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectError": { + "isError": true + } + }, + { + "name": "deleteOne", + "object": "collection", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "cmap", + "events": [ + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + } + ] + }, + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1 + }, + "databaseName": "retryable-writes-handshake-tests" + } + }, + { + "commandFailedEvent": { + "commandName": "ping" + } + }, + { + "commandStartedEvent": { + "commandName": "delete" + } + }, + { + "commandSucceededEvent": { + "commandName": "delete" + } + } + ] + } + ] + }, + { + "description": "replaceOne succeeds after retryable handshake network error", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "ping", + "saslContinue" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectError": { + "isError": true + } + }, + { + "name": "replaceOne", + "object": "collection", + "arguments": { + "filter": {}, + "replacement": { + "x": 22 + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "cmap", + "events": [ + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + } + ] + }, + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1 + }, + "databaseName": "retryable-writes-handshake-tests" + } + }, + { + "commandFailedEvent": { + "commandName": "ping" + } + }, + { + "commandStartedEvent": { + "commandName": "update" + } + }, + { + "commandSucceededEvent": { + "commandName": "update" + } + } + ] + } + ] + }, + { + "description": "replaceOne succeeds 
after retryable handshake server error (ShutdownInProgress)", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "ping", + "saslContinue" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectError": { + "isError": true + } + }, + { + "name": "replaceOne", + "object": "collection", + "arguments": { + "filter": {}, + "replacement": { + "x": 22 + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "cmap", + "events": [ + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + } + ] + }, + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1 + }, + "databaseName": "retryable-writes-handshake-tests" + } + }, + { + "commandFailedEvent": { + "commandName": "ping" + } + }, + { + "commandStartedEvent": { + "commandName": "update" + } + }, + { + "commandSucceededEvent": { + "commandName": "update" + } + } + ] + } + ] + }, + { + "description": "updateOne succeeds after retryable handshake network error", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "ping", + "saslContinue" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectError": { + "isError": true + } + }, + { + "name": "updateOne", + "object": "collection", + "arguments": { + "filter": {}, + "update": { + "$set": { + "x": 22 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "cmap", + "events": [ + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + } + ] + }, + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1 + }, + "databaseName": "retryable-writes-handshake-tests" + } + }, + { + "commandFailedEvent": { + "commandName": "ping" + } + }, + { + "commandStartedEvent": { + "commandName": "update" + } + }, + { + "commandSucceededEvent": { + "commandName": "update" + } + } + ] + } + ] + }, + { + "description": "updateOne succeeds after retryable handshake server error (ShutdownInProgress)", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "ping", + "saslContinue" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectError": { + "isError": true + } + }, + { + "name": "updateOne", + "object": "collection", + "arguments": { + "filter": {}, + "update": { + "$set": { + "x": 22 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "cmap", + "events": [ + { + "connectionCheckOutStartedEvent": {} + 
}, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + } + ] + }, + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1 + }, + "databaseName": "retryable-writes-handshake-tests" + } + }, + { + "commandFailedEvent": { + "commandName": "ping" + } + }, + { + "commandStartedEvent": { + "commandName": "update" + } + }, + { + "commandSucceededEvent": { + "commandName": "update" + } + } + ] + } + ] + }, + { + "description": "findOneAndDelete succeeds after retryable handshake network error", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "ping", + "saslContinue" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectError": { + "isError": true + } + }, + { + "name": "findOneAndDelete", + "object": "collection", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "cmap", + "events": [ + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + } + ] + }, + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1 + }, + "databaseName": "retryable-writes-handshake-tests" + } + }, + { + "commandFailedEvent": { + "commandName": "ping" + } + }, + { + "commandStartedEvent": { + "commandName": "findAndModify" + } + }, + { + "commandSucceededEvent": { + "commandName": "findAndModify" + } + } + ] + } + ] + }, + { + "description": "findOneAndDelete succeeds after retryable handshake server error (ShutdownInProgress)", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "ping", + "saslContinue" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectError": { + "isError": true + } + }, + { + "name": "findOneAndDelete", + "object": "collection", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "cmap", + "events": [ + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + } + ] + }, + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1 + }, + "databaseName": "retryable-writes-handshake-tests" + } + }, + { + "commandFailedEvent": { + "commandName": "ping" + } + }, + { + "commandStartedEvent": { + "commandName": "findAndModify" + } + }, + { + "commandSucceededEvent": { + "commandName": "findAndModify" + } + } + ] + } + ] + }, + { + "description": "findOneAndReplace succeeds after retryable handshake network error", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + 
"failCommands": [ + "ping", + "saslContinue" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectError": { + "isError": true + } + }, + { + "name": "findOneAndReplace", + "object": "collection", + "arguments": { + "filter": {}, + "replacement": { + "x": 22 + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "cmap", + "events": [ + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + } + ] + }, + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1 + }, + "databaseName": "retryable-writes-handshake-tests" + } + }, + { + "commandFailedEvent": { + "commandName": "ping" + } + }, + { + "commandStartedEvent": { + "commandName": "findAndModify" + } + }, + { + "commandSucceededEvent": { + "commandName": "findAndModify" + } + } + ] + } + ] + }, + { + "description": "findOneAndReplace succeeds after retryable handshake server error (ShutdownInProgress)", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "ping", + "saslContinue" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectError": { + "isError": true + } + }, + { + "name": "findOneAndReplace", + "object": "collection", + "arguments": { + "filter": {}, + "replacement": { + "x": 22 + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "cmap", + "events": [ + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + } + ] + }, + { + "client": "client", + "events": [ { "commandStartedEvent": { "command": { - "insert": "coll", - "documents": [ - { - "_id": 2, - "x": 22 - } - ] + "ping": 1 }, - "commandName": "insert", - "databaseName": "retryable-handshake-tests" + "databaseName": "retryable-writes-handshake-tests" + } + }, + { + "commandFailedEvent": { + "commandName": "ping" + } + }, + { + "commandStartedEvent": { + "commandName": "findAndModify" + } + }, + { + "commandSucceededEvent": { + "commandName": "findAndModify" } } ] } + ] + }, + { + "description": "findOneAndUpdate succeeds after retryable handshake network error", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "ping", + "saslContinue" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectError": { + "isError": true + } + }, + { + "name": "findOneAndUpdate", + "object": "collection", + "arguments": { + "filter": {}, + "update": { + "$set": { + "x": 22 + } + } + } + } ], - "outcome": [ + "expectEvents": [ + { + "client": "client", + "eventType": "cmap", + "events": [ + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + 
"connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + } + ] + }, { - "collectionName": "coll", - "databaseName": "retryable-handshake-tests", - "documents": [ + "client": "client", + "events": [ { - "_id": 1, - "x": 11 + "commandStartedEvent": { + "command": { + "ping": 1 + }, + "databaseName": "retryable-writes-handshake-tests" + } }, { - "_id": 2, - "x": 22 + "commandFailedEvent": { + "commandName": "ping" + } + }, + { + "commandStartedEvent": { + "commandName": "findAndModify" + } + }, + { + "commandSucceededEvent": { + "commandName": "findAndModify" + } } ] } ] }, { - "description": "InsertOne succeeds after retryable handshake error ShutdownInProgress", + "description": "findOneAndUpdate succeeds after retryable handshake server error (ShutdownInProgress)", "operations": [ { "name": "failPoint", "object": "testRunner", "arguments": { - "client": "client0", + "client": "client", "failPoint": { "configureFailPoint": "failCommand", "mode": { @@ -178,17 +1506,17 @@ }, "data": { "failCommands": [ - "saslContinue", - "ping" + "ping", + "saslContinue" ], - "errorCode": 91 + "closeConnection": true } } } }, { "name": "runCommand", - "object": "database0", + "object": "database", "arguments": { "commandName": "ping", "command": { @@ -200,19 +1528,21 @@ } }, { - "name": "insertOne", - "object": "collection0", + "name": "findOneAndUpdate", + "object": "collection", "arguments": { - "document": { - "_id": 2, - "x": 22 + "filter": {}, + "update": { + "$set": { + "x": 22 + } } } } ], "expectEvents": [ { - "client": "client0", + "client": "client", "eventType": "cmap", "events": [ { @@ -230,46 +1560,234 @@ ] }, { - "client": "client0", + "client": "client", "events": [ { "commandStartedEvent": { "command": { "ping": 1 }, - "databaseName": "retryable-handshake-tests" + "databaseName": "retryable-writes-handshake-tests" + } + }, + { + "commandFailedEvent": { + "commandName": "ping" + } + }, + { + "commandStartedEvent": { + "commandName": "findAndModify" + } + }, + { + "commandSucceededEvent": { + "commandName": "findAndModify" + } + } + ] + } + ] + }, + { + "description": "bulkWrite succeeds after retryable handshake network error", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "ping", + "saslContinue" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectError": { + "isError": true + } + }, + { + "name": "bulkWrite", + "object": "collection", + "arguments": { + "requests": [ + { + "insertOne": { + "document": { + "_id": 2, + "x": 22 + } + } } + ] + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "cmap", + "events": [ + { + "connectionCheckOutStartedEvent": {} }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + } + ] + }, + { + "client": "client", + "events": [ { "commandStartedEvent": { "command": { - "insert": "coll", - "documents": [ - { - "_id": 2, - "x": 22 - } - ] + "ping": 1 }, - "commandName": "insert", - "databaseName": "retryable-handshake-tests" + "databaseName": "retryable-writes-handshake-tests" + } + }, + { + "commandFailedEvent": { + "commandName": "ping" + } + }, + { + "commandStartedEvent": { + "commandName": "insert" + } + }, 
+ { + "commandSucceededEvent": { + "commandName": "insert" } } ] } + ] + }, + { + "description": "bulkWrite succeeds after retryable handshake server error (ShutdownInProgress)", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "ping", + "saslContinue" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectError": { + "isError": true + } + }, + { + "name": "bulkWrite", + "object": "collection", + "arguments": { + "requests": [ + { + "insertOne": { + "document": { + "_id": 2, + "x": 22 + } + } + } + ] + } + } ], - "outcome": [ + "expectEvents": [ + { + "client": "client", + "eventType": "cmap", + "events": [ + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + } + ] + }, { - "collectionName": "coll", - "databaseName": "retryable-handshake-tests", - "documents": [ + "client": "client", + "events": [ { - "_id": 1, - "x": 11 + "commandStartedEvent": { + "command": { + "ping": 1 + }, + "databaseName": "retryable-writes-handshake-tests" + } }, { - "_id": 2, - "x": 22 + "commandFailedEvent": { + "commandName": "ping" + } + }, + { + "commandStartedEvent": { + "commandName": "insert" + } + }, + { + "commandSucceededEvent": { + "commandName": "insert" + } } ] } From 92e6150d84f128463f6e8f5f6b9d0e2537fef64e Mon Sep 17 00:00:00 2001 From: Julius Park Date: Thu, 10 Nov 2022 14:19:55 -0800 Subject: [PATCH 0297/1588] PYTHON-3493 Bulk Write InsertOne Should Be Parameter Of Collection Type (#1106) --- doc/examples/type_hints.rst | 20 ++++++++ mypy.ini | 2 +- pymongo/collection.py | 11 ++++- pymongo/encryption.py | 5 +- pymongo/operations.py | 13 +++--- pymongo/typings.py | 7 +++ test/__init__.py | 2 +- test/mockupdb/test_cluster_time.py | 2 +- test/mockupdb/test_op_msg.py | 6 +-- test/test_bulk.py | 4 +- test/test_client.py | 1 + test/test_database.py | 5 +- test/test_mypy.py | 75 +++++++++++++++++++++++++++--- test/test_server_selection.py | 6 ++- test/test_session.py | 6 ++- test/test_transactions.py | 4 +- test/utils.py | 13 +++--- 17 files changed, 144 insertions(+), 38 deletions(-) diff --git a/doc/examples/type_hints.rst b/doc/examples/type_hints.rst index 38349038b1..b413ad7b24 100644 --- a/doc/examples/type_hints.rst +++ b/doc/examples/type_hints.rst @@ -113,6 +113,26 @@ These methods automatically add an "_id" field. >>> assert result is not None >>> assert result["year"] == 1993 >>> # This will raise a type-checking error, despite being present, because it is added by PyMongo. + >>> assert result["_id"] # type:ignore[typeddict-item] + +This same typing scheme works for all of the insert methods (:meth:`~pymongo.collection.Collection.insert_one`, +:meth:`~pymongo.collection.Collection.insert_many`, and :meth:`~pymongo.collection.Collection.bulk_write`). +For `bulk_write` both :class:`~pymongo.operations.InsertOne` and :class:`~pymongo.operations.ReplaceOne` operators are generic. + +.. 
doctest:: + :pyversion: >= 3.8 + + >>> from typing import TypedDict + >>> from pymongo import MongoClient + >>> from pymongo.operations import InsertOne + >>> from pymongo.collection import Collection + >>> client: MongoClient = MongoClient() + >>> collection: Collection[Movie] = client.test.test + >>> inserted = collection.bulk_write([InsertOne(Movie(name="Jurassic Park", year=1993))]) + >>> result = collection.find_one({"name": "Jurassic Park"}) + >>> assert result is not None + >>> assert result["year"] == 1993 + >>> # This will raise a type-checking error, despite being present, because it is added by PyMongo. >>> assert result["_id"] # type:ignore[typeddict-item] Modeling Document Types with TypedDict diff --git a/mypy.ini b/mypy.ini index 9b1348472c..2562177ab1 100644 --- a/mypy.ini +++ b/mypy.ini @@ -33,7 +33,7 @@ ignore_missing_imports = True ignore_missing_imports = True [mypy-test.test_mypy] -warn_unused_ignores = false +warn_unused_ignores = True [mypy-winkerberos.*] ignore_missing_imports = True diff --git a/pymongo/collection.py b/pymongo/collection.py index 23efe8fd35..600d73c4bc 100644 --- a/pymongo/collection.py +++ b/pymongo/collection.py @@ -77,7 +77,14 @@ _FIND_AND_MODIFY_DOC_FIELDS = {"value": 1} -_WriteOp = Union[InsertOne, DeleteOne, DeleteMany, ReplaceOne, UpdateOne, UpdateMany] +_WriteOp = Union[ + InsertOne[_DocumentType], + DeleteOne, + DeleteMany, + ReplaceOne[_DocumentType], + UpdateOne, + UpdateMany, +] # Hint supports index name, "myIndex", or list of index pairs: [('x', 1), ('y', -1)] _IndexList = Sequence[Tuple[str, Union[int, str, Mapping[str, Any]]]] _IndexKeyHint = Union[str, _IndexList] @@ -436,7 +443,7 @@ def with_options( @_csot.apply def bulk_write( self, - requests: Sequence[_WriteOp], + requests: Sequence[_WriteOp[_DocumentType]], ordered: bool = True, bypass_document_validation: bool = False, session: Optional["ClientSession"] = None, diff --git a/pymongo/encryption.py b/pymongo/encryption.py index 9fef5963a6..92a268f452 100644 --- a/pymongo/encryption.py +++ b/pymongo/encryption.py @@ -18,7 +18,7 @@ import enum import socket import weakref -from typing import Any, Mapping, Optional, Sequence +from typing import Any, Generic, Mapping, Optional, Sequence try: from pymongocrypt.auto_encrypter import AutoEncrypter @@ -55,6 +55,7 @@ from pymongo.read_concern import ReadConcern from pymongo.results import BulkWriteResult, DeleteResult from pymongo.ssl_support import get_ssl_context +from pymongo.typings import _DocumentType from pymongo.uri_parser import parse_host from pymongo.write_concern import WriteConcern @@ -430,7 +431,7 @@ class QueryType(str, enum.Enum): """Used to encrypt a value for an equality query.""" -class ClientEncryption(object): +class ClientEncryption(Generic[_DocumentType]): """Explicit client-side field level encryption.""" def __init__( diff --git a/pymongo/operations.py b/pymongo/operations.py index 84e8bf4d35..92a4dad0ac 100644 --- a/pymongo/operations.py +++ b/pymongo/operations.py @@ -13,21 +13,22 @@ # limitations under the License. 
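The hunk below makes ``InsertOne`` (and ``ReplaceOne``) generic over the collection's document type. As a quick illustration of what that buys for ``bulk_write`` — a hypothetical mypy session, not code from this patch; the ``Movie`` TypedDict mirrors the one used in the docs above::

    from typing import TypedDict

    from pymongo import MongoClient
    from pymongo.collection import Collection
    from pymongo.operations import InsertOne

    class Movie(TypedDict):
        name: str
        year: int

    client: MongoClient[Movie] = MongoClient()
    coll: Collection[Movie] = client.test.test

    # Accepted: the document matches the collection's type parameter.
    coll.bulk_write([InsertOne(Movie(name="Jurassic Park", year=1993))])

    # Rejected by mypy (typeddict-item): "year" must be an int, and
    # InsertOne[Movie] propagates that requirement through bulk_write.
    coll.bulk_write([InsertOne(Movie(name="Jurassic Park", year="1993"))])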
"""Operation class definitions.""" -from typing import Any, Dict, List, Mapping, Optional, Sequence, Tuple, Union +from typing import Any, Dict, Generic, List, Mapping, Optional, Sequence, Tuple, Union +from bson.raw_bson import RawBSONDocument from pymongo import helpers from pymongo.collation import validate_collation_or_none from pymongo.common import validate_boolean, validate_is_mapping, validate_list from pymongo.helpers import _gen_index_name, _index_document, _index_list -from pymongo.typings import _CollationIn, _DocumentIn, _Pipeline +from pymongo.typings import _CollationIn, _DocumentType, _Pipeline -class InsertOne(object): +class InsertOne(Generic[_DocumentType]): """Represents an insert_one operation.""" __slots__ = ("_doc",) - def __init__(self, document: _DocumentIn) -> None: + def __init__(self, document: Union[_DocumentType, RawBSONDocument]) -> None: """Create an InsertOne instance. For use with :meth:`~pymongo.collection.Collection.bulk_write`. @@ -170,7 +171,7 @@ def __ne__(self, other: Any) -> bool: return not self == other -class ReplaceOne(object): +class ReplaceOne(Generic[_DocumentType]): """Represents a replace_one operation.""" __slots__ = ("_filter", "_doc", "_upsert", "_collation", "_hint") @@ -178,7 +179,7 @@ class ReplaceOne(object): def __init__( self, filter: Mapping[str, Any], - replacement: Mapping[str, Any], + replacement: Union[_DocumentType, RawBSONDocument], upsert: bool = False, collation: Optional[_CollationIn] = None, hint: Optional[_IndexKeyHint] = None, diff --git a/pymongo/typings.py b/pymongo/typings.py index 14e059a8f0..fe0e8bd523 100644 --- a/pymongo/typings.py +++ b/pymongo/typings.py @@ -37,3 +37,10 @@ _Pipeline = Sequence[Mapping[str, Any]] _DocumentOut = _DocumentIn _DocumentType = TypeVar("_DocumentType", bound=Mapping[str, Any]) + + +def strip_optional(elem): + """This function is to allow us to cast all of the elements of an iterator from Optional[_T] to _T + while inside a list comprehension.""" + assert elem is not None + return elem diff --git a/test/__init__.py b/test/__init__.py index eb66e45667..20b1d00ca8 100644 --- a/test/__init__.py +++ b/test/__init__.py @@ -1090,7 +1090,7 @@ def print_thread_stacks(pid: int) -> None: class IntegrationTest(PyMongoTestCase): """Base class for TestCases that need a connection to MongoDB to pass.""" - client: MongoClient + client: MongoClient[dict] db: Database credentials: Dict[str, str] diff --git a/test/mockupdb/test_cluster_time.py b/test/mockupdb/test_cluster_time.py index cb06a129d2..e4f3e12d07 100644 --- a/test/mockupdb/test_cluster_time.py +++ b/test/mockupdb/test_cluster_time.py @@ -60,7 +60,7 @@ def callback(client): self.cluster_time_conversation(callback, [{"ok": 1}] * 2) def test_bulk(self): - def callback(client): + def callback(client: MongoClient[dict]) -> None: client.db.collection.bulk_write( [InsertOne({}), InsertOne({}), UpdateOne({}, {"$inc": {"x": 1}}), DeleteMany({})] ) diff --git a/test/mockupdb/test_op_msg.py b/test/mockupdb/test_op_msg.py index da7ff3d33e..22fe38fd02 100755 --- a/test/mockupdb/test_op_msg.py +++ b/test/mockupdb/test_op_msg.py @@ -137,14 +137,14 @@ # Legacy methods Operation( "bulk_write_insert", - lambda coll: coll.bulk_write([InsertOne({}), InsertOne({})]), + lambda coll: coll.bulk_write([InsertOne[dict]({}), InsertOne[dict]({})]), request=OpMsg({"insert": "coll"}, flags=0), reply={"ok": 1, "n": 2}, ), Operation( "bulk_write_insert-w0", lambda coll: coll.with_options(write_concern=WriteConcern(w=0)).bulk_write( - [InsertOne({}), InsertOne({})] + 
[InsertOne[dict]({}), InsertOne[dict]({})] ), request=OpMsg({"insert": "coll"}, flags=0), reply={"ok": 1, "n": 2}, @@ -152,7 +152,7 @@ Operation( "bulk_write_insert-w0-unordered", lambda coll: coll.with_options(write_concern=WriteConcern(w=0)).bulk_write( - [InsertOne({}), InsertOne({})], ordered=False + [InsertOne[dict]({}), InsertOne[dict]({})], ordered=False ), request=OpMsg({"insert": "coll"}, flags=OP_MSG_FLAGS["moreToCome"]), reply=None, diff --git a/test/test_bulk.py b/test/test_bulk.py index fae1c7e201..ac7073c0ef 100644 --- a/test/test_bulk.py +++ b/test/test_bulk.py @@ -296,7 +296,7 @@ def test_upsert(self): def test_numerous_inserts(self): # Ensure we don't exceed server's maxWriteBatchSize size limit. n_docs = client_context.max_write_batch_size + 100 - requests = [InsertOne({}) for _ in range(n_docs)] + requests = [InsertOne[dict]({}) for _ in range(n_docs)] result = self.coll.bulk_write(requests, ordered=False) self.assertEqual(n_docs, result.inserted_count) self.assertEqual(n_docs, self.coll.count_documents({})) @@ -347,7 +347,7 @@ def test_bulk_write_no_results(self): def test_bulk_write_invalid_arguments(self): # The requests argument must be a list. - generator = (InsertOne({}) for _ in range(10)) + generator = (InsertOne[dict]({}) for _ in range(10)) with self.assertRaises(TypeError): self.coll.bulk_write(generator) # type: ignore[arg-type] diff --git a/test/test_client.py b/test/test_client.py index 5bb116dbda..a33881fded 100644 --- a/test/test_client.py +++ b/test/test_client.py @@ -1652,6 +1652,7 @@ def test_network_error_message(self): with self.fail_point( {"mode": {"times": 1}, "data": {"closeConnection": True, "failCommands": ["find"]}} ): + assert client.address is not None expected = "%s:%s: " % client.address with self.assertRaisesRegex(AutoReconnect, expected): client.pymongo_test.test.find_one({}) diff --git a/test/test_database.py b/test/test_database.py index d49ac8324f..49387b8bb9 100644 --- a/test/test_database.py +++ b/test/test_database.py @@ -16,7 +16,7 @@ import re import sys -from typing import Any, Iterable, List, Mapping +from typing import Any, Iterable, List, Mapping, Union sys.path[0:0] = [""] @@ -201,7 +201,7 @@ def test_list_collection_names_filter(self): db.capped.insert_one({}) db.non_capped.insert_one({}) self.addCleanup(client.drop_database, db.name) - + filter: Union[None, dict] # Should not send nameOnly. for filter in ({"options.capped": True}, {"options.capped": True, "name": "capped"}): results.clear() @@ -210,7 +210,6 @@ def test_list_collection_names_filter(self): self.assertNotIn("nameOnly", results["started"][0].command) # Should send nameOnly (except on 2.6). 
- filter: Any for filter in (None, {}, {"name": {"$in": ["capped", "non_capped"]}}): results.clear() names = db.list_collection_names(filter=filter) diff --git a/test/test_mypy.py b/test/test_mypy.py index 807f0e8ef3..58e69853ca 100644 --- a/test/test_mypy.py +++ b/test/test_mypy.py @@ -17,7 +17,7 @@ import os import tempfile import unittest -from typing import TYPE_CHECKING, Any, Dict, Iterable, Iterator, List +from typing import TYPE_CHECKING, Any, Dict, Iterable, Iterator, List, Union try: from typing_extensions import NotRequired, TypedDict @@ -42,7 +42,7 @@ class ImplicitMovie(TypedDict): Movie = dict # type:ignore[misc,assignment] ImplicitMovie = dict # type: ignore[assignment,misc] MovieWithId = dict # type: ignore[assignment,misc] - TypedDict = None # type: ignore[assignment] + TypedDict = None NotRequired = None # type: ignore[assignment] @@ -59,7 +59,7 @@ class ImplicitMovie(TypedDict): from bson.son import SON from pymongo import ASCENDING, MongoClient from pymongo.collection import Collection -from pymongo.operations import InsertOne +from pymongo.operations import DeleteOne, InsertOne, ReplaceOne from pymongo.read_preferences import ReadPreference TEST_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "mypy_fails") @@ -124,11 +124,40 @@ def to_list(iterable: Iterable[Dict[str, Any]]) -> List[Dict[str, Any]]: docs = to_list(cursor) self.assertTrue(docs) + @only_type_check def test_bulk_write(self) -> None: self.coll.insert_one({}) - requests = [InsertOne({})] - result = self.coll.bulk_write(requests) - self.assertTrue(result.acknowledged) + coll: Collection[Movie] = self.coll + requests: List[InsertOne[Movie]] = [InsertOne(Movie(name="American Graffiti", year=1973))] + self.assertTrue(coll.bulk_write(requests).acknowledged) + new_requests: List[Union[InsertOne[Movie], ReplaceOne[Movie]]] = [] + input_list: List[Union[InsertOne[Movie], ReplaceOne[Movie]]] = [ + InsertOne(Movie(name="American Graffiti", year=1973)), + ReplaceOne({}, Movie(name="American Graffiti", year=1973)), + ] + for i in input_list: + new_requests.append(i) + self.assertTrue(coll.bulk_write(new_requests).acknowledged) + + # Because ReplaceOne is not generic, type checking is not enforced for ReplaceOne in the first example. 
+ @only_type_check + def test_bulk_write_heterogeneous(self): + coll: Collection[Movie] = self.coll + requests: List[Union[InsertOne[Movie], ReplaceOne, DeleteOne]] = [ + InsertOne(Movie(name="American Graffiti", year=1973)), + ReplaceOne({}, {"name": "American Graffiti", "year": "WRONG_TYPE"}), + DeleteOne({}), + ] + self.assertTrue(coll.bulk_write(requests).acknowledged) + requests_two: List[Union[InsertOne[Movie], ReplaceOne[Movie], DeleteOne]] = [ + InsertOne(Movie(name="American Graffiti", year=1973)), + ReplaceOne( + {}, + {"name": "American Graffiti", "year": "WRONG_TYPE"}, # type:ignore[typeddict-item] + ), + DeleteOne({}), + ] + self.assertTrue(coll.bulk_write(requests_two).acknowledged) def test_command(self) -> None: result: Dict = self.client.admin.command("ping") @@ -340,6 +369,40 @@ def test_typeddict_document_type_insertion(self) -> None: ) coll.insert_many([bad_movie]) + @only_type_check + def test_bulk_write_document_type_insertion(self): + client: MongoClient[MovieWithId] = MongoClient() + coll: Collection[MovieWithId] = client.test.test + coll.bulk_write( + [InsertOne(Movie({"name": "THX-1138", "year": 1971}))] # type:ignore[arg-type] + ) + mov_dict = {"_id": ObjectId(), "name": "THX-1138", "year": 1971} + coll.bulk_write( + [InsertOne(mov_dict)] # type:ignore[arg-type] + ) + coll.bulk_write( + [ + InsertOne({"_id": ObjectId(), "name": "THX-1138", "year": 1971}) + ] # No error because it is in-line. + ) + + @only_type_check + def test_bulk_write_document_type_replacement(self): + client: MongoClient[MovieWithId] = MongoClient() + coll: Collection[MovieWithId] = client.test.test + coll.bulk_write( + [ReplaceOne({}, Movie({"name": "THX-1138", "year": 1971}))] # type:ignore[arg-type] + ) + mov_dict = {"_id": ObjectId(), "name": "THX-1138", "year": 1971} + coll.bulk_write( + [ReplaceOne({}, mov_dict)] # type:ignore[arg-type] + ) + coll.bulk_write( + [ + ReplaceOne({}, {"_id": ObjectId(), "name": "THX-1138", "year": 1971}) + ] # No error because it is in-line. + ) + @only_type_check def test_typeddict_explicit_document_type(self) -> None: out = MovieWithId(_id=ObjectId(), name="THX-1138", year=1971) diff --git a/test/test_server_selection.py b/test/test_server_selection.py index a80d5f13d9..c3f3762f9a 100644 --- a/test/test_server_selection.py +++ b/test/test_server_selection.py @@ -23,6 +23,7 @@ from pymongo.server_selectors import writable_server_selector from pymongo.settings import TopologySettings from pymongo.topology import Topology +from pymongo.typings import strip_optional sys.path[0:0] = [""] @@ -85,7 +86,10 @@ def all_hosts_started(): ) wait_until(all_hosts_started, "receive heartbeat from all hosts") - expected_port = max([n.address[1] for n in client._topology._description.readable_servers]) + + expected_port = max( + [strip_optional(n.address[1]) for n in client._topology._description.readable_servers] + ) # Insert 1 record and access it 10 times. 
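(The ``strip_optional`` helper applied in the hunk above is just a typed identity assertion; a minimal sketch of the pattern it enables — illustrative, not taken from the patch)::

    from typing import List, Optional, Tuple

    from pymongo.typings import strip_optional

    # Server addresses are (host, port) pairs whose port is Optional[int];
    # strip_optional narrows each port to int so max() type-checks, while
    # still asserting at runtime that no port is actually None.
    addresses: List[Tuple[str, Optional[int]]] = [("a", 27017), ("b", 27018)]
    expected_port = max(strip_optional(address[1]) for address in addresses)
    assert expected_port == 27018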
coll.insert_one({"name": "John Doe"}) diff --git a/test/test_session.py b/test/test_session.py index f22a2d5eab..386bab295c 100644 --- a/test/test_session.py +++ b/test/test_session.py @@ -898,7 +898,9 @@ def _test_writes(self, op): @client_context.require_no_standalone def test_writes(self): - self._test_writes(lambda coll, session: coll.bulk_write([InsertOne({})], session=session)) + self._test_writes( + lambda coll, session: coll.bulk_write([InsertOne[dict]({})], session=session) + ) self._test_writes(lambda coll, session: coll.insert_one({}, session=session)) self._test_writes(lambda coll, session: coll.insert_many([{}], session=session)) self._test_writes( @@ -944,7 +946,7 @@ def _test_no_read_concern(self, op): @client_context.require_no_standalone def test_writes_do_not_include_read_concern(self): self._test_no_read_concern( - lambda coll, session: coll.bulk_write([InsertOne({})], session=session) + lambda coll, session: coll.bulk_write([InsertOne[dict]({})], session=session) ) self._test_no_read_concern(lambda coll, session: coll.insert_one({}, session=session)) self._test_no_read_concern(lambda coll, session: coll.insert_many([{}], session=session)) diff --git a/test/test_transactions.py b/test/test_transactions.py index 4cee3fa236..02e691329e 100644 --- a/test/test_transactions.py +++ b/test/test_transactions.py @@ -363,7 +363,7 @@ def test_transaction_direct_connection(self): coll.insert_one({}) self.assertEqual(client.topology_description.topology_type_name, "Single") ops = [ - (coll.bulk_write, [[InsertOne({})]]), + (coll.bulk_write, [[InsertOne[dict]({})]]), (coll.insert_one, [{}]), (coll.insert_many, [[{}, {}]]), (coll.replace_one, [{}, {}]), @@ -385,7 +385,7 @@ def test_transaction_direct_connection(self): ] for f, args in ops: with client.start_session() as s, s.start_transaction(): - res = f(*args, session=s) + res = f(*args, session=s) # type:ignore[operator] if isinstance(res, (CommandCursor, Cursor)): list(res) diff --git a/test/utils.py b/test/utils.py index 59349f4fdc..6b0876a158 100644 --- a/test/utils.py +++ b/test/utils.py @@ -29,6 +29,7 @@ from collections import abc, defaultdict from functools import partial from test import client_context, db_pwd, db_user +from typing import Any from bson import json_util from bson.objectid import ObjectId @@ -557,27 +558,27 @@ def _mongo_client(host, port, authenticate=True, directConnection=None, **kwargs return MongoClient(uri, port, **client_options) -def single_client_noauth(h=None, p=None, **kwargs): +def single_client_noauth(h: Any = None, p: Any = None, **kwargs: Any) -> MongoClient[dict]: """Make a direct connection. Don't authenticate.""" return _mongo_client(h, p, authenticate=False, directConnection=True, **kwargs) -def single_client(h=None, p=None, **kwargs): +def single_client(h: Any = None, p: Any = None, **kwargs: Any) -> MongoClient[dict]: """Make a direct connection, and authenticate if necessary.""" return _mongo_client(h, p, directConnection=True, **kwargs) -def rs_client_noauth(h=None, p=None, **kwargs): +def rs_client_noauth(h: Any = None, p: Any = None, **kwargs: Any) -> MongoClient[dict]: """Connect to the replica set. 
Don't authenticate.""" return _mongo_client(h, p, authenticate=False, **kwargs) -def rs_client(h=None, p=None, **kwargs): +def rs_client(h: Any = None, p: Any = None, **kwargs: Any) -> MongoClient[dict]: """Connect to the replica set and authenticate if necessary.""" return _mongo_client(h, p, **kwargs) -def rs_or_single_client_noauth(h=None, p=None, **kwargs): +def rs_or_single_client_noauth(h: Any = None, p: Any = None, **kwargs: Any) -> MongoClient[dict]: """Connect to the replica set if there is one, otherwise the standalone. Like rs_or_single_client, but does not authenticate. @@ -585,7 +586,7 @@ def rs_or_single_client_noauth(h=None, p=None, **kwargs): return _mongo_client(h, p, authenticate=False, **kwargs) -def rs_or_single_client(h=None, p=None, **kwargs): +def rs_or_single_client(h: Any = None, p: Any = None, **kwargs: Any) -> MongoClient[Any]: """Connect to the replica set if there is one, otherwise the standalone. Authenticates if necessary. From fcb11514506acddbd50b8ec13e76a7c34d336aac Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Fri, 11 Nov 2022 16:23:03 -0600 Subject: [PATCH 0298/1588] PYTHON-3517 Add documentation for on-demand KMS providers (#1113) --- doc/changelog.rst | 2 +- doc/examples/encryption.rst | 73 ++++++++++++++++++++++++++++++++++++- 2 files changed, 73 insertions(+), 2 deletions(-) diff --git a/doc/changelog.rst b/doc/changelog.rst index 8220b6897a..ebd796116e 100644 --- a/doc/changelog.rst +++ b/doc/changelog.rst @@ -4,7 +4,7 @@ Changelog Changes in Version 4.3.3 ------------------------ -Version 4.3.3 fixes a number of bugs: +Version 4.3.3 documents support for :ref:`CSFLE on-demand credentials` for cloud KMS providers, and fixes the following bugs: - Fixed a performance regression in :meth:`~gridfs.GridFSBucket.download_to_stream` and :meth:`~gridfs.GridFSBucket.download_to_stream_by_name` by reading in chunks diff --git a/doc/examples/encryption.rst b/doc/examples/encryption.rst index 72205ad119..9978cb6e36 100644 --- a/doc/examples/encryption.rst +++ b/doc/examples/encryption.rst @@ -713,6 +713,77 @@ To configure automatic *decryption* without automatic *encryption* set client_encryption.close() client.close() - if __name__ == "__main__": main() + + +.. _CSFLE on-demand credentials: + + +CSFLE on-demand credentials +~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +``pymongocrypt`` 1.4 adds support for fetching on-demand KMS credentials for +AWS, GCP, and Azure cloud environments. + +To enable the driver's behavior to obtain credentials from the environment, add the appropriate key ("aws", "gcp", or "azure") with an empty map to +"kms_providers" in either :class:`~pymongo.encryption_options.AutoEncryptionOpts` or :class:`~pymongo.encryption.ClientEncryption` options. + +An application using AWS credentials would look like:: + + from pymongo import MongoClient + from pymongo.encryption import ClientEncryption + client = MongoClient() + client_encryption = ClientEncryption( + # The empty dictionary enables on-demand credentials. + kms_providers={"aws": {}}, + key_vault_namespace="keyvault.datakeys", + key_vault_client=client, + codec_options=client.codec_options, + ) + master_key = { + "region": "us-east-1", + "key": ("arn:aws:kms:us-east-1:123456789:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0"), + } + client_encryption.create_data_key("aws", master_key) + +The above will enable the same behavior of obtaining AWS credentials from the environment as is used for :ref:`MONGODB-AWS` authentication, including the +caching to avoid rate limiting. 
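Once ``create_data_key`` succeeds, the same ``ClientEncryption`` can encrypt and decrypt values explicitly. A minimal sketch (not part of this patch), reusing the ``client_encryption`` and ``master_key`` names from the AWS example above and assuming valid AWS credentials in the environment::

    from pymongo.encryption import Algorithm

    key_id = client_encryption.create_data_key("aws", master_key)
    encrypted = client_encryption.encrypt(
        "secret value",
        Algorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic,
        key_id=key_id,
    )
    assert client_encryption.decrypt(encrypted) == "secret value"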
+ +An application using GCP credentials would look like:: + + from pymongo import MongoClient + from pymongo.encryption import ClientEncryption + client = MongoClient() + client_encryption = ClientEncryption( + # The empty dictionary enables on-demand credentials. + kms_providers={"gcp": {}}, + key_vault_namespace="keyvault.datakeys", + key_vault_client=client, + codec_options=client.codec_options, + ) + master_key = { + "projectId": "my-project", + "location": "global", + "keyRing": "key-ring-csfle", + "keyName": "key-name-csfle", + } + client_encryption.create_data_key("gcp", master_key) + +The driver will query the `VM instance metadata `_ to obtain credentials. + +An application using Azure credentials would look like, this time using +:class:`~pymongo.encryption_options.AutoEncryptionOpts`:: + + from pymongo import MongoClient + from pymongo.encryption_options import AutoEncryptionOpts + # The empty dictionary enables on-demand credentials. + kms_providers = {"azure": {}} + key_vault_namespace = "keyvault.datakeys" + auto_encryption_opts = AutoEncryptionOpts( + kms_providers, key_vault_namespace) + client = MongoClient(auto_encryption_opts=auto_encryption_opts) + coll = client.test.coll + coll.insert_one({"encryptedField": "123456789"}) + +The driver will `acquire an access token `_ from the Azure VM. From d0568042fa3e89786a47a182718a0210e910cce6 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Mon, 14 Nov 2022 07:41:49 -0600 Subject: [PATCH 0299/1588] PYTHON-2818 Add native support for AWS IAM Roles for service accounts, EKS in particular (#1032) --- .evergreen/config.yml | 80 +++++++++++++++++++++++++++++++++++++++++-- 1 file changed, 77 insertions(+), 3 deletions(-) diff --git a/.evergreen/config.yml b/.evergreen/config.yml index 28e54e2ded..96b6a00688 100644 --- a/.evergreen/config.yml +++ b/.evergreen/config.yml @@ -572,7 +572,13 @@ functions: "iam_auth_ec2_instance_account" : "${iam_auth_ec2_instance_account}", "iam_auth_ec2_instance_secret_access_key" : "${iam_auth_ec2_instance_secret_access_key}", - "iam_auth_ec2_instance_profile" : "${iam_auth_ec2_instance_profile}" + "iam_auth_ec2_instance_profile" : "${iam_auth_ec2_instance_profile}", + + "iam_auth_assume_web_role_name": "${iam_auth_assume_web_role_name}", + "iam_web_identity_issuer": "${iam_web_identity_issuer}", + "iam_web_identity_rsa_key": "${iam_web_identity_rsa_key}", + "iam_web_identity_jwks_uri": "${iam_web_identity_jwks_uri}", + "iam_web_identity_token_file": "${iam_web_identity_token_file}" } EOF @@ -668,7 +674,67 @@ functions: fi # Write an empty prepare_mongodb_aws so no auth environment variables # are set. - echo "" > "${PROJECT_DIRECTORY}/prepare_mongodb_aws.sh" + rm "${PROJECT_DIRECTORY}/prepare_mongodb_aws.sh" || true + PYTHON_BINARY=${PYTHON_BINARY} ASSERT_NO_URI_CREDS=true .evergreen/run-mongodb-aws-test.sh + + "run aws auth test with aws web identity credentials": - command: shell.exec + type: test + params: + working_dir: "src" + script: | + ${PREPARE_SHELL} + if [ "${skip_EC2_auth_test}" = "true" ]; then + echo "This platform does not support the web identity auth test, skipping..." + exit 0 + fi + cd ${DRIVERS_TOOLS}/.evergreen/auth_aws + . 
./activate_venv.sh + mongo aws_e2e_web_identity.js + - command: shell.exec + type: test + params: + working_dir: "src" + silent: true + script: | + # DO NOT ECHO WITH XTRACE (which PREPARE_SHELL does) + cat <<'EOF' > "${PROJECT_DIRECTORY}/prepare_mongodb_aws.sh" + export AWS_ROLE_ARN="${iam_auth_assume_web_role_name}" + export AWS_WEB_IDENTITY_TOKEN_FILE="${iam_web_identity_token_file}" + EOF + - command: shell.exec + type: test + params: + working_dir: "src" + script: | + ${PREPARE_SHELL} + if [ "${skip_web_identity_auth_test}" = "true" ]; then + echo "This platform does not support the web identity auth test, skipping..." + exit 0 + fi + PYTHON_BINARY=${PYTHON_BINARY} ASSERT_NO_URI_CREDS=true .evergreen/run-mongodb-aws-test.sh + - command: shell.exec + type: test + params: + working_dir: "src" + silent: true + script: | + # DO NOT ECHO WITH XTRACE (which PREPARE_SHELL does) + cat <<'EOF' > "${PROJECT_DIRECTORY}/prepare_mongodb_aws.sh" + export AWS_ROLE_ARN="${iam_auth_assume_web_role_name}" + export AWS_WEB_IDENTITY_TOKEN_FILE="${iam_web_identity_token_file}" + export AWS_ROLE_SESSION_NAME="test" + EOF + - command: shell.exec + type: test + params: + working_dir: "src" + script: | + ${PREPARE_SHELL} + if [ "${skip_web_identity_auth_test}" = "true" ]; then + echo "This platform does not support the web identity auth test, skipping..." + exit 0 + fi PYTHON_BINARY=${PYTHON_BINARY} ASSERT_NO_URI_CREDS=true .evergreen/run-mongodb-aws-test.sh "run aws auth test with aws credentials as environment variables": @@ -1832,6 +1898,7 @@ tasks: - func: "run aws auth test with aws credentials as environment variables" - func: "run aws auth test with aws credentials and session token as environment variables" - func: "run aws auth test with aws EC2 credentials" + - func: "run aws auth test with aws web identity credentials" - func: "run aws ECS auth test" - name: "aws-auth-test-5.0" @@ -1848,6 +1915,7 @@ tasks: - func: "run aws auth test with aws credentials as environment variables" - func: "run aws auth test with aws credentials and session token as environment variables" - func: "run aws auth test with aws EC2 credentials" + - func: "run aws auth test with aws web identity credentials" - func: "run aws ECS auth test" - name: "aws-auth-test-6.0" @@ -1864,6 +1932,7 @@ tasks: - func: "run aws auth test with aws credentials as environment variables" - func: "run aws auth test with aws credentials and session token as environment variables" - func: "run aws auth test with aws EC2 credentials" + - func: "run aws auth test with aws web identity credentials" - func: "run aws ECS auth test" - name: "aws-auth-test-latest" @@ -1880,6 +1949,7 @@ tasks: - func: "run aws auth test with aws credentials as environment variables" - func: "run aws auth test with aws credentials and session token as environment variables" - func: "run aws auth test with aws EC2 credentials" + - func: "run aws auth test with aws web identity credentials" - func: "run aws ECS auth test" - name: "aws-auth-test-rapid" commands: @@ -1895,6 +1965,7 @@ tasks: - func: "run aws auth test with aws credentials as environment variables" - func: "run aws auth test with aws credentials and session token as environment variables" - func: "run aws auth test with aws EC2 credentials" + - func: "run aws auth test with aws web identity credentials" - func: "run aws ECS auth test" - name: load-balancer-test @@ -2076,6 +2147,7 @@ axes: variables: skip_EC2_auth_test: true skip_ECS_auth_test: true + skip_web_identity_auth_test: true python3_binary: 
/Library/Frameworks/Python.framework/Versions/3.8/bin/python3 libmongocrypt_url: https://s3.amazonaws.com/mciuploads/libmongocrypt/macos/master/latest/libmongocrypt.tar.gz - id: macos-1100 @@ -2084,6 +2156,7 @@ axes: variables: skip_EC2_auth_test: true skip_ECS_auth_test: true + skip_web_identity_auth_test: true python3_binary: /Library/Frameworks/Python.framework/Versions/3.8/bin/python3 libmongocrypt_url: https://s3.amazonaws.com/mciuploads/libmongocrypt/macos/master/latest/libmongocrypt.tar.gz - id: rhel62 @@ -2146,8 +2219,9 @@ axes: run_on: windows-64-vsMulti-small batchtime: 10080 # 7 days variables: - skip_EC2_auth_test: true skip_ECS_auth_test: true + skip_EC2_auth_test: true + skip_web_identity_auth_test: true python3_binary: "C:/python/Python38/python.exe" venv_bin_dir: "Scripts" libmongocrypt_url: https://s3.amazonaws.com/mciuploads/libmongocrypt/windows-test/master/latest/libmongocrypt.tar.gz From 79aa5e6757fe816c7aaadf08b56120e4375b904e Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Mon, 14 Nov 2022 08:50:08 -0800 Subject: [PATCH 0300/1588] PYTHON-3516 Improve test EventListener api (#1114) --- test/test_auth.py | 8 +- test/test_change_stream.py | 55 +++-- test/test_collation.py | 38 ++-- test/test_collection.py | 33 ++- test/test_command_monitoring_legacy.py | 32 +-- test/test_comment.py | 7 +- test/test_cursor.py | 150 ++++++------- test/test_data_lake.py | 6 +- test/test_database.py | 12 +- test/test_encryption.py | 44 ++-- test/test_monitoring.py | 283 +++++++++++------------- test/test_read_concern.py | 29 ++- test/test_read_preferences.py | 2 +- test/test_read_write_concern_spec.py | 8 +- test/test_retryable_reads.py | 6 +- test/test_retryable_writes.py | 56 +++-- test/test_server_selection.py | 2 +- test/test_server_selection_in_window.py | 2 +- test/test_session.py | 112 +++++----- test/test_transactions.py | 10 +- test/test_versioned_api.py | 4 +- test/utils.py | 39 +++- test/utils_spec_runner.py | 8 +- 23 files changed, 460 insertions(+), 486 deletions(-) diff --git a/test/test_auth.py b/test/test_auth.py index 69ed27bda0..9d80f06c00 100644 --- a/test/test_auth.py +++ b/test/test_auth.py @@ -392,7 +392,7 @@ def test_scram_skip_empty_exchange(self): if client_context.version < (4, 4, -1): # Assert we sent the skipEmptyExchange option. - first_event = listener.results["started"][0] + first_event = listener.started_events[0] self.assertEqual(first_event.command_name, "saslStart") self.assertEqual(first_event.command["options"], {"skipEmptyExchange": True}) @@ -449,7 +449,7 @@ def test_scram(self): ) client.testscram.command("dbstats") - self.listener.results.clear() + self.listener.reset() client = rs_or_single_client_noauth( username="both", password="pwd", authSource="testscram", event_listeners=[self.listener] ) @@ -457,9 +457,9 @@ def test_scram(self): if client_context.version.at_least(4, 4, -1): # Speculative authentication in 4.4+ sends saslStart with the # handshake. 
- self.assertEqual(self.listener.results["started"], []) + self.assertEqual(self.listener.started_events, []) else: - started = self.listener.results["started"][0] + started = self.listener.started_events[0] self.assertEqual(started.command.get("mechanism"), "SCRAM-SHA-256") # Step 3: verify auth failure conditions diff --git a/test/test_change_stream.py b/test/test_change_stream.py index 62d7abee62..2388a6e1f4 100644 --- a/test/test_change_stream.py +++ b/test/test_change_stream.py @@ -167,7 +167,7 @@ def test_try_next_runs_one_getmore(self): client = rs_or_single_client(event_listeners=[listener]) # Connect to the cluster. client.admin.command("ping") - listener.results.clear() + listener.reset() # ChangeStreams only read majority committed data so use w:majority. coll = self.watched_collection().with_options(write_concern=WriteConcern("majority")) coll.drop() @@ -177,25 +177,25 @@ def test_try_next_runs_one_getmore(self): self.addCleanup(coll.drop) with self.change_stream_with_client(client, max_await_time_ms=250) as stream: self.assertEqual(listener.started_command_names(), ["aggregate"]) - listener.results.clear() + listener.reset() # Confirm that only a single getMore is run even when no documents # are returned. self.assertIsNone(stream.try_next()) self.assertEqual(listener.started_command_names(), ["getMore"]) - listener.results.clear() + listener.reset() self.assertIsNone(stream.try_next()) self.assertEqual(listener.started_command_names(), ["getMore"]) - listener.results.clear() + listener.reset() # Get at least one change before resuming. coll.insert_one({"_id": 2}) wait_until(lambda: stream.try_next() is not None, "get change from try_next") - listener.results.clear() + listener.reset() # Cause the next request to initiate the resume process. self.kill_change_stream_cursor(stream) - listener.results.clear() + listener.reset() # The sequence should be: # - getMore, fail @@ -203,7 +203,7 @@ def test_try_next_runs_one_getmore(self): # - no results, return immediately without another getMore self.assertIsNone(stream.try_next()) self.assertEqual(listener.started_command_names(), ["getMore", "aggregate"]) - listener.results.clear() + listener.reset() # Stream still works after a resume. coll.insert_one({"_id": 3}) @@ -217,7 +217,7 @@ def test_batch_size_is_honored(self): client = rs_or_single_client(event_listeners=[listener]) # Connect to the cluster. client.admin.command("ping") - listener.results.clear() + listener.reset() # ChangeStreams only read majority committed data so use w:majority. coll = self.watched_collection().with_options(write_concern=WriteConcern("majority")) coll.drop() @@ -229,12 +229,12 @@ def test_batch_size_is_honored(self): expected = {"batchSize": 23} with self.change_stream_with_client(client, max_await_time_ms=250, batch_size=23) as stream: # Confirm that batchSize is honored for initial batch. - cmd = listener.results["started"][0].command + cmd = listener.started_events[0].command self.assertEqual(cmd["cursor"], expected) - listener.results.clear() + listener.reset() # Confirm that batchSize is honored by getMores. 
self.assertIsNone(stream.try_next()) - cmd = listener.results["started"][0].command + cmd = listener.started_events[0].command key = next(iter(expected)) self.assertEqual(expected[key], cmd[key]) @@ -255,12 +255,11 @@ def test_start_at_operation_time(self): @no_type_check def _test_full_pipeline(self, expected_cs_stage): client, listener = self.client_with_listener("aggregate") - results = listener.results with self.change_stream_with_client(client, [{"$project": {"foo": 0}}]) as _: pass - self.assertEqual(1, len(results["started"])) - command = results["started"][0] + self.assertEqual(1, len(listener.started_events)) + command = listener.started_events[0] self.assertEqual("aggregate", command.command_name) self.assertEqual( [{"$changeStream": expected_cs_stage}, {"$project": {"foo": 0}}], @@ -464,7 +463,7 @@ def _get_expected_resume_token_legacy(self, stream, listener, previous_change=No versions that don't support postBatchResumeToken. Assumes the stream has never returned any changes if previous_change is None.""" if previous_change is None: - agg_cmd = listener.results["started"][0] + agg_cmd = listener.started_events[0] stage = agg_cmd.command["pipeline"][0]["$changeStream"] return stage.get("resumeAfter") or stage.get("startAfter") @@ -481,7 +480,7 @@ def _get_expected_resume_token(self, stream, listener, previous_change=None): if token is not None: return token - response = listener.results["succeeded"][-1].reply + response = listener.succeeded_events[-1].reply return response["cursor"]["postBatchResumeToken"] @no_type_check @@ -558,8 +557,8 @@ def test_no_resume_attempt_if_aggregate_command_fails(self): pass # Driver should have attempted aggregate command only once. - self.assertEqual(len(listener.results["started"]), 1) - self.assertEqual(listener.results["started"][0].command_name, "aggregate") + self.assertEqual(len(listener.started_events), 1) + self.assertEqual(listener.started_events[0].command_name, "aggregate") # Prose test no. 5 - REMOVED # Prose test no. 6 - SKIPPED @@ -603,20 +602,20 @@ def test_start_at_operation_time_caching(self): with self.change_stream_with_client(client) as cs: self.kill_change_stream_cursor(cs) cs.try_next() - cmd = listener.results["started"][-1].command + cmd = listener.started_events[-1].command self.assertIsNotNone(cmd["pipeline"][0]["$changeStream"].get("startAtOperationTime")) # Case 2: change stream started with startAtOperationTime - listener.results.clear() + listener.reset() optime = self.get_start_at_operation_time() with self.change_stream_with_client(client, start_at_operation_time=optime) as cs: self.kill_change_stream_cursor(cs) cs.try_next() - cmd = listener.results["started"][-1].command + cmd = listener.started_events[-1].command self.assertEqual( cmd["pipeline"][0]["$changeStream"].get("startAtOperationTime"), optime, - str([k.command for k in listener.results["started"]]), + str([k.command for k in listener.started_events]), ) # Prose test no. 10 - SKIPPED @@ -631,7 +630,7 @@ def test_resumetoken_empty_batch(self): self.assertIsNone(change_stream.try_next()) resume_token = change_stream.resume_token - response = listener.results["succeeded"][0].reply + response = listener.succeeded_events[0].reply self.assertEqual(resume_token, response["cursor"]["postBatchResumeToken"]) # Prose test no. 
11 @@ -643,7 +642,7 @@ def test_resumetoken_exhausted_batch(self): self._populate_and_exhaust_change_stream(change_stream) resume_token = change_stream.resume_token - response = listener.results["succeeded"][-1].reply + response = listener.succeeded_events[-1].reply self.assertEqual(resume_token, response["cursor"]["postBatchResumeToken"]) # Prose test no. 12 @@ -737,7 +736,7 @@ def test_startafter_resume_uses_startafter_after_empty_getMore(self): self.kill_change_stream_cursor(change_stream) change_stream.try_next() # Resume attempt - response = listener.results["started"][-1] + response = listener.started_events[-1] self.assertIsNone(response.command["pipeline"][0]["$changeStream"].get("resumeAfter")) self.assertIsNotNone(response.command["pipeline"][0]["$changeStream"].get("startAfter")) @@ -756,7 +755,7 @@ def test_startafter_resume_uses_resumeafter_after_nonempty_getMore(self): self.kill_change_stream_cursor(change_stream) change_stream.try_next() # Resume attempt - response = listener.results["started"][-1] + response = listener.started_events[-1] self.assertIsNotNone(response.command["pipeline"][0]["$changeStream"].get("resumeAfter")) self.assertIsNone(response.command["pipeline"][0]["$changeStream"].get("startAfter")) @@ -1056,7 +1055,7 @@ def tearDownClass(cls): def setUp(self): super(TestAllLegacyScenarios, self).setUp() - self.listener.results.clear() + self.listener.reset() def setUpCluster(self, scenario_dict): assets = [ @@ -1128,7 +1127,7 @@ def check_event(self, event, expectation_dict): self.assertEqual(getattr(event, key), value) def tearDown(self): - self.listener.results.clear() + self.listener.reset() _TEST_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "change_streams") diff --git a/test/test_collation.py b/test/test_collation.py index d8410a9de4..18f8bc78ac 100644 --- a/test/test_collation.py +++ b/test/test_collation.py @@ -113,11 +113,11 @@ def tearDownClass(cls): super(TestCollation, cls).tearDownClass() def tearDown(self): - self.listener.results.clear() + self.listener.reset() super(TestCollation, self).tearDown() def last_command_started(self): - return self.listener.results["started"][-1].command + return self.listener.started_events[-1].command def assertCollationInLastCommand(self): self.assertEqual(self.collation.document, self.last_command_started()["collation"]) @@ -129,7 +129,7 @@ def test_create_collection(self): # Test passing collation as a dict as well. 
self.db.test.drop() - self.listener.results.clear() + self.listener.reset() self.db.create_collection("test", collation=self.collation.document) self.assertCollationInLastCommand() @@ -139,7 +139,7 @@ def test_index_model(self): def test_create_index(self): self.db.test.create_index("foo", collation=self.collation) - ci_cmd = self.listener.results["started"][0].command + ci_cmd = self.listener.started_events[0].command self.assertEqual(self.collation.document, ci_cmd["indexes"][0]["collation"]) def test_aggregate(self): @@ -154,18 +154,18 @@ def test_distinct(self): self.db.test.distinct("foo", collation=self.collation) self.assertCollationInLastCommand() - self.listener.results.clear() + self.listener.reset() self.db.test.find(collation=self.collation).distinct("foo") self.assertCollationInLastCommand() def test_find_command(self): self.db.test.insert_one({"is this thing on?": True}) - self.listener.results.clear() + self.listener.reset() next(self.db.test.find(collation=self.collation)) self.assertCollationInLastCommand() def test_explain_command(self): - self.listener.results.clear() + self.listener.reset() self.db.test.find(collation=self.collation).explain() # The collation should be part of the explained command. self.assertEqual( @@ -174,40 +174,40 @@ def test_explain_command(self): def test_delete(self): self.db.test.delete_one({"foo": 42}, collation=self.collation) - command = self.listener.results["started"][0].command + command = self.listener.started_events[0].command self.assertEqual(self.collation.document, command["deletes"][0]["collation"]) - self.listener.results.clear() + self.listener.reset() self.db.test.delete_many({"foo": 42}, collation=self.collation) - command = self.listener.results["started"][0].command + command = self.listener.started_events[0].command self.assertEqual(self.collation.document, command["deletes"][0]["collation"]) def test_update(self): self.db.test.replace_one({"foo": 42}, {"foo": 43}, collation=self.collation) - command = self.listener.results["started"][0].command + command = self.listener.started_events[0].command self.assertEqual(self.collation.document, command["updates"][0]["collation"]) - self.listener.results.clear() + self.listener.reset() self.db.test.update_one({"foo": 42}, {"$set": {"foo": 43}}, collation=self.collation) - command = self.listener.results["started"][0].command + command = self.listener.started_events[0].command self.assertEqual(self.collation.document, command["updates"][0]["collation"]) - self.listener.results.clear() + self.listener.reset() self.db.test.update_many({"foo": 42}, {"$set": {"foo": 43}}, collation=self.collation) - command = self.listener.results["started"][0].command + command = self.listener.started_events[0].command self.assertEqual(self.collation.document, command["updates"][0]["collation"]) def test_find_and(self): self.db.test.find_one_and_delete({"foo": 42}, collation=self.collation) self.assertCollationInLastCommand() - self.listener.results.clear() + self.listener.reset() self.db.test.find_one_and_update( {"foo": 42}, {"$set": {"foo": 43}}, collation=self.collation ) self.assertCollationInLastCommand() - self.listener.results.clear() + self.listener.reset() self.db.test.find_one_and_replace({"foo": 42}, {"foo": 43}, collation=self.collation) self.assertCollationInLastCommand() @@ -229,8 +229,8 @@ def test_bulk_write(self): ] ) - delete_cmd = self.listener.results["started"][0].command - update_cmd = self.listener.results["started"][1].command + delete_cmd = 
self.listener.started_events[0].command + update_cmd = self.listener.started_events[1].command def check_ops(ops): for op in ops: diff --git a/test/test_collection.py b/test/test_collection.py index e7ac248124..49a7017ef3 100644 --- a/test/test_collection.py +++ b/test/test_collection.py @@ -1986,21 +1986,20 @@ def test_find_one_and_write_concern(self): c_w0 = db.get_collection("test", write_concern=WriteConcern(w=0)) # default WriteConcern. c_default = db.get_collection("test", write_concern=WriteConcern()) - results = listener.results # Authenticate the client and throw out auth commands from the listener. db.command("ping") - results.clear() + listener.reset() c_w0.find_one_and_update({"_id": 1}, {"$set": {"foo": "bar"}}) - self.assertEqual({"w": 0}, results["started"][0].command["writeConcern"]) - results.clear() + self.assertEqual({"w": 0}, listener.started_events[0].command["writeConcern"]) + listener.reset() c_w0.find_one_and_replace({"_id": 1}, {"foo": "bar"}) - self.assertEqual({"w": 0}, results["started"][0].command["writeConcern"]) - results.clear() + self.assertEqual({"w": 0}, listener.started_events[0].command["writeConcern"]) + listener.reset() c_w0.find_one_and_delete({"_id": 1}) - self.assertEqual({"w": 0}, results["started"][0].command["writeConcern"]) - results.clear() + self.assertEqual({"w": 0}, listener.started_events[0].command["writeConcern"]) + listener.reset() # Test write concern errors. if client_context.is_rs: @@ -2017,27 +2016,27 @@ def test_find_one_and_write_concern(self): WriteConcernError, c_wc_error.find_one_and_replace, {"w": 0}, - results["started"][0].command["writeConcern"], + listener.started_events[0].command["writeConcern"], ) self.assertRaises( WriteConcernError, c_wc_error.find_one_and_delete, {"w": 0}, - results["started"][0].command["writeConcern"], + listener.started_events[0].command["writeConcern"], ) - results.clear() + listener.reset() c_default.find_one_and_update({"_id": 1}, {"$set": {"foo": "bar"}}) - self.assertNotIn("writeConcern", results["started"][0].command) - results.clear() + self.assertNotIn("writeConcern", listener.started_events[0].command) + listener.reset() c_default.find_one_and_replace({"_id": 1}, {"foo": "bar"}) - self.assertNotIn("writeConcern", results["started"][0].command) - results.clear() + self.assertNotIn("writeConcern", listener.started_events[0].command) + listener.reset() c_default.find_one_and_delete({"_id": 1}) - self.assertNotIn("writeConcern", results["started"][0].command) - results.clear() + self.assertNotIn("writeConcern", listener.started_events[0].command) + listener.reset() def test_find_with_nested(self): c = self.db.test diff --git a/test/test_command_monitoring_legacy.py b/test/test_command_monitoring_legacy.py index 5d9f2fe3ee..1cc3e15cc9 100644 --- a/test/test_command_monitoring_legacy.py +++ b/test/test_command_monitoring_legacy.py @@ -54,17 +54,7 @@ def tearDownClass(cls): cls.client.close() def tearDown(self): - self.listener.results.clear() - - -def format_actual_results(results): - started = results["started"] - succeeded = results["succeeded"] - failed = results["failed"] - msg = "\nStarted: %r" % (started[0].command if len(started) else None,) - msg += "\nSucceeded: %r" % (succeeded[0].reply if len(succeeded) else None,) - msg += "\nFailed: %r" % (failed[0].failure if len(failed) else None,) - return msg + self.listener.reset() def create_test(scenario_def, test): @@ -75,7 +65,7 @@ def run_scenario(self): coll = self.client[dbname][collname] coll.drop() 
coll.insert_many(scenario_def["data"]) - self.listener.results.clear() + self.listener.reset() name = camel_to_snake(test["operation"]["name"]) if "read_preference" in test["operation"]: coll = coll.with_options( @@ -127,11 +117,13 @@ def run_scenario(self): except OperationFailure: pass - res = self.listener.results + started_events = self.listener.started_events + succeeded_events = self.listener.succeeded_events + failed_events = self.listener.failed_events for expectation in test["expectations"]: event_type = next(iter(expectation)) if event_type == "command_started_event": - event = res["started"][0] if len(res["started"]) else None + event = started_events[0] if len(started_events) else None if event is not None: # The tests substitute 42 for any number other than 0. if event.command_name == "getMore" and event.command["getMore"]: @@ -147,7 +139,7 @@ def run_scenario(self): update.setdefault("upsert", False) update.setdefault("multi", False) elif event_type == "command_succeeded_event": - event = res["succeeded"].pop(0) if len(res["succeeded"]) else None + event = succeeded_events.pop(0) if len(succeeded_events) else None if event is not None: reply = event.reply # The tests substitute 42 for any number other than 0, @@ -171,12 +163,12 @@ def run_scenario(self): reply.pop("cursorsKilled") reply["cursorsUnknown"] = [42] # Found succeeded event. Pop related started event. - res["started"].pop(0) + started_events.pop(0) elif event_type == "command_failed_event": - event = res["failed"].pop(0) if len(res["failed"]) else None + event = failed_events.pop(0) if len(failed_events) else None if event is not None: # Found failed event. Pop related started event. - res["started"].pop(0) + started_events.pop(0) else: self.fail("Unknown event type") @@ -184,11 +176,11 @@ def run_scenario(self): event_name = event_type.split("_")[1] self.fail( "Expected %s event for %s command. Actual " - "results:%s" + "results:\n%s" % ( event_name, expectation[event_type]["command_name"], - format_actual_results(res), + "\n".join(str(e) for e in self.listener.events), ) ) diff --git a/test/test_comment.py b/test/test_comment.py index c83428fd70..85e5470d74 100644 --- a/test/test_comment.py +++ b/test/test_comment.py @@ -43,12 +43,11 @@ class TestComment(IntegrationTest): def _test_ops( self, helpers, already_supported, listener, db=Empty(), coll=Empty() # noqa: B008 ): - results = listener.results for h, args in helpers: c = "testing comment with " + h.__name__ with self.subTest("collection-" + h.__name__ + "-comment"): for cc in [c, {"key": c}, ["any", 1]]: - results.clear() + listener.reset() kwargs = {"comment": cc} if h == coll.rename: _ = db.get_collection("temp_temp_temp").drop() @@ -77,7 +76,7 @@ def _test_ops( tested = False # For some reason collection.list_indexes creates two commands and the first # one doesn't contain 'comment'. 
- for i in results["started"]: + for i in listener.started_events: if cc == i.command.get("comment", ""): self.assertEqual(cc, i.command["comment"]) tested = True @@ -98,7 +97,7 @@ def _test_ops( h.__doc__, ) - results.clear() + listener.reset() @client_context.require_version_min(4, 7, -1) @client_context.require_replica_set diff --git a/test/test_cursor.py b/test/test_cursor.py index 5b4efcd391..96d83fecf1 100644 --- a/test/test_cursor.py +++ b/test/test_cursor.py @@ -218,79 +218,78 @@ def test_max_await_time_ms(self): listener = AllowListEventListener("find", "getMore") coll = rs_or_single_client(event_listeners=[listener])[self.db.name].pymongo_test - results = listener.results # Tailable_await defaults. list(coll.find(cursor_type=CursorType.TAILABLE_AWAIT)) # find - self.assertFalse("maxTimeMS" in results["started"][0].command) + self.assertFalse("maxTimeMS" in listener.started_events[0].command) # getMore - self.assertFalse("maxTimeMS" in results["started"][1].command) - results.clear() + self.assertFalse("maxTimeMS" in listener.started_events[1].command) + listener.reset() # Tailable_await with max_await_time_ms set. list(coll.find(cursor_type=CursorType.TAILABLE_AWAIT).max_await_time_ms(99)) # find - self.assertEqual("find", results["started"][0].command_name) - self.assertFalse("maxTimeMS" in results["started"][0].command) + self.assertEqual("find", listener.started_events[0].command_name) + self.assertFalse("maxTimeMS" in listener.started_events[0].command) # getMore - self.assertEqual("getMore", results["started"][1].command_name) - self.assertTrue("maxTimeMS" in results["started"][1].command) - self.assertEqual(99, results["started"][1].command["maxTimeMS"]) - results.clear() + self.assertEqual("getMore", listener.started_events[1].command_name) + self.assertTrue("maxTimeMS" in listener.started_events[1].command) + self.assertEqual(99, listener.started_events[1].command["maxTimeMS"]) + listener.reset() # Tailable_await with max_time_ms list(coll.find(cursor_type=CursorType.TAILABLE_AWAIT).max_time_ms(99)) # find - self.assertEqual("find", results["started"][0].command_name) - self.assertTrue("maxTimeMS" in results["started"][0].command) - self.assertEqual(99, results["started"][0].command["maxTimeMS"]) + self.assertEqual("find", listener.started_events[0].command_name) + self.assertTrue("maxTimeMS" in listener.started_events[0].command) + self.assertEqual(99, listener.started_events[0].command["maxTimeMS"]) # getMore - self.assertEqual("getMore", results["started"][1].command_name) - self.assertFalse("maxTimeMS" in results["started"][1].command) - results.clear() + self.assertEqual("getMore", listener.started_events[1].command_name) + self.assertFalse("maxTimeMS" in listener.started_events[1].command) + listener.reset() # Tailable_await with both max_time_ms and max_await_time_ms list(coll.find(cursor_type=CursorType.TAILABLE_AWAIT).max_time_ms(99).max_await_time_ms(99)) # find - self.assertEqual("find", results["started"][0].command_name) - self.assertTrue("maxTimeMS" in results["started"][0].command) - self.assertEqual(99, results["started"][0].command["maxTimeMS"]) + self.assertEqual("find", listener.started_events[0].command_name) + self.assertTrue("maxTimeMS" in listener.started_events[0].command) + self.assertEqual(99, listener.started_events[0].command["maxTimeMS"]) # getMore - self.assertEqual("getMore", results["started"][1].command_name) - self.assertTrue("maxTimeMS" in results["started"][1].command) - self.assertEqual(99, results["started"][1].command["maxTimeMS"]) - 
results.clear() + self.assertEqual("getMore", listener.started_events[1].command_name) + self.assertTrue("maxTimeMS" in listener.started_events[1].command) + self.assertEqual(99, listener.started_events[1].command["maxTimeMS"]) + listener.reset() # Non tailable_await with max_await_time_ms list(coll.find(batch_size=1).max_await_time_ms(99)) # find - self.assertEqual("find", results["started"][0].command_name) - self.assertFalse("maxTimeMS" in results["started"][0].command) + self.assertEqual("find", listener.started_events[0].command_name) + self.assertFalse("maxTimeMS" in listener.started_events[0].command) # getMore - self.assertEqual("getMore", results["started"][1].command_name) - self.assertFalse("maxTimeMS" in results["started"][1].command) - results.clear() + self.assertEqual("getMore", listener.started_events[1].command_name) + self.assertFalse("maxTimeMS" in listener.started_events[1].command) + listener.reset() # Non tailable_await with max_time_ms list(coll.find(batch_size=1).max_time_ms(99)) # find - self.assertEqual("find", results["started"][0].command_name) - self.assertTrue("maxTimeMS" in results["started"][0].command) - self.assertEqual(99, results["started"][0].command["maxTimeMS"]) + self.assertEqual("find", listener.started_events[0].command_name) + self.assertTrue("maxTimeMS" in listener.started_events[0].command) + self.assertEqual(99, listener.started_events[0].command["maxTimeMS"]) # getMore - self.assertEqual("getMore", results["started"][1].command_name) - self.assertFalse("maxTimeMS" in results["started"][1].command) + self.assertEqual("getMore", listener.started_events[1].command_name) + self.assertFalse("maxTimeMS" in listener.started_events[1].command) # Non tailable_await with both max_time_ms and max_await_time_ms list(coll.find(batch_size=1).max_time_ms(99).max_await_time_ms(88)) # find - self.assertEqual("find", results["started"][0].command_name) - self.assertTrue("maxTimeMS" in results["started"][0].command) - self.assertEqual(99, results["started"][0].command["maxTimeMS"]) + self.assertEqual("find", listener.started_events[0].command_name) + self.assertTrue("maxTimeMS" in listener.started_events[0].command) + self.assertEqual(99, listener.started_events[0].command["maxTimeMS"]) # getMore - self.assertEqual("getMore", results["started"][1].command_name) - self.assertFalse("maxTimeMS" in results["started"][1].command) + self.assertEqual("getMore", listener.started_events[1].command_name) + self.assertFalse("maxTimeMS" in listener.started_events[1].command) @client_context.require_test_commands @client_context.require_no_mongos @@ -329,7 +328,7 @@ def test_explain_with_read_concern(self): self.addCleanup(client.close) coll = client.pymongo_test.test.with_options(read_concern=ReadConcern(level="local")) self.assertTrue(coll.find().explain()) - started = listener.results["started"] + started = listener.started_events self.assertEqual(len(started), 1) self.assertNotIn("readConcern", started[0].command) @@ -1169,7 +1168,6 @@ def test_close_kills_cursor_synchronously(self): self.client._process_periodic_tasks() listener = AllowListEventListener("killCursors") - results = listener.results client = rs_or_single_client(event_listeners=[listener]) self.addCleanup(client.close) coll = client[self.db.name].test_close_kills_cursors @@ -1178,7 +1176,7 @@ def test_close_kills_cursor_synchronously(self): docs_inserted = 1000 coll.insert_many([{"i": i} for i in range(docs_inserted)]) - results.clear() + listener.reset() # Close a cursor while it's still open on the 
server. cursor = coll.find().batch_size(10) @@ -1187,13 +1185,13 @@ def test_close_kills_cursor_synchronously(self): cursor.close() def assertCursorKilled(): - self.assertEqual(1, len(results["started"])) - self.assertEqual("killCursors", results["started"][0].command_name) - self.assertEqual(1, len(results["succeeded"])) - self.assertEqual("killCursors", results["succeeded"][0].command_name) + self.assertEqual(1, len(listener.started_events)) + self.assertEqual("killCursors", listener.started_events[0].command_name) + self.assertEqual(1, len(listener.succeeded_events)) + self.assertEqual("killCursors", listener.succeeded_events[0].command_name) assertCursorKilled() - results.clear() + listener.reset() # Close a command cursor while it's still open on the server. cursor = coll.aggregate([], batchSize=10) @@ -1204,7 +1202,7 @@ def assertCursorKilled(): if cursor.cursor_id: assertCursorKilled() else: - self.assertEqual(0, len(results["started"])) + self.assertEqual(0, len(listener.started_events)) def test_delete_not_initialized(self): # Creating a cursor with invalid arguments will not run __init__ @@ -1226,7 +1224,7 @@ def test_getMore_does_not_send_readPreference(self): self.addCleanup(coll.drop) list(coll.find(batch_size=3)) - started = listener.results["started"] + started = listener.started_events self.assertEqual(2, len(started)) self.assertEqual("find", started[0].command_name) if client_context.is_rs or client_context.is_mongos: @@ -1261,13 +1259,13 @@ def test_find_raw_transaction(self): batches = list( client[self.db.name].test.find_raw_batches(session=session).sort("_id") ) - cmd = listener.results["started"][0] + cmd = listener.started_events[0] self.assertEqual(cmd.command_name, "find") self.assertIn("$clusterTime", cmd.command) self.assertEqual(cmd.command["startTransaction"], True) self.assertEqual(cmd.command["txnNumber"], 1) # Ensure we update $clusterTime from the command response. - last_cmd = listener.results["succeeded"][-1] + last_cmd = listener.succeeded_events[-1] self.assertEqual( last_cmd.reply["$clusterTime"]["clusterTime"], session.cluster_time["clusterTime"], @@ -1293,8 +1291,8 @@ def test_find_raw_retryable_reads(self): self.assertEqual(1, len(batches)) self.assertEqual(docs, decode_all(batches[0])) - self.assertEqual(len(listener.results["started"]), 2) - for cmd in listener.results["started"]: + self.assertEqual(len(listener.started_events), 2) + for cmd in listener.started_events: self.assertEqual(cmd.command_name, "find") @client_context.require_version_min(5, 0, 0) @@ -1314,7 +1312,7 @@ def test_find_raw_snapshot_reads(self): self.assertEqual(1, len(batches)) self.assertEqual(docs, decode_all(batches[0])) - find_cmd = listener.results["started"][1].command + find_cmd = listener.started_events[1].command self.assertEqual(find_cmd["readConcern"]["level"], "snapshot") self.assertIsNotNone(find_cmd["readConcern"]["atClusterTime"]) @@ -1372,15 +1370,15 @@ def test_monitoring(self): c.drop() c.insert_many([{"_id": i} for i in range(10)]) - listener.results.clear() + listener.reset() cursor = c.find_raw_batches(batch_size=4) # First raw batch of 4 documents. 
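Every hunk in this patch applies the same mechanical rewrite: lookups on a shared
results dict (results["started"], results["succeeded"], results["failed"]) become
named attributes, and results.clear() becomes listener.reset(). The patch does not
include the test/utils.py hunk that defines that API, so the following is only a
sketch of the shape the listener presumably takes after the refactor; the class
body is an assumption, not the actual helper. The untouched
pprint.pformat(cmd_listener.results) calls left in the retryable-test hunks below
suggest the old dict may survive underneath, with the new names layered on top::

    from collections import defaultdict
    from pymongo import monitoring

    class EventListener(monitoring.CommandListener):
        """Assumed shape: typed views over the old results dict."""

        def __init__(self):
            self.results = defaultdict(list)  # kept: some hunks still pformat it

        @property
        def started_events(self):
            return self.results["started"]

        @property
        def succeeded_events(self):
            return self.results["succeeded"]

        @property
        def failed_events(self):
            return self.results["failed"]

        def started(self, event):
            self.results["started"].append(event)

        def succeeded(self, event):
            self.results["succeeded"].append(event)

        def failed(self, event):
            self.results["failed"].append(event)

        def reset(self):
            """Replaces the scattered results.clear() calls."""
            self.results.clear()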
next(cursor) - started = listener.results["started"][0] - succeeded = listener.results["succeeded"][0] - self.assertEqual(0, len(listener.results["failed"])) + started = listener.started_events[0] + succeeded = listener.succeeded_events[0] + self.assertEqual(0, len(listener.failed_events)) self.assertEqual("find", started.command_name) self.assertEqual("pymongo_test", started.database_name) self.assertEqual("find", succeeded.command_name) @@ -1391,15 +1389,14 @@ def test_monitoring(self): self.assertEqual(len(csr["firstBatch"]), 1) self.assertEqual(decode_all(csr["firstBatch"][0]), [{"_id": i} for i in range(0, 4)]) - listener.results.clear() + listener.reset() # Next raw batch of 4 documents. next(cursor) try: - results = listener.results - started = results["started"][0] - succeeded = results["succeeded"][0] - self.assertEqual(0, len(results["failed"])) + started = listener.started_events[0] + succeeded = listener.succeeded_events[0] + self.assertEqual(0, len(listener.failed_events)) self.assertEqual("getMore", started.command_name) self.assertEqual("pymongo_test", started.database_name) self.assertEqual("getMore", succeeded.command_name) @@ -1442,13 +1439,13 @@ def test_aggregate_raw_transaction(self): [{"$sort": {"_id": 1}}], session=session ) ) - cmd = listener.results["started"][0] + cmd = listener.started_events[0] self.assertEqual(cmd.command_name, "aggregate") self.assertIn("$clusterTime", cmd.command) self.assertEqual(cmd.command["startTransaction"], True) self.assertEqual(cmd.command["txnNumber"], 1) # Ensure we update $clusterTime from the command response. - last_cmd = listener.results["succeeded"][-1] + last_cmd = listener.succeeded_events[-1] self.assertEqual( last_cmd.reply["$clusterTime"]["clusterTime"], session.cluster_time["clusterTime"], @@ -1473,8 +1470,8 @@ def test_aggregate_raw_retryable_reads(self): self.assertEqual(1, len(batches)) self.assertEqual(docs, decode_all(batches[0])) - self.assertEqual(len(listener.results["started"]), 3) - cmds = listener.results["started"] + self.assertEqual(len(listener.started_events), 3) + cmds = listener.started_events self.assertEqual(cmds[0].command_name, "aggregate") self.assertEqual(cmds[1].command_name, "aggregate") @@ -1495,7 +1492,7 @@ def test_aggregate_raw_snapshot_reads(self): self.assertEqual(1, len(batches)) self.assertEqual(docs, decode_all(batches[0])) - find_cmd = listener.results["started"][1].command + find_cmd = listener.started_events[1].command self.assertEqual(find_cmd["readConcern"]["level"], "snapshot") self.assertIsNotNone(find_cmd["readConcern"]["atClusterTime"]) @@ -1536,13 +1533,13 @@ def test_monitoring(self): c.drop() c.insert_many([{"_id": i} for i in range(10)]) - listener.results.clear() + listener.reset() cursor = c.aggregate_raw_batches([{"$sort": {"_id": 1}}], batchSize=4) # Start cursor, no initial batch. - started = listener.results["started"][0] - succeeded = listener.results["succeeded"][0] - self.assertEqual(0, len(listener.results["failed"])) + started = listener.started_events[0] + succeeded = listener.succeeded_events[0] + self.assertEqual(0, len(listener.failed_events)) self.assertEqual("aggregate", started.command_name) self.assertEqual("pymongo_test", started.database_name) self.assertEqual("aggregate", succeeded.command_name) @@ -1551,15 +1548,14 @@ def test_monitoring(self): # First batch is empty. self.assertEqual(len(csr["firstBatch"]), 0) - listener.results.clear() + listener.reset() # Batches of 4 documents. 
n = 0 for batch in cursor: - results = listener.results - started = results["started"][0] - succeeded = results["succeeded"][0] - self.assertEqual(0, len(results["failed"])) + started = listener.started_events[0] + succeeded = listener.succeeded_events[0] + self.assertEqual(0, len(listener.failed_events)) self.assertEqual("getMore", started.command_name) self.assertEqual("pymongo_test", started.database_name) self.assertEqual("getMore", succeeded.command_name) @@ -1570,7 +1566,7 @@ def test_monitoring(self): self.assertEqual(decode_all(batch), [{"_id": i} for i in range(n, min(n + 4, 10))]) n += 4 - listener.results.clear() + listener.reset() if __name__ == "__main__": diff --git a/test/test_data_lake.py b/test/test_data_lake.py index fbf79994d3..4fa38435a3 100644 --- a/test/test_data_lake.py +++ b/test/test_data_lake.py @@ -62,16 +62,16 @@ def test_1(self): next(cursor) # find command assertions - find_cmd = listener.results["succeeded"][-1] + find_cmd = listener.succeeded_events[-1] self.assertEqual(find_cmd.command_name, "find") cursor_id = find_cmd.reply["cursor"]["id"] cursor_ns = find_cmd.reply["cursor"]["ns"] # killCursors command assertions cursor.close() - started = listener.results["started"][-1] + started = listener.started_events[-1] self.assertEqual(started.command_name, "killCursors") - succeeded = listener.results["succeeded"][-1] + succeeded = listener.succeeded_events[-1] self.assertEqual(succeeded.command_name, "killCursors") self.assertIn(cursor_id, started.command["cursors"]) diff --git a/test/test_database.py b/test/test_database.py index 49387b8bb9..b1b2999df4 100644 --- a/test/test_database.py +++ b/test/test_database.py @@ -193,7 +193,6 @@ def test_list_collection_names(self): def test_list_collection_names_filter(self): listener = OvertCommandListener() - results = listener.results client = rs_or_single_client(event_listeners=[listener]) db = client[self.db.name] db.capped.drop() @@ -204,24 +203,23 @@ def test_list_collection_names_filter(self): filter: Union[None, dict] # Should not send nameOnly. for filter in ({"options.capped": True}, {"options.capped": True, "name": "capped"}): - results.clear() + listener.reset() names = db.list_collection_names(filter=filter) self.assertEqual(names, ["capped"]) - self.assertNotIn("nameOnly", results["started"][0].command) + self.assertNotIn("nameOnly", listener.started_events[0].command) # Should send nameOnly (except on 2.6). 
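The test_data_lake.py and test_database.py hunks show the idiom this refactor
enables: assert against one typed list, call reset(), then run the next
operation. A self-contained usage sketch, assuming OvertCommandListener and
rs_or_single_client behave as the surrounding tests imply::

    listener = OvertCommandListener()
    client = rs_or_single_client(event_listeners=[listener])

    client.db.list_collection_names()
    assert listener.started_events[0].command_name == "listCollections"

    listener.reset()  # one call clears started/succeeded/failed
    assert not listener.started_events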
for filter in (None, {}, {"name": {"$in": ["capped", "non_capped"]}}): - results.clear() + listener.reset() names = db.list_collection_names(filter=filter) self.assertIn("capped", names) self.assertIn("non_capped", names) - command = results["started"][0].command + command = listener.started_events[0].command self.assertIn("nameOnly", command) self.assertTrue(command["nameOnly"]) def test_check_exists(self): listener = OvertCommandListener() - results = listener.results client = rs_or_single_client(event_listeners=[listener]) self.addCleanup(client.close) db = client[self.db.name] @@ -231,7 +229,7 @@ def test_check_exists(self): listener.reset() db.drop_collection("unique") db.create_collection("unique", check_exists=False) - self.assertTrue(len(results["started"]) > 0) + self.assertTrue(len(listener.started_events) > 0) self.assertNotIn("listCollections", listener.started_command_names()) def test_list_collections(self): diff --git a/test/test_encryption.py b/test/test_encryption.py index 6c54a90f7a..eaee22ebac 100644 --- a/test/test_encryption.py +++ b/test/test_encryption.py @@ -814,7 +814,7 @@ def run_test(self, provider_name): provider_name, master_key=master_key, key_alt_names=["%s_altname" % (provider_name,)] ) self.assertBinaryUUID(datakey_id) - cmd = self.listener.results["started"][-1] + cmd = self.listener.started_events[-1] self.assertEqual("insert", cmd.command_name) self.assertEqual({"w": "majority"}, cmd.command.get("writeConcern")) docs = list(self.vault.find({"_id": datakey_id})) @@ -1489,7 +1489,7 @@ def _test_automatic(self, expectation_extjson, payload): expected_document = json_util.loads(expectation_extjson, json_options=JSON_OPTS) coll.insert_one(payload) - event = insert_listener.results["started"][0] + event = insert_listener.started_events[0] inserted_doc = event.command["documents"][0] for key, value in expected_document.items(): @@ -1622,7 +1622,7 @@ def test_case_1(self): ), ) - cev = self.client_listener.results["started"] + cev = self.client_listener.started_events self.assertEqual(len(cev), 4) self.assertEqual(cev[0].command_name, "listCollections") self.assertEqual(cev[0].database_name, "db") @@ -1643,7 +1643,7 @@ def test_case_2(self): ), ) - cev = self.client_listener.results["started"] + cev = self.client_listener.started_events self.assertEqual(len(cev), 3) self.assertEqual(cev[0].command_name, "listCollections") self.assertEqual(cev[0].database_name, "db") @@ -1652,7 +1652,7 @@ def test_case_2(self): self.assertEqual(cev[2].command_name, "find") self.assertEqual(cev[2].database_name, "db") - cev = self.client_keyvault_listener.results["started"] + cev = self.client_keyvault_listener.started_events self.assertEqual(len(cev), 1) self.assertEqual(cev[0].command_name, "find") self.assertEqual(cev[0].database_name, "keyvault") @@ -1667,7 +1667,7 @@ def test_case_3(self): ), ) - cev = self.client_listener.results["started"] + cev = self.client_listener.started_events self.assertEqual(len(cev), 2) self.assertEqual(cev[0].command_name, "find") self.assertEqual(cev[0].database_name, "db") @@ -1684,12 +1684,12 @@ def test_case_4(self): ), ) - cev = self.client_listener.results["started"] + cev = self.client_listener.started_events self.assertEqual(len(cev), 1) self.assertEqual(cev[0].command_name, "find") self.assertEqual(cev[0].database_name, "db") - cev = self.client_keyvault_listener.results["started"] + cev = self.client_keyvault_listener.started_events self.assertEqual(len(cev), 1) self.assertEqual(cev[0].command_name, "find") 
self.assertEqual(cev[0].database_name, "keyvault") @@ -1704,7 +1704,7 @@ def test_case_5(self): ), ) - cev = self.client_listener.results["started"] + cev = self.client_listener.started_events self.assertEqual(len(cev), 5) self.assertEqual(cev[0].command_name, "listCollections") self.assertEqual(cev[0].database_name, "db") @@ -1727,7 +1727,7 @@ def test_case_6(self): ), ) - cev = self.client_listener.results["started"] + cev = self.client_listener.started_events self.assertEqual(len(cev), 3) self.assertEqual(cev[0].command_name, "listCollections") self.assertEqual(cev[0].database_name, "db") @@ -1736,7 +1736,7 @@ def test_case_6(self): self.assertEqual(cev[2].command_name, "find") self.assertEqual(cev[2].database_name, "db") - cev = self.client_keyvault_listener.results["started"] + cev = self.client_keyvault_listener.started_events self.assertEqual(len(cev), 1) self.assertEqual(cev[0].command_name, "find") self.assertEqual(cev[0].database_name, "keyvault") @@ -1751,7 +1751,7 @@ def test_case_7(self): ), ) - cev = self.client_listener.results["started"] + cev = self.client_listener.started_events self.assertEqual(len(cev), 2) self.assertEqual(cev[0].command_name, "find") self.assertEqual(cev[0].database_name, "db") @@ -1768,12 +1768,12 @@ def test_case_8(self): ), ) - cev = self.client_listener.results["started"] + cev = self.client_listener.started_events self.assertEqual(len(cev), 1) self.assertEqual(cev[0].command_name, "find") self.assertEqual(cev[0].database_name, "db") - cev = self.client_keyvault_listener.results["started"] + cev = self.client_keyvault_listener.started_events self.assertEqual(len(cev), 1) self.assertEqual(cev[0].command_name, "find") self.assertEqual(cev[0].database_name, "keyvault") @@ -1821,8 +1821,8 @@ def test_01_command_error(self): ): with self.assertRaises(OperationFailure): self.encrypted_client.db.decryption_events.aggregate([]) - self.assertEqual(len(self.listener.results["failed"]), 1) - for event in self.listener.results["failed"]: + self.assertEqual(len(self.listener.failed_events), 1) + for event in self.listener.failed_events: self.assertEqual(event.failure["code"], 123) def test_02_network_error(self): @@ -1834,8 +1834,8 @@ def test_02_network_error(self): ): with self.assertRaises(AutoReconnect): self.encrypted_client.db.decryption_events.aggregate([]) - self.assertEqual(len(self.listener.results["failed"]), 1) - self.assertEqual(self.listener.results["failed"][0].command_name, "aggregate") + self.assertEqual(len(self.listener.failed_events), 1) + self.assertEqual(self.listener.failed_events[0].command_name, "aggregate") def test_03_decrypt_error(self): self.encrypted_client.db.decryption_events.insert_one( @@ -1843,8 +1843,8 @@ def test_03_decrypt_error(self): ) with self.assertRaises(EncryptionError): next(self.encrypted_client.db.decryption_events.aggregate([])) - event = self.listener.results["succeeded"][0] - self.assertEqual(len(self.listener.results["failed"]), 0) + event = self.listener.succeeded_events[0] + self.assertEqual(len(self.listener.failed_events), 0) self.assertEqual( event.reply["cursor"]["firstBatch"][0]["encrypted"], self.malformed_cipher_text ) @@ -1852,8 +1852,8 @@ def test_03_decrypt_error(self): def test_04_decrypt_success(self): self.encrypted_client.db.decryption_events.insert_one({"encrypted": self.cipher_text}) next(self.encrypted_client.db.decryption_events.aggregate([])) - event = self.listener.results["succeeded"][0] - self.assertEqual(len(self.listener.results["failed"]), 0) + event = 
self.listener.succeeded_events[0] + self.assertEqual(len(self.listener.failed_events), 0) self.assertEqual(event.reply["cursor"]["firstBatch"][0]["encrypted"], self.cipher_text) diff --git a/test/test_monitoring.py b/test/test_monitoring.py index 0b8200c019..ffa535eeed 100644 --- a/test/test_monitoring.py +++ b/test/test_monitoring.py @@ -49,15 +49,14 @@ def tearDownClass(cls): super(TestCommandMonitoring, cls).tearDownClass() def tearDown(self): - self.listener.results.clear() + self.listener.reset() super(TestCommandMonitoring, self).tearDown() def test_started_simple(self): self.client.pymongo_test.command("ping") - results = self.listener.results - started = results["started"][0] - succeeded = results["succeeded"][0] - self.assertEqual(0, len(results["failed"])) + started = self.listener.started_events[0] + succeeded = self.listener.succeeded_events[0] + self.assertEqual(0, len(self.listener.failed_events)) self.assertTrue(isinstance(succeeded, monitoring.CommandSucceededEvent)) self.assertTrue(isinstance(started, monitoring.CommandStartedEvent)) self.assertEqualCommand(SON([("ping", 1)]), started.command) @@ -68,10 +67,9 @@ def test_started_simple(self): def test_succeeded_simple(self): self.client.pymongo_test.command("ping") - results = self.listener.results - started = results["started"][0] - succeeded = results["succeeded"][0] - self.assertEqual(0, len(results["failed"])) + started = self.listener.started_events[0] + succeeded = self.listener.succeeded_events[0] + self.assertEqual(0, len(self.listener.failed_events)) self.assertTrue(isinstance(started, monitoring.CommandStartedEvent)) self.assertTrue(isinstance(succeeded, monitoring.CommandSucceededEvent)) self.assertEqual("ping", succeeded.command_name) @@ -85,10 +83,9 @@ def test_failed_simple(self): self.client.pymongo_test.command("oops!") except OperationFailure: pass - results = self.listener.results - started = results["started"][0] - failed = results["failed"][0] - self.assertEqual(0, len(results["succeeded"])) + started = self.listener.started_events[0] + failed = self.listener.failed_events[0] + self.assertEqual(0, len(self.listener.succeeded_events)) self.assertTrue(isinstance(started, monitoring.CommandStartedEvent)) self.assertTrue(isinstance(failed, monitoring.CommandFailedEvent)) self.assertEqual("oops!", failed.command_name) @@ -99,10 +96,9 @@ def test_failed_simple(self): def test_find_one(self): self.client.pymongo_test.test.find_one() - results = self.listener.results - started = results["started"][0] - succeeded = results["succeeded"][0] - self.assertEqual(0, len(results["failed"])) + started = self.listener.started_events[0] + succeeded = self.listener.succeeded_events[0] + self.assertEqual(0, len(self.listener.failed_events)) self.assertTrue(isinstance(succeeded, monitoring.CommandSucceededEvent)) self.assertTrue(isinstance(started, monitoring.CommandStartedEvent)) self.assertEqualCommand( @@ -117,15 +113,14 @@ def test_find_one(self): def test_find_and_get_more(self): self.client.pymongo_test.test.drop() self.client.pymongo_test.test.insert_many([{} for _ in range(10)]) - self.listener.results.clear() + self.listener.reset() cursor = self.client.pymongo_test.test.find(projection={"_id": False}, batch_size=4) for _ in range(4): next(cursor) cursor_id = cursor.cursor_id - results = self.listener.results - started = results["started"][0] - succeeded = results["succeeded"][0] - self.assertEqual(0, len(results["failed"])) + started = self.listener.started_events[0] + succeeded = self.listener.succeeded_events[0] 
+ self.assertEqual(0, len(self.listener.failed_events)) self.assertTrue(isinstance(started, monitoring.CommandStartedEvent)) self.assertEqualCommand( SON( @@ -147,15 +142,14 @@ def test_find_and_get_more(self): self.assertEqual(csr["ns"], "pymongo_test.test") self.assertEqual(csr["firstBatch"], [{} for _ in range(4)]) - self.listener.results.clear() + self.listener.reset() # Next batch. Exhausting the cursor could cause a getMore # that returns id of 0 and no results. next(cursor) try: - results = self.listener.results - started = results["started"][0] - succeeded = results["succeeded"][0] - self.assertEqual(0, len(results["failed"])) + started = self.listener.started_events[0] + succeeded = self.listener.succeeded_events[0] + self.assertEqual(0, len(self.listener.failed_events)) self.assertTrue(isinstance(started, monitoring.CommandStartedEvent)) self.assertEqualCommand( SON([("getMore", cursor_id), ("collection", "test"), ("batchSize", 4)]), @@ -182,16 +176,15 @@ def test_find_with_explain(self): cmd = SON([("explain", SON([("find", "test"), ("filter", {})]))]) self.client.pymongo_test.test.drop() self.client.pymongo_test.test.insert_one({}) - self.listener.results.clear() + self.listener.reset() coll = self.client.pymongo_test.test # Test that we publish the unwrapped command. if self.client.is_mongos: coll = coll.with_options(read_preference=ReadPreference.PRIMARY_PREFERRED) res = coll.find().explain() - results = self.listener.results - started = results["started"][0] - succeeded = results["succeeded"][0] - self.assertEqual(0, len(results["failed"])) + started = self.listener.started_events[0] + succeeded = self.listener.succeeded_events[0] + self.assertEqual(0, len(self.listener.failed_events)) self.assertTrue(isinstance(started, monitoring.CommandStartedEvent)) self.assertEqualCommand(cmd, started.command) self.assertEqual("explain", started.command_name) @@ -212,7 +205,7 @@ def _test_find_options(self, query, expected_cmd): coll.insert_many([{"x": i} for i in range(5)]) # Test that we publish the unwrapped command. - self.listener.results.clear() + self.listener.reset() if self.client.is_mongos: coll = coll.with_options(read_preference=ReadPreference.PRIMARY_PREFERRED) @@ -220,10 +213,9 @@ def _test_find_options(self, query, expected_cmd): next(cursor) try: - results = self.listener.results - started = results["started"][0] - succeeded = results["succeeded"][0] - self.assertEqual(0, len(results["failed"])) + started = self.listener.started_events[0] + succeeded = self.listener.succeeded_events[0] + self.assertEqual(0, len(self.listener.failed_events)) self.assertTrue(isinstance(started, monitoring.CommandStartedEvent)) self.assertEqualCommand(expected_cmd, started.command) self.assertEqual("find", started.command_name) @@ -293,7 +285,7 @@ def test_find_snapshot(self): def test_command_and_get_more(self): self.client.pymongo_test.test.drop() self.client.pymongo_test.test.insert_many([{"x": 1} for _ in range(10)]) - self.listener.results.clear() + self.listener.reset() coll = self.client.pymongo_test.test # Test that we publish the unwrapped command. 
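The assertions in these test_monitoring.py hunks read the public attributes of
the pymongo.monitoring event classes. To summarize the fields the tests rely
on, a hedged inspection sketch (client and listener wiring as sketched
earlier)::

    client.pymongo_test.test.find_one()

    started = listener.started_events[0]
    started.command        # the command document sent, e.g. SON([("find", "test"), ...])
    started.command_name   # "find"
    started.database_name  # "pymongo_test"
    started.request_id     # int; matches the paired succeeded event
    started.connection_id  # (host, port) address the command went to

    succeeded = listener.succeeded_events[0]
    succeeded.reply            # the server's reply document
    succeeded.duration_micros  # int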
if self.client.is_mongos: @@ -302,10 +294,9 @@ def test_command_and_get_more(self): for _ in range(4): next(cursor) cursor_id = cursor.cursor_id - results = self.listener.results - started = results["started"][0] - succeeded = results["succeeded"][0] - self.assertEqual(0, len(results["failed"])) + started = self.listener.started_events[0] + succeeded = self.listener.succeeded_events[0] + self.assertEqual(0, len(self.listener.failed_events)) self.assertTrue(isinstance(started, monitoring.CommandStartedEvent)) self.assertEqualCommand( SON( @@ -333,13 +324,12 @@ def test_command_and_get_more(self): } self.assertEqualCommand(expected_cursor, succeeded.reply.get("cursor")) - self.listener.results.clear() + self.listener.reset() next(cursor) try: - results = self.listener.results - started = results["started"][0] - succeeded = results["succeeded"][0] - self.assertEqual(0, len(results["failed"])) + started = self.listener.started_events[0] + succeeded = self.listener.succeeded_events[0] + self.assertEqual(0, len(self.listener.failed_events)) self.assertTrue(isinstance(started, monitoring.CommandStartedEvent)) self.assertEqualCommand( SON([("getMore", cursor_id), ("collection", "test"), ("batchSize", 4)]), @@ -377,10 +367,9 @@ def test_get_more_failure(self): next(cursor) except Exception: pass - results = self.listener.results - started = results["started"][0] - self.assertEqual(0, len(results["succeeded"])) - failed = results["failed"][0] + started = self.listener.started_events[0] + self.assertEqual(0, len(self.listener.succeeded_events)) + failed = self.listener.failed_events[0] self.assertTrue(isinstance(started, monitoring.CommandStartedEvent)) self.assertEqualCommand( SON([("getMore", cursor_id), ("collection", "test")]), started.command @@ -403,16 +392,15 @@ def test_not_primary_error(self): client = single_client(*address, event_listeners=[self.listener]) # Clear authentication command results from the listener. 
client.admin.command("ping") - self.listener.results.clear() + self.listener.reset() error = None try: client.pymongo_test.test.find_one_and_delete({}) except NotPrimaryError as exc: error = exc.errors - results = self.listener.results - started = results["started"][0] - failed = results["failed"][0] - self.assertEqual(0, len(results["succeeded"])) + started = self.listener.started_events[0] + failed = self.listener.failed_events[0] + self.assertEqual(0, len(self.listener.succeeded_events)) self.assertTrue(isinstance(started, monitoring.CommandStartedEvent)) self.assertTrue(isinstance(failed, monitoring.CommandFailedEvent)) self.assertEqual("findAndModify", failed.command_name) @@ -426,16 +414,15 @@ def test_not_primary_error(self): def test_exhaust(self): self.client.pymongo_test.test.drop() self.client.pymongo_test.test.insert_many([{} for _ in range(11)]) - self.listener.results.clear() + self.listener.reset() cursor = self.client.pymongo_test.test.find( projection={"_id": False}, batch_size=5, cursor_type=CursorType.EXHAUST ) next(cursor) cursor_id = cursor.cursor_id - results = self.listener.results - started = results["started"][0] - succeeded = results["succeeded"][0] - self.assertEqual(0, len(results["failed"])) + started = self.listener.started_events[0] + succeeded = self.listener.succeeded_events[0] + self.assertEqual(0, len(self.listener.failed_events)) self.assertTrue(isinstance(started, monitoring.CommandStartedEvent)) self.assertEqualCommand( SON( @@ -462,11 +449,10 @@ def test_exhaust(self): } self.assertEqualReply(expected_result, succeeded.reply) - self.listener.results.clear() + self.listener.reset() tuple(cursor) - results = self.listener.results - self.assertEqual(0, len(results["failed"])) - for event in results["started"]: + self.assertEqual(0, len(self.listener.failed_events)) + for event in self.listener.started_events: self.assertTrue(isinstance(event, monitoring.CommandStartedEvent)) self.assertEqualCommand( SON([("getMore", cursor_id), ("collection", "test"), ("batchSize", 5)]), @@ -476,14 +462,14 @@ def test_exhaust(self): self.assertEqual(cursor.address, event.connection_id) self.assertEqual("pymongo_test", event.database_name) self.assertTrue(isinstance(event.request_id, int)) - for event in results["succeeded"]: + for event in self.listener.succeeded_events: self.assertTrue(isinstance(event, monitoring.CommandSucceededEvent)) self.assertTrue(isinstance(event.duration_micros, int)) self.assertEqual("getMore", event.command_name) self.assertTrue(isinstance(event.request_id, int)) self.assertEqual(cursor.address, event.connection_id) # Last getMore receives a response with cursor id 0. 
- self.assertEqual(0, results["succeeded"][-1].reply["cursor"]["id"]) + self.assertEqual(0, self.listener.succeeded_events[-1].reply["cursor"]["id"]) def test_kill_cursors(self): with client_knobs(kill_cursor_frequency=0.01): @@ -492,13 +478,12 @@ def test_kill_cursors(self): cursor = self.client.pymongo_test.test.find().batch_size(5) next(cursor) cursor_id = cursor.cursor_id - self.listener.results.clear() + self.listener.reset() cursor.close() time.sleep(2) - results = self.listener.results - started = results["started"][0] - succeeded = results["succeeded"][0] - self.assertEqual(0, len(results["failed"])) + started = self.listener.started_events[0] + succeeded = self.listener.succeeded_events[0] + self.assertEqual(0, len(self.listener.failed_events)) self.assertTrue(isinstance(started, monitoring.CommandStartedEvent)) # There could be more than one cursor_id here depending on # when the thread last ran. @@ -524,14 +509,13 @@ def test_kill_cursors(self): def test_non_bulk_writes(self): coll = self.client.pymongo_test.test coll.drop() - self.listener.results.clear() + self.listener.reset() # Implied write concern insert_one res = coll.insert_one({"x": 1}) - results = self.listener.results - started = results["started"][0] - succeeded = results["succeeded"][0] - self.assertEqual(0, len(results["failed"])) + started = self.listener.started_events[0] + succeeded = self.listener.succeeded_events[0] + self.assertEqual(0, len(self.listener.failed_events)) self.assertIsInstance(started, monitoring.CommandStartedEvent) expected = SON( [ @@ -555,13 +539,12 @@ def test_non_bulk_writes(self): self.assertEqual(1, reply.get("n")) # Unacknowledged insert_one - self.listener.results.clear() + self.listener.reset() coll = coll.with_options(write_concern=WriteConcern(w=0)) res = coll.insert_one({"x": 1}) - results = self.listener.results - started = results["started"][0] - succeeded = results["succeeded"][0] - self.assertEqual(0, len(results["failed"])) + started = self.listener.started_events[0] + succeeded = self.listener.succeeded_events[0] + self.assertEqual(0, len(self.listener.failed_events)) self.assertIsInstance(started, monitoring.CommandStartedEvent) expected = SON( [ @@ -584,13 +567,12 @@ def test_non_bulk_writes(self): self.assertEqualReply(succeeded.reply, {"ok": 1}) # Explicit write concern insert_one - self.listener.results.clear() + self.listener.reset() coll = coll.with_options(write_concern=WriteConcern(w=1)) res = coll.insert_one({"x": 1}) - results = self.listener.results - started = results["started"][0] - succeeded = results["succeeded"][0] - self.assertEqual(0, len(results["failed"])) + started = self.listener.started_events[0] + succeeded = self.listener.succeeded_events[0] + self.assertEqual(0, len(self.listener.failed_events)) self.assertIsInstance(started, monitoring.CommandStartedEvent) expected = SON( [ @@ -615,12 +597,11 @@ def test_non_bulk_writes(self): self.assertEqual(1, reply.get("n")) # delete_many - self.listener.results.clear() + self.listener.reset() res = coll.delete_many({"x": 1}) - results = self.listener.results - started = results["started"][0] - succeeded = results["succeeded"][0] - self.assertEqual(0, len(results["failed"])) + started = self.listener.started_events[0] + succeeded = self.listener.succeeded_events[0] + self.assertEqual(0, len(self.listener.failed_events)) self.assertIsInstance(started, monitoring.CommandStartedEvent) expected = SON( [ @@ -645,13 +626,12 @@ def test_non_bulk_writes(self): self.assertEqual(res.deleted_count, reply.get("n")) # 
replace_one - self.listener.results.clear() + self.listener.reset() oid = ObjectId() res = coll.replace_one({"_id": oid}, {"_id": oid, "x": 1}, upsert=True) - results = self.listener.results - started = results["started"][0] - succeeded = results["succeeded"][0] - self.assertEqual(0, len(results["failed"])) + started = self.listener.started_events[0] + succeeded = self.listener.succeeded_events[0] + self.assertEqual(0, len(self.listener.failed_events)) self.assertIsInstance(started, monitoring.CommandStartedEvent) expected = SON( [ @@ -689,12 +669,11 @@ def test_non_bulk_writes(self): self.assertEqual([{"index": 0, "_id": oid}], reply.get("upserted")) # update_one - self.listener.results.clear() + self.listener.reset() res = coll.update_one({"x": 1}, {"$inc": {"x": 1}}) - results = self.listener.results - started = results["started"][0] - succeeded = results["succeeded"][0] - self.assertEqual(0, len(results["failed"])) + started = self.listener.started_events[0] + succeeded = self.listener.succeeded_events[0] + self.assertEqual(0, len(self.listener.failed_events)) self.assertIsInstance(started, monitoring.CommandStartedEvent) expected = SON( [ @@ -731,12 +710,11 @@ def test_non_bulk_writes(self): self.assertEqual(1, reply.get("n")) # update_many - self.listener.results.clear() + self.listener.reset() res = coll.update_many({"x": 2}, {"$inc": {"x": 1}}) - results = self.listener.results - started = results["started"][0] - succeeded = results["succeeded"][0] - self.assertEqual(0, len(results["failed"])) + started = self.listener.started_events[0] + succeeded = self.listener.succeeded_events[0] + self.assertEqual(0, len(self.listener.failed_events)) self.assertIsInstance(started, monitoring.CommandStartedEvent) expected = SON( [ @@ -773,12 +751,11 @@ def test_non_bulk_writes(self): self.assertEqual(1, reply.get("n")) # delete_one - self.listener.results.clear() + self.listener.reset() _ = coll.delete_one({"x": 3}) - results = self.listener.results - started = results["started"][0] - succeeded = results["succeeded"][0] - self.assertEqual(0, len(results["failed"])) + started = self.listener.started_events[0] + succeeded = self.listener.succeeded_events[0] + self.assertEqual(0, len(self.listener.failed_events)) self.assertIsInstance(started, monitoring.CommandStartedEvent) expected = SON( [ @@ -807,14 +784,13 @@ def test_non_bulk_writes(self): # write errors coll.insert_one({"_id": 1}) try: - self.listener.results.clear() + self.listener.reset() coll.insert_one({"_id": 1}) except OperationFailure: pass - results = self.listener.results - started = results["started"][0] - succeeded = results["succeeded"][0] - self.assertEqual(0, len(results["failed"])) + started = self.listener.started_events[0] + succeeded = self.listener.succeeded_events[0] + self.assertEqual(0, len(self.listener.failed_events)) self.assertIsInstance(started, monitoring.CommandStartedEvent) expected = SON( [ @@ -848,15 +824,14 @@ def test_insert_many(self): # This always uses the bulk API. 
coll = self.client.pymongo_test.test coll.drop() - self.listener.results.clear() + self.listener.reset() big = "x" * (1024 * 1024 * 4) docs = [{"_id": i, "big": big} for i in range(6)] coll.insert_many(docs) - results = self.listener.results - started = results["started"] - succeeded = results["succeeded"] - self.assertEqual(0, len(results["failed"])) + started = self.listener.started_events + succeeded = self.listener.succeeded_events + self.assertEqual(0, len(self.listener.failed_events)) documents = [] count = 0 operation_id = started[0].operation_id @@ -889,16 +864,15 @@ def test_insert_many_unacknowledged(self): coll = self.client.pymongo_test.test coll.drop() unack_coll = coll.with_options(write_concern=WriteConcern(w=0)) - self.listener.results.clear() + self.listener.reset() # Force two batches on legacy servers. big = "x" * (1024 * 1024 * 12) docs = [{"_id": i, "big": big} for i in range(6)] unack_coll.insert_many(docs) - results = self.listener.results - started = results["started"] - succeeded = results["succeeded"] - self.assertEqual(0, len(results["failed"])) + started = self.listener.started_events + succeeded = self.listener.succeeded_events + self.assertEqual(0, len(self.listener.failed_events)) documents = [] operation_id = started[0].operation_id self.assertIsInstance(operation_id, int) @@ -928,7 +902,7 @@ def test_insert_many_unacknowledged(self): def test_bulk_write(self): coll = self.client.pymongo_test.test coll.drop() - self.listener.results.clear() + self.listener.reset() coll.bulk_write( [ @@ -937,10 +911,9 @@ def test_bulk_write(self): DeleteOne({"_id": 1}), ] ) - results = self.listener.results - started = results["started"] - succeeded = results["succeeded"] - self.assertEqual(0, len(results["failed"])) + started = self.listener.started_events + succeeded = self.listener.succeeded_events + self.assertEqual(0, len(self.listener.failed_events)) operation_id = started[0].operation_id pairs = list(zip(started, succeeded)) self.assertEqual(3, len(pairs)) @@ -991,7 +964,7 @@ def test_bulk_write(self): @client_context.require_failCommand_fail_point def test_bulk_write_command_network_error(self): coll = self.client.pymongo_test.test - self.listener.results.clear() + self.listener.reset() insert_network_error = { "configureFailPoint": "failCommand", @@ -1004,7 +977,7 @@ def test_bulk_write_command_network_error(self): with self.fail_point(insert_network_error): with self.assertRaises(AutoReconnect): coll.bulk_write([InsertOne({"_id": 1})]) - failed = self.listener.results["failed"] + failed = self.listener.failed_events self.assertEqual(1, len(failed)) event = failed[0] self.assertEqual(event.command_name, "insert") @@ -1015,7 +988,7 @@ def test_bulk_write_command_network_error(self): @client_context.require_failCommand_fail_point def test_bulk_write_command_error(self): coll = self.client.pymongo_test.test - self.listener.results.clear() + self.listener.reset() insert_command_error = { "configureFailPoint": "failCommand", @@ -1029,7 +1002,7 @@ def test_bulk_write_command_error(self): with self.fail_point(insert_command_error): with self.assertRaises(NotPrimaryError): coll.bulk_write([InsertOne({"_id": 1})]) - failed = self.listener.results["failed"] + failed = self.listener.failed_events self.assertEqual(1, len(failed)) event = failed[0] self.assertEqual(event.command_name, "insert") @@ -1040,7 +1013,7 @@ def test_bulk_write_command_error(self): def test_write_errors(self): coll = self.client.pymongo_test.test coll.drop() - self.listener.results.clear() + 
self.listener.reset() try: coll.bulk_write( @@ -1054,10 +1027,9 @@ def test_write_errors(self): ) except OperationFailure: pass - results = self.listener.results - started = results["started"] - succeeded = results["succeeded"] - self.assertEqual(0, len(results["failed"])) + started = self.listener.started_events + succeeded = self.listener.succeeded_events + self.assertEqual(0, len(self.listener.failed_events)) operation_id = started[0].operation_id pairs = list(zip(started, succeeded)) errors = [] @@ -1084,12 +1056,11 @@ def test_write_errors(self): def test_first_batch_helper(self): # Regardless of server version and use of helpers._first_batch # this test should still pass. - self.listener.results.clear() + self.listener.reset() tuple(self.client.pymongo_test.test.list_indexes()) - results = self.listener.results - started = results["started"][0] - succeeded = results["succeeded"][0] - self.assertEqual(0, len(results["failed"])) + started = self.listener.started_events[0] + succeeded = self.listener.succeeded_events[0] + self.assertEqual(0, len(self.listener.failed_events)) self.assertIsInstance(started, monitoring.CommandStartedEvent) expected = SON([("listIndexes", "test"), ("cursor", {})]) self.assertEqualCommand(expected, started.command) @@ -1105,22 +1076,21 @@ def test_first_batch_helper(self): self.assertTrue("cursor" in succeeded.reply) self.assertTrue("ok" in succeeded.reply) - self.listener.results.clear() + self.listener.reset() def test_sensitive_commands(self): listeners = self.client._event_listeners - self.listener.results.clear() + self.listener.reset() cmd = SON([("getnonce", 1)]) listeners.publish_command_start(cmd, "pymongo_test", 12345, self.client.address) delta = datetime.timedelta(milliseconds=100) listeners.publish_command_success( delta, {"nonce": "e474f4561c5eb40b", "ok": 1.0}, "getnonce", 12345, self.client.address ) - results = self.listener.results - started = results["started"][0] - succeeded = results["succeeded"][0] - self.assertEqual(0, len(results["failed"])) + started = self.listener.started_events[0] + succeeded = self.listener.succeeded_events[0] + self.assertEqual(0, len(self.listener.failed_events)) self.assertIsInstance(started, monitoring.CommandStartedEvent) self.assertEqual({}, started.command) self.assertEqual("pymongo_test", started.database_name) @@ -1159,14 +1129,13 @@ def tearDownClass(cls): def setUp(self): super(TestGlobalListener, self).setUp() - self.listener.results.clear() + self.listener.reset() def test_simple(self): self.client.pymongo_test.command("ping") - results = self.listener.results - started = results["started"][0] - succeeded = results["succeeded"][0] - self.assertEqual(0, len(results["failed"])) + started = self.listener.started_events[0] + succeeded = self.listener.succeeded_events[0] + self.assertEqual(0, len(self.listener.failed_events)) self.assertTrue(isinstance(succeeded, monitoring.CommandSucceededEvent)) self.assertTrue(isinstance(started, monitoring.CommandStartedEvent)) self.assertEqualCommand(SON([("ping", 1)]), started.command) diff --git a/test/test_read_concern.py b/test/test_read_concern.py index d5df682fba..3a1c8f3a54 100644 --- a/test/test_read_concern.py +++ b/test/test_read_concern.py @@ -14,6 +14,11 @@ """Test the read_concern module.""" +import sys +import unittest + +sys.path[0:0] = [""] + from test import IntegrationTest, client_context from test.utils import OvertCommandListener, rs_or_single_client, single_client @@ -41,7 +46,7 @@ def tearDownClass(cls): super(TestReadConcern, 
cls).tearDownClass() def tearDown(self): - self.listener.results.clear() + self.listener.reset() super(TestReadConcern, self).tearDown() def test_read_concern(self): @@ -74,9 +79,9 @@ def test_find_command(self): # readConcern not sent in command if not specified. coll = self.db.coll tuple(coll.find({"field": "value"})) - self.assertNotIn("readConcern", self.listener.results["started"][0].command) + self.assertNotIn("readConcern", self.listener.started_events[0].command) - self.listener.results.clear() + self.listener.reset() # Explicitly set readConcern to 'local'. coll = self.db.get_collection("coll", read_concern=ReadConcern("local")) @@ -89,23 +94,21 @@ def test_find_command(self): ("readConcern", {"level": "local"}), ] ), - self.listener.results["started"][0].command, + self.listener.started_events[0].command, ) def test_command_cursor(self): # readConcern not sent in command if not specified. coll = self.db.coll tuple(coll.aggregate([{"$match": {"field": "value"}}])) - self.assertNotIn("readConcern", self.listener.results["started"][0].command) + self.assertNotIn("readConcern", self.listener.started_events[0].command) - self.listener.results.clear() + self.listener.reset() # Explicitly set readConcern to 'local'. coll = self.db.get_collection("coll", read_concern=ReadConcern("local")) tuple(coll.aggregate([{"$match": {"field": "value"}}])) - self.assertEqual( - {"level": "local"}, self.listener.results["started"][0].command["readConcern"] - ) + self.assertEqual({"level": "local"}, self.listener.started_events[0].command["readConcern"]) def test_aggregate_out(self): coll = self.db.get_collection("coll", read_concern=ReadConcern("local")) @@ -113,6 +116,10 @@ def test_aggregate_out(self): # Aggregate with $out supports readConcern MongoDB 4.2 onwards. 
if client_context.version >= (4, 1): - self.assertIn("readConcern", self.listener.results["started"][0].command) + self.assertIn("readConcern", self.listener.started_events[0].command) else: - self.assertNotIn("readConcern", self.listener.results["started"][0].command) + self.assertNotIn("readConcern", self.listener.started_events[0].command) + + +if __name__ == "__main__": + unittest.main() diff --git a/test/test_read_preferences.py b/test/test_read_preferences.py index ae2fa8bcee..1362623dff 100644 --- a/test/test_read_preferences.py +++ b/test/test_read_preferences.py @@ -541,7 +541,7 @@ def test_send_hedge(self): coll = client.test.get_collection("test", read_preference=pref) listener.reset() coll.find_one() - started = listener.results["started"] + started = listener.started_events self.assertEqual(len(started), 1, started) cmd = started[0].command if client_context.is_rs or client_context.is_mongos: diff --git a/test/test_read_write_concern_spec.py b/test/test_read_write_concern_spec.py index 4dfc8f068c..5cc4845e32 100644 --- a/test/test_read_write_concern_spec.py +++ b/test/test_read_write_concern_spec.py @@ -85,11 +85,11 @@ def insert_command_default_write_concern(): ] for name, f in ops: - listener.results.clear() + listener.reset() f() - self.assertGreaterEqual(len(listener.results["started"]), 1) - for i, event in enumerate(listener.results["started"]): + self.assertGreaterEqual(len(listener.started_events), 1) + for i, event in enumerate(listener.started_events): self.assertNotIn( "readConcern", event.command, @@ -221,7 +221,7 @@ def test_write_error_details_exposes_errinfo(self): self.assertIsNotNone(ctx.exception.details) assert ctx.exception.details is not None self.assertIsNotNone(ctx.exception.details.get("errInfo")) - for event in listener.results["succeeded"]: + for event in listener.succeeded_events: if event.command_name == "insert": self.assertEqual(event.reply["writeErrors"][0], ctx.exception.details) break diff --git a/test/test_retryable_reads.py b/test/test_retryable_reads.py index 2b8bc17c58..517e1122b0 100644 --- a/test/test_retryable_reads.py +++ b/test/test_retryable_reads.py @@ -208,12 +208,12 @@ def test_pool_paused_error_is_retryable(self): # Connection check out failures are not reflected in command # monitoring because we only publish command events _after_ checking # out a connection. 
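The retryable tests above, like the earlier bulk-write failure tests, drive
errors with the server's failCommand fail point; the document shape below is
copied from those hunks, while the client wiring is an assumption (the server
must be started with test commands enabled)::

    fail_insert_once = {
        "configureFailPoint": "failCommand",
        "mode": {"times": 1},
        "data": {"failCommands": ["insert"], "closeConnection": True},
    }
    client.admin.command(fail_insert_once)  # arm: next insert drops the connection
    try:
        # With retryable writes enabled, the first attempt hits the fail
        # point and the retry should succeed.
        coll.insert_one({"_id": 1})
    finally:
        client.admin.command({"configureFailPoint": "failCommand", "mode": "off"})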
- started = cmd_listener.results["started"] + started = cmd_listener.started_events msg = pprint.pformat(cmd_listener.results) self.assertEqual(3, len(started), msg) - succeeded = cmd_listener.results["succeeded"] + succeeded = cmd_listener.succeeded_events self.assertEqual(2, len(succeeded), msg) - failed = cmd_listener.results["failed"] + failed = cmd_listener.failed_events self.assertEqual(1, len(failed), msg) diff --git a/test/test_retryable_writes.py b/test/test_retryable_writes.py index 8d556b90ae..7ca1c9c1ef 100644 --- a/test/test_retryable_writes.py +++ b/test/test_retryable_writes.py @@ -227,9 +227,9 @@ def test_supported_single_statement_no_retry(self): self.addCleanup(client.close) for method, args, kwargs in retryable_single_statement_ops(client.db.retryable_write_test): msg = "%s(*%r, **%r)" % (method.__name__, args, kwargs) - listener.results.clear() + listener.reset() method(*args, **kwargs) - for event in listener.results["started"]: + for event in listener.started_events: self.assertNotIn( "txnNumber", event.command, @@ -240,10 +240,10 @@ def test_supported_single_statement_no_retry(self): def test_supported_single_statement_supported_cluster(self): for method, args, kwargs in retryable_single_statement_ops(self.db.retryable_write_test): msg = "%s(*%r, **%r)" % (method.__name__, args, kwargs) - self.listener.results.clear() + self.listener.reset() method(*args, **kwargs) - commands_started = self.listener.results["started"] - self.assertEqual(len(self.listener.results["succeeded"]), 1, msg) + commands_started = self.listener.started_events + self.assertEqual(len(self.listener.succeeded_events), 1, msg) first_attempt = commands_started[0] self.assertIn( "lsid", @@ -283,10 +283,10 @@ def test_supported_single_statement_unsupported_cluster(self): for method, args, kwargs in retryable_single_statement_ops(self.db.retryable_write_test): msg = "%s(*%r, **%r)" % (method.__name__, args, kwargs) - self.listener.results.clear() + self.listener.reset() method(*args, **kwargs) - for event in self.listener.results["started"]: + for event in self.listener.started_events: self.assertNotIn( "txnNumber", event.command, @@ -301,11 +301,11 @@ def test_unsupported_single_statement(self): coll ) + retryable_single_statement_ops(coll_w0): msg = "%s(*%r, **%r)" % (method.__name__, args, kwargs) - self.listener.results.clear() + self.listener.reset() method(*args, **kwargs) - started_events = self.listener.results["started"] - self.assertEqual(len(self.listener.results["succeeded"]), len(started_events), msg) - self.assertEqual(len(self.listener.results["failed"]), 0, msg) + started_events = self.listener.started_events + self.assertEqual(len(self.listener.succeeded_events), len(started_events), msg) + self.assertEqual(len(self.listener.failed_events), 0, msg) for event in started_events: self.assertNotIn( "txnNumber", @@ -324,10 +324,10 @@ def test_server_selection_timeout_not_retried(self): ) for method, args, kwargs in retryable_single_statement_ops(client.db.retryable_write_test): msg = "%s(*%r, **%r)" % (method.__name__, args, kwargs) - listener.results.clear() + listener.reset() with self.assertRaises(ServerSelectionTimeoutError, msg=msg): method(*args, **kwargs) - self.assertEqual(len(listener.results["started"]), 0, msg) + self.assertEqual(len(listener.started_events), 0, msg) @client_context.require_replica_set @client_context.require_test_commands @@ -353,11 +353,11 @@ def raise_error(*args, **kwargs): for method, args, kwargs in 
retryable_single_statement_ops(client.db.retryable_write_test): msg = "%s(*%r, **%r)" % (method.__name__, args, kwargs) - listener.results.clear() + listener.reset() topology.select_server = mock_select_server with self.assertRaises(ConnectionFailure, msg=msg): method(*args, **kwargs) - self.assertEqual(len(listener.results["started"]), 1, msg) + self.assertEqual(len(listener.started_events), 1, msg) @client_context.require_replica_set @client_context.require_test_commands @@ -366,7 +366,7 @@ def test_batch_splitting(self): large = "s" * 1024 * 1024 * 15 coll = self.db.retryable_write_test coll.delete_many({}) - self.listener.results.clear() + self.listener.reset() bulk_result = coll.bulk_write( [ InsertOne({"_id": 1, "l": large}), @@ -381,7 +381,7 @@ def test_batch_splitting(self): # Each command should fail and be retried. # With OP_MSG 3 inserts are one batch. 2 updates another. # 2 deletes a third. - self.assertEqual(len(self.listener.results["started"]), 6) + self.assertEqual(len(self.listener.started_events), 6) self.assertEqual(coll.find_one(), {"_id": 1, "count": 1}) # Assert the final result expected_result = { @@ -412,7 +412,7 @@ def test_batch_splitting_retry_fails(self): ] ) ) - self.listener.results.clear() + self.listener.reset() with self.client.start_session() as session: initial_txn = session._server_session._transaction_id try: @@ -430,9 +430,9 @@ def test_batch_splitting_retry_fails(self): else: self.fail("bulk_write should have failed") - started = self.listener.results["started"] + started = self.listener.started_events self.assertEqual(len(started), 3) - self.assertEqual(len(self.listener.results["succeeded"]), 1) + self.assertEqual(len(self.listener.succeeded_events), 1) expected_txn = Int64(initial_txn + 1) self.assertEqual(started[0].command["txnNumber"], expected_txn) self.assertEqual(started[0].command["lsid"], session.session_id) @@ -483,9 +483,7 @@ def test_RetryableWriteError_error_label(self): if client_context.version >= Version(4, 4): # In MongoDB 4.4+ we rely on the server returning the error label. - self.assertIn( - "RetryableWriteError", listener.results["succeeded"][-1].reply["errorLabels"] - ) + self.assertIn("RetryableWriteError", listener.succeeded_events[-1].reply["errorLabels"]) @client_context.require_version_min(4, 4) def test_RetryableWriteError_error_label_RawBSONDocument(self): @@ -575,12 +573,12 @@ def test_pool_paused_error_is_retryable(self): # Connection check out failures are not reflected in command # monitoring because we only publish command events _after_ checking # out a connection. - started = cmd_listener.results["started"] + started = cmd_listener.started_events msg = pprint.pformat(cmd_listener.results) self.assertEqual(3, len(started), msg) - succeeded = cmd_listener.results["succeeded"] + succeeded = cmd_listener.succeeded_events self.assertEqual(2, len(succeeded), msg) - failed = cmd_listener.results["failed"] + failed = cmd_listener.failed_events self.assertEqual(1, len(failed), msg) @@ -605,7 +603,7 @@ def raise_connection_err_select_server(*args, **kwargs): raise ConnectionFailure("Connection refused") for method, args, kwargs in retryable_single_statement_ops(client.db.retryable_write_test): - listener.results.clear() + listener.reset() topology.select_server = raise_connection_err_select_server with client.start_session() as session: kwargs = copy.deepcopy(kwargs) @@ -616,8 +614,8 @@ def raise_connection_err_select_server(*args, **kwargs): # Each operation should fail on the first attempt and succeed # on the second. 
method(*args, **kwargs) - self.assertEqual(len(listener.results["started"]), 1, msg) - retry_cmd = listener.results["started"][0].command + self.assertEqual(len(listener.started_events), 1, msg) + retry_cmd = listener.started_events[0].command sent_txn_id = retry_cmd["txnNumber"] final_txn_id = session._server_session.transaction_id self.assertEqual(Int64(initial_txn_id + 1), sent_txn_id, msg) diff --git a/test/test_server_selection.py b/test/test_server_selection.py index c3f3762f9a..8d4ffe5e9b 100644 --- a/test/test_server_selection.py +++ b/test/test_server_selection.py @@ -97,7 +97,7 @@ def all_hosts_started(): coll.find_one({"name": "John Doe"}) # Confirm all find commands are run against appropriate host. - for command in listener.results["started"]: + for command in listener.started_events: if command.command_name == "find": self.assertEqual(command.connection_id[1], expected_port) diff --git a/test/test_server_selection_in_window.py b/test/test_server_selection_in_window.py index cae2d7661b..d076ae77b3 100644 --- a/test/test_server_selection_in_window.py +++ b/test/test_server_selection_in_window.py @@ -115,7 +115,7 @@ def frequencies(self, client, listener, n_finds=10): for thread in threads: self.assertTrue(thread.passed) - events = listener.results["started"] + events = listener.started_events self.assertEqual(len(events), n_finds * N_THREADS) nodes = client.nodes self.assertEqual(len(nodes), 2) diff --git a/test/test_session.py b/test/test_session.py index 386bab295c..25d209ebaf 100644 --- a/test/test_session.py +++ b/test/test_session.py @@ -58,9 +58,9 @@ def failed(self, event): super(SessionTestListener, self).failed(event) def first_command_started(self): - assert len(self.results["started"]) >= 1, "No command-started events" + assert len(self.started_events) >= 1, "No command-started events" - return self.results["started"][0] + return self.started_events[0] def session_ids(client): @@ -103,7 +103,7 @@ def tearDown(self): """All sessions used in the test must be returned to the pool.""" self.client.drop_database("pymongo_test") used_lsids = self.initial_lsids.copy() - for event in self.session_checker_listener.results["started"]: + for event in self.session_checker_listener.started_events: if "lsid" in event.command: used_lsids.add(event.command["lsid"]["id"]) @@ -118,15 +118,15 @@ def _test_ops(self, client, *ops): last_use = s._server_session.last_use start = time.monotonic() self.assertLessEqual(last_use, start) - listener.results.clear() + listener.reset() # In case "f" modifies its inputs. args = copy.copy(args) kw = copy.copy(kw) kw["session"] = s f(*args, **kw) self.assertGreaterEqual(s._server_session.last_use, start) - self.assertGreaterEqual(len(listener.results["started"]), 1) - for event in listener.results["started"]: + self.assertGreaterEqual(len(listener.started_events), 1) + for event in listener.started_events: self.assertTrue( "lsid" in event.command, "%s sent no lsid with %s" % (f.__name__, event.command_name), @@ -157,11 +157,11 @@ def _test_ops(self, client, *ops): # No explicit session. 
for f, args, kw in ops: - listener.results.clear() + listener.reset() f(*args, **kw) - self.assertGreaterEqual(len(listener.results["started"]), 1) + self.assertGreaterEqual(len(listener.started_events), 1) lsids = [] - for event in listener.results["started"]: + for event in listener.started_events: self.assertTrue( "lsid" in event.command, "%s sent no lsid with %s" % (f.__name__, event.command_name), @@ -205,7 +205,7 @@ def test_implicit_sessions_checkout(self): (client.db.list_collections, []), ] threads = [] - listener.results.clear() + listener.reset() def thread_target(op, *args): res = op(*args) @@ -225,7 +225,7 @@ def thread_target(op, *args): self.assertIsNone(thread.exc) client.close() lsid_set.clear() - for i in listener.results["started"]: + for i in listener.started_events: if i.command.get("lsid"): lsid_set.add(i.command.get("lsid")["id"]) if len(lsid_set) == 1: @@ -280,13 +280,13 @@ def test_end_sessions(self): self.assertEqual(len(client._topology._session_pool), _MAX_END_SESSIONS + 1) client.close() self.assertEqual(len(client._topology._session_pool), 0) - end_sessions = [e for e in listener.results["started"] if e.command_name == "endSessions"] + end_sessions = [e for e in listener.started_events if e.command_name == "endSessions"] self.assertEqual(len(end_sessions), 2) # Closing again should not send any commands. - listener.results.clear() + listener.reset() client.close() - self.assertEqual(len(listener.results["started"]), 0) + self.assertEqual(len(listener.started_events), 0) def test_client(self): client = self.client @@ -399,10 +399,10 @@ def test_cursor(self): for name, f in ops: with client.start_session() as s: - listener.results.clear() + listener.reset() f(session=s) - self.assertGreaterEqual(len(listener.results["started"]), 1) - for event in listener.results["started"]: + self.assertGreaterEqual(len(listener.started_events), 1) + for event in listener.started_events: self.assertTrue( "lsid" in event.command, "%s sent no lsid with %s" % (name, event.command_name), @@ -419,7 +419,7 @@ def test_cursor(self): # No explicit session. for name, f in ops: - listener.results.clear() + listener.reset() f(session=None) event0 = listener.first_command_started() self.assertTrue( @@ -428,7 +428,7 @@ def test_cursor(self): lsid = event0.command["lsid"] - for event in listener.results["started"][1:]: + for event in listener.started_events[1:]: self.assertTrue( "lsid" in event.command, "%s sent no lsid with %s" % (name, event.command_name) ) @@ -600,7 +600,7 @@ def test_aggregate_error(self): # 3.6.0 mongos only validates the aggregate pipeline when the # database exists. coll.insert_one({}) - listener.results.clear() + listener.reset() with self.assertRaises(OperationFailure): coll.aggregate([{"$badOperation": {"bar": 1}}]) @@ -687,7 +687,7 @@ def _test_unacknowledged_ops(self, client, *ops): for f, args, kw in ops: with client.start_session() as s: - listener.results.clear() + listener.reset() # In case "f" modifies its inputs. args = copy.copy(args) kw = copy.copy(kw) @@ -698,7 +698,7 @@ def _test_unacknowledged_ops(self, client, *ops): f(*args, **kw) if f.__name__ == "create_collection": # create_collection runs listCollections first. - event = listener.results["started"].pop(0) + event = listener.started_events.pop(0) self.assertEqual("listCollections", event.command_name) self.assertIn( "lsid", @@ -707,19 +707,19 @@ def _test_unacknowledged_ops(self, client, *ops): ) # Should not run any command before raising an error. 
- self.assertFalse(listener.results["started"], "%s sent command" % (f.__name__,)) + self.assertFalse(listener.started_events, "%s sent command" % (f.__name__,)) self.assertTrue(s.has_ended) # Unacknowledged write without a session does not send an lsid. for f, args, kw in ops: - listener.results.clear() + listener.reset() f(*args, **kw) - self.assertGreaterEqual(len(listener.results["started"]), 1) + self.assertGreaterEqual(len(listener.started_events), 1) if f.__name__ == "create_collection": # create_collection runs listCollections first. - event = listener.results["started"].pop(0) + event = listener.started_events.pop(0) self.assertEqual("listCollections", event.command_name) self.assertIn( "lsid", @@ -727,7 +727,7 @@ def _test_unacknowledged_ops(self, client, *ops): "%s sent no lsid with %s" % (f.__name__, event.command_name), ) - for event in listener.results["started"]: + for event in listener.started_events: self.assertNotIn( "lsid", event.command, "%s sent lsid with %s" % (f.__name__, event.command_name) ) @@ -799,26 +799,26 @@ def test_core(self): with self.client.start_session() as sess: self.assertIsNone(sess.cluster_time) self.assertIsNone(sess.operation_time) - self.listener.results.clear() + self.listener.reset() self.client.pymongo_test.test.find_one(session=sess) - started = self.listener.results["started"][0] + started = self.listener.started_events[0] cmd = started.command self.assertIsNone(cmd.get("readConcern")) op_time = sess.operation_time self.assertIsNotNone(op_time) - succeeded = self.listener.results["succeeded"][0] + succeeded = self.listener.succeeded_events[0] reply = succeeded.reply self.assertEqual(op_time, reply.get("operationTime")) # No explicit session self.client.pymongo_test.test.insert_one({}) self.assertEqual(sess.operation_time, op_time) - self.listener.results.clear() + self.listener.reset() try: self.client.pymongo_test.command("doesntexist", session=sess) except: pass - failed = self.listener.results["failed"][0] + failed = self.listener.failed_events[0] failed_op_time = failed.failure.get("operationTime") # Some older builds of MongoDB 3.5 / 3.6 return None for # operationTime when a command fails. 
Make sure we don't @@ -848,14 +848,14 @@ def _test_reads(self, op, exception=None): coll.find_one({}, session=sess) operation_time = sess.operation_time self.assertIsNotNone(operation_time) - self.listener.results.clear() + self.listener.reset() if exception: with self.assertRaises(exception): op(coll, sess) else: op(coll, sess) act = ( - self.listener.results["started"][0] + self.listener.started_events[0] .command.get("readConcern", {}) .get("afterClusterTime") ) @@ -887,10 +887,10 @@ def _test_writes(self, op): op(coll, sess) operation_time = sess.operation_time self.assertIsNotNone(operation_time) - self.listener.results.clear() + self.listener.reset() coll.find_one({}, session=sess) act = ( - self.listener.results["started"][0] + self.listener.started_events[0] .command.get("readConcern", {}) .get("afterClusterTime") ) @@ -938,9 +938,9 @@ def _test_no_read_concern(self, op): coll.find_one({}, session=sess) operation_time = sess.operation_time self.assertIsNotNone(operation_time) - self.listener.results.clear() + self.listener.reset() op(coll, sess) - rc = self.listener.results["started"][0].command.get("readConcern") + rc = self.listener.started_events[0].command.get("readConcern") self.assertIsNone(rc) @client_context.require_no_standalone @@ -1001,19 +1001,19 @@ def test_get_more_does_not_include_read_concern(self): coll.insert_many([{}, {}]) cursor = coll.find({}).batch_size(1) next(cursor) - self.listener.results.clear() + self.listener.reset() list(cursor) - started = self.listener.results["started"][0] + started = self.listener.started_events[0] self.assertEqual(started.command_name, "getMore") self.assertIsNone(started.command.get("readConcern")) def test_session_not_causal(self): with self.client.start_session(causal_consistency=False) as s: self.client.pymongo_test.test.insert_one({}, session=s) - self.listener.results.clear() + self.listener.reset() self.client.pymongo_test.test.find_one({}, session=s) act = ( - self.listener.results["started"][0] + self.listener.started_events[0] .command.get("readConcern", {}) .get("afterClusterTime") ) @@ -1023,10 +1023,10 @@ def test_session_not_causal(self): def test_server_not_causal(self): with self.client.start_session(causal_consistency=True) as s: self.client.pymongo_test.test.insert_one({}, session=s) - self.listener.results.clear() + self.listener.reset() self.client.pymongo_test.test.find_one({}, session=s) act = ( - self.listener.results["started"][0] + self.listener.started_events[0] .command.get("readConcern", {}) .get("afterClusterTime") ) @@ -1038,17 +1038,17 @@ def test_read_concern(self): with self.client.start_session(causal_consistency=True) as s: coll = self.client.pymongo_test.test coll.insert_one({}, session=s) - self.listener.results.clear() + self.listener.reset() coll.find_one({}, session=s) - read_concern = self.listener.results["started"][0].command.get("readConcern") + read_concern = self.listener.started_events[0].command.get("readConcern") self.assertIsNotNone(read_concern) self.assertIsNone(read_concern.get("level")) self.assertIsNotNone(read_concern.get("afterClusterTime")) coll = coll.with_options(read_concern=ReadConcern("majority")) - self.listener.results.clear() + self.listener.reset() coll.find_one({}, session=s) - read_concern = self.listener.results["started"][0].command.get("readConcern") + read_concern = self.listener.started_events[0].command.get("readConcern") self.assertIsNotNone(read_concern) self.assertEqual(read_concern.get("level"), "majority") 
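            # The causally consistent session still attaches afterClusterTime
            # alongside the explicit read concern level.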
self.assertIsNotNone(read_concern.get("afterClusterTime")) @@ -1056,17 +1056,17 @@ def test_read_concern(self): @client_context.require_no_standalone def test_cluster_time_with_server_support(self): self.client.pymongo_test.test.insert_one({}) - self.listener.results.clear() + self.listener.reset() self.client.pymongo_test.test.find_one({}) - after_cluster_time = self.listener.results["started"][0].command.get("$clusterTime") + after_cluster_time = self.listener.started_events[0].command.get("$clusterTime") self.assertIsNotNone(after_cluster_time) @client_context.require_standalone def test_cluster_time_no_server_support(self): self.client.pymongo_test.test.insert_one({}) - self.listener.results.clear() + self.listener.reset() self.client.pymongo_test.test.find_one({}) - after_cluster_time = self.listener.results["started"][0].command.get("$clusterTime") + after_cluster_time = self.listener.started_events[0].command.get("$clusterTime") self.assertIsNone(after_cluster_time) @@ -1129,22 +1129,22 @@ def insert_and_aggregate(): ] for name, f in ops: - listener.results.clear() + listener.reset() # Call f() twice, insert to advance clusterTime, call f() again. f() f() collection.insert_one({}) f() - self.assertGreaterEqual(len(listener.results["started"]), 1) - for i, event in enumerate(listener.results["started"]): + self.assertGreaterEqual(len(listener.started_events), 1) + for i, event in enumerate(listener.started_events): self.assertTrue( "$clusterTime" in event.command, "%s sent no $clusterTime with %s" % (f.__name__, event.command_name), ) if i > 0: - succeeded = listener.results["succeeded"][i - 1] + succeeded = listener.succeeded_events[i - 1] self.assertTrue( "$clusterTime" in succeeded.reply, "%s received no $clusterTime with %s" diff --git a/test/test_transactions.py b/test/test_transactions.py index 02e691329e..dc58beb930 100644 --- a/test/test_transactions.py +++ b/test/test_transactions.py @@ -343,11 +343,11 @@ def test_transaction_starts_with_batched_write(self): self.assertEqual( ["insert", "insert", "commitTransaction"], listener.started_command_names() ) - first_cmd = listener.results["started"][0].command + first_cmd = listener.started_events[0].command self.assertTrue(first_cmd["startTransaction"]) lsid = first_cmd["lsid"] txn_number = first_cmd["txnNumber"] - for event in listener.results["started"][1:]: + for event in listener.started_events[1:]: self.assertNotIn("startTransaction", event.command) self.assertEqual(lsid, event.command["lsid"]) self.assertEqual(txn_number, event.command["txnNumber"]) @@ -459,7 +459,7 @@ def callback(session): # Create the collection. 
coll.insert_one({}) - listener.results.clear() + listener.reset() with client.start_session() as s: with PatchSessionTimeout(0): with self.assertRaises(OperationFailure): @@ -491,7 +491,7 @@ def callback(session): } ) self.addCleanup(self.set_fail_point, {"configureFailPoint": "failCommand", "mode": "off"}) - listener.results.clear() + listener.reset() with client.start_session() as s: with PatchSessionTimeout(0): @@ -521,7 +521,7 @@ def callback(session): } ) self.addCleanup(self.set_fail_point, {"configureFailPoint": "failCommand", "mode": "off"}) - listener.results.clear() + listener.reset() with client.start_session() as s: with PatchSessionTimeout(0): diff --git a/test/test_versioned_api.py b/test/test_versioned_api.py index a2fd059d21..7dbf2c867d 100644 --- a/test/test_versioned_api.py +++ b/test/test_versioned_api.py @@ -83,7 +83,7 @@ def test_command_options(self): self.addCleanup(coll.delete_many, {}) list(coll.find(batch_size=25)) client.admin.command("ping") - self.assertServerApiInAllCommands(listener.results["started"]) + self.assertServerApiInAllCommands(listener.started_events) @client_context.require_version_min(4, 7) @client_context.require_transactions @@ -100,7 +100,7 @@ def test_command_options_txn(self): coll.insert_many([{} for _ in range(100)], session=s) list(coll.find(batch_size=25, session=s)) client.test.command("find", "test", session=s) - self.assertServerApiInAllCommands(listener.results["started"]) + self.assertServerApiInAllCommands(listener.started_events) if __name__ == "__main__": diff --git a/test/utils.py b/test/utils.py index 6b0876a158..842e9e3a7b 100644 --- a/test/utils.py +++ b/test/utils.py @@ -29,7 +29,7 @@ from collections import abc, defaultdict from functools import partial from test import client_context, db_pwd, db_user -from typing import Any +from typing import Any, List from bson import json_util from bson.objectid import ObjectId @@ -140,26 +140,43 @@ def pool_closed(self, event): self.add_event(event) -class EventListener(monitoring.CommandListener): +class EventListener(BaseListener, monitoring.CommandListener): def __init__(self): + super(EventListener, self).__init__() self.results = defaultdict(list) - def started(self, event): - self.results["started"].append(event) + @property + def started_events(self) -> List[monitoring.CommandStartedEvent]: + return self.results["started"] - def succeeded(self, event): - self.results["succeeded"].append(event) + @property + def succeeded_events(self) -> List[monitoring.CommandSucceededEvent]: + return self.results["succeeded"] - def failed(self, event): - self.results["failed"].append(event) + @property + def failed_events(self) -> List[monitoring.CommandFailedEvent]: + return self.results["failed"] - def started_command_names(self): + def started(self, event: monitoring.CommandStartedEvent) -> None: + self.started_events.append(event) + self.add_event(event) + + def succeeded(self, event: monitoring.CommandSucceededEvent) -> None: + self.succeeded_events.append(event) + self.add_event(event) + + def failed(self, event: monitoring.CommandFailedEvent) -> None: + self.failed_events.append(event) + self.add_event(event) + + def started_command_names(self) -> List[str]: """Return list of command names started.""" - return [event.command_name for event in self.results["started"]] + return [event.command_name for event in self.started_events] - def reset(self): + def reset(self) -> None: """Reset the state of this listener.""" self.results.clear() + super(EventListener, self).reset() class 
TopologyEventListener(monitoring.TopologyListener):
diff --git a/test/utils_spec_runner.py b/test/utils_spec_runner.py
index f8ad26efe7..8528ecb8c7 100644
--- a/test/utils_spec_runner.py
+++ b/test/utils_spec_runner.py
@@ -371,16 +371,16 @@ def run_operations(self, sessions, collection, ops, in_with_transaction=False):
     # TODO: factor with test_command_monitoring.py
     def check_events(self, test, listener, session_ids):
-        res = listener.results
+        events = listener.started_events
         if not len(test["expectations"]):
             return

         # Give a nicer message when there are missing or extra events
-        cmds = decode_raw([event.command for event in res["started"]])
-        self.assertEqual(len(res["started"]), len(test["expectations"]), cmds)
+        cmds = decode_raw([event.command for event in events])
+        self.assertEqual(len(events), len(test["expectations"]), cmds)

         for i, expectation in enumerate(test["expectations"]):
             event_type = next(iter(expectation))
-            event = res["started"][i]
+            event = events[i]

             # The tests substitute 42 for any number other than 0.
             if event.command_name == "getMore" and event.command["getMore"]:

From 363e0b2b2c12d0f0c4ef064b390086fea4688dc3 Mon Sep 17 00:00:00 2001
From: Steven Silvester
Date: Mon, 14 Nov 2022 15:13:51 -0600
Subject: [PATCH 0301/1588] PYTHON-2818 Add documentation and changelog (#1115)

---
 doc/changelog.rst               | 11 ++++++++---
 doc/examples/authentication.rst | 18 ++++++++++++++++++
 2 files changed, 26 insertions(+), 3 deletions(-)

diff --git a/doc/changelog.rst b/doc/changelog.rst
index ebd796116e..89d3f2fdde 100644
--- a/doc/changelog.rst
+++ b/doc/changelog.rst
@@ -4,15 +4,20 @@ Changelog
 Changes in Version 4.3.3
 ------------------------

-Version 4.3.3 documents support for :ref:`CSFLE on-demand credentials` for cloud KMS providers, and fixes the following bugs:
+Version 4.3.3 documents support for the following:

+- :ref:`CSFLE on-demand credentials` for cloud KMS providers.
+- Authentication support for :ref:`EKS Clusters`.
+- The new :ref:`timeout-example` example page, which improves the
+  documentation for :func:`pymongo.timeout`.
+
+Bug Fixes
+.........
 - Fixed a performance regression in
   :meth:`~gridfs.GridFSBucket.download_to_stream` and
   :meth:`~gridfs.GridFSBucket.download_to_stream_by_name` by reading in chunks
   instead of line by line (`PYTHON-3502`_).
 - Improved performance of :meth:`gridfs.grid_file.GridOut.read` and
   :meth:`gridfs.grid_file.GridOut.readline` (`PYTHON-3508`_).
-- Added the :ref:`timeout-example` example page to improve the documentation
-  for :func:`pymongo.timeout`.

 Issues Resolved
 ...............
diff --git a/doc/examples/authentication.rst b/doc/examples/authentication.rst
index 862ac40db2..a984d17fc0 100644
--- a/doc/examples/authentication.rst
+++ b/doc/examples/authentication.rst
@@ -311,6 +311,7 @@ A sample URI would be::

 .. note:: The access_key_id, secret_access_key, and session_token passed into
    the URI MUST be `percent escaped`_.
+

 AWS Lambda (Environment Variables)
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -331,6 +332,23 @@ for the access key ID, secret access key, and session token, respectively::

 PyMongo will use credentials set via the environment variables. These
 environment variables MUST NOT be `percent escaped`_.

+
+.. _EKS Clusters:
+
+EKS Clusters
+~~~~~~~~~~~~
+
+Applications using the `Authenticating users for your cluster from an OpenID
+Connect identity provider
+<https://docs.aws.amazon.com/eks/latest/userguide/authenticate-oidc-identity-provider.html>`_
+capability on EKS can use the provided credentials by granting the
+associated IAM user the `sts:AssumeRoleWithWebIdentity
+<https://docs.aws.amazon.com/STS/latest/APIReference/API_AssumeRoleWithWebIdentity.html>`_
+permission.
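+
+For example, a minimal connection sketch under these assumptions (the
+cluster host below is a placeholder)::
+
+    from pymongo import MongoClient
+
+    # No username or password in the URI: the MONGODB-AWS mechanism
+    # resolves credentials from the environment, as described below.
+    client = MongoClient(
+        "mongodb+srv://example.mongodb.net/?authMechanism=MONGODB-AWS")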
+ +When the username and password are not provided, the MONGODB-AWS mechanism +is set, and ``AWS_WEB_IDENTITY_TOKEN_FILE``, ``AWS_ROLE_ARN``, and +optional ``AWS_ROLE_SESSION_NAME`` are available, the driver will use +an ``AssumeRoleWithWebIdentity`` call to retrieve temporary credentials. +The application must be using ``pymongo_auth_aws`` >= 1.1.0 for EKS support. + ECS Container ~~~~~~~~~~~~~ From 3ab73905dc957c919112ad0def10ef27024659c8 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Tue, 15 Nov 2022 17:43:34 -0800 Subject: [PATCH 0302/1588] PYTHON-3500 Resync retryable tests to fix serverless failures (#1116) --- .../unified/handshakeError.json | 106 ++++++++++++------ .../unified/handshakeError.json | 36 +++--- 2 files changed, 91 insertions(+), 51 deletions(-) diff --git a/test/retryable_reads/unified/handshakeError.json b/test/retryable_reads/unified/handshakeError.json index 58bbce66a8..2921d8a954 100644 --- a/test/retryable_reads/unified/handshakeError.json +++ b/test/retryable_reads/unified/handshakeError.json @@ -1,6 +1,6 @@ { "description": "retryable reads handshake failures", - "schemaVersion": "1.3", + "schemaVersion": "1.4", "runOnRequirements": [ { "minServerVersion": "4.2", @@ -62,7 +62,7 @@ ], "tests": [ { - "description": "listDatabases succeeds after retryable handshake network error", + "description": "client.listDatabases succeeds after retryable handshake network error", "operations": [ { "name": "failPoint", @@ -155,7 +155,7 @@ ] }, { - "description": "listDatabases succeeds after retryable handshake server error (ShutdownInProgress)", + "description": "client.listDatabases succeeds after retryable handshake server error (ShutdownInProgress)", "operations": [ { "name": "failPoint", @@ -248,7 +248,7 @@ ] }, { - "description": "listDatabaseNames succeeds after retryable handshake network error", + "description": "client.listDatabaseNames succeeds after retryable handshake network error", "operations": [ { "name": "failPoint", @@ -338,7 +338,7 @@ ] }, { - "description": "listDatabaseNames succeeds after retryable handshake server error (ShutdownInProgress)", + "description": "client.listDatabaseNames succeeds after retryable handshake server error (ShutdownInProgress)", "operations": [ { "name": "failPoint", @@ -428,7 +428,12 @@ ] }, { - "description": "createChangeStream succeeds after retryable handshake network error", + "description": "client.createChangeStream succeeds after retryable handshake network error", + "runOnRequirements": [ + { + "serverless": "forbid" + } + ], "operations": [ { "name": "failPoint", @@ -522,7 +527,12 @@ ] }, { - "description": "createChangeStream succeeds after retryable handshake server error (ShutdownInProgress)", + "description": "client.createChangeStream succeeds after retryable handshake server error (ShutdownInProgress)", + "runOnRequirements": [ + { + "serverless": "forbid" + } + ], "operations": [ { "name": "failPoint", @@ -616,7 +626,12 @@ ] }, { - "description": "aggregate succeeds after retryable handshake network error", + "description": "database.aggregate succeeds after retryable handshake network error", + "runOnRequirements": [ + { + "serverless": "forbid" + } + ], "operations": [ { "name": "failPoint", @@ -716,7 +731,12 @@ ] }, { - "description": "aggregate succeeds after retryable handshake server error (ShutdownInProgress)", + "description": "database.aggregate succeeds after retryable handshake server error (ShutdownInProgress)", + "runOnRequirements": [ + { + "serverless": "forbid" + } + ], "operations": [ { 
"name": "failPoint", @@ -816,7 +836,7 @@ ] }, { - "description": "listCollections succeeds after retryable handshake network error", + "description": "database.listCollections succeeds after retryable handshake network error", "operations": [ { "name": "failPoint", @@ -909,7 +929,7 @@ ] }, { - "description": "listCollections succeeds after retryable handshake server error (ShutdownInProgress)", + "description": "database.listCollections succeeds after retryable handshake server error (ShutdownInProgress)", "operations": [ { "name": "failPoint", @@ -1002,7 +1022,7 @@ ] }, { - "description": "listCollectionNames succeeds after retryable handshake network error", + "description": "database.listCollectionNames succeeds after retryable handshake network error", "operations": [ { "name": "failPoint", @@ -1095,7 +1115,7 @@ ] }, { - "description": "listCollectionNames succeeds after retryable handshake server error (ShutdownInProgress)", + "description": "database.listCollectionNames succeeds after retryable handshake server error (ShutdownInProgress)", "operations": [ { "name": "failPoint", @@ -1188,7 +1208,12 @@ ] }, { - "description": "createChangeStream succeeds after retryable handshake network error", + "description": "database.createChangeStream succeeds after retryable handshake network error", + "runOnRequirements": [ + { + "serverless": "forbid" + } + ], "operations": [ { "name": "failPoint", @@ -1282,7 +1307,12 @@ ] }, { - "description": "createChangeStream succeeds after retryable handshake server error (ShutdownInProgress)", + "description": "database.createChangeStream succeeds after retryable handshake server error (ShutdownInProgress)", + "runOnRequirements": [ + { + "serverless": "forbid" + } + ], "operations": [ { "name": "failPoint", @@ -1376,7 +1406,7 @@ ] }, { - "description": "aggregate succeeds after retryable handshake network error", + "description": "collection.aggregate succeeds after retryable handshake network error", "operations": [ { "name": "failPoint", @@ -1469,7 +1499,7 @@ ] }, { - "description": "aggregate succeeds after retryable handshake server error (ShutdownInProgress)", + "description": "collection.aggregate succeeds after retryable handshake server error (ShutdownInProgress)", "operations": [ { "name": "failPoint", @@ -1562,7 +1592,7 @@ ] }, { - "description": "countDocuments succeeds after retryable handshake network error", + "description": "collection.countDocuments succeeds after retryable handshake network error", "operations": [ { "name": "failPoint", @@ -1655,7 +1685,7 @@ ] }, { - "description": "countDocuments succeeds after retryable handshake server error (ShutdownInProgress)", + "description": "collection.countDocuments succeeds after retryable handshake server error (ShutdownInProgress)", "operations": [ { "name": "failPoint", @@ -1748,7 +1778,7 @@ ] }, { - "description": "estimatedDocumentCount succeeds after retryable handshake network error", + "description": "collection.estimatedDocumentCount succeeds after retryable handshake network error", "operations": [ { "name": "failPoint", @@ -1838,7 +1868,7 @@ ] }, { - "description": "estimatedDocumentCount succeeds after retryable handshake server error (ShutdownInProgress)", + "description": "collection.estimatedDocumentCount succeeds after retryable handshake server error (ShutdownInProgress)", "operations": [ { "name": "failPoint", @@ -1928,7 +1958,7 @@ ] }, { - "description": "distinct succeeds after retryable handshake network error", + "description": "collection.distinct succeeds after 
retryable handshake network error", "operations": [ { "name": "failPoint", @@ -2022,7 +2052,7 @@ ] }, { - "description": "distinct succeeds after retryable handshake server error (ShutdownInProgress)", + "description": "collection.distinct succeeds after retryable handshake server error (ShutdownInProgress)", "operations": [ { "name": "failPoint", @@ -2116,7 +2146,7 @@ ] }, { - "description": "find succeeds after retryable handshake network error", + "description": "collection.find succeeds after retryable handshake network error", "operations": [ { "name": "failPoint", @@ -2209,7 +2239,7 @@ ] }, { - "description": "find succeeds after retryable handshake server error (ShutdownInProgress)", + "description": "collection.find succeeds after retryable handshake server error (ShutdownInProgress)", "operations": [ { "name": "failPoint", @@ -2302,7 +2332,7 @@ ] }, { - "description": "findOne succeeds after retryable handshake network error", + "description": "collection.findOne succeeds after retryable handshake network error", "operations": [ { "name": "failPoint", @@ -2395,7 +2425,7 @@ ] }, { - "description": "findOne succeeds after retryable handshake server error (ShutdownInProgress)", + "description": "collection.findOne succeeds after retryable handshake server error (ShutdownInProgress)", "operations": [ { "name": "failPoint", @@ -2488,7 +2518,7 @@ ] }, { - "description": "listIndexes succeeds after retryable handshake network error", + "description": "collection.listIndexes succeeds after retryable handshake network error", "operations": [ { "name": "failPoint", @@ -2578,7 +2608,7 @@ ] }, { - "description": "listIndexes succeeds after retryable handshake server error (ShutdownInProgress)", + "description": "collection.listIndexes succeeds after retryable handshake server error (ShutdownInProgress)", "operations": [ { "name": "failPoint", @@ -2668,7 +2698,7 @@ ] }, { - "description": "listIndexNames succeeds after retryable handshake network error", + "description": "collection.listIndexNames succeeds after retryable handshake network error", "operations": [ { "name": "failPoint", @@ -2758,7 +2788,7 @@ ] }, { - "description": "listIndexNames succeeds after retryable handshake server error (ShutdownInProgress)", + "description": "collection.listIndexNames succeeds after retryable handshake server error (ShutdownInProgress)", "operations": [ { "name": "failPoint", @@ -2848,7 +2878,12 @@ ] }, { - "description": "createChangeStream succeeds after retryable handshake network error", + "description": "collection.createChangeStream succeeds after retryable handshake network error", + "runOnRequirements": [ + { + "serverless": "forbid" + } + ], "operations": [ { "name": "failPoint", @@ -2942,7 +2977,12 @@ ] }, { - "description": "createChangeStream succeeds after retryable handshake server error (ShutdownInProgress)", + "description": "collection.createChangeStream succeeds after retryable handshake server error (ShutdownInProgress)", + "runOnRequirements": [ + { + "serverless": "forbid" + } + ], "operations": [ { "name": "failPoint", diff --git a/test/retryable_writes/unified/handshakeError.json b/test/retryable_writes/unified/handshakeError.json index e07e5412b2..df37bd7232 100644 --- a/test/retryable_writes/unified/handshakeError.json +++ b/test/retryable_writes/unified/handshakeError.json @@ -54,7 +54,7 @@ ], "tests": [ { - "description": "insertOne succeeds after retryable handshake network error", + "description": "collection.insertOne succeeds after retryable handshake network error", 
"operations": [ { "name": "failPoint", @@ -150,7 +150,7 @@ ] }, { - "description": "insertOne succeeds after retryable handshake server error (ShutdownInProgress)", + "description": "collection.insertOne succeeds after retryable handshake server error (ShutdownInProgress)", "operations": [ { "name": "failPoint", @@ -246,7 +246,7 @@ ] }, { - "description": "insertMany succeeds after retryable handshake network error", + "description": "collection.insertMany succeeds after retryable handshake network error", "operations": [ { "name": "failPoint", @@ -344,7 +344,7 @@ ] }, { - "description": "insertMany succeeds after retryable handshake server error (ShutdownInProgress)", + "description": "collection.insertMany succeeds after retryable handshake server error (ShutdownInProgress)", "operations": [ { "name": "failPoint", @@ -442,7 +442,7 @@ ] }, { - "description": "deleteOne succeeds after retryable handshake network error", + "description": "collection.deleteOne succeeds after retryable handshake network error", "operations": [ { "name": "failPoint", @@ -535,7 +535,7 @@ ] }, { - "description": "deleteOne succeeds after retryable handshake server error (ShutdownInProgress)", + "description": "collection.deleteOne succeeds after retryable handshake server error (ShutdownInProgress)", "operations": [ { "name": "failPoint", @@ -628,7 +628,7 @@ ] }, { - "description": "replaceOne succeeds after retryable handshake network error", + "description": "collection.replaceOne succeeds after retryable handshake network error", "operations": [ { "name": "failPoint", @@ -724,7 +724,7 @@ ] }, { - "description": "replaceOne succeeds after retryable handshake server error (ShutdownInProgress)", + "description": "collection.replaceOne succeeds after retryable handshake server error (ShutdownInProgress)", "operations": [ { "name": "failPoint", @@ -820,7 +820,7 @@ ] }, { - "description": "updateOne succeeds after retryable handshake network error", + "description": "collection.updateOne succeeds after retryable handshake network error", "operations": [ { "name": "failPoint", @@ -918,7 +918,7 @@ ] }, { - "description": "updateOne succeeds after retryable handshake server error (ShutdownInProgress)", + "description": "collection.updateOne succeeds after retryable handshake server error (ShutdownInProgress)", "operations": [ { "name": "failPoint", @@ -1016,7 +1016,7 @@ ] }, { - "description": "findOneAndDelete succeeds after retryable handshake network error", + "description": "collection.findOneAndDelete succeeds after retryable handshake network error", "operations": [ { "name": "failPoint", @@ -1109,7 +1109,7 @@ ] }, { - "description": "findOneAndDelete succeeds after retryable handshake server error (ShutdownInProgress)", + "description": "collection.findOneAndDelete succeeds after retryable handshake server error (ShutdownInProgress)", "operations": [ { "name": "failPoint", @@ -1202,7 +1202,7 @@ ] }, { - "description": "findOneAndReplace succeeds after retryable handshake network error", + "description": "collection.findOneAndReplace succeeds after retryable handshake network error", "operations": [ { "name": "failPoint", @@ -1298,7 +1298,7 @@ ] }, { - "description": "findOneAndReplace succeeds after retryable handshake server error (ShutdownInProgress)", + "description": "collection.findOneAndReplace succeeds after retryable handshake server error (ShutdownInProgress)", "operations": [ { "name": "failPoint", @@ -1394,7 +1394,7 @@ ] }, { - "description": "findOneAndUpdate succeeds after retryable handshake 
network error", + "description": "collection.findOneAndUpdate succeeds after retryable handshake network error", "operations": [ { "name": "failPoint", @@ -1492,7 +1492,7 @@ ] }, { - "description": "findOneAndUpdate succeeds after retryable handshake server error (ShutdownInProgress)", + "description": "collection.findOneAndUpdate succeeds after retryable handshake server error (ShutdownInProgress)", "operations": [ { "name": "failPoint", @@ -1590,7 +1590,7 @@ ] }, { - "description": "bulkWrite succeeds after retryable handshake network error", + "description": "collection.bulkWrite succeeds after retryable handshake network error", "operations": [ { "name": "failPoint", @@ -1692,7 +1692,7 @@ ] }, { - "description": "bulkWrite succeeds after retryable handshake server error (ShutdownInProgress)", + "description": "collection.bulkWrite succeeds after retryable handshake server error (ShutdownInProgress)", "operations": [ { "name": "failPoint", From b290f7b1a17f7f2195503034e627b922936b98bc Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Thu, 17 Nov 2022 12:26:32 -0800 Subject: [PATCH 0303/1588] PYTHON-3526 Fix mockup tests (#1119) --- test/mockupdb/test_mixed_version_sharded.py | 3 +-- test/mockupdb/test_mongos_command_read_mode.py | 3 +-- test/mockupdb/test_op_msg_read_preference.py | 3 +-- test/mockupdb/test_reset_and_request_check.py | 3 +-- test/mockupdb/test_slave_okay_rs.py | 3 +-- test/mockupdb/test_slave_okay_sharded.py | 3 +-- test/mockupdb/test_slave_okay_single.py | 3 +-- 7 files changed, 7 insertions(+), 14 deletions(-) diff --git a/test/mockupdb/test_mixed_version_sharded.py b/test/mockupdb/test_mixed_version_sharded.py index 7e12fcab35..dc2cd57380 100644 --- a/test/mockupdb/test_mixed_version_sharded.py +++ b/test/mockupdb/test_mixed_version_sharded.py @@ -19,11 +19,10 @@ from queue import Queue from mockupdb import MockupDB, go +from operations import upgrades # type: ignore[import] from pymongo import MongoClient -from .operations import upgrades - class TestMixedVersionSharded(unittest.TestCase): def setup_server(self, upgrade): diff --git a/test/mockupdb/test_mongos_command_read_mode.py b/test/mockupdb/test_mongos_command_read_mode.py index a84907d8cf..997f5af118 100644 --- a/test/mockupdb/test_mongos_command_read_mode.py +++ b/test/mockupdb/test_mongos_command_read_mode.py @@ -16,6 +16,7 @@ import unittest from mockupdb import MockupDB, OpMsg, going +from operations import operations # type: ignore[import] from pymongo import MongoClient, ReadPreference from pymongo.read_preferences import ( @@ -24,8 +25,6 @@ read_pref_mode_from_name, ) -from .operations import operations - class TestMongosCommandReadMode(unittest.TestCase): def test_aggregate(self): diff --git a/test/mockupdb/test_op_msg_read_preference.py b/test/mockupdb/test_op_msg_read_preference.py index 37882912bb..b377f4cf69 100644 --- a/test/mockupdb/test_op_msg_read_preference.py +++ b/test/mockupdb/test_op_msg_read_preference.py @@ -18,6 +18,7 @@ from typing import Any from mockupdb import CommandBase, MockupDB, going +from operations import operations # type: ignore[import] from pymongo import MongoClient, ReadPreference from pymongo.read_preferences import ( @@ -26,8 +27,6 @@ read_pref_mode_from_name, ) -from .operations import operations - class OpMsgReadPrefBase(unittest.TestCase): single_mongod = False diff --git a/test/mockupdb/test_reset_and_request_check.py b/test/mockupdb/test_reset_and_request_check.py index bc00e38a09..841cd41846 100755 --- a/test/mockupdb/test_reset_and_request_check.py +++ 
b/test/mockupdb/test_reset_and_request_check.py @@ -17,13 +17,12 @@ import unittest from mockupdb import MockupDB, going, wait_until +from operations import operations # type: ignore[import] from pymongo import MongoClient from pymongo.errors import ConnectionFailure from pymongo.server_type import SERVER_TYPE -from .operations import operations - class TestResetAndRequestCheck(unittest.TestCase): def __init__(self, *args, **kwargs): diff --git a/test/mockupdb/test_slave_okay_rs.py b/test/mockupdb/test_slave_okay_rs.py index 7ac489117a..225d8e4071 100644 --- a/test/mockupdb/test_slave_okay_rs.py +++ b/test/mockupdb/test_slave_okay_rs.py @@ -20,11 +20,10 @@ import unittest from mockupdb import MockupDB, going +from operations import operations # type: ignore[import] from pymongo import MongoClient -from .operations import operations - class TestSlaveOkayRS(unittest.TestCase): def setup_server(self): diff --git a/test/mockupdb/test_slave_okay_sharded.py b/test/mockupdb/test_slave_okay_sharded.py index 51e422595e..18f2016126 100644 --- a/test/mockupdb/test_slave_okay_sharded.py +++ b/test/mockupdb/test_slave_okay_sharded.py @@ -23,12 +23,11 @@ from queue import Queue from mockupdb import MockupDB, going +from operations import operations # type: ignore[import] from pymongo import MongoClient from pymongo.read_preferences import make_read_preference, read_pref_mode_from_name -from .operations import operations - class TestSlaveOkaySharded(unittest.TestCase): def setup_server(self): diff --git a/test/mockupdb/test_slave_okay_single.py b/test/mockupdb/test_slave_okay_single.py index bd36c77a04..4b2846490f 100644 --- a/test/mockupdb/test_slave_okay_single.py +++ b/test/mockupdb/test_slave_okay_single.py @@ -23,13 +23,12 @@ import unittest from mockupdb import MockupDB, going +from operations import operations # type: ignore[import] from pymongo import MongoClient from pymongo.read_preferences import make_read_preference, read_pref_mode_from_name from pymongo.topology_description import TOPOLOGY_TYPE -from .operations import operations - def topology_type_name(client): topology_type = client._topology._description.topology_type From cde9adf6aba388e2ed6bc135750d6a01ec63c660 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Thu, 17 Nov 2022 12:27:00 -0800 Subject: [PATCH 0304/1588] PYTHON-3527 + PYTHON-3528 Fix no-server tests (#1118) Fix TestCreateEntities when no server is running. Fix no-server test_typeddict_find_notrequired. --- test/test_create_entities.py | 29 +++++++++++++++++++---------- test/test_mypy.py | 6 +++++- 2 files changed, 24 insertions(+), 11 deletions(-) diff --git a/test/test_create_entities.py b/test/test_create_entities.py index ad0ac9347e..1e46614da0 100644 --- a/test/test_create_entities.py +++ b/test/test_create_entities.py @@ -11,11 +11,16 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
+import sys import unittest + +sys.path[0:0] = [""] + +from test import IntegrationTest from test.unified_format import UnifiedSpecTestMixinV1 -class TestCreateEntities(unittest.TestCase): +class TestCreateEntities(IntegrationTest): def test_store_events_as_entities(self): self.scenario_runner = UnifiedSpecTestMixinV1() spec = { @@ -91,7 +96,7 @@ def test_store_all_others_as_entities(self): { "name": "insertOne", "object": "collection0", - "arguments": {"document": {"_id": 1, "x": 44}}, + "arguments": {"document": {"_id": 2, "x": 44}}, }, ], }, @@ -101,15 +106,19 @@ def test_store_all_others_as_entities(self): ], } + self.client.dat.dat.delete_many({}) self.scenario_runner.TEST_SPEC = spec self.scenario_runner.setUp() self.scenario_runner.run_scenario(spec["tests"][0]) self.scenario_runner.entity_map["client0"].close() - final_entity_map = self.scenario_runner.entity_map - for entity in ["errors", "failures"]: - self.assertIn(entity, final_entity_map) - self.assertGreaterEqual(len(final_entity_map[entity]), 0) - self.assertEqual(type(final_entity_map[entity]), list) - for entity in ["successes", "iterations"]: - self.assertIn(entity, final_entity_map) - self.assertEqual(type(final_entity_map[entity]), int) + entity_map = self.scenario_runner.entity_map + self.assertEqual(len(entity_map["errors"]), 4) + for error in entity_map["errors"]: + self.assertEqual(error["type"], "DuplicateKeyError") + self.assertEqual(entity_map["failures"], []) + self.assertEqual(entity_map["successes"], 2) + self.assertEqual(entity_map["iterations"], 5) + + +if __name__ == "__main__": + unittest.main() diff --git a/test/test_mypy.py b/test/test_mypy.py index 58e69853ca..3b29bbf20e 100644 --- a/test/test_mypy.py +++ b/test/test_mypy.py @@ -15,6 +15,7 @@ """Test that each file in mypy_fails/ actually fails mypy, and test some sample client code that uses PyMongo typings.""" import os +import sys import tempfile import unittest from typing import TYPE_CHECKING, Any, Dict, Iterable, Iterator, List, Union @@ -51,7 +52,9 @@ class ImplicitMovie(TypedDict): except ImportError: api = None # type: ignore[assignment] -from test import IntegrationTest +sys.path[0:0] = [""] + +from test import IntegrationTest, client_context from test.utils import rs_or_single_client from bson import CodecOptions, decode, decode_all, decode_file_iter, decode_iter, encode @@ -430,6 +433,7 @@ def test_typeddict_empty_document_type(self) -> None: # This should fail because _id is not included in our TypedDict definition. assert out["_id"] # type:ignore[typeddict-item] + @client_context.require_connection def test_typeddict_find_notrequired(self): if NotRequired is None or ImplicitMovie is None: raise unittest.SkipTest("Python 3.11+ is required to use NotRequired.") From 1edbfad0c8afc17e899b7982cd4c7942deab16f1 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Thu, 17 Nov 2022 12:27:15 -0800 Subject: [PATCH 0305/1588] PYTHON-3529 Improve reliability of test_list_databases (#1120) --- test/test_client.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/test/test_client.py b/test/test_client.py index a33881fded..53a234a33d 100644 --- a/test/test_client.py +++ b/test/test_client.py @@ -791,6 +791,10 @@ def test_list_databases(self): self.assertIsInstance(cursor, CommandCursor) helper_docs = list(cursor) self.assertTrue(len(helper_docs) > 0) + # sizeOnDisk can change between calls. 
+ for doc_list in (helper_docs, cmd_docs): + for doc in doc_list: + doc.pop("sizeOnDisk", None) self.assertEqual(helper_docs, cmd_docs) for doc in helper_docs: self.assertIs(type(doc), dict) From 3d032768a0c617e4c594cb3f971df1cd06b3e0d4 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Thu, 17 Nov 2022 14:59:25 -0600 Subject: [PATCH 0306/1588] BUMP 4.3.3 --- pymongo/_version.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pymongo/_version.py b/pymongo/_version.py index 78c325a23c..7eff43faa8 100644 --- a/pymongo/_version.py +++ b/pymongo/_version.py @@ -15,7 +15,7 @@ """Current version of PyMongo.""" from typing import Tuple, Union -version_tuple: Tuple[Union[int, str], ...] = (4, 4, 0, ".dev0") +version_tuple: Tuple[Union[int, str], ...] = (4, 3, 3) def get_version_string() -> str: From f92dd40c8696c0e26ba7d544e95ddfd19402f7df Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Thu, 17 Nov 2022 15:00:04 -0600 Subject: [PATCH 0307/1588] BUMP 4.4.0.dev0 --- pymongo/_version.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pymongo/_version.py b/pymongo/_version.py index 7eff43faa8..78c325a23c 100644 --- a/pymongo/_version.py +++ b/pymongo/_version.py @@ -15,7 +15,7 @@ """Current version of PyMongo.""" from typing import Tuple, Union -version_tuple: Tuple[Union[int, str], ...] = (4, 3, 3) +version_tuple: Tuple[Union[int, str], ...] = (4, 4, 0, ".dev0") def get_version_string() -> str: From 0c6aacb0fb20687e6f99b4886fdd0951fd8347ae Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Tue, 22 Nov 2022 13:34:17 -0600 Subject: [PATCH 0308/1588] PYTHON-3531 Pre-commit failure due to flake8 repository move (#1122) --- .pre-commit-config.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index d72d51971c..cfe0db31cf 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -30,7 +30,7 @@ repos: files: \.py$ args: [--profile=black] -- repo: https://gitlab.com/pycqa/flake8 +- repo: https://github.com/PyCQA/flake8 rev: 3.9.2 hooks: - id: flake8 From ee2badff75e9523b838f1a9242cfde2018b74703 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Tue, 29 Nov 2022 05:27:45 -0600 Subject: [PATCH 0309/1588] PYTHON-3524 Support passing list of strings to create_index (#1121) --- doc/changelog.rst | 14 ++++++++++++++ pymongo/collection.py | 15 +++++++++------ pymongo/cursor.py | 9 +++++---- pymongo/helpers.py | 14 ++++++++++++-- pymongo/operations.py | 16 +++++++++------- test/test_collection.py | 6 +++++- test/test_cursor.py | 18 ++++++++++++++++++ 7 files changed, 72 insertions(+), 20 deletions(-) diff --git a/doc/changelog.rst b/doc/changelog.rst index 89d3f2fdde..6913f09fc3 100644 --- a/doc/changelog.rst +++ b/doc/changelog.rst @@ -1,6 +1,20 @@ Changelog ========= +Changes in Version 4.4 +----------------------- + +- Added support for passing a list containing (key, direction) pairs + or keys to :meth:`~pymongo.collection.Collection.create_index`. + +Issues Resolved +............... + +See the `PyMongo 4.4 release notes in JIRA`_ for the list of resolved issues +in this release. + +.. 
_PyMongo 4.4 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=34354 + Changes in Version 4.3.3 ------------------------ diff --git a/pymongo/collection.py b/pymongo/collection.py index 600d73c4bc..77f154f5e7 100644 --- a/pymongo/collection.py +++ b/pymongo/collection.py @@ -26,7 +26,6 @@ NoReturn, Optional, Sequence, - Tuple, Union, ) @@ -62,6 +61,8 @@ ReplaceOne, UpdateMany, UpdateOne, + _IndexKeyHint, + _IndexList, ) from pymongo.read_preferences import ReadPreference, _ServerMode from pymongo.results import ( @@ -85,9 +86,6 @@ UpdateOne, UpdateMany, ] -# Hint supports index name, "myIndex", or list of index pairs: [('x', 1), ('y', -1)] -_IndexList = Sequence[Tuple[str, Union[int, str, Mapping[str, Any]]]] -_IndexKeyHint = Union[str, _IndexList] class ReturnDocument(object): @@ -1948,7 +1946,9 @@ def create_index( ) -> str: """Creates an index on this collection. - Takes either a single key or a list of (key, direction) pairs. + Takes either a single key or a list containing (key, direction) pairs + or keys. If no direction is given, :data:`~pymongo.ASCENDING` will + be assumed. The key(s) must be an instance of :class:`basestring` (:class:`str` in python 3), and the direction(s) must be one of (:data:`~pymongo.ASCENDING`, :data:`~pymongo.DESCENDING`, @@ -1964,7 +1964,7 @@ def create_index( ascending we need to use a list of tuples:: >>> my_collection.create_index([("mike", pymongo.DESCENDING), - ... ("eliot", pymongo.ASCENDING)]) + ... "eliot"]) All optional index creation parameters should be passed as keyword arguments to this method. For example:: @@ -2025,6 +2025,9 @@ def create_index( - `**kwargs` (optional): any additional index creation options (see the above list) should be passed as keyword + .. versionchanged:: 4.4 + Allow passing a list containing (key, direction) pairs + or keys for the ``keys`` parameter. .. versionchanged:: 4.1 Added ``comment`` parameter. .. versionchanged:: 3.11 diff --git a/pymongo/cursor.py b/pymongo/cursor.py index 658c4276ef..ccf0bfd71b 100644 --- a/pymongo/cursor.py +++ b/pymongo/cursor.py @@ -146,7 +146,7 @@ def close(self): self.sock = None -_Sort = Sequence[Tuple[str, Union[int, str, Mapping[str, Any]]]] +_Sort = Sequence[Union[str, Tuple[str, Union[int, str, Mapping[str, Any]]]]] _Hint = Union[str, _Sort] @@ -832,15 +832,16 @@ def sort( """Sorts this cursor's results. Pass a field name and a direction, either - :data:`~pymongo.ASCENDING` or :data:`~pymongo.DESCENDING`:: + :data:`~pymongo.ASCENDING` or :data:`~pymongo.DESCENDING`.:: for doc in collection.find().sort('field', pymongo.ASCENDING): print(doc) - To sort by multiple fields, pass a list of (key, direction) pairs:: + To sort by multiple fields, pass a list of (key, direction) pairs. + If just a name is given, :data:`~pymongo.ASCENDING` will be inferred:: for doc in collection.find().sort([ - ('field1', pymongo.ASCENDING), + 'field1', ('field2', pymongo.DESCENDING)]): print(doc) diff --git a/pymongo/helpers.py b/pymongo/helpers.py index dd210db188..31325c8af2 100644 --- a/pymongo/helpers.py +++ b/pymongo/helpers.py @@ -80,6 +80,8 @@ def _index_list(key_or_list, direction=None): Takes such a list, or a single key, or a single key and direction. 
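+
+    For example, ``["x", ("y", -1)]`` is normalized to
+    ``[("x", ASCENDING), ("y", -1)]``.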
""" if direction is not None: + if not isinstance(key_or_list, str): + raise TypeError("Expected a string and a direction") return [(key_or_list, direction)] else: if isinstance(key_or_list, str): @@ -88,7 +90,12 @@ def _index_list(key_or_list, direction=None): return list(key_or_list) elif not isinstance(key_or_list, (list, tuple)): raise TypeError("if no direction is specified, key_or_list must be an instance of list") - return key_or_list + values = [] + for item in key_or_list: + if isinstance(item, str): + item = (item, ASCENDING) + values.append(item) + return values def _index_document(index_list): @@ -108,7 +115,10 @@ def _index_document(index_list): raise ValueError("key_or_list must not be the empty list") index: SON[str, Any] = SON() - for (key, value) in index_list: + for item in index_list: + if isinstance(item, str): + item = (item, ASCENDING) + key, value = item if not isinstance(key, str): raise TypeError("first item in each key pair must be an instance of str") if not isinstance(value, (str, int, abc.Mapping)): diff --git a/pymongo/operations.py b/pymongo/operations.py index 92a4dad0ac..f939cd479f 100644 --- a/pymongo/operations.py +++ b/pymongo/operations.py @@ -22,6 +22,10 @@ from pymongo.helpers import _gen_index_name, _index_document, _index_list from pymongo.typings import _CollationIn, _DocumentType, _Pipeline +# Hint supports index name, "myIndex", or list of either strings or index pairs: [('x', 1), ('y', -1), 'z''] +_IndexList = Sequence[Union[str, Tuple[str, Union[int, str, Mapping[str, Any]]]]] +_IndexKeyHint = Union[str, _IndexList] + class InsertOne(Generic[_DocumentType]): """Represents an insert_one operation.""" @@ -55,10 +59,6 @@ def __ne__(self, other: Any) -> bool: return not self == other -_IndexList = Sequence[Tuple[str, Union[int, str, Mapping[str, Any]]]] -_IndexKeyHint = Union[str, _IndexList] - - class DeleteOne(object): """Represents a delete_one operation.""" @@ -435,7 +435,9 @@ def __init__(self, keys: _IndexKeyHint, **kwargs: Any) -> None: For use with :meth:`~pymongo.collection.Collection.create_indexes`. - Takes either a single key or a list of (key, direction) pairs. + Takes either a single key or a list containing (key, direction) pairs + or keys. If no direction is given, :data:`~pymongo.ASCENDING` will + be assumed. The key(s) must be an instance of :class:`basestring` (:class:`str` in python 3), and the direction(s) must be one of (:data:`~pymongo.ASCENDING`, :data:`~pymongo.DESCENDING`, @@ -477,8 +479,8 @@ def __init__(self, keys: _IndexKeyHint, **kwargs: Any) -> None: server version. 
:Parameters: - - `keys`: a single key or a list of (key, direction) - pairs specifying the index to create + - `keys`: a single key or a list containing (key, direction) pairs + or keys specifying the index to create - `**kwargs` (optional): any additional index creation options (see the above list) should be passed as keyword arguments diff --git a/test/test_collection.py b/test/test_collection.py index 49a7017ef3..b6883f4ece 100644 --- a/test/test_collection.py +++ b/test/test_collection.py @@ -315,6 +315,10 @@ def test_create_index(self): with self.write_concern_collection() as coll: coll.create_index([("hello", DESCENDING)]) + db.test.create_index(["hello", "world"]) + db.test.create_index(["hello", ("world", DESCENDING)]) + db.test.create_index({"hello": 1}.items()) # type:ignore[arg-type] + def test_drop_index(self): db = self.db db.test.drop_indexes() @@ -1680,7 +1684,7 @@ def to_list(things): self.assertRaises(TypeError, db.test.find, sort=5) self.assertRaises(TypeError, db.test.find, sort="hello") - self.assertRaises(ValueError, db.test.find, sort=["hello", 1]) + self.assertRaises(TypeError, db.test.find, sort=["hello", 1]) # TODO doesn't actually test functionality, just that it doesn't blow up def test_cursor_timeout(self): diff --git a/test/test_cursor.py b/test/test_cursor.py index 96d83fecf1..e96efb92b0 100644 --- a/test/test_cursor.py +++ b/test/test_cursor.py @@ -40,6 +40,7 @@ from pymongo.collation import Collation from pymongo.cursor import Cursor, CursorType from pymongo.errors import ExecutionTimeout, InvalidOperation, OperationFailure +from pymongo.operations import _IndexList from pymongo.read_concern import ReadConcern from pymongo.read_preferences import ReadPreference from pymongo.write_concern import WriteConcern @@ -366,6 +367,21 @@ def test_hint(self): break self.assertRaises(InvalidOperation, a.hint, spec) + db.test.drop() + db.test.insert_many([{"num": i, "foo": i} for i in range(100)]) + spec: _IndexList = ["num", ("foo", DESCENDING)] + db.test.create_index(spec) + first = next(db.test.find().hint(spec)) + self.assertEqual(0, first.get("num")) + self.assertEqual(0, first.get("foo")) + + db.test.drop() + db.test.insert_many([{"num": i, "foo": i} for i in range(100)]) + spec = ["num"] + db.test.create_index(spec) + first = next(db.test.find().hint(spec)) + self.assertEqual(0, first.get("num")) + def test_hint_by_name(self): db = self.db db.test.drop() @@ -715,6 +731,8 @@ def test_sort(self): (i["a"], i["b"]) for i in db.test.find().sort([("b", DESCENDING), ("a", ASCENDING)]) ] self.assertEqual(result, expected) + result = [(i["a"], i["b"]) for i in db.test.find().sort([("b", DESCENDING), "a"])] + self.assertEqual(result, expected) a = db.test.find() a.sort("x", ASCENDING) From 26efc0f43ddb937a5f37bffc003a56627c1a8252 Mon Sep 17 00:00:00 2001 From: Julius Park Date: Thu, 1 Dec 2022 17:54:15 -0800 Subject: [PATCH 0310/1588] PYTHON-3388 Propagate Original Error for Write Errors Labeled NoWritesPerformed (#1117) --- pymongo/mongo_client.py | 10 ++- .../insertOne-noWritesPerformedError.json | 90 +++++++++++++++++++ test/test_retryable_writes.py | 59 ++++++++++++ 3 files changed, 157 insertions(+), 2 deletions(-) create mode 100644 test/retryable_writes/unified/insertOne-noWritesPerformedError.json diff --git a/pymongo/mongo_client.py b/pymongo/mongo_client.py index 7d16e58777..dccd4bb6b1 100644 --- a/pymongo/mongo_client.py +++ b/pymongo/mongo_client.py @@ -1408,12 +1408,18 @@ def is_retrying(): if retryable_error: session._unpin() if not retryable_error or 
(is_retrying() and not multiple_retries): - raise + if exc.has_error_label("NoWritesPerformed") and last_error: + raise last_error from exc + else: + raise if bulk: bulk.retrying = True else: retrying = True - last_error = exc + if not exc.has_error_label("NoWritesPerformed"): + last_error = exc + if last_error is None: + last_error = exc @_csot.apply def _retryable_read(self, func, read_pref, session, address=None, retryable=True): diff --git a/test/retryable_writes/unified/insertOne-noWritesPerformedError.json b/test/retryable_writes/unified/insertOne-noWritesPerformedError.json new file mode 100644 index 0000000000..3194e91c5c --- /dev/null +++ b/test/retryable_writes/unified/insertOne-noWritesPerformedError.json @@ -0,0 +1,90 @@ +{ + "description": "retryable-writes insertOne noWritesPerformedErrors", + "schemaVersion": "1.0", + "runOnRequirements": [ + { + "minServerVersion": "6.0", + "topologies": [ + "replicaset" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandFailedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "retryable-writes-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "no-writes-performed-collection" + } + } + ], + "tests": [ + { + "description": "InsertOne fails after NoWritesPerformed error", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "insert" + ], + "errorCode": 64, + "errorLabels": [ + "NoWritesPerformed", + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "x": 1 + } + }, + "expectError": { + "errorCode": 64, + "errorLabelsContain": [ + "NoWritesPerformed", + "RetryableWriteError" + ] + } + } + ], + "outcome": [ + { + "collectionName": "no-writes-performed-collection", + "databaseName": "retryable-writes-tests", + "documents": [] + } + ] + } + ] +} diff --git a/test/test_retryable_writes.py b/test/test_retryable_writes.py index 7ca1c9c1ef..a22c776534 100644 --- a/test/test_retryable_writes.py +++ b/test/test_retryable_writes.py @@ -26,6 +26,7 @@ from test.utils import ( CMAPListener, DeprecationFilter, + EventListener, OvertCommandListener, TestCreator, rs_or_single_client, @@ -45,6 +46,7 @@ ) from pymongo.mongo_client import MongoClient from pymongo.monitoring import ( + CommandSucceededEvent, ConnectionCheckedOutEvent, ConnectionCheckOutFailedEvent, ConnectionCheckOutFailedReason, @@ -64,6 +66,26 @@ _TEST_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "retryable_writes", "legacy") +class InsertEventListener(EventListener): + def succeeded(self, event: CommandSucceededEvent) -> None: + super(InsertEventListener, self).succeeded(event) + if ( + event.command_name == "insert" + and event.reply.get("writeConcernError", {}).get("code", None) == 91 + ): + client_context.client.admin.command( + { + "configureFailPoint": "failCommand", + "mode": {"times": 1}, + "data": { + "errorCode": 10107, + "errorLabels": ["RetryableWriteError", "NoWritesPerformed"], + "failCommands": ["insert"], + }, + } + ) + + class TestAllScenarios(SpecRunner): RUN_ON_LOAD_BALANCER = True RUN_ON_SERVERLESS = True @@ -581,6 +603,43 @@ def test_pool_paused_error_is_retryable(self): failed = cmd_listener.failed_events 
self.assertEqual(1, len(failed), msg) + @client_context.require_failCommand_fail_point + @client_context.require_replica_set + @client_context.require_version_min( + 6, 0, 0 + ) # the spec requires that this prose test only be run on 6.0+ + @client_knobs(heartbeat_frequency=0.05, min_heartbeat_interval=0.05) + def test_returns_original_error_code( + self, + ): + cmd_listener = InsertEventListener() + client = rs_or_single_client(retryWrites=True, event_listeners=[cmd_listener]) + client.test.test.drop() + self.addCleanup(client.close) + cmd_listener.reset() + client.admin.command( + { + "configureFailPoint": "failCommand", + "mode": {"times": 1}, + "data": { + "writeConcernError": { + "code": 91, + "errorLabels": ["RetryableWriteError"], + }, + "failCommands": ["insert"], + }, + } + ) + with self.assertRaises(WriteConcernError) as exc: + client.test.test.insert_one({"_id": 1}) + self.assertEqual(exc.exception.code, 91) + client.admin.command( + { + "configureFailPoint": "failCommand", + "mode": "off", + } + ) + # TODO: Make this a real integration test where we stepdown the primary. class TestRetryableWritesTxnNumber(IgnoreDeprecationsTest): From ccade9bc058e0ccdd4363a09af1e8ac1bf76856a Mon Sep 17 00:00:00 2001 From: Julius Park Date: Fri, 2 Dec 2022 12:52:01 -0800 Subject: [PATCH 0311/1588] PYTHON-3351 Provide access to raw result document when the server returns an error for a command (#1125) --- .../aggregate-merge-errorResponse.json | 90 ++++++++++++ .../crud/unified/bulkWrite-errorResponse.json | 88 ++++++++++++ .../crud/unified/deleteOne-errorResponse.json | 82 +++++++++++ test/crud/unified/distinct-comment.json | 12 +- .../findOneAndUpdate-errorResponse.json | 132 ++++++++++++++++++ .../crud/unified/insertOne-errorResponse.json | 82 +++++++++++ .../crud/unified/updateOne-errorResponse.json | 87 ++++++++++++ test/unified_format.py | 6 +- 8 files changed, 576 insertions(+), 3 deletions(-) create mode 100644 test/crud/unified/aggregate-merge-errorResponse.json create mode 100644 test/crud/unified/bulkWrite-errorResponse.json create mode 100644 test/crud/unified/deleteOne-errorResponse.json create mode 100644 test/crud/unified/findOneAndUpdate-errorResponse.json create mode 100644 test/crud/unified/insertOne-errorResponse.json create mode 100644 test/crud/unified/updateOne-errorResponse.json diff --git a/test/crud/unified/aggregate-merge-errorResponse.json b/test/crud/unified/aggregate-merge-errorResponse.json new file mode 100644 index 0000000000..6c7305fd91 --- /dev/null +++ b/test/crud/unified/aggregate-merge-errorResponse.json @@ -0,0 +1,90 @@ +{ + "description": "aggregate-merge-errorResponse", + "schemaVersion": "1.12", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "test" + } + } + ], + "initialData": [ + { + "collectionName": "test", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 1 + }, + { + "_id": 2, + "x": 1 + } + ] + } + ], + "tests": [ + { + "description": "aggregate $merge DuplicateKey error is accessible", + "runOnRequirements": [ + { + "minServerVersion": "5.1", + "topologies": [ + "single", + "replicaset" + ] + } + ], + "operations": [ + { + "name": "aggregate", + "object": "database0", + "arguments": { + "pipeline": [ + { + "$documents": [ + { + "_id": 2, + "x": 1 + } + ] + }, + { + "$merge": { + "into": "test", + "whenMatched": 
"fail" + } + } + ] + }, + "expectError": { + "errorCode": 11000, + "errorResponse": { + "keyPattern": { + "_id": 1 + }, + "keyValue": { + "_id": 2 + } + } + } + } + ] + } + ] +} diff --git a/test/crud/unified/bulkWrite-errorResponse.json b/test/crud/unified/bulkWrite-errorResponse.json new file mode 100644 index 0000000000..157637c713 --- /dev/null +++ b/test/crud/unified/bulkWrite-errorResponse.json @@ -0,0 +1,88 @@ +{ + "description": "bulkWrite-errorResponse", + "schemaVersion": "1.12", + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "test" + } + } + ], + "tests": [ + { + "description": "bulkWrite operations support errorResponse assertions", + "runOnRequirements": [ + { + "minServerVersion": "4.0.0", + "topologies": [ + "single", + "replicaset" + ] + }, + { + "minServerVersion": "4.2.0", + "topologies": [ + "sharded" + ] + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "errorCode": 8 + } + } + } + }, + { + "name": "bulkWrite", + "object": "collection0", + "arguments": { + "requests": [ + { + "insertOne": { + "document": { + "_id": 1 + } + } + } + ] + }, + "expectError": { + "errorCode": 8, + "errorResponse": { + "code": 8 + } + } + } + ] + } + ] +} diff --git a/test/crud/unified/deleteOne-errorResponse.json b/test/crud/unified/deleteOne-errorResponse.json new file mode 100644 index 0000000000..1f3a266f1e --- /dev/null +++ b/test/crud/unified/deleteOne-errorResponse.json @@ -0,0 +1,82 @@ +{ + "description": "deleteOne-errorResponse", + "schemaVersion": "1.12", + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "test" + } + } + ], + "tests": [ + { + "description": "delete operations support errorResponse assertions", + "runOnRequirements": [ + { + "minServerVersion": "4.0.0", + "topologies": [ + "single", + "replicaset" + ] + }, + { + "minServerVersion": "4.2.0", + "topologies": [ + "sharded" + ] + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "delete" + ], + "errorCode": 8 + } + } + } + }, + { + "name": "deleteOne", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + } + }, + "expectError": { + "errorCode": 8, + "errorResponse": { + "code": 8 + } + } + } + ] + } + ] +} diff --git a/test/crud/unified/distinct-comment.json b/test/crud/unified/distinct-comment.json index 0669d4f30a..11bce9ac9d 100644 --- a/test/crud/unified/distinct-comment.json +++ b/test/crud/unified/distinct-comment.json @@ -64,7 +64,11 @@ "key": "value" } }, - "expectResult": [ 11, 22, 33 ] + "expectResult": [ + 11, + 22, + 33 + ] } ], "expectEvents": [ @@ -105,7 +109,11 @@ "filter": {}, "comment": "comment" }, - "expectResult": [ 11, 22, 33 ] + "expectResult": [ + 11, + 22, + 33 + ] } ], "expectEvents": [ diff --git 
a/test/crud/unified/findOneAndUpdate-errorResponse.json b/test/crud/unified/findOneAndUpdate-errorResponse.json new file mode 100644 index 0000000000..5023a450f3 --- /dev/null +++ b/test/crud/unified/findOneAndUpdate-errorResponse.json @@ -0,0 +1,132 @@ +{ + "description": "findOneAndUpdate-errorResponse", + "schemaVersion": "1.12", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "test" + } + } + ], + "initialData": [ + { + "collectionName": "test", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": "foo" + } + ] + } + ], + "tests": [ + { + "description": "findOneAndUpdate DuplicateKey error is accessible", + "runOnRequirements": [ + { + "minServerVersion": "4.2" + } + ], + "operations": [ + { + "name": "createIndex", + "object": "collection0", + "arguments": { + "keys": { + "x": 1 + }, + "unique": true + } + }, + { + "name": "findOneAndUpdate", + "object": "collection0", + "arguments": { + "filter": { + "_id": 2 + }, + "update": { + "$set": { + "x": "foo" + } + }, + "upsert": true + }, + "expectError": { + "errorCode": 11000, + "errorResponse": { + "keyPattern": { + "x": 1 + }, + "keyValue": { + "x": "foo" + } + } + } + } + ] + }, + { + "description": "findOneAndUpdate document validation errInfo is accessible", + "runOnRequirements": [ + { + "minServerVersion": "5.0" + } + ], + "operations": [ + { + "name": "modifyCollection", + "object": "database0", + "arguments": { + "collection": "test", + "validator": { + "x": { + "$type": "string" + } + } + } + }, + { + "name": "findOneAndUpdate", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "update": { + "$set": { + "x": 1 + } + } + }, + "expectError": { + "errorCode": 121, + "errorResponse": { + "errInfo": { + "failingDocumentId": 1, + "details": { + "$$type": "object" + } + } + } + } + } + ] + } + ] +} diff --git a/test/crud/unified/insertOne-errorResponse.json b/test/crud/unified/insertOne-errorResponse.json new file mode 100644 index 0000000000..04ea6a7451 --- /dev/null +++ b/test/crud/unified/insertOne-errorResponse.json @@ -0,0 +1,82 @@ +{ + "description": "insertOne-errorResponse", + "schemaVersion": "1.12", + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "test" + } + } + ], + "tests": [ + { + "description": "insert operations support errorResponse assertions", + "runOnRequirements": [ + { + "minServerVersion": "4.0.0", + "topologies": [ + "single", + "replicaset" + ] + }, + { + "minServerVersion": "4.2.0", + "topologies": [ + "sharded" + ] + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "errorCode": 8 + } + } + } + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "_id": 1 + } + }, + "expectError": { + "errorCode": 8, + "errorResponse": { + "code": 8 + } + } + } + ] + } + ] +} diff --git a/test/crud/unified/updateOne-errorResponse.json b/test/crud/unified/updateOne-errorResponse.json new 
file mode 100644 index 0000000000..0ceddbc4fc --- /dev/null +++ b/test/crud/unified/updateOne-errorResponse.json @@ -0,0 +1,87 @@ +{ + "description": "updateOne-errorResponse", + "schemaVersion": "1.12", + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "test" + } + } + ], + "tests": [ + { + "description": "update operations support errorResponse assertions", + "runOnRequirements": [ + { + "minServerVersion": "4.0.0", + "topologies": [ + "single", + "replicaset" + ] + }, + { + "minServerVersion": "4.2.0", + "topologies": [ + "sharded" + ] + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "update" + ], + "errorCode": 8 + } + } + } + }, + { + "name": "updateOne", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "update": { + "$set": { + "x": 1 + } + } + }, + "expectError": { + "errorCode": 8, + "errorResponse": { + "code": 8 + } + } + } + ] + } + ] +} diff --git a/test/unified_format.py b/test/unified_format.py index 12eaceed35..5afc746859 100644 --- a/test/unified_format.py +++ b/test/unified_format.py @@ -862,7 +862,7 @@ class UnifiedSpecTestMixinV1(IntegrationTest): a class attribute ``TEST_SPEC``. """ - SCHEMA_VERSION = Version.from_string("1.10") + SCHEMA_VERSION = Version.from_string("1.12") RUN_ON_LOAD_BALANCER = True RUN_ON_SERVERLESS = True TEST_SPEC: Any @@ -994,6 +994,10 @@ def process_error(self, exception, spec): error_labels_contain = spec.get("errorLabelsContain") error_labels_omit = spec.get("errorLabelsOmit") expect_result = spec.get("expectResult") + error_response = spec.get("errorResponse") + if error_response: + for k in error_response.keys(): + self.assertEqual(error_response[k], exception.details[k]) if is_error: # already satisfied because exception was raised From 64192663954569fab553c60d3259017a1a7b5fcb Mon Sep 17 00:00:00 2001 From: Julius Park Date: Tue, 6 Dec 2022 15:29:48 -0800 Subject: [PATCH 0312/1588] PYTHON-3492 Test mongocryptd is not spawned when shared library is loaded (#1124) --- test/test_encryption.py | 32 ++++++++++++++++++++++++++++++++ 1 file changed, 32 insertions(+) diff --git a/test/test_encryption.py b/test/test_encryption.py index eaee22ebac..3c422b8c87 100644 --- a/test/test_encryption.py +++ b/test/test_encryption.py @@ -1910,6 +1910,38 @@ def test_bypassAutoEncryption(self): with self.assertRaises(ServerSelectionTimeoutError): mongocryptd_client.admin.command("ping") + @unittest.skipUnless(os.environ.get("TEST_CRYPT_SHARED"), "crypt_shared lib is not installed") + def test_via_loading_shared_library(self): + key_vault = client_context.client.keyvault.datakeys + key_vault.drop() + key_vault.create_index( + "keyAltNames", unique=True, partialFilterExpression={"keyAltNames": {"$exists": True}} + ) + key_vault.insert_one(json_data("external", "external-key.json")) + schemas = {"db.coll": json_data("external", "external-schema.json")} + opts = AutoEncryptionOpts( + kms_providers={"local": {"key": LOCAL_MASTER_KEY}}, + key_vault_namespace="keyvault.datakeys", + schema_map=schemas, + mongocryptd_uri="mongodb://localhost:47021/db?serverSelectionTimeoutMS=1000", + mongocryptd_spawn_args=[ + 
"--pidfilepath=bypass-spawning-mongocryptd.pid", + "--port=47021", + ], + crypt_shared_lib_required=True, + ) + client_encrypted = rs_or_single_client(auto_encryption_opts=opts) + self.addCleanup(client_encrypted.close) + client_encrypted.db.coll.drop() + client_encrypted.db.coll.insert_one({"encrypted": "test"}) + self.assertEncrypted(client_context.client.db.coll.find_one({})["encrypted"]) + no_mongocryptd_client = MongoClient( + host="mongodb://localhost:47021/db?serverSelectionTimeoutMS=1000" + ) + self.addCleanup(no_mongocryptd_client.close) + with self.assertRaises(ServerSelectionTimeoutError): + no_mongocryptd_client.db.command("ping") + # https://github.com/mongodb/specifications/tree/master/source/client-side-encryption/tests#kms-tls-tests class TestKmsTLSProse(EncryptionIntegrationTest): From 024148ca2b4861c2ead627213b55c37e280c35d7 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Mon, 19 Dec 2022 14:23:40 -0800 Subject: [PATCH 0313/1588] PYTHON-3541 Use bash instead of sh in perf testing (#1127) --- .evergreen/perf.yml | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/.evergreen/perf.yml b/.evergreen/perf.yml index 8b3638d535..d975fca79f 100644 --- a/.evergreen/perf.yml +++ b/.evergreen/perf.yml @@ -105,7 +105,7 @@ functions: params: script: | ${PREPARE_SHELL} - MONGODB_VERSION=${VERSION} TOPOLOGY=${TOPOLOGY} AUTH=${AUTH} SSL=${SSL} STORAGE_ENGINE=${STORAGE_ENGINE} sh ${DRIVERS_TOOLS}/.evergreen/run-orchestration.sh + MONGODB_VERSION=${VERSION} TOPOLOGY=${TOPOLOGY} AUTH=${AUTH} SSL=${SSL} STORAGE_ENGINE=${STORAGE_ENGINE} bash ${DRIVERS_TOOLS}/.evergreen/run-orchestration.sh # run-orchestration generates expansion file with the MONGODB_URI for the cluster - command: expansions.update params: @@ -116,7 +116,7 @@ functions: params: script: | ${PREPARE_SHELL} - sh ${DRIVERS_TOOLS}/.evergreen/stop-orchestration.sh + bash ${DRIVERS_TOOLS}/.evergreen/stop-orchestration.sh "run perf tests": - command: shell.exec @@ -125,7 +125,7 @@ functions: working_dir: "src" script: | ${PREPARE_SHELL} - PROJECT_DIRECTORY=${PROJECT_DIRECTORY} sh ${PROJECT_DIRECTORY}/.evergreen/run-perf-tests.sh + PROJECT_DIRECTORY=${PROJECT_DIRECTORY} bash ${PROJECT_DIRECTORY}/.evergreen/run-perf-tests.sh "attach benchmark test results": - command: attach.results @@ -182,7 +182,7 @@ functions: ${PREPARE_SHELL} file="${PROJECT_DIRECTORY}/.evergreen/install-dependencies.sh" # Don't use ${file} syntax here because evergreen treats it as an empty expansion. 
- [ -f "$file" ] && sh $file || echo "$file not available, skipping" + [ -f "$file" ] && bash $file || echo "$file not available, skipping" pre: - func: "fetch source" From f5d09e1c97ca12c94c555b40364b2ae10ec5126c Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Tue, 20 Dec 2022 13:39:04 -0600 Subject: [PATCH 0314/1588] PYTHON-3542 Test Failure - test_iteration on PyPy 3.8+ (#1128) --- test/test_client.py | 2 +- test/test_collection.py | 2 +- test/test_database.py | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/test/test_client.py b/test/test_client.py index 53a234a33d..b2f128f11a 100644 --- a/test/test_client.py +++ b/test/test_client.py @@ -217,7 +217,7 @@ def test_getattr(self): def test_iteration(self): client = self.client - if "PyPy" in sys.version: + if "PyPy" in sys.version and sys.version_info < (3, 8, 15): msg = "'NoneType' object is not callable" else: msg = "'MongoClient' object is not iterable" diff --git a/test/test_collection.py b/test/test_collection.py index b6883f4ece..881896c847 100644 --- a/test/test_collection.py +++ b/test/test_collection.py @@ -125,7 +125,7 @@ def test_getattr(self): def test_iteration(self): coll = self.db.coll - if "PyPy" in sys.version: + if "PyPy" in sys.version and sys.version_info < (3, 8, 15): msg = "'NoneType' object is not callable" else: msg = "'Collection' object is not iterable" diff --git a/test/test_database.py b/test/test_database.py index b1b2999df4..53af4912e4 100644 --- a/test/test_database.py +++ b/test/test_database.py @@ -95,7 +95,7 @@ def test_getattr(self): def test_iteration(self): db = self.client.pymongo_test - if "PyPy" in sys.version: + if "PyPy" in sys.version and sys.version_info < (3, 8, 15): msg = "'NoneType' object is not callable" else: msg = "'Database' object is not iterable" From 47686c8f68363645579ee0ed0f841fdd3b7362f4 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Tue, 20 Dec 2022 16:29:43 -0600 Subject: [PATCH 0315/1588] PYTHON-3543 Broken Links for ICU Project (#1129) --- doc/examples/collations.rst | 2 +- pymongo/collation.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/doc/examples/collations.rst b/doc/examples/collations.rst index 1a5106039c..45e647d816 100644 --- a/doc/examples/collations.rst +++ b/doc/examples/collations.rst @@ -42,7 +42,7 @@ or with plain Python dictionaries. The structure is the same:: backwards=) The only required parameter is ``locale``, which the server parses as -an `ICU format locale ID `_. +an `ICU format locale ID `_. For example, set ``locale`` to ``en_US`` to represent US English or ``fr_CA`` to represent Canadian French. diff --git a/pymongo/collation.py b/pymongo/collation.py index 5bc73c07c8..3d8503f7d5 100644 --- a/pymongo/collation.py +++ b/pymongo/collation.py @@ -14,7 +14,7 @@ """Tools for working with `collations`_. -.. _collations: http://userguide.icu-project.org/collation/concepts +.. 
_collations: https://www.mongodb.com/docs/manual/reference/collation/ """ from typing import Any, Dict, Mapping, Optional, Union From 7299dff84d3c9b6f137f83a9121e5209ee13efbf Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Thu, 5 Jan 2023 13:55:47 -0600 Subject: [PATCH 0316/1588] PYTHON-3546 bson.CodecOptions docs missing unicode_decode_error_handler=ignore option in newer documentation (#1131) --- bson/codec_options.py | 167 ++++++++++++++++++++++-------------------- 1 file changed, 86 insertions(+), 81 deletions(-) diff --git a/bson/codec_options.py b/bson/codec_options.py index 3c0a976a1b..6f4fdaac8d 100644 --- a/bson/codec_options.py +++ b/bson/codec_options.py @@ -245,87 +245,92 @@ class _BaseCodecOptions(NamedTuple): class CodecOptions(_BaseCodecOptions): - """Encapsulates options used encoding and / or decoding BSON. - - The `document_class` option is used to define a custom type for use - decoding BSON documents. Access to the underlying raw BSON bytes for - a document is available using the :class:`~bson.raw_bson.RawBSONDocument` - type:: - - >>> from bson.raw_bson import RawBSONDocument - >>> from bson.codec_options import CodecOptions - >>> codec_options = CodecOptions(document_class=RawBSONDocument) - >>> coll = db.get_collection('test', codec_options=codec_options) - >>> doc = coll.find_one() - >>> doc.raw - '\\x16\\x00\\x00\\x00\\x07_id\\x00[0\\x165\\x91\\x10\\xea\\x14\\xe8\\xc5\\x8b\\x93\\x00' - - The document class can be any type that inherits from - :class:`~collections.abc.MutableMapping`:: - - >>> class AttributeDict(dict): - ... # A dict that supports attribute access. - ... def __getattr__(self, key): - ... return self[key] - ... def __setattr__(self, key, value): - ... self[key] = value - ... - >>> codec_options = CodecOptions(document_class=AttributeDict) - >>> coll = db.get_collection('test', codec_options=codec_options) - >>> doc = coll.find_one() - >>> doc._id - ObjectId('5b3016359110ea14e8c58b93') - - See :doc:`/examples/datetimes` for examples using the `tz_aware` and - `tzinfo` options. - - See :doc:`/examples/uuid` for examples using the `uuid_representation` - option. - - :Parameters: - - `document_class`: BSON documents returned in queries will be decoded - to an instance of this class. Must be a subclass of - :class:`~collections.abc.MutableMapping`. Defaults to :class:`dict`. - - `tz_aware`: If ``True``, BSON datetimes will be decoded to timezone - aware instances of :class:`~datetime.datetime`. Otherwise they will be - naive. Defaults to ``False``. - - `uuid_representation`: The BSON representation to use when encoding - and decoding instances of :class:`~uuid.UUID`. Defaults to - :data:`~bson.binary.UuidRepresentation.UNSPECIFIED`. New - applications should consider setting this to - :data:`~bson.binary.UuidRepresentation.STANDARD` for cross language - compatibility. See :ref:`handling-uuid-data-example` for details. - - `unicode_decode_error_handler`: The error handler to apply when - a Unicode-related error occurs during BSON decoding that would - otherwise raise :exc:`UnicodeDecodeError`. Valid options include - 'strict', 'replace', 'backslashreplace', 'surrogateescape', and - 'ignore'. Defaults to 'strict'. - - `tzinfo`: A :class:`~datetime.tzinfo` subclass that specifies the - timezone to/from which :class:`~datetime.datetime` objects should be - encoded/decoded. - - `type_registry`: Instance of :class:`TypeRegistry` used to customize - encoding and decoding behavior. 
- - `datetime_conversion`: Specifies how UTC datetimes should be decoded - within BSON. Valid options include 'datetime_ms' to return as a - DatetimeMS, 'datetime' to return as a datetime.datetime and - raising a ValueError for out-of-range values, 'datetime_auto' to - return DatetimeMS objects when the underlying datetime is - out-of-range and 'datetime_clamp' to clamp to the minimum and - maximum possible datetimes. Defaults to 'datetime'. - .. versionchanged:: 4.0 - The default for `uuid_representation` was changed from - :const:`~bson.binary.UuidRepresentation.PYTHON_LEGACY` to - :const:`~bson.binary.UuidRepresentation.UNSPECIFIED`. - - .. versionadded:: 3.8 - `type_registry` attribute. - - .. warning:: Care must be taken when changing - `unicode_decode_error_handler` from its default value ('strict'). - The 'replace' and 'ignore' modes should not be used when documents - retrieved from the server will be modified in the client application - and stored back to the server. - """ + """Encapsulates options used encoding and / or decoding BSON.""" + + def __init__(self, *args, **kwargs): + """Encapsulates options used encoding and / or decoding BSON. + + The `document_class` option is used to define a custom type for use + decoding BSON documents. Access to the underlying raw BSON bytes for + a document is available using the :class:`~bson.raw_bson.RawBSONDocument` + type:: + + >>> from bson.raw_bson import RawBSONDocument + >>> from bson.codec_options import CodecOptions + >>> codec_options = CodecOptions(document_class=RawBSONDocument) + >>> coll = db.get_collection('test', codec_options=codec_options) + >>> doc = coll.find_one() + >>> doc.raw + '\\x16\\x00\\x00\\x00\\x07_id\\x00[0\\x165\\x91\\x10\\xea\\x14\\xe8\\xc5\\x8b\\x93\\x00' + + The document class can be any type that inherits from + :class:`~collections.abc.MutableMapping`:: + + >>> class AttributeDict(dict): + ... # A dict that supports attribute access. + ... def __getattr__(self, key): + ... return self[key] + ... def __setattr__(self, key, value): + ... self[key] = value + ... + >>> codec_options = CodecOptions(document_class=AttributeDict) + >>> coll = db.get_collection('test', codec_options=codec_options) + >>> doc = coll.find_one() + >>> doc._id + ObjectId('5b3016359110ea14e8c58b93') + + See :doc:`/examples/datetimes` for examples using the `tz_aware` and + `tzinfo` options. + + See :doc:`/examples/uuid` for examples using the `uuid_representation` + option. + + :Parameters: + - `document_class`: BSON documents returned in queries will be decoded + to an instance of this class. Must be a subclass of + :class:`~collections.abc.MutableMapping`. Defaults to :class:`dict`. + - `tz_aware`: If ``True``, BSON datetimes will be decoded to timezone + aware instances of :class:`~datetime.datetime`. Otherwise they will be + naive. Defaults to ``False``. + - `uuid_representation`: The BSON representation to use when encoding + and decoding instances of :class:`~uuid.UUID`. Defaults to + :data:`~bson.binary.UuidRepresentation.UNSPECIFIED`. New + applications should consider setting this to + :data:`~bson.binary.UuidRepresentation.STANDARD` for cross language + compatibility. See :ref:`handling-uuid-data-example` for details. + - `unicode_decode_error_handler`: The error handler to apply when + a Unicode-related error occurs during BSON decoding that would + otherwise raise :exc:`UnicodeDecodeError`. Valid options include + 'strict', 'replace', 'backslashreplace', 'surrogateescape', and + 'ignore'. Defaults to 'strict'. 
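+          For example, to skip over undecodable strings instead of raising
+          (see the warning below about the 'ignore' mode)::
+
+            options = CodecOptions(unicode_decode_error_handler='ignore')
+            coll = db.get_collection('test', codec_options=options)
+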
+ - `tzinfo`: A :class:`~datetime.tzinfo` subclass that specifies the + timezone to/from which :class:`~datetime.datetime` objects should be + encoded/decoded. + - `type_registry`: Instance of :class:`TypeRegistry` used to customize + encoding and decoding behavior. + - `datetime_conversion`: Specifies how UTC datetimes should be decoded + within BSON. Valid options include 'datetime_ms' to return as a + DatetimeMS, 'datetime' to return as a datetime.datetime and + raising a ValueError for out-of-range values, 'datetime_auto' to + return DatetimeMS objects when the underlying datetime is + out-of-range and 'datetime_clamp' to clamp to the minimum and + maximum possible datetimes. Defaults to 'datetime'. + + .. versionchanged:: 4.0 + The default for `uuid_representation` was changed from + :const:`~bson.binary.UuidRepresentation.PYTHON_LEGACY` to + :const:`~bson.binary.UuidRepresentation.UNSPECIFIED`. + + .. versionadded:: 3.8 + `type_registry` attribute. + + .. warning:: Care must be taken when changing + `unicode_decode_error_handler` from its default value ('strict'). + The 'replace' and 'ignore' modes should not be used when documents + retrieved from the server will be modified in the client application + and stored back to the server. + """ + return super().__init__() def __new__( cls: Type["CodecOptions"], From a43f320753a0b8710e23fb36ae7bb488ef790b41 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Thu, 5 Jan 2023 13:56:22 -0600 Subject: [PATCH 0317/1588] PYTHON-3470 Build Python 3.11 Wheels for MacOS (#1130) --- .evergreen/config.yml | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/.evergreen/config.yml b/.evergreen/config.yml index 96b6a00688..65b29dab14 100644 --- a/.evergreen/config.yml +++ b/.evergreen/config.yml @@ -1216,6 +1216,10 @@ tasks: tags: ["release_tag"] run_on: macos-1100 commands: + - func: "build release" + vars: + VERSION: "3.11" + ENSURE_UNIVERSAL2: "1" - func: "build release" vars: VERSION: "3.10" From eaea70bf08189d567838b3408bdc9fd5ae7cecf6 Mon Sep 17 00:00:00 2001 From: Julius Park Date: Tue, 10 Jan 2023 15:49:46 -0800 Subject: [PATCH 0318/1588] DRIVERS-2369 Disable causal consistency in implicit sessions (#1132) --- ...t-sessions-default-causal-consistency.json | 318 ++++++++++++++++++ 1 file changed, 318 insertions(+) create mode 100644 test/sessions/implicit-sessions-default-causal-consistency.json diff --git a/test/sessions/implicit-sessions-default-causal-consistency.json b/test/sessions/implicit-sessions-default-causal-consistency.json new file mode 100644 index 0000000000..517c8ebc63 --- /dev/null +++ b/test/sessions/implicit-sessions-default-causal-consistency.json @@ -0,0 +1,318 @@ +{ + "description": "implicit sessions default causal consistency", + "schemaVersion": "1.3", + "runOnRequirements": [ + { + "minServerVersion": "4.2", + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "implicit-cc-tests" + } + }, + { + "collection": { + "id": "collectionDefault", + "database": "database0", + "collectionName": "coll-default" + } + }, + { + "collection": { + "id": "collectionSnapshot", + "database": "database0", + "collectionName": "coll-snapshot", + "collectionOptions": { + "readConcern": { + "level": "snapshot" + } + } + } + }, + { + "collection": { + "id": "collectionlinearizable", + 
"database": "database0", + "collectionName": "coll-linearizable", + "collectionOptions": { + "readConcern": { + "level": "linearizable" + } + } + } + } + ], + "initialData": [ + { + "collectionName": "coll-default", + "databaseName": "implicit-cc-tests", + "documents": [ + { + "_id": 1, + "x": "default" + } + ] + }, + { + "collectionName": "coll-snapshot", + "databaseName": "implicit-cc-tests", + "documents": [ + { + "_id": 1, + "x": "snapshot" + } + ] + }, + { + "collectionName": "coll-linearizable", + "databaseName": "implicit-cc-tests", + "documents": [ + { + "_id": 1, + "x": "linearizable" + } + ] + } + ], + "tests": [ + { + "description": "readConcern is not sent on retried read in implicit session when readConcern level is not specified", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "errorCode": 11600 + } + } + } + }, + { + "name": "find", + "object": "collectionDefault", + "arguments": { + "filter": {} + }, + "expectResult": [ + { + "_id": 1, + "x": "default" + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "coll-default", + "filter": {}, + "readConcern": { + "$$exists": false + } + }, + "databaseName": "implicit-cc-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "coll-default", + "filter": {}, + "readConcern": { + "$$exists": false + } + }, + "databaseName": "implicit-cc-tests" + } + } + ] + } + ] + }, + { + "description": "afterClusterTime is not sent on retried read in implicit session when readConcern level is snapshot", + "runOnRequirements": [ + { + "minServerVersion": "5.0" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "errorCode": 11600 + } + } + } + }, + { + "name": "find", + "object": "collectionSnapshot", + "arguments": { + "filter": {} + }, + "expectResult": [ + { + "_id": 1, + "x": "snapshot" + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "coll-snapshot", + "filter": {}, + "readConcern": { + "level": "snapshot", + "afterClusterTime": { + "$$exists": false + } + } + }, + "databaseName": "implicit-cc-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "coll-snapshot", + "filter": {}, + "readConcern": { + "level": "snapshot", + "afterClusterTime": { + "$$exists": false + } + } + }, + "databaseName": "implicit-cc-tests" + } + } + ] + } + ] + }, + { + "description": "afterClusterTime is not sent on retried read in implicit session when readConcern level is linearizable", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "errorCode": 11600 + } + } + } + }, + { + "name": "find", + "object": "collectionlinearizable", + "arguments": { + "filter": {} + }, + "expectResult": [ + { + "_id": 1, + "x": "linearizable" + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "coll-linearizable", + "filter": 
{}, + "readConcern": { + "level": "linearizable", + "afterClusterTime": { + "$$exists": false + } + } + }, + "databaseName": "implicit-cc-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "coll-linearizable", + "filter": {}, + "readConcern": { + "level": "linearizable", + "afterClusterTime": { + "$$exists": false + } + } + }, + "databaseName": "implicit-cc-tests" + } + } + ] + } + ] + } + ] +} From a4c90ae157ffcb1d4a073ceeb9177400126ad871 Mon Sep 17 00:00:00 2001 From: Julius Park Date: Wed, 11 Jan 2023 20:03:28 -0800 Subject: [PATCH 0319/1588] PYTHON-3466 Test crypt_shared with older server versions (#1133) --- .evergreen/config.yml | 41 ++--------------------------------------- .evergreen/run-tests.sh | 18 ++++++------------ 2 files changed, 8 insertions(+), 51 deletions(-) diff --git a/.evergreen/config.yml b/.evergreen/config.yml index 65b29dab14..ab61725a20 100644 --- a/.evergreen/config.yml +++ b/.evergreen/config.yml @@ -452,6 +452,7 @@ functions: fi if [ -n "${test_crypt_shared}" ]; then export TEST_CRYPT_SHARED=1 + export CRYPT_SHARED_LIB_PATH=${CRYPT_SHARED_LIB_PATH} fi if [ -n "${test_pyopenssl}" ]; then export TEST_PYOPENSSL=1 @@ -2497,6 +2498,7 @@ axes: variables: test_encryption: true batchtime: 10080 # 7 days + # The path to crypt_shared is stored in the $CRYPT_SHARED_LIB_PATH expansion. - id: "encryption_crypt_shared" display_name: "Encryption shared lib" tags: ["encryption_tag"] @@ -2634,19 +2636,6 @@ buildvariants: - ".4.4" - ".4.2" - ".4.0" - rules: &encryption-exclude-rules - - if: - platform: "*" - auth: "*" - ssl: "*" - encryption: [ "encryption_crypt_shared" ] - then: - remove_tasks: - - ".rapid" - - ".5.0" - - ".4.4" - - ".4.2" - - ".4.0" # Test one server version with zSeries, POWER8, and ARM. - matrix_name: "test-different-cpu-architectures" @@ -2729,19 +2718,6 @@ buildvariants: encryption: "*" display_name: "${encryption} ${python-version} ${platform} ${auth-ssl}" tasks: *encryption-server-versions - rules: - - if: - platform: "*" - python-version: "*" - auth-ssl: "*" - encryption: [ "encryption_crypt_shared" ] - then: - remove_tasks: - - ".rapid" - - ".5.0" - - ".4.4" - - ".4.2" - - ".4.0" - matrix_name: "tests-python-version-ubuntu18-without-c-extensions" @@ -2853,19 +2829,6 @@ buildvariants: encryption: "*" display_name: "${encryption} ${platform} ${python-version-windows} ${auth-ssl}" tasks: *encryption-server-versions - rules: - - if: - platform: "*" - python-version-windows: "*" - auth-ssl: "*" - encryption: [ "encryption_crypt_shared" ] - then: - remove_tasks: - - ".rapid" - - ".5.0" - - ".4.4" - - ".4.2" - - ".4.0" # Storage engine tests on Ubuntu 18.04 (x86_64) with Python 3.7. - matrix_name: "tests-storage-engines" diff --git a/.evergreen/run-tests.sh b/.evergreen/run-tests.sh index 959ad901ad..d495e2671a 100755 --- a/.evergreen/run-tests.sh +++ b/.evergreen/run-tests.sh @@ -29,7 +29,7 @@ COMPRESSORS=${COMPRESSORS:-} MONGODB_VERSION=${MONGODB_VERSION:-} MONGODB_API_VERSION=${MONGODB_API_VERSION:-} TEST_ENCRYPTION=${TEST_ENCRYPTION:-} -TEST_CRYPT_SHARED=${TEST_CRYPT_SHARED:-} +CRYPT_SHARED_LIB_PATH=${CRYPT_SHARED_LIB_PATH:-} LIBMONGOCRYPT_URL=${LIBMONGOCRYPT_URL:-} DATA_LAKE=${DATA_LAKE:-} TEST_ARGS="" @@ -158,17 +158,11 @@ if [ -n "$TEST_ENCRYPTION" ]; then . 
$DRIVERS_TOOLS/.evergreen/csfle/set-temp-creds.sh if [ -n "$TEST_CRYPT_SHARED" ]; then - REAL_VERSION=$(mongod --version | head -n1 | cut -d v -f3 | tr -d "\r") - if [ "$MONGODB_VERSION" = "latest" ]; then - REAL_VERSION="latest" - fi - echo "Testing CSFLE with crypt_shared lib" - $PYTHON $DRIVERS_TOOLS/.evergreen/mongodl.py --component crypt_shared \ - --version "$REAL_VERSION" \ - --out ../crypt_shared/ - export DYLD_FALLBACK_LIBRARY_PATH=../crypt_shared/lib/:$DYLD_FALLBACK_LIBRARY_PATH - export LD_LIBRARY_PATH=../crypt_shared/lib:$LD_LIBRARY_PATH - export PATH=../crypt_shared/bin:$PATH + CRYPT_SHARED_DIR=`dirname $CRYPT_SHARED_LIB_PATH` + echo "using crypt_shared_dir $CRYPT_SHARED_DIR" + export DYLD_FALLBACK_LIBRARY_PATH=$CRYPT_SHARED_DIR:$DYLD_FALLBACK_LIBRARY_PATH + export LD_LIBRARY_PATH=$CRYPT_SHARED_DIR:$LD_LIBRARY_PATH + export PATH=$CRYPT_SHARED_DIR:$PATH fi # Only run the encryption tests. TEST_ARGS="-s test.test_encryption" From 24170dd523b04e5f28ada72e125fc9d4c36a3510 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Thu, 12 Jan 2023 12:08:53 -0600 Subject: [PATCH 0320/1588] PYTHON-3523 Remove getNonce command usage for 6.2+ (#1135) --- .evergreen/resync-specs.sh | 2 +- test/command_monitoring/bulkWrite.json | 154 +++++ test/command_monitoring/command.json | 83 +++ test/command_monitoring/deleteMany.json | 162 +++++ test/command_monitoring/deleteOne.json | 162 +++++ test/command_monitoring/find.json | 550 +++++++++++++++++ test/command_monitoring/insertMany.json | 148 +++++ test/command_monitoring/insertOne.json | 144 +++++ test/command_monitoring/legacy/bulkWrite.json | 110 ---- test/command_monitoring/legacy/command.json | 113 ---- .../command_monitoring/legacy/deleteMany.json | 115 ---- test/command_monitoring/legacy/deleteOne.json | 115 ---- test/command_monitoring/legacy/find.json | 559 ------------------ .../command_monitoring/legacy/insertMany.json | 145 ----- test/command_monitoring/legacy/insertOne.json | 97 --- .../legacy/unacknowledgedBulkWrite.json | 69 --- .../command_monitoring/legacy/updateMany.json | 135 ----- test/command_monitoring/legacy/updateOne.json | 190 ------ .../{unified => }/redacted-commands.json | 20 + .../unacknowledgedBulkWrite.json | 108 ++++ test/command_monitoring/updateMany.json | 188 ++++++ test/command_monitoring/updateOne.json | 260 ++++++++ ..._unified.py => test_command_monitoring.py} | 2 +- test/test_command_monitoring_legacy.py | 237 -------- test/test_monitoring.py | 1 + 25 files changed, 1982 insertions(+), 1887 deletions(-) create mode 100644 test/command_monitoring/bulkWrite.json create mode 100644 test/command_monitoring/command.json create mode 100644 test/command_monitoring/deleteMany.json create mode 100644 test/command_monitoring/deleteOne.json create mode 100644 test/command_monitoring/find.json create mode 100644 test/command_monitoring/insertMany.json create mode 100644 test/command_monitoring/insertOne.json delete mode 100644 test/command_monitoring/legacy/bulkWrite.json delete mode 100644 test/command_monitoring/legacy/command.json delete mode 100644 test/command_monitoring/legacy/deleteMany.json delete mode 100644 test/command_monitoring/legacy/deleteOne.json delete mode 100644 test/command_monitoring/legacy/find.json delete mode 100644 test/command_monitoring/legacy/insertMany.json delete mode 100644 test/command_monitoring/legacy/insertOne.json delete mode 100644 test/command_monitoring/legacy/unacknowledgedBulkWrite.json delete mode 100644 test/command_monitoring/legacy/updateMany.json delete mode 100644 
test/command_monitoring/legacy/updateOne.json rename test/command_monitoring/{unified => }/redacted-commands.json (97%) create mode 100644 test/command_monitoring/unacknowledgedBulkWrite.json create mode 100644 test/command_monitoring/updateMany.json create mode 100644 test/command_monitoring/updateOne.json rename test/{test_command_monitoring_unified.py => test_command_monitoring.py} (95%) delete mode 100644 test/test_command_monitoring_legacy.py diff --git a/.evergreen/resync-specs.sh b/.evergreen/resync-specs.sh index 817fa4b730..489ff28b3a 100755 --- a/.evergreen/resync-specs.sh +++ b/.evergreen/resync-specs.sh @@ -100,7 +100,7 @@ do rm $PYMONGO/test/cmap/wait-queue-fairness.json # PYTHON-1873 ;; apm|APM|command-monitoring|command_monitoring) - cpjson command-monitoring/tests command_monitoring + cpjson command-logging-and-monitoring/tests/monitoring command_monitoring ;; crud|CRUD) cpjson crud/tests/ crud diff --git a/test/command_monitoring/bulkWrite.json b/test/command_monitoring/bulkWrite.json new file mode 100644 index 0000000000..49c728442e --- /dev/null +++ b/test/command_monitoring/bulkWrite.json @@ -0,0 +1,154 @@ +{ + "description": "bulkWrite", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client", + "observeEvents": [ + "commandStartedEvent", + "commandSucceededEvent", + "commandFailedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "command-monitoring-tests" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "test" + } + } + ], + "initialData": [ + { + "collectionName": "test", + "databaseName": "command-monitoring-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ], + "tests": [ + { + "description": "A successful mixed bulk write", + "operations": [ + { + "name": "bulkWrite", + "object": "collection", + "arguments": { + "requests": [ + { + "insertOne": { + "document": { + "_id": 4, + "x": 44 + } + } + }, + { + "updateOne": { + "filter": { + "_id": 3 + }, + "update": { + "$set": { + "x": 333 + } + } + } + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 4, + "x": 44 + } + ], + "ordered": true + }, + "commandName": "insert", + "databaseName": "command-monitoring-tests" + } + }, + { + "commandSucceededEvent": { + "reply": { + "ok": 1, + "n": 1 + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "update": "test", + "updates": [ + { + "q": { + "_id": 3 + }, + "u": { + "$set": { + "x": 333 + } + }, + "upsert": { + "$$unsetOrMatches": false + }, + "multi": { + "$$unsetOrMatches": false + } + } + ], + "ordered": true + }, + "commandName": "update", + "databaseName": "command-monitoring-tests" + } + }, + { + "commandSucceededEvent": { + "reply": { + "ok": 1, + "n": 1 + }, + "commandName": "update" + } + } + ] + } + ] + } + ] +} diff --git a/test/command_monitoring/command.json b/test/command_monitoring/command.json new file mode 100644 index 0000000000..c28af95fed --- /dev/null +++ b/test/command_monitoring/command.json @@ -0,0 +1,83 @@ +{ + "description": "command", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client", + "observeEvents": [ + "commandStartedEvent", + "commandSucceededEvent", + "commandFailedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + 
"databaseName": "command-monitoring-tests" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "test" + } + } + ], + "initialData": [ + { + "collectionName": "test", + "databaseName": "command-monitoring-tests", + "documents": [ + { + "_id": 1, + "x": 11 + } + ] + } + ], + "tests": [ + { + "description": "A successful command", + "operations": [ + { + "name": "runCommand", + "object": "database", + "arguments": { + "command": { + "ping": 1 + }, + "commandName": "ping" + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1 + }, + "commandName": "ping", + "databaseName": "command-monitoring-tests" + } + }, + { + "commandSucceededEvent": { + "reply": { + "ok": 1 + }, + "commandName": "ping" + } + } + ] + } + ] + } + ] +} diff --git a/test/command_monitoring/deleteMany.json b/test/command_monitoring/deleteMany.json new file mode 100644 index 0000000000..78ebad1f98 --- /dev/null +++ b/test/command_monitoring/deleteMany.json @@ -0,0 +1,162 @@ +{ + "description": "deleteMany", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client", + "observeEvents": [ + "commandStartedEvent", + "commandSucceededEvent", + "commandFailedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "command-monitoring-tests" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "test" + } + } + ], + "initialData": [ + { + "collectionName": "test", + "databaseName": "command-monitoring-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ], + "tests": [ + { + "description": "A successful deleteMany", + "operations": [ + { + "name": "deleteMany", + "object": "collection", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "delete": "test", + "deletes": [ + { + "q": { + "_id": { + "$gt": 1 + } + }, + "limit": 0 + } + ], + "ordered": true + }, + "commandName": "delete", + "databaseName": "command-monitoring-tests" + } + }, + { + "commandSucceededEvent": { + "reply": { + "ok": 1, + "n": 2 + }, + "commandName": "delete" + } + } + ] + } + ] + }, + { + "description": "A successful deleteMany with write errors", + "operations": [ + { + "name": "deleteMany", + "object": "collection", + "arguments": { + "filter": { + "_id": { + "$unsupported": 1 + } + } + }, + "expectError": { + "isClientError": false + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "delete": "test", + "deletes": [ + { + "q": { + "_id": { + "$unsupported": 1 + } + }, + "limit": 0 + } + ], + "ordered": true + }, + "commandName": "delete", + "databaseName": "command-monitoring-tests" + } + }, + { + "commandSucceededEvent": { + "reply": { + "ok": 1, + "n": 0, + "writeErrors": { + "$$type": "array" + } + }, + "commandName": "delete" + } + } + ] + } + ] + } + ] +} diff --git a/test/command_monitoring/deleteOne.json b/test/command_monitoring/deleteOne.json new file mode 100644 index 0000000000..2420794fe5 --- /dev/null +++ b/test/command_monitoring/deleteOne.json @@ -0,0 +1,162 @@ +{ + "description": "deleteOne", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client", + "observeEvents": [ + "commandStartedEvent", + 
"commandSucceededEvent", + "commandFailedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "command-monitoring-tests" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "test" + } + } + ], + "initialData": [ + { + "collectionName": "test", + "databaseName": "command-monitoring-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ], + "tests": [ + { + "description": "A successful deleteOne", + "operations": [ + { + "name": "deleteOne", + "object": "collection", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "delete": "test", + "deletes": [ + { + "q": { + "_id": { + "$gt": 1 + } + }, + "limit": 1 + } + ], + "ordered": true + }, + "commandName": "delete", + "databaseName": "command-monitoring-tests" + } + }, + { + "commandSucceededEvent": { + "reply": { + "ok": 1, + "n": 1 + }, + "commandName": "delete" + } + } + ] + } + ] + }, + { + "description": "A successful deleteOne with write errors", + "operations": [ + { + "name": "deleteOne", + "object": "collection", + "arguments": { + "filter": { + "_id": { + "$unsupported": 1 + } + } + }, + "expectError": { + "isClientError": false + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "delete": "test", + "deletes": [ + { + "q": { + "_id": { + "$unsupported": 1 + } + }, + "limit": 1 + } + ], + "ordered": true + }, + "commandName": "delete", + "databaseName": "command-monitoring-tests" + } + }, + { + "commandSucceededEvent": { + "reply": { + "ok": 1, + "n": 0, + "writeErrors": { + "$$type": "array" + } + }, + "commandName": "delete" + } + } + ] + } + ] + } + ] +} diff --git a/test/command_monitoring/find.json b/test/command_monitoring/find.json new file mode 100644 index 0000000000..4b5f45ae99 --- /dev/null +++ b/test/command_monitoring/find.json @@ -0,0 +1,550 @@ +{ + "description": "find", + "schemaVersion": "1.1", + "createEntities": [ + { + "client": { + "id": "client", + "observeEvents": [ + "commandStartedEvent", + "commandSucceededEvent", + "commandFailedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "command-monitoring-tests" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "test" + } + } + ], + "_yamlAnchors": { + "namespace": "command-monitoring-tests.test" + }, + "initialData": [ + { + "collectionName": "test", + "databaseName": "command-monitoring-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + }, + { + "_id": 5, + "x": 55 + } + ] + } + ], + "tests": [ + { + "description": "A successful find with no options", + "operations": [ + { + "name": "find", + "object": "collection", + "arguments": { + "filter": { + "_id": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "test", + "filter": { + "_id": 1 + } + }, + "commandName": "find", + "databaseName": "command-monitoring-tests" + } + }, + { + "commandSucceededEvent": { + "reply": { + "ok": 1, + "cursor": { + "id": 0, + "ns": "command-monitoring-tests.test", + "firstBatch": [ + { + "_id": 1, + "x": 11 + } + ] + } + }, + "commandName": "find" + } 
+ } + ] + } + ] + }, + { + "description": "A successful find with options", + "operations": [ + { + "name": "find", + "object": "collection", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "sort": { + "x": -1 + }, + "projection": { + "_id": 0, + "x": 1 + }, + "skip": 2, + "comment": "test", + "hint": { + "_id": 1 + }, + "max": { + "_id": 6 + }, + "maxTimeMS": 6000, + "min": { + "_id": 0 + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "test", + "filter": { + "_id": { + "$gt": 1 + } + }, + "sort": { + "x": -1 + }, + "projection": { + "_id": 0, + "x": 1 + }, + "skip": 2, + "comment": "test", + "hint": { + "_id": 1 + }, + "max": { + "_id": 6 + }, + "maxTimeMS": 6000, + "min": { + "_id": 0 + } + }, + "commandName": "find", + "databaseName": "command-monitoring-tests" + } + }, + { + "commandSucceededEvent": { + "reply": { + "ok": 1, + "cursor": { + "id": 0, + "ns": "command-monitoring-tests.test", + "firstBatch": [ + { + "x": 33 + }, + { + "x": 22 + } + ] + } + }, + "commandName": "find" + } + } + ] + } + ] + }, + { + "description": "A successful find with showRecordId and returnKey", + "operations": [ + { + "name": "find", + "object": "collection", + "arguments": { + "filter": {}, + "sort": { + "_id": 1 + }, + "showRecordId": true, + "returnKey": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "test", + "showRecordId": true, + "returnKey": true + }, + "commandName": "find", + "databaseName": "command-monitoring-tests" + } + }, + { + "commandSucceededEvent": { + "reply": { + "ok": 1, + "cursor": { + "id": 0, + "ns": "command-monitoring-tests.test", + "firstBatch": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 3 + }, + { + "_id": 4 + }, + { + "_id": 5 + } + ] + } + }, + "commandName": "find" + } + } + ] + } + ] + }, + { + "description": "A successful find with a getMore", + "operations": [ + { + "name": "find", + "object": "collection", + "arguments": { + "filter": { + "_id": { + "$gte": 1 + } + }, + "sort": { + "_id": 1 + }, + "batchSize": 3 + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "test", + "filter": { + "_id": { + "$gte": 1 + } + }, + "sort": { + "_id": 1 + }, + "batchSize": 3 + }, + "commandName": "find", + "databaseName": "command-monitoring-tests" + } + }, + { + "commandSucceededEvent": { + "reply": { + "ok": 1, + "cursor": { + "id": { + "$$type": [ + "int", + "long" + ] + }, + "ns": "command-monitoring-tests.test", + "firstBatch": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "collection": "test", + "batchSize": 3 + }, + "commandName": "getMore", + "databaseName": "command-monitoring-tests" + } + }, + { + "commandSucceededEvent": { + "reply": { + "ok": 1, + "cursor": { + "id": 0, + "ns": "command-monitoring-tests.test", + "nextBatch": [ + { + "_id": 4, + "x": 44 + }, + { + "_id": 5, + "x": 55 + } + ] + } + }, + "commandName": "getMore" + } + } + ] + } + ] + }, + { + "description": "A successful find event with a getmore and the server kills the cursor (<= 4.4)", + "runOnRequirements": [ + { + "minServerVersion": "3.1", + "maxServerVersion": "4.4.99", + "topologies": [ + "single", + "replicaset" + ] + } + ], + 
"operations": [ + { + "name": "find", + "object": "collection", + "arguments": { + "filter": { + "_id": { + "$gte": 1 + } + }, + "sort": { + "_id": 1 + }, + "batchSize": 3, + "limit": 4 + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "test", + "filter": { + "_id": { + "$gte": 1 + } + }, + "sort": { + "_id": 1 + }, + "batchSize": 3, + "limit": 4 + }, + "commandName": "find", + "databaseName": "command-monitoring-tests" + } + }, + { + "commandSucceededEvent": { + "reply": { + "ok": 1, + "cursor": { + "id": { + "$$type": [ + "int", + "long" + ] + }, + "ns": "command-monitoring-tests.test", + "firstBatch": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "collection": "test", + "batchSize": 1 + }, + "commandName": "getMore", + "databaseName": "command-monitoring-tests" + } + }, + { + "commandSucceededEvent": { + "reply": { + "ok": 1, + "cursor": { + "id": 0, + "ns": "command-monitoring-tests.test", + "nextBatch": [ + { + "_id": 4, + "x": 44 + } + ] + } + }, + "commandName": "getMore" + } + } + ] + } + ] + }, + { + "description": "A failed find event", + "operations": [ + { + "name": "find", + "object": "collection", + "arguments": { + "filter": { + "$or": true + } + }, + "expectError": { + "isClientError": false + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "test", + "filter": { + "$or": true + } + }, + "commandName": "find", + "databaseName": "command-monitoring-tests" + } + }, + { + "commandFailedEvent": { + "commandName": "find" + } + } + ] + } + ] + } + ] +} diff --git a/test/command_monitoring/insertMany.json b/test/command_monitoring/insertMany.json new file mode 100644 index 0000000000..a80a218c67 --- /dev/null +++ b/test/command_monitoring/insertMany.json @@ -0,0 +1,148 @@ +{ + "description": "insertMany", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client", + "observeEvents": [ + "commandStartedEvent", + "commandSucceededEvent", + "commandFailedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "command-monitoring-tests" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "test" + } + } + ], + "initialData": [ + { + "collectionName": "test", + "databaseName": "command-monitoring-tests", + "documents": [ + { + "_id": 1, + "x": 11 + } + ] + } + ], + "tests": [ + { + "description": "A successful insertMany", + "operations": [ + { + "name": "insertMany", + "object": "collection", + "arguments": { + "documents": [ + { + "_id": 2, + "x": 22 + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 2, + "x": 22 + } + ], + "ordered": true + }, + "commandName": "insert", + "databaseName": "command-monitoring-tests" + } + }, + { + "commandSucceededEvent": { + "reply": { + "ok": 1, + "n": 1 + }, + "commandName": "insert" + } + } + ] + } + ] + }, + { + "description": "A successful insertMany with write errors", + "operations": [ + { + "name": "insertMany", + "object": "collection", + "arguments": { + "documents": [ + { + "_id": 1, + "x": 11 + } + ] + }, + "expectError": { + "isClientError": false + } + 
} + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1, + "x": 11 + } + ], + "ordered": true + }, + "commandName": "insert", + "databaseName": "command-monitoring-tests" + } + }, + { + "commandSucceededEvent": { + "reply": { + "ok": 1, + "n": 0, + "writeErrors": { + "$$type": "array" + } + }, + "commandName": "insert" + } + } + ] + } + ] + } + ] +} diff --git a/test/command_monitoring/insertOne.json b/test/command_monitoring/insertOne.json new file mode 100644 index 0000000000..6ff732e41b --- /dev/null +++ b/test/command_monitoring/insertOne.json @@ -0,0 +1,144 @@ +{ + "description": "insertOne", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client", + "observeEvents": [ + "commandStartedEvent", + "commandSucceededEvent", + "commandFailedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "command-monitoring-tests" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "test" + } + } + ], + "initialData": [ + { + "collectionName": "test", + "databaseName": "command-monitoring-tests", + "documents": [ + { + "_id": 1, + "x": 11 + } + ] + } + ], + "tests": [ + { + "description": "A successful insertOne", + "operations": [ + { + "name": "insertOne", + "object": "collection", + "arguments": { + "document": { + "_id": 2, + "x": 22 + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 2, + "x": 22 + } + ], + "ordered": true + }, + "commandName": "insert", + "databaseName": "command-monitoring-tests" + } + }, + { + "commandSucceededEvent": { + "reply": { + "ok": 1, + "n": 1 + }, + "commandName": "insert" + } + } + ] + } + ] + }, + { + "description": "A successful insertOne with write errors", + "operations": [ + { + "name": "insertOne", + "object": "collection", + "arguments": { + "document": { + "_id": 1, + "x": 11 + } + }, + "expectError": { + "isClientError": false + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1, + "x": 11 + } + ], + "ordered": true + }, + "commandName": "insert", + "databaseName": "command-monitoring-tests" + } + }, + { + "commandSucceededEvent": { + "reply": { + "ok": 1, + "n": 0, + "writeErrors": { + "$$type": "array" + } + }, + "commandName": "insert" + } + } + ] + } + ] + } + ] +} diff --git a/test/command_monitoring/legacy/bulkWrite.json b/test/command_monitoring/legacy/bulkWrite.json deleted file mode 100644 index ca5a9a105c..0000000000 --- a/test/command_monitoring/legacy/bulkWrite.json +++ /dev/null @@ -1,110 +0,0 @@ -{ - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - } - ], - "collection_name": "test", - "database_name": "command-monitoring-tests", - "tests": [ - { - "description": "A successful mixed bulk write", - "operation": { - "name": "bulkWrite", - "arguments": { - "requests": [ - { - "name": "insertOne", - "arguments": { - "document": { - "_id": 4, - "x": 44 - } - } - }, - { - "name": "updateOne", - "arguments": { - "filter": { - "_id": 3 - }, - "update": { - "$set": { - "x": 333 - } - } - } - } - ] - } - }, - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 4, - "x": 44 - } - 
], - "ordered": true - }, - "command_name": "insert", - "database_name": "command-monitoring-tests" - } - }, - { - "command_succeeded_event": { - "reply": { - "ok": 1, - "n": 1 - }, - "command_name": "insert" - } - }, - { - "command_started_event": { - "command": { - "update": "test", - "updates": [ - { - "q": { - "_id": 3 - }, - "u": { - "$set": { - "x": 333 - } - } - } - ], - "ordered": true - }, - "command_name": "update", - "database_name": "command-monitoring-tests" - } - }, - { - "command_succeeded_event": { - "reply": { - "ok": 1, - "n": 1 - }, - "command_name": "update" - } - } - ] - } - ] -} diff --git a/test/command_monitoring/legacy/command.json b/test/command_monitoring/legacy/command.json deleted file mode 100644 index 7e1e347be0..0000000000 --- a/test/command_monitoring/legacy/command.json +++ /dev/null @@ -1,113 +0,0 @@ -{ - "data": [ - { - "_id": 1, - "x": 11 - } - ], - "collection_name": "test", - "database_name": "command-monitoring-tests", - "tests": [ - { - "description": "A successful command", - "operation": { - "name": "count", - "arguments": { - "filter": { - "_id": 1 - } - } - }, - "expectations": [ - { - "command_started_event": { - "command": { - "count": "test", - "query": { - "_id": 1 - } - }, - "command_name": "count", - "database_name": "command-monitoring-tests" - } - }, - { - "command_succeeded_event": { - "reply": { - "ok": 1, - "n": 1 - }, - "command_name": "count" - } - } - ] - }, - { - "description": "A failed command event", - "operation": { - "name": "count", - "arguments": { - "filter": { - "$or": true - } - } - }, - "expectations": [ - { - "command_started_event": { - "command": { - "count": "test", - "query": { - "$or": true - } - }, - "command_name": "count", - "database_name": "command-monitoring-tests" - } - }, - { - "command_failed_event": { - "command_name": "count" - } - } - ] - }, - { - "description": "A successful command with a non-primary read preference", - "operation": { - "name": "count", - "arguments": { - "filter": { - "_id": 1 - } - }, - "read_preference": { - "mode": "primaryPreferred" - } - }, - "expectations": [ - { - "command_started_event": { - "command": { - "count": "test", - "query": { - "_id": 1 - } - }, - "command_name": "count", - "database_name": "command-monitoring-tests" - } - }, - { - "command_succeeded_event": { - "reply": { - "ok": 1, - "n": 1 - }, - "command_name": "count" - } - } - ] - } - ] -} diff --git a/test/command_monitoring/legacy/deleteMany.json b/test/command_monitoring/legacy/deleteMany.json deleted file mode 100644 index 7cd396806c..0000000000 --- a/test/command_monitoring/legacy/deleteMany.json +++ /dev/null @@ -1,115 +0,0 @@ -{ - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - } - ], - "collection_name": "test", - "database_name": "command-monitoring-tests", - "tests": [ - { - "description": "A successful delete many", - "operation": { - "name": "deleteMany", - "arguments": { - "filter": { - "_id": { - "$gt": 1 - } - } - } - }, - "expectations": [ - { - "command_started_event": { - "command": { - "delete": "test", - "deletes": [ - { - "q": { - "_id": { - "$gt": 1 - } - }, - "limit": 0 - } - ], - "ordered": true - }, - "command_name": "delete", - "database_name": "command-monitoring-tests" - } - }, - { - "command_succeeded_event": { - "reply": { - "ok": 1, - "n": 2 - }, - "command_name": "delete" - } - } - ] - }, - { - "description": "A successful delete many command with write errors", - "operation": { - "name": "deleteMany", - "arguments": { - 
"filter": { - "_id": { - "$nothing": 1 - } - } - } - }, - "expectations": [ - { - "command_started_event": { - "command": { - "delete": "test", - "deletes": [ - { - "q": { - "_id": { - "$nothing": 1 - } - }, - "limit": 0 - } - ], - "ordered": true - }, - "command_name": "delete", - "database_name": "command-monitoring-tests" - } - }, - { - "command_succeeded_event": { - "reply": { - "ok": 1, - "n": 0, - "writeErrors": [ - { - "index": 0, - "code": 42, - "errmsg": "" - } - ] - }, - "command_name": "delete" - } - } - ] - } - ] -} diff --git a/test/command_monitoring/legacy/deleteOne.json b/test/command_monitoring/legacy/deleteOne.json deleted file mode 100644 index 0971dfcf2c..0000000000 --- a/test/command_monitoring/legacy/deleteOne.json +++ /dev/null @@ -1,115 +0,0 @@ -{ - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - } - ], - "collection_name": "test", - "database_name": "command-monitoring-tests", - "tests": [ - { - "description": "A successful delete one", - "operation": { - "name": "deleteOne", - "arguments": { - "filter": { - "_id": { - "$gt": 1 - } - } - } - }, - "expectations": [ - { - "command_started_event": { - "command": { - "delete": "test", - "deletes": [ - { - "q": { - "_id": { - "$gt": 1 - } - }, - "limit": 1 - } - ], - "ordered": true - }, - "command_name": "delete", - "database_name": "command-monitoring-tests" - } - }, - { - "command_succeeded_event": { - "reply": { - "ok": 1, - "n": 1 - }, - "command_name": "delete" - } - } - ] - }, - { - "description": "A successful delete one command with write errors", - "operation": { - "name": "deleteOne", - "arguments": { - "filter": { - "_id": { - "$nothing": 1 - } - } - } - }, - "expectations": [ - { - "command_started_event": { - "command": { - "delete": "test", - "deletes": [ - { - "q": { - "_id": { - "$nothing": 1 - } - }, - "limit": 1 - } - ], - "ordered": true - }, - "command_name": "delete", - "database_name": "command-monitoring-tests" - } - }, - { - "command_succeeded_event": { - "reply": { - "ok": 1, - "n": 0, - "writeErrors": [ - { - "index": 0, - "code": 42, - "errmsg": "" - } - ] - }, - "command_name": "delete" - } - } - ] - } - ] -} diff --git a/test/command_monitoring/legacy/find.json b/test/command_monitoring/legacy/find.json deleted file mode 100644 index e2bb95306f..0000000000 --- a/test/command_monitoring/legacy/find.json +++ /dev/null @@ -1,559 +0,0 @@ -{ - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - }, - { - "_id": 4, - "x": 44 - }, - { - "_id": 5, - "x": 55 - } - ], - "collection_name": "test", - "database_name": "command-monitoring-tests", - "namespace": "command-monitoring-tests.test", - "tests": [ - { - "description": "A successful find event with no options", - "operation": { - "name": "find", - "arguments": { - "filter": { - "_id": 1 - } - } - }, - "expectations": [ - { - "command_started_event": { - "command": { - "find": "test", - "filter": { - "_id": 1 - } - }, - "command_name": "find", - "database_name": "command-monitoring-tests" - } - }, - { - "command_succeeded_event": { - "reply": { - "ok": 1, - "cursor": { - "id": { - "$numberLong": "0" - }, - "ns": "command-monitoring-tests.test", - "firstBatch": [ - { - "_id": 1, - "x": 11 - } - ] - } - }, - "command_name": "find" - } - } - ] - }, - { - "description": "A successful find event with options", - "operation": { - "name": "find", - "read_preference": { - "mode": "primaryPreferred" - }, - "arguments": { - "filter": { - "_id": { - "$gt": 1 - 
} - }, - "sort": { - "_id": 1 - }, - "skip": { - "$numberLong": "2" - }, - "comment": "test", - "hint": { - "_id": 1 - }, - "max": { - "_id": 6 - }, - "maxTimeMS": 6000, - "min": { - "_id": 0 - }, - "returnKey": false, - "showRecordId": false - } - }, - "expectations": [ - { - "command_started_event": { - "command": { - "find": "test", - "filter": { - "_id": { - "$gt": 1 - } - }, - "sort": { - "_id": 1 - }, - "skip": { - "$numberLong": "2" - }, - "comment": "test", - "hint": { - "_id": 1 - }, - "max": { - "_id": 6 - }, - "maxTimeMS": 6000, - "min": { - "_id": 0 - }, - "returnKey": false, - "showRecordId": false - }, - "command_name": "find", - "database_name": "command-monitoring-tests" - } - }, - { - "command_succeeded_event": { - "reply": { - "ok": 1, - "cursor": { - "id": { - "$numberLong": "0" - }, - "ns": "command-monitoring-tests.test", - "firstBatch": [ - { - "_id": 4, - "x": 44 - }, - { - "_id": 5, - "x": 55 - } - ] - } - }, - "command_name": "find" - } - } - ] - }, - { - "description": "A successful find event with a getmore", - "operation": { - "name": "find", - "arguments": { - "filter": { - "_id": { - "$gte": 1 - } - }, - "sort": { - "_id": 1 - }, - "batchSize": { - "$numberLong": "3" - } - } - }, - "expectations": [ - { - "command_started_event": { - "command": { - "find": "test", - "filter": { - "_id": { - "$gte": 1 - } - }, - "sort": { - "_id": 1 - }, - "batchSize": { - "$numberLong": "3" - } - }, - "command_name": "find", - "database_name": "command-monitoring-tests" - } - }, - { - "command_succeeded_event": { - "reply": { - "ok": 1, - "cursor": { - "id": { - "$numberLong": "42" - }, - "ns": "command-monitoring-tests.test", - "firstBatch": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - } - ] - } - }, - "command_name": "find" - } - }, - { - "command_started_event": { - "command": { - "getMore": { - "$numberLong": "42" - }, - "collection": "test", - "batchSize": { - "$numberLong": "3" - } - }, - "command_name": "getMore", - "database_name": "command-monitoring-tests" - } - }, - { - "command_succeeded_event": { - "reply": { - "ok": 1, - "cursor": { - "id": { - "$numberLong": "0" - }, - "ns": "command-monitoring-tests.test", - "nextBatch": [ - { - "_id": 4, - "x": 44 - }, - { - "_id": 5, - "x": 55 - } - ] - } - }, - "command_name": "getMore" - } - } - ] - }, - { - "description": "A successful find event with a getmore and killcursors", - "ignore_if_server_version_greater_than": "3.0", - "operation": { - "name": "find", - "arguments": { - "filter": { - "_id": { - "$gte": 1 - } - }, - "sort": { - "_id": 1 - }, - "batchSize": { - "$numberLong": "3" - }, - "limit": { - "$numberLong": "4" - } - } - }, - "expectations": [ - { - "command_started_event": { - "command": { - "find": "test", - "filter": { - "_id": { - "$gte": 1 - } - }, - "sort": { - "_id": 1 - }, - "batchSize": { - "$numberLong": "3" - }, - "limit": { - "$numberLong": "4" - } - }, - "command_name": "find", - "database_name": "command-monitoring-tests" - } - }, - { - "command_succeeded_event": { - "reply": { - "ok": 1, - "cursor": { - "id": { - "$numberLong": "42" - }, - "ns": "command-monitoring-tests.test", - "firstBatch": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - } - ] - } - }, - "command_name": "find" - } - }, - { - "command_started_event": { - "command": { - "getMore": { - "$numberLong": "42" - }, - "collection": "test", - "batchSize": { - "$numberLong": "1" - } - }, - "command_name": "getMore", - "database_name": 
"command-monitoring-tests" - } - }, - { - "command_succeeded_event": { - "reply": { - "ok": 1, - "cursor": { - "id": { - "$numberLong": "42" - }, - "ns": "command-monitoring-tests.test", - "nextBatch": [ - { - "_id": 4, - "x": 44 - } - ] - } - }, - "command_name": "getMore" - } - }, - { - "command_started_event": { - "command": { - "killCursors": "test", - "cursors": [ - { - "$numberLong": "42" - } - ] - }, - "command_name": "killCursors", - "database_name": "command-monitoring-tests" - } - }, - { - "command_succeeded_event": { - "reply": { - "ok": 1, - "cursorsUnknown": [ - { - "$numberLong": "42" - } - ] - }, - "command_name": "killCursors" - } - } - ] - }, - { - "description": "A successful find event with a getmore and the server kills the cursor (<= 4.4)", - "ignore_if_server_version_less_than": "3.1", - "ignore_if_server_version_greater_than": "4.4", - "ignore_if_topology_type": [ - "sharded" - ], - "operation": { - "name": "find", - "arguments": { - "filter": { - "_id": { - "$gte": 1 - } - }, - "sort": { - "_id": 1 - }, - "batchSize": { - "$numberLong": "3" - }, - "limit": { - "$numberLong": "4" - } - } - }, - "expectations": [ - { - "command_started_event": { - "command": { - "find": "test", - "filter": { - "_id": { - "$gte": 1 - } - }, - "sort": { - "_id": 1 - }, - "batchSize": { - "$numberLong": "3" - }, - "limit": { - "$numberLong": "4" - } - }, - "command_name": "find", - "database_name": "command-monitoring-tests" - } - }, - { - "command_succeeded_event": { - "reply": { - "ok": 1, - "cursor": { - "id": { - "$numberLong": "42" - }, - "ns": "command-monitoring-tests.test", - "firstBatch": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - } - ] - } - }, - "command_name": "find" - } - }, - { - "command_started_event": { - "command": { - "getMore": { - "$numberLong": "42" - }, - "collection": "test", - "batchSize": { - "$numberLong": "1" - } - }, - "command_name": "getMore", - "database_name": "command-monitoring-tests" - } - }, - { - "command_succeeded_event": { - "reply": { - "ok": 1, - "cursor": { - "id": { - "$numberLong": "0" - }, - "ns": "command-monitoring-tests.test", - "nextBatch": [ - { - "_id": 4, - "x": 44 - } - ] - } - }, - "command_name": "getMore" - } - } - ] - }, - { - "description": "A failed find event", - "operation": { - "name": "find", - "arguments": { - "filter": { - "$or": true - } - } - }, - "expectations": [ - { - "command_started_event": { - "command": { - "find": "test", - "filter": { - "$or": true - } - }, - "command_name": "find", - "database_name": "command-monitoring-tests" - } - }, - { - "command_failed_event": { - "command_name": "find" - } - } - ] - } - ] -} diff --git a/test/command_monitoring/legacy/insertMany.json b/test/command_monitoring/legacy/insertMany.json deleted file mode 100644 index 0becf928e4..0000000000 --- a/test/command_monitoring/legacy/insertMany.json +++ /dev/null @@ -1,145 +0,0 @@ -{ - "data": [ - { - "_id": 1, - "x": 11 - } - ], - "collection_name": "test", - "database_name": "command-monitoring-tests", - "tests": [ - { - "description": "A successful insert many", - "operation": { - "name": "insertMany", - "arguments": { - "documents": [ - { - "_id": 2, - "x": 22 - } - ] - } - }, - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 2, - "x": 22 - } - ], - "ordered": true - }, - "command_name": "insert", - "database_name": "command-monitoring-tests" - } - }, - { - "command_succeeded_event": { - "reply": { - "ok": 1, - "n": 
1 - }, - "command_name": "insert" - } - } - ] - }, - { - "description": "A successful insert many command with write errors", - "operation": { - "name": "insertMany", - "arguments": { - "documents": [ - { - "_id": 1, - "x": 11 - } - ] - } - }, - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1, - "x": 11 - } - ], - "ordered": true - }, - "command_name": "insert", - "database_name": "command-monitoring-tests" - } - }, - { - "command_succeeded_event": { - "reply": { - "ok": 1, - "n": 0, - "writeErrors": [ - { - "index": 0, - "code": 42, - "errmsg": "" - } - ] - }, - "command_name": "insert" - } - } - ] - }, - { - "description": "A successful unordered insert many", - "operation": { - "name": "insertMany", - "arguments": { - "documents": [ - { - "_id": 2, - "x": 22 - } - ], - "options": { - "ordered": false - } - } - }, - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 2, - "x": 22 - } - ], - "ordered": false - }, - "command_name": "insert", - "database_name": "command-monitoring-tests" - } - }, - { - "command_succeeded_event": { - "reply": { - "ok": 1, - "n": 1 - }, - "command_name": "insert" - } - } - ] - } - ] -} diff --git a/test/command_monitoring/legacy/insertOne.json b/test/command_monitoring/legacy/insertOne.json deleted file mode 100644 index 877bca1a61..0000000000 --- a/test/command_monitoring/legacy/insertOne.json +++ /dev/null @@ -1,97 +0,0 @@ -{ - "data": [ - { - "_id": 1, - "x": 11 - } - ], - "collection_name": "test", - "database_name": "command-monitoring-tests", - "tests": [ - { - "description": "A successful insert one", - "operation": { - "name": "insertOne", - "arguments": { - "document": { - "_id": 2, - "x": 22 - } - } - }, - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 2, - "x": 22 - } - ], - "ordered": true - }, - "command_name": "insert", - "database_name": "command-monitoring-tests" - } - }, - { - "command_succeeded_event": { - "reply": { - "ok": 1, - "n": 1 - }, - "command_name": "insert" - } - } - ] - }, - { - "description": "A successful insert one command with write errors", - "operation": { - "name": "insertOne", - "arguments": { - "document": { - "_id": 1, - "x": 11 - } - } - }, - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1, - "x": 11 - } - ], - "ordered": true - }, - "command_name": "insert", - "database_name": "command-monitoring-tests" - } - }, - { - "command_succeeded_event": { - "reply": { - "ok": 1, - "n": 0, - "writeErrors": [ - { - "index": 0, - "code": 42, - "errmsg": "" - } - ] - }, - "command_name": "insert" - } - } - ] - } - ] -} diff --git a/test/command_monitoring/legacy/unacknowledgedBulkWrite.json b/test/command_monitoring/legacy/unacknowledgedBulkWrite.json deleted file mode 100644 index ae116289eb..0000000000 --- a/test/command_monitoring/legacy/unacknowledgedBulkWrite.json +++ /dev/null @@ -1,69 +0,0 @@ -{ - "data": [ - { - "_id": 1, - "x": 11 - } - ], - "collection_name": "test-unacknowledged-bulk-write", - "database_name": "command-monitoring-tests", - "tests": [ - { - "description": "A successful unordered bulk write with an unacknowledged write concern", - "comment": "On a 2.4 server, no GLE is sent and requires a client-side manufactured reply", - "operation": { - "name": "bulkWrite", - "collectionOptions": { - "writeConcern": { - "w": 0 - } - }, - "arguments": 
{ - "requests": [ - { - "name": "insertOne", - "arguments": { - "document": { - "_id": "unorderedBulkWriteInsertW0", - "x": 44 - } - } - } - ], - "options": { - "ordered": false - } - } - }, - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test-unacknowledged-bulk-write", - "documents": [ - { - "_id": "unorderedBulkWriteInsertW0", - "x": 44 - } - ], - "ordered": false, - "writeConcern": { - "w": 0 - } - }, - "command_name": "insert", - "database_name": "command-monitoring-tests" - } - }, - { - "command_succeeded_event": { - "reply": { - "ok": 1 - }, - "command_name": "insert" - } - } - ] - } - ] -} diff --git a/test/command_monitoring/legacy/updateMany.json b/test/command_monitoring/legacy/updateMany.json deleted file mode 100644 index d82792fc4e..0000000000 --- a/test/command_monitoring/legacy/updateMany.json +++ /dev/null @@ -1,135 +0,0 @@ -{ - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - } - ], - "collection_name": "test", - "database_name": "command-monitoring-tests", - "tests": [ - { - "description": "A successful update many", - "operation": { - "name": "updateMany", - "arguments": { - "filter": { - "_id": { - "$gt": 1 - } - }, - "update": { - "$inc": { - "x": 1 - } - } - } - }, - "expectations": [ - { - "command_started_event": { - "command": { - "update": "test", - "ordered": true, - "updates": [ - { - "q": { - "_id": { - "$gt": 1 - } - }, - "u": { - "$inc": { - "x": 1 - } - }, - "multi": true - } - ] - }, - "command_name": "update", - "database_name": "command-monitoring-tests" - } - }, - { - "command_succeeded_event": { - "reply": { - "ok": 1, - "n": 2 - }, - "command_name": "update" - } - } - ] - }, - { - "description": "A successful update many command with write errors", - "operation": { - "name": "updateMany", - "arguments": { - "filter": { - "_id": { - "$gt": 1 - } - }, - "update": { - "$nothing": { - "x": 1 - } - } - } - }, - "expectations": [ - { - "command_started_event": { - "command": { - "update": "test", - "ordered": true, - "updates": [ - { - "q": { - "_id": { - "$gt": 1 - } - }, - "u": { - "$nothing": { - "x": 1 - } - }, - "multi": true - } - ] - }, - "command_name": "update", - "database_name": "command-monitoring-tests" - } - }, - { - "command_succeeded_event": { - "reply": { - "ok": 1, - "n": 0, - "writeErrors": [ - { - "index": 0, - "code": 42, - "errmsg": "" - } - ] - }, - "command_name": "update" - } - } - ] - } - ] -} diff --git a/test/command_monitoring/legacy/updateOne.json b/test/command_monitoring/legacy/updateOne.json deleted file mode 100644 index ba41dbb0c0..0000000000 --- a/test/command_monitoring/legacy/updateOne.json +++ /dev/null @@ -1,190 +0,0 @@ -{ - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - } - ], - "collection_name": "test", - "database_name": "command-monitoring-tests", - "tests": [ - { - "description": "A successful update one", - "operation": { - "name": "updateOne", - "arguments": { - "filter": { - "_id": { - "$gt": 1 - } - }, - "update": { - "$inc": { - "x": 1 - } - } - } - }, - "expectations": [ - { - "command_started_event": { - "command": { - "update": "test", - "ordered": true, - "updates": [ - { - "q": { - "_id": { - "$gt": 1 - } - }, - "u": { - "$inc": { - "x": 1 - } - } - } - ] - }, - "command_name": "update", - "database_name": "command-monitoring-tests" - } - }, - { - "command_succeeded_event": { - "reply": { - "ok": 1, - "n": 1 - }, - "command_name": "update" - } - } - ] - }, - { - 
"description": "A successful update one with upsert when the upserted id is not an object id", - "operation": { - "name": "updateOne", - "arguments": { - "filter": { - "_id": 4 - }, - "update": { - "$inc": { - "x": 1 - } - }, - "upsert": true - } - }, - "expectations": [ - { - "command_started_event": { - "command": { - "update": "test", - "ordered": true, - "updates": [ - { - "q": { - "_id": 4 - }, - "u": { - "$inc": { - "x": 1 - } - }, - "upsert": true - } - ] - }, - "command_name": "update", - "database_name": "command-monitoring-tests" - } - }, - { - "command_succeeded_event": { - "reply": { - "ok": 1, - "n": 1, - "upserted": [ - { - "index": 0, - "_id": 4 - } - ] - }, - "command_name": "update" - } - } - ] - }, - { - "description": "A successful update one command with write errors", - "operation": { - "name": "updateOne", - "arguments": { - "filter": { - "_id": { - "$gt": 1 - } - }, - "update": { - "$nothing": { - "x": 1 - } - } - } - }, - "expectations": [ - { - "command_started_event": { - "command": { - "update": "test", - "ordered": true, - "updates": [ - { - "q": { - "_id": { - "$gt": 1 - } - }, - "u": { - "$nothing": { - "x": 1 - } - } - } - ] - }, - "command_name": "update", - "database_name": "command-monitoring-tests" - } - }, - { - "command_succeeded_event": { - "reply": { - "ok": 1, - "n": 0, - "writeErrors": [ - { - "index": 0, - "code": 42, - "errmsg": "" - } - ] - }, - "command_name": "update" - } - } - ] - } - ] -} diff --git a/test/command_monitoring/unified/redacted-commands.json b/test/command_monitoring/redacted-commands.json similarity index 97% rename from test/command_monitoring/unified/redacted-commands.json rename to test/command_monitoring/redacted-commands.json index 0f85dc3e94..4302ba8900 100644 --- a/test/command_monitoring/unified/redacted-commands.json +++ b/test/command_monitoring/redacted-commands.json @@ -162,6 +162,11 @@ }, { "description": "getnonce", + "runOnRequirements": [ + { + "maxServerVersion": "6.1.99" + } + ], "operations": [ { "name": "runCommand", @@ -293,6 +298,11 @@ }, { "description": "copydbgetnonce", + "runOnRequirements": [ + { + "maxServerVersion": "3.6.99" + } + ], "operations": [ { "name": "runCommand", @@ -328,6 +338,11 @@ }, { "description": "copydbsaslstart", + "runOnRequirements": [ + { + "maxServerVersion": "4.0.99" + } + ], "operations": [ { "name": "runCommand", @@ -363,6 +378,11 @@ }, { "description": "copydb", + "runOnRequirements": [ + { + "maxServerVersion": "4.0.99" + } + ], "operations": [ { "name": "runCommand", diff --git a/test/command_monitoring/unacknowledgedBulkWrite.json b/test/command_monitoring/unacknowledgedBulkWrite.json new file mode 100644 index 0000000000..4c16d6df11 --- /dev/null +++ b/test/command_monitoring/unacknowledgedBulkWrite.json @@ -0,0 +1,108 @@ +{ + "description": "unacknowledgedBulkWrite", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client", + "observeEvents": [ + "commandStartedEvent", + "commandSucceededEvent", + "commandFailedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "command-monitoring-tests" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "test", + "collectionOptions": { + "writeConcern": { + "w": 0 + } + } + } + } + ], + "initialData": [ + { + "collectionName": "test", + "databaseName": "command-monitoring-tests", + "documents": [ + { + "_id": 1, + "x": 11 + } + ] + } + ], + "tests": [ + { + "description": "A successful unordered bulk write with 
an unacknowledged write concern", + "operations": [ + { + "name": "bulkWrite", + "object": "collection", + "arguments": { + "requests": [ + { + "insertOne": { + "document": { + "_id": "unorderedBulkWriteInsertW0", + "x": 44 + } + } + } + ], + "ordered": false + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": "unorderedBulkWriteInsertW0", + "x": 44 + } + ], + "ordered": false, + "writeConcern": { + "w": 0 + } + }, + "commandName": "insert", + "databaseName": "command-monitoring-tests" + } + }, + { + "commandSucceededEvent": { + "reply": { + "ok": 1, + "n": { + "$$exists": false + } + }, + "commandName": "insert" + } + } + ] + } + ] + } + ] +} diff --git a/test/command_monitoring/updateMany.json b/test/command_monitoring/updateMany.json new file mode 100644 index 0000000000..b15434226c --- /dev/null +++ b/test/command_monitoring/updateMany.json @@ -0,0 +1,188 @@ +{ + "description": "updateMany", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client", + "observeEvents": [ + "commandStartedEvent", + "commandSucceededEvent", + "commandFailedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "command-monitoring-tests" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "test" + } + } + ], + "initialData": [ + { + "collectionName": "test", + "databaseName": "command-monitoring-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ], + "tests": [ + { + "description": "A successful updateMany", + "operations": [ + { + "name": "updateMany", + "object": "collection", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "update": { + "$inc": { + "x": 1 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "test", + "updates": [ + { + "q": { + "_id": { + "$gt": 1 + } + }, + "u": { + "$inc": { + "x": 1 + } + }, + "upsert": { + "$$unsetOrMatches": false + }, + "multi": true + } + ], + "ordered": true + }, + "commandName": "update", + "databaseName": "command-monitoring-tests" + } + }, + { + "commandSucceededEvent": { + "reply": { + "ok": 1, + "n": 2 + }, + "commandName": "update" + } + } + ] + } + ] + }, + { + "description": "A successful updateMany with write errors", + "operations": [ + { + "name": "updateMany", + "object": "collection", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "update": { + "$unsupported": { + "x": 1 + } + } + }, + "expectError": { + "isClientError": false + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "test", + "updates": [ + { + "q": { + "_id": { + "$gt": 1 + } + }, + "u": { + "$unsupported": { + "x": 1 + } + }, + "upsert": { + "$$unsetOrMatches": false + }, + "multi": true + } + ], + "ordered": true + }, + "commandName": "update", + "databaseName": "command-monitoring-tests" + } + }, + { + "commandSucceededEvent": { + "reply": { + "ok": 1, + "n": 0, + "writeErrors": { + "$$type": "array" + } + }, + "commandName": "update" + } + } + ] + } + ] + } + ] +} diff --git a/test/command_monitoring/updateOne.json b/test/command_monitoring/updateOne.json new file mode 100644 index 0000000000..a0ae99e88d --- /dev/null +++ b/test/command_monitoring/updateOne.json @@ -0,0 +1,260 @@ +{ + 
"description": "updateOne", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client", + "observeEvents": [ + "commandStartedEvent", + "commandSucceededEvent", + "commandFailedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "command-monitoring-tests" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "test" + } + } + ], + "initialData": [ + { + "collectionName": "test", + "databaseName": "command-monitoring-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ], + "tests": [ + { + "description": "A successful updateOne", + "operations": [ + { + "name": "updateOne", + "object": "collection", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "update": { + "$inc": { + "x": 1 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "test", + "updates": [ + { + "q": { + "_id": { + "$gt": 1 + } + }, + "u": { + "$inc": { + "x": 1 + } + }, + "upsert": { + "$$unsetOrMatches": false + }, + "multi": { + "$$unsetOrMatches": false + } + } + ], + "ordered": true + }, + "commandName": "update", + "databaseName": "command-monitoring-tests" + } + }, + { + "commandSucceededEvent": { + "reply": { + "ok": 1, + "n": 1 + }, + "commandName": "update" + } + } + ] + } + ] + }, + { + "description": "A successful updateOne with upsert where the upserted id is not an ObjectId", + "operations": [ + { + "name": "updateOne", + "object": "collection", + "arguments": { + "filter": { + "_id": 4 + }, + "update": { + "$inc": { + "x": 1 + } + }, + "upsert": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "test", + "updates": [ + { + "q": { + "_id": 4 + }, + "u": { + "$inc": { + "x": 1 + } + }, + "upsert": true, + "multi": { + "$$unsetOrMatches": false + } + } + ], + "ordered": true + }, + "commandName": "update", + "databaseName": "command-monitoring-tests" + } + }, + { + "commandSucceededEvent": { + "reply": { + "ok": 1, + "n": 1, + "upserted": [ + { + "index": 0, + "_id": 4 + } + ] + }, + "commandName": "update" + } + } + ] + } + ] + }, + { + "description": "A successful updateOne with write errors", + "operations": [ + { + "name": "updateOne", + "object": "collection", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "update": { + "$unsupported": { + "x": 1 + } + } + }, + "expectError": { + "isClientError": false + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "test", + "updates": [ + { + "q": { + "_id": { + "$gt": 1 + } + }, + "u": { + "$unsupported": { + "x": 1 + } + }, + "upsert": { + "$$unsetOrMatches": false + }, + "multi": { + "$$unsetOrMatches": false + } + } + ], + "ordered": true + }, + "commandName": "update", + "databaseName": "command-monitoring-tests" + } + }, + { + "commandSucceededEvent": { + "reply": { + "ok": 1, + "n": 0, + "writeErrors": { + "$$type": "array" + } + }, + "commandName": "update" + } + } + ] + } + ] + } + ] +} diff --git a/test/test_command_monitoring_unified.py b/test/test_command_monitoring.py similarity index 95% rename from test/test_command_monitoring_unified.py rename to test/test_command_monitoring.py index 46e1e4724c..c88b7ef810 100644 --- a/test/test_command_monitoring_unified.py +++ 
b/test/test_command_monitoring.py @@ -28,7 +28,7 @@ globals().update( generate_test_classes( - os.path.join(_TEST_PATH, "unified"), + _TEST_PATH, module=__name__, ) ) diff --git a/test/test_command_monitoring_legacy.py b/test/test_command_monitoring_legacy.py deleted file mode 100644 index 1cc3e15cc9..0000000000 --- a/test/test_command_monitoring_legacy.py +++ /dev/null @@ -1,237 +0,0 @@ -# Copyright 2015-present MongoDB, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Run the command monitoring legacy-format spec tests.""" - -import os -import re -import sys - -sys.path[0:0] = [""] - -from test import client_context, unittest -from test.utils import EventListener, parse_read_preference, rs_or_single_client - -import pymongo -from bson import json_util -from pymongo import MongoClient -from pymongo.errors import OperationFailure -from pymongo.write_concern import WriteConcern - -# Location of JSON test specifications. -_TEST_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "command_monitoring") - - -def camel_to_snake(camel): - # Regex to convert CamelCase to snake_case. - snake = re.sub("(.)([A-Z][a-z]+)", r"\1_\2", camel) - return re.sub("([a-z0-9])([A-Z])", r"\1_\2", snake).lower() - - -class TestAllScenarios(unittest.TestCase): - listener: EventListener - client: MongoClient - - @classmethod - @client_context.require_connection - def setUpClass(cls): - cls.listener = EventListener() - cls.client = rs_or_single_client(event_listeners=[cls.listener]) - - @classmethod - def tearDownClass(cls): - cls.client.close() - - def tearDown(self): - self.listener.reset() - - -def create_test(scenario_def, test): - def run_scenario(self): - dbname = scenario_def["database_name"] - collname = scenario_def["collection_name"] - - coll = self.client[dbname][collname] - coll.drop() - coll.insert_many(scenario_def["data"]) - self.listener.reset() - name = camel_to_snake(test["operation"]["name"]) - if "read_preference" in test["operation"]: - coll = coll.with_options( - read_preference=parse_read_preference(test["operation"]["read_preference"]) - ) - if "collectionOptions" in test["operation"]: - colloptions = test["operation"]["collectionOptions"] - if "writeConcern" in colloptions: - concern = colloptions["writeConcern"] - coll = coll.with_options(write_concern=WriteConcern(**concern)) - - test_args = test["operation"]["arguments"] - if "options" in test_args: - options = test_args.pop("options") - test_args.update(options) - args = {} - for arg in test_args: - args[camel_to_snake(arg)] = test_args[arg] - - if name == "count": - self.skipTest("PyMongo does not support count") - elif name == "bulk_write": - bulk_args = [] - for request in args["requests"]: - opname = request["name"] - klass = opname[0:1].upper() + opname[1:] - arg = getattr(pymongo, klass)(**request["arguments"]) - bulk_args.append(arg) - try: - coll.bulk_write(bulk_args, args.get("ordered", True)) - except OperationFailure: - pass - elif name == "find": - if "sort" in args: - args["sort"] = 
list(args["sort"].items()) - if "hint" in args: - args["hint"] = list(args["hint"].items()) - for arg in "skip", "limit": - if arg in args: - args[arg] = int(args[arg]) - try: - # Iterate the cursor. - tuple(coll.find(**args)) - except OperationFailure: - pass - else: - try: - getattr(coll, name)(**args) - except OperationFailure: - pass - - started_events = self.listener.started_events - succeeded_events = self.listener.succeeded_events - failed_events = self.listener.failed_events - for expectation in test["expectations"]: - event_type = next(iter(expectation)) - if event_type == "command_started_event": - event = started_events[0] if len(started_events) else None - if event is not None: - # The tests substitute 42 for any number other than 0. - if event.command_name == "getMore" and event.command["getMore"]: - event.command["getMore"] = 42 - elif event.command_name == "killCursors": - event.command["cursors"] = [42] - elif event.command_name == "update": - # TODO: remove this once PYTHON-1744 is done. - # Add upsert and multi fields back into - # expectations. - updates = expectation[event_type]["command"]["updates"] - for update in updates: - update.setdefault("upsert", False) - update.setdefault("multi", False) - elif event_type == "command_succeeded_event": - event = succeeded_events.pop(0) if len(succeeded_events) else None - if event is not None: - reply = event.reply - # The tests substitute 42 for any number other than 0, - # and "" for any error message. - if "writeErrors" in reply: - for doc in reply["writeErrors"]: - # Remove any new fields the server adds. The tests - # only have index, code, and errmsg. - diff = set(doc) - set(["index", "code", "errmsg"]) - for field in diff: - doc.pop(field) - doc["code"] = 42 - doc["errmsg"] = "" - elif "cursor" in reply: - if reply["cursor"]["id"]: - reply["cursor"]["id"] = 42 - elif event.command_name == "killCursors": - # Make the tests continue to pass when the killCursors - # command is actually in use. - if "cursorsKilled" in reply: - reply.pop("cursorsKilled") - reply["cursorsUnknown"] = [42] - # Found succeeded event. Pop related started event. - started_events.pop(0) - elif event_type == "command_failed_event": - event = failed_events.pop(0) if len(failed_events) else None - if event is not None: - # Found failed event. Pop related started event. - started_events.pop(0) - else: - self.fail("Unknown event type") - - if event is None: - event_name = event_type.split("_")[1] - self.fail( - "Expected %s event for %s command. Actual " - "results:\n%s" - % ( - event_name, - expectation[event_type]["command_name"], - "\n".join(str(e) for e in self.listener.events), - ) - ) - - for attr, expected in expectation[event_type].items(): - if "options" in expected: - options = expected.pop("options") - expected.update(options) - actual = getattr(event, attr) - if isinstance(expected, dict): - for key, val in expected.items(): - self.assertEqual(val, actual[key]) - else: - self.assertEqual(actual, expected) - - return run_scenario - - -def create_tests(): - for dirpath, _, filenames in os.walk(os.path.join(_TEST_PATH, "legacy")): - dirname = os.path.split(dirpath)[-1] - for filename in filenames: - with open(os.path.join(dirpath, filename)) as scenario_stream: - scenario_def = json_util.loads(scenario_stream.read()) - assert bool(scenario_def.get("tests")), "tests cannot be empty" - # Construct test from scenario. 
- for test in scenario_def["tests"]: - new_test = create_test(scenario_def, test) - if "ignore_if_server_version_greater_than" in test: - version = test["ignore_if_server_version_greater_than"] - ver = tuple(int(elt) for elt in version.split(".")) - new_test = client_context.require_version_max(*ver)(new_test) - if "ignore_if_server_version_less_than" in test: - version = test["ignore_if_server_version_less_than"] - ver = tuple(int(elt) for elt in version.split(".")) - new_test = client_context.require_version_min(*ver)(new_test) - if "ignore_if_topology_type" in test: - types = set(test["ignore_if_topology_type"]) - if "sharded" in types: - new_test = client_context.require_no_mongos(None)(new_test) - - test_name = "test_%s_%s_%s" % ( - dirname, - os.path.splitext(filename)[0], - str(test["description"].replace(" ", "_")), - ) - new_test.__name__ = test_name - setattr(TestAllScenarios, new_test.__name__, new_test) - - -create_tests() - - -if __name__ == "__main__": - unittest.main() diff --git a/test/test_monitoring.py b/test/test_monitoring.py index ffa535eeed..39b3d2f896 100644 --- a/test/test_monitoring.py +++ b/test/test_monitoring.py @@ -1078,6 +1078,7 @@ def test_first_batch_helper(self): self.listener.reset() + @client_context.require_version_max(6, 1, 99) def test_sensitive_commands(self): listeners = self.client._event_listeners From 57f757b74c7d0bfac3d0ef8e9e8df4f37edd7018 Mon Sep 17 00:00:00 2001 From: Julius Park Date: Tue, 17 Jan 2023 23:14:26 -0800 Subject: [PATCH 0321/1588] PYTHON-3446 Do not connect to mongocryptd if shared library is loaded (#1136) --- test/test_encryption.py | 61 ++++++++++++++++++++++++++++++++--------- 1 file changed, 48 insertions(+), 13 deletions(-) diff --git a/test/test_encryption.py b/test/test_encryption.py index 3c422b8c87..35dea51885 100644 --- a/test/test_encryption.py +++ b/test/test_encryption.py @@ -19,11 +19,13 @@ import os import re import socket +import socketserver import ssl import sys import textwrap import traceback import uuid +from threading import Thread from typing import Any, Dict, Mapping from pymongo.collection import Collection @@ -730,6 +732,11 @@ def create_key_vault(vault, *data_keys): vault.drop() if data_keys: vault.insert_many(data_keys) + vault.create_index( + "keyAltNames", + unique=True, + partialFilterExpression={"keyAltNames": {"$exists": True}}, + ) return vault @@ -1786,10 +1793,7 @@ class TestDecryptProse(EncryptionIntegrationTest): def setUp(self): self.client = client_context.client self.client.db.drop_collection("decryption_events") - self.client.keyvault.drop_collection("datakeys") - self.client.keyvault.datakeys.create_index( - "keyAltNames", unique=True, partialFilterExpression={"keyAltNames": {"$exists": True}} - ) + create_key_vault(self.client.keyvault.datakeys) kms_providers_map = {"local": {"key": LOCAL_MASTER_KEY}} self.client_encryption = ClientEncryption( @@ -1912,12 +1916,9 @@ def test_bypassAutoEncryption(self): @unittest.skipUnless(os.environ.get("TEST_CRYPT_SHARED"), "crypt_shared lib is not installed") def test_via_loading_shared_library(self): - key_vault = client_context.client.keyvault.datakeys - key_vault.drop() - key_vault.create_index( - "keyAltNames", unique=True, partialFilterExpression={"keyAltNames": {"$exists": True}} + create_key_vault( + client_context.client.keyvault.datakeys, json_data("external", "external-key.json") ) - key_vault.insert_one(json_data("external", "external-key.json")) schemas = {"db.coll": json_data("external", "external-schema.json")} opts = 
AutoEncryptionOpts( kms_providers={"local": {"key": LOCAL_MASTER_KEY}}, @@ -1942,6 +1943,43 @@ def test_via_loading_shared_library(self): with self.assertRaises(ServerSelectionTimeoutError): no_mongocryptd_client.db.command("ping") + # https://github.com/mongodb/specifications/blob/master/source/client-side-encryption/tests/README.rst#20-bypass-creating-mongocryptd-client-when-shared-library-is-loaded + @unittest.skipUnless(os.environ.get("TEST_CRYPT_SHARED"), "crypt_shared lib is not installed") + def test_client_via_loading_shared_library(self): + connection_established = False + + class Handler(socketserver.BaseRequestHandler): + def handle(self): + nonlocal connection_established + connection_established = True + + server = socketserver.TCPServer(("localhost", 47021), Handler) + + def listener(): + with server: + server.serve_forever(poll_interval=0.05) # Short poll timeout to speed up the test + + listener_t = Thread(target=listener) + listener_t.start() + create_key_vault( + client_context.client.keyvault.datakeys, json_data("external", "external-key.json") + ) + schemas = {"db.coll": json_data("external", "external-schema.json")} + opts = AutoEncryptionOpts( + kms_providers={"local": {"key": LOCAL_MASTER_KEY}}, + key_vault_namespace="keyvault.datakeys", + schema_map=schemas, + mongocryptd_uri="mongodb://localhost:47021", + crypt_shared_lib_required=False, + ) + client_encrypted = rs_or_single_client(auto_encryption_opts=opts) + self.addCleanup(client_encrypted.close) + client_encrypted.db.coll.drop() + client_encrypted.db.coll.insert_one({"encrypted": "test"}) + server.shutdown() + listener_t.join() + self.assertFalse(connection_established, "a connection was established on port 47021") + # https://github.com/mongodb/specifications/tree/master/source/client-side-encryption/tests#kms-tls-tests class TestKmsTLSProse(EncryptionIntegrationTest): @@ -2112,10 +2150,7 @@ def test_04_kmip(self): class TestUniqueIndexOnKeyAltNamesProse(EncryptionIntegrationTest): def setUp(self): self.client = client_context.client - self.client.keyvault.drop_collection("datakeys") - self.client.keyvault.datakeys.create_index( - "keyAltNames", unique=True, partialFilterExpression={"keyAltNames": {"$exists": True}} - ) + create_key_vault(self.client.keyvault.datakeys) kms_providers_map = {"local": {"key": LOCAL_MASTER_KEY}} self.client_encryption = ClientEncryption( kms_providers_map, "keyvault.datakeys", self.client, CodecOptions() From 124dee66c3df47883b85df82b34f740ce35f69e1 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Wed, 18 Jan 2023 13:39:38 -0600 Subject: [PATCH 0322/1588] PYTHON-3565 The docs page appears to be missing a component reference (#1137) --- pymongo/__init__.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/pymongo/__init__.py b/pymongo/__init__.py index 789df62071..a3bdb4c163 100644 --- a/pymongo/__init__.py +++ b/pymongo/__init__.py @@ -85,7 +85,7 @@ """ from pymongo import _csot -from pymongo._version import __version__, get_version_string, version, version_tuple +from pymongo._version import __version__, get_version_string, version_tuple from pymongo.collection import ReturnDocument from pymongo.common import MAX_SUPPORTED_WIRE_VERSION, MIN_SUPPORTED_WIRE_VERSION from pymongo.cursor import CursorType @@ -102,6 +102,9 @@ from pymongo.read_preferences import ReadPreference from pymongo.write_concern import WriteConcern +version = __version__ +"""Current version of PyMongo.""" + def has_c() -> bool: """Is the C extension 
installed?""" From ec074010d81f72826fdba230ef6cda1bbf034a27 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Tue, 24 Jan 2023 14:38:48 -0800 Subject: [PATCH 0323/1588] PYTHON-3523 Resync unified test format tests for getnonce (#1141) --- test/test_auth.py | 4 +++- .../valid-pass/observeSensitiveCommands.json | 15 +++++++++++++++ 2 files changed, 18 insertions(+), 1 deletion(-) diff --git a/test/test_auth.py b/test/test_auth.py index 9d80f06c00..7db2247746 100644 --- a/test/test_auth.py +++ b/test/test_auth.py @@ -581,7 +581,9 @@ def test_scram_threaded(self): coll.insert_one({"_id": 1}) # The first thread to call find() will authenticate - coll = rs_or_single_client().db.test + client = rs_or_single_client() + self.addCleanup(client.close) + coll = client.db.test threads = [] for _ in range(4): threads.append(AutoAuthenticateThread(coll)) diff --git a/test/unified-test-format/valid-pass/observeSensitiveCommands.json b/test/unified-test-format/valid-pass/observeSensitiveCommands.json index 411ca19c5d..d3ae5665be 100644 --- a/test/unified-test-format/valid-pass/observeSensitiveCommands.json +++ b/test/unified-test-format/valid-pass/observeSensitiveCommands.json @@ -61,6 +61,11 @@ "tests": [ { "description": "getnonce is observed with observeSensitiveCommands=true", + "runOnRequirements": [ + { + "maxServerVersion": "6.1.99" + } + ], "operations": [ { "name": "runCommand", @@ -106,6 +111,11 @@ }, { "description": "getnonce is not observed with observeSensitiveCommands=false", + "runOnRequirements": [ + { + "maxServerVersion": "6.1.99" + } + ], "operations": [ { "name": "runCommand", @@ -127,6 +137,11 @@ }, { "description": "getnonce is not observed by default", + "runOnRequirements": [ + { + "maxServerVersion": "6.1.99" + } + ], "operations": [ { "name": "runCommand", From d3117ce75dfe86fd6a7ab2380759f4efaa9cfb3f Mon Sep 17 00:00:00 2001 From: Julius Park Date: Tue, 24 Jan 2023 15:33:56 -0800 Subject: [PATCH 0324/1588] PYTHON-3280 Support for Range Indexes (#1140) --- pymongo/encryption.py | 134 ++++++++++-- pymongo/encryption_options.py | 44 +++- .../etc/data/encryptedFields-Range-Date.json | 36 ++++ .../data/encryptedFields-Range-Decimal.json | 26 +++ ...ncryptedFields-Range-DecimalPrecision.json | 35 ++++ .../data/encryptedFields-Range-Double.json | 26 +++ ...encryptedFields-Range-DoublePrecision.json | 35 ++++ .../etc/data/encryptedFields-Range-Int.json | 32 +++ .../etc/data/encryptedFields-Range-Long.json | 32 +++ .../etc/data/range-encryptedFields-Date.json | 30 +++ ...ge-encryptedFields-DecimalNoPrecision.json | 21 ++ ...ange-encryptedFields-DecimalPrecision.json | 29 +++ ...nge-encryptedFields-DoubleNoPrecision.json | 21 ++ ...range-encryptedFields-DoublePrecision.json | 30 +++ .../etc/data/range-encryptedFields-Int.json | 27 +++ .../etc/data/range-encryptedFields-Long.json | 27 +++ test/test_encryption.py | 197 +++++++++++++++++- 17 files changed, 759 insertions(+), 23 deletions(-) create mode 100644 test/client-side-encryption/etc/data/encryptedFields-Range-Date.json create mode 100644 test/client-side-encryption/etc/data/encryptedFields-Range-Decimal.json create mode 100644 test/client-side-encryption/etc/data/encryptedFields-Range-DecimalPrecision.json create mode 100644 test/client-side-encryption/etc/data/encryptedFields-Range-Double.json create mode 100644 test/client-side-encryption/etc/data/encryptedFields-Range-DoublePrecision.json create mode 100644 test/client-side-encryption/etc/data/encryptedFields-Range-Int.json create mode 100644 
test/client-side-encryption/etc/data/encryptedFields-Range-Long.json create mode 100644 test/client-side-encryption/etc/data/range-encryptedFields-Date.json create mode 100644 test/client-side-encryption/etc/data/range-encryptedFields-DecimalNoPrecision.json create mode 100644 test/client-side-encryption/etc/data/range-encryptedFields-DecimalPrecision.json create mode 100644 test/client-side-encryption/etc/data/range-encryptedFields-DoubleNoPrecision.json create mode 100644 test/client-side-encryption/etc/data/range-encryptedFields-DoublePrecision.json create mode 100644 test/client-side-encryption/etc/data/range-encryptedFields-Int.json create mode 100644 test/client-side-encryption/etc/data/range-encryptedFields-Long.json diff --git a/pymongo/encryption.py b/pymongo/encryption.py index 92a268f452..8b51863f96 100644 --- a/pymongo/encryption.py +++ b/pymongo/encryption.py @@ -41,7 +41,7 @@ from pymongo import _csot from pymongo.cursor import Cursor from pymongo.daemon import _spawn_daemon -from pymongo.encryption_options import AutoEncryptionOpts +from pymongo.encryption_options import AutoEncryptionOpts, RangeOpts from pymongo.errors import ( ConfigurationError, EncryptionError, @@ -416,6 +416,14 @@ class Algorithm(str, enum.Enum): .. versionadded:: 4.2 """ + RANGEPREVIEW = "RangePreview" + """RangePreview. + + .. note:: Support for Range queries is in beta. + Backwards-breaking changes may be made before the final release. + + .. versionadded:: 4.4 + """ class QueryType(str, enum.Enum): @@ -430,6 +438,9 @@ class QueryType(str, enum.Enum): EQUALITY = "equality" """Used to encrypt a value for an equality query.""" + RANGEPREVIEW = "rangePreview" + """Used to encrypt a value for a range query.""" + class ClientEncryption(Generic[_DocumentType]): """Explicit client-side field level encryption.""" @@ -627,6 +638,45 @@ def create_data_key( key_material=key_material, ) + def _encrypt_helper( + self, + value, + algorithm, + key_id=None, + key_alt_name=None, + query_type=None, + contention_factor=None, + range_opts=None, + is_expression=False, + ): + self._check_closed() + if key_id is not None and not ( + isinstance(key_id, Binary) and key_id.subtype == UUID_SUBTYPE + ): + raise TypeError("key_id must be a bson.binary.Binary with subtype 4") + + doc = encode( + {"v": value}, + codec_options=self._codec_options, + ) + if range_opts: + range_opts = encode( + range_opts.document, + codec_options=self._codec_options, + ) + with _wrap_encryption_errors(): + encrypted_doc = self._encryption.encrypt( + value=doc, + algorithm=algorithm, + key_id=key_id, + key_alt_name=key_alt_name, + query_type=query_type, + contention_factor=contention_factor, + range_opts=range_opts, + is_expression=is_expression, + ) + return decode(encrypted_doc)["v"] # type: ignore[index] + def encrypt( self, value: Any, @@ -635,6 +685,7 @@ def encrypt( key_alt_name: Optional[str] = None, query_type: Optional[str] = None, contention_factor: Optional[int] = None, + range_opts: Optional[RangeOpts] = None, ) -> Binary: """Encrypt a BSON value with a given key and algorithm. @@ -655,10 +706,10 @@ def encrypt( when the algorithm is :attr:`Algorithm.INDEXED`. An integer value *must* be given when the :attr:`Algorithm.INDEXED` algorithm is used. + - `range_opts`: **(BETA)** An instance of RangeOpts. - .. note:: `query_type` and `contention_factor` are part of the - Queryable Encryption beta. Backwards-breaking changes may be made before the - final release. + .. 
note:: `query_type`, `contention_factor` and `range_opts` are part of the Queryable Encryption beta. + Backwards-breaking changes may be made before the final release. :Returns: The encrypted value, a :class:`~bson.binary.Binary` with subtype 6. @@ -667,23 +718,66 @@ def encrypt( Added the `query_type` and `contention_factor` parameters. """ - self._check_closed() - if key_id is not None and not ( - isinstance(key_id, Binary) and key_id.subtype == UUID_SUBTYPE - ): - raise TypeError("key_id must be a bson.binary.Binary with subtype 4") + return self._encrypt_helper( + value=value, + algorithm=algorithm, + key_id=key_id, + key_alt_name=key_alt_name, + query_type=query_type, + contention_factor=contention_factor, + range_opts=range_opts, + is_expression=False, + ) - doc = encode({"v": value}, codec_options=self._codec_options) - with _wrap_encryption_errors(): - encrypted_doc = self._encryption.encrypt( - doc, - algorithm, - key_id=key_id, - key_alt_name=key_alt_name, - query_type=query_type, - contention_factor=contention_factor, - ) - return decode(encrypted_doc)["v"] # type: ignore[index] + def encrypt_expression( + self, + expression: Mapping[str, Any], + algorithm: str, + key_id: Optional[Binary] = None, + key_alt_name: Optional[str] = None, + query_type: Optional[str] = None, + contention_factor: Optional[int] = None, + range_opts: Optional[RangeOpts] = None, + ) -> RawBSONDocument: + """Encrypt a BSON expression with a given key and algorithm. + + Note that exactly one of ``key_id`` or ``key_alt_name`` must be + provided. + + :Parameters: + - `expression`: **(BETA)** The BSON aggregate or match expression to encrypt. + - `algorithm` (string): The encryption algorithm to use. See + :class:`Algorithm` for some valid options. + - `key_id`: Identifies a data key by ``_id`` which must be a + :class:`~bson.binary.Binary` with subtype 4 ( + :attr:`~bson.binary.UUID_SUBTYPE`). + - `key_alt_name`: Identifies a key vault document by 'keyAltName'. + - `query_type` (str): **(BETA)** The query type to execute. See + :class:`QueryType` for valid options. + - `contention_factor` (int): **(BETA)** The contention factor to use + when the algorithm is :attr:`Algorithm.INDEXED`. An integer value + *must* be given when the :attr:`Algorithm.INDEXED` algorithm is + used. + - `range_opts`: **(BETA)** An instance of RangeOpts. + + .. note:: Support for range queries is in beta. + Backwards-breaking changes may be made before the final release. + + :Returns: + The encrypted expression, a :class:`~bson.RawBSONDocument`. + + .. versionadded:: 4.4 + """ + return self._encrypt_helper( + value=expression, + algorithm=algorithm, + key_id=key_id, + key_alt_name=key_alt_name, + query_type=query_type, + contention_factor=contention_factor, + range_opts=range_opts, + is_expression=True, + ) def decrypt(self, value: Binary) -> Any: """Decrypt an encrypted value. diff --git a/pymongo/encryption_options.py b/pymongo/encryption_options.py index c5e6f47837..6c966e30cd 100644 --- a/pymongo/encryption_options.py +++ b/pymongo/encryption_options.py @@ -22,7 +22,7 @@ _HAVE_PYMONGOCRYPT = True except ImportError: _HAVE_PYMONGOCRYPT = False - +from bson import int64 from pymongo.common import validate_is_mapping from pymongo.errors import ConfigurationError from pymongo.uri_parser import _parse_kms_tls_options @@ -219,3 +219,45 @@ def __init__( # Maps KMS provider name to a SSLContext. 
self._kms_ssl_contexts = _parse_kms_tls_options(kms_tls_options) self._bypass_query_analysis = bypass_query_analysis + + +class RangeOpts: + """Options to configure encrypted queries using the rangePreview algorithm.""" + + def __init__( + self, + sparsity: int, + min: Optional[Any] = None, + max: Optional[Any] = None, + precision: Optional[int] = None, + ) -> None: + """Options to configure encrypted queries using the rangePreview algorithm. + + .. note:: Support for Range queries is in beta. + Backwards-breaking changes may be made before the final release. + + :Parameters: + - `sparsity`: An integer. + - `min`: A BSON scalar value corresponding to the type being queried. + - `max`: A BSON scalar value corresponding to the type being queried. + - `precision`: An integer, may only be set for double or decimal128 types. + + .. versionadded:: 4.4 + """ + self.min = min + self.max = max + self.sparsity = sparsity + self.precision = precision + + @property + def document(self) -> Mapping[str, Any]: + doc = {} + for k, v in [ + ("sparsity", int64.Int64(self.sparsity)), + ("precision", self.precision), + ("min", self.min), + ("max", self.max), + ]: + if v is not None: + doc[k] = v + return doc diff --git a/test/client-side-encryption/etc/data/encryptedFields-Range-Date.json b/test/client-side-encryption/etc/data/encryptedFields-Range-Date.json new file mode 100644 index 0000000000..c9ad1ffdd4 --- /dev/null +++ b/test/client-side-encryption/etc/data/encryptedFields-Range-Date.json @@ -0,0 +1,36 @@ +{ + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDate", + "bsonType": "date", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$date": { + "$numberLong": "0" + } + }, + "max": { + "$date": { + "$numberLong": "200" + } + } + } + } + ] +} diff --git a/test/client-side-encryption/etc/data/encryptedFields-Range-Decimal.json b/test/client-side-encryption/etc/data/encryptedFields-Range-Decimal.json new file mode 100644 index 0000000000..f209536c9c --- /dev/null +++ b/test/client-side-encryption/etc/data/encryptedFields-Range-Decimal.json @@ -0,0 +1,26 @@ +{ + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimal", + "bsonType": "decimal", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] +} diff --git a/test/client-side-encryption/etc/data/encryptedFields-Range-DecimalPrecision.json b/test/client-side-encryption/etc/data/encryptedFields-Range-DecimalPrecision.json new file mode 100644 index 0000000000..e7634152ba --- /dev/null +++ b/test/client-side-encryption/etc/data/encryptedFields-Range-DecimalPrecision.json @@ -0,0 +1,35 @@ +{ + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "rangePreview", + "contention": { + 
"$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDecimal": "0.0" + }, + "max": { + "$numberDecimal": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] +} diff --git a/test/client-side-encryption/etc/data/encryptedFields-Range-Double.json b/test/client-side-encryption/etc/data/encryptedFields-Range-Double.json new file mode 100644 index 0000000000..4e9e8d6d81 --- /dev/null +++ b/test/client-side-encryption/etc/data/encryptedFields-Range-Double.json @@ -0,0 +1,26 @@ +{ + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDouble", + "bsonType": "double", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] +} diff --git a/test/client-side-encryption/etc/data/encryptedFields-Range-DoublePrecision.json b/test/client-side-encryption/etc/data/encryptedFields-Range-DoublePrecision.json new file mode 100644 index 0000000000..17c725ec44 --- /dev/null +++ b/test/client-side-encryption/etc/data/encryptedFields-Range-DoublePrecision.json @@ -0,0 +1,35 @@ +{ + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoublePrecision", + "bsonType": "double", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDouble": "0.0" + }, + "max": { + "$numberDouble": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] +} diff --git a/test/client-side-encryption/etc/data/encryptedFields-Range-Int.json b/test/client-side-encryption/etc/data/encryptedFields-Range-Int.json new file mode 100644 index 0000000000..661d7395c5 --- /dev/null +++ b/test/client-side-encryption/etc/data/encryptedFields-Range-Int.json @@ -0,0 +1,32 @@ +{ + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedInt", + "bsonType": "int", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberInt": "0" + }, + "max": { + "$numberInt": "200" + } + } + } + ] +} diff --git a/test/client-side-encryption/etc/data/encryptedFields-Range-Long.json b/test/client-side-encryption/etc/data/encryptedFields-Range-Long.json new file mode 100644 index 0000000000..b36bfb2c46 --- /dev/null +++ b/test/client-side-encryption/etc/data/encryptedFields-Range-Long.json @@ -0,0 +1,32 @@ +{ + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedLong", + "bsonType": "long", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberLong": "0" + }, + "max": { + "$numberLong": "200" + } + } + } + ] +} diff --git 
a/test/client-side-encryption/etc/data/range-encryptedFields-Date.json b/test/client-side-encryption/etc/data/range-encryptedFields-Date.json new file mode 100644 index 0000000000..e19fc1e182 --- /dev/null +++ b/test/client-side-encryption/etc/data/range-encryptedFields-Date.json @@ -0,0 +1,30 @@ +{ + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDate", + "bsonType": "date", + "queries": { + "queryType": "rangePreview", + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$date": { + "$numberLong": "0" + } + }, + "max": { + "$date": { + "$numberLong": "200" + } + } + } + } + ] +} diff --git a/test/client-side-encryption/etc/data/range-encryptedFields-DecimalNoPrecision.json b/test/client-side-encryption/etc/data/range-encryptedFields-DecimalNoPrecision.json new file mode 100644 index 0000000000..c6d129d4ca --- /dev/null +++ b/test/client-side-encryption/etc/data/range-encryptedFields-DecimalNoPrecision.json @@ -0,0 +1,21 @@ +{ + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalNoPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "rangePreview", + "sparsity": { + "$numberInt": "1" + } + } + } + ] + } + \ No newline at end of file diff --git a/test/client-side-encryption/etc/data/range-encryptedFields-DecimalPrecision.json b/test/client-side-encryption/etc/data/range-encryptedFields-DecimalPrecision.json new file mode 100644 index 0000000000..c23c3fa923 --- /dev/null +++ b/test/client-side-encryption/etc/data/range-encryptedFields-DecimalPrecision.json @@ -0,0 +1,29 @@ +{ + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "rangePreview", + "sparsity": { + "$numberInt": "1" + }, + "min": { + "$numberDecimal": "0.0" + }, + "max": { + "$numberDecimal": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] +} diff --git a/test/client-side-encryption/etc/data/range-encryptedFields-DoubleNoPrecision.json b/test/client-side-encryption/etc/data/range-encryptedFields-DoubleNoPrecision.json new file mode 100644 index 0000000000..4af6422714 --- /dev/null +++ b/test/client-side-encryption/etc/data/range-encryptedFields-DoubleNoPrecision.json @@ -0,0 +1,21 @@ +{ + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoubleNoPrecision", + "bsonType": "double", + "queries": { + "queryType": "rangePreview", + "sparsity": { + "$numberLong": "1" + } + } + } + ] + } + \ No newline at end of file diff --git a/test/client-side-encryption/etc/data/range-encryptedFields-DoublePrecision.json b/test/client-side-encryption/etc/data/range-encryptedFields-DoublePrecision.json new file mode 100644 index 0000000000..c1f388219d --- /dev/null +++ b/test/client-side-encryption/etc/data/range-encryptedFields-DoublePrecision.json @@ -0,0 +1,30 @@ +{ + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoublePrecision", + "bsonType": "double", + "queries": { + "queryType": "rangePreview", + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDouble": "0.0" + }, + "max": { + "$numberDouble": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + } + \ No newline at end of file diff --git 
a/test/client-side-encryption/etc/data/range-encryptedFields-Int.json b/test/client-side-encryption/etc/data/range-encryptedFields-Int.json new file mode 100644 index 0000000000..217bf6743c --- /dev/null +++ b/test/client-side-encryption/etc/data/range-encryptedFields-Int.json @@ -0,0 +1,27 @@ +{ + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedInt", + "bsonType": "int", + "queries": { + "queryType": "rangePreview", + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberInt": "0" + }, + "max": { + "$numberInt": "200" + } + } + } + ] + } + \ No newline at end of file diff --git a/test/client-side-encryption/etc/data/range-encryptedFields-Long.json b/test/client-side-encryption/etc/data/range-encryptedFields-Long.json new file mode 100644 index 0000000000..0fb87edaef --- /dev/null +++ b/test/client-side-encryption/etc/data/range-encryptedFields-Long.json @@ -0,0 +1,27 @@ +{ + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedLong", + "bsonType": "long", + "queries": { + "queryType": "rangePreview", + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberLong": "0" + }, + "max": { + "$numberLong": "200" + } + } + } + ] + } + \ No newline at end of file diff --git a/test/test_encryption.py b/test/test_encryption.py index 35dea51885..fc6d62c727 100644 --- a/test/test_encryption.py +++ b/test/test_encryption.py @@ -59,7 +59,7 @@ ) from test.utils_spec_runner import SpecRunner -from bson import encode, json_util +from bson import DatetimeMS, Decimal128, encode, json_util from bson.binary import UUID_SUBTYPE, Binary, UuidRepresentation from bson.codec_options import CodecOptions from bson.errors import BSONError @@ -68,7 +68,7 @@ from pymongo import encryption from pymongo.cursor import CursorType from pymongo.encryption import Algorithm, ClientEncryption, QueryType -from pymongo.encryption_options import _HAVE_PYMONGOCRYPT, AutoEncryptionOpts +from pymongo.encryption_options import _HAVE_PYMONGOCRYPT, AutoEncryptionOpts, RangeOpts from pymongo.errors import ( AutoReconnect, BulkWriteError, @@ -2494,5 +2494,198 @@ def MongoClient(**kwargs): client_encryption.close() +# https://github.com/mongodb/specifications/blob/master/source/client-side-encryption/tests/README.rst#range-explicit-encryption +class TestRangeQueryProse(EncryptionIntegrationTest): + @client_context.require_no_standalone + @client_context.require_version_min(6, 2, -1) + def setUp(self): + super().setUp() + self.key1_document = json_data("etc", "data", "keys", "key1-document.json") + self.key1_id = self.key1_document["_id"] + self.client.drop_database(self.db) + key_vault = create_key_vault(self.client.keyvault.datakeys, self.key1_document) + self.addCleanup(key_vault.drop) + self.key_vault_client = self.client + self.client_encryption = ClientEncryption( + {"local": {"key": LOCAL_MASTER_KEY}}, key_vault.full_name, self.key_vault_client, OPTS + ) + self.addCleanup(self.client_encryption.close) + opts = AutoEncryptionOpts( + {"local": {"key": LOCAL_MASTER_KEY}}, + key_vault.full_name, + bypass_query_analysis=True, + ) + self.encrypted_client = rs_or_single_client(auto_encryption_opts=opts) + self.db = self.encrypted_client.db + self.addCleanup(self.encrypted_client.close) + + def run_expression_find(self, name, expression, expected_elems, range_opts, use_expr=False): + find_payload = self.client_encryption.encrypt_expression( + 
expression=expression, + key_id=self.key1_id, + algorithm=Algorithm.RANGEPREVIEW, + query_type=QueryType.RANGEPREVIEW, + contention_factor=0, + range_opts=range_opts, + ) + if use_expr: + find_payload = {"$expr": find_payload} + sorted_find = sorted( + self.encrypted_client.db.explicit_encryption.find(find_payload), key=lambda x: x["_id"] + ) + for elem, expected in zip(sorted_find, expected_elems): + self.assertEqual(elem[f"encrypted{name}"], expected) + + def run_test_cases(self, name, range_opts, cast_func): + encrypted_fields = json_data("etc", "data", f"range-encryptedFields-{name}.json") + self.db.drop_collection("explicit_encryption", encrypted_fields=encrypted_fields) + self.db.create_collection("explicit_encryption", encryptedFields=encrypted_fields) + + def encrypt_and_cast(i): + return self.client_encryption.encrypt( + cast_func(i), + key_id=self.key1_id, + algorithm=Algorithm.RANGEPREVIEW, + contention_factor=0, + range_opts=range_opts, + ) + + for elem in [{f"encrypted{name}": encrypt_and_cast(i)} for i in [0, 6, 30, 200]]: + self.encrypted_client.db.explicit_encryption.insert_one(elem) + + # Case 1. + insert_payload = self.client_encryption.encrypt( + cast_func(6), + key_id=self.key1_id, + algorithm=Algorithm.RANGEPREVIEW, + contention_factor=0, + range_opts=range_opts, + ) + self.assertEqual(self.client_encryption.decrypt(insert_payload), cast_func(6)) + + # Case 2. + self.run_expression_find( + name, + { + "$and": [ + {f"encrypted{name}": {"$gte": cast_func(6)}}, + {f"encrypted{name}": {"$lte": cast_func(200)}}, + ] + }, + [cast_func(i) for i in [6, 30, 200]], + range_opts, + ) + + # Case 3. + self.run_expression_find( + name, + { + "$and": [ + {f"encrypted{name}": {"$gte": cast_func(0)}}, + {f"encrypted{name}": {"$lte": cast_func(6)}}, + ] + }, + [cast_func(i) for i in [0, 6]], + range_opts, + ) + + # Case 4. + self.run_expression_find( + name, + { + "$and": [ + {f"encrypted{name}": {"$gt": cast_func(30)}}, + ] + }, + [cast_func(i) for i in [200]], + range_opts, + ) + + # Case 5. + self.run_expression_find( + name, + {"$and": [{"$lt": [f"$encrypted{name}", cast_func(30)]}]}, + [cast_func(i) for i in [0, 6]], + range_opts, + use_expr=True, + ) + + # The spec says to skip the following tests for no precision decimal or double types. + if name not in ("DoubleNoPrecision", "DecimalNoPrecision"): + # Case 6. + with self.assertRaisesRegex( + EncryptionError, + "greater than or equal to the minimum value and less than or equal to the maximum value", + ): + self.client_encryption.encrypt( + cast_func(201), + key_id=self.key1_id, + algorithm=Algorithm.RANGEPREVIEW, + contention_factor=0, + range_opts=range_opts, + ) + + # Case 7. + with self.assertRaisesRegex( + EncryptionError, "expected matching 'min' and value type. Got range option" + ): + self.client_encryption.encrypt( + int(6) if cast_func != int else float(6), + key_id=self.key1_id, + algorithm=Algorithm.RANGEPREVIEW, + contention_factor=0, + range_opts=range_opts, + ) + + # Case 8. + # The spec says we must additionally not run this case with any precision type, not just the ones above. 
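+ # Case 8 rebuilds the range options with precision=2 while min/max are
+ # cast to the collection's own type. Per the asserted error message,
+ # 'precision' is only valid alongside a double or decimal128 range
+ # index, so for the Int, Long, and Date cases this encrypt() call is
+ # expected to fail with the EncryptionError asserted below.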
+ if "Precision" not in name: + with self.assertRaisesRegex( + EncryptionError, + "expected 'precision' to be set with double or decimal128 index, but got:", + ): + self.client_encryption.encrypt( + cast_func(6), + key_id=self.key1_id, + algorithm=Algorithm.RANGEPREVIEW, + contention_factor=0, + range_opts=RangeOpts( + min=cast_func(0), max=cast_func(200), sparsity=1, precision=2 + ), + ) + + def test_double_no_precision(self): + self.run_test_cases("DoubleNoPrecision", RangeOpts(sparsity=1), float) + + def test_double_precision(self): + self.run_test_cases( + "DoublePrecision", + RangeOpts(min=0.0, max=200.0, sparsity=1, precision=2), + float, + ) + + def test_decimal_no_precision(self): + self.run_test_cases( + "DecimalNoPrecision", RangeOpts(sparsity=1), lambda x: Decimal128(str(x)) + ) + + def test_decimal_precision(self): + self.run_test_cases( + "DecimalPrecision", + RangeOpts(min=Decimal128("0.0"), max=Decimal128("200.0"), sparsity=1, precision=2), + lambda x: Decimal128(str(x)), + ) + + def test_datetime(self): + self.run_test_cases( + "Date", + RangeOpts(min=DatetimeMS(0), max=DatetimeMS(200), sparsity=1), + lambda x: DatetimeMS(x).as_datetime(), + ) + + def test_int(self): + self.run_test_cases("Int", RangeOpts(min=0, max=200, sparsity=1), int) + + if __name__ == "__main__": unittest.main() From 2b21e7359f4dd0ad9a30bf919e8bb4114da6f522 Mon Sep 17 00:00:00 2001 From: Julius Park Date: Tue, 24 Jan 2023 21:40:18 -0800 Subject: [PATCH 0325/1588] PYTHON-3582 Add specification tests for range indexes (#1142) --- .../spec/legacy/fle2-BypassQueryAnalysis.json | 3 +- .../spec/legacy/fle2-Compact.json | 3 +- .../spec/legacy/fle2-CreateCollection.json | 3 +- .../spec/legacy/fle2-DecryptExistingData.json | 3 +- .../spec/legacy/fle2-Delete.json | 3 +- ...EncryptedFields-vs-EncryptedFieldsMap.json | 3 +- .../fle2-EncryptedFields-vs-jsonSchema.json | 3 +- .../fle2-EncryptedFieldsMap-defaults.json | 3 +- .../spec/legacy/fle2-FindOneAndUpdate.json | 3 +- .../spec/legacy/fle2-InsertFind-Indexed.json | 3 +- .../legacy/fle2-InsertFind-Unindexed.json | 3 +- .../spec/legacy/fle2-MissingKey.json | 3 +- .../spec/legacy/fle2-NoEncryption.json | 3 +- .../legacy/fle2-Range-Date-Aggregate.json | 514 +++++ .../legacy/fle2-Range-Date-Correctness.json | 1842 ++++++++++++++++ .../spec/legacy/fle2-Range-Date-Delete.json | 459 ++++ .../fle2-Range-Date-FindOneAndUpdate.json | 538 +++++ .../legacy/fle2-Range-Date-InsertFind.json | 505 +++++ .../spec/legacy/fle2-Range-Date-Update.json | 540 +++++ .../legacy/fle2-Range-Decimal-Aggregate.json | 1908 ++++++++++++++++ .../fle2-Range-Decimal-Correctness.json | 1158 ++++++++++ .../legacy/fle2-Range-Decimal-Delete.json | 1133 ++++++++++ .../fle2-Range-Decimal-FindOneAndUpdate.json | 1930 ++++++++++++++++ .../legacy/fle2-Range-Decimal-InsertFind.json | 1899 ++++++++++++++++ .../legacy/fle2-Range-Decimal-Update.json | 1934 +++++++++++++++++ ...fle2-Range-DecimalPrecision-Aggregate.json | 590 +++++ ...e2-Range-DecimalPrecision-Correctness.json | 1650 ++++++++++++++ .../fle2-Range-DecimalPrecision-Delete.json | 493 +++++ ...nge-DecimalPrecision-FindOneAndUpdate.json | 612 ++++++ ...le2-Range-DecimalPrecision-InsertFind.json | 577 +++++ .../fle2-Range-DecimalPrecision-Update.json | 612 ++++++ .../legacy/fle2-Range-Double-Aggregate.json | 1138 ++++++++++ .../legacy/fle2-Range-Double-Correctness.json | 1160 ++++++++++ .../spec/legacy/fle2-Range-Double-Delete.json | 749 +++++++ .../fle2-Range-Double-FindOneAndUpdate.json | 1160 ++++++++++ 
.../legacy/fle2-Range-Double-InsertFind.json | 1129 ++++++++++ .../spec/legacy/fle2-Range-Double-Update.json | 1164 ++++++++++ .../fle2-Range-DoublePrecision-Aggregate.json | 586 +++++ ...le2-Range-DoublePrecision-Correctness.json | 1650 ++++++++++++++ .../fle2-Range-DoublePrecision-Delete.json | 491 +++++ ...ange-DoublePrecision-FindOneAndUpdate.json | 608 ++++++ ...fle2-Range-DoublePrecision-InsertFind.json | 577 +++++ .../fle2-Range-DoublePrecision-Update.json | 612 ++++++ .../spec/legacy/fle2-Range-Int-Aggregate.json | 490 +++++ .../legacy/fle2-Range-Int-Correctness.json | 1644 ++++++++++++++ .../spec/legacy/fle2-Range-Int-Delete.json | 437 ++++ .../fle2-Range-Int-FindOneAndUpdate.json | 512 +++++ .../legacy/fle2-Range-Int-InsertFind.json | 481 ++++ .../spec/legacy/fle2-Range-Int-Update.json | 516 +++++ .../legacy/fle2-Range-Long-Aggregate.json | 490 +++++ .../legacy/fle2-Range-Long-Correctness.json | 1644 ++++++++++++++ .../spec/legacy/fle2-Range-Long-Delete.json | 437 ++++ .../fle2-Range-Long-FindOneAndUpdate.json | 512 +++++ .../legacy/fle2-Range-Long-InsertFind.json | 481 ++++ .../spec/legacy/fle2-Range-Long-Update.json | 516 +++++ .../spec/legacy/fle2-Range-WrongType.json | 162 ++ .../spec/legacy/fle2-Update.json | 3 +- ...e2-validatorAndPartialFieldExpression.json | 3 +- 58 files changed, 38270 insertions(+), 15 deletions(-) create mode 100644 test/client-side-encryption/spec/legacy/fle2-Range-Date-Aggregate.json create mode 100644 test/client-side-encryption/spec/legacy/fle2-Range-Date-Correctness.json create mode 100644 test/client-side-encryption/spec/legacy/fle2-Range-Date-Delete.json create mode 100644 test/client-side-encryption/spec/legacy/fle2-Range-Date-FindOneAndUpdate.json create mode 100644 test/client-side-encryption/spec/legacy/fle2-Range-Date-InsertFind.json create mode 100644 test/client-side-encryption/spec/legacy/fle2-Range-Date-Update.json create mode 100644 test/client-side-encryption/spec/legacy/fle2-Range-Decimal-Aggregate.json create mode 100644 test/client-side-encryption/spec/legacy/fle2-Range-Decimal-Correctness.json create mode 100644 test/client-side-encryption/spec/legacy/fle2-Range-Decimal-Delete.json create mode 100644 test/client-side-encryption/spec/legacy/fle2-Range-Decimal-FindOneAndUpdate.json create mode 100644 test/client-side-encryption/spec/legacy/fle2-Range-Decimal-InsertFind.json create mode 100644 test/client-side-encryption/spec/legacy/fle2-Range-Decimal-Update.json create mode 100644 test/client-side-encryption/spec/legacy/fle2-Range-DecimalPrecision-Aggregate.json create mode 100644 test/client-side-encryption/spec/legacy/fle2-Range-DecimalPrecision-Correctness.json create mode 100644 test/client-side-encryption/spec/legacy/fle2-Range-DecimalPrecision-Delete.json create mode 100644 test/client-side-encryption/spec/legacy/fle2-Range-DecimalPrecision-FindOneAndUpdate.json create mode 100644 test/client-side-encryption/spec/legacy/fle2-Range-DecimalPrecision-InsertFind.json create mode 100644 test/client-side-encryption/spec/legacy/fle2-Range-DecimalPrecision-Update.json create mode 100644 test/client-side-encryption/spec/legacy/fle2-Range-Double-Aggregate.json create mode 100644 test/client-side-encryption/spec/legacy/fle2-Range-Double-Correctness.json create mode 100644 test/client-side-encryption/spec/legacy/fle2-Range-Double-Delete.json create mode 100644 test/client-side-encryption/spec/legacy/fle2-Range-Double-FindOneAndUpdate.json create mode 100644 test/client-side-encryption/spec/legacy/fle2-Range-Double-InsertFind.json create 
mode 100644 test/client-side-encryption/spec/legacy/fle2-Range-Double-Update.json create mode 100644 test/client-side-encryption/spec/legacy/fle2-Range-DoublePrecision-Aggregate.json create mode 100644 test/client-side-encryption/spec/legacy/fle2-Range-DoublePrecision-Correctness.json create mode 100644 test/client-side-encryption/spec/legacy/fle2-Range-DoublePrecision-Delete.json create mode 100644 test/client-side-encryption/spec/legacy/fle2-Range-DoublePrecision-FindOneAndUpdate.json create mode 100644 test/client-side-encryption/spec/legacy/fle2-Range-DoublePrecision-InsertFind.json create mode 100644 test/client-side-encryption/spec/legacy/fle2-Range-DoublePrecision-Update.json create mode 100644 test/client-side-encryption/spec/legacy/fle2-Range-Int-Aggregate.json create mode 100644 test/client-side-encryption/spec/legacy/fle2-Range-Int-Correctness.json create mode 100644 test/client-side-encryption/spec/legacy/fle2-Range-Int-Delete.json create mode 100644 test/client-side-encryption/spec/legacy/fle2-Range-Int-FindOneAndUpdate.json create mode 100644 test/client-side-encryption/spec/legacy/fle2-Range-Int-InsertFind.json create mode 100644 test/client-side-encryption/spec/legacy/fle2-Range-Int-Update.json create mode 100644 test/client-side-encryption/spec/legacy/fle2-Range-Long-Aggregate.json create mode 100644 test/client-side-encryption/spec/legacy/fle2-Range-Long-Correctness.json create mode 100644 test/client-side-encryption/spec/legacy/fle2-Range-Long-Delete.json create mode 100644 test/client-side-encryption/spec/legacy/fle2-Range-Long-FindOneAndUpdate.json create mode 100644 test/client-side-encryption/spec/legacy/fle2-Range-Long-InsertFind.json create mode 100644 test/client-side-encryption/spec/legacy/fle2-Range-Long-Update.json create mode 100644 test/client-side-encryption/spec/legacy/fle2-Range-WrongType.json diff --git a/test/client-side-encryption/spec/legacy/fle2-BypassQueryAnalysis.json b/test/client-side-encryption/spec/legacy/fle2-BypassQueryAnalysis.json index 629faf189d..b8d06e8bcd 100644 --- a/test/client-side-encryption/spec/legacy/fle2-BypassQueryAnalysis.json +++ b/test/client-side-encryption/spec/legacy/fle2-BypassQueryAnalysis.json @@ -4,7 +4,8 @@ "minServerVersion": "6.0.0", "topology": [ "replicaset", - "sharded" + "sharded", + "load-balanced" ] } ], diff --git a/test/client-side-encryption/spec/legacy/fle2-Compact.json b/test/client-side-encryption/spec/legacy/fle2-Compact.json index 46da99cbfc..6ca0f9ba02 100644 --- a/test/client-side-encryption/spec/legacy/fle2-Compact.json +++ b/test/client-side-encryption/spec/legacy/fle2-Compact.json @@ -4,7 +4,8 @@ "minServerVersion": "6.0.0", "topology": [ "replicaset", - "sharded" + "sharded", + "load-balanced" ] } ], diff --git a/test/client-side-encryption/spec/legacy/fle2-CreateCollection.json b/test/client-side-encryption/spec/legacy/fle2-CreateCollection.json index 6836f40e04..9f8db41f87 100644 --- a/test/client-side-encryption/spec/legacy/fle2-CreateCollection.json +++ b/test/client-side-encryption/spec/legacy/fle2-CreateCollection.json @@ -4,7 +4,8 @@ "minServerVersion": "6.0.0", "topology": [ "replicaset", - "sharded" + "sharded", + "load-balanced" ] } ], diff --git a/test/client-side-encryption/spec/legacy/fle2-DecryptExistingData.json b/test/client-side-encryption/spec/legacy/fle2-DecryptExistingData.json index c6d0bca0d1..e622d3334d 100644 --- a/test/client-side-encryption/spec/legacy/fle2-DecryptExistingData.json +++ b/test/client-side-encryption/spec/legacy/fle2-DecryptExistingData.json @@ -4,7 +4,8 
@@ "minServerVersion": "6.0.0", "topology": [ "replicaset", - "sharded" + "sharded", + "load-balanced" ] } ], diff --git a/test/client-side-encryption/spec/legacy/fle2-Delete.json b/test/client-side-encryption/spec/legacy/fle2-Delete.json index 0e3e06396e..8687127748 100644 --- a/test/client-side-encryption/spec/legacy/fle2-Delete.json +++ b/test/client-side-encryption/spec/legacy/fle2-Delete.json @@ -4,7 +4,8 @@ "minServerVersion": "6.0.0", "topology": [ "replicaset", - "sharded" + "sharded", + "load-balanced" ] } ], diff --git a/test/client-side-encryption/spec/legacy/fle2-EncryptedFields-vs-EncryptedFieldsMap.json b/test/client-side-encryption/spec/legacy/fle2-EncryptedFields-vs-EncryptedFieldsMap.json index ea3eb4850c..911b428633 100644 --- a/test/client-side-encryption/spec/legacy/fle2-EncryptedFields-vs-EncryptedFieldsMap.json +++ b/test/client-side-encryption/spec/legacy/fle2-EncryptedFields-vs-EncryptedFieldsMap.json @@ -4,7 +4,8 @@ "minServerVersion": "6.0.0", "topology": [ "replicaset", - "sharded" + "sharded", + "load-balanced" ] } ], diff --git a/test/client-side-encryption/spec/legacy/fle2-EncryptedFields-vs-jsonSchema.json b/test/client-side-encryption/spec/legacy/fle2-EncryptedFields-vs-jsonSchema.json index 1d3227ee7f..f4386483da 100644 --- a/test/client-side-encryption/spec/legacy/fle2-EncryptedFields-vs-jsonSchema.json +++ b/test/client-side-encryption/spec/legacy/fle2-EncryptedFields-vs-jsonSchema.json @@ -4,7 +4,8 @@ "minServerVersion": "6.0.0", "topology": [ "replicaset", - "sharded" + "sharded", + "load-balanced" ] } ], diff --git a/test/client-side-encryption/spec/legacy/fle2-EncryptedFieldsMap-defaults.json b/test/client-side-encryption/spec/legacy/fle2-EncryptedFieldsMap-defaults.json index 030952e056..60820aae95 100644 --- a/test/client-side-encryption/spec/legacy/fle2-EncryptedFieldsMap-defaults.json +++ b/test/client-side-encryption/spec/legacy/fle2-EncryptedFieldsMap-defaults.json @@ -4,7 +4,8 @@ "minServerVersion": "6.0.0", "topology": [ "replicaset", - "sharded" + "sharded", + "load-balanced" ] } ], diff --git a/test/client-side-encryption/spec/legacy/fle2-FindOneAndUpdate.json b/test/client-side-encryption/spec/legacy/fle2-FindOneAndUpdate.json index b31438876f..de1b5c5aad 100644 --- a/test/client-side-encryption/spec/legacy/fle2-FindOneAndUpdate.json +++ b/test/client-side-encryption/spec/legacy/fle2-FindOneAndUpdate.json @@ -4,7 +4,8 @@ "minServerVersion": "6.0.0", "topology": [ "replicaset", - "sharded" + "sharded", + "load-balanced" ] } ], diff --git a/test/client-side-encryption/spec/legacy/fle2-InsertFind-Indexed.json b/test/client-side-encryption/spec/legacy/fle2-InsertFind-Indexed.json index 81a549590e..84b69d7de9 100644 --- a/test/client-side-encryption/spec/legacy/fle2-InsertFind-Indexed.json +++ b/test/client-side-encryption/spec/legacy/fle2-InsertFind-Indexed.json @@ -4,7 +4,8 @@ "minServerVersion": "6.0.0", "topology": [ "replicaset", - "sharded" + "sharded", + "load-balanced" ] } ], diff --git a/test/client-side-encryption/spec/legacy/fle2-InsertFind-Unindexed.json b/test/client-side-encryption/spec/legacy/fle2-InsertFind-Unindexed.json index c1bdc90760..9b31438525 100644 --- a/test/client-side-encryption/spec/legacy/fle2-InsertFind-Unindexed.json +++ b/test/client-side-encryption/spec/legacy/fle2-InsertFind-Unindexed.json @@ -4,7 +4,8 @@ "minServerVersion": "6.0.0", "topology": [ "replicaset", - "sharded" + "sharded", + "load-balanced" ] } ], diff --git a/test/client-side-encryption/spec/legacy/fle2-MissingKey.json 
b/test/client-side-encryption/spec/legacy/fle2-MissingKey.json index 2db1cd7702..4210da09e4 100644 --- a/test/client-side-encryption/spec/legacy/fle2-MissingKey.json +++ b/test/client-side-encryption/spec/legacy/fle2-MissingKey.json @@ -4,7 +4,8 @@ "minServerVersion": "6.0.0", "topology": [ "replicaset", - "sharded" + "sharded", + "load-balanced" ] } ], diff --git a/test/client-side-encryption/spec/legacy/fle2-NoEncryption.json b/test/client-side-encryption/spec/legacy/fle2-NoEncryption.json index e9dd586c26..9d255bd493 100644 --- a/test/client-side-encryption/spec/legacy/fle2-NoEncryption.json +++ b/test/client-side-encryption/spec/legacy/fle2-NoEncryption.json @@ -4,7 +4,8 @@ "minServerVersion": "6.0.0", "topology": [ "replicaset", - "sharded" + "sharded", + "load-balanced" ] } ], diff --git a/test/client-side-encryption/spec/legacy/fle2-Range-Date-Aggregate.json b/test/client-side-encryption/spec/legacy/fle2-Range-Date-Aggregate.json new file mode 100644 index 0000000000..a35321cd35 --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2-Range-Date-Aggregate.json @@ -0,0 +1,514 @@ +{ + "runOn": [ + { + "minServerVersion": "6.2.0", + "topology": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "encrypted_fields": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDate", + "bsonType": "date", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$date": { + "$numberLong": "0" + } + }, + "max": { + "$date": { + "$numberLong": "200" + } + } + } + } + ] + }, + "key_vault_data": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ], + "tests": [ + { + "description": "FLE2 Range Date. 
Aggregate.", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDate": { + "$gt": { + "$date": { + "$numberLong": "0" + } + } + } + } + } + ] + }, + "result": [ + { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + ] + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedDate": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDate", + "bsonType": "date", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$date": { + "$numberLong": "0" + } + }, + "max": { + "$date": { + "$numberLong": "200" + } + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedDate": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDate", + "bsonType": "date", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$date": { + "$numberLong": "0" + } + }, + "max": { + "$date": { + "$numberLong": "200" + } + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "aggregate": "default", + "pipeline": [ + { + "$match": { + "encryptedDate": { + "$gt": { + "$binary": { + "base64": 
"CnEFAAADcGF5bG9hZABBBQAABGcABQUAAAMwAH0AAAAFZAAgAAAAALGGQ/CRD+pGLD53BZzWcCcYbuGLVEyjzXIx7b+ux/q2BXMAIAAAAACOC6mXEZL27P9hethZbtKYsTXKK+FpgQ9Axxmn9N/cCwVjACAAAAAAKs6X8gaa+AJ6PpxVicsk7iCXpSYIkQ5zB0KnXqlfCWgAAzEAfQAAAAVkACAAAAAAtL3QIvnZqCF72eS6lKr8ilff7R6kiNklokiTuaU5wNsFcwAgAAAAAEtqr3/X731VB+VrbFcY8ZrJKRo2E0Fd+C8L0EMNcvcCBWMAIAAAAABYTWOKsDZ7cgPhiKDR2bE5CFGz4MuEwZbzZ0z46eGzBQADMgB9AAAABWQAIAAAAAB99ZW/7KwXKzl5M3XQsAJ3JbEef90IoxFYBArNiYzlgQVzACAAAAAAYO/qaw0+92HAryxOUG7iK6hnIy3OaUA9jIqtHdvcq8YFYwAgAAAAABSqgKlxv5LXoxUO3zNmqGztlZhL2S150WiR/eycv7CWAAMzAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVjACAAAAAAfsofSP7nQHv8ic8ZW0aNlWxplS46Z+mywPR4rQk+wcgAAzQAfQAAAAVkACAAAAAAV22FGF7ZDwK/EYiGNMlm/QuT3saQdyJM/Fn+ZyQug1oFcwAgAAAAACo7GwCvbcs5UHQMgds9/1QMklEVdjZigpuOFGrDmmxtBWMAIAAAAADu3L42J/oqNLe/XNYpodf2fhmPdcLhqO4J24o0hiveXwADNQB9AAAABWQAIAAAAADFspsMG7yHjKppyllon1KqAsTrHaZ6JzNqnSz8o6iTvwVzACAAAAAAeiA5pqVIQQ9s6UY/P8v5Jjkl3I7iFNeLDYehikrINrsFYwAgAAAAAAUljtCIvjRnLB2UPTEeRIz11Snl5SOBUNaXMXig4rqKAAM2AH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVjACAAAAAAD51NYesiO4Fo7w7iWBfqAFxEqkaDVctpvzZ28nT4SE8AAzcAfQAAAAVkACAAAAAAYfLwnoxK6XAGQrJFy8+TIJoq38ldBaO75h4zA4ZX5tQFcwAgAAAAAC2wk8UcJH5X5XGnDBYmel6srpBkzBhHtt3Jw1u5TSJ1BWMAIAAAAABJDtFhJ2tPbowp1UUmOCN/rqSqHRL1dtMu0c47vIlK4wADOAB9AAAABWQAIAAAAADGvyrtKkIcaV17ynZA7b2k5Pz6OhvxdWNkDvDWJIja8wVzACAAAAAAOLypVKNxf/wR1G8OZjUUsTQzDYeNNhhITxGMSp7euS4FYwAgAAAAAE6kvmXPqTnYIH4EJmNhy8OLVJZFOmdiBXLMorhELjKWAAM5AH0AAAAFZAAgAAAAAEocREw1L0g+roFUchJI2Yd0M0ME2bnErNUYnpyJP1SqBXMAIAAAAAAcE2/JK/8MoSeOchIuAkKh1X3ImoA7p8ujAZIfvIDo6QVjACAAAAAA85AiE+bNFAYQTXQAFexgeczdVhf8FUnf16WzJlI/kmsAAAVlACAAAAAA65pz95EthqQpfoHS9nWvdCh05AV+OokP7GUaI+7j8+wSY20AAAAAAAAAAAAAEHBheWxvYWRJZAAAAAAAEGZpcnN0T3BlcmF0b3IAAQAAAAA=", + "subType": "06" + } + } + } + } + } + ], + "cursor": {}, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDate", + "bsonType": "date", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$date": { + "$numberLong": "0" + } + }, + "max": { + "$date": { + "$numberLong": "200" + } + } + } + } + ] + } + } + } + }, + "command_name": "aggregate" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 0, + "encryptedDate": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "5nRutVIyq7URVOVtbE4vM01APSIajAVnsShMwjBlzkM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "RjBYT2h3ZAoHxhf8DU6/dFbDkEBZp0IxREcsRTu2MXs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "x7GR49EN0t3WXQDihkrbonK7qNIBYC87tpL/XEUyIYc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JfYUqWF+OoGjiYkRI4L5iPlF+T1Eleul7Fki22jp4Qc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q1RyGfIgsaQHoZFRw+DD28V26rN5hweApPLwExncvT8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "L2PFeKGvLS6C+DLudR6fGlBq3ERPvjWvRyNRIA2HVb0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CWxaNqL3iP1yCixDkcmf9bmW3E5VeN8TJkg1jJe528s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": 
"+vC6araOEo+fpW7PSIP40/EnzBCj1d2N10Jr3rrXJJM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6SV63Mf51Z6A6p2X3rCnJKCu6ku3Oeb45mBYbz+IoAo=", + "subType": "00" + } + } + ] + }, + { + "_id": 1, + "encryptedDate": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "bE1vqWj3KNyM7cCYUv/cnYm8BPaUL3eMp5syTHq6NF4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "25j9sQXZCihCmHKvTHgaBsAVZFcGPn7JjHdrCGlwyyw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "FA74j21GUEJb1DJBOpR9nVnjaDZnd8yAQNuaW9Qi26g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kJv//KVkbrobIBf+QeWC5jxn20mx/P0R1N6aCSMgKM8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zB+Whi9IUUGxfLEe+lGuIzLX4LFbIhaIAm5lRk65QTc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ybO1QU3CgvhO8JgRXH+HxKszWcpl5aGDYYVa75fHa1g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "X3Y3eSAbbMg//JgiHHiFpYOpV61t8kkDexI+CQyitH4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SlNHXyqVFGDPrX/2ppwog6l4pwj3PKda2TkZbqgfSfA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "McjV8xwTF3xI7863DYOBdyvIv6UpzThl6v9vBRk05bI=", + "subType": "00" + } + } + ] + } + ] + } + } + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2-Range-Date-Correctness.json b/test/client-side-encryption/spec/legacy/fle2-Range-Date-Correctness.json new file mode 100644 index 0000000000..5832e85418 --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2-Range-Date-Correctness.json @@ -0,0 +1,1842 @@ +{ + "runOn": [ + { + "minServerVersion": "6.2.0", + "topology": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "encrypted_fields": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDate", + "bsonType": "date", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$date": { + "$numberLong": "0" + } + }, + "max": { + "$date": { + "$numberLong": "200" + } + } + } + } + ] + }, + "key_vault_data": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ], + "tests": [ + { + "description": "Find with $gt", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + } + } + }, + { + "name": "insertOne", + 
"arguments": { + "document": { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDate": { + "$gt": { + "$date": { + "$numberLong": "0" + } + } + } + } + }, + "result": [ + { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + ] + } + ] + }, + { + "description": "Find with $gte", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDate": { + "$gte": { + "$date": { + "$numberLong": "0" + } + } + } + }, + "sort": { + "_id": 1 + } + }, + "result": [ + { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + }, + { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + ] + } + ] + }, + { + "description": "Find with $gt with no results", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDate": { + "$gt": { + "$date": { + "$numberLong": "1" + } + } + } + } + }, + "result": [] + } + ] + }, + { + "description": "Find with $lt", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDate": { + "$lt": { + "$date": { + "$numberLong": "1" + } + } + } + } + }, + "result": [ + { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + } + ] + } + ] + }, + { + "description": "Find with $lte", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDate": { + 
"$date": { + "$numberLong": "0" + } + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDate": { + "$lte": { + "$date": { + "$numberLong": "1" + } + } + } + }, + "sort": { + "_id": 1 + } + }, + "result": [ + { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + }, + { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + ] + } + ] + }, + { + "description": "Find with $lt below min", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDate": { + "$lt": { + "$date": { + "$numberLong": "0" + } + } + } + } + }, + "result": { + "errorContains": "must be greater than the range minimum" + } + } + ] + }, + { + "description": "Find with $gt above max", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDate": { + "$gt": { + "$date": { + "$numberLong": "200" + } + } + } + } + }, + "result": { + "errorContains": "must be less than the range maximum" + } + } + ] + }, + { + "description": "Find with $gt and $lt", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDate": { + "$gt": { + "$date": { + "$numberLong": "0" + } + }, + "$lt": { + "$date": { + "$numberLong": "2" + } + } + } + } + }, + "result": [ + { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + ] + } + ] + }, + { + "description": "Find with equality", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": 
"Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + } + }, + "result": [ + { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + } + ] + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + }, + "result": [ + { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + ] + } + ] + }, + { + "description": "Find with full range", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDate": { + "$gte": { + "$date": { + "$numberLong": "0" + } + }, + "$lte": { + "$date": { + "$numberLong": "200" + } + } + } + }, + "sort": { + "_id": 1 + } + }, + "result": [ + { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + }, + { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + ] + } + ] + }, + { + "description": "Find with $in", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDate": { + "$in": [ + { + "$date": { + "$numberLong": "0" + } + } + ] + } + } + }, + "result": [ + { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + } + ] + } + ] + }, + { + "description": "Insert out of range", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "-1" + } + } + } + }, + "result": { + "errorContains": "value must be greater than or equal to the minimum value" + } + } + ] + }, + { + 
"description": "Insert min and max", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 200, + "encryptedDate": { + "$date": { + "$numberLong": "200" + } + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": {}, + "sort": { + "_id": 1 + } + }, + "result": [ + { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + }, + { + "_id": 200, + "encryptedDate": { + "$date": { + "$numberLong": "200" + } + } + } + ] + } + ] + }, + { + "description": "Aggregate with $gte", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDate": { + "$gte": { + "$date": { + "$numberLong": "0" + } + } + } + } + }, + { + "$sort": { + "_id": 1 + } + } + ] + }, + "result": [ + { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + }, + { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + ] + } + ] + }, + { + "description": "Aggregate with $gt with no results", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDate": { + "$gt": { + "$date": { + "$numberLong": "1" + } + } + } + } + } + ] + }, + "result": [] + } + ] + }, + { + "description": "Aggregate with $lt", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + } + }, + { + "name": "aggregate", + "arguments": { + 
"pipeline": [ + { + "$match": { + "encryptedDate": { + "$lt": { + "$date": { + "$numberLong": "1" + } + } + } + } + } + ] + }, + "result": [ + { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + } + ] + } + ] + }, + { + "description": "Aggregate with $lte", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDate": { + "$lte": { + "$date": { + "$numberLong": "1" + } + } + } + } + }, + { + "$sort": { + "_id": 1 + } + } + ] + }, + "result": [ + { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + }, + { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + ] + } + ] + }, + { + "description": "Aggregate with $lt below min", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDate": { + "$lt": { + "$date": { + "$numberLong": "0" + } + } + } + } + } + ] + }, + "result": { + "errorContains": "must be greater than the range minimum" + } + } + ] + }, + { + "description": "Aggregate with $gt above max", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDate": { + "$gt": { + "$date": { + "$numberLong": "200" + } + } + } + } + } + ] + }, + "result": { + "errorContains": "must be less than the range maximum" + } + } + ] + }, + { + "description": "Aggregate with $gt and $lt", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + 
"document": { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDate": { + "$gt": { + "$date": { + "$numberLong": "0" + } + }, + "$lt": { + "$date": { + "$numberLong": "2" + } + } + } + } + } + ] + }, + "result": [ + { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + ] + } + ] + }, + { + "description": "Aggregate with equality", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + } + } + ] + }, + "result": [ + { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + } + ] + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + } + ] + }, + "result": [ + { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + ] + } + ] + }, + { + "description": "Aggregate with full range", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDate": { + "$gte": { + "$date": { + "$numberLong": "0" + } + }, + "$lte": { + "$date": { + "$numberLong": "200" + } + } + } + } + }, + { + "$sort": { + "_id": 1 + } + } + ] + }, + "result": [ + { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + }, + { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + ] + } + ] + }, + { + "description": "Aggregate with $in", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + } + }, + { + 
"name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDate": { + "$in": [ + { + "$date": { + "$numberLong": "0" + } + } + ] + } + } + } + ] + }, + "result": [ + { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + } + ] + } + ] + }, + { + "description": "Wrong type: Insert Double", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDate": { + "$numberDouble": "0" + } + } + }, + "result": { + "errorContains": "cannot encrypt element" + } + } + ] + }, + { + "description": "Wrong type: Find Double", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "find", + "arguments": { + "filter": { + "encryptedDate": { + "$gte": { + "$numberDouble": "0" + } + } + } + }, + "result": { + "errorContains": "value type is a date" + } + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2-Range-Date-Delete.json b/test/client-side-encryption/spec/legacy/fle2-Range-Date-Delete.json new file mode 100644 index 0000000000..b5856e7620 --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2-Range-Date-Delete.json @@ -0,0 +1,459 @@ +{ + "runOn": [ + { + "minServerVersion": "6.2.0", + "topology": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "encrypted_fields": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDate", + "bsonType": "date", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$date": { + "$numberLong": "0" + } + }, + "max": { + "$date": { + "$numberLong": "200" + } + } + } + } + ] + }, + "key_vault_data": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ], + "tests": [ + { + "description": "FLE2 Range Date. 
Delete.", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + } + }, + { + "name": "deleteOne", + "arguments": { + "filter": { + "encryptedDate": { + "$gt": { + "$date": { + "$numberLong": "0" + } + } + } + } + }, + "result": { + "deletedCount": 1 + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedDate": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDate", + "bsonType": "date", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$date": { + "$numberLong": "0" + } + }, + "max": { + "$date": { + "$numberLong": "200" + } + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedDate": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDate", + "bsonType": "date", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$date": { + "$numberLong": "0" + } + }, + "max": { + "$date": { + "$numberLong": "200" + } + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "delete": "default", + "deletes": [ + { + "q": { + "encryptedDate": { + "$gt": { + "$binary": { + "base64": 
"CnEFAAADcGF5bG9hZABBBQAABGcABQUAAAMwAH0AAAAFZAAgAAAAALGGQ/CRD+pGLD53BZzWcCcYbuGLVEyjzXIx7b+ux/q2BXMAIAAAAACOC6mXEZL27P9hethZbtKYsTXKK+FpgQ9Axxmn9N/cCwVjACAAAAAAKs6X8gaa+AJ6PpxVicsk7iCXpSYIkQ5zB0KnXqlfCWgAAzEAfQAAAAVkACAAAAAAtL3QIvnZqCF72eS6lKr8ilff7R6kiNklokiTuaU5wNsFcwAgAAAAAEtqr3/X731VB+VrbFcY8ZrJKRo2E0Fd+C8L0EMNcvcCBWMAIAAAAABYTWOKsDZ7cgPhiKDR2bE5CFGz4MuEwZbzZ0z46eGzBQADMgB9AAAABWQAIAAAAAB99ZW/7KwXKzl5M3XQsAJ3JbEef90IoxFYBArNiYzlgQVzACAAAAAAYO/qaw0+92HAryxOUG7iK6hnIy3OaUA9jIqtHdvcq8YFYwAgAAAAABSqgKlxv5LXoxUO3zNmqGztlZhL2S150WiR/eycv7CWAAMzAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVjACAAAAAAfsofSP7nQHv8ic8ZW0aNlWxplS46Z+mywPR4rQk+wcgAAzQAfQAAAAVkACAAAAAAV22FGF7ZDwK/EYiGNMlm/QuT3saQdyJM/Fn+ZyQug1oFcwAgAAAAACo7GwCvbcs5UHQMgds9/1QMklEVdjZigpuOFGrDmmxtBWMAIAAAAADu3L42J/oqNLe/XNYpodf2fhmPdcLhqO4J24o0hiveXwADNQB9AAAABWQAIAAAAADFspsMG7yHjKppyllon1KqAsTrHaZ6JzNqnSz8o6iTvwVzACAAAAAAeiA5pqVIQQ9s6UY/P8v5Jjkl3I7iFNeLDYehikrINrsFYwAgAAAAAAUljtCIvjRnLB2UPTEeRIz11Snl5SOBUNaXMXig4rqKAAM2AH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVjACAAAAAAD51NYesiO4Fo7w7iWBfqAFxEqkaDVctpvzZ28nT4SE8AAzcAfQAAAAVkACAAAAAAYfLwnoxK6XAGQrJFy8+TIJoq38ldBaO75h4zA4ZX5tQFcwAgAAAAAC2wk8UcJH5X5XGnDBYmel6srpBkzBhHtt3Jw1u5TSJ1BWMAIAAAAABJDtFhJ2tPbowp1UUmOCN/rqSqHRL1dtMu0c47vIlK4wADOAB9AAAABWQAIAAAAADGvyrtKkIcaV17ynZA7b2k5Pz6OhvxdWNkDvDWJIja8wVzACAAAAAAOLypVKNxf/wR1G8OZjUUsTQzDYeNNhhITxGMSp7euS4FYwAgAAAAAE6kvmXPqTnYIH4EJmNhy8OLVJZFOmdiBXLMorhELjKWAAM5AH0AAAAFZAAgAAAAAEocREw1L0g+roFUchJI2Yd0M0ME2bnErNUYnpyJP1SqBXMAIAAAAAAcE2/JK/8MoSeOchIuAkKh1X3ImoA7p8ujAZIfvIDo6QVjACAAAAAA85AiE+bNFAYQTXQAFexgeczdVhf8FUnf16WzJlI/kmsAAAVlACAAAAAA65pz95EthqQpfoHS9nWvdCh05AV+OokP7GUaI+7j8+wSY20AAAAAAAAAAAAAEHBheWxvYWRJZAAAAAAAEGZpcnN0T3BlcmF0b3IAAQAAAAA=", + "subType": "06" + } + } + } + }, + "limit": 1 + } + ], + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDate", + "bsonType": "date", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$date": { + "$numberLong": "0" + } + }, + "max": { + "$date": { + "$numberLong": "200" + } + } + } + } + ] + } + }, + "deleteTokens": { + "default.default": { + "encryptedDate": { + "e": { + "$binary": { + "base64": "65pz95EthqQpfoHS9nWvdCh05AV+OokP7GUaI+7j8+w=", + "subType": "00" + } + }, + "o": { + "$binary": { + "base64": "noN+05JsuO1oDg59yypIGj45i+eFH6HOTXOPpeZ//Mk=", + "subType": "00" + } + } + } + } + } + } + }, + "command_name": "delete" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 0, + "encryptedDate": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "5nRutVIyq7URVOVtbE4vM01APSIajAVnsShMwjBlzkM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "RjBYT2h3ZAoHxhf8DU6/dFbDkEBZp0IxREcsRTu2MXs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "x7GR49EN0t3WXQDihkrbonK7qNIBYC87tpL/XEUyIYc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JfYUqWF+OoGjiYkRI4L5iPlF+T1Eleul7Fki22jp4Qc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q1RyGfIgsaQHoZFRw+DD28V26rN5hweApPLwExncvT8=", + "subType": "00" + } 
+ }, + { + "$binary": { + "base64": "L2PFeKGvLS6C+DLudR6fGlBq3ERPvjWvRyNRIA2HVb0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CWxaNqL3iP1yCixDkcmf9bmW3E5VeN8TJkg1jJe528s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+vC6araOEo+fpW7PSIP40/EnzBCj1d2N10Jr3rrXJJM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6SV63Mf51Z6A6p2X3rCnJKCu6ku3Oeb45mBYbz+IoAo=", + "subType": "00" + } + } + ] + } + ] + } + } + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2-Range-Date-FindOneAndUpdate.json b/test/client-side-encryption/spec/legacy/fle2-Range-Date-FindOneAndUpdate.json new file mode 100644 index 0000000000..a59258a466 --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2-Range-Date-FindOneAndUpdate.json @@ -0,0 +1,538 @@ +{ + "runOn": [ + { + "minServerVersion": "6.2.0", + "topology": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "encrypted_fields": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDate", + "bsonType": "date", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$date": { + "$numberLong": "0" + } + }, + "max": { + "$date": { + "$numberLong": "200" + } + } + } + } + ] + }, + "key_vault_data": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ], + "tests": [ + { + "description": "FLE2 Range Date. 
FindOneAndUpdate.", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + } + }, + { + "name": "findOneAndUpdate", + "arguments": { + "filter": { + "encryptedDate": { + "$gt": { + "$date": { + "$numberLong": "0" + } + } + } + }, + "update": { + "$set": { + "encryptedDate": { + "$date": { + "$numberLong": "2" + } + } + } + }, + "returnDocument": "Before" + }, + "result": { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedDate": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDate", + "bsonType": "date", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$date": { + "$numberLong": "0" + } + }, + "max": { + "$date": { + "$numberLong": "200" + } + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedDate": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDate", + "bsonType": "date", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$date": { + "$numberLong": "0" + } + }, + "max": { + "$date": { + "$numberLong": "200" + } + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "findAndModify": "default", + "query": { + "encryptedDate": { + "$gt": { + "$binary": { + "base64": 
"CnEFAAADcGF5bG9hZABBBQAABGcABQUAAAMwAH0AAAAFZAAgAAAAALGGQ/CRD+pGLD53BZzWcCcYbuGLVEyjzXIx7b+ux/q2BXMAIAAAAACOC6mXEZL27P9hethZbtKYsTXKK+FpgQ9Axxmn9N/cCwVjACAAAAAAKs6X8gaa+AJ6PpxVicsk7iCXpSYIkQ5zB0KnXqlfCWgAAzEAfQAAAAVkACAAAAAAtL3QIvnZqCF72eS6lKr8ilff7R6kiNklokiTuaU5wNsFcwAgAAAAAEtqr3/X731VB+VrbFcY8ZrJKRo2E0Fd+C8L0EMNcvcCBWMAIAAAAABYTWOKsDZ7cgPhiKDR2bE5CFGz4MuEwZbzZ0z46eGzBQADMgB9AAAABWQAIAAAAAB99ZW/7KwXKzl5M3XQsAJ3JbEef90IoxFYBArNiYzlgQVzACAAAAAAYO/qaw0+92HAryxOUG7iK6hnIy3OaUA9jIqtHdvcq8YFYwAgAAAAABSqgKlxv5LXoxUO3zNmqGztlZhL2S150WiR/eycv7CWAAMzAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVjACAAAAAAfsofSP7nQHv8ic8ZW0aNlWxplS46Z+mywPR4rQk+wcgAAzQAfQAAAAVkACAAAAAAV22FGF7ZDwK/EYiGNMlm/QuT3saQdyJM/Fn+ZyQug1oFcwAgAAAAACo7GwCvbcs5UHQMgds9/1QMklEVdjZigpuOFGrDmmxtBWMAIAAAAADu3L42J/oqNLe/XNYpodf2fhmPdcLhqO4J24o0hiveXwADNQB9AAAABWQAIAAAAADFspsMG7yHjKppyllon1KqAsTrHaZ6JzNqnSz8o6iTvwVzACAAAAAAeiA5pqVIQQ9s6UY/P8v5Jjkl3I7iFNeLDYehikrINrsFYwAgAAAAAAUljtCIvjRnLB2UPTEeRIz11Snl5SOBUNaXMXig4rqKAAM2AH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVjACAAAAAAD51NYesiO4Fo7w7iWBfqAFxEqkaDVctpvzZ28nT4SE8AAzcAfQAAAAVkACAAAAAAYfLwnoxK6XAGQrJFy8+TIJoq38ldBaO75h4zA4ZX5tQFcwAgAAAAAC2wk8UcJH5X5XGnDBYmel6srpBkzBhHtt3Jw1u5TSJ1BWMAIAAAAABJDtFhJ2tPbowp1UUmOCN/rqSqHRL1dtMu0c47vIlK4wADOAB9AAAABWQAIAAAAADGvyrtKkIcaV17ynZA7b2k5Pz6OhvxdWNkDvDWJIja8wVzACAAAAAAOLypVKNxf/wR1G8OZjUUsTQzDYeNNhhITxGMSp7euS4FYwAgAAAAAE6kvmXPqTnYIH4EJmNhy8OLVJZFOmdiBXLMorhELjKWAAM5AH0AAAAFZAAgAAAAAEocREw1L0g+roFUchJI2Yd0M0ME2bnErNUYnpyJP1SqBXMAIAAAAAAcE2/JK/8MoSeOchIuAkKh1X3ImoA7p8ujAZIfvIDo6QVjACAAAAAA85AiE+bNFAYQTXQAFexgeczdVhf8FUnf16WzJlI/kmsAAAVlACAAAAAA65pz95EthqQpfoHS9nWvdCh05AV+OokP7GUaI+7j8+wSY20AAAAAAAAAAAAAEHBheWxvYWRJZAAAAAAAEGZpcnN0T3BlcmF0b3IAAQAAAAA=", + "subType": "06" + } + } + } + }, + "update": { + "$set": { + "encryptedDate": { + "$$type": "binData" + } + } + }, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDate", + "bsonType": "date", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$date": { + "$numberLong": "0" + } + }, + "max": { + "$date": { + "$numberLong": "200" + } + } + } + } + ] + } + }, + "deleteTokens": { + "default.default": { + "encryptedDate": { + "e": { + "$binary": { + "base64": "65pz95EthqQpfoHS9nWvdCh05AV+OokP7GUaI+7j8+w=", + "subType": "00" + } + }, + "o": { + "$binary": { + "base64": "noN+05JsuO1oDg59yypIGj45i+eFH6HOTXOPpeZ//Mk=", + "subType": "00" + } + } + } + } + } + } + }, + "command_name": "findAndModify" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 0, + "encryptedDate": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "5nRutVIyq7URVOVtbE4vM01APSIajAVnsShMwjBlzkM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "RjBYT2h3ZAoHxhf8DU6/dFbDkEBZp0IxREcsRTu2MXs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "x7GR49EN0t3WXQDihkrbonK7qNIBYC87tpL/XEUyIYc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JfYUqWF+OoGjiYkRI4L5iPlF+T1Eleul7Fki22jp4Qc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": 
"q1RyGfIgsaQHoZFRw+DD28V26rN5hweApPLwExncvT8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "L2PFeKGvLS6C+DLudR6fGlBq3ERPvjWvRyNRIA2HVb0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CWxaNqL3iP1yCixDkcmf9bmW3E5VeN8TJkg1jJe528s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+vC6araOEo+fpW7PSIP40/EnzBCj1d2N10Jr3rrXJJM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6SV63Mf51Z6A6p2X3rCnJKCu6ku3Oeb45mBYbz+IoAo=", + "subType": "00" + } + } + ] + }, + { + "_id": 1, + "encryptedDate": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "DLCAJs+W2PL2DV5YChCL6dYrQNr+j4p3L7xhVaub4ic=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "hyDcE6QQjPrYJaIS/n7evEZFYcm31Tj89CpEYGF45cI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "F08nMDWDZc+DbWM7XCEJNNCEYyinRmrvGP7EWhmp4is=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cXH4688amcDc8kZOJq4UP8cE3R58Zl7e+Qo/1jyspps=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "uURBxvTp3FBCVkd+LPqyuY7d6rMW6SGIJQEPY/wtkZI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "jG3hax1L3RBp9t38vUt53FsBxgr/+Si/vVISpAylYpE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kwtIW8MhH9Ky5xNjBx8gFA/SHh2YVphie7g5FGBzals=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "FHflwFuEMu4xX0ZApHi+pdlBH+oevAtXckCUb5Wv0xU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ty4cnzJdAlbQKnh7px3GEYjBnvO+jIOaKjoTRDtmh3M=", + "subType": "00" + } + } + ] + } + ] + } + } + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2-Range-Date-InsertFind.json b/test/client-side-encryption/spec/legacy/fle2-Range-Date-InsertFind.json new file mode 100644 index 0000000000..4357fafeea --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2-Range-Date-InsertFind.json @@ -0,0 +1,505 @@ +{ + "runOn": [ + { + "minServerVersion": "6.2.0", + "topology": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "encrypted_fields": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDate", + "bsonType": "date", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$date": { + "$numberLong": "0" + } + }, + "max": { + "$date": { + "$numberLong": "200" + } + } + } + } + ] + }, + "key_vault_data": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ], + "tests": [ + { + "description": "FLE2 Range Date. 
Insert and Find.", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDate": { + "$gt": { + "$date": { + "$numberLong": "0" + } + } + } + } + }, + "result": [ + { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + ] + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedDate": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDate", + "bsonType": "date", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$date": { + "$numberLong": "0" + } + }, + "max": { + "$date": { + "$numberLong": "200" + } + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedDate": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDate", + "bsonType": "date", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$date": { + "$numberLong": "0" + } + }, + "max": { + "$date": { + "$numberLong": "200" + } + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "find": "default", + "filter": { + "encryptedDate": { + "$gt": { + "$binary": { + "base64": 
"CnEFAAADcGF5bG9hZABBBQAABGcABQUAAAMwAH0AAAAFZAAgAAAAALGGQ/CRD+pGLD53BZzWcCcYbuGLVEyjzXIx7b+ux/q2BXMAIAAAAACOC6mXEZL27P9hethZbtKYsTXKK+FpgQ9Axxmn9N/cCwVjACAAAAAAKs6X8gaa+AJ6PpxVicsk7iCXpSYIkQ5zB0KnXqlfCWgAAzEAfQAAAAVkACAAAAAAtL3QIvnZqCF72eS6lKr8ilff7R6kiNklokiTuaU5wNsFcwAgAAAAAEtqr3/X731VB+VrbFcY8ZrJKRo2E0Fd+C8L0EMNcvcCBWMAIAAAAABYTWOKsDZ7cgPhiKDR2bE5CFGz4MuEwZbzZ0z46eGzBQADMgB9AAAABWQAIAAAAAB99ZW/7KwXKzl5M3XQsAJ3JbEef90IoxFYBArNiYzlgQVzACAAAAAAYO/qaw0+92HAryxOUG7iK6hnIy3OaUA9jIqtHdvcq8YFYwAgAAAAABSqgKlxv5LXoxUO3zNmqGztlZhL2S150WiR/eycv7CWAAMzAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVjACAAAAAAfsofSP7nQHv8ic8ZW0aNlWxplS46Z+mywPR4rQk+wcgAAzQAfQAAAAVkACAAAAAAV22FGF7ZDwK/EYiGNMlm/QuT3saQdyJM/Fn+ZyQug1oFcwAgAAAAACo7GwCvbcs5UHQMgds9/1QMklEVdjZigpuOFGrDmmxtBWMAIAAAAADu3L42J/oqNLe/XNYpodf2fhmPdcLhqO4J24o0hiveXwADNQB9AAAABWQAIAAAAADFspsMG7yHjKppyllon1KqAsTrHaZ6JzNqnSz8o6iTvwVzACAAAAAAeiA5pqVIQQ9s6UY/P8v5Jjkl3I7iFNeLDYehikrINrsFYwAgAAAAAAUljtCIvjRnLB2UPTEeRIz11Snl5SOBUNaXMXig4rqKAAM2AH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVjACAAAAAAD51NYesiO4Fo7w7iWBfqAFxEqkaDVctpvzZ28nT4SE8AAzcAfQAAAAVkACAAAAAAYfLwnoxK6XAGQrJFy8+TIJoq38ldBaO75h4zA4ZX5tQFcwAgAAAAAC2wk8UcJH5X5XGnDBYmel6srpBkzBhHtt3Jw1u5TSJ1BWMAIAAAAABJDtFhJ2tPbowp1UUmOCN/rqSqHRL1dtMu0c47vIlK4wADOAB9AAAABWQAIAAAAADGvyrtKkIcaV17ynZA7b2k5Pz6OhvxdWNkDvDWJIja8wVzACAAAAAAOLypVKNxf/wR1G8OZjUUsTQzDYeNNhhITxGMSp7euS4FYwAgAAAAAE6kvmXPqTnYIH4EJmNhy8OLVJZFOmdiBXLMorhELjKWAAM5AH0AAAAFZAAgAAAAAEocREw1L0g+roFUchJI2Yd0M0ME2bnErNUYnpyJP1SqBXMAIAAAAAAcE2/JK/8MoSeOchIuAkKh1X3ImoA7p8ujAZIfvIDo6QVjACAAAAAA85AiE+bNFAYQTXQAFexgeczdVhf8FUnf16WzJlI/kmsAAAVlACAAAAAA65pz95EthqQpfoHS9nWvdCh05AV+OokP7GUaI+7j8+wSY20AAAAAAAAAAAAAEHBheWxvYWRJZAAAAAAAEGZpcnN0T3BlcmF0b3IAAQAAAAA=", + "subType": "06" + } + } + } + }, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDate", + "bsonType": "date", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$date": { + "$numberLong": "0" + } + }, + "max": { + "$date": { + "$numberLong": "200" + } + } + } + } + ] + } + } + } + }, + "command_name": "find" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 0, + "encryptedDate": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "5nRutVIyq7URVOVtbE4vM01APSIajAVnsShMwjBlzkM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "RjBYT2h3ZAoHxhf8DU6/dFbDkEBZp0IxREcsRTu2MXs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "x7GR49EN0t3WXQDihkrbonK7qNIBYC87tpL/XEUyIYc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JfYUqWF+OoGjiYkRI4L5iPlF+T1Eleul7Fki22jp4Qc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q1RyGfIgsaQHoZFRw+DD28V26rN5hweApPLwExncvT8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "L2PFeKGvLS6C+DLudR6fGlBq3ERPvjWvRyNRIA2HVb0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CWxaNqL3iP1yCixDkcmf9bmW3E5VeN8TJkg1jJe528s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+vC6araOEo+fpW7PSIP40/EnzBCj1d2N10Jr3rrXJJM=", + "subType": "00" + } + 
}, + { + "$binary": { + "base64": "6SV63Mf51Z6A6p2X3rCnJKCu6ku3Oeb45mBYbz+IoAo=", + "subType": "00" + } + } + ] + }, + { + "_id": 1, + "encryptedDate": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "bE1vqWj3KNyM7cCYUv/cnYm8BPaUL3eMp5syTHq6NF4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "25j9sQXZCihCmHKvTHgaBsAVZFcGPn7JjHdrCGlwyyw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "FA74j21GUEJb1DJBOpR9nVnjaDZnd8yAQNuaW9Qi26g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kJv//KVkbrobIBf+QeWC5jxn20mx/P0R1N6aCSMgKM8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zB+Whi9IUUGxfLEe+lGuIzLX4LFbIhaIAm5lRk65QTc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ybO1QU3CgvhO8JgRXH+HxKszWcpl5aGDYYVa75fHa1g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "X3Y3eSAbbMg//JgiHHiFpYOpV61t8kkDexI+CQyitH4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SlNHXyqVFGDPrX/2ppwog6l4pwj3PKda2TkZbqgfSfA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "McjV8xwTF3xI7863DYOBdyvIv6UpzThl6v9vBRk05bI=", + "subType": "00" + } + } + ] + } + ] + } + } + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2-Range-Date-Update.json b/test/client-side-encryption/spec/legacy/fle2-Range-Date-Update.json new file mode 100644 index 0000000000..fd170554f6 --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2-Range-Date-Update.json @@ -0,0 +1,540 @@ +{ + "runOn": [ + { + "minServerVersion": "6.2.0", + "topology": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "encrypted_fields": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDate", + "bsonType": "date", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$date": { + "$numberLong": "0" + } + }, + "max": { + "$date": { + "$numberLong": "200" + } + } + } + } + ] + }, + "key_vault_data": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ], + "tests": [ + { + "description": "FLE2 Range Date. 
Update.", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + } + }, + { + "name": "updateOne", + "arguments": { + "filter": { + "encryptedDate": { + "$gt": { + "$date": { + "$numberLong": "0" + } + } + } + }, + "update": { + "$set": { + "encryptedDate": { + "$date": { + "$numberLong": "2" + } + } + } + } + }, + "result": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0 + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedDate": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDate", + "bsonType": "date", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$date": { + "$numberLong": "0" + } + }, + "max": { + "$date": { + "$numberLong": "200" + } + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedDate": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDate", + "bsonType": "date", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$date": { + "$numberLong": "0" + } + }, + "max": { + "$date": { + "$numberLong": "200" + } + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command_name": "update", + "command": { + "update": "default", + "ordered": true, + "updates": [ + { + "q": { + "encryptedDate": { + "$gt": { + "$binary": { + "base64": 
"CnEFAAADcGF5bG9hZABBBQAABGcABQUAAAMwAH0AAAAFZAAgAAAAALGGQ/CRD+pGLD53BZzWcCcYbuGLVEyjzXIx7b+ux/q2BXMAIAAAAACOC6mXEZL27P9hethZbtKYsTXKK+FpgQ9Axxmn9N/cCwVjACAAAAAAKs6X8gaa+AJ6PpxVicsk7iCXpSYIkQ5zB0KnXqlfCWgAAzEAfQAAAAVkACAAAAAAtL3QIvnZqCF72eS6lKr8ilff7R6kiNklokiTuaU5wNsFcwAgAAAAAEtqr3/X731VB+VrbFcY8ZrJKRo2E0Fd+C8L0EMNcvcCBWMAIAAAAABYTWOKsDZ7cgPhiKDR2bE5CFGz4MuEwZbzZ0z46eGzBQADMgB9AAAABWQAIAAAAAB99ZW/7KwXKzl5M3XQsAJ3JbEef90IoxFYBArNiYzlgQVzACAAAAAAYO/qaw0+92HAryxOUG7iK6hnIy3OaUA9jIqtHdvcq8YFYwAgAAAAABSqgKlxv5LXoxUO3zNmqGztlZhL2S150WiR/eycv7CWAAMzAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVjACAAAAAAfsofSP7nQHv8ic8ZW0aNlWxplS46Z+mywPR4rQk+wcgAAzQAfQAAAAVkACAAAAAAV22FGF7ZDwK/EYiGNMlm/QuT3saQdyJM/Fn+ZyQug1oFcwAgAAAAACo7GwCvbcs5UHQMgds9/1QMklEVdjZigpuOFGrDmmxtBWMAIAAAAADu3L42J/oqNLe/XNYpodf2fhmPdcLhqO4J24o0hiveXwADNQB9AAAABWQAIAAAAADFspsMG7yHjKppyllon1KqAsTrHaZ6JzNqnSz8o6iTvwVzACAAAAAAeiA5pqVIQQ9s6UY/P8v5Jjkl3I7iFNeLDYehikrINrsFYwAgAAAAAAUljtCIvjRnLB2UPTEeRIz11Snl5SOBUNaXMXig4rqKAAM2AH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVjACAAAAAAD51NYesiO4Fo7w7iWBfqAFxEqkaDVctpvzZ28nT4SE8AAzcAfQAAAAVkACAAAAAAYfLwnoxK6XAGQrJFy8+TIJoq38ldBaO75h4zA4ZX5tQFcwAgAAAAAC2wk8UcJH5X5XGnDBYmel6srpBkzBhHtt3Jw1u5TSJ1BWMAIAAAAABJDtFhJ2tPbowp1UUmOCN/rqSqHRL1dtMu0c47vIlK4wADOAB9AAAABWQAIAAAAADGvyrtKkIcaV17ynZA7b2k5Pz6OhvxdWNkDvDWJIja8wVzACAAAAAAOLypVKNxf/wR1G8OZjUUsTQzDYeNNhhITxGMSp7euS4FYwAgAAAAAE6kvmXPqTnYIH4EJmNhy8OLVJZFOmdiBXLMorhELjKWAAM5AH0AAAAFZAAgAAAAAEocREw1L0g+roFUchJI2Yd0M0ME2bnErNUYnpyJP1SqBXMAIAAAAAAcE2/JK/8MoSeOchIuAkKh1X3ImoA7p8ujAZIfvIDo6QVjACAAAAAA85AiE+bNFAYQTXQAFexgeczdVhf8FUnf16WzJlI/kmsAAAVlACAAAAAA65pz95EthqQpfoHS9nWvdCh05AV+OokP7GUaI+7j8+wSY20AAAAAAAAAAAAAEHBheWxvYWRJZAAAAAAAEGZpcnN0T3BlcmF0b3IAAQAAAAA=", + "subType": "06" + } + } + } + }, + "u": { + "$set": { + "encryptedDate": { + "$$type": "binData" + } + } + } + } + ], + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDate", + "bsonType": "date", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$date": { + "$numberLong": "0" + } + }, + "max": { + "$date": { + "$numberLong": "200" + } + } + } + } + ] + } + }, + "deleteTokens": { + "default.default": { + "encryptedDate": { + "e": { + "$binary": { + "base64": "65pz95EthqQpfoHS9nWvdCh05AV+OokP7GUaI+7j8+w=", + "subType": "00" + } + }, + "o": { + "$binary": { + "base64": "noN+05JsuO1oDg59yypIGj45i+eFH6HOTXOPpeZ//Mk=", + "subType": "00" + } + } + } + } + } + }, + "$db": "default" + } + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 0, + "encryptedDate": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "5nRutVIyq7URVOVtbE4vM01APSIajAVnsShMwjBlzkM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "RjBYT2h3ZAoHxhf8DU6/dFbDkEBZp0IxREcsRTu2MXs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "x7GR49EN0t3WXQDihkrbonK7qNIBYC87tpL/XEUyIYc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JfYUqWF+OoGjiYkRI4L5iPlF+T1Eleul7Fki22jp4Qc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": 
"q1RyGfIgsaQHoZFRw+DD28V26rN5hweApPLwExncvT8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "L2PFeKGvLS6C+DLudR6fGlBq3ERPvjWvRyNRIA2HVb0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CWxaNqL3iP1yCixDkcmf9bmW3E5VeN8TJkg1jJe528s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+vC6araOEo+fpW7PSIP40/EnzBCj1d2N10Jr3rrXJJM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6SV63Mf51Z6A6p2X3rCnJKCu6ku3Oeb45mBYbz+IoAo=", + "subType": "00" + } + } + ] + }, + { + "_id": 1, + "encryptedDate": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "DLCAJs+W2PL2DV5YChCL6dYrQNr+j4p3L7xhVaub4ic=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "hyDcE6QQjPrYJaIS/n7evEZFYcm31Tj89CpEYGF45cI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "F08nMDWDZc+DbWM7XCEJNNCEYyinRmrvGP7EWhmp4is=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cXH4688amcDc8kZOJq4UP8cE3R58Zl7e+Qo/1jyspps=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "uURBxvTp3FBCVkd+LPqyuY7d6rMW6SGIJQEPY/wtkZI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "jG3hax1L3RBp9t38vUt53FsBxgr/+Si/vVISpAylYpE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kwtIW8MhH9Ky5xNjBx8gFA/SHh2YVphie7g5FGBzals=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "FHflwFuEMu4xX0ZApHi+pdlBH+oevAtXckCUb5Wv0xU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ty4cnzJdAlbQKnh7px3GEYjBnvO+jIOaKjoTRDtmh3M=", + "subType": "00" + } + } + ] + } + ] + } + } + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2-Range-Decimal-Aggregate.json b/test/client-side-encryption/spec/legacy/fle2-Range-Decimal-Aggregate.json new file mode 100644 index 0000000000..73d2cf4892 --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2-Range-Decimal-Aggregate.json @@ -0,0 +1,1908 @@ +{ + "runOn": [ + { + "minServerVersion": "6.2.0", + "topology": [ + "replicaset" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "encrypted_fields": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimal", + "bsonType": "decimal", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + }, + "key_vault_data": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ], + "tests": [ + { + "description": "FLE2 Range Decimal. 
Aggregate.", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimal": { + "$numberDecimal": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimal": { + "$numberDecimal": "1" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDecimal": { + "$gt": { + "$numberDecimal": "0" + } + } + } + } + ] + }, + "result": [ + { + "_id": 1, + "encryptedDecimal": { + "$numberDecimal": "1" + } + } + ] + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedDecimal": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimal", + "bsonType": "decimal", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedDecimal": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimal", + "bsonType": "decimal", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "aggregate": "default", + "pipeline": [ + { + "$match": { + "encryptedDecimal": { + "$gt": { + "$binary": { + "base64": 
"CgljAAADcGF5bG9hZADZYgAABGcAnWIAAAMwAH0AAAAFZAAgAAAAAJu2KgiI8vM+kz9qD3ZQzFQY5qbgYqCqHG5R4jAlnlwXBXMAIAAAAAAAUXxFXsz764T79sGCdhxvNd5b6E/9p61FonsHyEIhogVjACAAAAAAeEtIU/sp7zwjA/VArYkCzkUUiRiQOmlTaVZvfUqbEp0AAzEAfQAAAAVkACAAAAAAPQPvL0ARjujSv2Rkm8r7spVsgeC1K3FWcskGGZ3OdDIFcwAgAAAAACgNn660GmefR8jLqzgR1u5O+Uocx9GyEHiBqVGko5FZBWMAIAAAAACMKx1UyNAN4yVafFht8jp4w4LNaJcLhnozfSMzDHD3owADMgB9AAAABWQAIAAAAAD2Zi6kcxmaD2mY3VWrP+wYJMPg6cSBIYPapxaFQxYFdQVzACAAAAAAM/cV36BLBY3xFBXsXJY8M9EHHOc/qrmdc2CJmj3M89gFYwAgAAAAAHdek2wlAQ5BQORVjudyziRTV0EKx8qbsnoDiw4HG2xaAAMzAH0AAAAFZAAgAAAAAOaNqI9srQ/mI9gwbk+VkizGBBH/PPWOVusgnfPk3tY1BXMAIAAAAAAc96O/pwKCmHCagT6T/QV/wz4vqO+R22GsZ1dse2Vg6QVjACAAAAAA3tld+twIlVWnCTXElspau5k02CwxXDjh+3u1CQtV/W4AAzQAfQAAAAVkACAAAAAA0XlQgy/Yu97EQOjronl9b3dcR1DFn3deuVhtTLbJZHkFcwAgAAAAACoMnpVl6EFJak8A+t5N4RFnQhkQEBnNAx8wDqmq5U/dBWMAIAAAAABRmXIchytMNz4J439viiY5FZ1OB3AXJBkZ3udUqHpsqAADNQB9AAAABWQAIAAAAAAOtpNexRxfv0yRFvZO9DhlkpU4mDuAb8ykdLnE5Vf1VAVzACAAAAAAeblFKm/30orP16uQpZslvsoS8s0xfNPIBlw3VkHeekYFYwAgAAAAAJQdzGMO2s1AkdKz3ZPytivrQphUFVhvhMcXjvUGzHBDAAM2AH0AAAAFZAAgAAAAAIr8xAFm1zPmrvW4Vy5Ct0W8FxMmyPmFzdWVzesBhAJFBXMAIAAAAABYeeXjJEzTHwxab6pUiCRiZjxgtN59a1y8Szy3hfkg+gVjACAAAAAA161loizzKvXS1Po/3bxeNICmKpsPSK9Q+EpkISK3jVoAAzcAfQAAAAVkACAAAAAAl+ibLk0/+EwoqeC8S8cGgAtjtpQWGEZDsybMPnrrkwEFcwAgAAAAAHPPBudWgQ+HUorLDpJMqhS9VBF2VF5aLcxgrM1s+yU7BWMAIAAAAAACyakB3CZWEjbxK0u9Sflc3+EafBAQFbvBpCxKvxuEQAADOAB9AAAABWQAIAAAAACR9erwLTb+tcWFZgJ2MEfM0PKI9uuwIjDTHADRFgD+SQVzACAAAAAAcOop8TXsGUVQoKhzUllMYWxL93xCOkwtIpV8Q6hiSYYFYwAgAAAAAJ43998lzKoVqWm99ZzKHJLeVscGNCVoKDvpUtt2rDI9AAM5AH0AAAAFZAAgAAAAALv0vCPgh7QpmM8Ug6ad5ioZJCh7pLMdT8FYyQioBQ6KBXMAIAAAAADsCPyIG8t6ApQkRk1fX/sfc1kpuWCWP8gAEpnYoBSHrQVjACAAAAAAj2h1WLr0EFEFZ31djx3BtLIbAtdE05ax54opkJo4/bQAAzEwAH0AAAAFZAAgAAAAAIW4AxJgYoM0pcNTwk1RSbyjZGIqgKL1hcTJmNrnZmoPBXMAIAAAAAAZpfx3EFO0vY0f1eHnE0PazgqeNDTaj+pPJMUNW8lFrAVjACAAAAAAqFD83E8POmo6pdqg7D+jWtngbgcV99mbQBxkbpX7ds4AAzExAH0AAAAFZAAgAAAAAKliO6L9zgeuufjj174hvmQGNRbmYYs9yAirL7OxwEW3BXMAIAAAAAAqU7vs3DWUQ95Eq8OejwWnD0GuXd+ASi/uD6S0l8MM1QVjACAAAAAAp0cwpAh7SiuStPwYNVvp83N5GWyWRWA7UGRxHDDj8g8AAzEyAH0AAAAFZAAgAAAAAOGQcCBkk+j/Kzjt/Cs6g3BZPJG81wIHBS8JewHGpgk+BXMAIAAAAABjrxZXWCkdzrExwCgyHaafuPSQ4V4x2k9kUCAqUaYKDQVjACAAAAAAJr/tEuxScrJJEMECy4itHf1y5+gO2l5sBjgM/EOLXw0AAzEzAH0AAAAFZAAgAAAAAPmCNEt4t97waOSd5hNi2fNCdWEkmcFJ37LI9k4Az4/5BXMAIAAAAABX7DuDPNg+duvELf3NbLWkPMFw2HGLgWGHyVWcPvSNCAVjACAAAAAAeNCEn5Z3A5CgCnIgaBl2N8p0t+5rRY06vGg6ePVMaTUAAzE0AH0AAAAFZAAgAAAAAD4vtVUYRNB+FD9yoQ2FVJH3nMeJeKbi6eZfth638YqbBXMAIAAAAAANCuUB4OdmuD6LaDK2f3vaqfgYYvg40wDXOBbcFjTqLwVjACAAAAAATozHsao4er/Uk5H/pKe/1JEo/3h4Pgah2RgJHXf2PeYAAzE1AH0AAAAFZAAgAAAAAJPIRzjmTjbdIvshG6UslbEOd797ZSIdjGAhGWxVQvK1BXMAIAAAAABgmJ0Jh8WLs9IYs/a7DBjDWd8J3thW/AGJK7zDnMeYOAVjACAAAAAAtD48LbnCkTvacfz8DvR1ixsbYceiJxJRV7tLqDUWvVwAAzE2AH0AAAAFZAAgAAAAAABQyKQPoW8wGPIqnsTv69+DzIdRkohRhOhDmyVHkw9WBXMAIAAAAAAqWA2X4tB/h3O1Xlawtz6ndI6WaTwgU1QYflL35opu5gVjACAAAAAAphx2eRFGSW/tm3HxilqRv5Zj5TtOy4KSGiCIm6yJANYAAzE3AH0AAAAFZAAgAAAAACB7NOyGQ1Id3MYnxtBXqyZ5Ul/lHH6p1b10U63DfT6bBXMAIAAAAADpOryIcndxztkHSfLN3Kzq29sD8djS0PspDSqERMqokQVjACAAAAAAIHYCljTfUu/Ox23Nrjz2Z6cqQpn4s0sJV/SxkC9UtIEAAzE4AH0AAAAFZAAgAAAAAKVfXLfs8XA14CRTB56oZwV+bFJN5BHraTXbqEXZDmTkBXMAIAAAAAASRWTsfGOpqdffiOodoqIgBzG/yzFyjR5CfUsIUIWGpgVjACAAAAAAGs7l0GsFNkQMcJBUuUsEOMuQupbEvi7MDcezspsCY3EAAzE5AH0AAAAFZAAgAAAAAH/aZr4EuS0/noQR9rcF8vwoaxnxrwgOsSJ0ys8PkHhGBXMAIAAAAACd7ObGQW7qfddcvyxRTkPuvq/PHu7+6I5dxwS1Lzy5XAVjACAAAAAAWnfsbdu5Qvb62rEpBR90qh+3jdYfp266dmuNPLoXys4AAzIwAH0AAAAFZAAgAAAAAKvlcpFFNq0oA+urq3w6d80PK1HHHw0H0yVWvU9aHijXBXMAIAAAAADWnAHQ5Fhlcjawki7kWz
dqjM2f6IdGJblojrYElWjsZgVjACAAAAAAdNxswYIMjqVta8sLC8wniMm6gOgIgG5eWTPAxItBJJ4AAzIxAH0AAAAFZAAgAAAAAH/2aMezEOddrq+dNOkDrdqf13h2ttOnexZsJxG1G6PNBXMAIAAAAABNtgnibjC4VKy5poYjvdsBBnVvDTF/4mmEAxsXVgZVKgVjACAAAAAArWXr2Myg8T8/0eYBCUXDOsIR//zqZtlE6Lb1YZJkvYcAAzIyAH0AAAAFZAAgAAAAAF2wZoDL6/V59QqO8vdRZWDpXpkV4h4KOCSn5e7x7nmzBXMAIAAAAADLZBu7LCYjbThaVUqMK14H/elrVOYIKJQCx4C9Yjw37gVjACAAAAAAWeF6F8pBeCm27PWjQGrlyN7JHp9WqCNnwFzqZ+V51t0AAzIzAH0AAAAFZAAgAAAAAH27yYaLn9zh2CpvaoomUPercSfJRUmBY6XFqmhcXi9QBXMAIAAAAAAUwumVlIYIs9JhDhSj0R0+59psCMsFk94E62VxkPt42QVjACAAAAAAjbsmlVrfexmULer3RtGwnOf20pF0xv4Z46OHOaIwgzEAAzI0AH0AAAAFZAAgAAAAALMg2kNAO4AFFs/mW3In04yFeN4AP6Vo0klyUoT06RquBXMAIAAAAAAgGWJbeIdwlpqXCyVIYSs0dt54Rfc8JF4b8uYc+YUj0AVjACAAAAAAsNKKan8cMW1oYFmQiyPZbepuJlDD0/CQUS21++CP16kAAzI1AH0AAAAFZAAgAAAAALas/i1T2DFCEmrrLEi7O2ngJZyFHialOoedVXS+OjenBXMAIAAAAAA1kK0QxY4REcGxHeMkgumyF7iwlsRFtw9MlbSSoQY7uAVjACAAAAAAUGZ/BwF6Q9hcFBC8F5ed6RwYWJ1/OahRwljBloMgkFAAAzI2AH0AAAAFZAAgAAAAAP1TejmWg1CEuNSMt6NUgeQ5lT+oBoeyF7d2l5xQrbXWBXMAIAAAAABPX0kj6obggdJShmqtVfueKHplH4ZrXusiwrRDHMOKeQVjACAAAAAApU9zcPAQ2xOspmwhjWN90v2m2clQ4rk7WBmm/nUyOHQAAzI3AH0AAAAFZAAgAAAAAMrKn+QPa/NxYezNhlOX9nyEkN1kE/gW7EuZkVqYl0b8BXMAIAAAAABUoZMSPUywRGfX2EEencJEKH5x/P9ySUVrhStAwgR/LgVjACAAAAAAl/hDhauhBkda/I/kLr/XLxyso2guawC2isj+tDQhfMoAAzI4AH0AAAAFZAAgAAAAAMmD1+a+oVbiUZd1HuZqdgtdVsVKwuWAn3/M1B6QGBM3BXMAIAAAAACLyytOYuZ9WEsIrrtJbXUx4QgipbaAbmlJvSZVkGi0CAVjACAAAAAAVKbMdxvkfCBoqJCUVjLrtKF90a80LocckdFc7LvYuWIAAzI5AH0AAAAFZAAgAAAAAOVKV7IuFwmYP1qVv8h0NvJmfPICu8yQhzjG7oJdTLDoBXMAIAAAAABL70XLfQLKRsw1deJ2MUvxSWKxpF/Ez73jqtbLvqbuogVjACAAAAAARiC2ShVCKMjjiqB6xg+jDIYJNhhWqmceO07EC8rEXkoAAzMwAH0AAAAFZAAgAAAAAKS/1RSAQma+xV9rz04IcdzmavtrBDjOKPM+Z2NEyYfPBXMAIAAAAAAOJDWGORDgfRv8+w5nunh41wXb2hCA0MRzwnLnQtIqPgVjACAAAAAAWU2Vw2Rta8oHxPPfOWQa86tjYWndb0E3RNlZ9gthHt0AAzMxAH0AAAAFZAAgAAAAAFeq8o82uNY1X8cH6OhdTzHNBUnCChsEDs5tm0kPBz3qBXMAIAAAAABaxMBbsaeEj/EDtr8nZfrhhhirBRPJwVamDo5WwbgvTQVjACAAAAAAqcsCyJuLvxG8STrUI1Ccaga760+4fbmIPc6bXodea0gAAzMyAH0AAAAFZAAgAAAAAI8IKIfDrohHh2cjspJHCovqroSr5N3QyVtNzFvT5+FzBXMAIAAAAABXHXteKG0DoOMmECKp6ro1MZNQvXGzqTDdZ0DUc8QfFAVjACAAAAAA47u7VuqtYJ4jt5CcGrUacopqApwpqEpIpkalvCRtul0AAzMzAH0AAAAFZAAgAAAAAJkHvm15kIu1OtAiaByj5ieWqzxiu/epK6c/9+KYIrB0BXMAIAAAAACzg5TcyANk0nes/wCJudd1BwlkWWF6zw3nGclq5v3SJQVjACAAAAAAvTQRtEYdIj2+bfJNamvrgWRoBsYYjMQx9MyuFwSfKmYAAzM0AH0AAAAFZAAgAAAAAAYSOvEWWuSg1Aym7EssNLR+xsY7e9BcwsX4JKlnSHJcBXMAIAAAAABT48eY3PXVDOjw7JpNjOe1j2JyI3LjDnQoqZ8Je5B2KgVjACAAAAAA4eLwHqiSgM1pMqjK3QFgYauOnI3emnlOK3rgQPuec4AAAzM1AH0AAAAFZAAgAAAAAGQxC9L1e9DfO5XZvX1yvc3hTLtQEdKO9FPMkyg0Y9ZABXMAIAAAAADtmcMNJwdWLxQEArMGZQyzpnu+Z5yMmPAkvgq4eAKwNQVjACAAAAAAxA7XR9Cp6671TiZesvqE/EPKVPXfGDaYuDnce+MaZw0AAzM2AH0AAAAFZAAgAAAAADLHK2LNCNRO0pv8n4fAsxwtUqCNnVK8rRgNiQfXpHSdBXMAIAAAAACf16EBIHRKD3SzjRW+LMOl+47QXA3CJhMzlcqyFRW22AVjACAAAAAA9F9Ztm39rTapXoeRmzk6eUoa3MDPatwoQD5PvD+3xGEAAzM3AH0AAAAFZAAgAAAAAHiZJiXKNF8bbukQGsdYkEi95I+FSBHy1I5/hK2uEZruBXMAIAAAAADE+lZBa8HDUJPN+bF6xI9x4N7GF9pj3vBR7y0BcfFhBAVjACAAAAAArsw2CrSC2y02EiWJxViVxlYRIXtWARpUTwO/mfqS7O4AAzM4AH0AAAAFZAAgAAAAAI1oa2OIw5TvhT14tYCGmhanUoYcCZtNbrVbeoMldHNZBXMAIAAAAAAx2nS0Ipblf2XOgBiUOuJFBupBhe7nb6QPLZlA4aMPCgVjACAAAAAAaVoGxV5YH4urmcw3Zocg+QHoq+hA0u6nIkconI0MtZoAAzM5AH0AAAAFZAAgAAAAABgTWS3Yap7Q59hii/uPPimHWXsr+DUmsqfwt/X73qsOBXMAIAAAAACKK05liW5KrmEAvtpCB1WUltruzUylDDpjea//UlWoOAVjACAAAAAAAI3iOqWSA+DpgNr/neIRpt85Q8pLS/81ZKDnbC4io/0AAzQwAH0AAAAFZAAgAAAAABOAnBPXDp6i9TISQXvcNKwGDLepZTu3cKrB4vKnSCjBBXMAIAAAAADjjzZO7UowAAvpwyG8BNOVqLCccMFk3aDK4unUeft5ywVjACAAAAAAIZqKrUcbtze/K3CUKdchuVXQYkgj6jQfIeMZ3GrtYeEAAzQxAH0AAAAFZAAgAAAAAHN8hyvT1lYrAsdiV
5GBdd5jhtrAYE/KnSjw2Ka9hjz9BXMAIAAAAAD794JK7EeXBs+D7yOVK7nWF8SbZ/7U8gZ7nnT9JFNwTAVjACAAAAAAQ4K5cDGLM/2L7KbMt537tsG1eiWrboKMj6pXVfBDIBoAAzQyAH0AAAAFZAAgAAAAAO93bPrq8bsnp1AtNd9ETnXIz0lH/2HYN/vuw9wA3fyFBXMAIAAAAABHlls5fbaF2oAGqptC481XQ4eYxInTC29aElfmVZgDUgVjACAAAAAAe0oLBk+ZaipcPgjjHvACAyA42cLY8i+s+V0EsarulZkAAzQzAH0AAAAFZAAgAAAAAL1YsAZm1SA0ztU6ySIrQgCCA74V6rr0/4iIygCcaJL6BXMAIAAAAADTXWTHWovGmUR1Zg9l/Aqq9H5mOCJQQrb/Dfae7e3wKAVjACAAAAAAAeXpcv0oCp9iY/U81w+WJjFjGn60KLQEURJd/pSoaJgAAzQ0AH0AAAAFZAAgAAAAAEoFAeHk0RZ9kD+cJRD3j7PcE5gzWKnyBrF1I/MDNp5mBXMAIAAAAACgHtc2hMBRSZjKw8RAdDHK+Pi1HeyjiBuAslGVNcW5tAVjACAAAAAAmSJiRfjmuwq5cYfadeYQRkPqbGYYrMNTQeyhBEAYClEAAzQ1AH0AAAAFZAAgAAAAAAW+7DmSN/LX+/0uBVJDHIc2dhxAGz4+ehyyz8fAnNGoBXMAIAAAAAA6Ilw42EvvfLJ3Eq8Afd+FjPoPcQutZO6ltmCLEr8kxQVjACAAAAAAUHZ9EwBIvDI4PUeCOzZCuVN9g9h5Zvq2mSpmejPLBE8AAzQ2AH0AAAAFZAAgAAAAANBdV7M7kuYO3EMoQItAbXv4t2cIhfaT9V6+s4cg9djlBXMAIAAAAABvz4MIvZWxxrcJCL5qxLfFhXiUYB1OLHdKEjco94SgDgVjACAAAAAAvz1koQRh+weAUstLWz/iHxFrLReN4KtQjdEU/zrKOFsAAzQ3AH0AAAAFZAAgAAAAAMoAoiAn1kc79j5oPZtlMWHMhhgwNhLUnvqkqIFvcH1NBXMAIAAAAADcJTW7WiCyW0Z9YDUYwppXhLj4Ac1povpJvcAq+i48MQVjACAAAAAARL0k3Uwe7OUwTejHMoNwK+twNHQznmo1vVUZaH7h8BIAAzQ4AH0AAAAFZAAgAAAAACI3j5QP7dWHpcT6WO/OhsWwRJNASBYqIBDNzW8IorEyBXMAIAAAAABxUpBSjXwCKDdGP9hYU+RvyR+96kChfvyyRC4jZmztqAVjACAAAAAA93/qZqyku1BJePH5hmMWsMfuMlF7TOFZp7z2wLJNdeYAAzQ5AH0AAAAFZAAgAAAAAKsbycEuQSeNrF8Qnxqw3x3og8JmQabwGqnDbqzFRVrrBXMAIAAAAACno/3ef2JZJS93SVVzmOZSN+jjJHT8s0XYq2M46d2sLAVjACAAAAAARUbN9bbz8Balyw8pmBLg3L5dy3aQ9s8IgqpVLeQzTkUAAzUwAH0AAAAFZAAgAAAAAPXIcoO8TiULqlxzb74NFg+I8kWX5uXIDUPnh2DobIoMBXMAIAAAAADR6/drkdTpnr9g1XNvKDwtBRBdKn7c2c4ZNUVK5CThdQVjACAAAAAAnJXYvnjljaE6ajRF+h90FXzJ9EJY26D0oG9t61ZMzqgAAzUxAH0AAAAFZAAgAAAAAEa03ZOJmfHT6/nVadvIw71jVxEuIloyvxXraYEW7u7pBXMAIAAAAADzRlBJK75FLiKjz3djqcgjCLo/e3yntI3MnPS48OORhgVjACAAAAAAbFS6SmTSDF3AsPceQrRwMMRoAnzL/k5avSHlIji5zYIAAzUyAH0AAAAFZAAgAAAAAKx8NLSZUU04pSSGmHa5fh2oLHsEN5mmNMNHL95/tuC9BXMAIAAAAAA59hcXVaN3MNdHoo11OcH1aPRzHCwpVjO9mGfMz4xh3QVjACAAAAAAoIvo/iOb/eXJv7w4KSQo74DOyO4uAIO09Ba7JZw9ousAAzUzAH0AAAAFZAAgAAAAAHNKAUxUqBFNS9Ea9NgCZoXMWgwhP4x0/OvoaPRWMquXBXMAIAAAAABUZ551mnP4ZjX+PXU9ttomzuOpo427MVynpkyq+nsYCQVjACAAAAAAlCTphL00SYotztwI+txbHAFTusjh1nNbiya24rM1dUEAAzU0AH0AAAAFZAAgAAAAALfGXDlyDVcGaqtyHkLT0qpuRhJQLgCxtznazhFtuyn/BXMAIAAAAABipxlXDq14C62pXhwAeen5+syA+/C6bN4rtZYcO4zKwAVjACAAAAAA431CBTW4s3IQde024W2cz5iEnj8EJ7p07esepoVDxmQAAzU1AH0AAAAFZAAgAAAAANoEr8sheJjg4UCfBkuUzarU9NFoy1xwbXjs5ifVDeA9BXMAIAAAAABPoyTf6M+xeZVGES4aNzVlq7LgjqZXJ/QunjYVusGUEAVjACAAAAAAywuLdjJJuXewOXMAr25GMBEif8+P74+pGaENUn+XIB8AAzU2AH0AAAAFZAAgAAAAAKvDiK+xjlBe1uQ3SZTNQl2lClIIvpP/5CHwY6Kb3WlgBXMAIAAAAAANnxImq5MFbWaRBHdJp+yD09bVlcFtiFDYsy1eDZj+iQVjACAAAAAAvwntX1RgeNfp5oiD+r3sAKIbMGy1yEblODDYWec1xuIAAzU3AH0AAAAFZAAgAAAAAF49IlFH9RmSUSvUQpEPUedEksrQUcjsOv44nMkwXhjzBXMAIAAAAADJtWGbk0bZzmk20obz+mNsp86UCu/nLLlbg7ppxYn7PgVjACAAAAAAKW8QTERGXwh6fE4gJsULcmu80B4BMkMaZR9USn/C6RgAAzU4AH0AAAAFZAAgAAAAAOuSJyuvz50lp3BzXlFKnq62QkN2quNU1Gq1IDsnFoJCBXMAIAAAAAAqavH1d93XV3IzshWlMnzznucadBF0ND092/2ApI1AcAVjACAAAAAAmVcLt9hf9/q+2O3K4cvFAsvnCsSofHZ5iTzIaR5YKFIAAzU5AH0AAAAFZAAgAAAAALtgC4Whb4ZdkCiI30zY6fwlsxSa7lEaOAU3SfUXr02XBXMAIAAAAACgdZ6U1ZVgUaZZwbIaCdlANpCw6TZV0bwg3DS1NC/mnAVjACAAAAAAYJOnAwVW0gPl1jkCxAL1ZAhKkJ1+ShfwyaIaipflCegAAzYwAH0AAAAFZAAgAAAAAF6PfplcGp6vek1ThwenMHVkbZgrc/dHgdsgx1VdPqZ5BXMAIAAAAACha3qhWkqmuwJSEXPozDO8y1ZdRLyzt9Crt2vjGnT7AAVjACAAAAAA1JYhGDDvtf8b2Pup5jdzQmy325tGo27n/5thjZ/GI4sAAzYxAH0AAAAFZAAgAAAAAKoLEhLvLjKc7lhOJfx+VrGJCx9tXlOSa9bxQzGR6rfbBXMAIAAAAAAIDK5wNnjRMBzET7x/KAMExL/zi1IumJM92XTgXfoPoAVjACAAAAAAhoVLxo0hXbxAzz2BWbeRxAzk
08TbVuHxJkdsbCISswkAAzYyAH0AAAAFZAAgAAAAADoQv6lutRmh5scQFvIW6K5JBquLxszuygM1tzBiGknIBXMAIAAAAADAD+JjW7FoBQ76/rsECmmcL76bmyfXpUU/awqIsZdO+wVjACAAAAAAwLIixR/svwADNhtiOZ9mDF2rUqEvuPx2tUSQbhOpersAAzYzAH0AAAAFZAAgAAAAAJNHUGAgn56KekghO19d11nai3lAh0JAlWfeP+6w4lJBBXMAIAAAAAD9XGJlvz59msJvA6St9fKW9CG4JoHV61rlWWnkdBRLzwVjACAAAAAAKMVXumaH5QOt5WVHFip9xWx9e5I/5Y9uEO0UAX6dy7QAAzY0AH0AAAAFZAAgAAAAAHgYoMGjEE6fAlAhICv0+doHcVX8CmMVxyq7+jlyGrvmBXMAIAAAAAC/5MQZgTHuIr/O5Z3mXPvqrom5JTQ8IeSpQGhO9sB+8gVjACAAAAAAPMgD8Rqnd94atKnMyPjlTwthUQ710MKJVqgtwNXLFWwAAzY1AH0AAAAFZAAgAAAAANpIljbxHOM7pydY877gpRQvYY2TGK7igqgGsavqGPBABXMAIAAAAAAqHyEu9gpurPOulApPnr0x9wrygY/7mXe9rAC+tPK80wVjACAAAAAAezZExYLAbjyMsVuSrWGQ+LsXYEcp6AFMANkQ496mTXcAAzY2AH0AAAAFZAAgAAAAAGR+gEaZTeGNgG9BuM1bX2R9ed4FCxBA9F9QvdQDAjZwBXMAIAAAAABSkrYFQ6pf8MZ1flgmeIRkxaSh/Eep4Btdx4QYnGGnwAVjACAAAAAAQ011358xXtVDOyGPpF2+zGoMdiQ8PGCB1QfDcluYUF4AAzY3AH0AAAAFZAAgAAAAAFNprhQ3ZwIcYbuzLolAT5n/vc14P9kUUQComDu6eFyKBXMAIAAAAAAcx9z9pk32YbPV/sfPZl9ALIEVsqoLXgqWLVK/tP+heAVjACAAAAAA3eqduo9CWE5BePqBq7KCh5++QqXnyjCwybB15Zoeiu4AAzY4AH0AAAAFZAAgAAAAADgyPqQdqQrgfmJjRFAILTHzXbdw5kpKyfeoEcy6YYG/BXMAIAAAAAAE+3XsBQ8VAxAkN81au+f3FDeCD/s7KoZD+fnM1MJSSAVjACAAAAAAWHAiQNo6HBkIhf72fVQxJLaCtNeTqn2OuMmYZjT7PRkAAzY5AH0AAAAFZAAgAAAAAI0CT7JNngTCTUSei1Arw7eHWCD0jumv2rb7imjWIlWABXMAIAAAAABSP8t6ya0SyCphXMwnru6ZUDXWElN0NfBvEOhDvW9bJQVjACAAAAAAbfZdoqMQ71hNIsycNHl6JpSmAFDPSfgzdWkGqFZNoScAAzcwAH0AAAAFZAAgAAAAAD/FIrGYFDjyYmVb7oTMVwweWP7A6F9LnyIuNO4MjBnXBXMAIAAAAACIZgJCQRZu7NhuNMyOqCn1tf+DfU1qm10TPCfj5JYV3wVjACAAAAAAZCaO06/DjOBjNrTxB8jdxM7X4XvNu9QTa2Y00kZJwgEAAzcxAH0AAAAFZAAgAAAAAHIkVuNDkSS1cHIThKc/O0r2/ubaABTOi8Q1r/dvBAsEBXMAIAAAAADdHYqchEiJLM340c3Q4vJABmmth3+MKzwLYlsG6GS7sQVjACAAAAAA4byh4Re83zUZxvEH4iJSsnFei+ohsWxjSpB5YoAPawIAAzcyAH0AAAAFZAAgAAAAAJmoDILNhC6kn3masElfnjIjP1VjsjRavGk1gSUIjh1NBXMAIAAAAAD97Ilvp3XF8T6MmVVcxMPcdL80RgQ09UoC6PnoOvZ1IQVjACAAAAAAx9wkqPvtzrL9EFDmFvktI129ti8IAdHn0oudubGPWAAAAzczAH0AAAAFZAAgAAAAAI5bm3YO0Xgf0VT+qjVTTfvckecM3Cwqj7DTKZXf8/NXBXMAIAAAAAD/m+h8fBhWaHm6Ykuz0WX1xL4Eme3ErLObyEVJf8NCywVjACAAAAAAiGdTXD22l1zDxHeF4NXUTiBnNTsdpzJRwM6riTPuOogAAzc0AH0AAAAFZAAgAAAAANqo4+p6qdtCzcB4BX1wQ6llU7eFBnuu4MtZwp4B6mDlBXMAIAAAAAAGiz+VaukMZ+6IH4jtn4KWWdKK4/W+O+gRioQDrfzpMgVjACAAAAAAl/wlBjSJW/hKkll8HSBCGw4Ce1pJ5rZuhVE09hKq4jQAAzc1AH0AAAAFZAAgAAAAAPrFXmHP2Y4YAm7b/aqsdn/DPoDkv7B8egWkfe23XsM1BXMAIAAAAAAGhwpKAr7skeqHm3oseSbO7qKNhmYsuUrECBxJ5k+D2AVjACAAAAAAKSsxERy2OBhb5MiH9/H2BET2oeU6yVihiAoWi/16FroAAzc2AH0AAAAFZAAgAAAAABzjYxwAjXxXc0Uxv18rH8I3my0Aguow0kTwKyxbrm+cBXMAIAAAAADVbqJVr6IdokuhXkEtXF0C2gINLiAjMVN20lE20Vmp2QVjACAAAAAACJ+VAwl9X88mjC76yzkWeL4K6AamNYjbNkpS9B6fQE4AAzc3AH0AAAAFZAAgAAAAAFMm2feF2fFCm/UC6AfIyepX/xJDSmnnolQIBnHcPmb5BXMAIAAAAABLI11kFrQoaNVZFmq/38aRNImPOjdJh0Lo6irI8M/AaAVjACAAAAAAZFgYh4nV8YN+/AAqe/QhKDkNBPg8KraCG7y3bOzhPV4AAzc4AH0AAAAFZAAgAAAAAJvXB3KyNiNtQko4SSzo/9b2qmM2zU9CQTTDfLSBWMgRBXMAIAAAAAAvjuVP7KsLRDeqVqRziTKpBrjVyqKiIbO9Gw8Wl2wFTAVjACAAAAAARuBn3JdQ8so7Gy0XVH0CtlvtRoJWhSrJP6eRIqbyWh8AAzc5AH0AAAAFZAAgAAAAAPGdcxDiid8z8XYnfdDivNMYVPgBKdGOUw6UStU+48CdBXMAIAAAAAARj6g1Ap0eEfuCZ4X2TsEw+Djrhto3fA5nLwPaY0vCTgVjACAAAAAATxLY6SkcLPaoOBJpQsggEqoxgJgiY9seP3fBQM05PckAAzgwAH0AAAAFZAAgAAAAAP5rGPrYGt3aKob5f/ldP0qrW7bmWvqnKY4QwdDWz400BXMAIAAAAADTQkW2ymaaf/bhteOOGmSrIR97bAnJx+yN3yMj1bTeewVjACAAAAAA6O+wIMt3xMITuCxVrqOCNBPX5F122G+/Is+EYkzUvVYAAzgxAH0AAAAFZAAgAAAAAAlz6wJze5UkIxKpJOZFGCOf3v2KByWyI6NB6JM9wNcBBXMAIAAAAABUC7P/neUIHHoZtq0jFVBHY75tSFYr1Y5S16YN5XxC1QVjACAAAAAANv97jMd9JmZ7lMeCzK7jaZHZYsZJNl6N9g9WXzd2om0AAzgyAH0AAAAFZAAgAAAAAFJ8AtHcjia/9Y5pLEc3qVgH5xKiXw12G9Kn2A1EY8McBXMAIAAAAAAxe7Bdw7eUSBk/oAawa7u
icTEDgXLymRNhBy1LAxhDvwVjACAAAAAAtOUZ2NeUWwESpq95O92dhqYBt8RtFnjT43E7k9mQF3oAAzgzAH0AAAAFZAAgAAAAAO8uwQUaKFb6vqR3Sv3Wn4QAonC2exOC9lGG1juqP5DtBXMAIAAAAABZf1KyJgQg8/Rf5c02DgDK2aQu0rNCOvaL60ohDHyY+gVjACAAAAAA5nAWj1Ek8p0MLYZEB3yoVFDfBYZ+/ZpIo71u+W9hprcAAzg0AH0AAAAFZAAgAAAAAE8YtqyRsGCeiR6hhiyisR/hccmK4nZqIMzO4lUBmEFzBXMAIAAAAAC1UYOSKqAeG1UJiKjWFVskRhuFKpj9Ezy+lICZvFlN5AVjACAAAAAA37oP30mTzVftI2FL9Uwyls/jqLqbmRDQk7p7nlx44uYAAzg1AH0AAAAFZAAgAAAAAPhCrMausDx1QUIEqp9rUdRKyM6a9AAx7jQ3ILIu8wNIBXMAIAAAAACmH8lotGCiF2q9VQxhsS+7LAZv79VUAsOUALaGxE/EpAVjACAAAAAAhVIkl8p19VZ0gpg32s++Jda3qsVSVB5tV4CKrtjhE3IAAzg2AH0AAAAFZAAgAAAAAOBi/GAYFcstMSJPgp3VkMiuuUUCrZytvqYaU8dwm8v2BXMAIAAAAACEZSZVyD3pKzGlbdwlYmWQhHHTV5SnNLknl2Gw8IaUTQVjACAAAAAAriBex2kK//RPhyVpCYJDBng4l5w8jD3m8BF7dVAP0p8AAzg3AH0AAAAFZAAgAAAAAIQuup+YGfH3mflzWopN8J1X8o8a0d9CSGIvrA5HOzraBXMAIAAAAADYvNLURXsC2ITMqK14LABQBI+hZZ5wNf24JMcKLW+84AVjACAAAAAAGeUzdG2kQrx5XypXJezZmPVzMYuqYZw7Bhbl4EPT0SMAAzg4AH0AAAAFZAAgAAAAAKsh0ADyOnVocFrOrf6MpTrNvAj8iaiE923DPryu124gBXMAIAAAAADg24a8NVE1GyScc6tmnTbmu5ulzO+896fE92lN08MeswVjACAAAAAAbO+DEBY3STVMQN7CbxmHDUBYrDR+80e797u/VmiK4vcAAzg5AH0AAAAFZAAgAAAAAKkVC2Y6HtRmv72tDnPUSjJBvse7SxLqnr09/Uuj9sVVBXMAIAAAAABYNFUkH7ylPMN+Bc3HWX1e0flGYNbtJNCY9SltJCW/UAVjACAAAAAAHlE3RLKcXpXto7Mr8nRgzEsmhjfGh4vcgPxashCSMg4AAzkwAH0AAAAFZAAgAAAAADDggcwcb/Yn1Kk39sOHsv7BO/MfP3m/AJzjGH506Wf9BXMAIAAAAAAYZIsdjICS0+BDyRUPnrSAZfPrwtuMaEDEn0/ijLNQmAVjACAAAAAA1c/sy3255NofBQrnBNSmVkSzMgsGPaaOUJShddVrnuQAAzkxAH0AAAAFZAAgAAAAAEWY7dEUOJBgjOoWVht1wLehsWAzB3rSOBtLgTuM2HC8BXMAIAAAAAAAoswiHRROurjwUW8u8D5EUT+67yvrgpB/j6PzBDAfVwVjACAAAAAALBvQumG8m7/bzJjGWN2cHSAncdN8jMtOSmEhEhGom24AAzkyAH0AAAAFZAAgAAAAAPZaapeAUUFPA7JTCMOWHJa9lnPFh0/gXfAPjA1ezm4ZBXMAIAAAAACmJvLY2nivw7/b3DOKH/X7bBXjJwoowqb1GtEFO3OYgAVjACAAAAAAGCDHuUJusGHKQ+r9nrFChmUUsRcqZKPGsRiLSk5gbFcAAzkzAH0AAAAFZAAgAAAAAK00u6jadxCZAiA+fTsPVDsnW5p5LCr4+kZZZOTDuZlfBXMAIAAAAAAote4zTEYMDgaaQbAdN8Dzv93ljPLdGjJzvnRn3KXgtQVjACAAAAAApZ6l+OrocOqgFek7WOqOP9JruTWZ+iW+5zdL3DZwzhkAAzk0AH0AAAAFZAAgAAAAAFbgabdyymiEVYYwtJSWa7lfl/oYuj/SukzJeDOR6wPVBXMAIAAAAADAFGFjS1vPbN6mQEhkDYTD6V2V23Ys9gUEUMGNvMPkaAVjACAAAAAAeICKly5Xtwmfd4JFD+5e1UWpu4V3KoRim7jeBICDs+UAAzk1AH0AAAAFZAAgAAAAABNMR6UBv2E627CqLtQ/eDYx7OEwQ7JrR4mSHFa1N8tLBXMAIAAAAAAxH4gucI4UmNVB7625C6hFSVCuIpJO3lusJlPuL8H5EQVjACAAAAAA0/uh1hrAlGGna/njCVaCqBwubxkifzF0zjCDQrIJOtUAAzk2AH0AAAAFZAAgAAAAAG8cd6WBneNunlqrQ2EmNf35W7OGObGq9WL4ePX+LUDmBXMAIAAAAAAjJ2+sX87NSis9hBsgb1QprVRnO7Bf+GObCGoUqyPE4wVjACAAAAAAAoJ4Ufdq45T9Aun5FupHlaBCIUsmUn6dXgV9KorpFikAAzk3AH0AAAAFZAAgAAAAAFWOUGkUpy8yf6gB3dio/aOfRKh7XuhvsUj48iESFJrGBXMAIAAAAAAY7sCDMcrUXvNuL6dO0m11WyijzXZvPIcOKob6IpC4PQVjACAAAAAAO/QFjczoznH95Iu0YEVMsU1GA1yxSGL8bcwSweNzAtkAAzk4AH0AAAAFZAAgAAAAAGUrIdKxOihwNmo6B+aG+Ag1qa0+iqdksHOjQj+Oy9bZBXMAIAAAAABwa5dbI2KmzBDNBTQBEkjZv4sPaeRkRNejcjdVymRFKQVjACAAAAAAUtdJucZoQvvPJiy65RonQrxBCcvtfHslpbgLbtWirCYAAzk5AH0AAAAFZAAgAAAAAOx89xV/hRk64/CkM9N2EMK6aldII0c8smdcsZ46NbP8BXMAIAAAAADBF6tfQ+7q9kTuLyuyrSnDgmrdmrXkdhl980i1KHuGHgVjACAAAAAAVc5njbLX1afpsuG662U/OBo4LanEk06lKbQtd95fswgAAzEwMAB9AAAABWQAIAAAAADJDdC9aEFl4Y8J/awHbnXGHjfP+VXQilPHJg7ewaJI7AVzACAAAAAAE+tqRl6EcBMXvbr4GDiNIYObTsYpa1n6BJk9EjIJVicFYwAgAAAAAITXOaLyGsYLUvOemQXxvnAHBwM/t3sMwfQus2aGoII+AAMxMDEAfQAAAAVkACAAAAAA3F9rjEKhpoHuTULVGgfUsGGwJs3bISrXkFP1v6KoQLgFcwAgAAAAAIBf0tXw96Z/Ds0XSIHX/zk3MzUR/7WZR/J6FpxRWChtBWMAIAAAAABzRshE3O7JzVjSGQSabvFXpen8sGUCAyr8hIIevROrjgADMTAyAH0AAAAFZAAgAAAAAOYIYoWkX7dGuyKfi3XssUlc7u/gWzqrR9KMkikKVdmSBXMAIAAAAABVF2OYjRTGi9Tw8XCAwZWLpX35Yl271TlNWp6N/nROhAVjACAAAAAAyGLbmHhe8eTQC8L2eDRcezUWULLZ9E8DPic7Mfp8/+4AAzEwMwB9AAAABWQAIAAAAACMtPm12YtdEA
vqu6Eji1yuRXnu1RJP6h0l7pH3lSH4MwVzACAAAAAAENyCFfyUAh1veQBGx+cxiB7Sasrj41jzCGflZkB5cRMFYwAgAAAAAA92s3DQ3aIVOrRzusqU+xdK7cnQGCDJiyLF+su3F1ZDAAMxMDQAfQAAAAVkACAAAAAAvlI4lDcs6GB1cnm/Tzo014CXWqidCdyE5t2lknWQd4QFcwAgAAAAAD60SpNc4O2KT7J0llKdSpcX1/Xxs97N715a1HsTFkmBBWMAIAAAAABR3y/ZE3VZqBhqfUttX4bzW9wkpDoexfYZIG2fMwftcQADMTA1AH0AAAAFZAAgAAAAAKl8zcHJRDjSjJeV/WvMxulW1zrTFtaeBy/aKKhadc6UBXMAIAAAAADBdWQl5SBIvtZZLIHszePwkO14W1mQ0izUk2Ov21cPNAVjACAAAAAAHvQLJ2X4ncjVv1BsOd/jbVouRPwf4222WlGSzPyAfnsAAzEwNgB9AAAABWQAIAAAAABb6LXDWqCp1beQgQjj8I3sRTtFhlrmiBi+h/+ikmrvugVzACAAAAAA9stpgTecT7uTyaGNs3K9Bp0A7R0QaIAOfscyMXHBPX8FYwAgAAAAANTp7TwMatTSeLH3GvsrB3IOvJo+0B8HRGUFcRzJVesdAAMxMDcAfQAAAAVkACAAAAAA97NTT+81PhDhgptNtp4epzA0tP4iNb9j1AWkiiiKGM8FcwAgAAAAAKPbHg7ise16vxmdPCzksA/2Mn/qST0L9Xe8vnQugVkcBWMAIAAAAACVl3ds8xRdGFPAtZ+WtC4ojC1q5OzB6dSJoOLaJsdhhgADMTA4AH0AAAAFZAAgAAAAAMqpayM2XotEFmm0gwQd9rIzApy0X+7HfOhNk6VU7F5lBXMAIAAAAACJR9+q5T9qFHXFNgGbZnPubG8rkO6cwWhzITQTmd6VgwVjACAAAAAAEEOCUo+ihRGl1kuHlabWBWUFyJPAXSXzpQB4od57cMEAAzEwOQB9AAAABWQAIAAAAADQnslvt6Hm2kJPmqsTVYQHE/wWeZ4bE1XSkt7TKy0r1gVzACAAAAAA8URTA4ZMrhHPvlp53TH6FDCzS+0+61qHm5XK6UiOrKEFYwAgAAAAANSmECEsZgXBpoAzAIwaU3H0K/6HIutCn+ELRGBFzykkAAMxMTAAfQAAAAVkACAAAAAAhujlgFPFczsdCGXtQ/002Ck8YWQHHzvWvUHrkbjv4rwFcwAgAAAAALbV0lLGcSGfE7mDM3n/fgEvi+ifjl7WZ5b3aqjDNvx9BWMAIAAAAACKzGyyRT/3KSmGlKMHpswxj7pNu3rCmOETZ2DUhQsCBgADMTExAH0AAAAFZAAgAAAAABm/6pF96j26Jm7z5KkY1y33zcAEXLx2n0DwC03bs/ixBXMAIAAAAAD01OMvTZI/mqMgxIhA5nLs068mW+GKl3OW3ilf2D8+LgVjACAAAAAAnPa8PTfcaSSAdzNAC54IMTicbShbtt/cSnFHz7u7g8wAAzExMgB9AAAABWQAIAAAAACfw9/te4GkHZAapC9sDMHHHZgmlTrccyJDPFciOMSOcwVzACAAAAAAIIC1ZpHObvmMwUfqDRPl4C1aeuHwujM1G/yJbvybMNAFYwAgAAAAAKeagivKdHk7wAFEPHyDhsm9mDNWH5UPB+oIIlmfbzSOAAMxMTMAfQAAAAVkACAAAAAAkxHJRbnShpPOylLoDdNShfILeA1hChKFQY9qQyZ5VmsFcwAgAAAAAKidrY+rC3hTY+YWu2a7fuMH2RD/XaiTIBW1hrxNCQOJBWMAIAAAAADH8s64PC+lo8Ul4oujBlb/irtJSwu12V0HbvNzj/9qUQADMTE0AH0AAAAFZAAgAAAAAByuYl8dBvfaZ0LO/81JW4hYypeNmvLMaxsIdvqMPrWoBXMAIAAAAABNddwobOUJzm9HOUD8BMZJqkNCUCqstHZkC76FIdNg9AVjACAAAAAA4SEPY+qEZ2dCu/9485IEVybiM3Z7szXYM+izxklNm14AAzExNQB9AAAABWQAIAAAAABxMy7X5hf7AXGDz3Y/POu1ZpkMlNcSvSP92NOO/Gs7wAVzACAAAAAAHJshWo2T5wU2zvqCyJzcJQKQaHFHpCpMc9oWBXkpUPoFYwAgAAAAANeYm2aAT9f6T9uwQyH4sw2DUQyKTpo/CHt+Bb67ao4TAAMxMTYAfQAAAAVkACAAAAAAlqbslixl7Zw3bRlibZbe/WmKw23k8uKeIzPKYEtbIy0FcwAgAAAAAHEKwpUxkxOfef5HYvulXPmdbzTivwdwrSYIHDeNRcpcBWMAIAAAAAASUKCgxGu2U42qfKiyMimLPiZBMurf5v8lQJUSrhAWIAADMTE3AH0AAAAFZAAgAAAAAMXrXx0saZ+5gORmwM2FLuZG6iuO2YS+1IGPoAtDKoKBBXMAIAAAAADIQsxCr8CfFKaBcx8kIeSywnGh7JHjKRJ9vJd9x79y7wVjACAAAAAAPzOTJ088k9QdGUpZ1ubUdkw3pTGkNF8bui4a9wqeo08AAzExOAB9AAAABWQAIAAAAAAm83FA9yDUpwkbKTihe7m53u+DivS9BU2b4vQMtCVQ2AVzACAAAAAAz3m1UB/AbZPa4QSKFDnUgHaT78+6iGOFAtouiBorEgEFYwAgAAAAAHuM0gIDED47c9rdQ4MVtVzpVfn5zTF7eokO4lew4EM+AAMxMTkAfQAAAAVkACAAAAAAJRzYK0PUwr9RPG2/7yID0WgcTJPB2Xjccp5LAPDYunkFcwAgAAAAAIIh24h3DrltAzNFhF+MEmPrZtzr1PhCofhChZqfCW+jBWMAIAAAAAD6LppOeSiLHFI7EOBAz9pZPKU7LWbRWIINTlqw4zmJ5gADMTIwAH0AAAAFZAAgAAAAAHSaHWs/dnmI9sc7nB50VB2Bzs0kHapMHCQdyVEYY30TBXMAIAAAAACkV22lhEjWv/9/DubfHBAcwJggKI5mIbSK5L2nyqloqQVjACAAAAAA1I5pLbnlFf/EfJlhE0RFIioDjzKrWNh4TO8H/ksrbMcAAzEyMQB9AAAABWQAIAAAAAAC/I4TQRtCl12YZmdGz17X4GqSQgfwCPgRBwdHmdwu+QVzACAAAAAAx8f3z2ut/RAZhleari4vCEE+tNIn4ikjoUwzitfQ588FYwAgAAAAADX1qVlsvYru6YDBMtmlQoIBhLSbVykBtft8wn/A+ohJAAMxMjIAfQAAAAVkACAAAAAADGB5yU2XT0fse/MPWgvBvZikVxrl5pf3S5K1hceKWooFcwAgAAAAAIxTmlLHMjNaVDEfJbXvRez0SEPWFREBJCT6qTHsrljoBWMAIAAAAAD+RiUk9IWfaDWXtzLp2NX/vCCeU9amFnQgpJjy56dQXQADMTIzAH0AAAAFZAAgAAAAABmO7QD9vxWMmFjIHz13lyOeV6vHT6mYCsWxF7hb/yOjBXMAIAAAAACT9lmgkiqzuWG24afuzYiCeK9gmJqacmxAruIukd0xEAVjA
CAAAAAAyiZxtuOZQNSit4wFMJS3jnqLbDzyJ0OtDTKs6r0EO0cAAzEyNAB9AAAABWQAIAAAAAAfPUoy7QyZKhIIURso+mkP9qr1izbjETqF5s22GwjCjAVzACAAAAAAvLMsIDQ/go4VUxeh50UHmsvMvfx51cwyONnRD2odvC0FYwAgAAAAAMwY/KvSIOLoSHOq4TQ1V9ZSCPZc34SdPooPL4gYXa7UAAMxMjUAfQAAAAVkACAAAAAAE3ZqUar0Bq2zWbARE0bAv98jBlK9UJ73/xcwdMWWlSkFcwAgAAAAAK4M+MmC+9sFiFsumMyJZQKxWmmJiuG9H7IzKw083xxkBWMAIAAAAACpywsi9vc979ApgH3BDzph7Y4VIAkQhji97sswzdLAYwADMTI2AH0AAAAFZAAgAAAAAMo8FJiOq63cAmyk2O7eI7GcbQh/1j4RrMTqly3rexftBXMAIAAAAADjVmpd0WiRGTw/gAqEgGolt2EI7Csv14vKdmYoMD0aAgVjACAAAAAAEPmYjDZnMZdkaYlLZLlRgist9kCrSQe0mSuKxPwwYbQAAzEyNwB9AAAABWQAIAAAAACu5IGaIx7A3Jvly/kzlCsSA4s3iJwuIl8jEdRH0k93NwVzACAAAAAA9NRUyxYE+t0Xyosyt6vIfMFW/vBoYg6sR+jBNs4JAxIFYwAgAAAAAKMbsr4JCCodHmkBvfhOe7BJigKiJik95wLeu3PjLSpXAAMxMjgAfQAAAAVkACAAAAAALkRy0GJInXYLA+cgjs6Myb0a+Gu9hgXhHvhLNoGWfckFcwAgAAAAANbALyt9zCSvwnLaWCd2/y2eoB7qkWTvv1Ldu8r40JPuBWMAIAAAAABi0dME7AUSoeMv4yIgnGMgrX9G+Uk6HV+I9/9zzABcZwADMTI5AH0AAAAFZAAgAAAAAGoUK/DSWhT8LZhszSUqDbTrp8cSA7rdqmADKL+MILtTBXMAIAAAAABHnEE9bVa6lvhfhEMkkV2kzSSxH/sMW/FIJuw3CzWs6wVjACAAAAAAagOaxIb7co+HxAXhfI9ahHLUNqmGuN/4JxeR9sm6PdkAAzEzMAB9AAAABWQAIAAAAAC922ZDQE3h2fQKibGMZ9hV0WNlmrPYYSdtaSyYxsWYqgVzACAAAAAAagMovciKK6WVjIc2cCj8nK5O/gVOFFVeVAJpRp89tmQFYwAgAAAAAAMsWoODZkTvKSNWzloogbp/AiFFspENp49RwKjWpMLHAAMxMzEAfQAAAAVkACAAAAAAtK+JqX3K/z2txjAU15DgX4y90DS2YLfIJFolCOkJJJwFcwAgAAAAAMnR5V7gfX7MNqqUdL5AkWlkhyFXaBRVNej+Rcn8lrQkBWMAIAAAAADOmDHuXwsu9uwKYYSN5XIb/TG3K5DB5aci/X1ATKTaSwADMTMyAH0AAAAFZAAgAAAAAAeGrIMK/bac6kPczxbvRYqKMkcpeI2FjdMpD91FDWIvBXMAIAAAAAAix62z1LeS8yvSXCl5gHSIomjyx76fF3S1lp9k900hygVjACAAAAAAUK1Gzlm0meDhYPL7ZgKaCD0qz7Zb5rtnPn1EEW3UZxoAAzEzMwB9AAAABWQAIAAAAACphf298InM0Us4HT8o1W1MGw0D/02vd7Jh+U0h7qaFaQVzACAAAAAAFXtk7YpqsOJxsqGWSIL+YcBE96G3Zz9D31gPqDW94y8FYwAgAAAAAEA5mnfyccI1SjyYftq3qCuORs/NxzzqUA+fxn3FRjVsAAMxMzQAfQAAAAVkACAAAAAATLDS2cuDVM3yDMuWNgk2iGKBTzPpfJMbvxVOSY39ZfcFcwAgAAAAAPT5wRi2cLHIUflXzm6EQB/m7xdThP80ir1VV/JBBqvxBWMAIAAAAABTj7i+CxxNVkbnAkSYga/v2X3/NH3nD/FRiOo2FPkzvAADMTM1AH0AAAAFZAAgAAAAAJNjExiZVX7jfFGfYpQu16qxLN0YPqVU/5CQ/Y67YSinBXMAIAAAAABMpm2+6KrkRUlXzQoMPHrQmIO6dkQz66tYdfTeA3dKqQVjACAAAAAATkNGECGUFzo34ItVnTEmxbe8dbde8QzMgEEdjMhRLz8AAzEzNgB9AAAABWQAIAAAAABlFkYtLCx901X6QVVMkSn6Z7k30UF4xHaA0OZJJ9bdyQVzACAAAAAATez+F9GHcGzTp7jjv4feboUNb8JCkIp4EqcPFisnq7MFYwAgAAAAAOHfn3eVRhSWn61AfZuAnCGleSLluJ3sF9IFGUWrCsBaAAMxMzcAfQAAAAVkACAAAAAA8NJKN0IxZnruhswGQkiruv8Ih0EMwDcSZx/Xasup9dkFcwAgAAAAAKaJZRxzA+Igeydvuk6cSwUHXcrmT4PjhuPu//FslpdnBWMAIAAAAAC3Cs/mKg+sbU/BC9QWFglOau6FWAP1sJqqSZOEOdzh+gADMTM4AH0AAAAFZAAgAAAAABHxHdEClz7hbSSgE58+dWLlSMJnoPz+jFxp4bB1GmLQBXMAIAAAAAD3nSvT6aGD+A110J/NwEfp0nPutlmuB5B+wA3CC3noGAVjACAAAAAA56lpvjMjFzR6NMGiSF1es1X01dwINWqaxyq3UUL8XJ0AAzEzOQB9AAAABWQAIAAAAAC/o8qW/ifk3KuJ01VFkyNLgQafxB5/bGs2G5VyyVafOwVzACAAAAAA1bMqAFGDHSl6BYNLbxApvkAv2K1/oafywiX0MDz1dGUFYwAgAAAAAMjJrrd6E5DyUuXkWZsAqo020Qvom6xqAluQLod5SmtdAAMxNDAAfQAAAAVkACAAAAAAY2V8I1bz3a1AxTtmED6UhdhA09huFkuuEX8R+d/WDPUFcwAgAAAAAPTVoNRiI76tcRKqd+JBBVyy4+YcKST42p0QX2BtmQ2VBWMAIAAAAAD0kQXxOWWOfIzqhJk5Rnvp/h5VXcGxxLZ8HMvVVH++YQADMTQxAH0AAAAFZAAgAAAAAO2kSsW0WGN9AOtK4xK2SHrGhWiaAbMEKT4iZkRpaDN/BXMAIAAAAABKGzQcPM8LT2dwOggxoWjv/1imYWabbG/G4kBw8OWaxAVjACAAAAAAT3sD7PTrAPtOtiSRrG4IKmSNr/uWgX8TvIyZpZzj4wkAAzE0MgB9AAAABWQAIAAAAAAiCwzNEEaH/mDam68IdDftnhthyUFdb+ZCNSBQ91WlHQVzACAAAAAA7tHyHcxCzmbJeFYZyPm4mEgkTGKOvwY4MX82OvH0Jn8FYwAgAAAAAP4A3nd470CwvaGzyJLfzbrw7ePJV35V3Cw6tuiPFlsCAAMxNDMAfQAAAAVkACAAAAAAGuCHVNJSuoVkpPOnS5s89GuA+BLi2IPBUr2Bg1sWEPIFcwAgAAAAAEl1gncS5/xO7bQ/KQSstRV3rOT2SW6nV92ZANeG2SR6BWMAIAAAAADi6/zM3xSBXEfpRpvEdeycRDl08SmwwLey72qgZaY7FwADMTQ0AH0AAAAFZAAgAAAAAMfxz7gEaoCdPvXr
ubDhCZUS0ARLZc1svgbXgMDlVBPgBXMAIAAAAAB6a5dDA3fuT5Vz2KvAcbUEFX/+B7Nw2p1QqbPoQ5TTuAVjACAAAAAAB4Kp5ewWpFMJ2T+QCrfw2eJV70L4M+GM8khV1JwqOqQAAzE0NQB9AAAABWQAIAAAAACnYsqF/VzmjIImC9+dqrHO1TM6lJ6fRwM0mM6Wf6paOwVzACAAAAAA5tgZzch8uDCR1ky3SllVaKVpxAlbrhvlNDTazZZRZOAFYwAgAAAAALDJDWkmExE4L8hf2CDNLuF+dgivUGXb7ZvBp90EALm+AAMxNDYAfQAAAAVkACAAAAAAEC0sIVmadtW4YMuRXH7RpAhXclsd+3bmqGXCMeaT014FcwAgAAAAABPpXh0uzpsJJB+IRUNajmMB9WGwswfpw5T9xk3Xj6ANBWMAIAAAAABauth7HsI90juToIGY6149TbrxP8Emoa+5Izilj9zeWQADMTQ3AH0AAAAFZAAgAAAAABaIB3n49Xm9cOafSrQsE0WCcYp8rMIO/qVwIlMF5YLRBXMAIAAAAAC9EyWJV3xOu9bzgdJ/yX+ko7qLf1u3AxNMataW2C9EzQVjACAAAAAAy7IAYSgbqP7CyvurHMoEq1eO08TIPNO/XcS1L5RKXHkAAzE0OAB9AAAABWQAIAAAAAA5rZItA/cocRnngYqcJ3nBXQ+l688aKz3EQyLbYYunPAVzACAAAAAAwKyA+L7TgxztPClLrIMk2JXR+w7c04N3ZOqPgjvrIvsFYwAgAAAAABaAwqPDgkTaz4888/3PeF64+vtHbLtAZgFgEurWT5kbAAMxNDkAfQAAAAVkACAAAAAA3htn7oHJ0YYpIrs+Mzyh85Ys67HwAdv5LQl1mCdoMWkFcwAgAAAAAEHjCtNNLenHuSIYux6ezAHsXDaj2DlTF67ToDhDDe6HBWMAIAAAAAAVwUZIMifuPtmiFSEctLt+bOFo1T4fgGiWNsZAP0ddeQADMTUwAH0AAAAFZAAgAAAAAEnt18Km/nqggfIJWxzTr9r3hnXNaueG6XO9A5G11LnGBXMAIAAAAAD7QxzGMN/ard5TfFLecE6uusMmXG2+RBsBR+/NCQHUwAVjACAAAAAAIPTDniUskKj5mEzULdlKtIDquwjcqkwunJyhmBazNNcAAzE1MQB9AAAABWQAIAAAAAAVAKK/GoY8AACu/hyMpO4hdLq6JnEyWNzkyci9sbaD/wVzACAAAAAA2HmeqpMlvvBpV2zQTYIRmsc4MFlfHRwLof0ycJgMg/MFYwAgAAAAACHHCAszhxijRN9Es+XRQxXo7JQ6g2qM6f5sIdMy0VKbAAMxNTIAfQAAAAVkACAAAAAAhHyq1GQC/GiMwpYjcsfkNxolJ10ARKjIjfkW1Wipzi0FcwAgAAAAAD/uaGWxTDq87F8XZ6CrFI+RNa8yMqfSZdqK00Kj833BBWMAIAAAAAD1mJD7i29l2xloVHwS1n0CbkyodWeFfZM1KF1r4HqNIgADMTUzAH0AAAAFZAAgAAAAABAJJxHoZD+MQBWqm9UM9Dd3z5ZohIZGWRaRVRsMptKQBXMAIAAAAADrE/ca+gqj/SH4oao4wE4qn2ovoTydzcMbDbrfnUs3zAVjACAAAAAAAIUpF9mApVCMCckE7S+K9RL6TPtlTTYuzUsj9E6oIpUAAzE1NAB9AAAABWQAIAAAAAAv01wz7VG9mTepjXQi6Zma+7b/OVBaKVkWNbgDLr1mFgVzACAAAAAA0I5sxz8r6wkCp5Tgvr+iL4p6MxSOq5d3e1kZG+0b7NkFYwAgAAAAAKwOaUTAOw6Y1F5vFrbDf8yVua5Fm6hKhkdIrzejuaatAAMxNTUAfQAAAAVkACAAAAAAWXecRwxSon68xaa9THXnRDw5ZfzARKnvvjTjtbae6T0FcwAgAAAAAPh0UfUMEo7eILCMv2tiJQe1bF9qtXq7GJtC6H5Va4fIBWMAIAAAAAASanx14WR2rmomuZ2x/nuD8j75pYzz+ZAt/v7uRcoOEQADMTU2AH0AAAAFZAAgAAAAAEY5WL8/LpX36iAB1wlQrMO/xHVjoO9BePVzbUlBYo+bBXMAIAAAAABoKcpadDXUARedDvTmzUzWPe1jTuvD0z9oIcZmKuiSXwVjACAAAAAAWhdUWP186dYAIDx6RtC0o/lzwYNvYBXm4RWFMbcU3YkAAzE1NwB9AAAABWQAIAAAAADnJnWqsfx0xqNnqfFGCxIplVu8mXjaHTViJT9+y2RuTgVzACAAAAAAWAaSCwIXDwdYxWf2NZTly/iKVfG/KDjHUcA1BokN5sMFYwAgAAAAAPS3GDNsHgCuTgEWTHiEStoY0VMzlopZ8L7wPUquK7ayAAMxNTgAfQAAAAVkACAAAAAAsPG7LaIpJvcwqcbtfFUpIjj+vpNj70Zjaw3eV9T+QYsFcwAgAAAAAJQ71zi0NlCyY8ZQs3IasJ4gB1PmWx57HpnlCf3+hmhqBWMAIAAAAABmDfQAQeyeutCB+wSn4RmErwYEkYpyBeCYA+3iutFzAQADMTU5AH0AAAAFZAAgAAAAAAGiSqKaQDakMi1W87rFAhkogfRAevnwQ41onWNUJKtuBXMAIAAAAAASgiDpXfGh7E47KkOD8MAcX8+BnDShlnU5JAGdnPdqOAVjACAAAAAAucFM6KF1YHEhH9vn16ox+VSqNEykMGaJzhLPsLI7qk8AAzE2MAB9AAAABWQAIAAAAAB7L4EnhjKA5xJD3ORhH2wOA1BvpnQ+7IjRYi+jjVEaJAVzACAAAAAAuhBIm0nL3FJnVJId+7CKDASEo+l2E89Z9/5aWSITK4AFYwAgAAAAALxpb5qDek038/wz4E//KIrwwo0voaqwlB7cTXSQ44EHAAMxNjEAfQAAAAVkACAAAAAAaROn1NaDZFOGEWw724dsXBAm6bgmL5i0cki6QZQNrOoFcwAgAAAAANVT8R6UvhrAlyqYlxtmnvkR4uYK/hlvyQmBu/LP6/3ZBWMAIAAAAAC1ONheOA57vNof7iZM3b82xk1abqT0bppzjQnHCmnUIwADMTYyAH0AAAAFZAAgAAAAADqSR5e0/Th59LrauDA7OnGD1Xr3H3NokfVxzDWOFaN7BXMAIAAAAACt30faNwTWRbvmykDpiDYUOCwA6QDbBBYBFWS7rdOB4AVjACAAAAAAQZlvU/gzOwW2rcr5+W43Q6EkfX8oX1TpRJWEAZAULx4AAzE2MwB9AAAABWQAIAAAAADmtb1ZgpZjSeodPG/hIVlsnS8hoRRwRbrTVx89VwL62AVzACAAAAAAi38e1g6sEyVfSDkzZbaZXGxKI/zKNbMasOl2LYoWrq8FYwAgAAAAAFWkHkxPA28FjeUjeRILC0tX8OJcanhPP68QxM5s1kcMAAMxNjQAfQAAAAVkACAAAAAAf/f7AWVgBxoKjr7YsEQ4w/fqSvuQWV2HMiA3rQ7ur0sFcwAgAAAAADkkeJozP6FFhUdRIN74H4UhIHue+eVbOs1NvbdWYFQrBWMAIAA
AAADS3qKdAEzPtGMFpsisEhnOfF0zfxmLXjHKKaNCzX4PhQADMTY1AH0AAAAFZAAgAAAAAI2WEOymtuFpdKi4ctanPLnlQud+yMKKb8p/nfKmIy56BXMAIAAAAADVKrJmhjr1rfF3p+T+tl7UFd1B7+BfJRk0e7a4im7ozgVjACAAAAAAxeqPjcgDwae+AgEXg3hU118dHoy6rcHd6jAJrGik+EcAAzE2NgB9AAAABWQAIAAAAAAuHU9Qd79hjyvKOujGanSGDIQlxzsql8JytTZhEnPw+AVzACAAAAAAjF2gV/4+sOHVgDd/oR5wDi9zL7NGpGD+NsEpGXy/a4QFYwAgAAAAAIFYquIAyUn3D2peptolq3s8oIgSLEIctqBYx6qEeQTxAAMxNjcAfQAAAAVkACAAAAAA7dQDkt3iyWYCT94d7yqUtPPwp4qkC0ddu+HFdHgVKEkFcwAgAAAAANuYvtvZBTEq4Rm9+5eb7VuFopowkrAuv86PGP8Q8/QvBWMAIAAAAABBO8poBKW/RWjdHBARy3Rzghz3frQIZEPE/nSKWYlVPgADMTY4AH0AAAAFZAAgAAAAAAwnZSDhL4tNGYxlHPhKYB8s28dY5ScSwiKZm3UhT8U3BXMAIAAAAABDoY6dhivufTURQExyC9Gx3ocpl09bgbbQLChj3qVGbgVjACAAAAAA4JWOs1XAUgX3rGBBGEDDIC9/i9khJ+FgIcwfvj/SnS0AAzE2OQB9AAAABWQAIAAAAAC2VuRdaC4ZJmLdNOvD6R2tnvkyARteqXouJmI46V306QVzACAAAAAAMn1Z6B35wFTX9mEYAPM+IiJ5hauEwfD0CyIvBrxHg7IFYwAgAAAAAMRQnV65wbGlT+7Wj/HvGPLKg8NBnhkQ8Hx/MIyWvynTAAMxNzAAfQAAAAVkACAAAAAAVerb7qVNy457rNOHOgDSKyWl5ojun7iWrv1uHPXrIZQFcwAgAAAAAIDcYS9j5z+gx0xdJj09L7876r/vjvKTi/d3bXDE3PhyBWMAIAAAAAANDuaZsI2eS6/qiSdG9pCLjcPdKS96xWUJr84V/1La7gADMTcxAH0AAAAFZAAgAAAAAH/E44yLxKCJjuSmU9A8SEhbmkDOx1PqqtYcZtgOzJdrBXMAIAAAAABgLh9v2HjBbogrRoQ82LS6KjZQnzjxyJH4PH+F3jupSAVjACAAAAAAd93miIlZ/uuhzjVBt0BPjzeewxih6TSzkzbsNhpMjA8AAzE3MgB9AAAABWQAIAAAAAAlNUdDL+f/SSQ5074mrq0JNh7CTXwTbbhsQyDwWeDVMwVzACAAAAAANIH2IlSNG0kUw4qz0budjcWn8mNR9cJlYUqPYdonucAFYwAgAAAAAF9OccClBu1j8BVMDmMtJyCxnhWCbg9FuY/8nhuFGJknAAMxNzMAfQAAAAVkACAAAAAASZ+CvUDtlk/R4HAQ3a+PHrKeY/8ifAfh0oXYFqliu80FcwAgAAAAAJelpzPgM65OZFt/mvGGpwibclQ49wH+1gbUGzd9OindBWMAIAAAAAC63uwNXVbd1VP5If++NprVRsHCbomU9Iq3GxCttNAweAADMTc0AH0AAAAFZAAgAAAAAN9fkoUVbvFV2vMNMAkak4gYfEnzwKI3eDM3pnDK5q3lBXMAIAAAAACnDkgVNVNUlbQ9RhR6Aot2nVy+U4km6+GHPkLr631jEAVjACAAAAAA5d2REAVBZI9rUfeqJ9jhePmRXvrGolUbMTFiC78k/OIAAzE3NQB9AAAABWQAIAAAAAASyK3b1nmNCMptVEGOjwoxYLLS9fYWm/Zxilqea0jpEQVzACAAAAAADDHsGrbqlKGEpxlvfyqOJKQJjwJrzsrB7k3HG0AUJbkFYwAgAAAAACZHzWGY3PEL4uPSKPezGGJkyScYw/Vt3bKmX5inVMaOAAMxNzYAfQAAAAVkACAAAAAAlSP9iK31GlcG9MKGbLmq+VXMslURr+As736rrVNXcsUFcwAgAAAAAAvbj0zfq9zzi8XReheKFbCB+h9IsOLgXPPpI5vrEJNZBWMAIAAAAABb9Y2Ox3MeHCfGnMxpR7nk6hwIJg0J8/tT8fZHwxMCBQADMTc3AH0AAAAFZAAgAAAAAOJNORH8Bev97gVU7y6bznOxJ+E6Qoykur1QP76hG1/7BXMAIAAAAAC+C1PtOOrSZgzBAGhr+dPe/kR0JUw9GTwLVNr61xC1aAVjACAAAAAA4rDk/UlRHudzR12i5Ivcb5Pxwn63JX2Sjf2xrlALL7kAAzE3OAB9AAAABWQAIAAAAAAKcHzLUomavInN6upPkyWhAqYQACP/vdVCIYpiy6U6HgVzACAAAAAATsR4KItY6R2+U7Gg6sJdaEcf58gjd1OulyWovIqfxKcFYwAgAAAAAEac2NX3v0n87D/eUM/a3Gj+5Axs896SCidMyOMaOz6zAAMxNzkAfQAAAAVkACAAAAAAqTOLiMpCdR59tLZzzIPqJvbCNvz2XQL9ust0qYaehtcFcwAgAAAAAArefox/3k5xGOeiw2m6NUdzuGxmPwcu5IFcj+jMwHgHBWMAIAAAAABK08kOnYjDWhLJ70iVhd3XEj0Q0srEQttJTawBm6GuOwADMTgwAH0AAAAFZAAgAAAAAIPSqSeVzSRgNVNmrPYHmUMgykCY27NbdDUNhE5kx/SgBXMAIAAAAAAhX90nNfxyXmZe/+btZ7q6xMX4PFyj0paM1ccJ/5IUUQVjACAAAAAA/9XTbq8UVKJbIeYtfNg8pdDrpDqbCwVr5Rq3Tb4dFwwAAzE4MQB9AAAABWQAIAAAAACLn5DxiqAosHGXIAY96FwFKjeqrzXWf3VJIQMwx1fl4gVzACAAAAAAindvU27nveutopdvuHmzdENBbeGFtI3Qcsr07jxmvm8FYwAgAAAAAJ3GMHBeIaCyFV5zBgcUMpjfIDcdoaTYQSyQN2shVJMWAAMxODIAfQAAAAVkACAAAAAA7i2S6rHRSPBwZEn59yxaS7HiYBOmObIkeyCcFU42kf8FcwAgAAAAAGb3RSEyBmgarkTvyLWtOLJcPwCKbCRkESG4RZjVmY4iBWMAIAAAAABKIdcHRSJ3dnevw5Pj0BfULPqmDOA7KBDREMGv5rXUEAADMTgzAH0AAAAFZAAgAAAAAFz+M+H/Z94mdPW5oP51B4HWptp1rxcMWAjnlHvWJDWrBXMAIAAAAACBFEOQyL7ZHu4Cq33QvXkmKuH5ibG/Md3RaED9CtG5HwVjACAAAAAABf2EvIMumZohp2E/sHWEJW4VMU3ez95hwWU9pjKGRCYAAzE4NAB9AAAABWQAIAAAAAAE7c2x3Z3aM1XGfLNk/XQ9jCazNRbGhVm7H8c2NjS5ywVzACAAAAAARJ9h8fdcwA19velF3L/Wcvi2rCzewlKZ2nA0p8bT9uwFYwAgAAAAAPl1vH6JaJGZSJ8au0NK58X4gqleryub8slUhA0HRtzmAAMxODUAfQAAAAVkACAAAAAAVoRt9B9dNVvIMGN+ea
5TzRzQC+lqSZ8dd/170zU5o9cFcwAgAAAAAEwM95XZin5mv2yhCI8+ugtKuvRVmNgzzIQN0yi1+9aIBWMAIAAAAAA+rgph1avVrhyHl/6G+HtASJSiJkv7u2UPYj+25fwtLAADMTg2AH0AAAAFZAAgAAAAAKRDkjyWv/etlYT4GyoXrmBED2FgZHnhc+l9Wsl06cH2BXMAIAAAAABohlpm3K850Vndf3NmNE0hHqDlNbSR8/IvMidQ3LnIZAVjACAAAAAAKw6WqWDrI7ZfyndtrftL3uh7BaF/FQxhkh8b0Zl547gAAzE4NwB9AAAABWQAIAAAAAC3DFh5oklLCNLY90bgWm68dFXz65JpAZSp1K99MBTPAQVzACAAAAAAQgZecmxEUZVHoptEQClDwAf8smI3WynQ/i+JBP0g+kQFYwAgAAAAADmUpVNJUwrydIllFvI/aau1703nk6tF2sfzkgFlWsKCAAMxODgAfQAAAAVkACAAAAAAQ5WKvWSB503qeNlOI2Tpjd5blheNr6OBO8pfJfPNstcFcwAgAAAAAKwHgQLSDJ5NwLBQbY5OnblQIsVDpGV7q3RCbFLD1U4/BWMAIAAAAADKOab9GTEiaybo9o4J72Pb9LiQU/CuKoQm6Jtlmo/IeQADMTg5AH0AAAAFZAAgAAAAAGfhFY3RGRm5ZgWRQef1tXxHBq5Y6fXaLAR4yJhrTBplBXMAIAAAAACKEF0ApLoB6lP2UqTFsTQYNc9OdDrs/vziPGzttGVLKQVjACAAAAAAvJcnqBNCZgHe+Q7Cqk+yiAaIuSJIRItXEsAlKy9PES4AAzE5MAB9AAAABWQAIAAAAAAIM73gPcgzgotYHLeMa2zAU4mFsr7CbILUZWfnuKSwagVzACAAAAAAJCSu98uV8xv88f2BIOWzt6p+6EjQStMBdkGPUkgN79cFYwAgAAAAABQqDP10DcaqEY7vxFS7U/9xcadDASONow1+xmVsPZ11AAMxOTEAfQAAAAVkACAAAAAAkWmHCUsiMy1pwZTHxVPBzPTrWFBUDqHNrVqcyyt7nO8FcwAgAAAAAMv2CebFRG/br7USELR98sIdgE9OQCRBGV5JZCO+uPMgBWMAIAAAAABxLEFq6nFVI3WaYhggo1d6oeDsBSm4wCW0T6D0yE/puwADMTkyAH0AAAAFZAAgAAAAAGInUYv0lP/rK7McM8taEHXRefk8Q2AunrvWqdfSV7UaBXMAIAAAAACE+WPxJ3gan7iRTbIxXXx+bKVcaf8kP4JD8DcwU0aL7wVjACAAAAAAiThzC5jAZ5D6xuCIsUD1aLy7KxmT2Fb8ybTkLxvj17MAAzE5MwB9AAAABWQAIAAAAACmHlg2ud3cplXlTsNTpvNnY6Qm1Fce0m899COamoDjaQVzACAAAAAArtJQeJIlepBWRU2aYar7+YGYVQ7dfDc1oxgTmA8r9q0FYwAgAAAAADZpcxtBFv18EdmhxsoxlDs253tNB5zqVY/h+nFD1isIAAMxOTQAfQAAAAVkACAAAAAApzcWSAbZWV/Rq+ylRNqqlJqNVR4fhXrz4633/MQOQgcFcwAgAAAAAN/jz/bsEleiuCl+li83EWlG6UMHA8CyaOMRKCkXkSCPBWMAIAAAAADSTHgALJugYsjIvSK1o0UwG6VizNokXQ/tnPNjOpGB1AAABWUAIAAAAADrmnP3kS2GpCl+gdL2da90KHTkBX46iQ/sZRoj7uPz7BJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAAA==", + "subType": "06" + } + } + } + } + } + ], + "cursor": {}, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimal", + "bsonType": "decimal", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + } + } + } + }, + "command_name": "aggregate" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": { + "$numberInt": "0" + }, + "encryptedDecimal": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "5nRutVIyq7URVOVtbE4vM01APSIajAVnsShMwjBlzkM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "rbf3AeBEv4wWFAKknqDxRW5cLNkFvbIs6iJjc6LShQY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "n+XAuFnP8Dov9TnhGFxNx0K/MnVM9WbJ7RouEu0ndO0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yRXojuVdn5GQtD97qYlaCL6cOLmZ7Cvcb3wFjkLUIdM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "DuIkdRPITRs55I4SZmgomAHCIsDQmXRhW8+MOznkzSk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SsBk+Et1lTbU+QRPx+xyJ/jMkmfG+QCvQEpip2YYrzA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "crCIzOd8KhHvvUlX7M1v9bhvU4pLdTc+X2SuqoKU5Ek=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "YOWdCw4UrqnxkAaVjqmC4sKQDMVMHEpFGnlxpxdaU6E=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "M3SShp81Ff8tQ632qKbv9MUcN6wjDaBReI0VXNu6Xh4=", + "subType": 
"00" + } + }, + { + "$binary": { + "base64": "gzHlSPxpM0hT75kQvWFzGlOxKvDoiKQZOr19V6l2zXI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "s3JnppOGYw9SL2Q1kMAZs948v2F5PrpXjGei/HioDWs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cG6+3Gk/zEH68P/uuuwiAUVCuyJwa1LeV+t29FlPPAo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dupdvR3AyJtM+g9NDKiaLVOtGca387JQp8w+V03m7Ig=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JqEQc5svj2jTvZ6LLA5ivE+kTb/0aRemSEmxk4G7Zrg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "szcXXXKnob+p3SoM4yED2R920LeJ7cVsclPMFTe4CeI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "o1QoGVXmuBdHwHm7aCtGMlMVKrjFdYvJXpoq6uhIAZ0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Jfm5wPlqqLCJRGQIqRq2NGmpn7s0Vrih2H3YAOoI2YU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zMHLb8ARbsYo8Ld05bqnGFf1Usha6EGb8QKwdSAyps0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yQdtq9lh5pugL7/i0Bj/PuZUUBUIzf+7wj1rl5y736w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wGWVZdO7qIuyDg/BqDgqjgoQ02h5YYgwXQB1oCin2NE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "by9HMLj6NTEpgztZ5HSN6GxImkXPcaFINYDzgZY33X8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tWo0vbasi7bXmn/MsOx13VC1IsWtpx/nYp0uj4iMzdA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tQQpndUYd5O87lOtrGjH3wl9VsOK0ray7RMasL90sBM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cQjXEDCMsOpKLLf+vlTgIHA+cbSJdzqhbSX9Wvh95aA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "7yMpU48IxK9SzP2cx3VnTownGEwFmeFofuuFT97SuuY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kSOx1kz0CmBgzKQHZlo65ZUY1DIv9A99JRm+Us2y6Ew=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ubQpdPBe6/xvtr+AcXdfYLSvYCR4ot0tivehkCsupb4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xal+iCJ6FTefRQToyoNksc9NCZShyn04NDGi4IYrcoM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "d7jU4iOK50xHxlkSifcxlZFCM46TSgQzoYivxG3HNLY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tJvl2nsBLBVzL3pp6sKWCL4UXeh3q/roYBJjSb74ve0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "OIUCaKRvIx9t1w6Hxlz1IcQTdPNCfdRNwnnTm10W+X0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "A9tvzsiElotOUVIB4CqfQp9mAwqvTM35YkmAR170aHA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "lI8gpK7hpb7c9x4RQugsxMnQay5LZJmwslZdvMx/dcE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dNCzh40U0XvdKnSDi3HRQOWQftEsDVqc4uUvsVFGoq8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "IP+iwEBWBwVVZIdpaMu8k5+soFCz+TZkYn3drKZ9grE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pnqyh6e0y5svHkJDShlN9CHV0WvMBE4QbtJpQw5ZCXc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "elEl42tbVDoRTLjAhZUFEtXiut4b3PVhg/1ZLZSQdtE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "vHuu2FxwclMHqyE6JBYbTYgbEkB0dqb/JuaxsvfwsmY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xTf7NCe3Gf8QpE78HR5OknlLTKfs9J+RN9UZpH6fnso=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "XiWSasRnJAulGR6+LCVD3mwRObXylqYWR9jvpywq12c=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "MZMxEQ5ikx0PG1YFIExv0UnTZogsvgeOEZTpzvBDn4w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yZMyMZBDrWbAhvnic7vvIYhmO9m5H2iuv0c8KNZrBzY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": 
"xxM14hTPY5j0vvcK2C7YAEjzdsfUTFHozHC0hEo1bxI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+01rqR1xVwkpGXcstbk1ItJqFVjH6Q8MGxEN3Cm9Y1A=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xOpLV0Z2VTRJ3iWtnWZcsyjXubTIkYWo31cO+HV1o1k=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "BWUOLqgLBqc5NwxVlSV5H3KFQPXbCp7mdo+jF+8cJqY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "fuQb1S6xZDGlrEbK+kI23aL53PP1PVNwqICnZNt9Yzg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SfscnoibFttahLdPVC4Ee+47ewGFKpDSU7M6HX19bKE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "rpSW2awybNVeKtat91VFxqbINoTfNhPfQAu+d73Xtf8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "9M/CP9ccOIIj2LLFmE0GFDO0Ban2wsNalEXfM6+h+1s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "WrEMG49l1ye4MhXs5ZS9tz8P6h+hDvthIg/2wW9ne1Q=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ImNhbfeyfH8qIEeA5ic0s3dAQBdzzTBS+CPsNih9vZ0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dWP33YDSn04UKJN2ogh2Rui0iW/0q2y18OCDRVcfyoo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "lYv0isAtfGh6H9tdp3cp2eHU7q2J+uk7QrgcxtK3w7Y=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "VGMoamB/+7zTOYcY/pqJc96xlv2PdW4hwsIAEIslTDQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yNeBWMF7BnD9wVwz2PgJsvWr77QiVvvWUvJF0+fqBug=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SfpvObJ+tJBXSvqeN7vlOfmhYign635lciYAJIjUtY8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dsen4NqjzVGjpjufiTMs3+gqeD09EbnuogPgxrJECwg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pxCWVM3sn19NsFEpgHbgLa+PmYlhN3mMiP0Wk8kJhYw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q11KNvJszjYIB9n9HcC+N4uz11a3eRj1L3BH9scKMDQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "A1PmkgcEToWh1JiVWE6mI5jUu7poxWWuCUt/cgRUUDc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "qJo3Hu4PJeanL7XEaWXO/n3YsodhZyd+MJOOmB9Kpd8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "BkBKLO8URFscfRY9Bav/1+L9mLohDgNr/MkZtGiraIs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "rZq5WA3Hx3xthOyHAJXK//f8pE2qbz7YKu3TIMp9GFY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "X07a/Lm80p5xd4RFs1dNmw+90tmPDPdGiAKVZkxd4zY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6YrBn2ofIw1b5ooakrLOwF41BWrps8OO0H9WH4/rtlE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "0l86Ag5OszXpa78SlOUV3K9nff5iC1p0mRXtLg9M1s4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Hn6yuxFHodeyu7ISlhYrbSf9pTiH4TDEvbYLWjTwFO0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zdf4y2etKBuIpkEU1zMwoCkCsdisfXZCh8QPamm+drY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "rOQ9oMdiK5xxGH+jPzOvwVqdGGnF3+HkJXxn81s6hp4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "61aKKsE3+BJHHWYvs3xSIBvlRmKswmaOo5rygQJguUg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "KuDb/GIzqDM8wv7m7m8AECiWJbae5EKKtJRugZx7kR0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Q+t8t2TmNUiCIorVr9F3AlVnX+Mpt2ZYvN+s8UGict8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tJRZIpKxUgHyL83kW8cvfjkxN3z6WoNnUg+SQw+LK+k=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pnUsYjip8SvW0+m9mR5WWTkpK+p6uwJ6yBUAlBnFKMk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "PArHlz+yPRYDycAP/PgnI/AkP8Wgmfg++Vf4UG1Bf0E=", + 
"subType": "00" + } + }, + { + "$binary": { + "base64": "wnIh53Q3jeK8jEBe1n8kJLa89/H0BxO26ZU8SRIAs9Q=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "4F8U59gzBLGhq58PEWQk2nch+R0Va7eTUoxMneReUIA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ihKagIW3uT1dm22ROr/g5QaCpxZVj2+Fs/YSdM2Noco=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "EJtUOOwjkrPUi9mavYAi+Gom9Y2DuFll7aDwo4mq0M0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dIkr8dbaVRQFskAVT6B286BbcBBt1pZPEOcTZqk4ZcI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "aYVAcZYkH/Tieoa1XOjE/zCy5AJcVTHjS0NG2QB7muA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "sBidL6y8TenseetpioIAAtn0lK/7C8MoW4JXpVYi3z8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "0Dd2klU/t4R86c2WJcJDAd57k/N7OjvYSO5Vf8KH8sw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "I3jZ92WEVmZmgaIkLbuWhBxl7EM6bEjiEttgBJunArA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "aGHoQMlgJoGvArjfIbc3nnkoc8SWBxcrN7hSmjMRzos=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "bpiWPnF/KVBQr5F6MEwc5ZZayzIRvQOLDAm4ntwOi8g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tI7QVKbE6avWgDD9h4QKyFlnTxFCwd2iLySKakxNR/I=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "XGsge0CnoaXgE3rcpKm8AEeku5QVfokS3kcI+JKV1lk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JQxlryW2Q5WOwfrjAnaZxDvC83Dg6sjRVP5zegf2WiM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "YFuHKJOfoqp1iGVxoFjx7bLYgVdsN4GuUFxEgO9HJ5s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Z6vUdiCR18ylKomf08uxcQHeRtmyav7/Ecvzz4av3k4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SPGo1Ib5AiP/tSllL7Z5PAypvnKdwJLzt8imfIMSEJQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "m94Nh6PFFQFLIib9Cu5LAKavhXnagSHG6F5EF8lD96I=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pfEkQI98mB+gm1+JbmVurPAODMFPJ4E8DnqfVyUWbSo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "DNj3OVRLbr43s0vd+rgWghOL3FqeO/60npdojC8Ry/M=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kAYIQrjHVu49W8FTxyxJeiLVRWWjC9fPcBn+Hx1F+Ss=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "aCSO7UVOpoQvu/iridarxkxV1SVxU1i9HVSYXUAeXk4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Gh6hTP/yj1IKlXQ+Q69KTfMlGZjEcXoRLGbQHNFo/1s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "/gDgIFQ4tAlJk3GN48IS5Qa5IPmErwGk8CHxAbp6gs0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "PICyimwPjxpusyKxNssOOwUotAUbygpyEtORsVGXT8g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "4lu+cBHyAUvuxC6JUNyHLzHsCogGSWFFnUCkDwfQdgI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pSndkmoNUJwXjgkbkgOrT5f9nSvuoMEZOkwAN9ElRaE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tyW+D4i26QihNM5MuBM+wnt5AdWGSJaJ4X5ydc9iWTU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "9Syjr8RoxUgPKr+O5rsCu07AvcebA4P8IVKyS1NVLWc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "67tPfDYnK2tmrioI51fOBG0ygajcV0pLo5+Zm/rEW7U=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "y0EiPRxYTuS1eVTIaPQUQBBxwkyxNckbePvKgChwd0M=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "NWd+2veAaeXQgR3vCvzlI4R1WW67D5YsVLdoXfdb8qg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "PY5RQqKQsL2GqBBSPNOEVpojNFRX/NijCghIpxD6CZk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": 
"lcvwTyEjFlssCJtdjRpdN6oY+C7bxZY+WA+QAqzj9zg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CWE7XRNylvTwO/9Fv56dNqUaQWMmESNS/GNIwgBaEI0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ijwlrUeS8nRYqK1F8kiCYF0mNDolEZS+/lJO1Lg93C8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "8KzV+qYGYuIjoNj8eEpnTuHrMYuhzphl80rS6wrODuU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wDyTLjSEFF895hSQsHvmoEQVS6KIkZOtq1c9dVogm9I=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SGrtPuMYCjUrfKF0Pq/thdaQzmGBMUvlwN3ORIu9tHU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "KySHON3hIoUk4xWcwTqk6IL0kgjzjxgMBObVIkCGvk4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "hBIdS9j0XJPeT4ot73ngELkpUoSixvRBvdOL9z48jY8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Tx6um0q9HjS5ZvlFhvukpI6ORnyrXMWVW1OoxvgqII0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zFKlyfX5H81+d4A4J3FKn4T5JfG+OWtR06ddyX4Mxas=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cGgCDuPV7MeMMYEDpgOupqyNP4BQ4H7rBnd2QygumgM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "IPaUoy98v11EoglTpJ4kBlEawoZ8y7BPwzjLYBpkvHQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Pfo4Am6tOWAyZNn8G9W5HWWGC3ZWmX0igI/RRB870Ro=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "fnTSjd7bC1Udoq6iM7UDnHAC/lsIXSHp/Gy332qw+/I=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "fApBgVRrTDyEumkeWs5p3ag9KB48SbU4Si0dl7Ns9rc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "QxudfBItgoCnUj5NXVnSmWH3HK76YtKkMmzn4lyyUYY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "sSOvwhKa29Wq94bZ5jGIiJQGbG1uBrKSBfOYBz/oZeI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "FdaMgwwJ0NKsqmPZLC5oE+/0D74Dfpvig3LaI5yW5Fs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "sRWBy12IERN43BSZIrnBfC9+zFBUdvjTlkqIH81NGt4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "/4tIRpxKhoOwnXAiFn1Z7Xmric4USOIfKvTYQXk3QTc=", + "subType": "00" + } + } + ] + }, + { + "_id": { + "$numberInt": "1" + }, + "encryptedDecimal": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "bE1vqWj3KNyM7cCYUv/cnYm8BPaUL3eMp5syTHq6NF4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "RGTjNVEsNJb+DG7DpPOam8rQWD5HZAMpRyiTQaw7tk8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "I93Md7QNPGmEEGYU1+VVCqBPBEvXdqHPtTJtMOn06Yk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "GecBFQ1PemlECWZWCl7f74vmsL6eB6mzQ9n6tK6FYfs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "QpjhZl+O1ORifgtCZuWAdcP6OKL7IZ2cA46v8FJcV28=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "RlQWwhU+uVv0a+9IB5cUkEfvHBvOw3B1Sx6WfPWMqes=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ubb81XTC7U+4tcNzf1oYvOY6gR5hC2Izqx54f4GuJ0E=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6M4Q5NMQ9TqNnjzGOxIkiUIY8TEL0I3XD1QnhefQUqU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "BtInzk9t2FFMCEY6AQ7zN8jwrrZEs2irSv6q0Q4NaIw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6vxXfETu9cuBIpRBo3jUUU04mJIH/aAhLX8K6VI5Xv0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wXPCdS+q23zi1bkPnaVG2j0PsVtxdeSLJ//h6J1x8RU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "KY3KkfBAsN2l80wbpj41G0gwBR5KmmFnZcagg7D3ENk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": 
"tI8NFAxXCX4VOnY5X73K6KI/Yspd3aR94KV39MhJlAw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "nFxH0UC3mATKA6Vboz+QX/hAjj19kF/SH6H5Cne7qC0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q8hYqIYaIi7nOdG/7qQZYnz8Bsacfi66M1nVku4SH08=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "4saA92R4arp4anvD9xFtze+sNcQqTEhPHyl1h70A8NE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "DbIziOBRRyeQS6RtBR09E37LV+CTKrEjGoRMLSpG6eE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Fv80Plp/7w2gnVqrwawLd6qhJ10G4NCDm3re67cNq4Y=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "T/T2oiQCBBES4YN7EodzPRdabZSFlYIClHBym+bQUZE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ZQgHD3l46Ujqtbnj1VbbeM29C9wJzOhz+yZ/7XdSrxk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ltlFKzWvyZvHxDFOYDd/XXJ6kUiJj0ln2HTCEz2o4Z4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "flW8A7bltC1u8bzx0WJtxosGJdOVsJFfbx33jxnpFGg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SXO+92QbMKwUSG2t27ciunV1c3VvFkUuDmSczpRe008=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+KioGs1GM+xRBzFE67ePTWj04KMSE5/Y6qUF7nJ5kvU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "L3xNVbh6YH+RzqABN+5Jgb7T234Efpn766DmUvxIxgg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "hPF+60mBYPjh21dEmPlBhKgyc9S2qLtTkypYvnqP2Fc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "EletRsETy2HcjaPIm2c8CkT7ch/P3pJJDC8hasepcSU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "r5bMXUaNKqLPxZ+TG9HYTG4aSDgcpim27rN8rQFkM0w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "0Q7Erdr8+/S0wUEDDIqlS5XjBVWvhZY65K0uUDb6+Ns=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xEcnhXy35hbXNVBPOOt3TUHbxvKfQ48KjA9b6/rbMqQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "T8bEpiQNgsEudXvyKE9SZlSvbpV/LUaslsdqgSFltyo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "hIoiaF2YjnxDbODfhFEB+JGZ5nf8suD3Shck5bwQ3N0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "qnA6qzejeRJ0rsZaZ0zOvKAaXyxt5lpscKQNYFZNl4k=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "anAKCL2DN/le2VaP0n2ucYSEH/DaaEH/8Sa4OqTZsRA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JCZlBJaFm618oWYSnT9Jr1MtwFVw4BZjOzO+5yWgR90=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yxyk4n9762WzcDVGnTn4jCqUnSMIVCrLDIjCX1QVj34=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "fDI6fdKvDJwim5/CQwWZEzcrXE3LHgy7FTtffcC7tXE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Vex+gcz5T+WkzsVZQrkqUR2ryyZbnaOGuWpYvjN0zCw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "8TLEXz+Gbbp6llHpZXVjLsdlYY9f6hrKpHVpyfDe0RY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "7fTyt5BrunypS65TfOzFW2E2qdIuT4SLeDeGlbQoJCs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "8fKGrkqN0/KuSjyXgDBmRauDKrSa//JBKRWHEB9xBf4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "s4codmG7uN4ss6P357jL21lazEe90M9GOK5WrOknSV0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "RkSpua8XF+NUdxVDU90EbLUTTyZFX3tt3atBTroFaRk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "LnTCuCDyAHK5B9KXzjtwGmWB+qergQk2OCjnIx9MI2A=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cBFh0virAX4pVXf/udIGI2951i0+0aZAdJcBVGtYnT4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "G54X6myQXWZ5fw/G31en3QbdgfXzL9+hFTtJpnWMqDI=", + 
"subType": "00" + } + }, + { + "$binary": { + "base64": "EdsiiuezcsFJFnYIyGjCOhnqMj1BOwTB5EFxN+ERUkg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dVH9MXLtk0WTwGQ3xmrhOqfropMUkDW3o6paNPGl3NU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "sB3HqXKWY3pKbuEH8BTbfNIGfbY+7/ZbOc3XC+JRNNI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "WHyDk62Xhqbo4/iie2aLIM4x2uuAjv6102dJSHI58oM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pNUFuHpeNRDUZ/NrtII2c6sNc9eGR1lIUlIyXKERA+0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "UPa+pdCqnN0bfAptdzldQOSd01gidrDKy8KhWrpSKAI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "l+7dOAlo+HUffMqFYXL6pgUFeTbwOM9CjKQLxEoLtc4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SRnDXV/rN6C8xwMutv9E1luv3DOUio3VkgPr8Cpm7Ew=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "QcH6gl+gX7xZ7OWhUNQMbndJy0Piz49pDo6RsnLkVSA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "t+uL4DnfsI/Zll/KXWW1cOKX3Hu8WIkm3pt9efCVSAQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "myutHDctku/+Uug/nD8gRbYvmx/IovtoAAC2/fz2oHA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6C+cjD0e0nSCP6cPqQYbNG7SlOd6Mfvi8hyfm7Ng+D8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zg01JSoOj9oBKT0S1ldJucXzY5AKgreS+h2xJreWTOs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "7qQ80/FjodHl1m1py/Oii0/9C/xWbLdhaRXQ+kkCP10=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "YwWMNH07vL6c5Nhg+MRnVByhzUunu8y0VLM9z/XvR5U=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Dle8bU98+fudAbc14SToZFkwvV3tcYVsjDug0NWljpc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "J+eKL1vPJmlzltvhI6Li5Fz/TJmi3Ng+ehRTcs46API=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zB3XzfFygLwC3WHkj0up+VbEd25KKoce1vOpG/5bwK4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "vnVnmOnL+z2pqwE+A6cVKS0Iwy4F4/2IiElJca9bUQM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+lG5r/Fpqry3BtFuvY67+RntmHAMDoLVOSGc6ZoXPb0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "L5MXQertqc6uj7ADe8aWKbd1sYHPCE7P1VYVg9Zc3VI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "imKONuZgopt0bhM3GMX2WVPwQYMTobuUUEdhcLfHs4c=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "eOkU1J1uVbiVFWBerbXsSIVcF2nqiicTkFy4x7kFHB8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "gI0uDhXeoH/UatDQKEf4qo8FHzWZDhb/wuWTqbq/ID4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cOkd5Aa3btYhtojE/smsF/PJnULqQ4NNqTkU6KXTFmo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "AWNJMs1MTe294oFipp8Y6P0CjpkZ4qCZoClQF3XcHq8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6gJtlzXOFhGYrVbTuRMmvMlDTwXdNtR9aGBlHZPwIMw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "LEmwVGA/xsEG7UrcOoYLFu6KCXgijzFznenknuDacm8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "mIRFPTXRrGaPtp/Ydij2jgkRe4uoUvAKxW2d8b9zYL0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "B+Uv2u48WALOO0L311z+eryjYQzKJVMfdHMZPhOAFmY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "INXXp0wDyVCq+NtfIrrC2ciETmyW/dWB/48/u4yLEZ4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "se7DGo8XrlrQDLEcco1tZrQt9kDe+0RTyl2bw/quG4w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "vr0m2+Zk9lbN6UgWCyn8xJWJOokU3IDYab5U5q1+CgQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": 
"XI+eJ8Gy2JktG1gICgoj1qpsfy1tKmH0kglWbaQH6DA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "A+UCuNnuAUqnQzspA6TVqUPRmtZmpSex5HFw7THRxs0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xaH2Ehfljd19uo0Fvb3iwkdaiWEVQd2YPoitgEPkhSM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "S/iZBJGcc8+qZxyMtab65MMBoSglybwk3x58Nb86gnY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "w14ZE5qqY5YgkS4Zcs9YNbrQbY1XfGOOHNn9bOYnFVQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "0MhGd/jEF1vjkKGp+ZMn9SjLK54jkp9W4Hg+Sp/oxaI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "92QZ73e/NRTYgCm4aifaKth6aAsKnLLccBc0zx/qUTY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "WOjzemCgFJOiGIp81RSVh/tFlzSTj9eFWcBnsiv2Ycs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "DrsP9CmfKPjw5yLL8bnSeAxfNzAwlb+Z8OqCiKgBY7o=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "lMogqg8veBv6mri3/drMe9afJiKMvevkmGcw9BedfLo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "TxqwNcY8Tg2MPpNdkPBwvfpuTttSYRHU26DGECKYQ9o=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "l0u1b4b4vYACWIwfnB7PZac4oDEgjQZCzHruNPTgAIY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "iVSGQ+cCfhbWIrY/v/WBORK92elu9gfRKyGhr6r/k00=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yK1forG50diEXte8ECzjfpHeYsPyuQ/dgxbxn/nzY5k=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "gIfTLCD3VwnOwkC0zPXWTqaITxX6ZplA69PO2a6zolc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "O/Zxlgh3WqpzJ7+Sd8XWMVID4/GXJUUWaSqfgDUi3b0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ZQ6yv368zwahUqSUYH/StL0Qgz/TwS1CzlMjVDvCciI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "m2rPEYkjwyiKdonMrKlcF7hya4lFOAUwEePJ3SgrNx8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Mq0yl5iVKlq71bT/dT/fXOWf2n90bTnXFnOdGDN0JOc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6qDGMXipPLC2O6EAAMjO2F9xx4rdqZso4IkPpH2304U=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "jvQHRQQa2RIszE2LX2Hv2LbRhYawJ6qmtRt8HZzFQXg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ovJXQrkZlpeHRciKyE/WWNm5O389gRgzx1W+Dw596X4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "a4kgRNvYctGYqyQv9qScL/WkljTYVylJ9pE9KDULlxU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "qV4Q48vPiCJMTjljotzYKI/zfExWpkKOSHGcAjGyDig=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "jtI7zbBF+QW/aYYTkn90zzyHLXLgmy7l1bzgMb2oqic=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q0KmJl9txPdn962UNvnfe6UFhdk9YaFZuTm33F+csso=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ULNdEqeZJgtmNOhN/Y9INzsE9AnxWYwOMn+pIbRXIFs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "R4oz9+wkdjpKe5tE1jpG7IURAnfvS5fLP4LrD5cZfTE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "qG5Z7VhwSu/HT/YFTgDzyAAzJKq51xPw2HeEV5btYC4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "OM/1DmIIZ5Qyhtq8TGkHTBEMVKjAnKRZMRXYtTG8ctc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "2R5vZbljLXnDFA99YfGuRB7pAdPJVKsT25zLNMC0fUk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "OMbavF2EmdAz1fHkLV3ctFEUDfriKhoT2gidwHZ9z1o=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "MWT4Zrw3/vVvTYMa1Is5Pjr3wEwnBfnEAPPUAHKQhNU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tBkRPfG9yxfKocQx5pAJX0oEHKPL0Tgtr+0UYe09InE=", + 
"subType": "00" + } + }, + { + "$binary": { + "base64": "lqxpnDR/H0YgH7RcfKoNoaaRhe1SIazIeMbQ1fu9y3Q=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "utT1UdR22PWOTrOkZauztX613lAplV4eh/ejTRb7ZSk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "S+Y2yFyKi/a6FXhih4yGo29X8I8OT6/zwEoX6NMKT4o=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "QSjVppg29x6oS5yBg8OFjrFt0tuTpWCuKxfIy0k8YnE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "y3r6/Xsfvsl3HksXlVYkJgHUqpQGfICxg3x9f8Zw1qM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "BSltHzEwDjFN4du9rDHAPvl22atlcTioEtt+gC5L1tk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "0arGXjSN0006UnXbrWsGqhvBair569DeFDUME3Df3rA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "s/DumaMad08S+PBUUcrS+v42K0z8HgcdiQtrFAEu2Qs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "EzJ8Y8N0OQBTlnvrK82PdevDNZZO4E6CNgYVu8Cj6Ks=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "VA4vr8jBPI5QdiPrULzzZjBMIUbG3V7Slg5zm0bFcKc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "YAOvEB2ZLtq9LQiFViBHWaxxWVVonC2rNYj9tN9s3L0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "hgaHMo9aAGS+nBwvqnTjZO+YkiQPY1c1XcIYeaYKHyI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "YvaoLt3ZpH0atB0tNzwMjpoxRYJXl0DqSjisMJiGVBE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "EMmW6CptFsiLoPOi5/uAJQ2FmeLg6mCpuVLLrRWk7Mc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "1jQsNMarSnarlYmXEuoFokeBMg/090qUD9wqo1Zn8Gs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "hupXNKhRpJxpyDAAP1TgJ5JMZh9lhbMk6s7D7dMS3C8=", + "subType": "00" + } + } + ] + } + ] + } + } + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2-Range-Decimal-Correctness.json b/test/client-side-encryption/spec/legacy/fle2-Range-Decimal-Correctness.json new file mode 100644 index 0000000000..89b7bd3118 --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2-Range-Decimal-Correctness.json @@ -0,0 +1,1158 @@ +{ + "runOn": [ + { + "minServerVersion": "6.2.0", + "topology": [ + "replicaset" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "encrypted_fields": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimal", + "bsonType": "decimal", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + }, + "key_vault_data": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ], + "tests": [ + { + "description": "Find with $gt", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": 
"Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimal": { + "$numberDecimal": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimal": { + "$numberDecimal": "1.0" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDecimal": { + "$gt": { + "$numberDecimal": "0.0" + } + } + } + }, + "result": [ + { + "_id": 1, + "encryptedDecimal": { + "$numberDecimal": "1.0" + } + } + ] + } + ] + }, + { + "description": "Find with $gte", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimal": { + "$numberDecimal": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimal": { + "$numberDecimal": "1.0" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDecimal": { + "$gte": { + "$numberDecimal": "0.0" + } + } + }, + "sort": { + "_id": 1 + } + }, + "result": [ + { + "_id": 0, + "encryptedDecimal": { + "$numberDecimal": "0.0" + } + }, + { + "_id": 1, + "encryptedDecimal": { + "$numberDecimal": "1.0" + } + } + ] + } + ] + }, + { + "description": "Find with $gt with no results", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimal": { + "$numberDecimal": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimal": { + "$numberDecimal": "1.0" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDecimal": { + "$gt": { + "$numberDecimal": "1.0" + } + } + } + }, + "result": [] + } + ] + }, + { + "description": "Find with $lt", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimal": { + "$numberDecimal": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimal": { + "$numberDecimal": "1.0" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDecimal": { + "$lt": { + "$numberDecimal": "1.0" + } + } + } + }, + "result": [ + { + "_id": 0, + "encryptedDecimal": { + "$numberDecimal": "0.0" + } + } + ] + } + ] + }, + { + "description": "Find with $lte", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": 
"Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimal": { + "$numberDecimal": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimal": { + "$numberDecimal": "1.0" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDecimal": { + "$lte": { + "$numberDecimal": "1.0" + } + } + }, + "sort": { + "_id": 1 + } + }, + "result": [ + { + "_id": 0, + "encryptedDecimal": { + "$numberDecimal": "0.0" + } + }, + { + "_id": 1, + "encryptedDecimal": { + "$numberDecimal": "1.0" + } + } + ] + } + ] + }, + { + "description": "Find with $gt and $lt", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimal": { + "$numberDecimal": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimal": { + "$numberDecimal": "1.0" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDecimal": { + "$gt": { + "$numberDecimal": "0.0" + }, + "$lt": { + "$numberDecimal": "2.0" + } + } + } + }, + "result": [ + { + "_id": 1, + "encryptedDecimal": { + "$numberDecimal": "1.0" + } + } + ] + } + ] + }, + { + "description": "Find with equality", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimal": { + "$numberDecimal": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimal": { + "$numberDecimal": "1.0" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDecimal": { + "$numberDecimal": "0.0" + } + } + }, + "result": [ + { + "_id": 0, + "encryptedDecimal": { + "$numberDecimal": "0.0" + } + } + ] + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDecimal": { + "$numberDecimal": "1.0" + } + } + }, + "result": [ + { + "_id": 1, + "encryptedDecimal": { + "$numberDecimal": "1.0" + } + } + ] + } + ] + }, + { + "description": "Find with $in", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimal": { + "$numberDecimal": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimal": { + "$numberDecimal": "1.0" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDecimal": { + "$in": [ + { + "$numberDecimal": "0.0" + } + ] + } + } + }, + 
"result": [ + { + "_id": 0, + "encryptedDecimal": { + "$numberDecimal": "0.0" + } + } + ] + } + ] + }, + { + "description": "Aggregate with $gte", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimal": { + "$numberDecimal": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimal": { + "$numberDecimal": "1.0" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDecimal": { + "$gte": { + "$numberDecimal": "0.0" + } + } + } + }, + { + "$sort": { + "_id": 1 + } + } + ] + }, + "result": [ + { + "_id": 0, + "encryptedDecimal": { + "$numberDecimal": "0.0" + } + }, + { + "_id": 1, + "encryptedDecimal": { + "$numberDecimal": "1.0" + } + } + ] + } + ] + }, + { + "description": "Aggregate with $gt with no results", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimal": { + "$numberDecimal": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimal": { + "$numberDecimal": "1.0" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDecimal": { + "$gt": { + "$numberDecimal": "1.0" + } + } + } + } + ] + }, + "result": [] + } + ] + }, + { + "description": "Aggregate with $lt", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimal": { + "$numberDecimal": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimal": { + "$numberDecimal": "1.0" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDecimal": { + "$lt": { + "$numberDecimal": "1.0" + } + } + } + } + ] + }, + "result": [ + { + "_id": 0, + "encryptedDecimal": { + "$numberDecimal": "0.0" + } + } + ] + } + ] + }, + { + "description": "Aggregate with $lte", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimal": { + "$numberDecimal": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimal": { + "$numberDecimal": "1.0" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + 
"$match": { + "encryptedDecimal": { + "$lte": { + "$numberDecimal": "1.0" + } + } + } + }, + { + "$sort": { + "_id": 1 + } + } + ] + }, + "result": [ + { + "_id": 0, + "encryptedDecimal": { + "$numberDecimal": "0.0" + } + }, + { + "_id": 1, + "encryptedDecimal": { + "$numberDecimal": "1.0" + } + } + ] + } + ] + }, + { + "description": "Aggregate with $gt and $lt", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimal": { + "$numberDecimal": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimal": { + "$numberDecimal": "1.0" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDecimal": { + "$gt": { + "$numberDecimal": "0.0" + }, + "$lt": { + "$numberDecimal": "2.0" + } + } + } + } + ] + }, + "result": [ + { + "_id": 1, + "encryptedDecimal": { + "$numberDecimal": "1.0" + } + } + ] + } + ] + }, + { + "description": "Aggregate with equality", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimal": { + "$numberDecimal": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimal": { + "$numberDecimal": "1.0" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDecimal": { + "$numberDecimal": "0.0" + } + } + } + ] + }, + "result": [ + { + "_id": 0, + "encryptedDecimal": { + "$numberDecimal": "0.0" + } + } + ] + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDecimal": { + "$numberDecimal": "1.0" + } + } + } + ] + }, + "result": [ + { + "_id": 1, + "encryptedDecimal": { + "$numberDecimal": "1.0" + } + } + ] + } + ] + }, + { + "description": "Aggregate with $in", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimal": { + "$numberDecimal": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimal": { + "$numberDecimal": "1.0" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDecimal": { + "$in": [ + { + "$numberDecimal": "0.0" + } + ] + } + } + } + ] + }, + "result": [ + { + "_id": 0, + "encryptedDecimal": { + "$numberDecimal": "0.0" + } + } + ] + } + ] + }, + { + "description": "Wrong type: Insert Int", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": 
"Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimal": { + "$numberInt": "0" + } + } + }, + "result": { + "errorContains": "cannot encrypt element" + } + } + ] + }, + { + "description": "Wrong type: Find Int", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "find", + "arguments": { + "filter": { + "encryptedDecimal": { + "$gte": { + "$numberInt": "0" + } + } + }, + "sort": { + "_id": 1 + } + }, + "result": { + "errorContains": "field type is not supported" + } + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2-Range-Decimal-Delete.json b/test/client-side-encryption/spec/legacy/fle2-Range-Decimal-Delete.json new file mode 100644 index 0000000000..0463be1c69 --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2-Range-Decimal-Delete.json @@ -0,0 +1,1133 @@ +{ + "runOn": [ + { + "minServerVersion": "6.2.0", + "topology": [ + "replicaset" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "encrypted_fields": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimal", + "bsonType": "decimal", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + }, + "key_vault_data": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ], + "tests": [ + { + "description": "FLE2 Range Decimal. 
Delete.", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimal": { + "$numberDecimal": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimal": { + "$numberDecimal": "1" + } + } + } + }, + { + "name": "deleteOne", + "arguments": { + "filter": { + "encryptedDecimal": { + "$gt": { + "$numberDecimal": "0" + } + } + } + }, + "result": { + "deletedCount": 1 + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedDecimal": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimal", + "bsonType": "decimal", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedDecimal": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimal", + "bsonType": "decimal", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "delete": "default", + "deletes": [ + { + "q": { + "encryptedDecimal": { + "$gt": { + "$binary": { + "base64": 
"CgljAAADcGF5bG9hZADZYgAABGcAnWIAAAMwAH0AAAAFZAAgAAAAAJu2KgiI8vM+kz9qD3ZQzFQY5qbgYqCqHG5R4jAlnlwXBXMAIAAAAAAAUXxFXsz764T79sGCdhxvNd5b6E/9p61FonsHyEIhogVjACAAAAAAeEtIU/sp7zwjA/VArYkCzkUUiRiQOmlTaVZvfUqbEp0AAzEAfQAAAAVkACAAAAAAPQPvL0ARjujSv2Rkm8r7spVsgeC1K3FWcskGGZ3OdDIFcwAgAAAAACgNn660GmefR8jLqzgR1u5O+Uocx9GyEHiBqVGko5FZBWMAIAAAAACMKx1UyNAN4yVafFht8jp4w4LNaJcLhnozfSMzDHD3owADMgB9AAAABWQAIAAAAAD2Zi6kcxmaD2mY3VWrP+wYJMPg6cSBIYPapxaFQxYFdQVzACAAAAAAM/cV36BLBY3xFBXsXJY8M9EHHOc/qrmdc2CJmj3M89gFYwAgAAAAAHdek2wlAQ5BQORVjudyziRTV0EKx8qbsnoDiw4HG2xaAAMzAH0AAAAFZAAgAAAAAOaNqI9srQ/mI9gwbk+VkizGBBH/PPWOVusgnfPk3tY1BXMAIAAAAAAc96O/pwKCmHCagT6T/QV/wz4vqO+R22GsZ1dse2Vg6QVjACAAAAAA3tld+twIlVWnCTXElspau5k02CwxXDjh+3u1CQtV/W4AAzQAfQAAAAVkACAAAAAA0XlQgy/Yu97EQOjronl9b3dcR1DFn3deuVhtTLbJZHkFcwAgAAAAACoMnpVl6EFJak8A+t5N4RFnQhkQEBnNAx8wDqmq5U/dBWMAIAAAAABRmXIchytMNz4J439viiY5FZ1OB3AXJBkZ3udUqHpsqAADNQB9AAAABWQAIAAAAAAOtpNexRxfv0yRFvZO9DhlkpU4mDuAb8ykdLnE5Vf1VAVzACAAAAAAeblFKm/30orP16uQpZslvsoS8s0xfNPIBlw3VkHeekYFYwAgAAAAAJQdzGMO2s1AkdKz3ZPytivrQphUFVhvhMcXjvUGzHBDAAM2AH0AAAAFZAAgAAAAAIr8xAFm1zPmrvW4Vy5Ct0W8FxMmyPmFzdWVzesBhAJFBXMAIAAAAABYeeXjJEzTHwxab6pUiCRiZjxgtN59a1y8Szy3hfkg+gVjACAAAAAA161loizzKvXS1Po/3bxeNICmKpsPSK9Q+EpkISK3jVoAAzcAfQAAAAVkACAAAAAAl+ibLk0/+EwoqeC8S8cGgAtjtpQWGEZDsybMPnrrkwEFcwAgAAAAAHPPBudWgQ+HUorLDpJMqhS9VBF2VF5aLcxgrM1s+yU7BWMAIAAAAAACyakB3CZWEjbxK0u9Sflc3+EafBAQFbvBpCxKvxuEQAADOAB9AAAABWQAIAAAAACR9erwLTb+tcWFZgJ2MEfM0PKI9uuwIjDTHADRFgD+SQVzACAAAAAAcOop8TXsGUVQoKhzUllMYWxL93xCOkwtIpV8Q6hiSYYFYwAgAAAAAJ43998lzKoVqWm99ZzKHJLeVscGNCVoKDvpUtt2rDI9AAM5AH0AAAAFZAAgAAAAALv0vCPgh7QpmM8Ug6ad5ioZJCh7pLMdT8FYyQioBQ6KBXMAIAAAAADsCPyIG8t6ApQkRk1fX/sfc1kpuWCWP8gAEpnYoBSHrQVjACAAAAAAj2h1WLr0EFEFZ31djx3BtLIbAtdE05ax54opkJo4/bQAAzEwAH0AAAAFZAAgAAAAAIW4AxJgYoM0pcNTwk1RSbyjZGIqgKL1hcTJmNrnZmoPBXMAIAAAAAAZpfx3EFO0vY0f1eHnE0PazgqeNDTaj+pPJMUNW8lFrAVjACAAAAAAqFD83E8POmo6pdqg7D+jWtngbgcV99mbQBxkbpX7ds4AAzExAH0AAAAFZAAgAAAAAKliO6L9zgeuufjj174hvmQGNRbmYYs9yAirL7OxwEW3BXMAIAAAAAAqU7vs3DWUQ95Eq8OejwWnD0GuXd+ASi/uD6S0l8MM1QVjACAAAAAAp0cwpAh7SiuStPwYNVvp83N5GWyWRWA7UGRxHDDj8g8AAzEyAH0AAAAFZAAgAAAAAOGQcCBkk+j/Kzjt/Cs6g3BZPJG81wIHBS8JewHGpgk+BXMAIAAAAABjrxZXWCkdzrExwCgyHaafuPSQ4V4x2k9kUCAqUaYKDQVjACAAAAAAJr/tEuxScrJJEMECy4itHf1y5+gO2l5sBjgM/EOLXw0AAzEzAH0AAAAFZAAgAAAAAPmCNEt4t97waOSd5hNi2fNCdWEkmcFJ37LI9k4Az4/5BXMAIAAAAABX7DuDPNg+duvELf3NbLWkPMFw2HGLgWGHyVWcPvSNCAVjACAAAAAAeNCEn5Z3A5CgCnIgaBl2N8p0t+5rRY06vGg6ePVMaTUAAzE0AH0AAAAFZAAgAAAAAD4vtVUYRNB+FD9yoQ2FVJH3nMeJeKbi6eZfth638YqbBXMAIAAAAAANCuUB4OdmuD6LaDK2f3vaqfgYYvg40wDXOBbcFjTqLwVjACAAAAAATozHsao4er/Uk5H/pKe/1JEo/3h4Pgah2RgJHXf2PeYAAzE1AH0AAAAFZAAgAAAAAJPIRzjmTjbdIvshG6UslbEOd797ZSIdjGAhGWxVQvK1BXMAIAAAAABgmJ0Jh8WLs9IYs/a7DBjDWd8J3thW/AGJK7zDnMeYOAVjACAAAAAAtD48LbnCkTvacfz8DvR1ixsbYceiJxJRV7tLqDUWvVwAAzE2AH0AAAAFZAAgAAAAAABQyKQPoW8wGPIqnsTv69+DzIdRkohRhOhDmyVHkw9WBXMAIAAAAAAqWA2X4tB/h3O1Xlawtz6ndI6WaTwgU1QYflL35opu5gVjACAAAAAAphx2eRFGSW/tm3HxilqRv5Zj5TtOy4KSGiCIm6yJANYAAzE3AH0AAAAFZAAgAAAAACB7NOyGQ1Id3MYnxtBXqyZ5Ul/lHH6p1b10U63DfT6bBXMAIAAAAADpOryIcndxztkHSfLN3Kzq29sD8djS0PspDSqERMqokQVjACAAAAAAIHYCljTfUu/Ox23Nrjz2Z6cqQpn4s0sJV/SxkC9UtIEAAzE4AH0AAAAFZAAgAAAAAKVfXLfs8XA14CRTB56oZwV+bFJN5BHraTXbqEXZDmTkBXMAIAAAAAASRWTsfGOpqdffiOodoqIgBzG/yzFyjR5CfUsIUIWGpgVjACAAAAAAGs7l0GsFNkQMcJBUuUsEOMuQupbEvi7MDcezspsCY3EAAzE5AH0AAAAFZAAgAAAAAH/aZr4EuS0/noQR9rcF8vwoaxnxrwgOsSJ0ys8PkHhGBXMAIAAAAACd7ObGQW7qfddcvyxRTkPuvq/PHu7+6I5dxwS1Lzy5XAVjACAAAAAAWnfsbdu5Qvb62rEpBR90qh+3jdYfp266dmuNPLoXys4AAzIwAH0AAAAFZAAgAAAAAKvlcpFFNq0oA+urq3w6d80PK1HHHw0H0yVWvU9aHijXBXMAIAAAAADWnAHQ5Fhlcjawki7kWz
dqjM2f6IdGJblojrYElWjsZgVjACAAAAAAdNxswYIMjqVta8sLC8wniMm6gOgIgG5eWTPAxItBJJ4AAzIxAH0AAAAFZAAgAAAAAH/2aMezEOddrq+dNOkDrdqf13h2ttOnexZsJxG1G6PNBXMAIAAAAABNtgnibjC4VKy5poYjvdsBBnVvDTF/4mmEAxsXVgZVKgVjACAAAAAArWXr2Myg8T8/0eYBCUXDOsIR//zqZtlE6Lb1YZJkvYcAAzIyAH0AAAAFZAAgAAAAAF2wZoDL6/V59QqO8vdRZWDpXpkV4h4KOCSn5e7x7nmzBXMAIAAAAADLZBu7LCYjbThaVUqMK14H/elrVOYIKJQCx4C9Yjw37gVjACAAAAAAWeF6F8pBeCm27PWjQGrlyN7JHp9WqCNnwFzqZ+V51t0AAzIzAH0AAAAFZAAgAAAAAH27yYaLn9zh2CpvaoomUPercSfJRUmBY6XFqmhcXi9QBXMAIAAAAAAUwumVlIYIs9JhDhSj0R0+59psCMsFk94E62VxkPt42QVjACAAAAAAjbsmlVrfexmULer3RtGwnOf20pF0xv4Z46OHOaIwgzEAAzI0AH0AAAAFZAAgAAAAALMg2kNAO4AFFs/mW3In04yFeN4AP6Vo0klyUoT06RquBXMAIAAAAAAgGWJbeIdwlpqXCyVIYSs0dt54Rfc8JF4b8uYc+YUj0AVjACAAAAAAsNKKan8cMW1oYFmQiyPZbepuJlDD0/CQUS21++CP16kAAzI1AH0AAAAFZAAgAAAAALas/i1T2DFCEmrrLEi7O2ngJZyFHialOoedVXS+OjenBXMAIAAAAAA1kK0QxY4REcGxHeMkgumyF7iwlsRFtw9MlbSSoQY7uAVjACAAAAAAUGZ/BwF6Q9hcFBC8F5ed6RwYWJ1/OahRwljBloMgkFAAAzI2AH0AAAAFZAAgAAAAAP1TejmWg1CEuNSMt6NUgeQ5lT+oBoeyF7d2l5xQrbXWBXMAIAAAAABPX0kj6obggdJShmqtVfueKHplH4ZrXusiwrRDHMOKeQVjACAAAAAApU9zcPAQ2xOspmwhjWN90v2m2clQ4rk7WBmm/nUyOHQAAzI3AH0AAAAFZAAgAAAAAMrKn+QPa/NxYezNhlOX9nyEkN1kE/gW7EuZkVqYl0b8BXMAIAAAAABUoZMSPUywRGfX2EEencJEKH5x/P9ySUVrhStAwgR/LgVjACAAAAAAl/hDhauhBkda/I/kLr/XLxyso2guawC2isj+tDQhfMoAAzI4AH0AAAAFZAAgAAAAAMmD1+a+oVbiUZd1HuZqdgtdVsVKwuWAn3/M1B6QGBM3BXMAIAAAAACLyytOYuZ9WEsIrrtJbXUx4QgipbaAbmlJvSZVkGi0CAVjACAAAAAAVKbMdxvkfCBoqJCUVjLrtKF90a80LocckdFc7LvYuWIAAzI5AH0AAAAFZAAgAAAAAOVKV7IuFwmYP1qVv8h0NvJmfPICu8yQhzjG7oJdTLDoBXMAIAAAAABL70XLfQLKRsw1deJ2MUvxSWKxpF/Ez73jqtbLvqbuogVjACAAAAAARiC2ShVCKMjjiqB6xg+jDIYJNhhWqmceO07EC8rEXkoAAzMwAH0AAAAFZAAgAAAAAKS/1RSAQma+xV9rz04IcdzmavtrBDjOKPM+Z2NEyYfPBXMAIAAAAAAOJDWGORDgfRv8+w5nunh41wXb2hCA0MRzwnLnQtIqPgVjACAAAAAAWU2Vw2Rta8oHxPPfOWQa86tjYWndb0E3RNlZ9gthHt0AAzMxAH0AAAAFZAAgAAAAAFeq8o82uNY1X8cH6OhdTzHNBUnCChsEDs5tm0kPBz3qBXMAIAAAAABaxMBbsaeEj/EDtr8nZfrhhhirBRPJwVamDo5WwbgvTQVjACAAAAAAqcsCyJuLvxG8STrUI1Ccaga760+4fbmIPc6bXodea0gAAzMyAH0AAAAFZAAgAAAAAI8IKIfDrohHh2cjspJHCovqroSr5N3QyVtNzFvT5+FzBXMAIAAAAABXHXteKG0DoOMmECKp6ro1MZNQvXGzqTDdZ0DUc8QfFAVjACAAAAAA47u7VuqtYJ4jt5CcGrUacopqApwpqEpIpkalvCRtul0AAzMzAH0AAAAFZAAgAAAAAJkHvm15kIu1OtAiaByj5ieWqzxiu/epK6c/9+KYIrB0BXMAIAAAAACzg5TcyANk0nes/wCJudd1BwlkWWF6zw3nGclq5v3SJQVjACAAAAAAvTQRtEYdIj2+bfJNamvrgWRoBsYYjMQx9MyuFwSfKmYAAzM0AH0AAAAFZAAgAAAAAAYSOvEWWuSg1Aym7EssNLR+xsY7e9BcwsX4JKlnSHJcBXMAIAAAAABT48eY3PXVDOjw7JpNjOe1j2JyI3LjDnQoqZ8Je5B2KgVjACAAAAAA4eLwHqiSgM1pMqjK3QFgYauOnI3emnlOK3rgQPuec4AAAzM1AH0AAAAFZAAgAAAAAGQxC9L1e9DfO5XZvX1yvc3hTLtQEdKO9FPMkyg0Y9ZABXMAIAAAAADtmcMNJwdWLxQEArMGZQyzpnu+Z5yMmPAkvgq4eAKwNQVjACAAAAAAxA7XR9Cp6671TiZesvqE/EPKVPXfGDaYuDnce+MaZw0AAzM2AH0AAAAFZAAgAAAAADLHK2LNCNRO0pv8n4fAsxwtUqCNnVK8rRgNiQfXpHSdBXMAIAAAAACf16EBIHRKD3SzjRW+LMOl+47QXA3CJhMzlcqyFRW22AVjACAAAAAA9F9Ztm39rTapXoeRmzk6eUoa3MDPatwoQD5PvD+3xGEAAzM3AH0AAAAFZAAgAAAAAHiZJiXKNF8bbukQGsdYkEi95I+FSBHy1I5/hK2uEZruBXMAIAAAAADE+lZBa8HDUJPN+bF6xI9x4N7GF9pj3vBR7y0BcfFhBAVjACAAAAAArsw2CrSC2y02EiWJxViVxlYRIXtWARpUTwO/mfqS7O4AAzM4AH0AAAAFZAAgAAAAAI1oa2OIw5TvhT14tYCGmhanUoYcCZtNbrVbeoMldHNZBXMAIAAAAAAx2nS0Ipblf2XOgBiUOuJFBupBhe7nb6QPLZlA4aMPCgVjACAAAAAAaVoGxV5YH4urmcw3Zocg+QHoq+hA0u6nIkconI0MtZoAAzM5AH0AAAAFZAAgAAAAABgTWS3Yap7Q59hii/uPPimHWXsr+DUmsqfwt/X73qsOBXMAIAAAAACKK05liW5KrmEAvtpCB1WUltruzUylDDpjea//UlWoOAVjACAAAAAAAI3iOqWSA+DpgNr/neIRpt85Q8pLS/81ZKDnbC4io/0AAzQwAH0AAAAFZAAgAAAAABOAnBPXDp6i9TISQXvcNKwGDLepZTu3cKrB4vKnSCjBBXMAIAAAAADjjzZO7UowAAvpwyG8BNOVqLCccMFk3aDK4unUeft5ywVjACAAAAAAIZqKrUcbtze/K3CUKdchuVXQYkgj6jQfIeMZ3GrtYeEAAzQxAH0AAAAFZAAgAAAAAHN8hyvT1lYrAsdiV
5GBdd5jhtrAYE/KnSjw2Ka9hjz9BXMAIAAAAAD794JK7EeXBs+D7yOVK7nWF8SbZ/7U8gZ7nnT9JFNwTAVjACAAAAAAQ4K5cDGLM/2L7KbMt537tsG1eiWrboKMj6pXVfBDIBoAAzQyAH0AAAAFZAAgAAAAAO93bPrq8bsnp1AtNd9ETnXIz0lH/2HYN/vuw9wA3fyFBXMAIAAAAABHlls5fbaF2oAGqptC481XQ4eYxInTC29aElfmVZgDUgVjACAAAAAAe0oLBk+ZaipcPgjjHvACAyA42cLY8i+s+V0EsarulZkAAzQzAH0AAAAFZAAgAAAAAL1YsAZm1SA0ztU6ySIrQgCCA74V6rr0/4iIygCcaJL6BXMAIAAAAADTXWTHWovGmUR1Zg9l/Aqq9H5mOCJQQrb/Dfae7e3wKAVjACAAAAAAAeXpcv0oCp9iY/U81w+WJjFjGn60KLQEURJd/pSoaJgAAzQ0AH0AAAAFZAAgAAAAAEoFAeHk0RZ9kD+cJRD3j7PcE5gzWKnyBrF1I/MDNp5mBXMAIAAAAACgHtc2hMBRSZjKw8RAdDHK+Pi1HeyjiBuAslGVNcW5tAVjACAAAAAAmSJiRfjmuwq5cYfadeYQRkPqbGYYrMNTQeyhBEAYClEAAzQ1AH0AAAAFZAAgAAAAAAW+7DmSN/LX+/0uBVJDHIc2dhxAGz4+ehyyz8fAnNGoBXMAIAAAAAA6Ilw42EvvfLJ3Eq8Afd+FjPoPcQutZO6ltmCLEr8kxQVjACAAAAAAUHZ9EwBIvDI4PUeCOzZCuVN9g9h5Zvq2mSpmejPLBE8AAzQ2AH0AAAAFZAAgAAAAANBdV7M7kuYO3EMoQItAbXv4t2cIhfaT9V6+s4cg9djlBXMAIAAAAABvz4MIvZWxxrcJCL5qxLfFhXiUYB1OLHdKEjco94SgDgVjACAAAAAAvz1koQRh+weAUstLWz/iHxFrLReN4KtQjdEU/zrKOFsAAzQ3AH0AAAAFZAAgAAAAAMoAoiAn1kc79j5oPZtlMWHMhhgwNhLUnvqkqIFvcH1NBXMAIAAAAADcJTW7WiCyW0Z9YDUYwppXhLj4Ac1povpJvcAq+i48MQVjACAAAAAARL0k3Uwe7OUwTejHMoNwK+twNHQznmo1vVUZaH7h8BIAAzQ4AH0AAAAFZAAgAAAAACI3j5QP7dWHpcT6WO/OhsWwRJNASBYqIBDNzW8IorEyBXMAIAAAAABxUpBSjXwCKDdGP9hYU+RvyR+96kChfvyyRC4jZmztqAVjACAAAAAA93/qZqyku1BJePH5hmMWsMfuMlF7TOFZp7z2wLJNdeYAAzQ5AH0AAAAFZAAgAAAAAKsbycEuQSeNrF8Qnxqw3x3og8JmQabwGqnDbqzFRVrrBXMAIAAAAACno/3ef2JZJS93SVVzmOZSN+jjJHT8s0XYq2M46d2sLAVjACAAAAAARUbN9bbz8Balyw8pmBLg3L5dy3aQ9s8IgqpVLeQzTkUAAzUwAH0AAAAFZAAgAAAAAPXIcoO8TiULqlxzb74NFg+I8kWX5uXIDUPnh2DobIoMBXMAIAAAAADR6/drkdTpnr9g1XNvKDwtBRBdKn7c2c4ZNUVK5CThdQVjACAAAAAAnJXYvnjljaE6ajRF+h90FXzJ9EJY26D0oG9t61ZMzqgAAzUxAH0AAAAFZAAgAAAAAEa03ZOJmfHT6/nVadvIw71jVxEuIloyvxXraYEW7u7pBXMAIAAAAADzRlBJK75FLiKjz3djqcgjCLo/e3yntI3MnPS48OORhgVjACAAAAAAbFS6SmTSDF3AsPceQrRwMMRoAnzL/k5avSHlIji5zYIAAzUyAH0AAAAFZAAgAAAAAKx8NLSZUU04pSSGmHa5fh2oLHsEN5mmNMNHL95/tuC9BXMAIAAAAAA59hcXVaN3MNdHoo11OcH1aPRzHCwpVjO9mGfMz4xh3QVjACAAAAAAoIvo/iOb/eXJv7w4KSQo74DOyO4uAIO09Ba7JZw9ousAAzUzAH0AAAAFZAAgAAAAAHNKAUxUqBFNS9Ea9NgCZoXMWgwhP4x0/OvoaPRWMquXBXMAIAAAAABUZ551mnP4ZjX+PXU9ttomzuOpo427MVynpkyq+nsYCQVjACAAAAAAlCTphL00SYotztwI+txbHAFTusjh1nNbiya24rM1dUEAAzU0AH0AAAAFZAAgAAAAALfGXDlyDVcGaqtyHkLT0qpuRhJQLgCxtznazhFtuyn/BXMAIAAAAABipxlXDq14C62pXhwAeen5+syA+/C6bN4rtZYcO4zKwAVjACAAAAAA431CBTW4s3IQde024W2cz5iEnj8EJ7p07esepoVDxmQAAzU1AH0AAAAFZAAgAAAAANoEr8sheJjg4UCfBkuUzarU9NFoy1xwbXjs5ifVDeA9BXMAIAAAAABPoyTf6M+xeZVGES4aNzVlq7LgjqZXJ/QunjYVusGUEAVjACAAAAAAywuLdjJJuXewOXMAr25GMBEif8+P74+pGaENUn+XIB8AAzU2AH0AAAAFZAAgAAAAAKvDiK+xjlBe1uQ3SZTNQl2lClIIvpP/5CHwY6Kb3WlgBXMAIAAAAAANnxImq5MFbWaRBHdJp+yD09bVlcFtiFDYsy1eDZj+iQVjACAAAAAAvwntX1RgeNfp5oiD+r3sAKIbMGy1yEblODDYWec1xuIAAzU3AH0AAAAFZAAgAAAAAF49IlFH9RmSUSvUQpEPUedEksrQUcjsOv44nMkwXhjzBXMAIAAAAADJtWGbk0bZzmk20obz+mNsp86UCu/nLLlbg7ppxYn7PgVjACAAAAAAKW8QTERGXwh6fE4gJsULcmu80B4BMkMaZR9USn/C6RgAAzU4AH0AAAAFZAAgAAAAAOuSJyuvz50lp3BzXlFKnq62QkN2quNU1Gq1IDsnFoJCBXMAIAAAAAAqavH1d93XV3IzshWlMnzznucadBF0ND092/2ApI1AcAVjACAAAAAAmVcLt9hf9/q+2O3K4cvFAsvnCsSofHZ5iTzIaR5YKFIAAzU5AH0AAAAFZAAgAAAAALtgC4Whb4ZdkCiI30zY6fwlsxSa7lEaOAU3SfUXr02XBXMAIAAAAACgdZ6U1ZVgUaZZwbIaCdlANpCw6TZV0bwg3DS1NC/mnAVjACAAAAAAYJOnAwVW0gPl1jkCxAL1ZAhKkJ1+ShfwyaIaipflCegAAzYwAH0AAAAFZAAgAAAAAF6PfplcGp6vek1ThwenMHVkbZgrc/dHgdsgx1VdPqZ5BXMAIAAAAACha3qhWkqmuwJSEXPozDO8y1ZdRLyzt9Crt2vjGnT7AAVjACAAAAAA1JYhGDDvtf8b2Pup5jdzQmy325tGo27n/5thjZ/GI4sAAzYxAH0AAAAFZAAgAAAAAKoLEhLvLjKc7lhOJfx+VrGJCx9tXlOSa9bxQzGR6rfbBXMAIAAAAAAIDK5wNnjRMBzET7x/KAMExL/zi1IumJM92XTgXfoPoAVjACAAAAAAhoVLxo0hXbxAzz2BWbeRxAzk
08TbVuHxJkdsbCISswkAAzYyAH0AAAAFZAAgAAAAADoQv6lutRmh5scQFvIW6K5JBquLxszuygM1tzBiGknIBXMAIAAAAADAD+JjW7FoBQ76/rsECmmcL76bmyfXpUU/awqIsZdO+wVjACAAAAAAwLIixR/svwADNhtiOZ9mDF2rUqEvuPx2tUSQbhOpersAAzYzAH0AAAAFZAAgAAAAAJNHUGAgn56KekghO19d11nai3lAh0JAlWfeP+6w4lJBBXMAIAAAAAD9XGJlvz59msJvA6St9fKW9CG4JoHV61rlWWnkdBRLzwVjACAAAAAAKMVXumaH5QOt5WVHFip9xWx9e5I/5Y9uEO0UAX6dy7QAAzY0AH0AAAAFZAAgAAAAAHgYoMGjEE6fAlAhICv0+doHcVX8CmMVxyq7+jlyGrvmBXMAIAAAAAC/5MQZgTHuIr/O5Z3mXPvqrom5JTQ8IeSpQGhO9sB+8gVjACAAAAAAPMgD8Rqnd94atKnMyPjlTwthUQ710MKJVqgtwNXLFWwAAzY1AH0AAAAFZAAgAAAAANpIljbxHOM7pydY877gpRQvYY2TGK7igqgGsavqGPBABXMAIAAAAAAqHyEu9gpurPOulApPnr0x9wrygY/7mXe9rAC+tPK80wVjACAAAAAAezZExYLAbjyMsVuSrWGQ+LsXYEcp6AFMANkQ496mTXcAAzY2AH0AAAAFZAAgAAAAAGR+gEaZTeGNgG9BuM1bX2R9ed4FCxBA9F9QvdQDAjZwBXMAIAAAAABSkrYFQ6pf8MZ1flgmeIRkxaSh/Eep4Btdx4QYnGGnwAVjACAAAAAAQ011358xXtVDOyGPpF2+zGoMdiQ8PGCB1QfDcluYUF4AAzY3AH0AAAAFZAAgAAAAAFNprhQ3ZwIcYbuzLolAT5n/vc14P9kUUQComDu6eFyKBXMAIAAAAAAcx9z9pk32YbPV/sfPZl9ALIEVsqoLXgqWLVK/tP+heAVjACAAAAAA3eqduo9CWE5BePqBq7KCh5++QqXnyjCwybB15Zoeiu4AAzY4AH0AAAAFZAAgAAAAADgyPqQdqQrgfmJjRFAILTHzXbdw5kpKyfeoEcy6YYG/BXMAIAAAAAAE+3XsBQ8VAxAkN81au+f3FDeCD/s7KoZD+fnM1MJSSAVjACAAAAAAWHAiQNo6HBkIhf72fVQxJLaCtNeTqn2OuMmYZjT7PRkAAzY5AH0AAAAFZAAgAAAAAI0CT7JNngTCTUSei1Arw7eHWCD0jumv2rb7imjWIlWABXMAIAAAAABSP8t6ya0SyCphXMwnru6ZUDXWElN0NfBvEOhDvW9bJQVjACAAAAAAbfZdoqMQ71hNIsycNHl6JpSmAFDPSfgzdWkGqFZNoScAAzcwAH0AAAAFZAAgAAAAAD/FIrGYFDjyYmVb7oTMVwweWP7A6F9LnyIuNO4MjBnXBXMAIAAAAACIZgJCQRZu7NhuNMyOqCn1tf+DfU1qm10TPCfj5JYV3wVjACAAAAAAZCaO06/DjOBjNrTxB8jdxM7X4XvNu9QTa2Y00kZJwgEAAzcxAH0AAAAFZAAgAAAAAHIkVuNDkSS1cHIThKc/O0r2/ubaABTOi8Q1r/dvBAsEBXMAIAAAAADdHYqchEiJLM340c3Q4vJABmmth3+MKzwLYlsG6GS7sQVjACAAAAAA4byh4Re83zUZxvEH4iJSsnFei+ohsWxjSpB5YoAPawIAAzcyAH0AAAAFZAAgAAAAAJmoDILNhC6kn3masElfnjIjP1VjsjRavGk1gSUIjh1NBXMAIAAAAAD97Ilvp3XF8T6MmVVcxMPcdL80RgQ09UoC6PnoOvZ1IQVjACAAAAAAx9wkqPvtzrL9EFDmFvktI129ti8IAdHn0oudubGPWAAAAzczAH0AAAAFZAAgAAAAAI5bm3YO0Xgf0VT+qjVTTfvckecM3Cwqj7DTKZXf8/NXBXMAIAAAAAD/m+h8fBhWaHm6Ykuz0WX1xL4Eme3ErLObyEVJf8NCywVjACAAAAAAiGdTXD22l1zDxHeF4NXUTiBnNTsdpzJRwM6riTPuOogAAzc0AH0AAAAFZAAgAAAAANqo4+p6qdtCzcB4BX1wQ6llU7eFBnuu4MtZwp4B6mDlBXMAIAAAAAAGiz+VaukMZ+6IH4jtn4KWWdKK4/W+O+gRioQDrfzpMgVjACAAAAAAl/wlBjSJW/hKkll8HSBCGw4Ce1pJ5rZuhVE09hKq4jQAAzc1AH0AAAAFZAAgAAAAAPrFXmHP2Y4YAm7b/aqsdn/DPoDkv7B8egWkfe23XsM1BXMAIAAAAAAGhwpKAr7skeqHm3oseSbO7qKNhmYsuUrECBxJ5k+D2AVjACAAAAAAKSsxERy2OBhb5MiH9/H2BET2oeU6yVihiAoWi/16FroAAzc2AH0AAAAFZAAgAAAAABzjYxwAjXxXc0Uxv18rH8I3my0Aguow0kTwKyxbrm+cBXMAIAAAAADVbqJVr6IdokuhXkEtXF0C2gINLiAjMVN20lE20Vmp2QVjACAAAAAACJ+VAwl9X88mjC76yzkWeL4K6AamNYjbNkpS9B6fQE4AAzc3AH0AAAAFZAAgAAAAAFMm2feF2fFCm/UC6AfIyepX/xJDSmnnolQIBnHcPmb5BXMAIAAAAABLI11kFrQoaNVZFmq/38aRNImPOjdJh0Lo6irI8M/AaAVjACAAAAAAZFgYh4nV8YN+/AAqe/QhKDkNBPg8KraCG7y3bOzhPV4AAzc4AH0AAAAFZAAgAAAAAJvXB3KyNiNtQko4SSzo/9b2qmM2zU9CQTTDfLSBWMgRBXMAIAAAAAAvjuVP7KsLRDeqVqRziTKpBrjVyqKiIbO9Gw8Wl2wFTAVjACAAAAAARuBn3JdQ8so7Gy0XVH0CtlvtRoJWhSrJP6eRIqbyWh8AAzc5AH0AAAAFZAAgAAAAAPGdcxDiid8z8XYnfdDivNMYVPgBKdGOUw6UStU+48CdBXMAIAAAAAARj6g1Ap0eEfuCZ4X2TsEw+Djrhto3fA5nLwPaY0vCTgVjACAAAAAATxLY6SkcLPaoOBJpQsggEqoxgJgiY9seP3fBQM05PckAAzgwAH0AAAAFZAAgAAAAAP5rGPrYGt3aKob5f/ldP0qrW7bmWvqnKY4QwdDWz400BXMAIAAAAADTQkW2ymaaf/bhteOOGmSrIR97bAnJx+yN3yMj1bTeewVjACAAAAAA6O+wIMt3xMITuCxVrqOCNBPX5F122G+/Is+EYkzUvVYAAzgxAH0AAAAFZAAgAAAAAAlz6wJze5UkIxKpJOZFGCOf3v2KByWyI6NB6JM9wNcBBXMAIAAAAABUC7P/neUIHHoZtq0jFVBHY75tSFYr1Y5S16YN5XxC1QVjACAAAAAANv97jMd9JmZ7lMeCzK7jaZHZYsZJNl6N9g9WXzd2om0AAzgyAH0AAAAFZAAgAAAAAFJ8AtHcjia/9Y5pLEc3qVgH5xKiXw12G9Kn2A1EY8McBXMAIAAAAAAxe7Bdw7eUSBk/oAawa7u
icTEDgXLymRNhBy1LAxhDvwVjACAAAAAAtOUZ2NeUWwESpq95O92dhqYBt8RtFnjT43E7k9mQF3oAAzgzAH0AAAAFZAAgAAAAAO8uwQUaKFb6vqR3Sv3Wn4QAonC2exOC9lGG1juqP5DtBXMAIAAAAABZf1KyJgQg8/Rf5c02DgDK2aQu0rNCOvaL60ohDHyY+gVjACAAAAAA5nAWj1Ek8p0MLYZEB3yoVFDfBYZ+/ZpIo71u+W9hprcAAzg0AH0AAAAFZAAgAAAAAE8YtqyRsGCeiR6hhiyisR/hccmK4nZqIMzO4lUBmEFzBXMAIAAAAAC1UYOSKqAeG1UJiKjWFVskRhuFKpj9Ezy+lICZvFlN5AVjACAAAAAA37oP30mTzVftI2FL9Uwyls/jqLqbmRDQk7p7nlx44uYAAzg1AH0AAAAFZAAgAAAAAPhCrMausDx1QUIEqp9rUdRKyM6a9AAx7jQ3ILIu8wNIBXMAIAAAAACmH8lotGCiF2q9VQxhsS+7LAZv79VUAsOUALaGxE/EpAVjACAAAAAAhVIkl8p19VZ0gpg32s++Jda3qsVSVB5tV4CKrtjhE3IAAzg2AH0AAAAFZAAgAAAAAOBi/GAYFcstMSJPgp3VkMiuuUUCrZytvqYaU8dwm8v2BXMAIAAAAACEZSZVyD3pKzGlbdwlYmWQhHHTV5SnNLknl2Gw8IaUTQVjACAAAAAAriBex2kK//RPhyVpCYJDBng4l5w8jD3m8BF7dVAP0p8AAzg3AH0AAAAFZAAgAAAAAIQuup+YGfH3mflzWopN8J1X8o8a0d9CSGIvrA5HOzraBXMAIAAAAADYvNLURXsC2ITMqK14LABQBI+hZZ5wNf24JMcKLW+84AVjACAAAAAAGeUzdG2kQrx5XypXJezZmPVzMYuqYZw7Bhbl4EPT0SMAAzg4AH0AAAAFZAAgAAAAAKsh0ADyOnVocFrOrf6MpTrNvAj8iaiE923DPryu124gBXMAIAAAAADg24a8NVE1GyScc6tmnTbmu5ulzO+896fE92lN08MeswVjACAAAAAAbO+DEBY3STVMQN7CbxmHDUBYrDR+80e797u/VmiK4vcAAzg5AH0AAAAFZAAgAAAAAKkVC2Y6HtRmv72tDnPUSjJBvse7SxLqnr09/Uuj9sVVBXMAIAAAAABYNFUkH7ylPMN+Bc3HWX1e0flGYNbtJNCY9SltJCW/UAVjACAAAAAAHlE3RLKcXpXto7Mr8nRgzEsmhjfGh4vcgPxashCSMg4AAzkwAH0AAAAFZAAgAAAAADDggcwcb/Yn1Kk39sOHsv7BO/MfP3m/AJzjGH506Wf9BXMAIAAAAAAYZIsdjICS0+BDyRUPnrSAZfPrwtuMaEDEn0/ijLNQmAVjACAAAAAA1c/sy3255NofBQrnBNSmVkSzMgsGPaaOUJShddVrnuQAAzkxAH0AAAAFZAAgAAAAAEWY7dEUOJBgjOoWVht1wLehsWAzB3rSOBtLgTuM2HC8BXMAIAAAAAAAoswiHRROurjwUW8u8D5EUT+67yvrgpB/j6PzBDAfVwVjACAAAAAALBvQumG8m7/bzJjGWN2cHSAncdN8jMtOSmEhEhGom24AAzkyAH0AAAAFZAAgAAAAAPZaapeAUUFPA7JTCMOWHJa9lnPFh0/gXfAPjA1ezm4ZBXMAIAAAAACmJvLY2nivw7/b3DOKH/X7bBXjJwoowqb1GtEFO3OYgAVjACAAAAAAGCDHuUJusGHKQ+r9nrFChmUUsRcqZKPGsRiLSk5gbFcAAzkzAH0AAAAFZAAgAAAAAK00u6jadxCZAiA+fTsPVDsnW5p5LCr4+kZZZOTDuZlfBXMAIAAAAAAote4zTEYMDgaaQbAdN8Dzv93ljPLdGjJzvnRn3KXgtQVjACAAAAAApZ6l+OrocOqgFek7WOqOP9JruTWZ+iW+5zdL3DZwzhkAAzk0AH0AAAAFZAAgAAAAAFbgabdyymiEVYYwtJSWa7lfl/oYuj/SukzJeDOR6wPVBXMAIAAAAADAFGFjS1vPbN6mQEhkDYTD6V2V23Ys9gUEUMGNvMPkaAVjACAAAAAAeICKly5Xtwmfd4JFD+5e1UWpu4V3KoRim7jeBICDs+UAAzk1AH0AAAAFZAAgAAAAABNMR6UBv2E627CqLtQ/eDYx7OEwQ7JrR4mSHFa1N8tLBXMAIAAAAAAxH4gucI4UmNVB7625C6hFSVCuIpJO3lusJlPuL8H5EQVjACAAAAAA0/uh1hrAlGGna/njCVaCqBwubxkifzF0zjCDQrIJOtUAAzk2AH0AAAAFZAAgAAAAAG8cd6WBneNunlqrQ2EmNf35W7OGObGq9WL4ePX+LUDmBXMAIAAAAAAjJ2+sX87NSis9hBsgb1QprVRnO7Bf+GObCGoUqyPE4wVjACAAAAAAAoJ4Ufdq45T9Aun5FupHlaBCIUsmUn6dXgV9KorpFikAAzk3AH0AAAAFZAAgAAAAAFWOUGkUpy8yf6gB3dio/aOfRKh7XuhvsUj48iESFJrGBXMAIAAAAAAY7sCDMcrUXvNuL6dO0m11WyijzXZvPIcOKob6IpC4PQVjACAAAAAAO/QFjczoznH95Iu0YEVMsU1GA1yxSGL8bcwSweNzAtkAAzk4AH0AAAAFZAAgAAAAAGUrIdKxOihwNmo6B+aG+Ag1qa0+iqdksHOjQj+Oy9bZBXMAIAAAAABwa5dbI2KmzBDNBTQBEkjZv4sPaeRkRNejcjdVymRFKQVjACAAAAAAUtdJucZoQvvPJiy65RonQrxBCcvtfHslpbgLbtWirCYAAzk5AH0AAAAFZAAgAAAAAOx89xV/hRk64/CkM9N2EMK6aldII0c8smdcsZ46NbP8BXMAIAAAAADBF6tfQ+7q9kTuLyuyrSnDgmrdmrXkdhl980i1KHuGHgVjACAAAAAAVc5njbLX1afpsuG662U/OBo4LanEk06lKbQtd95fswgAAzEwMAB9AAAABWQAIAAAAADJDdC9aEFl4Y8J/awHbnXGHjfP+VXQilPHJg7ewaJI7AVzACAAAAAAE+tqRl6EcBMXvbr4GDiNIYObTsYpa1n6BJk9EjIJVicFYwAgAAAAAITXOaLyGsYLUvOemQXxvnAHBwM/t3sMwfQus2aGoII+AAMxMDEAfQAAAAVkACAAAAAA3F9rjEKhpoHuTULVGgfUsGGwJs3bISrXkFP1v6KoQLgFcwAgAAAAAIBf0tXw96Z/Ds0XSIHX/zk3MzUR/7WZR/J6FpxRWChtBWMAIAAAAABzRshE3O7JzVjSGQSabvFXpen8sGUCAyr8hIIevROrjgADMTAyAH0AAAAFZAAgAAAAAOYIYoWkX7dGuyKfi3XssUlc7u/gWzqrR9KMkikKVdmSBXMAIAAAAABVF2OYjRTGi9Tw8XCAwZWLpX35Yl271TlNWp6N/nROhAVjACAAAAAAyGLbmHhe8eTQC8L2eDRcezUWULLZ9E8DPic7Mfp8/+4AAzEwMwB9AAAABWQAIAAAAACMtPm12YtdEA
vqu6Eji1yuRXnu1RJP6h0l7pH3lSH4MwVzACAAAAAAENyCFfyUAh1veQBGx+cxiB7Sasrj41jzCGflZkB5cRMFYwAgAAAAAA92s3DQ3aIVOrRzusqU+xdK7cnQGCDJiyLF+su3F1ZDAAMxMDQAfQAAAAVkACAAAAAAvlI4lDcs6GB1cnm/Tzo014CXWqidCdyE5t2lknWQd4QFcwAgAAAAAD60SpNc4O2KT7J0llKdSpcX1/Xxs97N715a1HsTFkmBBWMAIAAAAABR3y/ZE3VZqBhqfUttX4bzW9wkpDoexfYZIG2fMwftcQADMTA1AH0AAAAFZAAgAAAAAKl8zcHJRDjSjJeV/WvMxulW1zrTFtaeBy/aKKhadc6UBXMAIAAAAADBdWQl5SBIvtZZLIHszePwkO14W1mQ0izUk2Ov21cPNAVjACAAAAAAHvQLJ2X4ncjVv1BsOd/jbVouRPwf4222WlGSzPyAfnsAAzEwNgB9AAAABWQAIAAAAABb6LXDWqCp1beQgQjj8I3sRTtFhlrmiBi+h/+ikmrvugVzACAAAAAA9stpgTecT7uTyaGNs3K9Bp0A7R0QaIAOfscyMXHBPX8FYwAgAAAAANTp7TwMatTSeLH3GvsrB3IOvJo+0B8HRGUFcRzJVesdAAMxMDcAfQAAAAVkACAAAAAA97NTT+81PhDhgptNtp4epzA0tP4iNb9j1AWkiiiKGM8FcwAgAAAAAKPbHg7ise16vxmdPCzksA/2Mn/qST0L9Xe8vnQugVkcBWMAIAAAAACVl3ds8xRdGFPAtZ+WtC4ojC1q5OzB6dSJoOLaJsdhhgADMTA4AH0AAAAFZAAgAAAAAMqpayM2XotEFmm0gwQd9rIzApy0X+7HfOhNk6VU7F5lBXMAIAAAAACJR9+q5T9qFHXFNgGbZnPubG8rkO6cwWhzITQTmd6VgwVjACAAAAAAEEOCUo+ihRGl1kuHlabWBWUFyJPAXSXzpQB4od57cMEAAzEwOQB9AAAABWQAIAAAAADQnslvt6Hm2kJPmqsTVYQHE/wWeZ4bE1XSkt7TKy0r1gVzACAAAAAA8URTA4ZMrhHPvlp53TH6FDCzS+0+61qHm5XK6UiOrKEFYwAgAAAAANSmECEsZgXBpoAzAIwaU3H0K/6HIutCn+ELRGBFzykkAAMxMTAAfQAAAAVkACAAAAAAhujlgFPFczsdCGXtQ/002Ck8YWQHHzvWvUHrkbjv4rwFcwAgAAAAALbV0lLGcSGfE7mDM3n/fgEvi+ifjl7WZ5b3aqjDNvx9BWMAIAAAAACKzGyyRT/3KSmGlKMHpswxj7pNu3rCmOETZ2DUhQsCBgADMTExAH0AAAAFZAAgAAAAABm/6pF96j26Jm7z5KkY1y33zcAEXLx2n0DwC03bs/ixBXMAIAAAAAD01OMvTZI/mqMgxIhA5nLs068mW+GKl3OW3ilf2D8+LgVjACAAAAAAnPa8PTfcaSSAdzNAC54IMTicbShbtt/cSnFHz7u7g8wAAzExMgB9AAAABWQAIAAAAACfw9/te4GkHZAapC9sDMHHHZgmlTrccyJDPFciOMSOcwVzACAAAAAAIIC1ZpHObvmMwUfqDRPl4C1aeuHwujM1G/yJbvybMNAFYwAgAAAAAKeagivKdHk7wAFEPHyDhsm9mDNWH5UPB+oIIlmfbzSOAAMxMTMAfQAAAAVkACAAAAAAkxHJRbnShpPOylLoDdNShfILeA1hChKFQY9qQyZ5VmsFcwAgAAAAAKidrY+rC3hTY+YWu2a7fuMH2RD/XaiTIBW1hrxNCQOJBWMAIAAAAADH8s64PC+lo8Ul4oujBlb/irtJSwu12V0HbvNzj/9qUQADMTE0AH0AAAAFZAAgAAAAAByuYl8dBvfaZ0LO/81JW4hYypeNmvLMaxsIdvqMPrWoBXMAIAAAAABNddwobOUJzm9HOUD8BMZJqkNCUCqstHZkC76FIdNg9AVjACAAAAAA4SEPY+qEZ2dCu/9485IEVybiM3Z7szXYM+izxklNm14AAzExNQB9AAAABWQAIAAAAABxMy7X5hf7AXGDz3Y/POu1ZpkMlNcSvSP92NOO/Gs7wAVzACAAAAAAHJshWo2T5wU2zvqCyJzcJQKQaHFHpCpMc9oWBXkpUPoFYwAgAAAAANeYm2aAT9f6T9uwQyH4sw2DUQyKTpo/CHt+Bb67ao4TAAMxMTYAfQAAAAVkACAAAAAAlqbslixl7Zw3bRlibZbe/WmKw23k8uKeIzPKYEtbIy0FcwAgAAAAAHEKwpUxkxOfef5HYvulXPmdbzTivwdwrSYIHDeNRcpcBWMAIAAAAAASUKCgxGu2U42qfKiyMimLPiZBMurf5v8lQJUSrhAWIAADMTE3AH0AAAAFZAAgAAAAAMXrXx0saZ+5gORmwM2FLuZG6iuO2YS+1IGPoAtDKoKBBXMAIAAAAADIQsxCr8CfFKaBcx8kIeSywnGh7JHjKRJ9vJd9x79y7wVjACAAAAAAPzOTJ088k9QdGUpZ1ubUdkw3pTGkNF8bui4a9wqeo08AAzExOAB9AAAABWQAIAAAAAAm83FA9yDUpwkbKTihe7m53u+DivS9BU2b4vQMtCVQ2AVzACAAAAAAz3m1UB/AbZPa4QSKFDnUgHaT78+6iGOFAtouiBorEgEFYwAgAAAAAHuM0gIDED47c9rdQ4MVtVzpVfn5zTF7eokO4lew4EM+AAMxMTkAfQAAAAVkACAAAAAAJRzYK0PUwr9RPG2/7yID0WgcTJPB2Xjccp5LAPDYunkFcwAgAAAAAIIh24h3DrltAzNFhF+MEmPrZtzr1PhCofhChZqfCW+jBWMAIAAAAAD6LppOeSiLHFI7EOBAz9pZPKU7LWbRWIINTlqw4zmJ5gADMTIwAH0AAAAFZAAgAAAAAHSaHWs/dnmI9sc7nB50VB2Bzs0kHapMHCQdyVEYY30TBXMAIAAAAACkV22lhEjWv/9/DubfHBAcwJggKI5mIbSK5L2nyqloqQVjACAAAAAA1I5pLbnlFf/EfJlhE0RFIioDjzKrWNh4TO8H/ksrbMcAAzEyMQB9AAAABWQAIAAAAAAC/I4TQRtCl12YZmdGz17X4GqSQgfwCPgRBwdHmdwu+QVzACAAAAAAx8f3z2ut/RAZhleari4vCEE+tNIn4ikjoUwzitfQ588FYwAgAAAAADX1qVlsvYru6YDBMtmlQoIBhLSbVykBtft8wn/A+ohJAAMxMjIAfQAAAAVkACAAAAAADGB5yU2XT0fse/MPWgvBvZikVxrl5pf3S5K1hceKWooFcwAgAAAAAIxTmlLHMjNaVDEfJbXvRez0SEPWFREBJCT6qTHsrljoBWMAIAAAAAD+RiUk9IWfaDWXtzLp2NX/vCCeU9amFnQgpJjy56dQXQADMTIzAH0AAAAFZAAgAAAAABmO7QD9vxWMmFjIHz13lyOeV6vHT6mYCsWxF7hb/yOjBXMAIAAAAACT9lmgkiqzuWG24afuzYiCeK9gmJqacmxAruIukd0xEAVjA
CAAAAAAyiZxtuOZQNSit4wFMJS3jnqLbDzyJ0OtDTKs6r0EO0cAAzEyNAB9AAAABWQAIAAAAAAfPUoy7QyZKhIIURso+mkP9qr1izbjETqF5s22GwjCjAVzACAAAAAAvLMsIDQ/go4VUxeh50UHmsvMvfx51cwyONnRD2odvC0FYwAgAAAAAMwY/KvSIOLoSHOq4TQ1V9ZSCPZc34SdPooPL4gYXa7UAAMxMjUAfQAAAAVkACAAAAAAE3ZqUar0Bq2zWbARE0bAv98jBlK9UJ73/xcwdMWWlSkFcwAgAAAAAK4M+MmC+9sFiFsumMyJZQKxWmmJiuG9H7IzKw083xxkBWMAIAAAAACpywsi9vc979ApgH3BDzph7Y4VIAkQhji97sswzdLAYwADMTI2AH0AAAAFZAAgAAAAAMo8FJiOq63cAmyk2O7eI7GcbQh/1j4RrMTqly3rexftBXMAIAAAAADjVmpd0WiRGTw/gAqEgGolt2EI7Csv14vKdmYoMD0aAgVjACAAAAAAEPmYjDZnMZdkaYlLZLlRgist9kCrSQe0mSuKxPwwYbQAAzEyNwB9AAAABWQAIAAAAACu5IGaIx7A3Jvly/kzlCsSA4s3iJwuIl8jEdRH0k93NwVzACAAAAAA9NRUyxYE+t0Xyosyt6vIfMFW/vBoYg6sR+jBNs4JAxIFYwAgAAAAAKMbsr4JCCodHmkBvfhOe7BJigKiJik95wLeu3PjLSpXAAMxMjgAfQAAAAVkACAAAAAALkRy0GJInXYLA+cgjs6Myb0a+Gu9hgXhHvhLNoGWfckFcwAgAAAAANbALyt9zCSvwnLaWCd2/y2eoB7qkWTvv1Ldu8r40JPuBWMAIAAAAABi0dME7AUSoeMv4yIgnGMgrX9G+Uk6HV+I9/9zzABcZwADMTI5AH0AAAAFZAAgAAAAAGoUK/DSWhT8LZhszSUqDbTrp8cSA7rdqmADKL+MILtTBXMAIAAAAABHnEE9bVa6lvhfhEMkkV2kzSSxH/sMW/FIJuw3CzWs6wVjACAAAAAAagOaxIb7co+HxAXhfI9ahHLUNqmGuN/4JxeR9sm6PdkAAzEzMAB9AAAABWQAIAAAAAC922ZDQE3h2fQKibGMZ9hV0WNlmrPYYSdtaSyYxsWYqgVzACAAAAAAagMovciKK6WVjIc2cCj8nK5O/gVOFFVeVAJpRp89tmQFYwAgAAAAAAMsWoODZkTvKSNWzloogbp/AiFFspENp49RwKjWpMLHAAMxMzEAfQAAAAVkACAAAAAAtK+JqX3K/z2txjAU15DgX4y90DS2YLfIJFolCOkJJJwFcwAgAAAAAMnR5V7gfX7MNqqUdL5AkWlkhyFXaBRVNej+Rcn8lrQkBWMAIAAAAADOmDHuXwsu9uwKYYSN5XIb/TG3K5DB5aci/X1ATKTaSwADMTMyAH0AAAAFZAAgAAAAAAeGrIMK/bac6kPczxbvRYqKMkcpeI2FjdMpD91FDWIvBXMAIAAAAAAix62z1LeS8yvSXCl5gHSIomjyx76fF3S1lp9k900hygVjACAAAAAAUK1Gzlm0meDhYPL7ZgKaCD0qz7Zb5rtnPn1EEW3UZxoAAzEzMwB9AAAABWQAIAAAAACphf298InM0Us4HT8o1W1MGw0D/02vd7Jh+U0h7qaFaQVzACAAAAAAFXtk7YpqsOJxsqGWSIL+YcBE96G3Zz9D31gPqDW94y8FYwAgAAAAAEA5mnfyccI1SjyYftq3qCuORs/NxzzqUA+fxn3FRjVsAAMxMzQAfQAAAAVkACAAAAAATLDS2cuDVM3yDMuWNgk2iGKBTzPpfJMbvxVOSY39ZfcFcwAgAAAAAPT5wRi2cLHIUflXzm6EQB/m7xdThP80ir1VV/JBBqvxBWMAIAAAAABTj7i+CxxNVkbnAkSYga/v2X3/NH3nD/FRiOo2FPkzvAADMTM1AH0AAAAFZAAgAAAAAJNjExiZVX7jfFGfYpQu16qxLN0YPqVU/5CQ/Y67YSinBXMAIAAAAABMpm2+6KrkRUlXzQoMPHrQmIO6dkQz66tYdfTeA3dKqQVjACAAAAAATkNGECGUFzo34ItVnTEmxbe8dbde8QzMgEEdjMhRLz8AAzEzNgB9AAAABWQAIAAAAABlFkYtLCx901X6QVVMkSn6Z7k30UF4xHaA0OZJJ9bdyQVzACAAAAAATez+F9GHcGzTp7jjv4feboUNb8JCkIp4EqcPFisnq7MFYwAgAAAAAOHfn3eVRhSWn61AfZuAnCGleSLluJ3sF9IFGUWrCsBaAAMxMzcAfQAAAAVkACAAAAAA8NJKN0IxZnruhswGQkiruv8Ih0EMwDcSZx/Xasup9dkFcwAgAAAAAKaJZRxzA+Igeydvuk6cSwUHXcrmT4PjhuPu//FslpdnBWMAIAAAAAC3Cs/mKg+sbU/BC9QWFglOau6FWAP1sJqqSZOEOdzh+gADMTM4AH0AAAAFZAAgAAAAABHxHdEClz7hbSSgE58+dWLlSMJnoPz+jFxp4bB1GmLQBXMAIAAAAAD3nSvT6aGD+A110J/NwEfp0nPutlmuB5B+wA3CC3noGAVjACAAAAAA56lpvjMjFzR6NMGiSF1es1X01dwINWqaxyq3UUL8XJ0AAzEzOQB9AAAABWQAIAAAAAC/o8qW/ifk3KuJ01VFkyNLgQafxB5/bGs2G5VyyVafOwVzACAAAAAA1bMqAFGDHSl6BYNLbxApvkAv2K1/oafywiX0MDz1dGUFYwAgAAAAAMjJrrd6E5DyUuXkWZsAqo020Qvom6xqAluQLod5SmtdAAMxNDAAfQAAAAVkACAAAAAAY2V8I1bz3a1AxTtmED6UhdhA09huFkuuEX8R+d/WDPUFcwAgAAAAAPTVoNRiI76tcRKqd+JBBVyy4+YcKST42p0QX2BtmQ2VBWMAIAAAAAD0kQXxOWWOfIzqhJk5Rnvp/h5VXcGxxLZ8HMvVVH++YQADMTQxAH0AAAAFZAAgAAAAAO2kSsW0WGN9AOtK4xK2SHrGhWiaAbMEKT4iZkRpaDN/BXMAIAAAAABKGzQcPM8LT2dwOggxoWjv/1imYWabbG/G4kBw8OWaxAVjACAAAAAAT3sD7PTrAPtOtiSRrG4IKmSNr/uWgX8TvIyZpZzj4wkAAzE0MgB9AAAABWQAIAAAAAAiCwzNEEaH/mDam68IdDftnhthyUFdb+ZCNSBQ91WlHQVzACAAAAAA7tHyHcxCzmbJeFYZyPm4mEgkTGKOvwY4MX82OvH0Jn8FYwAgAAAAAP4A3nd470CwvaGzyJLfzbrw7ePJV35V3Cw6tuiPFlsCAAMxNDMAfQAAAAVkACAAAAAAGuCHVNJSuoVkpPOnS5s89GuA+BLi2IPBUr2Bg1sWEPIFcwAgAAAAAEl1gncS5/xO7bQ/KQSstRV3rOT2SW6nV92ZANeG2SR6BWMAIAAAAADi6/zM3xSBXEfpRpvEdeycRDl08SmwwLey72qgZaY7FwADMTQ0AH0AAAAFZAAgAAAAAMfxz7gEaoCdPvXr
ubDhCZUS0ARLZc1svgbXgMDlVBPgBXMAIAAAAAB6a5dDA3fuT5Vz2KvAcbUEFX/+B7Nw2p1QqbPoQ5TTuAVjACAAAAAAB4Kp5ewWpFMJ2T+QCrfw2eJV70L4M+GM8khV1JwqOqQAAzE0NQB9AAAABWQAIAAAAACnYsqF/VzmjIImC9+dqrHO1TM6lJ6fRwM0mM6Wf6paOwVzACAAAAAA5tgZzch8uDCR1ky3SllVaKVpxAlbrhvlNDTazZZRZOAFYwAgAAAAALDJDWkmExE4L8hf2CDNLuF+dgivUGXb7ZvBp90EALm+AAMxNDYAfQAAAAVkACAAAAAAEC0sIVmadtW4YMuRXH7RpAhXclsd+3bmqGXCMeaT014FcwAgAAAAABPpXh0uzpsJJB+IRUNajmMB9WGwswfpw5T9xk3Xj6ANBWMAIAAAAABauth7HsI90juToIGY6149TbrxP8Emoa+5Izilj9zeWQADMTQ3AH0AAAAFZAAgAAAAABaIB3n49Xm9cOafSrQsE0WCcYp8rMIO/qVwIlMF5YLRBXMAIAAAAAC9EyWJV3xOu9bzgdJ/yX+ko7qLf1u3AxNMataW2C9EzQVjACAAAAAAy7IAYSgbqP7CyvurHMoEq1eO08TIPNO/XcS1L5RKXHkAAzE0OAB9AAAABWQAIAAAAAA5rZItA/cocRnngYqcJ3nBXQ+l688aKz3EQyLbYYunPAVzACAAAAAAwKyA+L7TgxztPClLrIMk2JXR+w7c04N3ZOqPgjvrIvsFYwAgAAAAABaAwqPDgkTaz4888/3PeF64+vtHbLtAZgFgEurWT5kbAAMxNDkAfQAAAAVkACAAAAAA3htn7oHJ0YYpIrs+Mzyh85Ys67HwAdv5LQl1mCdoMWkFcwAgAAAAAEHjCtNNLenHuSIYux6ezAHsXDaj2DlTF67ToDhDDe6HBWMAIAAAAAAVwUZIMifuPtmiFSEctLt+bOFo1T4fgGiWNsZAP0ddeQADMTUwAH0AAAAFZAAgAAAAAEnt18Km/nqggfIJWxzTr9r3hnXNaueG6XO9A5G11LnGBXMAIAAAAAD7QxzGMN/ard5TfFLecE6uusMmXG2+RBsBR+/NCQHUwAVjACAAAAAAIPTDniUskKj5mEzULdlKtIDquwjcqkwunJyhmBazNNcAAzE1MQB9AAAABWQAIAAAAAAVAKK/GoY8AACu/hyMpO4hdLq6JnEyWNzkyci9sbaD/wVzACAAAAAA2HmeqpMlvvBpV2zQTYIRmsc4MFlfHRwLof0ycJgMg/MFYwAgAAAAACHHCAszhxijRN9Es+XRQxXo7JQ6g2qM6f5sIdMy0VKbAAMxNTIAfQAAAAVkACAAAAAAhHyq1GQC/GiMwpYjcsfkNxolJ10ARKjIjfkW1Wipzi0FcwAgAAAAAD/uaGWxTDq87F8XZ6CrFI+RNa8yMqfSZdqK00Kj833BBWMAIAAAAAD1mJD7i29l2xloVHwS1n0CbkyodWeFfZM1KF1r4HqNIgADMTUzAH0AAAAFZAAgAAAAABAJJxHoZD+MQBWqm9UM9Dd3z5ZohIZGWRaRVRsMptKQBXMAIAAAAADrE/ca+gqj/SH4oao4wE4qn2ovoTydzcMbDbrfnUs3zAVjACAAAAAAAIUpF9mApVCMCckE7S+K9RL6TPtlTTYuzUsj9E6oIpUAAzE1NAB9AAAABWQAIAAAAAAv01wz7VG9mTepjXQi6Zma+7b/OVBaKVkWNbgDLr1mFgVzACAAAAAA0I5sxz8r6wkCp5Tgvr+iL4p6MxSOq5d3e1kZG+0b7NkFYwAgAAAAAKwOaUTAOw6Y1F5vFrbDf8yVua5Fm6hKhkdIrzejuaatAAMxNTUAfQAAAAVkACAAAAAAWXecRwxSon68xaa9THXnRDw5ZfzARKnvvjTjtbae6T0FcwAgAAAAAPh0UfUMEo7eILCMv2tiJQe1bF9qtXq7GJtC6H5Va4fIBWMAIAAAAAASanx14WR2rmomuZ2x/nuD8j75pYzz+ZAt/v7uRcoOEQADMTU2AH0AAAAFZAAgAAAAAEY5WL8/LpX36iAB1wlQrMO/xHVjoO9BePVzbUlBYo+bBXMAIAAAAABoKcpadDXUARedDvTmzUzWPe1jTuvD0z9oIcZmKuiSXwVjACAAAAAAWhdUWP186dYAIDx6RtC0o/lzwYNvYBXm4RWFMbcU3YkAAzE1NwB9AAAABWQAIAAAAADnJnWqsfx0xqNnqfFGCxIplVu8mXjaHTViJT9+y2RuTgVzACAAAAAAWAaSCwIXDwdYxWf2NZTly/iKVfG/KDjHUcA1BokN5sMFYwAgAAAAAPS3GDNsHgCuTgEWTHiEStoY0VMzlopZ8L7wPUquK7ayAAMxNTgAfQAAAAVkACAAAAAAsPG7LaIpJvcwqcbtfFUpIjj+vpNj70Zjaw3eV9T+QYsFcwAgAAAAAJQ71zi0NlCyY8ZQs3IasJ4gB1PmWx57HpnlCf3+hmhqBWMAIAAAAABmDfQAQeyeutCB+wSn4RmErwYEkYpyBeCYA+3iutFzAQADMTU5AH0AAAAFZAAgAAAAAAGiSqKaQDakMi1W87rFAhkogfRAevnwQ41onWNUJKtuBXMAIAAAAAASgiDpXfGh7E47KkOD8MAcX8+BnDShlnU5JAGdnPdqOAVjACAAAAAAucFM6KF1YHEhH9vn16ox+VSqNEykMGaJzhLPsLI7qk8AAzE2MAB9AAAABWQAIAAAAAB7L4EnhjKA5xJD3ORhH2wOA1BvpnQ+7IjRYi+jjVEaJAVzACAAAAAAuhBIm0nL3FJnVJId+7CKDASEo+l2E89Z9/5aWSITK4AFYwAgAAAAALxpb5qDek038/wz4E//KIrwwo0voaqwlB7cTXSQ44EHAAMxNjEAfQAAAAVkACAAAAAAaROn1NaDZFOGEWw724dsXBAm6bgmL5i0cki6QZQNrOoFcwAgAAAAANVT8R6UvhrAlyqYlxtmnvkR4uYK/hlvyQmBu/LP6/3ZBWMAIAAAAAC1ONheOA57vNof7iZM3b82xk1abqT0bppzjQnHCmnUIwADMTYyAH0AAAAFZAAgAAAAADqSR5e0/Th59LrauDA7OnGD1Xr3H3NokfVxzDWOFaN7BXMAIAAAAACt30faNwTWRbvmykDpiDYUOCwA6QDbBBYBFWS7rdOB4AVjACAAAAAAQZlvU/gzOwW2rcr5+W43Q6EkfX8oX1TpRJWEAZAULx4AAzE2MwB9AAAABWQAIAAAAADmtb1ZgpZjSeodPG/hIVlsnS8hoRRwRbrTVx89VwL62AVzACAAAAAAi38e1g6sEyVfSDkzZbaZXGxKI/zKNbMasOl2LYoWrq8FYwAgAAAAAFWkHkxPA28FjeUjeRILC0tX8OJcanhPP68QxM5s1kcMAAMxNjQAfQAAAAVkACAAAAAAf/f7AWVgBxoKjr7YsEQ4w/fqSvuQWV2HMiA3rQ7ur0sFcwAgAAAAADkkeJozP6FFhUdRIN74H4UhIHue+eVbOs1NvbdWYFQrBWMAIAA
AAADS3qKdAEzPtGMFpsisEhnOfF0zfxmLXjHKKaNCzX4PhQADMTY1AH0AAAAFZAAgAAAAAI2WEOymtuFpdKi4ctanPLnlQud+yMKKb8p/nfKmIy56BXMAIAAAAADVKrJmhjr1rfF3p+T+tl7UFd1B7+BfJRk0e7a4im7ozgVjACAAAAAAxeqPjcgDwae+AgEXg3hU118dHoy6rcHd6jAJrGik+EcAAzE2NgB9AAAABWQAIAAAAAAuHU9Qd79hjyvKOujGanSGDIQlxzsql8JytTZhEnPw+AVzACAAAAAAjF2gV/4+sOHVgDd/oR5wDi9zL7NGpGD+NsEpGXy/a4QFYwAgAAAAAIFYquIAyUn3D2peptolq3s8oIgSLEIctqBYx6qEeQTxAAMxNjcAfQAAAAVkACAAAAAA7dQDkt3iyWYCT94d7yqUtPPwp4qkC0ddu+HFdHgVKEkFcwAgAAAAANuYvtvZBTEq4Rm9+5eb7VuFopowkrAuv86PGP8Q8/QvBWMAIAAAAABBO8poBKW/RWjdHBARy3Rzghz3frQIZEPE/nSKWYlVPgADMTY4AH0AAAAFZAAgAAAAAAwnZSDhL4tNGYxlHPhKYB8s28dY5ScSwiKZm3UhT8U3BXMAIAAAAABDoY6dhivufTURQExyC9Gx3ocpl09bgbbQLChj3qVGbgVjACAAAAAA4JWOs1XAUgX3rGBBGEDDIC9/i9khJ+FgIcwfvj/SnS0AAzE2OQB9AAAABWQAIAAAAAC2VuRdaC4ZJmLdNOvD6R2tnvkyARteqXouJmI46V306QVzACAAAAAAMn1Z6B35wFTX9mEYAPM+IiJ5hauEwfD0CyIvBrxHg7IFYwAgAAAAAMRQnV65wbGlT+7Wj/HvGPLKg8NBnhkQ8Hx/MIyWvynTAAMxNzAAfQAAAAVkACAAAAAAVerb7qVNy457rNOHOgDSKyWl5ojun7iWrv1uHPXrIZQFcwAgAAAAAIDcYS9j5z+gx0xdJj09L7876r/vjvKTi/d3bXDE3PhyBWMAIAAAAAANDuaZsI2eS6/qiSdG9pCLjcPdKS96xWUJr84V/1La7gADMTcxAH0AAAAFZAAgAAAAAH/E44yLxKCJjuSmU9A8SEhbmkDOx1PqqtYcZtgOzJdrBXMAIAAAAABgLh9v2HjBbogrRoQ82LS6KjZQnzjxyJH4PH+F3jupSAVjACAAAAAAd93miIlZ/uuhzjVBt0BPjzeewxih6TSzkzbsNhpMjA8AAzE3MgB9AAAABWQAIAAAAAAlNUdDL+f/SSQ5074mrq0JNh7CTXwTbbhsQyDwWeDVMwVzACAAAAAANIH2IlSNG0kUw4qz0budjcWn8mNR9cJlYUqPYdonucAFYwAgAAAAAF9OccClBu1j8BVMDmMtJyCxnhWCbg9FuY/8nhuFGJknAAMxNzMAfQAAAAVkACAAAAAASZ+CvUDtlk/R4HAQ3a+PHrKeY/8ifAfh0oXYFqliu80FcwAgAAAAAJelpzPgM65OZFt/mvGGpwibclQ49wH+1gbUGzd9OindBWMAIAAAAAC63uwNXVbd1VP5If++NprVRsHCbomU9Iq3GxCttNAweAADMTc0AH0AAAAFZAAgAAAAAN9fkoUVbvFV2vMNMAkak4gYfEnzwKI3eDM3pnDK5q3lBXMAIAAAAACnDkgVNVNUlbQ9RhR6Aot2nVy+U4km6+GHPkLr631jEAVjACAAAAAA5d2REAVBZI9rUfeqJ9jhePmRXvrGolUbMTFiC78k/OIAAzE3NQB9AAAABWQAIAAAAAASyK3b1nmNCMptVEGOjwoxYLLS9fYWm/Zxilqea0jpEQVzACAAAAAADDHsGrbqlKGEpxlvfyqOJKQJjwJrzsrB7k3HG0AUJbkFYwAgAAAAACZHzWGY3PEL4uPSKPezGGJkyScYw/Vt3bKmX5inVMaOAAMxNzYAfQAAAAVkACAAAAAAlSP9iK31GlcG9MKGbLmq+VXMslURr+As736rrVNXcsUFcwAgAAAAAAvbj0zfq9zzi8XReheKFbCB+h9IsOLgXPPpI5vrEJNZBWMAIAAAAABb9Y2Ox3MeHCfGnMxpR7nk6hwIJg0J8/tT8fZHwxMCBQADMTc3AH0AAAAFZAAgAAAAAOJNORH8Bev97gVU7y6bznOxJ+E6Qoykur1QP76hG1/7BXMAIAAAAAC+C1PtOOrSZgzBAGhr+dPe/kR0JUw9GTwLVNr61xC1aAVjACAAAAAA4rDk/UlRHudzR12i5Ivcb5Pxwn63JX2Sjf2xrlALL7kAAzE3OAB9AAAABWQAIAAAAAAKcHzLUomavInN6upPkyWhAqYQACP/vdVCIYpiy6U6HgVzACAAAAAATsR4KItY6R2+U7Gg6sJdaEcf58gjd1OulyWovIqfxKcFYwAgAAAAAEac2NX3v0n87D/eUM/a3Gj+5Axs896SCidMyOMaOz6zAAMxNzkAfQAAAAVkACAAAAAAqTOLiMpCdR59tLZzzIPqJvbCNvz2XQL9ust0qYaehtcFcwAgAAAAAArefox/3k5xGOeiw2m6NUdzuGxmPwcu5IFcj+jMwHgHBWMAIAAAAABK08kOnYjDWhLJ70iVhd3XEj0Q0srEQttJTawBm6GuOwADMTgwAH0AAAAFZAAgAAAAAIPSqSeVzSRgNVNmrPYHmUMgykCY27NbdDUNhE5kx/SgBXMAIAAAAAAhX90nNfxyXmZe/+btZ7q6xMX4PFyj0paM1ccJ/5IUUQVjACAAAAAA/9XTbq8UVKJbIeYtfNg8pdDrpDqbCwVr5Rq3Tb4dFwwAAzE4MQB9AAAABWQAIAAAAACLn5DxiqAosHGXIAY96FwFKjeqrzXWf3VJIQMwx1fl4gVzACAAAAAAindvU27nveutopdvuHmzdENBbeGFtI3Qcsr07jxmvm8FYwAgAAAAAJ3GMHBeIaCyFV5zBgcUMpjfIDcdoaTYQSyQN2shVJMWAAMxODIAfQAAAAVkACAAAAAA7i2S6rHRSPBwZEn59yxaS7HiYBOmObIkeyCcFU42kf8FcwAgAAAAAGb3RSEyBmgarkTvyLWtOLJcPwCKbCRkESG4RZjVmY4iBWMAIAAAAABKIdcHRSJ3dnevw5Pj0BfULPqmDOA7KBDREMGv5rXUEAADMTgzAH0AAAAFZAAgAAAAAFz+M+H/Z94mdPW5oP51B4HWptp1rxcMWAjnlHvWJDWrBXMAIAAAAACBFEOQyL7ZHu4Cq33QvXkmKuH5ibG/Md3RaED9CtG5HwVjACAAAAAABf2EvIMumZohp2E/sHWEJW4VMU3ez95hwWU9pjKGRCYAAzE4NAB9AAAABWQAIAAAAAAE7c2x3Z3aM1XGfLNk/XQ9jCazNRbGhVm7H8c2NjS5ywVzACAAAAAARJ9h8fdcwA19velF3L/Wcvi2rCzewlKZ2nA0p8bT9uwFYwAgAAAAAPl1vH6JaJGZSJ8au0NK58X4gqleryub8slUhA0HRtzmAAMxODUAfQAAAAVkACAAAAAAVoRt9B9dNVvIMGN+ea
5TzRzQC+lqSZ8dd/170zU5o9cFcwAgAAAAAEwM95XZin5mv2yhCI8+ugtKuvRVmNgzzIQN0yi1+9aIBWMAIAAAAAA+rgph1avVrhyHl/6G+HtASJSiJkv7u2UPYj+25fwtLAADMTg2AH0AAAAFZAAgAAAAAKRDkjyWv/etlYT4GyoXrmBED2FgZHnhc+l9Wsl06cH2BXMAIAAAAABohlpm3K850Vndf3NmNE0hHqDlNbSR8/IvMidQ3LnIZAVjACAAAAAAKw6WqWDrI7ZfyndtrftL3uh7BaF/FQxhkh8b0Zl547gAAzE4NwB9AAAABWQAIAAAAAC3DFh5oklLCNLY90bgWm68dFXz65JpAZSp1K99MBTPAQVzACAAAAAAQgZecmxEUZVHoptEQClDwAf8smI3WynQ/i+JBP0g+kQFYwAgAAAAADmUpVNJUwrydIllFvI/aau1703nk6tF2sfzkgFlWsKCAAMxODgAfQAAAAVkACAAAAAAQ5WKvWSB503qeNlOI2Tpjd5blheNr6OBO8pfJfPNstcFcwAgAAAAAKwHgQLSDJ5NwLBQbY5OnblQIsVDpGV7q3RCbFLD1U4/BWMAIAAAAADKOab9GTEiaybo9o4J72Pb9LiQU/CuKoQm6Jtlmo/IeQADMTg5AH0AAAAFZAAgAAAAAGfhFY3RGRm5ZgWRQef1tXxHBq5Y6fXaLAR4yJhrTBplBXMAIAAAAACKEF0ApLoB6lP2UqTFsTQYNc9OdDrs/vziPGzttGVLKQVjACAAAAAAvJcnqBNCZgHe+Q7Cqk+yiAaIuSJIRItXEsAlKy9PES4AAzE5MAB9AAAABWQAIAAAAAAIM73gPcgzgotYHLeMa2zAU4mFsr7CbILUZWfnuKSwagVzACAAAAAAJCSu98uV8xv88f2BIOWzt6p+6EjQStMBdkGPUkgN79cFYwAgAAAAABQqDP10DcaqEY7vxFS7U/9xcadDASONow1+xmVsPZ11AAMxOTEAfQAAAAVkACAAAAAAkWmHCUsiMy1pwZTHxVPBzPTrWFBUDqHNrVqcyyt7nO8FcwAgAAAAAMv2CebFRG/br7USELR98sIdgE9OQCRBGV5JZCO+uPMgBWMAIAAAAABxLEFq6nFVI3WaYhggo1d6oeDsBSm4wCW0T6D0yE/puwADMTkyAH0AAAAFZAAgAAAAAGInUYv0lP/rK7McM8taEHXRefk8Q2AunrvWqdfSV7UaBXMAIAAAAACE+WPxJ3gan7iRTbIxXXx+bKVcaf8kP4JD8DcwU0aL7wVjACAAAAAAiThzC5jAZ5D6xuCIsUD1aLy7KxmT2Fb8ybTkLxvj17MAAzE5MwB9AAAABWQAIAAAAACmHlg2ud3cplXlTsNTpvNnY6Qm1Fce0m899COamoDjaQVzACAAAAAArtJQeJIlepBWRU2aYar7+YGYVQ7dfDc1oxgTmA8r9q0FYwAgAAAAADZpcxtBFv18EdmhxsoxlDs253tNB5zqVY/h+nFD1isIAAMxOTQAfQAAAAVkACAAAAAApzcWSAbZWV/Rq+ylRNqqlJqNVR4fhXrz4633/MQOQgcFcwAgAAAAAN/jz/bsEleiuCl+li83EWlG6UMHA8CyaOMRKCkXkSCPBWMAIAAAAADSTHgALJugYsjIvSK1o0UwG6VizNokXQ/tnPNjOpGB1AAABWUAIAAAAADrmnP3kS2GpCl+gdL2da90KHTkBX46iQ/sZRoj7uPz7BJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAAA==", + "subType": "06" + } + } + } + }, + "limit": 1 + } + ], + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimal", + "bsonType": "decimal", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + } + }, + "deleteTokens": { + "default.default": { + "encryptedDecimal": { + "e": { + "$binary": { + "base64": "65pz95EthqQpfoHS9nWvdCh05AV+OokP7GUaI+7j8+w=", + "subType": "00" + } + }, + "o": { + "$binary": { + "base64": "noN+05JsuO1oDg59yypIGj45i+eFH6HOTXOPpeZ//Mk=", + "subType": "00" + } + } + } + } + } + } + }, + "command_name": "delete" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": { + "$numberInt": "0" + }, + "encryptedDecimal": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "5nRutVIyq7URVOVtbE4vM01APSIajAVnsShMwjBlzkM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "rbf3AeBEv4wWFAKknqDxRW5cLNkFvbIs6iJjc6LShQY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "n+XAuFnP8Dov9TnhGFxNx0K/MnVM9WbJ7RouEu0ndO0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yRXojuVdn5GQtD97qYlaCL6cOLmZ7Cvcb3wFjkLUIdM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "DuIkdRPITRs55I4SZmgomAHCIsDQmXRhW8+MOznkzSk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SsBk+Et1lTbU+QRPx+xyJ/jMkmfG+QCvQEpip2YYrzA=", + "subType": "00" + } + }, + { + 
"$binary": { + "base64": "crCIzOd8KhHvvUlX7M1v9bhvU4pLdTc+X2SuqoKU5Ek=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "YOWdCw4UrqnxkAaVjqmC4sKQDMVMHEpFGnlxpxdaU6E=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "M3SShp81Ff8tQ632qKbv9MUcN6wjDaBReI0VXNu6Xh4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "gzHlSPxpM0hT75kQvWFzGlOxKvDoiKQZOr19V6l2zXI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "s3JnppOGYw9SL2Q1kMAZs948v2F5PrpXjGei/HioDWs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cG6+3Gk/zEH68P/uuuwiAUVCuyJwa1LeV+t29FlPPAo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dupdvR3AyJtM+g9NDKiaLVOtGca387JQp8w+V03m7Ig=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JqEQc5svj2jTvZ6LLA5ivE+kTb/0aRemSEmxk4G7Zrg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "szcXXXKnob+p3SoM4yED2R920LeJ7cVsclPMFTe4CeI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "o1QoGVXmuBdHwHm7aCtGMlMVKrjFdYvJXpoq6uhIAZ0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Jfm5wPlqqLCJRGQIqRq2NGmpn7s0Vrih2H3YAOoI2YU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zMHLb8ARbsYo8Ld05bqnGFf1Usha6EGb8QKwdSAyps0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yQdtq9lh5pugL7/i0Bj/PuZUUBUIzf+7wj1rl5y736w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wGWVZdO7qIuyDg/BqDgqjgoQ02h5YYgwXQB1oCin2NE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "by9HMLj6NTEpgztZ5HSN6GxImkXPcaFINYDzgZY33X8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tWo0vbasi7bXmn/MsOx13VC1IsWtpx/nYp0uj4iMzdA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tQQpndUYd5O87lOtrGjH3wl9VsOK0ray7RMasL90sBM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cQjXEDCMsOpKLLf+vlTgIHA+cbSJdzqhbSX9Wvh95aA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "7yMpU48IxK9SzP2cx3VnTownGEwFmeFofuuFT97SuuY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kSOx1kz0CmBgzKQHZlo65ZUY1DIv9A99JRm+Us2y6Ew=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ubQpdPBe6/xvtr+AcXdfYLSvYCR4ot0tivehkCsupb4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xal+iCJ6FTefRQToyoNksc9NCZShyn04NDGi4IYrcoM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "d7jU4iOK50xHxlkSifcxlZFCM46TSgQzoYivxG3HNLY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tJvl2nsBLBVzL3pp6sKWCL4UXeh3q/roYBJjSb74ve0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "OIUCaKRvIx9t1w6Hxlz1IcQTdPNCfdRNwnnTm10W+X0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "A9tvzsiElotOUVIB4CqfQp9mAwqvTM35YkmAR170aHA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "lI8gpK7hpb7c9x4RQugsxMnQay5LZJmwslZdvMx/dcE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dNCzh40U0XvdKnSDi3HRQOWQftEsDVqc4uUvsVFGoq8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "IP+iwEBWBwVVZIdpaMu8k5+soFCz+TZkYn3drKZ9grE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pnqyh6e0y5svHkJDShlN9CHV0WvMBE4QbtJpQw5ZCXc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "elEl42tbVDoRTLjAhZUFEtXiut4b3PVhg/1ZLZSQdtE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "vHuu2FxwclMHqyE6JBYbTYgbEkB0dqb/JuaxsvfwsmY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xTf7NCe3Gf8QpE78HR5OknlLTKfs9J+RN9UZpH6fnso=", + "subType": "00" + } + }, + { + "$binary": { + "base64": 
"XiWSasRnJAulGR6+LCVD3mwRObXylqYWR9jvpywq12c=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "MZMxEQ5ikx0PG1YFIExv0UnTZogsvgeOEZTpzvBDn4w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yZMyMZBDrWbAhvnic7vvIYhmO9m5H2iuv0c8KNZrBzY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xxM14hTPY5j0vvcK2C7YAEjzdsfUTFHozHC0hEo1bxI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+01rqR1xVwkpGXcstbk1ItJqFVjH6Q8MGxEN3Cm9Y1A=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xOpLV0Z2VTRJ3iWtnWZcsyjXubTIkYWo31cO+HV1o1k=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "BWUOLqgLBqc5NwxVlSV5H3KFQPXbCp7mdo+jF+8cJqY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "fuQb1S6xZDGlrEbK+kI23aL53PP1PVNwqICnZNt9Yzg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SfscnoibFttahLdPVC4Ee+47ewGFKpDSU7M6HX19bKE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "rpSW2awybNVeKtat91VFxqbINoTfNhPfQAu+d73Xtf8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "9M/CP9ccOIIj2LLFmE0GFDO0Ban2wsNalEXfM6+h+1s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "WrEMG49l1ye4MhXs5ZS9tz8P6h+hDvthIg/2wW9ne1Q=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ImNhbfeyfH8qIEeA5ic0s3dAQBdzzTBS+CPsNih9vZ0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dWP33YDSn04UKJN2ogh2Rui0iW/0q2y18OCDRVcfyoo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "lYv0isAtfGh6H9tdp3cp2eHU7q2J+uk7QrgcxtK3w7Y=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "VGMoamB/+7zTOYcY/pqJc96xlv2PdW4hwsIAEIslTDQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yNeBWMF7BnD9wVwz2PgJsvWr77QiVvvWUvJF0+fqBug=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SfpvObJ+tJBXSvqeN7vlOfmhYign635lciYAJIjUtY8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dsen4NqjzVGjpjufiTMs3+gqeD09EbnuogPgxrJECwg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pxCWVM3sn19NsFEpgHbgLa+PmYlhN3mMiP0Wk8kJhYw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q11KNvJszjYIB9n9HcC+N4uz11a3eRj1L3BH9scKMDQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "A1PmkgcEToWh1JiVWE6mI5jUu7poxWWuCUt/cgRUUDc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "qJo3Hu4PJeanL7XEaWXO/n3YsodhZyd+MJOOmB9Kpd8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "BkBKLO8URFscfRY9Bav/1+L9mLohDgNr/MkZtGiraIs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "rZq5WA3Hx3xthOyHAJXK//f8pE2qbz7YKu3TIMp9GFY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "X07a/Lm80p5xd4RFs1dNmw+90tmPDPdGiAKVZkxd4zY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6YrBn2ofIw1b5ooakrLOwF41BWrps8OO0H9WH4/rtlE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "0l86Ag5OszXpa78SlOUV3K9nff5iC1p0mRXtLg9M1s4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Hn6yuxFHodeyu7ISlhYrbSf9pTiH4TDEvbYLWjTwFO0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zdf4y2etKBuIpkEU1zMwoCkCsdisfXZCh8QPamm+drY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "rOQ9oMdiK5xxGH+jPzOvwVqdGGnF3+HkJXxn81s6hp4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "61aKKsE3+BJHHWYvs3xSIBvlRmKswmaOo5rygQJguUg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "KuDb/GIzqDM8wv7m7m8AECiWJbae5EKKtJRugZx7kR0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Q+t8t2TmNUiCIorVr9F3AlVnX+Mpt2ZYvN+s8UGict8=", + 
"subType": "00" + } + }, + { + "$binary": { + "base64": "tJRZIpKxUgHyL83kW8cvfjkxN3z6WoNnUg+SQw+LK+k=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pnUsYjip8SvW0+m9mR5WWTkpK+p6uwJ6yBUAlBnFKMk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "PArHlz+yPRYDycAP/PgnI/AkP8Wgmfg++Vf4UG1Bf0E=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wnIh53Q3jeK8jEBe1n8kJLa89/H0BxO26ZU8SRIAs9Q=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "4F8U59gzBLGhq58PEWQk2nch+R0Va7eTUoxMneReUIA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ihKagIW3uT1dm22ROr/g5QaCpxZVj2+Fs/YSdM2Noco=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "EJtUOOwjkrPUi9mavYAi+Gom9Y2DuFll7aDwo4mq0M0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dIkr8dbaVRQFskAVT6B286BbcBBt1pZPEOcTZqk4ZcI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "aYVAcZYkH/Tieoa1XOjE/zCy5AJcVTHjS0NG2QB7muA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "sBidL6y8TenseetpioIAAtn0lK/7C8MoW4JXpVYi3z8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "0Dd2klU/t4R86c2WJcJDAd57k/N7OjvYSO5Vf8KH8sw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "I3jZ92WEVmZmgaIkLbuWhBxl7EM6bEjiEttgBJunArA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "aGHoQMlgJoGvArjfIbc3nnkoc8SWBxcrN7hSmjMRzos=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "bpiWPnF/KVBQr5F6MEwc5ZZayzIRvQOLDAm4ntwOi8g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tI7QVKbE6avWgDD9h4QKyFlnTxFCwd2iLySKakxNR/I=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "XGsge0CnoaXgE3rcpKm8AEeku5QVfokS3kcI+JKV1lk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JQxlryW2Q5WOwfrjAnaZxDvC83Dg6sjRVP5zegf2WiM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "YFuHKJOfoqp1iGVxoFjx7bLYgVdsN4GuUFxEgO9HJ5s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Z6vUdiCR18ylKomf08uxcQHeRtmyav7/Ecvzz4av3k4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SPGo1Ib5AiP/tSllL7Z5PAypvnKdwJLzt8imfIMSEJQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "m94Nh6PFFQFLIib9Cu5LAKavhXnagSHG6F5EF8lD96I=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pfEkQI98mB+gm1+JbmVurPAODMFPJ4E8DnqfVyUWbSo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "DNj3OVRLbr43s0vd+rgWghOL3FqeO/60npdojC8Ry/M=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kAYIQrjHVu49W8FTxyxJeiLVRWWjC9fPcBn+Hx1F+Ss=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "aCSO7UVOpoQvu/iridarxkxV1SVxU1i9HVSYXUAeXk4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Gh6hTP/yj1IKlXQ+Q69KTfMlGZjEcXoRLGbQHNFo/1s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "/gDgIFQ4tAlJk3GN48IS5Qa5IPmErwGk8CHxAbp6gs0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "PICyimwPjxpusyKxNssOOwUotAUbygpyEtORsVGXT8g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "4lu+cBHyAUvuxC6JUNyHLzHsCogGSWFFnUCkDwfQdgI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pSndkmoNUJwXjgkbkgOrT5f9nSvuoMEZOkwAN9ElRaE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tyW+D4i26QihNM5MuBM+wnt5AdWGSJaJ4X5ydc9iWTU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "9Syjr8RoxUgPKr+O5rsCu07AvcebA4P8IVKyS1NVLWc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "67tPfDYnK2tmrioI51fOBG0ygajcV0pLo5+Zm/rEW7U=", + "subType": "00" + } + }, + { + "$binary": { + "base64": 
"y0EiPRxYTuS1eVTIaPQUQBBxwkyxNckbePvKgChwd0M=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "NWd+2veAaeXQgR3vCvzlI4R1WW67D5YsVLdoXfdb8qg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "PY5RQqKQsL2GqBBSPNOEVpojNFRX/NijCghIpxD6CZk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "lcvwTyEjFlssCJtdjRpdN6oY+C7bxZY+WA+QAqzj9zg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CWE7XRNylvTwO/9Fv56dNqUaQWMmESNS/GNIwgBaEI0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ijwlrUeS8nRYqK1F8kiCYF0mNDolEZS+/lJO1Lg93C8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "8KzV+qYGYuIjoNj8eEpnTuHrMYuhzphl80rS6wrODuU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wDyTLjSEFF895hSQsHvmoEQVS6KIkZOtq1c9dVogm9I=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SGrtPuMYCjUrfKF0Pq/thdaQzmGBMUvlwN3ORIu9tHU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "KySHON3hIoUk4xWcwTqk6IL0kgjzjxgMBObVIkCGvk4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "hBIdS9j0XJPeT4ot73ngELkpUoSixvRBvdOL9z48jY8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Tx6um0q9HjS5ZvlFhvukpI6ORnyrXMWVW1OoxvgqII0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zFKlyfX5H81+d4A4J3FKn4T5JfG+OWtR06ddyX4Mxas=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cGgCDuPV7MeMMYEDpgOupqyNP4BQ4H7rBnd2QygumgM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "IPaUoy98v11EoglTpJ4kBlEawoZ8y7BPwzjLYBpkvHQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Pfo4Am6tOWAyZNn8G9W5HWWGC3ZWmX0igI/RRB870Ro=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "fnTSjd7bC1Udoq6iM7UDnHAC/lsIXSHp/Gy332qw+/I=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "fApBgVRrTDyEumkeWs5p3ag9KB48SbU4Si0dl7Ns9rc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "QxudfBItgoCnUj5NXVnSmWH3HK76YtKkMmzn4lyyUYY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "sSOvwhKa29Wq94bZ5jGIiJQGbG1uBrKSBfOYBz/oZeI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "FdaMgwwJ0NKsqmPZLC5oE+/0D74Dfpvig3LaI5yW5Fs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "sRWBy12IERN43BSZIrnBfC9+zFBUdvjTlkqIH81NGt4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "/4tIRpxKhoOwnXAiFn1Z7Xmric4USOIfKvTYQXk3QTc=", + "subType": "00" + } + } + ] + } + ] + } + } + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2-Range-Decimal-FindOneAndUpdate.json b/test/client-side-encryption/spec/legacy/fle2-Range-Decimal-FindOneAndUpdate.json new file mode 100644 index 0000000000..d0e2967771 --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2-Range-Decimal-FindOneAndUpdate.json @@ -0,0 +1,1930 @@ +{ + "runOn": [ + { + "minServerVersion": "6.2.0", + "topology": [ + "replicaset" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "encrypted_fields": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimal", + "bsonType": "decimal", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + }, + "key_vault_data": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + 
"keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ], + "tests": [ + { + "description": "FLE2 Range Decimal. FindOneAndUpdate.", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimal": { + "$numberDecimal": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimal": { + "$numberDecimal": "1" + } + } + } + }, + { + "name": "findOneAndUpdate", + "arguments": { + "filter": { + "encryptedDecimal": { + "$gt": { + "$numberDecimal": "0" + } + } + }, + "update": { + "$set": { + "encryptedDecimal": { + "$numberDecimal": "2" + } + } + }, + "returnDocument": "Before" + }, + "result": { + "_id": 1, + "encryptedDecimal": { + "$numberDecimal": "1" + } + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedDecimal": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimal", + "bsonType": "decimal", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedDecimal": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimal", + "bsonType": "decimal", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + } + } + 
} + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "findAndModify": "default", + "query": { + "encryptedDecimal": { + "$gt": { + "$binary": { + "base64": "CgljAAADcGF5bG9hZADZYgAABGcAnWIAAAMwAH0AAAAFZAAgAAAAAJu2KgiI8vM+kz9qD3ZQzFQY5qbgYqCqHG5R4jAlnlwXBXMAIAAAAAAAUXxFXsz764T79sGCdhxvNd5b6E/9p61FonsHyEIhogVjACAAAAAAeEtIU/sp7zwjA/VArYkCzkUUiRiQOmlTaVZvfUqbEp0AAzEAfQAAAAVkACAAAAAAPQPvL0ARjujSv2Rkm8r7spVsgeC1K3FWcskGGZ3OdDIFcwAgAAAAACgNn660GmefR8jLqzgR1u5O+Uocx9GyEHiBqVGko5FZBWMAIAAAAACMKx1UyNAN4yVafFht8jp4w4LNaJcLhnozfSMzDHD3owADMgB9AAAABWQAIAAAAAD2Zi6kcxmaD2mY3VWrP+wYJMPg6cSBIYPapxaFQxYFdQVzACAAAAAAM/cV36BLBY3xFBXsXJY8M9EHHOc/qrmdc2CJmj3M89gFYwAgAAAAAHdek2wlAQ5BQORVjudyziRTV0EKx8qbsnoDiw4HG2xaAAMzAH0AAAAFZAAgAAAAAOaNqI9srQ/mI9gwbk+VkizGBBH/PPWOVusgnfPk3tY1BXMAIAAAAAAc96O/pwKCmHCagT6T/QV/wz4vqO+R22GsZ1dse2Vg6QVjACAAAAAA3tld+twIlVWnCTXElspau5k02CwxXDjh+3u1CQtV/W4AAzQAfQAAAAVkACAAAAAA0XlQgy/Yu97EQOjronl9b3dcR1DFn3deuVhtTLbJZHkFcwAgAAAAACoMnpVl6EFJak8A+t5N4RFnQhkQEBnNAx8wDqmq5U/dBWMAIAAAAABRmXIchytMNz4J439viiY5FZ1OB3AXJBkZ3udUqHpsqAADNQB9AAAABWQAIAAAAAAOtpNexRxfv0yRFvZO9DhlkpU4mDuAb8ykdLnE5Vf1VAVzACAAAAAAeblFKm/30orP16uQpZslvsoS8s0xfNPIBlw3VkHeekYFYwAgAAAAAJQdzGMO2s1AkdKz3ZPytivrQphUFVhvhMcXjvUGzHBDAAM2AH0AAAAFZAAgAAAAAIr8xAFm1zPmrvW4Vy5Ct0W8FxMmyPmFzdWVzesBhAJFBXMAIAAAAABYeeXjJEzTHwxab6pUiCRiZjxgtN59a1y8Szy3hfkg+gVjACAAAAAA161loizzKvXS1Po/3bxeNICmKpsPSK9Q+EpkISK3jVoAAzcAfQAAAAVkACAAAAAAl+ibLk0/+EwoqeC8S8cGgAtjtpQWGEZDsybMPnrrkwEFcwAgAAAAAHPPBudWgQ+HUorLDpJMqhS9VBF2VF5aLcxgrM1s+yU7BWMAIAAAAAACyakB3CZWEjbxK0u9Sflc3+EafBAQFbvBpCxKvxuEQAADOAB9AAAABWQAIAAAAACR9erwLTb+tcWFZgJ2MEfM0PKI9uuwIjDTHADRFgD+SQVzACAAAAAAcOop8TXsGUVQoKhzUllMYWxL93xCOkwtIpV8Q6hiSYYFYwAgAAAAAJ43998lzKoVqWm99ZzKHJLeVscGNCVoKDvpUtt2rDI9AAM5AH0AAAAFZAAgAAAAALv0vCPgh7QpmM8Ug6ad5ioZJCh7pLMdT8FYyQioBQ6KBXMAIAAAAADsCPyIG8t6ApQkRk1fX/sfc1kpuWCWP8gAEpnYoBSHrQVjACAAAAAAj2h1WLr0EFEFZ31djx3BtLIbAtdE05ax54opkJo4/bQAAzEwAH0AAAAFZAAgAAAAAIW4AxJgYoM0pcNTwk1RSbyjZGIqgKL1hcTJmNrnZmoPBXMAIAAAAAAZpfx3EFO0vY0f1eHnE0PazgqeNDTaj+pPJMUNW8lFrAVjACAAAAAAqFD83E8POmo6pdqg7D+jWtngbgcV99mbQBxkbpX7ds4AAzExAH0AAAAFZAAgAAAAAKliO6L9zgeuufjj174hvmQGNRbmYYs9yAirL7OxwEW3BXMAIAAAAAAqU7vs3DWUQ95Eq8OejwWnD0GuXd+ASi/uD6S0l8MM1QVjACAAAAAAp0cwpAh7SiuStPwYNVvp83N5GWyWRWA7UGRxHDDj8g8AAzEyAH0AAAAFZAAgAAAAAOGQcCBkk+j/Kzjt/Cs6g3BZPJG81wIHBS8JewHGpgk+BXMAIAAAAABjrxZXWCkdzrExwCgyHaafuPSQ4V4x2k9kUCAqUaYKDQVjACAAAAAAJr/tEuxScrJJEMECy4itHf1y5+gO2l5sBjgM/EOLXw0AAzEzAH0AAAAFZAAgAAAAAPmCNEt4t97waOSd5hNi2fNCdWEkmcFJ37LI9k4Az4/5BXMAIAAAAABX7DuDPNg+duvELf3NbLWkPMFw2HGLgWGHyVWcPvSNCAVjACAAAAAAeNCEn5Z3A5CgCnIgaBl2N8p0t+5rRY06vGg6ePVMaTUAAzE0AH0AAAAFZAAgAAAAAD4vtVUYRNB+FD9yoQ2FVJH3nMeJeKbi6eZfth638YqbBXMAIAAAAAANCuUB4OdmuD6LaDK2f3vaqfgYYvg40wDXOBbcFjTqLwVjACAAAAAATozHsao4er/Uk5H/pKe/1JEo/3h4Pgah2RgJHXf2PeYAAzE1AH0AAAAFZAAgAAAAAJPIRzjmTjbdIvshG6UslbEOd797ZSIdjGAhGWxVQvK1BXMAIAAAAABgmJ0Jh8WLs9IYs/a7DBjDWd8J3thW/AGJK7zDnMeYOAVjACAAAAAAtD48LbnCkTvacfz8DvR1ixsbYceiJxJRV7tLqDUWvVwAAzE2AH0AAAAFZAAgAAAAAABQyKQPoW8wGPIqnsTv69+DzIdRkohRhOhDmyVHkw9WBXMAIAAAAAAqWA2X4tB/h3O1Xlawtz6ndI6WaTwgU1QYflL35opu5gVjACAAAAAAphx2eRFGSW/tm3HxilqRv5Zj5TtOy4KSGiCIm6yJANYAAzE3AH0AAAAFZAAgAAAAACB7NOyGQ1Id3MYnxtBXqyZ5Ul/lHH6p1b10U63DfT6bBXMAIAAAAADpOryIcndxztkHSfLN3Kzq29sD8djS0PspDSqERMqokQVjACAAAAAAIHYCljTfUu/Ox23Nrjz2Z6cqQpn4s0sJV/SxkC9UtIEAAzE4AH0AAAAFZAAgAAAAAKVfXLfs8XA14CRTB56oZwV+bFJN5BHraTXbqEXZDmTkBXMAIAAAAAASRWTsfGOpqdffiOodoqIgBzG/yzFyjR5CfUsIUIWGpgVjACAAAAAAGs7l0GsFNkQMcJBUuUsEOMuQupbEvi7MDcezspsCY3EAAzE5AH0AAAAFZAAgAAAAAH/aZr4EuS0/noQR9rcF8vwoaxnxrwgOsSJ0ys8PkHhGBXMAIA
AAAACd7ObGQW7qfddcvyxRTkPuvq/PHu7+6I5dxwS1Lzy5XAVjACAAAAAAWnfsbdu5Qvb62rEpBR90qh+3jdYfp266dmuNPLoXys4AAzIwAH0AAAAFZAAgAAAAAKvlcpFFNq0oA+urq3w6d80PK1HHHw0H0yVWvU9aHijXBXMAIAAAAADWnAHQ5Fhlcjawki7kWzdqjM2f6IdGJblojrYElWjsZgVjACAAAAAAdNxswYIMjqVta8sLC8wniMm6gOgIgG5eWTPAxItBJJ4AAzIxAH0AAAAFZAAgAAAAAH/2aMezEOddrq+dNOkDrdqf13h2ttOnexZsJxG1G6PNBXMAIAAAAABNtgnibjC4VKy5poYjvdsBBnVvDTF/4mmEAxsXVgZVKgVjACAAAAAArWXr2Myg8T8/0eYBCUXDOsIR//zqZtlE6Lb1YZJkvYcAAzIyAH0AAAAFZAAgAAAAAF2wZoDL6/V59QqO8vdRZWDpXpkV4h4KOCSn5e7x7nmzBXMAIAAAAADLZBu7LCYjbThaVUqMK14H/elrVOYIKJQCx4C9Yjw37gVjACAAAAAAWeF6F8pBeCm27PWjQGrlyN7JHp9WqCNnwFzqZ+V51t0AAzIzAH0AAAAFZAAgAAAAAH27yYaLn9zh2CpvaoomUPercSfJRUmBY6XFqmhcXi9QBXMAIAAAAAAUwumVlIYIs9JhDhSj0R0+59psCMsFk94E62VxkPt42QVjACAAAAAAjbsmlVrfexmULer3RtGwnOf20pF0xv4Z46OHOaIwgzEAAzI0AH0AAAAFZAAgAAAAALMg2kNAO4AFFs/mW3In04yFeN4AP6Vo0klyUoT06RquBXMAIAAAAAAgGWJbeIdwlpqXCyVIYSs0dt54Rfc8JF4b8uYc+YUj0AVjACAAAAAAsNKKan8cMW1oYFmQiyPZbepuJlDD0/CQUS21++CP16kAAzI1AH0AAAAFZAAgAAAAALas/i1T2DFCEmrrLEi7O2ngJZyFHialOoedVXS+OjenBXMAIAAAAAA1kK0QxY4REcGxHeMkgumyF7iwlsRFtw9MlbSSoQY7uAVjACAAAAAAUGZ/BwF6Q9hcFBC8F5ed6RwYWJ1/OahRwljBloMgkFAAAzI2AH0AAAAFZAAgAAAAAP1TejmWg1CEuNSMt6NUgeQ5lT+oBoeyF7d2l5xQrbXWBXMAIAAAAABPX0kj6obggdJShmqtVfueKHplH4ZrXusiwrRDHMOKeQVjACAAAAAApU9zcPAQ2xOspmwhjWN90v2m2clQ4rk7WBmm/nUyOHQAAzI3AH0AAAAFZAAgAAAAAMrKn+QPa/NxYezNhlOX9nyEkN1kE/gW7EuZkVqYl0b8BXMAIAAAAABUoZMSPUywRGfX2EEencJEKH5x/P9ySUVrhStAwgR/LgVjACAAAAAAl/hDhauhBkda/I/kLr/XLxyso2guawC2isj+tDQhfMoAAzI4AH0AAAAFZAAgAAAAAMmD1+a+oVbiUZd1HuZqdgtdVsVKwuWAn3/M1B6QGBM3BXMAIAAAAACLyytOYuZ9WEsIrrtJbXUx4QgipbaAbmlJvSZVkGi0CAVjACAAAAAAVKbMdxvkfCBoqJCUVjLrtKF90a80LocckdFc7LvYuWIAAzI5AH0AAAAFZAAgAAAAAOVKV7IuFwmYP1qVv8h0NvJmfPICu8yQhzjG7oJdTLDoBXMAIAAAAABL70XLfQLKRsw1deJ2MUvxSWKxpF/Ez73jqtbLvqbuogVjACAAAAAARiC2ShVCKMjjiqB6xg+jDIYJNhhWqmceO07EC8rEXkoAAzMwAH0AAAAFZAAgAAAAAKS/1RSAQma+xV9rz04IcdzmavtrBDjOKPM+Z2NEyYfPBXMAIAAAAAAOJDWGORDgfRv8+w5nunh41wXb2hCA0MRzwnLnQtIqPgVjACAAAAAAWU2Vw2Rta8oHxPPfOWQa86tjYWndb0E3RNlZ9gthHt0AAzMxAH0AAAAFZAAgAAAAAFeq8o82uNY1X8cH6OhdTzHNBUnCChsEDs5tm0kPBz3qBXMAIAAAAABaxMBbsaeEj/EDtr8nZfrhhhirBRPJwVamDo5WwbgvTQVjACAAAAAAqcsCyJuLvxG8STrUI1Ccaga760+4fbmIPc6bXodea0gAAzMyAH0AAAAFZAAgAAAAAI8IKIfDrohHh2cjspJHCovqroSr5N3QyVtNzFvT5+FzBXMAIAAAAABXHXteKG0DoOMmECKp6ro1MZNQvXGzqTDdZ0DUc8QfFAVjACAAAAAA47u7VuqtYJ4jt5CcGrUacopqApwpqEpIpkalvCRtul0AAzMzAH0AAAAFZAAgAAAAAJkHvm15kIu1OtAiaByj5ieWqzxiu/epK6c/9+KYIrB0BXMAIAAAAACzg5TcyANk0nes/wCJudd1BwlkWWF6zw3nGclq5v3SJQVjACAAAAAAvTQRtEYdIj2+bfJNamvrgWRoBsYYjMQx9MyuFwSfKmYAAzM0AH0AAAAFZAAgAAAAAAYSOvEWWuSg1Aym7EssNLR+xsY7e9BcwsX4JKlnSHJcBXMAIAAAAABT48eY3PXVDOjw7JpNjOe1j2JyI3LjDnQoqZ8Je5B2KgVjACAAAAAA4eLwHqiSgM1pMqjK3QFgYauOnI3emnlOK3rgQPuec4AAAzM1AH0AAAAFZAAgAAAAAGQxC9L1e9DfO5XZvX1yvc3hTLtQEdKO9FPMkyg0Y9ZABXMAIAAAAADtmcMNJwdWLxQEArMGZQyzpnu+Z5yMmPAkvgq4eAKwNQVjACAAAAAAxA7XR9Cp6671TiZesvqE/EPKVPXfGDaYuDnce+MaZw0AAzM2AH0AAAAFZAAgAAAAADLHK2LNCNRO0pv8n4fAsxwtUqCNnVK8rRgNiQfXpHSdBXMAIAAAAACf16EBIHRKD3SzjRW+LMOl+47QXA3CJhMzlcqyFRW22AVjACAAAAAA9F9Ztm39rTapXoeRmzk6eUoa3MDPatwoQD5PvD+3xGEAAzM3AH0AAAAFZAAgAAAAAHiZJiXKNF8bbukQGsdYkEi95I+FSBHy1I5/hK2uEZruBXMAIAAAAADE+lZBa8HDUJPN+bF6xI9x4N7GF9pj3vBR7y0BcfFhBAVjACAAAAAArsw2CrSC2y02EiWJxViVxlYRIXtWARpUTwO/mfqS7O4AAzM4AH0AAAAFZAAgAAAAAI1oa2OIw5TvhT14tYCGmhanUoYcCZtNbrVbeoMldHNZBXMAIAAAAAAx2nS0Ipblf2XOgBiUOuJFBupBhe7nb6QPLZlA4aMPCgVjACAAAAAAaVoGxV5YH4urmcw3Zocg+QHoq+hA0u6nIkconI0MtZoAAzM5AH0AAAAFZAAgAAAAABgTWS3Yap7Q59hii/uPPimHWXsr+DUmsqfwt/X73qsOBXMAIAAAAACKK05liW5KrmEAvtpCB1WUltruzUylDDpjea//UlWoOAVjACAAAAAAAI3iOqWSA+DpgNr/neIRpt85Q8pLS/81ZKDnbC4io/0AAzQwAH0AAAAFZ
AAgAAAAABOAnBPXDp6i9TISQXvcNKwGDLepZTu3cKrB4vKnSCjBBXMAIAAAAADjjzZO7UowAAvpwyG8BNOVqLCccMFk3aDK4unUeft5ywVjACAAAAAAIZqKrUcbtze/K3CUKdchuVXQYkgj6jQfIeMZ3GrtYeEAAzQxAH0AAAAFZAAgAAAAAHN8hyvT1lYrAsdiV5GBdd5jhtrAYE/KnSjw2Ka9hjz9BXMAIAAAAAD794JK7EeXBs+D7yOVK7nWF8SbZ/7U8gZ7nnT9JFNwTAVjACAAAAAAQ4K5cDGLM/2L7KbMt537tsG1eiWrboKMj6pXVfBDIBoAAzQyAH0AAAAFZAAgAAAAAO93bPrq8bsnp1AtNd9ETnXIz0lH/2HYN/vuw9wA3fyFBXMAIAAAAABHlls5fbaF2oAGqptC481XQ4eYxInTC29aElfmVZgDUgVjACAAAAAAe0oLBk+ZaipcPgjjHvACAyA42cLY8i+s+V0EsarulZkAAzQzAH0AAAAFZAAgAAAAAL1YsAZm1SA0ztU6ySIrQgCCA74V6rr0/4iIygCcaJL6BXMAIAAAAADTXWTHWovGmUR1Zg9l/Aqq9H5mOCJQQrb/Dfae7e3wKAVjACAAAAAAAeXpcv0oCp9iY/U81w+WJjFjGn60KLQEURJd/pSoaJgAAzQ0AH0AAAAFZAAgAAAAAEoFAeHk0RZ9kD+cJRD3j7PcE5gzWKnyBrF1I/MDNp5mBXMAIAAAAACgHtc2hMBRSZjKw8RAdDHK+Pi1HeyjiBuAslGVNcW5tAVjACAAAAAAmSJiRfjmuwq5cYfadeYQRkPqbGYYrMNTQeyhBEAYClEAAzQ1AH0AAAAFZAAgAAAAAAW+7DmSN/LX+/0uBVJDHIc2dhxAGz4+ehyyz8fAnNGoBXMAIAAAAAA6Ilw42EvvfLJ3Eq8Afd+FjPoPcQutZO6ltmCLEr8kxQVjACAAAAAAUHZ9EwBIvDI4PUeCOzZCuVN9g9h5Zvq2mSpmejPLBE8AAzQ2AH0AAAAFZAAgAAAAANBdV7M7kuYO3EMoQItAbXv4t2cIhfaT9V6+s4cg9djlBXMAIAAAAABvz4MIvZWxxrcJCL5qxLfFhXiUYB1OLHdKEjco94SgDgVjACAAAAAAvz1koQRh+weAUstLWz/iHxFrLReN4KtQjdEU/zrKOFsAAzQ3AH0AAAAFZAAgAAAAAMoAoiAn1kc79j5oPZtlMWHMhhgwNhLUnvqkqIFvcH1NBXMAIAAAAADcJTW7WiCyW0Z9YDUYwppXhLj4Ac1povpJvcAq+i48MQVjACAAAAAARL0k3Uwe7OUwTejHMoNwK+twNHQznmo1vVUZaH7h8BIAAzQ4AH0AAAAFZAAgAAAAACI3j5QP7dWHpcT6WO/OhsWwRJNASBYqIBDNzW8IorEyBXMAIAAAAABxUpBSjXwCKDdGP9hYU+RvyR+96kChfvyyRC4jZmztqAVjACAAAAAA93/qZqyku1BJePH5hmMWsMfuMlF7TOFZp7z2wLJNdeYAAzQ5AH0AAAAFZAAgAAAAAKsbycEuQSeNrF8Qnxqw3x3og8JmQabwGqnDbqzFRVrrBXMAIAAAAACno/3ef2JZJS93SVVzmOZSN+jjJHT8s0XYq2M46d2sLAVjACAAAAAARUbN9bbz8Balyw8pmBLg3L5dy3aQ9s8IgqpVLeQzTkUAAzUwAH0AAAAFZAAgAAAAAPXIcoO8TiULqlxzb74NFg+I8kWX5uXIDUPnh2DobIoMBXMAIAAAAADR6/drkdTpnr9g1XNvKDwtBRBdKn7c2c4ZNUVK5CThdQVjACAAAAAAnJXYvnjljaE6ajRF+h90FXzJ9EJY26D0oG9t61ZMzqgAAzUxAH0AAAAFZAAgAAAAAEa03ZOJmfHT6/nVadvIw71jVxEuIloyvxXraYEW7u7pBXMAIAAAAADzRlBJK75FLiKjz3djqcgjCLo/e3yntI3MnPS48OORhgVjACAAAAAAbFS6SmTSDF3AsPceQrRwMMRoAnzL/k5avSHlIji5zYIAAzUyAH0AAAAFZAAgAAAAAKx8NLSZUU04pSSGmHa5fh2oLHsEN5mmNMNHL95/tuC9BXMAIAAAAAA59hcXVaN3MNdHoo11OcH1aPRzHCwpVjO9mGfMz4xh3QVjACAAAAAAoIvo/iOb/eXJv7w4KSQo74DOyO4uAIO09Ba7JZw9ousAAzUzAH0AAAAFZAAgAAAAAHNKAUxUqBFNS9Ea9NgCZoXMWgwhP4x0/OvoaPRWMquXBXMAIAAAAABUZ551mnP4ZjX+PXU9ttomzuOpo427MVynpkyq+nsYCQVjACAAAAAAlCTphL00SYotztwI+txbHAFTusjh1nNbiya24rM1dUEAAzU0AH0AAAAFZAAgAAAAALfGXDlyDVcGaqtyHkLT0qpuRhJQLgCxtznazhFtuyn/BXMAIAAAAABipxlXDq14C62pXhwAeen5+syA+/C6bN4rtZYcO4zKwAVjACAAAAAA431CBTW4s3IQde024W2cz5iEnj8EJ7p07esepoVDxmQAAzU1AH0AAAAFZAAgAAAAANoEr8sheJjg4UCfBkuUzarU9NFoy1xwbXjs5ifVDeA9BXMAIAAAAABPoyTf6M+xeZVGES4aNzVlq7LgjqZXJ/QunjYVusGUEAVjACAAAAAAywuLdjJJuXewOXMAr25GMBEif8+P74+pGaENUn+XIB8AAzU2AH0AAAAFZAAgAAAAAKvDiK+xjlBe1uQ3SZTNQl2lClIIvpP/5CHwY6Kb3WlgBXMAIAAAAAANnxImq5MFbWaRBHdJp+yD09bVlcFtiFDYsy1eDZj+iQVjACAAAAAAvwntX1RgeNfp5oiD+r3sAKIbMGy1yEblODDYWec1xuIAAzU3AH0AAAAFZAAgAAAAAF49IlFH9RmSUSvUQpEPUedEksrQUcjsOv44nMkwXhjzBXMAIAAAAADJtWGbk0bZzmk20obz+mNsp86UCu/nLLlbg7ppxYn7PgVjACAAAAAAKW8QTERGXwh6fE4gJsULcmu80B4BMkMaZR9USn/C6RgAAzU4AH0AAAAFZAAgAAAAAOuSJyuvz50lp3BzXlFKnq62QkN2quNU1Gq1IDsnFoJCBXMAIAAAAAAqavH1d93XV3IzshWlMnzznucadBF0ND092/2ApI1AcAVjACAAAAAAmVcLt9hf9/q+2O3K4cvFAsvnCsSofHZ5iTzIaR5YKFIAAzU5AH0AAAAFZAAgAAAAALtgC4Whb4ZdkCiI30zY6fwlsxSa7lEaOAU3SfUXr02XBXMAIAAAAACgdZ6U1ZVgUaZZwbIaCdlANpCw6TZV0bwg3DS1NC/mnAVjACAAAAAAYJOnAwVW0gPl1jkCxAL1ZAhKkJ1+ShfwyaIaipflCegAAzYwAH0AAAAFZAAgAAAAAF6PfplcGp6vek1ThwenMHVkbZgrc/dHgdsgx1VdPqZ5BXMAIAAAAACha3qhWkqmuwJSEXPozDO8y1ZdRLyzt9Crt2vjGnT7AAVjACAAAAAA
1JYhGDDvtf8b2Pup5jdzQmy325tGo27n/5thjZ/GI4sAAzYxAH0AAAAFZAAgAAAAAKoLEhLvLjKc7lhOJfx+VrGJCx9tXlOSa9bxQzGR6rfbBXMAIAAAAAAIDK5wNnjRMBzET7x/KAMExL/zi1IumJM92XTgXfoPoAVjACAAAAAAhoVLxo0hXbxAzz2BWbeRxAzk08TbVuHxJkdsbCISswkAAzYyAH0AAAAFZAAgAAAAADoQv6lutRmh5scQFvIW6K5JBquLxszuygM1tzBiGknIBXMAIAAAAADAD+JjW7FoBQ76/rsECmmcL76bmyfXpUU/awqIsZdO+wVjACAAAAAAwLIixR/svwADNhtiOZ9mDF2rUqEvuPx2tUSQbhOpersAAzYzAH0AAAAFZAAgAAAAAJNHUGAgn56KekghO19d11nai3lAh0JAlWfeP+6w4lJBBXMAIAAAAAD9XGJlvz59msJvA6St9fKW9CG4JoHV61rlWWnkdBRLzwVjACAAAAAAKMVXumaH5QOt5WVHFip9xWx9e5I/5Y9uEO0UAX6dy7QAAzY0AH0AAAAFZAAgAAAAAHgYoMGjEE6fAlAhICv0+doHcVX8CmMVxyq7+jlyGrvmBXMAIAAAAAC/5MQZgTHuIr/O5Z3mXPvqrom5JTQ8IeSpQGhO9sB+8gVjACAAAAAAPMgD8Rqnd94atKnMyPjlTwthUQ710MKJVqgtwNXLFWwAAzY1AH0AAAAFZAAgAAAAANpIljbxHOM7pydY877gpRQvYY2TGK7igqgGsavqGPBABXMAIAAAAAAqHyEu9gpurPOulApPnr0x9wrygY/7mXe9rAC+tPK80wVjACAAAAAAezZExYLAbjyMsVuSrWGQ+LsXYEcp6AFMANkQ496mTXcAAzY2AH0AAAAFZAAgAAAAAGR+gEaZTeGNgG9BuM1bX2R9ed4FCxBA9F9QvdQDAjZwBXMAIAAAAABSkrYFQ6pf8MZ1flgmeIRkxaSh/Eep4Btdx4QYnGGnwAVjACAAAAAAQ011358xXtVDOyGPpF2+zGoMdiQ8PGCB1QfDcluYUF4AAzY3AH0AAAAFZAAgAAAAAFNprhQ3ZwIcYbuzLolAT5n/vc14P9kUUQComDu6eFyKBXMAIAAAAAAcx9z9pk32YbPV/sfPZl9ALIEVsqoLXgqWLVK/tP+heAVjACAAAAAA3eqduo9CWE5BePqBq7KCh5++QqXnyjCwybB15Zoeiu4AAzY4AH0AAAAFZAAgAAAAADgyPqQdqQrgfmJjRFAILTHzXbdw5kpKyfeoEcy6YYG/BXMAIAAAAAAE+3XsBQ8VAxAkN81au+f3FDeCD/s7KoZD+fnM1MJSSAVjACAAAAAAWHAiQNo6HBkIhf72fVQxJLaCtNeTqn2OuMmYZjT7PRkAAzY5AH0AAAAFZAAgAAAAAI0CT7JNngTCTUSei1Arw7eHWCD0jumv2rb7imjWIlWABXMAIAAAAABSP8t6ya0SyCphXMwnru6ZUDXWElN0NfBvEOhDvW9bJQVjACAAAAAAbfZdoqMQ71hNIsycNHl6JpSmAFDPSfgzdWkGqFZNoScAAzcwAH0AAAAFZAAgAAAAAD/FIrGYFDjyYmVb7oTMVwweWP7A6F9LnyIuNO4MjBnXBXMAIAAAAACIZgJCQRZu7NhuNMyOqCn1tf+DfU1qm10TPCfj5JYV3wVjACAAAAAAZCaO06/DjOBjNrTxB8jdxM7X4XvNu9QTa2Y00kZJwgEAAzcxAH0AAAAFZAAgAAAAAHIkVuNDkSS1cHIThKc/O0r2/ubaABTOi8Q1r/dvBAsEBXMAIAAAAADdHYqchEiJLM340c3Q4vJABmmth3+MKzwLYlsG6GS7sQVjACAAAAAA4byh4Re83zUZxvEH4iJSsnFei+ohsWxjSpB5YoAPawIAAzcyAH0AAAAFZAAgAAAAAJmoDILNhC6kn3masElfnjIjP1VjsjRavGk1gSUIjh1NBXMAIAAAAAD97Ilvp3XF8T6MmVVcxMPcdL80RgQ09UoC6PnoOvZ1IQVjACAAAAAAx9wkqPvtzrL9EFDmFvktI129ti8IAdHn0oudubGPWAAAAzczAH0AAAAFZAAgAAAAAI5bm3YO0Xgf0VT+qjVTTfvckecM3Cwqj7DTKZXf8/NXBXMAIAAAAAD/m+h8fBhWaHm6Ykuz0WX1xL4Eme3ErLObyEVJf8NCywVjACAAAAAAiGdTXD22l1zDxHeF4NXUTiBnNTsdpzJRwM6riTPuOogAAzc0AH0AAAAFZAAgAAAAANqo4+p6qdtCzcB4BX1wQ6llU7eFBnuu4MtZwp4B6mDlBXMAIAAAAAAGiz+VaukMZ+6IH4jtn4KWWdKK4/W+O+gRioQDrfzpMgVjACAAAAAAl/wlBjSJW/hKkll8HSBCGw4Ce1pJ5rZuhVE09hKq4jQAAzc1AH0AAAAFZAAgAAAAAPrFXmHP2Y4YAm7b/aqsdn/DPoDkv7B8egWkfe23XsM1BXMAIAAAAAAGhwpKAr7skeqHm3oseSbO7qKNhmYsuUrECBxJ5k+D2AVjACAAAAAAKSsxERy2OBhb5MiH9/H2BET2oeU6yVihiAoWi/16FroAAzc2AH0AAAAFZAAgAAAAABzjYxwAjXxXc0Uxv18rH8I3my0Aguow0kTwKyxbrm+cBXMAIAAAAADVbqJVr6IdokuhXkEtXF0C2gINLiAjMVN20lE20Vmp2QVjACAAAAAACJ+VAwl9X88mjC76yzkWeL4K6AamNYjbNkpS9B6fQE4AAzc3AH0AAAAFZAAgAAAAAFMm2feF2fFCm/UC6AfIyepX/xJDSmnnolQIBnHcPmb5BXMAIAAAAABLI11kFrQoaNVZFmq/38aRNImPOjdJh0Lo6irI8M/AaAVjACAAAAAAZFgYh4nV8YN+/AAqe/QhKDkNBPg8KraCG7y3bOzhPV4AAzc4AH0AAAAFZAAgAAAAAJvXB3KyNiNtQko4SSzo/9b2qmM2zU9CQTTDfLSBWMgRBXMAIAAAAAAvjuVP7KsLRDeqVqRziTKpBrjVyqKiIbO9Gw8Wl2wFTAVjACAAAAAARuBn3JdQ8so7Gy0XVH0CtlvtRoJWhSrJP6eRIqbyWh8AAzc5AH0AAAAFZAAgAAAAAPGdcxDiid8z8XYnfdDivNMYVPgBKdGOUw6UStU+48CdBXMAIAAAAAARj6g1Ap0eEfuCZ4X2TsEw+Djrhto3fA5nLwPaY0vCTgVjACAAAAAATxLY6SkcLPaoOBJpQsggEqoxgJgiY9seP3fBQM05PckAAzgwAH0AAAAFZAAgAAAAAP5rGPrYGt3aKob5f/ldP0qrW7bmWvqnKY4QwdDWz400BXMAIAAAAADTQkW2ymaaf/bhteOOGmSrIR97bAnJx+yN3yMj1bTeewVjACAAAAAA6O+wIMt3xMITuCxVrqOCNBPX5F122G+/Is+EYkzUvVYAAzgxAH0AAAAFZAAgAAAAAAlz6wJze5UkIxKpJOZFGCOf3v2KByWyI6NB6JM9wNcBBXMAIAA
AAABUC7P/neUIHHoZtq0jFVBHY75tSFYr1Y5S16YN5XxC1QVjACAAAAAANv97jMd9JmZ7lMeCzK7jaZHZYsZJNl6N9g9WXzd2om0AAzgyAH0AAAAFZAAgAAAAAFJ8AtHcjia/9Y5pLEc3qVgH5xKiXw12G9Kn2A1EY8McBXMAIAAAAAAxe7Bdw7eUSBk/oAawa7uicTEDgXLymRNhBy1LAxhDvwVjACAAAAAAtOUZ2NeUWwESpq95O92dhqYBt8RtFnjT43E7k9mQF3oAAzgzAH0AAAAFZAAgAAAAAO8uwQUaKFb6vqR3Sv3Wn4QAonC2exOC9lGG1juqP5DtBXMAIAAAAABZf1KyJgQg8/Rf5c02DgDK2aQu0rNCOvaL60ohDHyY+gVjACAAAAAA5nAWj1Ek8p0MLYZEB3yoVFDfBYZ+/ZpIo71u+W9hprcAAzg0AH0AAAAFZAAgAAAAAE8YtqyRsGCeiR6hhiyisR/hccmK4nZqIMzO4lUBmEFzBXMAIAAAAAC1UYOSKqAeG1UJiKjWFVskRhuFKpj9Ezy+lICZvFlN5AVjACAAAAAA37oP30mTzVftI2FL9Uwyls/jqLqbmRDQk7p7nlx44uYAAzg1AH0AAAAFZAAgAAAAAPhCrMausDx1QUIEqp9rUdRKyM6a9AAx7jQ3ILIu8wNIBXMAIAAAAACmH8lotGCiF2q9VQxhsS+7LAZv79VUAsOUALaGxE/EpAVjACAAAAAAhVIkl8p19VZ0gpg32s++Jda3qsVSVB5tV4CKrtjhE3IAAzg2AH0AAAAFZAAgAAAAAOBi/GAYFcstMSJPgp3VkMiuuUUCrZytvqYaU8dwm8v2BXMAIAAAAACEZSZVyD3pKzGlbdwlYmWQhHHTV5SnNLknl2Gw8IaUTQVjACAAAAAAriBex2kK//RPhyVpCYJDBng4l5w8jD3m8BF7dVAP0p8AAzg3AH0AAAAFZAAgAAAAAIQuup+YGfH3mflzWopN8J1X8o8a0d9CSGIvrA5HOzraBXMAIAAAAADYvNLURXsC2ITMqK14LABQBI+hZZ5wNf24JMcKLW+84AVjACAAAAAAGeUzdG2kQrx5XypXJezZmPVzMYuqYZw7Bhbl4EPT0SMAAzg4AH0AAAAFZAAgAAAAAKsh0ADyOnVocFrOrf6MpTrNvAj8iaiE923DPryu124gBXMAIAAAAADg24a8NVE1GyScc6tmnTbmu5ulzO+896fE92lN08MeswVjACAAAAAAbO+DEBY3STVMQN7CbxmHDUBYrDR+80e797u/VmiK4vcAAzg5AH0AAAAFZAAgAAAAAKkVC2Y6HtRmv72tDnPUSjJBvse7SxLqnr09/Uuj9sVVBXMAIAAAAABYNFUkH7ylPMN+Bc3HWX1e0flGYNbtJNCY9SltJCW/UAVjACAAAAAAHlE3RLKcXpXto7Mr8nRgzEsmhjfGh4vcgPxashCSMg4AAzkwAH0AAAAFZAAgAAAAADDggcwcb/Yn1Kk39sOHsv7BO/MfP3m/AJzjGH506Wf9BXMAIAAAAAAYZIsdjICS0+BDyRUPnrSAZfPrwtuMaEDEn0/ijLNQmAVjACAAAAAA1c/sy3255NofBQrnBNSmVkSzMgsGPaaOUJShddVrnuQAAzkxAH0AAAAFZAAgAAAAAEWY7dEUOJBgjOoWVht1wLehsWAzB3rSOBtLgTuM2HC8BXMAIAAAAAAAoswiHRROurjwUW8u8D5EUT+67yvrgpB/j6PzBDAfVwVjACAAAAAALBvQumG8m7/bzJjGWN2cHSAncdN8jMtOSmEhEhGom24AAzkyAH0AAAAFZAAgAAAAAPZaapeAUUFPA7JTCMOWHJa9lnPFh0/gXfAPjA1ezm4ZBXMAIAAAAACmJvLY2nivw7/b3DOKH/X7bBXjJwoowqb1GtEFO3OYgAVjACAAAAAAGCDHuUJusGHKQ+r9nrFChmUUsRcqZKPGsRiLSk5gbFcAAzkzAH0AAAAFZAAgAAAAAK00u6jadxCZAiA+fTsPVDsnW5p5LCr4+kZZZOTDuZlfBXMAIAAAAAAote4zTEYMDgaaQbAdN8Dzv93ljPLdGjJzvnRn3KXgtQVjACAAAAAApZ6l+OrocOqgFek7WOqOP9JruTWZ+iW+5zdL3DZwzhkAAzk0AH0AAAAFZAAgAAAAAFbgabdyymiEVYYwtJSWa7lfl/oYuj/SukzJeDOR6wPVBXMAIAAAAADAFGFjS1vPbN6mQEhkDYTD6V2V23Ys9gUEUMGNvMPkaAVjACAAAAAAeICKly5Xtwmfd4JFD+5e1UWpu4V3KoRim7jeBICDs+UAAzk1AH0AAAAFZAAgAAAAABNMR6UBv2E627CqLtQ/eDYx7OEwQ7JrR4mSHFa1N8tLBXMAIAAAAAAxH4gucI4UmNVB7625C6hFSVCuIpJO3lusJlPuL8H5EQVjACAAAAAA0/uh1hrAlGGna/njCVaCqBwubxkifzF0zjCDQrIJOtUAAzk2AH0AAAAFZAAgAAAAAG8cd6WBneNunlqrQ2EmNf35W7OGObGq9WL4ePX+LUDmBXMAIAAAAAAjJ2+sX87NSis9hBsgb1QprVRnO7Bf+GObCGoUqyPE4wVjACAAAAAAAoJ4Ufdq45T9Aun5FupHlaBCIUsmUn6dXgV9KorpFikAAzk3AH0AAAAFZAAgAAAAAFWOUGkUpy8yf6gB3dio/aOfRKh7XuhvsUj48iESFJrGBXMAIAAAAAAY7sCDMcrUXvNuL6dO0m11WyijzXZvPIcOKob6IpC4PQVjACAAAAAAO/QFjczoznH95Iu0YEVMsU1GA1yxSGL8bcwSweNzAtkAAzk4AH0AAAAFZAAgAAAAAGUrIdKxOihwNmo6B+aG+Ag1qa0+iqdksHOjQj+Oy9bZBXMAIAAAAABwa5dbI2KmzBDNBTQBEkjZv4sPaeRkRNejcjdVymRFKQVjACAAAAAAUtdJucZoQvvPJiy65RonQrxBCcvtfHslpbgLbtWirCYAAzk5AH0AAAAFZAAgAAAAAOx89xV/hRk64/CkM9N2EMK6aldII0c8smdcsZ46NbP8BXMAIAAAAADBF6tfQ+7q9kTuLyuyrSnDgmrdmrXkdhl980i1KHuGHgVjACAAAAAAVc5njbLX1afpsuG662U/OBo4LanEk06lKbQtd95fswgAAzEwMAB9AAAABWQAIAAAAADJDdC9aEFl4Y8J/awHbnXGHjfP+VXQilPHJg7ewaJI7AVzACAAAAAAE+tqRl6EcBMXvbr4GDiNIYObTsYpa1n6BJk9EjIJVicFYwAgAAAAAITXOaLyGsYLUvOemQXxvnAHBwM/t3sMwfQus2aGoII+AAMxMDEAfQAAAAVkACAAAAAA3F9rjEKhpoHuTULVGgfUsGGwJs3bISrXkFP1v6KoQLgFcwAgAAAAAIBf0tXw96Z/Ds0XSIHX/zk3MzUR/7WZR/J6FpxRWChtBWMAIAAAAABzRshE3O7JzVjSGQSabvFXpen8sGUCAyr8hIIevROrjgADMTAyAH0AAA
AFZAAgAAAAAOYIYoWkX7dGuyKfi3XssUlc7u/gWzqrR9KMkikKVdmSBXMAIAAAAABVF2OYjRTGi9Tw8XCAwZWLpX35Yl271TlNWp6N/nROhAVjACAAAAAAyGLbmHhe8eTQC8L2eDRcezUWULLZ9E8DPic7Mfp8/+4AAzEwMwB9AAAABWQAIAAAAACMtPm12YtdEAvqu6Eji1yuRXnu1RJP6h0l7pH3lSH4MwVzACAAAAAAENyCFfyUAh1veQBGx+cxiB7Sasrj41jzCGflZkB5cRMFYwAgAAAAAA92s3DQ3aIVOrRzusqU+xdK7cnQGCDJiyLF+su3F1ZDAAMxMDQAfQAAAAVkACAAAAAAvlI4lDcs6GB1cnm/Tzo014CXWqidCdyE5t2lknWQd4QFcwAgAAAAAD60SpNc4O2KT7J0llKdSpcX1/Xxs97N715a1HsTFkmBBWMAIAAAAABR3y/ZE3VZqBhqfUttX4bzW9wkpDoexfYZIG2fMwftcQADMTA1AH0AAAAFZAAgAAAAAKl8zcHJRDjSjJeV/WvMxulW1zrTFtaeBy/aKKhadc6UBXMAIAAAAADBdWQl5SBIvtZZLIHszePwkO14W1mQ0izUk2Ov21cPNAVjACAAAAAAHvQLJ2X4ncjVv1BsOd/jbVouRPwf4222WlGSzPyAfnsAAzEwNgB9AAAABWQAIAAAAABb6LXDWqCp1beQgQjj8I3sRTtFhlrmiBi+h/+ikmrvugVzACAAAAAA9stpgTecT7uTyaGNs3K9Bp0A7R0QaIAOfscyMXHBPX8FYwAgAAAAANTp7TwMatTSeLH3GvsrB3IOvJo+0B8HRGUFcRzJVesdAAMxMDcAfQAAAAVkACAAAAAA97NTT+81PhDhgptNtp4epzA0tP4iNb9j1AWkiiiKGM8FcwAgAAAAAKPbHg7ise16vxmdPCzksA/2Mn/qST0L9Xe8vnQugVkcBWMAIAAAAACVl3ds8xRdGFPAtZ+WtC4ojC1q5OzB6dSJoOLaJsdhhgADMTA4AH0AAAAFZAAgAAAAAMqpayM2XotEFmm0gwQd9rIzApy0X+7HfOhNk6VU7F5lBXMAIAAAAACJR9+q5T9qFHXFNgGbZnPubG8rkO6cwWhzITQTmd6VgwVjACAAAAAAEEOCUo+ihRGl1kuHlabWBWUFyJPAXSXzpQB4od57cMEAAzEwOQB9AAAABWQAIAAAAADQnslvt6Hm2kJPmqsTVYQHE/wWeZ4bE1XSkt7TKy0r1gVzACAAAAAA8URTA4ZMrhHPvlp53TH6FDCzS+0+61qHm5XK6UiOrKEFYwAgAAAAANSmECEsZgXBpoAzAIwaU3H0K/6HIutCn+ELRGBFzykkAAMxMTAAfQAAAAVkACAAAAAAhujlgFPFczsdCGXtQ/002Ck8YWQHHzvWvUHrkbjv4rwFcwAgAAAAALbV0lLGcSGfE7mDM3n/fgEvi+ifjl7WZ5b3aqjDNvx9BWMAIAAAAACKzGyyRT/3KSmGlKMHpswxj7pNu3rCmOETZ2DUhQsCBgADMTExAH0AAAAFZAAgAAAAABm/6pF96j26Jm7z5KkY1y33zcAEXLx2n0DwC03bs/ixBXMAIAAAAAD01OMvTZI/mqMgxIhA5nLs068mW+GKl3OW3ilf2D8+LgVjACAAAAAAnPa8PTfcaSSAdzNAC54IMTicbShbtt/cSnFHz7u7g8wAAzExMgB9AAAABWQAIAAAAACfw9/te4GkHZAapC9sDMHHHZgmlTrccyJDPFciOMSOcwVzACAAAAAAIIC1ZpHObvmMwUfqDRPl4C1aeuHwujM1G/yJbvybMNAFYwAgAAAAAKeagivKdHk7wAFEPHyDhsm9mDNWH5UPB+oIIlmfbzSOAAMxMTMAfQAAAAVkACAAAAAAkxHJRbnShpPOylLoDdNShfILeA1hChKFQY9qQyZ5VmsFcwAgAAAAAKidrY+rC3hTY+YWu2a7fuMH2RD/XaiTIBW1hrxNCQOJBWMAIAAAAADH8s64PC+lo8Ul4oujBlb/irtJSwu12V0HbvNzj/9qUQADMTE0AH0AAAAFZAAgAAAAAByuYl8dBvfaZ0LO/81JW4hYypeNmvLMaxsIdvqMPrWoBXMAIAAAAABNddwobOUJzm9HOUD8BMZJqkNCUCqstHZkC76FIdNg9AVjACAAAAAA4SEPY+qEZ2dCu/9485IEVybiM3Z7szXYM+izxklNm14AAzExNQB9AAAABWQAIAAAAABxMy7X5hf7AXGDz3Y/POu1ZpkMlNcSvSP92NOO/Gs7wAVzACAAAAAAHJshWo2T5wU2zvqCyJzcJQKQaHFHpCpMc9oWBXkpUPoFYwAgAAAAANeYm2aAT9f6T9uwQyH4sw2DUQyKTpo/CHt+Bb67ao4TAAMxMTYAfQAAAAVkACAAAAAAlqbslixl7Zw3bRlibZbe/WmKw23k8uKeIzPKYEtbIy0FcwAgAAAAAHEKwpUxkxOfef5HYvulXPmdbzTivwdwrSYIHDeNRcpcBWMAIAAAAAASUKCgxGu2U42qfKiyMimLPiZBMurf5v8lQJUSrhAWIAADMTE3AH0AAAAFZAAgAAAAAMXrXx0saZ+5gORmwM2FLuZG6iuO2YS+1IGPoAtDKoKBBXMAIAAAAADIQsxCr8CfFKaBcx8kIeSywnGh7JHjKRJ9vJd9x79y7wVjACAAAAAAPzOTJ088k9QdGUpZ1ubUdkw3pTGkNF8bui4a9wqeo08AAzExOAB9AAAABWQAIAAAAAAm83FA9yDUpwkbKTihe7m53u+DivS9BU2b4vQMtCVQ2AVzACAAAAAAz3m1UB/AbZPa4QSKFDnUgHaT78+6iGOFAtouiBorEgEFYwAgAAAAAHuM0gIDED47c9rdQ4MVtVzpVfn5zTF7eokO4lew4EM+AAMxMTkAfQAAAAVkACAAAAAAJRzYK0PUwr9RPG2/7yID0WgcTJPB2Xjccp5LAPDYunkFcwAgAAAAAIIh24h3DrltAzNFhF+MEmPrZtzr1PhCofhChZqfCW+jBWMAIAAAAAD6LppOeSiLHFI7EOBAz9pZPKU7LWbRWIINTlqw4zmJ5gADMTIwAH0AAAAFZAAgAAAAAHSaHWs/dnmI9sc7nB50VB2Bzs0kHapMHCQdyVEYY30TBXMAIAAAAACkV22lhEjWv/9/DubfHBAcwJggKI5mIbSK5L2nyqloqQVjACAAAAAA1I5pLbnlFf/EfJlhE0RFIioDjzKrWNh4TO8H/ksrbMcAAzEyMQB9AAAABWQAIAAAAAAC/I4TQRtCl12YZmdGz17X4GqSQgfwCPgRBwdHmdwu+QVzACAAAAAAx8f3z2ut/RAZhleari4vCEE+tNIn4ikjoUwzitfQ588FYwAgAAAAADX1qVlsvYru6YDBMtmlQoIBhLSbVykBtft8wn/A+ohJAAMxMjIAfQAAAAVkACAAAAAADGB5yU2XT0fse/MPWgvBvZikVxrl5pf3S5K1hceKWooFcwAgAAAAAIxTmlLHMjNaVDEfJbXvRez0S
EPWFREBJCT6qTHsrljoBWMAIAAAAAD+RiUk9IWfaDWXtzLp2NX/vCCeU9amFnQgpJjy56dQXQADMTIzAH0AAAAFZAAgAAAAABmO7QD9vxWMmFjIHz13lyOeV6vHT6mYCsWxF7hb/yOjBXMAIAAAAACT9lmgkiqzuWG24afuzYiCeK9gmJqacmxAruIukd0xEAVjACAAAAAAyiZxtuOZQNSit4wFMJS3jnqLbDzyJ0OtDTKs6r0EO0cAAzEyNAB9AAAABWQAIAAAAAAfPUoy7QyZKhIIURso+mkP9qr1izbjETqF5s22GwjCjAVzACAAAAAAvLMsIDQ/go4VUxeh50UHmsvMvfx51cwyONnRD2odvC0FYwAgAAAAAMwY/KvSIOLoSHOq4TQ1V9ZSCPZc34SdPooPL4gYXa7UAAMxMjUAfQAAAAVkACAAAAAAE3ZqUar0Bq2zWbARE0bAv98jBlK9UJ73/xcwdMWWlSkFcwAgAAAAAK4M+MmC+9sFiFsumMyJZQKxWmmJiuG9H7IzKw083xxkBWMAIAAAAACpywsi9vc979ApgH3BDzph7Y4VIAkQhji97sswzdLAYwADMTI2AH0AAAAFZAAgAAAAAMo8FJiOq63cAmyk2O7eI7GcbQh/1j4RrMTqly3rexftBXMAIAAAAADjVmpd0WiRGTw/gAqEgGolt2EI7Csv14vKdmYoMD0aAgVjACAAAAAAEPmYjDZnMZdkaYlLZLlRgist9kCrSQe0mSuKxPwwYbQAAzEyNwB9AAAABWQAIAAAAACu5IGaIx7A3Jvly/kzlCsSA4s3iJwuIl8jEdRH0k93NwVzACAAAAAA9NRUyxYE+t0Xyosyt6vIfMFW/vBoYg6sR+jBNs4JAxIFYwAgAAAAAKMbsr4JCCodHmkBvfhOe7BJigKiJik95wLeu3PjLSpXAAMxMjgAfQAAAAVkACAAAAAALkRy0GJInXYLA+cgjs6Myb0a+Gu9hgXhHvhLNoGWfckFcwAgAAAAANbALyt9zCSvwnLaWCd2/y2eoB7qkWTvv1Ldu8r40JPuBWMAIAAAAABi0dME7AUSoeMv4yIgnGMgrX9G+Uk6HV+I9/9zzABcZwADMTI5AH0AAAAFZAAgAAAAAGoUK/DSWhT8LZhszSUqDbTrp8cSA7rdqmADKL+MILtTBXMAIAAAAABHnEE9bVa6lvhfhEMkkV2kzSSxH/sMW/FIJuw3CzWs6wVjACAAAAAAagOaxIb7co+HxAXhfI9ahHLUNqmGuN/4JxeR9sm6PdkAAzEzMAB9AAAABWQAIAAAAAC922ZDQE3h2fQKibGMZ9hV0WNlmrPYYSdtaSyYxsWYqgVzACAAAAAAagMovciKK6WVjIc2cCj8nK5O/gVOFFVeVAJpRp89tmQFYwAgAAAAAAMsWoODZkTvKSNWzloogbp/AiFFspENp49RwKjWpMLHAAMxMzEAfQAAAAVkACAAAAAAtK+JqX3K/z2txjAU15DgX4y90DS2YLfIJFolCOkJJJwFcwAgAAAAAMnR5V7gfX7MNqqUdL5AkWlkhyFXaBRVNej+Rcn8lrQkBWMAIAAAAADOmDHuXwsu9uwKYYSN5XIb/TG3K5DB5aci/X1ATKTaSwADMTMyAH0AAAAFZAAgAAAAAAeGrIMK/bac6kPczxbvRYqKMkcpeI2FjdMpD91FDWIvBXMAIAAAAAAix62z1LeS8yvSXCl5gHSIomjyx76fF3S1lp9k900hygVjACAAAAAAUK1Gzlm0meDhYPL7ZgKaCD0qz7Zb5rtnPn1EEW3UZxoAAzEzMwB9AAAABWQAIAAAAACphf298InM0Us4HT8o1W1MGw0D/02vd7Jh+U0h7qaFaQVzACAAAAAAFXtk7YpqsOJxsqGWSIL+YcBE96G3Zz9D31gPqDW94y8FYwAgAAAAAEA5mnfyccI1SjyYftq3qCuORs/NxzzqUA+fxn3FRjVsAAMxMzQAfQAAAAVkACAAAAAATLDS2cuDVM3yDMuWNgk2iGKBTzPpfJMbvxVOSY39ZfcFcwAgAAAAAPT5wRi2cLHIUflXzm6EQB/m7xdThP80ir1VV/JBBqvxBWMAIAAAAABTj7i+CxxNVkbnAkSYga/v2X3/NH3nD/FRiOo2FPkzvAADMTM1AH0AAAAFZAAgAAAAAJNjExiZVX7jfFGfYpQu16qxLN0YPqVU/5CQ/Y67YSinBXMAIAAAAABMpm2+6KrkRUlXzQoMPHrQmIO6dkQz66tYdfTeA3dKqQVjACAAAAAATkNGECGUFzo34ItVnTEmxbe8dbde8QzMgEEdjMhRLz8AAzEzNgB9AAAABWQAIAAAAABlFkYtLCx901X6QVVMkSn6Z7k30UF4xHaA0OZJJ9bdyQVzACAAAAAATez+F9GHcGzTp7jjv4feboUNb8JCkIp4EqcPFisnq7MFYwAgAAAAAOHfn3eVRhSWn61AfZuAnCGleSLluJ3sF9IFGUWrCsBaAAMxMzcAfQAAAAVkACAAAAAA8NJKN0IxZnruhswGQkiruv8Ih0EMwDcSZx/Xasup9dkFcwAgAAAAAKaJZRxzA+Igeydvuk6cSwUHXcrmT4PjhuPu//FslpdnBWMAIAAAAAC3Cs/mKg+sbU/BC9QWFglOau6FWAP1sJqqSZOEOdzh+gADMTM4AH0AAAAFZAAgAAAAABHxHdEClz7hbSSgE58+dWLlSMJnoPz+jFxp4bB1GmLQBXMAIAAAAAD3nSvT6aGD+A110J/NwEfp0nPutlmuB5B+wA3CC3noGAVjACAAAAAA56lpvjMjFzR6NMGiSF1es1X01dwINWqaxyq3UUL8XJ0AAzEzOQB9AAAABWQAIAAAAAC/o8qW/ifk3KuJ01VFkyNLgQafxB5/bGs2G5VyyVafOwVzACAAAAAA1bMqAFGDHSl6BYNLbxApvkAv2K1/oafywiX0MDz1dGUFYwAgAAAAAMjJrrd6E5DyUuXkWZsAqo020Qvom6xqAluQLod5SmtdAAMxNDAAfQAAAAVkACAAAAAAY2V8I1bz3a1AxTtmED6UhdhA09huFkuuEX8R+d/WDPUFcwAgAAAAAPTVoNRiI76tcRKqd+JBBVyy4+YcKST42p0QX2BtmQ2VBWMAIAAAAAD0kQXxOWWOfIzqhJk5Rnvp/h5VXcGxxLZ8HMvVVH++YQADMTQxAH0AAAAFZAAgAAAAAO2kSsW0WGN9AOtK4xK2SHrGhWiaAbMEKT4iZkRpaDN/BXMAIAAAAABKGzQcPM8LT2dwOggxoWjv/1imYWabbG/G4kBw8OWaxAVjACAAAAAAT3sD7PTrAPtOtiSRrG4IKmSNr/uWgX8TvIyZpZzj4wkAAzE0MgB9AAAABWQAIAAAAAAiCwzNEEaH/mDam68IdDftnhthyUFdb+ZCNSBQ91WlHQVzACAAAAAA7tHyHcxCzmbJeFYZyPm4mEgkTGKOvwY4MX82OvH0Jn8FYwAgAAAAAP4A3nd470CwvaGzyJLfzbrw7ePJV35V3Cw6tuiPFlsCAAMxNDMAfQAAAAVk
ACAAAAAAGuCHVNJSuoVkpPOnS5s89GuA+BLi2IPBUr2Bg1sWEPIFcwAgAAAAAEl1gncS5/xO7bQ/KQSstRV3rOT2SW6nV92ZANeG2SR6BWMAIAAAAADi6/zM3xSBXEfpRpvEdeycRDl08SmwwLey72qgZaY7FwADMTQ0AH0AAAAFZAAgAAAAAMfxz7gEaoCdPvXrubDhCZUS0ARLZc1svgbXgMDlVBPgBXMAIAAAAAB6a5dDA3fuT5Vz2KvAcbUEFX/+B7Nw2p1QqbPoQ5TTuAVjACAAAAAAB4Kp5ewWpFMJ2T+QCrfw2eJV70L4M+GM8khV1JwqOqQAAzE0NQB9AAAABWQAIAAAAACnYsqF/VzmjIImC9+dqrHO1TM6lJ6fRwM0mM6Wf6paOwVzACAAAAAA5tgZzch8uDCR1ky3SllVaKVpxAlbrhvlNDTazZZRZOAFYwAgAAAAALDJDWkmExE4L8hf2CDNLuF+dgivUGXb7ZvBp90EALm+AAMxNDYAfQAAAAVkACAAAAAAEC0sIVmadtW4YMuRXH7RpAhXclsd+3bmqGXCMeaT014FcwAgAAAAABPpXh0uzpsJJB+IRUNajmMB9WGwswfpw5T9xk3Xj6ANBWMAIAAAAABauth7HsI90juToIGY6149TbrxP8Emoa+5Izilj9zeWQADMTQ3AH0AAAAFZAAgAAAAABaIB3n49Xm9cOafSrQsE0WCcYp8rMIO/qVwIlMF5YLRBXMAIAAAAAC9EyWJV3xOu9bzgdJ/yX+ko7qLf1u3AxNMataW2C9EzQVjACAAAAAAy7IAYSgbqP7CyvurHMoEq1eO08TIPNO/XcS1L5RKXHkAAzE0OAB9AAAABWQAIAAAAAA5rZItA/cocRnngYqcJ3nBXQ+l688aKz3EQyLbYYunPAVzACAAAAAAwKyA+L7TgxztPClLrIMk2JXR+w7c04N3ZOqPgjvrIvsFYwAgAAAAABaAwqPDgkTaz4888/3PeF64+vtHbLtAZgFgEurWT5kbAAMxNDkAfQAAAAVkACAAAAAA3htn7oHJ0YYpIrs+Mzyh85Ys67HwAdv5LQl1mCdoMWkFcwAgAAAAAEHjCtNNLenHuSIYux6ezAHsXDaj2DlTF67ToDhDDe6HBWMAIAAAAAAVwUZIMifuPtmiFSEctLt+bOFo1T4fgGiWNsZAP0ddeQADMTUwAH0AAAAFZAAgAAAAAEnt18Km/nqggfIJWxzTr9r3hnXNaueG6XO9A5G11LnGBXMAIAAAAAD7QxzGMN/ard5TfFLecE6uusMmXG2+RBsBR+/NCQHUwAVjACAAAAAAIPTDniUskKj5mEzULdlKtIDquwjcqkwunJyhmBazNNcAAzE1MQB9AAAABWQAIAAAAAAVAKK/GoY8AACu/hyMpO4hdLq6JnEyWNzkyci9sbaD/wVzACAAAAAA2HmeqpMlvvBpV2zQTYIRmsc4MFlfHRwLof0ycJgMg/MFYwAgAAAAACHHCAszhxijRN9Es+XRQxXo7JQ6g2qM6f5sIdMy0VKbAAMxNTIAfQAAAAVkACAAAAAAhHyq1GQC/GiMwpYjcsfkNxolJ10ARKjIjfkW1Wipzi0FcwAgAAAAAD/uaGWxTDq87F8XZ6CrFI+RNa8yMqfSZdqK00Kj833BBWMAIAAAAAD1mJD7i29l2xloVHwS1n0CbkyodWeFfZM1KF1r4HqNIgADMTUzAH0AAAAFZAAgAAAAABAJJxHoZD+MQBWqm9UM9Dd3z5ZohIZGWRaRVRsMptKQBXMAIAAAAADrE/ca+gqj/SH4oao4wE4qn2ovoTydzcMbDbrfnUs3zAVjACAAAAAAAIUpF9mApVCMCckE7S+K9RL6TPtlTTYuzUsj9E6oIpUAAzE1NAB9AAAABWQAIAAAAAAv01wz7VG9mTepjXQi6Zma+7b/OVBaKVkWNbgDLr1mFgVzACAAAAAA0I5sxz8r6wkCp5Tgvr+iL4p6MxSOq5d3e1kZG+0b7NkFYwAgAAAAAKwOaUTAOw6Y1F5vFrbDf8yVua5Fm6hKhkdIrzejuaatAAMxNTUAfQAAAAVkACAAAAAAWXecRwxSon68xaa9THXnRDw5ZfzARKnvvjTjtbae6T0FcwAgAAAAAPh0UfUMEo7eILCMv2tiJQe1bF9qtXq7GJtC6H5Va4fIBWMAIAAAAAASanx14WR2rmomuZ2x/nuD8j75pYzz+ZAt/v7uRcoOEQADMTU2AH0AAAAFZAAgAAAAAEY5WL8/LpX36iAB1wlQrMO/xHVjoO9BePVzbUlBYo+bBXMAIAAAAABoKcpadDXUARedDvTmzUzWPe1jTuvD0z9oIcZmKuiSXwVjACAAAAAAWhdUWP186dYAIDx6RtC0o/lzwYNvYBXm4RWFMbcU3YkAAzE1NwB9AAAABWQAIAAAAADnJnWqsfx0xqNnqfFGCxIplVu8mXjaHTViJT9+y2RuTgVzACAAAAAAWAaSCwIXDwdYxWf2NZTly/iKVfG/KDjHUcA1BokN5sMFYwAgAAAAAPS3GDNsHgCuTgEWTHiEStoY0VMzlopZ8L7wPUquK7ayAAMxNTgAfQAAAAVkACAAAAAAsPG7LaIpJvcwqcbtfFUpIjj+vpNj70Zjaw3eV9T+QYsFcwAgAAAAAJQ71zi0NlCyY8ZQs3IasJ4gB1PmWx57HpnlCf3+hmhqBWMAIAAAAABmDfQAQeyeutCB+wSn4RmErwYEkYpyBeCYA+3iutFzAQADMTU5AH0AAAAFZAAgAAAAAAGiSqKaQDakMi1W87rFAhkogfRAevnwQ41onWNUJKtuBXMAIAAAAAASgiDpXfGh7E47KkOD8MAcX8+BnDShlnU5JAGdnPdqOAVjACAAAAAAucFM6KF1YHEhH9vn16ox+VSqNEykMGaJzhLPsLI7qk8AAzE2MAB9AAAABWQAIAAAAAB7L4EnhjKA5xJD3ORhH2wOA1BvpnQ+7IjRYi+jjVEaJAVzACAAAAAAuhBIm0nL3FJnVJId+7CKDASEo+l2E89Z9/5aWSITK4AFYwAgAAAAALxpb5qDek038/wz4E//KIrwwo0voaqwlB7cTXSQ44EHAAMxNjEAfQAAAAVkACAAAAAAaROn1NaDZFOGEWw724dsXBAm6bgmL5i0cki6QZQNrOoFcwAgAAAAANVT8R6UvhrAlyqYlxtmnvkR4uYK/hlvyQmBu/LP6/3ZBWMAIAAAAAC1ONheOA57vNof7iZM3b82xk1abqT0bppzjQnHCmnUIwADMTYyAH0AAAAFZAAgAAAAADqSR5e0/Th59LrauDA7OnGD1Xr3H3NokfVxzDWOFaN7BXMAIAAAAACt30faNwTWRbvmykDpiDYUOCwA6QDbBBYBFWS7rdOB4AVjACAAAAAAQZlvU/gzOwW2rcr5+W43Q6EkfX8oX1TpRJWEAZAULx4AAzE2MwB9AAAABWQAIAAAAADmtb1ZgpZjSeodPG/hIVlsnS8hoRRwRbrTVx89VwL62AVzACAAAAAAi38e1g6sEyVfSDkzZbaZXGxKI/z
KNbMasOl2LYoWrq8FYwAgAAAAAFWkHkxPA28FjeUjeRILC0tX8OJcanhPP68QxM5s1kcMAAMxNjQAfQAAAAVkACAAAAAAf/f7AWVgBxoKjr7YsEQ4w/fqSvuQWV2HMiA3rQ7ur0sFcwAgAAAAADkkeJozP6FFhUdRIN74H4UhIHue+eVbOs1NvbdWYFQrBWMAIAAAAADS3qKdAEzPtGMFpsisEhnOfF0zfxmLXjHKKaNCzX4PhQADMTY1AH0AAAAFZAAgAAAAAI2WEOymtuFpdKi4ctanPLnlQud+yMKKb8p/nfKmIy56BXMAIAAAAADVKrJmhjr1rfF3p+T+tl7UFd1B7+BfJRk0e7a4im7ozgVjACAAAAAAxeqPjcgDwae+AgEXg3hU118dHoy6rcHd6jAJrGik+EcAAzE2NgB9AAAABWQAIAAAAAAuHU9Qd79hjyvKOujGanSGDIQlxzsql8JytTZhEnPw+AVzACAAAAAAjF2gV/4+sOHVgDd/oR5wDi9zL7NGpGD+NsEpGXy/a4QFYwAgAAAAAIFYquIAyUn3D2peptolq3s8oIgSLEIctqBYx6qEeQTxAAMxNjcAfQAAAAVkACAAAAAA7dQDkt3iyWYCT94d7yqUtPPwp4qkC0ddu+HFdHgVKEkFcwAgAAAAANuYvtvZBTEq4Rm9+5eb7VuFopowkrAuv86PGP8Q8/QvBWMAIAAAAABBO8poBKW/RWjdHBARy3Rzghz3frQIZEPE/nSKWYlVPgADMTY4AH0AAAAFZAAgAAAAAAwnZSDhL4tNGYxlHPhKYB8s28dY5ScSwiKZm3UhT8U3BXMAIAAAAABDoY6dhivufTURQExyC9Gx3ocpl09bgbbQLChj3qVGbgVjACAAAAAA4JWOs1XAUgX3rGBBGEDDIC9/i9khJ+FgIcwfvj/SnS0AAzE2OQB9AAAABWQAIAAAAAC2VuRdaC4ZJmLdNOvD6R2tnvkyARteqXouJmI46V306QVzACAAAAAAMn1Z6B35wFTX9mEYAPM+IiJ5hauEwfD0CyIvBrxHg7IFYwAgAAAAAMRQnV65wbGlT+7Wj/HvGPLKg8NBnhkQ8Hx/MIyWvynTAAMxNzAAfQAAAAVkACAAAAAAVerb7qVNy457rNOHOgDSKyWl5ojun7iWrv1uHPXrIZQFcwAgAAAAAIDcYS9j5z+gx0xdJj09L7876r/vjvKTi/d3bXDE3PhyBWMAIAAAAAANDuaZsI2eS6/qiSdG9pCLjcPdKS96xWUJr84V/1La7gADMTcxAH0AAAAFZAAgAAAAAH/E44yLxKCJjuSmU9A8SEhbmkDOx1PqqtYcZtgOzJdrBXMAIAAAAABgLh9v2HjBbogrRoQ82LS6KjZQnzjxyJH4PH+F3jupSAVjACAAAAAAd93miIlZ/uuhzjVBt0BPjzeewxih6TSzkzbsNhpMjA8AAzE3MgB9AAAABWQAIAAAAAAlNUdDL+f/SSQ5074mrq0JNh7CTXwTbbhsQyDwWeDVMwVzACAAAAAANIH2IlSNG0kUw4qz0budjcWn8mNR9cJlYUqPYdonucAFYwAgAAAAAF9OccClBu1j8BVMDmMtJyCxnhWCbg9FuY/8nhuFGJknAAMxNzMAfQAAAAVkACAAAAAASZ+CvUDtlk/R4HAQ3a+PHrKeY/8ifAfh0oXYFqliu80FcwAgAAAAAJelpzPgM65OZFt/mvGGpwibclQ49wH+1gbUGzd9OindBWMAIAAAAAC63uwNXVbd1VP5If++NprVRsHCbomU9Iq3GxCttNAweAADMTc0AH0AAAAFZAAgAAAAAN9fkoUVbvFV2vMNMAkak4gYfEnzwKI3eDM3pnDK5q3lBXMAIAAAAACnDkgVNVNUlbQ9RhR6Aot2nVy+U4km6+GHPkLr631jEAVjACAAAAAA5d2REAVBZI9rUfeqJ9jhePmRXvrGolUbMTFiC78k/OIAAzE3NQB9AAAABWQAIAAAAAASyK3b1nmNCMptVEGOjwoxYLLS9fYWm/Zxilqea0jpEQVzACAAAAAADDHsGrbqlKGEpxlvfyqOJKQJjwJrzsrB7k3HG0AUJbkFYwAgAAAAACZHzWGY3PEL4uPSKPezGGJkyScYw/Vt3bKmX5inVMaOAAMxNzYAfQAAAAVkACAAAAAAlSP9iK31GlcG9MKGbLmq+VXMslURr+As736rrVNXcsUFcwAgAAAAAAvbj0zfq9zzi8XReheKFbCB+h9IsOLgXPPpI5vrEJNZBWMAIAAAAABb9Y2Ox3MeHCfGnMxpR7nk6hwIJg0J8/tT8fZHwxMCBQADMTc3AH0AAAAFZAAgAAAAAOJNORH8Bev97gVU7y6bznOxJ+E6Qoykur1QP76hG1/7BXMAIAAAAAC+C1PtOOrSZgzBAGhr+dPe/kR0JUw9GTwLVNr61xC1aAVjACAAAAAA4rDk/UlRHudzR12i5Ivcb5Pxwn63JX2Sjf2xrlALL7kAAzE3OAB9AAAABWQAIAAAAAAKcHzLUomavInN6upPkyWhAqYQACP/vdVCIYpiy6U6HgVzACAAAAAATsR4KItY6R2+U7Gg6sJdaEcf58gjd1OulyWovIqfxKcFYwAgAAAAAEac2NX3v0n87D/eUM/a3Gj+5Axs896SCidMyOMaOz6zAAMxNzkAfQAAAAVkACAAAAAAqTOLiMpCdR59tLZzzIPqJvbCNvz2XQL9ust0qYaehtcFcwAgAAAAAArefox/3k5xGOeiw2m6NUdzuGxmPwcu5IFcj+jMwHgHBWMAIAAAAABK08kOnYjDWhLJ70iVhd3XEj0Q0srEQttJTawBm6GuOwADMTgwAH0AAAAFZAAgAAAAAIPSqSeVzSRgNVNmrPYHmUMgykCY27NbdDUNhE5kx/SgBXMAIAAAAAAhX90nNfxyXmZe/+btZ7q6xMX4PFyj0paM1ccJ/5IUUQVjACAAAAAA/9XTbq8UVKJbIeYtfNg8pdDrpDqbCwVr5Rq3Tb4dFwwAAzE4MQB9AAAABWQAIAAAAACLn5DxiqAosHGXIAY96FwFKjeqrzXWf3VJIQMwx1fl4gVzACAAAAAAindvU27nveutopdvuHmzdENBbeGFtI3Qcsr07jxmvm8FYwAgAAAAAJ3GMHBeIaCyFV5zBgcUMpjfIDcdoaTYQSyQN2shVJMWAAMxODIAfQAAAAVkACAAAAAA7i2S6rHRSPBwZEn59yxaS7HiYBOmObIkeyCcFU42kf8FcwAgAAAAAGb3RSEyBmgarkTvyLWtOLJcPwCKbCRkESG4RZjVmY4iBWMAIAAAAABKIdcHRSJ3dnevw5Pj0BfULPqmDOA7KBDREMGv5rXUEAADMTgzAH0AAAAFZAAgAAAAAFz+M+H/Z94mdPW5oP51B4HWptp1rxcMWAjnlHvWJDWrBXMAIAAAAACBFEOQyL7ZHu4Cq33QvXkmKuH5ibG/Md3RaED9CtG5HwVjACAAAAAABf2EvIMumZohp2E/sHWEJW4VMU3ez95hwWU9pjKGRCYAAzE4NAB9AAAABWQAIA
AAAAAE7c2x3Z3aM1XGfLNk/XQ9jCazNRbGhVm7H8c2NjS5ywVzACAAAAAARJ9h8fdcwA19velF3L/Wcvi2rCzewlKZ2nA0p8bT9uwFYwAgAAAAAPl1vH6JaJGZSJ8au0NK58X4gqleryub8slUhA0HRtzmAAMxODUAfQAAAAVkACAAAAAAVoRt9B9dNVvIMGN+ea5TzRzQC+lqSZ8dd/170zU5o9cFcwAgAAAAAEwM95XZin5mv2yhCI8+ugtKuvRVmNgzzIQN0yi1+9aIBWMAIAAAAAA+rgph1avVrhyHl/6G+HtASJSiJkv7u2UPYj+25fwtLAADMTg2AH0AAAAFZAAgAAAAAKRDkjyWv/etlYT4GyoXrmBED2FgZHnhc+l9Wsl06cH2BXMAIAAAAABohlpm3K850Vndf3NmNE0hHqDlNbSR8/IvMidQ3LnIZAVjACAAAAAAKw6WqWDrI7ZfyndtrftL3uh7BaF/FQxhkh8b0Zl547gAAzE4NwB9AAAABWQAIAAAAAC3DFh5oklLCNLY90bgWm68dFXz65JpAZSp1K99MBTPAQVzACAAAAAAQgZecmxEUZVHoptEQClDwAf8smI3WynQ/i+JBP0g+kQFYwAgAAAAADmUpVNJUwrydIllFvI/aau1703nk6tF2sfzkgFlWsKCAAMxODgAfQAAAAVkACAAAAAAQ5WKvWSB503qeNlOI2Tpjd5blheNr6OBO8pfJfPNstcFcwAgAAAAAKwHgQLSDJ5NwLBQbY5OnblQIsVDpGV7q3RCbFLD1U4/BWMAIAAAAADKOab9GTEiaybo9o4J72Pb9LiQU/CuKoQm6Jtlmo/IeQADMTg5AH0AAAAFZAAgAAAAAGfhFY3RGRm5ZgWRQef1tXxHBq5Y6fXaLAR4yJhrTBplBXMAIAAAAACKEF0ApLoB6lP2UqTFsTQYNc9OdDrs/vziPGzttGVLKQVjACAAAAAAvJcnqBNCZgHe+Q7Cqk+yiAaIuSJIRItXEsAlKy9PES4AAzE5MAB9AAAABWQAIAAAAAAIM73gPcgzgotYHLeMa2zAU4mFsr7CbILUZWfnuKSwagVzACAAAAAAJCSu98uV8xv88f2BIOWzt6p+6EjQStMBdkGPUkgN79cFYwAgAAAAABQqDP10DcaqEY7vxFS7U/9xcadDASONow1+xmVsPZ11AAMxOTEAfQAAAAVkACAAAAAAkWmHCUsiMy1pwZTHxVPBzPTrWFBUDqHNrVqcyyt7nO8FcwAgAAAAAMv2CebFRG/br7USELR98sIdgE9OQCRBGV5JZCO+uPMgBWMAIAAAAABxLEFq6nFVI3WaYhggo1d6oeDsBSm4wCW0T6D0yE/puwADMTkyAH0AAAAFZAAgAAAAAGInUYv0lP/rK7McM8taEHXRefk8Q2AunrvWqdfSV7UaBXMAIAAAAACE+WPxJ3gan7iRTbIxXXx+bKVcaf8kP4JD8DcwU0aL7wVjACAAAAAAiThzC5jAZ5D6xuCIsUD1aLy7KxmT2Fb8ybTkLxvj17MAAzE5MwB9AAAABWQAIAAAAACmHlg2ud3cplXlTsNTpvNnY6Qm1Fce0m899COamoDjaQVzACAAAAAArtJQeJIlepBWRU2aYar7+YGYVQ7dfDc1oxgTmA8r9q0FYwAgAAAAADZpcxtBFv18EdmhxsoxlDs253tNB5zqVY/h+nFD1isIAAMxOTQAfQAAAAVkACAAAAAApzcWSAbZWV/Rq+ylRNqqlJqNVR4fhXrz4633/MQOQgcFcwAgAAAAAN/jz/bsEleiuCl+li83EWlG6UMHA8CyaOMRKCkXkSCPBWMAIAAAAADSTHgALJugYsjIvSK1o0UwG6VizNokXQ/tnPNjOpGB1AAABWUAIAAAAADrmnP3kS2GpCl+gdL2da90KHTkBX46iQ/sZRoj7uPz7BJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAAA==", + "subType": "06" + } + } + } + }, + "update": { + "$set": { + "encryptedDecimal": { + "$$type": "binData" + } + } + }, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimal", + "bsonType": "decimal", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + } + }, + "deleteTokens": { + "default.default": { + "encryptedDecimal": { + "e": { + "$binary": { + "base64": "65pz95EthqQpfoHS9nWvdCh05AV+OokP7GUaI+7j8+w=", + "subType": "00" + } + }, + "o": { + "$binary": { + "base64": "noN+05JsuO1oDg59yypIGj45i+eFH6HOTXOPpeZ//Mk=", + "subType": "00" + } + } + } + } + } + } + }, + "command_name": "findAndModify" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": { + "$numberInt": "0" + }, + "encryptedDecimal": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "5nRutVIyq7URVOVtbE4vM01APSIajAVnsShMwjBlzkM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "rbf3AeBEv4wWFAKknqDxRW5cLNkFvbIs6iJjc6LShQY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "n+XAuFnP8Dov9TnhGFxNx0K/MnVM9WbJ7RouEu0ndO0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": 
"yRXojuVdn5GQtD97qYlaCL6cOLmZ7Cvcb3wFjkLUIdM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "DuIkdRPITRs55I4SZmgomAHCIsDQmXRhW8+MOznkzSk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SsBk+Et1lTbU+QRPx+xyJ/jMkmfG+QCvQEpip2YYrzA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "crCIzOd8KhHvvUlX7M1v9bhvU4pLdTc+X2SuqoKU5Ek=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "YOWdCw4UrqnxkAaVjqmC4sKQDMVMHEpFGnlxpxdaU6E=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "M3SShp81Ff8tQ632qKbv9MUcN6wjDaBReI0VXNu6Xh4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "gzHlSPxpM0hT75kQvWFzGlOxKvDoiKQZOr19V6l2zXI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "s3JnppOGYw9SL2Q1kMAZs948v2F5PrpXjGei/HioDWs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cG6+3Gk/zEH68P/uuuwiAUVCuyJwa1LeV+t29FlPPAo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dupdvR3AyJtM+g9NDKiaLVOtGca387JQp8w+V03m7Ig=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JqEQc5svj2jTvZ6LLA5ivE+kTb/0aRemSEmxk4G7Zrg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "szcXXXKnob+p3SoM4yED2R920LeJ7cVsclPMFTe4CeI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "o1QoGVXmuBdHwHm7aCtGMlMVKrjFdYvJXpoq6uhIAZ0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Jfm5wPlqqLCJRGQIqRq2NGmpn7s0Vrih2H3YAOoI2YU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zMHLb8ARbsYo8Ld05bqnGFf1Usha6EGb8QKwdSAyps0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yQdtq9lh5pugL7/i0Bj/PuZUUBUIzf+7wj1rl5y736w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wGWVZdO7qIuyDg/BqDgqjgoQ02h5YYgwXQB1oCin2NE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "by9HMLj6NTEpgztZ5HSN6GxImkXPcaFINYDzgZY33X8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tWo0vbasi7bXmn/MsOx13VC1IsWtpx/nYp0uj4iMzdA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tQQpndUYd5O87lOtrGjH3wl9VsOK0ray7RMasL90sBM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cQjXEDCMsOpKLLf+vlTgIHA+cbSJdzqhbSX9Wvh95aA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "7yMpU48IxK9SzP2cx3VnTownGEwFmeFofuuFT97SuuY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kSOx1kz0CmBgzKQHZlo65ZUY1DIv9A99JRm+Us2y6Ew=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ubQpdPBe6/xvtr+AcXdfYLSvYCR4ot0tivehkCsupb4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xal+iCJ6FTefRQToyoNksc9NCZShyn04NDGi4IYrcoM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "d7jU4iOK50xHxlkSifcxlZFCM46TSgQzoYivxG3HNLY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tJvl2nsBLBVzL3pp6sKWCL4UXeh3q/roYBJjSb74ve0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "OIUCaKRvIx9t1w6Hxlz1IcQTdPNCfdRNwnnTm10W+X0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "A9tvzsiElotOUVIB4CqfQp9mAwqvTM35YkmAR170aHA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "lI8gpK7hpb7c9x4RQugsxMnQay5LZJmwslZdvMx/dcE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dNCzh40U0XvdKnSDi3HRQOWQftEsDVqc4uUvsVFGoq8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "IP+iwEBWBwVVZIdpaMu8k5+soFCz+TZkYn3drKZ9grE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pnqyh6e0y5svHkJDShlN9CHV0WvMBE4QbtJpQw5ZCXc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "elEl42tbVDoRTLjAhZUFEtXiut4b3PVhg/1ZLZSQdtE=", + 
"subType": "00" + } + }, + { + "$binary": { + "base64": "vHuu2FxwclMHqyE6JBYbTYgbEkB0dqb/JuaxsvfwsmY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xTf7NCe3Gf8QpE78HR5OknlLTKfs9J+RN9UZpH6fnso=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "XiWSasRnJAulGR6+LCVD3mwRObXylqYWR9jvpywq12c=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "MZMxEQ5ikx0PG1YFIExv0UnTZogsvgeOEZTpzvBDn4w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yZMyMZBDrWbAhvnic7vvIYhmO9m5H2iuv0c8KNZrBzY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xxM14hTPY5j0vvcK2C7YAEjzdsfUTFHozHC0hEo1bxI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+01rqR1xVwkpGXcstbk1ItJqFVjH6Q8MGxEN3Cm9Y1A=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xOpLV0Z2VTRJ3iWtnWZcsyjXubTIkYWo31cO+HV1o1k=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "BWUOLqgLBqc5NwxVlSV5H3KFQPXbCp7mdo+jF+8cJqY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "fuQb1S6xZDGlrEbK+kI23aL53PP1PVNwqICnZNt9Yzg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SfscnoibFttahLdPVC4Ee+47ewGFKpDSU7M6HX19bKE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "rpSW2awybNVeKtat91VFxqbINoTfNhPfQAu+d73Xtf8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "9M/CP9ccOIIj2LLFmE0GFDO0Ban2wsNalEXfM6+h+1s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "WrEMG49l1ye4MhXs5ZS9tz8P6h+hDvthIg/2wW9ne1Q=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ImNhbfeyfH8qIEeA5ic0s3dAQBdzzTBS+CPsNih9vZ0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dWP33YDSn04UKJN2ogh2Rui0iW/0q2y18OCDRVcfyoo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "lYv0isAtfGh6H9tdp3cp2eHU7q2J+uk7QrgcxtK3w7Y=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "VGMoamB/+7zTOYcY/pqJc96xlv2PdW4hwsIAEIslTDQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yNeBWMF7BnD9wVwz2PgJsvWr77QiVvvWUvJF0+fqBug=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SfpvObJ+tJBXSvqeN7vlOfmhYign635lciYAJIjUtY8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dsen4NqjzVGjpjufiTMs3+gqeD09EbnuogPgxrJECwg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pxCWVM3sn19NsFEpgHbgLa+PmYlhN3mMiP0Wk8kJhYw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q11KNvJszjYIB9n9HcC+N4uz11a3eRj1L3BH9scKMDQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "A1PmkgcEToWh1JiVWE6mI5jUu7poxWWuCUt/cgRUUDc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "qJo3Hu4PJeanL7XEaWXO/n3YsodhZyd+MJOOmB9Kpd8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "BkBKLO8URFscfRY9Bav/1+L9mLohDgNr/MkZtGiraIs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "rZq5WA3Hx3xthOyHAJXK//f8pE2qbz7YKu3TIMp9GFY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "X07a/Lm80p5xd4RFs1dNmw+90tmPDPdGiAKVZkxd4zY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6YrBn2ofIw1b5ooakrLOwF41BWrps8OO0H9WH4/rtlE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "0l86Ag5OszXpa78SlOUV3K9nff5iC1p0mRXtLg9M1s4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Hn6yuxFHodeyu7ISlhYrbSf9pTiH4TDEvbYLWjTwFO0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zdf4y2etKBuIpkEU1zMwoCkCsdisfXZCh8QPamm+drY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "rOQ9oMdiK5xxGH+jPzOvwVqdGGnF3+HkJXxn81s6hp4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": 
"61aKKsE3+BJHHWYvs3xSIBvlRmKswmaOo5rygQJguUg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "KuDb/GIzqDM8wv7m7m8AECiWJbae5EKKtJRugZx7kR0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Q+t8t2TmNUiCIorVr9F3AlVnX+Mpt2ZYvN+s8UGict8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tJRZIpKxUgHyL83kW8cvfjkxN3z6WoNnUg+SQw+LK+k=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pnUsYjip8SvW0+m9mR5WWTkpK+p6uwJ6yBUAlBnFKMk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "PArHlz+yPRYDycAP/PgnI/AkP8Wgmfg++Vf4UG1Bf0E=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wnIh53Q3jeK8jEBe1n8kJLa89/H0BxO26ZU8SRIAs9Q=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "4F8U59gzBLGhq58PEWQk2nch+R0Va7eTUoxMneReUIA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ihKagIW3uT1dm22ROr/g5QaCpxZVj2+Fs/YSdM2Noco=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "EJtUOOwjkrPUi9mavYAi+Gom9Y2DuFll7aDwo4mq0M0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dIkr8dbaVRQFskAVT6B286BbcBBt1pZPEOcTZqk4ZcI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "aYVAcZYkH/Tieoa1XOjE/zCy5AJcVTHjS0NG2QB7muA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "sBidL6y8TenseetpioIAAtn0lK/7C8MoW4JXpVYi3z8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "0Dd2klU/t4R86c2WJcJDAd57k/N7OjvYSO5Vf8KH8sw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "I3jZ92WEVmZmgaIkLbuWhBxl7EM6bEjiEttgBJunArA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "aGHoQMlgJoGvArjfIbc3nnkoc8SWBxcrN7hSmjMRzos=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "bpiWPnF/KVBQr5F6MEwc5ZZayzIRvQOLDAm4ntwOi8g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tI7QVKbE6avWgDD9h4QKyFlnTxFCwd2iLySKakxNR/I=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "XGsge0CnoaXgE3rcpKm8AEeku5QVfokS3kcI+JKV1lk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JQxlryW2Q5WOwfrjAnaZxDvC83Dg6sjRVP5zegf2WiM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "YFuHKJOfoqp1iGVxoFjx7bLYgVdsN4GuUFxEgO9HJ5s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Z6vUdiCR18ylKomf08uxcQHeRtmyav7/Ecvzz4av3k4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SPGo1Ib5AiP/tSllL7Z5PAypvnKdwJLzt8imfIMSEJQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "m94Nh6PFFQFLIib9Cu5LAKavhXnagSHG6F5EF8lD96I=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pfEkQI98mB+gm1+JbmVurPAODMFPJ4E8DnqfVyUWbSo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "DNj3OVRLbr43s0vd+rgWghOL3FqeO/60npdojC8Ry/M=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kAYIQrjHVu49W8FTxyxJeiLVRWWjC9fPcBn+Hx1F+Ss=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "aCSO7UVOpoQvu/iridarxkxV1SVxU1i9HVSYXUAeXk4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Gh6hTP/yj1IKlXQ+Q69KTfMlGZjEcXoRLGbQHNFo/1s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "/gDgIFQ4tAlJk3GN48IS5Qa5IPmErwGk8CHxAbp6gs0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "PICyimwPjxpusyKxNssOOwUotAUbygpyEtORsVGXT8g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "4lu+cBHyAUvuxC6JUNyHLzHsCogGSWFFnUCkDwfQdgI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pSndkmoNUJwXjgkbkgOrT5f9nSvuoMEZOkwAN9ElRaE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tyW+D4i26QihNM5MuBM+wnt5AdWGSJaJ4X5ydc9iWTU=", + 
"subType": "00" + } + }, + { + "$binary": { + "base64": "9Syjr8RoxUgPKr+O5rsCu07AvcebA4P8IVKyS1NVLWc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "67tPfDYnK2tmrioI51fOBG0ygajcV0pLo5+Zm/rEW7U=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "y0EiPRxYTuS1eVTIaPQUQBBxwkyxNckbePvKgChwd0M=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "NWd+2veAaeXQgR3vCvzlI4R1WW67D5YsVLdoXfdb8qg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "PY5RQqKQsL2GqBBSPNOEVpojNFRX/NijCghIpxD6CZk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "lcvwTyEjFlssCJtdjRpdN6oY+C7bxZY+WA+QAqzj9zg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CWE7XRNylvTwO/9Fv56dNqUaQWMmESNS/GNIwgBaEI0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ijwlrUeS8nRYqK1F8kiCYF0mNDolEZS+/lJO1Lg93C8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "8KzV+qYGYuIjoNj8eEpnTuHrMYuhzphl80rS6wrODuU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wDyTLjSEFF895hSQsHvmoEQVS6KIkZOtq1c9dVogm9I=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SGrtPuMYCjUrfKF0Pq/thdaQzmGBMUvlwN3ORIu9tHU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "KySHON3hIoUk4xWcwTqk6IL0kgjzjxgMBObVIkCGvk4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "hBIdS9j0XJPeT4ot73ngELkpUoSixvRBvdOL9z48jY8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Tx6um0q9HjS5ZvlFhvukpI6ORnyrXMWVW1OoxvgqII0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zFKlyfX5H81+d4A4J3FKn4T5JfG+OWtR06ddyX4Mxas=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cGgCDuPV7MeMMYEDpgOupqyNP4BQ4H7rBnd2QygumgM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "IPaUoy98v11EoglTpJ4kBlEawoZ8y7BPwzjLYBpkvHQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Pfo4Am6tOWAyZNn8G9W5HWWGC3ZWmX0igI/RRB870Ro=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "fnTSjd7bC1Udoq6iM7UDnHAC/lsIXSHp/Gy332qw+/I=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "fApBgVRrTDyEumkeWs5p3ag9KB48SbU4Si0dl7Ns9rc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "QxudfBItgoCnUj5NXVnSmWH3HK76YtKkMmzn4lyyUYY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "sSOvwhKa29Wq94bZ5jGIiJQGbG1uBrKSBfOYBz/oZeI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "FdaMgwwJ0NKsqmPZLC5oE+/0D74Dfpvig3LaI5yW5Fs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "sRWBy12IERN43BSZIrnBfC9+zFBUdvjTlkqIH81NGt4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "/4tIRpxKhoOwnXAiFn1Z7Xmric4USOIfKvTYQXk3QTc=", + "subType": "00" + } + } + ] + }, + { + "_id": { + "$numberInt": "1" + }, + "encryptedDecimal": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "DLCAJs+W2PL2DV5YChCL6dYrQNr+j4p3L7xhVaub4ic=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Mr/laWHUijZT5VT3x2a7crb7wgd/UXOGz8jr8BVqBpM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wXVD/HSbBljko0jJcaxJ1nrzs2+pchLQqYR3vywS8SU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "VDCpBYsJIxTfcI6Zgf7FTmKMxUffQv+Ys8zt5dlK76I=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zYDslUwOUVNwTYkETfjceH/PU3bac9X3UuQyYJ19qK0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "rAOmHSz18Jx107xpbv9fYcPOmh/KPAqge0PAtuhIRnc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "BFOB1OGVUen7VsOuS0g8Ti7oDsTt2Yj/k/7ta8YAdGM=", + "subType": "00" + } + }, + { + 
"$binary": { + "base64": "2fckE5SPs0GU+akDkUEM6mm0EtcV3WDE/sQsnTtodlk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "mi9+aNjuwIvaMpSHENvKzKRAmX9cYguo2mXLvOoftHQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "K6TWn4VcWWkz/gkUkLmbtwkG7SNeABICmLDnoYJFlLU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Z+2/cEtGU0Fq7QJFNGA/0y4aWAsw0ncG6X0LYRqwS3c=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "rrSIf+lgcNZFbbUkS9BmE045jRWBpcBJXHzfMVEFuzE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "KlHL3Kyje1/LMIfgbCqw1SolxffJvvgsYBV5y77wxuA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "hzJ1YBoETmYeCh352dBmG8d8Wse/bUcqojTWpWQlgsc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "lSdcllDXx8MA+s0GULjDA1lQkcV0L8/aHtZ6dM2pZ2c=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "HGr7JLTTA7ksAnlmjSIwwdBVvgr3fv46/FTdiCPYpos=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "mMr25v1VwOEVZ8xaNUTHJCcsYqV+kwK6RzGYilxPtJ4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "129hJbziPJzNo0IoTU3bECdge0FtaPW8dm4dyNVNwYU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "doiLJ96qoo+v7NqIAZLq6BI5axV8Id8gT5vyJ1ZZ0PM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cW/Lcul3xYmfyvI/0x/+ybN78aQmBK1XIGs1EEU09N8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "1aVIwzu9N5EJV9yEES+/g6hOTH7cA2NTcLIc59cu0wU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kw5tyl7Ew0r1wFyrN1mB9FiVW2hK2BxxxUuJDNWjyjQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ADAY2YBrm6RJBDY/eLLcfNxmSJku+mefz74gH66oyco=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "8gkqB1LojzPrstpFG7RHYmWxXpIlPDTqWnNsXH7XDRU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "TESfVQMDQjfTZmHmUeYUE2XrokJ6CcrsKx/GmypGjOw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "qFM+HFVQ539S0Ouynd1fBHoemFxtU9PRxE5+Dq7Ljy4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "jPiFgUZteSmOg4wf3bsEKCZzcnxmMoILsgp/GaZD+dM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "YaWUgJhYgPNN7TkFK16H8SsQS226JguaVhOIQxZwQNQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "x90/Qk3AgyaFsvWf2KUCu5XF3j76WFSjt/GrnG01060=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ZGWybWL/xlEdMYRFCZDUoz10sywTf7U/7wufsb78lH0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "8l4ganN66jIcdxfHAdYLaym/mdzUUQ8TViw3MDRySPc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "c8p5XEGTqxqvRGVlR+nkxw9uUdoqDqTB0jlYQ361qMA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "1ZGFLlpQBcU3zIUg8MmgWwFKVz/SaA7eSYFrfe3Hb70=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "34529174M77rHr3Ftn9r8jU4a5ztYtyVhMn1wryZSkU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "YkQ4pxFWzc49MS0vZM6S8mNo4wAwo21rePBeF3C+9mI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "MhOf4mYY00KKVhptOcXf0bXB7WfuuM801MRJg4vXPgc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "7pbbD8ihNIYIBJ3tAUPGzHpFPpIeCTAk5L88qCB0/9w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "C9Q5PoNJTQo6pmNzXEEXUEqH22//UUWY1gqILcIywec=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "AqGVk1QjDNDLYWGRBX/nv9QdGR2SEgXZEhF0EWBAiSE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "/sGI3VCbJUKATULJmhTayPOeVW+5MjWSvVCqS77sRbU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": 
"yOtbL0ih7gsuoxVtRrACMz+4N5uo7jIR7zzmtih2Beo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "uA6dkb2Iyg9Su8UNDvZzkPx33kPZtWr/CCuEY+XgzUM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "1DoSFPdHIplqZk+DyWAmEPckWwXw/GdB25NLmzeEZhk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "OfDVS0T3ZuIXI/LNbTp6C9UbPIWLKiMy6Wx+9tqNl+g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "3PZjHXbmG6GtPz+iapKtQ3yY4PoFFgjIy+fV2xQv1YU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kaoLN0BoBWsmqE7kKkJQejATmLShd8qffcAmlhsxsGY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "vpiw9KgQdegGmp7IJnSGX2miujRLU0xzs0ITTqbPW7c=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "NuXFf7xGUefYjIUTuMxNUTCfVHrF8oL0AT7dPv5Plk4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "8Tz53LxtfEBJ9eR+d2690kwNsqPV6XyKo2PlqZCbUrc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "e6zsOmHSyV8tyQtSX6BSwui6wK9v1xG3giY/IILJQ2w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "2fedFMCxa2DzmIpfbDKGXhQg0PPwbUv6vIWdwwlvhms=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yEJKMFnWXTC8tJUfzCInzQRByNEPjHxpw4L4m8No91Q=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "YbFuWwOiFuQyOzIJXDbOkCWC2DyrG+248TBuVCa1pXU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "w7IkwGdrguwDrar5+w0Z3va5wXyZ4VXJkDMISyRjPGo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "YmJUoILTRJPhyIyWyXJTsQ6KSZHHbEpwPVup6Ldm/Ko=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "FvMjcwVZJmfh6FP/yBg2wgskK+KHD8YVUY6WtrE8xbg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "h4HCtD4HyYz0nci49IVAa10Z4NJD/FHnRMV4sRX6qro=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "nC7BpXCmym+a0Is2kReM9cYN2M1Eh5rVo8fjms14Oiw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "1qtVWaeVo649ZZZtN8gXbwLgMWGLhz8beODbvru0I7Q=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Ej+mC0QFyMNIiSjR939S+iGBm7dm+1xObu5IcF/OpbU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "UQ8LbUG3cMegbr9yKfKanAPQE1EfPkFciVDrNqZ5GHY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "4iI3mXIDjnX+ralk1HhJY43mZx2uTJM7hsv9MQzTX7E=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "0WQCcs3rvsasgohERHHCaBM4Iy6yomS4qJ5To3/yYiw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "qDCTVPoue1/DOAGNAlUstdA9Sid8MgEY4e5EzHcVHRk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "9F9Mus0UnlzHb8E8ImxgXtz6SU98YXD0JqswOKw/Bzs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pctHpHKVBBcsahQ6TNh6/1V1ZrqOtKSAPtATV6BJqh0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "vfR3C/4cPkVdxtNaqtF/v635ONbhTf5WbwJM6s4EXNE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ejP43xUBIex6szDcqExAFpx1IE/Ksi5ywJ84GKDFRrs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "jbP4AWYd3S2f3ejmMG7dS5IbrFol48UUoT+ve3JLN6U=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CiDifI7958sUjNqJUBQULeyF7x0Up3loPWvYKw9uAuw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "e2dQFsiHqd2BFHNhlSxocjd+cPs4wkcUW/CnCz4KNuM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "PJFckVmzBipqaEqsuP2mkjhJE4qhw36NhfQ9DcOHyEU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "S3MeuJhET/B8VcfZYDR9fvX0nscDj416jdDekhmK11s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CGVHZRXpuNtQviDB2Kj03Q8uvs4w3RwTgV847R7GwPw=", + 
"subType": "00" + } + }, + { + "$binary": { + "base64": "yUGgmgyLrxbEpDVy89XN3c2cmFpZXWWmuJ/35zVZ+Jw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "inb6Q97mL1a9onfNTT8v9wsoi/fz7KXKq3p8j90AU9c=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CCyYx/4npq9xGO1lsCo8ZJhFO9/tN7DB+/DTE778rYg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "LNnYw4fwbiAZu0kBdAHPEm/OFnreS+oArdB5O/l/I98=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "P006SxmUS/RjiQJVYPdMFnNo3827GIEmSzagggkg05Q=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "oyvwY+WsnYV6UHuPki1o0ILJ2jN4uyXf9yaUNtZJyBA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "36Lk3RHWh1wmtCWC/Yj6jNIo17U5y6SofAgQjzjVxD8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "vOOo8FqeHnuO9mqOYjIb4vgwIwVyXZ5Y+bY5d9tGFUM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "bJiDJjwQRNxqxlGjRm5lLziFhcfTDCnQ/qU1V85qcRg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "2Qgrm1n0wUELAQnpkEiIHB856yv76q8jLbpiucetcm0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "5ciPOYxTK0WDwwYyfs7yiVymwtYQXDELLxmM4JLl4/o=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "31dC2WUSIOKQc4jwT6PikfeYTwi80mTlh7P31T5KNQU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "YluTV2Mu53EGCKLcWfHZb0BM/IPW2xJdG3vYlDMEsM4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dh/8lGo2Ek6KukSwutH6Q35iy8TgV0FN0SJqe0ZVHN8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "EVw6HpIs3BKen2qY2gz4y5dw1JpXilfh07msZfQqJpc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "FYolLla9L8EZMROEdWetozroU40Dnmwwx2jIMrr7c1A=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "8M6k4QIutSIj6CM41vvkQtuFsaGrjoR9SZJVSLbfGKQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "9LM0VoddDNHway442MqY+Z7vohB2UHau/cddshhzf40=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "66i8Ytco4Yq/FMl6pIRZazz3CZlu8fO2OI6Pne0pvHU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "2a/HgX+MjZxjXtSvHgF1yEpHMJBkl8Caee8XrJtn0WM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "frhBM662c4ZVG7mWP8K/HhRjd01lydW/cPcHnDjifqc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6k1T7Q1t668PBqv6fwpVnT1HWh7Am5LtbKvwPJKcpGU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "UlJ5Edfusp8S/Pyhw6KTglIejmbr1HO0zUeHn/qFETA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "jsxsB+1ECB3assUdoC333do9tYH+LglHmVSJHy4N8Hg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "2nzIQxGYF7j3bGsIesECEOqhObKs/9ywknPHeJ3yges=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xJYKtuWrX90JrJVoYtnwP7Ce59XQGFYoalxpNfBXEH0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "NLI5lriBTleGCELcHBtNnmnvwSRkHHaLOX4cKboMgTw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "hUOQV0RmE5aJdJww1AR9rirJG4zOYPo+6cCkgn/BGvQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "h4G2Of76AgxcUziBwCyH+ayMOpdBWzg4yFrTfehSC2c=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "VuamM75RzGfQpj2/Y1jSVuQLrhy6OAwlZxjuQLB/9Ss=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kn9+hLq7hvw02xr9vrplOCDXKBTuFhfbX7d5v/l85Pg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "fAiGqKyLZpGngBYFbtYUYt8LUrJ49vYafiboifTDjxs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "BxRILymgfVJCczqjUIWXcfrfSgrrYkxTM5VTg0HkZLY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": 
"CrFY/PzfPU2zsFkGLu/dI6mEeizZzCR+uYgjZBAHro0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "AEbrIuwvXLTtYgMjOqnGQ8y8axUn5Ukrn7UZRSyfQVw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ouWeVH3PEFg+dKWlXc6BmqirJOaVWjJbMzZbCsce4dA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+hd6xFB+EG+kVP7WH4uMd1CLaWMnt5xJRaY/Guuga9Q=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zmpGalfAOL3gmcUMJYcLYIRT/2VDO/1Dw4KdYZoNcng=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "2PbHAoM/46J2UIZ/vyksKzmVVfxA7YUyIxWeL/N/vBk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "7fD9x+zk5MVFesb59Klqiwwmve7P5ON/5COURXj5smE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tlrNQ4jaq051iaWonuv1sSrYhKkL1LtNZuHsvATha3s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "fBodm28iClNpvlRyVq0dOdXQ08S7/N3aDwid+PdWvRo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "O+/nnRqT3Zv7yMMGug8GhKHaWy6u7BfRGtZoj0sdN1c=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "5AZZ/RTMY4Photnm/cpXZr/HnFRi3eljacMsipkJLHA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "oFVyo/kgoMxBIk2VE52ySSimeyU+Gr0EfCwapXnTpKA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Z8v59DfcnviA0mzvnUk+URVO0UuqAWvtarEgJva/n1c=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "P64GOntZ+zBJEHkigoh9FSxSO+rJTqR20z5aiGQ9an4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xMbSuDPfWuO/Dm7wuVl06GnzG9uzTlJJX9vFy7boGlY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kXPB19mRClxdH2UsHwlttS6lLU2uHvzuZgZz7kC45jU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "NDVjVYXAw4k0w4tFzvs7QDq39aaU3HQor4I2XMKKnCk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "uKw/+ErVfpTO1dGUfd3T/eWfZW3nUxXCdBGdjvHtZ88=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "av0uxEzWkizYWm0QUM/MN1hLibnxPvCWJKwjOV4yVQY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ERwUC47dvgOBzIsEESMIioLYbFOxOe8PtJTnmDkKuHM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "2gseKlG5Le12fS/vj4eaED4lturF16kAgJ1TpW3HxEE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "7Cvg0Y3j/5i2F1TeXxlMmU7xwif5dCmwkZAOrVC5K2Y=", + "subType": "00" + } + } + ] + } + ] + } + } + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2-Range-Decimal-InsertFind.json b/test/client-side-encryption/spec/legacy/fle2-Range-Decimal-InsertFind.json new file mode 100644 index 0000000000..cea03e23fe --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2-Range-Decimal-InsertFind.json @@ -0,0 +1,1899 @@ +{ + "runOn": [ + { + "minServerVersion": "6.2.0", + "topology": [ + "replicaset" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "encrypted_fields": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimal", + "bsonType": "decimal", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + }, + "key_vault_data": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": 
"sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ], + "tests": [ + { + "description": "FLE2 Range Decimal. Insert and Find.", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimal": { + "$numberDecimal": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimal": { + "$numberDecimal": "1" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDecimal": { + "$gt": { + "$numberDecimal": "0" + } + } + } + }, + "result": [ + { + "_id": 1, + "encryptedDecimal": { + "$numberDecimal": "1" + } + } + ] + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedDecimal": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimal", + "bsonType": "decimal", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedDecimal": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimal", + "bsonType": "decimal", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "find": "default", + "filter": { + "encryptedDecimal": { + "$gt": { + 
"$binary": { + "base64": "CgljAAADcGF5bG9hZADZYgAABGcAnWIAAAMwAH0AAAAFZAAgAAAAAJu2KgiI8vM+kz9qD3ZQzFQY5qbgYqCqHG5R4jAlnlwXBXMAIAAAAAAAUXxFXsz764T79sGCdhxvNd5b6E/9p61FonsHyEIhogVjACAAAAAAeEtIU/sp7zwjA/VArYkCzkUUiRiQOmlTaVZvfUqbEp0AAzEAfQAAAAVkACAAAAAAPQPvL0ARjujSv2Rkm8r7spVsgeC1K3FWcskGGZ3OdDIFcwAgAAAAACgNn660GmefR8jLqzgR1u5O+Uocx9GyEHiBqVGko5FZBWMAIAAAAACMKx1UyNAN4yVafFht8jp4w4LNaJcLhnozfSMzDHD3owADMgB9AAAABWQAIAAAAAD2Zi6kcxmaD2mY3VWrP+wYJMPg6cSBIYPapxaFQxYFdQVzACAAAAAAM/cV36BLBY3xFBXsXJY8M9EHHOc/qrmdc2CJmj3M89gFYwAgAAAAAHdek2wlAQ5BQORVjudyziRTV0EKx8qbsnoDiw4HG2xaAAMzAH0AAAAFZAAgAAAAAOaNqI9srQ/mI9gwbk+VkizGBBH/PPWOVusgnfPk3tY1BXMAIAAAAAAc96O/pwKCmHCagT6T/QV/wz4vqO+R22GsZ1dse2Vg6QVjACAAAAAA3tld+twIlVWnCTXElspau5k02CwxXDjh+3u1CQtV/W4AAzQAfQAAAAVkACAAAAAA0XlQgy/Yu97EQOjronl9b3dcR1DFn3deuVhtTLbJZHkFcwAgAAAAACoMnpVl6EFJak8A+t5N4RFnQhkQEBnNAx8wDqmq5U/dBWMAIAAAAABRmXIchytMNz4J439viiY5FZ1OB3AXJBkZ3udUqHpsqAADNQB9AAAABWQAIAAAAAAOtpNexRxfv0yRFvZO9DhlkpU4mDuAb8ykdLnE5Vf1VAVzACAAAAAAeblFKm/30orP16uQpZslvsoS8s0xfNPIBlw3VkHeekYFYwAgAAAAAJQdzGMO2s1AkdKz3ZPytivrQphUFVhvhMcXjvUGzHBDAAM2AH0AAAAFZAAgAAAAAIr8xAFm1zPmrvW4Vy5Ct0W8FxMmyPmFzdWVzesBhAJFBXMAIAAAAABYeeXjJEzTHwxab6pUiCRiZjxgtN59a1y8Szy3hfkg+gVjACAAAAAA161loizzKvXS1Po/3bxeNICmKpsPSK9Q+EpkISK3jVoAAzcAfQAAAAVkACAAAAAAl+ibLk0/+EwoqeC8S8cGgAtjtpQWGEZDsybMPnrrkwEFcwAgAAAAAHPPBudWgQ+HUorLDpJMqhS9VBF2VF5aLcxgrM1s+yU7BWMAIAAAAAACyakB3CZWEjbxK0u9Sflc3+EafBAQFbvBpCxKvxuEQAADOAB9AAAABWQAIAAAAACR9erwLTb+tcWFZgJ2MEfM0PKI9uuwIjDTHADRFgD+SQVzACAAAAAAcOop8TXsGUVQoKhzUllMYWxL93xCOkwtIpV8Q6hiSYYFYwAgAAAAAJ43998lzKoVqWm99ZzKHJLeVscGNCVoKDvpUtt2rDI9AAM5AH0AAAAFZAAgAAAAALv0vCPgh7QpmM8Ug6ad5ioZJCh7pLMdT8FYyQioBQ6KBXMAIAAAAADsCPyIG8t6ApQkRk1fX/sfc1kpuWCWP8gAEpnYoBSHrQVjACAAAAAAj2h1WLr0EFEFZ31djx3BtLIbAtdE05ax54opkJo4/bQAAzEwAH0AAAAFZAAgAAAAAIW4AxJgYoM0pcNTwk1RSbyjZGIqgKL1hcTJmNrnZmoPBXMAIAAAAAAZpfx3EFO0vY0f1eHnE0PazgqeNDTaj+pPJMUNW8lFrAVjACAAAAAAqFD83E8POmo6pdqg7D+jWtngbgcV99mbQBxkbpX7ds4AAzExAH0AAAAFZAAgAAAAAKliO6L9zgeuufjj174hvmQGNRbmYYs9yAirL7OxwEW3BXMAIAAAAAAqU7vs3DWUQ95Eq8OejwWnD0GuXd+ASi/uD6S0l8MM1QVjACAAAAAAp0cwpAh7SiuStPwYNVvp83N5GWyWRWA7UGRxHDDj8g8AAzEyAH0AAAAFZAAgAAAAAOGQcCBkk+j/Kzjt/Cs6g3BZPJG81wIHBS8JewHGpgk+BXMAIAAAAABjrxZXWCkdzrExwCgyHaafuPSQ4V4x2k9kUCAqUaYKDQVjACAAAAAAJr/tEuxScrJJEMECy4itHf1y5+gO2l5sBjgM/EOLXw0AAzEzAH0AAAAFZAAgAAAAAPmCNEt4t97waOSd5hNi2fNCdWEkmcFJ37LI9k4Az4/5BXMAIAAAAABX7DuDPNg+duvELf3NbLWkPMFw2HGLgWGHyVWcPvSNCAVjACAAAAAAeNCEn5Z3A5CgCnIgaBl2N8p0t+5rRY06vGg6ePVMaTUAAzE0AH0AAAAFZAAgAAAAAD4vtVUYRNB+FD9yoQ2FVJH3nMeJeKbi6eZfth638YqbBXMAIAAAAAANCuUB4OdmuD6LaDK2f3vaqfgYYvg40wDXOBbcFjTqLwVjACAAAAAATozHsao4er/Uk5H/pKe/1JEo/3h4Pgah2RgJHXf2PeYAAzE1AH0AAAAFZAAgAAAAAJPIRzjmTjbdIvshG6UslbEOd797ZSIdjGAhGWxVQvK1BXMAIAAAAABgmJ0Jh8WLs9IYs/a7DBjDWd8J3thW/AGJK7zDnMeYOAVjACAAAAAAtD48LbnCkTvacfz8DvR1ixsbYceiJxJRV7tLqDUWvVwAAzE2AH0AAAAFZAAgAAAAAABQyKQPoW8wGPIqnsTv69+DzIdRkohRhOhDmyVHkw9WBXMAIAAAAAAqWA2X4tB/h3O1Xlawtz6ndI6WaTwgU1QYflL35opu5gVjACAAAAAAphx2eRFGSW/tm3HxilqRv5Zj5TtOy4KSGiCIm6yJANYAAzE3AH0AAAAFZAAgAAAAACB7NOyGQ1Id3MYnxtBXqyZ5Ul/lHH6p1b10U63DfT6bBXMAIAAAAADpOryIcndxztkHSfLN3Kzq29sD8djS0PspDSqERMqokQVjACAAAAAAIHYCljTfUu/Ox23Nrjz2Z6cqQpn4s0sJV/SxkC9UtIEAAzE4AH0AAAAFZAAgAAAAAKVfXLfs8XA14CRTB56oZwV+bFJN5BHraTXbqEXZDmTkBXMAIAAAAAASRWTsfGOpqdffiOodoqIgBzG/yzFyjR5CfUsIUIWGpgVjACAAAAAAGs7l0GsFNkQMcJBUuUsEOMuQupbEvi7MDcezspsCY3EAAzE5AH0AAAAFZAAgAAAAAH/aZr4EuS0/noQR9rcF8vwoaxnxrwgOsSJ0ys8PkHhGBXMAIAAAAACd7ObGQW7qfddcvyxRTkPuvq/PHu7+6I5dxwS1Lzy5XAVjACAAAAAAWnfsbdu5Qvb62rEpBR90qh+3jdYfp266dmuNPLoXys4AAzIwAH0AAAAFZAAgAAAAAKvlcpFFNq0oA+urq3w6d80PK1HHHw0H0yVWvU9aHijXBXMAI
AAAAADWnAHQ5Fhlcjawki7kWzdqjM2f6IdGJblojrYElWjsZgVjACAAAAAAdNxswYIMjqVta8sLC8wniMm6gOgIgG5eWTPAxItBJJ4AAzIxAH0AAAAFZAAgAAAAAH/2aMezEOddrq+dNOkDrdqf13h2ttOnexZsJxG1G6PNBXMAIAAAAABNtgnibjC4VKy5poYjvdsBBnVvDTF/4mmEAxsXVgZVKgVjACAAAAAArWXr2Myg8T8/0eYBCUXDOsIR//zqZtlE6Lb1YZJkvYcAAzIyAH0AAAAFZAAgAAAAAF2wZoDL6/V59QqO8vdRZWDpXpkV4h4KOCSn5e7x7nmzBXMAIAAAAADLZBu7LCYjbThaVUqMK14H/elrVOYIKJQCx4C9Yjw37gVjACAAAAAAWeF6F8pBeCm27PWjQGrlyN7JHp9WqCNnwFzqZ+V51t0AAzIzAH0AAAAFZAAgAAAAAH27yYaLn9zh2CpvaoomUPercSfJRUmBY6XFqmhcXi9QBXMAIAAAAAAUwumVlIYIs9JhDhSj0R0+59psCMsFk94E62VxkPt42QVjACAAAAAAjbsmlVrfexmULer3RtGwnOf20pF0xv4Z46OHOaIwgzEAAzI0AH0AAAAFZAAgAAAAALMg2kNAO4AFFs/mW3In04yFeN4AP6Vo0klyUoT06RquBXMAIAAAAAAgGWJbeIdwlpqXCyVIYSs0dt54Rfc8JF4b8uYc+YUj0AVjACAAAAAAsNKKan8cMW1oYFmQiyPZbepuJlDD0/CQUS21++CP16kAAzI1AH0AAAAFZAAgAAAAALas/i1T2DFCEmrrLEi7O2ngJZyFHialOoedVXS+OjenBXMAIAAAAAA1kK0QxY4REcGxHeMkgumyF7iwlsRFtw9MlbSSoQY7uAVjACAAAAAAUGZ/BwF6Q9hcFBC8F5ed6RwYWJ1/OahRwljBloMgkFAAAzI2AH0AAAAFZAAgAAAAAP1TejmWg1CEuNSMt6NUgeQ5lT+oBoeyF7d2l5xQrbXWBXMAIAAAAABPX0kj6obggdJShmqtVfueKHplH4ZrXusiwrRDHMOKeQVjACAAAAAApU9zcPAQ2xOspmwhjWN90v2m2clQ4rk7WBmm/nUyOHQAAzI3AH0AAAAFZAAgAAAAAMrKn+QPa/NxYezNhlOX9nyEkN1kE/gW7EuZkVqYl0b8BXMAIAAAAABUoZMSPUywRGfX2EEencJEKH5x/P9ySUVrhStAwgR/LgVjACAAAAAAl/hDhauhBkda/I/kLr/XLxyso2guawC2isj+tDQhfMoAAzI4AH0AAAAFZAAgAAAAAMmD1+a+oVbiUZd1HuZqdgtdVsVKwuWAn3/M1B6QGBM3BXMAIAAAAACLyytOYuZ9WEsIrrtJbXUx4QgipbaAbmlJvSZVkGi0CAVjACAAAAAAVKbMdxvkfCBoqJCUVjLrtKF90a80LocckdFc7LvYuWIAAzI5AH0AAAAFZAAgAAAAAOVKV7IuFwmYP1qVv8h0NvJmfPICu8yQhzjG7oJdTLDoBXMAIAAAAABL70XLfQLKRsw1deJ2MUvxSWKxpF/Ez73jqtbLvqbuogVjACAAAAAARiC2ShVCKMjjiqB6xg+jDIYJNhhWqmceO07EC8rEXkoAAzMwAH0AAAAFZAAgAAAAAKS/1RSAQma+xV9rz04IcdzmavtrBDjOKPM+Z2NEyYfPBXMAIAAAAAAOJDWGORDgfRv8+w5nunh41wXb2hCA0MRzwnLnQtIqPgVjACAAAAAAWU2Vw2Rta8oHxPPfOWQa86tjYWndb0E3RNlZ9gthHt0AAzMxAH0AAAAFZAAgAAAAAFeq8o82uNY1X8cH6OhdTzHNBUnCChsEDs5tm0kPBz3qBXMAIAAAAABaxMBbsaeEj/EDtr8nZfrhhhirBRPJwVamDo5WwbgvTQVjACAAAAAAqcsCyJuLvxG8STrUI1Ccaga760+4fbmIPc6bXodea0gAAzMyAH0AAAAFZAAgAAAAAI8IKIfDrohHh2cjspJHCovqroSr5N3QyVtNzFvT5+FzBXMAIAAAAABXHXteKG0DoOMmECKp6ro1MZNQvXGzqTDdZ0DUc8QfFAVjACAAAAAA47u7VuqtYJ4jt5CcGrUacopqApwpqEpIpkalvCRtul0AAzMzAH0AAAAFZAAgAAAAAJkHvm15kIu1OtAiaByj5ieWqzxiu/epK6c/9+KYIrB0BXMAIAAAAACzg5TcyANk0nes/wCJudd1BwlkWWF6zw3nGclq5v3SJQVjACAAAAAAvTQRtEYdIj2+bfJNamvrgWRoBsYYjMQx9MyuFwSfKmYAAzM0AH0AAAAFZAAgAAAAAAYSOvEWWuSg1Aym7EssNLR+xsY7e9BcwsX4JKlnSHJcBXMAIAAAAABT48eY3PXVDOjw7JpNjOe1j2JyI3LjDnQoqZ8Je5B2KgVjACAAAAAA4eLwHqiSgM1pMqjK3QFgYauOnI3emnlOK3rgQPuec4AAAzM1AH0AAAAFZAAgAAAAAGQxC9L1e9DfO5XZvX1yvc3hTLtQEdKO9FPMkyg0Y9ZABXMAIAAAAADtmcMNJwdWLxQEArMGZQyzpnu+Z5yMmPAkvgq4eAKwNQVjACAAAAAAxA7XR9Cp6671TiZesvqE/EPKVPXfGDaYuDnce+MaZw0AAzM2AH0AAAAFZAAgAAAAADLHK2LNCNRO0pv8n4fAsxwtUqCNnVK8rRgNiQfXpHSdBXMAIAAAAACf16EBIHRKD3SzjRW+LMOl+47QXA3CJhMzlcqyFRW22AVjACAAAAAA9F9Ztm39rTapXoeRmzk6eUoa3MDPatwoQD5PvD+3xGEAAzM3AH0AAAAFZAAgAAAAAHiZJiXKNF8bbukQGsdYkEi95I+FSBHy1I5/hK2uEZruBXMAIAAAAADE+lZBa8HDUJPN+bF6xI9x4N7GF9pj3vBR7y0BcfFhBAVjACAAAAAArsw2CrSC2y02EiWJxViVxlYRIXtWARpUTwO/mfqS7O4AAzM4AH0AAAAFZAAgAAAAAI1oa2OIw5TvhT14tYCGmhanUoYcCZtNbrVbeoMldHNZBXMAIAAAAAAx2nS0Ipblf2XOgBiUOuJFBupBhe7nb6QPLZlA4aMPCgVjACAAAAAAaVoGxV5YH4urmcw3Zocg+QHoq+hA0u6nIkconI0MtZoAAzM5AH0AAAAFZAAgAAAAABgTWS3Yap7Q59hii/uPPimHWXsr+DUmsqfwt/X73qsOBXMAIAAAAACKK05liW5KrmEAvtpCB1WUltruzUylDDpjea//UlWoOAVjACAAAAAAAI3iOqWSA+DpgNr/neIRpt85Q8pLS/81ZKDnbC4io/0AAzQwAH0AAAAFZAAgAAAAABOAnBPXDp6i9TISQXvcNKwGDLepZTu3cKrB4vKnSCjBBXMAIAAAAADjjzZO7UowAAvpwyG8BNOVqLCccMFk3aDK4unUeft5ywVjACAAAAAAIZqKrUcbtze/K3CUKdchuVXQYkgj6jQfIeMZ3GrtYeEAAzQxAH0AAAAF
ZAAgAAAAAHN8hyvT1lYrAsdiV5GBdd5jhtrAYE/KnSjw2Ka9hjz9BXMAIAAAAAD794JK7EeXBs+D7yOVK7nWF8SbZ/7U8gZ7nnT9JFNwTAVjACAAAAAAQ4K5cDGLM/2L7KbMt537tsG1eiWrboKMj6pXVfBDIBoAAzQyAH0AAAAFZAAgAAAAAO93bPrq8bsnp1AtNd9ETnXIz0lH/2HYN/vuw9wA3fyFBXMAIAAAAABHlls5fbaF2oAGqptC481XQ4eYxInTC29aElfmVZgDUgVjACAAAAAAe0oLBk+ZaipcPgjjHvACAyA42cLY8i+s+V0EsarulZkAAzQzAH0AAAAFZAAgAAAAAL1YsAZm1SA0ztU6ySIrQgCCA74V6rr0/4iIygCcaJL6BXMAIAAAAADTXWTHWovGmUR1Zg9l/Aqq9H5mOCJQQrb/Dfae7e3wKAVjACAAAAAAAeXpcv0oCp9iY/U81w+WJjFjGn60KLQEURJd/pSoaJgAAzQ0AH0AAAAFZAAgAAAAAEoFAeHk0RZ9kD+cJRD3j7PcE5gzWKnyBrF1I/MDNp5mBXMAIAAAAACgHtc2hMBRSZjKw8RAdDHK+Pi1HeyjiBuAslGVNcW5tAVjACAAAAAAmSJiRfjmuwq5cYfadeYQRkPqbGYYrMNTQeyhBEAYClEAAzQ1AH0AAAAFZAAgAAAAAAW+7DmSN/LX+/0uBVJDHIc2dhxAGz4+ehyyz8fAnNGoBXMAIAAAAAA6Ilw42EvvfLJ3Eq8Afd+FjPoPcQutZO6ltmCLEr8kxQVjACAAAAAAUHZ9EwBIvDI4PUeCOzZCuVN9g9h5Zvq2mSpmejPLBE8AAzQ2AH0AAAAFZAAgAAAAANBdV7M7kuYO3EMoQItAbXv4t2cIhfaT9V6+s4cg9djlBXMAIAAAAABvz4MIvZWxxrcJCL5qxLfFhXiUYB1OLHdKEjco94SgDgVjACAAAAAAvz1koQRh+weAUstLWz/iHxFrLReN4KtQjdEU/zrKOFsAAzQ3AH0AAAAFZAAgAAAAAMoAoiAn1kc79j5oPZtlMWHMhhgwNhLUnvqkqIFvcH1NBXMAIAAAAADcJTW7WiCyW0Z9YDUYwppXhLj4Ac1povpJvcAq+i48MQVjACAAAAAARL0k3Uwe7OUwTejHMoNwK+twNHQznmo1vVUZaH7h8BIAAzQ4AH0AAAAFZAAgAAAAACI3j5QP7dWHpcT6WO/OhsWwRJNASBYqIBDNzW8IorEyBXMAIAAAAABxUpBSjXwCKDdGP9hYU+RvyR+96kChfvyyRC4jZmztqAVjACAAAAAA93/qZqyku1BJePH5hmMWsMfuMlF7TOFZp7z2wLJNdeYAAzQ5AH0AAAAFZAAgAAAAAKsbycEuQSeNrF8Qnxqw3x3og8JmQabwGqnDbqzFRVrrBXMAIAAAAACno/3ef2JZJS93SVVzmOZSN+jjJHT8s0XYq2M46d2sLAVjACAAAAAARUbN9bbz8Balyw8pmBLg3L5dy3aQ9s8IgqpVLeQzTkUAAzUwAH0AAAAFZAAgAAAAAPXIcoO8TiULqlxzb74NFg+I8kWX5uXIDUPnh2DobIoMBXMAIAAAAADR6/drkdTpnr9g1XNvKDwtBRBdKn7c2c4ZNUVK5CThdQVjACAAAAAAnJXYvnjljaE6ajRF+h90FXzJ9EJY26D0oG9t61ZMzqgAAzUxAH0AAAAFZAAgAAAAAEa03ZOJmfHT6/nVadvIw71jVxEuIloyvxXraYEW7u7pBXMAIAAAAADzRlBJK75FLiKjz3djqcgjCLo/e3yntI3MnPS48OORhgVjACAAAAAAbFS6SmTSDF3AsPceQrRwMMRoAnzL/k5avSHlIji5zYIAAzUyAH0AAAAFZAAgAAAAAKx8NLSZUU04pSSGmHa5fh2oLHsEN5mmNMNHL95/tuC9BXMAIAAAAAA59hcXVaN3MNdHoo11OcH1aPRzHCwpVjO9mGfMz4xh3QVjACAAAAAAoIvo/iOb/eXJv7w4KSQo74DOyO4uAIO09Ba7JZw9ousAAzUzAH0AAAAFZAAgAAAAAHNKAUxUqBFNS9Ea9NgCZoXMWgwhP4x0/OvoaPRWMquXBXMAIAAAAABUZ551mnP4ZjX+PXU9ttomzuOpo427MVynpkyq+nsYCQVjACAAAAAAlCTphL00SYotztwI+txbHAFTusjh1nNbiya24rM1dUEAAzU0AH0AAAAFZAAgAAAAALfGXDlyDVcGaqtyHkLT0qpuRhJQLgCxtznazhFtuyn/BXMAIAAAAABipxlXDq14C62pXhwAeen5+syA+/C6bN4rtZYcO4zKwAVjACAAAAAA431CBTW4s3IQde024W2cz5iEnj8EJ7p07esepoVDxmQAAzU1AH0AAAAFZAAgAAAAANoEr8sheJjg4UCfBkuUzarU9NFoy1xwbXjs5ifVDeA9BXMAIAAAAABPoyTf6M+xeZVGES4aNzVlq7LgjqZXJ/QunjYVusGUEAVjACAAAAAAywuLdjJJuXewOXMAr25GMBEif8+P74+pGaENUn+XIB8AAzU2AH0AAAAFZAAgAAAAAKvDiK+xjlBe1uQ3SZTNQl2lClIIvpP/5CHwY6Kb3WlgBXMAIAAAAAANnxImq5MFbWaRBHdJp+yD09bVlcFtiFDYsy1eDZj+iQVjACAAAAAAvwntX1RgeNfp5oiD+r3sAKIbMGy1yEblODDYWec1xuIAAzU3AH0AAAAFZAAgAAAAAF49IlFH9RmSUSvUQpEPUedEksrQUcjsOv44nMkwXhjzBXMAIAAAAADJtWGbk0bZzmk20obz+mNsp86UCu/nLLlbg7ppxYn7PgVjACAAAAAAKW8QTERGXwh6fE4gJsULcmu80B4BMkMaZR9USn/C6RgAAzU4AH0AAAAFZAAgAAAAAOuSJyuvz50lp3BzXlFKnq62QkN2quNU1Gq1IDsnFoJCBXMAIAAAAAAqavH1d93XV3IzshWlMnzznucadBF0ND092/2ApI1AcAVjACAAAAAAmVcLt9hf9/q+2O3K4cvFAsvnCsSofHZ5iTzIaR5YKFIAAzU5AH0AAAAFZAAgAAAAALtgC4Whb4ZdkCiI30zY6fwlsxSa7lEaOAU3SfUXr02XBXMAIAAAAACgdZ6U1ZVgUaZZwbIaCdlANpCw6TZV0bwg3DS1NC/mnAVjACAAAAAAYJOnAwVW0gPl1jkCxAL1ZAhKkJ1+ShfwyaIaipflCegAAzYwAH0AAAAFZAAgAAAAAF6PfplcGp6vek1ThwenMHVkbZgrc/dHgdsgx1VdPqZ5BXMAIAAAAACha3qhWkqmuwJSEXPozDO8y1ZdRLyzt9Crt2vjGnT7AAVjACAAAAAA1JYhGDDvtf8b2Pup5jdzQmy325tGo27n/5thjZ/GI4sAAzYxAH0AAAAFZAAgAAAAAKoLEhLvLjKc7lhOJfx+VrGJCx9tXlOSa9bxQzGR6rfbBXMAIAAAAAAIDK5wNnjRMBzET7x/KAMExL/zi1IumJM92XTgXfoPoAVjACAAAAA
AhoVLxo0hXbxAzz2BWbeRxAzk08TbVuHxJkdsbCISswkAAzYyAH0AAAAFZAAgAAAAADoQv6lutRmh5scQFvIW6K5JBquLxszuygM1tzBiGknIBXMAIAAAAADAD+JjW7FoBQ76/rsECmmcL76bmyfXpUU/awqIsZdO+wVjACAAAAAAwLIixR/svwADNhtiOZ9mDF2rUqEvuPx2tUSQbhOpersAAzYzAH0AAAAFZAAgAAAAAJNHUGAgn56KekghO19d11nai3lAh0JAlWfeP+6w4lJBBXMAIAAAAAD9XGJlvz59msJvA6St9fKW9CG4JoHV61rlWWnkdBRLzwVjACAAAAAAKMVXumaH5QOt5WVHFip9xWx9e5I/5Y9uEO0UAX6dy7QAAzY0AH0AAAAFZAAgAAAAAHgYoMGjEE6fAlAhICv0+doHcVX8CmMVxyq7+jlyGrvmBXMAIAAAAAC/5MQZgTHuIr/O5Z3mXPvqrom5JTQ8IeSpQGhO9sB+8gVjACAAAAAAPMgD8Rqnd94atKnMyPjlTwthUQ710MKJVqgtwNXLFWwAAzY1AH0AAAAFZAAgAAAAANpIljbxHOM7pydY877gpRQvYY2TGK7igqgGsavqGPBABXMAIAAAAAAqHyEu9gpurPOulApPnr0x9wrygY/7mXe9rAC+tPK80wVjACAAAAAAezZExYLAbjyMsVuSrWGQ+LsXYEcp6AFMANkQ496mTXcAAzY2AH0AAAAFZAAgAAAAAGR+gEaZTeGNgG9BuM1bX2R9ed4FCxBA9F9QvdQDAjZwBXMAIAAAAABSkrYFQ6pf8MZ1flgmeIRkxaSh/Eep4Btdx4QYnGGnwAVjACAAAAAAQ011358xXtVDOyGPpF2+zGoMdiQ8PGCB1QfDcluYUF4AAzY3AH0AAAAFZAAgAAAAAFNprhQ3ZwIcYbuzLolAT5n/vc14P9kUUQComDu6eFyKBXMAIAAAAAAcx9z9pk32YbPV/sfPZl9ALIEVsqoLXgqWLVK/tP+heAVjACAAAAAA3eqduo9CWE5BePqBq7KCh5++QqXnyjCwybB15Zoeiu4AAzY4AH0AAAAFZAAgAAAAADgyPqQdqQrgfmJjRFAILTHzXbdw5kpKyfeoEcy6YYG/BXMAIAAAAAAE+3XsBQ8VAxAkN81au+f3FDeCD/s7KoZD+fnM1MJSSAVjACAAAAAAWHAiQNo6HBkIhf72fVQxJLaCtNeTqn2OuMmYZjT7PRkAAzY5AH0AAAAFZAAgAAAAAI0CT7JNngTCTUSei1Arw7eHWCD0jumv2rb7imjWIlWABXMAIAAAAABSP8t6ya0SyCphXMwnru6ZUDXWElN0NfBvEOhDvW9bJQVjACAAAAAAbfZdoqMQ71hNIsycNHl6JpSmAFDPSfgzdWkGqFZNoScAAzcwAH0AAAAFZAAgAAAAAD/FIrGYFDjyYmVb7oTMVwweWP7A6F9LnyIuNO4MjBnXBXMAIAAAAACIZgJCQRZu7NhuNMyOqCn1tf+DfU1qm10TPCfj5JYV3wVjACAAAAAAZCaO06/DjOBjNrTxB8jdxM7X4XvNu9QTa2Y00kZJwgEAAzcxAH0AAAAFZAAgAAAAAHIkVuNDkSS1cHIThKc/O0r2/ubaABTOi8Q1r/dvBAsEBXMAIAAAAADdHYqchEiJLM340c3Q4vJABmmth3+MKzwLYlsG6GS7sQVjACAAAAAA4byh4Re83zUZxvEH4iJSsnFei+ohsWxjSpB5YoAPawIAAzcyAH0AAAAFZAAgAAAAAJmoDILNhC6kn3masElfnjIjP1VjsjRavGk1gSUIjh1NBXMAIAAAAAD97Ilvp3XF8T6MmVVcxMPcdL80RgQ09UoC6PnoOvZ1IQVjACAAAAAAx9wkqPvtzrL9EFDmFvktI129ti8IAdHn0oudubGPWAAAAzczAH0AAAAFZAAgAAAAAI5bm3YO0Xgf0VT+qjVTTfvckecM3Cwqj7DTKZXf8/NXBXMAIAAAAAD/m+h8fBhWaHm6Ykuz0WX1xL4Eme3ErLObyEVJf8NCywVjACAAAAAAiGdTXD22l1zDxHeF4NXUTiBnNTsdpzJRwM6riTPuOogAAzc0AH0AAAAFZAAgAAAAANqo4+p6qdtCzcB4BX1wQ6llU7eFBnuu4MtZwp4B6mDlBXMAIAAAAAAGiz+VaukMZ+6IH4jtn4KWWdKK4/W+O+gRioQDrfzpMgVjACAAAAAAl/wlBjSJW/hKkll8HSBCGw4Ce1pJ5rZuhVE09hKq4jQAAzc1AH0AAAAFZAAgAAAAAPrFXmHP2Y4YAm7b/aqsdn/DPoDkv7B8egWkfe23XsM1BXMAIAAAAAAGhwpKAr7skeqHm3oseSbO7qKNhmYsuUrECBxJ5k+D2AVjACAAAAAAKSsxERy2OBhb5MiH9/H2BET2oeU6yVihiAoWi/16FroAAzc2AH0AAAAFZAAgAAAAABzjYxwAjXxXc0Uxv18rH8I3my0Aguow0kTwKyxbrm+cBXMAIAAAAADVbqJVr6IdokuhXkEtXF0C2gINLiAjMVN20lE20Vmp2QVjACAAAAAACJ+VAwl9X88mjC76yzkWeL4K6AamNYjbNkpS9B6fQE4AAzc3AH0AAAAFZAAgAAAAAFMm2feF2fFCm/UC6AfIyepX/xJDSmnnolQIBnHcPmb5BXMAIAAAAABLI11kFrQoaNVZFmq/38aRNImPOjdJh0Lo6irI8M/AaAVjACAAAAAAZFgYh4nV8YN+/AAqe/QhKDkNBPg8KraCG7y3bOzhPV4AAzc4AH0AAAAFZAAgAAAAAJvXB3KyNiNtQko4SSzo/9b2qmM2zU9CQTTDfLSBWMgRBXMAIAAAAAAvjuVP7KsLRDeqVqRziTKpBrjVyqKiIbO9Gw8Wl2wFTAVjACAAAAAARuBn3JdQ8so7Gy0XVH0CtlvtRoJWhSrJP6eRIqbyWh8AAzc5AH0AAAAFZAAgAAAAAPGdcxDiid8z8XYnfdDivNMYVPgBKdGOUw6UStU+48CdBXMAIAAAAAARj6g1Ap0eEfuCZ4X2TsEw+Djrhto3fA5nLwPaY0vCTgVjACAAAAAATxLY6SkcLPaoOBJpQsggEqoxgJgiY9seP3fBQM05PckAAzgwAH0AAAAFZAAgAAAAAP5rGPrYGt3aKob5f/ldP0qrW7bmWvqnKY4QwdDWz400BXMAIAAAAADTQkW2ymaaf/bhteOOGmSrIR97bAnJx+yN3yMj1bTeewVjACAAAAAA6O+wIMt3xMITuCxVrqOCNBPX5F122G+/Is+EYkzUvVYAAzgxAH0AAAAFZAAgAAAAAAlz6wJze5UkIxKpJOZFGCOf3v2KByWyI6NB6JM9wNcBBXMAIAAAAABUC7P/neUIHHoZtq0jFVBHY75tSFYr1Y5S16YN5XxC1QVjACAAAAAANv97jMd9JmZ7lMeCzK7jaZHZYsZJNl6N9g9WXzd2om0AAzgyAH0AAAAFZAAgAAAAAFJ8AtHcjia/9Y5pLEc3qVgH5xKiXw12G9Kn2A1EY8McBXMAIA
AAAAAxe7Bdw7eUSBk/oAawa7uicTEDgXLymRNhBy1LAxhDvwVjACAAAAAAtOUZ2NeUWwESpq95O92dhqYBt8RtFnjT43E7k9mQF3oAAzgzAH0AAAAFZAAgAAAAAO8uwQUaKFb6vqR3Sv3Wn4QAonC2exOC9lGG1juqP5DtBXMAIAAAAABZf1KyJgQg8/Rf5c02DgDK2aQu0rNCOvaL60ohDHyY+gVjACAAAAAA5nAWj1Ek8p0MLYZEB3yoVFDfBYZ+/ZpIo71u+W9hprcAAzg0AH0AAAAFZAAgAAAAAE8YtqyRsGCeiR6hhiyisR/hccmK4nZqIMzO4lUBmEFzBXMAIAAAAAC1UYOSKqAeG1UJiKjWFVskRhuFKpj9Ezy+lICZvFlN5AVjACAAAAAA37oP30mTzVftI2FL9Uwyls/jqLqbmRDQk7p7nlx44uYAAzg1AH0AAAAFZAAgAAAAAPhCrMausDx1QUIEqp9rUdRKyM6a9AAx7jQ3ILIu8wNIBXMAIAAAAACmH8lotGCiF2q9VQxhsS+7LAZv79VUAsOUALaGxE/EpAVjACAAAAAAhVIkl8p19VZ0gpg32s++Jda3qsVSVB5tV4CKrtjhE3IAAzg2AH0AAAAFZAAgAAAAAOBi/GAYFcstMSJPgp3VkMiuuUUCrZytvqYaU8dwm8v2BXMAIAAAAACEZSZVyD3pKzGlbdwlYmWQhHHTV5SnNLknl2Gw8IaUTQVjACAAAAAAriBex2kK//RPhyVpCYJDBng4l5w8jD3m8BF7dVAP0p8AAzg3AH0AAAAFZAAgAAAAAIQuup+YGfH3mflzWopN8J1X8o8a0d9CSGIvrA5HOzraBXMAIAAAAADYvNLURXsC2ITMqK14LABQBI+hZZ5wNf24JMcKLW+84AVjACAAAAAAGeUzdG2kQrx5XypXJezZmPVzMYuqYZw7Bhbl4EPT0SMAAzg4AH0AAAAFZAAgAAAAAKsh0ADyOnVocFrOrf6MpTrNvAj8iaiE923DPryu124gBXMAIAAAAADg24a8NVE1GyScc6tmnTbmu5ulzO+896fE92lN08MeswVjACAAAAAAbO+DEBY3STVMQN7CbxmHDUBYrDR+80e797u/VmiK4vcAAzg5AH0AAAAFZAAgAAAAAKkVC2Y6HtRmv72tDnPUSjJBvse7SxLqnr09/Uuj9sVVBXMAIAAAAABYNFUkH7ylPMN+Bc3HWX1e0flGYNbtJNCY9SltJCW/UAVjACAAAAAAHlE3RLKcXpXto7Mr8nRgzEsmhjfGh4vcgPxashCSMg4AAzkwAH0AAAAFZAAgAAAAADDggcwcb/Yn1Kk39sOHsv7BO/MfP3m/AJzjGH506Wf9BXMAIAAAAAAYZIsdjICS0+BDyRUPnrSAZfPrwtuMaEDEn0/ijLNQmAVjACAAAAAA1c/sy3255NofBQrnBNSmVkSzMgsGPaaOUJShddVrnuQAAzkxAH0AAAAFZAAgAAAAAEWY7dEUOJBgjOoWVht1wLehsWAzB3rSOBtLgTuM2HC8BXMAIAAAAAAAoswiHRROurjwUW8u8D5EUT+67yvrgpB/j6PzBDAfVwVjACAAAAAALBvQumG8m7/bzJjGWN2cHSAncdN8jMtOSmEhEhGom24AAzkyAH0AAAAFZAAgAAAAAPZaapeAUUFPA7JTCMOWHJa9lnPFh0/gXfAPjA1ezm4ZBXMAIAAAAACmJvLY2nivw7/b3DOKH/X7bBXjJwoowqb1GtEFO3OYgAVjACAAAAAAGCDHuUJusGHKQ+r9nrFChmUUsRcqZKPGsRiLSk5gbFcAAzkzAH0AAAAFZAAgAAAAAK00u6jadxCZAiA+fTsPVDsnW5p5LCr4+kZZZOTDuZlfBXMAIAAAAAAote4zTEYMDgaaQbAdN8Dzv93ljPLdGjJzvnRn3KXgtQVjACAAAAAApZ6l+OrocOqgFek7WOqOP9JruTWZ+iW+5zdL3DZwzhkAAzk0AH0AAAAFZAAgAAAAAFbgabdyymiEVYYwtJSWa7lfl/oYuj/SukzJeDOR6wPVBXMAIAAAAADAFGFjS1vPbN6mQEhkDYTD6V2V23Ys9gUEUMGNvMPkaAVjACAAAAAAeICKly5Xtwmfd4JFD+5e1UWpu4V3KoRim7jeBICDs+UAAzk1AH0AAAAFZAAgAAAAABNMR6UBv2E627CqLtQ/eDYx7OEwQ7JrR4mSHFa1N8tLBXMAIAAAAAAxH4gucI4UmNVB7625C6hFSVCuIpJO3lusJlPuL8H5EQVjACAAAAAA0/uh1hrAlGGna/njCVaCqBwubxkifzF0zjCDQrIJOtUAAzk2AH0AAAAFZAAgAAAAAG8cd6WBneNunlqrQ2EmNf35W7OGObGq9WL4ePX+LUDmBXMAIAAAAAAjJ2+sX87NSis9hBsgb1QprVRnO7Bf+GObCGoUqyPE4wVjACAAAAAAAoJ4Ufdq45T9Aun5FupHlaBCIUsmUn6dXgV9KorpFikAAzk3AH0AAAAFZAAgAAAAAFWOUGkUpy8yf6gB3dio/aOfRKh7XuhvsUj48iESFJrGBXMAIAAAAAAY7sCDMcrUXvNuL6dO0m11WyijzXZvPIcOKob6IpC4PQVjACAAAAAAO/QFjczoznH95Iu0YEVMsU1GA1yxSGL8bcwSweNzAtkAAzk4AH0AAAAFZAAgAAAAAGUrIdKxOihwNmo6B+aG+Ag1qa0+iqdksHOjQj+Oy9bZBXMAIAAAAABwa5dbI2KmzBDNBTQBEkjZv4sPaeRkRNejcjdVymRFKQVjACAAAAAAUtdJucZoQvvPJiy65RonQrxBCcvtfHslpbgLbtWirCYAAzk5AH0AAAAFZAAgAAAAAOx89xV/hRk64/CkM9N2EMK6aldII0c8smdcsZ46NbP8BXMAIAAAAADBF6tfQ+7q9kTuLyuyrSnDgmrdmrXkdhl980i1KHuGHgVjACAAAAAAVc5njbLX1afpsuG662U/OBo4LanEk06lKbQtd95fswgAAzEwMAB9AAAABWQAIAAAAADJDdC9aEFl4Y8J/awHbnXGHjfP+VXQilPHJg7ewaJI7AVzACAAAAAAE+tqRl6EcBMXvbr4GDiNIYObTsYpa1n6BJk9EjIJVicFYwAgAAAAAITXOaLyGsYLUvOemQXxvnAHBwM/t3sMwfQus2aGoII+AAMxMDEAfQAAAAVkACAAAAAA3F9rjEKhpoHuTULVGgfUsGGwJs3bISrXkFP1v6KoQLgFcwAgAAAAAIBf0tXw96Z/Ds0XSIHX/zk3MzUR/7WZR/J6FpxRWChtBWMAIAAAAABzRshE3O7JzVjSGQSabvFXpen8sGUCAyr8hIIevROrjgADMTAyAH0AAAAFZAAgAAAAAOYIYoWkX7dGuyKfi3XssUlc7u/gWzqrR9KMkikKVdmSBXMAIAAAAABVF2OYjRTGi9Tw8XCAwZWLpX35Yl271TlNWp6N/nROhAVjACAAAAAAyGLbmHhe8eTQC8L2eDRcezUWULLZ9E8DPic7Mfp8/+4AAzEwMwB9A
AAABWQAIAAAAACMtPm12YtdEAvqu6Eji1yuRXnu1RJP6h0l7pH3lSH4MwVzACAAAAAAENyCFfyUAh1veQBGx+cxiB7Sasrj41jzCGflZkB5cRMFYwAgAAAAAA92s3DQ3aIVOrRzusqU+xdK7cnQGCDJiyLF+su3F1ZDAAMxMDQAfQAAAAVkACAAAAAAvlI4lDcs6GB1cnm/Tzo014CXWqidCdyE5t2lknWQd4QFcwAgAAAAAD60SpNc4O2KT7J0llKdSpcX1/Xxs97N715a1HsTFkmBBWMAIAAAAABR3y/ZE3VZqBhqfUttX4bzW9wkpDoexfYZIG2fMwftcQADMTA1AH0AAAAFZAAgAAAAAKl8zcHJRDjSjJeV/WvMxulW1zrTFtaeBy/aKKhadc6UBXMAIAAAAADBdWQl5SBIvtZZLIHszePwkO14W1mQ0izUk2Ov21cPNAVjACAAAAAAHvQLJ2X4ncjVv1BsOd/jbVouRPwf4222WlGSzPyAfnsAAzEwNgB9AAAABWQAIAAAAABb6LXDWqCp1beQgQjj8I3sRTtFhlrmiBi+h/+ikmrvugVzACAAAAAA9stpgTecT7uTyaGNs3K9Bp0A7R0QaIAOfscyMXHBPX8FYwAgAAAAANTp7TwMatTSeLH3GvsrB3IOvJo+0B8HRGUFcRzJVesdAAMxMDcAfQAAAAVkACAAAAAA97NTT+81PhDhgptNtp4epzA0tP4iNb9j1AWkiiiKGM8FcwAgAAAAAKPbHg7ise16vxmdPCzksA/2Mn/qST0L9Xe8vnQugVkcBWMAIAAAAACVl3ds8xRdGFPAtZ+WtC4ojC1q5OzB6dSJoOLaJsdhhgADMTA4AH0AAAAFZAAgAAAAAMqpayM2XotEFmm0gwQd9rIzApy0X+7HfOhNk6VU7F5lBXMAIAAAAACJR9+q5T9qFHXFNgGbZnPubG8rkO6cwWhzITQTmd6VgwVjACAAAAAAEEOCUo+ihRGl1kuHlabWBWUFyJPAXSXzpQB4od57cMEAAzEwOQB9AAAABWQAIAAAAADQnslvt6Hm2kJPmqsTVYQHE/wWeZ4bE1XSkt7TKy0r1gVzACAAAAAA8URTA4ZMrhHPvlp53TH6FDCzS+0+61qHm5XK6UiOrKEFYwAgAAAAANSmECEsZgXBpoAzAIwaU3H0K/6HIutCn+ELRGBFzykkAAMxMTAAfQAAAAVkACAAAAAAhujlgFPFczsdCGXtQ/002Ck8YWQHHzvWvUHrkbjv4rwFcwAgAAAAALbV0lLGcSGfE7mDM3n/fgEvi+ifjl7WZ5b3aqjDNvx9BWMAIAAAAACKzGyyRT/3KSmGlKMHpswxj7pNu3rCmOETZ2DUhQsCBgADMTExAH0AAAAFZAAgAAAAABm/6pF96j26Jm7z5KkY1y33zcAEXLx2n0DwC03bs/ixBXMAIAAAAAD01OMvTZI/mqMgxIhA5nLs068mW+GKl3OW3ilf2D8+LgVjACAAAAAAnPa8PTfcaSSAdzNAC54IMTicbShbtt/cSnFHz7u7g8wAAzExMgB9AAAABWQAIAAAAACfw9/te4GkHZAapC9sDMHHHZgmlTrccyJDPFciOMSOcwVzACAAAAAAIIC1ZpHObvmMwUfqDRPl4C1aeuHwujM1G/yJbvybMNAFYwAgAAAAAKeagivKdHk7wAFEPHyDhsm9mDNWH5UPB+oIIlmfbzSOAAMxMTMAfQAAAAVkACAAAAAAkxHJRbnShpPOylLoDdNShfILeA1hChKFQY9qQyZ5VmsFcwAgAAAAAKidrY+rC3hTY+YWu2a7fuMH2RD/XaiTIBW1hrxNCQOJBWMAIAAAAADH8s64PC+lo8Ul4oujBlb/irtJSwu12V0HbvNzj/9qUQADMTE0AH0AAAAFZAAgAAAAAByuYl8dBvfaZ0LO/81JW4hYypeNmvLMaxsIdvqMPrWoBXMAIAAAAABNddwobOUJzm9HOUD8BMZJqkNCUCqstHZkC76FIdNg9AVjACAAAAAA4SEPY+qEZ2dCu/9485IEVybiM3Z7szXYM+izxklNm14AAzExNQB9AAAABWQAIAAAAABxMy7X5hf7AXGDz3Y/POu1ZpkMlNcSvSP92NOO/Gs7wAVzACAAAAAAHJshWo2T5wU2zvqCyJzcJQKQaHFHpCpMc9oWBXkpUPoFYwAgAAAAANeYm2aAT9f6T9uwQyH4sw2DUQyKTpo/CHt+Bb67ao4TAAMxMTYAfQAAAAVkACAAAAAAlqbslixl7Zw3bRlibZbe/WmKw23k8uKeIzPKYEtbIy0FcwAgAAAAAHEKwpUxkxOfef5HYvulXPmdbzTivwdwrSYIHDeNRcpcBWMAIAAAAAASUKCgxGu2U42qfKiyMimLPiZBMurf5v8lQJUSrhAWIAADMTE3AH0AAAAFZAAgAAAAAMXrXx0saZ+5gORmwM2FLuZG6iuO2YS+1IGPoAtDKoKBBXMAIAAAAADIQsxCr8CfFKaBcx8kIeSywnGh7JHjKRJ9vJd9x79y7wVjACAAAAAAPzOTJ088k9QdGUpZ1ubUdkw3pTGkNF8bui4a9wqeo08AAzExOAB9AAAABWQAIAAAAAAm83FA9yDUpwkbKTihe7m53u+DivS9BU2b4vQMtCVQ2AVzACAAAAAAz3m1UB/AbZPa4QSKFDnUgHaT78+6iGOFAtouiBorEgEFYwAgAAAAAHuM0gIDED47c9rdQ4MVtVzpVfn5zTF7eokO4lew4EM+AAMxMTkAfQAAAAVkACAAAAAAJRzYK0PUwr9RPG2/7yID0WgcTJPB2Xjccp5LAPDYunkFcwAgAAAAAIIh24h3DrltAzNFhF+MEmPrZtzr1PhCofhChZqfCW+jBWMAIAAAAAD6LppOeSiLHFI7EOBAz9pZPKU7LWbRWIINTlqw4zmJ5gADMTIwAH0AAAAFZAAgAAAAAHSaHWs/dnmI9sc7nB50VB2Bzs0kHapMHCQdyVEYY30TBXMAIAAAAACkV22lhEjWv/9/DubfHBAcwJggKI5mIbSK5L2nyqloqQVjACAAAAAA1I5pLbnlFf/EfJlhE0RFIioDjzKrWNh4TO8H/ksrbMcAAzEyMQB9AAAABWQAIAAAAAAC/I4TQRtCl12YZmdGz17X4GqSQgfwCPgRBwdHmdwu+QVzACAAAAAAx8f3z2ut/RAZhleari4vCEE+tNIn4ikjoUwzitfQ588FYwAgAAAAADX1qVlsvYru6YDBMtmlQoIBhLSbVykBtft8wn/A+ohJAAMxMjIAfQAAAAVkACAAAAAADGB5yU2XT0fse/MPWgvBvZikVxrl5pf3S5K1hceKWooFcwAgAAAAAIxTmlLHMjNaVDEfJbXvRez0SEPWFREBJCT6qTHsrljoBWMAIAAAAAD+RiUk9IWfaDWXtzLp2NX/vCCeU9amFnQgpJjy56dQXQADMTIzAH0AAAAFZAAgAAAAABmO7QD9vxWMmFjIHz13lyOeV6vHT6mYCsWxF7hb/yOjBXMAIAAAAACT9lmgkiqzuWG24afuzYiC
eK9gmJqacmxAruIukd0xEAVjACAAAAAAyiZxtuOZQNSit4wFMJS3jnqLbDzyJ0OtDTKs6r0EO0cAAzEyNAB9AAAABWQAIAAAAAAfPUoy7QyZKhIIURso+mkP9qr1izbjETqF5s22GwjCjAVzACAAAAAAvLMsIDQ/go4VUxeh50UHmsvMvfx51cwyONnRD2odvC0FYwAgAAAAAMwY/KvSIOLoSHOq4TQ1V9ZSCPZc34SdPooPL4gYXa7UAAMxMjUAfQAAAAVkACAAAAAAE3ZqUar0Bq2zWbARE0bAv98jBlK9UJ73/xcwdMWWlSkFcwAgAAAAAK4M+MmC+9sFiFsumMyJZQKxWmmJiuG9H7IzKw083xxkBWMAIAAAAACpywsi9vc979ApgH3BDzph7Y4VIAkQhji97sswzdLAYwADMTI2AH0AAAAFZAAgAAAAAMo8FJiOq63cAmyk2O7eI7GcbQh/1j4RrMTqly3rexftBXMAIAAAAADjVmpd0WiRGTw/gAqEgGolt2EI7Csv14vKdmYoMD0aAgVjACAAAAAAEPmYjDZnMZdkaYlLZLlRgist9kCrSQe0mSuKxPwwYbQAAzEyNwB9AAAABWQAIAAAAACu5IGaIx7A3Jvly/kzlCsSA4s3iJwuIl8jEdRH0k93NwVzACAAAAAA9NRUyxYE+t0Xyosyt6vIfMFW/vBoYg6sR+jBNs4JAxIFYwAgAAAAAKMbsr4JCCodHmkBvfhOe7BJigKiJik95wLeu3PjLSpXAAMxMjgAfQAAAAVkACAAAAAALkRy0GJInXYLA+cgjs6Myb0a+Gu9hgXhHvhLNoGWfckFcwAgAAAAANbALyt9zCSvwnLaWCd2/y2eoB7qkWTvv1Ldu8r40JPuBWMAIAAAAABi0dME7AUSoeMv4yIgnGMgrX9G+Uk6HV+I9/9zzABcZwADMTI5AH0AAAAFZAAgAAAAAGoUK/DSWhT8LZhszSUqDbTrp8cSA7rdqmADKL+MILtTBXMAIAAAAABHnEE9bVa6lvhfhEMkkV2kzSSxH/sMW/FIJuw3CzWs6wVjACAAAAAAagOaxIb7co+HxAXhfI9ahHLUNqmGuN/4JxeR9sm6PdkAAzEzMAB9AAAABWQAIAAAAAC922ZDQE3h2fQKibGMZ9hV0WNlmrPYYSdtaSyYxsWYqgVzACAAAAAAagMovciKK6WVjIc2cCj8nK5O/gVOFFVeVAJpRp89tmQFYwAgAAAAAAMsWoODZkTvKSNWzloogbp/AiFFspENp49RwKjWpMLHAAMxMzEAfQAAAAVkACAAAAAAtK+JqX3K/z2txjAU15DgX4y90DS2YLfIJFolCOkJJJwFcwAgAAAAAMnR5V7gfX7MNqqUdL5AkWlkhyFXaBRVNej+Rcn8lrQkBWMAIAAAAADOmDHuXwsu9uwKYYSN5XIb/TG3K5DB5aci/X1ATKTaSwADMTMyAH0AAAAFZAAgAAAAAAeGrIMK/bac6kPczxbvRYqKMkcpeI2FjdMpD91FDWIvBXMAIAAAAAAix62z1LeS8yvSXCl5gHSIomjyx76fF3S1lp9k900hygVjACAAAAAAUK1Gzlm0meDhYPL7ZgKaCD0qz7Zb5rtnPn1EEW3UZxoAAzEzMwB9AAAABWQAIAAAAACphf298InM0Us4HT8o1W1MGw0D/02vd7Jh+U0h7qaFaQVzACAAAAAAFXtk7YpqsOJxsqGWSIL+YcBE96G3Zz9D31gPqDW94y8FYwAgAAAAAEA5mnfyccI1SjyYftq3qCuORs/NxzzqUA+fxn3FRjVsAAMxMzQAfQAAAAVkACAAAAAATLDS2cuDVM3yDMuWNgk2iGKBTzPpfJMbvxVOSY39ZfcFcwAgAAAAAPT5wRi2cLHIUflXzm6EQB/m7xdThP80ir1VV/JBBqvxBWMAIAAAAABTj7i+CxxNVkbnAkSYga/v2X3/NH3nD/FRiOo2FPkzvAADMTM1AH0AAAAFZAAgAAAAAJNjExiZVX7jfFGfYpQu16qxLN0YPqVU/5CQ/Y67YSinBXMAIAAAAABMpm2+6KrkRUlXzQoMPHrQmIO6dkQz66tYdfTeA3dKqQVjACAAAAAATkNGECGUFzo34ItVnTEmxbe8dbde8QzMgEEdjMhRLz8AAzEzNgB9AAAABWQAIAAAAABlFkYtLCx901X6QVVMkSn6Z7k30UF4xHaA0OZJJ9bdyQVzACAAAAAATez+F9GHcGzTp7jjv4feboUNb8JCkIp4EqcPFisnq7MFYwAgAAAAAOHfn3eVRhSWn61AfZuAnCGleSLluJ3sF9IFGUWrCsBaAAMxMzcAfQAAAAVkACAAAAAA8NJKN0IxZnruhswGQkiruv8Ih0EMwDcSZx/Xasup9dkFcwAgAAAAAKaJZRxzA+Igeydvuk6cSwUHXcrmT4PjhuPu//FslpdnBWMAIAAAAAC3Cs/mKg+sbU/BC9QWFglOau6FWAP1sJqqSZOEOdzh+gADMTM4AH0AAAAFZAAgAAAAABHxHdEClz7hbSSgE58+dWLlSMJnoPz+jFxp4bB1GmLQBXMAIAAAAAD3nSvT6aGD+A110J/NwEfp0nPutlmuB5B+wA3CC3noGAVjACAAAAAA56lpvjMjFzR6NMGiSF1es1X01dwINWqaxyq3UUL8XJ0AAzEzOQB9AAAABWQAIAAAAAC/o8qW/ifk3KuJ01VFkyNLgQafxB5/bGs2G5VyyVafOwVzACAAAAAA1bMqAFGDHSl6BYNLbxApvkAv2K1/oafywiX0MDz1dGUFYwAgAAAAAMjJrrd6E5DyUuXkWZsAqo020Qvom6xqAluQLod5SmtdAAMxNDAAfQAAAAVkACAAAAAAY2V8I1bz3a1AxTtmED6UhdhA09huFkuuEX8R+d/WDPUFcwAgAAAAAPTVoNRiI76tcRKqd+JBBVyy4+YcKST42p0QX2BtmQ2VBWMAIAAAAAD0kQXxOWWOfIzqhJk5Rnvp/h5VXcGxxLZ8HMvVVH++YQADMTQxAH0AAAAFZAAgAAAAAO2kSsW0WGN9AOtK4xK2SHrGhWiaAbMEKT4iZkRpaDN/BXMAIAAAAABKGzQcPM8LT2dwOggxoWjv/1imYWabbG/G4kBw8OWaxAVjACAAAAAAT3sD7PTrAPtOtiSRrG4IKmSNr/uWgX8TvIyZpZzj4wkAAzE0MgB9AAAABWQAIAAAAAAiCwzNEEaH/mDam68IdDftnhthyUFdb+ZCNSBQ91WlHQVzACAAAAAA7tHyHcxCzmbJeFYZyPm4mEgkTGKOvwY4MX82OvH0Jn8FYwAgAAAAAP4A3nd470CwvaGzyJLfzbrw7ePJV35V3Cw6tuiPFlsCAAMxNDMAfQAAAAVkACAAAAAAGuCHVNJSuoVkpPOnS5s89GuA+BLi2IPBUr2Bg1sWEPIFcwAgAAAAAEl1gncS5/xO7bQ/KQSstRV3rOT2SW6nV92ZANeG2SR6BWMAIAAAAADi6/zM3xSBXEfpRpvEdeycRDl08SmwwLey72qgZaY7FwADMTQ0AH0AAAA
FZAAgAAAAAMfxz7gEaoCdPvXrubDhCZUS0ARLZc1svgbXgMDlVBPgBXMAIAAAAAB6a5dDA3fuT5Vz2KvAcbUEFX/+B7Nw2p1QqbPoQ5TTuAVjACAAAAAAB4Kp5ewWpFMJ2T+QCrfw2eJV70L4M+GM8khV1JwqOqQAAzE0NQB9AAAABWQAIAAAAACnYsqF/VzmjIImC9+dqrHO1TM6lJ6fRwM0mM6Wf6paOwVzACAAAAAA5tgZzch8uDCR1ky3SllVaKVpxAlbrhvlNDTazZZRZOAFYwAgAAAAALDJDWkmExE4L8hf2CDNLuF+dgivUGXb7ZvBp90EALm+AAMxNDYAfQAAAAVkACAAAAAAEC0sIVmadtW4YMuRXH7RpAhXclsd+3bmqGXCMeaT014FcwAgAAAAABPpXh0uzpsJJB+IRUNajmMB9WGwswfpw5T9xk3Xj6ANBWMAIAAAAABauth7HsI90juToIGY6149TbrxP8Emoa+5Izilj9zeWQADMTQ3AH0AAAAFZAAgAAAAABaIB3n49Xm9cOafSrQsE0WCcYp8rMIO/qVwIlMF5YLRBXMAIAAAAAC9EyWJV3xOu9bzgdJ/yX+ko7qLf1u3AxNMataW2C9EzQVjACAAAAAAy7IAYSgbqP7CyvurHMoEq1eO08TIPNO/XcS1L5RKXHkAAzE0OAB9AAAABWQAIAAAAAA5rZItA/cocRnngYqcJ3nBXQ+l688aKz3EQyLbYYunPAVzACAAAAAAwKyA+L7TgxztPClLrIMk2JXR+w7c04N3ZOqPgjvrIvsFYwAgAAAAABaAwqPDgkTaz4888/3PeF64+vtHbLtAZgFgEurWT5kbAAMxNDkAfQAAAAVkACAAAAAA3htn7oHJ0YYpIrs+Mzyh85Ys67HwAdv5LQl1mCdoMWkFcwAgAAAAAEHjCtNNLenHuSIYux6ezAHsXDaj2DlTF67ToDhDDe6HBWMAIAAAAAAVwUZIMifuPtmiFSEctLt+bOFo1T4fgGiWNsZAP0ddeQADMTUwAH0AAAAFZAAgAAAAAEnt18Km/nqggfIJWxzTr9r3hnXNaueG6XO9A5G11LnGBXMAIAAAAAD7QxzGMN/ard5TfFLecE6uusMmXG2+RBsBR+/NCQHUwAVjACAAAAAAIPTDniUskKj5mEzULdlKtIDquwjcqkwunJyhmBazNNcAAzE1MQB9AAAABWQAIAAAAAAVAKK/GoY8AACu/hyMpO4hdLq6JnEyWNzkyci9sbaD/wVzACAAAAAA2HmeqpMlvvBpV2zQTYIRmsc4MFlfHRwLof0ycJgMg/MFYwAgAAAAACHHCAszhxijRN9Es+XRQxXo7JQ6g2qM6f5sIdMy0VKbAAMxNTIAfQAAAAVkACAAAAAAhHyq1GQC/GiMwpYjcsfkNxolJ10ARKjIjfkW1Wipzi0FcwAgAAAAAD/uaGWxTDq87F8XZ6CrFI+RNa8yMqfSZdqK00Kj833BBWMAIAAAAAD1mJD7i29l2xloVHwS1n0CbkyodWeFfZM1KF1r4HqNIgADMTUzAH0AAAAFZAAgAAAAABAJJxHoZD+MQBWqm9UM9Dd3z5ZohIZGWRaRVRsMptKQBXMAIAAAAADrE/ca+gqj/SH4oao4wE4qn2ovoTydzcMbDbrfnUs3zAVjACAAAAAAAIUpF9mApVCMCckE7S+K9RL6TPtlTTYuzUsj9E6oIpUAAzE1NAB9AAAABWQAIAAAAAAv01wz7VG9mTepjXQi6Zma+7b/OVBaKVkWNbgDLr1mFgVzACAAAAAA0I5sxz8r6wkCp5Tgvr+iL4p6MxSOq5d3e1kZG+0b7NkFYwAgAAAAAKwOaUTAOw6Y1F5vFrbDf8yVua5Fm6hKhkdIrzejuaatAAMxNTUAfQAAAAVkACAAAAAAWXecRwxSon68xaa9THXnRDw5ZfzARKnvvjTjtbae6T0FcwAgAAAAAPh0UfUMEo7eILCMv2tiJQe1bF9qtXq7GJtC6H5Va4fIBWMAIAAAAAASanx14WR2rmomuZ2x/nuD8j75pYzz+ZAt/v7uRcoOEQADMTU2AH0AAAAFZAAgAAAAAEY5WL8/LpX36iAB1wlQrMO/xHVjoO9BePVzbUlBYo+bBXMAIAAAAABoKcpadDXUARedDvTmzUzWPe1jTuvD0z9oIcZmKuiSXwVjACAAAAAAWhdUWP186dYAIDx6RtC0o/lzwYNvYBXm4RWFMbcU3YkAAzE1NwB9AAAABWQAIAAAAADnJnWqsfx0xqNnqfFGCxIplVu8mXjaHTViJT9+y2RuTgVzACAAAAAAWAaSCwIXDwdYxWf2NZTly/iKVfG/KDjHUcA1BokN5sMFYwAgAAAAAPS3GDNsHgCuTgEWTHiEStoY0VMzlopZ8L7wPUquK7ayAAMxNTgAfQAAAAVkACAAAAAAsPG7LaIpJvcwqcbtfFUpIjj+vpNj70Zjaw3eV9T+QYsFcwAgAAAAAJQ71zi0NlCyY8ZQs3IasJ4gB1PmWx57HpnlCf3+hmhqBWMAIAAAAABmDfQAQeyeutCB+wSn4RmErwYEkYpyBeCYA+3iutFzAQADMTU5AH0AAAAFZAAgAAAAAAGiSqKaQDakMi1W87rFAhkogfRAevnwQ41onWNUJKtuBXMAIAAAAAASgiDpXfGh7E47KkOD8MAcX8+BnDShlnU5JAGdnPdqOAVjACAAAAAAucFM6KF1YHEhH9vn16ox+VSqNEykMGaJzhLPsLI7qk8AAzE2MAB9AAAABWQAIAAAAAB7L4EnhjKA5xJD3ORhH2wOA1BvpnQ+7IjRYi+jjVEaJAVzACAAAAAAuhBIm0nL3FJnVJId+7CKDASEo+l2E89Z9/5aWSITK4AFYwAgAAAAALxpb5qDek038/wz4E//KIrwwo0voaqwlB7cTXSQ44EHAAMxNjEAfQAAAAVkACAAAAAAaROn1NaDZFOGEWw724dsXBAm6bgmL5i0cki6QZQNrOoFcwAgAAAAANVT8R6UvhrAlyqYlxtmnvkR4uYK/hlvyQmBu/LP6/3ZBWMAIAAAAAC1ONheOA57vNof7iZM3b82xk1abqT0bppzjQnHCmnUIwADMTYyAH0AAAAFZAAgAAAAADqSR5e0/Th59LrauDA7OnGD1Xr3H3NokfVxzDWOFaN7BXMAIAAAAACt30faNwTWRbvmykDpiDYUOCwA6QDbBBYBFWS7rdOB4AVjACAAAAAAQZlvU/gzOwW2rcr5+W43Q6EkfX8oX1TpRJWEAZAULx4AAzE2MwB9AAAABWQAIAAAAADmtb1ZgpZjSeodPG/hIVlsnS8hoRRwRbrTVx89VwL62AVzACAAAAAAi38e1g6sEyVfSDkzZbaZXGxKI/zKNbMasOl2LYoWrq8FYwAgAAAAAFWkHkxPA28FjeUjeRILC0tX8OJcanhPP68QxM5s1kcMAAMxNjQAfQAAAAVkACAAAAAAf/f7AWVgBxoKjr7YsEQ4w/fqSvuQWV2HMiA3rQ7ur0sFcwAgAAAAADkkeJozP6FFhUdRIN74H4UhIH
ue+eVbOs1NvbdWYFQrBWMAIAAAAADS3qKdAEzPtGMFpsisEhnOfF0zfxmLXjHKKaNCzX4PhQADMTY1AH0AAAAFZAAgAAAAAI2WEOymtuFpdKi4ctanPLnlQud+yMKKb8p/nfKmIy56BXMAIAAAAADVKrJmhjr1rfF3p+T+tl7UFd1B7+BfJRk0e7a4im7ozgVjACAAAAAAxeqPjcgDwae+AgEXg3hU118dHoy6rcHd6jAJrGik+EcAAzE2NgB9AAAABWQAIAAAAAAuHU9Qd79hjyvKOujGanSGDIQlxzsql8JytTZhEnPw+AVzACAAAAAAjF2gV/4+sOHVgDd/oR5wDi9zL7NGpGD+NsEpGXy/a4QFYwAgAAAAAIFYquIAyUn3D2peptolq3s8oIgSLEIctqBYx6qEeQTxAAMxNjcAfQAAAAVkACAAAAAA7dQDkt3iyWYCT94d7yqUtPPwp4qkC0ddu+HFdHgVKEkFcwAgAAAAANuYvtvZBTEq4Rm9+5eb7VuFopowkrAuv86PGP8Q8/QvBWMAIAAAAABBO8poBKW/RWjdHBARy3Rzghz3frQIZEPE/nSKWYlVPgADMTY4AH0AAAAFZAAgAAAAAAwnZSDhL4tNGYxlHPhKYB8s28dY5ScSwiKZm3UhT8U3BXMAIAAAAABDoY6dhivufTURQExyC9Gx3ocpl09bgbbQLChj3qVGbgVjACAAAAAA4JWOs1XAUgX3rGBBGEDDIC9/i9khJ+FgIcwfvj/SnS0AAzE2OQB9AAAABWQAIAAAAAC2VuRdaC4ZJmLdNOvD6R2tnvkyARteqXouJmI46V306QVzACAAAAAAMn1Z6B35wFTX9mEYAPM+IiJ5hauEwfD0CyIvBrxHg7IFYwAgAAAAAMRQnV65wbGlT+7Wj/HvGPLKg8NBnhkQ8Hx/MIyWvynTAAMxNzAAfQAAAAVkACAAAAAAVerb7qVNy457rNOHOgDSKyWl5ojun7iWrv1uHPXrIZQFcwAgAAAAAIDcYS9j5z+gx0xdJj09L7876r/vjvKTi/d3bXDE3PhyBWMAIAAAAAANDuaZsI2eS6/qiSdG9pCLjcPdKS96xWUJr84V/1La7gADMTcxAH0AAAAFZAAgAAAAAH/E44yLxKCJjuSmU9A8SEhbmkDOx1PqqtYcZtgOzJdrBXMAIAAAAABgLh9v2HjBbogrRoQ82LS6KjZQnzjxyJH4PH+F3jupSAVjACAAAAAAd93miIlZ/uuhzjVBt0BPjzeewxih6TSzkzbsNhpMjA8AAzE3MgB9AAAABWQAIAAAAAAlNUdDL+f/SSQ5074mrq0JNh7CTXwTbbhsQyDwWeDVMwVzACAAAAAANIH2IlSNG0kUw4qz0budjcWn8mNR9cJlYUqPYdonucAFYwAgAAAAAF9OccClBu1j8BVMDmMtJyCxnhWCbg9FuY/8nhuFGJknAAMxNzMAfQAAAAVkACAAAAAASZ+CvUDtlk/R4HAQ3a+PHrKeY/8ifAfh0oXYFqliu80FcwAgAAAAAJelpzPgM65OZFt/mvGGpwibclQ49wH+1gbUGzd9OindBWMAIAAAAAC63uwNXVbd1VP5If++NprVRsHCbomU9Iq3GxCttNAweAADMTc0AH0AAAAFZAAgAAAAAN9fkoUVbvFV2vMNMAkak4gYfEnzwKI3eDM3pnDK5q3lBXMAIAAAAACnDkgVNVNUlbQ9RhR6Aot2nVy+U4km6+GHPkLr631jEAVjACAAAAAA5d2REAVBZI9rUfeqJ9jhePmRXvrGolUbMTFiC78k/OIAAzE3NQB9AAAABWQAIAAAAAASyK3b1nmNCMptVEGOjwoxYLLS9fYWm/Zxilqea0jpEQVzACAAAAAADDHsGrbqlKGEpxlvfyqOJKQJjwJrzsrB7k3HG0AUJbkFYwAgAAAAACZHzWGY3PEL4uPSKPezGGJkyScYw/Vt3bKmX5inVMaOAAMxNzYAfQAAAAVkACAAAAAAlSP9iK31GlcG9MKGbLmq+VXMslURr+As736rrVNXcsUFcwAgAAAAAAvbj0zfq9zzi8XReheKFbCB+h9IsOLgXPPpI5vrEJNZBWMAIAAAAABb9Y2Ox3MeHCfGnMxpR7nk6hwIJg0J8/tT8fZHwxMCBQADMTc3AH0AAAAFZAAgAAAAAOJNORH8Bev97gVU7y6bznOxJ+E6Qoykur1QP76hG1/7BXMAIAAAAAC+C1PtOOrSZgzBAGhr+dPe/kR0JUw9GTwLVNr61xC1aAVjACAAAAAA4rDk/UlRHudzR12i5Ivcb5Pxwn63JX2Sjf2xrlALL7kAAzE3OAB9AAAABWQAIAAAAAAKcHzLUomavInN6upPkyWhAqYQACP/vdVCIYpiy6U6HgVzACAAAAAATsR4KItY6R2+U7Gg6sJdaEcf58gjd1OulyWovIqfxKcFYwAgAAAAAEac2NX3v0n87D/eUM/a3Gj+5Axs896SCidMyOMaOz6zAAMxNzkAfQAAAAVkACAAAAAAqTOLiMpCdR59tLZzzIPqJvbCNvz2XQL9ust0qYaehtcFcwAgAAAAAArefox/3k5xGOeiw2m6NUdzuGxmPwcu5IFcj+jMwHgHBWMAIAAAAABK08kOnYjDWhLJ70iVhd3XEj0Q0srEQttJTawBm6GuOwADMTgwAH0AAAAFZAAgAAAAAIPSqSeVzSRgNVNmrPYHmUMgykCY27NbdDUNhE5kx/SgBXMAIAAAAAAhX90nNfxyXmZe/+btZ7q6xMX4PFyj0paM1ccJ/5IUUQVjACAAAAAA/9XTbq8UVKJbIeYtfNg8pdDrpDqbCwVr5Rq3Tb4dFwwAAzE4MQB9AAAABWQAIAAAAACLn5DxiqAosHGXIAY96FwFKjeqrzXWf3VJIQMwx1fl4gVzACAAAAAAindvU27nveutopdvuHmzdENBbeGFtI3Qcsr07jxmvm8FYwAgAAAAAJ3GMHBeIaCyFV5zBgcUMpjfIDcdoaTYQSyQN2shVJMWAAMxODIAfQAAAAVkACAAAAAA7i2S6rHRSPBwZEn59yxaS7HiYBOmObIkeyCcFU42kf8FcwAgAAAAAGb3RSEyBmgarkTvyLWtOLJcPwCKbCRkESG4RZjVmY4iBWMAIAAAAABKIdcHRSJ3dnevw5Pj0BfULPqmDOA7KBDREMGv5rXUEAADMTgzAH0AAAAFZAAgAAAAAFz+M+H/Z94mdPW5oP51B4HWptp1rxcMWAjnlHvWJDWrBXMAIAAAAACBFEOQyL7ZHu4Cq33QvXkmKuH5ibG/Md3RaED9CtG5HwVjACAAAAAABf2EvIMumZohp2E/sHWEJW4VMU3ez95hwWU9pjKGRCYAAzE4NAB9AAAABWQAIAAAAAAE7c2x3Z3aM1XGfLNk/XQ9jCazNRbGhVm7H8c2NjS5ywVzACAAAAAARJ9h8fdcwA19velF3L/Wcvi2rCzewlKZ2nA0p8bT9uwFYwAgAAAAAPl1vH6JaJGZSJ8au0NK58X4gqleryub8slUhA0HRtzmAAMxODUAfQAAAAVkA
CAAAAAAVoRt9B9dNVvIMGN+ea5TzRzQC+lqSZ8dd/170zU5o9cFcwAgAAAAAEwM95XZin5mv2yhCI8+ugtKuvRVmNgzzIQN0yi1+9aIBWMAIAAAAAA+rgph1avVrhyHl/6G+HtASJSiJkv7u2UPYj+25fwtLAADMTg2AH0AAAAFZAAgAAAAAKRDkjyWv/etlYT4GyoXrmBED2FgZHnhc+l9Wsl06cH2BXMAIAAAAABohlpm3K850Vndf3NmNE0hHqDlNbSR8/IvMidQ3LnIZAVjACAAAAAAKw6WqWDrI7ZfyndtrftL3uh7BaF/FQxhkh8b0Zl547gAAzE4NwB9AAAABWQAIAAAAAC3DFh5oklLCNLY90bgWm68dFXz65JpAZSp1K99MBTPAQVzACAAAAAAQgZecmxEUZVHoptEQClDwAf8smI3WynQ/i+JBP0g+kQFYwAgAAAAADmUpVNJUwrydIllFvI/aau1703nk6tF2sfzkgFlWsKCAAMxODgAfQAAAAVkACAAAAAAQ5WKvWSB503qeNlOI2Tpjd5blheNr6OBO8pfJfPNstcFcwAgAAAAAKwHgQLSDJ5NwLBQbY5OnblQIsVDpGV7q3RCbFLD1U4/BWMAIAAAAADKOab9GTEiaybo9o4J72Pb9LiQU/CuKoQm6Jtlmo/IeQADMTg5AH0AAAAFZAAgAAAAAGfhFY3RGRm5ZgWRQef1tXxHBq5Y6fXaLAR4yJhrTBplBXMAIAAAAACKEF0ApLoB6lP2UqTFsTQYNc9OdDrs/vziPGzttGVLKQVjACAAAAAAvJcnqBNCZgHe+Q7Cqk+yiAaIuSJIRItXEsAlKy9PES4AAzE5MAB9AAAABWQAIAAAAAAIM73gPcgzgotYHLeMa2zAU4mFsr7CbILUZWfnuKSwagVzACAAAAAAJCSu98uV8xv88f2BIOWzt6p+6EjQStMBdkGPUkgN79cFYwAgAAAAABQqDP10DcaqEY7vxFS7U/9xcadDASONow1+xmVsPZ11AAMxOTEAfQAAAAVkACAAAAAAkWmHCUsiMy1pwZTHxVPBzPTrWFBUDqHNrVqcyyt7nO8FcwAgAAAAAMv2CebFRG/br7USELR98sIdgE9OQCRBGV5JZCO+uPMgBWMAIAAAAABxLEFq6nFVI3WaYhggo1d6oeDsBSm4wCW0T6D0yE/puwADMTkyAH0AAAAFZAAgAAAAAGInUYv0lP/rK7McM8taEHXRefk8Q2AunrvWqdfSV7UaBXMAIAAAAACE+WPxJ3gan7iRTbIxXXx+bKVcaf8kP4JD8DcwU0aL7wVjACAAAAAAiThzC5jAZ5D6xuCIsUD1aLy7KxmT2Fb8ybTkLxvj17MAAzE5MwB9AAAABWQAIAAAAACmHlg2ud3cplXlTsNTpvNnY6Qm1Fce0m899COamoDjaQVzACAAAAAArtJQeJIlepBWRU2aYar7+YGYVQ7dfDc1oxgTmA8r9q0FYwAgAAAAADZpcxtBFv18EdmhxsoxlDs253tNB5zqVY/h+nFD1isIAAMxOTQAfQAAAAVkACAAAAAApzcWSAbZWV/Rq+ylRNqqlJqNVR4fhXrz4633/MQOQgcFcwAgAAAAAN/jz/bsEleiuCl+li83EWlG6UMHA8CyaOMRKCkXkSCPBWMAIAAAAADSTHgALJugYsjIvSK1o0UwG6VizNokXQ/tnPNjOpGB1AAABWUAIAAAAADrmnP3kS2GpCl+gdL2da90KHTkBX46iQ/sZRoj7uPz7BJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAAA==", + "subType": "06" + } + } + } + }, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimal", + "bsonType": "decimal", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + } + } + } + }, + "command_name": "find" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": { + "$numberInt": "0" + }, + "encryptedDecimal": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "5nRutVIyq7URVOVtbE4vM01APSIajAVnsShMwjBlzkM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "rbf3AeBEv4wWFAKknqDxRW5cLNkFvbIs6iJjc6LShQY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "n+XAuFnP8Dov9TnhGFxNx0K/MnVM9WbJ7RouEu0ndO0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yRXojuVdn5GQtD97qYlaCL6cOLmZ7Cvcb3wFjkLUIdM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "DuIkdRPITRs55I4SZmgomAHCIsDQmXRhW8+MOznkzSk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SsBk+Et1lTbU+QRPx+xyJ/jMkmfG+QCvQEpip2YYrzA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "crCIzOd8KhHvvUlX7M1v9bhvU4pLdTc+X2SuqoKU5Ek=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "YOWdCw4UrqnxkAaVjqmC4sKQDMVMHEpFGnlxpxdaU6E=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "M3SShp81Ff8tQ632qKbv9MUcN6wjDaBReI0VXNu6Xh4=", + "subType": "00" 
+ } + }, + { + "$binary": { + "base64": "gzHlSPxpM0hT75kQvWFzGlOxKvDoiKQZOr19V6l2zXI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "s3JnppOGYw9SL2Q1kMAZs948v2F5PrpXjGei/HioDWs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cG6+3Gk/zEH68P/uuuwiAUVCuyJwa1LeV+t29FlPPAo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dupdvR3AyJtM+g9NDKiaLVOtGca387JQp8w+V03m7Ig=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JqEQc5svj2jTvZ6LLA5ivE+kTb/0aRemSEmxk4G7Zrg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "szcXXXKnob+p3SoM4yED2R920LeJ7cVsclPMFTe4CeI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "o1QoGVXmuBdHwHm7aCtGMlMVKrjFdYvJXpoq6uhIAZ0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Jfm5wPlqqLCJRGQIqRq2NGmpn7s0Vrih2H3YAOoI2YU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zMHLb8ARbsYo8Ld05bqnGFf1Usha6EGb8QKwdSAyps0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yQdtq9lh5pugL7/i0Bj/PuZUUBUIzf+7wj1rl5y736w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wGWVZdO7qIuyDg/BqDgqjgoQ02h5YYgwXQB1oCin2NE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "by9HMLj6NTEpgztZ5HSN6GxImkXPcaFINYDzgZY33X8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tWo0vbasi7bXmn/MsOx13VC1IsWtpx/nYp0uj4iMzdA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tQQpndUYd5O87lOtrGjH3wl9VsOK0ray7RMasL90sBM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cQjXEDCMsOpKLLf+vlTgIHA+cbSJdzqhbSX9Wvh95aA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "7yMpU48IxK9SzP2cx3VnTownGEwFmeFofuuFT97SuuY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kSOx1kz0CmBgzKQHZlo65ZUY1DIv9A99JRm+Us2y6Ew=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ubQpdPBe6/xvtr+AcXdfYLSvYCR4ot0tivehkCsupb4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xal+iCJ6FTefRQToyoNksc9NCZShyn04NDGi4IYrcoM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "d7jU4iOK50xHxlkSifcxlZFCM46TSgQzoYivxG3HNLY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tJvl2nsBLBVzL3pp6sKWCL4UXeh3q/roYBJjSb74ve0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "OIUCaKRvIx9t1w6Hxlz1IcQTdPNCfdRNwnnTm10W+X0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "A9tvzsiElotOUVIB4CqfQp9mAwqvTM35YkmAR170aHA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "lI8gpK7hpb7c9x4RQugsxMnQay5LZJmwslZdvMx/dcE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dNCzh40U0XvdKnSDi3HRQOWQftEsDVqc4uUvsVFGoq8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "IP+iwEBWBwVVZIdpaMu8k5+soFCz+TZkYn3drKZ9grE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pnqyh6e0y5svHkJDShlN9CHV0WvMBE4QbtJpQw5ZCXc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "elEl42tbVDoRTLjAhZUFEtXiut4b3PVhg/1ZLZSQdtE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "vHuu2FxwclMHqyE6JBYbTYgbEkB0dqb/JuaxsvfwsmY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xTf7NCe3Gf8QpE78HR5OknlLTKfs9J+RN9UZpH6fnso=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "XiWSasRnJAulGR6+LCVD3mwRObXylqYWR9jvpywq12c=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "MZMxEQ5ikx0PG1YFIExv0UnTZogsvgeOEZTpzvBDn4w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yZMyMZBDrWbAhvnic7vvIYhmO9m5H2iuv0c8KNZrBzY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": 
"xxM14hTPY5j0vvcK2C7YAEjzdsfUTFHozHC0hEo1bxI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+01rqR1xVwkpGXcstbk1ItJqFVjH6Q8MGxEN3Cm9Y1A=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xOpLV0Z2VTRJ3iWtnWZcsyjXubTIkYWo31cO+HV1o1k=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "BWUOLqgLBqc5NwxVlSV5H3KFQPXbCp7mdo+jF+8cJqY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "fuQb1S6xZDGlrEbK+kI23aL53PP1PVNwqICnZNt9Yzg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SfscnoibFttahLdPVC4Ee+47ewGFKpDSU7M6HX19bKE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "rpSW2awybNVeKtat91VFxqbINoTfNhPfQAu+d73Xtf8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "9M/CP9ccOIIj2LLFmE0GFDO0Ban2wsNalEXfM6+h+1s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "WrEMG49l1ye4MhXs5ZS9tz8P6h+hDvthIg/2wW9ne1Q=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ImNhbfeyfH8qIEeA5ic0s3dAQBdzzTBS+CPsNih9vZ0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dWP33YDSn04UKJN2ogh2Rui0iW/0q2y18OCDRVcfyoo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "lYv0isAtfGh6H9tdp3cp2eHU7q2J+uk7QrgcxtK3w7Y=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "VGMoamB/+7zTOYcY/pqJc96xlv2PdW4hwsIAEIslTDQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yNeBWMF7BnD9wVwz2PgJsvWr77QiVvvWUvJF0+fqBug=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SfpvObJ+tJBXSvqeN7vlOfmhYign635lciYAJIjUtY8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dsen4NqjzVGjpjufiTMs3+gqeD09EbnuogPgxrJECwg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pxCWVM3sn19NsFEpgHbgLa+PmYlhN3mMiP0Wk8kJhYw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q11KNvJszjYIB9n9HcC+N4uz11a3eRj1L3BH9scKMDQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "A1PmkgcEToWh1JiVWE6mI5jUu7poxWWuCUt/cgRUUDc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "qJo3Hu4PJeanL7XEaWXO/n3YsodhZyd+MJOOmB9Kpd8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "BkBKLO8URFscfRY9Bav/1+L9mLohDgNr/MkZtGiraIs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "rZq5WA3Hx3xthOyHAJXK//f8pE2qbz7YKu3TIMp9GFY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "X07a/Lm80p5xd4RFs1dNmw+90tmPDPdGiAKVZkxd4zY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6YrBn2ofIw1b5ooakrLOwF41BWrps8OO0H9WH4/rtlE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "0l86Ag5OszXpa78SlOUV3K9nff5iC1p0mRXtLg9M1s4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Hn6yuxFHodeyu7ISlhYrbSf9pTiH4TDEvbYLWjTwFO0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zdf4y2etKBuIpkEU1zMwoCkCsdisfXZCh8QPamm+drY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "rOQ9oMdiK5xxGH+jPzOvwVqdGGnF3+HkJXxn81s6hp4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "61aKKsE3+BJHHWYvs3xSIBvlRmKswmaOo5rygQJguUg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "KuDb/GIzqDM8wv7m7m8AECiWJbae5EKKtJRugZx7kR0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Q+t8t2TmNUiCIorVr9F3AlVnX+Mpt2ZYvN+s8UGict8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tJRZIpKxUgHyL83kW8cvfjkxN3z6WoNnUg+SQw+LK+k=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pnUsYjip8SvW0+m9mR5WWTkpK+p6uwJ6yBUAlBnFKMk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "PArHlz+yPRYDycAP/PgnI/AkP8Wgmfg++Vf4UG1Bf0E=", + 
"subType": "00" + } + }, + { + "$binary": { + "base64": "wnIh53Q3jeK8jEBe1n8kJLa89/H0BxO26ZU8SRIAs9Q=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "4F8U59gzBLGhq58PEWQk2nch+R0Va7eTUoxMneReUIA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ihKagIW3uT1dm22ROr/g5QaCpxZVj2+Fs/YSdM2Noco=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "EJtUOOwjkrPUi9mavYAi+Gom9Y2DuFll7aDwo4mq0M0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dIkr8dbaVRQFskAVT6B286BbcBBt1pZPEOcTZqk4ZcI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "aYVAcZYkH/Tieoa1XOjE/zCy5AJcVTHjS0NG2QB7muA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "sBidL6y8TenseetpioIAAtn0lK/7C8MoW4JXpVYi3z8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "0Dd2klU/t4R86c2WJcJDAd57k/N7OjvYSO5Vf8KH8sw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "I3jZ92WEVmZmgaIkLbuWhBxl7EM6bEjiEttgBJunArA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "aGHoQMlgJoGvArjfIbc3nnkoc8SWBxcrN7hSmjMRzos=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "bpiWPnF/KVBQr5F6MEwc5ZZayzIRvQOLDAm4ntwOi8g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tI7QVKbE6avWgDD9h4QKyFlnTxFCwd2iLySKakxNR/I=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "XGsge0CnoaXgE3rcpKm8AEeku5QVfokS3kcI+JKV1lk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JQxlryW2Q5WOwfrjAnaZxDvC83Dg6sjRVP5zegf2WiM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "YFuHKJOfoqp1iGVxoFjx7bLYgVdsN4GuUFxEgO9HJ5s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Z6vUdiCR18ylKomf08uxcQHeRtmyav7/Ecvzz4av3k4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SPGo1Ib5AiP/tSllL7Z5PAypvnKdwJLzt8imfIMSEJQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "m94Nh6PFFQFLIib9Cu5LAKavhXnagSHG6F5EF8lD96I=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pfEkQI98mB+gm1+JbmVurPAODMFPJ4E8DnqfVyUWbSo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "DNj3OVRLbr43s0vd+rgWghOL3FqeO/60npdojC8Ry/M=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kAYIQrjHVu49W8FTxyxJeiLVRWWjC9fPcBn+Hx1F+Ss=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "aCSO7UVOpoQvu/iridarxkxV1SVxU1i9HVSYXUAeXk4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Gh6hTP/yj1IKlXQ+Q69KTfMlGZjEcXoRLGbQHNFo/1s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "/gDgIFQ4tAlJk3GN48IS5Qa5IPmErwGk8CHxAbp6gs0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "PICyimwPjxpusyKxNssOOwUotAUbygpyEtORsVGXT8g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "4lu+cBHyAUvuxC6JUNyHLzHsCogGSWFFnUCkDwfQdgI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pSndkmoNUJwXjgkbkgOrT5f9nSvuoMEZOkwAN9ElRaE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tyW+D4i26QihNM5MuBM+wnt5AdWGSJaJ4X5ydc9iWTU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "9Syjr8RoxUgPKr+O5rsCu07AvcebA4P8IVKyS1NVLWc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "67tPfDYnK2tmrioI51fOBG0ygajcV0pLo5+Zm/rEW7U=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "y0EiPRxYTuS1eVTIaPQUQBBxwkyxNckbePvKgChwd0M=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "NWd+2veAaeXQgR3vCvzlI4R1WW67D5YsVLdoXfdb8qg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "PY5RQqKQsL2GqBBSPNOEVpojNFRX/NijCghIpxD6CZk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": 
"lcvwTyEjFlssCJtdjRpdN6oY+C7bxZY+WA+QAqzj9zg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CWE7XRNylvTwO/9Fv56dNqUaQWMmESNS/GNIwgBaEI0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ijwlrUeS8nRYqK1F8kiCYF0mNDolEZS+/lJO1Lg93C8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "8KzV+qYGYuIjoNj8eEpnTuHrMYuhzphl80rS6wrODuU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wDyTLjSEFF895hSQsHvmoEQVS6KIkZOtq1c9dVogm9I=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SGrtPuMYCjUrfKF0Pq/thdaQzmGBMUvlwN3ORIu9tHU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "KySHON3hIoUk4xWcwTqk6IL0kgjzjxgMBObVIkCGvk4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "hBIdS9j0XJPeT4ot73ngELkpUoSixvRBvdOL9z48jY8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Tx6um0q9HjS5ZvlFhvukpI6ORnyrXMWVW1OoxvgqII0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zFKlyfX5H81+d4A4J3FKn4T5JfG+OWtR06ddyX4Mxas=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cGgCDuPV7MeMMYEDpgOupqyNP4BQ4H7rBnd2QygumgM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "IPaUoy98v11EoglTpJ4kBlEawoZ8y7BPwzjLYBpkvHQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Pfo4Am6tOWAyZNn8G9W5HWWGC3ZWmX0igI/RRB870Ro=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "fnTSjd7bC1Udoq6iM7UDnHAC/lsIXSHp/Gy332qw+/I=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "fApBgVRrTDyEumkeWs5p3ag9KB48SbU4Si0dl7Ns9rc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "QxudfBItgoCnUj5NXVnSmWH3HK76YtKkMmzn4lyyUYY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "sSOvwhKa29Wq94bZ5jGIiJQGbG1uBrKSBfOYBz/oZeI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "FdaMgwwJ0NKsqmPZLC5oE+/0D74Dfpvig3LaI5yW5Fs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "sRWBy12IERN43BSZIrnBfC9+zFBUdvjTlkqIH81NGt4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "/4tIRpxKhoOwnXAiFn1Z7Xmric4USOIfKvTYQXk3QTc=", + "subType": "00" + } + } + ] + }, + { + "_id": { + "$numberInt": "1" + }, + "encryptedDecimal": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "bE1vqWj3KNyM7cCYUv/cnYm8BPaUL3eMp5syTHq6NF4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "RGTjNVEsNJb+DG7DpPOam8rQWD5HZAMpRyiTQaw7tk8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "I93Md7QNPGmEEGYU1+VVCqBPBEvXdqHPtTJtMOn06Yk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "GecBFQ1PemlECWZWCl7f74vmsL6eB6mzQ9n6tK6FYfs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "QpjhZl+O1ORifgtCZuWAdcP6OKL7IZ2cA46v8FJcV28=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "RlQWwhU+uVv0a+9IB5cUkEfvHBvOw3B1Sx6WfPWMqes=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ubb81XTC7U+4tcNzf1oYvOY6gR5hC2Izqx54f4GuJ0E=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6M4Q5NMQ9TqNnjzGOxIkiUIY8TEL0I3XD1QnhefQUqU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "BtInzk9t2FFMCEY6AQ7zN8jwrrZEs2irSv6q0Q4NaIw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6vxXfETu9cuBIpRBo3jUUU04mJIH/aAhLX8K6VI5Xv0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wXPCdS+q23zi1bkPnaVG2j0PsVtxdeSLJ//h6J1x8RU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "KY3KkfBAsN2l80wbpj41G0gwBR5KmmFnZcagg7D3ENk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": 
"tI8NFAxXCX4VOnY5X73K6KI/Yspd3aR94KV39MhJlAw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "nFxH0UC3mATKA6Vboz+QX/hAjj19kF/SH6H5Cne7qC0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q8hYqIYaIi7nOdG/7qQZYnz8Bsacfi66M1nVku4SH08=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "4saA92R4arp4anvD9xFtze+sNcQqTEhPHyl1h70A8NE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "DbIziOBRRyeQS6RtBR09E37LV+CTKrEjGoRMLSpG6eE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Fv80Plp/7w2gnVqrwawLd6qhJ10G4NCDm3re67cNq4Y=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "T/T2oiQCBBES4YN7EodzPRdabZSFlYIClHBym+bQUZE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ZQgHD3l46Ujqtbnj1VbbeM29C9wJzOhz+yZ/7XdSrxk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ltlFKzWvyZvHxDFOYDd/XXJ6kUiJj0ln2HTCEz2o4Z4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "flW8A7bltC1u8bzx0WJtxosGJdOVsJFfbx33jxnpFGg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SXO+92QbMKwUSG2t27ciunV1c3VvFkUuDmSczpRe008=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+KioGs1GM+xRBzFE67ePTWj04KMSE5/Y6qUF7nJ5kvU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "L3xNVbh6YH+RzqABN+5Jgb7T234Efpn766DmUvxIxgg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "hPF+60mBYPjh21dEmPlBhKgyc9S2qLtTkypYvnqP2Fc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "EletRsETy2HcjaPIm2c8CkT7ch/P3pJJDC8hasepcSU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "r5bMXUaNKqLPxZ+TG9HYTG4aSDgcpim27rN8rQFkM0w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "0Q7Erdr8+/S0wUEDDIqlS5XjBVWvhZY65K0uUDb6+Ns=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xEcnhXy35hbXNVBPOOt3TUHbxvKfQ48KjA9b6/rbMqQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "T8bEpiQNgsEudXvyKE9SZlSvbpV/LUaslsdqgSFltyo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "hIoiaF2YjnxDbODfhFEB+JGZ5nf8suD3Shck5bwQ3N0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "qnA6qzejeRJ0rsZaZ0zOvKAaXyxt5lpscKQNYFZNl4k=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "anAKCL2DN/le2VaP0n2ucYSEH/DaaEH/8Sa4OqTZsRA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JCZlBJaFm618oWYSnT9Jr1MtwFVw4BZjOzO+5yWgR90=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yxyk4n9762WzcDVGnTn4jCqUnSMIVCrLDIjCX1QVj34=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "fDI6fdKvDJwim5/CQwWZEzcrXE3LHgy7FTtffcC7tXE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Vex+gcz5T+WkzsVZQrkqUR2ryyZbnaOGuWpYvjN0zCw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "8TLEXz+Gbbp6llHpZXVjLsdlYY9f6hrKpHVpyfDe0RY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "7fTyt5BrunypS65TfOzFW2E2qdIuT4SLeDeGlbQoJCs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "8fKGrkqN0/KuSjyXgDBmRauDKrSa//JBKRWHEB9xBf4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "s4codmG7uN4ss6P357jL21lazEe90M9GOK5WrOknSV0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "RkSpua8XF+NUdxVDU90EbLUTTyZFX3tt3atBTroFaRk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "LnTCuCDyAHK5B9KXzjtwGmWB+qergQk2OCjnIx9MI2A=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cBFh0virAX4pVXf/udIGI2951i0+0aZAdJcBVGtYnT4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "G54X6myQXWZ5fw/G31en3QbdgfXzL9+hFTtJpnWMqDI=", + 
"subType": "00" + } + }, + { + "$binary": { + "base64": "EdsiiuezcsFJFnYIyGjCOhnqMj1BOwTB5EFxN+ERUkg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dVH9MXLtk0WTwGQ3xmrhOqfropMUkDW3o6paNPGl3NU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "sB3HqXKWY3pKbuEH8BTbfNIGfbY+7/ZbOc3XC+JRNNI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "WHyDk62Xhqbo4/iie2aLIM4x2uuAjv6102dJSHI58oM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pNUFuHpeNRDUZ/NrtII2c6sNc9eGR1lIUlIyXKERA+0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "UPa+pdCqnN0bfAptdzldQOSd01gidrDKy8KhWrpSKAI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "l+7dOAlo+HUffMqFYXL6pgUFeTbwOM9CjKQLxEoLtc4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SRnDXV/rN6C8xwMutv9E1luv3DOUio3VkgPr8Cpm7Ew=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "QcH6gl+gX7xZ7OWhUNQMbndJy0Piz49pDo6RsnLkVSA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "t+uL4DnfsI/Zll/KXWW1cOKX3Hu8WIkm3pt9efCVSAQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "myutHDctku/+Uug/nD8gRbYvmx/IovtoAAC2/fz2oHA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6C+cjD0e0nSCP6cPqQYbNG7SlOd6Mfvi8hyfm7Ng+D8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zg01JSoOj9oBKT0S1ldJucXzY5AKgreS+h2xJreWTOs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "7qQ80/FjodHl1m1py/Oii0/9C/xWbLdhaRXQ+kkCP10=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "YwWMNH07vL6c5Nhg+MRnVByhzUunu8y0VLM9z/XvR5U=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Dle8bU98+fudAbc14SToZFkwvV3tcYVsjDug0NWljpc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "J+eKL1vPJmlzltvhI6Li5Fz/TJmi3Ng+ehRTcs46API=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zB3XzfFygLwC3WHkj0up+VbEd25KKoce1vOpG/5bwK4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "vnVnmOnL+z2pqwE+A6cVKS0Iwy4F4/2IiElJca9bUQM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+lG5r/Fpqry3BtFuvY67+RntmHAMDoLVOSGc6ZoXPb0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "L5MXQertqc6uj7ADe8aWKbd1sYHPCE7P1VYVg9Zc3VI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "imKONuZgopt0bhM3GMX2WVPwQYMTobuUUEdhcLfHs4c=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "eOkU1J1uVbiVFWBerbXsSIVcF2nqiicTkFy4x7kFHB8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "gI0uDhXeoH/UatDQKEf4qo8FHzWZDhb/wuWTqbq/ID4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cOkd5Aa3btYhtojE/smsF/PJnULqQ4NNqTkU6KXTFmo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "AWNJMs1MTe294oFipp8Y6P0CjpkZ4qCZoClQF3XcHq8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6gJtlzXOFhGYrVbTuRMmvMlDTwXdNtR9aGBlHZPwIMw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "LEmwVGA/xsEG7UrcOoYLFu6KCXgijzFznenknuDacm8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "mIRFPTXRrGaPtp/Ydij2jgkRe4uoUvAKxW2d8b9zYL0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "B+Uv2u48WALOO0L311z+eryjYQzKJVMfdHMZPhOAFmY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "INXXp0wDyVCq+NtfIrrC2ciETmyW/dWB/48/u4yLEZ4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "se7DGo8XrlrQDLEcco1tZrQt9kDe+0RTyl2bw/quG4w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "vr0m2+Zk9lbN6UgWCyn8xJWJOokU3IDYab5U5q1+CgQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": 
"XI+eJ8Gy2JktG1gICgoj1qpsfy1tKmH0kglWbaQH6DA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "A+UCuNnuAUqnQzspA6TVqUPRmtZmpSex5HFw7THRxs0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xaH2Ehfljd19uo0Fvb3iwkdaiWEVQd2YPoitgEPkhSM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "S/iZBJGcc8+qZxyMtab65MMBoSglybwk3x58Nb86gnY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "w14ZE5qqY5YgkS4Zcs9YNbrQbY1XfGOOHNn9bOYnFVQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "0MhGd/jEF1vjkKGp+ZMn9SjLK54jkp9W4Hg+Sp/oxaI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "92QZ73e/NRTYgCm4aifaKth6aAsKnLLccBc0zx/qUTY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "WOjzemCgFJOiGIp81RSVh/tFlzSTj9eFWcBnsiv2Ycs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "DrsP9CmfKPjw5yLL8bnSeAxfNzAwlb+Z8OqCiKgBY7o=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "lMogqg8veBv6mri3/drMe9afJiKMvevkmGcw9BedfLo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "TxqwNcY8Tg2MPpNdkPBwvfpuTttSYRHU26DGECKYQ9o=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "l0u1b4b4vYACWIwfnB7PZac4oDEgjQZCzHruNPTgAIY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "iVSGQ+cCfhbWIrY/v/WBORK92elu9gfRKyGhr6r/k00=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yK1forG50diEXte8ECzjfpHeYsPyuQ/dgxbxn/nzY5k=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "gIfTLCD3VwnOwkC0zPXWTqaITxX6ZplA69PO2a6zolc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "O/Zxlgh3WqpzJ7+Sd8XWMVID4/GXJUUWaSqfgDUi3b0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ZQ6yv368zwahUqSUYH/StL0Qgz/TwS1CzlMjVDvCciI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "m2rPEYkjwyiKdonMrKlcF7hya4lFOAUwEePJ3SgrNx8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Mq0yl5iVKlq71bT/dT/fXOWf2n90bTnXFnOdGDN0JOc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6qDGMXipPLC2O6EAAMjO2F9xx4rdqZso4IkPpH2304U=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "jvQHRQQa2RIszE2LX2Hv2LbRhYawJ6qmtRt8HZzFQXg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ovJXQrkZlpeHRciKyE/WWNm5O389gRgzx1W+Dw596X4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "a4kgRNvYctGYqyQv9qScL/WkljTYVylJ9pE9KDULlxU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "qV4Q48vPiCJMTjljotzYKI/zfExWpkKOSHGcAjGyDig=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "jtI7zbBF+QW/aYYTkn90zzyHLXLgmy7l1bzgMb2oqic=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q0KmJl9txPdn962UNvnfe6UFhdk9YaFZuTm33F+csso=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ULNdEqeZJgtmNOhN/Y9INzsE9AnxWYwOMn+pIbRXIFs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "R4oz9+wkdjpKe5tE1jpG7IURAnfvS5fLP4LrD5cZfTE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "qG5Z7VhwSu/HT/YFTgDzyAAzJKq51xPw2HeEV5btYC4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "OM/1DmIIZ5Qyhtq8TGkHTBEMVKjAnKRZMRXYtTG8ctc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "2R5vZbljLXnDFA99YfGuRB7pAdPJVKsT25zLNMC0fUk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "OMbavF2EmdAz1fHkLV3ctFEUDfriKhoT2gidwHZ9z1o=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "MWT4Zrw3/vVvTYMa1Is5Pjr3wEwnBfnEAPPUAHKQhNU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tBkRPfG9yxfKocQx5pAJX0oEHKPL0Tgtr+0UYe09InE=", + 
"subType": "00" + } + }, + { + "$binary": { + "base64": "lqxpnDR/H0YgH7RcfKoNoaaRhe1SIazIeMbQ1fu9y3Q=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "utT1UdR22PWOTrOkZauztX613lAplV4eh/ejTRb7ZSk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "S+Y2yFyKi/a6FXhih4yGo29X8I8OT6/zwEoX6NMKT4o=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "QSjVppg29x6oS5yBg8OFjrFt0tuTpWCuKxfIy0k8YnE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "y3r6/Xsfvsl3HksXlVYkJgHUqpQGfICxg3x9f8Zw1qM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "BSltHzEwDjFN4du9rDHAPvl22atlcTioEtt+gC5L1tk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "0arGXjSN0006UnXbrWsGqhvBair569DeFDUME3Df3rA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "s/DumaMad08S+PBUUcrS+v42K0z8HgcdiQtrFAEu2Qs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "EzJ8Y8N0OQBTlnvrK82PdevDNZZO4E6CNgYVu8Cj6Ks=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "VA4vr8jBPI5QdiPrULzzZjBMIUbG3V7Slg5zm0bFcKc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "YAOvEB2ZLtq9LQiFViBHWaxxWVVonC2rNYj9tN9s3L0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "hgaHMo9aAGS+nBwvqnTjZO+YkiQPY1c1XcIYeaYKHyI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "YvaoLt3ZpH0atB0tNzwMjpoxRYJXl0DqSjisMJiGVBE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "EMmW6CptFsiLoPOi5/uAJQ2FmeLg6mCpuVLLrRWk7Mc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "1jQsNMarSnarlYmXEuoFokeBMg/090qUD9wqo1Zn8Gs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "hupXNKhRpJxpyDAAP1TgJ5JMZh9lhbMk6s7D7dMS3C8=", + "subType": "00" + } + } + ] + } + ] + } + } + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2-Range-Decimal-Update.json b/test/client-side-encryption/spec/legacy/fle2-Range-Decimal-Update.json new file mode 100644 index 0000000000..2f8b991cf7 --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2-Range-Decimal-Update.json @@ -0,0 +1,1934 @@ +{ + "runOn": [ + { + "minServerVersion": "6.2.0", + "topology": [ + "replicaset" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "encrypted_fields": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimal", + "bsonType": "decimal", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + }, + "key_vault_data": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ], + "tests": [ + { + "description": "FLE2 Range Decimal. 
Update.", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimal": { + "$numberDecimal": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimal": { + "$numberDecimal": "1" + } + } + } + }, + { + "name": "updateOne", + "arguments": { + "filter": { + "encryptedDecimal": { + "$gt": { + "$numberDecimal": "0" + } + } + }, + "update": { + "$set": { + "encryptedDecimal": { + "$numberDecimal": "2" + } + } + } + }, + "result": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0 + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedDecimal": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimal", + "bsonType": "decimal", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedDecimal": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimal", + "bsonType": "decimal", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command_name": "update", + "command": { + "update": "default", + "ordered": true, + "updates": [ + { + "q": { + "encryptedDecimal": { + "$gt": { + "$binary": { + "base64": 
"CgljAAADcGF5bG9hZADZYgAABGcAnWIAAAMwAH0AAAAFZAAgAAAAAJu2KgiI8vM+kz9qD3ZQzFQY5qbgYqCqHG5R4jAlnlwXBXMAIAAAAAAAUXxFXsz764T79sGCdhxvNd5b6E/9p61FonsHyEIhogVjACAAAAAAeEtIU/sp7zwjA/VArYkCzkUUiRiQOmlTaVZvfUqbEp0AAzEAfQAAAAVkACAAAAAAPQPvL0ARjujSv2Rkm8r7spVsgeC1K3FWcskGGZ3OdDIFcwAgAAAAACgNn660GmefR8jLqzgR1u5O+Uocx9GyEHiBqVGko5FZBWMAIAAAAACMKx1UyNAN4yVafFht8jp4w4LNaJcLhnozfSMzDHD3owADMgB9AAAABWQAIAAAAAD2Zi6kcxmaD2mY3VWrP+wYJMPg6cSBIYPapxaFQxYFdQVzACAAAAAAM/cV36BLBY3xFBXsXJY8M9EHHOc/qrmdc2CJmj3M89gFYwAgAAAAAHdek2wlAQ5BQORVjudyziRTV0EKx8qbsnoDiw4HG2xaAAMzAH0AAAAFZAAgAAAAAOaNqI9srQ/mI9gwbk+VkizGBBH/PPWOVusgnfPk3tY1BXMAIAAAAAAc96O/pwKCmHCagT6T/QV/wz4vqO+R22GsZ1dse2Vg6QVjACAAAAAA3tld+twIlVWnCTXElspau5k02CwxXDjh+3u1CQtV/W4AAzQAfQAAAAVkACAAAAAA0XlQgy/Yu97EQOjronl9b3dcR1DFn3deuVhtTLbJZHkFcwAgAAAAACoMnpVl6EFJak8A+t5N4RFnQhkQEBnNAx8wDqmq5U/dBWMAIAAAAABRmXIchytMNz4J439viiY5FZ1OB3AXJBkZ3udUqHpsqAADNQB9AAAABWQAIAAAAAAOtpNexRxfv0yRFvZO9DhlkpU4mDuAb8ykdLnE5Vf1VAVzACAAAAAAeblFKm/30orP16uQpZslvsoS8s0xfNPIBlw3VkHeekYFYwAgAAAAAJQdzGMO2s1AkdKz3ZPytivrQphUFVhvhMcXjvUGzHBDAAM2AH0AAAAFZAAgAAAAAIr8xAFm1zPmrvW4Vy5Ct0W8FxMmyPmFzdWVzesBhAJFBXMAIAAAAABYeeXjJEzTHwxab6pUiCRiZjxgtN59a1y8Szy3hfkg+gVjACAAAAAA161loizzKvXS1Po/3bxeNICmKpsPSK9Q+EpkISK3jVoAAzcAfQAAAAVkACAAAAAAl+ibLk0/+EwoqeC8S8cGgAtjtpQWGEZDsybMPnrrkwEFcwAgAAAAAHPPBudWgQ+HUorLDpJMqhS9VBF2VF5aLcxgrM1s+yU7BWMAIAAAAAACyakB3CZWEjbxK0u9Sflc3+EafBAQFbvBpCxKvxuEQAADOAB9AAAABWQAIAAAAACR9erwLTb+tcWFZgJ2MEfM0PKI9uuwIjDTHADRFgD+SQVzACAAAAAAcOop8TXsGUVQoKhzUllMYWxL93xCOkwtIpV8Q6hiSYYFYwAgAAAAAJ43998lzKoVqWm99ZzKHJLeVscGNCVoKDvpUtt2rDI9AAM5AH0AAAAFZAAgAAAAALv0vCPgh7QpmM8Ug6ad5ioZJCh7pLMdT8FYyQioBQ6KBXMAIAAAAADsCPyIG8t6ApQkRk1fX/sfc1kpuWCWP8gAEpnYoBSHrQVjACAAAAAAj2h1WLr0EFEFZ31djx3BtLIbAtdE05ax54opkJo4/bQAAzEwAH0AAAAFZAAgAAAAAIW4AxJgYoM0pcNTwk1RSbyjZGIqgKL1hcTJmNrnZmoPBXMAIAAAAAAZpfx3EFO0vY0f1eHnE0PazgqeNDTaj+pPJMUNW8lFrAVjACAAAAAAqFD83E8POmo6pdqg7D+jWtngbgcV99mbQBxkbpX7ds4AAzExAH0AAAAFZAAgAAAAAKliO6L9zgeuufjj174hvmQGNRbmYYs9yAirL7OxwEW3BXMAIAAAAAAqU7vs3DWUQ95Eq8OejwWnD0GuXd+ASi/uD6S0l8MM1QVjACAAAAAAp0cwpAh7SiuStPwYNVvp83N5GWyWRWA7UGRxHDDj8g8AAzEyAH0AAAAFZAAgAAAAAOGQcCBkk+j/Kzjt/Cs6g3BZPJG81wIHBS8JewHGpgk+BXMAIAAAAABjrxZXWCkdzrExwCgyHaafuPSQ4V4x2k9kUCAqUaYKDQVjACAAAAAAJr/tEuxScrJJEMECy4itHf1y5+gO2l5sBjgM/EOLXw0AAzEzAH0AAAAFZAAgAAAAAPmCNEt4t97waOSd5hNi2fNCdWEkmcFJ37LI9k4Az4/5BXMAIAAAAABX7DuDPNg+duvELf3NbLWkPMFw2HGLgWGHyVWcPvSNCAVjACAAAAAAeNCEn5Z3A5CgCnIgaBl2N8p0t+5rRY06vGg6ePVMaTUAAzE0AH0AAAAFZAAgAAAAAD4vtVUYRNB+FD9yoQ2FVJH3nMeJeKbi6eZfth638YqbBXMAIAAAAAANCuUB4OdmuD6LaDK2f3vaqfgYYvg40wDXOBbcFjTqLwVjACAAAAAATozHsao4er/Uk5H/pKe/1JEo/3h4Pgah2RgJHXf2PeYAAzE1AH0AAAAFZAAgAAAAAJPIRzjmTjbdIvshG6UslbEOd797ZSIdjGAhGWxVQvK1BXMAIAAAAABgmJ0Jh8WLs9IYs/a7DBjDWd8J3thW/AGJK7zDnMeYOAVjACAAAAAAtD48LbnCkTvacfz8DvR1ixsbYceiJxJRV7tLqDUWvVwAAzE2AH0AAAAFZAAgAAAAAABQyKQPoW8wGPIqnsTv69+DzIdRkohRhOhDmyVHkw9WBXMAIAAAAAAqWA2X4tB/h3O1Xlawtz6ndI6WaTwgU1QYflL35opu5gVjACAAAAAAphx2eRFGSW/tm3HxilqRv5Zj5TtOy4KSGiCIm6yJANYAAzE3AH0AAAAFZAAgAAAAACB7NOyGQ1Id3MYnxtBXqyZ5Ul/lHH6p1b10U63DfT6bBXMAIAAAAADpOryIcndxztkHSfLN3Kzq29sD8djS0PspDSqERMqokQVjACAAAAAAIHYCljTfUu/Ox23Nrjz2Z6cqQpn4s0sJV/SxkC9UtIEAAzE4AH0AAAAFZAAgAAAAAKVfXLfs8XA14CRTB56oZwV+bFJN5BHraTXbqEXZDmTkBXMAIAAAAAASRWTsfGOpqdffiOodoqIgBzG/yzFyjR5CfUsIUIWGpgVjACAAAAAAGs7l0GsFNkQMcJBUuUsEOMuQupbEvi7MDcezspsCY3EAAzE5AH0AAAAFZAAgAAAAAH/aZr4EuS0/noQR9rcF8vwoaxnxrwgOsSJ0ys8PkHhGBXMAIAAAAACd7ObGQW7qfddcvyxRTkPuvq/PHu7+6I5dxwS1Lzy5XAVjACAAAAAAWnfsbdu5Qvb62rEpBR90qh+3jdYfp266dmuNPLoXys4AAzIwAH0AAAAFZAAgAAAAAKvlcpFFNq0oA+urq3w6d80PK1HHHw0H0yVWvU9aHijXBXMAIAAAAADWnAHQ5Fhlcjawki7kWz
dqjM2f6IdGJblojrYElWjsZgVjACAAAAAAdNxswYIMjqVta8sLC8wniMm6gOgIgG5eWTPAxItBJJ4AAzIxAH0AAAAFZAAgAAAAAH/2aMezEOddrq+dNOkDrdqf13h2ttOnexZsJxG1G6PNBXMAIAAAAABNtgnibjC4VKy5poYjvdsBBnVvDTF/4mmEAxsXVgZVKgVjACAAAAAArWXr2Myg8T8/0eYBCUXDOsIR//zqZtlE6Lb1YZJkvYcAAzIyAH0AAAAFZAAgAAAAAF2wZoDL6/V59QqO8vdRZWDpXpkV4h4KOCSn5e7x7nmzBXMAIAAAAADLZBu7LCYjbThaVUqMK14H/elrVOYIKJQCx4C9Yjw37gVjACAAAAAAWeF6F8pBeCm27PWjQGrlyN7JHp9WqCNnwFzqZ+V51t0AAzIzAH0AAAAFZAAgAAAAAH27yYaLn9zh2CpvaoomUPercSfJRUmBY6XFqmhcXi9QBXMAIAAAAAAUwumVlIYIs9JhDhSj0R0+59psCMsFk94E62VxkPt42QVjACAAAAAAjbsmlVrfexmULer3RtGwnOf20pF0xv4Z46OHOaIwgzEAAzI0AH0AAAAFZAAgAAAAALMg2kNAO4AFFs/mW3In04yFeN4AP6Vo0klyUoT06RquBXMAIAAAAAAgGWJbeIdwlpqXCyVIYSs0dt54Rfc8JF4b8uYc+YUj0AVjACAAAAAAsNKKan8cMW1oYFmQiyPZbepuJlDD0/CQUS21++CP16kAAzI1AH0AAAAFZAAgAAAAALas/i1T2DFCEmrrLEi7O2ngJZyFHialOoedVXS+OjenBXMAIAAAAAA1kK0QxY4REcGxHeMkgumyF7iwlsRFtw9MlbSSoQY7uAVjACAAAAAAUGZ/BwF6Q9hcFBC8F5ed6RwYWJ1/OahRwljBloMgkFAAAzI2AH0AAAAFZAAgAAAAAP1TejmWg1CEuNSMt6NUgeQ5lT+oBoeyF7d2l5xQrbXWBXMAIAAAAABPX0kj6obggdJShmqtVfueKHplH4ZrXusiwrRDHMOKeQVjACAAAAAApU9zcPAQ2xOspmwhjWN90v2m2clQ4rk7WBmm/nUyOHQAAzI3AH0AAAAFZAAgAAAAAMrKn+QPa/NxYezNhlOX9nyEkN1kE/gW7EuZkVqYl0b8BXMAIAAAAABUoZMSPUywRGfX2EEencJEKH5x/P9ySUVrhStAwgR/LgVjACAAAAAAl/hDhauhBkda/I/kLr/XLxyso2guawC2isj+tDQhfMoAAzI4AH0AAAAFZAAgAAAAAMmD1+a+oVbiUZd1HuZqdgtdVsVKwuWAn3/M1B6QGBM3BXMAIAAAAACLyytOYuZ9WEsIrrtJbXUx4QgipbaAbmlJvSZVkGi0CAVjACAAAAAAVKbMdxvkfCBoqJCUVjLrtKF90a80LocckdFc7LvYuWIAAzI5AH0AAAAFZAAgAAAAAOVKV7IuFwmYP1qVv8h0NvJmfPICu8yQhzjG7oJdTLDoBXMAIAAAAABL70XLfQLKRsw1deJ2MUvxSWKxpF/Ez73jqtbLvqbuogVjACAAAAAARiC2ShVCKMjjiqB6xg+jDIYJNhhWqmceO07EC8rEXkoAAzMwAH0AAAAFZAAgAAAAAKS/1RSAQma+xV9rz04IcdzmavtrBDjOKPM+Z2NEyYfPBXMAIAAAAAAOJDWGORDgfRv8+w5nunh41wXb2hCA0MRzwnLnQtIqPgVjACAAAAAAWU2Vw2Rta8oHxPPfOWQa86tjYWndb0E3RNlZ9gthHt0AAzMxAH0AAAAFZAAgAAAAAFeq8o82uNY1X8cH6OhdTzHNBUnCChsEDs5tm0kPBz3qBXMAIAAAAABaxMBbsaeEj/EDtr8nZfrhhhirBRPJwVamDo5WwbgvTQVjACAAAAAAqcsCyJuLvxG8STrUI1Ccaga760+4fbmIPc6bXodea0gAAzMyAH0AAAAFZAAgAAAAAI8IKIfDrohHh2cjspJHCovqroSr5N3QyVtNzFvT5+FzBXMAIAAAAABXHXteKG0DoOMmECKp6ro1MZNQvXGzqTDdZ0DUc8QfFAVjACAAAAAA47u7VuqtYJ4jt5CcGrUacopqApwpqEpIpkalvCRtul0AAzMzAH0AAAAFZAAgAAAAAJkHvm15kIu1OtAiaByj5ieWqzxiu/epK6c/9+KYIrB0BXMAIAAAAACzg5TcyANk0nes/wCJudd1BwlkWWF6zw3nGclq5v3SJQVjACAAAAAAvTQRtEYdIj2+bfJNamvrgWRoBsYYjMQx9MyuFwSfKmYAAzM0AH0AAAAFZAAgAAAAAAYSOvEWWuSg1Aym7EssNLR+xsY7e9BcwsX4JKlnSHJcBXMAIAAAAABT48eY3PXVDOjw7JpNjOe1j2JyI3LjDnQoqZ8Je5B2KgVjACAAAAAA4eLwHqiSgM1pMqjK3QFgYauOnI3emnlOK3rgQPuec4AAAzM1AH0AAAAFZAAgAAAAAGQxC9L1e9DfO5XZvX1yvc3hTLtQEdKO9FPMkyg0Y9ZABXMAIAAAAADtmcMNJwdWLxQEArMGZQyzpnu+Z5yMmPAkvgq4eAKwNQVjACAAAAAAxA7XR9Cp6671TiZesvqE/EPKVPXfGDaYuDnce+MaZw0AAzM2AH0AAAAFZAAgAAAAADLHK2LNCNRO0pv8n4fAsxwtUqCNnVK8rRgNiQfXpHSdBXMAIAAAAACf16EBIHRKD3SzjRW+LMOl+47QXA3CJhMzlcqyFRW22AVjACAAAAAA9F9Ztm39rTapXoeRmzk6eUoa3MDPatwoQD5PvD+3xGEAAzM3AH0AAAAFZAAgAAAAAHiZJiXKNF8bbukQGsdYkEi95I+FSBHy1I5/hK2uEZruBXMAIAAAAADE+lZBa8HDUJPN+bF6xI9x4N7GF9pj3vBR7y0BcfFhBAVjACAAAAAArsw2CrSC2y02EiWJxViVxlYRIXtWARpUTwO/mfqS7O4AAzM4AH0AAAAFZAAgAAAAAI1oa2OIw5TvhT14tYCGmhanUoYcCZtNbrVbeoMldHNZBXMAIAAAAAAx2nS0Ipblf2XOgBiUOuJFBupBhe7nb6QPLZlA4aMPCgVjACAAAAAAaVoGxV5YH4urmcw3Zocg+QHoq+hA0u6nIkconI0MtZoAAzM5AH0AAAAFZAAgAAAAABgTWS3Yap7Q59hii/uPPimHWXsr+DUmsqfwt/X73qsOBXMAIAAAAACKK05liW5KrmEAvtpCB1WUltruzUylDDpjea//UlWoOAVjACAAAAAAAI3iOqWSA+DpgNr/neIRpt85Q8pLS/81ZKDnbC4io/0AAzQwAH0AAAAFZAAgAAAAABOAnBPXDp6i9TISQXvcNKwGDLepZTu3cKrB4vKnSCjBBXMAIAAAAADjjzZO7UowAAvpwyG8BNOVqLCccMFk3aDK4unUeft5ywVjACAAAAAAIZqKrUcbtze/K3CUKdchuVXQYkgj6jQfIeMZ3GrtYeEAAzQxAH0AAAAFZAAgAAAAAHN8hyvT1lYrAsdiV
5GBdd5jhtrAYE/KnSjw2Ka9hjz9BXMAIAAAAAD794JK7EeXBs+D7yOVK7nWF8SbZ/7U8gZ7nnT9JFNwTAVjACAAAAAAQ4K5cDGLM/2L7KbMt537tsG1eiWrboKMj6pXVfBDIBoAAzQyAH0AAAAFZAAgAAAAAO93bPrq8bsnp1AtNd9ETnXIz0lH/2HYN/vuw9wA3fyFBXMAIAAAAABHlls5fbaF2oAGqptC481XQ4eYxInTC29aElfmVZgDUgVjACAAAAAAe0oLBk+ZaipcPgjjHvACAyA42cLY8i+s+V0EsarulZkAAzQzAH0AAAAFZAAgAAAAAL1YsAZm1SA0ztU6ySIrQgCCA74V6rr0/4iIygCcaJL6BXMAIAAAAADTXWTHWovGmUR1Zg9l/Aqq9H5mOCJQQrb/Dfae7e3wKAVjACAAAAAAAeXpcv0oCp9iY/U81w+WJjFjGn60KLQEURJd/pSoaJgAAzQ0AH0AAAAFZAAgAAAAAEoFAeHk0RZ9kD+cJRD3j7PcE5gzWKnyBrF1I/MDNp5mBXMAIAAAAACgHtc2hMBRSZjKw8RAdDHK+Pi1HeyjiBuAslGVNcW5tAVjACAAAAAAmSJiRfjmuwq5cYfadeYQRkPqbGYYrMNTQeyhBEAYClEAAzQ1AH0AAAAFZAAgAAAAAAW+7DmSN/LX+/0uBVJDHIc2dhxAGz4+ehyyz8fAnNGoBXMAIAAAAAA6Ilw42EvvfLJ3Eq8Afd+FjPoPcQutZO6ltmCLEr8kxQVjACAAAAAAUHZ9EwBIvDI4PUeCOzZCuVN9g9h5Zvq2mSpmejPLBE8AAzQ2AH0AAAAFZAAgAAAAANBdV7M7kuYO3EMoQItAbXv4t2cIhfaT9V6+s4cg9djlBXMAIAAAAABvz4MIvZWxxrcJCL5qxLfFhXiUYB1OLHdKEjco94SgDgVjACAAAAAAvz1koQRh+weAUstLWz/iHxFrLReN4KtQjdEU/zrKOFsAAzQ3AH0AAAAFZAAgAAAAAMoAoiAn1kc79j5oPZtlMWHMhhgwNhLUnvqkqIFvcH1NBXMAIAAAAADcJTW7WiCyW0Z9YDUYwppXhLj4Ac1povpJvcAq+i48MQVjACAAAAAARL0k3Uwe7OUwTejHMoNwK+twNHQznmo1vVUZaH7h8BIAAzQ4AH0AAAAFZAAgAAAAACI3j5QP7dWHpcT6WO/OhsWwRJNASBYqIBDNzW8IorEyBXMAIAAAAABxUpBSjXwCKDdGP9hYU+RvyR+96kChfvyyRC4jZmztqAVjACAAAAAA93/qZqyku1BJePH5hmMWsMfuMlF7TOFZp7z2wLJNdeYAAzQ5AH0AAAAFZAAgAAAAAKsbycEuQSeNrF8Qnxqw3x3og8JmQabwGqnDbqzFRVrrBXMAIAAAAACno/3ef2JZJS93SVVzmOZSN+jjJHT8s0XYq2M46d2sLAVjACAAAAAARUbN9bbz8Balyw8pmBLg3L5dy3aQ9s8IgqpVLeQzTkUAAzUwAH0AAAAFZAAgAAAAAPXIcoO8TiULqlxzb74NFg+I8kWX5uXIDUPnh2DobIoMBXMAIAAAAADR6/drkdTpnr9g1XNvKDwtBRBdKn7c2c4ZNUVK5CThdQVjACAAAAAAnJXYvnjljaE6ajRF+h90FXzJ9EJY26D0oG9t61ZMzqgAAzUxAH0AAAAFZAAgAAAAAEa03ZOJmfHT6/nVadvIw71jVxEuIloyvxXraYEW7u7pBXMAIAAAAADzRlBJK75FLiKjz3djqcgjCLo/e3yntI3MnPS48OORhgVjACAAAAAAbFS6SmTSDF3AsPceQrRwMMRoAnzL/k5avSHlIji5zYIAAzUyAH0AAAAFZAAgAAAAAKx8NLSZUU04pSSGmHa5fh2oLHsEN5mmNMNHL95/tuC9BXMAIAAAAAA59hcXVaN3MNdHoo11OcH1aPRzHCwpVjO9mGfMz4xh3QVjACAAAAAAoIvo/iOb/eXJv7w4KSQo74DOyO4uAIO09Ba7JZw9ousAAzUzAH0AAAAFZAAgAAAAAHNKAUxUqBFNS9Ea9NgCZoXMWgwhP4x0/OvoaPRWMquXBXMAIAAAAABUZ551mnP4ZjX+PXU9ttomzuOpo427MVynpkyq+nsYCQVjACAAAAAAlCTphL00SYotztwI+txbHAFTusjh1nNbiya24rM1dUEAAzU0AH0AAAAFZAAgAAAAALfGXDlyDVcGaqtyHkLT0qpuRhJQLgCxtznazhFtuyn/BXMAIAAAAABipxlXDq14C62pXhwAeen5+syA+/C6bN4rtZYcO4zKwAVjACAAAAAA431CBTW4s3IQde024W2cz5iEnj8EJ7p07esepoVDxmQAAzU1AH0AAAAFZAAgAAAAANoEr8sheJjg4UCfBkuUzarU9NFoy1xwbXjs5ifVDeA9BXMAIAAAAABPoyTf6M+xeZVGES4aNzVlq7LgjqZXJ/QunjYVusGUEAVjACAAAAAAywuLdjJJuXewOXMAr25GMBEif8+P74+pGaENUn+XIB8AAzU2AH0AAAAFZAAgAAAAAKvDiK+xjlBe1uQ3SZTNQl2lClIIvpP/5CHwY6Kb3WlgBXMAIAAAAAANnxImq5MFbWaRBHdJp+yD09bVlcFtiFDYsy1eDZj+iQVjACAAAAAAvwntX1RgeNfp5oiD+r3sAKIbMGy1yEblODDYWec1xuIAAzU3AH0AAAAFZAAgAAAAAF49IlFH9RmSUSvUQpEPUedEksrQUcjsOv44nMkwXhjzBXMAIAAAAADJtWGbk0bZzmk20obz+mNsp86UCu/nLLlbg7ppxYn7PgVjACAAAAAAKW8QTERGXwh6fE4gJsULcmu80B4BMkMaZR9USn/C6RgAAzU4AH0AAAAFZAAgAAAAAOuSJyuvz50lp3BzXlFKnq62QkN2quNU1Gq1IDsnFoJCBXMAIAAAAAAqavH1d93XV3IzshWlMnzznucadBF0ND092/2ApI1AcAVjACAAAAAAmVcLt9hf9/q+2O3K4cvFAsvnCsSofHZ5iTzIaR5YKFIAAzU5AH0AAAAFZAAgAAAAALtgC4Whb4ZdkCiI30zY6fwlsxSa7lEaOAU3SfUXr02XBXMAIAAAAACgdZ6U1ZVgUaZZwbIaCdlANpCw6TZV0bwg3DS1NC/mnAVjACAAAAAAYJOnAwVW0gPl1jkCxAL1ZAhKkJ1+ShfwyaIaipflCegAAzYwAH0AAAAFZAAgAAAAAF6PfplcGp6vek1ThwenMHVkbZgrc/dHgdsgx1VdPqZ5BXMAIAAAAACha3qhWkqmuwJSEXPozDO8y1ZdRLyzt9Crt2vjGnT7AAVjACAAAAAA1JYhGDDvtf8b2Pup5jdzQmy325tGo27n/5thjZ/GI4sAAzYxAH0AAAAFZAAgAAAAAKoLEhLvLjKc7lhOJfx+VrGJCx9tXlOSa9bxQzGR6rfbBXMAIAAAAAAIDK5wNnjRMBzET7x/KAMExL/zi1IumJM92XTgXfoPoAVjACAAAAAAhoVLxo0hXbxAzz2BWbeRxAzk
08TbVuHxJkdsbCISswkAAzYyAH0AAAAFZAAgAAAAADoQv6lutRmh5scQFvIW6K5JBquLxszuygM1tzBiGknIBXMAIAAAAADAD+JjW7FoBQ76/rsECmmcL76bmyfXpUU/awqIsZdO+wVjACAAAAAAwLIixR/svwADNhtiOZ9mDF2rUqEvuPx2tUSQbhOpersAAzYzAH0AAAAFZAAgAAAAAJNHUGAgn56KekghO19d11nai3lAh0JAlWfeP+6w4lJBBXMAIAAAAAD9XGJlvz59msJvA6St9fKW9CG4JoHV61rlWWnkdBRLzwVjACAAAAAAKMVXumaH5QOt5WVHFip9xWx9e5I/5Y9uEO0UAX6dy7QAAzY0AH0AAAAFZAAgAAAAAHgYoMGjEE6fAlAhICv0+doHcVX8CmMVxyq7+jlyGrvmBXMAIAAAAAC/5MQZgTHuIr/O5Z3mXPvqrom5JTQ8IeSpQGhO9sB+8gVjACAAAAAAPMgD8Rqnd94atKnMyPjlTwthUQ710MKJVqgtwNXLFWwAAzY1AH0AAAAFZAAgAAAAANpIljbxHOM7pydY877gpRQvYY2TGK7igqgGsavqGPBABXMAIAAAAAAqHyEu9gpurPOulApPnr0x9wrygY/7mXe9rAC+tPK80wVjACAAAAAAezZExYLAbjyMsVuSrWGQ+LsXYEcp6AFMANkQ496mTXcAAzY2AH0AAAAFZAAgAAAAAGR+gEaZTeGNgG9BuM1bX2R9ed4FCxBA9F9QvdQDAjZwBXMAIAAAAABSkrYFQ6pf8MZ1flgmeIRkxaSh/Eep4Btdx4QYnGGnwAVjACAAAAAAQ011358xXtVDOyGPpF2+zGoMdiQ8PGCB1QfDcluYUF4AAzY3AH0AAAAFZAAgAAAAAFNprhQ3ZwIcYbuzLolAT5n/vc14P9kUUQComDu6eFyKBXMAIAAAAAAcx9z9pk32YbPV/sfPZl9ALIEVsqoLXgqWLVK/tP+heAVjACAAAAAA3eqduo9CWE5BePqBq7KCh5++QqXnyjCwybB15Zoeiu4AAzY4AH0AAAAFZAAgAAAAADgyPqQdqQrgfmJjRFAILTHzXbdw5kpKyfeoEcy6YYG/BXMAIAAAAAAE+3XsBQ8VAxAkN81au+f3FDeCD/s7KoZD+fnM1MJSSAVjACAAAAAAWHAiQNo6HBkIhf72fVQxJLaCtNeTqn2OuMmYZjT7PRkAAzY5AH0AAAAFZAAgAAAAAI0CT7JNngTCTUSei1Arw7eHWCD0jumv2rb7imjWIlWABXMAIAAAAABSP8t6ya0SyCphXMwnru6ZUDXWElN0NfBvEOhDvW9bJQVjACAAAAAAbfZdoqMQ71hNIsycNHl6JpSmAFDPSfgzdWkGqFZNoScAAzcwAH0AAAAFZAAgAAAAAD/FIrGYFDjyYmVb7oTMVwweWP7A6F9LnyIuNO4MjBnXBXMAIAAAAACIZgJCQRZu7NhuNMyOqCn1tf+DfU1qm10TPCfj5JYV3wVjACAAAAAAZCaO06/DjOBjNrTxB8jdxM7X4XvNu9QTa2Y00kZJwgEAAzcxAH0AAAAFZAAgAAAAAHIkVuNDkSS1cHIThKc/O0r2/ubaABTOi8Q1r/dvBAsEBXMAIAAAAADdHYqchEiJLM340c3Q4vJABmmth3+MKzwLYlsG6GS7sQVjACAAAAAA4byh4Re83zUZxvEH4iJSsnFei+ohsWxjSpB5YoAPawIAAzcyAH0AAAAFZAAgAAAAAJmoDILNhC6kn3masElfnjIjP1VjsjRavGk1gSUIjh1NBXMAIAAAAAD97Ilvp3XF8T6MmVVcxMPcdL80RgQ09UoC6PnoOvZ1IQVjACAAAAAAx9wkqPvtzrL9EFDmFvktI129ti8IAdHn0oudubGPWAAAAzczAH0AAAAFZAAgAAAAAI5bm3YO0Xgf0VT+qjVTTfvckecM3Cwqj7DTKZXf8/NXBXMAIAAAAAD/m+h8fBhWaHm6Ykuz0WX1xL4Eme3ErLObyEVJf8NCywVjACAAAAAAiGdTXD22l1zDxHeF4NXUTiBnNTsdpzJRwM6riTPuOogAAzc0AH0AAAAFZAAgAAAAANqo4+p6qdtCzcB4BX1wQ6llU7eFBnuu4MtZwp4B6mDlBXMAIAAAAAAGiz+VaukMZ+6IH4jtn4KWWdKK4/W+O+gRioQDrfzpMgVjACAAAAAAl/wlBjSJW/hKkll8HSBCGw4Ce1pJ5rZuhVE09hKq4jQAAzc1AH0AAAAFZAAgAAAAAPrFXmHP2Y4YAm7b/aqsdn/DPoDkv7B8egWkfe23XsM1BXMAIAAAAAAGhwpKAr7skeqHm3oseSbO7qKNhmYsuUrECBxJ5k+D2AVjACAAAAAAKSsxERy2OBhb5MiH9/H2BET2oeU6yVihiAoWi/16FroAAzc2AH0AAAAFZAAgAAAAABzjYxwAjXxXc0Uxv18rH8I3my0Aguow0kTwKyxbrm+cBXMAIAAAAADVbqJVr6IdokuhXkEtXF0C2gINLiAjMVN20lE20Vmp2QVjACAAAAAACJ+VAwl9X88mjC76yzkWeL4K6AamNYjbNkpS9B6fQE4AAzc3AH0AAAAFZAAgAAAAAFMm2feF2fFCm/UC6AfIyepX/xJDSmnnolQIBnHcPmb5BXMAIAAAAABLI11kFrQoaNVZFmq/38aRNImPOjdJh0Lo6irI8M/AaAVjACAAAAAAZFgYh4nV8YN+/AAqe/QhKDkNBPg8KraCG7y3bOzhPV4AAzc4AH0AAAAFZAAgAAAAAJvXB3KyNiNtQko4SSzo/9b2qmM2zU9CQTTDfLSBWMgRBXMAIAAAAAAvjuVP7KsLRDeqVqRziTKpBrjVyqKiIbO9Gw8Wl2wFTAVjACAAAAAARuBn3JdQ8so7Gy0XVH0CtlvtRoJWhSrJP6eRIqbyWh8AAzc5AH0AAAAFZAAgAAAAAPGdcxDiid8z8XYnfdDivNMYVPgBKdGOUw6UStU+48CdBXMAIAAAAAARj6g1Ap0eEfuCZ4X2TsEw+Djrhto3fA5nLwPaY0vCTgVjACAAAAAATxLY6SkcLPaoOBJpQsggEqoxgJgiY9seP3fBQM05PckAAzgwAH0AAAAFZAAgAAAAAP5rGPrYGt3aKob5f/ldP0qrW7bmWvqnKY4QwdDWz400BXMAIAAAAADTQkW2ymaaf/bhteOOGmSrIR97bAnJx+yN3yMj1bTeewVjACAAAAAA6O+wIMt3xMITuCxVrqOCNBPX5F122G+/Is+EYkzUvVYAAzgxAH0AAAAFZAAgAAAAAAlz6wJze5UkIxKpJOZFGCOf3v2KByWyI6NB6JM9wNcBBXMAIAAAAABUC7P/neUIHHoZtq0jFVBHY75tSFYr1Y5S16YN5XxC1QVjACAAAAAANv97jMd9JmZ7lMeCzK7jaZHZYsZJNl6N9g9WXzd2om0AAzgyAH0AAAAFZAAgAAAAAFJ8AtHcjia/9Y5pLEc3qVgH5xKiXw12G9Kn2A1EY8McBXMAIAAAAAAxe7Bdw7eUSBk/oAawa7u
icTEDgXLymRNhBy1LAxhDvwVjACAAAAAAtOUZ2NeUWwESpq95O92dhqYBt8RtFnjT43E7k9mQF3oAAzgzAH0AAAAFZAAgAAAAAO8uwQUaKFb6vqR3Sv3Wn4QAonC2exOC9lGG1juqP5DtBXMAIAAAAABZf1KyJgQg8/Rf5c02DgDK2aQu0rNCOvaL60ohDHyY+gVjACAAAAAA5nAWj1Ek8p0MLYZEB3yoVFDfBYZ+/ZpIo71u+W9hprcAAzg0AH0AAAAFZAAgAAAAAE8YtqyRsGCeiR6hhiyisR/hccmK4nZqIMzO4lUBmEFzBXMAIAAAAAC1UYOSKqAeG1UJiKjWFVskRhuFKpj9Ezy+lICZvFlN5AVjACAAAAAA37oP30mTzVftI2FL9Uwyls/jqLqbmRDQk7p7nlx44uYAAzg1AH0AAAAFZAAgAAAAAPhCrMausDx1QUIEqp9rUdRKyM6a9AAx7jQ3ILIu8wNIBXMAIAAAAACmH8lotGCiF2q9VQxhsS+7LAZv79VUAsOUALaGxE/EpAVjACAAAAAAhVIkl8p19VZ0gpg32s++Jda3qsVSVB5tV4CKrtjhE3IAAzg2AH0AAAAFZAAgAAAAAOBi/GAYFcstMSJPgp3VkMiuuUUCrZytvqYaU8dwm8v2BXMAIAAAAACEZSZVyD3pKzGlbdwlYmWQhHHTV5SnNLknl2Gw8IaUTQVjACAAAAAAriBex2kK//RPhyVpCYJDBng4l5w8jD3m8BF7dVAP0p8AAzg3AH0AAAAFZAAgAAAAAIQuup+YGfH3mflzWopN8J1X8o8a0d9CSGIvrA5HOzraBXMAIAAAAADYvNLURXsC2ITMqK14LABQBI+hZZ5wNf24JMcKLW+84AVjACAAAAAAGeUzdG2kQrx5XypXJezZmPVzMYuqYZw7Bhbl4EPT0SMAAzg4AH0AAAAFZAAgAAAAAKsh0ADyOnVocFrOrf6MpTrNvAj8iaiE923DPryu124gBXMAIAAAAADg24a8NVE1GyScc6tmnTbmu5ulzO+896fE92lN08MeswVjACAAAAAAbO+DEBY3STVMQN7CbxmHDUBYrDR+80e797u/VmiK4vcAAzg5AH0AAAAFZAAgAAAAAKkVC2Y6HtRmv72tDnPUSjJBvse7SxLqnr09/Uuj9sVVBXMAIAAAAABYNFUkH7ylPMN+Bc3HWX1e0flGYNbtJNCY9SltJCW/UAVjACAAAAAAHlE3RLKcXpXto7Mr8nRgzEsmhjfGh4vcgPxashCSMg4AAzkwAH0AAAAFZAAgAAAAADDggcwcb/Yn1Kk39sOHsv7BO/MfP3m/AJzjGH506Wf9BXMAIAAAAAAYZIsdjICS0+BDyRUPnrSAZfPrwtuMaEDEn0/ijLNQmAVjACAAAAAA1c/sy3255NofBQrnBNSmVkSzMgsGPaaOUJShddVrnuQAAzkxAH0AAAAFZAAgAAAAAEWY7dEUOJBgjOoWVht1wLehsWAzB3rSOBtLgTuM2HC8BXMAIAAAAAAAoswiHRROurjwUW8u8D5EUT+67yvrgpB/j6PzBDAfVwVjACAAAAAALBvQumG8m7/bzJjGWN2cHSAncdN8jMtOSmEhEhGom24AAzkyAH0AAAAFZAAgAAAAAPZaapeAUUFPA7JTCMOWHJa9lnPFh0/gXfAPjA1ezm4ZBXMAIAAAAACmJvLY2nivw7/b3DOKH/X7bBXjJwoowqb1GtEFO3OYgAVjACAAAAAAGCDHuUJusGHKQ+r9nrFChmUUsRcqZKPGsRiLSk5gbFcAAzkzAH0AAAAFZAAgAAAAAK00u6jadxCZAiA+fTsPVDsnW5p5LCr4+kZZZOTDuZlfBXMAIAAAAAAote4zTEYMDgaaQbAdN8Dzv93ljPLdGjJzvnRn3KXgtQVjACAAAAAApZ6l+OrocOqgFek7WOqOP9JruTWZ+iW+5zdL3DZwzhkAAzk0AH0AAAAFZAAgAAAAAFbgabdyymiEVYYwtJSWa7lfl/oYuj/SukzJeDOR6wPVBXMAIAAAAADAFGFjS1vPbN6mQEhkDYTD6V2V23Ys9gUEUMGNvMPkaAVjACAAAAAAeICKly5Xtwmfd4JFD+5e1UWpu4V3KoRim7jeBICDs+UAAzk1AH0AAAAFZAAgAAAAABNMR6UBv2E627CqLtQ/eDYx7OEwQ7JrR4mSHFa1N8tLBXMAIAAAAAAxH4gucI4UmNVB7625C6hFSVCuIpJO3lusJlPuL8H5EQVjACAAAAAA0/uh1hrAlGGna/njCVaCqBwubxkifzF0zjCDQrIJOtUAAzk2AH0AAAAFZAAgAAAAAG8cd6WBneNunlqrQ2EmNf35W7OGObGq9WL4ePX+LUDmBXMAIAAAAAAjJ2+sX87NSis9hBsgb1QprVRnO7Bf+GObCGoUqyPE4wVjACAAAAAAAoJ4Ufdq45T9Aun5FupHlaBCIUsmUn6dXgV9KorpFikAAzk3AH0AAAAFZAAgAAAAAFWOUGkUpy8yf6gB3dio/aOfRKh7XuhvsUj48iESFJrGBXMAIAAAAAAY7sCDMcrUXvNuL6dO0m11WyijzXZvPIcOKob6IpC4PQVjACAAAAAAO/QFjczoznH95Iu0YEVMsU1GA1yxSGL8bcwSweNzAtkAAzk4AH0AAAAFZAAgAAAAAGUrIdKxOihwNmo6B+aG+Ag1qa0+iqdksHOjQj+Oy9bZBXMAIAAAAABwa5dbI2KmzBDNBTQBEkjZv4sPaeRkRNejcjdVymRFKQVjACAAAAAAUtdJucZoQvvPJiy65RonQrxBCcvtfHslpbgLbtWirCYAAzk5AH0AAAAFZAAgAAAAAOx89xV/hRk64/CkM9N2EMK6aldII0c8smdcsZ46NbP8BXMAIAAAAADBF6tfQ+7q9kTuLyuyrSnDgmrdmrXkdhl980i1KHuGHgVjACAAAAAAVc5njbLX1afpsuG662U/OBo4LanEk06lKbQtd95fswgAAzEwMAB9AAAABWQAIAAAAADJDdC9aEFl4Y8J/awHbnXGHjfP+VXQilPHJg7ewaJI7AVzACAAAAAAE+tqRl6EcBMXvbr4GDiNIYObTsYpa1n6BJk9EjIJVicFYwAgAAAAAITXOaLyGsYLUvOemQXxvnAHBwM/t3sMwfQus2aGoII+AAMxMDEAfQAAAAVkACAAAAAA3F9rjEKhpoHuTULVGgfUsGGwJs3bISrXkFP1v6KoQLgFcwAgAAAAAIBf0tXw96Z/Ds0XSIHX/zk3MzUR/7WZR/J6FpxRWChtBWMAIAAAAABzRshE3O7JzVjSGQSabvFXpen8sGUCAyr8hIIevROrjgADMTAyAH0AAAAFZAAgAAAAAOYIYoWkX7dGuyKfi3XssUlc7u/gWzqrR9KMkikKVdmSBXMAIAAAAABVF2OYjRTGi9Tw8XCAwZWLpX35Yl271TlNWp6N/nROhAVjACAAAAAAyGLbmHhe8eTQC8L2eDRcezUWULLZ9E8DPic7Mfp8/+4AAzEwMwB9AAAABWQAIAAAAACMtPm12YtdEA
vqu6Eji1yuRXnu1RJP6h0l7pH3lSH4MwVzACAAAAAAENyCFfyUAh1veQBGx+cxiB7Sasrj41jzCGflZkB5cRMFYwAgAAAAAA92s3DQ3aIVOrRzusqU+xdK7cnQGCDJiyLF+su3F1ZDAAMxMDQAfQAAAAVkACAAAAAAvlI4lDcs6GB1cnm/Tzo014CXWqidCdyE5t2lknWQd4QFcwAgAAAAAD60SpNc4O2KT7J0llKdSpcX1/Xxs97N715a1HsTFkmBBWMAIAAAAABR3y/ZE3VZqBhqfUttX4bzW9wkpDoexfYZIG2fMwftcQADMTA1AH0AAAAFZAAgAAAAAKl8zcHJRDjSjJeV/WvMxulW1zrTFtaeBy/aKKhadc6UBXMAIAAAAADBdWQl5SBIvtZZLIHszePwkO14W1mQ0izUk2Ov21cPNAVjACAAAAAAHvQLJ2X4ncjVv1BsOd/jbVouRPwf4222WlGSzPyAfnsAAzEwNgB9AAAABWQAIAAAAABb6LXDWqCp1beQgQjj8I3sRTtFhlrmiBi+h/+ikmrvugVzACAAAAAA9stpgTecT7uTyaGNs3K9Bp0A7R0QaIAOfscyMXHBPX8FYwAgAAAAANTp7TwMatTSeLH3GvsrB3IOvJo+0B8HRGUFcRzJVesdAAMxMDcAfQAAAAVkACAAAAAA97NTT+81PhDhgptNtp4epzA0tP4iNb9j1AWkiiiKGM8FcwAgAAAAAKPbHg7ise16vxmdPCzksA/2Mn/qST0L9Xe8vnQugVkcBWMAIAAAAACVl3ds8xRdGFPAtZ+WtC4ojC1q5OzB6dSJoOLaJsdhhgADMTA4AH0AAAAFZAAgAAAAAMqpayM2XotEFmm0gwQd9rIzApy0X+7HfOhNk6VU7F5lBXMAIAAAAACJR9+q5T9qFHXFNgGbZnPubG8rkO6cwWhzITQTmd6VgwVjACAAAAAAEEOCUo+ihRGl1kuHlabWBWUFyJPAXSXzpQB4od57cMEAAzEwOQB9AAAABWQAIAAAAADQnslvt6Hm2kJPmqsTVYQHE/wWeZ4bE1XSkt7TKy0r1gVzACAAAAAA8URTA4ZMrhHPvlp53TH6FDCzS+0+61qHm5XK6UiOrKEFYwAgAAAAANSmECEsZgXBpoAzAIwaU3H0K/6HIutCn+ELRGBFzykkAAMxMTAAfQAAAAVkACAAAAAAhujlgFPFczsdCGXtQ/002Ck8YWQHHzvWvUHrkbjv4rwFcwAgAAAAALbV0lLGcSGfE7mDM3n/fgEvi+ifjl7WZ5b3aqjDNvx9BWMAIAAAAACKzGyyRT/3KSmGlKMHpswxj7pNu3rCmOETZ2DUhQsCBgADMTExAH0AAAAFZAAgAAAAABm/6pF96j26Jm7z5KkY1y33zcAEXLx2n0DwC03bs/ixBXMAIAAAAAD01OMvTZI/mqMgxIhA5nLs068mW+GKl3OW3ilf2D8+LgVjACAAAAAAnPa8PTfcaSSAdzNAC54IMTicbShbtt/cSnFHz7u7g8wAAzExMgB9AAAABWQAIAAAAACfw9/te4GkHZAapC9sDMHHHZgmlTrccyJDPFciOMSOcwVzACAAAAAAIIC1ZpHObvmMwUfqDRPl4C1aeuHwujM1G/yJbvybMNAFYwAgAAAAAKeagivKdHk7wAFEPHyDhsm9mDNWH5UPB+oIIlmfbzSOAAMxMTMAfQAAAAVkACAAAAAAkxHJRbnShpPOylLoDdNShfILeA1hChKFQY9qQyZ5VmsFcwAgAAAAAKidrY+rC3hTY+YWu2a7fuMH2RD/XaiTIBW1hrxNCQOJBWMAIAAAAADH8s64PC+lo8Ul4oujBlb/irtJSwu12V0HbvNzj/9qUQADMTE0AH0AAAAFZAAgAAAAAByuYl8dBvfaZ0LO/81JW4hYypeNmvLMaxsIdvqMPrWoBXMAIAAAAABNddwobOUJzm9HOUD8BMZJqkNCUCqstHZkC76FIdNg9AVjACAAAAAA4SEPY+qEZ2dCu/9485IEVybiM3Z7szXYM+izxklNm14AAzExNQB9AAAABWQAIAAAAABxMy7X5hf7AXGDz3Y/POu1ZpkMlNcSvSP92NOO/Gs7wAVzACAAAAAAHJshWo2T5wU2zvqCyJzcJQKQaHFHpCpMc9oWBXkpUPoFYwAgAAAAANeYm2aAT9f6T9uwQyH4sw2DUQyKTpo/CHt+Bb67ao4TAAMxMTYAfQAAAAVkACAAAAAAlqbslixl7Zw3bRlibZbe/WmKw23k8uKeIzPKYEtbIy0FcwAgAAAAAHEKwpUxkxOfef5HYvulXPmdbzTivwdwrSYIHDeNRcpcBWMAIAAAAAASUKCgxGu2U42qfKiyMimLPiZBMurf5v8lQJUSrhAWIAADMTE3AH0AAAAFZAAgAAAAAMXrXx0saZ+5gORmwM2FLuZG6iuO2YS+1IGPoAtDKoKBBXMAIAAAAADIQsxCr8CfFKaBcx8kIeSywnGh7JHjKRJ9vJd9x79y7wVjACAAAAAAPzOTJ088k9QdGUpZ1ubUdkw3pTGkNF8bui4a9wqeo08AAzExOAB9AAAABWQAIAAAAAAm83FA9yDUpwkbKTihe7m53u+DivS9BU2b4vQMtCVQ2AVzACAAAAAAz3m1UB/AbZPa4QSKFDnUgHaT78+6iGOFAtouiBorEgEFYwAgAAAAAHuM0gIDED47c9rdQ4MVtVzpVfn5zTF7eokO4lew4EM+AAMxMTkAfQAAAAVkACAAAAAAJRzYK0PUwr9RPG2/7yID0WgcTJPB2Xjccp5LAPDYunkFcwAgAAAAAIIh24h3DrltAzNFhF+MEmPrZtzr1PhCofhChZqfCW+jBWMAIAAAAAD6LppOeSiLHFI7EOBAz9pZPKU7LWbRWIINTlqw4zmJ5gADMTIwAH0AAAAFZAAgAAAAAHSaHWs/dnmI9sc7nB50VB2Bzs0kHapMHCQdyVEYY30TBXMAIAAAAACkV22lhEjWv/9/DubfHBAcwJggKI5mIbSK5L2nyqloqQVjACAAAAAA1I5pLbnlFf/EfJlhE0RFIioDjzKrWNh4TO8H/ksrbMcAAzEyMQB9AAAABWQAIAAAAAAC/I4TQRtCl12YZmdGz17X4GqSQgfwCPgRBwdHmdwu+QVzACAAAAAAx8f3z2ut/RAZhleari4vCEE+tNIn4ikjoUwzitfQ588FYwAgAAAAADX1qVlsvYru6YDBMtmlQoIBhLSbVykBtft8wn/A+ohJAAMxMjIAfQAAAAVkACAAAAAADGB5yU2XT0fse/MPWgvBvZikVxrl5pf3S5K1hceKWooFcwAgAAAAAIxTmlLHMjNaVDEfJbXvRez0SEPWFREBJCT6qTHsrljoBWMAIAAAAAD+RiUk9IWfaDWXtzLp2NX/vCCeU9amFnQgpJjy56dQXQADMTIzAH0AAAAFZAAgAAAAABmO7QD9vxWMmFjIHz13lyOeV6vHT6mYCsWxF7hb/yOjBXMAIAAAAACT9lmgkiqzuWG24afuzYiCeK9gmJqacmxAruIukd0xEAVjA
CAAAAAAyiZxtuOZQNSit4wFMJS3jnqLbDzyJ0OtDTKs6r0EO0cAAzEyNAB9AAAABWQAIAAAAAAfPUoy7QyZKhIIURso+mkP9qr1izbjETqF5s22GwjCjAVzACAAAAAAvLMsIDQ/go4VUxeh50UHmsvMvfx51cwyONnRD2odvC0FYwAgAAAAAMwY/KvSIOLoSHOq4TQ1V9ZSCPZc34SdPooPL4gYXa7UAAMxMjUAfQAAAAVkACAAAAAAE3ZqUar0Bq2zWbARE0bAv98jBlK9UJ73/xcwdMWWlSkFcwAgAAAAAK4M+MmC+9sFiFsumMyJZQKxWmmJiuG9H7IzKw083xxkBWMAIAAAAACpywsi9vc979ApgH3BDzph7Y4VIAkQhji97sswzdLAYwADMTI2AH0AAAAFZAAgAAAAAMo8FJiOq63cAmyk2O7eI7GcbQh/1j4RrMTqly3rexftBXMAIAAAAADjVmpd0WiRGTw/gAqEgGolt2EI7Csv14vKdmYoMD0aAgVjACAAAAAAEPmYjDZnMZdkaYlLZLlRgist9kCrSQe0mSuKxPwwYbQAAzEyNwB9AAAABWQAIAAAAACu5IGaIx7A3Jvly/kzlCsSA4s3iJwuIl8jEdRH0k93NwVzACAAAAAA9NRUyxYE+t0Xyosyt6vIfMFW/vBoYg6sR+jBNs4JAxIFYwAgAAAAAKMbsr4JCCodHmkBvfhOe7BJigKiJik95wLeu3PjLSpXAAMxMjgAfQAAAAVkACAAAAAALkRy0GJInXYLA+cgjs6Myb0a+Gu9hgXhHvhLNoGWfckFcwAgAAAAANbALyt9zCSvwnLaWCd2/y2eoB7qkWTvv1Ldu8r40JPuBWMAIAAAAABi0dME7AUSoeMv4yIgnGMgrX9G+Uk6HV+I9/9zzABcZwADMTI5AH0AAAAFZAAgAAAAAGoUK/DSWhT8LZhszSUqDbTrp8cSA7rdqmADKL+MILtTBXMAIAAAAABHnEE9bVa6lvhfhEMkkV2kzSSxH/sMW/FIJuw3CzWs6wVjACAAAAAAagOaxIb7co+HxAXhfI9ahHLUNqmGuN/4JxeR9sm6PdkAAzEzMAB9AAAABWQAIAAAAAC922ZDQE3h2fQKibGMZ9hV0WNlmrPYYSdtaSyYxsWYqgVzACAAAAAAagMovciKK6WVjIc2cCj8nK5O/gVOFFVeVAJpRp89tmQFYwAgAAAAAAMsWoODZkTvKSNWzloogbp/AiFFspENp49RwKjWpMLHAAMxMzEAfQAAAAVkACAAAAAAtK+JqX3K/z2txjAU15DgX4y90DS2YLfIJFolCOkJJJwFcwAgAAAAAMnR5V7gfX7MNqqUdL5AkWlkhyFXaBRVNej+Rcn8lrQkBWMAIAAAAADOmDHuXwsu9uwKYYSN5XIb/TG3K5DB5aci/X1ATKTaSwADMTMyAH0AAAAFZAAgAAAAAAeGrIMK/bac6kPczxbvRYqKMkcpeI2FjdMpD91FDWIvBXMAIAAAAAAix62z1LeS8yvSXCl5gHSIomjyx76fF3S1lp9k900hygVjACAAAAAAUK1Gzlm0meDhYPL7ZgKaCD0qz7Zb5rtnPn1EEW3UZxoAAzEzMwB9AAAABWQAIAAAAACphf298InM0Us4HT8o1W1MGw0D/02vd7Jh+U0h7qaFaQVzACAAAAAAFXtk7YpqsOJxsqGWSIL+YcBE96G3Zz9D31gPqDW94y8FYwAgAAAAAEA5mnfyccI1SjyYftq3qCuORs/NxzzqUA+fxn3FRjVsAAMxMzQAfQAAAAVkACAAAAAATLDS2cuDVM3yDMuWNgk2iGKBTzPpfJMbvxVOSY39ZfcFcwAgAAAAAPT5wRi2cLHIUflXzm6EQB/m7xdThP80ir1VV/JBBqvxBWMAIAAAAABTj7i+CxxNVkbnAkSYga/v2X3/NH3nD/FRiOo2FPkzvAADMTM1AH0AAAAFZAAgAAAAAJNjExiZVX7jfFGfYpQu16qxLN0YPqVU/5CQ/Y67YSinBXMAIAAAAABMpm2+6KrkRUlXzQoMPHrQmIO6dkQz66tYdfTeA3dKqQVjACAAAAAATkNGECGUFzo34ItVnTEmxbe8dbde8QzMgEEdjMhRLz8AAzEzNgB9AAAABWQAIAAAAABlFkYtLCx901X6QVVMkSn6Z7k30UF4xHaA0OZJJ9bdyQVzACAAAAAATez+F9GHcGzTp7jjv4feboUNb8JCkIp4EqcPFisnq7MFYwAgAAAAAOHfn3eVRhSWn61AfZuAnCGleSLluJ3sF9IFGUWrCsBaAAMxMzcAfQAAAAVkACAAAAAA8NJKN0IxZnruhswGQkiruv8Ih0EMwDcSZx/Xasup9dkFcwAgAAAAAKaJZRxzA+Igeydvuk6cSwUHXcrmT4PjhuPu//FslpdnBWMAIAAAAAC3Cs/mKg+sbU/BC9QWFglOau6FWAP1sJqqSZOEOdzh+gADMTM4AH0AAAAFZAAgAAAAABHxHdEClz7hbSSgE58+dWLlSMJnoPz+jFxp4bB1GmLQBXMAIAAAAAD3nSvT6aGD+A110J/NwEfp0nPutlmuB5B+wA3CC3noGAVjACAAAAAA56lpvjMjFzR6NMGiSF1es1X01dwINWqaxyq3UUL8XJ0AAzEzOQB9AAAABWQAIAAAAAC/o8qW/ifk3KuJ01VFkyNLgQafxB5/bGs2G5VyyVafOwVzACAAAAAA1bMqAFGDHSl6BYNLbxApvkAv2K1/oafywiX0MDz1dGUFYwAgAAAAAMjJrrd6E5DyUuXkWZsAqo020Qvom6xqAluQLod5SmtdAAMxNDAAfQAAAAVkACAAAAAAY2V8I1bz3a1AxTtmED6UhdhA09huFkuuEX8R+d/WDPUFcwAgAAAAAPTVoNRiI76tcRKqd+JBBVyy4+YcKST42p0QX2BtmQ2VBWMAIAAAAAD0kQXxOWWOfIzqhJk5Rnvp/h5VXcGxxLZ8HMvVVH++YQADMTQxAH0AAAAFZAAgAAAAAO2kSsW0WGN9AOtK4xK2SHrGhWiaAbMEKT4iZkRpaDN/BXMAIAAAAABKGzQcPM8LT2dwOggxoWjv/1imYWabbG/G4kBw8OWaxAVjACAAAAAAT3sD7PTrAPtOtiSRrG4IKmSNr/uWgX8TvIyZpZzj4wkAAzE0MgB9AAAABWQAIAAAAAAiCwzNEEaH/mDam68IdDftnhthyUFdb+ZCNSBQ91WlHQVzACAAAAAA7tHyHcxCzmbJeFYZyPm4mEgkTGKOvwY4MX82OvH0Jn8FYwAgAAAAAP4A3nd470CwvaGzyJLfzbrw7ePJV35V3Cw6tuiPFlsCAAMxNDMAfQAAAAVkACAAAAAAGuCHVNJSuoVkpPOnS5s89GuA+BLi2IPBUr2Bg1sWEPIFcwAgAAAAAEl1gncS5/xO7bQ/KQSstRV3rOT2SW6nV92ZANeG2SR6BWMAIAAAAADi6/zM3xSBXEfpRpvEdeycRDl08SmwwLey72qgZaY7FwADMTQ0AH0AAAAFZAAgAAAAAMfxz7gEaoCdPvXr
ubDhCZUS0ARLZc1svgbXgMDlVBPgBXMAIAAAAAB6a5dDA3fuT5Vz2KvAcbUEFX/+B7Nw2p1QqbPoQ5TTuAVjACAAAAAAB4Kp5ewWpFMJ2T+QCrfw2eJV70L4M+GM8khV1JwqOqQAAzE0NQB9AAAABWQAIAAAAACnYsqF/VzmjIImC9+dqrHO1TM6lJ6fRwM0mM6Wf6paOwVzACAAAAAA5tgZzch8uDCR1ky3SllVaKVpxAlbrhvlNDTazZZRZOAFYwAgAAAAALDJDWkmExE4L8hf2CDNLuF+dgivUGXb7ZvBp90EALm+AAMxNDYAfQAAAAVkACAAAAAAEC0sIVmadtW4YMuRXH7RpAhXclsd+3bmqGXCMeaT014FcwAgAAAAABPpXh0uzpsJJB+IRUNajmMB9WGwswfpw5T9xk3Xj6ANBWMAIAAAAABauth7HsI90juToIGY6149TbrxP8Emoa+5Izilj9zeWQADMTQ3AH0AAAAFZAAgAAAAABaIB3n49Xm9cOafSrQsE0WCcYp8rMIO/qVwIlMF5YLRBXMAIAAAAAC9EyWJV3xOu9bzgdJ/yX+ko7qLf1u3AxNMataW2C9EzQVjACAAAAAAy7IAYSgbqP7CyvurHMoEq1eO08TIPNO/XcS1L5RKXHkAAzE0OAB9AAAABWQAIAAAAAA5rZItA/cocRnngYqcJ3nBXQ+l688aKz3EQyLbYYunPAVzACAAAAAAwKyA+L7TgxztPClLrIMk2JXR+w7c04N3ZOqPgjvrIvsFYwAgAAAAABaAwqPDgkTaz4888/3PeF64+vtHbLtAZgFgEurWT5kbAAMxNDkAfQAAAAVkACAAAAAA3htn7oHJ0YYpIrs+Mzyh85Ys67HwAdv5LQl1mCdoMWkFcwAgAAAAAEHjCtNNLenHuSIYux6ezAHsXDaj2DlTF67ToDhDDe6HBWMAIAAAAAAVwUZIMifuPtmiFSEctLt+bOFo1T4fgGiWNsZAP0ddeQADMTUwAH0AAAAFZAAgAAAAAEnt18Km/nqggfIJWxzTr9r3hnXNaueG6XO9A5G11LnGBXMAIAAAAAD7QxzGMN/ard5TfFLecE6uusMmXG2+RBsBR+/NCQHUwAVjACAAAAAAIPTDniUskKj5mEzULdlKtIDquwjcqkwunJyhmBazNNcAAzE1MQB9AAAABWQAIAAAAAAVAKK/GoY8AACu/hyMpO4hdLq6JnEyWNzkyci9sbaD/wVzACAAAAAA2HmeqpMlvvBpV2zQTYIRmsc4MFlfHRwLof0ycJgMg/MFYwAgAAAAACHHCAszhxijRN9Es+XRQxXo7JQ6g2qM6f5sIdMy0VKbAAMxNTIAfQAAAAVkACAAAAAAhHyq1GQC/GiMwpYjcsfkNxolJ10ARKjIjfkW1Wipzi0FcwAgAAAAAD/uaGWxTDq87F8XZ6CrFI+RNa8yMqfSZdqK00Kj833BBWMAIAAAAAD1mJD7i29l2xloVHwS1n0CbkyodWeFfZM1KF1r4HqNIgADMTUzAH0AAAAFZAAgAAAAABAJJxHoZD+MQBWqm9UM9Dd3z5ZohIZGWRaRVRsMptKQBXMAIAAAAADrE/ca+gqj/SH4oao4wE4qn2ovoTydzcMbDbrfnUs3zAVjACAAAAAAAIUpF9mApVCMCckE7S+K9RL6TPtlTTYuzUsj9E6oIpUAAzE1NAB9AAAABWQAIAAAAAAv01wz7VG9mTepjXQi6Zma+7b/OVBaKVkWNbgDLr1mFgVzACAAAAAA0I5sxz8r6wkCp5Tgvr+iL4p6MxSOq5d3e1kZG+0b7NkFYwAgAAAAAKwOaUTAOw6Y1F5vFrbDf8yVua5Fm6hKhkdIrzejuaatAAMxNTUAfQAAAAVkACAAAAAAWXecRwxSon68xaa9THXnRDw5ZfzARKnvvjTjtbae6T0FcwAgAAAAAPh0UfUMEo7eILCMv2tiJQe1bF9qtXq7GJtC6H5Va4fIBWMAIAAAAAASanx14WR2rmomuZ2x/nuD8j75pYzz+ZAt/v7uRcoOEQADMTU2AH0AAAAFZAAgAAAAAEY5WL8/LpX36iAB1wlQrMO/xHVjoO9BePVzbUlBYo+bBXMAIAAAAABoKcpadDXUARedDvTmzUzWPe1jTuvD0z9oIcZmKuiSXwVjACAAAAAAWhdUWP186dYAIDx6RtC0o/lzwYNvYBXm4RWFMbcU3YkAAzE1NwB9AAAABWQAIAAAAADnJnWqsfx0xqNnqfFGCxIplVu8mXjaHTViJT9+y2RuTgVzACAAAAAAWAaSCwIXDwdYxWf2NZTly/iKVfG/KDjHUcA1BokN5sMFYwAgAAAAAPS3GDNsHgCuTgEWTHiEStoY0VMzlopZ8L7wPUquK7ayAAMxNTgAfQAAAAVkACAAAAAAsPG7LaIpJvcwqcbtfFUpIjj+vpNj70Zjaw3eV9T+QYsFcwAgAAAAAJQ71zi0NlCyY8ZQs3IasJ4gB1PmWx57HpnlCf3+hmhqBWMAIAAAAABmDfQAQeyeutCB+wSn4RmErwYEkYpyBeCYA+3iutFzAQADMTU5AH0AAAAFZAAgAAAAAAGiSqKaQDakMi1W87rFAhkogfRAevnwQ41onWNUJKtuBXMAIAAAAAASgiDpXfGh7E47KkOD8MAcX8+BnDShlnU5JAGdnPdqOAVjACAAAAAAucFM6KF1YHEhH9vn16ox+VSqNEykMGaJzhLPsLI7qk8AAzE2MAB9AAAABWQAIAAAAAB7L4EnhjKA5xJD3ORhH2wOA1BvpnQ+7IjRYi+jjVEaJAVzACAAAAAAuhBIm0nL3FJnVJId+7CKDASEo+l2E89Z9/5aWSITK4AFYwAgAAAAALxpb5qDek038/wz4E//KIrwwo0voaqwlB7cTXSQ44EHAAMxNjEAfQAAAAVkACAAAAAAaROn1NaDZFOGEWw724dsXBAm6bgmL5i0cki6QZQNrOoFcwAgAAAAANVT8R6UvhrAlyqYlxtmnvkR4uYK/hlvyQmBu/LP6/3ZBWMAIAAAAAC1ONheOA57vNof7iZM3b82xk1abqT0bppzjQnHCmnUIwADMTYyAH0AAAAFZAAgAAAAADqSR5e0/Th59LrauDA7OnGD1Xr3H3NokfVxzDWOFaN7BXMAIAAAAACt30faNwTWRbvmykDpiDYUOCwA6QDbBBYBFWS7rdOB4AVjACAAAAAAQZlvU/gzOwW2rcr5+W43Q6EkfX8oX1TpRJWEAZAULx4AAzE2MwB9AAAABWQAIAAAAADmtb1ZgpZjSeodPG/hIVlsnS8hoRRwRbrTVx89VwL62AVzACAAAAAAi38e1g6sEyVfSDkzZbaZXGxKI/zKNbMasOl2LYoWrq8FYwAgAAAAAFWkHkxPA28FjeUjeRILC0tX8OJcanhPP68QxM5s1kcMAAMxNjQAfQAAAAVkACAAAAAAf/f7AWVgBxoKjr7YsEQ4w/fqSvuQWV2HMiA3rQ7ur0sFcwAgAAAAADkkeJozP6FFhUdRIN74H4UhIHue+eVbOs1NvbdWYFQrBWMAIAA
AAADS3qKdAEzPtGMFpsisEhnOfF0zfxmLXjHKKaNCzX4PhQADMTY1AH0AAAAFZAAgAAAAAI2WEOymtuFpdKi4ctanPLnlQud+yMKKb8p/nfKmIy56BXMAIAAAAADVKrJmhjr1rfF3p+T+tl7UFd1B7+BfJRk0e7a4im7ozgVjACAAAAAAxeqPjcgDwae+AgEXg3hU118dHoy6rcHd6jAJrGik+EcAAzE2NgB9AAAABWQAIAAAAAAuHU9Qd79hjyvKOujGanSGDIQlxzsql8JytTZhEnPw+AVzACAAAAAAjF2gV/4+sOHVgDd/oR5wDi9zL7NGpGD+NsEpGXy/a4QFYwAgAAAAAIFYquIAyUn3D2peptolq3s8oIgSLEIctqBYx6qEeQTxAAMxNjcAfQAAAAVkACAAAAAA7dQDkt3iyWYCT94d7yqUtPPwp4qkC0ddu+HFdHgVKEkFcwAgAAAAANuYvtvZBTEq4Rm9+5eb7VuFopowkrAuv86PGP8Q8/QvBWMAIAAAAABBO8poBKW/RWjdHBARy3Rzghz3frQIZEPE/nSKWYlVPgADMTY4AH0AAAAFZAAgAAAAAAwnZSDhL4tNGYxlHPhKYB8s28dY5ScSwiKZm3UhT8U3BXMAIAAAAABDoY6dhivufTURQExyC9Gx3ocpl09bgbbQLChj3qVGbgVjACAAAAAA4JWOs1XAUgX3rGBBGEDDIC9/i9khJ+FgIcwfvj/SnS0AAzE2OQB9AAAABWQAIAAAAAC2VuRdaC4ZJmLdNOvD6R2tnvkyARteqXouJmI46V306QVzACAAAAAAMn1Z6B35wFTX9mEYAPM+IiJ5hauEwfD0CyIvBrxHg7IFYwAgAAAAAMRQnV65wbGlT+7Wj/HvGPLKg8NBnhkQ8Hx/MIyWvynTAAMxNzAAfQAAAAVkACAAAAAAVerb7qVNy457rNOHOgDSKyWl5ojun7iWrv1uHPXrIZQFcwAgAAAAAIDcYS9j5z+gx0xdJj09L7876r/vjvKTi/d3bXDE3PhyBWMAIAAAAAANDuaZsI2eS6/qiSdG9pCLjcPdKS96xWUJr84V/1La7gADMTcxAH0AAAAFZAAgAAAAAH/E44yLxKCJjuSmU9A8SEhbmkDOx1PqqtYcZtgOzJdrBXMAIAAAAABgLh9v2HjBbogrRoQ82LS6KjZQnzjxyJH4PH+F3jupSAVjACAAAAAAd93miIlZ/uuhzjVBt0BPjzeewxih6TSzkzbsNhpMjA8AAzE3MgB9AAAABWQAIAAAAAAlNUdDL+f/SSQ5074mrq0JNh7CTXwTbbhsQyDwWeDVMwVzACAAAAAANIH2IlSNG0kUw4qz0budjcWn8mNR9cJlYUqPYdonucAFYwAgAAAAAF9OccClBu1j8BVMDmMtJyCxnhWCbg9FuY/8nhuFGJknAAMxNzMAfQAAAAVkACAAAAAASZ+CvUDtlk/R4HAQ3a+PHrKeY/8ifAfh0oXYFqliu80FcwAgAAAAAJelpzPgM65OZFt/mvGGpwibclQ49wH+1gbUGzd9OindBWMAIAAAAAC63uwNXVbd1VP5If++NprVRsHCbomU9Iq3GxCttNAweAADMTc0AH0AAAAFZAAgAAAAAN9fkoUVbvFV2vMNMAkak4gYfEnzwKI3eDM3pnDK5q3lBXMAIAAAAACnDkgVNVNUlbQ9RhR6Aot2nVy+U4km6+GHPkLr631jEAVjACAAAAAA5d2REAVBZI9rUfeqJ9jhePmRXvrGolUbMTFiC78k/OIAAzE3NQB9AAAABWQAIAAAAAASyK3b1nmNCMptVEGOjwoxYLLS9fYWm/Zxilqea0jpEQVzACAAAAAADDHsGrbqlKGEpxlvfyqOJKQJjwJrzsrB7k3HG0AUJbkFYwAgAAAAACZHzWGY3PEL4uPSKPezGGJkyScYw/Vt3bKmX5inVMaOAAMxNzYAfQAAAAVkACAAAAAAlSP9iK31GlcG9MKGbLmq+VXMslURr+As736rrVNXcsUFcwAgAAAAAAvbj0zfq9zzi8XReheKFbCB+h9IsOLgXPPpI5vrEJNZBWMAIAAAAABb9Y2Ox3MeHCfGnMxpR7nk6hwIJg0J8/tT8fZHwxMCBQADMTc3AH0AAAAFZAAgAAAAAOJNORH8Bev97gVU7y6bznOxJ+E6Qoykur1QP76hG1/7BXMAIAAAAAC+C1PtOOrSZgzBAGhr+dPe/kR0JUw9GTwLVNr61xC1aAVjACAAAAAA4rDk/UlRHudzR12i5Ivcb5Pxwn63JX2Sjf2xrlALL7kAAzE3OAB9AAAABWQAIAAAAAAKcHzLUomavInN6upPkyWhAqYQACP/vdVCIYpiy6U6HgVzACAAAAAATsR4KItY6R2+U7Gg6sJdaEcf58gjd1OulyWovIqfxKcFYwAgAAAAAEac2NX3v0n87D/eUM/a3Gj+5Axs896SCidMyOMaOz6zAAMxNzkAfQAAAAVkACAAAAAAqTOLiMpCdR59tLZzzIPqJvbCNvz2XQL9ust0qYaehtcFcwAgAAAAAArefox/3k5xGOeiw2m6NUdzuGxmPwcu5IFcj+jMwHgHBWMAIAAAAABK08kOnYjDWhLJ70iVhd3XEj0Q0srEQttJTawBm6GuOwADMTgwAH0AAAAFZAAgAAAAAIPSqSeVzSRgNVNmrPYHmUMgykCY27NbdDUNhE5kx/SgBXMAIAAAAAAhX90nNfxyXmZe/+btZ7q6xMX4PFyj0paM1ccJ/5IUUQVjACAAAAAA/9XTbq8UVKJbIeYtfNg8pdDrpDqbCwVr5Rq3Tb4dFwwAAzE4MQB9AAAABWQAIAAAAACLn5DxiqAosHGXIAY96FwFKjeqrzXWf3VJIQMwx1fl4gVzACAAAAAAindvU27nveutopdvuHmzdENBbeGFtI3Qcsr07jxmvm8FYwAgAAAAAJ3GMHBeIaCyFV5zBgcUMpjfIDcdoaTYQSyQN2shVJMWAAMxODIAfQAAAAVkACAAAAAA7i2S6rHRSPBwZEn59yxaS7HiYBOmObIkeyCcFU42kf8FcwAgAAAAAGb3RSEyBmgarkTvyLWtOLJcPwCKbCRkESG4RZjVmY4iBWMAIAAAAABKIdcHRSJ3dnevw5Pj0BfULPqmDOA7KBDREMGv5rXUEAADMTgzAH0AAAAFZAAgAAAAAFz+M+H/Z94mdPW5oP51B4HWptp1rxcMWAjnlHvWJDWrBXMAIAAAAACBFEOQyL7ZHu4Cq33QvXkmKuH5ibG/Md3RaED9CtG5HwVjACAAAAAABf2EvIMumZohp2E/sHWEJW4VMU3ez95hwWU9pjKGRCYAAzE4NAB9AAAABWQAIAAAAAAE7c2x3Z3aM1XGfLNk/XQ9jCazNRbGhVm7H8c2NjS5ywVzACAAAAAARJ9h8fdcwA19velF3L/Wcvi2rCzewlKZ2nA0p8bT9uwFYwAgAAAAAPl1vH6JaJGZSJ8au0NK58X4gqleryub8slUhA0HRtzmAAMxODUAfQAAAAVkACAAAAAAVoRt9B9dNVvIMGN+ea
5TzRzQC+lqSZ8dd/170zU5o9cFcwAgAAAAAEwM95XZin5mv2yhCI8+ugtKuvRVmNgzzIQN0yi1+9aIBWMAIAAAAAA+rgph1avVrhyHl/6G+HtASJSiJkv7u2UPYj+25fwtLAADMTg2AH0AAAAFZAAgAAAAAKRDkjyWv/etlYT4GyoXrmBED2FgZHnhc+l9Wsl06cH2BXMAIAAAAABohlpm3K850Vndf3NmNE0hHqDlNbSR8/IvMidQ3LnIZAVjACAAAAAAKw6WqWDrI7ZfyndtrftL3uh7BaF/FQxhkh8b0Zl547gAAzE4NwB9AAAABWQAIAAAAAC3DFh5oklLCNLY90bgWm68dFXz65JpAZSp1K99MBTPAQVzACAAAAAAQgZecmxEUZVHoptEQClDwAf8smI3WynQ/i+JBP0g+kQFYwAgAAAAADmUpVNJUwrydIllFvI/aau1703nk6tF2sfzkgFlWsKCAAMxODgAfQAAAAVkACAAAAAAQ5WKvWSB503qeNlOI2Tpjd5blheNr6OBO8pfJfPNstcFcwAgAAAAAKwHgQLSDJ5NwLBQbY5OnblQIsVDpGV7q3RCbFLD1U4/BWMAIAAAAADKOab9GTEiaybo9o4J72Pb9LiQU/CuKoQm6Jtlmo/IeQADMTg5AH0AAAAFZAAgAAAAAGfhFY3RGRm5ZgWRQef1tXxHBq5Y6fXaLAR4yJhrTBplBXMAIAAAAACKEF0ApLoB6lP2UqTFsTQYNc9OdDrs/vziPGzttGVLKQVjACAAAAAAvJcnqBNCZgHe+Q7Cqk+yiAaIuSJIRItXEsAlKy9PES4AAzE5MAB9AAAABWQAIAAAAAAIM73gPcgzgotYHLeMa2zAU4mFsr7CbILUZWfnuKSwagVzACAAAAAAJCSu98uV8xv88f2BIOWzt6p+6EjQStMBdkGPUkgN79cFYwAgAAAAABQqDP10DcaqEY7vxFS7U/9xcadDASONow1+xmVsPZ11AAMxOTEAfQAAAAVkACAAAAAAkWmHCUsiMy1pwZTHxVPBzPTrWFBUDqHNrVqcyyt7nO8FcwAgAAAAAMv2CebFRG/br7USELR98sIdgE9OQCRBGV5JZCO+uPMgBWMAIAAAAABxLEFq6nFVI3WaYhggo1d6oeDsBSm4wCW0T6D0yE/puwADMTkyAH0AAAAFZAAgAAAAAGInUYv0lP/rK7McM8taEHXRefk8Q2AunrvWqdfSV7UaBXMAIAAAAACE+WPxJ3gan7iRTbIxXXx+bKVcaf8kP4JD8DcwU0aL7wVjACAAAAAAiThzC5jAZ5D6xuCIsUD1aLy7KxmT2Fb8ybTkLxvj17MAAzE5MwB9AAAABWQAIAAAAACmHlg2ud3cplXlTsNTpvNnY6Qm1Fce0m899COamoDjaQVzACAAAAAArtJQeJIlepBWRU2aYar7+YGYVQ7dfDc1oxgTmA8r9q0FYwAgAAAAADZpcxtBFv18EdmhxsoxlDs253tNB5zqVY/h+nFD1isIAAMxOTQAfQAAAAVkACAAAAAApzcWSAbZWV/Rq+ylRNqqlJqNVR4fhXrz4633/MQOQgcFcwAgAAAAAN/jz/bsEleiuCl+li83EWlG6UMHA8CyaOMRKCkXkSCPBWMAIAAAAADSTHgALJugYsjIvSK1o0UwG6VizNokXQ/tnPNjOpGB1AAABWUAIAAAAADrmnP3kS2GpCl+gdL2da90KHTkBX46iQ/sZRoj7uPz7BJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAAA==", + "subType": "06" + } + } + } + }, + "u": { + "$set": { + "encryptedDecimal": { + "$$type": "binData" + } + } + } + } + ], + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimal", + "bsonType": "decimal", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + } + }, + "deleteTokens": { + "default.default": { + "encryptedDecimal": { + "e": { + "$binary": { + "base64": "65pz95EthqQpfoHS9nWvdCh05AV+OokP7GUaI+7j8+w=", + "subType": "00" + } + }, + "o": { + "$binary": { + "base64": "noN+05JsuO1oDg59yypIGj45i+eFH6HOTXOPpeZ//Mk=", + "subType": "00" + } + } + } + } + } + }, + "$db": "default" + } + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": { + "$numberInt": "0" + }, + "encryptedDecimal": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "5nRutVIyq7URVOVtbE4vM01APSIajAVnsShMwjBlzkM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "rbf3AeBEv4wWFAKknqDxRW5cLNkFvbIs6iJjc6LShQY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "n+XAuFnP8Dov9TnhGFxNx0K/MnVM9WbJ7RouEu0ndO0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yRXojuVdn5GQtD97qYlaCL6cOLmZ7Cvcb3wFjkLUIdM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "DuIkdRPITRs55I4SZmgomAHCIsDQmXRhW8+MOznkzSk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": 
"SsBk+Et1lTbU+QRPx+xyJ/jMkmfG+QCvQEpip2YYrzA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "crCIzOd8KhHvvUlX7M1v9bhvU4pLdTc+X2SuqoKU5Ek=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "YOWdCw4UrqnxkAaVjqmC4sKQDMVMHEpFGnlxpxdaU6E=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "M3SShp81Ff8tQ632qKbv9MUcN6wjDaBReI0VXNu6Xh4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "gzHlSPxpM0hT75kQvWFzGlOxKvDoiKQZOr19V6l2zXI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "s3JnppOGYw9SL2Q1kMAZs948v2F5PrpXjGei/HioDWs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cG6+3Gk/zEH68P/uuuwiAUVCuyJwa1LeV+t29FlPPAo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dupdvR3AyJtM+g9NDKiaLVOtGca387JQp8w+V03m7Ig=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JqEQc5svj2jTvZ6LLA5ivE+kTb/0aRemSEmxk4G7Zrg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "szcXXXKnob+p3SoM4yED2R920LeJ7cVsclPMFTe4CeI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "o1QoGVXmuBdHwHm7aCtGMlMVKrjFdYvJXpoq6uhIAZ0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Jfm5wPlqqLCJRGQIqRq2NGmpn7s0Vrih2H3YAOoI2YU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zMHLb8ARbsYo8Ld05bqnGFf1Usha6EGb8QKwdSAyps0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yQdtq9lh5pugL7/i0Bj/PuZUUBUIzf+7wj1rl5y736w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wGWVZdO7qIuyDg/BqDgqjgoQ02h5YYgwXQB1oCin2NE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "by9HMLj6NTEpgztZ5HSN6GxImkXPcaFINYDzgZY33X8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tWo0vbasi7bXmn/MsOx13VC1IsWtpx/nYp0uj4iMzdA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tQQpndUYd5O87lOtrGjH3wl9VsOK0ray7RMasL90sBM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cQjXEDCMsOpKLLf+vlTgIHA+cbSJdzqhbSX9Wvh95aA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "7yMpU48IxK9SzP2cx3VnTownGEwFmeFofuuFT97SuuY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kSOx1kz0CmBgzKQHZlo65ZUY1DIv9A99JRm+Us2y6Ew=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ubQpdPBe6/xvtr+AcXdfYLSvYCR4ot0tivehkCsupb4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xal+iCJ6FTefRQToyoNksc9NCZShyn04NDGi4IYrcoM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "d7jU4iOK50xHxlkSifcxlZFCM46TSgQzoYivxG3HNLY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tJvl2nsBLBVzL3pp6sKWCL4UXeh3q/roYBJjSb74ve0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "OIUCaKRvIx9t1w6Hxlz1IcQTdPNCfdRNwnnTm10W+X0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "A9tvzsiElotOUVIB4CqfQp9mAwqvTM35YkmAR170aHA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "lI8gpK7hpb7c9x4RQugsxMnQay5LZJmwslZdvMx/dcE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dNCzh40U0XvdKnSDi3HRQOWQftEsDVqc4uUvsVFGoq8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "IP+iwEBWBwVVZIdpaMu8k5+soFCz+TZkYn3drKZ9grE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pnqyh6e0y5svHkJDShlN9CHV0WvMBE4QbtJpQw5ZCXc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "elEl42tbVDoRTLjAhZUFEtXiut4b3PVhg/1ZLZSQdtE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "vHuu2FxwclMHqyE6JBYbTYgbEkB0dqb/JuaxsvfwsmY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xTf7NCe3Gf8QpE78HR5OknlLTKfs9J+RN9UZpH6fnso=", + 
"subType": "00" + } + }, + { + "$binary": { + "base64": "XiWSasRnJAulGR6+LCVD3mwRObXylqYWR9jvpywq12c=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "MZMxEQ5ikx0PG1YFIExv0UnTZogsvgeOEZTpzvBDn4w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yZMyMZBDrWbAhvnic7vvIYhmO9m5H2iuv0c8KNZrBzY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xxM14hTPY5j0vvcK2C7YAEjzdsfUTFHozHC0hEo1bxI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+01rqR1xVwkpGXcstbk1ItJqFVjH6Q8MGxEN3Cm9Y1A=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xOpLV0Z2VTRJ3iWtnWZcsyjXubTIkYWo31cO+HV1o1k=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "BWUOLqgLBqc5NwxVlSV5H3KFQPXbCp7mdo+jF+8cJqY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "fuQb1S6xZDGlrEbK+kI23aL53PP1PVNwqICnZNt9Yzg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SfscnoibFttahLdPVC4Ee+47ewGFKpDSU7M6HX19bKE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "rpSW2awybNVeKtat91VFxqbINoTfNhPfQAu+d73Xtf8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "9M/CP9ccOIIj2LLFmE0GFDO0Ban2wsNalEXfM6+h+1s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "WrEMG49l1ye4MhXs5ZS9tz8P6h+hDvthIg/2wW9ne1Q=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ImNhbfeyfH8qIEeA5ic0s3dAQBdzzTBS+CPsNih9vZ0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dWP33YDSn04UKJN2ogh2Rui0iW/0q2y18OCDRVcfyoo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "lYv0isAtfGh6H9tdp3cp2eHU7q2J+uk7QrgcxtK3w7Y=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "VGMoamB/+7zTOYcY/pqJc96xlv2PdW4hwsIAEIslTDQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yNeBWMF7BnD9wVwz2PgJsvWr77QiVvvWUvJF0+fqBug=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SfpvObJ+tJBXSvqeN7vlOfmhYign635lciYAJIjUtY8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dsen4NqjzVGjpjufiTMs3+gqeD09EbnuogPgxrJECwg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pxCWVM3sn19NsFEpgHbgLa+PmYlhN3mMiP0Wk8kJhYw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q11KNvJszjYIB9n9HcC+N4uz11a3eRj1L3BH9scKMDQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "A1PmkgcEToWh1JiVWE6mI5jUu7poxWWuCUt/cgRUUDc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "qJo3Hu4PJeanL7XEaWXO/n3YsodhZyd+MJOOmB9Kpd8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "BkBKLO8URFscfRY9Bav/1+L9mLohDgNr/MkZtGiraIs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "rZq5WA3Hx3xthOyHAJXK//f8pE2qbz7YKu3TIMp9GFY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "X07a/Lm80p5xd4RFs1dNmw+90tmPDPdGiAKVZkxd4zY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6YrBn2ofIw1b5ooakrLOwF41BWrps8OO0H9WH4/rtlE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "0l86Ag5OszXpa78SlOUV3K9nff5iC1p0mRXtLg9M1s4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Hn6yuxFHodeyu7ISlhYrbSf9pTiH4TDEvbYLWjTwFO0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zdf4y2etKBuIpkEU1zMwoCkCsdisfXZCh8QPamm+drY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "rOQ9oMdiK5xxGH+jPzOvwVqdGGnF3+HkJXxn81s6hp4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "61aKKsE3+BJHHWYvs3xSIBvlRmKswmaOo5rygQJguUg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "KuDb/GIzqDM8wv7m7m8AECiWJbae5EKKtJRugZx7kR0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": 
"Q+t8t2TmNUiCIorVr9F3AlVnX+Mpt2ZYvN+s8UGict8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tJRZIpKxUgHyL83kW8cvfjkxN3z6WoNnUg+SQw+LK+k=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pnUsYjip8SvW0+m9mR5WWTkpK+p6uwJ6yBUAlBnFKMk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "PArHlz+yPRYDycAP/PgnI/AkP8Wgmfg++Vf4UG1Bf0E=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wnIh53Q3jeK8jEBe1n8kJLa89/H0BxO26ZU8SRIAs9Q=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "4F8U59gzBLGhq58PEWQk2nch+R0Va7eTUoxMneReUIA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ihKagIW3uT1dm22ROr/g5QaCpxZVj2+Fs/YSdM2Noco=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "EJtUOOwjkrPUi9mavYAi+Gom9Y2DuFll7aDwo4mq0M0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dIkr8dbaVRQFskAVT6B286BbcBBt1pZPEOcTZqk4ZcI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "aYVAcZYkH/Tieoa1XOjE/zCy5AJcVTHjS0NG2QB7muA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "sBidL6y8TenseetpioIAAtn0lK/7C8MoW4JXpVYi3z8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "0Dd2klU/t4R86c2WJcJDAd57k/N7OjvYSO5Vf8KH8sw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "I3jZ92WEVmZmgaIkLbuWhBxl7EM6bEjiEttgBJunArA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "aGHoQMlgJoGvArjfIbc3nnkoc8SWBxcrN7hSmjMRzos=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "bpiWPnF/KVBQr5F6MEwc5ZZayzIRvQOLDAm4ntwOi8g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tI7QVKbE6avWgDD9h4QKyFlnTxFCwd2iLySKakxNR/I=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "XGsge0CnoaXgE3rcpKm8AEeku5QVfokS3kcI+JKV1lk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JQxlryW2Q5WOwfrjAnaZxDvC83Dg6sjRVP5zegf2WiM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "YFuHKJOfoqp1iGVxoFjx7bLYgVdsN4GuUFxEgO9HJ5s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Z6vUdiCR18ylKomf08uxcQHeRtmyav7/Ecvzz4av3k4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SPGo1Ib5AiP/tSllL7Z5PAypvnKdwJLzt8imfIMSEJQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "m94Nh6PFFQFLIib9Cu5LAKavhXnagSHG6F5EF8lD96I=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pfEkQI98mB+gm1+JbmVurPAODMFPJ4E8DnqfVyUWbSo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "DNj3OVRLbr43s0vd+rgWghOL3FqeO/60npdojC8Ry/M=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kAYIQrjHVu49W8FTxyxJeiLVRWWjC9fPcBn+Hx1F+Ss=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "aCSO7UVOpoQvu/iridarxkxV1SVxU1i9HVSYXUAeXk4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Gh6hTP/yj1IKlXQ+Q69KTfMlGZjEcXoRLGbQHNFo/1s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "/gDgIFQ4tAlJk3GN48IS5Qa5IPmErwGk8CHxAbp6gs0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "PICyimwPjxpusyKxNssOOwUotAUbygpyEtORsVGXT8g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "4lu+cBHyAUvuxC6JUNyHLzHsCogGSWFFnUCkDwfQdgI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pSndkmoNUJwXjgkbkgOrT5f9nSvuoMEZOkwAN9ElRaE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tyW+D4i26QihNM5MuBM+wnt5AdWGSJaJ4X5ydc9iWTU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "9Syjr8RoxUgPKr+O5rsCu07AvcebA4P8IVKyS1NVLWc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "67tPfDYnK2tmrioI51fOBG0ygajcV0pLo5+Zm/rEW7U=", + 
"subType": "00" + } + }, + { + "$binary": { + "base64": "y0EiPRxYTuS1eVTIaPQUQBBxwkyxNckbePvKgChwd0M=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "NWd+2veAaeXQgR3vCvzlI4R1WW67D5YsVLdoXfdb8qg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "PY5RQqKQsL2GqBBSPNOEVpojNFRX/NijCghIpxD6CZk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "lcvwTyEjFlssCJtdjRpdN6oY+C7bxZY+WA+QAqzj9zg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CWE7XRNylvTwO/9Fv56dNqUaQWMmESNS/GNIwgBaEI0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ijwlrUeS8nRYqK1F8kiCYF0mNDolEZS+/lJO1Lg93C8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "8KzV+qYGYuIjoNj8eEpnTuHrMYuhzphl80rS6wrODuU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wDyTLjSEFF895hSQsHvmoEQVS6KIkZOtq1c9dVogm9I=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SGrtPuMYCjUrfKF0Pq/thdaQzmGBMUvlwN3ORIu9tHU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "KySHON3hIoUk4xWcwTqk6IL0kgjzjxgMBObVIkCGvk4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "hBIdS9j0XJPeT4ot73ngELkpUoSixvRBvdOL9z48jY8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Tx6um0q9HjS5ZvlFhvukpI6ORnyrXMWVW1OoxvgqII0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zFKlyfX5H81+d4A4J3FKn4T5JfG+OWtR06ddyX4Mxas=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cGgCDuPV7MeMMYEDpgOupqyNP4BQ4H7rBnd2QygumgM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "IPaUoy98v11EoglTpJ4kBlEawoZ8y7BPwzjLYBpkvHQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Pfo4Am6tOWAyZNn8G9W5HWWGC3ZWmX0igI/RRB870Ro=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "fnTSjd7bC1Udoq6iM7UDnHAC/lsIXSHp/Gy332qw+/I=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "fApBgVRrTDyEumkeWs5p3ag9KB48SbU4Si0dl7Ns9rc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "QxudfBItgoCnUj5NXVnSmWH3HK76YtKkMmzn4lyyUYY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "sSOvwhKa29Wq94bZ5jGIiJQGbG1uBrKSBfOYBz/oZeI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "FdaMgwwJ0NKsqmPZLC5oE+/0D74Dfpvig3LaI5yW5Fs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "sRWBy12IERN43BSZIrnBfC9+zFBUdvjTlkqIH81NGt4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "/4tIRpxKhoOwnXAiFn1Z7Xmric4USOIfKvTYQXk3QTc=", + "subType": "00" + } + } + ] + }, + { + "_id": { + "$numberInt": "1" + }, + "encryptedDecimal": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "DLCAJs+W2PL2DV5YChCL6dYrQNr+j4p3L7xhVaub4ic=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Mr/laWHUijZT5VT3x2a7crb7wgd/UXOGz8jr8BVqBpM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wXVD/HSbBljko0jJcaxJ1nrzs2+pchLQqYR3vywS8SU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "VDCpBYsJIxTfcI6Zgf7FTmKMxUffQv+Ys8zt5dlK76I=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zYDslUwOUVNwTYkETfjceH/PU3bac9X3UuQyYJ19qK0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "rAOmHSz18Jx107xpbv9fYcPOmh/KPAqge0PAtuhIRnc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "BFOB1OGVUen7VsOuS0g8Ti7oDsTt2Yj/k/7ta8YAdGM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "2fckE5SPs0GU+akDkUEM6mm0EtcV3WDE/sQsnTtodlk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "mi9+aNjuwIvaMpSHENvKzKRAmX9cYguo2mXLvOoftHQ=", + "subType": "00" + } + }, + { + 
"$binary": { + "base64": "K6TWn4VcWWkz/gkUkLmbtwkG7SNeABICmLDnoYJFlLU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Z+2/cEtGU0Fq7QJFNGA/0y4aWAsw0ncG6X0LYRqwS3c=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "rrSIf+lgcNZFbbUkS9BmE045jRWBpcBJXHzfMVEFuzE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "KlHL3Kyje1/LMIfgbCqw1SolxffJvvgsYBV5y77wxuA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "hzJ1YBoETmYeCh352dBmG8d8Wse/bUcqojTWpWQlgsc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "lSdcllDXx8MA+s0GULjDA1lQkcV0L8/aHtZ6dM2pZ2c=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "HGr7JLTTA7ksAnlmjSIwwdBVvgr3fv46/FTdiCPYpos=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "mMr25v1VwOEVZ8xaNUTHJCcsYqV+kwK6RzGYilxPtJ4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "129hJbziPJzNo0IoTU3bECdge0FtaPW8dm4dyNVNwYU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "doiLJ96qoo+v7NqIAZLq6BI5axV8Id8gT5vyJ1ZZ0PM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cW/Lcul3xYmfyvI/0x/+ybN78aQmBK1XIGs1EEU09N8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "1aVIwzu9N5EJV9yEES+/g6hOTH7cA2NTcLIc59cu0wU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kw5tyl7Ew0r1wFyrN1mB9FiVW2hK2BxxxUuJDNWjyjQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ADAY2YBrm6RJBDY/eLLcfNxmSJku+mefz74gH66oyco=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "8gkqB1LojzPrstpFG7RHYmWxXpIlPDTqWnNsXH7XDRU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "TESfVQMDQjfTZmHmUeYUE2XrokJ6CcrsKx/GmypGjOw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "qFM+HFVQ539S0Ouynd1fBHoemFxtU9PRxE5+Dq7Ljy4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "jPiFgUZteSmOg4wf3bsEKCZzcnxmMoILsgp/GaZD+dM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "YaWUgJhYgPNN7TkFK16H8SsQS226JguaVhOIQxZwQNQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "x90/Qk3AgyaFsvWf2KUCu5XF3j76WFSjt/GrnG01060=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ZGWybWL/xlEdMYRFCZDUoz10sywTf7U/7wufsb78lH0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "8l4ganN66jIcdxfHAdYLaym/mdzUUQ8TViw3MDRySPc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "c8p5XEGTqxqvRGVlR+nkxw9uUdoqDqTB0jlYQ361qMA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "1ZGFLlpQBcU3zIUg8MmgWwFKVz/SaA7eSYFrfe3Hb70=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "34529174M77rHr3Ftn9r8jU4a5ztYtyVhMn1wryZSkU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "YkQ4pxFWzc49MS0vZM6S8mNo4wAwo21rePBeF3C+9mI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "MhOf4mYY00KKVhptOcXf0bXB7WfuuM801MRJg4vXPgc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "7pbbD8ihNIYIBJ3tAUPGzHpFPpIeCTAk5L88qCB0/9w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "C9Q5PoNJTQo6pmNzXEEXUEqH22//UUWY1gqILcIywec=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "AqGVk1QjDNDLYWGRBX/nv9QdGR2SEgXZEhF0EWBAiSE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "/sGI3VCbJUKATULJmhTayPOeVW+5MjWSvVCqS77sRbU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yOtbL0ih7gsuoxVtRrACMz+4N5uo7jIR7zzmtih2Beo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "uA6dkb2Iyg9Su8UNDvZzkPx33kPZtWr/CCuEY+XgzUM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": 
"1DoSFPdHIplqZk+DyWAmEPckWwXw/GdB25NLmzeEZhk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "OfDVS0T3ZuIXI/LNbTp6C9UbPIWLKiMy6Wx+9tqNl+g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "3PZjHXbmG6GtPz+iapKtQ3yY4PoFFgjIy+fV2xQv1YU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kaoLN0BoBWsmqE7kKkJQejATmLShd8qffcAmlhsxsGY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "vpiw9KgQdegGmp7IJnSGX2miujRLU0xzs0ITTqbPW7c=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "NuXFf7xGUefYjIUTuMxNUTCfVHrF8oL0AT7dPv5Plk4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "8Tz53LxtfEBJ9eR+d2690kwNsqPV6XyKo2PlqZCbUrc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "e6zsOmHSyV8tyQtSX6BSwui6wK9v1xG3giY/IILJQ2w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "2fedFMCxa2DzmIpfbDKGXhQg0PPwbUv6vIWdwwlvhms=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yEJKMFnWXTC8tJUfzCInzQRByNEPjHxpw4L4m8No91Q=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "YbFuWwOiFuQyOzIJXDbOkCWC2DyrG+248TBuVCa1pXU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "w7IkwGdrguwDrar5+w0Z3va5wXyZ4VXJkDMISyRjPGo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "YmJUoILTRJPhyIyWyXJTsQ6KSZHHbEpwPVup6Ldm/Ko=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "FvMjcwVZJmfh6FP/yBg2wgskK+KHD8YVUY6WtrE8xbg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "h4HCtD4HyYz0nci49IVAa10Z4NJD/FHnRMV4sRX6qro=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "nC7BpXCmym+a0Is2kReM9cYN2M1Eh5rVo8fjms14Oiw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "1qtVWaeVo649ZZZtN8gXbwLgMWGLhz8beODbvru0I7Q=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Ej+mC0QFyMNIiSjR939S+iGBm7dm+1xObu5IcF/OpbU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "UQ8LbUG3cMegbr9yKfKanAPQE1EfPkFciVDrNqZ5GHY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "4iI3mXIDjnX+ralk1HhJY43mZx2uTJM7hsv9MQzTX7E=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "0WQCcs3rvsasgohERHHCaBM4Iy6yomS4qJ5To3/yYiw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "qDCTVPoue1/DOAGNAlUstdA9Sid8MgEY4e5EzHcVHRk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "9F9Mus0UnlzHb8E8ImxgXtz6SU98YXD0JqswOKw/Bzs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pctHpHKVBBcsahQ6TNh6/1V1ZrqOtKSAPtATV6BJqh0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "vfR3C/4cPkVdxtNaqtF/v635ONbhTf5WbwJM6s4EXNE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ejP43xUBIex6szDcqExAFpx1IE/Ksi5ywJ84GKDFRrs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "jbP4AWYd3S2f3ejmMG7dS5IbrFol48UUoT+ve3JLN6U=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CiDifI7958sUjNqJUBQULeyF7x0Up3loPWvYKw9uAuw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "e2dQFsiHqd2BFHNhlSxocjd+cPs4wkcUW/CnCz4KNuM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "PJFckVmzBipqaEqsuP2mkjhJE4qhw36NhfQ9DcOHyEU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "S3MeuJhET/B8VcfZYDR9fvX0nscDj416jdDekhmK11s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CGVHZRXpuNtQviDB2Kj03Q8uvs4w3RwTgV847R7GwPw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yUGgmgyLrxbEpDVy89XN3c2cmFpZXWWmuJ/35zVZ+Jw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "inb6Q97mL1a9onfNTT8v9wsoi/fz7KXKq3p8j90AU9c=", + 
"subType": "00" + } + }, + { + "$binary": { + "base64": "CCyYx/4npq9xGO1lsCo8ZJhFO9/tN7DB+/DTE778rYg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "LNnYw4fwbiAZu0kBdAHPEm/OFnreS+oArdB5O/l/I98=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "P006SxmUS/RjiQJVYPdMFnNo3827GIEmSzagggkg05Q=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "oyvwY+WsnYV6UHuPki1o0ILJ2jN4uyXf9yaUNtZJyBA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "36Lk3RHWh1wmtCWC/Yj6jNIo17U5y6SofAgQjzjVxD8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "vOOo8FqeHnuO9mqOYjIb4vgwIwVyXZ5Y+bY5d9tGFUM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "bJiDJjwQRNxqxlGjRm5lLziFhcfTDCnQ/qU1V85qcRg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "2Qgrm1n0wUELAQnpkEiIHB856yv76q8jLbpiucetcm0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "5ciPOYxTK0WDwwYyfs7yiVymwtYQXDELLxmM4JLl4/o=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "31dC2WUSIOKQc4jwT6PikfeYTwi80mTlh7P31T5KNQU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "YluTV2Mu53EGCKLcWfHZb0BM/IPW2xJdG3vYlDMEsM4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dh/8lGo2Ek6KukSwutH6Q35iy8TgV0FN0SJqe0ZVHN8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "EVw6HpIs3BKen2qY2gz4y5dw1JpXilfh07msZfQqJpc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "FYolLla9L8EZMROEdWetozroU40Dnmwwx2jIMrr7c1A=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "8M6k4QIutSIj6CM41vvkQtuFsaGrjoR9SZJVSLbfGKQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "9LM0VoddDNHway442MqY+Z7vohB2UHau/cddshhzf40=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "66i8Ytco4Yq/FMl6pIRZazz3CZlu8fO2OI6Pne0pvHU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "2a/HgX+MjZxjXtSvHgF1yEpHMJBkl8Caee8XrJtn0WM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "frhBM662c4ZVG7mWP8K/HhRjd01lydW/cPcHnDjifqc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6k1T7Q1t668PBqv6fwpVnT1HWh7Am5LtbKvwPJKcpGU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "UlJ5Edfusp8S/Pyhw6KTglIejmbr1HO0zUeHn/qFETA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "jsxsB+1ECB3assUdoC333do9tYH+LglHmVSJHy4N8Hg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "2nzIQxGYF7j3bGsIesECEOqhObKs/9ywknPHeJ3yges=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xJYKtuWrX90JrJVoYtnwP7Ce59XQGFYoalxpNfBXEH0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "NLI5lriBTleGCELcHBtNnmnvwSRkHHaLOX4cKboMgTw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "hUOQV0RmE5aJdJww1AR9rirJG4zOYPo+6cCkgn/BGvQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "h4G2Of76AgxcUziBwCyH+ayMOpdBWzg4yFrTfehSC2c=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "VuamM75RzGfQpj2/Y1jSVuQLrhy6OAwlZxjuQLB/9Ss=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kn9+hLq7hvw02xr9vrplOCDXKBTuFhfbX7d5v/l85Pg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "fAiGqKyLZpGngBYFbtYUYt8LUrJ49vYafiboifTDjxs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "BxRILymgfVJCczqjUIWXcfrfSgrrYkxTM5VTg0HkZLY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CrFY/PzfPU2zsFkGLu/dI6mEeizZzCR+uYgjZBAHro0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "AEbrIuwvXLTtYgMjOqnGQ8y8axUn5Ukrn7UZRSyfQVw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": 
"ouWeVH3PEFg+dKWlXc6BmqirJOaVWjJbMzZbCsce4dA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+hd6xFB+EG+kVP7WH4uMd1CLaWMnt5xJRaY/Guuga9Q=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zmpGalfAOL3gmcUMJYcLYIRT/2VDO/1Dw4KdYZoNcng=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "2PbHAoM/46J2UIZ/vyksKzmVVfxA7YUyIxWeL/N/vBk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "7fD9x+zk5MVFesb59Klqiwwmve7P5ON/5COURXj5smE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tlrNQ4jaq051iaWonuv1sSrYhKkL1LtNZuHsvATha3s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "fBodm28iClNpvlRyVq0dOdXQ08S7/N3aDwid+PdWvRo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "O+/nnRqT3Zv7yMMGug8GhKHaWy6u7BfRGtZoj0sdN1c=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "5AZZ/RTMY4Photnm/cpXZr/HnFRi3eljacMsipkJLHA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "oFVyo/kgoMxBIk2VE52ySSimeyU+Gr0EfCwapXnTpKA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Z8v59DfcnviA0mzvnUk+URVO0UuqAWvtarEgJva/n1c=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "P64GOntZ+zBJEHkigoh9FSxSO+rJTqR20z5aiGQ9an4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xMbSuDPfWuO/Dm7wuVl06GnzG9uzTlJJX9vFy7boGlY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kXPB19mRClxdH2UsHwlttS6lLU2uHvzuZgZz7kC45jU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "NDVjVYXAw4k0w4tFzvs7QDq39aaU3HQor4I2XMKKnCk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "uKw/+ErVfpTO1dGUfd3T/eWfZW3nUxXCdBGdjvHtZ88=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "av0uxEzWkizYWm0QUM/MN1hLibnxPvCWJKwjOV4yVQY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ERwUC47dvgOBzIsEESMIioLYbFOxOe8PtJTnmDkKuHM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "2gseKlG5Le12fS/vj4eaED4lturF16kAgJ1TpW3HxEE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "7Cvg0Y3j/5i2F1TeXxlMmU7xwif5dCmwkZAOrVC5K2Y=", + "subType": "00" + } + } + ] + } + ] + } + } + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2-Range-DecimalPrecision-Aggregate.json b/test/client-side-encryption/spec/legacy/fle2-Range-DecimalPrecision-Aggregate.json new file mode 100644 index 0000000000..a3e605d1bb --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2-Range-DecimalPrecision-Aggregate.json @@ -0,0 +1,590 @@ +{ + "runOn": [ + { + "minServerVersion": "6.2.0", + "topology": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "encrypted_fields": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDecimal": "0.0" + }, + "max": { + "$numberDecimal": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + }, + "key_vault_data": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": 
"sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ], + "tests": [ + { + "description": "FLE2 Range DecimalPrecision. Aggregate.", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDecimalPrecision": { + "$gt": { + "$numberDecimal": "0" + } + } + } + } + ] + }, + "result": [ + { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1" + } + } + ] + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedDecimalPrecision": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDecimal": "0.0" + }, + "max": { + "$numberDecimal": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedDecimalPrecision": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": 
"0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDecimal": "0.0" + }, + "max": { + "$numberDecimal": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "aggregate": "default", + "pipeline": [ + { + "$match": { + "encryptedDecimalPrecision": { + "$gt": { + "$binary": { + "base64": "CvoJAAADcGF5bG9hZADKCQAABGcAjgkAAAMwAH0AAAAFZAAgAAAAAHdJ2Vnb4MMzqVYVssjSdDy8XU4GVzMTfGifGETgQ2mYBXMAIAAAAAD7cFfKJGIXo6PjyeX2ria02CckW7dWFDoY/3FyBdm1NQVjACAAAAAAqZO+/+gRWlPaMOvuiXizSmBe7lp1VWg1vJ4UmW8o3bQAAzEAfQAAAAVkACAAAAAAlmvfDrZoydUet4eCVMq7z6a58Ea+1HLJOWxN5lNcrWEFcwAgAAAAAEBo5AWZyC41b9ayjWNQSL4iYEAIwR/JG+ssN8bdoK9RBWMAIAAAAAD4FTKJ6CTzKBAyAwZCLUoDEfnZTRZmhF1q/2hnDzmG9gADMgB9AAAABWQAIAAAAABbiLaoxAA6rinMJw1hC8ZUiq6UU1AQaPFn/py/Y06WuQVzACAAAAAAhtDasFkvYE7SCNu1je/hxdE9TJtAvvH3NtdEbKzNbCUFYwAgAAAAAHHy019aPatHTST+0wGsmukUcsQNQj6KpoS9b7iGeThAAAMzAH0AAAAFZAAgAAAAALORWwSr+tYNxcil2KIGSbNhTHvcPbdj+rLVQNx21S/KBXMAIAAAAAD6diZBkPEJ1cQy06LAxdbNK8Nlxbb44fH4Wk3Y3260nQVjACAAAAAAvUc1q7pyhjU0ilgmwiKkHIY3V4/LxO+Y2uT7eSpBOs8AAzQAfQAAAAVkACAAAAAAG5XMK96PjClNlUvg82j4pMY1YxsznZfj4uNweD394FoFcwAgAAAAAKHgQLdGJHkrfFg9nB93Ac+3VgBw6aU44MTkKIQ91dZoBWMAIAAAAACtbNc1DCoUUyzlkrYmJi4NlwOqLYmb6au4pDc8clXVXwADNQB9AAAABWQAIAAAAAA6M++b9I0YFemmWBAWAE3glu2Ah3Ta1FBxAQEIWS0toAVzACAAAAAANXYTqPf1Y6X3Ns6YQIX0C3FKCyWUo+Kk+fNcQvc0WSoFYwAgAAAAAAaqju6Dv8wqXxcsIbP67V1QGaD5kNTFofZ9Zuf1LGnKAAM2AH0AAAAFZAAgAAAAAKl8bV1riH/uyJ+X0HHd3+18k2cJl2dQFXCdoagutFcaBXMAIAAAAABm8F2Ew9f0VOABdcF+lP0Bi+zWvEUPniWgrxPq/Sx3uwVjACAAAAAAQd2pWVqlmmLg8m8xbs7yLewmR0Z6UQgXofbCsMHaGSoAAzcAfQAAAAVkACAAAAAAsYZD8JEP6kYsPncFnNZwJxhu4YtUTKPNcjHtv67H+rYFcwAgAAAAAI4LqZcRkvbs/2F62Flu0pixNcor4WmBD0DHGaf039wLBWMAIAAAAAAqzpfyBpr4Ano+nFWJyyTuIJelJgiRDnMHQqdeqV8JaAADOAB9AAAABWQAIAAAAAC0vdAi+dmoIXvZ5LqUqvyKV9/tHqSI2SWiSJO5pTnA2wVzACAAAAAAS2qvf9fvfVUH5WtsVxjxmskpGjYTQV34LwvQQw1y9wIFYwAgAAAAAFhNY4qwNntyA+GIoNHZsTkIUbPgy4TBlvNnTPjp4bMFAAM5AH0AAAAFZAAgAAAAAH31lb/srBcrOXkzddCwAnclsR5/3QijEVgECs2JjOWBBXMAIAAAAABg7+prDT73YcCvLE5QbuIrqGcjLc5pQD2Miq0d29yrxgVjACAAAAAAFKqAqXG/ktejFQ7fM2aobO2VmEvZLXnRaJH97Jy/sJYAAzEwAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVjACAAAAAAfsofSP7nQHv8ic8ZW0aNlWxplS46Z+mywPR4rQk+wcgAAzExAH0AAAAFZAAgAAAAAFdthRhe2Q8CvxGIhjTJZv0Lk97GkHciTPxZ/mckLoNaBXMAIAAAAAAqOxsAr23LOVB0DIHbPf9UDJJRFXY2YoKbjhRqw5psbQVjACAAAAAA7ty+Nif6KjS3v1zWKaHX9n4Zj3XC4ajuCduKNIYr3l8AAzEyAH0AAAAFZAAgAAAAAMWymwwbvIeMqmnKWWifUqoCxOsdpnonM2qdLPyjqJO/BXMAIAAAAAB6IDmmpUhBD2zpRj8/y/kmOSXcjuIU14sNh6GKSsg2uwVjACAAAAAABSWO0Ii+NGcsHZQ9MR5EjPXVKeXlI4FQ1pcxeKDiuooAAzEzAH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVjACAAAAAAD51NYesiO4Fo7w7iWBfqAFxEqkaDVctpvzZ28nT4SE8AAzE0AH0AAAAFZAAgAAAAAJaRYmo8zqI2BEUzdSwp4tVRpPmVWsfydkYN3UHh6TMuBXMAIAAAAAAeD6mDnQeLlbC9i0sVgE8+RH6y+e94OJQ0tJ0PvblVSgVjACAAAAAAKUhYSt4nvvUfbNgPJ2E79SciVZ0ZzbzoZ2nKr4ewNLsAAzE1AH0AAAAFZAAgAAAAAAZZ538coNPwyRjhEwr5P8Xw32oWOJF+R+nfCGgy2qO3BXMAIAAAAACOPLnJlKwGNPDBReRKnHfteq0wFb3ezhrc7BVXs8RUHwVjACAAAAAAzCICkPZAkfTiD0MUt155dIPgLJ4/e0qFTM2FR0U261YAAzE2AH0AAAAFZAAgAAAAAFH9l9GGA1I52atJV5jNUf1lx8jBjoEoVoME97v5GFJiBXMAIAAAAAC1qH3Kd78Dr9NGbw7y9D/XYBwv5h1LLO8la5OU7g8UkQVjACAAAAAAn27H0Mpwatgc1R/4nXSRjsG2PzB0ol5YR9f3mCb2y/0AAzE3AH0AAAAFZAAgAAAAADtbVEI2tdkrowEMdkacD2w0Y3T3Ofi7PH6HmA6sP0c/BXMAIAAAAADuBSROnZHA+NgUPH8d0LnWFiDsM2bY8bzjC1+elSsIygVjACAAAAAAMinHEu4wkbeOpdZbXQ94q5o5pIEubqXUDrTRYOGmJC0AAzE4AH0AAAAFZAAg
AAAAAAh3WpeMVlikPFYj9hLj+fmIqVt6omCSF75W3TPExyWpBXMAIAAAAAAsQkRmwqeVj2gGE03orb6PtrIzDt6dDU3hgSQi8E2wKgVjACAAAAAAvlZo8Qj3eAdxzZxN5sHKhxi+a9Npj7cZC5+pE6qrOawAAAVlACAAAAAA65pz95EthqQpfoHS9nWvdCh05AV+OokP7GUaI+7j8+wSY20AAAAAAAAAAAAAEHBheWxvYWRJZAAAAAAAEGZpcnN0T3BlcmF0b3IAAQAAAAA=", + "subType": "06" + } + } + } + } + } + ], + "cursor": {}, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDecimal": "0.0" + }, + "max": { + "$numberDecimal": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + } + } + } + }, + "command_name": "aggregate" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": { + "$numberInt": "0" + }, + "encryptedDecimalPrecision": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "5nRutVIyq7URVOVtbE4vM01APSIajAVnsShMwjBlzkM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Dri0CXmL78L2DOgk9w0DwxHOMGMzih7m6l59vgy+WWo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "x7GR49EN0t3WXQDihkrbonK7qNIBYC87tpL/XEUyIYc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JfYUqWF+OoGjiYkRI4L5iPlF+T1Eleul7Fki22jp4Qc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q1RyGfIgsaQHoZFRw+DD28V26rN5hweApPLwExncvT8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "L2PFeKGvLS6C+DLudR6fGlBq3ERPvjWvRyNRIA2HVb0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CWxaNqL3iP1yCixDkcmf9bmW3E5VeN8TJkg1jJe528s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+vC6araOEo+fpW7PSIP40/EnzBCj1d2N10Jr3rrXJJM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6SV63Mf51Z6A6p2X3rCnJKCu6ku3Oeb45mBYbz+IoAo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "RjBYT2h3ZAoHxhf8DU6/dFbDkEBZp0IxREcsRTu2MXs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "b7d8mRzD1kI1tdc7uNL+YAUonJ6pODLsRLkArfEKSkM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Xg8C1/A0KJaXOw4i+26Rv03/CydaaunOzXh0CIT+gn8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "UoKUDw2wJYToUCcFaIs03YQSTksYR0MIOTJllwODqKc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "c/5cwAT0C5jber2xlJnWD3a5tVDy0nRtr5HG02hoFOY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wSUrRXavAGaajNeqC5mEUH1K67oYl5Wy9RNIzKjwLAM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6vrp4wWDtHEgHWR99I70WVDzevg1Fk/Pw5U8gUDa0OU=", + "subType": "00" + } + } + ] + }, + { + "_id": { + "$numberInt": "1" + }, + "encryptedDecimalPrecision": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "bE1vqWj3KNyM7cCYUv/cnYm8BPaUL3eMp5syTHq6NF4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "mVZb+Ra0EYjQ4Zrh9X//E2T8MRj7NMqm5GUJXhRrBEI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "FA74j21GUEJb1DJBOpR9nVnjaDZnd8yAQNuaW9Qi26g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kJv//KVkbrobIBf+QeWC5jxn20mx/P0R1N6aCSMgKM8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zB+Whi9IUUGxfLEe+lGuIzLX4LFbIhaIAm5lRk65QTc=", + 
"subType": "00" + } + }, + { + "$binary": { + "base64": "ybO1QU3CgvhO8JgRXH+HxKszWcpl5aGDYYVa75fHa1g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "X3Y3eSAbbMg//JgiHHiFpYOpV61t8kkDexI+CQyitH4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SlNHXyqVFGDPrX/2ppwog6l4pwj3PKda2TkZbqgfSfA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "McjV8xwTF3xI7863DYOBdyvIv6UpzThl6v9vBRk05bI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "MgwakFvPyBlwqFTbhWUF79URJQWFoJTGotlEVSPPUsQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "DyBERpMSD5lEM5Nhpcn4WGgxgn/mkUVJp+PYSLX5jsE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "I43iazc0xj1WVbYB/V+uTL/tughN1bBlxh1iypBnNsA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wjOBa/ATMuOywFmuPgC0GF/oeLqu0Z7eK5udzkTPbis=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "gRQVwiR+m+0Vg8ZDXqrQQcVnTyobwCXNaA4BCJVXtMc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "WUZ6huwx0ZbLb0R00uiC9FOJzsUocUN8qE5+YRenkvQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "7s79aKEuPgQcS/YPOOVcYNZvHIo7FFsWtFCrnDKXefA=", + "subType": "00" + } + } + ] + } + ] + } + } + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2-Range-DecimalPrecision-Correctness.json b/test/client-side-encryption/spec/legacy/fle2-Range-DecimalPrecision-Correctness.json new file mode 100644 index 0000000000..9fafc243d6 --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2-Range-DecimalPrecision-Correctness.json @@ -0,0 +1,1650 @@ +{ + "runOn": [ + { + "minServerVersion": "6.2.0", + "topology": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "encrypted_fields": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDecimal": "0.0" + }, + "max": { + "$numberDecimal": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + }, + "key_vault_data": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ], + "tests": [ + { + "description": "Find with $gt", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + } + } + }, + { + 
"name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1.0" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDecimalPrecision": { + "$gt": { + "$numberDecimal": "0.0" + } + } + } + }, + "result": [ + { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1.0" + } + } + ] + } + ] + }, + { + "description": "Find with $gte", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1.0" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDecimalPrecision": { + "$gte": { + "$numberDecimal": "0.0" + } + } + }, + "sort": { + "_id": 1 + } + }, + "result": [ + { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + }, + { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1.0" + } + } + ] + } + ] + }, + { + "description": "Find with $gt with no results", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1.0" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDecimalPrecision": { + "$gt": { + "$numberDecimal": "1.0" + } + } + } + }, + "result": [] + } + ] + }, + { + "description": "Find with $lt", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1.0" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDecimalPrecision": { + "$lt": { + "$numberDecimal": "1.0" + } + } + } + }, + "result": [ + { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + } + ] + } + ] + }, + { + "description": "Find with $lte", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + 
"encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1.0" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDecimalPrecision": { + "$lte": { + "$numberDecimal": "1.0" + } + } + }, + "sort": { + "_id": 1 + } + }, + "result": [ + { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + }, + { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1.0" + } + } + ] + } + ] + }, + { + "description": "Find with $lt below min", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1.0" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDecimalPrecision": { + "$lt": { + "$numberDecimal": "0.0" + } + } + } + }, + "result": { + "errorContains": "must be greater than the range minimum" + } + } + ] + }, + { + "description": "Find with $gt above max", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1.0" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDecimalPrecision": { + "$gt": { + "$numberDecimal": "200.0" + } + } + } + }, + "result": { + "errorContains": "must be less than the range max" + } + } + ] + }, + { + "description": "Find with $gt and $lt", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1.0" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDecimalPrecision": { + "$gt": { + "$numberDecimal": "0.0" + }, + "$lt": { + "$numberDecimal": "2.0" + } + } + } + }, + "result": [ + { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1.0" + } + } + ] + } + ] + }, + { + "description": "Find with equality", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": 
"Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1.0" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + } + }, + "result": [ + { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + } + ] + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDecimalPrecision": { + "$numberDecimal": "1.0" + } + } + }, + "result": [ + { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1.0" + } + } + ] + } + ] + }, + { + "description": "Find with full range", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1.0" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDecimalPrecision": { + "$gte": { + "$numberDecimal": "0.0" + }, + "$lte": { + "$numberDecimal": "200.0" + } + } + }, + "sort": { + "_id": 1 + } + }, + "result": [ + { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + }, + { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1.0" + } + } + ] + } + ] + }, + { + "description": "Find with $in", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1.0" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDecimalPrecision": { + "$in": [ + { + "$numberDecimal": "0.0" + } + ] + } + } + }, + "result": [ + { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + } + ] + } + ] + }, + { + "description": "Insert out of range", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "-1" + } + } + }, + "result": { + "errorContains": "value must be greater than or equal to the minimum value" + } + } + ] + }, + { + "description": 
"Insert min and max", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 200, + "encryptedDecimalPrecision": { + "$numberDecimal": "200.0" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": {}, + "sort": { + "_id": 1 + } + }, + "result": [ + { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + }, + { + "_id": 200, + "encryptedDecimalPrecision": { + "$numberDecimal": "200.0" + } + } + ] + } + ] + }, + { + "description": "Aggregate with $gte", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1.0" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDecimalPrecision": { + "$gte": { + "$numberDecimal": "0.0" + } + } + } + }, + { + "$sort": { + "_id": 1 + } + } + ] + }, + "result": [ + { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + }, + { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1.0" + } + } + ] + } + ] + }, + { + "description": "Aggregate with $gt with no results", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1.0" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDecimalPrecision": { + "$gt": { + "$numberDecimal": "1.0" + } + } + } + } + ] + }, + "result": [] + } + ] + }, + { + "description": "Aggregate with $lt", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1.0" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + 
"$match": { + "encryptedDecimalPrecision": { + "$lt": { + "$numberDecimal": "1.0" + } + } + } + } + ] + }, + "result": [ + { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + } + ] + } + ] + }, + { + "description": "Aggregate with $lte", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1.0" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDecimalPrecision": { + "$lte": { + "$numberDecimal": "1.0" + } + } + } + }, + { + "$sort": { + "_id": 1 + } + } + ] + }, + "result": [ + { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + }, + { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1.0" + } + } + ] + } + ] + }, + { + "description": "Aggregate with $lt below min", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1.0" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDecimalPrecision": { + "$lt": { + "$numberDecimal": "0.0" + } + } + } + } + ] + }, + "result": { + "errorContains": "must be greater than the range minimum" + } + } + ] + }, + { + "description": "Aggregate with $gt above max", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1.0" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDecimalPrecision": { + "$gt": { + "$numberDecimal": "200.0" + } + } + } + } + ] + }, + "result": { + "errorContains": "must be less than the range max" + } + } + ] + }, + { + "description": "Aggregate with $gt and $lt", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + 
"encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1.0" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDecimalPrecision": { + "$gt": { + "$numberDecimal": "0.0" + }, + "$lt": { + "$numberDecimal": "2.0" + } + } + } + } + ] + }, + "result": [ + { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1.0" + } + } + ] + } + ] + }, + { + "description": "Aggregate with equality", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1.0" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + } + } + ] + }, + "result": [ + { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + } + ] + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDecimalPrecision": { + "$numberDecimal": "1.0" + } + } + } + ] + }, + "result": [ + { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1.0" + } + } + ] + } + ] + }, + { + "description": "Aggregate with full range", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1.0" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDecimalPrecision": { + "$gte": { + "$numberDecimal": "0.0" + }, + "$lte": { + "$numberDecimal": "200.0" + } + } + } + }, + { + "$sort": { + "_id": 1 + } + } + ] + }, + "result": [ + { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + }, + { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1.0" + } + } + ] + } + ] + }, + { + "description": "Aggregate with $in", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1.0" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ 
+ { + "$match": { + "encryptedDecimalPrecision": { + "$in": [ + { + "$numberDecimal": "0.0" + } + ] + } + } + } + ] + }, + "result": [ + { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + } + ] + } + ] + }, + { + "description": "Wrong type: Insert Int", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberInt": "0" + } + } + }, + "result": { + "errorContains": "cannot encrypt element" + } + } + ] + }, + { + "description": "Wrong type: Find Int", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "find", + "arguments": { + "filter": { + "encryptedDecimalPrecision": { + "$gte": { + "$numberInt": "0" + } + } + }, + "sort": { + "_id": 1 + } + }, + "result": { + "errorContains": "field type is not supported" + } + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2-Range-DecimalPrecision-Delete.json b/test/client-side-encryption/spec/legacy/fle2-Range-DecimalPrecision-Delete.json new file mode 100644 index 0000000000..3d7d359af6 --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2-Range-DecimalPrecision-Delete.json @@ -0,0 +1,493 @@ +{ + "runOn": [ + { + "minServerVersion": "6.2.0", + "topology": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "encrypted_fields": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDecimal": "0.0" + }, + "max": { + "$numberDecimal": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + }, + "key_vault_data": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ], + "tests": [ + { + "description": "FLE2 Range DecimalPrecision. 
Delete.", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1" + } + } + } + }, + { + "name": "deleteOne", + "arguments": { + "filter": { + "encryptedDecimalPrecision": { + "$gt": { + "$numberDecimal": "0" + } + } + } + }, + "result": { + "deletedCount": 1 + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedDecimalPrecision": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDecimal": "0.0" + }, + "max": { + "$numberDecimal": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedDecimalPrecision": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDecimal": "0.0" + }, + "max": { + "$numberDecimal": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "delete": "default", + "deletes": [ + { + "q": { + "encryptedDecimalPrecision": { + "$gt": { + "$binary": { + "base64": 
"CvoJAAADcGF5bG9hZADKCQAABGcAjgkAAAMwAH0AAAAFZAAgAAAAAHdJ2Vnb4MMzqVYVssjSdDy8XU4GVzMTfGifGETgQ2mYBXMAIAAAAAD7cFfKJGIXo6PjyeX2ria02CckW7dWFDoY/3FyBdm1NQVjACAAAAAAqZO+/+gRWlPaMOvuiXizSmBe7lp1VWg1vJ4UmW8o3bQAAzEAfQAAAAVkACAAAAAAlmvfDrZoydUet4eCVMq7z6a58Ea+1HLJOWxN5lNcrWEFcwAgAAAAAEBo5AWZyC41b9ayjWNQSL4iYEAIwR/JG+ssN8bdoK9RBWMAIAAAAAD4FTKJ6CTzKBAyAwZCLUoDEfnZTRZmhF1q/2hnDzmG9gADMgB9AAAABWQAIAAAAABbiLaoxAA6rinMJw1hC8ZUiq6UU1AQaPFn/py/Y06WuQVzACAAAAAAhtDasFkvYE7SCNu1je/hxdE9TJtAvvH3NtdEbKzNbCUFYwAgAAAAAHHy019aPatHTST+0wGsmukUcsQNQj6KpoS9b7iGeThAAAMzAH0AAAAFZAAgAAAAALORWwSr+tYNxcil2KIGSbNhTHvcPbdj+rLVQNx21S/KBXMAIAAAAAD6diZBkPEJ1cQy06LAxdbNK8Nlxbb44fH4Wk3Y3260nQVjACAAAAAAvUc1q7pyhjU0ilgmwiKkHIY3V4/LxO+Y2uT7eSpBOs8AAzQAfQAAAAVkACAAAAAAG5XMK96PjClNlUvg82j4pMY1YxsznZfj4uNweD394FoFcwAgAAAAAKHgQLdGJHkrfFg9nB93Ac+3VgBw6aU44MTkKIQ91dZoBWMAIAAAAACtbNc1DCoUUyzlkrYmJi4NlwOqLYmb6au4pDc8clXVXwADNQB9AAAABWQAIAAAAAA6M++b9I0YFemmWBAWAE3glu2Ah3Ta1FBxAQEIWS0toAVzACAAAAAANXYTqPf1Y6X3Ns6YQIX0C3FKCyWUo+Kk+fNcQvc0WSoFYwAgAAAAAAaqju6Dv8wqXxcsIbP67V1QGaD5kNTFofZ9Zuf1LGnKAAM2AH0AAAAFZAAgAAAAAKl8bV1riH/uyJ+X0HHd3+18k2cJl2dQFXCdoagutFcaBXMAIAAAAABm8F2Ew9f0VOABdcF+lP0Bi+zWvEUPniWgrxPq/Sx3uwVjACAAAAAAQd2pWVqlmmLg8m8xbs7yLewmR0Z6UQgXofbCsMHaGSoAAzcAfQAAAAVkACAAAAAAsYZD8JEP6kYsPncFnNZwJxhu4YtUTKPNcjHtv67H+rYFcwAgAAAAAI4LqZcRkvbs/2F62Flu0pixNcor4WmBD0DHGaf039wLBWMAIAAAAAAqzpfyBpr4Ano+nFWJyyTuIJelJgiRDnMHQqdeqV8JaAADOAB9AAAABWQAIAAAAAC0vdAi+dmoIXvZ5LqUqvyKV9/tHqSI2SWiSJO5pTnA2wVzACAAAAAAS2qvf9fvfVUH5WtsVxjxmskpGjYTQV34LwvQQw1y9wIFYwAgAAAAAFhNY4qwNntyA+GIoNHZsTkIUbPgy4TBlvNnTPjp4bMFAAM5AH0AAAAFZAAgAAAAAH31lb/srBcrOXkzddCwAnclsR5/3QijEVgECs2JjOWBBXMAIAAAAABg7+prDT73YcCvLE5QbuIrqGcjLc5pQD2Miq0d29yrxgVjACAAAAAAFKqAqXG/ktejFQ7fM2aobO2VmEvZLXnRaJH97Jy/sJYAAzEwAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVjACAAAAAAfsofSP7nQHv8ic8ZW0aNlWxplS46Z+mywPR4rQk+wcgAAzExAH0AAAAFZAAgAAAAAFdthRhe2Q8CvxGIhjTJZv0Lk97GkHciTPxZ/mckLoNaBXMAIAAAAAAqOxsAr23LOVB0DIHbPf9UDJJRFXY2YoKbjhRqw5psbQVjACAAAAAA7ty+Nif6KjS3v1zWKaHX9n4Zj3XC4ajuCduKNIYr3l8AAzEyAH0AAAAFZAAgAAAAAMWymwwbvIeMqmnKWWifUqoCxOsdpnonM2qdLPyjqJO/BXMAIAAAAAB6IDmmpUhBD2zpRj8/y/kmOSXcjuIU14sNh6GKSsg2uwVjACAAAAAABSWO0Ii+NGcsHZQ9MR5EjPXVKeXlI4FQ1pcxeKDiuooAAzEzAH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVjACAAAAAAD51NYesiO4Fo7w7iWBfqAFxEqkaDVctpvzZ28nT4SE8AAzE0AH0AAAAFZAAgAAAAAJaRYmo8zqI2BEUzdSwp4tVRpPmVWsfydkYN3UHh6TMuBXMAIAAAAAAeD6mDnQeLlbC9i0sVgE8+RH6y+e94OJQ0tJ0PvblVSgVjACAAAAAAKUhYSt4nvvUfbNgPJ2E79SciVZ0ZzbzoZ2nKr4ewNLsAAzE1AH0AAAAFZAAgAAAAAAZZ538coNPwyRjhEwr5P8Xw32oWOJF+R+nfCGgy2qO3BXMAIAAAAACOPLnJlKwGNPDBReRKnHfteq0wFb3ezhrc7BVXs8RUHwVjACAAAAAAzCICkPZAkfTiD0MUt155dIPgLJ4/e0qFTM2FR0U261YAAzE2AH0AAAAFZAAgAAAAAFH9l9GGA1I52atJV5jNUf1lx8jBjoEoVoME97v5GFJiBXMAIAAAAAC1qH3Kd78Dr9NGbw7y9D/XYBwv5h1LLO8la5OU7g8UkQVjACAAAAAAn27H0Mpwatgc1R/4nXSRjsG2PzB0ol5YR9f3mCb2y/0AAzE3AH0AAAAFZAAgAAAAADtbVEI2tdkrowEMdkacD2w0Y3T3Ofi7PH6HmA6sP0c/BXMAIAAAAADuBSROnZHA+NgUPH8d0LnWFiDsM2bY8bzjC1+elSsIygVjACAAAAAAMinHEu4wkbeOpdZbXQ94q5o5pIEubqXUDrTRYOGmJC0AAzE4AH0AAAAFZAAgAAAAAAh3WpeMVlikPFYj9hLj+fmIqVt6omCSF75W3TPExyWpBXMAIAAAAAAsQkRmwqeVj2gGE03orb6PtrIzDt6dDU3hgSQi8E2wKgVjACAAAAAAvlZo8Qj3eAdxzZxN5sHKhxi+a9Npj7cZC5+pE6qrOawAAAVlACAAAAAA65pz95EthqQpfoHS9nWvdCh05AV+OokP7GUaI+7j8+wSY20AAAAAAAAAAAAAEHBheWxvYWRJZAAAAAAAEGZpcnN0T3BlcmF0b3IAAQAAAAA=", + "subType": "06" + } + } + } + }, + "limit": 1 + } + ], + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + 
"escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDecimal": "0.0" + }, + "max": { + "$numberDecimal": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + } + }, + "deleteTokens": { + "default.default": { + "encryptedDecimalPrecision": { + "e": { + "$binary": { + "base64": "65pz95EthqQpfoHS9nWvdCh05AV+OokP7GUaI+7j8+w=", + "subType": "00" + } + }, + "o": { + "$binary": { + "base64": "noN+05JsuO1oDg59yypIGj45i+eFH6HOTXOPpeZ//Mk=", + "subType": "00" + } + } + } + } + } + } + }, + "command_name": "delete" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": { + "$numberInt": "0" + }, + "encryptedDecimalPrecision": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "5nRutVIyq7URVOVtbE4vM01APSIajAVnsShMwjBlzkM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Dri0CXmL78L2DOgk9w0DwxHOMGMzih7m6l59vgy+WWo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "x7GR49EN0t3WXQDihkrbonK7qNIBYC87tpL/XEUyIYc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JfYUqWF+OoGjiYkRI4L5iPlF+T1Eleul7Fki22jp4Qc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q1RyGfIgsaQHoZFRw+DD28V26rN5hweApPLwExncvT8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "L2PFeKGvLS6C+DLudR6fGlBq3ERPvjWvRyNRIA2HVb0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CWxaNqL3iP1yCixDkcmf9bmW3E5VeN8TJkg1jJe528s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+vC6araOEo+fpW7PSIP40/EnzBCj1d2N10Jr3rrXJJM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6SV63Mf51Z6A6p2X3rCnJKCu6ku3Oeb45mBYbz+IoAo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "RjBYT2h3ZAoHxhf8DU6/dFbDkEBZp0IxREcsRTu2MXs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "b7d8mRzD1kI1tdc7uNL+YAUonJ6pODLsRLkArfEKSkM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Xg8C1/A0KJaXOw4i+26Rv03/CydaaunOzXh0CIT+gn8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "UoKUDw2wJYToUCcFaIs03YQSTksYR0MIOTJllwODqKc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "c/5cwAT0C5jber2xlJnWD3a5tVDy0nRtr5HG02hoFOY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wSUrRXavAGaajNeqC5mEUH1K67oYl5Wy9RNIzKjwLAM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6vrp4wWDtHEgHWR99I70WVDzevg1Fk/Pw5U8gUDa0OU=", + "subType": "00" + } + } + ] + } + ] + } + } + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2-Range-DecimalPrecision-FindOneAndUpdate.json b/test/client-side-encryption/spec/legacy/fle2-Range-DecimalPrecision-FindOneAndUpdate.json new file mode 100644 index 0000000000..b1442c3a3c --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2-Range-DecimalPrecision-FindOneAndUpdate.json @@ -0,0 +1,612 @@ +{ + "runOn": [ + { + "minServerVersion": "6.2.0", + "topology": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "encrypted_fields": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": 
"enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDecimal": "0.0" + }, + "max": { + "$numberDecimal": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + }, + "key_vault_data": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ], + "tests": [ + { + "description": "FLE2 Range DecimalPrecision. FindOneAndUpdate.", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1" + } + } + } + }, + { + "name": "findOneAndUpdate", + "arguments": { + "filter": { + "encryptedDecimalPrecision": { + "$gt": { + "$numberDecimal": "0" + } + } + }, + "update": { + "$set": { + "encryptedDecimalPrecision": { + "$numberDecimal": "2" + } + } + }, + "returnDocument": "Before" + }, + "result": { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1" + } + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedDecimalPrecision": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDecimal": "0.0" + }, + "max": { + "$numberDecimal": "200.0" + }, + "precision": { 
+ "$numberInt": "2" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedDecimalPrecision": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDecimal": "0.0" + }, + "max": { + "$numberDecimal": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "findAndModify": "default", + "query": { + "encryptedDecimalPrecision": { + "$gt": { + "$binary": { + "base64": "CvoJAAADcGF5bG9hZADKCQAABGcAjgkAAAMwAH0AAAAFZAAgAAAAAHdJ2Vnb4MMzqVYVssjSdDy8XU4GVzMTfGifGETgQ2mYBXMAIAAAAAD7cFfKJGIXo6PjyeX2ria02CckW7dWFDoY/3FyBdm1NQVjACAAAAAAqZO+/+gRWlPaMOvuiXizSmBe7lp1VWg1vJ4UmW8o3bQAAzEAfQAAAAVkACAAAAAAlmvfDrZoydUet4eCVMq7z6a58Ea+1HLJOWxN5lNcrWEFcwAgAAAAAEBo5AWZyC41b9ayjWNQSL4iYEAIwR/JG+ssN8bdoK9RBWMAIAAAAAD4FTKJ6CTzKBAyAwZCLUoDEfnZTRZmhF1q/2hnDzmG9gADMgB9AAAABWQAIAAAAABbiLaoxAA6rinMJw1hC8ZUiq6UU1AQaPFn/py/Y06WuQVzACAAAAAAhtDasFkvYE7SCNu1je/hxdE9TJtAvvH3NtdEbKzNbCUFYwAgAAAAAHHy019aPatHTST+0wGsmukUcsQNQj6KpoS9b7iGeThAAAMzAH0AAAAFZAAgAAAAALORWwSr+tYNxcil2KIGSbNhTHvcPbdj+rLVQNx21S/KBXMAIAAAAAD6diZBkPEJ1cQy06LAxdbNK8Nlxbb44fH4Wk3Y3260nQVjACAAAAAAvUc1q7pyhjU0ilgmwiKkHIY3V4/LxO+Y2uT7eSpBOs8AAzQAfQAAAAVkACAAAAAAG5XMK96PjClNlUvg82j4pMY1YxsznZfj4uNweD394FoFcwAgAAAAAKHgQLdGJHkrfFg9nB93Ac+3VgBw6aU44MTkKIQ91dZoBWMAIAAAAACtbNc1DCoUUyzlkrYmJi4NlwOqLYmb6au4pDc8clXVXwADNQB9AAAABWQAIAAAAAA6M++b9I0YFemmWBAWAE3glu2Ah3Ta1FBxAQEIWS0toAVzACAAAAAANXYTqPf1Y6X3Ns6YQIX0C3FKCyWUo+Kk+fNcQvc0WSoFYwAgAAAAAAaqju6Dv8wqXxcsIbP67V1QGaD5kNTFofZ9Zuf1LGnKAAM2AH0AAAAFZAAgAAAAAKl8bV1riH/uyJ+X0HHd3+18k2cJl2dQFXCdoagutFcaBXMAIAAAAABm8F2Ew9f0VOABdcF+lP0Bi+zWvEUPniWgrxPq/Sx3uwVjACAAAAAAQd2pWVqlmmLg8m8xbs7yLewmR0Z6UQgXofbCsMHaGSoAAzcAfQAAAAVkACAAAAAAsYZD8JEP6kYsPncFnNZwJxhu4YtUTKPNcjHtv67H+rYFcwAgAAAAAI4LqZcRkvbs/2F62Flu0pixNcor4WmBD0DHGaf039wLBWMAIAAAAAAqzpfyBpr4Ano+nFWJyyTuIJelJgiRDnMHQqdeqV8JaAADOAB9AAAABWQAIAAAAAC0vdAi+dmoIXvZ5LqUqvyKV9/tHqSI2SWiSJO5pTnA2wVzACAAAAAAS2qvf9fvfVUH5WtsVxjxmskpGjYTQV34LwvQQw1y9wIFYwAgAAAAAFhNY4qwNntyA+GIoNHZsTkIUbPgy4TBlvNnTPjp4bMFAAM5AH0AAAAFZAAgAAAAAH31lb/srBcrOXkzddCwAnclsR5/3QijEVgECs2JjOWBBXMAIAAAAABg7+prDT73YcCvLE5QbuIrqGcjLc5pQD2Miq0d29yrxgVjACAAAAAAFKqAqXG/ktejFQ7fM2aobO2VmEvZLXnRaJH97Jy/sJYAAzEwAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVjACAAAAAAfsofSP7nQHv8ic8ZW0aNlWxplS46Z+mywPR4rQk+wcgAAzExAH0AAAAFZAAgAAAAAFdthRhe2Q8CvxGIhjTJZv0Lk97GkHciTPxZ/mckLoNaBXMAIAAAAAAqOxsAr23LOVB0DIHbPf9UDJJRFXY2YoKbjhRqw5psbQVjACAAAAAA7ty+Nif6KjS3v1zWKaHX9n4Zj3XC4ajuCduKNIYr3l8AAzEyAH0AAAAFZAAgAAAAAMWymwwbvIeMqmnKWWifUqoCxOsdpnonM2qdLPyjqJO/BXMAIAAAAAB6IDmmpUhBD2zpRj8/y/kmOSXcjuIU14sNh6GKSsg2uwVjACAAAAAABSWO0Ii+NGcsHZQ9MR5EjPXVKeXlI4FQ1pcxeKDiuooAAzEzAH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVjACAAAAAAD51NYesiO4Fo7w7iWBfq
AFxEqkaDVctpvzZ28nT4SE8AAzE0AH0AAAAFZAAgAAAAAJaRYmo8zqI2BEUzdSwp4tVRpPmVWsfydkYN3UHh6TMuBXMAIAAAAAAeD6mDnQeLlbC9i0sVgE8+RH6y+e94OJQ0tJ0PvblVSgVjACAAAAAAKUhYSt4nvvUfbNgPJ2E79SciVZ0ZzbzoZ2nKr4ewNLsAAzE1AH0AAAAFZAAgAAAAAAZZ538coNPwyRjhEwr5P8Xw32oWOJF+R+nfCGgy2qO3BXMAIAAAAACOPLnJlKwGNPDBReRKnHfteq0wFb3ezhrc7BVXs8RUHwVjACAAAAAAzCICkPZAkfTiD0MUt155dIPgLJ4/e0qFTM2FR0U261YAAzE2AH0AAAAFZAAgAAAAAFH9l9GGA1I52atJV5jNUf1lx8jBjoEoVoME97v5GFJiBXMAIAAAAAC1qH3Kd78Dr9NGbw7y9D/XYBwv5h1LLO8la5OU7g8UkQVjACAAAAAAn27H0Mpwatgc1R/4nXSRjsG2PzB0ol5YR9f3mCb2y/0AAzE3AH0AAAAFZAAgAAAAADtbVEI2tdkrowEMdkacD2w0Y3T3Ofi7PH6HmA6sP0c/BXMAIAAAAADuBSROnZHA+NgUPH8d0LnWFiDsM2bY8bzjC1+elSsIygVjACAAAAAAMinHEu4wkbeOpdZbXQ94q5o5pIEubqXUDrTRYOGmJC0AAzE4AH0AAAAFZAAgAAAAAAh3WpeMVlikPFYj9hLj+fmIqVt6omCSF75W3TPExyWpBXMAIAAAAAAsQkRmwqeVj2gGE03orb6PtrIzDt6dDU3hgSQi8E2wKgVjACAAAAAAvlZo8Qj3eAdxzZxN5sHKhxi+a9Npj7cZC5+pE6qrOawAAAVlACAAAAAA65pz95EthqQpfoHS9nWvdCh05AV+OokP7GUaI+7j8+wSY20AAAAAAAAAAAAAEHBheWxvYWRJZAAAAAAAEGZpcnN0T3BlcmF0b3IAAQAAAAA=", + "subType": "06" + } + } + } + }, + "update": { + "$set": { + "encryptedDecimalPrecision": { + "$$type": "binData" + } + } + }, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDecimal": "0.0" + }, + "max": { + "$numberDecimal": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + } + }, + "deleteTokens": { + "default.default": { + "encryptedDecimalPrecision": { + "e": { + "$binary": { + "base64": "65pz95EthqQpfoHS9nWvdCh05AV+OokP7GUaI+7j8+w=", + "subType": "00" + } + }, + "o": { + "$binary": { + "base64": "noN+05JsuO1oDg59yypIGj45i+eFH6HOTXOPpeZ//Mk=", + "subType": "00" + } + } + } + } + } + } + }, + "command_name": "findAndModify" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": { + "$numberInt": "0" + }, + "encryptedDecimalPrecision": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "5nRutVIyq7URVOVtbE4vM01APSIajAVnsShMwjBlzkM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Dri0CXmL78L2DOgk9w0DwxHOMGMzih7m6l59vgy+WWo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "x7GR49EN0t3WXQDihkrbonK7qNIBYC87tpL/XEUyIYc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JfYUqWF+OoGjiYkRI4L5iPlF+T1Eleul7Fki22jp4Qc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q1RyGfIgsaQHoZFRw+DD28V26rN5hweApPLwExncvT8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "L2PFeKGvLS6C+DLudR6fGlBq3ERPvjWvRyNRIA2HVb0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CWxaNqL3iP1yCixDkcmf9bmW3E5VeN8TJkg1jJe528s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+vC6araOEo+fpW7PSIP40/EnzBCj1d2N10Jr3rrXJJM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6SV63Mf51Z6A6p2X3rCnJKCu6ku3Oeb45mBYbz+IoAo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "RjBYT2h3ZAoHxhf8DU6/dFbDkEBZp0IxREcsRTu2MXs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "b7d8mRzD1kI1tdc7uNL+YAUonJ6pODLsRLkArfEKSkM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": 
"Xg8C1/A0KJaXOw4i+26Rv03/CydaaunOzXh0CIT+gn8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "UoKUDw2wJYToUCcFaIs03YQSTksYR0MIOTJllwODqKc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "c/5cwAT0C5jber2xlJnWD3a5tVDy0nRtr5HG02hoFOY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wSUrRXavAGaajNeqC5mEUH1K67oYl5Wy9RNIzKjwLAM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6vrp4wWDtHEgHWR99I70WVDzevg1Fk/Pw5U8gUDa0OU=", + "subType": "00" + } + } + ] + }, + { + "_id": { + "$numberInt": "1" + }, + "encryptedDecimalPrecision": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "DLCAJs+W2PL2DV5YChCL6dYrQNr+j4p3L7xhVaub4ic=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "V6knyt7Zq2CG3++l75UtBx2m32iGAPjHiAe439Bf02w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "F08nMDWDZc+DbWM7XCEJNNCEYyinRmrvGP7EWhmp4is=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cXH4688amcDc8kZOJq4UP8cE3R58Zl7e+Qo/1jyspps=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "uURBxvTp3FBCVkd+LPqyuY7d6rMW6SGIJQEPY/wtkZI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "jG3hax1L3RBp9t38vUt53FsBxgr/+Si/vVISpAylYpE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kwtIW8MhH9Ky5xNjBx8gFA/SHh2YVphie7g5FGBzals=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "FHflwFuEMu4xX0ZApHi+pdlBH+oevAtXckCUb5Wv0xU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "0OKSXELxPP85SBVwDGf3LtMEQCJ8TTkFUl/+6jlkdb0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "25j9sQXZCihCmHKvTHgaBsAVZFcGPn7JjHdrCGlwyyw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "uEw0lpQtBppR3vqV9j9+NQRSBF1BzZukb8c9IhyWvxc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zVhZ7Q59O087ji49oMJvBIgeir2oqvUpnh4p53GcTow=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dowrzKs+qJhRMZyKDbhjXbuX43FbmUKOaw9I8YlOZDw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ep5B6cska6THLIF7Mn3tn3RvV9EiwLSt0eZM/CLRUDc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "URNp/YmmDh5wIZUfAzzgPyJeMNiVx9PMsz52DZRujGY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wlM4IAQhhKQEzoVqS8b1Ddd50GB95OFb9LnzOwyjCP4=", + "subType": "00" + } + } + ] + } + ] + } + } + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2-Range-DecimalPrecision-InsertFind.json b/test/client-side-encryption/spec/legacy/fle2-Range-DecimalPrecision-InsertFind.json new file mode 100644 index 0000000000..3b8202ff87 --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2-Range-DecimalPrecision-InsertFind.json @@ -0,0 +1,577 @@ +{ + "runOn": [ + { + "minServerVersion": "6.2.0", + "topology": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "encrypted_fields": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDecimal": "0.0" + }, + "max": { + "$numberDecimal": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + }, + "key_vault_data": [ + { + "_id": 
{ + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ], + "tests": [ + { + "description": "FLE2 Range DecimalPrecision. Insert and Find.", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDecimalPrecision": { + "$gt": { + "$numberDecimal": "0" + } + } + } + }, + "result": [ + { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1" + } + } + ] + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedDecimalPrecision": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDecimal": "0.0" + }, + "max": { + "$numberDecimal": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedDecimalPrecision": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalPrecision", + 
"bsonType": "decimal", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDecimal": "0.0" + }, + "max": { + "$numberDecimal": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "find": "default", + "filter": { + "encryptedDecimalPrecision": { + "$gt": { + "$binary": { + "base64": "CvoJAAADcGF5bG9hZADKCQAABGcAjgkAAAMwAH0AAAAFZAAgAAAAAHdJ2Vnb4MMzqVYVssjSdDy8XU4GVzMTfGifGETgQ2mYBXMAIAAAAAD7cFfKJGIXo6PjyeX2ria02CckW7dWFDoY/3FyBdm1NQVjACAAAAAAqZO+/+gRWlPaMOvuiXizSmBe7lp1VWg1vJ4UmW8o3bQAAzEAfQAAAAVkACAAAAAAlmvfDrZoydUet4eCVMq7z6a58Ea+1HLJOWxN5lNcrWEFcwAgAAAAAEBo5AWZyC41b9ayjWNQSL4iYEAIwR/JG+ssN8bdoK9RBWMAIAAAAAD4FTKJ6CTzKBAyAwZCLUoDEfnZTRZmhF1q/2hnDzmG9gADMgB9AAAABWQAIAAAAABbiLaoxAA6rinMJw1hC8ZUiq6UU1AQaPFn/py/Y06WuQVzACAAAAAAhtDasFkvYE7SCNu1je/hxdE9TJtAvvH3NtdEbKzNbCUFYwAgAAAAAHHy019aPatHTST+0wGsmukUcsQNQj6KpoS9b7iGeThAAAMzAH0AAAAFZAAgAAAAALORWwSr+tYNxcil2KIGSbNhTHvcPbdj+rLVQNx21S/KBXMAIAAAAAD6diZBkPEJ1cQy06LAxdbNK8Nlxbb44fH4Wk3Y3260nQVjACAAAAAAvUc1q7pyhjU0ilgmwiKkHIY3V4/LxO+Y2uT7eSpBOs8AAzQAfQAAAAVkACAAAAAAG5XMK96PjClNlUvg82j4pMY1YxsznZfj4uNweD394FoFcwAgAAAAAKHgQLdGJHkrfFg9nB93Ac+3VgBw6aU44MTkKIQ91dZoBWMAIAAAAACtbNc1DCoUUyzlkrYmJi4NlwOqLYmb6au4pDc8clXVXwADNQB9AAAABWQAIAAAAAA6M++b9I0YFemmWBAWAE3glu2Ah3Ta1FBxAQEIWS0toAVzACAAAAAANXYTqPf1Y6X3Ns6YQIX0C3FKCyWUo+Kk+fNcQvc0WSoFYwAgAAAAAAaqju6Dv8wqXxcsIbP67V1QGaD5kNTFofZ9Zuf1LGnKAAM2AH0AAAAFZAAgAAAAAKl8bV1riH/uyJ+X0HHd3+18k2cJl2dQFXCdoagutFcaBXMAIAAAAABm8F2Ew9f0VOABdcF+lP0Bi+zWvEUPniWgrxPq/Sx3uwVjACAAAAAAQd2pWVqlmmLg8m8xbs7yLewmR0Z6UQgXofbCsMHaGSoAAzcAfQAAAAVkACAAAAAAsYZD8JEP6kYsPncFnNZwJxhu4YtUTKPNcjHtv67H+rYFcwAgAAAAAI4LqZcRkvbs/2F62Flu0pixNcor4WmBD0DHGaf039wLBWMAIAAAAAAqzpfyBpr4Ano+nFWJyyTuIJelJgiRDnMHQqdeqV8JaAADOAB9AAAABWQAIAAAAAC0vdAi+dmoIXvZ5LqUqvyKV9/tHqSI2SWiSJO5pTnA2wVzACAAAAAAS2qvf9fvfVUH5WtsVxjxmskpGjYTQV34LwvQQw1y9wIFYwAgAAAAAFhNY4qwNntyA+GIoNHZsTkIUbPgy4TBlvNnTPjp4bMFAAM5AH0AAAAFZAAgAAAAAH31lb/srBcrOXkzddCwAnclsR5/3QijEVgECs2JjOWBBXMAIAAAAABg7+prDT73YcCvLE5QbuIrqGcjLc5pQD2Miq0d29yrxgVjACAAAAAAFKqAqXG/ktejFQ7fM2aobO2VmEvZLXnRaJH97Jy/sJYAAzEwAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVjACAAAAAAfsofSP7nQHv8ic8ZW0aNlWxplS46Z+mywPR4rQk+wcgAAzExAH0AAAAFZAAgAAAAAFdthRhe2Q8CvxGIhjTJZv0Lk97GkHciTPxZ/mckLoNaBXMAIAAAAAAqOxsAr23LOVB0DIHbPf9UDJJRFXY2YoKbjhRqw5psbQVjACAAAAAA7ty+Nif6KjS3v1zWKaHX9n4Zj3XC4ajuCduKNIYr3l8AAzEyAH0AAAAFZAAgAAAAAMWymwwbvIeMqmnKWWifUqoCxOsdpnonM2qdLPyjqJO/BXMAIAAAAAB6IDmmpUhBD2zpRj8/y/kmOSXcjuIU14sNh6GKSsg2uwVjACAAAAAABSWO0Ii+NGcsHZQ9MR5EjPXVKeXlI4FQ1pcxeKDiuooAAzEzAH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVjACAAAAAAD51NYesiO4Fo7w7iWBfqAFxEqkaDVctpvzZ28nT4SE8AAzE0AH0AAAAFZAAgAAAAAJaRYmo8zqI2BEUzdSwp4tVRpPmVWsfydkYN3UHh6TMuBXMAIAAAAAAeD6mDnQeLlbC9i0sVgE8+RH6y+e94OJQ0tJ0PvblVSgVjACAAAAAAKUhYSt4nvvUfbNgPJ2E79SciVZ0ZzbzoZ2nKr4ewNLsAAzE1AH0AAAAFZAAgAAAAAAZZ538coNPwyRjhEwr5P8Xw32oWOJF+R+nfCGgy2qO3BXMAIAAAAACOPLnJlKwGNPDBReRKnHfteq0wFb3ezhrc7BVXs8RUHwVjACAAAAAAzCICkPZAkfTiD0MUt155dIPgLJ4/e0qFTM2FR0U261YAAzE2AH0AAAAFZAAgAAAAAFH9l9GGA1I52atJV5jNUf1lx8jBjoEoVoME97v5GFJiBXMAIAAAAAC1qH3Kd78Dr9NGbw7y9D/XYBwv5h1LLO8la5OU7g8UkQVjACAAAAAAn27H0Mpwatgc1R/4nXSRjsG2PzB0ol5YR9f3mCb2y/0AAzE3AH0AAAAFZAAgAAAAADtbVEI2tdkrowEMdkacD2w0Y3T3Ofi7PH6HmA6sP0c/BXMAIAAAAADuBSROnZHA+NgUPH8d0LnWFiDsM2bY8bzjC
1+elSsIygVjACAAAAAAMinHEu4wkbeOpdZbXQ94q5o5pIEubqXUDrTRYOGmJC0AAzE4AH0AAAAFZAAgAAAAAAh3WpeMVlikPFYj9hLj+fmIqVt6omCSF75W3TPExyWpBXMAIAAAAAAsQkRmwqeVj2gGE03orb6PtrIzDt6dDU3hgSQi8E2wKgVjACAAAAAAvlZo8Qj3eAdxzZxN5sHKhxi+a9Npj7cZC5+pE6qrOawAAAVlACAAAAAA65pz95EthqQpfoHS9nWvdCh05AV+OokP7GUaI+7j8+wSY20AAAAAAAAAAAAAEHBheWxvYWRJZAAAAAAAEGZpcnN0T3BlcmF0b3IAAQAAAAA=", + "subType": "06" + } + } + } + }, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDecimal": "0.0" + }, + "max": { + "$numberDecimal": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + } + } + } + }, + "command_name": "find" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 0, + "encryptedDecimalPrecision": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "5nRutVIyq7URVOVtbE4vM01APSIajAVnsShMwjBlzkM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Dri0CXmL78L2DOgk9w0DwxHOMGMzih7m6l59vgy+WWo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "x7GR49EN0t3WXQDihkrbonK7qNIBYC87tpL/XEUyIYc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JfYUqWF+OoGjiYkRI4L5iPlF+T1Eleul7Fki22jp4Qc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q1RyGfIgsaQHoZFRw+DD28V26rN5hweApPLwExncvT8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "L2PFeKGvLS6C+DLudR6fGlBq3ERPvjWvRyNRIA2HVb0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CWxaNqL3iP1yCixDkcmf9bmW3E5VeN8TJkg1jJe528s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+vC6araOEo+fpW7PSIP40/EnzBCj1d2N10Jr3rrXJJM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6SV63Mf51Z6A6p2X3rCnJKCu6ku3Oeb45mBYbz+IoAo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "RjBYT2h3ZAoHxhf8DU6/dFbDkEBZp0IxREcsRTu2MXs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "b7d8mRzD1kI1tdc7uNL+YAUonJ6pODLsRLkArfEKSkM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Xg8C1/A0KJaXOw4i+26Rv03/CydaaunOzXh0CIT+gn8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "UoKUDw2wJYToUCcFaIs03YQSTksYR0MIOTJllwODqKc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "c/5cwAT0C5jber2xlJnWD3a5tVDy0nRtr5HG02hoFOY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wSUrRXavAGaajNeqC5mEUH1K67oYl5Wy9RNIzKjwLAM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6vrp4wWDtHEgHWR99I70WVDzevg1Fk/Pw5U8gUDa0OU=", + "subType": "00" + } + } + ] + }, + { + "_id": 1, + "encryptedDecimalPrecision": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "bE1vqWj3KNyM7cCYUv/cnYm8BPaUL3eMp5syTHq6NF4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "mVZb+Ra0EYjQ4Zrh9X//E2T8MRj7NMqm5GUJXhRrBEI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "FA74j21GUEJb1DJBOpR9nVnjaDZnd8yAQNuaW9Qi26g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kJv//KVkbrobIBf+QeWC5jxn20mx/P0R1N6aCSMgKM8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zB+Whi9IUUGxfLEe+lGuIzLX4LFbIhaIAm5lRk65QTc=", + 
"subType": "00" + } + }, + { + "$binary": { + "base64": "ybO1QU3CgvhO8JgRXH+HxKszWcpl5aGDYYVa75fHa1g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "X3Y3eSAbbMg//JgiHHiFpYOpV61t8kkDexI+CQyitH4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SlNHXyqVFGDPrX/2ppwog6l4pwj3PKda2TkZbqgfSfA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "McjV8xwTF3xI7863DYOBdyvIv6UpzThl6v9vBRk05bI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "MgwakFvPyBlwqFTbhWUF79URJQWFoJTGotlEVSPPUsQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "DyBERpMSD5lEM5Nhpcn4WGgxgn/mkUVJp+PYSLX5jsE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "I43iazc0xj1WVbYB/V+uTL/tughN1bBlxh1iypBnNsA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wjOBa/ATMuOywFmuPgC0GF/oeLqu0Z7eK5udzkTPbis=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "gRQVwiR+m+0Vg8ZDXqrQQcVnTyobwCXNaA4BCJVXtMc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "WUZ6huwx0ZbLb0R00uiC9FOJzsUocUN8qE5+YRenkvQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "7s79aKEuPgQcS/YPOOVcYNZvHIo7FFsWtFCrnDKXefA=", + "subType": "00" + } + } + ] + } + ] + } + } + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2-Range-DecimalPrecision-Update.json b/test/client-side-encryption/spec/legacy/fle2-Range-DecimalPrecision-Update.json new file mode 100644 index 0000000000..3dc6631c61 --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2-Range-DecimalPrecision-Update.json @@ -0,0 +1,612 @@ +{ + "runOn": [ + { + "minServerVersion": "6.2.0", + "topology": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "encrypted_fields": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDecimal": "0.0" + }, + "max": { + "$numberDecimal": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + }, + "key_vault_data": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ], + "tests": [ + { + "description": "FLE2 Range DecimalPrecision. 
Update.", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1" + } + } + } + }, + { + "name": "updateOne", + "arguments": { + "filter": { + "encryptedDecimalPrecision": { + "$gt": { + "$numberDecimal": "0" + } + } + }, + "update": { + "$set": { + "encryptedDecimalPrecision": { + "$numberDecimal": "2" + } + } + } + }, + "result": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0 + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedDecimalPrecision": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDecimal": "0.0" + }, + "max": { + "$numberDecimal": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedDecimalPrecision": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDecimal": "0.0" + }, + "max": { + "$numberDecimal": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command_name": "update", + "command": { + "update": "default", + "ordered": true, + "updates": [ + { + "q": { + "encryptedDecimalPrecision": { + "$gt": { + "$binary": { + "base64": 
"CvoJAAADcGF5bG9hZADKCQAABGcAjgkAAAMwAH0AAAAFZAAgAAAAAHdJ2Vnb4MMzqVYVssjSdDy8XU4GVzMTfGifGETgQ2mYBXMAIAAAAAD7cFfKJGIXo6PjyeX2ria02CckW7dWFDoY/3FyBdm1NQVjACAAAAAAqZO+/+gRWlPaMOvuiXizSmBe7lp1VWg1vJ4UmW8o3bQAAzEAfQAAAAVkACAAAAAAlmvfDrZoydUet4eCVMq7z6a58Ea+1HLJOWxN5lNcrWEFcwAgAAAAAEBo5AWZyC41b9ayjWNQSL4iYEAIwR/JG+ssN8bdoK9RBWMAIAAAAAD4FTKJ6CTzKBAyAwZCLUoDEfnZTRZmhF1q/2hnDzmG9gADMgB9AAAABWQAIAAAAABbiLaoxAA6rinMJw1hC8ZUiq6UU1AQaPFn/py/Y06WuQVzACAAAAAAhtDasFkvYE7SCNu1je/hxdE9TJtAvvH3NtdEbKzNbCUFYwAgAAAAAHHy019aPatHTST+0wGsmukUcsQNQj6KpoS9b7iGeThAAAMzAH0AAAAFZAAgAAAAALORWwSr+tYNxcil2KIGSbNhTHvcPbdj+rLVQNx21S/KBXMAIAAAAAD6diZBkPEJ1cQy06LAxdbNK8Nlxbb44fH4Wk3Y3260nQVjACAAAAAAvUc1q7pyhjU0ilgmwiKkHIY3V4/LxO+Y2uT7eSpBOs8AAzQAfQAAAAVkACAAAAAAG5XMK96PjClNlUvg82j4pMY1YxsznZfj4uNweD394FoFcwAgAAAAAKHgQLdGJHkrfFg9nB93Ac+3VgBw6aU44MTkKIQ91dZoBWMAIAAAAACtbNc1DCoUUyzlkrYmJi4NlwOqLYmb6au4pDc8clXVXwADNQB9AAAABWQAIAAAAAA6M++b9I0YFemmWBAWAE3glu2Ah3Ta1FBxAQEIWS0toAVzACAAAAAANXYTqPf1Y6X3Ns6YQIX0C3FKCyWUo+Kk+fNcQvc0WSoFYwAgAAAAAAaqju6Dv8wqXxcsIbP67V1QGaD5kNTFofZ9Zuf1LGnKAAM2AH0AAAAFZAAgAAAAAKl8bV1riH/uyJ+X0HHd3+18k2cJl2dQFXCdoagutFcaBXMAIAAAAABm8F2Ew9f0VOABdcF+lP0Bi+zWvEUPniWgrxPq/Sx3uwVjACAAAAAAQd2pWVqlmmLg8m8xbs7yLewmR0Z6UQgXofbCsMHaGSoAAzcAfQAAAAVkACAAAAAAsYZD8JEP6kYsPncFnNZwJxhu4YtUTKPNcjHtv67H+rYFcwAgAAAAAI4LqZcRkvbs/2F62Flu0pixNcor4WmBD0DHGaf039wLBWMAIAAAAAAqzpfyBpr4Ano+nFWJyyTuIJelJgiRDnMHQqdeqV8JaAADOAB9AAAABWQAIAAAAAC0vdAi+dmoIXvZ5LqUqvyKV9/tHqSI2SWiSJO5pTnA2wVzACAAAAAAS2qvf9fvfVUH5WtsVxjxmskpGjYTQV34LwvQQw1y9wIFYwAgAAAAAFhNY4qwNntyA+GIoNHZsTkIUbPgy4TBlvNnTPjp4bMFAAM5AH0AAAAFZAAgAAAAAH31lb/srBcrOXkzddCwAnclsR5/3QijEVgECs2JjOWBBXMAIAAAAABg7+prDT73YcCvLE5QbuIrqGcjLc5pQD2Miq0d29yrxgVjACAAAAAAFKqAqXG/ktejFQ7fM2aobO2VmEvZLXnRaJH97Jy/sJYAAzEwAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVjACAAAAAAfsofSP7nQHv8ic8ZW0aNlWxplS46Z+mywPR4rQk+wcgAAzExAH0AAAAFZAAgAAAAAFdthRhe2Q8CvxGIhjTJZv0Lk97GkHciTPxZ/mckLoNaBXMAIAAAAAAqOxsAr23LOVB0DIHbPf9UDJJRFXY2YoKbjhRqw5psbQVjACAAAAAA7ty+Nif6KjS3v1zWKaHX9n4Zj3XC4ajuCduKNIYr3l8AAzEyAH0AAAAFZAAgAAAAAMWymwwbvIeMqmnKWWifUqoCxOsdpnonM2qdLPyjqJO/BXMAIAAAAAB6IDmmpUhBD2zpRj8/y/kmOSXcjuIU14sNh6GKSsg2uwVjACAAAAAABSWO0Ii+NGcsHZQ9MR5EjPXVKeXlI4FQ1pcxeKDiuooAAzEzAH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVjACAAAAAAD51NYesiO4Fo7w7iWBfqAFxEqkaDVctpvzZ28nT4SE8AAzE0AH0AAAAFZAAgAAAAAJaRYmo8zqI2BEUzdSwp4tVRpPmVWsfydkYN3UHh6TMuBXMAIAAAAAAeD6mDnQeLlbC9i0sVgE8+RH6y+e94OJQ0tJ0PvblVSgVjACAAAAAAKUhYSt4nvvUfbNgPJ2E79SciVZ0ZzbzoZ2nKr4ewNLsAAzE1AH0AAAAFZAAgAAAAAAZZ538coNPwyRjhEwr5P8Xw32oWOJF+R+nfCGgy2qO3BXMAIAAAAACOPLnJlKwGNPDBReRKnHfteq0wFb3ezhrc7BVXs8RUHwVjACAAAAAAzCICkPZAkfTiD0MUt155dIPgLJ4/e0qFTM2FR0U261YAAzE2AH0AAAAFZAAgAAAAAFH9l9GGA1I52atJV5jNUf1lx8jBjoEoVoME97v5GFJiBXMAIAAAAAC1qH3Kd78Dr9NGbw7y9D/XYBwv5h1LLO8la5OU7g8UkQVjACAAAAAAn27H0Mpwatgc1R/4nXSRjsG2PzB0ol5YR9f3mCb2y/0AAzE3AH0AAAAFZAAgAAAAADtbVEI2tdkrowEMdkacD2w0Y3T3Ofi7PH6HmA6sP0c/BXMAIAAAAADuBSROnZHA+NgUPH8d0LnWFiDsM2bY8bzjC1+elSsIygVjACAAAAAAMinHEu4wkbeOpdZbXQ94q5o5pIEubqXUDrTRYOGmJC0AAzE4AH0AAAAFZAAgAAAAAAh3WpeMVlikPFYj9hLj+fmIqVt6omCSF75W3TPExyWpBXMAIAAAAAAsQkRmwqeVj2gGE03orb6PtrIzDt6dDU3hgSQi8E2wKgVjACAAAAAAvlZo8Qj3eAdxzZxN5sHKhxi+a9Npj7cZC5+pE6qrOawAAAVlACAAAAAA65pz95EthqQpfoHS9nWvdCh05AV+OokP7GUaI+7j8+wSY20AAAAAAAAAAAAAEHBheWxvYWRJZAAAAAAAEGZpcnN0T3BlcmF0b3IAAQAAAAA=", + "subType": "06" + } + } + } + }, + "u": { + "$set": { + "encryptedDecimalPrecision": { + "$$type": "binData" + } + } + } + } + ], + 
"encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDecimal": "0.0" + }, + "max": { + "$numberDecimal": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + } + }, + "deleteTokens": { + "default.default": { + "encryptedDecimalPrecision": { + "e": { + "$binary": { + "base64": "65pz95EthqQpfoHS9nWvdCh05AV+OokP7GUaI+7j8+w=", + "subType": "00" + } + }, + "o": { + "$binary": { + "base64": "noN+05JsuO1oDg59yypIGj45i+eFH6HOTXOPpeZ//Mk=", + "subType": "00" + } + } + } + } + } + }, + "$db": "default" + } + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 0, + "encryptedDecimalPrecision": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "5nRutVIyq7URVOVtbE4vM01APSIajAVnsShMwjBlzkM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Dri0CXmL78L2DOgk9w0DwxHOMGMzih7m6l59vgy+WWo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "x7GR49EN0t3WXQDihkrbonK7qNIBYC87tpL/XEUyIYc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JfYUqWF+OoGjiYkRI4L5iPlF+T1Eleul7Fki22jp4Qc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q1RyGfIgsaQHoZFRw+DD28V26rN5hweApPLwExncvT8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "L2PFeKGvLS6C+DLudR6fGlBq3ERPvjWvRyNRIA2HVb0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CWxaNqL3iP1yCixDkcmf9bmW3E5VeN8TJkg1jJe528s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+vC6araOEo+fpW7PSIP40/EnzBCj1d2N10Jr3rrXJJM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6SV63Mf51Z6A6p2X3rCnJKCu6ku3Oeb45mBYbz+IoAo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "RjBYT2h3ZAoHxhf8DU6/dFbDkEBZp0IxREcsRTu2MXs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "b7d8mRzD1kI1tdc7uNL+YAUonJ6pODLsRLkArfEKSkM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Xg8C1/A0KJaXOw4i+26Rv03/CydaaunOzXh0CIT+gn8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "UoKUDw2wJYToUCcFaIs03YQSTksYR0MIOTJllwODqKc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "c/5cwAT0C5jber2xlJnWD3a5tVDy0nRtr5HG02hoFOY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wSUrRXavAGaajNeqC5mEUH1K67oYl5Wy9RNIzKjwLAM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6vrp4wWDtHEgHWR99I70WVDzevg1Fk/Pw5U8gUDa0OU=", + "subType": "00" + } + } + ] + }, + { + "_id": 1, + "encryptedDecimalPrecision": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "DLCAJs+W2PL2DV5YChCL6dYrQNr+j4p3L7xhVaub4ic=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "V6knyt7Zq2CG3++l75UtBx2m32iGAPjHiAe439Bf02w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "F08nMDWDZc+DbWM7XCEJNNCEYyinRmrvGP7EWhmp4is=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cXH4688amcDc8kZOJq4UP8cE3R58Zl7e+Qo/1jyspps=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "uURBxvTp3FBCVkd+LPqyuY7d6rMW6SGIJQEPY/wtkZI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": 
"jG3hax1L3RBp9t38vUt53FsBxgr/+Si/vVISpAylYpE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kwtIW8MhH9Ky5xNjBx8gFA/SHh2YVphie7g5FGBzals=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "FHflwFuEMu4xX0ZApHi+pdlBH+oevAtXckCUb5Wv0xU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "0OKSXELxPP85SBVwDGf3LtMEQCJ8TTkFUl/+6jlkdb0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "25j9sQXZCihCmHKvTHgaBsAVZFcGPn7JjHdrCGlwyyw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "uEw0lpQtBppR3vqV9j9+NQRSBF1BzZukb8c9IhyWvxc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zVhZ7Q59O087ji49oMJvBIgeir2oqvUpnh4p53GcTow=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dowrzKs+qJhRMZyKDbhjXbuX43FbmUKOaw9I8YlOZDw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ep5B6cska6THLIF7Mn3tn3RvV9EiwLSt0eZM/CLRUDc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "URNp/YmmDh5wIZUfAzzgPyJeMNiVx9PMsz52DZRujGY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wlM4IAQhhKQEzoVqS8b1Ddd50GB95OFb9LnzOwyjCP4=", + "subType": "00" + } + } + ] + } + ] + } + } + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2-Range-Double-Aggregate.json b/test/client-side-encryption/spec/legacy/fle2-Range-Double-Aggregate.json new file mode 100644 index 0000000000..3d54be3d18 --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2-Range-Double-Aggregate.json @@ -0,0 +1,1138 @@ +{ + "runOn": [ + { + "minServerVersion": "6.2.0", + "topology": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "encrypted_fields": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDouble", + "bsonType": "double", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + }, + "key_vault_data": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ], + "tests": [ + { + "description": "FLE2 Range Double. 
Aggregate.", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDouble": { + "$numberDouble": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDouble": { + "$numberDouble": "1" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDouble": { + "$gt": { + "$numberDouble": "0" + } + } + } + } + ] + }, + "result": [ + { + "_id": 1, + "encryptedDouble": { + "$numberDouble": "1" + } + } + ] + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedDouble": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDouble", + "bsonType": "double", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedDouble": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDouble", + "bsonType": "double", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "aggregate": "default", + "pipeline": [ + { + "$match": { + "encryptedDouble": { + "$gt": { + "$binary": { + "base64": 
"Cq8kAAADcGF5bG9hZAB/JAAABGcAQyQAAAMwAH0AAAAFZAAgAAAAAHgYoMGjEE6fAlAhICv0+doHcVX8CmMVxyq7+jlyGrvmBXMAIAAAAAC/5MQZgTHuIr/O5Z3mXPvqrom5JTQ8IeSpQGhO9sB+8gVjACAAAAAAPMgD8Rqnd94atKnMyPjlTwthUQ710MKJVqgtwNXLFWwAAzEAfQAAAAVkACAAAAAA2kiWNvEc4zunJ1jzvuClFC9hjZMYruKCqAaxq+oY8EAFcwAgAAAAACofIS72Cm6s866UCk+evTH3CvKBj/uZd72sAL608rzTBWMAIAAAAAB7NkTFgsBuPIyxW5KtYZD4uxdgRynoAUwA2RDj3qZNdwADMgB9AAAABWQAIAAAAABkfoBGmU3hjYBvQbjNW19kfXneBQsQQPRfUL3UAwI2cAVzACAAAAAAUpK2BUOqX/DGdX5YJniEZMWkofxHqeAbXceEGJxhp8AFYwAgAAAAAENNdd+fMV7VQzshj6RdvsxqDHYkPDxggdUHw3JbmFBeAAMzAH0AAAAFZAAgAAAAAFNprhQ3ZwIcYbuzLolAT5n/vc14P9kUUQComDu6eFyKBXMAIAAAAAAcx9z9pk32YbPV/sfPZl9ALIEVsqoLXgqWLVK/tP+heAVjACAAAAAA3eqduo9CWE5BePqBq7KCh5++QqXnyjCwybB15Zoeiu4AAzQAfQAAAAVkACAAAAAAODI+pB2pCuB+YmNEUAgtMfNdt3DmSkrJ96gRzLphgb8FcwAgAAAAAAT7dewFDxUDECQ3zVq75/cUN4IP+zsqhkP5+czUwlJIBWMAIAAAAABYcCJA2jocGQiF/vZ9VDEktoK015OqfY64yZhmNPs9GQADNQB9AAAABWQAIAAAAACNAk+yTZ4Ewk1EnotQK8O3h1gg9I7pr9q2+4po1iJVgAVzACAAAAAAUj/LesmtEsgqYVzMJ67umVA11hJTdDXwbxDoQ71vWyUFYwAgAAAAAG32XaKjEO9YTSLMnDR5eiaUpgBQz0n4M3VpBqhWTaEnAAM2AH0AAAAFZAAgAAAAAD/FIrGYFDjyYmVb7oTMVwweWP7A6F9LnyIuNO4MjBnXBXMAIAAAAACIZgJCQRZu7NhuNMyOqCn1tf+DfU1qm10TPCfj5JYV3wVjACAAAAAAZCaO06/DjOBjNrTxB8jdxM7X4XvNu9QTa2Y00kZJwgEAAzcAfQAAAAVkACAAAAAAciRW40ORJLVwchOEpz87Svb+5toAFM6LxDWv928ECwQFcwAgAAAAAN0dipyESIkszfjRzdDi8kAGaa2Hf4wrPAtiWwboZLuxBWMAIAAAAADhvKHhF7zfNRnG8QfiIlKycV6L6iGxbGNKkHligA9rAgADOAB9AAAABWQAIAAAAACZqAyCzYQupJ95mrBJX54yIz9VY7I0WrxpNYElCI4dTQVzACAAAAAA/eyJb6d1xfE+jJlVXMTD3HS/NEYENPVKAuj56Dr2dSEFYwAgAAAAAMfcJKj77c6y/RBQ5hb5LSNdvbYvCAHR59KLnbmxj1gAAAM5AH0AAAAFZAAgAAAAAI5bm3YO0Xgf0VT+qjVTTfvckecM3Cwqj7DTKZXf8/NXBXMAIAAAAAD/m+h8fBhWaHm6Ykuz0WX1xL4Eme3ErLObyEVJf8NCywVjACAAAAAAiGdTXD22l1zDxHeF4NXUTiBnNTsdpzJRwM6riTPuOogAAzEwAH0AAAAFZAAgAAAAANqo4+p6qdtCzcB4BX1wQ6llU7eFBnuu4MtZwp4B6mDlBXMAIAAAAAAGiz+VaukMZ+6IH4jtn4KWWdKK4/W+O+gRioQDrfzpMgVjACAAAAAAl/wlBjSJW/hKkll8HSBCGw4Ce1pJ5rZuhVE09hKq4jQAAzExAH0AAAAFZAAgAAAAAPrFXmHP2Y4YAm7b/aqsdn/DPoDkv7B8egWkfe23XsM1BXMAIAAAAAAGhwpKAr7skeqHm3oseSbO7qKNhmYsuUrECBxJ5k+D2AVjACAAAAAAKSsxERy2OBhb5MiH9/H2BET2oeU6yVihiAoWi/16FroAAzEyAH0AAAAFZAAgAAAAABzjYxwAjXxXc0Uxv18rH8I3my0Aguow0kTwKyxbrm+cBXMAIAAAAADVbqJVr6IdokuhXkEtXF0C2gINLiAjMVN20lE20Vmp2QVjACAAAAAACJ+VAwl9X88mjC76yzkWeL4K6AamNYjbNkpS9B6fQE4AAzEzAH0AAAAFZAAgAAAAAFMm2feF2fFCm/UC6AfIyepX/xJDSmnnolQIBnHcPmb5BXMAIAAAAABLI11kFrQoaNVZFmq/38aRNImPOjdJh0Lo6irI8M/AaAVjACAAAAAAZFgYh4nV8YN+/AAqe/QhKDkNBPg8KraCG7y3bOzhPV4AAzE0AH0AAAAFZAAgAAAAAJvXB3KyNiNtQko4SSzo/9b2qmM2zU9CQTTDfLSBWMgRBXMAIAAAAAAvjuVP7KsLRDeqVqRziTKpBrjVyqKiIbO9Gw8Wl2wFTAVjACAAAAAARuBn3JdQ8so7Gy0XVH0CtlvtRoJWhSrJP6eRIqbyWh8AAzE1AH0AAAAFZAAgAAAAAPGdcxDiid8z8XYnfdDivNMYVPgBKdGOUw6UStU+48CdBXMAIAAAAAARj6g1Ap0eEfuCZ4X2TsEw+Djrhto3fA5nLwPaY0vCTgVjACAAAAAATxLY6SkcLPaoOBJpQsggEqoxgJgiY9seP3fBQM05PckAAzE2AH0AAAAFZAAgAAAAAP5rGPrYGt3aKob5f/ldP0qrW7bmWvqnKY4QwdDWz400BXMAIAAAAADTQkW2ymaaf/bhteOOGmSrIR97bAnJx+yN3yMj1bTeewVjACAAAAAA6O+wIMt3xMITuCxVrqOCNBPX5F122G+/Is+EYkzUvVYAAzE3AH0AAAAFZAAgAAAAAAlz6wJze5UkIxKpJOZFGCOf3v2KByWyI6NB6JM9wNcBBXMAIAAAAABUC7P/neUIHHoZtq0jFVBHY75tSFYr1Y5S16YN5XxC1QVjACAAAAAANv97jMd9JmZ7lMeCzK7jaZHZYsZJNl6N9g9WXzd2om0AAzE4AH0AAAAFZAAgAAAAAFJ8AtHcjia/9Y5pLEc3qVgH5xKiXw12G9Kn2A1EY8McBXMAIAAAAAAxe7Bdw7eUSBk/oAawa7uicTEDgXLymRNhBy1LAxhDvwVjACAAAAAAtOUZ2NeUWwESpq95O92dhqYBt8RtFnjT43E7k9mQF3oAAzE5AH0AAAAFZAAgAAAAAO8uwQUaKFb6vqR3Sv3Wn4QAonC2exOC9lGG1juqP5DtBXMAIAAAAABZf1KyJgQg8/Rf5c02DgDK2aQu0rNCOvaL60ohDHyY+gVjACAAAAAA5nAWj1Ek8p0MLYZEB3yoVFDfBYZ+/ZpIo71u+W9hprcAAzIwAH0AAAAFZAAgAAAAAE8YtqyRsGCeiR6hhiyisR/hccmK4nZqIMzO4lUBmEFzBXMAIAAAAAC1UYOSKqAeG1UJiKjWFV
skRhuFKpj9Ezy+lICZvFlN5AVjACAAAAAA37oP30mTzVftI2FL9Uwyls/jqLqbmRDQk7p7nlx44uYAAzIxAH0AAAAFZAAgAAAAAPhCrMausDx1QUIEqp9rUdRKyM6a9AAx7jQ3ILIu8wNIBXMAIAAAAACmH8lotGCiF2q9VQxhsS+7LAZv79VUAsOUALaGxE/EpAVjACAAAAAAhVIkl8p19VZ0gpg32s++Jda3qsVSVB5tV4CKrtjhE3IAAzIyAH0AAAAFZAAgAAAAAOBi/GAYFcstMSJPgp3VkMiuuUUCrZytvqYaU8dwm8v2BXMAIAAAAACEZSZVyD3pKzGlbdwlYmWQhHHTV5SnNLknl2Gw8IaUTQVjACAAAAAAriBex2kK//RPhyVpCYJDBng4l5w8jD3m8BF7dVAP0p8AAzIzAH0AAAAFZAAgAAAAAIQuup+YGfH3mflzWopN8J1X8o8a0d9CSGIvrA5HOzraBXMAIAAAAADYvNLURXsC2ITMqK14LABQBI+hZZ5wNf24JMcKLW+84AVjACAAAAAAGeUzdG2kQrx5XypXJezZmPVzMYuqYZw7Bhbl4EPT0SMAAzI0AH0AAAAFZAAgAAAAAKsh0ADyOnVocFrOrf6MpTrNvAj8iaiE923DPryu124gBXMAIAAAAADg24a8NVE1GyScc6tmnTbmu5ulzO+896fE92lN08MeswVjACAAAAAAbO+DEBY3STVMQN7CbxmHDUBYrDR+80e797u/VmiK4vcAAzI1AH0AAAAFZAAgAAAAAKkVC2Y6HtRmv72tDnPUSjJBvse7SxLqnr09/Uuj9sVVBXMAIAAAAABYNFUkH7ylPMN+Bc3HWX1e0flGYNbtJNCY9SltJCW/UAVjACAAAAAAHlE3RLKcXpXto7Mr8nRgzEsmhjfGh4vcgPxashCSMg4AAzI2AH0AAAAFZAAgAAAAADDggcwcb/Yn1Kk39sOHsv7BO/MfP3m/AJzjGH506Wf9BXMAIAAAAAAYZIsdjICS0+BDyRUPnrSAZfPrwtuMaEDEn0/ijLNQmAVjACAAAAAA1c/sy3255NofBQrnBNSmVkSzMgsGPaaOUJShddVrnuQAAzI3AH0AAAAFZAAgAAAAAEWY7dEUOJBgjOoWVht1wLehsWAzB3rSOBtLgTuM2HC8BXMAIAAAAAAAoswiHRROurjwUW8u8D5EUT+67yvrgpB/j6PzBDAfVwVjACAAAAAALBvQumG8m7/bzJjGWN2cHSAncdN8jMtOSmEhEhGom24AAzI4AH0AAAAFZAAgAAAAAPZaapeAUUFPA7JTCMOWHJa9lnPFh0/gXfAPjA1ezm4ZBXMAIAAAAACmJvLY2nivw7/b3DOKH/X7bBXjJwoowqb1GtEFO3OYgAVjACAAAAAAGCDHuUJusGHKQ+r9nrFChmUUsRcqZKPGsRiLSk5gbFcAAzI5AH0AAAAFZAAgAAAAAK00u6jadxCZAiA+fTsPVDsnW5p5LCr4+kZZZOTDuZlfBXMAIAAAAAAote4zTEYMDgaaQbAdN8Dzv93ljPLdGjJzvnRn3KXgtQVjACAAAAAApZ6l+OrocOqgFek7WOqOP9JruTWZ+iW+5zdL3DZwzhkAAzMwAH0AAAAFZAAgAAAAAFbgabdyymiEVYYwtJSWa7lfl/oYuj/SukzJeDOR6wPVBXMAIAAAAADAFGFjS1vPbN6mQEhkDYTD6V2V23Ys9gUEUMGNvMPkaAVjACAAAAAAeICKly5Xtwmfd4JFD+5e1UWpu4V3KoRim7jeBICDs+UAAzMxAH0AAAAFZAAgAAAAABNMR6UBv2E627CqLtQ/eDYx7OEwQ7JrR4mSHFa1N8tLBXMAIAAAAAAxH4gucI4UmNVB7625C6hFSVCuIpJO3lusJlPuL8H5EQVjACAAAAAA0/uh1hrAlGGna/njCVaCqBwubxkifzF0zjCDQrIJOtUAAzMyAH0AAAAFZAAgAAAAAG8cd6WBneNunlqrQ2EmNf35W7OGObGq9WL4ePX+LUDmBXMAIAAAAAAjJ2+sX87NSis9hBsgb1QprVRnO7Bf+GObCGoUqyPE4wVjACAAAAAAAoJ4Ufdq45T9Aun5FupHlaBCIUsmUn6dXgV9KorpFikAAzMzAH0AAAAFZAAgAAAAAFWOUGkUpy8yf6gB3dio/aOfRKh7XuhvsUj48iESFJrGBXMAIAAAAAAY7sCDMcrUXvNuL6dO0m11WyijzXZvPIcOKob6IpC4PQVjACAAAAAAO/QFjczoznH95Iu0YEVMsU1GA1yxSGL8bcwSweNzAtkAAzM0AH0AAAAFZAAgAAAAAGUrIdKxOihwNmo6B+aG+Ag1qa0+iqdksHOjQj+Oy9bZBXMAIAAAAABwa5dbI2KmzBDNBTQBEkjZv4sPaeRkRNejcjdVymRFKQVjACAAAAAAUtdJucZoQvvPJiy65RonQrxBCcvtfHslpbgLbtWirCYAAzM1AH0AAAAFZAAgAAAAAOx89xV/hRk64/CkM9N2EMK6aldII0c8smdcsZ46NbP8BXMAIAAAAADBF6tfQ+7q9kTuLyuyrSnDgmrdmrXkdhl980i1KHuGHgVjACAAAAAAVc5njbLX1afpsuG662U/OBo4LanEk06lKbQtd95fswgAAzM2AH0AAAAFZAAgAAAAAMkN0L1oQWXhjwn9rAdudcYeN8/5VdCKU8cmDt7BokjsBXMAIAAAAAAT62pGXoRwExe9uvgYOI0hg5tOxilrWfoEmT0SMglWJwVjACAAAAAAhNc5ovIaxgtS856ZBfG+cAcHAz+3ewzB9C6zZoaggj4AAzM3AH0AAAAFZAAgAAAAANxfa4xCoaaB7k1C1RoH1LBhsCbN2yEq15BT9b+iqEC4BXMAIAAAAACAX9LV8Pemfw7NF0iB1/85NzM1Ef+1mUfyehacUVgobQVjACAAAAAAc0bIRNzuyc1Y0hkEmm7xV6Xp/LBlAgMq/ISCHr0Tq44AAzM4AH0AAAAFZAAgAAAAAOYIYoWkX7dGuyKfi3XssUlc7u/gWzqrR9KMkikKVdmSBXMAIAAAAABVF2OYjRTGi9Tw8XCAwZWLpX35Yl271TlNWp6N/nROhAVjACAAAAAAyGLbmHhe8eTQC8L2eDRcezUWULLZ9E8DPic7Mfp8/+4AAzM5AH0AAAAFZAAgAAAAAIy0+bXZi10QC+q7oSOLXK5Fee7VEk/qHSXukfeVIfgzBXMAIAAAAAAQ3IIV/JQCHW95AEbH5zGIHtJqyuPjWPMIZ+VmQHlxEwVjACAAAAAAD3azcNDdohU6tHO6ypT7F0rtydAYIMmLIsX6y7cXVkMAAzQwAH0AAAAFZAAgAAAAAL5SOJQ3LOhgdXJ5v086NNeAl1qonQnchObdpZJ1kHeEBXMAIAAAAAA+tEqTXODtik+ydJZSnUqXF9f18bPeze9eWtR7ExZJgQVjACAAAAAAUd8v2RN1WagYan1LbV+G81vcJKQ6HsX2GSBtnzMH7XEAAzQxAH0AAAAFZAAgAAAAAKl8zcHJRDjSjJeV/
WvMxulW1zrTFtaeBy/aKKhadc6UBXMAIAAAAADBdWQl5SBIvtZZLIHszePwkO14W1mQ0izUk2Ov21cPNAVjACAAAAAAHvQLJ2X4ncjVv1BsOd/jbVouRPwf4222WlGSzPyAfnsAAzQyAH0AAAAFZAAgAAAAAFvotcNaoKnVt5CBCOPwjexFO0WGWuaIGL6H/6KSau+6BXMAIAAAAAD2y2mBN5xPu5PJoY2zcr0GnQDtHRBogA5+xzIxccE9fwVjACAAAAAA1OntPAxq1NJ4sfca+ysHcg68mj7QHwdEZQVxHMlV6x0AAzQzAH0AAAAFZAAgAAAAAPezU0/vNT4Q4YKbTbaeHqcwNLT+IjW/Y9QFpIooihjPBXMAIAAAAACj2x4O4rHter8ZnTws5LAP9jJ/6kk9C/V3vL50LoFZHAVjACAAAAAAlZd3bPMUXRhTwLWflrQuKIwtauTswenUiaDi2ibHYYYAAzQ0AH0AAAAFZAAgAAAAAMqpayM2XotEFmm0gwQd9rIzApy0X+7HfOhNk6VU7F5lBXMAIAAAAACJR9+q5T9qFHXFNgGbZnPubG8rkO6cwWhzITQTmd6VgwVjACAAAAAAEEOCUo+ihRGl1kuHlabWBWUFyJPAXSXzpQB4od57cMEAAzQ1AH0AAAAFZAAgAAAAANCeyW+3oebaQk+aqxNVhAcT/BZ5nhsTVdKS3tMrLSvWBXMAIAAAAADxRFMDhkyuEc++WnndMfoUMLNL7T7rWoeblcrpSI6soQVjACAAAAAA1KYQISxmBcGmgDMAjBpTcfQr/oci60Kf4QtEYEXPKSQAAzQ2AH0AAAAFZAAgAAAAAIbo5YBTxXM7HQhl7UP9NNgpPGFkBx871r1B65G47+K8BXMAIAAAAAC21dJSxnEhnxO5gzN5/34BL4von45e1meW92qowzb8fQVjACAAAAAAisxsskU/9ykphpSjB6bMMY+6Tbt6wpjhE2dg1IULAgYAAzQ3AH0AAAAFZAAgAAAAABm/6pF96j26Jm7z5KkY1y33zcAEXLx2n0DwC03bs/ixBXMAIAAAAAD01OMvTZI/mqMgxIhA5nLs068mW+GKl3OW3ilf2D8+LgVjACAAAAAAnPa8PTfcaSSAdzNAC54IMTicbShbtt/cSnFHz7u7g8wAAzQ4AH0AAAAFZAAgAAAAAJ/D3+17gaQdkBqkL2wMwccdmCaVOtxzIkM8VyI4xI5zBXMAIAAAAAAggLVmkc5u+YzBR+oNE+XgLVp64fC6MzUb/Ilu/Jsw0AVjACAAAAAAp5qCK8p0eTvAAUQ8fIOGyb2YM1YflQ8H6ggiWZ9vNI4AAzQ5AH0AAAAFZAAgAAAAAJMRyUW50oaTzspS6A3TUoXyC3gNYQoShUGPakMmeVZrBXMAIAAAAACona2Pqwt4U2PmFrtmu37jB9kQ/12okyAVtYa8TQkDiQVjACAAAAAAx/LOuDwvpaPFJeKLowZW/4q7SUsLtdldB27zc4//alEAAzUwAH0AAAAFZAAgAAAAAByuYl8dBvfaZ0LO/81JW4hYypeNmvLMaxsIdvqMPrWoBXMAIAAAAABNddwobOUJzm9HOUD8BMZJqkNCUCqstHZkC76FIdNg9AVjACAAAAAA4SEPY+qEZ2dCu/9485IEVybiM3Z7szXYM+izxklNm14AAzUxAH0AAAAFZAAgAAAAAHEzLtfmF/sBcYPPdj8867VmmQyU1xK9I/3Y0478azvABXMAIAAAAAAcmyFajZPnBTbO+oLInNwlApBocUekKkxz2hYFeSlQ+gVjACAAAAAA15ibZoBP1/pP27BDIfizDYNRDIpOmj8Ie34FvrtqjhMAAzUyAH0AAAAFZAAgAAAAAJam7JYsZe2cN20ZYm2W3v1pisNt5PLiniMzymBLWyMtBXMAIAAAAABxCsKVMZMTn3n+R2L7pVz5nW804r8HcK0mCBw3jUXKXAVjACAAAAAAElCgoMRrtlONqnyosjIpiz4mQTLq3+b/JUCVEq4QFiAAAzUzAH0AAAAFZAAgAAAAAMXrXx0saZ+5gORmwM2FLuZG6iuO2YS+1IGPoAtDKoKBBXMAIAAAAADIQsxCr8CfFKaBcx8kIeSywnGh7JHjKRJ9vJd9x79y7wVjACAAAAAAPzOTJ088k9QdGUpZ1ubUdkw3pTGkNF8bui4a9wqeo08AAzU0AH0AAAAFZAAgAAAAACbzcUD3INSnCRspOKF7ubne74OK9L0FTZvi9Ay0JVDYBXMAIAAAAADPebVQH8Btk9rhBIoUOdSAdpPvz7qIY4UC2i6IGisSAQVjACAAAAAAe4zSAgMQPjtz2t1DgxW1XOlV+fnNMXt6iQ7iV7DgQz4AAzU1AH0AAAAFZAAgAAAAACUc2CtD1MK/UTxtv+8iA9FoHEyTwdl43HKeSwDw2Lp5BXMAIAAAAACCIduIdw65bQMzRYRfjBJj62bc69T4QqH4QoWanwlvowVjACAAAAAA+i6aTnkoixxSOxDgQM/aWTylOy1m0ViCDU5asOM5ieYAAzU2AH0AAAAFZAAgAAAAAHSaHWs/dnmI9sc7nB50VB2Bzs0kHapMHCQdyVEYY30TBXMAIAAAAACkV22lhEjWv/9/DubfHBAcwJggKI5mIbSK5L2nyqloqQVjACAAAAAA1I5pLbnlFf/EfJlhE0RFIioDjzKrWNh4TO8H/ksrbMcAAzU3AH0AAAAFZAAgAAAAAAL8jhNBG0KXXZhmZ0bPXtfgapJCB/AI+BEHB0eZ3C75BXMAIAAAAADHx/fPa639EBmGV5quLi8IQT600ifiKSOhTDOK19DnzwVjACAAAAAANfWpWWy9iu7pgMEy2aVCggGEtJtXKQG1+3zCf8D6iEkAAzU4AH0AAAAFZAAgAAAAAAxgeclNl09H7HvzD1oLwb2YpFca5eaX90uStYXHilqKBXMAIAAAAACMU5pSxzIzWlQxHyW170Xs9EhD1hURASQk+qkx7K5Y6AVjACAAAAAA/kYlJPSFn2g1l7cy6djV/7wgnlPWphZ0IKSY8uenUF0AAzU5AH0AAAAFZAAgAAAAABmO7QD9vxWMmFjIHz13lyOeV6vHT6mYCsWxF7hb/yOjBXMAIAAAAACT9lmgkiqzuWG24afuzYiCeK9gmJqacmxAruIukd0xEAVjACAAAAAAyiZxtuOZQNSit4wFMJS3jnqLbDzyJ0OtDTKs6r0EO0cAAzYwAH0AAAAFZAAgAAAAAB89SjLtDJkqEghRGyj6aQ/2qvWLNuMROoXmzbYbCMKMBXMAIAAAAAC8sywgND+CjhVTF6HnRQeay8y9/HnVzDI42dEPah28LQVjACAAAAAAzBj8q9Ig4uhIc6rhNDVX1lII9lzfhJ0+ig8viBhdrtQAAzYxAH0AAAAFZAAgAAAAABN2alGq9Aats1mwERNGwL/fIwZSvVCe9/8XMHTFlpUpBXMAIAAAAACuDPjJgvvbBYhbLpjMiWUCsVppiYrhvR+yMysNPN8cZAVjACAAAAAAqcsLIvb3Pe/QKYB9wQ86Ye2O
FSAJEIY4ve7LMM3SwGMAAzYyAH0AAAAFZAAgAAAAAL8YB6VAqGBiWD4CBv16IBscg5J7VQCTZu87n6pj+86KBXMAIAAAAAAmxm8e68geeyAdUjSMWBHzUjneVB0pG9TBXIoE6467hAVjACAAAAAA9Kd65vKIUckNPvxl/HHNPJVnFHXyErp4qibmD7BloukAAzYzAH0AAAAFZAAgAAAAAL4yLkCTV5Dmxa5toBu4JT8ge/cITAaURIOuFuOtFUkeBXMAIAAAAAAXoFNQOMGkAj7qEJP0wQafmFSXgWGeorDVbwyOxWLIsgVjACAAAAAAaFrBx0xXIryTe7V/kX+jfPHd0057x7k7MxzDAzi0RpcAAzY0AH0AAAAFZAAgAAAAAEU8DoUp46YtYjNFS9kNXwdYxQ9IW27vCTb+VcqqfnKNBXMAIAAAAADe7vBOgYReE8X78k5ARuUnv4GmzPZzg6SbConf4L2G3wVjACAAAAAAOl/tartywd/fJj5DNRsVH/ml9tJ8KkkCbKObsFe8lHcAAzY1AH0AAAAFZAAgAAAAAPa4yKTtkUtySuWo1ZQsp2QXtPb5SYqzA5vYDnS1P6c0BXMAIAAAAADKnF58R1sXlHlsHIvCBR3YWW/qk54z9CTDhZydkD1cOQVjACAAAAAAfn/jP8K3QUILngCNkydHARyBvBHIFdaJjzV0EXsFruMAAzY2AH0AAAAFZAAgAAAAAN5WJnMBmfgpuQPyonmY5X6OdRvuHw4nhsnGRnFAQ95VBXMAIAAAAACwftzu7KVV1rmGKwXtJjs3cJ1gE3apr8+N0SAg1F2cHwVjACAAAAAAf0XyQQLd/HIYaf9EeAV0o2h12K1AV5piLCpZihznBXoAAzY3AH0AAAAFZAAgAAAAACHvDsaPhoSb6DeGnKQ1QOpGYAgK82qpnqwcmzSeWaJHBXMAIAAAAABRq3C5+dOfnkAHM5Mg5hPB3O4jhwQlBgQWLA7Ph5bhgwVjACAAAAAAm2Ukk5kkQp+PDpBCCefQOqFKKZie4hINim3yvtypsEAAAzY4AH0AAAAFZAAgAAAAAOBePJvccPMJmy515KB1AkXF5Pi8NOG4V8psWy0SPRP+BXMAIAAAAAB3dOJG9xIDtEKCRzeNnPS3bFZepMj8UKBobKpSoCPqpgVjACAAAAAAnu33SngYPOtRgdJ3aBBuxWn80ti3OO6nZJcI6eh0VfQAAzY5AH0AAAAFZAAgAAAAABUvRrDQKEXLMdhnzXRdhiL6AGNs2TojPky+YVLXs+JnBXMAIAAAAAD1kYicbEEcPzD4QtuSYQQWDPq8fuUWGddpWayKn3dT9QVjACAAAAAAUc5dxcbNYY3qMO8+Xm2xis+pH9NjPLrHqHenwDEImAEAAzcwAH0AAAAFZAAgAAAAAOvSnpujeKNen4pqc2HR63C5s5oJ1Vf4CsbKoYQvkwl5BXMAIAAAAACw2+vAMdibzd2YVVNfk81yXkFZP0WLJ82JBxJmXnYE+QVjACAAAAAAnmej008JFQAgdImjk26PYvqdATjRYRufLi+vEVVwtucAAzcxAH0AAAAFZAAgAAAAAAo0xfGG7tJ3GWhgPVhW5Zn239nTD3PadShCNRc9TwdNBXMAIAAAAADZh243oOhenu0s/P/5KZLBDh9ADqKHtSWcXpO9D2sIjgVjACAAAAAAhpNHI4+aRdWHCvZfEjlgpRBz36g05wN9p/hj3NEwc7EAAAVlACAAAAAA65pz95EthqQpfoHS9nWvdCh05AV+OokP7GUaI+7j8+wSY20AAAAAAAAAAAAAEHBheWxvYWRJZAAAAAAAEGZpcnN0T3BlcmF0b3IAAQAAAAA=", + "subType": "06" + } + } + } + } + } + ], + "cursor": {}, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDouble", + "bsonType": "double", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + } + } + } + }, + "command_name": "aggregate" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 0, + "encryptedDouble": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "5nRutVIyq7URVOVtbE4vM01APSIajAVnsShMwjBlzkM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6YrBn2ofIw1b5ooakrLOwF41BWrps8OO0H9WH4/rtlE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "n+XAuFnP8Dov9TnhGFxNx0K/MnVM9WbJ7RouEu0ndO0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yRXojuVdn5GQtD97qYlaCL6cOLmZ7Cvcb3wFjkLUIdM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "DuIkdRPITRs55I4SZmgomAHCIsDQmXRhW8+MOznkzSk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SsBk+Et1lTbU+QRPx+xyJ/jMkmfG+QCvQEpip2YYrzA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "crCIzOd8KhHvvUlX7M1v9bhvU4pLdTc+X2SuqoKU5Ek=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "YOWdCw4UrqnxkAaVjqmC4sKQDMVMHEpFGnlxpxdaU6E=", + "subType": "00" + } + }, + { + "$binary": { + "base64": 
"M3SShp81Ff8tQ632qKbv9MUcN6wjDaBReI0VXNu6Xh4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "gzHlSPxpM0hT75kQvWFzGlOxKvDoiKQZOr19V6l2zXI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "s3JnppOGYw9SL2Q1kMAZs948v2F5PrpXjGei/HioDWs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cG6+3Gk/zEH68P/uuuwiAUVCuyJwa1LeV+t29FlPPAo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dupdvR3AyJtM+g9NDKiaLVOtGca387JQp8w+V03m7Ig=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JqEQc5svj2jTvZ6LLA5ivE+kTb/0aRemSEmxk4G7Zrg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "szcXXXKnob+p3SoM4yED2R920LeJ7cVsclPMFTe4CeI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "o1QoGVXmuBdHwHm7aCtGMlMVKrjFdYvJXpoq6uhIAZ0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Jfm5wPlqqLCJRGQIqRq2NGmpn7s0Vrih2H3YAOoI2YU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zMHLb8ARbsYo8Ld05bqnGFf1Usha6EGb8QKwdSAyps0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yQdtq9lh5pugL7/i0Bj/PuZUUBUIzf+7wj1rl5y736w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wGWVZdO7qIuyDg/BqDgqjgoQ02h5YYgwXQB1oCin2NE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "by9HMLj6NTEpgztZ5HSN6GxImkXPcaFINYDzgZY33X8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tWo0vbasi7bXmn/MsOx13VC1IsWtpx/nYp0uj4iMzdA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tQQpndUYd5O87lOtrGjH3wl9VsOK0ray7RMasL90sBM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cQjXEDCMsOpKLLf+vlTgIHA+cbSJdzqhbSX9Wvh95aA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "7yMpU48IxK9SzP2cx3VnTownGEwFmeFofuuFT97SuuY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kSOx1kz0CmBgzKQHZlo65ZUY1DIv9A99JRm+Us2y6Ew=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ubQpdPBe6/xvtr+AcXdfYLSvYCR4ot0tivehkCsupb4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xal+iCJ6FTefRQToyoNksc9NCZShyn04NDGi4IYrcoM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "d7jU4iOK50xHxlkSifcxlZFCM46TSgQzoYivxG3HNLY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tJvl2nsBLBVzL3pp6sKWCL4UXeh3q/roYBJjSb74ve0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "OIUCaKRvIx9t1w6Hxlz1IcQTdPNCfdRNwnnTm10W+X0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "A9tvzsiElotOUVIB4CqfQp9mAwqvTM35YkmAR170aHA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "lI8gpK7hpb7c9x4RQugsxMnQay5LZJmwslZdvMx/dcE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dNCzh40U0XvdKnSDi3HRQOWQftEsDVqc4uUvsVFGoq8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "IP+iwEBWBwVVZIdpaMu8k5+soFCz+TZkYn3drKZ9grE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pnqyh6e0y5svHkJDShlN9CHV0WvMBE4QbtJpQw5ZCXc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "elEl42tbVDoRTLjAhZUFEtXiut4b3PVhg/1ZLZSQdtE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "vHuu2FxwclMHqyE6JBYbTYgbEkB0dqb/JuaxsvfwsmY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xTf7NCe3Gf8QpE78HR5OknlLTKfs9J+RN9UZpH6fnso=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "XiWSasRnJAulGR6+LCVD3mwRObXylqYWR9jvpywq12c=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "MZMxEQ5ikx0PG1YFIExv0UnTZogsvgeOEZTpzvBDn4w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yZMyMZBDrWbAhvnic7vvIYhmO9m5H2iuv0c8KNZrBzY=", + 
"subType": "00" + } + }, + { + "$binary": { + "base64": "xxM14hTPY5j0vvcK2C7YAEjzdsfUTFHozHC0hEo1bxI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+01rqR1xVwkpGXcstbk1ItJqFVjH6Q8MGxEN3Cm9Y1A=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xOpLV0Z2VTRJ3iWtnWZcsyjXubTIkYWo31cO+HV1o1k=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "BWUOLqgLBqc5NwxVlSV5H3KFQPXbCp7mdo+jF+8cJqY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "fuQb1S6xZDGlrEbK+kI23aL53PP1PVNwqICnZNt9Yzg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SfscnoibFttahLdPVC4Ee+47ewGFKpDSU7M6HX19bKE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "rpSW2awybNVeKtat91VFxqbINoTfNhPfQAu+d73Xtf8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "9M/CP9ccOIIj2LLFmE0GFDO0Ban2wsNalEXfM6+h+1s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "WrEMG49l1ye4MhXs5ZS9tz8P6h+hDvthIg/2wW9ne1Q=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ImNhbfeyfH8qIEeA5ic0s3dAQBdzzTBS+CPsNih9vZ0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dWP33YDSn04UKJN2ogh2Rui0iW/0q2y18OCDRVcfyoo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "lYv0isAtfGh6H9tdp3cp2eHU7q2J+uk7QrgcxtK3w7Y=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "VGMoamB/+7zTOYcY/pqJc96xlv2PdW4hwsIAEIslTDQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yNeBWMF7BnD9wVwz2PgJsvWr77QiVvvWUvJF0+fqBug=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SfpvObJ+tJBXSvqeN7vlOfmhYign635lciYAJIjUtY8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dsen4NqjzVGjpjufiTMs3+gqeD09EbnuogPgxrJECwg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pxCWVM3sn19NsFEpgHbgLa+PmYlhN3mMiP0Wk8kJhYw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q11KNvJszjYIB9n9HcC+N4uz11a3eRj1L3BH9scKMDQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "A1PmkgcEToWh1JiVWE6mI5jUu7poxWWuCUt/cgRUUDc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "qJo3Hu4PJeanL7XEaWXO/n3YsodhZyd+MJOOmB9Kpd8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "BkBKLO8URFscfRY9Bav/1+L9mLohDgNr/MkZtGiraIs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "rZq5WA3Hx3xthOyHAJXK//f8pE2qbz7YKu3TIMp9GFY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "X07a/Lm80p5xd4RFs1dNmw+90tmPDPdGiAKVZkxd4zY=", + "subType": "00" + } + } + ] + }, + { + "_id": 1, + "encryptedDouble": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "bE1vqWj3KNyM7cCYUv/cnYm8BPaUL3eMp5syTHq6NF4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "2FIZh/9N+NeJEQwxYIX5ikQT85xJzulBNReXk8PnG/s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "I93Md7QNPGmEEGYU1+VVCqBPBEvXdqHPtTJtMOn06Yk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "GecBFQ1PemlECWZWCl7f74vmsL6eB6mzQ9n6tK6FYfs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "QpjhZl+O1ORifgtCZuWAdcP6OKL7IZ2cA46v8FJcV28=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "FWXI/yZ1M+2fIboeMCDMlp+I2NwPQDtoM/wWselOPYw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "uk26nvN/LdRLaBphiBgIZzT0sSpoO1z0RdDWRm/xrSA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "hiiYSH1KZovAULc7rlmEU74wCjzDR+mm6ZnsgvFQjMw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "hRzvMvWPX0sJme+wck67lwbKDFaWOa+Eyef+JSdc1s4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": 
"PSx5D+zqC9c295dguX4+EobT4IEzfffdfjzC8DWpB5Q=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "QzfXQCVTjPQv2h21v95HYPq8uCsVJ2tPnjv79gAaM9M=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "XcGDO/dlTcEMLqwcm55UmOqK+KpBmbzZO1LIzX7GPaQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Lf+o4E7YB5ynzUPC6KTyW0lj6Cg9oLIu1Sdd1ODHctA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wAuVn02LAVo5Y+TUocvkoenFYWzpu38k0NmGZOsAjS4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yJGDtveLbbo/0HtCtiTSsvVI/0agg/U1bFaQ0yhK12o=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "KsEy0zgYcmkM+O/fWF9z3aJGIk22XCk+Aw96HB6JU68=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "p+AnMI5ZxdJMSIEJmXXya+FeH5yubmOdViwUO89j0Rc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "/jLix56jzeywBtNuGw55lCXyebQoSIhbful0hOKxKDY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "fvDvSPomtJsl1S3+8/tzFCE8scHIdJY5hB9CdTEsoFo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "oV5hOJzPXxfTuRdKIlF4uYEoMDuqH+G7/3qgndDr0PM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "3ALwcvLj3VOfgD6OqXAO13h1ZkOv46R6+Oy6SUKh53I=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "gxaB9FJj0IM+InhvAjwWaex3UIZ9SAnDiUd5WHSY/l0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "66NPvDygJzKJqddfNuDuNOpvGajjFRtvhkwfUkiYmXw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "1dWcQIocRAcO9XnXYqbhl83jc0RgjQpsrWd8dC27trg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "npos0Uf1DT3ztSCjPVY9EImlRnTHB1KLrvmVSqBQ/8E=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "TEI9qBx/tK1l1H0v1scMG8Srmtwo5VxWHADPBSlWrXk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "3wUN2ypQKoj+5ASkeIK9ycxhahVxyTmGopigoUAlyYs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "o/oksSnUS+nIq6ozWTbB5bJh+NoaPj8deAA23uxiWCk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "KExYPruhA31e8xuSwvfUfDcyY/H2Va6taUd0k4yFgLc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "/x+dNfxdd/lkx8Z8VZVfoYl7LPoaZ/iKEzZXBrAtIJc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "DE4cmjFLPqZlmRomO0qQiruUBtzoCe8ZdNRcfNH92pU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "M6EKNcLPw/iojAChgYUSieaBYWcbsjKtB94SaHOr8vk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+qP49lDPeyhaduTvXJgtJEqHNEYANVu9Bg3Bxz7Td9w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ruMrC2VIS+VKbJwCFb3bfkaLTju9nE+yPONV9s0M0Vo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "EbjDlSB5JKnDKff4d8hOmaOwJ7B9Q6NQFisLj+DPC+0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "C/yYOTB94edyqAbiQNu8/H7FoG3yRRjHDkMykz4+Mv0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CBxqrejG+qQQq2YTd6iP/06kiu2CxxzBFaZK3Ofb1CM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "2ZOQ/fpho+AbDENWBZaln7wRoepIRdhyT648dr8O5cU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "EghIgEPz01+myPgj8oid+PgncvobvC7vjvG3THEEQ0M=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "92CysZYNF8riwAMhdrIPKxfODw9p07cKQy/Snn8XmVY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "VO0LeTBQmsEf7sCHzTnZwUPNTqRZ49R8V5E9XnZ/5N4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "exs8BQMJq7U6ZXYgIizT7XN+X/hOmmn4YEuzev9zgSI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "qHpS4k1I+gPniNp4CA8TY8lLN36vBYmgbKMFpbYMEqg=", + 
"subType": "00" + } + }, + { + "$binary": { + "base64": "+7lWKCKAWFw6gPZdHE6E8KIfI14/fSvtWUmllb5WLi0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "YiH/US0q6679hWblFDDKNqUjCgggoU8sUCssTIF1QbU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "YgwkKElEubNfvXL9hJxzqQUQtHiXN/OCGxNL1MUZZlM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "hZFST4INZTTuhvJlGJeMwlUAK270UCOTCDeBAnN4a7g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "24I1Zw35AuGnK3CqJhbCwYb0IPuu5sCRrM5iyeITOLc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "vgD12JB4Q1S/kGPSQ1KOgp386KnG1GbM/5+60oRGcGw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+wNE+OL+CB9d4AUJdVxd56jUJCAXmmk9fapuB2TAc4g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "uhQh1B2Pe4RkNw/kPEcgaLenuikKoRf1iyfZhpXdodc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "eu8gjAUIp8ybO204AgeOq5v1neI1yljqy5v3I6lo1lM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "7QG6oVbASBAjrnCPxzzUNnuFSFNlKhbuBafkF8pr7Is=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "PUS1xb2oHSDTdYltutoSSxBiJ1NjxH3l2kA4P1CZLEs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "XPMh/JDC/O93gJJCwwgJDb8ssWZvRvezNmKmyn3nIfk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "jWz+KGwMk/GOvFAK2rOxF3OjxeZAWfmUQ1HGJ7icw4A=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "o7XbW68pc6flYigf3LW4WAGUWxpeqxaQLkHUhUR9RZ8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "nqR+g60+5U0okbqJadSqGgnC+j1JcP8rwMcfzOs2ACI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Hz43qVK95tSfbYFtaE/8fE97XMk1RiO8XpWjwZHB80o=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "noZUWlZ8M6KXU5rkifyo8/duw5IL7/fXbJvT7bNmW9k=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "WONVHCuPSanXDRQQ/3tmyJ0Vq+Lu/4hRaMUf0g0kSuw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "UEaj6vQRoIghE8Movd8AGXhtwIOXlP4cBsECIUvE5Y8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "D3n2YcO8+PB4C8brDo7kxKjF9Y844rVkdRMLTgsQkrw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "C+YA0G9KjxZVaWwOMuh/dcnHnHAlYnbFrRl0IEpmsY0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "rUnmbmQanxrbFPYYrwyQ53x66OSt27yAvF+s48ezKDc=", + "subType": "00" + } + } + ] + } + ] + } + } + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2-Range-Double-Correctness.json b/test/client-side-encryption/spec/legacy/fle2-Range-Double-Correctness.json new file mode 100644 index 0000000000..b09e966324 --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2-Range-Double-Correctness.json @@ -0,0 +1,1160 @@ +{ + "runOn": [ + { + "minServerVersion": "6.2.0", + "topology": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "encrypted_fields": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDouble", + "bsonType": "double", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + }, + "key_vault_data": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": 
"sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ], + "tests": [ + { + "description": "Find with $gt", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDouble": { + "$numberDouble": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDouble": { + "$numberDouble": "1.0" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDouble": { + "$gt": { + "$numberDouble": "0.0" + } + } + } + }, + "result": [ + { + "_id": 1, + "encryptedDouble": { + "$numberDouble": "1.0" + } + } + ] + } + ] + }, + { + "description": "Find with $gte", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDouble": { + "$numberDouble": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDouble": { + "$numberDouble": "1.0" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDouble": { + "$gte": { + "$numberDouble": "0.0" + } + } + }, + "sort": { + "_id": 1 + } + }, + "result": [ + { + "_id": 0, + "encryptedDouble": { + "$numberDouble": "0.0" + } + }, + { + "_id": 1, + "encryptedDouble": { + "$numberDouble": "1.0" + } + } + ] + } + ] + }, + { + "description": "Find with $gt with no results", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDouble": { + "$numberDouble": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDouble": { + "$numberDouble": "1.0" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDouble": { + "$gt": { + "$numberDouble": "1.0" + } + } + } + }, + "result": [] + } + ] + }, + { + "description": "Find with $lt", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDouble": { + 
"$numberDouble": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDouble": { + "$numberDouble": "1.0" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDouble": { + "$lt": { + "$numberDouble": "1.0" + } + } + } + }, + "result": [ + { + "_id": 0, + "encryptedDouble": { + "$numberDouble": "0.0" + } + } + ] + } + ] + }, + { + "description": "Find with $lte", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDouble": { + "$numberDouble": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDouble": { + "$numberDouble": "1.0" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDouble": { + "$lte": { + "$numberDouble": "1.0" + } + } + }, + "sort": { + "_id": 1 + } + }, + "result": [ + { + "_id": 0, + "encryptedDouble": { + "$numberDouble": "0.0" + } + }, + { + "_id": 1, + "encryptedDouble": { + "$numberDouble": "1.0" + } + } + ] + } + ] + }, + { + "description": "Find with $gt and $lt", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDouble": { + "$numberDouble": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDouble": { + "$numberDouble": "1.0" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDouble": { + "$gt": { + "$numberDouble": "0.0" + }, + "$lt": { + "$numberDouble": "2.0" + } + } + } + }, + "result": [ + { + "_id": 1, + "encryptedDouble": { + "$numberDouble": "1.0" + } + } + ] + } + ] + }, + { + "description": "Find with equality", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDouble": { + "$numberDouble": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDouble": { + "$numberDouble": "1.0" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDouble": { + "$numberDouble": "0.0" + } + } + }, + "result": [ + { + "_id": 0, + "encryptedDouble": { + "$numberDouble": "0.0" + } + } + ] + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDouble": { + "$numberDouble": "1.0" + } + } + }, + "result": [ + { + "_id": 1, + "encryptedDouble": { + "$numberDouble": "1.0" + } + } + ] + } + ] + }, + { + "description": "Find with $in", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": 
"Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDouble": { + "$numberDouble": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDouble": { + "$numberDouble": "1.0" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDouble": { + "$in": [ + { + "$numberDouble": "0.0" + } + ] + } + } + }, + "result": [ + { + "_id": 0, + "encryptedDouble": { + "$numberDouble": "0.0" + } + } + ] + } + ] + }, + { + "description": "Aggregate with $gte", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDouble": { + "$numberDouble": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDouble": { + "$numberDouble": "1.0" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDouble": { + "$gte": { + "$numberDouble": "0.0" + } + } + } + }, + { + "$sort": { + "_id": 1 + } + } + ] + }, + "result": [ + { + "_id": 0, + "encryptedDouble": { + "$numberDouble": "0.0" + } + }, + { + "_id": 1, + "encryptedDouble": { + "$numberDouble": "1.0" + } + } + ] + } + ] + }, + { + "description": "Aggregate with $gt with no results", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDouble": { + "$numberDouble": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDouble": { + "$numberDouble": "1.0" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDouble": { + "$gt": { + "$numberDouble": "1.0" + } + } + } + } + ] + }, + "result": [] + } + ] + }, + { + "description": "Aggregate with $lt", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDouble": { + "$numberDouble": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDouble": { + "$numberDouble": "1.0" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDouble": { + "$lt": { + "$numberDouble": "1.0" + } + } + } + } + ] + }, + "result": [ + { + "_id": 0, + "encryptedDouble": { + "$numberDouble": "0.0" + } + } + ] + } + ] + }, + { + "description": "Aggregate with $lte", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + 
"key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDouble": { + "$numberDouble": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDouble": { + "$numberDouble": "1.0" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDouble": { + "$lte": { + "$numberDouble": "1.0" + } + } + } + }, + { + "$sort": { + "_id": 1 + } + } + ] + }, + "result": [ + { + "_id": 0, + "encryptedDouble": { + "$numberDouble": "0.0" + } + }, + { + "_id": 1, + "encryptedDouble": { + "$numberDouble": "1.0" + } + } + ] + } + ] + }, + { + "description": "Aggregate with $gt and $lt", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDouble": { + "$numberDouble": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDouble": { + "$numberDouble": "1.0" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDouble": { + "$gt": { + "$numberDouble": "0.0" + }, + "$lt": { + "$numberDouble": "2.0" + } + } + } + } + ] + }, + "result": [ + { + "_id": 1, + "encryptedDouble": { + "$numberDouble": "1.0" + } + } + ] + } + ] + }, + { + "description": "Aggregate with equality", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDouble": { + "$numberDouble": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDouble": { + "$numberDouble": "1.0" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDouble": { + "$numberDouble": "0.0" + } + } + } + ] + }, + "result": [ + { + "_id": 0, + "encryptedDouble": { + "$numberDouble": "0.0" + } + } + ] + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDouble": { + "$numberDouble": "1.0" + } + } + } + ] + }, + "result": [ + { + "_id": 1, + "encryptedDouble": { + "$numberDouble": "1.0" + } + } + ] + } + ] + }, + { + "description": "Aggregate with $in", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDouble": { + "$numberDouble": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDouble": { + "$numberDouble": "1.0" + } + 
} + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDouble": { + "$in": [ + { + "$numberDouble": "0.0" + } + ] + } + } + } + ] + }, + "result": [ + { + "_id": 0, + "encryptedDouble": { + "$numberDouble": "0.0" + } + } + ] + } + ] + }, + { + "description": "Wrong type: Insert Int", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDouble": { + "$numberInt": "0" + } + } + }, + "result": { + "errorContains": "cannot encrypt element" + } + } + ] + }, + { + "description": "Wrong type: Find Int", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "find", + "arguments": { + "filter": { + "encryptedDouble": { + "$gte": { + "$numberInt": "0" + } + } + }, + "sort": { + "_id": 1 + } + }, + "result": { + "errorContains": "field type is not supported" + } + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2-Range-Double-Delete.json b/test/client-side-encryption/spec/legacy/fle2-Range-Double-Delete.json new file mode 100644 index 0000000000..fa09cb87df --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2-Range-Double-Delete.json @@ -0,0 +1,749 @@ +{ + "runOn": [ + { + "minServerVersion": "6.2.0", + "topology": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "encrypted_fields": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDouble", + "bsonType": "double", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + }, + "key_vault_data": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ], + "tests": [ + { + "description": "FLE2 Range Double. 
Delete.", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDouble": { + "$numberDouble": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDouble": { + "$numberDouble": "1" + } + } + } + }, + { + "name": "deleteOne", + "arguments": { + "filter": { + "encryptedDouble": { + "$gt": { + "$numberDouble": "0" + } + } + } + }, + "result": { + "deletedCount": 1 + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedDouble": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDouble", + "bsonType": "double", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedDouble": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDouble", + "bsonType": "double", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "delete": "default", + "deletes": [ + { + "q": { + "encryptedDouble": { + "$gt": { + "$binary": { + "base64": 
"Cq8kAAADcGF5bG9hZAB/JAAABGcAQyQAAAMwAH0AAAAFZAAgAAAAAHgYoMGjEE6fAlAhICv0+doHcVX8CmMVxyq7+jlyGrvmBXMAIAAAAAC/5MQZgTHuIr/O5Z3mXPvqrom5JTQ8IeSpQGhO9sB+8gVjACAAAAAAPMgD8Rqnd94atKnMyPjlTwthUQ710MKJVqgtwNXLFWwAAzEAfQAAAAVkACAAAAAA2kiWNvEc4zunJ1jzvuClFC9hjZMYruKCqAaxq+oY8EAFcwAgAAAAACofIS72Cm6s866UCk+evTH3CvKBj/uZd72sAL608rzTBWMAIAAAAAB7NkTFgsBuPIyxW5KtYZD4uxdgRynoAUwA2RDj3qZNdwADMgB9AAAABWQAIAAAAABkfoBGmU3hjYBvQbjNW19kfXneBQsQQPRfUL3UAwI2cAVzACAAAAAAUpK2BUOqX/DGdX5YJniEZMWkofxHqeAbXceEGJxhp8AFYwAgAAAAAENNdd+fMV7VQzshj6RdvsxqDHYkPDxggdUHw3JbmFBeAAMzAH0AAAAFZAAgAAAAAFNprhQ3ZwIcYbuzLolAT5n/vc14P9kUUQComDu6eFyKBXMAIAAAAAAcx9z9pk32YbPV/sfPZl9ALIEVsqoLXgqWLVK/tP+heAVjACAAAAAA3eqduo9CWE5BePqBq7KCh5++QqXnyjCwybB15Zoeiu4AAzQAfQAAAAVkACAAAAAAODI+pB2pCuB+YmNEUAgtMfNdt3DmSkrJ96gRzLphgb8FcwAgAAAAAAT7dewFDxUDECQ3zVq75/cUN4IP+zsqhkP5+czUwlJIBWMAIAAAAABYcCJA2jocGQiF/vZ9VDEktoK015OqfY64yZhmNPs9GQADNQB9AAAABWQAIAAAAACNAk+yTZ4Ewk1EnotQK8O3h1gg9I7pr9q2+4po1iJVgAVzACAAAAAAUj/LesmtEsgqYVzMJ67umVA11hJTdDXwbxDoQ71vWyUFYwAgAAAAAG32XaKjEO9YTSLMnDR5eiaUpgBQz0n4M3VpBqhWTaEnAAM2AH0AAAAFZAAgAAAAAD/FIrGYFDjyYmVb7oTMVwweWP7A6F9LnyIuNO4MjBnXBXMAIAAAAACIZgJCQRZu7NhuNMyOqCn1tf+DfU1qm10TPCfj5JYV3wVjACAAAAAAZCaO06/DjOBjNrTxB8jdxM7X4XvNu9QTa2Y00kZJwgEAAzcAfQAAAAVkACAAAAAAciRW40ORJLVwchOEpz87Svb+5toAFM6LxDWv928ECwQFcwAgAAAAAN0dipyESIkszfjRzdDi8kAGaa2Hf4wrPAtiWwboZLuxBWMAIAAAAADhvKHhF7zfNRnG8QfiIlKycV6L6iGxbGNKkHligA9rAgADOAB9AAAABWQAIAAAAACZqAyCzYQupJ95mrBJX54yIz9VY7I0WrxpNYElCI4dTQVzACAAAAAA/eyJb6d1xfE+jJlVXMTD3HS/NEYENPVKAuj56Dr2dSEFYwAgAAAAAMfcJKj77c6y/RBQ5hb5LSNdvbYvCAHR59KLnbmxj1gAAAM5AH0AAAAFZAAgAAAAAI5bm3YO0Xgf0VT+qjVTTfvckecM3Cwqj7DTKZXf8/NXBXMAIAAAAAD/m+h8fBhWaHm6Ykuz0WX1xL4Eme3ErLObyEVJf8NCywVjACAAAAAAiGdTXD22l1zDxHeF4NXUTiBnNTsdpzJRwM6riTPuOogAAzEwAH0AAAAFZAAgAAAAANqo4+p6qdtCzcB4BX1wQ6llU7eFBnuu4MtZwp4B6mDlBXMAIAAAAAAGiz+VaukMZ+6IH4jtn4KWWdKK4/W+O+gRioQDrfzpMgVjACAAAAAAl/wlBjSJW/hKkll8HSBCGw4Ce1pJ5rZuhVE09hKq4jQAAzExAH0AAAAFZAAgAAAAAPrFXmHP2Y4YAm7b/aqsdn/DPoDkv7B8egWkfe23XsM1BXMAIAAAAAAGhwpKAr7skeqHm3oseSbO7qKNhmYsuUrECBxJ5k+D2AVjACAAAAAAKSsxERy2OBhb5MiH9/H2BET2oeU6yVihiAoWi/16FroAAzEyAH0AAAAFZAAgAAAAABzjYxwAjXxXc0Uxv18rH8I3my0Aguow0kTwKyxbrm+cBXMAIAAAAADVbqJVr6IdokuhXkEtXF0C2gINLiAjMVN20lE20Vmp2QVjACAAAAAACJ+VAwl9X88mjC76yzkWeL4K6AamNYjbNkpS9B6fQE4AAzEzAH0AAAAFZAAgAAAAAFMm2feF2fFCm/UC6AfIyepX/xJDSmnnolQIBnHcPmb5BXMAIAAAAABLI11kFrQoaNVZFmq/38aRNImPOjdJh0Lo6irI8M/AaAVjACAAAAAAZFgYh4nV8YN+/AAqe/QhKDkNBPg8KraCG7y3bOzhPV4AAzE0AH0AAAAFZAAgAAAAAJvXB3KyNiNtQko4SSzo/9b2qmM2zU9CQTTDfLSBWMgRBXMAIAAAAAAvjuVP7KsLRDeqVqRziTKpBrjVyqKiIbO9Gw8Wl2wFTAVjACAAAAAARuBn3JdQ8so7Gy0XVH0CtlvtRoJWhSrJP6eRIqbyWh8AAzE1AH0AAAAFZAAgAAAAAPGdcxDiid8z8XYnfdDivNMYVPgBKdGOUw6UStU+48CdBXMAIAAAAAARj6g1Ap0eEfuCZ4X2TsEw+Djrhto3fA5nLwPaY0vCTgVjACAAAAAATxLY6SkcLPaoOBJpQsggEqoxgJgiY9seP3fBQM05PckAAzE2AH0AAAAFZAAgAAAAAP5rGPrYGt3aKob5f/ldP0qrW7bmWvqnKY4QwdDWz400BXMAIAAAAADTQkW2ymaaf/bhteOOGmSrIR97bAnJx+yN3yMj1bTeewVjACAAAAAA6O+wIMt3xMITuCxVrqOCNBPX5F122G+/Is+EYkzUvVYAAzE3AH0AAAAFZAAgAAAAAAlz6wJze5UkIxKpJOZFGCOf3v2KByWyI6NB6JM9wNcBBXMAIAAAAABUC7P/neUIHHoZtq0jFVBHY75tSFYr1Y5S16YN5XxC1QVjACAAAAAANv97jMd9JmZ7lMeCzK7jaZHZYsZJNl6N9g9WXzd2om0AAzE4AH0AAAAFZAAgAAAAAFJ8AtHcjia/9Y5pLEc3qVgH5xKiXw12G9Kn2A1EY8McBXMAIAAAAAAxe7Bdw7eUSBk/oAawa7uicTEDgXLymRNhBy1LAxhDvwVjACAAAAAAtOUZ2NeUWwESpq95O92dhqYBt8RtFnjT43E7k9mQF3oAAzE5AH0AAAAFZAAgAAAAAO8uwQUaKFb6vqR3Sv3Wn4QAonC2exOC9lGG1juqP5DtBXMAIAAAAABZf1KyJgQg8/Rf5c02DgDK2aQu0rNCOvaL60ohDHyY+gVjACAAAAAA5nAWj1Ek8p0MLYZEB3yoVFDfBYZ+/ZpIo71u+W9hprcAAzIwAH0AAAAFZAAgAAAAAE8YtqyRsGCeiR6hhiyisR/hccmK4nZqIMzO4lUBmEFzBXMAIAAAAAC1UYOSKqAeG1UJiKjWFV
skRhuFKpj9Ezy+lICZvFlN5AVjACAAAAAA37oP30mTzVftI2FL9Uwyls/jqLqbmRDQk7p7nlx44uYAAzIxAH0AAAAFZAAgAAAAAPhCrMausDx1QUIEqp9rUdRKyM6a9AAx7jQ3ILIu8wNIBXMAIAAAAACmH8lotGCiF2q9VQxhsS+7LAZv79VUAsOUALaGxE/EpAVjACAAAAAAhVIkl8p19VZ0gpg32s++Jda3qsVSVB5tV4CKrtjhE3IAAzIyAH0AAAAFZAAgAAAAAOBi/GAYFcstMSJPgp3VkMiuuUUCrZytvqYaU8dwm8v2BXMAIAAAAACEZSZVyD3pKzGlbdwlYmWQhHHTV5SnNLknl2Gw8IaUTQVjACAAAAAAriBex2kK//RPhyVpCYJDBng4l5w8jD3m8BF7dVAP0p8AAzIzAH0AAAAFZAAgAAAAAIQuup+YGfH3mflzWopN8J1X8o8a0d9CSGIvrA5HOzraBXMAIAAAAADYvNLURXsC2ITMqK14LABQBI+hZZ5wNf24JMcKLW+84AVjACAAAAAAGeUzdG2kQrx5XypXJezZmPVzMYuqYZw7Bhbl4EPT0SMAAzI0AH0AAAAFZAAgAAAAAKsh0ADyOnVocFrOrf6MpTrNvAj8iaiE923DPryu124gBXMAIAAAAADg24a8NVE1GyScc6tmnTbmu5ulzO+896fE92lN08MeswVjACAAAAAAbO+DEBY3STVMQN7CbxmHDUBYrDR+80e797u/VmiK4vcAAzI1AH0AAAAFZAAgAAAAAKkVC2Y6HtRmv72tDnPUSjJBvse7SxLqnr09/Uuj9sVVBXMAIAAAAABYNFUkH7ylPMN+Bc3HWX1e0flGYNbtJNCY9SltJCW/UAVjACAAAAAAHlE3RLKcXpXto7Mr8nRgzEsmhjfGh4vcgPxashCSMg4AAzI2AH0AAAAFZAAgAAAAADDggcwcb/Yn1Kk39sOHsv7BO/MfP3m/AJzjGH506Wf9BXMAIAAAAAAYZIsdjICS0+BDyRUPnrSAZfPrwtuMaEDEn0/ijLNQmAVjACAAAAAA1c/sy3255NofBQrnBNSmVkSzMgsGPaaOUJShddVrnuQAAzI3AH0AAAAFZAAgAAAAAEWY7dEUOJBgjOoWVht1wLehsWAzB3rSOBtLgTuM2HC8BXMAIAAAAAAAoswiHRROurjwUW8u8D5EUT+67yvrgpB/j6PzBDAfVwVjACAAAAAALBvQumG8m7/bzJjGWN2cHSAncdN8jMtOSmEhEhGom24AAzI4AH0AAAAFZAAgAAAAAPZaapeAUUFPA7JTCMOWHJa9lnPFh0/gXfAPjA1ezm4ZBXMAIAAAAACmJvLY2nivw7/b3DOKH/X7bBXjJwoowqb1GtEFO3OYgAVjACAAAAAAGCDHuUJusGHKQ+r9nrFChmUUsRcqZKPGsRiLSk5gbFcAAzI5AH0AAAAFZAAgAAAAAK00u6jadxCZAiA+fTsPVDsnW5p5LCr4+kZZZOTDuZlfBXMAIAAAAAAote4zTEYMDgaaQbAdN8Dzv93ljPLdGjJzvnRn3KXgtQVjACAAAAAApZ6l+OrocOqgFek7WOqOP9JruTWZ+iW+5zdL3DZwzhkAAzMwAH0AAAAFZAAgAAAAAFbgabdyymiEVYYwtJSWa7lfl/oYuj/SukzJeDOR6wPVBXMAIAAAAADAFGFjS1vPbN6mQEhkDYTD6V2V23Ys9gUEUMGNvMPkaAVjACAAAAAAeICKly5Xtwmfd4JFD+5e1UWpu4V3KoRim7jeBICDs+UAAzMxAH0AAAAFZAAgAAAAABNMR6UBv2E627CqLtQ/eDYx7OEwQ7JrR4mSHFa1N8tLBXMAIAAAAAAxH4gucI4UmNVB7625C6hFSVCuIpJO3lusJlPuL8H5EQVjACAAAAAA0/uh1hrAlGGna/njCVaCqBwubxkifzF0zjCDQrIJOtUAAzMyAH0AAAAFZAAgAAAAAG8cd6WBneNunlqrQ2EmNf35W7OGObGq9WL4ePX+LUDmBXMAIAAAAAAjJ2+sX87NSis9hBsgb1QprVRnO7Bf+GObCGoUqyPE4wVjACAAAAAAAoJ4Ufdq45T9Aun5FupHlaBCIUsmUn6dXgV9KorpFikAAzMzAH0AAAAFZAAgAAAAAFWOUGkUpy8yf6gB3dio/aOfRKh7XuhvsUj48iESFJrGBXMAIAAAAAAY7sCDMcrUXvNuL6dO0m11WyijzXZvPIcOKob6IpC4PQVjACAAAAAAO/QFjczoznH95Iu0YEVMsU1GA1yxSGL8bcwSweNzAtkAAzM0AH0AAAAFZAAgAAAAAGUrIdKxOihwNmo6B+aG+Ag1qa0+iqdksHOjQj+Oy9bZBXMAIAAAAABwa5dbI2KmzBDNBTQBEkjZv4sPaeRkRNejcjdVymRFKQVjACAAAAAAUtdJucZoQvvPJiy65RonQrxBCcvtfHslpbgLbtWirCYAAzM1AH0AAAAFZAAgAAAAAOx89xV/hRk64/CkM9N2EMK6aldII0c8smdcsZ46NbP8BXMAIAAAAADBF6tfQ+7q9kTuLyuyrSnDgmrdmrXkdhl980i1KHuGHgVjACAAAAAAVc5njbLX1afpsuG662U/OBo4LanEk06lKbQtd95fswgAAzM2AH0AAAAFZAAgAAAAAMkN0L1oQWXhjwn9rAdudcYeN8/5VdCKU8cmDt7BokjsBXMAIAAAAAAT62pGXoRwExe9uvgYOI0hg5tOxilrWfoEmT0SMglWJwVjACAAAAAAhNc5ovIaxgtS856ZBfG+cAcHAz+3ewzB9C6zZoaggj4AAzM3AH0AAAAFZAAgAAAAANxfa4xCoaaB7k1C1RoH1LBhsCbN2yEq15BT9b+iqEC4BXMAIAAAAACAX9LV8Pemfw7NF0iB1/85NzM1Ef+1mUfyehacUVgobQVjACAAAAAAc0bIRNzuyc1Y0hkEmm7xV6Xp/LBlAgMq/ISCHr0Tq44AAzM4AH0AAAAFZAAgAAAAAOYIYoWkX7dGuyKfi3XssUlc7u/gWzqrR9KMkikKVdmSBXMAIAAAAABVF2OYjRTGi9Tw8XCAwZWLpX35Yl271TlNWp6N/nROhAVjACAAAAAAyGLbmHhe8eTQC8L2eDRcezUWULLZ9E8DPic7Mfp8/+4AAzM5AH0AAAAFZAAgAAAAAIy0+bXZi10QC+q7oSOLXK5Fee7VEk/qHSXukfeVIfgzBXMAIAAAAAAQ3IIV/JQCHW95AEbH5zGIHtJqyuPjWPMIZ+VmQHlxEwVjACAAAAAAD3azcNDdohU6tHO6ypT7F0rtydAYIMmLIsX6y7cXVkMAAzQwAH0AAAAFZAAgAAAAAL5SOJQ3LOhgdXJ5v086NNeAl1qonQnchObdpZJ1kHeEBXMAIAAAAAA+tEqTXODtik+ydJZSnUqXF9f18bPeze9eWtR7ExZJgQVjACAAAAAAUd8v2RN1WagYan1LbV+G81vcJKQ6HsX2GSBtnzMH7XEAAzQxAH0AAAAFZAAgAAAAAKl8zcHJRDjSjJeV/
WvMxulW1zrTFtaeBy/aKKhadc6UBXMAIAAAAADBdWQl5SBIvtZZLIHszePwkO14W1mQ0izUk2Ov21cPNAVjACAAAAAAHvQLJ2X4ncjVv1BsOd/jbVouRPwf4222WlGSzPyAfnsAAzQyAH0AAAAFZAAgAAAAAFvotcNaoKnVt5CBCOPwjexFO0WGWuaIGL6H/6KSau+6BXMAIAAAAAD2y2mBN5xPu5PJoY2zcr0GnQDtHRBogA5+xzIxccE9fwVjACAAAAAA1OntPAxq1NJ4sfca+ysHcg68mj7QHwdEZQVxHMlV6x0AAzQzAH0AAAAFZAAgAAAAAPezU0/vNT4Q4YKbTbaeHqcwNLT+IjW/Y9QFpIooihjPBXMAIAAAAACj2x4O4rHter8ZnTws5LAP9jJ/6kk9C/V3vL50LoFZHAVjACAAAAAAlZd3bPMUXRhTwLWflrQuKIwtauTswenUiaDi2ibHYYYAAzQ0AH0AAAAFZAAgAAAAAMqpayM2XotEFmm0gwQd9rIzApy0X+7HfOhNk6VU7F5lBXMAIAAAAACJR9+q5T9qFHXFNgGbZnPubG8rkO6cwWhzITQTmd6VgwVjACAAAAAAEEOCUo+ihRGl1kuHlabWBWUFyJPAXSXzpQB4od57cMEAAzQ1AH0AAAAFZAAgAAAAANCeyW+3oebaQk+aqxNVhAcT/BZ5nhsTVdKS3tMrLSvWBXMAIAAAAADxRFMDhkyuEc++WnndMfoUMLNL7T7rWoeblcrpSI6soQVjACAAAAAA1KYQISxmBcGmgDMAjBpTcfQr/oci60Kf4QtEYEXPKSQAAzQ2AH0AAAAFZAAgAAAAAIbo5YBTxXM7HQhl7UP9NNgpPGFkBx871r1B65G47+K8BXMAIAAAAAC21dJSxnEhnxO5gzN5/34BL4von45e1meW92qowzb8fQVjACAAAAAAisxsskU/9ykphpSjB6bMMY+6Tbt6wpjhE2dg1IULAgYAAzQ3AH0AAAAFZAAgAAAAABm/6pF96j26Jm7z5KkY1y33zcAEXLx2n0DwC03bs/ixBXMAIAAAAAD01OMvTZI/mqMgxIhA5nLs068mW+GKl3OW3ilf2D8+LgVjACAAAAAAnPa8PTfcaSSAdzNAC54IMTicbShbtt/cSnFHz7u7g8wAAzQ4AH0AAAAFZAAgAAAAAJ/D3+17gaQdkBqkL2wMwccdmCaVOtxzIkM8VyI4xI5zBXMAIAAAAAAggLVmkc5u+YzBR+oNE+XgLVp64fC6MzUb/Ilu/Jsw0AVjACAAAAAAp5qCK8p0eTvAAUQ8fIOGyb2YM1YflQ8H6ggiWZ9vNI4AAzQ5AH0AAAAFZAAgAAAAAJMRyUW50oaTzspS6A3TUoXyC3gNYQoShUGPakMmeVZrBXMAIAAAAACona2Pqwt4U2PmFrtmu37jB9kQ/12okyAVtYa8TQkDiQVjACAAAAAAx/LOuDwvpaPFJeKLowZW/4q7SUsLtdldB27zc4//alEAAzUwAH0AAAAFZAAgAAAAAByuYl8dBvfaZ0LO/81JW4hYypeNmvLMaxsIdvqMPrWoBXMAIAAAAABNddwobOUJzm9HOUD8BMZJqkNCUCqstHZkC76FIdNg9AVjACAAAAAA4SEPY+qEZ2dCu/9485IEVybiM3Z7szXYM+izxklNm14AAzUxAH0AAAAFZAAgAAAAAHEzLtfmF/sBcYPPdj8867VmmQyU1xK9I/3Y0478azvABXMAIAAAAAAcmyFajZPnBTbO+oLInNwlApBocUekKkxz2hYFeSlQ+gVjACAAAAAA15ibZoBP1/pP27BDIfizDYNRDIpOmj8Ie34FvrtqjhMAAzUyAH0AAAAFZAAgAAAAAJam7JYsZe2cN20ZYm2W3v1pisNt5PLiniMzymBLWyMtBXMAIAAAAABxCsKVMZMTn3n+R2L7pVz5nW804r8HcK0mCBw3jUXKXAVjACAAAAAAElCgoMRrtlONqnyosjIpiz4mQTLq3+b/JUCVEq4QFiAAAzUzAH0AAAAFZAAgAAAAAMXrXx0saZ+5gORmwM2FLuZG6iuO2YS+1IGPoAtDKoKBBXMAIAAAAADIQsxCr8CfFKaBcx8kIeSywnGh7JHjKRJ9vJd9x79y7wVjACAAAAAAPzOTJ088k9QdGUpZ1ubUdkw3pTGkNF8bui4a9wqeo08AAzU0AH0AAAAFZAAgAAAAACbzcUD3INSnCRspOKF7ubne74OK9L0FTZvi9Ay0JVDYBXMAIAAAAADPebVQH8Btk9rhBIoUOdSAdpPvz7qIY4UC2i6IGisSAQVjACAAAAAAe4zSAgMQPjtz2t1DgxW1XOlV+fnNMXt6iQ7iV7DgQz4AAzU1AH0AAAAFZAAgAAAAACUc2CtD1MK/UTxtv+8iA9FoHEyTwdl43HKeSwDw2Lp5BXMAIAAAAACCIduIdw65bQMzRYRfjBJj62bc69T4QqH4QoWanwlvowVjACAAAAAA+i6aTnkoixxSOxDgQM/aWTylOy1m0ViCDU5asOM5ieYAAzU2AH0AAAAFZAAgAAAAAHSaHWs/dnmI9sc7nB50VB2Bzs0kHapMHCQdyVEYY30TBXMAIAAAAACkV22lhEjWv/9/DubfHBAcwJggKI5mIbSK5L2nyqloqQVjACAAAAAA1I5pLbnlFf/EfJlhE0RFIioDjzKrWNh4TO8H/ksrbMcAAzU3AH0AAAAFZAAgAAAAAAL8jhNBG0KXXZhmZ0bPXtfgapJCB/AI+BEHB0eZ3C75BXMAIAAAAADHx/fPa639EBmGV5quLi8IQT600ifiKSOhTDOK19DnzwVjACAAAAAANfWpWWy9iu7pgMEy2aVCggGEtJtXKQG1+3zCf8D6iEkAAzU4AH0AAAAFZAAgAAAAAAxgeclNl09H7HvzD1oLwb2YpFca5eaX90uStYXHilqKBXMAIAAAAACMU5pSxzIzWlQxHyW170Xs9EhD1hURASQk+qkx7K5Y6AVjACAAAAAA/kYlJPSFn2g1l7cy6djV/7wgnlPWphZ0IKSY8uenUF0AAzU5AH0AAAAFZAAgAAAAABmO7QD9vxWMmFjIHz13lyOeV6vHT6mYCsWxF7hb/yOjBXMAIAAAAACT9lmgkiqzuWG24afuzYiCeK9gmJqacmxAruIukd0xEAVjACAAAAAAyiZxtuOZQNSit4wFMJS3jnqLbDzyJ0OtDTKs6r0EO0cAAzYwAH0AAAAFZAAgAAAAAB89SjLtDJkqEghRGyj6aQ/2qvWLNuMROoXmzbYbCMKMBXMAIAAAAAC8sywgND+CjhVTF6HnRQeay8y9/HnVzDI42dEPah28LQVjACAAAAAAzBj8q9Ig4uhIc6rhNDVX1lII9lzfhJ0+ig8viBhdrtQAAzYxAH0AAAAFZAAgAAAAABN2alGq9Aats1mwERNGwL/fIwZSvVCe9/8XMHTFlpUpBXMAIAAAAACuDPjJgvvbBYhbLpjMiWUCsVppiYrhvR+yMysNPN8cZAVjACAAAAAAqcsLIvb3Pe/QKYB9wQ86Ye2O
FSAJEIY4ve7LMM3SwGMAAzYyAH0AAAAFZAAgAAAAAL8YB6VAqGBiWD4CBv16IBscg5J7VQCTZu87n6pj+86KBXMAIAAAAAAmxm8e68geeyAdUjSMWBHzUjneVB0pG9TBXIoE6467hAVjACAAAAAA9Kd65vKIUckNPvxl/HHNPJVnFHXyErp4qibmD7BloukAAzYzAH0AAAAFZAAgAAAAAL4yLkCTV5Dmxa5toBu4JT8ge/cITAaURIOuFuOtFUkeBXMAIAAAAAAXoFNQOMGkAj7qEJP0wQafmFSXgWGeorDVbwyOxWLIsgVjACAAAAAAaFrBx0xXIryTe7V/kX+jfPHd0057x7k7MxzDAzi0RpcAAzY0AH0AAAAFZAAgAAAAAEU8DoUp46YtYjNFS9kNXwdYxQ9IW27vCTb+VcqqfnKNBXMAIAAAAADe7vBOgYReE8X78k5ARuUnv4GmzPZzg6SbConf4L2G3wVjACAAAAAAOl/tartywd/fJj5DNRsVH/ml9tJ8KkkCbKObsFe8lHcAAzY1AH0AAAAFZAAgAAAAAPa4yKTtkUtySuWo1ZQsp2QXtPb5SYqzA5vYDnS1P6c0BXMAIAAAAADKnF58R1sXlHlsHIvCBR3YWW/qk54z9CTDhZydkD1cOQVjACAAAAAAfn/jP8K3QUILngCNkydHARyBvBHIFdaJjzV0EXsFruMAAzY2AH0AAAAFZAAgAAAAAN5WJnMBmfgpuQPyonmY5X6OdRvuHw4nhsnGRnFAQ95VBXMAIAAAAACwftzu7KVV1rmGKwXtJjs3cJ1gE3apr8+N0SAg1F2cHwVjACAAAAAAf0XyQQLd/HIYaf9EeAV0o2h12K1AV5piLCpZihznBXoAAzY3AH0AAAAFZAAgAAAAACHvDsaPhoSb6DeGnKQ1QOpGYAgK82qpnqwcmzSeWaJHBXMAIAAAAABRq3C5+dOfnkAHM5Mg5hPB3O4jhwQlBgQWLA7Ph5bhgwVjACAAAAAAm2Ukk5kkQp+PDpBCCefQOqFKKZie4hINim3yvtypsEAAAzY4AH0AAAAFZAAgAAAAAOBePJvccPMJmy515KB1AkXF5Pi8NOG4V8psWy0SPRP+BXMAIAAAAAB3dOJG9xIDtEKCRzeNnPS3bFZepMj8UKBobKpSoCPqpgVjACAAAAAAnu33SngYPOtRgdJ3aBBuxWn80ti3OO6nZJcI6eh0VfQAAzY5AH0AAAAFZAAgAAAAABUvRrDQKEXLMdhnzXRdhiL6AGNs2TojPky+YVLXs+JnBXMAIAAAAAD1kYicbEEcPzD4QtuSYQQWDPq8fuUWGddpWayKn3dT9QVjACAAAAAAUc5dxcbNYY3qMO8+Xm2xis+pH9NjPLrHqHenwDEImAEAAzcwAH0AAAAFZAAgAAAAAOvSnpujeKNen4pqc2HR63C5s5oJ1Vf4CsbKoYQvkwl5BXMAIAAAAACw2+vAMdibzd2YVVNfk81yXkFZP0WLJ82JBxJmXnYE+QVjACAAAAAAnmej008JFQAgdImjk26PYvqdATjRYRufLi+vEVVwtucAAzcxAH0AAAAFZAAgAAAAAAo0xfGG7tJ3GWhgPVhW5Zn239nTD3PadShCNRc9TwdNBXMAIAAAAADZh243oOhenu0s/P/5KZLBDh9ADqKHtSWcXpO9D2sIjgVjACAAAAAAhpNHI4+aRdWHCvZfEjlgpRBz36g05wN9p/hj3NEwc7EAAAVlACAAAAAA65pz95EthqQpfoHS9nWvdCh05AV+OokP7GUaI+7j8+wSY20AAAAAAAAAAAAAEHBheWxvYWRJZAAAAAAAEGZpcnN0T3BlcmF0b3IAAQAAAAA=", + "subType": "06" + } + } + } + }, + "limit": 1 + } + ], + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDouble", + "bsonType": "double", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + } + }, + "deleteTokens": { + "default.default": { + "encryptedDouble": { + "e": { + "$binary": { + "base64": "65pz95EthqQpfoHS9nWvdCh05AV+OokP7GUaI+7j8+w=", + "subType": "00" + } + }, + "o": { + "$binary": { + "base64": "noN+05JsuO1oDg59yypIGj45i+eFH6HOTXOPpeZ//Mk=", + "subType": "00" + } + } + } + } + } + } + }, + "command_name": "delete" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 0, + "encryptedDouble": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "5nRutVIyq7URVOVtbE4vM01APSIajAVnsShMwjBlzkM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6YrBn2ofIw1b5ooakrLOwF41BWrps8OO0H9WH4/rtlE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "n+XAuFnP8Dov9TnhGFxNx0K/MnVM9WbJ7RouEu0ndO0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yRXojuVdn5GQtD97qYlaCL6cOLmZ7Cvcb3wFjkLUIdM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "DuIkdRPITRs55I4SZmgomAHCIsDQmXRhW8+MOznkzSk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SsBk+Et1lTbU+QRPx+xyJ/jMkmfG+QCvQEpip2YYrzA=", + "subType": "00" 
+ } + }, + { + "$binary": { + "base64": "crCIzOd8KhHvvUlX7M1v9bhvU4pLdTc+X2SuqoKU5Ek=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "YOWdCw4UrqnxkAaVjqmC4sKQDMVMHEpFGnlxpxdaU6E=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "M3SShp81Ff8tQ632qKbv9MUcN6wjDaBReI0VXNu6Xh4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "gzHlSPxpM0hT75kQvWFzGlOxKvDoiKQZOr19V6l2zXI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "s3JnppOGYw9SL2Q1kMAZs948v2F5PrpXjGei/HioDWs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cG6+3Gk/zEH68P/uuuwiAUVCuyJwa1LeV+t29FlPPAo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dupdvR3AyJtM+g9NDKiaLVOtGca387JQp8w+V03m7Ig=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JqEQc5svj2jTvZ6LLA5ivE+kTb/0aRemSEmxk4G7Zrg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "szcXXXKnob+p3SoM4yED2R920LeJ7cVsclPMFTe4CeI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "o1QoGVXmuBdHwHm7aCtGMlMVKrjFdYvJXpoq6uhIAZ0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Jfm5wPlqqLCJRGQIqRq2NGmpn7s0Vrih2H3YAOoI2YU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zMHLb8ARbsYo8Ld05bqnGFf1Usha6EGb8QKwdSAyps0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yQdtq9lh5pugL7/i0Bj/PuZUUBUIzf+7wj1rl5y736w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wGWVZdO7qIuyDg/BqDgqjgoQ02h5YYgwXQB1oCin2NE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "by9HMLj6NTEpgztZ5HSN6GxImkXPcaFINYDzgZY33X8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tWo0vbasi7bXmn/MsOx13VC1IsWtpx/nYp0uj4iMzdA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tQQpndUYd5O87lOtrGjH3wl9VsOK0ray7RMasL90sBM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cQjXEDCMsOpKLLf+vlTgIHA+cbSJdzqhbSX9Wvh95aA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "7yMpU48IxK9SzP2cx3VnTownGEwFmeFofuuFT97SuuY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kSOx1kz0CmBgzKQHZlo65ZUY1DIv9A99JRm+Us2y6Ew=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ubQpdPBe6/xvtr+AcXdfYLSvYCR4ot0tivehkCsupb4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xal+iCJ6FTefRQToyoNksc9NCZShyn04NDGi4IYrcoM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "d7jU4iOK50xHxlkSifcxlZFCM46TSgQzoYivxG3HNLY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tJvl2nsBLBVzL3pp6sKWCL4UXeh3q/roYBJjSb74ve0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "OIUCaKRvIx9t1w6Hxlz1IcQTdPNCfdRNwnnTm10W+X0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "A9tvzsiElotOUVIB4CqfQp9mAwqvTM35YkmAR170aHA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "lI8gpK7hpb7c9x4RQugsxMnQay5LZJmwslZdvMx/dcE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dNCzh40U0XvdKnSDi3HRQOWQftEsDVqc4uUvsVFGoq8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "IP+iwEBWBwVVZIdpaMu8k5+soFCz+TZkYn3drKZ9grE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pnqyh6e0y5svHkJDShlN9CHV0WvMBE4QbtJpQw5ZCXc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "elEl42tbVDoRTLjAhZUFEtXiut4b3PVhg/1ZLZSQdtE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "vHuu2FxwclMHqyE6JBYbTYgbEkB0dqb/JuaxsvfwsmY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xTf7NCe3Gf8QpE78HR5OknlLTKfs9J+RN9UZpH6fnso=", + "subType": "00" + } + }, + { + "$binary": { + "base64": 
"XiWSasRnJAulGR6+LCVD3mwRObXylqYWR9jvpywq12c=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "MZMxEQ5ikx0PG1YFIExv0UnTZogsvgeOEZTpzvBDn4w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yZMyMZBDrWbAhvnic7vvIYhmO9m5H2iuv0c8KNZrBzY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xxM14hTPY5j0vvcK2C7YAEjzdsfUTFHozHC0hEo1bxI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+01rqR1xVwkpGXcstbk1ItJqFVjH6Q8MGxEN3Cm9Y1A=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xOpLV0Z2VTRJ3iWtnWZcsyjXubTIkYWo31cO+HV1o1k=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "BWUOLqgLBqc5NwxVlSV5H3KFQPXbCp7mdo+jF+8cJqY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "fuQb1S6xZDGlrEbK+kI23aL53PP1PVNwqICnZNt9Yzg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SfscnoibFttahLdPVC4Ee+47ewGFKpDSU7M6HX19bKE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "rpSW2awybNVeKtat91VFxqbINoTfNhPfQAu+d73Xtf8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "9M/CP9ccOIIj2LLFmE0GFDO0Ban2wsNalEXfM6+h+1s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "WrEMG49l1ye4MhXs5ZS9tz8P6h+hDvthIg/2wW9ne1Q=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ImNhbfeyfH8qIEeA5ic0s3dAQBdzzTBS+CPsNih9vZ0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dWP33YDSn04UKJN2ogh2Rui0iW/0q2y18OCDRVcfyoo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "lYv0isAtfGh6H9tdp3cp2eHU7q2J+uk7QrgcxtK3w7Y=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "VGMoamB/+7zTOYcY/pqJc96xlv2PdW4hwsIAEIslTDQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yNeBWMF7BnD9wVwz2PgJsvWr77QiVvvWUvJF0+fqBug=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SfpvObJ+tJBXSvqeN7vlOfmhYign635lciYAJIjUtY8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dsen4NqjzVGjpjufiTMs3+gqeD09EbnuogPgxrJECwg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pxCWVM3sn19NsFEpgHbgLa+PmYlhN3mMiP0Wk8kJhYw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q11KNvJszjYIB9n9HcC+N4uz11a3eRj1L3BH9scKMDQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "A1PmkgcEToWh1JiVWE6mI5jUu7poxWWuCUt/cgRUUDc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "qJo3Hu4PJeanL7XEaWXO/n3YsodhZyd+MJOOmB9Kpd8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "BkBKLO8URFscfRY9Bav/1+L9mLohDgNr/MkZtGiraIs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "rZq5WA3Hx3xthOyHAJXK//f8pE2qbz7YKu3TIMp9GFY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "X07a/Lm80p5xd4RFs1dNmw+90tmPDPdGiAKVZkxd4zY=", + "subType": "00" + } + } + ] + } + ] + } + } + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2-Range-Double-FindOneAndUpdate.json b/test/client-side-encryption/spec/legacy/fle2-Range-Double-FindOneAndUpdate.json new file mode 100644 index 0000000000..59a304166b --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2-Range-Double-FindOneAndUpdate.json @@ -0,0 +1,1160 @@ +{ + "runOn": [ + { + "minServerVersion": "6.2.0", + "topology": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "encrypted_fields": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": 
"04" + } + }, + "path": "encryptedDouble", + "bsonType": "double", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + }, + "key_vault_data": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ], + "tests": [ + { + "description": "FLE2 Range Double. FindOneAndUpdate.", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDouble": { + "$numberDouble": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDouble": { + "$numberDouble": "1" + } + } + } + }, + { + "name": "findOneAndUpdate", + "arguments": { + "filter": { + "encryptedDouble": { + "$gt": { + "$numberDouble": "0" + } + } + }, + "update": { + "$set": { + "encryptedDouble": { + "$numberDouble": "2" + } + } + }, + "returnDocument": "Before" + }, + "result": { + "_id": 1, + "encryptedDouble": { + "$numberDouble": "1" + } + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedDouble": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDouble", + "bsonType": "double", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedDouble": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", 
+ "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDouble", + "bsonType": "double", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "findAndModify": "default", + "query": { + "encryptedDouble": { + "$gt": { + "$binary": { + "base64": "Cq8kAAADcGF5bG9hZAB/JAAABGcAQyQAAAMwAH0AAAAFZAAgAAAAAHgYoMGjEE6fAlAhICv0+doHcVX8CmMVxyq7+jlyGrvmBXMAIAAAAAC/5MQZgTHuIr/O5Z3mXPvqrom5JTQ8IeSpQGhO9sB+8gVjACAAAAAAPMgD8Rqnd94atKnMyPjlTwthUQ710MKJVqgtwNXLFWwAAzEAfQAAAAVkACAAAAAA2kiWNvEc4zunJ1jzvuClFC9hjZMYruKCqAaxq+oY8EAFcwAgAAAAACofIS72Cm6s866UCk+evTH3CvKBj/uZd72sAL608rzTBWMAIAAAAAB7NkTFgsBuPIyxW5KtYZD4uxdgRynoAUwA2RDj3qZNdwADMgB9AAAABWQAIAAAAABkfoBGmU3hjYBvQbjNW19kfXneBQsQQPRfUL3UAwI2cAVzACAAAAAAUpK2BUOqX/DGdX5YJniEZMWkofxHqeAbXceEGJxhp8AFYwAgAAAAAENNdd+fMV7VQzshj6RdvsxqDHYkPDxggdUHw3JbmFBeAAMzAH0AAAAFZAAgAAAAAFNprhQ3ZwIcYbuzLolAT5n/vc14P9kUUQComDu6eFyKBXMAIAAAAAAcx9z9pk32YbPV/sfPZl9ALIEVsqoLXgqWLVK/tP+heAVjACAAAAAA3eqduo9CWE5BePqBq7KCh5++QqXnyjCwybB15Zoeiu4AAzQAfQAAAAVkACAAAAAAODI+pB2pCuB+YmNEUAgtMfNdt3DmSkrJ96gRzLphgb8FcwAgAAAAAAT7dewFDxUDECQ3zVq75/cUN4IP+zsqhkP5+czUwlJIBWMAIAAAAABYcCJA2jocGQiF/vZ9VDEktoK015OqfY64yZhmNPs9GQADNQB9AAAABWQAIAAAAACNAk+yTZ4Ewk1EnotQK8O3h1gg9I7pr9q2+4po1iJVgAVzACAAAAAAUj/LesmtEsgqYVzMJ67umVA11hJTdDXwbxDoQ71vWyUFYwAgAAAAAG32XaKjEO9YTSLMnDR5eiaUpgBQz0n4M3VpBqhWTaEnAAM2AH0AAAAFZAAgAAAAAD/FIrGYFDjyYmVb7oTMVwweWP7A6F9LnyIuNO4MjBnXBXMAIAAAAACIZgJCQRZu7NhuNMyOqCn1tf+DfU1qm10TPCfj5JYV3wVjACAAAAAAZCaO06/DjOBjNrTxB8jdxM7X4XvNu9QTa2Y00kZJwgEAAzcAfQAAAAVkACAAAAAAciRW40ORJLVwchOEpz87Svb+5toAFM6LxDWv928ECwQFcwAgAAAAAN0dipyESIkszfjRzdDi8kAGaa2Hf4wrPAtiWwboZLuxBWMAIAAAAADhvKHhF7zfNRnG8QfiIlKycV6L6iGxbGNKkHligA9rAgADOAB9AAAABWQAIAAAAACZqAyCzYQupJ95mrBJX54yIz9VY7I0WrxpNYElCI4dTQVzACAAAAAA/eyJb6d1xfE+jJlVXMTD3HS/NEYENPVKAuj56Dr2dSEFYwAgAAAAAMfcJKj77c6y/RBQ5hb5LSNdvbYvCAHR59KLnbmxj1gAAAM5AH0AAAAFZAAgAAAAAI5bm3YO0Xgf0VT+qjVTTfvckecM3Cwqj7DTKZXf8/NXBXMAIAAAAAD/m+h8fBhWaHm6Ykuz0WX1xL4Eme3ErLObyEVJf8NCywVjACAAAAAAiGdTXD22l1zDxHeF4NXUTiBnNTsdpzJRwM6riTPuOogAAzEwAH0AAAAFZAAgAAAAANqo4+p6qdtCzcB4BX1wQ6llU7eFBnuu4MtZwp4B6mDlBXMAIAAAAAAGiz+VaukMZ+6IH4jtn4KWWdKK4/W+O+gRioQDrfzpMgVjACAAAAAAl/wlBjSJW/hKkll8HSBCGw4Ce1pJ5rZuhVE09hKq4jQAAzExAH0AAAAFZAAgAAAAAPrFXmHP2Y4YAm7b/aqsdn/DPoDkv7B8egWkfe23XsM1BXMAIAAAAAAGhwpKAr7skeqHm3oseSbO7qKNhmYsuUrECBxJ5k+D2AVjACAAAAAAKSsxERy2OBhb5MiH9/H2BET2oeU6yVihiAoWi/16FroAAzEyAH0AAAAFZAAgAAAAABzjYxwAjXxXc0Uxv18rH8I3my0Aguow0kTwKyxbrm+cBXMAIAAAAADVbqJVr6IdokuhXkEtXF0C2gINLiAjMVN20lE20Vmp2QVjACAAAAAACJ+VAwl9X88mjC76yzkWeL4K6AamNYjbNkpS9B6fQE4AAzEzAH0AAAAFZAAgAAAAAFMm2feF2fFCm/UC6AfIyepX/xJDSmnnolQIBnHcPmb5BXMAIAAAAABLI11kFrQoaNVZFmq/38aRNImPOjdJh0Lo6irI8M/AaAVjACAAAAAAZFgYh4nV8YN+/AAqe/QhKDkNBPg8KraCG7y3bOzhPV4AAzE0AH0AAAAFZAAgAAAAAJvXB3KyNiNtQko4SSzo/9b2qmM2zU9CQTTDfLSBWMgRBXMAIAAAAAAvjuVP7KsLRDeqVqRziTKpBrjVyqKiIbO9Gw8Wl2wFTAVjACAAAAAARuBn3JdQ8so7Gy0XVH0CtlvtRoJWhSrJP6eRIqbyWh8AAzE1AH0AAAAFZAAgAAAAAPGdcxDiid8z8XYnfdDivNMYVPgBKdGOUw6UStU+48CdBXMAIAAAAAARj6g1Ap0eEfuCZ4X2TsEw+Djrhto3fA5nLwPaY0vCTgVjACAAAAAATxLY6SkcLPaoOBJpQsggEqoxgJgiY9seP3fBQM05PckAAzE2AH0AAAAFZAAgAAAAAP5rGPrYGt3aKob5f/ldP0qrW7bmWvqnKY4QwdDWz400BXMAIAAAAADTQkW2ymaaf/bhteOOGmSrIR97bAnJx+yN3yMj1bTeewVjACAAAAAA6O+wIMt3xMITuCxVrqOCNBPX5F122G+/Is+EYkzUvVYAAzE3AH0AAAAFZAAgAAAAAAlz6wJze5UkIxKpJOZFGCOf3v2KByWyI6NB6JM9wNcBBXMAIAAAAABUC7P/neUIHHoZtq0jFVBH
Y75tSFYr1Y5S16YN5XxC1QVjACAAAAAANv97jMd9JmZ7lMeCzK7jaZHZYsZJNl6N9g9WXzd2om0AAzE4AH0AAAAFZAAgAAAAAFJ8AtHcjia/9Y5pLEc3qVgH5xKiXw12G9Kn2A1EY8McBXMAIAAAAAAxe7Bdw7eUSBk/oAawa7uicTEDgXLymRNhBy1LAxhDvwVjACAAAAAAtOUZ2NeUWwESpq95O92dhqYBt8RtFnjT43E7k9mQF3oAAzE5AH0AAAAFZAAgAAAAAO8uwQUaKFb6vqR3Sv3Wn4QAonC2exOC9lGG1juqP5DtBXMAIAAAAABZf1KyJgQg8/Rf5c02DgDK2aQu0rNCOvaL60ohDHyY+gVjACAAAAAA5nAWj1Ek8p0MLYZEB3yoVFDfBYZ+/ZpIo71u+W9hprcAAzIwAH0AAAAFZAAgAAAAAE8YtqyRsGCeiR6hhiyisR/hccmK4nZqIMzO4lUBmEFzBXMAIAAAAAC1UYOSKqAeG1UJiKjWFVskRhuFKpj9Ezy+lICZvFlN5AVjACAAAAAA37oP30mTzVftI2FL9Uwyls/jqLqbmRDQk7p7nlx44uYAAzIxAH0AAAAFZAAgAAAAAPhCrMausDx1QUIEqp9rUdRKyM6a9AAx7jQ3ILIu8wNIBXMAIAAAAACmH8lotGCiF2q9VQxhsS+7LAZv79VUAsOUALaGxE/EpAVjACAAAAAAhVIkl8p19VZ0gpg32s++Jda3qsVSVB5tV4CKrtjhE3IAAzIyAH0AAAAFZAAgAAAAAOBi/GAYFcstMSJPgp3VkMiuuUUCrZytvqYaU8dwm8v2BXMAIAAAAACEZSZVyD3pKzGlbdwlYmWQhHHTV5SnNLknl2Gw8IaUTQVjACAAAAAAriBex2kK//RPhyVpCYJDBng4l5w8jD3m8BF7dVAP0p8AAzIzAH0AAAAFZAAgAAAAAIQuup+YGfH3mflzWopN8J1X8o8a0d9CSGIvrA5HOzraBXMAIAAAAADYvNLURXsC2ITMqK14LABQBI+hZZ5wNf24JMcKLW+84AVjACAAAAAAGeUzdG2kQrx5XypXJezZmPVzMYuqYZw7Bhbl4EPT0SMAAzI0AH0AAAAFZAAgAAAAAKsh0ADyOnVocFrOrf6MpTrNvAj8iaiE923DPryu124gBXMAIAAAAADg24a8NVE1GyScc6tmnTbmu5ulzO+896fE92lN08MeswVjACAAAAAAbO+DEBY3STVMQN7CbxmHDUBYrDR+80e797u/VmiK4vcAAzI1AH0AAAAFZAAgAAAAAKkVC2Y6HtRmv72tDnPUSjJBvse7SxLqnr09/Uuj9sVVBXMAIAAAAABYNFUkH7ylPMN+Bc3HWX1e0flGYNbtJNCY9SltJCW/UAVjACAAAAAAHlE3RLKcXpXto7Mr8nRgzEsmhjfGh4vcgPxashCSMg4AAzI2AH0AAAAFZAAgAAAAADDggcwcb/Yn1Kk39sOHsv7BO/MfP3m/AJzjGH506Wf9BXMAIAAAAAAYZIsdjICS0+BDyRUPnrSAZfPrwtuMaEDEn0/ijLNQmAVjACAAAAAA1c/sy3255NofBQrnBNSmVkSzMgsGPaaOUJShddVrnuQAAzI3AH0AAAAFZAAgAAAAAEWY7dEUOJBgjOoWVht1wLehsWAzB3rSOBtLgTuM2HC8BXMAIAAAAAAAoswiHRROurjwUW8u8D5EUT+67yvrgpB/j6PzBDAfVwVjACAAAAAALBvQumG8m7/bzJjGWN2cHSAncdN8jMtOSmEhEhGom24AAzI4AH0AAAAFZAAgAAAAAPZaapeAUUFPA7JTCMOWHJa9lnPFh0/gXfAPjA1ezm4ZBXMAIAAAAACmJvLY2nivw7/b3DOKH/X7bBXjJwoowqb1GtEFO3OYgAVjACAAAAAAGCDHuUJusGHKQ+r9nrFChmUUsRcqZKPGsRiLSk5gbFcAAzI5AH0AAAAFZAAgAAAAAK00u6jadxCZAiA+fTsPVDsnW5p5LCr4+kZZZOTDuZlfBXMAIAAAAAAote4zTEYMDgaaQbAdN8Dzv93ljPLdGjJzvnRn3KXgtQVjACAAAAAApZ6l+OrocOqgFek7WOqOP9JruTWZ+iW+5zdL3DZwzhkAAzMwAH0AAAAFZAAgAAAAAFbgabdyymiEVYYwtJSWa7lfl/oYuj/SukzJeDOR6wPVBXMAIAAAAADAFGFjS1vPbN6mQEhkDYTD6V2V23Ys9gUEUMGNvMPkaAVjACAAAAAAeICKly5Xtwmfd4JFD+5e1UWpu4V3KoRim7jeBICDs+UAAzMxAH0AAAAFZAAgAAAAABNMR6UBv2E627CqLtQ/eDYx7OEwQ7JrR4mSHFa1N8tLBXMAIAAAAAAxH4gucI4UmNVB7625C6hFSVCuIpJO3lusJlPuL8H5EQVjACAAAAAA0/uh1hrAlGGna/njCVaCqBwubxkifzF0zjCDQrIJOtUAAzMyAH0AAAAFZAAgAAAAAG8cd6WBneNunlqrQ2EmNf35W7OGObGq9WL4ePX+LUDmBXMAIAAAAAAjJ2+sX87NSis9hBsgb1QprVRnO7Bf+GObCGoUqyPE4wVjACAAAAAAAoJ4Ufdq45T9Aun5FupHlaBCIUsmUn6dXgV9KorpFikAAzMzAH0AAAAFZAAgAAAAAFWOUGkUpy8yf6gB3dio/aOfRKh7XuhvsUj48iESFJrGBXMAIAAAAAAY7sCDMcrUXvNuL6dO0m11WyijzXZvPIcOKob6IpC4PQVjACAAAAAAO/QFjczoznH95Iu0YEVMsU1GA1yxSGL8bcwSweNzAtkAAzM0AH0AAAAFZAAgAAAAAGUrIdKxOihwNmo6B+aG+Ag1qa0+iqdksHOjQj+Oy9bZBXMAIAAAAABwa5dbI2KmzBDNBTQBEkjZv4sPaeRkRNejcjdVymRFKQVjACAAAAAAUtdJucZoQvvPJiy65RonQrxBCcvtfHslpbgLbtWirCYAAzM1AH0AAAAFZAAgAAAAAOx89xV/hRk64/CkM9N2EMK6aldII0c8smdcsZ46NbP8BXMAIAAAAADBF6tfQ+7q9kTuLyuyrSnDgmrdmrXkdhl980i1KHuGHgVjACAAAAAAVc5njbLX1afpsuG662U/OBo4LanEk06lKbQtd95fswgAAzM2AH0AAAAFZAAgAAAAAMkN0L1oQWXhjwn9rAdudcYeN8/5VdCKU8cmDt7BokjsBXMAIAAAAAAT62pGXoRwExe9uvgYOI0hg5tOxilrWfoEmT0SMglWJwVjACAAAAAAhNc5ovIaxgtS856ZBfG+cAcHAz+3ewzB9C6zZoaggj4AAzM3AH0AAAAFZAAgAAAAANxfa4xCoaaB7k1C1RoH1LBhsCbN2yEq15BT9b+iqEC4BXMAIAAAAACAX9LV8Pemfw7NF0iB1/85NzM1Ef+1mUfyehacUVgobQVjACAAAAAAc0bIRNzuyc1Y0hkEmm7xV6Xp/LBlAgMq/ISCHr0Tq44AAzM4AH0AAAAFZAAgAAAAAOYIYoWkX7dGuyKfi3X
ssUlc7u/gWzqrR9KMkikKVdmSBXMAIAAAAABVF2OYjRTGi9Tw8XCAwZWLpX35Yl271TlNWp6N/nROhAVjACAAAAAAyGLbmHhe8eTQC8L2eDRcezUWULLZ9E8DPic7Mfp8/+4AAzM5AH0AAAAFZAAgAAAAAIy0+bXZi10QC+q7oSOLXK5Fee7VEk/qHSXukfeVIfgzBXMAIAAAAAAQ3IIV/JQCHW95AEbH5zGIHtJqyuPjWPMIZ+VmQHlxEwVjACAAAAAAD3azcNDdohU6tHO6ypT7F0rtydAYIMmLIsX6y7cXVkMAAzQwAH0AAAAFZAAgAAAAAL5SOJQ3LOhgdXJ5v086NNeAl1qonQnchObdpZJ1kHeEBXMAIAAAAAA+tEqTXODtik+ydJZSnUqXF9f18bPeze9eWtR7ExZJgQVjACAAAAAAUd8v2RN1WagYan1LbV+G81vcJKQ6HsX2GSBtnzMH7XEAAzQxAH0AAAAFZAAgAAAAAKl8zcHJRDjSjJeV/WvMxulW1zrTFtaeBy/aKKhadc6UBXMAIAAAAADBdWQl5SBIvtZZLIHszePwkO14W1mQ0izUk2Ov21cPNAVjACAAAAAAHvQLJ2X4ncjVv1BsOd/jbVouRPwf4222WlGSzPyAfnsAAzQyAH0AAAAFZAAgAAAAAFvotcNaoKnVt5CBCOPwjexFO0WGWuaIGL6H/6KSau+6BXMAIAAAAAD2y2mBN5xPu5PJoY2zcr0GnQDtHRBogA5+xzIxccE9fwVjACAAAAAA1OntPAxq1NJ4sfca+ysHcg68mj7QHwdEZQVxHMlV6x0AAzQzAH0AAAAFZAAgAAAAAPezU0/vNT4Q4YKbTbaeHqcwNLT+IjW/Y9QFpIooihjPBXMAIAAAAACj2x4O4rHter8ZnTws5LAP9jJ/6kk9C/V3vL50LoFZHAVjACAAAAAAlZd3bPMUXRhTwLWflrQuKIwtauTswenUiaDi2ibHYYYAAzQ0AH0AAAAFZAAgAAAAAMqpayM2XotEFmm0gwQd9rIzApy0X+7HfOhNk6VU7F5lBXMAIAAAAACJR9+q5T9qFHXFNgGbZnPubG8rkO6cwWhzITQTmd6VgwVjACAAAAAAEEOCUo+ihRGl1kuHlabWBWUFyJPAXSXzpQB4od57cMEAAzQ1AH0AAAAFZAAgAAAAANCeyW+3oebaQk+aqxNVhAcT/BZ5nhsTVdKS3tMrLSvWBXMAIAAAAADxRFMDhkyuEc++WnndMfoUMLNL7T7rWoeblcrpSI6soQVjACAAAAAA1KYQISxmBcGmgDMAjBpTcfQr/oci60Kf4QtEYEXPKSQAAzQ2AH0AAAAFZAAgAAAAAIbo5YBTxXM7HQhl7UP9NNgpPGFkBx871r1B65G47+K8BXMAIAAAAAC21dJSxnEhnxO5gzN5/34BL4von45e1meW92qowzb8fQVjACAAAAAAisxsskU/9ykphpSjB6bMMY+6Tbt6wpjhE2dg1IULAgYAAzQ3AH0AAAAFZAAgAAAAABm/6pF96j26Jm7z5KkY1y33zcAEXLx2n0DwC03bs/ixBXMAIAAAAAD01OMvTZI/mqMgxIhA5nLs068mW+GKl3OW3ilf2D8+LgVjACAAAAAAnPa8PTfcaSSAdzNAC54IMTicbShbtt/cSnFHz7u7g8wAAzQ4AH0AAAAFZAAgAAAAAJ/D3+17gaQdkBqkL2wMwccdmCaVOtxzIkM8VyI4xI5zBXMAIAAAAAAggLVmkc5u+YzBR+oNE+XgLVp64fC6MzUb/Ilu/Jsw0AVjACAAAAAAp5qCK8p0eTvAAUQ8fIOGyb2YM1YflQ8H6ggiWZ9vNI4AAzQ5AH0AAAAFZAAgAAAAAJMRyUW50oaTzspS6A3TUoXyC3gNYQoShUGPakMmeVZrBXMAIAAAAACona2Pqwt4U2PmFrtmu37jB9kQ/12okyAVtYa8TQkDiQVjACAAAAAAx/LOuDwvpaPFJeKLowZW/4q7SUsLtdldB27zc4//alEAAzUwAH0AAAAFZAAgAAAAAByuYl8dBvfaZ0LO/81JW4hYypeNmvLMaxsIdvqMPrWoBXMAIAAAAABNddwobOUJzm9HOUD8BMZJqkNCUCqstHZkC76FIdNg9AVjACAAAAAA4SEPY+qEZ2dCu/9485IEVybiM3Z7szXYM+izxklNm14AAzUxAH0AAAAFZAAgAAAAAHEzLtfmF/sBcYPPdj8867VmmQyU1xK9I/3Y0478azvABXMAIAAAAAAcmyFajZPnBTbO+oLInNwlApBocUekKkxz2hYFeSlQ+gVjACAAAAAA15ibZoBP1/pP27BDIfizDYNRDIpOmj8Ie34FvrtqjhMAAzUyAH0AAAAFZAAgAAAAAJam7JYsZe2cN20ZYm2W3v1pisNt5PLiniMzymBLWyMtBXMAIAAAAABxCsKVMZMTn3n+R2L7pVz5nW804r8HcK0mCBw3jUXKXAVjACAAAAAAElCgoMRrtlONqnyosjIpiz4mQTLq3+b/JUCVEq4QFiAAAzUzAH0AAAAFZAAgAAAAAMXrXx0saZ+5gORmwM2FLuZG6iuO2YS+1IGPoAtDKoKBBXMAIAAAAADIQsxCr8CfFKaBcx8kIeSywnGh7JHjKRJ9vJd9x79y7wVjACAAAAAAPzOTJ088k9QdGUpZ1ubUdkw3pTGkNF8bui4a9wqeo08AAzU0AH0AAAAFZAAgAAAAACbzcUD3INSnCRspOKF7ubne74OK9L0FTZvi9Ay0JVDYBXMAIAAAAADPebVQH8Btk9rhBIoUOdSAdpPvz7qIY4UC2i6IGisSAQVjACAAAAAAe4zSAgMQPjtz2t1DgxW1XOlV+fnNMXt6iQ7iV7DgQz4AAzU1AH0AAAAFZAAgAAAAACUc2CtD1MK/UTxtv+8iA9FoHEyTwdl43HKeSwDw2Lp5BXMAIAAAAACCIduIdw65bQMzRYRfjBJj62bc69T4QqH4QoWanwlvowVjACAAAAAA+i6aTnkoixxSOxDgQM/aWTylOy1m0ViCDU5asOM5ieYAAzU2AH0AAAAFZAAgAAAAAHSaHWs/dnmI9sc7nB50VB2Bzs0kHapMHCQdyVEYY30TBXMAIAAAAACkV22lhEjWv/9/DubfHBAcwJggKI5mIbSK5L2nyqloqQVjACAAAAAA1I5pLbnlFf/EfJlhE0RFIioDjzKrWNh4TO8H/ksrbMcAAzU3AH0AAAAFZAAgAAAAAAL8jhNBG0KXXZhmZ0bPXtfgapJCB/AI+BEHB0eZ3C75BXMAIAAAAADHx/fPa639EBmGV5quLi8IQT600ifiKSOhTDOK19DnzwVjACAAAAAANfWpWWy9iu7pgMEy2aVCggGEtJtXKQG1+3zCf8D6iEkAAzU4AH0AAAAFZAAgAAAAAAxgeclNl09H7HvzD1oLwb2YpFca5eaX90uStYXHilqKBXMAIAAAAACMU5pSxzIzWlQxHyW170Xs9EhD1hURASQk+qkx7K5Y6AVjACAAAAAA/kYlJPSFn2g1l7cy6djV/7wgnl
PWphZ0IKSY8uenUF0AAzU5AH0AAAAFZAAgAAAAABmO7QD9vxWMmFjIHz13lyOeV6vHT6mYCsWxF7hb/yOjBXMAIAAAAACT9lmgkiqzuWG24afuzYiCeK9gmJqacmxAruIukd0xEAVjACAAAAAAyiZxtuOZQNSit4wFMJS3jnqLbDzyJ0OtDTKs6r0EO0cAAzYwAH0AAAAFZAAgAAAAAB89SjLtDJkqEghRGyj6aQ/2qvWLNuMROoXmzbYbCMKMBXMAIAAAAAC8sywgND+CjhVTF6HnRQeay8y9/HnVzDI42dEPah28LQVjACAAAAAAzBj8q9Ig4uhIc6rhNDVX1lII9lzfhJ0+ig8viBhdrtQAAzYxAH0AAAAFZAAgAAAAABN2alGq9Aats1mwERNGwL/fIwZSvVCe9/8XMHTFlpUpBXMAIAAAAACuDPjJgvvbBYhbLpjMiWUCsVppiYrhvR+yMysNPN8cZAVjACAAAAAAqcsLIvb3Pe/QKYB9wQ86Ye2OFSAJEIY4ve7LMM3SwGMAAzYyAH0AAAAFZAAgAAAAAL8YB6VAqGBiWD4CBv16IBscg5J7VQCTZu87n6pj+86KBXMAIAAAAAAmxm8e68geeyAdUjSMWBHzUjneVB0pG9TBXIoE6467hAVjACAAAAAA9Kd65vKIUckNPvxl/HHNPJVnFHXyErp4qibmD7BloukAAzYzAH0AAAAFZAAgAAAAAL4yLkCTV5Dmxa5toBu4JT8ge/cITAaURIOuFuOtFUkeBXMAIAAAAAAXoFNQOMGkAj7qEJP0wQafmFSXgWGeorDVbwyOxWLIsgVjACAAAAAAaFrBx0xXIryTe7V/kX+jfPHd0057x7k7MxzDAzi0RpcAAzY0AH0AAAAFZAAgAAAAAEU8DoUp46YtYjNFS9kNXwdYxQ9IW27vCTb+VcqqfnKNBXMAIAAAAADe7vBOgYReE8X78k5ARuUnv4GmzPZzg6SbConf4L2G3wVjACAAAAAAOl/tartywd/fJj5DNRsVH/ml9tJ8KkkCbKObsFe8lHcAAzY1AH0AAAAFZAAgAAAAAPa4yKTtkUtySuWo1ZQsp2QXtPb5SYqzA5vYDnS1P6c0BXMAIAAAAADKnF58R1sXlHlsHIvCBR3YWW/qk54z9CTDhZydkD1cOQVjACAAAAAAfn/jP8K3QUILngCNkydHARyBvBHIFdaJjzV0EXsFruMAAzY2AH0AAAAFZAAgAAAAAN5WJnMBmfgpuQPyonmY5X6OdRvuHw4nhsnGRnFAQ95VBXMAIAAAAACwftzu7KVV1rmGKwXtJjs3cJ1gE3apr8+N0SAg1F2cHwVjACAAAAAAf0XyQQLd/HIYaf9EeAV0o2h12K1AV5piLCpZihznBXoAAzY3AH0AAAAFZAAgAAAAACHvDsaPhoSb6DeGnKQ1QOpGYAgK82qpnqwcmzSeWaJHBXMAIAAAAABRq3C5+dOfnkAHM5Mg5hPB3O4jhwQlBgQWLA7Ph5bhgwVjACAAAAAAm2Ukk5kkQp+PDpBCCefQOqFKKZie4hINim3yvtypsEAAAzY4AH0AAAAFZAAgAAAAAOBePJvccPMJmy515KB1AkXF5Pi8NOG4V8psWy0SPRP+BXMAIAAAAAB3dOJG9xIDtEKCRzeNnPS3bFZepMj8UKBobKpSoCPqpgVjACAAAAAAnu33SngYPOtRgdJ3aBBuxWn80ti3OO6nZJcI6eh0VfQAAzY5AH0AAAAFZAAgAAAAABUvRrDQKEXLMdhnzXRdhiL6AGNs2TojPky+YVLXs+JnBXMAIAAAAAD1kYicbEEcPzD4QtuSYQQWDPq8fuUWGddpWayKn3dT9QVjACAAAAAAUc5dxcbNYY3qMO8+Xm2xis+pH9NjPLrHqHenwDEImAEAAzcwAH0AAAAFZAAgAAAAAOvSnpujeKNen4pqc2HR63C5s5oJ1Vf4CsbKoYQvkwl5BXMAIAAAAACw2+vAMdibzd2YVVNfk81yXkFZP0WLJ82JBxJmXnYE+QVjACAAAAAAnmej008JFQAgdImjk26PYvqdATjRYRufLi+vEVVwtucAAzcxAH0AAAAFZAAgAAAAAAo0xfGG7tJ3GWhgPVhW5Zn239nTD3PadShCNRc9TwdNBXMAIAAAAADZh243oOhenu0s/P/5KZLBDh9ADqKHtSWcXpO9D2sIjgVjACAAAAAAhpNHI4+aRdWHCvZfEjlgpRBz36g05wN9p/hj3NEwc7EAAAVlACAAAAAA65pz95EthqQpfoHS9nWvdCh05AV+OokP7GUaI+7j8+wSY20AAAAAAAAAAAAAEHBheWxvYWRJZAAAAAAAEGZpcnN0T3BlcmF0b3IAAQAAAAA=", + "subType": "06" + } + } + } + }, + "update": { + "$set": { + "encryptedDouble": { + "$$type": "binData" + } + } + }, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDouble", + "bsonType": "double", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + } + }, + "deleteTokens": { + "default.default": { + "encryptedDouble": { + "e": { + "$binary": { + "base64": "65pz95EthqQpfoHS9nWvdCh05AV+OokP7GUaI+7j8+w=", + "subType": "00" + } + }, + "o": { + "$binary": { + "base64": "noN+05JsuO1oDg59yypIGj45i+eFH6HOTXOPpeZ//Mk=", + "subType": "00" + } + } + } + } + } + } + }, + "command_name": "findAndModify" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 0, + "encryptedDouble": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": 
"5nRutVIyq7URVOVtbE4vM01APSIajAVnsShMwjBlzkM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6YrBn2ofIw1b5ooakrLOwF41BWrps8OO0H9WH4/rtlE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "n+XAuFnP8Dov9TnhGFxNx0K/MnVM9WbJ7RouEu0ndO0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yRXojuVdn5GQtD97qYlaCL6cOLmZ7Cvcb3wFjkLUIdM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "DuIkdRPITRs55I4SZmgomAHCIsDQmXRhW8+MOznkzSk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SsBk+Et1lTbU+QRPx+xyJ/jMkmfG+QCvQEpip2YYrzA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "crCIzOd8KhHvvUlX7M1v9bhvU4pLdTc+X2SuqoKU5Ek=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "YOWdCw4UrqnxkAaVjqmC4sKQDMVMHEpFGnlxpxdaU6E=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "M3SShp81Ff8tQ632qKbv9MUcN6wjDaBReI0VXNu6Xh4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "gzHlSPxpM0hT75kQvWFzGlOxKvDoiKQZOr19V6l2zXI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "s3JnppOGYw9SL2Q1kMAZs948v2F5PrpXjGei/HioDWs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cG6+3Gk/zEH68P/uuuwiAUVCuyJwa1LeV+t29FlPPAo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dupdvR3AyJtM+g9NDKiaLVOtGca387JQp8w+V03m7Ig=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JqEQc5svj2jTvZ6LLA5ivE+kTb/0aRemSEmxk4G7Zrg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "szcXXXKnob+p3SoM4yED2R920LeJ7cVsclPMFTe4CeI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "o1QoGVXmuBdHwHm7aCtGMlMVKrjFdYvJXpoq6uhIAZ0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Jfm5wPlqqLCJRGQIqRq2NGmpn7s0Vrih2H3YAOoI2YU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zMHLb8ARbsYo8Ld05bqnGFf1Usha6EGb8QKwdSAyps0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yQdtq9lh5pugL7/i0Bj/PuZUUBUIzf+7wj1rl5y736w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wGWVZdO7qIuyDg/BqDgqjgoQ02h5YYgwXQB1oCin2NE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "by9HMLj6NTEpgztZ5HSN6GxImkXPcaFINYDzgZY33X8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tWo0vbasi7bXmn/MsOx13VC1IsWtpx/nYp0uj4iMzdA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tQQpndUYd5O87lOtrGjH3wl9VsOK0ray7RMasL90sBM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cQjXEDCMsOpKLLf+vlTgIHA+cbSJdzqhbSX9Wvh95aA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "7yMpU48IxK9SzP2cx3VnTownGEwFmeFofuuFT97SuuY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kSOx1kz0CmBgzKQHZlo65ZUY1DIv9A99JRm+Us2y6Ew=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ubQpdPBe6/xvtr+AcXdfYLSvYCR4ot0tivehkCsupb4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xal+iCJ6FTefRQToyoNksc9NCZShyn04NDGi4IYrcoM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "d7jU4iOK50xHxlkSifcxlZFCM46TSgQzoYivxG3HNLY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tJvl2nsBLBVzL3pp6sKWCL4UXeh3q/roYBJjSb74ve0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "OIUCaKRvIx9t1w6Hxlz1IcQTdPNCfdRNwnnTm10W+X0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "A9tvzsiElotOUVIB4CqfQp9mAwqvTM35YkmAR170aHA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "lI8gpK7hpb7c9x4RQugsxMnQay5LZJmwslZdvMx/dcE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dNCzh40U0XvdKnSDi3HRQOWQftEsDVqc4uUvsVFGoq8=", + 
"subType": "00" + } + }, + { + "$binary": { + "base64": "IP+iwEBWBwVVZIdpaMu8k5+soFCz+TZkYn3drKZ9grE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pnqyh6e0y5svHkJDShlN9CHV0WvMBE4QbtJpQw5ZCXc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "elEl42tbVDoRTLjAhZUFEtXiut4b3PVhg/1ZLZSQdtE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "vHuu2FxwclMHqyE6JBYbTYgbEkB0dqb/JuaxsvfwsmY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xTf7NCe3Gf8QpE78HR5OknlLTKfs9J+RN9UZpH6fnso=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "XiWSasRnJAulGR6+LCVD3mwRObXylqYWR9jvpywq12c=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "MZMxEQ5ikx0PG1YFIExv0UnTZogsvgeOEZTpzvBDn4w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yZMyMZBDrWbAhvnic7vvIYhmO9m5H2iuv0c8KNZrBzY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xxM14hTPY5j0vvcK2C7YAEjzdsfUTFHozHC0hEo1bxI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+01rqR1xVwkpGXcstbk1ItJqFVjH6Q8MGxEN3Cm9Y1A=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xOpLV0Z2VTRJ3iWtnWZcsyjXubTIkYWo31cO+HV1o1k=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "BWUOLqgLBqc5NwxVlSV5H3KFQPXbCp7mdo+jF+8cJqY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "fuQb1S6xZDGlrEbK+kI23aL53PP1PVNwqICnZNt9Yzg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SfscnoibFttahLdPVC4Ee+47ewGFKpDSU7M6HX19bKE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "rpSW2awybNVeKtat91VFxqbINoTfNhPfQAu+d73Xtf8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "9M/CP9ccOIIj2LLFmE0GFDO0Ban2wsNalEXfM6+h+1s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "WrEMG49l1ye4MhXs5ZS9tz8P6h+hDvthIg/2wW9ne1Q=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ImNhbfeyfH8qIEeA5ic0s3dAQBdzzTBS+CPsNih9vZ0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dWP33YDSn04UKJN2ogh2Rui0iW/0q2y18OCDRVcfyoo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "lYv0isAtfGh6H9tdp3cp2eHU7q2J+uk7QrgcxtK3w7Y=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "VGMoamB/+7zTOYcY/pqJc96xlv2PdW4hwsIAEIslTDQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yNeBWMF7BnD9wVwz2PgJsvWr77QiVvvWUvJF0+fqBug=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SfpvObJ+tJBXSvqeN7vlOfmhYign635lciYAJIjUtY8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dsen4NqjzVGjpjufiTMs3+gqeD09EbnuogPgxrJECwg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pxCWVM3sn19NsFEpgHbgLa+PmYlhN3mMiP0Wk8kJhYw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q11KNvJszjYIB9n9HcC+N4uz11a3eRj1L3BH9scKMDQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "A1PmkgcEToWh1JiVWE6mI5jUu7poxWWuCUt/cgRUUDc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "qJo3Hu4PJeanL7XEaWXO/n3YsodhZyd+MJOOmB9Kpd8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "BkBKLO8URFscfRY9Bav/1+L9mLohDgNr/MkZtGiraIs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "rZq5WA3Hx3xthOyHAJXK//f8pE2qbz7YKu3TIMp9GFY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "X07a/Lm80p5xd4RFs1dNmw+90tmPDPdGiAKVZkxd4zY=", + "subType": "00" + } + } + ] + }, + { + "_id": 1, + "encryptedDouble": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "DLCAJs+W2PL2DV5YChCL6dYrQNr+j4p3L7xhVaub4ic=", + "subType": "00" + } + }, + { + "$binary": { + "base64": 
"HI88j1zrIsFoijIXKybr9mYubNV5uVeODyLHFH4Ueco=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wXVD/HSbBljko0jJcaxJ1nrzs2+pchLQqYR3vywS8SU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "KhscCh+tt/pp8lxtKZQSPPUU94RvJYPKG/sjtzIa4Ws=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "RISnuNrTTVNW5HnwCgQJ301pFw8DOcYrAMQIwVwjOkI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Ra5zukLh2boua0Bh74qA+mtIoixGXlsNsxiJqHtqdTI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "eqr0v+NNWXWszi9ni8qH58Q6gw5x737tJvH3lPaNHO4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "d42QupriWIwGrFAquXNFi0ehEuidIbHLFZtg1Sm2nN8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "2azRVxaaTIJKcgY2FU012gcyP8Y05cRDpfUaMnCBaQU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "3nlgkM4K/AAcHesRYYdEu24UGetHodVnVfHzw4yxZBM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "hqy91FNmAAac2zUaPO6eWFkx0/37rOWGrwXN+fzL0tU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "akX+fmscSDSF9pB5MPj56iaJPtohr0hfXNk/OPWsGv8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "1ZvUb10Q7cN4cNLktd5yNjqgtawsYnkbeVBZV6WuY/I=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "otCwtuKiY4hCyXvYzXvo10OcnzZppebo38KsAlq49QM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Mty8EscckeT/dhMfrPFyDbLnmMOcYRUQ3mLK4KTu6V8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tnvgLLkJINO7csREYu4dEVe1ICrBeu7OP+HdfoX3M2E=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kOefsHgEVhkJ17UuP7Dxogy6sAQbzf1SFPKCj6XRlrQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "F+JQ79xavpaHdJzdhvwyHbzdZJLNHAymc/+67La3gao=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "NCZ9zp5rDRceENuSgAfTLEyKg0YgmXAhK0B8WSj7+Pw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wL1CJ7cYR5slx8mHq++uMdjDfkt9037lQTUztEMF56M=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "txefkzTMITZE+XvvRFZ7QcgwDT/7m8jNmxRk4QBaoZI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "jFunW3v1tSYMyZtQQD28eEy9qqDp4Kqo7gMN29N4bfQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "QMO915KUiS3X3R1bU1YoafVM2s0NeHo3EjgTA9PnGwY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "nwdKJEXdilzvb7494vbuDJ+y6SrfJahza1dYIsHIWVI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "vpWMX+T/VXXajFo0UbuYjtp0AEzBU0Y+lP+ih2EQ7mg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "1lmzG0J1DhKDRhhq5y5Buygu4G8eV2X0t7kUY90EohM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SiKqpXqO0trwhFvBWK274hMklpCgMhNs/JY84yyn/NE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "7cPGPYCKPTay+ZR9Gx6oOueduOgaFrSuAXmNDpDHXdI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "4THEYvAkjs2Fh7FIe5LC45P4i4N0L7ob67UOVbhp6Nk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "B+UGsChLLZR7iqnt8yq91OgmTgwiUKTJhFxY4NT0O6c=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "X1uYwBCsCg1H+PnKdwtBqXlt0zKEURi8bOM940GcPfk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xYOgT5l7shlNXCwHlguovmDkcEnF8dXyYlTyYrgZ8GE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "vFMTZqV8bh1+gcKzTkXweMddJlgdUnwX0DWzUUaMok4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "4HI0y9FrtleZxZ7M6INdNhLelrQ2Rv/+ykWCBl+tMC8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "jpJ0bBE474OUkn1vUiLWumIBtYmwc7J5+LQU/nyeLQc=", + 
"subType": "00" + } + }, + { + "$binary": { + "base64": "jQTPeXZvdxY/DjtPfYfKUArIDsf0E9MVFy2O26sv1ec=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "QLLto0ExR2ZYMGqlyaMZc/hXFFTlwmgtKbiVq/xJIeI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yBJNviU1nchbGbhx6InXCVRXa90sEepz1EwbYuKXu2U=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "jpEf0vHxrPu9gTJutNXSi2g/2Mc4WXFEN7yHonZEb7A=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "E09kLFckMYwNuhggMxmPtwndyvIAx+Vl+b2CV6FP75s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "N+ue6/cLPb5NssmJCCeo18LlbKPz6r2z20AsnTKRvOo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yVQNZP8hhsvNGyDph2QP2qTNdXZTiIEVineKg+Qf33o=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cSC9uI+9c5S8X+0G7amVyug1p0ZlgBsbEDYYyezBevQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "1NpZGjoQzuQtekj80Rifxe9HbE08W07dfwxaFHaVn84=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "5Ghuq/8l11Ug9Uf/RTwf9On3OxOwIXUcb9soiy4J7/w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "0LWKaEty6ywxLFhDaAqulqfMnYc+tgPfH4apyEeKg80=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "OwSthmCBtt6NIAoAh7aCbj82Yr/+9t8U7WuBQhFT3AQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "iYiyg6/1isqbMdvFPIGucu3cNM4NAZNtJhHpGZ4eM+c=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "waBgs8jWuGJPIF5zCRh6OmIyfK5GCBQgTMfmKSR2wyY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "1Jdtbe2BKJXPU2G9ywOrlODZ/cNYEQlKzAW3aMe1Hy4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xaLEnNUS/2ySerBpb9dN/D31t+wYcKekwTfkwtni0Mc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "bIVBrOhOvr6cL55Tr24+B+CC9MiG7U6K54aAr2IXXuw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6Cdq5wroGu2TEFnekuT7LhOpd/K/+PcipIljcHU9QL4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "K5l64vI4S/pLviLW6Pl0U3iQkI3ge0xg4RAHcEsyKJo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "bzhuvZ0Ls22yIOX+Hz51eAHlSuDbWR/e0u4EhfdpHbc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Qv+fr6uD4o0bZRp69QJCFL6zvn3G82c7L+N1IFzj7H0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "XAmISMbD3aEyQT+BQEphCKFNa0F0GDKFuhM9cGceKoQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "4VLCokntMfm1AogpUnYGvhV7nllWSo3mS3hVESMy+hA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xiXNLj/CipEH63Vb5cidi8q9X47EF4f3HtJSOH7mfM8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "4XlCYfYBjI9XA5zOSgTiEBYcZsdwyXL+f5XtH2xUIOc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "k6DfQy7ZYJIkEly2B5hjOZznL4NcgMkllZjJLb7yq7w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ZzM6gwWesa3lxbZVZthpPFs2s3GV0RZREE2zOMhBRBo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "US+jeMeeOd7J0wR0efJtq2/18lcO8YFvhT4O3DeaonQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "b6iSxiI1FM9SzxuG1bHqGA1i4+3GOi0/SPW00XB4L7o=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kn3LsxAVkzIZKK9I6fi0Cctr0yjXOYgaQWMCoj4hLpM=", + "subType": "00" + } + } + ] + } + ] + } + } + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2-Range-Double-InsertFind.json b/test/client-side-encryption/spec/legacy/fle2-Range-Double-InsertFind.json new file mode 100644 index 0000000000..634230eaca --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2-Range-Double-InsertFind.json @@ -0,0 
+1,1129 @@ +{ + "runOn": [ + { + "minServerVersion": "6.2.0", + "topology": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "encrypted_fields": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDouble", + "bsonType": "double", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + }, + "key_vault_data": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ], + "tests": [ + { + "description": "FLE2 Range Double. Insert and Find.", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDouble": { + "$numberDouble": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDouble": { + "$numberDouble": "1" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDouble": { + "$gt": { + "$numberDouble": "0" + } + } + } + }, + "result": [ + { + "_id": 1, + "encryptedDouble": { + "$numberDouble": "1" + } + } + ] + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedDouble": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDouble", + "bsonType": "double", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + 
"command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedDouble": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDouble", + "bsonType": "double", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "find": "default", + "filter": { + "encryptedDouble": { + "$gt": { + "$binary": { + "base64": "Cq8kAAADcGF5bG9hZAB/JAAABGcAQyQAAAMwAH0AAAAFZAAgAAAAAHgYoMGjEE6fAlAhICv0+doHcVX8CmMVxyq7+jlyGrvmBXMAIAAAAAC/5MQZgTHuIr/O5Z3mXPvqrom5JTQ8IeSpQGhO9sB+8gVjACAAAAAAPMgD8Rqnd94atKnMyPjlTwthUQ710MKJVqgtwNXLFWwAAzEAfQAAAAVkACAAAAAA2kiWNvEc4zunJ1jzvuClFC9hjZMYruKCqAaxq+oY8EAFcwAgAAAAACofIS72Cm6s866UCk+evTH3CvKBj/uZd72sAL608rzTBWMAIAAAAAB7NkTFgsBuPIyxW5KtYZD4uxdgRynoAUwA2RDj3qZNdwADMgB9AAAABWQAIAAAAABkfoBGmU3hjYBvQbjNW19kfXneBQsQQPRfUL3UAwI2cAVzACAAAAAAUpK2BUOqX/DGdX5YJniEZMWkofxHqeAbXceEGJxhp8AFYwAgAAAAAENNdd+fMV7VQzshj6RdvsxqDHYkPDxggdUHw3JbmFBeAAMzAH0AAAAFZAAgAAAAAFNprhQ3ZwIcYbuzLolAT5n/vc14P9kUUQComDu6eFyKBXMAIAAAAAAcx9z9pk32YbPV/sfPZl9ALIEVsqoLXgqWLVK/tP+heAVjACAAAAAA3eqduo9CWE5BePqBq7KCh5++QqXnyjCwybB15Zoeiu4AAzQAfQAAAAVkACAAAAAAODI+pB2pCuB+YmNEUAgtMfNdt3DmSkrJ96gRzLphgb8FcwAgAAAAAAT7dewFDxUDECQ3zVq75/cUN4IP+zsqhkP5+czUwlJIBWMAIAAAAABYcCJA2jocGQiF/vZ9VDEktoK015OqfY64yZhmNPs9GQADNQB9AAAABWQAIAAAAACNAk+yTZ4Ewk1EnotQK8O3h1gg9I7pr9q2+4po1iJVgAVzACAAAAAAUj/LesmtEsgqYVzMJ67umVA11hJTdDXwbxDoQ71vWyUFYwAgAAAAAG32XaKjEO9YTSLMnDR5eiaUpgBQz0n4M3VpBqhWTaEnAAM2AH0AAAAFZAAgAAAAAD/FIrGYFDjyYmVb7oTMVwweWP7A6F9LnyIuNO4MjBnXBXMAIAAAAACIZgJCQRZu7NhuNMyOqCn1tf+DfU1qm10TPCfj5JYV3wVjACAAAAAAZCaO06/DjOBjNrTxB8jdxM7X4XvNu9QTa2Y00kZJwgEAAzcAfQAAAAVkACAAAAAAciRW40ORJLVwchOEpz87Svb+5toAFM6LxDWv928ECwQFcwAgAAAAAN0dipyESIkszfjRzdDi8kAGaa2Hf4wrPAtiWwboZLuxBWMAIAAAAADhvKHhF7zfNRnG8QfiIlKycV6L6iGxbGNKkHligA9rAgADOAB9AAAABWQAIAAAAACZqAyCzYQupJ95mrBJX54yIz9VY7I0WrxpNYElCI4dTQVzACAAAAAA/eyJb6d1xfE+jJlVXMTD3HS/NEYENPVKAuj56Dr2dSEFYwAgAAAAAMfcJKj77c6y/RBQ5hb5LSNdvbYvCAHR59KLnbmxj1gAAAM5AH0AAAAFZAAgAAAAAI5bm3YO0Xgf0VT+qjVTTfvckecM3Cwqj7DTKZXf8/NXBXMAIAAAAAD/m+h8fBhWaHm6Ykuz0WX1xL4Eme3ErLObyEVJf8NCywVjACAAAAAAiGdTXD22l1zDxHeF4NXUTiBnNTsdpzJRwM6riTPuOogAAzEwAH0AAAAFZAAgAAAAANqo4+p6qdtCzcB4BX1wQ6llU7eFBnuu4MtZwp4B6mDlBXMAIAAAAAAGiz+VaukMZ+6IH4jtn4KWWdKK4/W+O+gRioQDrfzpMgVjACAAAAAAl/wlBjSJW/hKkll8HSBCGw4Ce1pJ5rZuhVE09hKq4jQAAzExAH0AAAAFZAAgAAAAAPrFXmHP2Y4YAm7b/aqsdn/DPoDkv7B8egWkfe23XsM1BXMAIAAAAAAGhwpKAr7skeqHm3oseSbO7qKNhmYsuUrECBxJ5k+D2AVjACAAAAAAKSsxERy2OBhb5MiH9/H2BET2oeU6yVihiAoWi/16FroAAzEyAH0AAAAFZAAgAAAAABzjYxwAjXxXc0Uxv18rH8I3my0Aguow0kTwKyxbrm+cBXMAIAAAAADVbqJVr6IdokuhXkEtXF0C2gINLiAjMVN20lE20Vmp2QVjACAAAAAACJ+VAwl9X88mjC76yzkWeL4K6AamNYjbNkpS9B6fQE4AAzEzAH0AAAAFZAAgAAAAAFMm2feF2fFCm/UC6AfIyepX/xJDSmnnolQIBnHcPmb5BXMAIAAAAABLI11kFrQoaNVZFmq/38aRNImPOjdJh0Lo6irI8M/AaAVjACAAAAAAZFgYh4nV8YN+/AAqe/QhKDkNBPg8KraCG7y3bOzhPV4AAzE0AH0AAAAFZAAgAAAAAJvXB3KyNiNtQko4SSzo/9b2qmM2zU9CQTTDfLSBWMgRBXMAIAAAAAAvjuVP7KsLRDeqVqRziTKpBrjVyqKiIbO9Gw8Wl2wFTAVjACAAAAAARuBn3JdQ8so7Gy0XVH0CtlvtRoJWhSrJP6eRIqbyWh8AAzE1AH0AAAAFZAAgAAAAAPGdcxDiid8z8XYnfdDivNMYVPgBKdGOUw6UStU+48CdBXMAIAAAAAARj6g1Ap0eEfuCZ4X2TsE
w+Djrhto3fA5nLwPaY0vCTgVjACAAAAAATxLY6SkcLPaoOBJpQsggEqoxgJgiY9seP3fBQM05PckAAzE2AH0AAAAFZAAgAAAAAP5rGPrYGt3aKob5f/ldP0qrW7bmWvqnKY4QwdDWz400BXMAIAAAAADTQkW2ymaaf/bhteOOGmSrIR97bAnJx+yN3yMj1bTeewVjACAAAAAA6O+wIMt3xMITuCxVrqOCNBPX5F122G+/Is+EYkzUvVYAAzE3AH0AAAAFZAAgAAAAAAlz6wJze5UkIxKpJOZFGCOf3v2KByWyI6NB6JM9wNcBBXMAIAAAAABUC7P/neUIHHoZtq0jFVBHY75tSFYr1Y5S16YN5XxC1QVjACAAAAAANv97jMd9JmZ7lMeCzK7jaZHZYsZJNl6N9g9WXzd2om0AAzE4AH0AAAAFZAAgAAAAAFJ8AtHcjia/9Y5pLEc3qVgH5xKiXw12G9Kn2A1EY8McBXMAIAAAAAAxe7Bdw7eUSBk/oAawa7uicTEDgXLymRNhBy1LAxhDvwVjACAAAAAAtOUZ2NeUWwESpq95O92dhqYBt8RtFnjT43E7k9mQF3oAAzE5AH0AAAAFZAAgAAAAAO8uwQUaKFb6vqR3Sv3Wn4QAonC2exOC9lGG1juqP5DtBXMAIAAAAABZf1KyJgQg8/Rf5c02DgDK2aQu0rNCOvaL60ohDHyY+gVjACAAAAAA5nAWj1Ek8p0MLYZEB3yoVFDfBYZ+/ZpIo71u+W9hprcAAzIwAH0AAAAFZAAgAAAAAE8YtqyRsGCeiR6hhiyisR/hccmK4nZqIMzO4lUBmEFzBXMAIAAAAAC1UYOSKqAeG1UJiKjWFVskRhuFKpj9Ezy+lICZvFlN5AVjACAAAAAA37oP30mTzVftI2FL9Uwyls/jqLqbmRDQk7p7nlx44uYAAzIxAH0AAAAFZAAgAAAAAPhCrMausDx1QUIEqp9rUdRKyM6a9AAx7jQ3ILIu8wNIBXMAIAAAAACmH8lotGCiF2q9VQxhsS+7LAZv79VUAsOUALaGxE/EpAVjACAAAAAAhVIkl8p19VZ0gpg32s++Jda3qsVSVB5tV4CKrtjhE3IAAzIyAH0AAAAFZAAgAAAAAOBi/GAYFcstMSJPgp3VkMiuuUUCrZytvqYaU8dwm8v2BXMAIAAAAACEZSZVyD3pKzGlbdwlYmWQhHHTV5SnNLknl2Gw8IaUTQVjACAAAAAAriBex2kK//RPhyVpCYJDBng4l5w8jD3m8BF7dVAP0p8AAzIzAH0AAAAFZAAgAAAAAIQuup+YGfH3mflzWopN8J1X8o8a0d9CSGIvrA5HOzraBXMAIAAAAADYvNLURXsC2ITMqK14LABQBI+hZZ5wNf24JMcKLW+84AVjACAAAAAAGeUzdG2kQrx5XypXJezZmPVzMYuqYZw7Bhbl4EPT0SMAAzI0AH0AAAAFZAAgAAAAAKsh0ADyOnVocFrOrf6MpTrNvAj8iaiE923DPryu124gBXMAIAAAAADg24a8NVE1GyScc6tmnTbmu5ulzO+896fE92lN08MeswVjACAAAAAAbO+DEBY3STVMQN7CbxmHDUBYrDR+80e797u/VmiK4vcAAzI1AH0AAAAFZAAgAAAAAKkVC2Y6HtRmv72tDnPUSjJBvse7SxLqnr09/Uuj9sVVBXMAIAAAAABYNFUkH7ylPMN+Bc3HWX1e0flGYNbtJNCY9SltJCW/UAVjACAAAAAAHlE3RLKcXpXto7Mr8nRgzEsmhjfGh4vcgPxashCSMg4AAzI2AH0AAAAFZAAgAAAAADDggcwcb/Yn1Kk39sOHsv7BO/MfP3m/AJzjGH506Wf9BXMAIAAAAAAYZIsdjICS0+BDyRUPnrSAZfPrwtuMaEDEn0/ijLNQmAVjACAAAAAA1c/sy3255NofBQrnBNSmVkSzMgsGPaaOUJShddVrnuQAAzI3AH0AAAAFZAAgAAAAAEWY7dEUOJBgjOoWVht1wLehsWAzB3rSOBtLgTuM2HC8BXMAIAAAAAAAoswiHRROurjwUW8u8D5EUT+67yvrgpB/j6PzBDAfVwVjACAAAAAALBvQumG8m7/bzJjGWN2cHSAncdN8jMtOSmEhEhGom24AAzI4AH0AAAAFZAAgAAAAAPZaapeAUUFPA7JTCMOWHJa9lnPFh0/gXfAPjA1ezm4ZBXMAIAAAAACmJvLY2nivw7/b3DOKH/X7bBXjJwoowqb1GtEFO3OYgAVjACAAAAAAGCDHuUJusGHKQ+r9nrFChmUUsRcqZKPGsRiLSk5gbFcAAzI5AH0AAAAFZAAgAAAAAK00u6jadxCZAiA+fTsPVDsnW5p5LCr4+kZZZOTDuZlfBXMAIAAAAAAote4zTEYMDgaaQbAdN8Dzv93ljPLdGjJzvnRn3KXgtQVjACAAAAAApZ6l+OrocOqgFek7WOqOP9JruTWZ+iW+5zdL3DZwzhkAAzMwAH0AAAAFZAAgAAAAAFbgabdyymiEVYYwtJSWa7lfl/oYuj/SukzJeDOR6wPVBXMAIAAAAADAFGFjS1vPbN6mQEhkDYTD6V2V23Ys9gUEUMGNvMPkaAVjACAAAAAAeICKly5Xtwmfd4JFD+5e1UWpu4V3KoRim7jeBICDs+UAAzMxAH0AAAAFZAAgAAAAABNMR6UBv2E627CqLtQ/eDYx7OEwQ7JrR4mSHFa1N8tLBXMAIAAAAAAxH4gucI4UmNVB7625C6hFSVCuIpJO3lusJlPuL8H5EQVjACAAAAAA0/uh1hrAlGGna/njCVaCqBwubxkifzF0zjCDQrIJOtUAAzMyAH0AAAAFZAAgAAAAAG8cd6WBneNunlqrQ2EmNf35W7OGObGq9WL4ePX+LUDmBXMAIAAAAAAjJ2+sX87NSis9hBsgb1QprVRnO7Bf+GObCGoUqyPE4wVjACAAAAAAAoJ4Ufdq45T9Aun5FupHlaBCIUsmUn6dXgV9KorpFikAAzMzAH0AAAAFZAAgAAAAAFWOUGkUpy8yf6gB3dio/aOfRKh7XuhvsUj48iESFJrGBXMAIAAAAAAY7sCDMcrUXvNuL6dO0m11WyijzXZvPIcOKob6IpC4PQVjACAAAAAAO/QFjczoznH95Iu0YEVMsU1GA1yxSGL8bcwSweNzAtkAAzM0AH0AAAAFZAAgAAAAAGUrIdKxOihwNmo6B+aG+Ag1qa0+iqdksHOjQj+Oy9bZBXMAIAAAAABwa5dbI2KmzBDNBTQBEkjZv4sPaeRkRNejcjdVymRFKQVjACAAAAAAUtdJucZoQvvPJiy65RonQrxBCcvtfHslpbgLbtWirCYAAzM1AH0AAAAFZAAgAAAAAOx89xV/hRk64/CkM9N2EMK6aldII0c8smdcsZ46NbP8BXMAIAAAAADBF6tfQ+7q9kTuLyuyrSnDgmrdmrXkdhl980i1KHuGHgVjACAAAAAAVc5njbLX1afpsuG662U/OBo4LanEk06lKbQtd95fswgAAzM2AH0AAAAFZAAgAAAAAMkN0L1oQWXhjwn9rA
dudcYeN8/5VdCKU8cmDt7BokjsBXMAIAAAAAAT62pGXoRwExe9uvgYOI0hg5tOxilrWfoEmT0SMglWJwVjACAAAAAAhNc5ovIaxgtS856ZBfG+cAcHAz+3ewzB9C6zZoaggj4AAzM3AH0AAAAFZAAgAAAAANxfa4xCoaaB7k1C1RoH1LBhsCbN2yEq15BT9b+iqEC4BXMAIAAAAACAX9LV8Pemfw7NF0iB1/85NzM1Ef+1mUfyehacUVgobQVjACAAAAAAc0bIRNzuyc1Y0hkEmm7xV6Xp/LBlAgMq/ISCHr0Tq44AAzM4AH0AAAAFZAAgAAAAAOYIYoWkX7dGuyKfi3XssUlc7u/gWzqrR9KMkikKVdmSBXMAIAAAAABVF2OYjRTGi9Tw8XCAwZWLpX35Yl271TlNWp6N/nROhAVjACAAAAAAyGLbmHhe8eTQC8L2eDRcezUWULLZ9E8DPic7Mfp8/+4AAzM5AH0AAAAFZAAgAAAAAIy0+bXZi10QC+q7oSOLXK5Fee7VEk/qHSXukfeVIfgzBXMAIAAAAAAQ3IIV/JQCHW95AEbH5zGIHtJqyuPjWPMIZ+VmQHlxEwVjACAAAAAAD3azcNDdohU6tHO6ypT7F0rtydAYIMmLIsX6y7cXVkMAAzQwAH0AAAAFZAAgAAAAAL5SOJQ3LOhgdXJ5v086NNeAl1qonQnchObdpZJ1kHeEBXMAIAAAAAA+tEqTXODtik+ydJZSnUqXF9f18bPeze9eWtR7ExZJgQVjACAAAAAAUd8v2RN1WagYan1LbV+G81vcJKQ6HsX2GSBtnzMH7XEAAzQxAH0AAAAFZAAgAAAAAKl8zcHJRDjSjJeV/WvMxulW1zrTFtaeBy/aKKhadc6UBXMAIAAAAADBdWQl5SBIvtZZLIHszePwkO14W1mQ0izUk2Ov21cPNAVjACAAAAAAHvQLJ2X4ncjVv1BsOd/jbVouRPwf4222WlGSzPyAfnsAAzQyAH0AAAAFZAAgAAAAAFvotcNaoKnVt5CBCOPwjexFO0WGWuaIGL6H/6KSau+6BXMAIAAAAAD2y2mBN5xPu5PJoY2zcr0GnQDtHRBogA5+xzIxccE9fwVjACAAAAAA1OntPAxq1NJ4sfca+ysHcg68mj7QHwdEZQVxHMlV6x0AAzQzAH0AAAAFZAAgAAAAAPezU0/vNT4Q4YKbTbaeHqcwNLT+IjW/Y9QFpIooihjPBXMAIAAAAACj2x4O4rHter8ZnTws5LAP9jJ/6kk9C/V3vL50LoFZHAVjACAAAAAAlZd3bPMUXRhTwLWflrQuKIwtauTswenUiaDi2ibHYYYAAzQ0AH0AAAAFZAAgAAAAAMqpayM2XotEFmm0gwQd9rIzApy0X+7HfOhNk6VU7F5lBXMAIAAAAACJR9+q5T9qFHXFNgGbZnPubG8rkO6cwWhzITQTmd6VgwVjACAAAAAAEEOCUo+ihRGl1kuHlabWBWUFyJPAXSXzpQB4od57cMEAAzQ1AH0AAAAFZAAgAAAAANCeyW+3oebaQk+aqxNVhAcT/BZ5nhsTVdKS3tMrLSvWBXMAIAAAAADxRFMDhkyuEc++WnndMfoUMLNL7T7rWoeblcrpSI6soQVjACAAAAAA1KYQISxmBcGmgDMAjBpTcfQr/oci60Kf4QtEYEXPKSQAAzQ2AH0AAAAFZAAgAAAAAIbo5YBTxXM7HQhl7UP9NNgpPGFkBx871r1B65G47+K8BXMAIAAAAAC21dJSxnEhnxO5gzN5/34BL4von45e1meW92qowzb8fQVjACAAAAAAisxsskU/9ykphpSjB6bMMY+6Tbt6wpjhE2dg1IULAgYAAzQ3AH0AAAAFZAAgAAAAABm/6pF96j26Jm7z5KkY1y33zcAEXLx2n0DwC03bs/ixBXMAIAAAAAD01OMvTZI/mqMgxIhA5nLs068mW+GKl3OW3ilf2D8+LgVjACAAAAAAnPa8PTfcaSSAdzNAC54IMTicbShbtt/cSnFHz7u7g8wAAzQ4AH0AAAAFZAAgAAAAAJ/D3+17gaQdkBqkL2wMwccdmCaVOtxzIkM8VyI4xI5zBXMAIAAAAAAggLVmkc5u+YzBR+oNE+XgLVp64fC6MzUb/Ilu/Jsw0AVjACAAAAAAp5qCK8p0eTvAAUQ8fIOGyb2YM1YflQ8H6ggiWZ9vNI4AAzQ5AH0AAAAFZAAgAAAAAJMRyUW50oaTzspS6A3TUoXyC3gNYQoShUGPakMmeVZrBXMAIAAAAACona2Pqwt4U2PmFrtmu37jB9kQ/12okyAVtYa8TQkDiQVjACAAAAAAx/LOuDwvpaPFJeKLowZW/4q7SUsLtdldB27zc4//alEAAzUwAH0AAAAFZAAgAAAAAByuYl8dBvfaZ0LO/81JW4hYypeNmvLMaxsIdvqMPrWoBXMAIAAAAABNddwobOUJzm9HOUD8BMZJqkNCUCqstHZkC76FIdNg9AVjACAAAAAA4SEPY+qEZ2dCu/9485IEVybiM3Z7szXYM+izxklNm14AAzUxAH0AAAAFZAAgAAAAAHEzLtfmF/sBcYPPdj8867VmmQyU1xK9I/3Y0478azvABXMAIAAAAAAcmyFajZPnBTbO+oLInNwlApBocUekKkxz2hYFeSlQ+gVjACAAAAAA15ibZoBP1/pP27BDIfizDYNRDIpOmj8Ie34FvrtqjhMAAzUyAH0AAAAFZAAgAAAAAJam7JYsZe2cN20ZYm2W3v1pisNt5PLiniMzymBLWyMtBXMAIAAAAABxCsKVMZMTn3n+R2L7pVz5nW804r8HcK0mCBw3jUXKXAVjACAAAAAAElCgoMRrtlONqnyosjIpiz4mQTLq3+b/JUCVEq4QFiAAAzUzAH0AAAAFZAAgAAAAAMXrXx0saZ+5gORmwM2FLuZG6iuO2YS+1IGPoAtDKoKBBXMAIAAAAADIQsxCr8CfFKaBcx8kIeSywnGh7JHjKRJ9vJd9x79y7wVjACAAAAAAPzOTJ088k9QdGUpZ1ubUdkw3pTGkNF8bui4a9wqeo08AAzU0AH0AAAAFZAAgAAAAACbzcUD3INSnCRspOKF7ubne74OK9L0FTZvi9Ay0JVDYBXMAIAAAAADPebVQH8Btk9rhBIoUOdSAdpPvz7qIY4UC2i6IGisSAQVjACAAAAAAe4zSAgMQPjtz2t1DgxW1XOlV+fnNMXt6iQ7iV7DgQz4AAzU1AH0AAAAFZAAgAAAAACUc2CtD1MK/UTxtv+8iA9FoHEyTwdl43HKeSwDw2Lp5BXMAIAAAAACCIduIdw65bQMzRYRfjBJj62bc69T4QqH4QoWanwlvowVjACAAAAAA+i6aTnkoixxSOxDgQM/aWTylOy1m0ViCDU5asOM5ieYAAzU2AH0AAAAFZAAgAAAAAHSaHWs/dnmI9sc7nB50VB2Bzs0kHapMHCQdyVEYY30TBXMAIAAAAACkV22lhEjWv/9/DubfHBAcwJggKI5mIbSK5L2nyqloqQVjACAAAAAA1I5pLbnlFf/EfJlhE0RFIioDj
zKrWNh4TO8H/ksrbMcAAzU3AH0AAAAFZAAgAAAAAAL8jhNBG0KXXZhmZ0bPXtfgapJCB/AI+BEHB0eZ3C75BXMAIAAAAADHx/fPa639EBmGV5quLi8IQT600ifiKSOhTDOK19DnzwVjACAAAAAANfWpWWy9iu7pgMEy2aVCggGEtJtXKQG1+3zCf8D6iEkAAzU4AH0AAAAFZAAgAAAAAAxgeclNl09H7HvzD1oLwb2YpFca5eaX90uStYXHilqKBXMAIAAAAACMU5pSxzIzWlQxHyW170Xs9EhD1hURASQk+qkx7K5Y6AVjACAAAAAA/kYlJPSFn2g1l7cy6djV/7wgnlPWphZ0IKSY8uenUF0AAzU5AH0AAAAFZAAgAAAAABmO7QD9vxWMmFjIHz13lyOeV6vHT6mYCsWxF7hb/yOjBXMAIAAAAACT9lmgkiqzuWG24afuzYiCeK9gmJqacmxAruIukd0xEAVjACAAAAAAyiZxtuOZQNSit4wFMJS3jnqLbDzyJ0OtDTKs6r0EO0cAAzYwAH0AAAAFZAAgAAAAAB89SjLtDJkqEghRGyj6aQ/2qvWLNuMROoXmzbYbCMKMBXMAIAAAAAC8sywgND+CjhVTF6HnRQeay8y9/HnVzDI42dEPah28LQVjACAAAAAAzBj8q9Ig4uhIc6rhNDVX1lII9lzfhJ0+ig8viBhdrtQAAzYxAH0AAAAFZAAgAAAAABN2alGq9Aats1mwERNGwL/fIwZSvVCe9/8XMHTFlpUpBXMAIAAAAACuDPjJgvvbBYhbLpjMiWUCsVppiYrhvR+yMysNPN8cZAVjACAAAAAAqcsLIvb3Pe/QKYB9wQ86Ye2OFSAJEIY4ve7LMM3SwGMAAzYyAH0AAAAFZAAgAAAAAL8YB6VAqGBiWD4CBv16IBscg5J7VQCTZu87n6pj+86KBXMAIAAAAAAmxm8e68geeyAdUjSMWBHzUjneVB0pG9TBXIoE6467hAVjACAAAAAA9Kd65vKIUckNPvxl/HHNPJVnFHXyErp4qibmD7BloukAAzYzAH0AAAAFZAAgAAAAAL4yLkCTV5Dmxa5toBu4JT8ge/cITAaURIOuFuOtFUkeBXMAIAAAAAAXoFNQOMGkAj7qEJP0wQafmFSXgWGeorDVbwyOxWLIsgVjACAAAAAAaFrBx0xXIryTe7V/kX+jfPHd0057x7k7MxzDAzi0RpcAAzY0AH0AAAAFZAAgAAAAAEU8DoUp46YtYjNFS9kNXwdYxQ9IW27vCTb+VcqqfnKNBXMAIAAAAADe7vBOgYReE8X78k5ARuUnv4GmzPZzg6SbConf4L2G3wVjACAAAAAAOl/tartywd/fJj5DNRsVH/ml9tJ8KkkCbKObsFe8lHcAAzY1AH0AAAAFZAAgAAAAAPa4yKTtkUtySuWo1ZQsp2QXtPb5SYqzA5vYDnS1P6c0BXMAIAAAAADKnF58R1sXlHlsHIvCBR3YWW/qk54z9CTDhZydkD1cOQVjACAAAAAAfn/jP8K3QUILngCNkydHARyBvBHIFdaJjzV0EXsFruMAAzY2AH0AAAAFZAAgAAAAAN5WJnMBmfgpuQPyonmY5X6OdRvuHw4nhsnGRnFAQ95VBXMAIAAAAACwftzu7KVV1rmGKwXtJjs3cJ1gE3apr8+N0SAg1F2cHwVjACAAAAAAf0XyQQLd/HIYaf9EeAV0o2h12K1AV5piLCpZihznBXoAAzY3AH0AAAAFZAAgAAAAACHvDsaPhoSb6DeGnKQ1QOpGYAgK82qpnqwcmzSeWaJHBXMAIAAAAABRq3C5+dOfnkAHM5Mg5hPB3O4jhwQlBgQWLA7Ph5bhgwVjACAAAAAAm2Ukk5kkQp+PDpBCCefQOqFKKZie4hINim3yvtypsEAAAzY4AH0AAAAFZAAgAAAAAOBePJvccPMJmy515KB1AkXF5Pi8NOG4V8psWy0SPRP+BXMAIAAAAAB3dOJG9xIDtEKCRzeNnPS3bFZepMj8UKBobKpSoCPqpgVjACAAAAAAnu33SngYPOtRgdJ3aBBuxWn80ti3OO6nZJcI6eh0VfQAAzY5AH0AAAAFZAAgAAAAABUvRrDQKEXLMdhnzXRdhiL6AGNs2TojPky+YVLXs+JnBXMAIAAAAAD1kYicbEEcPzD4QtuSYQQWDPq8fuUWGddpWayKn3dT9QVjACAAAAAAUc5dxcbNYY3qMO8+Xm2xis+pH9NjPLrHqHenwDEImAEAAzcwAH0AAAAFZAAgAAAAAOvSnpujeKNen4pqc2HR63C5s5oJ1Vf4CsbKoYQvkwl5BXMAIAAAAACw2+vAMdibzd2YVVNfk81yXkFZP0WLJ82JBxJmXnYE+QVjACAAAAAAnmej008JFQAgdImjk26PYvqdATjRYRufLi+vEVVwtucAAzcxAH0AAAAFZAAgAAAAAAo0xfGG7tJ3GWhgPVhW5Zn239nTD3PadShCNRc9TwdNBXMAIAAAAADZh243oOhenu0s/P/5KZLBDh9ADqKHtSWcXpO9D2sIjgVjACAAAAAAhpNHI4+aRdWHCvZfEjlgpRBz36g05wN9p/hj3NEwc7EAAAVlACAAAAAA65pz95EthqQpfoHS9nWvdCh05AV+OokP7GUaI+7j8+wSY20AAAAAAAAAAAAAEHBheWxvYWRJZAAAAAAAEGZpcnN0T3BlcmF0b3IAAQAAAAA=", + "subType": "06" + } + } + } + }, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDouble", + "bsonType": "double", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + } + } + } + }, + "command_name": "find" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 0, + "encryptedDouble": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "5nRutVIyq7URVOVtbE4vM01APSIajAVnsShMwjBlzkM=", + "subType": 
"00" + } + }, + { + "$binary": { + "base64": "6YrBn2ofIw1b5ooakrLOwF41BWrps8OO0H9WH4/rtlE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "n+XAuFnP8Dov9TnhGFxNx0K/MnVM9WbJ7RouEu0ndO0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yRXojuVdn5GQtD97qYlaCL6cOLmZ7Cvcb3wFjkLUIdM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "DuIkdRPITRs55I4SZmgomAHCIsDQmXRhW8+MOznkzSk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SsBk+Et1lTbU+QRPx+xyJ/jMkmfG+QCvQEpip2YYrzA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "crCIzOd8KhHvvUlX7M1v9bhvU4pLdTc+X2SuqoKU5Ek=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "YOWdCw4UrqnxkAaVjqmC4sKQDMVMHEpFGnlxpxdaU6E=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "M3SShp81Ff8tQ632qKbv9MUcN6wjDaBReI0VXNu6Xh4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "gzHlSPxpM0hT75kQvWFzGlOxKvDoiKQZOr19V6l2zXI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "s3JnppOGYw9SL2Q1kMAZs948v2F5PrpXjGei/HioDWs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cG6+3Gk/zEH68P/uuuwiAUVCuyJwa1LeV+t29FlPPAo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dupdvR3AyJtM+g9NDKiaLVOtGca387JQp8w+V03m7Ig=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JqEQc5svj2jTvZ6LLA5ivE+kTb/0aRemSEmxk4G7Zrg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "szcXXXKnob+p3SoM4yED2R920LeJ7cVsclPMFTe4CeI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "o1QoGVXmuBdHwHm7aCtGMlMVKrjFdYvJXpoq6uhIAZ0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Jfm5wPlqqLCJRGQIqRq2NGmpn7s0Vrih2H3YAOoI2YU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zMHLb8ARbsYo8Ld05bqnGFf1Usha6EGb8QKwdSAyps0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yQdtq9lh5pugL7/i0Bj/PuZUUBUIzf+7wj1rl5y736w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wGWVZdO7qIuyDg/BqDgqjgoQ02h5YYgwXQB1oCin2NE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "by9HMLj6NTEpgztZ5HSN6GxImkXPcaFINYDzgZY33X8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tWo0vbasi7bXmn/MsOx13VC1IsWtpx/nYp0uj4iMzdA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tQQpndUYd5O87lOtrGjH3wl9VsOK0ray7RMasL90sBM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cQjXEDCMsOpKLLf+vlTgIHA+cbSJdzqhbSX9Wvh95aA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "7yMpU48IxK9SzP2cx3VnTownGEwFmeFofuuFT97SuuY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kSOx1kz0CmBgzKQHZlo65ZUY1DIv9A99JRm+Us2y6Ew=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ubQpdPBe6/xvtr+AcXdfYLSvYCR4ot0tivehkCsupb4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xal+iCJ6FTefRQToyoNksc9NCZShyn04NDGi4IYrcoM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "d7jU4iOK50xHxlkSifcxlZFCM46TSgQzoYivxG3HNLY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tJvl2nsBLBVzL3pp6sKWCL4UXeh3q/roYBJjSb74ve0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "OIUCaKRvIx9t1w6Hxlz1IcQTdPNCfdRNwnnTm10W+X0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "A9tvzsiElotOUVIB4CqfQp9mAwqvTM35YkmAR170aHA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "lI8gpK7hpb7c9x4RQugsxMnQay5LZJmwslZdvMx/dcE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dNCzh40U0XvdKnSDi3HRQOWQftEsDVqc4uUvsVFGoq8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": 
"IP+iwEBWBwVVZIdpaMu8k5+soFCz+TZkYn3drKZ9grE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pnqyh6e0y5svHkJDShlN9CHV0WvMBE4QbtJpQw5ZCXc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "elEl42tbVDoRTLjAhZUFEtXiut4b3PVhg/1ZLZSQdtE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "vHuu2FxwclMHqyE6JBYbTYgbEkB0dqb/JuaxsvfwsmY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xTf7NCe3Gf8QpE78HR5OknlLTKfs9J+RN9UZpH6fnso=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "XiWSasRnJAulGR6+LCVD3mwRObXylqYWR9jvpywq12c=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "MZMxEQ5ikx0PG1YFIExv0UnTZogsvgeOEZTpzvBDn4w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yZMyMZBDrWbAhvnic7vvIYhmO9m5H2iuv0c8KNZrBzY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xxM14hTPY5j0vvcK2C7YAEjzdsfUTFHozHC0hEo1bxI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+01rqR1xVwkpGXcstbk1ItJqFVjH6Q8MGxEN3Cm9Y1A=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xOpLV0Z2VTRJ3iWtnWZcsyjXubTIkYWo31cO+HV1o1k=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "BWUOLqgLBqc5NwxVlSV5H3KFQPXbCp7mdo+jF+8cJqY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "fuQb1S6xZDGlrEbK+kI23aL53PP1PVNwqICnZNt9Yzg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SfscnoibFttahLdPVC4Ee+47ewGFKpDSU7M6HX19bKE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "rpSW2awybNVeKtat91VFxqbINoTfNhPfQAu+d73Xtf8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "9M/CP9ccOIIj2LLFmE0GFDO0Ban2wsNalEXfM6+h+1s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "WrEMG49l1ye4MhXs5ZS9tz8P6h+hDvthIg/2wW9ne1Q=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ImNhbfeyfH8qIEeA5ic0s3dAQBdzzTBS+CPsNih9vZ0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dWP33YDSn04UKJN2ogh2Rui0iW/0q2y18OCDRVcfyoo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "lYv0isAtfGh6H9tdp3cp2eHU7q2J+uk7QrgcxtK3w7Y=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "VGMoamB/+7zTOYcY/pqJc96xlv2PdW4hwsIAEIslTDQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yNeBWMF7BnD9wVwz2PgJsvWr77QiVvvWUvJF0+fqBug=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SfpvObJ+tJBXSvqeN7vlOfmhYign635lciYAJIjUtY8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dsen4NqjzVGjpjufiTMs3+gqeD09EbnuogPgxrJECwg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pxCWVM3sn19NsFEpgHbgLa+PmYlhN3mMiP0Wk8kJhYw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q11KNvJszjYIB9n9HcC+N4uz11a3eRj1L3BH9scKMDQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "A1PmkgcEToWh1JiVWE6mI5jUu7poxWWuCUt/cgRUUDc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "qJo3Hu4PJeanL7XEaWXO/n3YsodhZyd+MJOOmB9Kpd8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "BkBKLO8URFscfRY9Bav/1+L9mLohDgNr/MkZtGiraIs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "rZq5WA3Hx3xthOyHAJXK//f8pE2qbz7YKu3TIMp9GFY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "X07a/Lm80p5xd4RFs1dNmw+90tmPDPdGiAKVZkxd4zY=", + "subType": "00" + } + } + ] + }, + { + "_id": 1, + "encryptedDouble": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "bE1vqWj3KNyM7cCYUv/cnYm8BPaUL3eMp5syTHq6NF4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "2FIZh/9N+NeJEQwxYIX5ikQT85xJzulBNReXk8PnG/s=", + "subType": "00" 
+ } + }, + { + "$binary": { + "base64": "I93Md7QNPGmEEGYU1+VVCqBPBEvXdqHPtTJtMOn06Yk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "GecBFQ1PemlECWZWCl7f74vmsL6eB6mzQ9n6tK6FYfs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "QpjhZl+O1ORifgtCZuWAdcP6OKL7IZ2cA46v8FJcV28=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "FWXI/yZ1M+2fIboeMCDMlp+I2NwPQDtoM/wWselOPYw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "uk26nvN/LdRLaBphiBgIZzT0sSpoO1z0RdDWRm/xrSA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "hiiYSH1KZovAULc7rlmEU74wCjzDR+mm6ZnsgvFQjMw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "hRzvMvWPX0sJme+wck67lwbKDFaWOa+Eyef+JSdc1s4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "PSx5D+zqC9c295dguX4+EobT4IEzfffdfjzC8DWpB5Q=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "QzfXQCVTjPQv2h21v95HYPq8uCsVJ2tPnjv79gAaM9M=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "XcGDO/dlTcEMLqwcm55UmOqK+KpBmbzZO1LIzX7GPaQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Lf+o4E7YB5ynzUPC6KTyW0lj6Cg9oLIu1Sdd1ODHctA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wAuVn02LAVo5Y+TUocvkoenFYWzpu38k0NmGZOsAjS4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yJGDtveLbbo/0HtCtiTSsvVI/0agg/U1bFaQ0yhK12o=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "KsEy0zgYcmkM+O/fWF9z3aJGIk22XCk+Aw96HB6JU68=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "p+AnMI5ZxdJMSIEJmXXya+FeH5yubmOdViwUO89j0Rc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "/jLix56jzeywBtNuGw55lCXyebQoSIhbful0hOKxKDY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "fvDvSPomtJsl1S3+8/tzFCE8scHIdJY5hB9CdTEsoFo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "oV5hOJzPXxfTuRdKIlF4uYEoMDuqH+G7/3qgndDr0PM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "3ALwcvLj3VOfgD6OqXAO13h1ZkOv46R6+Oy6SUKh53I=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "gxaB9FJj0IM+InhvAjwWaex3UIZ9SAnDiUd5WHSY/l0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "66NPvDygJzKJqddfNuDuNOpvGajjFRtvhkwfUkiYmXw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "1dWcQIocRAcO9XnXYqbhl83jc0RgjQpsrWd8dC27trg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "npos0Uf1DT3ztSCjPVY9EImlRnTHB1KLrvmVSqBQ/8E=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "TEI9qBx/tK1l1H0v1scMG8Srmtwo5VxWHADPBSlWrXk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "3wUN2ypQKoj+5ASkeIK9ycxhahVxyTmGopigoUAlyYs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "o/oksSnUS+nIq6ozWTbB5bJh+NoaPj8deAA23uxiWCk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "KExYPruhA31e8xuSwvfUfDcyY/H2Va6taUd0k4yFgLc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "/x+dNfxdd/lkx8Z8VZVfoYl7LPoaZ/iKEzZXBrAtIJc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "DE4cmjFLPqZlmRomO0qQiruUBtzoCe8ZdNRcfNH92pU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "M6EKNcLPw/iojAChgYUSieaBYWcbsjKtB94SaHOr8vk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+qP49lDPeyhaduTvXJgtJEqHNEYANVu9Bg3Bxz7Td9w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ruMrC2VIS+VKbJwCFb3bfkaLTju9nE+yPONV9s0M0Vo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "EbjDlSB5JKnDKff4d8hOmaOwJ7B9Q6NQFisLj+DPC+0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": 
"C/yYOTB94edyqAbiQNu8/H7FoG3yRRjHDkMykz4+Mv0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CBxqrejG+qQQq2YTd6iP/06kiu2CxxzBFaZK3Ofb1CM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "2ZOQ/fpho+AbDENWBZaln7wRoepIRdhyT648dr8O5cU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "EghIgEPz01+myPgj8oid+PgncvobvC7vjvG3THEEQ0M=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "92CysZYNF8riwAMhdrIPKxfODw9p07cKQy/Snn8XmVY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "VO0LeTBQmsEf7sCHzTnZwUPNTqRZ49R8V5E9XnZ/5N4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "exs8BQMJq7U6ZXYgIizT7XN+X/hOmmn4YEuzev9zgSI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "qHpS4k1I+gPniNp4CA8TY8lLN36vBYmgbKMFpbYMEqg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+7lWKCKAWFw6gPZdHE6E8KIfI14/fSvtWUmllb5WLi0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "YiH/US0q6679hWblFDDKNqUjCgggoU8sUCssTIF1QbU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "YgwkKElEubNfvXL9hJxzqQUQtHiXN/OCGxNL1MUZZlM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "hZFST4INZTTuhvJlGJeMwlUAK270UCOTCDeBAnN4a7g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "24I1Zw35AuGnK3CqJhbCwYb0IPuu5sCRrM5iyeITOLc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "vgD12JB4Q1S/kGPSQ1KOgp386KnG1GbM/5+60oRGcGw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+wNE+OL+CB9d4AUJdVxd56jUJCAXmmk9fapuB2TAc4g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "uhQh1B2Pe4RkNw/kPEcgaLenuikKoRf1iyfZhpXdodc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "eu8gjAUIp8ybO204AgeOq5v1neI1yljqy5v3I6lo1lM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "7QG6oVbASBAjrnCPxzzUNnuFSFNlKhbuBafkF8pr7Is=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "PUS1xb2oHSDTdYltutoSSxBiJ1NjxH3l2kA4P1CZLEs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "XPMh/JDC/O93gJJCwwgJDb8ssWZvRvezNmKmyn3nIfk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "jWz+KGwMk/GOvFAK2rOxF3OjxeZAWfmUQ1HGJ7icw4A=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "o7XbW68pc6flYigf3LW4WAGUWxpeqxaQLkHUhUR9RZ8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "nqR+g60+5U0okbqJadSqGgnC+j1JcP8rwMcfzOs2ACI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Hz43qVK95tSfbYFtaE/8fE97XMk1RiO8XpWjwZHB80o=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "noZUWlZ8M6KXU5rkifyo8/duw5IL7/fXbJvT7bNmW9k=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "WONVHCuPSanXDRQQ/3tmyJ0Vq+Lu/4hRaMUf0g0kSuw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "UEaj6vQRoIghE8Movd8AGXhtwIOXlP4cBsECIUvE5Y8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "D3n2YcO8+PB4C8brDo7kxKjF9Y844rVkdRMLTgsQkrw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "C+YA0G9KjxZVaWwOMuh/dcnHnHAlYnbFrRl0IEpmsY0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "rUnmbmQanxrbFPYYrwyQ53x66OSt27yAvF+s48ezKDc=", + "subType": "00" + } + } + ] + } + ] + } + } + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2-Range-Double-Update.json b/test/client-side-encryption/spec/legacy/fle2-Range-Double-Update.json new file mode 100644 index 0000000000..cdc9f28e76 --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2-Range-Double-Update.json @@ -0,0 +1,1164 @@ +{ + "runOn": [ + { + "minServerVersion": "6.2.0", + 
"topology": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "encrypted_fields": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDouble", + "bsonType": "double", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + }, + "key_vault_data": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ], + "tests": [ + { + "description": "FLE2 Range Double. Update.", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDouble": { + "$numberDouble": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDouble": { + "$numberDouble": "1" + } + } + } + }, + { + "name": "updateOne", + "arguments": { + "filter": { + "encryptedDouble": { + "$gt": { + "$numberDouble": "0" + } + } + }, + "update": { + "$set": { + "encryptedDouble": { + "$numberDouble": "2" + } + } + } + }, + "result": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0 + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedDouble": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDouble", + "bsonType": "double", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": 
{ + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedDouble": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDouble", + "bsonType": "double", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command_name": "update", + "command": { + "update": "default", + "ordered": true, + "updates": [ + { + "q": { + "encryptedDouble": { + "$gt": { + "$binary": { + "base64": "Cq8kAAADcGF5bG9hZAB/JAAABGcAQyQAAAMwAH0AAAAFZAAgAAAAAHgYoMGjEE6fAlAhICv0+doHcVX8CmMVxyq7+jlyGrvmBXMAIAAAAAC/5MQZgTHuIr/O5Z3mXPvqrom5JTQ8IeSpQGhO9sB+8gVjACAAAAAAPMgD8Rqnd94atKnMyPjlTwthUQ710MKJVqgtwNXLFWwAAzEAfQAAAAVkACAAAAAA2kiWNvEc4zunJ1jzvuClFC9hjZMYruKCqAaxq+oY8EAFcwAgAAAAACofIS72Cm6s866UCk+evTH3CvKBj/uZd72sAL608rzTBWMAIAAAAAB7NkTFgsBuPIyxW5KtYZD4uxdgRynoAUwA2RDj3qZNdwADMgB9AAAABWQAIAAAAABkfoBGmU3hjYBvQbjNW19kfXneBQsQQPRfUL3UAwI2cAVzACAAAAAAUpK2BUOqX/DGdX5YJniEZMWkofxHqeAbXceEGJxhp8AFYwAgAAAAAENNdd+fMV7VQzshj6RdvsxqDHYkPDxggdUHw3JbmFBeAAMzAH0AAAAFZAAgAAAAAFNprhQ3ZwIcYbuzLolAT5n/vc14P9kUUQComDu6eFyKBXMAIAAAAAAcx9z9pk32YbPV/sfPZl9ALIEVsqoLXgqWLVK/tP+heAVjACAAAAAA3eqduo9CWE5BePqBq7KCh5++QqXnyjCwybB15Zoeiu4AAzQAfQAAAAVkACAAAAAAODI+pB2pCuB+YmNEUAgtMfNdt3DmSkrJ96gRzLphgb8FcwAgAAAAAAT7dewFDxUDECQ3zVq75/cUN4IP+zsqhkP5+czUwlJIBWMAIAAAAABYcCJA2jocGQiF/vZ9VDEktoK015OqfY64yZhmNPs9GQADNQB9AAAABWQAIAAAAACNAk+yTZ4Ewk1EnotQK8O3h1gg9I7pr9q2+4po1iJVgAVzACAAAAAAUj/LesmtEsgqYVzMJ67umVA11hJTdDXwbxDoQ71vWyUFYwAgAAAAAG32XaKjEO9YTSLMnDR5eiaUpgBQz0n4M3VpBqhWTaEnAAM2AH0AAAAFZAAgAAAAAD/FIrGYFDjyYmVb7oTMVwweWP7A6F9LnyIuNO4MjBnXBXMAIAAAAACIZgJCQRZu7NhuNMyOqCn1tf+DfU1qm10TPCfj5JYV3wVjACAAAAAAZCaO06/DjOBjNrTxB8jdxM7X4XvNu9QTa2Y00kZJwgEAAzcAfQAAAAVkACAAAAAAciRW40ORJLVwchOEpz87Svb+5toAFM6LxDWv928ECwQFcwAgAAAAAN0dipyESIkszfjRzdDi8kAGaa2Hf4wrPAtiWwboZLuxBWMAIAAAAADhvKHhF7zfNRnG8QfiIlKycV6L6iGxbGNKkHligA9rAgADOAB9AAAABWQAIAAAAACZqAyCzYQupJ95mrBJX54yIz9VY7I0WrxpNYElCI4dTQVzACAAAAAA/eyJb6d1xfE+jJlVXMTD3HS/NEYENPVKAuj56Dr2dSEFYwAgAAAAAMfcJKj77c6y/RBQ5hb5LSNdvbYvCAHR59KLnbmxj1gAAAM5AH0AAAAFZAAgAAAAAI5bm3YO0Xgf0VT+qjVTTfvckecM3Cwqj7DTKZXf8/NXBXMAIAAAAAD/m+h8fBhWaHm6Ykuz0WX1xL4Eme3ErLObyEVJf8NCywVjACAAAAAAiGdTXD22l1zDxHeF4NXUTiBnNTsdpzJRwM6riTPuOogAAzEwAH0AAAAFZAAgAAAAANqo4+p6qdtCzcB4BX1wQ6llU7eFBnuu4MtZwp4B6mDlBXMAIAAAAAAGiz+VaukMZ+6IH4jtn4KWWdKK4/W+O+gRioQDrfzpMgVjACAAAAAAl/wlBjSJW/hKkll8HSBCGw4Ce1pJ5rZuhVE09hKq4jQAAzExAH0AAAAFZAAgAAAAAPrFXmHP2Y4YAm7b/aqsdn/DPoDkv7B8egWkfe23XsM1BXMAIAAAAAAGhwpKAr7skeqHm3oseSbO7qKNhmYsuUrECBxJ5k+D2AVjACAAAAAAKSsxERy2OBhb5MiH9/H2BET2oeU6yVihiAoWi/16FroAAzEyAH0AAAAFZAAgAAAAABzjYxwAjXxXc0Uxv18rH8I3my0Aguow0kTwKyxbrm+cBXMAIAAAAADVbqJVr6IdokuhXkEtXF0C2gINLiAjMVN20lE20Vmp2QVjACAAAAAACJ+VAwl9X88mjC76yzkWeL4K6AamNYjbNkpS9B6fQE4AAzEzAH0AAAAFZAAgAAAAAFMm2feF2fFCm/UC6AfIyepX/xJDSmnnolQIBnHcPmb5BXMAIAAAAABLI11kFrQoaNVZFmq/38aRNImPOjdJh0Lo6irI8M/AaAVjACAAAAAAZFgYh4nV8YN+/AAqe/QhKDkNBPg8KraCG7y3bOzhPV4AAzE0AH0AAAAFZAAgAAAAAJvXB3KyNiNtQko4SSzo/9b2qmM2zU9CQTTDfLSBWMgRBXMAIAAAAAAvjuVP7KsLRDeqVqRziTKpBrjVyqKiIbO9Gw8Wl2wFTAVjACAAAAAARuBn3JdQ8so7Gy0XVH0CtlvtRoJWhSrJP6eRIqbyWh8AAzE1AH0AAAAFZAAgAAAAAPGdcxDi
id8z8XYnfdDivNMYVPgBKdGOUw6UStU+48CdBXMAIAAAAAARj6g1Ap0eEfuCZ4X2TsEw+Djrhto3fA5nLwPaY0vCTgVjACAAAAAATxLY6SkcLPaoOBJpQsggEqoxgJgiY9seP3fBQM05PckAAzE2AH0AAAAFZAAgAAAAAP5rGPrYGt3aKob5f/ldP0qrW7bmWvqnKY4QwdDWz400BXMAIAAAAADTQkW2ymaaf/bhteOOGmSrIR97bAnJx+yN3yMj1bTeewVjACAAAAAA6O+wIMt3xMITuCxVrqOCNBPX5F122G+/Is+EYkzUvVYAAzE3AH0AAAAFZAAgAAAAAAlz6wJze5UkIxKpJOZFGCOf3v2KByWyI6NB6JM9wNcBBXMAIAAAAABUC7P/neUIHHoZtq0jFVBHY75tSFYr1Y5S16YN5XxC1QVjACAAAAAANv97jMd9JmZ7lMeCzK7jaZHZYsZJNl6N9g9WXzd2om0AAzE4AH0AAAAFZAAgAAAAAFJ8AtHcjia/9Y5pLEc3qVgH5xKiXw12G9Kn2A1EY8McBXMAIAAAAAAxe7Bdw7eUSBk/oAawa7uicTEDgXLymRNhBy1LAxhDvwVjACAAAAAAtOUZ2NeUWwESpq95O92dhqYBt8RtFnjT43E7k9mQF3oAAzE5AH0AAAAFZAAgAAAAAO8uwQUaKFb6vqR3Sv3Wn4QAonC2exOC9lGG1juqP5DtBXMAIAAAAABZf1KyJgQg8/Rf5c02DgDK2aQu0rNCOvaL60ohDHyY+gVjACAAAAAA5nAWj1Ek8p0MLYZEB3yoVFDfBYZ+/ZpIo71u+W9hprcAAzIwAH0AAAAFZAAgAAAAAE8YtqyRsGCeiR6hhiyisR/hccmK4nZqIMzO4lUBmEFzBXMAIAAAAAC1UYOSKqAeG1UJiKjWFVskRhuFKpj9Ezy+lICZvFlN5AVjACAAAAAA37oP30mTzVftI2FL9Uwyls/jqLqbmRDQk7p7nlx44uYAAzIxAH0AAAAFZAAgAAAAAPhCrMausDx1QUIEqp9rUdRKyM6a9AAx7jQ3ILIu8wNIBXMAIAAAAACmH8lotGCiF2q9VQxhsS+7LAZv79VUAsOUALaGxE/EpAVjACAAAAAAhVIkl8p19VZ0gpg32s++Jda3qsVSVB5tV4CKrtjhE3IAAzIyAH0AAAAFZAAgAAAAAOBi/GAYFcstMSJPgp3VkMiuuUUCrZytvqYaU8dwm8v2BXMAIAAAAACEZSZVyD3pKzGlbdwlYmWQhHHTV5SnNLknl2Gw8IaUTQVjACAAAAAAriBex2kK//RPhyVpCYJDBng4l5w8jD3m8BF7dVAP0p8AAzIzAH0AAAAFZAAgAAAAAIQuup+YGfH3mflzWopN8J1X8o8a0d9CSGIvrA5HOzraBXMAIAAAAADYvNLURXsC2ITMqK14LABQBI+hZZ5wNf24JMcKLW+84AVjACAAAAAAGeUzdG2kQrx5XypXJezZmPVzMYuqYZw7Bhbl4EPT0SMAAzI0AH0AAAAFZAAgAAAAAKsh0ADyOnVocFrOrf6MpTrNvAj8iaiE923DPryu124gBXMAIAAAAADg24a8NVE1GyScc6tmnTbmu5ulzO+896fE92lN08MeswVjACAAAAAAbO+DEBY3STVMQN7CbxmHDUBYrDR+80e797u/VmiK4vcAAzI1AH0AAAAFZAAgAAAAAKkVC2Y6HtRmv72tDnPUSjJBvse7SxLqnr09/Uuj9sVVBXMAIAAAAABYNFUkH7ylPMN+Bc3HWX1e0flGYNbtJNCY9SltJCW/UAVjACAAAAAAHlE3RLKcXpXto7Mr8nRgzEsmhjfGh4vcgPxashCSMg4AAzI2AH0AAAAFZAAgAAAAADDggcwcb/Yn1Kk39sOHsv7BO/MfP3m/AJzjGH506Wf9BXMAIAAAAAAYZIsdjICS0+BDyRUPnrSAZfPrwtuMaEDEn0/ijLNQmAVjACAAAAAA1c/sy3255NofBQrnBNSmVkSzMgsGPaaOUJShddVrnuQAAzI3AH0AAAAFZAAgAAAAAEWY7dEUOJBgjOoWVht1wLehsWAzB3rSOBtLgTuM2HC8BXMAIAAAAAAAoswiHRROurjwUW8u8D5EUT+67yvrgpB/j6PzBDAfVwVjACAAAAAALBvQumG8m7/bzJjGWN2cHSAncdN8jMtOSmEhEhGom24AAzI4AH0AAAAFZAAgAAAAAPZaapeAUUFPA7JTCMOWHJa9lnPFh0/gXfAPjA1ezm4ZBXMAIAAAAACmJvLY2nivw7/b3DOKH/X7bBXjJwoowqb1GtEFO3OYgAVjACAAAAAAGCDHuUJusGHKQ+r9nrFChmUUsRcqZKPGsRiLSk5gbFcAAzI5AH0AAAAFZAAgAAAAAK00u6jadxCZAiA+fTsPVDsnW5p5LCr4+kZZZOTDuZlfBXMAIAAAAAAote4zTEYMDgaaQbAdN8Dzv93ljPLdGjJzvnRn3KXgtQVjACAAAAAApZ6l+OrocOqgFek7WOqOP9JruTWZ+iW+5zdL3DZwzhkAAzMwAH0AAAAFZAAgAAAAAFbgabdyymiEVYYwtJSWa7lfl/oYuj/SukzJeDOR6wPVBXMAIAAAAADAFGFjS1vPbN6mQEhkDYTD6V2V23Ys9gUEUMGNvMPkaAVjACAAAAAAeICKly5Xtwmfd4JFD+5e1UWpu4V3KoRim7jeBICDs+UAAzMxAH0AAAAFZAAgAAAAABNMR6UBv2E627CqLtQ/eDYx7OEwQ7JrR4mSHFa1N8tLBXMAIAAAAAAxH4gucI4UmNVB7625C6hFSVCuIpJO3lusJlPuL8H5EQVjACAAAAAA0/uh1hrAlGGna/njCVaCqBwubxkifzF0zjCDQrIJOtUAAzMyAH0AAAAFZAAgAAAAAG8cd6WBneNunlqrQ2EmNf35W7OGObGq9WL4ePX+LUDmBXMAIAAAAAAjJ2+sX87NSis9hBsgb1QprVRnO7Bf+GObCGoUqyPE4wVjACAAAAAAAoJ4Ufdq45T9Aun5FupHlaBCIUsmUn6dXgV9KorpFikAAzMzAH0AAAAFZAAgAAAAAFWOUGkUpy8yf6gB3dio/aOfRKh7XuhvsUj48iESFJrGBXMAIAAAAAAY7sCDMcrUXvNuL6dO0m11WyijzXZvPIcOKob6IpC4PQVjACAAAAAAO/QFjczoznH95Iu0YEVMsU1GA1yxSGL8bcwSweNzAtkAAzM0AH0AAAAFZAAgAAAAAGUrIdKxOihwNmo6B+aG+Ag1qa0+iqdksHOjQj+Oy9bZBXMAIAAAAABwa5dbI2KmzBDNBTQBEkjZv4sPaeRkRNejcjdVymRFKQVjACAAAAAAUtdJucZoQvvPJiy65RonQrxBCcvtfHslpbgLbtWirCYAAzM1AH0AAAAFZAAgAAAAAOx89xV/hRk64/CkM9N2EMK6aldII0c8smdcsZ46NbP8BXMAIAAAAADBF6tfQ+7q9kTuLyuyrSnDgmrdmrXkdhl980i1KHuGHgVjACAAAAAAVc5njbLX1afpsuG
662U/OBo4LanEk06lKbQtd95fswgAAzM2AH0AAAAFZAAgAAAAAMkN0L1oQWXhjwn9rAdudcYeN8/5VdCKU8cmDt7BokjsBXMAIAAAAAAT62pGXoRwExe9uvgYOI0hg5tOxilrWfoEmT0SMglWJwVjACAAAAAAhNc5ovIaxgtS856ZBfG+cAcHAz+3ewzB9C6zZoaggj4AAzM3AH0AAAAFZAAgAAAAANxfa4xCoaaB7k1C1RoH1LBhsCbN2yEq15BT9b+iqEC4BXMAIAAAAACAX9LV8Pemfw7NF0iB1/85NzM1Ef+1mUfyehacUVgobQVjACAAAAAAc0bIRNzuyc1Y0hkEmm7xV6Xp/LBlAgMq/ISCHr0Tq44AAzM4AH0AAAAFZAAgAAAAAOYIYoWkX7dGuyKfi3XssUlc7u/gWzqrR9KMkikKVdmSBXMAIAAAAABVF2OYjRTGi9Tw8XCAwZWLpX35Yl271TlNWp6N/nROhAVjACAAAAAAyGLbmHhe8eTQC8L2eDRcezUWULLZ9E8DPic7Mfp8/+4AAzM5AH0AAAAFZAAgAAAAAIy0+bXZi10QC+q7oSOLXK5Fee7VEk/qHSXukfeVIfgzBXMAIAAAAAAQ3IIV/JQCHW95AEbH5zGIHtJqyuPjWPMIZ+VmQHlxEwVjACAAAAAAD3azcNDdohU6tHO6ypT7F0rtydAYIMmLIsX6y7cXVkMAAzQwAH0AAAAFZAAgAAAAAL5SOJQ3LOhgdXJ5v086NNeAl1qonQnchObdpZJ1kHeEBXMAIAAAAAA+tEqTXODtik+ydJZSnUqXF9f18bPeze9eWtR7ExZJgQVjACAAAAAAUd8v2RN1WagYan1LbV+G81vcJKQ6HsX2GSBtnzMH7XEAAzQxAH0AAAAFZAAgAAAAAKl8zcHJRDjSjJeV/WvMxulW1zrTFtaeBy/aKKhadc6UBXMAIAAAAADBdWQl5SBIvtZZLIHszePwkO14W1mQ0izUk2Ov21cPNAVjACAAAAAAHvQLJ2X4ncjVv1BsOd/jbVouRPwf4222WlGSzPyAfnsAAzQyAH0AAAAFZAAgAAAAAFvotcNaoKnVt5CBCOPwjexFO0WGWuaIGL6H/6KSau+6BXMAIAAAAAD2y2mBN5xPu5PJoY2zcr0GnQDtHRBogA5+xzIxccE9fwVjACAAAAAA1OntPAxq1NJ4sfca+ysHcg68mj7QHwdEZQVxHMlV6x0AAzQzAH0AAAAFZAAgAAAAAPezU0/vNT4Q4YKbTbaeHqcwNLT+IjW/Y9QFpIooihjPBXMAIAAAAACj2x4O4rHter8ZnTws5LAP9jJ/6kk9C/V3vL50LoFZHAVjACAAAAAAlZd3bPMUXRhTwLWflrQuKIwtauTswenUiaDi2ibHYYYAAzQ0AH0AAAAFZAAgAAAAAMqpayM2XotEFmm0gwQd9rIzApy0X+7HfOhNk6VU7F5lBXMAIAAAAACJR9+q5T9qFHXFNgGbZnPubG8rkO6cwWhzITQTmd6VgwVjACAAAAAAEEOCUo+ihRGl1kuHlabWBWUFyJPAXSXzpQB4od57cMEAAzQ1AH0AAAAFZAAgAAAAANCeyW+3oebaQk+aqxNVhAcT/BZ5nhsTVdKS3tMrLSvWBXMAIAAAAADxRFMDhkyuEc++WnndMfoUMLNL7T7rWoeblcrpSI6soQVjACAAAAAA1KYQISxmBcGmgDMAjBpTcfQr/oci60Kf4QtEYEXPKSQAAzQ2AH0AAAAFZAAgAAAAAIbo5YBTxXM7HQhl7UP9NNgpPGFkBx871r1B65G47+K8BXMAIAAAAAC21dJSxnEhnxO5gzN5/34BL4von45e1meW92qowzb8fQVjACAAAAAAisxsskU/9ykphpSjB6bMMY+6Tbt6wpjhE2dg1IULAgYAAzQ3AH0AAAAFZAAgAAAAABm/6pF96j26Jm7z5KkY1y33zcAEXLx2n0DwC03bs/ixBXMAIAAAAAD01OMvTZI/mqMgxIhA5nLs068mW+GKl3OW3ilf2D8+LgVjACAAAAAAnPa8PTfcaSSAdzNAC54IMTicbShbtt/cSnFHz7u7g8wAAzQ4AH0AAAAFZAAgAAAAAJ/D3+17gaQdkBqkL2wMwccdmCaVOtxzIkM8VyI4xI5zBXMAIAAAAAAggLVmkc5u+YzBR+oNE+XgLVp64fC6MzUb/Ilu/Jsw0AVjACAAAAAAp5qCK8p0eTvAAUQ8fIOGyb2YM1YflQ8H6ggiWZ9vNI4AAzQ5AH0AAAAFZAAgAAAAAJMRyUW50oaTzspS6A3TUoXyC3gNYQoShUGPakMmeVZrBXMAIAAAAACona2Pqwt4U2PmFrtmu37jB9kQ/12okyAVtYa8TQkDiQVjACAAAAAAx/LOuDwvpaPFJeKLowZW/4q7SUsLtdldB27zc4//alEAAzUwAH0AAAAFZAAgAAAAAByuYl8dBvfaZ0LO/81JW4hYypeNmvLMaxsIdvqMPrWoBXMAIAAAAABNddwobOUJzm9HOUD8BMZJqkNCUCqstHZkC76FIdNg9AVjACAAAAAA4SEPY+qEZ2dCu/9485IEVybiM3Z7szXYM+izxklNm14AAzUxAH0AAAAFZAAgAAAAAHEzLtfmF/sBcYPPdj8867VmmQyU1xK9I/3Y0478azvABXMAIAAAAAAcmyFajZPnBTbO+oLInNwlApBocUekKkxz2hYFeSlQ+gVjACAAAAAA15ibZoBP1/pP27BDIfizDYNRDIpOmj8Ie34FvrtqjhMAAzUyAH0AAAAFZAAgAAAAAJam7JYsZe2cN20ZYm2W3v1pisNt5PLiniMzymBLWyMtBXMAIAAAAABxCsKVMZMTn3n+R2L7pVz5nW804r8HcK0mCBw3jUXKXAVjACAAAAAAElCgoMRrtlONqnyosjIpiz4mQTLq3+b/JUCVEq4QFiAAAzUzAH0AAAAFZAAgAAAAAMXrXx0saZ+5gORmwM2FLuZG6iuO2YS+1IGPoAtDKoKBBXMAIAAAAADIQsxCr8CfFKaBcx8kIeSywnGh7JHjKRJ9vJd9x79y7wVjACAAAAAAPzOTJ088k9QdGUpZ1ubUdkw3pTGkNF8bui4a9wqeo08AAzU0AH0AAAAFZAAgAAAAACbzcUD3INSnCRspOKF7ubne74OK9L0FTZvi9Ay0JVDYBXMAIAAAAADPebVQH8Btk9rhBIoUOdSAdpPvz7qIY4UC2i6IGisSAQVjACAAAAAAe4zSAgMQPjtz2t1DgxW1XOlV+fnNMXt6iQ7iV7DgQz4AAzU1AH0AAAAFZAAgAAAAACUc2CtD1MK/UTxtv+8iA9FoHEyTwdl43HKeSwDw2Lp5BXMAIAAAAACCIduIdw65bQMzRYRfjBJj62bc69T4QqH4QoWanwlvowVjACAAAAAA+i6aTnkoixxSOxDgQM/aWTylOy1m0ViCDU5asOM5ieYAAzU2AH0AAAAFZAAgAAAAAHSaHWs/dnmI9sc7nB50VB2Bzs0kHapMHCQdyVEYY30TBXMAIAAAAACkV22lhEjWv/
9/DubfHBAcwJggKI5mIbSK5L2nyqloqQVjACAAAAAA1I5pLbnlFf/EfJlhE0RFIioDjzKrWNh4TO8H/ksrbMcAAzU3AH0AAAAFZAAgAAAAAAL8jhNBG0KXXZhmZ0bPXtfgapJCB/AI+BEHB0eZ3C75BXMAIAAAAADHx/fPa639EBmGV5quLi8IQT600ifiKSOhTDOK19DnzwVjACAAAAAANfWpWWy9iu7pgMEy2aVCggGEtJtXKQG1+3zCf8D6iEkAAzU4AH0AAAAFZAAgAAAAAAxgeclNl09H7HvzD1oLwb2YpFca5eaX90uStYXHilqKBXMAIAAAAACMU5pSxzIzWlQxHyW170Xs9EhD1hURASQk+qkx7K5Y6AVjACAAAAAA/kYlJPSFn2g1l7cy6djV/7wgnlPWphZ0IKSY8uenUF0AAzU5AH0AAAAFZAAgAAAAABmO7QD9vxWMmFjIHz13lyOeV6vHT6mYCsWxF7hb/yOjBXMAIAAAAACT9lmgkiqzuWG24afuzYiCeK9gmJqacmxAruIukd0xEAVjACAAAAAAyiZxtuOZQNSit4wFMJS3jnqLbDzyJ0OtDTKs6r0EO0cAAzYwAH0AAAAFZAAgAAAAAB89SjLtDJkqEghRGyj6aQ/2qvWLNuMROoXmzbYbCMKMBXMAIAAAAAC8sywgND+CjhVTF6HnRQeay8y9/HnVzDI42dEPah28LQVjACAAAAAAzBj8q9Ig4uhIc6rhNDVX1lII9lzfhJ0+ig8viBhdrtQAAzYxAH0AAAAFZAAgAAAAABN2alGq9Aats1mwERNGwL/fIwZSvVCe9/8XMHTFlpUpBXMAIAAAAACuDPjJgvvbBYhbLpjMiWUCsVppiYrhvR+yMysNPN8cZAVjACAAAAAAqcsLIvb3Pe/QKYB9wQ86Ye2OFSAJEIY4ve7LMM3SwGMAAzYyAH0AAAAFZAAgAAAAAL8YB6VAqGBiWD4CBv16IBscg5J7VQCTZu87n6pj+86KBXMAIAAAAAAmxm8e68geeyAdUjSMWBHzUjneVB0pG9TBXIoE6467hAVjACAAAAAA9Kd65vKIUckNPvxl/HHNPJVnFHXyErp4qibmD7BloukAAzYzAH0AAAAFZAAgAAAAAL4yLkCTV5Dmxa5toBu4JT8ge/cITAaURIOuFuOtFUkeBXMAIAAAAAAXoFNQOMGkAj7qEJP0wQafmFSXgWGeorDVbwyOxWLIsgVjACAAAAAAaFrBx0xXIryTe7V/kX+jfPHd0057x7k7MxzDAzi0RpcAAzY0AH0AAAAFZAAgAAAAAEU8DoUp46YtYjNFS9kNXwdYxQ9IW27vCTb+VcqqfnKNBXMAIAAAAADe7vBOgYReE8X78k5ARuUnv4GmzPZzg6SbConf4L2G3wVjACAAAAAAOl/tartywd/fJj5DNRsVH/ml9tJ8KkkCbKObsFe8lHcAAzY1AH0AAAAFZAAgAAAAAPa4yKTtkUtySuWo1ZQsp2QXtPb5SYqzA5vYDnS1P6c0BXMAIAAAAADKnF58R1sXlHlsHIvCBR3YWW/qk54z9CTDhZydkD1cOQVjACAAAAAAfn/jP8K3QUILngCNkydHARyBvBHIFdaJjzV0EXsFruMAAzY2AH0AAAAFZAAgAAAAAN5WJnMBmfgpuQPyonmY5X6OdRvuHw4nhsnGRnFAQ95VBXMAIAAAAACwftzu7KVV1rmGKwXtJjs3cJ1gE3apr8+N0SAg1F2cHwVjACAAAAAAf0XyQQLd/HIYaf9EeAV0o2h12K1AV5piLCpZihznBXoAAzY3AH0AAAAFZAAgAAAAACHvDsaPhoSb6DeGnKQ1QOpGYAgK82qpnqwcmzSeWaJHBXMAIAAAAABRq3C5+dOfnkAHM5Mg5hPB3O4jhwQlBgQWLA7Ph5bhgwVjACAAAAAAm2Ukk5kkQp+PDpBCCefQOqFKKZie4hINim3yvtypsEAAAzY4AH0AAAAFZAAgAAAAAOBePJvccPMJmy515KB1AkXF5Pi8NOG4V8psWy0SPRP+BXMAIAAAAAB3dOJG9xIDtEKCRzeNnPS3bFZepMj8UKBobKpSoCPqpgVjACAAAAAAnu33SngYPOtRgdJ3aBBuxWn80ti3OO6nZJcI6eh0VfQAAzY5AH0AAAAFZAAgAAAAABUvRrDQKEXLMdhnzXRdhiL6AGNs2TojPky+YVLXs+JnBXMAIAAAAAD1kYicbEEcPzD4QtuSYQQWDPq8fuUWGddpWayKn3dT9QVjACAAAAAAUc5dxcbNYY3qMO8+Xm2xis+pH9NjPLrHqHenwDEImAEAAzcwAH0AAAAFZAAgAAAAAOvSnpujeKNen4pqc2HR63C5s5oJ1Vf4CsbKoYQvkwl5BXMAIAAAAACw2+vAMdibzd2YVVNfk81yXkFZP0WLJ82JBxJmXnYE+QVjACAAAAAAnmej008JFQAgdImjk26PYvqdATjRYRufLi+vEVVwtucAAzcxAH0AAAAFZAAgAAAAAAo0xfGG7tJ3GWhgPVhW5Zn239nTD3PadShCNRc9TwdNBXMAIAAAAADZh243oOhenu0s/P/5KZLBDh9ADqKHtSWcXpO9D2sIjgVjACAAAAAAhpNHI4+aRdWHCvZfEjlgpRBz36g05wN9p/hj3NEwc7EAAAVlACAAAAAA65pz95EthqQpfoHS9nWvdCh05AV+OokP7GUaI+7j8+wSY20AAAAAAAAAAAAAEHBheWxvYWRJZAAAAAAAEGZpcnN0T3BlcmF0b3IAAQAAAAA=", + "subType": "06" + } + } + } + }, + "u": { + "$set": { + "encryptedDouble": { + "$$type": "binData" + } + } + } + } + ], + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDouble", + "bsonType": "double", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + } + }, + "deleteTokens": { + "default.default": { + "encryptedDouble": { + "e": { + "$binary": { + "base64": 
"65pz95EthqQpfoHS9nWvdCh05AV+OokP7GUaI+7j8+w=", + "subType": "00" + } + }, + "o": { + "$binary": { + "base64": "noN+05JsuO1oDg59yypIGj45i+eFH6HOTXOPpeZ//Mk=", + "subType": "00" + } + } + } + } + } + }, + "$db": "default" + } + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 0, + "encryptedDouble": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "5nRutVIyq7URVOVtbE4vM01APSIajAVnsShMwjBlzkM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6YrBn2ofIw1b5ooakrLOwF41BWrps8OO0H9WH4/rtlE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "n+XAuFnP8Dov9TnhGFxNx0K/MnVM9WbJ7RouEu0ndO0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yRXojuVdn5GQtD97qYlaCL6cOLmZ7Cvcb3wFjkLUIdM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "DuIkdRPITRs55I4SZmgomAHCIsDQmXRhW8+MOznkzSk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SsBk+Et1lTbU+QRPx+xyJ/jMkmfG+QCvQEpip2YYrzA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "crCIzOd8KhHvvUlX7M1v9bhvU4pLdTc+X2SuqoKU5Ek=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "YOWdCw4UrqnxkAaVjqmC4sKQDMVMHEpFGnlxpxdaU6E=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "M3SShp81Ff8tQ632qKbv9MUcN6wjDaBReI0VXNu6Xh4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "gzHlSPxpM0hT75kQvWFzGlOxKvDoiKQZOr19V6l2zXI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "s3JnppOGYw9SL2Q1kMAZs948v2F5PrpXjGei/HioDWs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cG6+3Gk/zEH68P/uuuwiAUVCuyJwa1LeV+t29FlPPAo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dupdvR3AyJtM+g9NDKiaLVOtGca387JQp8w+V03m7Ig=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JqEQc5svj2jTvZ6LLA5ivE+kTb/0aRemSEmxk4G7Zrg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "szcXXXKnob+p3SoM4yED2R920LeJ7cVsclPMFTe4CeI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "o1QoGVXmuBdHwHm7aCtGMlMVKrjFdYvJXpoq6uhIAZ0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Jfm5wPlqqLCJRGQIqRq2NGmpn7s0Vrih2H3YAOoI2YU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zMHLb8ARbsYo8Ld05bqnGFf1Usha6EGb8QKwdSAyps0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yQdtq9lh5pugL7/i0Bj/PuZUUBUIzf+7wj1rl5y736w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wGWVZdO7qIuyDg/BqDgqjgoQ02h5YYgwXQB1oCin2NE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "by9HMLj6NTEpgztZ5HSN6GxImkXPcaFINYDzgZY33X8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tWo0vbasi7bXmn/MsOx13VC1IsWtpx/nYp0uj4iMzdA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tQQpndUYd5O87lOtrGjH3wl9VsOK0ray7RMasL90sBM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cQjXEDCMsOpKLLf+vlTgIHA+cbSJdzqhbSX9Wvh95aA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "7yMpU48IxK9SzP2cx3VnTownGEwFmeFofuuFT97SuuY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kSOx1kz0CmBgzKQHZlo65ZUY1DIv9A99JRm+Us2y6Ew=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ubQpdPBe6/xvtr+AcXdfYLSvYCR4ot0tivehkCsupb4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xal+iCJ6FTefRQToyoNksc9NCZShyn04NDGi4IYrcoM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "d7jU4iOK50xHxlkSifcxlZFCM46TSgQzoYivxG3HNLY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tJvl2nsBLBVzL3pp6sKWCL4UXeh3q/roYBJjSb74ve0=", + "subType": "00" + } + }, + 
{ + "$binary": { + "base64": "OIUCaKRvIx9t1w6Hxlz1IcQTdPNCfdRNwnnTm10W+X0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "A9tvzsiElotOUVIB4CqfQp9mAwqvTM35YkmAR170aHA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "lI8gpK7hpb7c9x4RQugsxMnQay5LZJmwslZdvMx/dcE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dNCzh40U0XvdKnSDi3HRQOWQftEsDVqc4uUvsVFGoq8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "IP+iwEBWBwVVZIdpaMu8k5+soFCz+TZkYn3drKZ9grE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pnqyh6e0y5svHkJDShlN9CHV0WvMBE4QbtJpQw5ZCXc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "elEl42tbVDoRTLjAhZUFEtXiut4b3PVhg/1ZLZSQdtE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "vHuu2FxwclMHqyE6JBYbTYgbEkB0dqb/JuaxsvfwsmY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xTf7NCe3Gf8QpE78HR5OknlLTKfs9J+RN9UZpH6fnso=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "XiWSasRnJAulGR6+LCVD3mwRObXylqYWR9jvpywq12c=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "MZMxEQ5ikx0PG1YFIExv0UnTZogsvgeOEZTpzvBDn4w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yZMyMZBDrWbAhvnic7vvIYhmO9m5H2iuv0c8KNZrBzY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xxM14hTPY5j0vvcK2C7YAEjzdsfUTFHozHC0hEo1bxI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+01rqR1xVwkpGXcstbk1ItJqFVjH6Q8MGxEN3Cm9Y1A=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xOpLV0Z2VTRJ3iWtnWZcsyjXubTIkYWo31cO+HV1o1k=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "BWUOLqgLBqc5NwxVlSV5H3KFQPXbCp7mdo+jF+8cJqY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "fuQb1S6xZDGlrEbK+kI23aL53PP1PVNwqICnZNt9Yzg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SfscnoibFttahLdPVC4Ee+47ewGFKpDSU7M6HX19bKE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "rpSW2awybNVeKtat91VFxqbINoTfNhPfQAu+d73Xtf8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "9M/CP9ccOIIj2LLFmE0GFDO0Ban2wsNalEXfM6+h+1s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "WrEMG49l1ye4MhXs5ZS9tz8P6h+hDvthIg/2wW9ne1Q=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ImNhbfeyfH8qIEeA5ic0s3dAQBdzzTBS+CPsNih9vZ0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dWP33YDSn04UKJN2ogh2Rui0iW/0q2y18OCDRVcfyoo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "lYv0isAtfGh6H9tdp3cp2eHU7q2J+uk7QrgcxtK3w7Y=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "VGMoamB/+7zTOYcY/pqJc96xlv2PdW4hwsIAEIslTDQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yNeBWMF7BnD9wVwz2PgJsvWr77QiVvvWUvJF0+fqBug=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SfpvObJ+tJBXSvqeN7vlOfmhYign635lciYAJIjUtY8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dsen4NqjzVGjpjufiTMs3+gqeD09EbnuogPgxrJECwg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pxCWVM3sn19NsFEpgHbgLa+PmYlhN3mMiP0Wk8kJhYw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q11KNvJszjYIB9n9HcC+N4uz11a3eRj1L3BH9scKMDQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "A1PmkgcEToWh1JiVWE6mI5jUu7poxWWuCUt/cgRUUDc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "qJo3Hu4PJeanL7XEaWXO/n3YsodhZyd+MJOOmB9Kpd8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "BkBKLO8URFscfRY9Bav/1+L9mLohDgNr/MkZtGiraIs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": 
"rZq5WA3Hx3xthOyHAJXK//f8pE2qbz7YKu3TIMp9GFY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "X07a/Lm80p5xd4RFs1dNmw+90tmPDPdGiAKVZkxd4zY=", + "subType": "00" + } + } + ] + }, + { + "_id": 1, + "encryptedDouble": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "DLCAJs+W2PL2DV5YChCL6dYrQNr+j4p3L7xhVaub4ic=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "HI88j1zrIsFoijIXKybr9mYubNV5uVeODyLHFH4Ueco=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wXVD/HSbBljko0jJcaxJ1nrzs2+pchLQqYR3vywS8SU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "KhscCh+tt/pp8lxtKZQSPPUU94RvJYPKG/sjtzIa4Ws=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "RISnuNrTTVNW5HnwCgQJ301pFw8DOcYrAMQIwVwjOkI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Ra5zukLh2boua0Bh74qA+mtIoixGXlsNsxiJqHtqdTI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "eqr0v+NNWXWszi9ni8qH58Q6gw5x737tJvH3lPaNHO4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "d42QupriWIwGrFAquXNFi0ehEuidIbHLFZtg1Sm2nN8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "2azRVxaaTIJKcgY2FU012gcyP8Y05cRDpfUaMnCBaQU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "3nlgkM4K/AAcHesRYYdEu24UGetHodVnVfHzw4yxZBM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "hqy91FNmAAac2zUaPO6eWFkx0/37rOWGrwXN+fzL0tU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "akX+fmscSDSF9pB5MPj56iaJPtohr0hfXNk/OPWsGv8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "1ZvUb10Q7cN4cNLktd5yNjqgtawsYnkbeVBZV6WuY/I=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "otCwtuKiY4hCyXvYzXvo10OcnzZppebo38KsAlq49QM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Mty8EscckeT/dhMfrPFyDbLnmMOcYRUQ3mLK4KTu6V8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tnvgLLkJINO7csREYu4dEVe1ICrBeu7OP+HdfoX3M2E=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kOefsHgEVhkJ17UuP7Dxogy6sAQbzf1SFPKCj6XRlrQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "F+JQ79xavpaHdJzdhvwyHbzdZJLNHAymc/+67La3gao=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "NCZ9zp5rDRceENuSgAfTLEyKg0YgmXAhK0B8WSj7+Pw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wL1CJ7cYR5slx8mHq++uMdjDfkt9037lQTUztEMF56M=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "txefkzTMITZE+XvvRFZ7QcgwDT/7m8jNmxRk4QBaoZI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "jFunW3v1tSYMyZtQQD28eEy9qqDp4Kqo7gMN29N4bfQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "QMO915KUiS3X3R1bU1YoafVM2s0NeHo3EjgTA9PnGwY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "nwdKJEXdilzvb7494vbuDJ+y6SrfJahza1dYIsHIWVI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "vpWMX+T/VXXajFo0UbuYjtp0AEzBU0Y+lP+ih2EQ7mg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "1lmzG0J1DhKDRhhq5y5Buygu4G8eV2X0t7kUY90EohM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SiKqpXqO0trwhFvBWK274hMklpCgMhNs/JY84yyn/NE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "7cPGPYCKPTay+ZR9Gx6oOueduOgaFrSuAXmNDpDHXdI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "4THEYvAkjs2Fh7FIe5LC45P4i4N0L7ob67UOVbhp6Nk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "B+UGsChLLZR7iqnt8yq91OgmTgwiUKTJhFxY4NT0O6c=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "X1uYwBCsCg1H+PnKdwtBqXlt0zKEURi8bOM940GcPfk=", + "subType": "00" 
+ } + }, + { + "$binary": { + "base64": "xYOgT5l7shlNXCwHlguovmDkcEnF8dXyYlTyYrgZ8GE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "vFMTZqV8bh1+gcKzTkXweMddJlgdUnwX0DWzUUaMok4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "4HI0y9FrtleZxZ7M6INdNhLelrQ2Rv/+ykWCBl+tMC8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "jpJ0bBE474OUkn1vUiLWumIBtYmwc7J5+LQU/nyeLQc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "jQTPeXZvdxY/DjtPfYfKUArIDsf0E9MVFy2O26sv1ec=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "QLLto0ExR2ZYMGqlyaMZc/hXFFTlwmgtKbiVq/xJIeI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yBJNviU1nchbGbhx6InXCVRXa90sEepz1EwbYuKXu2U=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "jpEf0vHxrPu9gTJutNXSi2g/2Mc4WXFEN7yHonZEb7A=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "E09kLFckMYwNuhggMxmPtwndyvIAx+Vl+b2CV6FP75s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "N+ue6/cLPb5NssmJCCeo18LlbKPz6r2z20AsnTKRvOo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yVQNZP8hhsvNGyDph2QP2qTNdXZTiIEVineKg+Qf33o=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cSC9uI+9c5S8X+0G7amVyug1p0ZlgBsbEDYYyezBevQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "1NpZGjoQzuQtekj80Rifxe9HbE08W07dfwxaFHaVn84=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "5Ghuq/8l11Ug9Uf/RTwf9On3OxOwIXUcb9soiy4J7/w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "0LWKaEty6ywxLFhDaAqulqfMnYc+tgPfH4apyEeKg80=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "OwSthmCBtt6NIAoAh7aCbj82Yr/+9t8U7WuBQhFT3AQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "iYiyg6/1isqbMdvFPIGucu3cNM4NAZNtJhHpGZ4eM+c=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "waBgs8jWuGJPIF5zCRh6OmIyfK5GCBQgTMfmKSR2wyY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "1Jdtbe2BKJXPU2G9ywOrlODZ/cNYEQlKzAW3aMe1Hy4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xaLEnNUS/2ySerBpb9dN/D31t+wYcKekwTfkwtni0Mc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "bIVBrOhOvr6cL55Tr24+B+CC9MiG7U6K54aAr2IXXuw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6Cdq5wroGu2TEFnekuT7LhOpd/K/+PcipIljcHU9QL4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "K5l64vI4S/pLviLW6Pl0U3iQkI3ge0xg4RAHcEsyKJo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "bzhuvZ0Ls22yIOX+Hz51eAHlSuDbWR/e0u4EhfdpHbc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Qv+fr6uD4o0bZRp69QJCFL6zvn3G82c7L+N1IFzj7H0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "XAmISMbD3aEyQT+BQEphCKFNa0F0GDKFuhM9cGceKoQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "4VLCokntMfm1AogpUnYGvhV7nllWSo3mS3hVESMy+hA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xiXNLj/CipEH63Vb5cidi8q9X47EF4f3HtJSOH7mfM8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "4XlCYfYBjI9XA5zOSgTiEBYcZsdwyXL+f5XtH2xUIOc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "k6DfQy7ZYJIkEly2B5hjOZznL4NcgMkllZjJLb7yq7w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ZzM6gwWesa3lxbZVZthpPFs2s3GV0RZREE2zOMhBRBo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "US+jeMeeOd7J0wR0efJtq2/18lcO8YFvhT4O3DeaonQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "b6iSxiI1FM9SzxuG1bHqGA1i4+3GOi0/SPW00XB4L7o=", + "subType": "00" + } + }, + { + "$binary": { + "base64": 
"kn3LsxAVkzIZKK9I6fi0Cctr0yjXOYgaQWMCoj4hLpM=", + "subType": "00" + } + } + ] + } + ] + } + } + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2-Range-DoublePrecision-Aggregate.json b/test/client-side-encryption/spec/legacy/fle2-Range-DoublePrecision-Aggregate.json new file mode 100644 index 0000000000..f2ea49ad75 --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2-Range-DoublePrecision-Aggregate.json @@ -0,0 +1,586 @@ +{ + "runOn": [ + { + "minServerVersion": "6.2.0", + "topology": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "encrypted_fields": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoublePrecision", + "bsonType": "double", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDouble": "0.0" + }, + "max": { + "$numberDouble": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + }, + "key_vault_data": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ], + "tests": [ + { + "description": "FLE2 Range DoublePrecision. 
Aggregate.", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDoublePrecision": { + "$gt": { + "$numberDouble": "0" + } + } + } + } + ] + }, + "result": [ + { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1" + } + } + ] + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedDoublePrecision": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoublePrecision", + "bsonType": "double", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDouble": "0.0" + }, + "max": { + "$numberDouble": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedDoublePrecision": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoublePrecision", + "bsonType": "double", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDouble": "0.0" + }, + "max": { + "$numberDouble": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "aggregate": "default", + "pipeline": [ + { + "$match": { + "encryptedDoublePrecision": { + "$gt": { + "$binary": { + "base64": 
"CvoJAAADcGF5bG9hZADKCQAABGcAjgkAAAMwAH0AAAAFZAAgAAAAAHdJ2Vnb4MMzqVYVssjSdDy8XU4GVzMTfGifGETgQ2mYBXMAIAAAAAD7cFfKJGIXo6PjyeX2ria02CckW7dWFDoY/3FyBdm1NQVjACAAAAAAqZO+/+gRWlPaMOvuiXizSmBe7lp1VWg1vJ4UmW8o3bQAAzEAfQAAAAVkACAAAAAAlmvfDrZoydUet4eCVMq7z6a58Ea+1HLJOWxN5lNcrWEFcwAgAAAAAEBo5AWZyC41b9ayjWNQSL4iYEAIwR/JG+ssN8bdoK9RBWMAIAAAAAD4FTKJ6CTzKBAyAwZCLUoDEfnZTRZmhF1q/2hnDzmG9gADMgB9AAAABWQAIAAAAABbiLaoxAA6rinMJw1hC8ZUiq6UU1AQaPFn/py/Y06WuQVzACAAAAAAhtDasFkvYE7SCNu1je/hxdE9TJtAvvH3NtdEbKzNbCUFYwAgAAAAAHHy019aPatHTST+0wGsmukUcsQNQj6KpoS9b7iGeThAAAMzAH0AAAAFZAAgAAAAALORWwSr+tYNxcil2KIGSbNhTHvcPbdj+rLVQNx21S/KBXMAIAAAAAD6diZBkPEJ1cQy06LAxdbNK8Nlxbb44fH4Wk3Y3260nQVjACAAAAAAvUc1q7pyhjU0ilgmwiKkHIY3V4/LxO+Y2uT7eSpBOs8AAzQAfQAAAAVkACAAAAAAG5XMK96PjClNlUvg82j4pMY1YxsznZfj4uNweD394FoFcwAgAAAAAKHgQLdGJHkrfFg9nB93Ac+3VgBw6aU44MTkKIQ91dZoBWMAIAAAAACtbNc1DCoUUyzlkrYmJi4NlwOqLYmb6au4pDc8clXVXwADNQB9AAAABWQAIAAAAAA6M++b9I0YFemmWBAWAE3glu2Ah3Ta1FBxAQEIWS0toAVzACAAAAAANXYTqPf1Y6X3Ns6YQIX0C3FKCyWUo+Kk+fNcQvc0WSoFYwAgAAAAAAaqju6Dv8wqXxcsIbP67V1QGaD5kNTFofZ9Zuf1LGnKAAM2AH0AAAAFZAAgAAAAAKl8bV1riH/uyJ+X0HHd3+18k2cJl2dQFXCdoagutFcaBXMAIAAAAABm8F2Ew9f0VOABdcF+lP0Bi+zWvEUPniWgrxPq/Sx3uwVjACAAAAAAQd2pWVqlmmLg8m8xbs7yLewmR0Z6UQgXofbCsMHaGSoAAzcAfQAAAAVkACAAAAAAsYZD8JEP6kYsPncFnNZwJxhu4YtUTKPNcjHtv67H+rYFcwAgAAAAAI4LqZcRkvbs/2F62Flu0pixNcor4WmBD0DHGaf039wLBWMAIAAAAAAqzpfyBpr4Ano+nFWJyyTuIJelJgiRDnMHQqdeqV8JaAADOAB9AAAABWQAIAAAAAC0vdAi+dmoIXvZ5LqUqvyKV9/tHqSI2SWiSJO5pTnA2wVzACAAAAAAS2qvf9fvfVUH5WtsVxjxmskpGjYTQV34LwvQQw1y9wIFYwAgAAAAAFhNY4qwNntyA+GIoNHZsTkIUbPgy4TBlvNnTPjp4bMFAAM5AH0AAAAFZAAgAAAAAH31lb/srBcrOXkzddCwAnclsR5/3QijEVgECs2JjOWBBXMAIAAAAABg7+prDT73YcCvLE5QbuIrqGcjLc5pQD2Miq0d29yrxgVjACAAAAAAFKqAqXG/ktejFQ7fM2aobO2VmEvZLXnRaJH97Jy/sJYAAzEwAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVjACAAAAAAfsofSP7nQHv8ic8ZW0aNlWxplS46Z+mywPR4rQk+wcgAAzExAH0AAAAFZAAgAAAAAFdthRhe2Q8CvxGIhjTJZv0Lk97GkHciTPxZ/mckLoNaBXMAIAAAAAAqOxsAr23LOVB0DIHbPf9UDJJRFXY2YoKbjhRqw5psbQVjACAAAAAA7ty+Nif6KjS3v1zWKaHX9n4Zj3XC4ajuCduKNIYr3l8AAzEyAH0AAAAFZAAgAAAAAMWymwwbvIeMqmnKWWifUqoCxOsdpnonM2qdLPyjqJO/BXMAIAAAAAB6IDmmpUhBD2zpRj8/y/kmOSXcjuIU14sNh6GKSsg2uwVjACAAAAAABSWO0Ii+NGcsHZQ9MR5EjPXVKeXlI4FQ1pcxeKDiuooAAzEzAH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVjACAAAAAAD51NYesiO4Fo7w7iWBfqAFxEqkaDVctpvzZ28nT4SE8AAzE0AH0AAAAFZAAgAAAAAJaRYmo8zqI2BEUzdSwp4tVRpPmVWsfydkYN3UHh6TMuBXMAIAAAAAAeD6mDnQeLlbC9i0sVgE8+RH6y+e94OJQ0tJ0PvblVSgVjACAAAAAAKUhYSt4nvvUfbNgPJ2E79SciVZ0ZzbzoZ2nKr4ewNLsAAzE1AH0AAAAFZAAgAAAAAAZZ538coNPwyRjhEwr5P8Xw32oWOJF+R+nfCGgy2qO3BXMAIAAAAACOPLnJlKwGNPDBReRKnHfteq0wFb3ezhrc7BVXs8RUHwVjACAAAAAAzCICkPZAkfTiD0MUt155dIPgLJ4/e0qFTM2FR0U261YAAzE2AH0AAAAFZAAgAAAAAFH9l9GGA1I52atJV5jNUf1lx8jBjoEoVoME97v5GFJiBXMAIAAAAAC1qH3Kd78Dr9NGbw7y9D/XYBwv5h1LLO8la5OU7g8UkQVjACAAAAAAn27H0Mpwatgc1R/4nXSRjsG2PzB0ol5YR9f3mCb2y/0AAzE3AH0AAAAFZAAgAAAAADtbVEI2tdkrowEMdkacD2w0Y3T3Ofi7PH6HmA6sP0c/BXMAIAAAAADuBSROnZHA+NgUPH8d0LnWFiDsM2bY8bzjC1+elSsIygVjACAAAAAAMinHEu4wkbeOpdZbXQ94q5o5pIEubqXUDrTRYOGmJC0AAzE4AH0AAAAFZAAgAAAAAAh3WpeMVlikPFYj9hLj+fmIqVt6omCSF75W3TPExyWpBXMAIAAAAAAsQkRmwqeVj2gGE03orb6PtrIzDt6dDU3hgSQi8E2wKgVjACAAAAAAvlZo8Qj3eAdxzZxN5sHKhxi+a9Npj7cZC5+pE6qrOawAAAVlACAAAAAA65pz95EthqQpfoHS9nWvdCh05AV+OokP7GUaI+7j8+wSY20AAAAAAAAAAAAAEHBheWxvYWRJZAAAAAAAEGZpcnN0T3BlcmF0b3IAAQAAAAA=", + "subType": "06" + } + } + } + } + } + ], + "cursor": {}, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + 
"escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoublePrecision", + "bsonType": "double", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDouble": "0.0" + }, + "max": { + "$numberDouble": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + } + } + } + }, + "command_name": "aggregate" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 0, + "encryptedDoublePrecision": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "5nRutVIyq7URVOVtbE4vM01APSIajAVnsShMwjBlzkM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Dri0CXmL78L2DOgk9w0DwxHOMGMzih7m6l59vgy+WWo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "x7GR49EN0t3WXQDihkrbonK7qNIBYC87tpL/XEUyIYc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JfYUqWF+OoGjiYkRI4L5iPlF+T1Eleul7Fki22jp4Qc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q1RyGfIgsaQHoZFRw+DD28V26rN5hweApPLwExncvT8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "L2PFeKGvLS6C+DLudR6fGlBq3ERPvjWvRyNRIA2HVb0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CWxaNqL3iP1yCixDkcmf9bmW3E5VeN8TJkg1jJe528s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+vC6araOEo+fpW7PSIP40/EnzBCj1d2N10Jr3rrXJJM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6SV63Mf51Z6A6p2X3rCnJKCu6ku3Oeb45mBYbz+IoAo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "RjBYT2h3ZAoHxhf8DU6/dFbDkEBZp0IxREcsRTu2MXs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "b7d8mRzD1kI1tdc7uNL+YAUonJ6pODLsRLkArfEKSkM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Xg8C1/A0KJaXOw4i+26Rv03/CydaaunOzXh0CIT+gn8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "UoKUDw2wJYToUCcFaIs03YQSTksYR0MIOTJllwODqKc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "c/5cwAT0C5jber2xlJnWD3a5tVDy0nRtr5HG02hoFOY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wSUrRXavAGaajNeqC5mEUH1K67oYl5Wy9RNIzKjwLAM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6vrp4wWDtHEgHWR99I70WVDzevg1Fk/Pw5U8gUDa0OU=", + "subType": "00" + } + } + ] + }, + { + "_id": 1, + "encryptedDoublePrecision": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "bE1vqWj3KNyM7cCYUv/cnYm8BPaUL3eMp5syTHq6NF4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "mVZb+Ra0EYjQ4Zrh9X//E2T8MRj7NMqm5GUJXhRrBEI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "FA74j21GUEJb1DJBOpR9nVnjaDZnd8yAQNuaW9Qi26g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kJv//KVkbrobIBf+QeWC5jxn20mx/P0R1N6aCSMgKM8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zB+Whi9IUUGxfLEe+lGuIzLX4LFbIhaIAm5lRk65QTc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ybO1QU3CgvhO8JgRXH+HxKszWcpl5aGDYYVa75fHa1g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "X3Y3eSAbbMg//JgiHHiFpYOpV61t8kkDexI+CQyitH4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SlNHXyqVFGDPrX/2ppwog6l4pwj3PKda2TkZbqgfSfA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "McjV8xwTF3xI7863DYOBdyvIv6UpzThl6v9vBRk05bI=", + "subType": "00" + } + }, + { + "$binary": { + 
"base64": "MgwakFvPyBlwqFTbhWUF79URJQWFoJTGotlEVSPPUsQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "DyBERpMSD5lEM5Nhpcn4WGgxgn/mkUVJp+PYSLX5jsE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "I43iazc0xj1WVbYB/V+uTL/tughN1bBlxh1iypBnNsA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wjOBa/ATMuOywFmuPgC0GF/oeLqu0Z7eK5udzkTPbis=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "gRQVwiR+m+0Vg8ZDXqrQQcVnTyobwCXNaA4BCJVXtMc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "WUZ6huwx0ZbLb0R00uiC9FOJzsUocUN8qE5+YRenkvQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "7s79aKEuPgQcS/YPOOVcYNZvHIo7FFsWtFCrnDKXefA=", + "subType": "00" + } + } + ] + } + ] + } + } + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2-Range-DoublePrecision-Correctness.json b/test/client-side-encryption/spec/legacy/fle2-Range-DoublePrecision-Correctness.json new file mode 100644 index 0000000000..e69d912694 --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2-Range-DoublePrecision-Correctness.json @@ -0,0 +1,1650 @@ +{ + "runOn": [ + { + "minServerVersion": "6.2.0", + "topology": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "encrypted_fields": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoublePrecision", + "bsonType": "double", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDouble": "0.0" + }, + "max": { + "$numberDouble": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + }, + "key_vault_data": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ], + "tests": [ + { + "description": "Find with $gt", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDoublePrecision": { + "$gt": { + "$numberDouble": "0.0" + } + } + } + }, + "result": [ + { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + ] + } + ] + }, + { + "description": "Find with $gte", + "clientOptions": { + "autoEncryptOpts": { + 
"kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDoublePrecision": { + "$gte": { + "$numberDouble": "0.0" + } + } + }, + "sort": { + "_id": 1 + } + }, + "result": [ + { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + }, + { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + ] + } + ] + }, + { + "description": "Find with $gt with no results", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDoublePrecision": { + "$gt": { + "$numberDouble": "1.0" + } + } + } + }, + "result": [] + } + ] + }, + { + "description": "Find with $lt", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDoublePrecision": { + "$lt": { + "$numberDouble": "1.0" + } + } + } + }, + "result": [ + { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + } + ] + } + ] + }, + { + "description": "Find with $lte", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDoublePrecision": { + "$lte": { + "$numberDouble": "1.0" + } + } + }, + "sort": { + "_id": 1 + } + }, + "result": [ + { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + }, + { + "_id": 1, + 
"encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + ] + } + ] + }, + { + "description": "Find with $lt below min", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDoublePrecision": { + "$lt": { + "$numberDouble": "0.0" + } + } + } + }, + "result": { + "errorContains": "must be greater than the range minimum" + } + } + ] + }, + { + "description": "Find with $gt above max", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDoublePrecision": { + "$gt": { + "$numberDouble": "200.0" + } + } + } + }, + "result": { + "errorContains": "must be less than the range max" + } + } + ] + }, + { + "description": "Find with $gt and $lt", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDoublePrecision": { + "$gt": { + "$numberDouble": "0.0" + }, + "$lt": { + "$numberDouble": "2.0" + } + } + } + }, + "result": [ + { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + ] + } + ] + }, + { + "description": "Find with equality", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + } + }, + "result": [ + { + 
"_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + } + ] + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + }, + "result": [ + { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + ] + } + ] + }, + { + "description": "Find with full range", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDoublePrecision": { + "$gte": { + "$numberDouble": "0.0" + }, + "$lte": { + "$numberDouble": "200.0" + } + } + }, + "sort": { + "_id": 1 + } + }, + "result": [ + { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + }, + { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + ] + } + ] + }, + { + "description": "Find with $in", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDoublePrecision": { + "$in": [ + { + "$numberDouble": "0.0" + } + ] + } + } + }, + "result": [ + { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + } + ] + } + ] + }, + { + "description": "Insert out of range", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "-1" + } + } + }, + "result": { + "errorContains": "value must be greater than or equal to the minimum value" + } + } + ] + }, + { + "description": "Insert min and max", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 200, + "encryptedDoublePrecision": { + "$numberDouble": "200.0" + } + } + } + }, + { + "name": "find", + "arguments": { + 
"filter": {}, + "sort": { + "_id": 1 + } + }, + "result": [ + { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + }, + { + "_id": 200, + "encryptedDoublePrecision": { + "$numberDouble": "200.0" + } + } + ] + } + ] + }, + { + "description": "Aggregate with $gte", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDoublePrecision": { + "$gte": { + "$numberDouble": "0.0" + } + } + } + }, + { + "$sort": { + "_id": 1 + } + } + ] + }, + "result": [ + { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + }, + { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + ] + } + ] + }, + { + "description": "Aggregate with $gt with no results", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDoublePrecision": { + "$gt": { + "$numberDouble": "1.0" + } + } + } + } + ] + }, + "result": [] + } + ] + }, + { + "description": "Aggregate with $lt", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDoublePrecision": { + "$lt": { + "$numberDouble": "1.0" + } + } + } + } + ] + }, + "result": [ + { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + } + ] + } + ] + }, + { + "description": "Aggregate with $lte", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": 
"0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDoublePrecision": { + "$lte": { + "$numberDouble": "1.0" + } + } + } + }, + { + "$sort": { + "_id": 1 + } + } + ] + }, + "result": [ + { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + }, + { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + ] + } + ] + }, + { + "description": "Aggregate with $lt below min", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDoublePrecision": { + "$lt": { + "$numberDouble": "0.0" + } + } + } + } + ] + }, + "result": { + "errorContains": "must be greater than the range minimum" + } + } + ] + }, + { + "description": "Aggregate with $gt above max", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDoublePrecision": { + "$gt": { + "$numberDouble": "200.0" + } + } + } + } + ] + }, + "result": { + "errorContains": "must be less than the range max" + } + } + ] + }, + { + "description": "Aggregate with $gt and $lt", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDoublePrecision": { + "$gt": { + "$numberDouble": "0.0" + }, + "$lt": { + "$numberDouble": "2.0" + } + } + } + } + ] + }, + "result": [ + { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + ] + } + ] + }, + { + "description": "Aggregate with equality", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": 
"Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + } + } + ] + }, + "result": [ + { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + } + ] + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + } + ] + }, + "result": [ + { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + ] + } + ] + }, + { + "description": "Aggregate with full range", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDoublePrecision": { + "$gte": { + "$numberDouble": "0.0" + }, + "$lte": { + "$numberDouble": "200.0" + } + } + } + }, + { + "$sort": { + "_id": 1 + } + } + ] + }, + "result": [ + { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + }, + { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + ] + } + ] + }, + { + "description": "Aggregate with $in", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDoublePrecision": { + "$in": [ + { + "$numberDouble": "0.0" + } + ] + } + } + } + ] + }, + "result": [ + { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + } + ] + } + ] + }, + { + "description": "Wrong type: Insert Int", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoublePrecision": { + "$numberInt": "0" + } + } + }, + "result": { + 
"errorContains": "cannot encrypt element" + } + } + ] + }, + { + "description": "Wrong type: Find Int", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "find", + "arguments": { + "filter": { + "encryptedDoublePrecision": { + "$gte": { + "$numberInt": "0" + } + } + }, + "sort": { + "_id": 1 + } + }, + "result": { + "errorContains": "field type is not supported" + } + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2-Range-DoublePrecision-Delete.json b/test/client-side-encryption/spec/legacy/fle2-Range-DoublePrecision-Delete.json new file mode 100644 index 0000000000..d6a9c4b7e7 --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2-Range-DoublePrecision-Delete.json @@ -0,0 +1,491 @@ +{ + "runOn": [ + { + "minServerVersion": "6.2.0", + "topology": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "encrypted_fields": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoublePrecision", + "bsonType": "double", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDouble": "0.0" + }, + "max": { + "$numberDouble": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + }, + "key_vault_data": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ], + "tests": [ + { + "description": "FLE2 Range DoublePrecision. 
Delete.", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1" + } + } + } + }, + { + "name": "deleteOne", + "arguments": { + "filter": { + "encryptedDoublePrecision": { + "$gt": { + "$numberDouble": "0" + } + } + } + }, + "result": { + "deletedCount": 1 + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedDoublePrecision": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoublePrecision", + "bsonType": "double", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDouble": "0.0" + }, + "max": { + "$numberDouble": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedDoublePrecision": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoublePrecision", + "bsonType": "double", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDouble": "0.0" + }, + "max": { + "$numberDouble": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "delete": "default", + "deletes": [ + { + "q": { + "encryptedDoublePrecision": { + "$gt": { + "$binary": { + "base64": 
"CvoJAAADcGF5bG9hZADKCQAABGcAjgkAAAMwAH0AAAAFZAAgAAAAAHdJ2Vnb4MMzqVYVssjSdDy8XU4GVzMTfGifGETgQ2mYBXMAIAAAAAD7cFfKJGIXo6PjyeX2ria02CckW7dWFDoY/3FyBdm1NQVjACAAAAAAqZO+/+gRWlPaMOvuiXizSmBe7lp1VWg1vJ4UmW8o3bQAAzEAfQAAAAVkACAAAAAAlmvfDrZoydUet4eCVMq7z6a58Ea+1HLJOWxN5lNcrWEFcwAgAAAAAEBo5AWZyC41b9ayjWNQSL4iYEAIwR/JG+ssN8bdoK9RBWMAIAAAAAD4FTKJ6CTzKBAyAwZCLUoDEfnZTRZmhF1q/2hnDzmG9gADMgB9AAAABWQAIAAAAABbiLaoxAA6rinMJw1hC8ZUiq6UU1AQaPFn/py/Y06WuQVzACAAAAAAhtDasFkvYE7SCNu1je/hxdE9TJtAvvH3NtdEbKzNbCUFYwAgAAAAAHHy019aPatHTST+0wGsmukUcsQNQj6KpoS9b7iGeThAAAMzAH0AAAAFZAAgAAAAALORWwSr+tYNxcil2KIGSbNhTHvcPbdj+rLVQNx21S/KBXMAIAAAAAD6diZBkPEJ1cQy06LAxdbNK8Nlxbb44fH4Wk3Y3260nQVjACAAAAAAvUc1q7pyhjU0ilgmwiKkHIY3V4/LxO+Y2uT7eSpBOs8AAzQAfQAAAAVkACAAAAAAG5XMK96PjClNlUvg82j4pMY1YxsznZfj4uNweD394FoFcwAgAAAAAKHgQLdGJHkrfFg9nB93Ac+3VgBw6aU44MTkKIQ91dZoBWMAIAAAAACtbNc1DCoUUyzlkrYmJi4NlwOqLYmb6au4pDc8clXVXwADNQB9AAAABWQAIAAAAAA6M++b9I0YFemmWBAWAE3glu2Ah3Ta1FBxAQEIWS0toAVzACAAAAAANXYTqPf1Y6X3Ns6YQIX0C3FKCyWUo+Kk+fNcQvc0WSoFYwAgAAAAAAaqju6Dv8wqXxcsIbP67V1QGaD5kNTFofZ9Zuf1LGnKAAM2AH0AAAAFZAAgAAAAAKl8bV1riH/uyJ+X0HHd3+18k2cJl2dQFXCdoagutFcaBXMAIAAAAABm8F2Ew9f0VOABdcF+lP0Bi+zWvEUPniWgrxPq/Sx3uwVjACAAAAAAQd2pWVqlmmLg8m8xbs7yLewmR0Z6UQgXofbCsMHaGSoAAzcAfQAAAAVkACAAAAAAsYZD8JEP6kYsPncFnNZwJxhu4YtUTKPNcjHtv67H+rYFcwAgAAAAAI4LqZcRkvbs/2F62Flu0pixNcor4WmBD0DHGaf039wLBWMAIAAAAAAqzpfyBpr4Ano+nFWJyyTuIJelJgiRDnMHQqdeqV8JaAADOAB9AAAABWQAIAAAAAC0vdAi+dmoIXvZ5LqUqvyKV9/tHqSI2SWiSJO5pTnA2wVzACAAAAAAS2qvf9fvfVUH5WtsVxjxmskpGjYTQV34LwvQQw1y9wIFYwAgAAAAAFhNY4qwNntyA+GIoNHZsTkIUbPgy4TBlvNnTPjp4bMFAAM5AH0AAAAFZAAgAAAAAH31lb/srBcrOXkzddCwAnclsR5/3QijEVgECs2JjOWBBXMAIAAAAABg7+prDT73YcCvLE5QbuIrqGcjLc5pQD2Miq0d29yrxgVjACAAAAAAFKqAqXG/ktejFQ7fM2aobO2VmEvZLXnRaJH97Jy/sJYAAzEwAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVjACAAAAAAfsofSP7nQHv8ic8ZW0aNlWxplS46Z+mywPR4rQk+wcgAAzExAH0AAAAFZAAgAAAAAFdthRhe2Q8CvxGIhjTJZv0Lk97GkHciTPxZ/mckLoNaBXMAIAAAAAAqOxsAr23LOVB0DIHbPf9UDJJRFXY2YoKbjhRqw5psbQVjACAAAAAA7ty+Nif6KjS3v1zWKaHX9n4Zj3XC4ajuCduKNIYr3l8AAzEyAH0AAAAFZAAgAAAAAMWymwwbvIeMqmnKWWifUqoCxOsdpnonM2qdLPyjqJO/BXMAIAAAAAB6IDmmpUhBD2zpRj8/y/kmOSXcjuIU14sNh6GKSsg2uwVjACAAAAAABSWO0Ii+NGcsHZQ9MR5EjPXVKeXlI4FQ1pcxeKDiuooAAzEzAH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVjACAAAAAAD51NYesiO4Fo7w7iWBfqAFxEqkaDVctpvzZ28nT4SE8AAzE0AH0AAAAFZAAgAAAAAJaRYmo8zqI2BEUzdSwp4tVRpPmVWsfydkYN3UHh6TMuBXMAIAAAAAAeD6mDnQeLlbC9i0sVgE8+RH6y+e94OJQ0tJ0PvblVSgVjACAAAAAAKUhYSt4nvvUfbNgPJ2E79SciVZ0ZzbzoZ2nKr4ewNLsAAzE1AH0AAAAFZAAgAAAAAAZZ538coNPwyRjhEwr5P8Xw32oWOJF+R+nfCGgy2qO3BXMAIAAAAACOPLnJlKwGNPDBReRKnHfteq0wFb3ezhrc7BVXs8RUHwVjACAAAAAAzCICkPZAkfTiD0MUt155dIPgLJ4/e0qFTM2FR0U261YAAzE2AH0AAAAFZAAgAAAAAFH9l9GGA1I52atJV5jNUf1lx8jBjoEoVoME97v5GFJiBXMAIAAAAAC1qH3Kd78Dr9NGbw7y9D/XYBwv5h1LLO8la5OU7g8UkQVjACAAAAAAn27H0Mpwatgc1R/4nXSRjsG2PzB0ol5YR9f3mCb2y/0AAzE3AH0AAAAFZAAgAAAAADtbVEI2tdkrowEMdkacD2w0Y3T3Ofi7PH6HmA6sP0c/BXMAIAAAAADuBSROnZHA+NgUPH8d0LnWFiDsM2bY8bzjC1+elSsIygVjACAAAAAAMinHEu4wkbeOpdZbXQ94q5o5pIEubqXUDrTRYOGmJC0AAzE4AH0AAAAFZAAgAAAAAAh3WpeMVlikPFYj9hLj+fmIqVt6omCSF75W3TPExyWpBXMAIAAAAAAsQkRmwqeVj2gGE03orb6PtrIzDt6dDU3hgSQi8E2wKgVjACAAAAAAvlZo8Qj3eAdxzZxN5sHKhxi+a9Npj7cZC5+pE6qrOawAAAVlACAAAAAA65pz95EthqQpfoHS9nWvdCh05AV+OokP7GUaI+7j8+wSY20AAAAAAAAAAAAAEHBheWxvYWRJZAAAAAAAEGZpcnN0T3BlcmF0b3IAAQAAAAA=", + "subType": "06" + } + } + } + }, + "limit": 1 + } + ], + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + 
"escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoublePrecision", + "bsonType": "double", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDouble": "0.0" + }, + "max": { + "$numberDouble": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + } + }, + "deleteTokens": { + "default.default": { + "encryptedDoublePrecision": { + "e": { + "$binary": { + "base64": "65pz95EthqQpfoHS9nWvdCh05AV+OokP7GUaI+7j8+w=", + "subType": "00" + } + }, + "o": { + "$binary": { + "base64": "noN+05JsuO1oDg59yypIGj45i+eFH6HOTXOPpeZ//Mk=", + "subType": "00" + } + } + } + } + } + } + }, + "command_name": "delete" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 0, + "encryptedDoublePrecision": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "5nRutVIyq7URVOVtbE4vM01APSIajAVnsShMwjBlzkM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Dri0CXmL78L2DOgk9w0DwxHOMGMzih7m6l59vgy+WWo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "x7GR49EN0t3WXQDihkrbonK7qNIBYC87tpL/XEUyIYc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JfYUqWF+OoGjiYkRI4L5iPlF+T1Eleul7Fki22jp4Qc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q1RyGfIgsaQHoZFRw+DD28V26rN5hweApPLwExncvT8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "L2PFeKGvLS6C+DLudR6fGlBq3ERPvjWvRyNRIA2HVb0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CWxaNqL3iP1yCixDkcmf9bmW3E5VeN8TJkg1jJe528s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+vC6araOEo+fpW7PSIP40/EnzBCj1d2N10Jr3rrXJJM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6SV63Mf51Z6A6p2X3rCnJKCu6ku3Oeb45mBYbz+IoAo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "RjBYT2h3ZAoHxhf8DU6/dFbDkEBZp0IxREcsRTu2MXs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "b7d8mRzD1kI1tdc7uNL+YAUonJ6pODLsRLkArfEKSkM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Xg8C1/A0KJaXOw4i+26Rv03/CydaaunOzXh0CIT+gn8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "UoKUDw2wJYToUCcFaIs03YQSTksYR0MIOTJllwODqKc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "c/5cwAT0C5jber2xlJnWD3a5tVDy0nRtr5HG02hoFOY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wSUrRXavAGaajNeqC5mEUH1K67oYl5Wy9RNIzKjwLAM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6vrp4wWDtHEgHWR99I70WVDzevg1Fk/Pw5U8gUDa0OU=", + "subType": "00" + } + } + ] + } + ] + } + } + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2-Range-DoublePrecision-FindOneAndUpdate.json b/test/client-side-encryption/spec/legacy/fle2-Range-DoublePrecision-FindOneAndUpdate.json new file mode 100644 index 0000000000..0511c2e37e --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2-Range-DoublePrecision-FindOneAndUpdate.json @@ -0,0 +1,608 @@ +{ + "runOn": [ + { + "minServerVersion": "6.2.0", + "topology": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "encrypted_fields": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + 
"keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoublePrecision", + "bsonType": "double", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDouble": "0.0" + }, + "max": { + "$numberDouble": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + }, + "key_vault_data": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ], + "tests": [ + { + "description": "FLE2 Range DoublePrecision. FindOneAndUpdate.", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1" + } + } + } + }, + { + "name": "findOneAndUpdate", + "arguments": { + "filter": { + "encryptedDoublePrecision": { + "$gt": { + "$numberDouble": "0" + } + } + }, + "update": { + "$set": { + "encryptedDoublePrecision": { + "$numberDouble": "2" + } + } + }, + "returnDocument": "Before" + }, + "result": { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1" + } + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedDoublePrecision": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoublePrecision", + "bsonType": "double", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDouble": "0.0" + }, + "max": { + "$numberDouble": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + } + } + } + }, + 
"command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedDoublePrecision": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoublePrecision", + "bsonType": "double", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDouble": "0.0" + }, + "max": { + "$numberDouble": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "findAndModify": "default", + "query": { + "encryptedDoublePrecision": { + "$gt": { + "$binary": { + "base64": "CvoJAAADcGF5bG9hZADKCQAABGcAjgkAAAMwAH0AAAAFZAAgAAAAAHdJ2Vnb4MMzqVYVssjSdDy8XU4GVzMTfGifGETgQ2mYBXMAIAAAAAD7cFfKJGIXo6PjyeX2ria02CckW7dWFDoY/3FyBdm1NQVjACAAAAAAqZO+/+gRWlPaMOvuiXizSmBe7lp1VWg1vJ4UmW8o3bQAAzEAfQAAAAVkACAAAAAAlmvfDrZoydUet4eCVMq7z6a58Ea+1HLJOWxN5lNcrWEFcwAgAAAAAEBo5AWZyC41b9ayjWNQSL4iYEAIwR/JG+ssN8bdoK9RBWMAIAAAAAD4FTKJ6CTzKBAyAwZCLUoDEfnZTRZmhF1q/2hnDzmG9gADMgB9AAAABWQAIAAAAABbiLaoxAA6rinMJw1hC8ZUiq6UU1AQaPFn/py/Y06WuQVzACAAAAAAhtDasFkvYE7SCNu1je/hxdE9TJtAvvH3NtdEbKzNbCUFYwAgAAAAAHHy019aPatHTST+0wGsmukUcsQNQj6KpoS9b7iGeThAAAMzAH0AAAAFZAAgAAAAALORWwSr+tYNxcil2KIGSbNhTHvcPbdj+rLVQNx21S/KBXMAIAAAAAD6diZBkPEJ1cQy06LAxdbNK8Nlxbb44fH4Wk3Y3260nQVjACAAAAAAvUc1q7pyhjU0ilgmwiKkHIY3V4/LxO+Y2uT7eSpBOs8AAzQAfQAAAAVkACAAAAAAG5XMK96PjClNlUvg82j4pMY1YxsznZfj4uNweD394FoFcwAgAAAAAKHgQLdGJHkrfFg9nB93Ac+3VgBw6aU44MTkKIQ91dZoBWMAIAAAAACtbNc1DCoUUyzlkrYmJi4NlwOqLYmb6au4pDc8clXVXwADNQB9AAAABWQAIAAAAAA6M++b9I0YFemmWBAWAE3glu2Ah3Ta1FBxAQEIWS0toAVzACAAAAAANXYTqPf1Y6X3Ns6YQIX0C3FKCyWUo+Kk+fNcQvc0WSoFYwAgAAAAAAaqju6Dv8wqXxcsIbP67V1QGaD5kNTFofZ9Zuf1LGnKAAM2AH0AAAAFZAAgAAAAAKl8bV1riH/uyJ+X0HHd3+18k2cJl2dQFXCdoagutFcaBXMAIAAAAABm8F2Ew9f0VOABdcF+lP0Bi+zWvEUPniWgrxPq/Sx3uwVjACAAAAAAQd2pWVqlmmLg8m8xbs7yLewmR0Z6UQgXofbCsMHaGSoAAzcAfQAAAAVkACAAAAAAsYZD8JEP6kYsPncFnNZwJxhu4YtUTKPNcjHtv67H+rYFcwAgAAAAAI4LqZcRkvbs/2F62Flu0pixNcor4WmBD0DHGaf039wLBWMAIAAAAAAqzpfyBpr4Ano+nFWJyyTuIJelJgiRDnMHQqdeqV8JaAADOAB9AAAABWQAIAAAAAC0vdAi+dmoIXvZ5LqUqvyKV9/tHqSI2SWiSJO5pTnA2wVzACAAAAAAS2qvf9fvfVUH5WtsVxjxmskpGjYTQV34LwvQQw1y9wIFYwAgAAAAAFhNY4qwNntyA+GIoNHZsTkIUbPgy4TBlvNnTPjp4bMFAAM5AH0AAAAFZAAgAAAAAH31lb/srBcrOXkzddCwAnclsR5/3QijEVgECs2JjOWBBXMAIAAAAABg7+prDT73YcCvLE5QbuIrqGcjLc5pQD2Miq0d29yrxgVjACAAAAAAFKqAqXG/ktejFQ7fM2aobO2VmEvZLXnRaJH97Jy/sJYAAzEwAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVjACAAAAAAfsofSP7nQHv8ic8ZW0aNlWxplS46Z+mywPR4rQk+wcgAAzExAH0AAAAFZAAgAAAAAFdthRhe2Q8CvxGIhjTJZv0Lk97GkHciTPxZ/mckLoNaBXMAIAAAAAAqOxsAr23LOVB0DIHbPf9UDJJRFXY2YoKbjhRqw5psbQVjACAAAAAA7ty+Nif6KjS3v1zWKaHX9n4Zj3XC4ajuCduKNIYr3l8AAzEyAH0AAAAFZAAgAAAAAMWymwwbvIeMqmnKWWifUqoCxOsdpnonM2qdLPyjqJO/BXMAIAAAAAB6IDmmpUhBD2zpRj8/y/kmOSXcjuIU14sNh6GKSsg2uwVjACAAAAAABSWO0Ii+NGcsHZQ9MR5EjPXVKeXlI4FQ1pcxeKDiuooAAzEzAH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVjACAAAAAAD51NYesiO4Fo7w7iWBfqAFxEqkaDVctpvzZ28nT4SE8AAzE0AH0AAAAFZAAgAAAAAJaRYmo8zqI2BEUzd
Swp4tVRpPmVWsfydkYN3UHh6TMuBXMAIAAAAAAeD6mDnQeLlbC9i0sVgE8+RH6y+e94OJQ0tJ0PvblVSgVjACAAAAAAKUhYSt4nvvUfbNgPJ2E79SciVZ0ZzbzoZ2nKr4ewNLsAAzE1AH0AAAAFZAAgAAAAAAZZ538coNPwyRjhEwr5P8Xw32oWOJF+R+nfCGgy2qO3BXMAIAAAAACOPLnJlKwGNPDBReRKnHfteq0wFb3ezhrc7BVXs8RUHwVjACAAAAAAzCICkPZAkfTiD0MUt155dIPgLJ4/e0qFTM2FR0U261YAAzE2AH0AAAAFZAAgAAAAAFH9l9GGA1I52atJV5jNUf1lx8jBjoEoVoME97v5GFJiBXMAIAAAAAC1qH3Kd78Dr9NGbw7y9D/XYBwv5h1LLO8la5OU7g8UkQVjACAAAAAAn27H0Mpwatgc1R/4nXSRjsG2PzB0ol5YR9f3mCb2y/0AAzE3AH0AAAAFZAAgAAAAADtbVEI2tdkrowEMdkacD2w0Y3T3Ofi7PH6HmA6sP0c/BXMAIAAAAADuBSROnZHA+NgUPH8d0LnWFiDsM2bY8bzjC1+elSsIygVjACAAAAAAMinHEu4wkbeOpdZbXQ94q5o5pIEubqXUDrTRYOGmJC0AAzE4AH0AAAAFZAAgAAAAAAh3WpeMVlikPFYj9hLj+fmIqVt6omCSF75W3TPExyWpBXMAIAAAAAAsQkRmwqeVj2gGE03orb6PtrIzDt6dDU3hgSQi8E2wKgVjACAAAAAAvlZo8Qj3eAdxzZxN5sHKhxi+a9Npj7cZC5+pE6qrOawAAAVlACAAAAAA65pz95EthqQpfoHS9nWvdCh05AV+OokP7GUaI+7j8+wSY20AAAAAAAAAAAAAEHBheWxvYWRJZAAAAAAAEGZpcnN0T3BlcmF0b3IAAQAAAAA=", + "subType": "06" + } + } + } + }, + "update": { + "$set": { + "encryptedDoublePrecision": { + "$$type": "binData" + } + } + }, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoublePrecision", + "bsonType": "double", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDouble": "0.0" + }, + "max": { + "$numberDouble": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + } + }, + "deleteTokens": { + "default.default": { + "encryptedDoublePrecision": { + "e": { + "$binary": { + "base64": "65pz95EthqQpfoHS9nWvdCh05AV+OokP7GUaI+7j8+w=", + "subType": "00" + } + }, + "o": { + "$binary": { + "base64": "noN+05JsuO1oDg59yypIGj45i+eFH6HOTXOPpeZ//Mk=", + "subType": "00" + } + } + } + } + } + } + }, + "command_name": "findAndModify" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 0, + "encryptedDoublePrecision": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "5nRutVIyq7URVOVtbE4vM01APSIajAVnsShMwjBlzkM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Dri0CXmL78L2DOgk9w0DwxHOMGMzih7m6l59vgy+WWo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "x7GR49EN0t3WXQDihkrbonK7qNIBYC87tpL/XEUyIYc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JfYUqWF+OoGjiYkRI4L5iPlF+T1Eleul7Fki22jp4Qc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q1RyGfIgsaQHoZFRw+DD28V26rN5hweApPLwExncvT8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "L2PFeKGvLS6C+DLudR6fGlBq3ERPvjWvRyNRIA2HVb0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CWxaNqL3iP1yCixDkcmf9bmW3E5VeN8TJkg1jJe528s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+vC6araOEo+fpW7PSIP40/EnzBCj1d2N10Jr3rrXJJM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6SV63Mf51Z6A6p2X3rCnJKCu6ku3Oeb45mBYbz+IoAo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "RjBYT2h3ZAoHxhf8DU6/dFbDkEBZp0IxREcsRTu2MXs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "b7d8mRzD1kI1tdc7uNL+YAUonJ6pODLsRLkArfEKSkM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Xg8C1/A0KJaXOw4i+26Rv03/CydaaunOzXh0CIT+gn8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": 
"UoKUDw2wJYToUCcFaIs03YQSTksYR0MIOTJllwODqKc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "c/5cwAT0C5jber2xlJnWD3a5tVDy0nRtr5HG02hoFOY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wSUrRXavAGaajNeqC5mEUH1K67oYl5Wy9RNIzKjwLAM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6vrp4wWDtHEgHWR99I70WVDzevg1Fk/Pw5U8gUDa0OU=", + "subType": "00" + } + } + ] + }, + { + "_id": 1, + "encryptedDoublePrecision": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "DLCAJs+W2PL2DV5YChCL6dYrQNr+j4p3L7xhVaub4ic=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "V6knyt7Zq2CG3++l75UtBx2m32iGAPjHiAe439Bf02w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "F08nMDWDZc+DbWM7XCEJNNCEYyinRmrvGP7EWhmp4is=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cXH4688amcDc8kZOJq4UP8cE3R58Zl7e+Qo/1jyspps=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "uURBxvTp3FBCVkd+LPqyuY7d6rMW6SGIJQEPY/wtkZI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "jG3hax1L3RBp9t38vUt53FsBxgr/+Si/vVISpAylYpE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kwtIW8MhH9Ky5xNjBx8gFA/SHh2YVphie7g5FGBzals=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "FHflwFuEMu4xX0ZApHi+pdlBH+oevAtXckCUb5Wv0xU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "0OKSXELxPP85SBVwDGf3LtMEQCJ8TTkFUl/+6jlkdb0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "25j9sQXZCihCmHKvTHgaBsAVZFcGPn7JjHdrCGlwyyw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "uEw0lpQtBppR3vqV9j9+NQRSBF1BzZukb8c9IhyWvxc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zVhZ7Q59O087ji49oMJvBIgeir2oqvUpnh4p53GcTow=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dowrzKs+qJhRMZyKDbhjXbuX43FbmUKOaw9I8YlOZDw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ep5B6cska6THLIF7Mn3tn3RvV9EiwLSt0eZM/CLRUDc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "URNp/YmmDh5wIZUfAzzgPyJeMNiVx9PMsz52DZRujGY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wlM4IAQhhKQEzoVqS8b1Ddd50GB95OFb9LnzOwyjCP4=", + "subType": "00" + } + } + ] + } + ] + } + } + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2-Range-DoublePrecision-InsertFind.json b/test/client-side-encryption/spec/legacy/fle2-Range-DoublePrecision-InsertFind.json new file mode 100644 index 0000000000..616101b4d4 --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2-Range-DoublePrecision-InsertFind.json @@ -0,0 +1,577 @@ +{ + "runOn": [ + { + "minServerVersion": "6.2.0", + "topology": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "encrypted_fields": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoublePrecision", + "bsonType": "double", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDouble": "0.0" + }, + "max": { + "$numberDouble": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + }, + "key_vault_data": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": 
"sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ], + "tests": [ + { + "description": "FLE2 Range DoublePrecision. Insert and Find.", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDoublePrecision": { + "$gt": { + "$numberDouble": "0" + } + } + } + }, + "result": [ + { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1" + } + } + ] + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedDoublePrecision": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoublePrecision", + "bsonType": "double", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDouble": "0.0" + }, + "max": { + "$numberDouble": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedDoublePrecision": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoublePrecision", + "bsonType": "double", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" 
+ }, + "min": { + "$numberDouble": "0.0" + }, + "max": { + "$numberDouble": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "find": "default", + "filter": { + "encryptedDoublePrecision": { + "$gt": { + "$binary": { + "base64": "CvoJAAADcGF5bG9hZADKCQAABGcAjgkAAAMwAH0AAAAFZAAgAAAAAHdJ2Vnb4MMzqVYVssjSdDy8XU4GVzMTfGifGETgQ2mYBXMAIAAAAAD7cFfKJGIXo6PjyeX2ria02CckW7dWFDoY/3FyBdm1NQVjACAAAAAAqZO+/+gRWlPaMOvuiXizSmBe7lp1VWg1vJ4UmW8o3bQAAzEAfQAAAAVkACAAAAAAlmvfDrZoydUet4eCVMq7z6a58Ea+1HLJOWxN5lNcrWEFcwAgAAAAAEBo5AWZyC41b9ayjWNQSL4iYEAIwR/JG+ssN8bdoK9RBWMAIAAAAAD4FTKJ6CTzKBAyAwZCLUoDEfnZTRZmhF1q/2hnDzmG9gADMgB9AAAABWQAIAAAAABbiLaoxAA6rinMJw1hC8ZUiq6UU1AQaPFn/py/Y06WuQVzACAAAAAAhtDasFkvYE7SCNu1je/hxdE9TJtAvvH3NtdEbKzNbCUFYwAgAAAAAHHy019aPatHTST+0wGsmukUcsQNQj6KpoS9b7iGeThAAAMzAH0AAAAFZAAgAAAAALORWwSr+tYNxcil2KIGSbNhTHvcPbdj+rLVQNx21S/KBXMAIAAAAAD6diZBkPEJ1cQy06LAxdbNK8Nlxbb44fH4Wk3Y3260nQVjACAAAAAAvUc1q7pyhjU0ilgmwiKkHIY3V4/LxO+Y2uT7eSpBOs8AAzQAfQAAAAVkACAAAAAAG5XMK96PjClNlUvg82j4pMY1YxsznZfj4uNweD394FoFcwAgAAAAAKHgQLdGJHkrfFg9nB93Ac+3VgBw6aU44MTkKIQ91dZoBWMAIAAAAACtbNc1DCoUUyzlkrYmJi4NlwOqLYmb6au4pDc8clXVXwADNQB9AAAABWQAIAAAAAA6M++b9I0YFemmWBAWAE3glu2Ah3Ta1FBxAQEIWS0toAVzACAAAAAANXYTqPf1Y6X3Ns6YQIX0C3FKCyWUo+Kk+fNcQvc0WSoFYwAgAAAAAAaqju6Dv8wqXxcsIbP67V1QGaD5kNTFofZ9Zuf1LGnKAAM2AH0AAAAFZAAgAAAAAKl8bV1riH/uyJ+X0HHd3+18k2cJl2dQFXCdoagutFcaBXMAIAAAAABm8F2Ew9f0VOABdcF+lP0Bi+zWvEUPniWgrxPq/Sx3uwVjACAAAAAAQd2pWVqlmmLg8m8xbs7yLewmR0Z6UQgXofbCsMHaGSoAAzcAfQAAAAVkACAAAAAAsYZD8JEP6kYsPncFnNZwJxhu4YtUTKPNcjHtv67H+rYFcwAgAAAAAI4LqZcRkvbs/2F62Flu0pixNcor4WmBD0DHGaf039wLBWMAIAAAAAAqzpfyBpr4Ano+nFWJyyTuIJelJgiRDnMHQqdeqV8JaAADOAB9AAAABWQAIAAAAAC0vdAi+dmoIXvZ5LqUqvyKV9/tHqSI2SWiSJO5pTnA2wVzACAAAAAAS2qvf9fvfVUH5WtsVxjxmskpGjYTQV34LwvQQw1y9wIFYwAgAAAAAFhNY4qwNntyA+GIoNHZsTkIUbPgy4TBlvNnTPjp4bMFAAM5AH0AAAAFZAAgAAAAAH31lb/srBcrOXkzddCwAnclsR5/3QijEVgECs2JjOWBBXMAIAAAAABg7+prDT73YcCvLE5QbuIrqGcjLc5pQD2Miq0d29yrxgVjACAAAAAAFKqAqXG/ktejFQ7fM2aobO2VmEvZLXnRaJH97Jy/sJYAAzEwAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVjACAAAAAAfsofSP7nQHv8ic8ZW0aNlWxplS46Z+mywPR4rQk+wcgAAzExAH0AAAAFZAAgAAAAAFdthRhe2Q8CvxGIhjTJZv0Lk97GkHciTPxZ/mckLoNaBXMAIAAAAAAqOxsAr23LOVB0DIHbPf9UDJJRFXY2YoKbjhRqw5psbQVjACAAAAAA7ty+Nif6KjS3v1zWKaHX9n4Zj3XC4ajuCduKNIYr3l8AAzEyAH0AAAAFZAAgAAAAAMWymwwbvIeMqmnKWWifUqoCxOsdpnonM2qdLPyjqJO/BXMAIAAAAAB6IDmmpUhBD2zpRj8/y/kmOSXcjuIU14sNh6GKSsg2uwVjACAAAAAABSWO0Ii+NGcsHZQ9MR5EjPXVKeXlI4FQ1pcxeKDiuooAAzEzAH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVjACAAAAAAD51NYesiO4Fo7w7iWBfqAFxEqkaDVctpvzZ28nT4SE8AAzE0AH0AAAAFZAAgAAAAAJaRYmo8zqI2BEUzdSwp4tVRpPmVWsfydkYN3UHh6TMuBXMAIAAAAAAeD6mDnQeLlbC9i0sVgE8+RH6y+e94OJQ0tJ0PvblVSgVjACAAAAAAKUhYSt4nvvUfbNgPJ2E79SciVZ0ZzbzoZ2nKr4ewNLsAAzE1AH0AAAAFZAAgAAAAAAZZ538coNPwyRjhEwr5P8Xw32oWOJF+R+nfCGgy2qO3BXMAIAAAAACOPLnJlKwGNPDBReRKnHfteq0wFb3ezhrc7BVXs8RUHwVjACAAAAAAzCICkPZAkfTiD0MUt155dIPgLJ4/e0qFTM2FR0U261YAAzE2AH0AAAAFZAAgAAAAAFH9l9GGA1I52atJV5jNUf1lx8jBjoEoVoME97v5GFJiBXMAIAAAAAC1qH3Kd78Dr9NGbw7y9D/XYBwv5h1LLO8la5OU7g8UkQVjACAAAAAAn27H0Mpwatgc1R/4nXSRjsG2PzB0ol5YR9f3mCb2y/0AAzE3AH0AAAAFZAAgAAAAADtbVEI2tdkrowEMdkacD2w0Y3T3Ofi7PH6HmA6sP0c/BXMAIAAAAADuBSROnZHA+NgUPH8d0LnWFiDsM2bY8bzjC1+elSsIygVjACAAAAAAMinHEu4wkbeOpdZbXQ94q5o5pIEubqXUDrTRYOGmJC0AAzE4AH0AAAAFZAAgAAAAAAh3WpeMVlikPFYj9hLj+fmIqVt6omCSF75W3TPExyWpBXMAIAAAAAAsQkRmwqeVj2gGE0
3orb6PtrIzDt6dDU3hgSQi8E2wKgVjACAAAAAAvlZo8Qj3eAdxzZxN5sHKhxi+a9Npj7cZC5+pE6qrOawAAAVlACAAAAAA65pz95EthqQpfoHS9nWvdCh05AV+OokP7GUaI+7j8+wSY20AAAAAAAAAAAAAEHBheWxvYWRJZAAAAAAAEGZpcnN0T3BlcmF0b3IAAQAAAAA=", + "subType": "06" + } + } + } + }, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoublePrecision", + "bsonType": "double", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDouble": "0.0" + }, + "max": { + "$numberDouble": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + } + } + } + }, + "command_name": "find" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 0, + "encryptedDoublePrecision": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "5nRutVIyq7URVOVtbE4vM01APSIajAVnsShMwjBlzkM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Dri0CXmL78L2DOgk9w0DwxHOMGMzih7m6l59vgy+WWo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "x7GR49EN0t3WXQDihkrbonK7qNIBYC87tpL/XEUyIYc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JfYUqWF+OoGjiYkRI4L5iPlF+T1Eleul7Fki22jp4Qc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q1RyGfIgsaQHoZFRw+DD28V26rN5hweApPLwExncvT8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "L2PFeKGvLS6C+DLudR6fGlBq3ERPvjWvRyNRIA2HVb0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CWxaNqL3iP1yCixDkcmf9bmW3E5VeN8TJkg1jJe528s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+vC6araOEo+fpW7PSIP40/EnzBCj1d2N10Jr3rrXJJM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6SV63Mf51Z6A6p2X3rCnJKCu6ku3Oeb45mBYbz+IoAo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "RjBYT2h3ZAoHxhf8DU6/dFbDkEBZp0IxREcsRTu2MXs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "b7d8mRzD1kI1tdc7uNL+YAUonJ6pODLsRLkArfEKSkM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Xg8C1/A0KJaXOw4i+26Rv03/CydaaunOzXh0CIT+gn8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "UoKUDw2wJYToUCcFaIs03YQSTksYR0MIOTJllwODqKc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "c/5cwAT0C5jber2xlJnWD3a5tVDy0nRtr5HG02hoFOY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wSUrRXavAGaajNeqC5mEUH1K67oYl5Wy9RNIzKjwLAM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6vrp4wWDtHEgHWR99I70WVDzevg1Fk/Pw5U8gUDa0OU=", + "subType": "00" + } + } + ] + }, + { + "_id": 1, + "encryptedDoublePrecision": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "bE1vqWj3KNyM7cCYUv/cnYm8BPaUL3eMp5syTHq6NF4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "mVZb+Ra0EYjQ4Zrh9X//E2T8MRj7NMqm5GUJXhRrBEI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "FA74j21GUEJb1DJBOpR9nVnjaDZnd8yAQNuaW9Qi26g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kJv//KVkbrobIBf+QeWC5jxn20mx/P0R1N6aCSMgKM8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zB+Whi9IUUGxfLEe+lGuIzLX4LFbIhaIAm5lRk65QTc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ybO1QU3CgvhO8JgRXH+HxKszWcpl5aGDYYVa75fHa1g=", + "subType": "00" + } + }, + { + "$binary": { + 
"base64": "X3Y3eSAbbMg//JgiHHiFpYOpV61t8kkDexI+CQyitH4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SlNHXyqVFGDPrX/2ppwog6l4pwj3PKda2TkZbqgfSfA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "McjV8xwTF3xI7863DYOBdyvIv6UpzThl6v9vBRk05bI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "MgwakFvPyBlwqFTbhWUF79URJQWFoJTGotlEVSPPUsQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "DyBERpMSD5lEM5Nhpcn4WGgxgn/mkUVJp+PYSLX5jsE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "I43iazc0xj1WVbYB/V+uTL/tughN1bBlxh1iypBnNsA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wjOBa/ATMuOywFmuPgC0GF/oeLqu0Z7eK5udzkTPbis=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "gRQVwiR+m+0Vg8ZDXqrQQcVnTyobwCXNaA4BCJVXtMc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "WUZ6huwx0ZbLb0R00uiC9FOJzsUocUN8qE5+YRenkvQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "7s79aKEuPgQcS/YPOOVcYNZvHIo7FFsWtFCrnDKXefA=", + "subType": "00" + } + } + ] + } + ] + } + } + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2-Range-DoublePrecision-Update.json b/test/client-side-encryption/spec/legacy/fle2-Range-DoublePrecision-Update.json new file mode 100644 index 0000000000..300202e227 --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2-Range-DoublePrecision-Update.json @@ -0,0 +1,612 @@ +{ + "runOn": [ + { + "minServerVersion": "6.2.0", + "topology": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "encrypted_fields": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoublePrecision", + "bsonType": "double", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDouble": "0.0" + }, + "max": { + "$numberDouble": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + }, + "key_vault_data": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ], + "tests": [ + { + "description": "FLE2 Range DoublePrecision. 
Update.", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1" + } + } + } + }, + { + "name": "updateOne", + "arguments": { + "filter": { + "encryptedDoublePrecision": { + "$gt": { + "$numberDouble": "0" + } + } + }, + "update": { + "$set": { + "encryptedDoublePrecision": { + "$numberDouble": "2" + } + } + } + }, + "result": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0 + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedDoublePrecision": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoublePrecision", + "bsonType": "double", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDouble": "0.0" + }, + "max": { + "$numberDouble": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedDoublePrecision": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoublePrecision", + "bsonType": "double", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDouble": "0.0" + }, + "max": { + "$numberDouble": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command_name": "update", + "command": { + "update": "default", + "ordered": true, + "updates": [ + { + "q": { + "encryptedDoublePrecision": { + "$gt": { + "$binary": { + "base64": 
"CvoJAAADcGF5bG9hZADKCQAABGcAjgkAAAMwAH0AAAAFZAAgAAAAAHdJ2Vnb4MMzqVYVssjSdDy8XU4GVzMTfGifGETgQ2mYBXMAIAAAAAD7cFfKJGIXo6PjyeX2ria02CckW7dWFDoY/3FyBdm1NQVjACAAAAAAqZO+/+gRWlPaMOvuiXizSmBe7lp1VWg1vJ4UmW8o3bQAAzEAfQAAAAVkACAAAAAAlmvfDrZoydUet4eCVMq7z6a58Ea+1HLJOWxN5lNcrWEFcwAgAAAAAEBo5AWZyC41b9ayjWNQSL4iYEAIwR/JG+ssN8bdoK9RBWMAIAAAAAD4FTKJ6CTzKBAyAwZCLUoDEfnZTRZmhF1q/2hnDzmG9gADMgB9AAAABWQAIAAAAABbiLaoxAA6rinMJw1hC8ZUiq6UU1AQaPFn/py/Y06WuQVzACAAAAAAhtDasFkvYE7SCNu1je/hxdE9TJtAvvH3NtdEbKzNbCUFYwAgAAAAAHHy019aPatHTST+0wGsmukUcsQNQj6KpoS9b7iGeThAAAMzAH0AAAAFZAAgAAAAALORWwSr+tYNxcil2KIGSbNhTHvcPbdj+rLVQNx21S/KBXMAIAAAAAD6diZBkPEJ1cQy06LAxdbNK8Nlxbb44fH4Wk3Y3260nQVjACAAAAAAvUc1q7pyhjU0ilgmwiKkHIY3V4/LxO+Y2uT7eSpBOs8AAzQAfQAAAAVkACAAAAAAG5XMK96PjClNlUvg82j4pMY1YxsznZfj4uNweD394FoFcwAgAAAAAKHgQLdGJHkrfFg9nB93Ac+3VgBw6aU44MTkKIQ91dZoBWMAIAAAAACtbNc1DCoUUyzlkrYmJi4NlwOqLYmb6au4pDc8clXVXwADNQB9AAAABWQAIAAAAAA6M++b9I0YFemmWBAWAE3glu2Ah3Ta1FBxAQEIWS0toAVzACAAAAAANXYTqPf1Y6X3Ns6YQIX0C3FKCyWUo+Kk+fNcQvc0WSoFYwAgAAAAAAaqju6Dv8wqXxcsIbP67V1QGaD5kNTFofZ9Zuf1LGnKAAM2AH0AAAAFZAAgAAAAAKl8bV1riH/uyJ+X0HHd3+18k2cJl2dQFXCdoagutFcaBXMAIAAAAABm8F2Ew9f0VOABdcF+lP0Bi+zWvEUPniWgrxPq/Sx3uwVjACAAAAAAQd2pWVqlmmLg8m8xbs7yLewmR0Z6UQgXofbCsMHaGSoAAzcAfQAAAAVkACAAAAAAsYZD8JEP6kYsPncFnNZwJxhu4YtUTKPNcjHtv67H+rYFcwAgAAAAAI4LqZcRkvbs/2F62Flu0pixNcor4WmBD0DHGaf039wLBWMAIAAAAAAqzpfyBpr4Ano+nFWJyyTuIJelJgiRDnMHQqdeqV8JaAADOAB9AAAABWQAIAAAAAC0vdAi+dmoIXvZ5LqUqvyKV9/tHqSI2SWiSJO5pTnA2wVzACAAAAAAS2qvf9fvfVUH5WtsVxjxmskpGjYTQV34LwvQQw1y9wIFYwAgAAAAAFhNY4qwNntyA+GIoNHZsTkIUbPgy4TBlvNnTPjp4bMFAAM5AH0AAAAFZAAgAAAAAH31lb/srBcrOXkzddCwAnclsR5/3QijEVgECs2JjOWBBXMAIAAAAABg7+prDT73YcCvLE5QbuIrqGcjLc5pQD2Miq0d29yrxgVjACAAAAAAFKqAqXG/ktejFQ7fM2aobO2VmEvZLXnRaJH97Jy/sJYAAzEwAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVjACAAAAAAfsofSP7nQHv8ic8ZW0aNlWxplS46Z+mywPR4rQk+wcgAAzExAH0AAAAFZAAgAAAAAFdthRhe2Q8CvxGIhjTJZv0Lk97GkHciTPxZ/mckLoNaBXMAIAAAAAAqOxsAr23LOVB0DIHbPf9UDJJRFXY2YoKbjhRqw5psbQVjACAAAAAA7ty+Nif6KjS3v1zWKaHX9n4Zj3XC4ajuCduKNIYr3l8AAzEyAH0AAAAFZAAgAAAAAMWymwwbvIeMqmnKWWifUqoCxOsdpnonM2qdLPyjqJO/BXMAIAAAAAB6IDmmpUhBD2zpRj8/y/kmOSXcjuIU14sNh6GKSsg2uwVjACAAAAAABSWO0Ii+NGcsHZQ9MR5EjPXVKeXlI4FQ1pcxeKDiuooAAzEzAH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVjACAAAAAAD51NYesiO4Fo7w7iWBfqAFxEqkaDVctpvzZ28nT4SE8AAzE0AH0AAAAFZAAgAAAAAJaRYmo8zqI2BEUzdSwp4tVRpPmVWsfydkYN3UHh6TMuBXMAIAAAAAAeD6mDnQeLlbC9i0sVgE8+RH6y+e94OJQ0tJ0PvblVSgVjACAAAAAAKUhYSt4nvvUfbNgPJ2E79SciVZ0ZzbzoZ2nKr4ewNLsAAzE1AH0AAAAFZAAgAAAAAAZZ538coNPwyRjhEwr5P8Xw32oWOJF+R+nfCGgy2qO3BXMAIAAAAACOPLnJlKwGNPDBReRKnHfteq0wFb3ezhrc7BVXs8RUHwVjACAAAAAAzCICkPZAkfTiD0MUt155dIPgLJ4/e0qFTM2FR0U261YAAzE2AH0AAAAFZAAgAAAAAFH9l9GGA1I52atJV5jNUf1lx8jBjoEoVoME97v5GFJiBXMAIAAAAAC1qH3Kd78Dr9NGbw7y9D/XYBwv5h1LLO8la5OU7g8UkQVjACAAAAAAn27H0Mpwatgc1R/4nXSRjsG2PzB0ol5YR9f3mCb2y/0AAzE3AH0AAAAFZAAgAAAAADtbVEI2tdkrowEMdkacD2w0Y3T3Ofi7PH6HmA6sP0c/BXMAIAAAAADuBSROnZHA+NgUPH8d0LnWFiDsM2bY8bzjC1+elSsIygVjACAAAAAAMinHEu4wkbeOpdZbXQ94q5o5pIEubqXUDrTRYOGmJC0AAzE4AH0AAAAFZAAgAAAAAAh3WpeMVlikPFYj9hLj+fmIqVt6omCSF75W3TPExyWpBXMAIAAAAAAsQkRmwqeVj2gGE03orb6PtrIzDt6dDU3hgSQi8E2wKgVjACAAAAAAvlZo8Qj3eAdxzZxN5sHKhxi+a9Npj7cZC5+pE6qrOawAAAVlACAAAAAA65pz95EthqQpfoHS9nWvdCh05AV+OokP7GUaI+7j8+wSY20AAAAAAAAAAAAAEHBheWxvYWRJZAAAAAAAEGZpcnN0T3BlcmF0b3IAAQAAAAA=", + "subType": "06" + } + } + } + }, + "u": { + "$set": { + "encryptedDoublePrecision": { + "$$type": "binData" + } + } + } + } + ], + 
"encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoublePrecision", + "bsonType": "double", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDouble": "0.0" + }, + "max": { + "$numberDouble": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + } + }, + "deleteTokens": { + "default.default": { + "encryptedDoublePrecision": { + "e": { + "$binary": { + "base64": "65pz95EthqQpfoHS9nWvdCh05AV+OokP7GUaI+7j8+w=", + "subType": "00" + } + }, + "o": { + "$binary": { + "base64": "noN+05JsuO1oDg59yypIGj45i+eFH6HOTXOPpeZ//Mk=", + "subType": "00" + } + } + } + } + } + }, + "$db": "default" + } + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 0, + "encryptedDoublePrecision": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "5nRutVIyq7URVOVtbE4vM01APSIajAVnsShMwjBlzkM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Dri0CXmL78L2DOgk9w0DwxHOMGMzih7m6l59vgy+WWo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "x7GR49EN0t3WXQDihkrbonK7qNIBYC87tpL/XEUyIYc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JfYUqWF+OoGjiYkRI4L5iPlF+T1Eleul7Fki22jp4Qc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q1RyGfIgsaQHoZFRw+DD28V26rN5hweApPLwExncvT8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "L2PFeKGvLS6C+DLudR6fGlBq3ERPvjWvRyNRIA2HVb0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CWxaNqL3iP1yCixDkcmf9bmW3E5VeN8TJkg1jJe528s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+vC6araOEo+fpW7PSIP40/EnzBCj1d2N10Jr3rrXJJM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6SV63Mf51Z6A6p2X3rCnJKCu6ku3Oeb45mBYbz+IoAo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "RjBYT2h3ZAoHxhf8DU6/dFbDkEBZp0IxREcsRTu2MXs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "b7d8mRzD1kI1tdc7uNL+YAUonJ6pODLsRLkArfEKSkM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Xg8C1/A0KJaXOw4i+26Rv03/CydaaunOzXh0CIT+gn8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "UoKUDw2wJYToUCcFaIs03YQSTksYR0MIOTJllwODqKc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "c/5cwAT0C5jber2xlJnWD3a5tVDy0nRtr5HG02hoFOY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wSUrRXavAGaajNeqC5mEUH1K67oYl5Wy9RNIzKjwLAM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6vrp4wWDtHEgHWR99I70WVDzevg1Fk/Pw5U8gUDa0OU=", + "subType": "00" + } + } + ] + }, + { + "_id": 1, + "encryptedDoublePrecision": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "DLCAJs+W2PL2DV5YChCL6dYrQNr+j4p3L7xhVaub4ic=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "V6knyt7Zq2CG3++l75UtBx2m32iGAPjHiAe439Bf02w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "F08nMDWDZc+DbWM7XCEJNNCEYyinRmrvGP7EWhmp4is=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cXH4688amcDc8kZOJq4UP8cE3R58Zl7e+Qo/1jyspps=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "uURBxvTp3FBCVkd+LPqyuY7d6rMW6SGIJQEPY/wtkZI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": 
"jG3hax1L3RBp9t38vUt53FsBxgr/+Si/vVISpAylYpE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kwtIW8MhH9Ky5xNjBx8gFA/SHh2YVphie7g5FGBzals=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "FHflwFuEMu4xX0ZApHi+pdlBH+oevAtXckCUb5Wv0xU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "0OKSXELxPP85SBVwDGf3LtMEQCJ8TTkFUl/+6jlkdb0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "25j9sQXZCihCmHKvTHgaBsAVZFcGPn7JjHdrCGlwyyw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "uEw0lpQtBppR3vqV9j9+NQRSBF1BzZukb8c9IhyWvxc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zVhZ7Q59O087ji49oMJvBIgeir2oqvUpnh4p53GcTow=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dowrzKs+qJhRMZyKDbhjXbuX43FbmUKOaw9I8YlOZDw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ep5B6cska6THLIF7Mn3tn3RvV9EiwLSt0eZM/CLRUDc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "URNp/YmmDh5wIZUfAzzgPyJeMNiVx9PMsz52DZRujGY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wlM4IAQhhKQEzoVqS8b1Ddd50GB95OFb9LnzOwyjCP4=", + "subType": "00" + } + } + ] + } + ] + } + } + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2-Range-Int-Aggregate.json b/test/client-side-encryption/spec/legacy/fle2-Range-Int-Aggregate.json new file mode 100644 index 0000000000..536415f3fe --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2-Range-Int-Aggregate.json @@ -0,0 +1,490 @@ +{ + "runOn": [ + { + "minServerVersion": "6.2.0", + "topology": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "encrypted_fields": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedInt", + "bsonType": "int", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberInt": "0" + }, + "max": { + "$numberInt": "200" + } + } + } + ] + }, + "key_vault_data": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ], + "tests": [ + { + "description": "FLE2 Range Int. 
Aggregate.", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedInt": { + "$gt": { + "$numberInt": "0" + } + } + } + } + ] + }, + "result": [ + { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + ] + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedInt": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedInt", + "bsonType": "int", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberInt": "0" + }, + "max": { + "$numberInt": "200" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedInt": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedInt", + "bsonType": "int", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberInt": "0" + }, + "max": { + "$numberInt": "200" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "aggregate": "default", + "pipeline": [ + { + "$match": { + "encryptedInt": { + "$gt": { + "$binary": { + "base64": 
"CnEFAAADcGF5bG9hZABBBQAABGcABQUAAAMwAH0AAAAFZAAgAAAAALGGQ/CRD+pGLD53BZzWcCcYbuGLVEyjzXIx7b+ux/q2BXMAIAAAAACOC6mXEZL27P9hethZbtKYsTXKK+FpgQ9Axxmn9N/cCwVjACAAAAAAKs6X8gaa+AJ6PpxVicsk7iCXpSYIkQ5zB0KnXqlfCWgAAzEAfQAAAAVkACAAAAAAtL3QIvnZqCF72eS6lKr8ilff7R6kiNklokiTuaU5wNsFcwAgAAAAAEtqr3/X731VB+VrbFcY8ZrJKRo2E0Fd+C8L0EMNcvcCBWMAIAAAAABYTWOKsDZ7cgPhiKDR2bE5CFGz4MuEwZbzZ0z46eGzBQADMgB9AAAABWQAIAAAAAB99ZW/7KwXKzl5M3XQsAJ3JbEef90IoxFYBArNiYzlgQVzACAAAAAAYO/qaw0+92HAryxOUG7iK6hnIy3OaUA9jIqtHdvcq8YFYwAgAAAAABSqgKlxv5LXoxUO3zNmqGztlZhL2S150WiR/eycv7CWAAMzAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVjACAAAAAAfsofSP7nQHv8ic8ZW0aNlWxplS46Z+mywPR4rQk+wcgAAzQAfQAAAAVkACAAAAAAV22FGF7ZDwK/EYiGNMlm/QuT3saQdyJM/Fn+ZyQug1oFcwAgAAAAACo7GwCvbcs5UHQMgds9/1QMklEVdjZigpuOFGrDmmxtBWMAIAAAAADu3L42J/oqNLe/XNYpodf2fhmPdcLhqO4J24o0hiveXwADNQB9AAAABWQAIAAAAADFspsMG7yHjKppyllon1KqAsTrHaZ6JzNqnSz8o6iTvwVzACAAAAAAeiA5pqVIQQ9s6UY/P8v5Jjkl3I7iFNeLDYehikrINrsFYwAgAAAAAAUljtCIvjRnLB2UPTEeRIz11Snl5SOBUNaXMXig4rqKAAM2AH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVjACAAAAAAD51NYesiO4Fo7w7iWBfqAFxEqkaDVctpvzZ28nT4SE8AAzcAfQAAAAVkACAAAAAAYfLwnoxK6XAGQrJFy8+TIJoq38ldBaO75h4zA4ZX5tQFcwAgAAAAAC2wk8UcJH5X5XGnDBYmel6srpBkzBhHtt3Jw1u5TSJ1BWMAIAAAAABJDtFhJ2tPbowp1UUmOCN/rqSqHRL1dtMu0c47vIlK4wADOAB9AAAABWQAIAAAAADGvyrtKkIcaV17ynZA7b2k5Pz6OhvxdWNkDvDWJIja8wVzACAAAAAAOLypVKNxf/wR1G8OZjUUsTQzDYeNNhhITxGMSp7euS4FYwAgAAAAAE6kvmXPqTnYIH4EJmNhy8OLVJZFOmdiBXLMorhELjKWAAM5AH0AAAAFZAAgAAAAAEocREw1L0g+roFUchJI2Yd0M0ME2bnErNUYnpyJP1SqBXMAIAAAAAAcE2/JK/8MoSeOchIuAkKh1X3ImoA7p8ujAZIfvIDo6QVjACAAAAAA85AiE+bNFAYQTXQAFexgeczdVhf8FUnf16WzJlI/kmsAAAVlACAAAAAA65pz95EthqQpfoHS9nWvdCh05AV+OokP7GUaI+7j8+wSY20AAAAAAAAAAAAAEHBheWxvYWRJZAAAAAAAEGZpcnN0T3BlcmF0b3IAAQAAAAA=", + "subType": "06" + } + } + } + } + } + ], + "cursor": {}, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedInt", + "bsonType": "int", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberInt": "0" + }, + "max": { + "$numberInt": "200" + } + } + } + ] + } + } + } + }, + "command_name": "aggregate" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 0, + "encryptedInt": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "5nRutVIyq7URVOVtbE4vM01APSIajAVnsShMwjBlzkM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "RjBYT2h3ZAoHxhf8DU6/dFbDkEBZp0IxREcsRTu2MXs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "x7GR49EN0t3WXQDihkrbonK7qNIBYC87tpL/XEUyIYc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JfYUqWF+OoGjiYkRI4L5iPlF+T1Eleul7Fki22jp4Qc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q1RyGfIgsaQHoZFRw+DD28V26rN5hweApPLwExncvT8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "L2PFeKGvLS6C+DLudR6fGlBq3ERPvjWvRyNRIA2HVb0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CWxaNqL3iP1yCixDkcmf9bmW3E5VeN8TJkg1jJe528s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+vC6araOEo+fpW7PSIP40/EnzBCj1d2N10Jr3rrXJJM=", + "subType": "00" + } + }, + { + 
"$binary": { + "base64": "6SV63Mf51Z6A6p2X3rCnJKCu6ku3Oeb45mBYbz+IoAo=", + "subType": "00" + } + } + ] + }, + { + "_id": 1, + "encryptedInt": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "bE1vqWj3KNyM7cCYUv/cnYm8BPaUL3eMp5syTHq6NF4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "25j9sQXZCihCmHKvTHgaBsAVZFcGPn7JjHdrCGlwyyw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "FA74j21GUEJb1DJBOpR9nVnjaDZnd8yAQNuaW9Qi26g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kJv//KVkbrobIBf+QeWC5jxn20mx/P0R1N6aCSMgKM8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zB+Whi9IUUGxfLEe+lGuIzLX4LFbIhaIAm5lRk65QTc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ybO1QU3CgvhO8JgRXH+HxKszWcpl5aGDYYVa75fHa1g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "X3Y3eSAbbMg//JgiHHiFpYOpV61t8kkDexI+CQyitH4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SlNHXyqVFGDPrX/2ppwog6l4pwj3PKda2TkZbqgfSfA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "McjV8xwTF3xI7863DYOBdyvIv6UpzThl6v9vBRk05bI=", + "subType": "00" + } + } + ] + } + ] + } + } + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2-Range-Int-Correctness.json b/test/client-side-encryption/spec/legacy/fle2-Range-Int-Correctness.json new file mode 100644 index 0000000000..6abd773da8 --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2-Range-Int-Correctness.json @@ -0,0 +1,1644 @@ +{ + "runOn": [ + { + "minServerVersion": "6.2.0", + "topology": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "encrypted_fields": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedInt", + "bsonType": "int", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberInt": "0" + }, + "max": { + "$numberInt": "200" + } + } + } + ] + }, + "key_vault_data": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ], + "tests": [ + { + "description": "Find with $gt", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": 
{ + "encryptedInt": { + "$gt": { + "$numberInt": "0" + } + } + } + }, + "result": [ + { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + ] + } + ] + }, + { + "description": "Find with $gte", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedInt": { + "$gte": { + "$numberInt": "0" + } + } + }, + "sort": { + "_id": 1 + } + }, + "result": [ + { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + }, + { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + ] + } + ] + }, + { + "description": "Find with $gt with no results", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedInt": { + "$gt": { + "$numberInt": "1" + } + } + } + }, + "result": [] + } + ] + }, + { + "description": "Find with $lt", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedInt": { + "$lt": { + "$numberInt": "1" + } + } + } + }, + "result": [ + { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + } + ] + } + ] + }, + { + "description": "Find with $lte", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedInt": { + "$lte": { + "$numberInt": "1" + } + } + }, + "sort": { + "_id": 1 + } + }, + "result": [ + { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + }, + { + "_id": 1, + "encryptedInt": { + "$numberInt": 
"1" + } + } + ] + } + ] + }, + { + "description": "Find with $lt below min", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedInt": { + "$lt": { + "$numberInt": "0" + } + } + } + }, + "result": { + "errorContains": "must be greater than the range minimum" + } + } + ] + }, + { + "description": "Find with $gt above max", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedInt": { + "$gt": { + "$numberInt": "200" + } + } + } + }, + "result": { + "errorContains": "must be less than the range maximum" + } + } + ] + }, + { + "description": "Find with $gt and $lt", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedInt": { + "$gt": { + "$numberInt": "0" + }, + "$lt": { + "$numberInt": "2" + } + } + } + }, + "result": [ + { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + ] + } + ] + }, + { + "description": "Find with equality", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedInt": { + "$numberInt": "0" + } + } + }, + "result": [ + { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + } + ] + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedInt": { + "$numberInt": "1" + } + } + }, + "result": [ + { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + ] + } + ] + }, + { + 
"description": "Find with full range", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedInt": { + "$gte": { + "$numberInt": "0" + }, + "$lte": { + "$numberInt": "200" + } + } + }, + "sort": { + "_id": 1 + } + }, + "result": [ + { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + }, + { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + ] + } + ] + }, + { + "description": "Find with $in", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedInt": { + "$in": [ + { + "$numberInt": "0" + } + ] + } + } + }, + "result": [ + { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + } + ] + } + ] + }, + { + "description": "Insert out of range", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedInt": { + "$numberInt": "-1" + } + } + }, + "result": { + "errorContains": "value must be greater than or equal to the minimum value" + } + } + ] + }, + { + "description": "Insert min and max", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 200, + "encryptedInt": { + "$numberInt": "200" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": {}, + "sort": { + "_id": 1 + } + }, + "result": [ + { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + }, + { + "_id": 200, + "encryptedInt": { + "$numberInt": "200" + } + } + ] + } + ] + }, + { + "description": "Aggregate with $gte", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } 
+ } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedInt": { + "$gte": { + "$numberInt": "0" + } + } + } + }, + { + "$sort": { + "_id": 1 + } + } + ] + }, + "result": [ + { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + }, + { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + ] + } + ] + }, + { + "description": "Aggregate with $gt with no results", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedInt": { + "$gt": { + "$numberInt": "1" + } + } + } + } + ] + }, + "result": [] + } + ] + }, + { + "description": "Aggregate with $lt", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedInt": { + "$lt": { + "$numberInt": "1" + } + } + } + } + ] + }, + "result": [ + { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + } + ] + } + ] + }, + { + "description": "Aggregate with $lte", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedInt": { + "$lte": { + "$numberInt": "1" + } + } + } + }, + { + "$sort": { + "_id": 1 + } + } + ] + }, + "result": [ + { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + }, + { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + ] + } + ] + }, + { + "description": "Aggregate with $lt below min", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": 
"Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedInt": { + "$lt": { + "$numberInt": "0" + } + } + } + } + ] + }, + "result": { + "errorContains": "must be greater than the range minimum" + } + } + ] + }, + { + "description": "Aggregate with $gt above max", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedInt": { + "$gt": { + "$numberInt": "200" + } + } + } + } + ] + }, + "result": { + "errorContains": "must be less than the range maximum" + } + } + ] + }, + { + "description": "Aggregate with $gt and $lt", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedInt": { + "$gt": { + "$numberInt": "0" + }, + "$lt": { + "$numberInt": "2" + } + } + } + } + ] + }, + "result": [ + { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + ] + } + ] + }, + { + "description": "Aggregate with equality", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedInt": { + "$numberInt": "0" + } + } + } + ] + }, + "result": [ + { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + } + ] + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedInt": { + "$numberInt": "1" + } + } + } + ] + }, + "result": [ + { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + ] + } + ] + }, + { + 
"description": "Aggregate with full range", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedInt": { + "$gte": { + "$numberInt": "0" + }, + "$lte": { + "$numberInt": "200" + } + } + } + }, + { + "$sort": { + "_id": 1 + } + } + ] + }, + "result": [ + { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + }, + { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + ] + } + ] + }, + { + "description": "Aggregate with $in", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedInt": { + "$in": [ + { + "$numberInt": "0" + } + ] + } + } + } + ] + }, + "result": [ + { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + } + ] + } + ] + }, + { + "description": "Wrong type: Insert Double", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedInt": { + "$numberDouble": "0" + } + } + }, + "result": { + "errorContains": "cannot encrypt element" + } + } + ] + }, + { + "description": "Wrong type: Find Double", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "find", + "arguments": { + "filter": { + "encryptedInt": { + "$gte": { + "$numberDouble": "0" + } + } + } + }, + "result": { + "errorContains": "field type is not supported" + } + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2-Range-Int-Delete.json b/test/client-side-encryption/spec/legacy/fle2-Range-Int-Delete.json new file mode 100644 index 0000000000..9d5bff1d19 --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2-Range-Int-Delete.json @@ -0,0 +1,437 @@ +{ + "runOn": [ + { + "minServerVersion": "6.2.0", + "topology": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "encrypted_fields": { + "escCollection": 
"enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedInt", + "bsonType": "int", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberInt": "0" + }, + "max": { + "$numberInt": "200" + } + } + } + ] + }, + "key_vault_data": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ], + "tests": [ + { + "description": "FLE2 Range Int. Delete.", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + } + }, + { + "name": "deleteOne", + "arguments": { + "filter": { + "encryptedInt": { + "$gt": { + "$numberInt": "0" + } + } + } + }, + "result": { + "deletedCount": 1 + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedInt": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedInt", + "bsonType": "int", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberInt": "0" + }, + "max": { + "$numberInt": "200" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedInt": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + 
"default.default": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedInt", + "bsonType": "int", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberInt": "0" + }, + "max": { + "$numberInt": "200" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "delete": "default", + "deletes": [ + { + "q": { + "encryptedInt": { + "$gt": { + "$binary": { + "base64": "CnEFAAADcGF5bG9hZABBBQAABGcABQUAAAMwAH0AAAAFZAAgAAAAALGGQ/CRD+pGLD53BZzWcCcYbuGLVEyjzXIx7b+ux/q2BXMAIAAAAACOC6mXEZL27P9hethZbtKYsTXKK+FpgQ9Axxmn9N/cCwVjACAAAAAAKs6X8gaa+AJ6PpxVicsk7iCXpSYIkQ5zB0KnXqlfCWgAAzEAfQAAAAVkACAAAAAAtL3QIvnZqCF72eS6lKr8ilff7R6kiNklokiTuaU5wNsFcwAgAAAAAEtqr3/X731VB+VrbFcY8ZrJKRo2E0Fd+C8L0EMNcvcCBWMAIAAAAABYTWOKsDZ7cgPhiKDR2bE5CFGz4MuEwZbzZ0z46eGzBQADMgB9AAAABWQAIAAAAAB99ZW/7KwXKzl5M3XQsAJ3JbEef90IoxFYBArNiYzlgQVzACAAAAAAYO/qaw0+92HAryxOUG7iK6hnIy3OaUA9jIqtHdvcq8YFYwAgAAAAABSqgKlxv5LXoxUO3zNmqGztlZhL2S150WiR/eycv7CWAAMzAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVjACAAAAAAfsofSP7nQHv8ic8ZW0aNlWxplS46Z+mywPR4rQk+wcgAAzQAfQAAAAVkACAAAAAAV22FGF7ZDwK/EYiGNMlm/QuT3saQdyJM/Fn+ZyQug1oFcwAgAAAAACo7GwCvbcs5UHQMgds9/1QMklEVdjZigpuOFGrDmmxtBWMAIAAAAADu3L42J/oqNLe/XNYpodf2fhmPdcLhqO4J24o0hiveXwADNQB9AAAABWQAIAAAAADFspsMG7yHjKppyllon1KqAsTrHaZ6JzNqnSz8o6iTvwVzACAAAAAAeiA5pqVIQQ9s6UY/P8v5Jjkl3I7iFNeLDYehikrINrsFYwAgAAAAAAUljtCIvjRnLB2UPTEeRIz11Snl5SOBUNaXMXig4rqKAAM2AH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVjACAAAAAAD51NYesiO4Fo7w7iWBfqAFxEqkaDVctpvzZ28nT4SE8AAzcAfQAAAAVkACAAAAAAYfLwnoxK6XAGQrJFy8+TIJoq38ldBaO75h4zA4ZX5tQFcwAgAAAAAC2wk8UcJH5X5XGnDBYmel6srpBkzBhHtt3Jw1u5TSJ1BWMAIAAAAABJDtFhJ2tPbowp1UUmOCN/rqSqHRL1dtMu0c47vIlK4wADOAB9AAAABWQAIAAAAADGvyrtKkIcaV17ynZA7b2k5Pz6OhvxdWNkDvDWJIja8wVzACAAAAAAOLypVKNxf/wR1G8OZjUUsTQzDYeNNhhITxGMSp7euS4FYwAgAAAAAE6kvmXPqTnYIH4EJmNhy8OLVJZFOmdiBXLMorhELjKWAAM5AH0AAAAFZAAgAAAAAEocREw1L0g+roFUchJI2Yd0M0ME2bnErNUYnpyJP1SqBXMAIAAAAAAcE2/JK/8MoSeOchIuAkKh1X3ImoA7p8ujAZIfvIDo6QVjACAAAAAA85AiE+bNFAYQTXQAFexgeczdVhf8FUnf16WzJlI/kmsAAAVlACAAAAAA65pz95EthqQpfoHS9nWvdCh05AV+OokP7GUaI+7j8+wSY20AAAAAAAAAAAAAEHBheWxvYWRJZAAAAAAAEGZpcnN0T3BlcmF0b3IAAQAAAAA=", + "subType": "06" + } + } + } + }, + "limit": 1 + } + ], + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedInt", + "bsonType": "int", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberInt": "0" + }, + "max": { + "$numberInt": "200" + } + } + } + ] + } + }, + "deleteTokens": { + "default.default": { + "encryptedInt": { + "e": { + "$binary": { + "base64": "65pz95EthqQpfoHS9nWvdCh05AV+OokP7GUaI+7j8+w=", + "subType": "00" + } + }, + "o": { + "$binary": { + "base64": "noN+05JsuO1oDg59yypIGj45i+eFH6HOTXOPpeZ//Mk=", + "subType": "00" + } + } + } + } + } + } + }, 
+ "command_name": "delete" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 0, + "encryptedInt": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "5nRutVIyq7URVOVtbE4vM01APSIajAVnsShMwjBlzkM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "RjBYT2h3ZAoHxhf8DU6/dFbDkEBZp0IxREcsRTu2MXs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "x7GR49EN0t3WXQDihkrbonK7qNIBYC87tpL/XEUyIYc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JfYUqWF+OoGjiYkRI4L5iPlF+T1Eleul7Fki22jp4Qc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q1RyGfIgsaQHoZFRw+DD28V26rN5hweApPLwExncvT8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "L2PFeKGvLS6C+DLudR6fGlBq3ERPvjWvRyNRIA2HVb0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CWxaNqL3iP1yCixDkcmf9bmW3E5VeN8TJkg1jJe528s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+vC6araOEo+fpW7PSIP40/EnzBCj1d2N10Jr3rrXJJM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6SV63Mf51Z6A6p2X3rCnJKCu6ku3Oeb45mBYbz+IoAo=", + "subType": "00" + } + } + ] + } + ] + } + } + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2-Range-Int-FindOneAndUpdate.json b/test/client-side-encryption/spec/legacy/fle2-Range-Int-FindOneAndUpdate.json new file mode 100644 index 0000000000..4bf57700c9 --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2-Range-Int-FindOneAndUpdate.json @@ -0,0 +1,512 @@ +{ + "runOn": [ + { + "minServerVersion": "6.2.0", + "topology": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "encrypted_fields": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedInt", + "bsonType": "int", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberInt": "0" + }, + "max": { + "$numberInt": "200" + } + } + } + ] + }, + "key_vault_data": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ], + "tests": [ + { + "description": "FLE2 Range Int. 
FindOneAndUpdate.", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + } + }, + { + "name": "findOneAndUpdate", + "arguments": { + "filter": { + "encryptedInt": { + "$gt": { + "$numberInt": "0" + } + } + }, + "update": { + "$set": { + "encryptedInt": { + "$numberInt": "2" + } + } + }, + "returnDocument": "Before" + }, + "result": { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedInt": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedInt", + "bsonType": "int", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberInt": "0" + }, + "max": { + "$numberInt": "200" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedInt": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedInt", + "bsonType": "int", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberInt": "0" + }, + "max": { + "$numberInt": "200" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "findAndModify": "default", + "query": { + "encryptedInt": { + "$gt": { + "$binary": { + "base64": 
"CnEFAAADcGF5bG9hZABBBQAABGcABQUAAAMwAH0AAAAFZAAgAAAAALGGQ/CRD+pGLD53BZzWcCcYbuGLVEyjzXIx7b+ux/q2BXMAIAAAAACOC6mXEZL27P9hethZbtKYsTXKK+FpgQ9Axxmn9N/cCwVjACAAAAAAKs6X8gaa+AJ6PpxVicsk7iCXpSYIkQ5zB0KnXqlfCWgAAzEAfQAAAAVkACAAAAAAtL3QIvnZqCF72eS6lKr8ilff7R6kiNklokiTuaU5wNsFcwAgAAAAAEtqr3/X731VB+VrbFcY8ZrJKRo2E0Fd+C8L0EMNcvcCBWMAIAAAAABYTWOKsDZ7cgPhiKDR2bE5CFGz4MuEwZbzZ0z46eGzBQADMgB9AAAABWQAIAAAAAB99ZW/7KwXKzl5M3XQsAJ3JbEef90IoxFYBArNiYzlgQVzACAAAAAAYO/qaw0+92HAryxOUG7iK6hnIy3OaUA9jIqtHdvcq8YFYwAgAAAAABSqgKlxv5LXoxUO3zNmqGztlZhL2S150WiR/eycv7CWAAMzAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVjACAAAAAAfsofSP7nQHv8ic8ZW0aNlWxplS46Z+mywPR4rQk+wcgAAzQAfQAAAAVkACAAAAAAV22FGF7ZDwK/EYiGNMlm/QuT3saQdyJM/Fn+ZyQug1oFcwAgAAAAACo7GwCvbcs5UHQMgds9/1QMklEVdjZigpuOFGrDmmxtBWMAIAAAAADu3L42J/oqNLe/XNYpodf2fhmPdcLhqO4J24o0hiveXwADNQB9AAAABWQAIAAAAADFspsMG7yHjKppyllon1KqAsTrHaZ6JzNqnSz8o6iTvwVzACAAAAAAeiA5pqVIQQ9s6UY/P8v5Jjkl3I7iFNeLDYehikrINrsFYwAgAAAAAAUljtCIvjRnLB2UPTEeRIz11Snl5SOBUNaXMXig4rqKAAM2AH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVjACAAAAAAD51NYesiO4Fo7w7iWBfqAFxEqkaDVctpvzZ28nT4SE8AAzcAfQAAAAVkACAAAAAAYfLwnoxK6XAGQrJFy8+TIJoq38ldBaO75h4zA4ZX5tQFcwAgAAAAAC2wk8UcJH5X5XGnDBYmel6srpBkzBhHtt3Jw1u5TSJ1BWMAIAAAAABJDtFhJ2tPbowp1UUmOCN/rqSqHRL1dtMu0c47vIlK4wADOAB9AAAABWQAIAAAAADGvyrtKkIcaV17ynZA7b2k5Pz6OhvxdWNkDvDWJIja8wVzACAAAAAAOLypVKNxf/wR1G8OZjUUsTQzDYeNNhhITxGMSp7euS4FYwAgAAAAAE6kvmXPqTnYIH4EJmNhy8OLVJZFOmdiBXLMorhELjKWAAM5AH0AAAAFZAAgAAAAAEocREw1L0g+roFUchJI2Yd0M0ME2bnErNUYnpyJP1SqBXMAIAAAAAAcE2/JK/8MoSeOchIuAkKh1X3ImoA7p8ujAZIfvIDo6QVjACAAAAAA85AiE+bNFAYQTXQAFexgeczdVhf8FUnf16WzJlI/kmsAAAVlACAAAAAA65pz95EthqQpfoHS9nWvdCh05AV+OokP7GUaI+7j8+wSY20AAAAAAAAAAAAAEHBheWxvYWRJZAAAAAAAEGZpcnN0T3BlcmF0b3IAAQAAAAA=", + "subType": "06" + } + } + } + }, + "update": { + "$set": { + "encryptedInt": { + "$$type": "binData" + } + } + }, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedInt", + "bsonType": "int", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberInt": "0" + }, + "max": { + "$numberInt": "200" + } + } + } + ] + } + }, + "deleteTokens": { + "default.default": { + "encryptedInt": { + "e": { + "$binary": { + "base64": "65pz95EthqQpfoHS9nWvdCh05AV+OokP7GUaI+7j8+w=", + "subType": "00" + } + }, + "o": { + "$binary": { + "base64": "noN+05JsuO1oDg59yypIGj45i+eFH6HOTXOPpeZ//Mk=", + "subType": "00" + } + } + } + } + } + } + }, + "command_name": "findAndModify" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 0, + "encryptedInt": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "5nRutVIyq7URVOVtbE4vM01APSIajAVnsShMwjBlzkM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "RjBYT2h3ZAoHxhf8DU6/dFbDkEBZp0IxREcsRTu2MXs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "x7GR49EN0t3WXQDihkrbonK7qNIBYC87tpL/XEUyIYc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JfYUqWF+OoGjiYkRI4L5iPlF+T1Eleul7Fki22jp4Qc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": 
"q1RyGfIgsaQHoZFRw+DD28V26rN5hweApPLwExncvT8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "L2PFeKGvLS6C+DLudR6fGlBq3ERPvjWvRyNRIA2HVb0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CWxaNqL3iP1yCixDkcmf9bmW3E5VeN8TJkg1jJe528s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+vC6araOEo+fpW7PSIP40/EnzBCj1d2N10Jr3rrXJJM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6SV63Mf51Z6A6p2X3rCnJKCu6ku3Oeb45mBYbz+IoAo=", + "subType": "00" + } + } + ] + }, + { + "_id": 1, + "encryptedInt": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "DLCAJs+W2PL2DV5YChCL6dYrQNr+j4p3L7xhVaub4ic=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "hyDcE6QQjPrYJaIS/n7evEZFYcm31Tj89CpEYGF45cI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "F08nMDWDZc+DbWM7XCEJNNCEYyinRmrvGP7EWhmp4is=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cXH4688amcDc8kZOJq4UP8cE3R58Zl7e+Qo/1jyspps=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "uURBxvTp3FBCVkd+LPqyuY7d6rMW6SGIJQEPY/wtkZI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "jG3hax1L3RBp9t38vUt53FsBxgr/+Si/vVISpAylYpE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kwtIW8MhH9Ky5xNjBx8gFA/SHh2YVphie7g5FGBzals=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "FHflwFuEMu4xX0ZApHi+pdlBH+oevAtXckCUb5Wv0xU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ty4cnzJdAlbQKnh7px3GEYjBnvO+jIOaKjoTRDtmh3M=", + "subType": "00" + } + } + ] + } + ] + } + } + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2-Range-Int-InsertFind.json b/test/client-side-encryption/spec/legacy/fle2-Range-Int-InsertFind.json new file mode 100644 index 0000000000..6f6022e749 --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2-Range-Int-InsertFind.json @@ -0,0 +1,481 @@ +{ + "runOn": [ + { + "minServerVersion": "6.2.0", + "topology": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "encrypted_fields": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedInt", + "bsonType": "int", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberInt": "0" + }, + "max": { + "$numberInt": "200" + } + } + } + ] + }, + "key_vault_data": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ], + "tests": [ + { + "description": "FLE2 Range Int. 
Insert and Find.", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedInt": { + "$gt": { + "$numberInt": "0" + } + } + } + }, + "result": [ + { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + ] + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedInt": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedInt", + "bsonType": "int", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberInt": "0" + }, + "max": { + "$numberInt": "200" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedInt": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedInt", + "bsonType": "int", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberInt": "0" + }, + "max": { + "$numberInt": "200" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "find": "default", + "filter": { + "encryptedInt": { + "$gt": { + "$binary": { + "base64": 
"CnEFAAADcGF5bG9hZABBBQAABGcABQUAAAMwAH0AAAAFZAAgAAAAALGGQ/CRD+pGLD53BZzWcCcYbuGLVEyjzXIx7b+ux/q2BXMAIAAAAACOC6mXEZL27P9hethZbtKYsTXKK+FpgQ9Axxmn9N/cCwVjACAAAAAAKs6X8gaa+AJ6PpxVicsk7iCXpSYIkQ5zB0KnXqlfCWgAAzEAfQAAAAVkACAAAAAAtL3QIvnZqCF72eS6lKr8ilff7R6kiNklokiTuaU5wNsFcwAgAAAAAEtqr3/X731VB+VrbFcY8ZrJKRo2E0Fd+C8L0EMNcvcCBWMAIAAAAABYTWOKsDZ7cgPhiKDR2bE5CFGz4MuEwZbzZ0z46eGzBQADMgB9AAAABWQAIAAAAAB99ZW/7KwXKzl5M3XQsAJ3JbEef90IoxFYBArNiYzlgQVzACAAAAAAYO/qaw0+92HAryxOUG7iK6hnIy3OaUA9jIqtHdvcq8YFYwAgAAAAABSqgKlxv5LXoxUO3zNmqGztlZhL2S150WiR/eycv7CWAAMzAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVjACAAAAAAfsofSP7nQHv8ic8ZW0aNlWxplS46Z+mywPR4rQk+wcgAAzQAfQAAAAVkACAAAAAAV22FGF7ZDwK/EYiGNMlm/QuT3saQdyJM/Fn+ZyQug1oFcwAgAAAAACo7GwCvbcs5UHQMgds9/1QMklEVdjZigpuOFGrDmmxtBWMAIAAAAADu3L42J/oqNLe/XNYpodf2fhmPdcLhqO4J24o0hiveXwADNQB9AAAABWQAIAAAAADFspsMG7yHjKppyllon1KqAsTrHaZ6JzNqnSz8o6iTvwVzACAAAAAAeiA5pqVIQQ9s6UY/P8v5Jjkl3I7iFNeLDYehikrINrsFYwAgAAAAAAUljtCIvjRnLB2UPTEeRIz11Snl5SOBUNaXMXig4rqKAAM2AH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVjACAAAAAAD51NYesiO4Fo7w7iWBfqAFxEqkaDVctpvzZ28nT4SE8AAzcAfQAAAAVkACAAAAAAYfLwnoxK6XAGQrJFy8+TIJoq38ldBaO75h4zA4ZX5tQFcwAgAAAAAC2wk8UcJH5X5XGnDBYmel6srpBkzBhHtt3Jw1u5TSJ1BWMAIAAAAABJDtFhJ2tPbowp1UUmOCN/rqSqHRL1dtMu0c47vIlK4wADOAB9AAAABWQAIAAAAADGvyrtKkIcaV17ynZA7b2k5Pz6OhvxdWNkDvDWJIja8wVzACAAAAAAOLypVKNxf/wR1G8OZjUUsTQzDYeNNhhITxGMSp7euS4FYwAgAAAAAE6kvmXPqTnYIH4EJmNhy8OLVJZFOmdiBXLMorhELjKWAAM5AH0AAAAFZAAgAAAAAEocREw1L0g+roFUchJI2Yd0M0ME2bnErNUYnpyJP1SqBXMAIAAAAAAcE2/JK/8MoSeOchIuAkKh1X3ImoA7p8ujAZIfvIDo6QVjACAAAAAA85AiE+bNFAYQTXQAFexgeczdVhf8FUnf16WzJlI/kmsAAAVlACAAAAAA65pz95EthqQpfoHS9nWvdCh05AV+OokP7GUaI+7j8+wSY20AAAAAAAAAAAAAEHBheWxvYWRJZAAAAAAAEGZpcnN0T3BlcmF0b3IAAQAAAAA=", + "subType": "06" + } + } + } + }, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedInt", + "bsonType": "int", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberInt": "0" + }, + "max": { + "$numberInt": "200" + } + } + } + ] + } + } + } + }, + "command_name": "find" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 0, + "encryptedInt": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "5nRutVIyq7URVOVtbE4vM01APSIajAVnsShMwjBlzkM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "RjBYT2h3ZAoHxhf8DU6/dFbDkEBZp0IxREcsRTu2MXs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "x7GR49EN0t3WXQDihkrbonK7qNIBYC87tpL/XEUyIYc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JfYUqWF+OoGjiYkRI4L5iPlF+T1Eleul7Fki22jp4Qc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q1RyGfIgsaQHoZFRw+DD28V26rN5hweApPLwExncvT8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "L2PFeKGvLS6C+DLudR6fGlBq3ERPvjWvRyNRIA2HVb0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CWxaNqL3iP1yCixDkcmf9bmW3E5VeN8TJkg1jJe528s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+vC6araOEo+fpW7PSIP40/EnzBCj1d2N10Jr3rrXJJM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": 
"6SV63Mf51Z6A6p2X3rCnJKCu6ku3Oeb45mBYbz+IoAo=", + "subType": "00" + } + } + ] + }, + { + "_id": 1, + "encryptedInt": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "bE1vqWj3KNyM7cCYUv/cnYm8BPaUL3eMp5syTHq6NF4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "25j9sQXZCihCmHKvTHgaBsAVZFcGPn7JjHdrCGlwyyw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "FA74j21GUEJb1DJBOpR9nVnjaDZnd8yAQNuaW9Qi26g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kJv//KVkbrobIBf+QeWC5jxn20mx/P0R1N6aCSMgKM8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zB+Whi9IUUGxfLEe+lGuIzLX4LFbIhaIAm5lRk65QTc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ybO1QU3CgvhO8JgRXH+HxKszWcpl5aGDYYVa75fHa1g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "X3Y3eSAbbMg//JgiHHiFpYOpV61t8kkDexI+CQyitH4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SlNHXyqVFGDPrX/2ppwog6l4pwj3PKda2TkZbqgfSfA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "McjV8xwTF3xI7863DYOBdyvIv6UpzThl6v9vBRk05bI=", + "subType": "00" + } + } + ] + } + ] + } + } + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2-Range-Int-Update.json b/test/client-side-encryption/spec/legacy/fle2-Range-Int-Update.json new file mode 100644 index 0000000000..17d23b957f --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2-Range-Int-Update.json @@ -0,0 +1,516 @@ +{ + "runOn": [ + { + "minServerVersion": "6.2.0", + "topology": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "encrypted_fields": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedInt", + "bsonType": "int", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberInt": "0" + }, + "max": { + "$numberInt": "200" + } + } + } + ] + }, + "key_vault_data": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ], + "tests": [ + { + "description": "FLE2 Range Int. 
Update.", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + } + }, + { + "name": "updateOne", + "arguments": { + "filter": { + "encryptedInt": { + "$gt": { + "$numberInt": "0" + } + } + }, + "update": { + "$set": { + "encryptedInt": { + "$numberInt": "2" + } + } + } + }, + "result": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0 + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedInt": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedInt", + "bsonType": "int", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberInt": "0" + }, + "max": { + "$numberInt": "200" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedInt": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedInt", + "bsonType": "int", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberInt": "0" + }, + "max": { + "$numberInt": "200" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command_name": "update", + "command": { + "update": "default", + "ordered": true, + "updates": [ + { + "q": { + "encryptedInt": { + "$gt": { + "$binary": { + "base64": 
"CnEFAAADcGF5bG9hZABBBQAABGcABQUAAAMwAH0AAAAFZAAgAAAAALGGQ/CRD+pGLD53BZzWcCcYbuGLVEyjzXIx7b+ux/q2BXMAIAAAAACOC6mXEZL27P9hethZbtKYsTXKK+FpgQ9Axxmn9N/cCwVjACAAAAAAKs6X8gaa+AJ6PpxVicsk7iCXpSYIkQ5zB0KnXqlfCWgAAzEAfQAAAAVkACAAAAAAtL3QIvnZqCF72eS6lKr8ilff7R6kiNklokiTuaU5wNsFcwAgAAAAAEtqr3/X731VB+VrbFcY8ZrJKRo2E0Fd+C8L0EMNcvcCBWMAIAAAAABYTWOKsDZ7cgPhiKDR2bE5CFGz4MuEwZbzZ0z46eGzBQADMgB9AAAABWQAIAAAAAB99ZW/7KwXKzl5M3XQsAJ3JbEef90IoxFYBArNiYzlgQVzACAAAAAAYO/qaw0+92HAryxOUG7iK6hnIy3OaUA9jIqtHdvcq8YFYwAgAAAAABSqgKlxv5LXoxUO3zNmqGztlZhL2S150WiR/eycv7CWAAMzAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVjACAAAAAAfsofSP7nQHv8ic8ZW0aNlWxplS46Z+mywPR4rQk+wcgAAzQAfQAAAAVkACAAAAAAV22FGF7ZDwK/EYiGNMlm/QuT3saQdyJM/Fn+ZyQug1oFcwAgAAAAACo7GwCvbcs5UHQMgds9/1QMklEVdjZigpuOFGrDmmxtBWMAIAAAAADu3L42J/oqNLe/XNYpodf2fhmPdcLhqO4J24o0hiveXwADNQB9AAAABWQAIAAAAADFspsMG7yHjKppyllon1KqAsTrHaZ6JzNqnSz8o6iTvwVzACAAAAAAeiA5pqVIQQ9s6UY/P8v5Jjkl3I7iFNeLDYehikrINrsFYwAgAAAAAAUljtCIvjRnLB2UPTEeRIz11Snl5SOBUNaXMXig4rqKAAM2AH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVjACAAAAAAD51NYesiO4Fo7w7iWBfqAFxEqkaDVctpvzZ28nT4SE8AAzcAfQAAAAVkACAAAAAAYfLwnoxK6XAGQrJFy8+TIJoq38ldBaO75h4zA4ZX5tQFcwAgAAAAAC2wk8UcJH5X5XGnDBYmel6srpBkzBhHtt3Jw1u5TSJ1BWMAIAAAAABJDtFhJ2tPbowp1UUmOCN/rqSqHRL1dtMu0c47vIlK4wADOAB9AAAABWQAIAAAAADGvyrtKkIcaV17ynZA7b2k5Pz6OhvxdWNkDvDWJIja8wVzACAAAAAAOLypVKNxf/wR1G8OZjUUsTQzDYeNNhhITxGMSp7euS4FYwAgAAAAAE6kvmXPqTnYIH4EJmNhy8OLVJZFOmdiBXLMorhELjKWAAM5AH0AAAAFZAAgAAAAAEocREw1L0g+roFUchJI2Yd0M0ME2bnErNUYnpyJP1SqBXMAIAAAAAAcE2/JK/8MoSeOchIuAkKh1X3ImoA7p8ujAZIfvIDo6QVjACAAAAAA85AiE+bNFAYQTXQAFexgeczdVhf8FUnf16WzJlI/kmsAAAVlACAAAAAA65pz95EthqQpfoHS9nWvdCh05AV+OokP7GUaI+7j8+wSY20AAAAAAAAAAAAAEHBheWxvYWRJZAAAAAAAEGZpcnN0T3BlcmF0b3IAAQAAAAA=", + "subType": "06" + } + } + } + }, + "u": { + "$set": { + "encryptedInt": { + "$$type": "binData" + } + } + } + } + ], + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedInt", + "bsonType": "int", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberInt": "0" + }, + "max": { + "$numberInt": "200" + } + } + } + ] + } + }, + "deleteTokens": { + "default.default": { + "encryptedInt": { + "e": { + "$binary": { + "base64": "65pz95EthqQpfoHS9nWvdCh05AV+OokP7GUaI+7j8+w=", + "subType": "00" + } + }, + "o": { + "$binary": { + "base64": "noN+05JsuO1oDg59yypIGj45i+eFH6HOTXOPpeZ//Mk=", + "subType": "00" + } + } + } + } + } + }, + "$db": "default" + } + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 0, + "encryptedInt": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "5nRutVIyq7URVOVtbE4vM01APSIajAVnsShMwjBlzkM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "RjBYT2h3ZAoHxhf8DU6/dFbDkEBZp0IxREcsRTu2MXs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "x7GR49EN0t3WXQDihkrbonK7qNIBYC87tpL/XEUyIYc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JfYUqWF+OoGjiYkRI4L5iPlF+T1Eleul7Fki22jp4Qc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q1RyGfIgsaQHoZFRw+DD28V26rN5hweApPLwExncvT8=", + 
"subType": "00" + } + }, + { + "$binary": { + "base64": "L2PFeKGvLS6C+DLudR6fGlBq3ERPvjWvRyNRIA2HVb0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CWxaNqL3iP1yCixDkcmf9bmW3E5VeN8TJkg1jJe528s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+vC6araOEo+fpW7PSIP40/EnzBCj1d2N10Jr3rrXJJM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6SV63Mf51Z6A6p2X3rCnJKCu6ku3Oeb45mBYbz+IoAo=", + "subType": "00" + } + } + ] + }, + { + "_id": 1, + "encryptedInt": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "DLCAJs+W2PL2DV5YChCL6dYrQNr+j4p3L7xhVaub4ic=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "hyDcE6QQjPrYJaIS/n7evEZFYcm31Tj89CpEYGF45cI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "F08nMDWDZc+DbWM7XCEJNNCEYyinRmrvGP7EWhmp4is=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cXH4688amcDc8kZOJq4UP8cE3R58Zl7e+Qo/1jyspps=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "uURBxvTp3FBCVkd+LPqyuY7d6rMW6SGIJQEPY/wtkZI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "jG3hax1L3RBp9t38vUt53FsBxgr/+Si/vVISpAylYpE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kwtIW8MhH9Ky5xNjBx8gFA/SHh2YVphie7g5FGBzals=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "FHflwFuEMu4xX0ZApHi+pdlBH+oevAtXckCUb5Wv0xU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ty4cnzJdAlbQKnh7px3GEYjBnvO+jIOaKjoTRDtmh3M=", + "subType": "00" + } + } + ] + } + ] + } + } + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2-Range-Long-Aggregate.json b/test/client-side-encryption/spec/legacy/fle2-Range-Long-Aggregate.json new file mode 100644 index 0000000000..3f1c723bd2 --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2-Range-Long-Aggregate.json @@ -0,0 +1,490 @@ +{ + "runOn": [ + { + "minServerVersion": "6.2.0", + "topology": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "encrypted_fields": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedLong", + "bsonType": "long", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberLong": "0" + }, + "max": { + "$numberLong": "200" + } + } + } + ] + }, + "key_vault_data": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ], + "tests": [ + { + "description": "FLE2 Range Long. 
Aggregate.", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedLong": { + "$gt": { + "$numberLong": "0" + } + } + } + } + ] + }, + "result": [ + { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + ] + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedLong": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedLong", + "bsonType": "long", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberLong": "0" + }, + "max": { + "$numberLong": "200" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedLong": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedLong", + "bsonType": "long", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberLong": "0" + }, + "max": { + "$numberLong": "200" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "aggregate": "default", + "pipeline": [ + { + "$match": { + "encryptedLong": { + "$gt": { + "$binary": { + "base64": 
"CnEFAAADcGF5bG9hZABBBQAABGcABQUAAAMwAH0AAAAFZAAgAAAAALGGQ/CRD+pGLD53BZzWcCcYbuGLVEyjzXIx7b+ux/q2BXMAIAAAAACOC6mXEZL27P9hethZbtKYsTXKK+FpgQ9Axxmn9N/cCwVjACAAAAAAKs6X8gaa+AJ6PpxVicsk7iCXpSYIkQ5zB0KnXqlfCWgAAzEAfQAAAAVkACAAAAAAtL3QIvnZqCF72eS6lKr8ilff7R6kiNklokiTuaU5wNsFcwAgAAAAAEtqr3/X731VB+VrbFcY8ZrJKRo2E0Fd+C8L0EMNcvcCBWMAIAAAAABYTWOKsDZ7cgPhiKDR2bE5CFGz4MuEwZbzZ0z46eGzBQADMgB9AAAABWQAIAAAAAB99ZW/7KwXKzl5M3XQsAJ3JbEef90IoxFYBArNiYzlgQVzACAAAAAAYO/qaw0+92HAryxOUG7iK6hnIy3OaUA9jIqtHdvcq8YFYwAgAAAAABSqgKlxv5LXoxUO3zNmqGztlZhL2S150WiR/eycv7CWAAMzAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVjACAAAAAAfsofSP7nQHv8ic8ZW0aNlWxplS46Z+mywPR4rQk+wcgAAzQAfQAAAAVkACAAAAAAV22FGF7ZDwK/EYiGNMlm/QuT3saQdyJM/Fn+ZyQug1oFcwAgAAAAACo7GwCvbcs5UHQMgds9/1QMklEVdjZigpuOFGrDmmxtBWMAIAAAAADu3L42J/oqNLe/XNYpodf2fhmPdcLhqO4J24o0hiveXwADNQB9AAAABWQAIAAAAADFspsMG7yHjKppyllon1KqAsTrHaZ6JzNqnSz8o6iTvwVzACAAAAAAeiA5pqVIQQ9s6UY/P8v5Jjkl3I7iFNeLDYehikrINrsFYwAgAAAAAAUljtCIvjRnLB2UPTEeRIz11Snl5SOBUNaXMXig4rqKAAM2AH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVjACAAAAAAD51NYesiO4Fo7w7iWBfqAFxEqkaDVctpvzZ28nT4SE8AAzcAfQAAAAVkACAAAAAAYfLwnoxK6XAGQrJFy8+TIJoq38ldBaO75h4zA4ZX5tQFcwAgAAAAAC2wk8UcJH5X5XGnDBYmel6srpBkzBhHtt3Jw1u5TSJ1BWMAIAAAAABJDtFhJ2tPbowp1UUmOCN/rqSqHRL1dtMu0c47vIlK4wADOAB9AAAABWQAIAAAAADGvyrtKkIcaV17ynZA7b2k5Pz6OhvxdWNkDvDWJIja8wVzACAAAAAAOLypVKNxf/wR1G8OZjUUsTQzDYeNNhhITxGMSp7euS4FYwAgAAAAAE6kvmXPqTnYIH4EJmNhy8OLVJZFOmdiBXLMorhELjKWAAM5AH0AAAAFZAAgAAAAAEocREw1L0g+roFUchJI2Yd0M0ME2bnErNUYnpyJP1SqBXMAIAAAAAAcE2/JK/8MoSeOchIuAkKh1X3ImoA7p8ujAZIfvIDo6QVjACAAAAAA85AiE+bNFAYQTXQAFexgeczdVhf8FUnf16WzJlI/kmsAAAVlACAAAAAA65pz95EthqQpfoHS9nWvdCh05AV+OokP7GUaI+7j8+wSY20AAAAAAAAAAAAAEHBheWxvYWRJZAAAAAAAEGZpcnN0T3BlcmF0b3IAAQAAAAA=", + "subType": "06" + } + } + } + } + } + ], + "cursor": {}, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedLong", + "bsonType": "long", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberLong": "0" + }, + "max": { + "$numberLong": "200" + } + } + } + ] + } + } + } + }, + "command_name": "aggregate" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 0, + "encryptedLong": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "5nRutVIyq7URVOVtbE4vM01APSIajAVnsShMwjBlzkM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "RjBYT2h3ZAoHxhf8DU6/dFbDkEBZp0IxREcsRTu2MXs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "x7GR49EN0t3WXQDihkrbonK7qNIBYC87tpL/XEUyIYc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JfYUqWF+OoGjiYkRI4L5iPlF+T1Eleul7Fki22jp4Qc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q1RyGfIgsaQHoZFRw+DD28V26rN5hweApPLwExncvT8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "L2PFeKGvLS6C+DLudR6fGlBq3ERPvjWvRyNRIA2HVb0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CWxaNqL3iP1yCixDkcmf9bmW3E5VeN8TJkg1jJe528s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+vC6araOEo+fpW7PSIP40/EnzBCj1d2N10Jr3rrXJJM=", + "subType": "00" + } + }, + { 
+ "$binary": { + "base64": "6SV63Mf51Z6A6p2X3rCnJKCu6ku3Oeb45mBYbz+IoAo=", + "subType": "00" + } + } + ] + }, + { + "_id": 1, + "encryptedLong": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "bE1vqWj3KNyM7cCYUv/cnYm8BPaUL3eMp5syTHq6NF4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "25j9sQXZCihCmHKvTHgaBsAVZFcGPn7JjHdrCGlwyyw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "FA74j21GUEJb1DJBOpR9nVnjaDZnd8yAQNuaW9Qi26g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kJv//KVkbrobIBf+QeWC5jxn20mx/P0R1N6aCSMgKM8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zB+Whi9IUUGxfLEe+lGuIzLX4LFbIhaIAm5lRk65QTc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ybO1QU3CgvhO8JgRXH+HxKszWcpl5aGDYYVa75fHa1g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "X3Y3eSAbbMg//JgiHHiFpYOpV61t8kkDexI+CQyitH4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SlNHXyqVFGDPrX/2ppwog6l4pwj3PKda2TkZbqgfSfA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "McjV8xwTF3xI7863DYOBdyvIv6UpzThl6v9vBRk05bI=", + "subType": "00" + } + } + ] + } + ] + } + } + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2-Range-Long-Correctness.json b/test/client-side-encryption/spec/legacy/fle2-Range-Long-Correctness.json new file mode 100644 index 0000000000..972388c6c4 --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2-Range-Long-Correctness.json @@ -0,0 +1,1644 @@ +{ + "runOn": [ + { + "minServerVersion": "6.2.0", + "topology": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "encrypted_fields": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedLong", + "bsonType": "long", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberLong": "0" + }, + "max": { + "$numberLong": "200" + } + } + } + ] + }, + "key_vault_data": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ], + "tests": [ + { + "description": "Find with $gt", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + } + }, + { + "name": "find", + "arguments": 
{ + "filter": { + "encryptedLong": { + "$gt": { + "$numberLong": "0" + } + } + } + }, + "result": [ + { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + ] + } + ] + }, + { + "description": "Find with $gte", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedLong": { + "$gte": { + "$numberLong": "0" + } + } + }, + "sort": { + "_id": 1 + } + }, + "result": [ + { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + }, + { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + ] + } + ] + }, + { + "description": "Find with $gt with no results", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedLong": { + "$gt": { + "$numberLong": "1" + } + } + } + }, + "result": [] + } + ] + }, + { + "description": "Find with $lt", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedLong": { + "$lt": { + "$numberLong": "1" + } + } + } + }, + "result": [ + { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + } + ] + } + ] + }, + { + "description": "Find with $lte", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedLong": { + "$lte": { + "$numberLong": "1" + } + } + }, + "sort": { + "_id": 1 + } + }, + "result": [ + { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + }, + 
{ + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + ] + } + ] + }, + { + "description": "Find with $lt below min", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedLong": { + "$lt": { + "$numberLong": "0" + } + } + } + }, + "result": { + "errorContains": "must be greater than the range minimum" + } + } + ] + }, + { + "description": "Find with $gt above max", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedLong": { + "$gt": { + "$numberLong": "200" + } + } + } + }, + "result": { + "errorContains": "must be less than the range maximum" + } + } + ] + }, + { + "description": "Find with $gt and $lt", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedLong": { + "$gt": { + "$numberLong": "0" + }, + "$lt": { + "$numberLong": "2" + } + } + } + }, + "result": [ + { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + ] + } + ] + }, + { + "description": "Find with equality", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedLong": { + "$numberLong": "0" + } + } + }, + "result": [ + { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + } + ] + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedLong": { + "$numberLong": "1" + } + } + }, + "result": [ + { + 
"_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + ] + } + ] + }, + { + "description": "Find with full range", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedLong": { + "$gte": { + "$numberLong": "0" + }, + "$lte": { + "$numberLong": "200" + } + } + }, + "sort": { + "_id": 1 + } + }, + "result": [ + { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + }, + { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + ] + } + ] + }, + { + "description": "Find with $in", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedLong": { + "$in": [ + { + "$numberLong": "0" + } + ] + } + } + }, + "result": [ + { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + } + ] + } + ] + }, + { + "description": "Insert out of range", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedLong": { + "$numberLong": "-1" + } + } + }, + "result": { + "errorContains": "value must be greater than or equal to the minimum value" + } + } + ] + }, + { + "description": "Insert min and max", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 200, + "encryptedLong": { + "$numberLong": "200" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": {}, + "sort": { + "_id": 1 + } + }, + "result": [ + { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + }, + { + "_id": 200, + "encryptedLong": { + "$numberLong": "200" + } + } + ] + } + ] + }, + { + "description": "Aggregate with $gte", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": 
"Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedLong": { + "$gte": { + "$numberLong": "0" + } + } + } + }, + { + "$sort": { + "_id": 1 + } + } + ] + }, + "result": [ + { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + }, + { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + ] + } + ] + }, + { + "description": "Aggregate with $gt with no results", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedLong": { + "$gt": { + "$numberLong": "1" + } + } + } + } + ] + }, + "result": [] + } + ] + }, + { + "description": "Aggregate with $lt", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedLong": { + "$lt": { + "$numberLong": "1" + } + } + } + } + ] + }, + "result": [ + { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + } + ] + } + ] + }, + { + "description": "Aggregate with $lte", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedLong": { + "$lte": { + "$numberLong": "1" + } + } + } + }, + { + "$sort": { + "_id": 1 + } + } + ] + }, + "result": [ + { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + }, + { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + ] + } + ] + }, + { + "description": "Aggregate with $lt below min", + "clientOptions": { + 
"autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedLong": { + "$lt": { + "$numberLong": "0" + } + } + } + } + ] + }, + "result": { + "errorContains": "must be greater than the range minimum" + } + } + ] + }, + { + "description": "Aggregate with $gt above max", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedLong": { + "$gt": { + "$numberLong": "200" + } + } + } + } + ] + }, + "result": { + "errorContains": "must be less than the range maximum" + } + } + ] + }, + { + "description": "Aggregate with $gt and $lt", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedLong": { + "$gt": { + "$numberLong": "0" + }, + "$lt": { + "$numberLong": "2" + } + } + } + } + ] + }, + "result": [ + { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + ] + } + ] + }, + { + "description": "Aggregate with equality", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedLong": { + "$numberLong": "0" + } + } + } + ] + }, + "result": [ + { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + } + ] + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedLong": { + "$numberLong": "1" + } + } + } 
+ ] + }, + "result": [ + { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + ] + } + ] + }, + { + "description": "Aggregate with full range", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedLong": { + "$gte": { + "$numberLong": "0" + }, + "$lte": { + "$numberLong": "200" + } + } + } + }, + { + "$sort": { + "_id": 1 + } + } + ] + }, + "result": [ + { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + }, + { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + ] + } + ] + }, + { + "description": "Aggregate with $in", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedLong": { + "$in": [ + { + "$numberLong": "0" + } + ] + } + } + } + ] + }, + "result": [ + { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + } + ] + } + ] + }, + { + "description": "Wrong type: Insert Double", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedLong": { + "$numberDouble": "0" + } + } + }, + "result": { + "errorContains": "cannot encrypt element" + } + } + ] + }, + { + "description": "Wrong type: Find Double", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "find", + "arguments": { + "filter": { + "encryptedLong": { + "$gte": { + "$numberDouble": "0" + } + } + } + }, + "result": { + "errorContains": "field type is not supported" + } + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2-Range-Long-Delete.json b/test/client-side-encryption/spec/legacy/fle2-Range-Long-Delete.json new file mode 100644 index 0000000000..89e1898406 --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2-Range-Long-Delete.json @@ -0,0 +1,437 @@ +{ + "runOn": [ + { + "minServerVersion": "6.2.0", + "topology": [ + "replicaset", + "sharded", + "load-balanced" + ] + 
} + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "encrypted_fields": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedLong", + "bsonType": "long", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberLong": "0" + }, + "max": { + "$numberLong": "200" + } + } + } + ] + }, + "key_vault_data": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ], + "tests": [ + { + "description": "FLE2 Range Long. Delete.", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + } + }, + { + "name": "deleteOne", + "arguments": { + "filter": { + "encryptedLong": { + "$gt": { + "$numberLong": "0" + } + } + } + }, + "result": { + "deletedCount": 1 + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedLong": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedLong", + "bsonType": "long", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberLong": "0" + }, + "max": { + "$numberLong": "200" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + 
"_id": 1, + "encryptedLong": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedLong", + "bsonType": "long", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberLong": "0" + }, + "max": { + "$numberLong": "200" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "delete": "default", + "deletes": [ + { + "q": { + "encryptedLong": { + "$gt": { + "$binary": { + "base64": "CnEFAAADcGF5bG9hZABBBQAABGcABQUAAAMwAH0AAAAFZAAgAAAAALGGQ/CRD+pGLD53BZzWcCcYbuGLVEyjzXIx7b+ux/q2BXMAIAAAAACOC6mXEZL27P9hethZbtKYsTXKK+FpgQ9Axxmn9N/cCwVjACAAAAAAKs6X8gaa+AJ6PpxVicsk7iCXpSYIkQ5zB0KnXqlfCWgAAzEAfQAAAAVkACAAAAAAtL3QIvnZqCF72eS6lKr8ilff7R6kiNklokiTuaU5wNsFcwAgAAAAAEtqr3/X731VB+VrbFcY8ZrJKRo2E0Fd+C8L0EMNcvcCBWMAIAAAAABYTWOKsDZ7cgPhiKDR2bE5CFGz4MuEwZbzZ0z46eGzBQADMgB9AAAABWQAIAAAAAB99ZW/7KwXKzl5M3XQsAJ3JbEef90IoxFYBArNiYzlgQVzACAAAAAAYO/qaw0+92HAryxOUG7iK6hnIy3OaUA9jIqtHdvcq8YFYwAgAAAAABSqgKlxv5LXoxUO3zNmqGztlZhL2S150WiR/eycv7CWAAMzAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVjACAAAAAAfsofSP7nQHv8ic8ZW0aNlWxplS46Z+mywPR4rQk+wcgAAzQAfQAAAAVkACAAAAAAV22FGF7ZDwK/EYiGNMlm/QuT3saQdyJM/Fn+ZyQug1oFcwAgAAAAACo7GwCvbcs5UHQMgds9/1QMklEVdjZigpuOFGrDmmxtBWMAIAAAAADu3L42J/oqNLe/XNYpodf2fhmPdcLhqO4J24o0hiveXwADNQB9AAAABWQAIAAAAADFspsMG7yHjKppyllon1KqAsTrHaZ6JzNqnSz8o6iTvwVzACAAAAAAeiA5pqVIQQ9s6UY/P8v5Jjkl3I7iFNeLDYehikrINrsFYwAgAAAAAAUljtCIvjRnLB2UPTEeRIz11Snl5SOBUNaXMXig4rqKAAM2AH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVjACAAAAAAD51NYesiO4Fo7w7iWBfqAFxEqkaDVctpvzZ28nT4SE8AAzcAfQAAAAVkACAAAAAAYfLwnoxK6XAGQrJFy8+TIJoq38ldBaO75h4zA4ZX5tQFcwAgAAAAAC2wk8UcJH5X5XGnDBYmel6srpBkzBhHtt3Jw1u5TSJ1BWMAIAAAAABJDtFhJ2tPbowp1UUmOCN/rqSqHRL1dtMu0c47vIlK4wADOAB9AAAABWQAIAAAAADGvyrtKkIcaV17ynZA7b2k5Pz6OhvxdWNkDvDWJIja8wVzACAAAAAAOLypVKNxf/wR1G8OZjUUsTQzDYeNNhhITxGMSp7euS4FYwAgAAAAAE6kvmXPqTnYIH4EJmNhy8OLVJZFOmdiBXLMorhELjKWAAM5AH0AAAAFZAAgAAAAAEocREw1L0g+roFUchJI2Yd0M0ME2bnErNUYnpyJP1SqBXMAIAAAAAAcE2/JK/8MoSeOchIuAkKh1X3ImoA7p8ujAZIfvIDo6QVjACAAAAAA85AiE+bNFAYQTXQAFexgeczdVhf8FUnf16WzJlI/kmsAAAVlACAAAAAA65pz95EthqQpfoHS9nWvdCh05AV+OokP7GUaI+7j8+wSY20AAAAAAAAAAAAAEHBheWxvYWRJZAAAAAAAEGZpcnN0T3BlcmF0b3IAAQAAAAA=", + "subType": "06" + } + } + } + }, + "limit": 1 + } + ], + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedLong", + "bsonType": "long", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberLong": "0" + }, + "max": { + "$numberLong": "200" + } + } + } + ] + } + }, + "deleteTokens": { + "default.default": { + "encryptedLong": { + "e": { + "$binary": { + "base64": "65pz95EthqQpfoHS9nWvdCh05AV+OokP7GUaI+7j8+w=", + 
"subType": "00" + } + }, + "o": { + "$binary": { + "base64": "noN+05JsuO1oDg59yypIGj45i+eFH6HOTXOPpeZ//Mk=", + "subType": "00" + } + } + } + } + } + } + }, + "command_name": "delete" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 0, + "encryptedLong": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "5nRutVIyq7URVOVtbE4vM01APSIajAVnsShMwjBlzkM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "RjBYT2h3ZAoHxhf8DU6/dFbDkEBZp0IxREcsRTu2MXs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "x7GR49EN0t3WXQDihkrbonK7qNIBYC87tpL/XEUyIYc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JfYUqWF+OoGjiYkRI4L5iPlF+T1Eleul7Fki22jp4Qc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q1RyGfIgsaQHoZFRw+DD28V26rN5hweApPLwExncvT8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "L2PFeKGvLS6C+DLudR6fGlBq3ERPvjWvRyNRIA2HVb0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CWxaNqL3iP1yCixDkcmf9bmW3E5VeN8TJkg1jJe528s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+vC6araOEo+fpW7PSIP40/EnzBCj1d2N10Jr3rrXJJM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6SV63Mf51Z6A6p2X3rCnJKCu6ku3Oeb45mBYbz+IoAo=", + "subType": "00" + } + } + ] + } + ] + } + } + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2-Range-Long-FindOneAndUpdate.json b/test/client-side-encryption/spec/legacy/fle2-Range-Long-FindOneAndUpdate.json new file mode 100644 index 0000000000..59342a343a --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2-Range-Long-FindOneAndUpdate.json @@ -0,0 +1,512 @@ +{ + "runOn": [ + { + "minServerVersion": "6.2.0", + "topology": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "encrypted_fields": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedLong", + "bsonType": "long", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberLong": "0" + }, + "max": { + "$numberLong": "200" + } + } + } + ] + }, + "key_vault_data": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ], + "tests": [ + { + "description": "FLE2 Range Long. 
FindOneAndUpdate.", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + } + }, + { + "name": "findOneAndUpdate", + "arguments": { + "filter": { + "encryptedLong": { + "$gt": { + "$numberLong": "0" + } + } + }, + "update": { + "$set": { + "encryptedLong": { + "$numberLong": "2" + } + } + }, + "returnDocument": "Before" + }, + "result": { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedLong": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedLong", + "bsonType": "long", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberLong": "0" + }, + "max": { + "$numberLong": "200" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedLong": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedLong", + "bsonType": "long", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberLong": "0" + }, + "max": { + "$numberLong": "200" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "findAndModify": "default", + "query": { + "encryptedLong": { + "$gt": { + "$binary": { + "base64": 
"CnEFAAADcGF5bG9hZABBBQAABGcABQUAAAMwAH0AAAAFZAAgAAAAALGGQ/CRD+pGLD53BZzWcCcYbuGLVEyjzXIx7b+ux/q2BXMAIAAAAACOC6mXEZL27P9hethZbtKYsTXKK+FpgQ9Axxmn9N/cCwVjACAAAAAAKs6X8gaa+AJ6PpxVicsk7iCXpSYIkQ5zB0KnXqlfCWgAAzEAfQAAAAVkACAAAAAAtL3QIvnZqCF72eS6lKr8ilff7R6kiNklokiTuaU5wNsFcwAgAAAAAEtqr3/X731VB+VrbFcY8ZrJKRo2E0Fd+C8L0EMNcvcCBWMAIAAAAABYTWOKsDZ7cgPhiKDR2bE5CFGz4MuEwZbzZ0z46eGzBQADMgB9AAAABWQAIAAAAAB99ZW/7KwXKzl5M3XQsAJ3JbEef90IoxFYBArNiYzlgQVzACAAAAAAYO/qaw0+92HAryxOUG7iK6hnIy3OaUA9jIqtHdvcq8YFYwAgAAAAABSqgKlxv5LXoxUO3zNmqGztlZhL2S150WiR/eycv7CWAAMzAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVjACAAAAAAfsofSP7nQHv8ic8ZW0aNlWxplS46Z+mywPR4rQk+wcgAAzQAfQAAAAVkACAAAAAAV22FGF7ZDwK/EYiGNMlm/QuT3saQdyJM/Fn+ZyQug1oFcwAgAAAAACo7GwCvbcs5UHQMgds9/1QMklEVdjZigpuOFGrDmmxtBWMAIAAAAADu3L42J/oqNLe/XNYpodf2fhmPdcLhqO4J24o0hiveXwADNQB9AAAABWQAIAAAAADFspsMG7yHjKppyllon1KqAsTrHaZ6JzNqnSz8o6iTvwVzACAAAAAAeiA5pqVIQQ9s6UY/P8v5Jjkl3I7iFNeLDYehikrINrsFYwAgAAAAAAUljtCIvjRnLB2UPTEeRIz11Snl5SOBUNaXMXig4rqKAAM2AH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVjACAAAAAAD51NYesiO4Fo7w7iWBfqAFxEqkaDVctpvzZ28nT4SE8AAzcAfQAAAAVkACAAAAAAYfLwnoxK6XAGQrJFy8+TIJoq38ldBaO75h4zA4ZX5tQFcwAgAAAAAC2wk8UcJH5X5XGnDBYmel6srpBkzBhHtt3Jw1u5TSJ1BWMAIAAAAABJDtFhJ2tPbowp1UUmOCN/rqSqHRL1dtMu0c47vIlK4wADOAB9AAAABWQAIAAAAADGvyrtKkIcaV17ynZA7b2k5Pz6OhvxdWNkDvDWJIja8wVzACAAAAAAOLypVKNxf/wR1G8OZjUUsTQzDYeNNhhITxGMSp7euS4FYwAgAAAAAE6kvmXPqTnYIH4EJmNhy8OLVJZFOmdiBXLMorhELjKWAAM5AH0AAAAFZAAgAAAAAEocREw1L0g+roFUchJI2Yd0M0ME2bnErNUYnpyJP1SqBXMAIAAAAAAcE2/JK/8MoSeOchIuAkKh1X3ImoA7p8ujAZIfvIDo6QVjACAAAAAA85AiE+bNFAYQTXQAFexgeczdVhf8FUnf16WzJlI/kmsAAAVlACAAAAAA65pz95EthqQpfoHS9nWvdCh05AV+OokP7GUaI+7j8+wSY20AAAAAAAAAAAAAEHBheWxvYWRJZAAAAAAAEGZpcnN0T3BlcmF0b3IAAQAAAAA=", + "subType": "06" + } + } + } + }, + "update": { + "$set": { + "encryptedLong": { + "$$type": "binData" + } + } + }, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedLong", + "bsonType": "long", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberLong": "0" + }, + "max": { + "$numberLong": "200" + } + } + } + ] + } + }, + "deleteTokens": { + "default.default": { + "encryptedLong": { + "e": { + "$binary": { + "base64": "65pz95EthqQpfoHS9nWvdCh05AV+OokP7GUaI+7j8+w=", + "subType": "00" + } + }, + "o": { + "$binary": { + "base64": "noN+05JsuO1oDg59yypIGj45i+eFH6HOTXOPpeZ//Mk=", + "subType": "00" + } + } + } + } + } + } + }, + "command_name": "findAndModify" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 0, + "encryptedLong": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "5nRutVIyq7URVOVtbE4vM01APSIajAVnsShMwjBlzkM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "RjBYT2h3ZAoHxhf8DU6/dFbDkEBZp0IxREcsRTu2MXs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "x7GR49EN0t3WXQDihkrbonK7qNIBYC87tpL/XEUyIYc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JfYUqWF+OoGjiYkRI4L5iPlF+T1Eleul7Fki22jp4Qc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": 
"q1RyGfIgsaQHoZFRw+DD28V26rN5hweApPLwExncvT8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "L2PFeKGvLS6C+DLudR6fGlBq3ERPvjWvRyNRIA2HVb0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CWxaNqL3iP1yCixDkcmf9bmW3E5VeN8TJkg1jJe528s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+vC6araOEo+fpW7PSIP40/EnzBCj1d2N10Jr3rrXJJM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6SV63Mf51Z6A6p2X3rCnJKCu6ku3Oeb45mBYbz+IoAo=", + "subType": "00" + } + } + ] + }, + { + "_id": 1, + "encryptedLong": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "DLCAJs+W2PL2DV5YChCL6dYrQNr+j4p3L7xhVaub4ic=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "hyDcE6QQjPrYJaIS/n7evEZFYcm31Tj89CpEYGF45cI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "F08nMDWDZc+DbWM7XCEJNNCEYyinRmrvGP7EWhmp4is=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cXH4688amcDc8kZOJq4UP8cE3R58Zl7e+Qo/1jyspps=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "uURBxvTp3FBCVkd+LPqyuY7d6rMW6SGIJQEPY/wtkZI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "jG3hax1L3RBp9t38vUt53FsBxgr/+Si/vVISpAylYpE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kwtIW8MhH9Ky5xNjBx8gFA/SHh2YVphie7g5FGBzals=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "FHflwFuEMu4xX0ZApHi+pdlBH+oevAtXckCUb5Wv0xU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ty4cnzJdAlbQKnh7px3GEYjBnvO+jIOaKjoTRDtmh3M=", + "subType": "00" + } + } + ] + } + ] + } + } + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2-Range-Long-InsertFind.json b/test/client-side-encryption/spec/legacy/fle2-Range-Long-InsertFind.json new file mode 100644 index 0000000000..882e52170d --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2-Range-Long-InsertFind.json @@ -0,0 +1,481 @@ +{ + "runOn": [ + { + "minServerVersion": "6.2.0", + "topology": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "encrypted_fields": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedLong", + "bsonType": "long", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberLong": "0" + }, + "max": { + "$numberLong": "200" + } + } + } + ] + }, + "key_vault_data": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ], + "tests": [ + { + "description": "FLE2 Range Long. 
Insert and Find.", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedLong": { + "$gt": { + "$numberLong": "0" + } + } + } + }, + "result": [ + { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + ] + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedLong": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedLong", + "bsonType": "long", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberLong": "0" + }, + "max": { + "$numberLong": "200" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedLong": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedLong", + "bsonType": "long", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberLong": "0" + }, + "max": { + "$numberLong": "200" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "find": "default", + "filter": { + "encryptedLong": { + "$gt": { + "$binary": { + "base64": 
"CnEFAAADcGF5bG9hZABBBQAABGcABQUAAAMwAH0AAAAFZAAgAAAAALGGQ/CRD+pGLD53BZzWcCcYbuGLVEyjzXIx7b+ux/q2BXMAIAAAAACOC6mXEZL27P9hethZbtKYsTXKK+FpgQ9Axxmn9N/cCwVjACAAAAAAKs6X8gaa+AJ6PpxVicsk7iCXpSYIkQ5zB0KnXqlfCWgAAzEAfQAAAAVkACAAAAAAtL3QIvnZqCF72eS6lKr8ilff7R6kiNklokiTuaU5wNsFcwAgAAAAAEtqr3/X731VB+VrbFcY8ZrJKRo2E0Fd+C8L0EMNcvcCBWMAIAAAAABYTWOKsDZ7cgPhiKDR2bE5CFGz4MuEwZbzZ0z46eGzBQADMgB9AAAABWQAIAAAAAB99ZW/7KwXKzl5M3XQsAJ3JbEef90IoxFYBArNiYzlgQVzACAAAAAAYO/qaw0+92HAryxOUG7iK6hnIy3OaUA9jIqtHdvcq8YFYwAgAAAAABSqgKlxv5LXoxUO3zNmqGztlZhL2S150WiR/eycv7CWAAMzAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVjACAAAAAAfsofSP7nQHv8ic8ZW0aNlWxplS46Z+mywPR4rQk+wcgAAzQAfQAAAAVkACAAAAAAV22FGF7ZDwK/EYiGNMlm/QuT3saQdyJM/Fn+ZyQug1oFcwAgAAAAACo7GwCvbcs5UHQMgds9/1QMklEVdjZigpuOFGrDmmxtBWMAIAAAAADu3L42J/oqNLe/XNYpodf2fhmPdcLhqO4J24o0hiveXwADNQB9AAAABWQAIAAAAADFspsMG7yHjKppyllon1KqAsTrHaZ6JzNqnSz8o6iTvwVzACAAAAAAeiA5pqVIQQ9s6UY/P8v5Jjkl3I7iFNeLDYehikrINrsFYwAgAAAAAAUljtCIvjRnLB2UPTEeRIz11Snl5SOBUNaXMXig4rqKAAM2AH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVjACAAAAAAD51NYesiO4Fo7w7iWBfqAFxEqkaDVctpvzZ28nT4SE8AAzcAfQAAAAVkACAAAAAAYfLwnoxK6XAGQrJFy8+TIJoq38ldBaO75h4zA4ZX5tQFcwAgAAAAAC2wk8UcJH5X5XGnDBYmel6srpBkzBhHtt3Jw1u5TSJ1BWMAIAAAAABJDtFhJ2tPbowp1UUmOCN/rqSqHRL1dtMu0c47vIlK4wADOAB9AAAABWQAIAAAAADGvyrtKkIcaV17ynZA7b2k5Pz6OhvxdWNkDvDWJIja8wVzACAAAAAAOLypVKNxf/wR1G8OZjUUsTQzDYeNNhhITxGMSp7euS4FYwAgAAAAAE6kvmXPqTnYIH4EJmNhy8OLVJZFOmdiBXLMorhELjKWAAM5AH0AAAAFZAAgAAAAAEocREw1L0g+roFUchJI2Yd0M0ME2bnErNUYnpyJP1SqBXMAIAAAAAAcE2/JK/8MoSeOchIuAkKh1X3ImoA7p8ujAZIfvIDo6QVjACAAAAAA85AiE+bNFAYQTXQAFexgeczdVhf8FUnf16WzJlI/kmsAAAVlACAAAAAA65pz95EthqQpfoHS9nWvdCh05AV+OokP7GUaI+7j8+wSY20AAAAAAAAAAAAAEHBheWxvYWRJZAAAAAAAEGZpcnN0T3BlcmF0b3IAAQAAAAA=", + "subType": "06" + } + } + } + }, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedLong", + "bsonType": "long", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberLong": "0" + }, + "max": { + "$numberLong": "200" + } + } + } + ] + } + } + } + }, + "command_name": "find" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 0, + "encryptedLong": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "5nRutVIyq7URVOVtbE4vM01APSIajAVnsShMwjBlzkM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "RjBYT2h3ZAoHxhf8DU6/dFbDkEBZp0IxREcsRTu2MXs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "x7GR49EN0t3WXQDihkrbonK7qNIBYC87tpL/XEUyIYc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JfYUqWF+OoGjiYkRI4L5iPlF+T1Eleul7Fki22jp4Qc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q1RyGfIgsaQHoZFRw+DD28V26rN5hweApPLwExncvT8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "L2PFeKGvLS6C+DLudR6fGlBq3ERPvjWvRyNRIA2HVb0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CWxaNqL3iP1yCixDkcmf9bmW3E5VeN8TJkg1jJe528s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+vC6araOEo+fpW7PSIP40/EnzBCj1d2N10Jr3rrXJJM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": 
"6SV63Mf51Z6A6p2X3rCnJKCu6ku3Oeb45mBYbz+IoAo=", + "subType": "00" + } + } + ] + }, + { + "_id": 1, + "encryptedLong": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "bE1vqWj3KNyM7cCYUv/cnYm8BPaUL3eMp5syTHq6NF4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "25j9sQXZCihCmHKvTHgaBsAVZFcGPn7JjHdrCGlwyyw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "FA74j21GUEJb1DJBOpR9nVnjaDZnd8yAQNuaW9Qi26g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kJv//KVkbrobIBf+QeWC5jxn20mx/P0R1N6aCSMgKM8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zB+Whi9IUUGxfLEe+lGuIzLX4LFbIhaIAm5lRk65QTc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ybO1QU3CgvhO8JgRXH+HxKszWcpl5aGDYYVa75fHa1g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "X3Y3eSAbbMg//JgiHHiFpYOpV61t8kkDexI+CQyitH4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SlNHXyqVFGDPrX/2ppwog6l4pwj3PKda2TkZbqgfSfA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "McjV8xwTF3xI7863DYOBdyvIv6UpzThl6v9vBRk05bI=", + "subType": "00" + } + } + ] + } + ] + } + } + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2-Range-Long-Update.json b/test/client-side-encryption/spec/legacy/fle2-Range-Long-Update.json new file mode 100644 index 0000000000..92e3e390a5 --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2-Range-Long-Update.json @@ -0,0 +1,516 @@ +{ + "runOn": [ + { + "minServerVersion": "6.2.0", + "topology": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "encrypted_fields": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedLong", + "bsonType": "long", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberLong": "0" + }, + "max": { + "$numberLong": "200" + } + } + } + ] + }, + "key_vault_data": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ], + "tests": [ + { + "description": "FLE2 Range Long. 
Update.", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + } + }, + { + "name": "updateOne", + "arguments": { + "filter": { + "encryptedLong": { + "$gt": { + "$numberLong": "0" + } + } + }, + "update": { + "$set": { + "encryptedLong": { + "$numberLong": "2" + } + } + } + }, + "result": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0 + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedLong": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedLong", + "bsonType": "long", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberLong": "0" + }, + "max": { + "$numberLong": "200" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedLong": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedLong", + "bsonType": "long", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberLong": "0" + }, + "max": { + "$numberLong": "200" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command_name": "update", + "command": { + "update": "default", + "ordered": true, + "updates": [ + { + "q": { + "encryptedLong": { + "$gt": { + "$binary": { + "base64": 
"CnEFAAADcGF5bG9hZABBBQAABGcABQUAAAMwAH0AAAAFZAAgAAAAALGGQ/CRD+pGLD53BZzWcCcYbuGLVEyjzXIx7b+ux/q2BXMAIAAAAACOC6mXEZL27P9hethZbtKYsTXKK+FpgQ9Axxmn9N/cCwVjACAAAAAAKs6X8gaa+AJ6PpxVicsk7iCXpSYIkQ5zB0KnXqlfCWgAAzEAfQAAAAVkACAAAAAAtL3QIvnZqCF72eS6lKr8ilff7R6kiNklokiTuaU5wNsFcwAgAAAAAEtqr3/X731VB+VrbFcY8ZrJKRo2E0Fd+C8L0EMNcvcCBWMAIAAAAABYTWOKsDZ7cgPhiKDR2bE5CFGz4MuEwZbzZ0z46eGzBQADMgB9AAAABWQAIAAAAAB99ZW/7KwXKzl5M3XQsAJ3JbEef90IoxFYBArNiYzlgQVzACAAAAAAYO/qaw0+92HAryxOUG7iK6hnIy3OaUA9jIqtHdvcq8YFYwAgAAAAABSqgKlxv5LXoxUO3zNmqGztlZhL2S150WiR/eycv7CWAAMzAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVjACAAAAAAfsofSP7nQHv8ic8ZW0aNlWxplS46Z+mywPR4rQk+wcgAAzQAfQAAAAVkACAAAAAAV22FGF7ZDwK/EYiGNMlm/QuT3saQdyJM/Fn+ZyQug1oFcwAgAAAAACo7GwCvbcs5UHQMgds9/1QMklEVdjZigpuOFGrDmmxtBWMAIAAAAADu3L42J/oqNLe/XNYpodf2fhmPdcLhqO4J24o0hiveXwADNQB9AAAABWQAIAAAAADFspsMG7yHjKppyllon1KqAsTrHaZ6JzNqnSz8o6iTvwVzACAAAAAAeiA5pqVIQQ9s6UY/P8v5Jjkl3I7iFNeLDYehikrINrsFYwAgAAAAAAUljtCIvjRnLB2UPTEeRIz11Snl5SOBUNaXMXig4rqKAAM2AH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVjACAAAAAAD51NYesiO4Fo7w7iWBfqAFxEqkaDVctpvzZ28nT4SE8AAzcAfQAAAAVkACAAAAAAYfLwnoxK6XAGQrJFy8+TIJoq38ldBaO75h4zA4ZX5tQFcwAgAAAAAC2wk8UcJH5X5XGnDBYmel6srpBkzBhHtt3Jw1u5TSJ1BWMAIAAAAABJDtFhJ2tPbowp1UUmOCN/rqSqHRL1dtMu0c47vIlK4wADOAB9AAAABWQAIAAAAADGvyrtKkIcaV17ynZA7b2k5Pz6OhvxdWNkDvDWJIja8wVzACAAAAAAOLypVKNxf/wR1G8OZjUUsTQzDYeNNhhITxGMSp7euS4FYwAgAAAAAE6kvmXPqTnYIH4EJmNhy8OLVJZFOmdiBXLMorhELjKWAAM5AH0AAAAFZAAgAAAAAEocREw1L0g+roFUchJI2Yd0M0ME2bnErNUYnpyJP1SqBXMAIAAAAAAcE2/JK/8MoSeOchIuAkKh1X3ImoA7p8ujAZIfvIDo6QVjACAAAAAA85AiE+bNFAYQTXQAFexgeczdVhf8FUnf16WzJlI/kmsAAAVlACAAAAAA65pz95EthqQpfoHS9nWvdCh05AV+OokP7GUaI+7j8+wSY20AAAAAAAAAAAAAEHBheWxvYWRJZAAAAAAAEGZpcnN0T3BlcmF0b3IAAQAAAAA=", + "subType": "06" + } + } + } + }, + "u": { + "$set": { + "encryptedLong": { + "$$type": "binData" + } + } + } + } + ], + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedLong", + "bsonType": "long", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberLong": "0" + }, + "max": { + "$numberLong": "200" + } + } + } + ] + } + }, + "deleteTokens": { + "default.default": { + "encryptedLong": { + "e": { + "$binary": { + "base64": "65pz95EthqQpfoHS9nWvdCh05AV+OokP7GUaI+7j8+w=", + "subType": "00" + } + }, + "o": { + "$binary": { + "base64": "noN+05JsuO1oDg59yypIGj45i+eFH6HOTXOPpeZ//Mk=", + "subType": "00" + } + } + } + } + } + }, + "$db": "default" + } + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 0, + "encryptedLong": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "5nRutVIyq7URVOVtbE4vM01APSIajAVnsShMwjBlzkM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "RjBYT2h3ZAoHxhf8DU6/dFbDkEBZp0IxREcsRTu2MXs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "x7GR49EN0t3WXQDihkrbonK7qNIBYC87tpL/XEUyIYc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JfYUqWF+OoGjiYkRI4L5iPlF+T1Eleul7Fki22jp4Qc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q1RyGfIgsaQHoZFRw+DD28V26rN5hweApPLwExncvT8=", 
+ "subType": "00" + } + }, + { + "$binary": { + "base64": "L2PFeKGvLS6C+DLudR6fGlBq3ERPvjWvRyNRIA2HVb0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CWxaNqL3iP1yCixDkcmf9bmW3E5VeN8TJkg1jJe528s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+vC6araOEo+fpW7PSIP40/EnzBCj1d2N10Jr3rrXJJM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6SV63Mf51Z6A6p2X3rCnJKCu6ku3Oeb45mBYbz+IoAo=", + "subType": "00" + } + } + ] + }, + { + "_id": 1, + "encryptedLong": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "DLCAJs+W2PL2DV5YChCL6dYrQNr+j4p3L7xhVaub4ic=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "hyDcE6QQjPrYJaIS/n7evEZFYcm31Tj89CpEYGF45cI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "F08nMDWDZc+DbWM7XCEJNNCEYyinRmrvGP7EWhmp4is=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cXH4688amcDc8kZOJq4UP8cE3R58Zl7e+Qo/1jyspps=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "uURBxvTp3FBCVkd+LPqyuY7d6rMW6SGIJQEPY/wtkZI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "jG3hax1L3RBp9t38vUt53FsBxgr/+Si/vVISpAylYpE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kwtIW8MhH9Ky5xNjBx8gFA/SHh2YVphie7g5FGBzals=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "FHflwFuEMu4xX0ZApHi+pdlBH+oevAtXckCUb5Wv0xU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ty4cnzJdAlbQKnh7px3GEYjBnvO+jIOaKjoTRDtmh3M=", + "subType": "00" + } + } + ] + } + ] + } + } + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2-Range-WrongType.json b/test/client-side-encryption/spec/legacy/fle2-Range-WrongType.json new file mode 100644 index 0000000000..9eddf1c99c --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2-Range-WrongType.json @@ -0,0 +1,162 @@ +{ + "runOn": [ + { + "minServerVersion": "6.2.0", + "topology": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "encrypted_fields": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedInt", + "bsonType": "int", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberInt": "0" + }, + "max": { + "$numberInt": "200" + } + } + } + ] + }, + "key_vault_data": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ], + "tests": [ + { + "description": "Wrong type: Insert Double", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } 
+ } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedInt": { + "$numberDouble": "0" + } + } + }, + "result": { + "errorContains": "cannot encrypt element" + } + } + ] + }, + { + "description": "Wrong type: Find Double", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedInt": { + "$gte": { + "$numberDouble": "0" + } + } + }, + "sort": { + "_id": 1 + } + }, + "result": { + "errorContains": "field type is not supported" + } + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2-Update.json b/test/client-side-encryption/spec/legacy/fle2-Update.json index 87830af32d..090f44f9ac 100644 --- a/test/client-side-encryption/spec/legacy/fle2-Update.json +++ b/test/client-side-encryption/spec/legacy/fle2-Update.json @@ -4,7 +4,8 @@ "minServerVersion": "6.0.0", "topology": [ "replicaset", - "sharded" + "sharded", + "load-balanced" ] } ], diff --git a/test/client-side-encryption/spec/legacy/fle2-validatorAndPartialFieldExpression.json b/test/client-side-encryption/spec/legacy/fle2-validatorAndPartialFieldExpression.json index fab36f75a1..e70ca7c72d 100644 --- a/test/client-side-encryption/spec/legacy/fle2-validatorAndPartialFieldExpression.json +++ b/test/client-side-encryption/spec/legacy/fle2-validatorAndPartialFieldExpression.json @@ -4,7 +4,8 @@ "minServerVersion": "6.0.0", "topology": [ "replicaset", - "sharded" + "sharded", + "load-balanced" ] } ], From a3720d9ceaa699e196fc1624f4c62515c2e5410d Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Wed, 25 Jan 2023 09:41:23 -0600 Subject: [PATCH 0326/1588] PYTHON-3568 Intellisense highlights multiple PyMongo methods because of CodecOptions (#1139) --- .github/workflows/test-python.yml | 13 ++++++++- bson/__init__.py | 34 +++++++++++------------- bson/codec_options.pyi | 10 +++---- bson/typings.py | 30 +++++++++++++++++++++ doc/examples/type_hints.rst | 4 +-- mypy.ini | 2 +- pymongo/collection.py | 7 ++--- pymongo/database.py | 17 ++++++------ pymongo/message.py | 4 +-- pymongo/mongo_client.py | 17 ++++++++---- pymongo/typings.py | 30 ++++++++++----------- test/test_database.py | 4 +-- test/{test_mypy.py => test_typing.py} | 6 +++-- test/test_typing_strict.py | 38 +++++++++++++++++++++++++++ 14 files changed, 150 insertions(+), 66 deletions(-) create mode 100644 bson/typings.py rename test/{test_mypy.py => test_typing.py} (98%) create mode 100644 test/test_typing_strict.py diff --git a/.github/workflows/test-python.yml b/.github/workflows/test-python.yml index 414eef7a1b..8dad68ab20 100644 --- a/.github/workflows/test-python.yml +++ b/.github/workflows/test-python.yml @@ -67,7 +67,18 @@ jobs: mypy --install-types --non-interactive bson/codec_options.py mypy --install-types --non-interactive --disable-error-code var-annotated --disable-error-code attr-defined --disable-error-code union-attr --disable-error-code assignment --disable-error-code no-redef --disable-error-code index --allow-redefinition --allow-untyped-globals --exclude "test/mypy_fails/*.*" test python -m pip install -U typing_extensions - mypy --install-types 
--non-interactive test/test_mypy.py + mypy --install-types --non-interactive test/test_typing.py test/test_typing_strict.py + - name: Run mypy strict + run: | + mypy --strict test/test_typing_strict.py + - name: Run pyright + run: | + python -m pip install -U pip pyright==1.1.290 + pyright test/test_typing.py test/test_typing_strict.py + - name: Run pyright strict + run: | + echo '{"strict": ["tests/test_typing_strict.py"]}' >> pyrightconfig.json + pyright test/test_typing_strict.py linkcheck: name: Check Links diff --git a/bson/__init__.py b/bson/__init__.py index c6a81d97ec..2fe4aa173e 100644 --- a/bson/__init__.py +++ b/bson/__init__.py @@ -101,7 +101,6 @@ DEFAULT_CODEC_OPTIONS, CodecOptions, DatetimeConversion, - _DocumentType, _raw_document_class, ) from bson.datetime_ms import ( @@ -125,8 +124,7 @@ # Import some modules for type-checking only. if TYPE_CHECKING: - from array import array - from mmap import mmap + from bson.typings import _DocumentIn, _DocumentType, _ReadableBuffer try: from bson import _cbson # type: ignore[attr-defined] @@ -986,12 +984,8 @@ def _dict_to_bson(doc: Any, check_keys: bool, opts: CodecOptions, top_level: boo _CODEC_OPTIONS_TYPE_ERROR = TypeError("codec_options must be an instance of CodecOptions") -_DocumentIn = Mapping[str, Any] -_ReadableBuffer = Union[bytes, memoryview, "mmap", "array"] - - def encode( - document: _DocumentIn, + document: "_DocumentIn", check_keys: bool = False, codec_options: CodecOptions = DEFAULT_CODEC_OPTIONS, ) -> bytes: @@ -1022,8 +1016,8 @@ def encode( def decode( - data: _ReadableBuffer, codec_options: "Optional[CodecOptions[_DocumentType]]" = None -) -> _DocumentType: + data: "_ReadableBuffer", codec_options: "Optional[CodecOptions[_DocumentType]]" = None +) -> "_DocumentType": """Decode BSON to a document. By default, returns a BSON document represented as a Python @@ -1056,11 +1050,13 @@ def decode( return _bson_to_dict(data, opts) -def _decode_all(data: _ReadableBuffer, opts: "CodecOptions[_DocumentType]") -> List[_DocumentType]: +def _decode_all( + data: "_ReadableBuffer", opts: "CodecOptions[_DocumentType]" +) -> "List[_DocumentType]": """Decode a BSON data to multiple documents.""" data, view = get_data_and_view(data) data_len = len(data) - docs: List[_DocumentType] = [] + docs: "List[_DocumentType]" = [] position = 0 end = data_len - 1 use_raw = _raw_document_class(opts.document_class) @@ -1091,8 +1087,8 @@ def _decode_all(data: _ReadableBuffer, opts: "CodecOptions[_DocumentType]") -> L def decode_all( - data: _ReadableBuffer, codec_options: "Optional[CodecOptions[_DocumentType]]" = None -) -> List[_DocumentType]: + data: "_ReadableBuffer", codec_options: "Optional[CodecOptions[_DocumentType]]" = None +) -> "List[_DocumentType]": """Decode BSON data to multiple documents. `data` must be a bytes-like object implementing the buffer protocol that @@ -1213,7 +1209,7 @@ def _decode_all_selective(data: Any, codec_options: CodecOptions, fields: Any) - # Decode documents for internal use. 
from bson.raw_bson import RawBSONDocument - internal_codec_options = codec_options.with_options( + internal_codec_options: CodecOptions[RawBSONDocument] = codec_options.with_options( document_class=RawBSONDocument, type_registry=None ) _doc = _bson_to_dict(data, internal_codec_options) @@ -1228,7 +1224,7 @@ def _decode_all_selective(data: Any, codec_options: CodecOptions, fields: Any) - def decode_iter( data: bytes, codec_options: "Optional[CodecOptions[_DocumentType]]" = None -) -> Iterator[_DocumentType]: +) -> "Iterator[_DocumentType]": """Decode BSON data to multiple documents as a generator. Works similarly to the decode_all function, but yields one document at a @@ -1264,7 +1260,7 @@ def decode_iter( def decode_file_iter( file_obj: Union[BinaryIO, IO], codec_options: "Optional[CodecOptions[_DocumentType]]" = None -) -> Iterator[_DocumentType]: +) -> "Iterator[_DocumentType]": """Decode bson data from a file to multiple documents as a generator. Works similarly to the decode_all function, but reads from the file object @@ -1325,7 +1321,7 @@ class BSON(bytes): @classmethod def encode( cls: Type["BSON"], - document: _DocumentIn, + document: "_DocumentIn", check_keys: bool = False, codec_options: CodecOptions = DEFAULT_CODEC_OPTIONS, ) -> "BSON": @@ -1352,7 +1348,7 @@ def encode( """ return cls(encode(document, check_keys, codec_options)) - def decode(self, codec_options: "CodecOptions[_DocumentType]" = DEFAULT_CODEC_OPTIONS) -> _DocumentType: # type: ignore[override,assignment] + def decode(self, codec_options: "CodecOptions[_DocumentType]" = DEFAULT_CODEC_OPTIONS) -> "_DocumentType": # type: ignore[override,assignment] """Decode this BSON data. By default, returns a BSON document represented as a Python diff --git a/bson/codec_options.pyi b/bson/codec_options.pyi index 2424516f08..8242bd4cb2 100644 --- a/bson/codec_options.pyi +++ b/bson/codec_options.pyi @@ -22,7 +22,8 @@ you get the error: "TypeError: 'type' object is not subscriptable". import datetime import abc import enum -from typing import Tuple, Generic, Optional, Mapping, Any, TypeVar, Type, Dict, Iterable, Tuple, MutableMapping, Callable, Union +from typing import Tuple, Generic, Optional, Mapping, Any, Type, Dict, Iterable, Tuple, Callable, Union +from bson.typings import _DocumentType, _DocumentTypeArg class TypeEncoder(abc.ABC, metaclass=abc.ABCMeta): @@ -52,9 +53,6 @@ class TypeRegistry: def __init__(self, type_codecs: Optional[Iterable[Codec]] = ..., fallback_encoder: Optional[Fallback] = ...) -> None: ... def __eq__(self, other: Any) -> Any: ... - -_DocumentType = TypeVar("_DocumentType", bound=Mapping[str, Any]) - class DatetimeConversion(int, enum.Enum): DATETIME = ... DATETIME_CLAMP = ... @@ -82,7 +80,7 @@ class CodecOptions(Tuple, Generic[_DocumentType]): ) -> CodecOptions[_DocumentType]: ... # CodecOptions API - def with_options(self, **kwargs: Any) -> CodecOptions[_DocumentType]: ... + def with_options(self, **kwargs: Any) -> CodecOptions[_DocumentTypeArg]: ... def _arguments_repr(self) -> str: ... @@ -100,7 +98,7 @@ class CodecOptions(Tuple, Generic[_DocumentType]): _fields: Tuple[str] -DEFAULT_CODEC_OPTIONS: CodecOptions[MutableMapping[str, Any]] +DEFAULT_CODEC_OPTIONS: "CodecOptions[Mapping[str, Any]]" _RAW_BSON_DOCUMENT_MARKER: int def _raw_document_class(document_class: Any) -> bool: ... diff --git a/bson/typings.py b/bson/typings.py new file mode 100644 index 0000000000..14a8131f69 --- /dev/null +++ b/bson/typings.py @@ -0,0 +1,30 @@ +# Copyright 2023-Present MongoDB, Inc. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Type aliases used by bson""" +from typing import TYPE_CHECKING, Any, Mapping, MutableMapping, TypeVar, Union + +if TYPE_CHECKING: + from array import array + from mmap import mmap + + from bson.raw_bson import RawBSONDocument + + +# Common Shared Types. +_DocumentIn = Union[MutableMapping[str, Any], "RawBSONDocument"] +_DocumentOut = _DocumentIn +_DocumentType = TypeVar("_DocumentType", bound=Mapping[str, Any]) +_DocumentTypeArg = TypeVar("_DocumentTypeArg", bound=Mapping[str, Any]) +_ReadableBuffer = Union[bytes, memoryview, "mmap", "array"] diff --git a/doc/examples/type_hints.rst b/doc/examples/type_hints.rst index b413ad7b24..e5ad3338e1 100644 --- a/doc/examples/type_hints.rst +++ b/doc/examples/type_hints.rst @@ -20,7 +20,7 @@ type of document object returned when decoding BSON documents. Due to `limitations in mypy`_, the default values for generic document types are not yet provided (they will eventually be ``Dict[str, any]``). -For a larger set of examples that use types, see the PyMongo `test_mypy module`_. +For a larger set of examples that use types, see the PyMongo `test_typing module`_. If you would like to opt out of using the provided types, add the following to your `mypy config`_: :: @@ -326,5 +326,5 @@ Another example is trying to set a value on a :class:`~bson.raw_bson.RawBSONDocu .. _mypy: https://mypy.readthedocs.io/en/stable/cheat_sheet_py3.html .. _limitations in mypy: https://github.com/python/mypy/issues/3737 .. _mypy config: https://mypy.readthedocs.io/en/stable/config_file.html -.. _test_mypy module: https://github.com/mongodb/mongo-python-driver/blob/master/test/test_mypy.py +.. _test_typing module: https://github.com/mongodb/mongo-python-driver/blob/master/test/test_typing.py .. 
_schema validation: https://www.mongodb.com/docs/manual/core/schema-validation/#when-to-use-schema-validation diff --git a/mypy.ini b/mypy.ini index 2562177ab1..d0e6ab5ff9 100644 --- a/mypy.ini +++ b/mypy.ini @@ -32,7 +32,7 @@ ignore_missing_imports = True [mypy-snappy.*] ignore_missing_imports = True -[mypy-test.test_mypy] +[mypy-test.test_typing] warn_unused_ignores = True [mypy-winkerberos.*] diff --git a/pymongo/collection.py b/pymongo/collection.py index 77f154f5e7..4cb3fa79c9 100644 --- a/pymongo/collection.py +++ b/pymongo/collection.py @@ -72,7 +72,7 @@ InsertOneResult, UpdateResult, ) -from pymongo.typings import _CollationIn, _DocumentType, _Pipeline +from pymongo.typings import _CollationIn, _DocumentType, _DocumentTypeArg, _Pipeline from pymongo.write_concern import WriteConcern _FIND_AND_MODIFY_DOC_FIELDS = {"value": 1} @@ -103,6 +103,7 @@ class ReturnDocument(object): if TYPE_CHECKING: + import bson from pymongo.client_session import ClientSession from pymongo.database import Database from pymongo.read_concern import ReadConcern @@ -116,7 +117,7 @@ def __init__( database: "Database[_DocumentType]", name: str, create: Optional[bool] = False, - codec_options: Optional[CodecOptions] = None, + codec_options: Optional["CodecOptions[_DocumentTypeArg]"] = None, read_preference: Optional[_ServerMode] = None, write_concern: Optional[WriteConcern] = None, read_concern: Optional["ReadConcern"] = None, @@ -394,7 +395,7 @@ def database(self) -> "Database[_DocumentType]": def with_options( self, - codec_options: Optional[CodecOptions] = None, + codec_options: Optional["bson.CodecOptions[_DocumentTypeArg]"] = None, read_preference: Optional[_ServerMode] = None, write_concern: Optional[WriteConcern] = None, read_concern: Optional["ReadConcern"] = None, diff --git a/pymongo/database.py b/pymongo/database.py index 259c22d558..86754b2c05 100644 --- a/pymongo/database.py +++ b/pymongo/database.py @@ -29,7 +29,7 @@ cast, ) -from bson.codec_options import DEFAULT_CODEC_OPTIONS, CodecOptions +from bson.codec_options import DEFAULT_CODEC_OPTIONS from bson.dbref import DBRef from bson.son import SON from bson.timestamp import Timestamp @@ -41,7 +41,7 @@ from pymongo.common import _ecc_coll_name, _ecoc_coll_name, _esc_coll_name from pymongo.errors import CollectionInvalid, InvalidName from pymongo.read_preferences import ReadPreference, _ServerMode -from pymongo.typings import _CollationIn, _DocumentType, _Pipeline +from pymongo.typings import _CollationIn, _DocumentType, _DocumentTypeArg, _Pipeline def _check_name(name): @@ -55,6 +55,7 @@ def _check_name(name): if TYPE_CHECKING: + import bson import bson.codec_options from pymongo.client_session import ClientSession from pymongo.mongo_client import MongoClient @@ -72,7 +73,7 @@ def __init__( self, client: "MongoClient[_DocumentType]", name: str, - codec_options: Optional[CodecOptions] = None, + codec_options: Optional["bson.CodecOptions[_DocumentTypeArg]"] = None, read_preference: Optional[_ServerMode] = None, write_concern: Optional["WriteConcern"] = None, read_concern: Optional["ReadConcern"] = None, @@ -152,7 +153,7 @@ def name(self) -> str: def with_options( self, - codec_options: Optional[CodecOptions] = None, + codec_options: Optional["bson.CodecOptions[_DocumentTypeArg]"] = None, read_preference: Optional[_ServerMode] = None, write_concern: Optional["WriteConcern"] = None, read_concern: Optional["ReadConcern"] = None, @@ -239,7 +240,7 @@ def __getitem__(self, name: str) -> "Collection[_DocumentType]": def get_collection( self, name: str, - 
codec_options: Optional[CodecOptions] = None, + codec_options: Optional["bson.CodecOptions[_DocumentTypeArg]"] = None, read_preference: Optional[_ServerMode] = None, write_concern: Optional["WriteConcern"] = None, read_concern: Optional["ReadConcern"] = None, @@ -295,7 +296,7 @@ def get_collection( def create_collection( self, name: str, - codec_options: Optional[CodecOptions] = None, + codec_options: Optional["bson.CodecOptions[_DocumentTypeArg]"] = None, read_preference: Optional[_ServerMode] = None, write_concern: Optional["WriteConcern"] = None, read_concern: Optional["ReadConcern"] = None, @@ -976,7 +977,7 @@ def _drop_helper(self, name, session=None, comment=None): @_csot.apply def drop_collection( self, - name_or_collection: Union[str, Collection], + name_or_collection: Union[str, Collection[_DocumentTypeArg]], session: Optional["ClientSession"] = None, comment: Optional[Any] = None, encrypted_fields: Optional[Mapping[str, Any]] = None, @@ -1068,7 +1069,7 @@ def drop_collection( def validate_collection( self, - name_or_collection: Union[str, Collection], + name_or_collection: Union[str, Collection[_DocumentTypeArg]], scandata: bool = False, full: bool = False, session: Optional["ClientSession"] = None, diff --git a/pymongo/message.py b/pymongo/message.py index 960832cb9e..9fa64a875a 100644 --- a/pymongo/message.py +++ b/pymongo/message.py @@ -24,7 +24,7 @@ import random import struct from io import BytesIO as _BytesIO -from typing import Any, Dict, NoReturn +from typing import Any, Mapping, NoReturn import bson from bson import CodecOptions, _decode_selective, _dict_to_bson, _make_c_string, encode @@ -81,7 +81,7 @@ } _FIELD_MAP = {"insert": "documents", "update": "updates", "delete": "deletes"} -_UNICODE_REPLACE_CODEC_OPTIONS: "CodecOptions[Dict[str, Any]]" = CodecOptions( +_UNICODE_REPLACE_CODEC_OPTIONS: "CodecOptions[Mapping[str, Any]]" = CodecOptions( unicode_decode_error_handler="replace" ) diff --git a/pymongo/mongo_client.py b/pymongo/mongo_client.py index dccd4bb6b1..ab0c749889 100644 --- a/pymongo/mongo_client.py +++ b/pymongo/mongo_client.py @@ -53,7 +53,8 @@ cast, ) -from bson.codec_options import DEFAULT_CODEC_OPTIONS, CodecOptions, TypeRegistry +import bson +from bson.codec_options import DEFAULT_CODEC_OPTIONS, TypeRegistry from bson.son import SON from bson.timestamp import Timestamp from pymongo import ( @@ -90,7 +91,13 @@ from pymongo.settings import TopologySettings from pymongo.topology import Topology, _ErrorContext from pymongo.topology_description import TOPOLOGY_TYPE, TopologyDescription -from pymongo.typings import _Address, _CollationIn, _DocumentType, _Pipeline +from pymongo.typings import ( + _Address, + _CollationIn, + _DocumentType, + _DocumentTypeArg, + _Pipeline, +) from pymongo.uri_parser import ( _check_options, _handle_option_deprecations, @@ -1875,7 +1882,7 @@ def list_database_names( @_csot.apply def drop_database( self, - name_or_database: Union[str, database.Database], + name_or_database: Union[str, database.Database[_DocumentTypeArg]], session: Optional[client_session.ClientSession] = None, comment: Optional[Any] = None, ) -> None: @@ -1928,7 +1935,7 @@ def drop_database( def get_default_database( self, default: Optional[str] = None, - codec_options: Optional[CodecOptions] = None, + codec_options: Optional["bson.CodecOptions[_DocumentTypeArg]"] = None, read_preference: Optional[_ServerMode] = None, write_concern: Optional[WriteConcern] = None, read_concern: Optional["ReadConcern"] = None, @@ -1989,7 +1996,7 @@ def get_default_database( def 
get_database( self, name: Optional[str] = None, - codec_options: Optional[CodecOptions] = None, + codec_options: Optional["bson.CodecOptions[_DocumentTypeArg]"] = None, read_preference: Optional[_ServerMode] = None, write_concern: Optional[WriteConcern] = None, read_concern: Optional["ReadConcern"] = None, diff --git a/pymongo/typings.py b/pymongo/typings.py index fe0e8bd523..32cd980c97 100644 --- a/pymongo/typings.py +++ b/pymongo/typings.py @@ -13,30 +13,18 @@ # limitations under the License. """Type aliases used by PyMongo""" -from typing import ( - TYPE_CHECKING, - Any, - Mapping, - MutableMapping, - Optional, - Sequence, - Tuple, - TypeVar, - Union, -) +from typing import TYPE_CHECKING, Any, Mapping, Optional, Sequence, Tuple, Union + +from bson.typings import _DocumentIn, _DocumentOut, _DocumentType, _DocumentTypeArg if TYPE_CHECKING: - from bson.raw_bson import RawBSONDocument from pymongo.collation import Collation # Common Shared Types. _Address = Tuple[str, Optional[int]] _CollationIn = Union[Mapping[str, Any], "Collation"] -_DocumentIn = Union[MutableMapping[str, Any], "RawBSONDocument"] _Pipeline = Sequence[Mapping[str, Any]] -_DocumentOut = _DocumentIn -_DocumentType = TypeVar("_DocumentType", bound=Mapping[str, Any]) def strip_optional(elem): @@ -44,3 +32,15 @@ def strip_optional(elem): while inside a list comprehension.""" assert elem is not None return elem + + +__all__ = [ + "_DocumentIn", + "_DocumentOut", + "_DocumentType", + "_DocumentTypeArg", + "_Address", + "_CollationIn", + "_Pipeline", + "strip_optional", +] diff --git a/test/test_database.py b/test/test_database.py index 53af4912e4..b6be380aab 100644 --- a/test/test_database.py +++ b/test/test_database.py @@ -435,7 +435,7 @@ def test_id_ordering(self): db.test.insert_one(SON([("hello", "world"), ("_id", 5)])) db = self.client.get_database( - "pymongo_test", codec_options=CodecOptions(document_class=SON) + "pymongo_test", codec_options=CodecOptions(document_class=SON[str, Any]) ) cursor = db.test.find() for x in cursor: @@ -469,7 +469,7 @@ def test_deref_kwargs(self): db.test.insert_one({"_id": 4, "foo": "bar"}) db = self.client.get_database( - "pymongo_test", codec_options=CodecOptions(document_class=SON) + "pymongo_test", codec_options=CodecOptions(document_class=SON[str, Any]) ) self.assertEqual( SON([("foo", "bar")]), db.dereference(DBRef("test", 4), projection={"_id": False}) diff --git a/test/test_mypy.py b/test/test_typing.py similarity index 98% rename from test/test_mypy.py rename to test/test_typing.py index 3b29bbf20e..8fc0f5a23e 100644 --- a/test/test_mypy.py +++ b/test/test_typing.py @@ -422,7 +422,8 @@ def test_typeddict_not_required_document_type(self) -> None: assert out is not None # This should fail because the output is a Movie. 
assert out["foo"] # type:ignore[typeddict-item] - assert out["_id"] + # pyright gives reportTypedDictNotRequiredAccess for the following: + assert out["_id"] # type:ignore @only_type_check def test_typeddict_empty_document_type(self) -> None: @@ -442,7 +443,8 @@ def test_typeddict_find_notrequired(self): coll.insert_one(ImplicitMovie(name="THX-1138", year=1971)) out = coll.find_one({}) assert out is not None - assert out["_id"] + # pyright gives reportTypedDictNotRequiredAccess for the following: + assert out["_id"] # type:ignore @only_type_check def test_raw_bson_document_type(self) -> None: diff --git a/test/test_typing_strict.py b/test/test_typing_strict.py new file mode 100644 index 0000000000..55cb1454bc --- /dev/null +++ b/test/test_typing_strict.py @@ -0,0 +1,38 @@ +# Copyright 2023-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Test typings in strict mode.""" +import unittest +from typing import TYPE_CHECKING, Any, Dict + +import pymongo +from pymongo.collection import Collection +from pymongo.database import Database + + +def test_generic_arguments() -> None: + """Ensure known usages of generic arguments pass strict typing""" + if not TYPE_CHECKING: + raise unittest.SkipTest("Used for Type Checking Only") + mongo_client: pymongo.MongoClient[Dict[str, Any]] = pymongo.MongoClient() + mongo_client.drop_database("foo") + mongo_client.get_default_database() + db = mongo_client.get_database("test_db") + db = Database(mongo_client, "test_db") + db.with_options() + db.validate_collection("py_test") + col = db.get_collection("py_test") + col.insert_one({"abc": 123}) + col = Collection(db, "py_test") + col.with_options() From 0b843b76f6a426b1a6840872cd2991d1aaa3f0e9 Mon Sep 17 00:00:00 2001 From: Julius Park Date: Wed, 25 Jan 2023 14:09:15 -0800 Subject: [PATCH 0327/1588] BUMP 4.4.0b0 (#1144) --- doc/changelog.rst | 6 ++++++ doc/installation.rst | 2 +- pymongo/_version.py | 2 +- setup.py | 2 +- 4 files changed, 9 insertions(+), 3 deletions(-) diff --git a/doc/changelog.rst b/doc/changelog.rst index 6913f09fc3..6a6e6fef2d 100644 --- a/doc/changelog.rst +++ b/doc/changelog.rst @@ -6,6 +6,12 @@ Changes in Version 4.4 - Added support for passing a list containing (key, direction) pairs or keys to :meth:`~pymongo.collection.Collection.create_index`. +- **BETA** Added support for range queries on client side field level encrypted collections. +- pymongocrypt 1.5.0 or later is now required for client side field level + encryption support. +- Improved support for Pyright to improve typing support for IDEs like Visual Studio Code or Visual Studio. +- Improved support for type-checking with MyPy "strict" mode (`--strict`). +- Added support for Python 3.11. Issues Resolved ............... diff --git a/doc/installation.rst b/doc/installation.rst index 4810353f98..c4cbc78d93 100644 --- a/doc/installation.rst +++ b/doc/installation.rst @@ -197,4 +197,4 @@ but can be found on the `GitHub tags page `_. 
They can be installed by passing the full URL for the tag to pip:: - $ python3 -m pip install https://github.com/mongodb/mongo-python-driver/archive/3.11.0rc0.tar.gz + $ python3 -m pip install https://github.com/mongodb/mongo-python-driver/archive/4.4.0b0.tar.gz diff --git a/pymongo/_version.py b/pymongo/_version.py index 78c325a23c..71a59a0dee 100644 --- a/pymongo/_version.py +++ b/pymongo/_version.py @@ -15,7 +15,7 @@ """Current version of PyMongo.""" from typing import Tuple, Union -version_tuple: Tuple[Union[int, str], ...] = (4, 4, 0, ".dev0") +version_tuple: Tuple[Union[int, str], ...] = (4, 4, 0, "b0") def get_version_string() -> str: diff --git a/setup.py b/setup.py index 6d1a711708..4fa51fa314 100755 --- a/setup.py +++ b/setup.py @@ -281,7 +281,7 @@ def build_extension(self, ext): aws_reqs = ["pymongo-auth-aws<2.0.0"] extras_require = { - "encryption": ["pymongocrypt>=1.3.0,<2.0.0"] + aws_reqs, + "encryption": ["pymongocrypt>=1.5.0,<2.0.0"] + aws_reqs, "ocsp": pyopenssl_reqs, "snappy": ["python-snappy"], "zstd": ["zstandard"], From 4af7a076186a93f87a542051d19684ded0d00fe8 Mon Sep 17 00:00:00 2001 From: Julius Park Date: Wed, 25 Jan 2023 14:13:17 -0800 Subject: [PATCH 0328/1588] BUMP 4.5.0.dev0 --- pymongo/_version.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pymongo/_version.py b/pymongo/_version.py index 71a59a0dee..db32b1ddb2 100644 --- a/pymongo/_version.py +++ b/pymongo/_version.py @@ -15,7 +15,7 @@ """Current version of PyMongo.""" from typing import Tuple, Union -version_tuple: Tuple[Union[int, str], ...] = (4, 4, 0, "b0") +version_tuple: Tuple[Union[int, str], ...] = (4, 5, 0, ".dev0") def get_version_string() -> str: From e353d5791b020a40a875cbf401a788c6699eb044 Mon Sep 17 00:00:00 2001 From: Julius Park Date: Wed, 25 Jan 2023 15:29:54 -0800 Subject: [PATCH 0329/1588] BUMP 4.5.0.dev1 --- pymongo/_version.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pymongo/_version.py b/pymongo/_version.py index db32b1ddb2..514dd7c366 100644 --- a/pymongo/_version.py +++ b/pymongo/_version.py @@ -15,7 +15,7 @@ """Current version of PyMongo.""" from typing import Tuple, Union -version_tuple: Tuple[Union[int, str], ...] = (4, 5, 0, ".dev0") +version_tuple: Tuple[Union[int, str], ...] = (4, 5, 0, ".dev1") def get_version_string() -> str: From 06dd53666909e3a0cbbe58bc81a64bda33cd16b4 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Fri, 27 Jan 2023 12:32:18 -0800 Subject: [PATCH 0330/1588] BUMP 4.4.0.dev1 --- pymongo/_version.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pymongo/_version.py b/pymongo/_version.py index 514dd7c366..a5885d8cc5 100644 --- a/pymongo/_version.py +++ b/pymongo/_version.py @@ -15,7 +15,7 @@ """Current version of PyMongo.""" from typing import Tuple, Union -version_tuple: Tuple[Union[int, str], ...] = (4, 5, 0, ".dev1") +version_tuple: Tuple[Union[int, str], ...] 
= (4, 4, 0, ".dev1") def get_version_string() -> str: From b3099c62de61205b87d9578c5d2ed1bba9451eb8 Mon Sep 17 00:00:00 2001 From: Julius Park Date: Mon, 30 Jan 2023 12:13:30 -0800 Subject: [PATCH 0331/1588] PYTHON-3558 Missing docs for JSONOptions (#1143) --- bson/codec_options.py | 2 +- bson/json_util.py | 112 +++++++++++++++++++++--------------------- 2 files changed, 58 insertions(+), 56 deletions(-) diff --git a/bson/codec_options.py b/bson/codec_options.py index 6f4fdaac8d..c09de8a931 100644 --- a/bson/codec_options.py +++ b/bson/codec_options.py @@ -330,7 +330,7 @@ def __init__(self, *args, **kwargs): retrieved from the server will be modified in the client application and stored back to the server. """ - return super().__init__() + super().__init__() def __new__( cls: Type["CodecOptions"], diff --git a/bson/json_util.py b/bson/json_util.py index 517adff4e0..0c2ca58283 100644 --- a/bson/json_util.py +++ b/bson/json_util.py @@ -205,66 +205,68 @@ class JSONMode: class JSONOptions(CodecOptions): - """Encapsulates JSON options for :func:`dumps` and :func:`loads`. - - :Parameters: - - `strict_number_long`: If ``True``, :class:`~bson.int64.Int64` objects - are encoded to MongoDB Extended JSON's *Strict mode* type - `NumberLong`, ie ``'{"$numberLong": "" }'``. Otherwise they - will be encoded as an `int`. Defaults to ``False``. - - `datetime_representation`: The representation to use when encoding - instances of :class:`datetime.datetime`. Defaults to - :const:`~DatetimeRepresentation.LEGACY`. - - `strict_uuid`: If ``True``, :class:`uuid.UUID` object are encoded to - MongoDB Extended JSON's *Strict mode* type `Binary`. Otherwise it - will be encoded as ``'{"$uuid": "" }'``. Defaults to ``False``. - - `json_mode`: The :class:`JSONMode` to use when encoding BSON types to - Extended JSON. Defaults to :const:`~JSONMode.LEGACY`. - - `document_class`: BSON documents returned by :func:`loads` will be - decoded to an instance of this class. Must be a subclass of - :class:`collections.MutableMapping`. Defaults to :class:`dict`. - - `uuid_representation`: The :class:`~bson.binary.UuidRepresentation` - to use when encoding and decoding instances of :class:`uuid.UUID`. - Defaults to :const:`~bson.binary.UuidRepresentation.UNSPECIFIED`. - - `tz_aware`: If ``True``, MongoDB Extended JSON's *Strict mode* type - `Date` will be decoded to timezone aware instances of - :class:`datetime.datetime`. Otherwise they will be naive. Defaults - to ``False``. - - `tzinfo`: A :class:`datetime.tzinfo` subclass that specifies the - timezone from which :class:`~datetime.datetime` objects should be - decoded. Defaults to :const:`~bson.tz_util.utc`. - - `datetime_conversion`: Specifies how UTC datetimes should be decoded - within BSON. Valid options include 'datetime_ms' to return as a - DatetimeMS, 'datetime' to return as a datetime.datetime and - raising a ValueError for out-of-range values, 'datetime_auto' to - return DatetimeMS objects when the underlying datetime is - out-of-range and 'datetime_clamp' to clamp to the minimum and - maximum possible datetimes. Defaults to 'datetime'. See - :ref:`handling-out-of-range-datetimes` for details. - - `args`: arguments to :class:`~bson.codec_options.CodecOptions` - - `kwargs`: arguments to :class:`~bson.codec_options.CodecOptions` - - .. seealso:: The specification for Relaxed and Canonical `Extended JSON`_. - - .. versionchanged:: 4.0 - The default for `json_mode` was changed from :const:`JSONMode.LEGACY` - to :const:`JSONMode.RELAXED`. 
- The default for `uuid_representation` was changed from - :const:`~bson.binary.UuidRepresentation.PYTHON_LEGACY` to - :const:`~bson.binary.UuidRepresentation.UNSPECIFIED`. - - .. versionchanged:: 3.5 - Accepts the optional parameter `json_mode`. - - .. versionchanged:: 4.0 - Changed default value of `tz_aware` to False. - """ - json_mode: int strict_number_long: bool datetime_representation: int strict_uuid: bool + def __init__(self, *args, **kwargs): + """Encapsulates JSON options for :func:`dumps` and :func:`loads`. + + :Parameters: + - `strict_number_long`: If ``True``, :class:`~bson.int64.Int64` objects + are encoded to MongoDB Extended JSON's *Strict mode* type + `NumberLong`, ie ``'{"$numberLong": "" }'``. Otherwise they + will be encoded as an `int`. Defaults to ``False``. + - `datetime_representation`: The representation to use when encoding + instances of :class:`datetime.datetime`. Defaults to + :const:`~DatetimeRepresentation.LEGACY`. + - `strict_uuid`: If ``True``, :class:`uuid.UUID` object are encoded to + MongoDB Extended JSON's *Strict mode* type `Binary`. Otherwise it + will be encoded as ``'{"$uuid": "" }'``. Defaults to ``False``. + - `json_mode`: The :class:`JSONMode` to use when encoding BSON types to + Extended JSON. Defaults to :const:`~JSONMode.LEGACY`. + - `document_class`: BSON documents returned by :func:`loads` will be + decoded to an instance of this class. Must be a subclass of + :class:`collections.MutableMapping`. Defaults to :class:`dict`. + - `uuid_representation`: The :class:`~bson.binary.UuidRepresentation` + to use when encoding and decoding instances of :class:`uuid.UUID`. + Defaults to :const:`~bson.binary.UuidRepresentation.UNSPECIFIED`. + - `tz_aware`: If ``True``, MongoDB Extended JSON's *Strict mode* type + `Date` will be decoded to timezone aware instances of + :class:`datetime.datetime`. Otherwise they will be naive. Defaults + to ``False``. + - `tzinfo`: A :class:`datetime.tzinfo` subclass that specifies the + timezone from which :class:`~datetime.datetime` objects should be + decoded. Defaults to :const:`~bson.tz_util.utc`. + - `datetime_conversion`: Specifies how UTC datetimes should be decoded + within BSON. Valid options include 'datetime_ms' to return as a + DatetimeMS, 'datetime' to return as a datetime.datetime and + raising a ValueError for out-of-range values, 'datetime_auto' to + return DatetimeMS objects when the underlying datetime is + out-of-range and 'datetime_clamp' to clamp to the minimum and + maximum possible datetimes. Defaults to 'datetime'. See + :ref:`handling-out-of-range-datetimes` for details. + - `args`: arguments to :class:`~bson.codec_options.CodecOptions` + - `kwargs`: arguments to :class:`~bson.codec_options.CodecOptions` + + .. seealso:: The specification for Relaxed and Canonical `Extended JSON`_. + + .. versionchanged:: 4.0 + The default for `json_mode` was changed from :const:`JSONMode.LEGACY` + to :const:`JSONMode.RELAXED`. + The default for `uuid_representation` was changed from + :const:`~bson.binary.UuidRepresentation.PYTHON_LEGACY` to + :const:`~bson.binary.UuidRepresentation.UNSPECIFIED`. + + .. versionchanged:: 3.5 + Accepts the optional parameter `json_mode`. + + .. versionchanged:: 4.0 + Changed default value of `tz_aware` to False. 
+ """ + super().__init__() + def __new__( cls: Type["JSONOptions"], strict_number_long: Optional[bool] = None, From b492263826123a78513d94eec186e184eb97f421 Mon Sep 17 00:00:00 2001 From: Julius Park Date: Tue, 31 Jan 2023 14:58:37 -0800 Subject: [PATCH 0332/1588] PYTHON-3357 Automatically create Queryable Encryption keys (#1145) --- pymongo/database.py | 56 ++++++----- pymongo/encryption.py | 106 +++++++++++++++++++- test/test_encryption.py | 214 +++++++++++++++++++++++++++++++++++++++- 3 files changed, 348 insertions(+), 28 deletions(-) diff --git a/pymongo/database.py b/pymongo/database.py index 86754b2c05..b3c6c60851 100644 --- a/pymongo/database.py +++ b/pymongo/database.py @@ -13,6 +13,7 @@ # limitations under the License. """Database level operations.""" +from copy import deepcopy from typing import ( TYPE_CHECKING, Any, @@ -292,6 +293,28 @@ def get_collection( read_concern, ) + def _get_encrypted_fields(self, kwargs, coll_name, ask_db): + encrypted_fields = kwargs.get("encryptedFields") + if encrypted_fields: + return deepcopy(encrypted_fields) + if ( + self.client.options.auto_encryption_opts + and self.client.options.auto_encryption_opts._encrypted_fields_map + and self.client.options.auto_encryption_opts._encrypted_fields_map.get( + f"{self.name}.{coll_name}" + ) + ): + return deepcopy( + self.client.options.auto_encryption_opts._encrypted_fields_map[ + f"{self.name}.{coll_name}" + ] + ) + if ask_db and self.client.options.auto_encryption_opts: + options = self[coll_name].options() + if options.get("encryptedFields"): + return deepcopy(options["encryptedFields"]) + return None + @_csot.apply def create_collection( self, @@ -419,19 +442,10 @@ def create_collection( .. _create collection command: https://mongodb.com/docs/manual/reference/command/create """ - encrypted_fields = kwargs.get("encryptedFields") - if ( - not encrypted_fields - and self.client.options.auto_encryption_opts - and self.client.options.auto_encryption_opts._encrypted_fields_map - ): - encrypted_fields = self.client.options.auto_encryption_opts._encrypted_fields_map.get( - "%s.%s" % (self.name, name) - ) - kwargs["encryptedFields"] = encrypted_fields - + encrypted_fields = self._get_encrypted_fields(kwargs, name, False) if encrypted_fields: common.validate_is_mapping("encryptedFields", encrypted_fields) + kwargs["encryptedFields"] = encrypted_fields clustered_index = kwargs.get("clusteredIndex") if clustered_index: @@ -1038,21 +1052,11 @@ def drop_collection( if not isinstance(name, str): raise TypeError("name_or_collection must be an instance of str") - full_name = "%s.%s" % (self.name, name) - if ( - not encrypted_fields - and self.client.options.auto_encryption_opts - and self.client.options.auto_encryption_opts._encrypted_fields_map - ): - encrypted_fields = self.client.options.auto_encryption_opts._encrypted_fields_map.get( - full_name - ) - if not encrypted_fields and self.client.options.auto_encryption_opts: - colls = list( - self.list_collections(filter={"name": name}, session=session, comment=comment) - ) - if colls and colls[0]["options"].get("encryptedFields"): - encrypted_fields = colls[0]["options"]["encryptedFields"] + encrypted_fields = self._get_encrypted_fields( + {"encryptedFields": encrypted_fields}, + name, + True, + ) if encrypted_fields: common.validate_is_mapping("encrypted_fields", encrypted_fields) self._drop_helper( diff --git a/pymongo/encryption.py b/pymongo/encryption.py index 8b51863f96..0e281f7b37 100644 --- a/pymongo/encryption.py +++ b/pymongo/encryption.py @@ -18,7 +18,8 @@ 
import enum import socket import weakref -from typing import Any, Generic, Mapping, Optional, Sequence +from copy import deepcopy +from typing import Any, Generic, Mapping, Optional, Sequence, Tuple try: from pymongocrypt.auto_encrypter import AutoEncrypter @@ -39,8 +40,10 @@ from bson.raw_bson import DEFAULT_RAW_BSON_OPTIONS, RawBSONDocument, _inflate_bson from bson.son import SON from pymongo import _csot +from pymongo.collection import Collection from pymongo.cursor import Cursor from pymongo.daemon import _spawn_daemon +from pymongo.database import Database from pymongo.encryption_options import AutoEncryptionOpts, RangeOpts from pymongo.errors import ( ConfigurationError, @@ -552,6 +555,107 @@ def __init__( # Use the same key vault collection as the callback. self._key_vault_coll = self._io_callbacks.key_vault_coll + def create_encrypted_collection( + self, + database: Database, + name: str, + encrypted_fields: Mapping[str, Any], + kms_provider: Optional[str] = None, + master_key: Optional[Mapping[str, Any]] = None, + key_alt_names: Optional[Sequence[str]] = None, + key_material: Optional[bytes] = None, + **kwargs: Any, + ) -> Tuple[Collection[_DocumentType], Mapping[str, Any]]: + """Create a collection with encryptedFields. + + .. warning:: + This function does not update the encryptedFieldsMap in the client's + AutoEncryptionOpts, thus the user must create a new client after calling this function with + the encryptedFields returned. + + Normally collection creation is automatic. This method should + only be used to specify options on + creation. :class:`~pymongo.errors.EncryptionError` will be + raised if the collection already exists. + + :Parameters: + - `name`: the name of the collection to create + - `encrypted_fields` (dict): **(BETA)** Document that describes the encrypted fields for + Queryable Encryption. For example:: + + { + "escCollection": "enxcol_.encryptedCollection.esc", + "eccCollection": "enxcol_.encryptedCollection.ecc", + "ecocCollection": "enxcol_.encryptedCollection.ecoc", + "fields": [ + { + "path": "firstName", + "keyId": Binary.from_uuid(UUID('00000000-0000-0000-0000-000000000000')), + "bsonType": "string", + "queries": {"queryType": "equality"} + }, + { + "path": "ssn", + "keyId": Binary.from_uuid(UUID('04104104-1041-0410-4104-104104104104')), + "bsonType": "string" + } + ] + } + + The "keyId" may be set to ``None`` to auto-generate the data keys. + - `kms_provider` (optional): the KMS provider to be used + - `master_key` (optional): Identifies a KMS-specific key used to encrypt the + new data key. If the kmsProvider is "local" the `master_key` is + not applicable and may be omitted. + - `key_alt_names` (optional): An optional list of string alternate + names used to reference a key. If a key is created with alternate + names, then encryption may refer to the key by the unique alternate + name instead of by ``key_id``. + - `key_material` (optional): Sets the custom key material to be used + by the data key for encryption and decryption. + - `**kwargs` (optional): additional keyword arguments are the same as "create_collection". + + All optional `create collection command`_ parameters should be passed + as keyword arguments to this method. + See the documentation for :meth:`~pymongo.database.Database.create_collection` for all valid options. + + .. versionadded:: 4.4 + + .. 
_create collection command: + https://mongodb.com/docs/manual/reference/command/create + + """ + encrypted_fields = deepcopy(encrypted_fields) + for i, field in enumerate(encrypted_fields["fields"]): + if isinstance(field, dict) and field.get("keyId") is None: + try: + encrypted_fields["fields"][i]["keyId"] = self.create_data_key( + kms_provider=kms_provider, # type:ignore[arg-type] + master_key=master_key, + key_alt_names=key_alt_names, + key_material=key_material, + ) + except EncryptionError as exc: + raise EncryptionError( + Exception( + "Error occurred while creating data key for field %s with encryptedFields=%s" + % (field["path"], encrypted_fields) + ) + ) from exc + kwargs["encryptedFields"] = encrypted_fields + kwargs["check_exists"] = False + try: + return ( + database.create_collection(name=name, **kwargs), + encrypted_fields, + ) + except Exception as exc: + raise EncryptionError( + Exception( + f"Error: {str(exc)} occurred while creating collection with encryptedFields={str(encrypted_fields)}" + ) + ) from exc + def create_data_key( self, kms_provider: str, diff --git a/test/test_encryption.py b/test/test_encryption.py index fc6d62c727..0df875d956 100644 --- a/test/test_encryption.py +++ b/test/test_encryption.py @@ -65,7 +65,7 @@ from bson.errors import BSONError from bson.json_util import JSONOptions from bson.son import SON -from pymongo import encryption +from pymongo import ReadPreference, encryption from pymongo.cursor import CursorType from pymongo.encryption import Algorithm, ClientEncryption, QueryType from pymongo.encryption_options import _HAVE_PYMONGOCRYPT, AutoEncryptionOpts, RangeOpts @@ -2687,5 +2687,217 @@ def test_int(self): self.run_test_cases("Int", RangeOpts(min=0, max=200, sparsity=1), int) +# https://github.com/mongodb/specifications/blob/master/source/client-side-encryption/tests/README.rst#automatic-data-encryption-keys +class TestAutomaticDecryptionKeys(EncryptionIntegrationTest): + @client_context.require_no_standalone + @client_context.require_version_min(6, 0, -1) + def setUp(self): + super().setUp() + self.key1_document = json_data("etc", "data", "keys", "key1-document.json") + self.key1_id = self.key1_document["_id"] + self.client.drop_database(self.db) + self.key_vault = create_key_vault(self.client.keyvault.datakeys, self.key1_document) + self.addCleanup(self.key_vault.drop) + self.client_encryption = ClientEncryption( + {"local": {"key": LOCAL_MASTER_KEY}}, + self.key_vault.full_name, + self.client, + OPTS, + ) + self.addCleanup(self.client_encryption.close) + + def test_01_simple_create(self): + coll, _ = self.client_encryption.create_encrypted_collection( + database=self.db, + name="testing1", + encrypted_fields={"fields": [{"path": "ssn", "bsonType": "string", "keyId": None}]}, + kms_provider="local", + ) + with self.assertRaises(WriteError) as exc: + coll.insert_one({"ssn": "123-45-6789"}) + self.assertEqual(exc.exception.code, 121) + + def test_02_no_fields(self): + with self.assertRaisesRegex( + TypeError, + "create_encrypted_collection.* missing 1 required positional argument: 'encrypted_fields'", + ): + self.client_encryption.create_encrypted_collection( # type:ignore[call-arg] + database=self.db, + name="testing1", + ) + + def test_03_invalid_keyid(self): + with self.assertRaisesRegex( + EncryptionError, + "create.encryptedFields.fields.keyId' is the wrong type 'bool', expected type 'binData", + ): + self.client_encryption.create_encrypted_collection( + database=self.db, + name="testing1", + encrypted_fields={ + "fields": 
[{"path": "ssn", "bsonType": "string", "keyId": False}] + }, + kms_provider="local", + ) + + def test_04_insert_encrypted(self): + coll, ef = self.client_encryption.create_encrypted_collection( + database=self.db, + name="testing1", + encrypted_fields={"fields": [{"path": "ssn", "bsonType": "string", "keyId": None}]}, + kms_provider="local", + ) + key1_id = ef["fields"][0]["keyId"] + encrypted_value = self.client_encryption.encrypt( + "123-45-6789", + key_id=key1_id, + algorithm=Algorithm.UNINDEXED, + ) + coll.insert_one({"ssn": encrypted_value}) + + def test_copy_encrypted_fields(self): + encrypted_fields = { + "fields": [ + { + "path": "ssn", + "bsonType": "string", + "keyId": None, + } + ] + } + _, ef = self.client_encryption.create_encrypted_collection( + database=self.db, + name="testing1", + kms_provider="local", + encrypted_fields=encrypted_fields, + ) + self.assertIsNotNone(ef["fields"][0]["keyId"]) + self.assertIsNone(encrypted_fields["fields"][0]["keyId"]) + + def test_options_forward(self): + coll, ef = self.client_encryption.create_encrypted_collection( + database=self.db, + name="testing1", + kms_provider="local", + encrypted_fields={"fields": [{"path": "ssn", "bsonType": "string", "keyId": None}]}, + read_preference=ReadPreference.NEAREST, + ) + self.assertEqual(coll.read_preference, ReadPreference.NEAREST) + self.assertEqual(coll.name, "testing1") + + def test_mixed_null_keyids(self): + key = self.client_encryption.create_data_key(kms_provider="local") + coll, ef = self.client_encryption.create_encrypted_collection( + database=self.db, + name="testing1", + encrypted_fields={ + "fields": [ + {"path": "ssn", "bsonType": "string", "keyId": None}, + {"path": "dob", "bsonType": "string", "keyId": key}, + {"path": "secrets", "bsonType": "string"}, + {"path": "address", "bsonType": "string", "keyId": None}, + ] + }, + kms_provider="local", + ) + encrypted_values = [ + self.client_encryption.encrypt( + val, + key_id=key, + algorithm=Algorithm.UNINDEXED, + ) + for val, key in zip( + ["123-45-6789", "11/22/1963", "My secret", "New Mexico, 87104"], + [field["keyId"] for field in ef["fields"]], + ) + ] + coll.insert_one( + { + "ssn": encrypted_values[0], + "dob": encrypted_values[1], + "secrets": encrypted_values[2], + "address": encrypted_values[3], + } + ) + + def test_create_datakey_fails(self): + key = self.client_encryption.create_data_key(kms_provider="local") + # Make sure the error message includes the previous keys in the error message even when generating keys fails. + with self.assertRaisesRegex( + EncryptionError, + f"data key for field ssn with encryptedFields=.*{re.escape(repr(key))}.*keyId.*Binary.*keyId.*None", + ): + self.client_encryption.create_encrypted_collection( + database=self.db, + name="testing1", + encrypted_fields={ + "fields": [ + {"path": "address", "bsonType": "string", "keyId": key}, + {"path": "dob", "bsonType": "string", "keyId": None}, + # Because this is the second one to use the altName "1", it will fail when creating the data_key. + {"path": "ssn", "bsonType": "string", "keyId": None}, + ] + }, + kms_provider="local", + key_alt_names=["1"], + ) + + def test_create_failure(self): + key = self.client_encryption.create_data_key(kms_provider="local") + # Make sure the error message includes the previous keys in the error message even when it is the creation + # of the collection that fails. 
+ with self.assertRaisesRegex( + EncryptionError, + f"while creating collection with encryptedFields=.*{re.escape(repr(key))}.*keyId.*Binary", + ): + self.client_encryption.create_encrypted_collection( + database=self.db, + name=1, # type:ignore[arg-type] + encrypted_fields={ + "fields": [ + {"path": "address", "bsonType": "string", "keyId": key}, + {"path": "dob", "bsonType": "string", "keyId": None}, + ] + }, + kms_provider="local", + ) + + def test_collection_name_collision(self): + encrypted_fields = { + "fields": [ + {"path": "address", "bsonType": "string", "keyId": None}, + ] + } + self.db.create_collection("testing1") + with self.assertRaisesRegex( + EncryptionError, + "while creating collection with encryptedFields=.*keyId.*Binary", + ): + self.client_encryption.create_encrypted_collection( + database=self.db, + name="testing1", + encrypted_fields=encrypted_fields, + kms_provider="local", + ) + self.db.drop_collection("testing1", encrypted_fields=encrypted_fields) + self.client_encryption.create_encrypted_collection( + database=self.db, + name="testing1", + encrypted_fields=encrypted_fields, + kms_provider="local", + ) + with self.assertRaisesRegex( + EncryptionError, + "while creating collection with encryptedFields=.*keyId.*Binary", + ): + self.client_encryption.create_encrypted_collection( + database=self.db, + name="testing1", + encrypted_fields=encrypted_fields, + kms_provider="local", + ) + + if __name__ == "__main__": unittest.main() From 540562a60630a57d3eb0c06358b19d3882a5de18 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Tue, 31 Jan 2023 15:22:28 -0800 Subject: [PATCH 0333/1588] PYTHON-3577 Fix test_aggregate_out on 4.0 replica set (#1146) --- test/test_load_balancer.py | 2 ++ test/test_read_concern.py | 4 ++-- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/test/test_load_balancer.py b/test/test_load_balancer.py index 378ae33e03..728b4e567f 100644 --- a/test/test_load_balancer.py +++ b/test/test_load_balancer.py @@ -122,6 +122,8 @@ def test_session_gc(self): session = client.start_session() session.start_transaction() client.test_session_gc.test.find_one({}, session=session) + # Cleanup the transaction left open on the server. + self.addCleanup(self.client.admin.command, "killSessions", [session.session_id]) if client_context.load_balancer: self.assertEqual(pool.active_sockets, 1) # Pinned. 
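A note on the diff below: the fix registers an explicit ``killSessions`` cleanup so the transaction the test opens is aborted on the server instead of lingering. A minimal standalone sketch of that cleanup pattern, where the deployment, database, and collection names are illustrative assumptions (transactions also require a replica set or sharded cluster)::

    from pymongo import MongoClient

    client = MongoClient()  # hypothetical test deployment
    session = client.start_session()
    session.start_transaction()
    client.test_session_gc.test.find_one({}, session=session)
    # killSessions aborts any transaction owned by the given session ids,
    # which is how the patched test below cleans up after itself.
    client.admin.command("killSessions", [session.session_id])
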
diff --git a/test/test_read_concern.py b/test/test_read_concern.py index 3a1c8f3a54..2230f2bef2 100644 --- a/test/test_read_concern.py +++ b/test/test_read_concern.py @@ -20,7 +20,7 @@ sys.path[0:0] = [""] from test import IntegrationTest, client_context -from test.utils import OvertCommandListener, rs_or_single_client, single_client +from test.utils import OvertCommandListener, rs_or_single_client from bson.son import SON from pymongo.errors import OperationFailure @@ -35,7 +35,7 @@ class TestReadConcern(IntegrationTest): def setUpClass(cls): super(TestReadConcern, cls).setUpClass() cls.listener = OvertCommandListener() - cls.client = single_client(event_listeners=[cls.listener]) + cls.client = rs_or_single_client(event_listeners=[cls.listener]) cls.db = cls.client.pymongo_test client_context.client.pymongo_test.create_collection("coll") From 79ccf4e2874c7ed73fb17fa880806bc75dc1b8de Mon Sep 17 00:00:00 2001 From: Julius Park Date: Fri, 3 Feb 2023 21:10:30 -0800 Subject: [PATCH 0334/1588] PYTHON-3589 createEncryptedCollection should not accept keyAltNames (#1147) --- pymongo/encryption.py | 13 +++---------- test/test_encryption.py | 7 ++----- 2 files changed, 5 insertions(+), 15 deletions(-) diff --git a/pymongo/encryption.py b/pymongo/encryption.py index 0e281f7b37..cf76cbe146 100644 --- a/pymongo/encryption.py +++ b/pymongo/encryption.py @@ -562,12 +562,13 @@ def create_encrypted_collection( encrypted_fields: Mapping[str, Any], kms_provider: Optional[str] = None, master_key: Optional[Mapping[str, Any]] = None, - key_alt_names: Optional[Sequence[str]] = None, - key_material: Optional[bytes] = None, **kwargs: Any, ) -> Tuple[Collection[_DocumentType], Mapping[str, Any]]: """Create a collection with encryptedFields. + .. note:: Support for Queryable Encryption is in beta. + Backwards-breaking changes may be made before the final release. + .. warning:: This function does not update the encryptedFieldsMap in the client's AutoEncryptionOpts, thus the user must create a new client after calling this function with @@ -607,12 +608,6 @@ def create_encrypted_collection( - `master_key` (optional): Identifies a KMS-specific key used to encrypt the new data key. If the kmsProvider is "local" the `master_key` is not applicable and may be omitted. - - `key_alt_names` (optional): An optional list of string alternate - names used to reference a key. If a key is created with alternate - names, then encryption may refer to the key by the unique alternate - name instead of by ``key_id``. - - `key_material` (optional): Sets the custom key material to be used - by the data key for encryption and decryption. - `**kwargs` (optional): additional keyword arguments are the same as "create_collection". All optional `create collection command`_ parameters should be passed @@ -632,8 +627,6 @@ def create_encrypted_collection( encrypted_fields["fields"][i]["keyId"] = self.create_data_key( kms_provider=kms_provider, # type:ignore[arg-type] master_key=master_key, - key_alt_names=key_alt_names, - key_material=key_material, ) except EncryptionError as exc: raise EncryptionError( diff --git a/test/test_encryption.py b/test/test_encryption.py index 0df875d956..eb9bf8e984 100644 --- a/test/test_encryption.py +++ b/test/test_encryption.py @@ -2826,7 +2826,7 @@ def test_create_datakey_fails(self): # Make sure the error message includes the previous keys in the error message even when generating keys fails. 
with self.assertRaisesRegex( EncryptionError, - f"data key for field ssn with encryptedFields=.*{re.escape(repr(key))}.*keyId.*Binary.*keyId.*None", + f"data key for field dob with encryptedFields=.*{re.escape(repr(key))}.*keyId.*None", ): self.client_encryption.create_encrypted_collection( database=self.db, @@ -2835,12 +2835,9 @@ def test_create_datakey_fails(self): "fields": [ {"path": "address", "bsonType": "string", "keyId": key}, {"path": "dob", "bsonType": "string", "keyId": None}, - # Because this is the second one to use the altName "1", it will fail when creating the data_key. - {"path": "ssn", "bsonType": "string", "keyId": None}, ] }, - kms_provider="local", - key_alt_names=["1"], + kms_provider="does not exist", ) def test_create_failure(self): From dcbba962dd480f67a54eaefdb7018ae164cd46da Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Tue, 7 Feb 2023 07:35:19 -0800 Subject: [PATCH 0335/1588] PYTHON-3596 Guarantee a document update in retryable writes tests (#1149) --- test/test_retryable_writes.py | 17 +++++++++-------- 1 file changed, 9 insertions(+), 8 deletions(-) diff --git a/test/test_retryable_writes.py b/test/test_retryable_writes.py index a22c776534..1e978f21be 100644 --- a/test/test_retryable_writes.py +++ b/test/test_retryable_writes.py @@ -128,23 +128,23 @@ def retryable_single_statement_ops(coll): return [ (coll.bulk_write, [[InsertOne({}), InsertOne({})]], {}), (coll.bulk_write, [[InsertOne({}), InsertOne({})]], {"ordered": False}), - (coll.bulk_write, [[ReplaceOne({}, {})]], {}), - (coll.bulk_write, [[ReplaceOne({}, {}), ReplaceOne({}, {})]], {}), + (coll.bulk_write, [[ReplaceOne({}, {"a1": 1})]], {}), + (coll.bulk_write, [[ReplaceOne({}, {"a2": 1}), ReplaceOne({}, {"a3": 1})]], {}), ( coll.bulk_write, - [[UpdateOne({}, {"$set": {"a": 1}}), UpdateOne({}, {"$set": {"a": 1}})]], + [[UpdateOne({}, {"$set": {"a4": 1}}), UpdateOne({}, {"$set": {"a5": 1}})]], {}, ), (coll.bulk_write, [[DeleteOne({})]], {}), (coll.bulk_write, [[DeleteOne({}), DeleteOne({})]], {}), (coll.insert_one, [{}], {}), (coll.insert_many, [[{}, {}]], {}), - (coll.replace_one, [{}, {}], {}), - (coll.update_one, [{}, {"$set": {"a": 1}}], {}), + (coll.replace_one, [{}, {"a6": 1}], {}), + (coll.update_one, [{}, {"$set": {"a7": 1}}], {}), (coll.delete_one, [{}], {}), - (coll.find_one_and_replace, [{}, {"a": 3}], {}), - (coll.find_one_and_update, [{}, {"$set": {"a": 1}}], {}), - (coll.find_one_and_delete, [{}, {}], {}), + (coll.find_one_and_replace, [{}, {"a8": 1}], {}), + (coll.find_one_and_update, [{}, {"$set": {"a9": 1}}], {}), + (coll.find_one_and_delete, [{}, {"a10": 1}], {}), ] @@ -490,6 +490,7 @@ def setUpClass(cls): } @client_context.require_version_min(4, 0) + @client_knobs(heartbeat_frequency=0.05, min_heartbeat_interval=0.05) def test_RetryableWriteError_error_label(self): listener = OvertCommandListener() client = rs_or_single_client(retryWrites=True, event_listeners=[listener]) From 2e6e9a85070a766c2eb53351dabdc85956367f1f Mon Sep 17 00:00:00 2001 From: Julius Park Date: Tue, 7 Feb 2023 10:23:59 -0800 Subject: [PATCH 0336/1588] PYTHON-3592 createEncryptedCollection should raise a specialized exception to report the intermediate encryptedFields (#1148) --- pymongo/encryption.py | 17 +++++-------- pymongo/errors.py | 25 ++++++++++++++++++ test/test_encryption.py | 56 ++++++++++++++++++++++------------------- 3 files changed, 61 insertions(+), 37 deletions(-) diff --git a/pymongo/encryption.py b/pymongo/encryption.py index cf76cbe146..6a6150d0c0 100644 --- a/pymongo/encryption.py +++ 
b/pymongo/encryption.py @@ -47,6 +47,7 @@ from pymongo.encryption_options import AutoEncryptionOpts, RangeOpts from pymongo.errors import ( ConfigurationError, + EncryptedCollectionError, EncryptionError, InvalidOperation, ServerSelectionTimeoutError, @@ -614,6 +615,9 @@ def create_encrypted_collection( as keyword arguments to this method. See the documentation for :meth:`~pymongo.database.Database.create_collection` for all valid options. + :Raises: + - :class:`~pymongo.errors.EncryptedCollectionError`: When either data-key creation or creating the collection fails. + .. versionadded:: 4.4 .. _create collection command: @@ -629,12 +633,7 @@ def create_encrypted_collection( master_key=master_key, ) except EncryptionError as exc: - raise EncryptionError( - Exception( - "Error occurred while creating data key for field %s with encryptedFields=%s" - % (field["path"], encrypted_fields) - ) - ) from exc + raise EncryptedCollectionError(exc, encrypted_fields) from exc kwargs["encryptedFields"] = encrypted_fields kwargs["check_exists"] = False try: @@ -643,11 +642,7 @@ def create_encrypted_collection( encrypted_fields, ) except Exception as exc: - raise EncryptionError( - Exception( - f"Error: {str(exc)} occurred while creating collection with encryptedFields={str(encrypted_fields)}" - ) - ) from exc + raise EncryptedCollectionError(exc, encrypted_fields) from exc def create_data_key( self, diff --git a/pymongo/errors.py b/pymongo/errors.py index efc7e2eca0..192eec99d9 100644 --- a/pymongo/errors.py +++ b/pymongo/errors.py @@ -359,6 +359,31 @@ def timeout(self) -> bool: return False +class EncryptedCollectionError(EncryptionError): + """Raised when creating a collection with encrypted_fields fails. + + .. note:: EncryptedCollectionError and `create_encrypted_collection` are both part of the + Queryable Encryption beta. Backwards-breaking changes may be made before the final release. + + .. versionadded:: 4.4 + """ + + def __init__(self, cause: Exception, encrypted_fields: Mapping[str, Any]) -> None: + super(EncryptedCollectionError, self).__init__(cause) + self.__encrypted_fields = encrypted_fields + + @property + def encrypted_fields(self) -> Mapping[str, Any]: + """The encrypted_fields document that allows inferring which data keys are *known* to be created. + + Note that the returned document is not guaranteed to contain information about *all* of the data keys that + were created, for example in the case of an indefinite error like a timeout. Use the `cause` property to + determine whether a definite or indefinite error caused this error, and only rely on the accuracy of the + encrypted_fields if the error is definite. 
+ """ + return self.__encrypted_fields + + class _OperationCancelled(AutoReconnect): """Internal error raised when a socket operation is cancelled.""" diff --git a/test/test_encryption.py b/test/test_encryption.py index eb9bf8e984..dcfb639160 100644 --- a/test/test_encryption.py +++ b/test/test_encryption.py @@ -74,6 +74,7 @@ BulkWriteError, ConfigurationError, DuplicateKeyError, + EncryptedCollectionError, EncryptionError, InvalidOperation, OperationFailure, @@ -2729,7 +2730,7 @@ def test_02_no_fields(self): def test_03_invalid_keyid(self): with self.assertRaisesRegex( - EncryptionError, + EncryptedCollectionError, "create.encryptedFields.fields.keyId' is the wrong type 'bool', expected type 'binData", ): self.client_encryption.create_encrypted_collection( @@ -2823,31 +2824,32 @@ def test_mixed_null_keyids(self): def test_create_datakey_fails(self): key = self.client_encryption.create_data_key(kms_provider="local") - # Make sure the error message includes the previous keys in the error message even when generating keys fails. - with self.assertRaisesRegex( - EncryptionError, - f"data key for field dob with encryptedFields=.*{re.escape(repr(key))}.*keyId.*None", - ): + encrypted_fields = { + "fields": [ + {"path": "address", "bsonType": "string", "keyId": key}, + {"path": "dob", "bsonType": "string", "keyId": None}, + ] + } + # Make sure the exception's encrypted_fields object includes the previous keys in the error message even when + # generating keys fails. + with self.assertRaises( + EncryptedCollectionError, + ) as exc: self.client_encryption.create_encrypted_collection( database=self.db, name="testing1", - encrypted_fields={ - "fields": [ - {"path": "address", "bsonType": "string", "keyId": key}, - {"path": "dob", "bsonType": "string", "keyId": None}, - ] - }, + encrypted_fields=encrypted_fields, kms_provider="does not exist", ) + self.assertEqual(exc.exception.encrypted_fields, encrypted_fields) def test_create_failure(self): key = self.client_encryption.create_data_key(kms_provider="local") - # Make sure the error message includes the previous keys in the error message even when it is the creation - # of the collection that fails. - with self.assertRaisesRegex( - EncryptionError, - f"while creating collection with encryptedFields=.*{re.escape(repr(key))}.*keyId.*Binary", - ): + # Make sure the exception's encrypted_fields object includes the previous keys in the error message even when + # it is the creation of the collection that fails. 
+ with self.assertRaises( + EncryptedCollectionError, + ) as exc: self.client_encryption.create_encrypted_collection( database=self.db, name=1, # type:ignore[arg-type] @@ -2859,6 +2861,8 @@ def test_create_failure(self): }, kms_provider="local", ) + for field in exc.exception.encrypted_fields["fields"]: + self.assertIsInstance(field["keyId"], Binary) def test_collection_name_collision(self): encrypted_fields = { @@ -2867,16 +2871,16 @@ def test_collection_name_collision(self): ] } self.db.create_collection("testing1") - with self.assertRaisesRegex( - EncryptionError, - "while creating collection with encryptedFields=.*keyId.*Binary", - ): + with self.assertRaises( + EncryptedCollectionError, + ) as exc: self.client_encryption.create_encrypted_collection( database=self.db, name="testing1", encrypted_fields=encrypted_fields, kms_provider="local", ) + self.assertIsInstance(exc.exception.encrypted_fields["fields"][0]["keyId"], Binary) self.db.drop_collection("testing1", encrypted_fields=encrypted_fields) self.client_encryption.create_encrypted_collection( database=self.db, @@ -2884,16 +2888,16 @@ def test_collection_name_collision(self): encrypted_fields=encrypted_fields, kms_provider="local", ) - with self.assertRaisesRegex( - EncryptionError, - "while creating collection with encryptedFields=.*keyId.*Binary", - ): + with self.assertRaises( + EncryptedCollectionError, + ) as exc: self.client_encryption.create_encrypted_collection( database=self.db, name="testing1", encrypted_fields=encrypted_fields, kms_provider="local", ) + self.assertIsInstance(exc.exception.encrypted_fields["fields"][0]["keyId"], Binary) if __name__ == "__main__": From 5635ef9ff02867ba0bb96b85f9c28ce6c69a76bc Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Thu, 9 Feb 2023 11:39:04 -0800 Subject: [PATCH 0337/1588] PYTHON-3599 Fix create_data_key/key_alt_names docs example (#1151) --- pymongo/encryption.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pymongo/encryption.py b/pymongo/encryption.py index 6a6150d0c0..2bd6880065 100644 --- a/pymongo/encryption.py +++ b/pymongo/encryption.py @@ -706,9 +706,9 @@ def create_data_key( name instead of by ``key_id``. The following example shows creating and referring to a data key by alternate name:: - client_encryption.create_data_key("local", keyAltNames=["name1"]) + client_encryption.create_data_key("local", key_alt_names=["name1"]) # reference the key with the alternate name - client_encryption.encrypt("457-55-5462", keyAltName="name1", + client_encryption.encrypt("457-55-5462", key_alt_name="name1", algorithm=Algorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Random) - `key_material` (optional): Sets the custom key material to be used by the data key for encryption and decryption. 
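Taken together, the corrected ``create_data_key``/``encrypt`` keywords above form a
complete round trip. Below is a minimal, self-contained sketch of that flow, assuming
``pymongocrypt`` is installed and a deployment is reachable at the default address; the
``keyvault.datakeys`` namespace and the throwaway ``os.urandom(96)`` local master key are
illustrative choices for this sketch, not part of the patch::

    import os

    from bson.codec_options import CodecOptions
    from pymongo import MongoClient
    from pymongo.encryption import Algorithm, ClientEncryption

    client = MongoClient()
    client_encryption = ClientEncryption(
        {"local": {"key": os.urandom(96)}},  # throwaway key material; persist a real key
        "keyvault.datakeys",  # illustrative key vault namespace
        client,
        CodecOptions(),
    )
    # Create the data key under an alternate name, then reference it by that
    # name (rather than by key_id) when encrypting.
    client_encryption.create_data_key("local", key_alt_names=["name1"])
    encrypted = client_encryption.encrypt(
        "457-55-5462",
        Algorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Random,
        key_alt_name="name1",
    )
    assert client_encryption.decrypt(encrypted) == "457-55-5462"
    client_encryption.close()
    client.close()

``create_data_key`` also returns the new key's id, so the same key can equally be
referenced via ``key_id`` instead of the alternate name.
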
From 05845b803866559957d4880916e1c88604ab6c6d Mon Sep 17 00:00:00 2001 From: Julius Park Date: Fri, 10 Feb 2023 15:28:04 -0800 Subject: [PATCH 0338/1588] PYTHON-3562 Type annotation of `bson.json_utils.loads` is incorrect (only accepts `str`) (#1152) --- bson/json_util.py | 2 +- test/test_json_util.py | 5 +++++ 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/bson/json_util.py b/bson/json_util.py index 0c2ca58283..ae464e4ed8 100644 --- a/bson/json_util.py +++ b/bson/json_util.py @@ -440,7 +440,7 @@ def dumps(obj: Any, *args: Any, **kwargs: Any) -> str: return json.dumps(_json_convert(obj, json_options), *args, **kwargs) -def loads(s: str, *args: Any, **kwargs: Any) -> Any: +def loads(s: Union[str, bytes, bytearray], *args: Any, **kwargs: Any) -> Any: """Helper function that wraps :func:`json.loads`. Automatically passes the object_hook for BSON type conversion. diff --git a/test/test_json_util.py b/test/test_json_util.py index 08ee63618f..b7960a16ea 100644 --- a/test/test_json_util.py +++ b/test/test_json_util.py @@ -71,6 +71,11 @@ def round_trip(self, doc, **kwargs): def test_basic(self): self.round_trip({"hello": "world"}) + def test_loads_bytes(self): + string = b'{"hello": "world"}' + self.assertEqual(json_util.loads(bytes(string)), {"hello": "world"}) + self.assertEqual(json_util.loads(bytearray(string)), {"hello": "world"}) + def test_json_options_with_options(self): opts = JSONOptions( datetime_representation=DatetimeRepresentation.NUMBERLONG, json_mode=JSONMode.LEGACY From 6ed6c374f133540ddc5c9d25fff638398ba83649 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Mon, 13 Feb 2023 16:50:29 -0600 Subject: [PATCH 0339/1588] PYTHON-3593 Remove bulk api docs page which is blank (#1154) --- doc/api/pymongo/bulk.rst | 6 ------ doc/api/pymongo/index.rst | 1 - 2 files changed, 7 deletions(-) delete mode 100644 doc/api/pymongo/bulk.rst diff --git a/doc/api/pymongo/bulk.rst b/doc/api/pymongo/bulk.rst deleted file mode 100644 index 0d597c26df..0000000000 --- a/doc/api/pymongo/bulk.rst +++ /dev/null @@ -1,6 +0,0 @@ -:mod:`bulk` -- The bulk write operations interface -================================================== - -.. automodule:: pymongo.bulk - :synopsis: The bulk write operations interface. - :members: diff --git a/doc/api/pymongo/index.rst b/doc/api/pymongo/index.rst index a4e15b9878..625c138170 100644 --- a/doc/api/pymongo/index.rst +++ b/doc/api/pymongo/index.rst @@ -29,7 +29,6 @@ Sub-modules: .. toctree:: :maxdepth: 2 - bulk change_stream client_options client_session From 1797785f993ffcc1907613e18c514eeb34ce7fff Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Tue, 14 Feb 2023 10:18:55 -0800 Subject: [PATCH 0340/1588] PYTHON-3577 Fix test_session_gc on serverless (#1153) --- test/test_load_balancer.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/test/test_load_balancer.py b/test/test_load_balancer.py index 728b4e567f..d4de8debf5 100644 --- a/test/test_load_balancer.py +++ b/test/test_load_balancer.py @@ -122,8 +122,10 @@ def test_session_gc(self): session = client.start_session() session.start_transaction() client.test_session_gc.test.find_one({}, session=session) - # Cleanup the transaction left open on the server. - self.addCleanup(self.client.admin.command, "killSessions", [session.session_id]) + # Cleanup the transaction left open on the server unless we're + # testing serverless which does not support killSessions. 
+            if not client_context.serverless:
+                self.addCleanup(self.client.admin.command, "killSessions", [session.session_id])

         if client_context.load_balancer:
             self.assertEqual(pool.active_sockets, 1)  # Pinned.

From b63dfbe1e40be437aa462e1ec96fc6836f25df62 Mon Sep 17 00:00:00 2001
From: Steven Silvester
Date: Wed, 15 Feb 2023 11:36:42 -0600
Subject: [PATCH 0341/1588] PYTHON-3533 Permit tlsDisableOCSPEndpointCheck in
 KMS TLS options (#1155)

---
 .evergreen/run-tests.sh |  3 ++-
 pymongo/uri_parser.py   |  1 -
 test/test_encryption.py | 28 ++++++++++++++++++++++------
 3 files changed, 24 insertions(+), 8 deletions(-)

diff --git a/.evergreen/run-tests.sh b/.evergreen/run-tests.sh
index d495e2671a..3a15163b63 100755
--- a/.evergreen/run-tests.sh
+++ b/.evergreen/run-tests.sh
@@ -151,7 +151,8 @@ fi

 if [ -n "$TEST_ENCRYPTION" ]; then
     # Need aws dependency for On-Demand KMS Credentials.
-    python -m pip install '.[aws]'
+    # Need OCSP dependency to verify OCSP TLS args.
+    python -m pip install '.[aws,ocsp]'

diff --git a/pymongo/uri_parser.py b/pymongo/uri_parser.py
index f59af2e74c..398dfbff00 100644
--- a/pymongo/uri_parser.py
+++ b/pymongo/uri_parser.py
@@ -605,7 +605,6 @@ def _parse_kms_tls_options(kms_tls_options):
             "tlsInsecure",
             "tlsAllowInvalidCertificates",
             "tlsAllowInvalidHostnames",
-            "tlsDisableOCSPEndpointCheck",
             "tlsDisableCertificateRevocationCheck",
         ]:
             if n in opts:
diff --git a/test/test_encryption.py b/test/test_encryption.py
index dcfb639160..1b9a0d8233 100644
--- a/test/test_encryption.py
+++ b/test/test_encryption.py
@@ -155,7 +155,6 @@ def test_init_kms_tls_options(self):
             {"kmip": {"tls": True, "tlsInsecure": True}},
             {"kmip": {"tls": True, "tlsAllowInvalidCertificates": True}},
             {"kmip": {"tls": True, "tlsAllowInvalidHostnames": True}},
-            {"kmip": {"tls": True, "tlsDisableOCSPEndpointCheck": True}},
         ]:
             with self.assertRaisesRegex(ConfigurationError, "Insecure TLS options prohibited"):
                 opts = AutoEncryptionOpts({}, "k.d", kms_tls_options=tls_opts)
@@ -2014,7 +2013,9 @@ def test_invalid_hostname_in_kms_certificate(self):
         # Some examples:
         # certificate verify failed: IP address mismatch, certificate is not valid for '127.0.0.1'. (_ssl.c:1129)"
         # hostname '127.0.0.1' doesn't match 'wronghost.com'
-        with self.assertRaisesRegex(EncryptionError, "IP address mismatch|wronghost"):
+        with self.assertRaisesRegex(
+            EncryptionError, "IP address mismatch|wronghost|IPAddressMismatch"
+        ):
             self.client_encrypted.create_data_key("aws", master_key=key)


@@ -2067,7 +2068,7 @@ def setUp(self):
         # [SSL: TLSV13_ALERT_CERTIFICATE_REQUIRED] tlsv13 alert certificate required (_ssl.c:2623)
         self.cert_error = (
             "certificate required|SSL handshake failed|"
-            "KMS connection closed|Connection reset by peer"
+            "KMS connection closed|Connection reset by peer|ECONNRESET"
         )
         # On Python 3.10+ this error might be:
         # EOF occurred in violation of protocol (_ssl.c:2384)
@@ -2099,7 +2100,9 @@ def test_01_aws(self):
         # certificate verify failed: IP address mismatch, certificate is not valid for '127.0.0.1'.
(_ssl.c:1129)" # hostname '127.0.0.1' doesn't match 'wronghost.com' key["endpoint"] = "127.0.0.1:8001" - with self.assertRaisesRegex(EncryptionError, "IP address mismatch|wronghost"): + with self.assertRaisesRegex( + EncryptionError, "IP address mismatch|wronghost|IPAddressMismatch" + ): self.client_encryption_invalid_hostname.create_data_key("aws", key) def test_02_azure(self): @@ -2114,7 +2117,9 @@ def test_02_azure(self): with self.assertRaisesRegex(EncryptionError, "expired|certificate verify failed"): self.client_encryption_expired.create_data_key("azure", key) # Invalid cert hostname error. - with self.assertRaisesRegex(EncryptionError, "IP address mismatch|wronghost"): + with self.assertRaisesRegex( + EncryptionError, "IP address mismatch|wronghost|IPAddressMismatch" + ): self.client_encryption_invalid_hostname.create_data_key("azure", key) def test_03_gcp(self): @@ -2129,7 +2134,9 @@ def test_03_gcp(self): with self.assertRaisesRegex(EncryptionError, "expired|certificate verify failed"): self.client_encryption_expired.create_data_key("gcp", key) # Invalid cert hostname error. - with self.assertRaisesRegex(EncryptionError, "IP address mismatch|wronghost"): + with self.assertRaisesRegex( + EncryptionError, "IP address mismatch|wronghost|IPAddressMismatch" + ): self.client_encryption_invalid_hostname.create_data_key("gcp", key) def test_04_kmip(self): @@ -2146,6 +2153,15 @@ def test_04_kmip(self): ): self.client_encryption_invalid_hostname.create_data_key("kmip") + def test_05_tlsDisableOCSPEndpointCheck_is_permitted(self): + providers = {"aws": {"accessKeyId": "foo", "secretAccessKey": "bar"}} + options = {"aws": {"tlsDisableOCSPEndpointCheck": True}} + encryption = ClientEncryption( + providers, "keyvault.datakeys", self.client, OPTS, kms_tls_options=options + ) + self.assertFalse(encryption._io_callbacks.opts._kms_ssl_contexts["aws"].check_ocsp_endpoint) + encryption.close() + # https://github.com/mongodb/specifications/blob/50e26fe/source/client-side-encryption/tests/README.rst#unique-index-on-keyaltnames class TestUniqueIndexOnKeyAltNamesProse(EncryptionIntegrationTest): From 5b96757b0eb14fec16214f942112a6f4293f9fbb Mon Sep 17 00:00:00 2001 From: Julius Park Date: Wed, 15 Feb 2023 13:03:12 -0800 Subject: [PATCH 0342/1588] PYTHON-3579 Test Failure - Amazon Linux 2018 fails downloading crypt_shared when it is not even needed (#1157) --- .evergreen/config.yml | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/.evergreen/config.yml b/.evergreen/config.yml index ab61725a20..1bdab16bed 100644 --- a/.evergreen/config.yml +++ b/.evergreen/config.yml @@ -283,6 +283,10 @@ functions: fi fi + if [ -n "${skip_crypt_shared}" ]; then + export SKIP_CRYPT_SHARED=1 + fi + ${PREPARE_SHELL} MONGODB_VERSION=${VERSION} \ TOPOLOGY=${TOPOLOGY} \ @@ -2133,6 +2137,7 @@ axes: run_on: amazon1-2018-test batchtime: 10080 # 7 days variables: + skip_crypt_shared: true python3_binary: "/opt/python/3.8/bin/python3" libmongocrypt_url: https://s3.amazonaws.com/mciuploads/libmongocrypt/linux-64-amazon-ami/master/latest/libmongocrypt.tar.gz - id: archlinux-test From c0dd24e4a7b45f2985a8fca4a0f44ea82974115d Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Thu, 16 Feb 2023 15:49:00 -0800 Subject: [PATCH 0343/1588] PYTHON-3609 Stop using deprecated setDaemon api in test suite (#1158) --- test/test_gridfs.py | 4 ++-- test/test_gridfs_bucket.py | 4 ++-- test/test_threads.py | 8 ++++---- test/utils_spec_runner.py | 2 +- 4 files changed, 9 insertions(+), 9 deletions(-) diff --git a/test/test_gridfs.py 
b/test/test_gridfs.py index 35a574a1d9..cfa6e43e85 100644 --- a/test/test_gridfs.py +++ b/test/test_gridfs.py @@ -47,7 +47,7 @@ def __init__(self, fs, n): threading.Thread.__init__(self) self.fs = fs self.n = n - self.setDaemon(True) + self.daemon = True def run(self): for _ in range(self.n): @@ -62,7 +62,7 @@ def __init__(self, fs, n, results): self.fs = fs self.n = n self.results = results - self.setDaemon(True) + self.daemon = True def run(self): for _ in range(self.n): diff --git a/test/test_gridfs_bucket.py b/test/test_gridfs_bucket.py index d9bf0cf058..b6a33b4ecc 100644 --- a/test/test_gridfs_bucket.py +++ b/test/test_gridfs_bucket.py @@ -44,7 +44,7 @@ def __init__(self, gfs, num): threading.Thread.__init__(self) self.gfs = gfs self.num = num - self.setDaemon(True) + self.daemon = True def run(self): for _ in range(self.num): @@ -59,7 +59,7 @@ def __init__(self, gfs, num, results): self.gfs = gfs self.num = num self.results = results - self.setDaemon(True) + self.daemon = True def run(self): for _ in range(self.num): diff --git a/test/test_threads.py b/test/test_threads.py index 2c73de52e7..899392e1a0 100644 --- a/test/test_threads.py +++ b/test/test_threads.py @@ -30,7 +30,7 @@ def __init__(self, collection, num): self.coll = collection self.num = num self.success = False - self.setDaemon(True) + self.daemon = True def run(self): for i in range(self.num): @@ -44,7 +44,7 @@ class SaveAndFind(threading.Thread): def __init__(self, collection): threading.Thread.__init__(self) self.collection = collection - self.setDaemon(True) + self.daemon = True self.passed = False def run(self): @@ -62,7 +62,7 @@ def __init__(self, collection, n, expect_exception): self.collection = collection self.n = n self.expect_exception = expect_exception - self.setDaemon(True) + self.daemon = True def run(self): for _ in range(self.n): @@ -85,7 +85,7 @@ def __init__(self, collection, n, expect_exception): self.collection = collection self.n = n self.expect_exception = expect_exception - self.setDaemon(True) + self.daemon = True def run(self): for _ in range(self.n): diff --git a/test/utils_spec_runner.py b/test/utils_spec_runner.py index 8528ecb8c7..4252420909 100644 --- a/test/utils_spec_runner.py +++ b/test/utils_spec_runner.py @@ -52,7 +52,7 @@ def __init__(self, name): super(SpecRunnerThread, self).__init__() self.name = name self.exc = None - self.setDaemon(True) + self.daemon = True self.cond = threading.Condition() self.ops = [] self.stopped = False From 1f8080525146e1155fd1688a6862a02fd42f7094 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Fri, 17 Feb 2023 08:31:09 -0600 Subject: [PATCH 0344/1588] PYTHON-3607 FAIL: test_01_aws (test.test_encryption.TestKmsTLSOptions) (#1159) --- test/test_encryption.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/test_encryption.py b/test/test_encryption.py index 1b9a0d8233..b7d588e747 100644 --- a/test/test_encryption.py +++ b/test/test_encryption.py @@ -2068,7 +2068,7 @@ def setUp(self): # [SSL: TLSV13_ALERT_CERTIFICATE_REQUIRED] tlsv13 alert certificate required (_ssl.c:2623) self.cert_error = ( "certificate required|SSL handshake failed|" - "KMS connection closed|Connection reset by peer|ECONNRESET" + "KMS connection closed|Connection reset by peer|ECONNRESET|EPIPE" ) # On Python 3.10+ this error might be: # EOF occurred in violation of protocol (_ssl.c:2384) From 6e2e70ab803e14bdf4b07eddbb5385d01be80cbc Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Fri, 17 Feb 2023 08:31:57 -0600 Subject: [PATCH 0345/1588] PYTHON-3381 
Improve readability of sphinx docs (#1156) --- .pre-commit-config.yaml | 2 +- doc/changelog.rst | 6 ------ doc/common-issues.rst | 2 -- doc/conf.py | 18 +++++++++++------- doc/docs-requirements.txt | 3 ++- doc/faq.rst | 2 -- doc/migrate-to-pymongo4.rst | 2 -- doc/python3.rst | 2 -- 8 files changed, 14 insertions(+), 23 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index cfe0db31cf..f0ee74c785 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -24,7 +24,7 @@ repos: args: [--line-length=100] - repo: https://github.com/PyCQA/isort - rev: 5.10.1 + rev: 5.12.0 hooks: - id: isort files: \.py$ diff --git a/doc/changelog.rst b/doc/changelog.rst index 6a6e6fef2d..2ad33e41ec 100644 --- a/doc/changelog.rst +++ b/doc/changelog.rst @@ -3519,9 +3519,3 @@ Changes in Version 0.9.7 :class:`~pymongo.collection.Collection` names - add version as :attr:`pymongo.version` - add ``--no_ext`` command line option to *setup.py* - -.. toctree:: - :hidden: - - python3 - examples/gevent diff --git a/doc/common-issues.rst b/doc/common-issues.rst index 1571b985e0..f0c9716689 100644 --- a/doc/common-issues.rst +++ b/doc/common-issues.rst @@ -3,8 +3,6 @@ Frequently Encountered Issues Also see the :ref:`TLSErrors` section. -.. contents:: - Server reports wire version X, PyMongo requires Y ------------------------------------------------- diff --git a/doc/conf.py b/doc/conf.py index f66de3868a..cbb525b419 100644 --- a/doc/conf.py +++ b/doc/conf.py @@ -32,7 +32,6 @@ except ImportError: pass - # Add any paths that contain templates here, relative to this directory. templates_path = ["_templates"] @@ -108,13 +107,18 @@ # -- Options for HTML output --------------------------------------------------- -# Theme gratefully vendored from CPython source. -html_theme = "pydoctheme" -html_theme_path = ["."] -html_theme_options = {"collapsiblesidebar": True, "googletag": False} +try: + import furo # noqa + + html_theme = "furo" +except ImportError: + # Theme gratefully vendored from CPython source. + html_theme = "pydoctheme" + html_theme_path = ["."] + html_theme_options = {"collapsiblesidebar": True, "googletag": False} -# Additional static files. -html_static_path = ["static"] + # Additional static files. + html_static_path = ["static"] # The name for this set of Sphinx documents. If None, it defaults to # " v documentation". diff --git a/doc/docs-requirements.txt b/doc/docs-requirements.txt index 455a47d217..3c66962855 100644 --- a/doc/docs-requirements.txt +++ b/doc/docs-requirements.txt @@ -1,4 +1,5 @@ -Sphinx~=4.2 +Sphinx~=6.1 sphinx_rtd_theme~=0.5 readthedocs-sphinx-search~=0.1 sphinxcontrib-shellcheck~=1.1 +furo==2022.12.7 diff --git a/doc/faq.rst b/doc/faq.rst index acf557a81b..876dc68ed8 100644 --- a/doc/faq.rst +++ b/doc/faq.rst @@ -1,8 +1,6 @@ Frequently Asked Questions ========================== -.. contents:: - Is PyMongo thread-safe? ----------------------- diff --git a/doc/migrate-to-pymongo4.rst b/doc/migrate-to-pymongo4.rst index 5843a2261b..561261c7ad 100644 --- a/doc/migrate-to-pymongo4.rst +++ b/doc/migrate-to-pymongo4.rst @@ -3,8 +3,6 @@ PyMongo 4 Migration Guide ========================= -.. contents:: - .. testsetup:: from pymongo import MongoClient, ReadPreference diff --git a/doc/python3.rst b/doc/python3.rst index 812bc33b35..40d5fec661 100644 --- a/doc/python3.rst +++ b/doc/python3.rst @@ -1,8 +1,6 @@ Python 3 FAQ ============ -.. contents:: - What Python 3 versions are supported? 
------------------------------------- From 715dd348102a8a5f81f620a3b25c7dedebeece8f Mon Sep 17 00:00:00 2001 From: Julius Park Date: Thu, 23 Feb 2023 10:20:17 -0800 Subject: [PATCH 0346/1588] PYTHON-2754 Add Spec Tests For DB Names With Commas (#1162) --- .../dbname-with-commas-escaped.json | 19 +++++++++++++++++++ .../replica-set/dbname-with-commas.json | 19 +++++++++++++++++++ 2 files changed, 38 insertions(+) create mode 100644 test/srv_seedlist/replica-set/dbname-with-commas-escaped.json create mode 100644 test/srv_seedlist/replica-set/dbname-with-commas.json diff --git a/test/srv_seedlist/replica-set/dbname-with-commas-escaped.json b/test/srv_seedlist/replica-set/dbname-with-commas-escaped.json new file mode 100644 index 0000000000..b5fcfd2c07 --- /dev/null +++ b/test/srv_seedlist/replica-set/dbname-with-commas-escaped.json @@ -0,0 +1,19 @@ +{ + "uri": "mongodb+srv://test1.test.build.10gen.cc/some%2Cdb?replicaSet=repl0", + "seeds": [ + "localhost.test.build.10gen.cc:27017", + "localhost.test.build.10gen.cc:27018" + ], + "hosts": [ + "localhost:27017", + "localhost:27018", + "localhost:27019" + ], + "options": { + "replicaSet": "repl0", + "ssl": true + }, + "parsed_options": { + "defaultDatabase": "some,db" + } +} diff --git a/test/srv_seedlist/replica-set/dbname-with-commas.json b/test/srv_seedlist/replica-set/dbname-with-commas.json new file mode 100644 index 0000000000..c1e85f4b99 --- /dev/null +++ b/test/srv_seedlist/replica-set/dbname-with-commas.json @@ -0,0 +1,19 @@ +{ + "uri": "mongodb+srv://test1.test.build.10gen.cc/some,db?replicaSet=repl0", + "seeds": [ + "localhost.test.build.10gen.cc:27017", + "localhost.test.build.10gen.cc:27018" + ], + "hosts": [ + "localhost:27017", + "localhost:27018", + "localhost:27019" + ], + "options": { + "replicaSet": "repl0", + "ssl": true + }, + "parsed_options": { + "defaultDatabase": "some,db" + } +} From 32faa261b68a2fd33c16b1ab88f97bb73b58e85d Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Thu, 23 Feb 2023 11:09:11 -0800 Subject: [PATCH 0347/1588] PYTHON-3616 Use minimum RTT for CSOT maxTimeMS calculation (#1163) Require at least 2 RTT samples, otherwise use 0 as RTT. Only keep last 10 samples. --- pymongo/_csot.py | 33 +++++- pymongo/monitor.py | 19 ++-- pymongo/server_description.py | 9 ++ pymongo/topology.py | 2 +- test/csot/command-execution.json | 183 +++++++++++++++++++++++++++++-- 5 files changed, 225 insertions(+), 21 deletions(-) diff --git a/pymongo/_csot.py b/pymongo/_csot.py index 5170c0d8ca..8a4617ecaf 100644 --- a/pymongo/_csot.py +++ b/pymongo/_csot.py @@ -16,8 +16,9 @@ import functools import time +from collections import deque from contextvars import ContextVar, Token -from typing import Any, Callable, MutableMapping, Optional, Tuple, TypeVar, cast +from typing import Any, Callable, Deque, MutableMapping, Optional, Tuple, TypeVar, cast from pymongo.write_concern import WriteConcern @@ -116,3 +117,33 @@ def apply_write_concern(cmd: MutableMapping, write_concern: Optional[WriteConcer wc.pop("wtimeout", None) if wc: cmd["writeConcern"] = wc + + +_MAX_RTT_SAMPLES: int = 10 +_MIN_RTT_SAMPLES: int = 2 + + +class MovingMinimum: + """Tracks a minimum RTT within the last 10 RTT samples.""" + + samples: Deque[float] + + def __init__(self) -> None: + self.samples = deque(maxlen=_MAX_RTT_SAMPLES) + + def add_sample(self, sample: float) -> None: + if sample < 0: + # Likely system time change while waiting for hello response + # and not using time.monotonic. Ignore it, the next one will + # probably be valid. 
+            return
+        self.samples.append(sample)
+
+    def get(self) -> float:
+        """Get the min, or 0.0 if there aren't enough samples yet."""
+        if len(self.samples) >= _MIN_RTT_SAMPLES:
+            return min(self.samples)
+        return 0.0
+
+    def reset(self) -> None:
+        self.samples.clear()
diff --git a/pymongo/monitor.py b/pymongo/monitor.py
index 44390e9180..9031d4b785 100644
--- a/pymongo/monitor.py
+++ b/pymongo/monitor.py
@@ -20,6 +20,7 @@
 from typing import Any, Mapping, cast

 from pymongo import common, periodic_executor
+from pymongo._csot import MovingMinimum
 from pymongo.errors import NotPrimaryError, OperationFailure, _OperationCancelled
 from pymongo.hello import Hello
 from pymongo.lock import _create_lock
@@ -40,7 +41,7 @@ class MonitorBase(object):
     def __init__(self, topology, name, interval, min_interval):
         """Base class to do periodic work on a background thread.

-        The the background thread is signaled to stop when the Topology or
+        The background thread is signaled to stop when the Topology or
         this instance is freed.
         """
         # We strongly reference the executor and it weakly references us via
@@ -250,7 +251,8 @@ def _check_once(self):
         if not response.awaitable:
             self._rtt_monitor.add_sample(round_trip_time)

-        sd = ServerDescription(address, response, self._rtt_monitor.average())
+        avg_rtt, min_rtt = self._rtt_monitor.get()
+        sd = ServerDescription(address, response, avg_rtt, min_round_trip_time=min_rtt)
         if self._publish:
             self._listeners.publish_server_heartbeat_succeeded(
                 address, round_trip_time, response, response.awaitable
@@ -350,6 +352,7 @@ def __init__(self, topology, topology_settings, pool):
         self._pool = pool

         self._moving_average = MovingAverage()
+        self._moving_min = MovingMinimum()
         self._lock = _create_lock()

     def close(self):
@@ -362,20 +365,22 @@ def add_sample(self, sample):
         """Add a RTT sample."""
         with self._lock:
             self._moving_average.add_sample(sample)
+            self._moving_min.add_sample(sample)

-    def average(self):
-        """Get the calculated average, or None if no samples yet."""
+    def get(self):
+        """Get the calculated average (or None if no samples yet) and the min."""
         with self._lock:
-            return self._moving_average.get()
+            return self._moving_average.get(), self._moving_min.get()

     def reset(self):
         """Reset the average RTT."""
         with self._lock:
-            return self._moving_average.reset()
+            self._moving_average.reset()
+            self._moving_min.reset()

     def _run(self):
         try:
-            # NOTE: This thread is only run when when using the streaming
+            # NOTE: This thread is only run when using the streaming
             # heartbeat protocol (MongoDB 4.4+).
             # XXX: Skip check if the server is unknown?
rtt = self._ping() diff --git a/pymongo/server_description.py b/pymongo/server_description.py index 47e27c531b..53f90cea25 100644 --- a/pymongo/server_description.py +++ b/pymongo/server_description.py @@ -32,6 +32,7 @@ class ServerDescription(object): - `hello`: Optional Hello instance - `round_trip_time`: Optional float - `error`: Optional, the last error attempting to connect to the server + - `round_trip_time`: Optional float, the min latency from the most recent samples """ __slots__ = ( @@ -47,6 +48,7 @@ class ServerDescription(object): "_min_wire_version", "_max_wire_version", "_round_trip_time", + "_min_round_trip_time", "_me", "_is_writable", "_is_readable", @@ -66,6 +68,7 @@ def __init__( hello: Optional[Hello] = None, round_trip_time: Optional[float] = None, error: Optional[Exception] = None, + min_round_trip_time: float = 0.0, ) -> None: self._address = address if not hello: @@ -88,6 +91,7 @@ def __init__( self._is_readable = hello.is_readable self._ls_timeout_minutes = hello.logical_session_timeout_minutes self._round_trip_time = round_trip_time + self._min_round_trip_time = min_round_trip_time self._me = hello.me self._last_update_time = time.monotonic() self._error = error @@ -203,6 +207,11 @@ def round_trip_time(self) -> Optional[float]: return self._round_trip_time + @property + def min_round_trip_time(self) -> float: + """The min latency from the most recent samples.""" + return self._min_round_trip_time + @property def error(self) -> Optional[Exception]: """The last error attempting to connect to the server, or None.""" diff --git a/pymongo/topology.py b/pymongo/topology.py index 87a566fa6e..904f6b1836 100644 --- a/pymongo/topology.py +++ b/pymongo/topology.py @@ -271,7 +271,7 @@ def select_server(self, selector, server_selection_timeout=None, address=None): """Like select_servers, but choose a random server if several match.""" server = self._select_server(selector, server_selection_timeout, address) if _csot.get_timeout(): - _csot.set_rtt(server.description.round_trip_time) + _csot.set_rtt(server.description.min_round_trip_time) return server def select_server_by_address(self, address, server_selection_timeout=None): diff --git a/test/csot/command-execution.json b/test/csot/command-execution.json index 92358f2184..10f87d43ac 100644 --- a/test/csot/command-execution.json +++ b/test/csot/command-execution.json @@ -3,7 +3,14 @@ "schemaVersion": "1.9", "runOnRequirements": [ { - "minServerVersion": "4.9" + "minServerVersion": "4.9", + "topologies": [ + "single", + "replicaset", + "sharded-replicaset", + "sharded" + ], + "serverless": "forbid" } ], "createEntities": [ @@ -45,7 +52,7 @@ ], "appName": "reduceMaxTimeMSTest", "blockConnection": true, - "blockTimeMS": 20 + "blockTimeMS": 50 } } } @@ -62,7 +69,8 @@ "uriOptions": { "appName": "reduceMaxTimeMSTest", "w": 1, - "timeoutMS": 500 + "timeoutMS": 500, + "heartbeatFrequencyMS": 500 }, "observeEvents": [ "commandStartedEvent" @@ -86,6 +94,23 @@ ] } }, + { + "name": "insertOne", + "object": "timeoutCollection", + "arguments": { + "document": { + "_id": 1 + }, + "timeoutMS": 100000 + } + }, + { + "name": "wait", + "object": "testRunner", + "arguments": { + "ms": 1000 + } + }, { "name": "insertOne", "object": "timeoutCollection", @@ -100,6 +125,15 @@ { "client": "client", "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "timeoutColl" + } + } + }, { "commandStartedEvent": { "commandName": "insert", @@ -107,7 +141,7 @@ "command": { "insert": 
"timeoutColl", "maxTimeMS": { - "$$lte": 500 + "$$lte": 450 } } } @@ -134,7 +168,7 @@ ], "appName": "rttTooHighTest", "blockConnection": true, - "blockTimeMS": 20 + "blockTimeMS": 50 } } } @@ -151,7 +185,8 @@ "uriOptions": { "appName": "rttTooHighTest", "w": 1, - "timeoutMS": 10 + "timeoutMS": 10, + "heartbeatFrequencyMS": 500 }, "observeEvents": [ "commandStartedEvent" @@ -175,6 +210,23 @@ ] } }, + { + "name": "insertOne", + "object": "timeoutCollection", + "arguments": { + "document": { + "_id": 1 + }, + "timeoutMS": 100000 + } + }, + { + "name": "wait", + "object": "testRunner", + "arguments": { + "ms": 1000 + } + }, { "name": "insertOne", "object": "timeoutCollection", @@ -192,7 +244,7 @@ "object": "timeoutCollection", "arguments": { "document": { - "_id": 2 + "_id": 3 } }, "expectError": { @@ -204,12 +256,100 @@ "object": "timeoutCollection", "arguments": { "document": { - "_id": 2 + "_id": 4 } }, "expectError": { "isTimeoutError": true } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "timeoutColl" + } + } + } + ] + } + ] + }, + { + "description": "short-circuit is not enabled with only 1 RTT measurement", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": "alwaysOn", + "data": { + "failCommands": [ + "hello", + "isMaster" + ], + "appName": "reduceMaxTimeMSTest", + "blockConnection": true, + "blockTimeMS": 100 + } + } + } + }, + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "useMultipleMongoses": false, + "uriOptions": { + "appName": "reduceMaxTimeMSTest", + "w": 1, + "timeoutMS": 90, + "heartbeatFrequencyMS": 100000 + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "timeoutCollection", + "database": "database", + "collectionName": "timeoutColl" + } + } + ] + } + }, + { + "name": "insertOne", + "object": "timeoutCollection", + "arguments": { + "document": { + "_id": 1 + }, + "timeoutMS": 100000 + } }, { "name": "insertOne", @@ -218,16 +358,35 @@ "document": { "_id": 2 } - }, - "expectError": { - "isTimeoutError": true } } ], "expectEvents": [ { "client": "client", - "events": [] + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "timeoutColl" + } + } + }, + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "timeoutColl", + "maxTimeMS": { + "$$lte": 450 + } + } + } + } + ] } ] } From 715535159968c32b558345789fd70db88ac116c4 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Thu, 23 Feb 2023 15:00:57 -0600 Subject: [PATCH 0348/1588] PYTHON-3618 Perf tests are failing on the centos6-perf boxes due to mongosh download (#1164) --- .evergreen/perf.yml | 24 ++++++------------------ 1 file changed, 6 insertions(+), 18 deletions(-) diff --git a/.evergreen/perf.yml b/.evergreen/perf.yml index d975fca79f..43b21a65fb 100644 --- a/.evergreen/perf.yml +++ b/.evergreen/perf.yml @@ -199,17 +199,6 @@ post: - func: "cleanup" tasks: - - name: "perf-3.6-standalone" - tags: ["perf"] - commands: - - func: "bootstrap mongo-orchestration" - vars: - VERSION: "3.6" - TOPOLOGY: "server" - - func: 
"run perf tests" - - func: "attach benchmark test results" - - func: "send dashboard data" - - name: "perf-4.0-standalone" tags: ["perf"] commands: @@ -221,23 +210,23 @@ tasks: - func: "attach benchmark test results" - func: "send dashboard data" - - name: "perf-4.2-standalone" + - name: "perf-4.4-standalone" tags: ["perf"] commands: - func: "bootstrap mongo-orchestration" vars: - VERSION: "4.2" + VERSION: "4.4" TOPOLOGY: "server" - func: "run perf tests" - func: "attach benchmark test results" - func: "send dashboard data" - - name: "perf-4.4-standalone" + - name: "perf-6.0-standalone" tags: ["perf"] commands: - func: "bootstrap mongo-orchestration" vars: - VERSION: "4.4" + VERSION: "6.0" TOPOLOGY: "server" - func: "run perf tests" - func: "attach benchmark test results" @@ -248,9 +237,8 @@ buildvariants: - name: "perf-tests" display_name: "Performance Benchmark Tests" batchtime: 10080 # 7 days - run_on: centos6-perf + run_on: ubuntu2004-large tasks: - - name: "perf-3.6-standalone" - name: "perf-4.0-standalone" - - name: "perf-4.2-standalone" - name: "perf-4.4-standalone" + - name: "perf-6.0-standalone" From 67023b3835fb9371fb2c6d7fd8980a25626d1973 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Wed, 1 Mar 2023 15:46:19 -0600 Subject: [PATCH 0349/1588] PYTHON-3622 Improve Server Log Download in Evergreen (#1166) --- .evergreen/config.yml | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/.evergreen/config.yml b/.evergreen/config.yml index 1bdab16bed..e92cf96a1e 100644 --- a/.evergreen/config.yml +++ b/.evergreen/config.yml @@ -165,7 +165,10 @@ functions: script: | set -o xtrace ${PREPARE_SHELL} - find $MONGO_ORCHESTRATION_HOME -name \*.log | xargs tar czf mongodb-logs.tar.gz + mkdir out_dir + find $MONGO_ORCHESTRATION_HOME -name \*.log -exec sh -c 'x="{}"; mv $x $PWD/out_dir/$(basename $(dirname $x))_$(basename $x)' \; + tar zcvf mongodb-logs.tar.gz -C out_dir/ . 
+ rm -rf out_dir - command: archive.targz_pack params: target: "mongo-coredumps.tgz" From c27ce70d1c7a7cb0230fc5c33fdb1bbc8a66fc5d Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Wed, 1 Mar 2023 19:02:24 -0600 Subject: [PATCH 0350/1588] PYTHON-3620 Ensure unittest-xml-reporting is Installed in Evergreen Tasks (#1165) --- .evergreen/run-atlas-tests.sh | 5 ----- .evergreen/utils.sh | 2 +- 2 files changed, 1 insertion(+), 6 deletions(-) diff --git a/.evergreen/run-atlas-tests.sh b/.evergreen/run-atlas-tests.sh index 3f8a1b45f0..2e6272040f 100644 --- a/.evergreen/run-atlas-tests.sh +++ b/.evergreen/run-atlas-tests.sh @@ -17,9 +17,4 @@ fi createvirtualenv $PYTHON_BINARY atlastest trap "deactivate; rm -rf atlastest" EXIT HUP -echo "Running tests without dnspython" -python test/atlas/test_connection.py - -python -m pip install dnspython -echo "Running tests with dnspython" MUST_TEST_SRV="1" python test/atlas/test_connection.py diff --git a/.evergreen/utils.sh b/.evergreen/utils.sh index 30013ed06b..a474ce545e 100755 --- a/.evergreen/utils.sh +++ b/.evergreen/utils.sh @@ -30,7 +30,7 @@ createvirtualenv () { fi python -m pip install --upgrade pip - python -m pip install --upgrade setuptools wheel + python -m pip install --upgrade setuptools wheel unittest-xml-reporting } # Usage: From 10a55001c89049afe7436e6521d8a6b6d0ef9267 Mon Sep 17 00:00:00 2001 From: Julius Park Date: Thu, 2 Mar 2023 14:14:47 -0800 Subject: [PATCH 0351/1588] PYTHON-3626 Document srvMaxHosts in MongoClient options (#1167) --- pymongo/mongo_client.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/pymongo/mongo_client.py b/pymongo/mongo_client.py index ab0c749889..05f00b48ee 100644 --- a/pymongo/mongo_client.py +++ b/pymongo/mongo_client.py @@ -401,6 +401,10 @@ def __init__( "mongodb+srv://" URIs. Defaults to "mongodb". Use it like so:: MongoClient("mongodb+srv://example.com/?srvServiceName=customname") + - `srvMaxHosts`: (int) limits the number of mongos-like hosts a client will + connect to. More specifically, when a "mongodb+srv://" connection string + resolves to more than srvMaxHosts number of hosts, the client will randomly + choose an srvMaxHosts sized subset of hosts. | **Write Concern options:** @@ -575,8 +579,8 @@ def __init__( keyword arguments. - The default for `uuidRepresentation` was changed from ``pythonLegacy`` to ``unspecified``. - - Added the ``srvServiceName`` and ``maxConnecting`` URI and - keyword argument. + - Added the ``srvServiceName``, ``maxConnecting``, and ``srvMaxHosts`` URI and + keyword arguments. .. versionchanged:: 3.12 Added the ``server_api`` keyword argument. From 5e203bea8dd8cfd6d6dd9f238656976194e6c769 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Tue, 7 Mar 2023 10:55:20 -0800 Subject: [PATCH 0352/1588] PYTHON-3629 Actually install pymongo for Atlas connect tests (#1168) --- .evergreen/run-atlas-tests.sh | 3 ++- test/atlas/test_connection.py | 18 ------------------ 2 files changed, 2 insertions(+), 19 deletions(-) diff --git a/.evergreen/run-atlas-tests.sh b/.evergreen/run-atlas-tests.sh index 2e6272040f..4a39880d0c 100644 --- a/.evergreen/run-atlas-tests.sh +++ b/.evergreen/run-atlas-tests.sh @@ -17,4 +17,5 @@ fi createvirtualenv $PYTHON_BINARY atlastest trap "deactivate; rm -rf atlastest" EXIT HUP -MUST_TEST_SRV="1" python test/atlas/test_connection.py +python -m pip install . 
+python test/atlas/test_connection.py diff --git a/test/atlas/test_connection.py b/test/atlas/test_connection.py index a1eb97edee..39d817140e 100644 --- a/test/atlas/test_connection.py +++ b/test/atlas/test_connection.py @@ -24,14 +24,6 @@ import pymongo from pymongo.ssl_support import HAS_SNI -try: - import dns # noqa - - HAS_DNS = True -except ImportError: - HAS_DNS = False - - URIS = { "ATLAS_REPL": os.environ.get("ATLAS_REPL"), "ATLAS_SHRD": os.environ.get("ATLAS_SHRD"), @@ -47,10 +39,6 @@ "ATLAS_SRV_SERVERLESS": os.environ.get("ATLAS_SRV_SERVERLESS"), } -# Set this variable to true to run the SRV tests even when dnspython is not -# installed. -MUST_TEST_SRV = os.environ.get("MUST_TEST_SRV") - def connect(uri): if not uri: @@ -87,27 +75,21 @@ def connect_srv(self, uri): self.assertIn("mongodb+srv://", uri) @unittest.skipUnless(HAS_SNI, "Free tier requires SNI support") - @unittest.skipUnless(HAS_DNS or MUST_TEST_SRV, "SRV requires dnspython") def test_srv_free_tier(self): self.connect_srv(URIS["ATLAS_SRV_FREE"]) - @unittest.skipUnless(HAS_DNS or MUST_TEST_SRV, "SRV requires dnspython") def test_srv_replica_set(self): self.connect_srv(URIS["ATLAS_SRV_REPL"]) - @unittest.skipUnless(HAS_DNS or MUST_TEST_SRV, "SRV requires dnspython") def test_srv_sharded_cluster(self): self.connect_srv(URIS["ATLAS_SRV_SHRD"]) - @unittest.skipUnless(HAS_DNS or MUST_TEST_SRV, "SRV requires dnspython") def test_srv_tls_11(self): self.connect_srv(URIS["ATLAS_SRV_TLS11"]) - @unittest.skipUnless(HAS_DNS or MUST_TEST_SRV, "SRV requires dnspython") def test_srv_tls_12(self): self.connect_srv(URIS["ATLAS_SRV_TLS12"]) - @unittest.skipUnless(HAS_DNS or MUST_TEST_SRV, "SRV requires dnspython") def test_srv_serverless(self): self.connect_srv(URIS["ATLAS_SRV_SERVERLESS"]) From 25ba21770c7f0dbffdbe87f6b6087dd5b521e258 Mon Sep 17 00:00:00 2001 From: Julius Park Date: Mon, 13 Mar 2023 14:57:46 -0700 Subject: [PATCH 0353/1588] PYTHON-3624 Update fle2-* tests to match name requirements in SERVER-74069 (#1169) --- .../spec/legacy/fle2-CreateCollection.json | 252 +++++++++--------- ...EncryptedFields-vs-EncryptedFieldsMap.json | 6 +- 2 files changed, 129 insertions(+), 129 deletions(-) diff --git a/test/client-side-encryption/spec/legacy/fle2-CreateCollection.json b/test/client-side-encryption/spec/legacy/fle2-CreateCollection.json index 9f8db41f87..7f4f38161e 100644 --- a/test/client-side-encryption/spec/legacy/fle2-CreateCollection.json +++ b/test/client-side-encryption/spec/legacy/fle2-CreateCollection.json @@ -21,9 +21,9 @@ }, "encryptedFieldsMap": { "default.encryptedCollection": { - "escCollection": "encryptedCollection.esc", - "eccCollection": "encryptedCollection.ecc", - "ecocCollection": "encryptedCollection.ecoc", + "escCollection": "enxcol_.encryptedCollection.esc", + "eccCollection": "enxcol_.encryptedCollection.ecc", + "ecocCollection": "enxcol_.encryptedCollection.ecoc", "fields": [ { "path": "firstName", @@ -60,7 +60,7 @@ "object": "testRunner", "arguments": { "database": "default", - "collection": "encryptedCollection.esc" + "collection": "enxcol_.encryptedCollection.esc" } }, { @@ -68,7 +68,7 @@ "object": "testRunner", "arguments": { "database": "default", - "collection": "encryptedCollection.ecc" + "collection": "enxcol_.encryptedCollection.ecc" } }, { @@ -76,7 +76,7 @@ "object": "testRunner", "arguments": { "database": "default", - "collection": "encryptedCollection.ecoc" + "collection": "enxcol_.encryptedCollection.ecoc" } }, { @@ -101,7 +101,7 @@ { "command_started_event": { "command": { - "drop": 
"encryptedCollection.esc" + "drop": "enxcol_.encryptedCollection.esc" }, "command_name": "drop", "database_name": "default" @@ -110,7 +110,7 @@ { "command_started_event": { "command": { - "drop": "encryptedCollection.ecc" + "drop": "enxcol_.encryptedCollection.ecc" }, "command_name": "drop", "database_name": "default" @@ -119,7 +119,7 @@ { "command_started_event": { "command": { - "drop": "encryptedCollection.ecoc" + "drop": "enxcol_.encryptedCollection.ecoc" }, "command_name": "drop", "database_name": "default" @@ -137,7 +137,7 @@ { "command_started_event": { "command": { - "create": "encryptedCollection.esc", + "create": "enxcol_.encryptedCollection.esc", "clusteredIndex": { "key": { "_id": 1 @@ -152,7 +152,7 @@ { "command_started_event": { "command": { - "create": "encryptedCollection.ecc", + "create": "enxcol_.encryptedCollection.ecc", "clusteredIndex": { "key": { "_id": 1 @@ -167,7 +167,7 @@ { "command_started_event": { "command": { - "create": "encryptedCollection.ecoc", + "create": "enxcol_.encryptedCollection.ecoc", "clusteredIndex": { "key": { "_id": 1 @@ -184,9 +184,9 @@ "command": { "create": "encryptedCollection", "encryptedFields": { - "escCollection": "encryptedCollection.esc", - "eccCollection": "encryptedCollection.ecc", - "ecocCollection": "encryptedCollection.ecoc", + "escCollection": "enxcol_.encryptedCollection.esc", + "eccCollection": "enxcol_.encryptedCollection.ecc", + "ecocCollection": "enxcol_.encryptedCollection.ecoc", "fields": [ { "path": "firstName", @@ -745,9 +745,9 @@ }, "encryptedFieldsMap": { "default.encryptedCollection": { - "escCollection": "encryptedCollection.esc", - "eccCollection": "encryptedCollection.ecc", - "ecocCollection": "encryptedCollection.ecoc", + "escCollection": "enxcol_.encryptedCollection.esc", + "eccCollection": "enxcol_.encryptedCollection.ecc", + "ecocCollection": "enxcol_.encryptedCollection.ecoc", "fields": [ { "path": "firstName", @@ -762,9 +762,9 @@ ] }, "default.encryptedCollection.esc": { - "escCollection": "encryptedCollection", - "eccCollection": "encryptedCollection.ecc", - "ecocCollection": "encryptedCollection.ecoc", + "escCollection": "enxcol_.encryptedCollection.esc", + "eccCollection": "enxcol_.encryptedCollection.ecc", + "ecocCollection": "enxcol_.encryptedCollection.ecoc", "fields": [ { "path": "firstName", @@ -801,7 +801,7 @@ "object": "testRunner", "arguments": { "database": "default", - "collection": "encryptedCollection.esc" + "collection": "enxcol_.encryptedCollection.esc" } }, { @@ -809,7 +809,7 @@ "object": "testRunner", "arguments": { "database": "default", - "collection": "encryptedCollection.ecc" + "collection": "enxcol_.encryptedCollection.ecc" } }, { @@ -817,7 +817,7 @@ "object": "testRunner", "arguments": { "database": "default", - "collection": "encryptedCollection.ecoc" + "collection": "enxcol_.encryptedCollection.ecoc" } }, { @@ -842,7 +842,7 @@ { "command_started_event": { "command": { - "drop": "encryptedCollection.esc" + "drop": "enxcol_.encryptedCollection.esc" }, "command_name": "drop", "database_name": "default" @@ -851,7 +851,7 @@ { "command_started_event": { "command": { - "drop": "encryptedCollection.ecc" + "drop": "enxcol_.encryptedCollection.ecc" }, "command_name": "drop", "database_name": "default" @@ -860,7 +860,7 @@ { "command_started_event": { "command": { - "drop": "encryptedCollection.ecoc" + "drop": "enxcol_.encryptedCollection.ecoc" }, "command_name": "drop", "database_name": "default" @@ -878,7 +878,7 @@ { "command_started_event": { "command": { - "create": 
"encryptedCollection.esc", + "create": "enxcol_.encryptedCollection.esc", "clusteredIndex": { "key": { "_id": 1 @@ -893,7 +893,7 @@ { "command_started_event": { "command": { - "create": "encryptedCollection.ecc", + "create": "enxcol_.encryptedCollection.ecc", "clusteredIndex": { "key": { "_id": 1 @@ -908,7 +908,7 @@ { "command_started_event": { "command": { - "create": "encryptedCollection.ecoc", + "create": "enxcol_.encryptedCollection.ecoc", "clusteredIndex": { "key": { "_id": 1 @@ -925,9 +925,9 @@ "command": { "create": "encryptedCollection", "encryptedFields": { - "escCollection": "encryptedCollection.esc", - "eccCollection": "encryptedCollection.ecc", - "ecocCollection": "encryptedCollection.ecoc", + "escCollection": "enxcol_.encryptedCollection.esc", + "eccCollection": "enxcol_.encryptedCollection.ecc", + "ecocCollection": "enxcol_.encryptedCollection.ecoc", "fields": [ { "path": "firstName", @@ -974,9 +974,9 @@ }, "encryptedFieldsMap": { "default.encryptedCollection": { - "escCollection": "encryptedCollection.esc", - "eccCollection": "encryptedCollection.ecc", - "ecocCollection": "encryptedCollection.ecoc", + "escCollection": "enxcol_.encryptedCollection.esc", + "eccCollection": "enxcol_.encryptedCollection.ecc", + "ecocCollection": "enxcol_.encryptedCollection.ecoc", "fields": [ { "path": "firstName", @@ -1059,9 +1059,9 @@ }, "encryptedFieldsMap": { "default.encryptedCollection": { - "escCollection": "encryptedCollection.esc", - "eccCollection": "encryptedCollection.ecc", - "ecocCollection": "encryptedCollection.ecoc", + "escCollection": "enxcol_.encryptedCollection.esc", + "eccCollection": "enxcol_.encryptedCollection.ecc", + "ecocCollection": "enxcol_.encryptedCollection.ecoc", "fields": [ { "path": "firstName", @@ -1098,7 +1098,7 @@ "object": "testRunner", "arguments": { "database": "default", - "collection": "encryptedCollection.esc" + "collection": "enxcol_.encryptedCollection.esc" } }, { @@ -1106,7 +1106,7 @@ "object": "testRunner", "arguments": { "database": "default", - "collection": "encryptedCollection.ecc" + "collection": "enxcol_.encryptedCollection.ecc" } }, { @@ -1114,7 +1114,7 @@ "object": "testRunner", "arguments": { "database": "default", - "collection": "encryptedCollection.ecoc" + "collection": "enxcol_.encryptedCollection.ecoc" } }, { @@ -1139,7 +1139,7 @@ { "command_started_event": { "command": { - "drop": "encryptedCollection.esc" + "drop": "enxcol_.encryptedCollection.esc" }, "command_name": "drop", "database_name": "default" @@ -1148,7 +1148,7 @@ { "command_started_event": { "command": { - "drop": "encryptedCollection.ecc" + "drop": "enxcol_.encryptedCollection.ecc" }, "command_name": "drop", "database_name": "default" @@ -1157,7 +1157,7 @@ { "command_started_event": { "command": { - "drop": "encryptedCollection.ecoc" + "drop": "enxcol_.encryptedCollection.ecoc" }, "command_name": "drop", "database_name": "default" @@ -1175,7 +1175,7 @@ { "command_started_event": { "command": { - "create": "encryptedCollection.esc", + "create": "enxcol_.encryptedCollection.esc", "clusteredIndex": { "key": { "_id": 1 @@ -1190,7 +1190,7 @@ { "command_started_event": { "command": { - "create": "encryptedCollection.ecc", + "create": "enxcol_.encryptedCollection.ecc", "clusteredIndex": { "key": { "_id": 1 @@ -1205,7 +1205,7 @@ { "command_started_event": { "command": { - "create": "encryptedCollection.ecoc", + "create": "enxcol_.encryptedCollection.ecoc", "clusteredIndex": { "key": { "_id": 1 @@ -1222,9 +1222,9 @@ "command": { "create": "encryptedCollection", "encryptedFields": { 
- "escCollection": "encryptedCollection.esc", - "eccCollection": "encryptedCollection.ecc", - "ecocCollection": "encryptedCollection.ecoc", + "escCollection": "enxcol_.encryptedCollection.esc", + "eccCollection": "enxcol_.encryptedCollection.ecc", + "ecocCollection": "enxcol_.encryptedCollection.ecoc", "fields": [ { "path": "firstName", @@ -1278,9 +1278,9 @@ "arguments": { "collection": "encryptedCollection", "encryptedFields": { - "escCollection": "encryptedCollection.esc", - "eccCollection": "encryptedCollection.ecc", - "ecocCollection": "encryptedCollection.ecoc", + "escCollection": "enxcol_.encryptedCollection.esc", + "eccCollection": "enxcol_.encryptedCollection.ecc", + "ecocCollection": "enxcol_.encryptedCollection.ecoc", "fields": [ { "path": "firstName", @@ -1302,9 +1302,9 @@ "arguments": { "collection": "encryptedCollection", "encryptedFields": { - "escCollection": "encryptedCollection.esc", - "eccCollection": "encryptedCollection.ecc", - "ecocCollection": "encryptedCollection.ecoc", + "escCollection": "enxcol_.encryptedCollection.esc", + "eccCollection": "enxcol_.encryptedCollection.ecc", + "ecocCollection": "enxcol_.encryptedCollection.ecoc", "fields": [ { "path": "firstName", @@ -1325,7 +1325,7 @@ "object": "testRunner", "arguments": { "database": "default", - "collection": "encryptedCollection.esc" + "collection": "enxcol_.encryptedCollection.esc" } }, { @@ -1333,7 +1333,7 @@ "object": "testRunner", "arguments": { "database": "default", - "collection": "encryptedCollection.ecc" + "collection": "enxcol_.encryptedCollection.ecc" } }, { @@ -1341,7 +1341,7 @@ "object": "testRunner", "arguments": { "database": "default", - "collection": "encryptedCollection.ecoc" + "collection": "enxcol_.encryptedCollection.ecoc" } }, { @@ -1366,7 +1366,7 @@ { "command_started_event": { "command": { - "drop": "encryptedCollection.esc" + "drop": "enxcol_.encryptedCollection.esc" }, "command_name": "drop", "database_name": "default" @@ -1375,7 +1375,7 @@ { "command_started_event": { "command": { - "drop": "encryptedCollection.ecc" + "drop": "enxcol_.encryptedCollection.ecc" }, "command_name": "drop", "database_name": "default" @@ -1384,7 +1384,7 @@ { "command_started_event": { "command": { - "drop": "encryptedCollection.ecoc" + "drop": "enxcol_.encryptedCollection.ecoc" }, "command_name": "drop", "database_name": "default" @@ -1402,7 +1402,7 @@ { "command_started_event": { "command": { - "create": "encryptedCollection.esc", + "create": "enxcol_.encryptedCollection.esc", "clusteredIndex": { "key": { "_id": 1 @@ -1417,7 +1417,7 @@ { "command_started_event": { "command": { - "create": "encryptedCollection.ecc", + "create": "enxcol_.encryptedCollection.ecc", "clusteredIndex": { "key": { "_id": 1 @@ -1432,7 +1432,7 @@ { "command_started_event": { "command": { - "create": "encryptedCollection.ecoc", + "create": "enxcol_.encryptedCollection.ecoc", "clusteredIndex": { "key": { "_id": 1 @@ -1449,9 +1449,9 @@ "command": { "create": "encryptedCollection", "encryptedFields": { - "escCollection": "encryptedCollection.esc", - "eccCollection": "encryptedCollection.ecc", - "ecocCollection": "encryptedCollection.ecoc", + "escCollection": "enxcol_.encryptedCollection.esc", + "eccCollection": "enxcol_.encryptedCollection.ecc", + "ecocCollection": "enxcol_.encryptedCollection.ecoc", "fields": [ { "path": "firstName", @@ -1510,9 +1510,9 @@ }, "encryptedFieldsMap": { "default.encryptedCollection": { - "escCollection": "encryptedCollection.esc", - "eccCollection": "encryptedCollection.ecc", - "ecocCollection": 
"encryptedCollection.ecoc", + "escCollection": "enxcol_.encryptedCollection.esc", + "eccCollection": "enxcol_.encryptedCollection.ecc", + "ecocCollection": "enxcol_.encryptedCollection.ecoc", "fields": [ { "path": "firstName", @@ -1542,7 +1542,7 @@ { "command_started_event": { "command": { - "drop": "encryptedCollection.esc" + "drop": "enxcol_.encryptedCollection.esc" }, "command_name": "drop", "database_name": "default" @@ -1551,7 +1551,7 @@ { "command_started_event": { "command": { - "drop": "encryptedCollection.ecc" + "drop": "enxcol_.encryptedCollection.ecc" }, "command_name": "drop", "database_name": "default" @@ -1560,7 +1560,7 @@ { "command_started_event": { "command": { - "drop": "encryptedCollection.ecoc" + "drop": "enxcol_.encryptedCollection.ecoc" }, "command_name": "drop", "database_name": "default" @@ -1594,9 +1594,9 @@ "arguments": { "collection": "encryptedCollection", "encryptedFields": { - "escCollection": "encryptedCollection.esc", - "eccCollection": "encryptedCollection.ecc", - "ecocCollection": "encryptedCollection.ecoc", + "escCollection": "enxcol_.encryptedCollection.esc", + "eccCollection": "enxcol_.encryptedCollection.ecc", + "ecocCollection": "enxcol_.encryptedCollection.ecoc", "fields": [ { "path": "firstName", @@ -1618,9 +1618,9 @@ "arguments": { "collection": "encryptedCollection", "encryptedFields": { - "escCollection": "encryptedCollection.esc", - "eccCollection": "encryptedCollection.ecc", - "ecocCollection": "encryptedCollection.ecoc", + "escCollection": "enxcol_.encryptedCollection.esc", + "eccCollection": "enxcol_.encryptedCollection.ecc", + "ecocCollection": "enxcol_.encryptedCollection.ecoc", "fields": [ { "path": "firstName", @@ -1641,7 +1641,7 @@ "object": "testRunner", "arguments": { "database": "default", - "collection": "encryptedCollection.esc" + "collection": "enxcol_.encryptedCollection.esc" } }, { @@ -1649,7 +1649,7 @@ "object": "testRunner", "arguments": { "database": "default", - "collection": "encryptedCollection.ecc" + "collection": "enxcol_.encryptedCollection.ecc" } }, { @@ -1657,7 +1657,7 @@ "object": "testRunner", "arguments": { "database": "default", - "collection": "encryptedCollection.ecoc" + "collection": "enxcol_.encryptedCollection.ecoc" } }, { @@ -1683,9 +1683,9 @@ "arguments": { "collection": "encryptedCollection", "encryptedFields": { - "escCollection": "encryptedCollection.esc", - "eccCollection": "encryptedCollection.ecc", - "ecocCollection": "encryptedCollection.ecoc", + "escCollection": "enxcol_.encryptedCollection.esc", + "eccCollection": "enxcol_.encryptedCollection.ecc", + "ecocCollection": "enxcol_.encryptedCollection.ecoc", "fields": [ { "path": "firstName", @@ -1706,7 +1706,7 @@ "object": "testRunner", "arguments": { "database": "default", - "collection": "encryptedCollection.esc" + "collection": "enxcol_.encryptedCollection.esc" } }, { @@ -1714,7 +1714,7 @@ "object": "testRunner", "arguments": { "database": "default", - "collection": "encryptedCollection.ecc" + "collection": "enxcol_.encryptedCollection.ecc" } }, { @@ -1722,7 +1722,7 @@ "object": "testRunner", "arguments": { "database": "default", - "collection": "encryptedCollection.ecoc" + "collection": "enxcol_.encryptedCollection.ecoc" } }, { @@ -1738,7 +1738,7 @@ { "command_started_event": { "command": { - "drop": "encryptedCollection.esc" + "drop": "enxcol_.encryptedCollection.esc" }, "command_name": "drop", "database_name": "default" @@ -1747,7 +1747,7 @@ { "command_started_event": { "command": { - "drop": "encryptedCollection.ecc" + "drop": 
"enxcol_.encryptedCollection.ecc" }, "command_name": "drop", "database_name": "default" @@ -1756,7 +1756,7 @@ { "command_started_event": { "command": { - "drop": "encryptedCollection.ecoc" + "drop": "enxcol_.encryptedCollection.ecoc" }, "command_name": "drop", "database_name": "default" @@ -1774,7 +1774,7 @@ { "command_started_event": { "command": { - "create": "encryptedCollection.esc", + "create": "enxcol_.encryptedCollection.esc", "clusteredIndex": { "key": { "_id": 1 @@ -1789,7 +1789,7 @@ { "command_started_event": { "command": { - "create": "encryptedCollection.ecc", + "create": "enxcol_.encryptedCollection.ecc", "clusteredIndex": { "key": { "_id": 1 @@ -1804,7 +1804,7 @@ { "command_started_event": { "command": { - "create": "encryptedCollection.ecoc", + "create": "enxcol_.encryptedCollection.ecoc", "clusteredIndex": { "key": { "_id": 1 @@ -1821,9 +1821,9 @@ "command": { "create": "encryptedCollection", "encryptedFields": { - "escCollection": "encryptedCollection.esc", - "eccCollection": "encryptedCollection.ecc", - "ecocCollection": "encryptedCollection.ecoc", + "escCollection": "enxcol_.encryptedCollection.esc", + "eccCollection": "enxcol_.encryptedCollection.ecc", + "ecocCollection": "enxcol_.encryptedCollection.ecoc", "fields": [ { "path": "firstName", @@ -1874,7 +1874,7 @@ { "command_started_event": { "command": { - "drop": "encryptedCollection.esc" + "drop": "enxcol_.encryptedCollection.esc" }, "command_name": "drop", "database_name": "default" @@ -1883,7 +1883,7 @@ { "command_started_event": { "command": { - "drop": "encryptedCollection.ecc" + "drop": "enxcol_.encryptedCollection.ecc" }, "command_name": "drop", "database_name": "default" @@ -1892,7 +1892,7 @@ { "command_started_event": { "command": { - "drop": "encryptedCollection.ecoc" + "drop": "enxcol_.encryptedCollection.ecoc" }, "command_name": "drop", "database_name": "default" @@ -1926,9 +1926,9 @@ "arguments": { "collection": "encryptedCollection", "encryptedFields": { - "escCollection": "encryptedCollection.esc", - "eccCollection": "encryptedCollection.ecc", - "ecocCollection": "encryptedCollection.ecoc", + "escCollection": "enxcol_.encryptedCollection.esc", + "eccCollection": "enxcol_.encryptedCollection.ecc", + "ecocCollection": "enxcol_.encryptedCollection.ecoc", "fields": [ { "path": "firstName", @@ -1950,9 +1950,9 @@ "arguments": { "collection": "encryptedCollection", "encryptedFields": { - "escCollection": "encryptedCollection.esc", - "eccCollection": "encryptedCollection.ecc", - "ecocCollection": "encryptedCollection.ecoc", + "escCollection": "enxcol_.encryptedCollection.esc", + "eccCollection": "enxcol_.encryptedCollection.ecc", + "ecocCollection": "enxcol_.encryptedCollection.ecoc", "fields": [ { "path": "firstName", @@ -1973,7 +1973,7 @@ "object": "testRunner", "arguments": { "database": "default", - "collection": "encryptedCollection.esc" + "collection": "enxcol_.encryptedCollection.esc" } }, { @@ -1981,7 +1981,7 @@ "object": "testRunner", "arguments": { "database": "default", - "collection": "encryptedCollection.ecc" + "collection": "enxcol_.encryptedCollection.ecc" } }, { @@ -1989,7 +1989,7 @@ "object": "testRunner", "arguments": { "database": "default", - "collection": "encryptedCollection.ecoc" + "collection": "enxcol_.encryptedCollection.ecoc" } }, { @@ -2021,7 +2021,7 @@ "object": "testRunner", "arguments": { "database": "default", - "collection": "encryptedCollection.esc" + "collection": "enxcol_.encryptedCollection.esc" } }, { @@ -2029,7 +2029,7 @@ "object": "testRunner", "arguments": { "database": 
"default", - "collection": "encryptedCollection.ecc" + "collection": "enxcol_.encryptedCollection.ecc" } }, { @@ -2037,7 +2037,7 @@ "object": "testRunner", "arguments": { "database": "default", - "collection": "encryptedCollection.ecoc" + "collection": "enxcol_.encryptedCollection.ecoc" } }, { @@ -2053,7 +2053,7 @@ { "command_started_event": { "command": { - "drop": "encryptedCollection.esc" + "drop": "enxcol_.encryptedCollection.esc" }, "command_name": "drop", "database_name": "default" @@ -2062,7 +2062,7 @@ { "command_started_event": { "command": { - "drop": "encryptedCollection.ecc" + "drop": "enxcol_.encryptedCollection.ecc" }, "command_name": "drop", "database_name": "default" @@ -2071,7 +2071,7 @@ { "command_started_event": { "command": { - "drop": "encryptedCollection.ecoc" + "drop": "enxcol_.encryptedCollection.ecoc" }, "command_name": "drop", "database_name": "default" @@ -2089,7 +2089,7 @@ { "command_started_event": { "command": { - "create": "encryptedCollection.esc", + "create": "enxcol_.encryptedCollection.esc", "clusteredIndex": { "key": { "_id": 1 @@ -2104,7 +2104,7 @@ { "command_started_event": { "command": { - "create": "encryptedCollection.ecc", + "create": "enxcol_.encryptedCollection.ecc", "clusteredIndex": { "key": { "_id": 1 @@ -2119,7 +2119,7 @@ { "command_started_event": { "command": { - "create": "encryptedCollection.ecoc", + "create": "enxcol_.encryptedCollection.ecoc", "clusteredIndex": { "key": { "_id": 1 @@ -2136,9 +2136,9 @@ "command": { "create": "encryptedCollection", "encryptedFields": { - "escCollection": "encryptedCollection.esc", - "eccCollection": "encryptedCollection.ecc", - "ecocCollection": "encryptedCollection.ecoc", + "escCollection": "enxcol_.encryptedCollection.esc", + "eccCollection": "enxcol_.encryptedCollection.ecc", + "ecocCollection": "enxcol_.encryptedCollection.ecoc", "fields": [ { "path": "firstName", @@ -2201,7 +2201,7 @@ { "command_started_event": { "command": { - "drop": "encryptedCollection.esc" + "drop": "enxcol_.encryptedCollection.esc" }, "command_name": "drop", "database_name": "default" @@ -2210,7 +2210,7 @@ { "command_started_event": { "command": { - "drop": "encryptedCollection.ecc" + "drop": "enxcol_.encryptedCollection.ecc" }, "command_name": "drop", "database_name": "default" @@ -2219,7 +2219,7 @@ { "command_started_event": { "command": { - "drop": "encryptedCollection.ecoc" + "drop": "enxcol_.encryptedCollection.ecoc" }, "command_name": "drop", "database_name": "default" diff --git a/test/client-side-encryption/spec/legacy/fle2-EncryptedFields-vs-EncryptedFieldsMap.json b/test/client-side-encryption/spec/legacy/fle2-EncryptedFields-vs-EncryptedFieldsMap.json index 911b428633..42cd4bbc9c 100644 --- a/test/client-side-encryption/spec/legacy/fle2-EncryptedFields-vs-EncryptedFieldsMap.json +++ b/test/client-side-encryption/spec/legacy/fle2-EncryptedFields-vs-EncryptedFieldsMap.json @@ -94,9 +94,9 @@ }, "encryptedFieldsMap": { "default.default": { - "escCollection": "esc", - "eccCollection": "ecc", - "ecocCollection": "ecoc", + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", "fields": [] } } From e9a6482c4d6042445a95973926be8dc9ce451e47 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Tue, 14 Mar 2023 15:37:45 -0500 Subject: [PATCH 0354/1588] PYTHON-3610 Add blacken-docs to pre-commit hook (#1170) --- .pre-commit-config.yaml | 7 ++++ README.rst | 2 +- bson/json_util.py | 44 +++++++++++++------- bson/raw_bson.py | 18 ++++----- 
doc/examples/aggregation.rst | 17 +++++--- doc/examples/bulk.rst | 40 ++++++++++-------- doc/examples/custom_type.rst | 67 +++++++++++++++++++------------ doc/examples/datetimes.rst | 29 ++++++------- doc/examples/geo.rst | 21 +++++----- doc/examples/gevent.rst | 2 + doc/examples/gridfs.rst | 3 +- doc/examples/server_selection.rst | 4 +- doc/examples/type_hints.rst | 40 +++++++++--------- doc/faq.rst | 4 +- doc/migrate-to-pymongo4.rst | 1 + doc/tutorial.rst | 62 +++++++++++++++------------- pymongo/client_session.py | 14 ++++--- pymongo/collection.py | 5 +-- pymongo/database.py | 5 +-- pymongo/mongo_client.py | 7 ++-- 20 files changed, 224 insertions(+), 168 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index f0ee74c785..d8455981f0 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -30,6 +30,13 @@ repos: files: \.py$ args: [--profile=black] +- repo: https://github.com/adamchainz/blacken-docs + rev: "1.13.0" + hooks: + - id: blacken-docs + additional_dependencies: + - black==22.3.0 + - repo: https://github.com/PyCQA/flake8 rev: 3.9.2 hooks: diff --git a/README.rst b/README.rst index 530829f957..bb409a94ff 100644 --- a/README.rst +++ b/README.rst @@ -148,7 +148,7 @@ Examples ======== Here's a basic example (for more see the *examples* section of the docs): -.. code-block:: python +.. code-block:: pycon >>> import pymongo >>> client = pymongo.MongoClient("localhost", 27017) diff --git a/bson/json_util.py b/bson/json_util.py index ae464e4ed8..8842d5c74d 100644 --- a/bson/json_util.py +++ b/bson/json_util.py @@ -29,7 +29,9 @@ .. doctest:: >>> from bson.json_util import loads - >>> loads('[{"foo": [1, 2]}, {"bar": {"hello": "world"}}, {"code": {"$scope": {}, "$code": "function x() { return 1; }"}}, {"bin": {"$type": "80", "$binary": "AQIDBA=="}}]') + >>> loads( + ... '[{"foo": [1, 2]}, {"bar": {"hello": "world"}}, {"code": {"$scope": {}, "$code": "function x() { return 1; }"}}, {"bin": {"$type": "80", "$binary": "AQIDBA=="}}]' + ... ) [{'foo': [1, 2]}, {'bar': {'hello': 'world'}}, {'code': Code('function x() { return 1; }', {})}, {'bin': Binary(b'...', 128)}] Example usage with :const:`RELAXED_JSON_OPTIONS` (the default): @@ -38,10 +40,14 @@ >>> from bson import Binary, Code >>> from bson.json_util import dumps - >>> dumps([{'foo': [1, 2]}, - ... {'bar': {'hello': 'world'}}, - ... {'code': Code("function x() { return 1; }")}, - ... {'bin': Binary(b"\x01\x02\x03\x04")}]) + >>> dumps( + ... [ + ... {"foo": [1, 2]}, + ... {"bar": {"hello": "world"}}, + ... {"code": Code("function x() { return 1; }")}, + ... {"bin": Binary(b"\x01\x02\x03\x04")}, + ... ] + ... ) '[{"foo": [1, 2]}, {"bar": {"hello": "world"}}, {"code": {"$code": "function x() { return 1; }"}}, {"bin": {"$binary": {"base64": "AQIDBA==", "subType": "00"}}}]' Example usage (with :const:`CANONICAL_JSON_OPTIONS`): @@ -50,11 +56,15 @@ >>> from bson import Binary, Code >>> from bson.json_util import dumps, CANONICAL_JSON_OPTIONS - >>> dumps([{'foo': [1, 2]}, - ... {'bar': {'hello': 'world'}}, - ... {'code': Code("function x() { return 1; }")}, - ... {'bin': Binary(b"\x01\x02\x03\x04")}], - ... json_options=CANONICAL_JSON_OPTIONS) + >>> dumps( + ... [ + ... {"foo": [1, 2]}, + ... {"bar": {"hello": "world"}}, + ... {"code": Code("function x() { return 1; }")}, + ... {"bin": Binary(b"\x01\x02\x03\x04")}, + ... ], + ... json_options=CANONICAL_JSON_OPTIONS, + ... 
) '[{"foo": [{"$numberInt": "1"}, {"$numberInt": "2"}]}, {"bar": {"hello": "world"}}, {"code": {"$code": "function x() { return 1; }"}}, {"bin": {"$binary": {"base64": "AQIDBA==", "subType": "00"}}}]' Example usage (with :const:`LEGACY_JSON_OPTIONS`): @@ -63,11 +73,15 @@ >>> from bson import Binary, Code >>> from bson.json_util import dumps, LEGACY_JSON_OPTIONS - >>> dumps([{'foo': [1, 2]}, - ... {'bar': {'hello': 'world'}}, - ... {'code': Code("function x() { return 1; }", {})}, - ... {'bin': Binary(b"\x01\x02\x03\x04")}], - ... json_options=LEGACY_JSON_OPTIONS) + >>> dumps( + ... [ + ... {"foo": [1, 2]}, + ... {"bar": {"hello": "world"}}, + ... {"code": Code("function x() { return 1; }", {})}, + ... {"bin": Binary(b"\x01\x02\x03\x04")}, + ... ], + ... json_options=LEGACY_JSON_OPTIONS, + ... ) '[{"foo": [1, 2]}, {"bar": {"hello": "world"}}, {"code": {"$code": "function x() { return 1; }", "$scope": {}}}, {"bin": {"$binary": "AQIDBA==", "$type": "00"}}]' Alternatively, you can manually pass the `default` to :func:`json.dumps`. diff --git a/bson/raw_bson.py b/bson/raw_bson.py index 6a80ea70ca..2c2b3c97ca 100644 --- a/bson/raw_bson.py +++ b/bson/raw_bson.py @@ -25,18 +25,18 @@ >>> from pymongo import MongoClient >>> from bson.raw_bson import RawBSONDocument >>> client = MongoClient(document_class=RawBSONDocument) - >>> client.drop_database('db') - >>> client.drop_database('replica_db') + >>> client.drop_database("db") + >>> client.drop_database("replica_db") >>> db = client.db - >>> result = db.test.insert_many([{'_id': 1, 'a': 1}, - ... {'_id': 2, 'b': 1}, - ... {'_id': 3, 'c': 1}, - ... {'_id': 4, 'd': 1}]) + >>> result = db.test.insert_many( + ... [{"_id": 1, "a": 1}, {"_id": 2, "b": 1}, {"_id": 3, "c": 1}, {"_id": 4, "d": 1}] + ... ) >>> replica_db = client.replica_db >>> for doc in db.test.find(): - ... print(f"raw document: {doc.raw}") - ... print(f"decoded document: {bson.decode(doc.raw)}") - ... result = replica_db.test.insert_one(doc) + ... print(f"raw document: {doc.raw}") + ... print(f"decoded document: {bson.decode(doc.raw)}") + ... result = replica_db.test.insert_one(doc) + ... raw document: b'...' decoded document: {'_id': 1, 'a': 1} raw document: b'...' diff --git a/doc/examples/aggregation.rst b/doc/examples/aggregation.rst index cdd82ff6fb..bd20db2304 100644 --- a/doc/examples/aggregation.rst +++ b/doc/examples/aggregation.rst @@ -8,8 +8,9 @@ group method. .. testsetup:: from pymongo import MongoClient + client = MongoClient() - client.drop_database('aggregation_example') + client.drop_database("aggregation_example") Setup ----- @@ -20,10 +21,14 @@ aggregations on: >>> from pymongo import MongoClient >>> db = MongoClient().aggregation_example - >>> result = db.things.insert_many([{"x": 1, "tags": ["dog", "cat"]}, - ... {"x": 2, "tags": ["cat"]}, - ... {"x": 2, "tags": ["mouse", "cat", "dog"]}, - ... {"x": 3, "tags": []}]) + >>> result = db.things.insert_many( + ... [ + ... {"x": 1, "tags": ["dog", "cat"]}, + ... {"x": 2, "tags": ["cat"]}, + ... {"x": 2, "tags": ["mouse", "cat", "dog"]}, + ... {"x": 3, "tags": []}, + ... ] + ... ) >>> result.inserted_ids [ObjectId('...'), ObjectId('...'), ObjectId('...'), ObjectId('...')] @@ -54,7 +59,7 @@ eg "$sort": >>> pipeline = [ ... {"$unwind": "$tags"}, ... {"$group": {"_id": "$tags", "count": {"$sum": 1}}}, - ... {"$sort": SON([("count", -1), ("_id", -1)])} + ... {"$sort": SON([("count", -1), ("_id", -1)])}, ... 
] >>> import pprint >>> pprint.pprint(list(db.things.aggregate(pipeline))) diff --git a/doc/examples/bulk.rst b/doc/examples/bulk.rst index 23367dd2c5..c2c5acc687 100644 --- a/doc/examples/bulk.rst +++ b/doc/examples/bulk.rst @@ -4,8 +4,9 @@ Bulk Write Operations .. testsetup:: from pymongo import MongoClient + client = MongoClient() - client.drop_database('bulk_example') + client.drop_database("bulk_example") This tutorial explains how to take advantage of PyMongo's bulk write operation features. Executing write operations in batches @@ -27,7 +28,7 @@ bulk insert operations. >>> import pymongo >>> db = pymongo.MongoClient().bulk_example - >>> db.test.insert_many([{'i': i} for i in range(10000)]).inserted_ids + >>> db.test.insert_many([{"i": i} for i in range(10000)]).inserted_ids [...] >>> db.test.count_documents({}) 10000 @@ -56,14 +57,17 @@ of operations performed. >>> from pprint import pprint >>> from pymongo import InsertOne, DeleteMany, ReplaceOne, UpdateOne - >>> result = db.test.bulk_write([ - ... DeleteMany({}), # Remove all documents from the previous example. - ... InsertOne({'_id': 1}), - ... InsertOne({'_id': 2}), - ... InsertOne({'_id': 3}), - ... UpdateOne({'_id': 1}, {'$set': {'foo': 'bar'}}), - ... UpdateOne({'_id': 4}, {'$inc': {'j': 1}}, upsert=True), - ... ReplaceOne({'j': 1}, {'j': 2})]) + >>> result = db.test.bulk_write( + ... [ + ... DeleteMany({}), # Remove all documents from the previous example. + ... InsertOne({"_id": 1}), + ... InsertOne({"_id": 2}), + ... InsertOne({"_id": 3}), + ... UpdateOne({"_id": 1}, {"$set": {"foo": "bar"}}), + ... UpdateOne({"_id": 4}, {"$inc": {"j": 1}}, upsert=True), + ... ReplaceOne({"j": 1}, {"j": 2}), + ... ] + ... ) >>> pprint(result.bulk_api_result) {'nInserted': 3, 'nMatched': 2, @@ -87,9 +91,10 @@ the failure. >>> from pymongo import InsertOne, DeleteOne, ReplaceOne >>> from pymongo.errors import BulkWriteError >>> requests = [ - ... ReplaceOne({'j': 2}, {'i': 5}), - ... InsertOne({'_id': 4}), # Violates the unique key constraint on _id. - ... DeleteOne({'i': 5})] + ... ReplaceOne({"j": 2}, {"i": 5}), + ... InsertOne({"_id": 4}), # Violates the unique key constraint on _id. + ... DeleteOne({"i": 5}), + ... ] >>> try: ... db.test.bulk_write(requests) ... except BulkWriteError as bwe: @@ -124,10 +129,11 @@ and fourth operations succeed. :options: +NORMALIZE_WHITESPACE >>> requests = [ - ... InsertOne({'_id': 1}), - ... DeleteOne({'_id': 2}), - ... InsertOne({'_id': 3}), - ... ReplaceOne({'_id': 4}, {'i': 1})] + ... InsertOne({"_id": 1}), + ... DeleteOne({"_id": 2}), + ... InsertOne({"_id": 3}), + ... ReplaceOne({"_id": 4}, {"i": 1}), + ... ] >>> try: ... db.test.bulk_write(requests, ordered=False) ... except BulkWriteError as bwe: diff --git a/doc/examples/custom_type.rst b/doc/examples/custom_type.rst index 404a6c8b55..cbb2f8515b 100644 --- a/doc/examples/custom_type.rst +++ b/doc/examples/custom_type.rst @@ -19,7 +19,7 @@ We'll start by getting a clean database to use for the example: >>> from pymongo import MongoClient >>> client = MongoClient() - >>> client.drop_database('custom_type_example') + >>> client.drop_database("custom_type_example") >>> db = client.custom_type_example @@ -36,7 +36,7 @@ to save an instance of ``Decimal`` with PyMongo, results in an >>> from decimal import Decimal >>> num = Decimal("45.321") - >>> db.test.insert_one({'num': num}) + >>> db.test.insert_one({"num": num}) Traceback (most recent call last): ... 
bson.errors.InvalidDocument: cannot encode object: Decimal('45.321'), of type: @@ -78,8 +78,8 @@ interested in both encoding and decoding our custom type, we use the >>> from bson.decimal128 import Decimal128 >>> from bson.codec_options import TypeCodec >>> class DecimalCodec(TypeCodec): - ... python_type = Decimal # the Python type acted upon by this type codec - ... bson_type = Decimal128 # the BSON type acted upon by this type codec + ... python_type = Decimal # the Python type acted upon by this type codec + ... bson_type = Decimal128 # the BSON type acted upon by this type codec ... def transform_python(self, value): ... """Function that transforms a custom type value into a type ... that BSON can encode.""" @@ -88,6 +88,7 @@ interested in both encoding and decoding our custom type, we use the ... """Function that transforms a vanilla BSON type value into our ... custom type.""" ... return value.to_decimal() + ... >>> decimal_codec = DecimalCodec() @@ -125,7 +126,7 @@ with our ``type_registry`` and use it to get a >>> from bson.codec_options import CodecOptions >>> codec_options = CodecOptions(type_registry=type_registry) - >>> collection = db.get_collection('test', codec_options=codec_options) + >>> collection = db.get_collection("test", codec_options=codec_options) Now, we can seamlessly encode and decode instances of @@ -133,7 +134,7 @@ Now, we can seamlessly encode and decode instances of .. doctest:: - >>> collection.insert_one({'num': Decimal("45.321")}) + >>> collection.insert_one({"num": Decimal("45.321")}) >>> mydoc = collection.find_one() >>> import pprint @@ -147,7 +148,7 @@ MongoDB: .. doctest:: - >>> vanilla_collection = db.get_collection('test') + >>> vanilla_collection = db.get_collection("test") >>> pprint.pprint(vanilla_collection.find_one()) {'_id': ObjectId('...'), 'num': Decimal128('45.321')} @@ -170,13 +171,14 @@ an integer: ... def my_method(self): ... """Method implementing some custom logic.""" ... return int(self) + ... If we try to save an instance of this type without first registering a type codec for it, we get an error: .. doctest:: - >>> collection.insert_one({'num': DecimalInt("45.321")}) + >>> collection.insert_one({"num": DecimalInt("45.321")}) Traceback (most recent call last): ... bson.errors.InvalidDocument: cannot encode object: Decimal('45.321'), of type: @@ -192,6 +194,7 @@ This is trivial to do since the same transformation as the one used for ... def python_type(self): ... """The Python type acted upon by this type codec.""" ... return DecimalInt + ... >>> decimalint_codec = DecimalIntCodec() @@ -211,9 +214,9 @@ object, we can seamlessly encode instances of ``DecimalInt``: >>> type_registry = TypeRegistry([decimal_codec, decimalint_codec]) >>> codec_options = CodecOptions(type_registry=type_registry) - >>> collection = db.get_collection('test', codec_options=codec_options) + >>> collection = db.get_collection("test", codec_options=codec_options) >>> collection.drop() - >>> collection.insert_one({'num': DecimalInt("45.321")}) + >>> collection.insert_one({"num": DecimalInt("45.321")}) >>> mydoc = collection.find_one() >>> pprint.pprint(mydoc) @@ -236,26 +239,26 @@ writing a ``TypeDecoder`` that modifies how this datatype is decoded. On Python 3.x, :class:`~bson.binary.Binary` data (``subtype = 0``) is decoded as a ``bytes`` instance: -.. code-block:: python +.. code-block:: pycon >>> # On Python 3.x. 
>>> from bson.binary import Binary - >>> newcoll = db.get_collection('new') - >>> newcoll.insert_one({'_id': 1, 'data': Binary(b"123", subtype=0)}) + >>> newcoll = db.get_collection("new") + >>> newcoll.insert_one({"_id": 1, "data": Binary(b"123", subtype=0)}) >>> doc = newcoll.find_one() - >>> type(doc['data']) + >>> type(doc["data"]) bytes On Python 2.7.x, the same data is decoded as a :class:`~bson.binary.Binary` instance: -.. code-block:: python +.. code-block:: pycon >>> # On Python 2.7.x - >>> newcoll = db.get_collection('new') + >>> newcoll = db.get_collection("new") >>> doc = newcoll.find_one() - >>> type(doc['data']) + >>> type(doc["data"]) bson.binary.Binary @@ -291,6 +294,7 @@ BSON-encodable value. The following fallback encoder encodes python's ... if isinstance(value, Decimal): ... return Decimal128(value) ... return value + ... After declaring the callback, we must create a type registry and codec options with this fallback encoder before it can be used for initializing a collection: @@ -299,14 +303,14 @@ with this fallback encoder before it can be used for initializing a collection: >>> type_registry = TypeRegistry(fallback_encoder=fallback_encoder) >>> codec_options = CodecOptions(type_registry=type_registry) - >>> collection = db.get_collection('test', codec_options=codec_options) + >>> collection = db.get_collection("test", codec_options=codec_options) >>> collection.drop() We can now seamlessly encode instances of :py:class:`~decimal.Decimal`: .. doctest:: - >>> collection.insert_one({'num': Decimal("45.321")}) + >>> collection.insert_one({"num": Decimal("45.321")}) >>> mydoc = collection.find_one() >>> pprint.pprint(mydoc) @@ -343,12 +347,15 @@ We start by defining some arbitrary custom types: class MyStringType(object): def __init__(self, value): self.__value = value + def __repr__(self): return "MyStringType('%s')" % (self.__value,) + class MyNumberType(object): def __init__(self, value): self.__value = value + def __repr__(self): return "MyNumberType(%s)" % (self.__value,) @@ -362,11 +369,15 @@ back into Python objects: import pickle from bson.binary import Binary, USER_DEFINED_SUBTYPE + + def fallback_pickle_encoder(value): return Binary(pickle.dumps(value), USER_DEFINED_SUBTYPE) + class PickledBinaryDecoder(TypeDecoder): bson_type = Binary + def transform_bson(self, value): if value.subtype == USER_DEFINED_SUBTYPE: return pickle.loads(value) @@ -384,19 +395,23 @@ Finally, we create a ``CodecOptions`` instance: .. code-block:: python - codec_options = CodecOptions(type_registry=TypeRegistry( - [PickledBinaryDecoder()], fallback_encoder=fallback_pickle_encoder)) + codec_options = CodecOptions( + type_registry=TypeRegistry( + [PickledBinaryDecoder()], fallback_encoder=fallback_pickle_encoder + ) + ) We can now round trip our custom objects to MongoDB: .. 
code-block:: python - collection = db.get_collection('test_fe', codec_options=codec_options) - collection.insert_one({'_id': 1, 'str': MyStringType("hello world"), - 'num': MyNumberType(2)}) + collection = db.get_collection("test_fe", codec_options=codec_options) + collection.insert_one( + {"_id": 1, "str": MyStringType("hello world"), "num": MyNumberType(2)} + ) mydoc = collection.find_one() - assert isinstance(mydoc['str'], MyStringType) - assert isinstance(mydoc['num'], MyNumberType) + assert isinstance(mydoc["str"], MyStringType) + assert isinstance(mydoc["num"], MyNumberType) Limitations diff --git a/doc/examples/datetimes.rst b/doc/examples/datetimes.rst index 3b30000ffc..562c9480a6 100644 --- a/doc/examples/datetimes.rst +++ b/doc/examples/datetimes.rst @@ -6,8 +6,9 @@ Datetimes and Timezones import datetime from pymongo import MongoClient from bson.codec_options import CodecOptions + client = MongoClient() - client.drop_database('dt_example') + client.drop_database("dt_example") db = client.dt_example These examples show how to handle Python :class:`datetime.datetime` objects @@ -24,8 +25,7 @@ time into MongoDB: .. doctest:: - >>> result = db.objects.insert_one( - ... {"last_modified": datetime.datetime.utcnow()}) + >>> result = db.objects.insert_one({"last_modified": datetime.datetime.utcnow()}) Always use :meth:`datetime.datetime.utcnow`, which returns the current time in UTC, instead of :meth:`datetime.datetime.now`, which returns the current local @@ -33,8 +33,7 @@ time. Avoid doing this: .. doctest:: - >>> result = db.objects.insert_one( - ... {"last_modified": datetime.datetime.now()}) + >>> result = db.objects.insert_one({"last_modified": datetime.datetime.now()}) The value for `last_modified` is very different between these two examples, even though both documents were stored at around the same local time. This will be @@ -42,7 +41,7 @@ confusing to the application that reads them: .. doctest:: - >>> [doc['last_modified'] for doc in db.objects.find()] # doctest: +SKIP + >>> [doc["last_modified"] for doc in db.objects.find()] # doctest: +SKIP [datetime.datetime(2015, 7, 8, 18, 17, 28, 324000), datetime.datetime(2015, 7, 8, 11, 17, 42, 911000)] @@ -52,12 +51,11 @@ timezone they're in. By default, PyMongo retrieves naive datetimes: .. doctest:: - >>> result = db.tzdemo.insert_one( - ... {'date': datetime.datetime(2002, 10, 27, 6, 0, 0)}) - >>> db.tzdemo.find_one()['date'] + >>> result = db.tzdemo.insert_one({"date": datetime.datetime(2002, 10, 27, 6, 0, 0)}) + >>> db.tzdemo.find_one()["date"] datetime.datetime(2002, 10, 27, 6, 0) >>> options = CodecOptions(tz_aware=True) - >>> db.get_collection('tzdemo', codec_options=options).find_one()['date'] # doctest: +SKIP + >>> db.get_collection("tzdemo", codec_options=options).find_one()["date"] # doctest: +SKIP datetime.datetime(2002, 10, 27, 6, 0, tzinfo=) @@ -71,11 +69,10 @@ those datetimes to UTC automatically: .. doctest:: >>> import pytz - >>> pacific = pytz.timezone('US/Pacific') - >>> aware_datetime = pacific.localize( - ... datetime.datetime(2002, 10, 27, 6, 0, 0)) + >>> pacific = pytz.timezone("US/Pacific") + >>> aware_datetime = pacific.localize(datetime.datetime(2002, 10, 27, 6, 0, 0)) >>> result = db.times.insert_one({"date": aware_datetime}) - >>> db.times.find_one()['date'] + >>> db.times.find_one()["date"] datetime.datetime(2002, 10, 27, 14, 0) Reading Time @@ -150,7 +147,7 @@ cannot be represented using the builtin Python :class:`~datetime.datetime`: .. 
doctest:: >>> x = encode({"x": datetime(1970, 1, 1)}) - >>> y = encode({"x": DatetimeMS(-2**62)}) + >>> y = encode({"x": DatetimeMS(-(2**62))}) >>> codec_auto = CodecOptions(datetime_conversion=DatetimeConversion.DATETIME_AUTO) >>> decode(x, codec_options=codec_auto) {'x': datetime.datetime(1970, 1, 1, 0, 0)} @@ -165,7 +162,7 @@ resulting :class:`~datetime.datetime` objects to be within .. doctest:: >>> x = encode({"x": DatetimeMS(2**62)}) - >>> y = encode({"x": DatetimeMS(-2**62)}) + >>> y = encode({"x": DatetimeMS(-(2**62))}) >>> codec_clamp = CodecOptions(datetime_conversion=DatetimeConversion.DATETIME_CLAMP) >>> decode(x, codec_options=codec_clamp) {'x': datetime.datetime(9999, 12, 31, 23, 59, 59, 999000)} diff --git a/doc/examples/geo.rst b/doc/examples/geo.rst index 2234a20757..e7da156720 100644 --- a/doc/examples/geo.rst +++ b/doc/examples/geo.rst @@ -4,8 +4,9 @@ Geospatial Indexing Example .. testsetup:: from pymongo import MongoClient + client = MongoClient() - client.drop_database('geo_example') + client.drop_database("geo_example") This example shows how to create and use a :data:`~pymongo.GEO2D` index in PyMongo. To create a spherical (earth-like) geospatial index use :data:`~pymongo.GEOSPHERE` instead. @@ -33,10 +34,9 @@ insert a couple of example locations: .. doctest:: - >>> result = db.places.insert_many([{"loc": [2, 5]}, - ... {"loc": [30, 5]}, - ... {"loc": [1, 2]}, - ... {"loc": [4, 4]}]) + >>> result = db.places.insert_many( + ... [{"loc": [2, 5]}, {"loc": [30, 5]}, {"loc": [1, 2]}, {"loc": [4, 4]}] + ... ) >>> result.inserted_ids [ObjectId('...'), ObjectId('...'), ObjectId('...'), ObjectId('...')] @@ -51,7 +51,7 @@ Using the geospatial index we can find documents near another point: >>> import pprint >>> for doc in db.places.find({"loc": {"$near": [3, 6]}}).limit(3): - ... pprint.pprint(doc) + ... pprint.pprint(doc) ... {'_id': ObjectId('...'), 'loc': [2, 5]} {'_id': ObjectId('...'), 'loc': [4, 4]} @@ -66,7 +66,7 @@ The $maxDistance operator requires the use of :class:`~bson.son.SON`: >>> from bson.son import SON >>> query = {"loc": SON([("$near", [3, 6]), ("$maxDistance", 100)])} >>> for doc in db.places.find(query).limit(3): - ... pprint.pprint(doc) + ... pprint.pprint(doc) ... {'_id': ObjectId('...'), 'loc': [2, 5]} {'_id': ObjectId('...'), 'loc': [4, 4]} @@ -78,8 +78,9 @@ It's also possible to query for all items within a given rectangle .. doctest:: >>> query = {"loc": {"$within": {"$box": [[2, 2], [5, 6]]}}} - >>> for doc in db.places.find(query).sort('_id'): + >>> for doc in db.places.find(query).sort("_id"): ... pprint.pprint(doc) + ... {'_id': ObjectId('...'), 'loc': [2, 5]} {'_id': ObjectId('...'), 'loc': [4, 4]} @@ -88,8 +89,8 @@ Or circle (specified by center point and radius): .. doctest:: >>> query = {"loc": {"$within": {"$center": [[0, 0], 6]}}} - >>> for doc in db.places.find(query).sort('_id'): - ... pprint.pprint(doc) + >>> for doc in db.places.find(query).sort("_id"): + ... pprint.pprint(doc) ... 
{'_id': ObjectId('...'), 'loc': [2, 5]} {'_id': ObjectId('...'), 'loc': [1, 2]} diff --git a/doc/examples/gevent.rst b/doc/examples/gevent.rst index 6eb283dca9..de31158151 100644 --- a/doc/examples/gevent.rst +++ b/doc/examples/gevent.rst @@ -38,10 +38,12 @@ handler to end background greenlets when your application receives SIGHUP: import signal + def graceful_reload(signum, traceback): """Explicitly close some global MongoClient object.""" client.close() + signal.signal(signal.SIGHUP, graceful_reload) Applications using uWSGI prior to 1.9.16 are affected by this issue, diff --git a/doc/examples/gridfs.rst b/doc/examples/gridfs.rst index a015f6a9fd..5f40805d79 100644 --- a/doc/examples/gridfs.rst +++ b/doc/examples/gridfs.rst @@ -4,8 +4,9 @@ GridFS Example .. testsetup:: from pymongo import MongoClient + client = MongoClient() - client.drop_database('gridfs_example') + client.drop_database("gridfs_example") This example shows how to use :mod:`gridfs` to store large binary objects (e.g. files) in MongoDB. diff --git a/doc/examples/server_selection.rst b/doc/examples/server_selection.rst index be2172489e..18de677a58 100644 --- a/doc/examples/server_selection.rst +++ b/doc/examples/server_selection.rst @@ -55,12 +55,12 @@ selector function: >>> def server_selector(server_descriptions): ... servers = [ - ... server for server in server_descriptions - ... if server.address[0] == 'localhost' + ... server for server in server_descriptions if server.address[0] == "localhost" ... ] ... if not servers: ... return server_descriptions ... return servers + ... diff --git a/doc/examples/type_hints.rst b/doc/examples/type_hints.rst index e5ad3338e1..f202ab32e1 100644 --- a/doc/examples/type_hints.rst +++ b/doc/examples/type_hints.rst @@ -81,7 +81,7 @@ Subclasses of :py:class:`collections.abc.Mapping` can also be used, such as :cla >>> from pymongo import MongoClient >>> client = MongoClient(document_class=SON[str, int]) >>> collection = client.test.test - >>> inserted = collection.insert_one({"x": 1, "y": 2 }) + >>> inserted = collection.insert_one({"x": 1, "y": 2}) >>> result = collection.find_one({"x": 1}) >>> assert result is not None >>> assert result["x"] == 1 @@ -103,8 +103,8 @@ These methods automatically add an "_id" field. >>> from pymongo import MongoClient >>> from pymongo.collection import Collection >>> class Movie(TypedDict): - ... name: str - ... year: int + ... name: str + ... year: int ... >>> client: MongoClient = MongoClient() >>> collection: Collection[Movie] = client.test.test @@ -113,7 +113,7 @@ These methods automatically add an "_id" field. >>> assert result is not None >>> assert result["year"] == 1993 >>> # This will raise a type-checking error, despite being present, because it is added by PyMongo. - >>> assert result["_id"] # type:ignore[typeddict-item] + >>> assert result["_id"] # type:ignore[typeddict-item] This same typing scheme works for all of the insert methods (:meth:`~pymongo.collection.Collection.insert_one`, :meth:`~pymongo.collection.Collection.insert_many`, and :meth:`~pymongo.collection.Collection.bulk_write`). @@ -158,18 +158,18 @@ Note: to use :py:class:`~typing.TypedDict` and :py:class:`~typing.NotRequired` i >>> from pymongo.collection import Collection >>> from bson import ObjectId >>> class Movie(TypedDict): - ... name: str - ... year: int + ... name: str + ... year: int ... >>> class ExplicitMovie(TypedDict): - ... _id: ObjectId - ... name: str - ... year: int + ... _id: ObjectId + ... name: str + ... year: int ... 
>>> class NotRequiredMovie(TypedDict): - ... _id: NotRequired[ObjectId] - ... name: str - ... year: int + ... _id: NotRequired[ObjectId] + ... name: str + ... year: int ... >>> client: MongoClient = MongoClient() >>> collection: Collection[Movie] = client.test.test @@ -180,7 +180,9 @@ Note: to use :py:class:`~typing.TypedDict` and :py:class:`~typing.NotRequired` i >>> assert result["_id"] # type:ignore[typeddict-item] >>> collection: Collection[ExplicitMovie] = client.test.test >>> # Note that the _id keyword argument must be supplied - >>> inserted = collection.insert_one(ExplicitMovie(_id=ObjectId(), name="Jurassic Park", year=1993)) + >>> inserted = collection.insert_one( + ... ExplicitMovie(_id=ObjectId(), name="Jurassic Park", year=1993) + ... ) >>> result = collection.find_one({"name": "Jurassic Park"}) >>> assert result is not None >>> # This will not raise a type-checking error. @@ -207,13 +209,13 @@ match a well-defined schema using :py:class:`~typing.TypedDict` (Python 3.8+). >>> from pymongo import MongoClient >>> from pymongo.database import Database >>> class Movie(TypedDict): - ... name: str - ... year: int + ... name: str + ... year: int ... >>> client: MongoClient = MongoClient() >>> db: Database[Movie] = client.test >>> collection = db.test - >>> inserted = collection.insert_one({"name": "Jurassic Park", "year": 1993 }) + >>> inserted = collection.insert_one({"name": "Jurassic Park", "year": 1993}) >>> result = collection.find_one({"name": "Jurassic Park"}) >>> assert result is not None >>> assert result["year"] == 1993 @@ -244,11 +246,11 @@ You can specify the document type returned by :mod:`bson` decoding functions by >>> from typing import Any, Dict >>> from bson import CodecOptions, encode, decode >>> class MyDict(Dict[str, Any]): - ... def foo(self): - ... return "bar" + ... def foo(self): + ... return "bar" ... >>> options = CodecOptions(document_class=MyDict) - >>> doc = {"x": 1, "y": 2 } + >>> doc = {"x": 1, "y": 2} >>> bsonbytes = encode(doc, codec_options=options) >>> rt_document = decode(bsonbytes, codec_options=options) >>> assert rt_document.foo() == "bar" diff --git a/doc/faq.rst b/doc/faq.rst index 876dc68ed8..e64e3c79ed 100644 --- a/doc/faq.rst +++ b/doc/faq.rst @@ -244,8 +244,7 @@ Key order in subdocuments -- why does my query work in the shell but not PyMongo collection = MongoClient().test.collection collection.drop() - collection.insert_one({'_id': 1.0, - 'subdocument': SON([('b', 1.0), ('a', 1.0)])}) + collection.insert_one({"_id": 1.0, "subdocument": SON([("b", 1.0), ("a", 1.0)])}) The key-value pairs in a BSON document can have any order (except that ``_id`` is always first). The mongo shell preserves key order when reading and writing @@ -537,6 +536,7 @@ objects as before: >>> for x in client.db.collection.find(): ... print(x) + ... {'_id': ObjectId('...'), 'x': datetime.datetime(1970, 1, 1, 0, 0)} {'_id': ObjectId('...'), 'x': DatetimeMS(4611686018427387904)} diff --git a/doc/migrate-to-pymongo4.rst b/doc/migrate-to-pymongo4.rst index 561261c7ad..687fec11bc 100644 --- a/doc/migrate-to-pymongo4.rst +++ b/doc/migrate-to-pymongo4.rst @@ -6,6 +6,7 @@ PyMongo 4 Migration Guide .. testsetup:: from pymongo import MongoClient, ReadPreference + client = MongoClient() database = client.my_database collection = database.my_collection diff --git a/doc/tutorial.rst b/doc/tutorial.rst index 55961241e8..d7854c885a 100644 --- a/doc/tutorial.rst +++ b/doc/tutorial.rst @@ -4,8 +4,9 @@ Tutorial .. 
testsetup:: from pymongo import MongoClient + client = MongoClient() - client.drop_database('test-database') + client.drop_database("test-database") This tutorial is intended as an introduction to working with **MongoDB** and **PyMongo**. @@ -45,13 +46,13 @@ specify the host and port explicitly, as follows: .. doctest:: - >>> client = MongoClient('localhost', 27017) + >>> client = MongoClient("localhost", 27017) Or use the MongoDB URI format: .. doctest:: - >>> client = MongoClient('mongodb://localhost:27017/') + >>> client = MongoClient("mongodb://localhost:27017/") Getting a Database ------------------ @@ -70,7 +71,7 @@ instead: .. doctest:: - >>> db = client['test-database'] + >>> db = client["test-database"] Getting a Collection -------------------- @@ -87,7 +88,7 @@ or (using dictionary style access): .. doctest:: - >>> collection = db['test-collection'] + >>> collection = db["test-collection"] An important note about collections (and databases) in MongoDB is that they are created lazily - none of the above commands have actually @@ -104,10 +105,12 @@ post: .. doctest:: >>> import datetime - >>> post = {"author": "Mike", - ... "text": "My first blog post!", - ... "tags": ["mongodb", "python", "pymongo"], - ... "date": datetime.datetime.utcnow()} + >>> post = { + ... "author": "Mike", + ... "text": "My first blog post!", + ... "tags": ["mongodb", "python", "pymongo"], + ... "date": datetime.datetime.utcnow(), + ... } Note that documents can contain native Python types (like :class:`datetime.datetime` instances) which will be automatically @@ -212,7 +215,7 @@ Note that an ObjectId is not the same as its string representation: .. doctest:: >>> post_id_as_str = str(post_id) - >>> posts.find_one({"_id": post_id_as_str}) # No result + >>> posts.find_one({"_id": post_id_as_str}) # No result >>> A common task in web applications is to get an ObjectId from the @@ -240,14 +243,20 @@ command to the server: .. doctest:: - >>> new_posts = [{"author": "Mike", - ... "text": "Another post!", - ... "tags": ["bulk", "insert"], - ... "date": datetime.datetime(2009, 11, 12, 11, 14)}, - ... {"author": "Eliot", - ... "title": "MongoDB is fun", - ... "text": "and pretty easy too!", - ... "date": datetime.datetime(2009, 11, 10, 10, 45)}] + >>> new_posts = [ + ... { + ... "author": "Mike", + ... "text": "Another post!", + ... "tags": ["bulk", "insert"], + ... "date": datetime.datetime(2009, 11, 12, 11, 14), + ... }, + ... { + ... "author": "Eliot", + ... "title": "MongoDB is fun", + ... "text": "and pretty easy too!", + ... "date": datetime.datetime(2009, 11, 10, 10, 45), + ... }, + ... ] >>> result = posts.insert_many(new_posts) >>> result.inserted_ids [ObjectId('...'), ObjectId('...')] @@ -274,7 +283,7 @@ document in the ``posts`` collection: .. doctest:: >>> for post in posts.find(): - ... pprint.pprint(post) + ... pprint.pprint(post) ... {'_id': ObjectId('...'), 'author': 'Mike', @@ -300,7 +309,7 @@ author is "Mike": .. doctest:: >>> for post in posts.find({"author": "Mike"}): - ... pprint.pprint(post) + ... pprint.pprint(post) ... {'_id': ObjectId('...'), 'author': 'Mike', @@ -343,7 +352,7 @@ than a certain date, but also sort the results by author: >>> d = datetime.datetime(2009, 11, 12, 12) >>> for post in posts.find({"date": {"$lt": d}}).sort("author"): - ... pprint.pprint(post) + ... pprint.pprint(post) ... {'_id': ObjectId('...'), 'author': 'Eliot', @@ -373,8 +382,7 @@ First, we'll need to create the index: .. doctest:: - >>> result = db.profiles.create_index([('user_id', pymongo.ASCENDING)], - ... 
unique=True) + >>> result = db.profiles.create_index([("user_id", pymongo.ASCENDING)], unique=True) >>> sorted(list(db.profiles.index_information())) ['_id_', 'user_id_1'] @@ -386,9 +394,7 @@ Now let's set up some user profiles: .. doctest:: - >>> user_profiles = [ - ... {'user_id': 211, 'name': 'Luke'}, - ... {'user_id': 212, 'name': 'Ziltoid'}] + >>> user_profiles = [{"user_id": 211, "name": "Luke"}, {"user_id": 212, "name": "Ziltoid"}] >>> result = db.profiles.insert_many(user_profiles) The index prevents us from inserting a document whose ``user_id`` is already in @@ -397,8 +403,8 @@ the collection: .. doctest:: :options: +IGNORE_EXCEPTION_DETAIL - >>> new_profile = {'user_id': 213, 'name': 'Drew'} - >>> duplicate_profile = {'user_id': 212, 'name': 'Tommy'} + >>> new_profile = {"user_id": 213, "name": "Drew"} + >>> duplicate_profile = {"user_id": 212, "name": "Tommy"} >>> result = db.profiles.insert_one(new_profile) # This is fine. >>> result = db.profiles.insert_one(duplicate_profile) Traceback (most recent call last): diff --git a/pymongo/client_session.py b/pymongo/client_session.py index d2479942e4..d73672c5b5 100644 --- a/pymongo/client_session.py +++ b/pymongo/client_session.py @@ -23,12 +23,11 @@ with client.start_session(causal_consistency=True) as session: collection = client.db.collection - collection.update_one({'_id': 1}, {'$set': {'x': 10}}, session=session) - secondary_c = collection.with_options( - read_preference=ReadPreference.SECONDARY) + collection.update_one({"_id": 1}, {"$set": {"x": 10}}, session=session) + secondary_c = collection.with_options(read_preference=ReadPreference.SECONDARY) # A secondary read waits for replication of the write. - secondary_c.find_one({'_id': 1}, session=session) + secondary_c.find_one({"_id": 1}, session=session) If `causal_consistency` is True (the default), read operations that use the session are causally after previous read and write operations. Using a @@ -57,8 +56,11 @@ with client.start_session() as session: with session.start_transaction(): orders.insert_one({"sku": "abc123", "qty": 100}, session=session) - inventory.update_one({"sku": "abc123", "qty": {"$gte": 100}}, - {"$inc": {"qty": -100}}, session=session) + inventory.update_one( + {"sku": "abc123", "qty": {"$gte": 100}}, + {"$inc": {"qty": -100}}, + session=session, + ) Upon normal completion of ``with session.start_transaction()`` block, the transaction automatically calls :meth:`ClientSession.commit_transaction`. diff --git a/pymongo/collection.py b/pymongo/collection.py index 4cb3fa79c9..7ce881613c 100644 --- a/pymongo/collection.py +++ b/pymongo/collection.py @@ -2533,14 +2533,13 @@ def watch( .. code-block:: python try: - with db.collection.watch( - [{'$match': {'operationType': 'insert'}}]) as stream: + with db.collection.watch([{"$match": {"operationType": "insert"}}]) as stream: for insert_change in stream: print(insert_change) except pymongo.errors.PyMongoError: # The ChangeStream encountered an unrecoverable error or the # resume attempt failed to recreate the cursor. - logging.error('...') + logging.error("...") For a precise description of the resume process see the `change streams specification`_. diff --git a/pymongo/database.py b/pymongo/database.py index b3c6c60851..6a73f884c5 100644 --- a/pymongo/database.py +++ b/pymongo/database.py @@ -591,14 +591,13 @@ def watch( .. 
code-block:: python try: - with db.watch( - [{'$match': {'operationType': 'insert'}}]) as stream: + with db.watch([{"$match": {"operationType": "insert"}}]) as stream: for insert_change in stream: print(insert_change) except pymongo.errors.PyMongoError: # The ChangeStream encountered an unrecoverable error or the # resume attempt failed to recreate the cursor. - logging.error('...') + logging.error("...") For a precise description of the resume process see the `change streams specification`_. diff --git a/pymongo/mongo_client.py b/pymongo/mongo_client.py index 05f00b48ee..ca60affdf5 100644 --- a/pymongo/mongo_client.py +++ b/pymongo/mongo_client.py @@ -27,7 +27,7 @@ >>> c = MongoClient() >>> c.test_database Database(MongoClient(host=['localhost:27017'], document_class=dict, tz_aware=False, connect=True), 'test_database') - >>> c['test-database'] + >>> c["test-database"] Database(MongoClient(host=['localhost:27017'], document_class=dict, tz_aware=False, connect=True), 'test-database') """ @@ -935,14 +935,13 @@ def watch( .. code-block:: python try: - with client.watch( - [{'$match': {'operationType': 'insert'}}]) as stream: + with client.watch([{"$match": {"operationType": "insert"}}]) as stream: for insert_change in stream: print(insert_change) except pymongo.errors.PyMongoError: # The ChangeStream encountered an unrecoverable error or the # resume attempt failed to recreate the cursor. - logging.error('...') + logging.error("...") For a precise description of the resume process see the `change streams specification`_. From 04c9f87d7027e90b04819292f450c63b75b85a22 Mon Sep 17 00:00:00 2001 From: Julius Park Date: Mon, 20 Mar 2023 15:54:42 -0700 Subject: [PATCH 0355/1588] PYTHON-2468 Add pymongoexplain example to pymongo docs (#1172) --- doc/examples/aggregation.rst | 12 ++++++++++-- pymongo/collection.py | 5 +++-- 2 files changed, 13 insertions(+), 4 deletions(-) diff --git a/doc/examples/aggregation.rst b/doc/examples/aggregation.rst index bd20db2304..22e19e9842 100644 --- a/doc/examples/aggregation.rst +++ b/doc/examples/aggregation.rst @@ -67,8 +67,16 @@ eg "$sort": {'_id': 'dog', 'count': 2}, {'_id': 'mouse', 'count': 1}] -To run an explain plan for this aggregation use the -:meth:`~pymongo.database.Database.command` method:: +To run an explain plan for this aggregation use +`PyMongoExplain `_, +a companion library for PyMongo. It allows you to explain any CRUD operation +by providing a few convenience classes:: + + >>> from pymongoexplain import ExplainableCollection + >>> ExplainableCollection(collection).aggregate(pipeline) + {'ok': 1.0, 'queryPlanner': [...]} + +Or, use the :meth:`~pymongo.database.Database.command` method:: >>> db.command('aggregate', 'things', pipeline=pipeline, explain=True) {'ok': 1.0, 'stages': [...]} diff --git a/pymongo/collection.py b/pymongo/collection.py index 7ce881613c..0ff56d10cd 100644 --- a/pymongo/collection.py +++ b/pymongo/collection.py @@ -2368,8 +2368,9 @@ def aggregate( :attr:`~pymongo.read_preferences.ReadPreference.PRIMARY` is used. .. note:: This method does not support the 'explain' option. Please - use :meth:`~pymongo.database.Database.command` instead. An - example is included in the :ref:`aggregate-examples` documentation. + use `PyMongoExplain `_ + instead. An example is included in the :ref:`aggregate-examples` + documentation. .. note:: The :attr:`~pymongo.collection.Collection.write_concern` of this collection is automatically applied to this operation. 
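As a complement to the documentation added above, the explain flow can be exercised end to end. The following is a minimal sketch, assuming the ``pymongoexplain`` package is installed from PyPI and a local ``mongod`` is reachable; the ``aggregation_example.things`` database and collection names and the pipeline are reused from the aggregation tutorial and are illustrative only:

.. code-block:: python

    from pymongo import MongoClient
    from pymongoexplain import ExplainableCollection

    collection = MongoClient().aggregation_example.things
    pipeline = [
        {"$unwind": "$tags"},
        {"$group": {"_id": "$tags", "count": {"$sum": 1}}},
    ]

    # ExplainableCollection mirrors the Collection API but returns the
    # server's explain output (e.g. with a "queryPlanner" section) instead
    # of executing the aggregation.
    plan = ExplainableCollection(collection).aggregate(pipeline)
    print(plan)

Once the plan looks right, the same ``pipeline`` object can be passed unchanged to a real ``collection.aggregate(pipeline)`` call.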
From 880f3dd8eaaa91fa017f7bb7993f26ccf4f4670d Mon Sep 17 00:00:00 2001 From: Julius Park Date: Tue, 21 Mar 2023 16:59:35 -0700 Subject: [PATCH 0356/1588] PYTHON-3615 Add docs example for how to rotate CMKs using rewrap_many_data_key (#1171) --- pymongo/encryption.py | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/pymongo/encryption.py b/pymongo/encryption.py index 2bd6880065..7d017c2c0a 100644 --- a/pymongo/encryption.py +++ b/pymongo/encryption.py @@ -1021,6 +1021,23 @@ def rewrap_many_data_key( :Returns: A :class:`RewrapManyDataKeyResult`. + This method allows you to re-encrypt all of your data-keys with a new CMK, or master key. + Note that this does *not* require re-encrypting any of the data in your encrypted collections, + but rather refreshes the key that protects the keys that encrypt the data: + + .. code-block:: python + + client_encryption.rewrap_many_data_key( + filter={"keyAltNames": "optional filter for which keys you want to update"}, + master_key={ + "provider": "azure", # replace with your cloud provider + "master_key": { + # put the rest of your master_key options here + "key": "" + }, + }, + ) + .. versionadded:: 4.2 """ self._check_closed() From 1d052cb7061e3a3c1aa13cc6d4cf17477a60aab5 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Mon, 27 Mar 2023 11:31:51 -0500 Subject: [PATCH 0357/1588] PYTHON-3639 Release Build is Failing to Create Universal Wheels for MacOS (#1174) --- tools/fail_if_no_c.py | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/tools/fail_if_no_c.py b/tools/fail_if_no_c.py index e2e9c52527..60fed0ee8a 100644 --- a/tools/fail_if_no_c.py +++ b/tools/fail_if_no_c.py @@ -32,10 +32,11 @@ if os.environ.get("ENSURE_UNIVERSAL2") == "1": parent_dir = os.path.dirname(pymongo.__path__[0]) - for so_file in glob.glob(f"{parent_dir}/**/*.so"): - print(f"Checking universal2 compatibility in {so_file}...") - output = subprocess.check_output(["file", so_file]) - if "arm64" not in output.decode("utf-8"): - sys.exit("Universal wheel was not compiled with arm64 support") - if "x86_64" not in output.decode("utf-8"): - sys.exit("Universal wheel was not compiled with x86_64 support") + for pkg in ["pymongo", "bson", "gridfs"]: + for so_file in glob.glob(f"{parent_dir}/{pkg}/*.so"): + print(f"Checking universal2 compatibility in {so_file}...") + output = subprocess.check_output(["file", so_file]) + if "arm64" not in output.decode("utf-8"): + sys.exit("Universal wheel was not compiled with arm64 support") + if "x86_64" not in output.decode("utf-8"): + sys.exit("Universal wheel was not compiled with x86_64 support") From cbad35ec9d591b09c492f4c869c50fd203b55e44 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Mon, 27 Mar 2023 18:32:34 -0500 Subject: [PATCH 0358/1588] PYTHON-3619 MacOS hosts are incredibly slow (#1175) --- .evergreen/config.yml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/.evergreen/config.yml b/.evergreen/config.yml index e92cf96a1e..b697074020 100644 --- a/.evergreen/config.yml +++ b/.evergreen/config.yml @@ -15,7 +15,8 @@ command_type: system # Protect ourself against rogue test case, or curl gone wild, that runs forever # Good rule of thumb: the averageish length a task takes, times 5 # That roughly accounts for variable system performance for various buildvariants -exec_timeout_secs: 1800 # 30 minutes is the longest we'll ever run +exec_timeout_secs: 3600 # 60 minutes is the longest we'll ever run (primarily + # for macos hosts) # What to do when evergreen hits the timeout
(`post:` tasks are run automatically) timeout: From 9d65395d7f240e187c5744e601c0eb31d3a039d2 Mon Sep 17 00:00:00 2001 From: Julius Park Date: Tue, 28 Mar 2023 14:51:08 -0700 Subject: [PATCH 0359/1588] PYTHON-3567 Add guidance for setting uuidRepresentation in 4.0 migration guide (#1176) --- doc/migrate-to-pymongo4.rst | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/doc/migrate-to-pymongo4.rst b/doc/migrate-to-pymongo4.rst index 687fec11bc..19aa87fcd8 100644 --- a/doc/migrate-to-pymongo4.rst +++ b/doc/migrate-to-pymongo4.rst @@ -974,12 +974,19 @@ subdocument containing a ``$ref`` field would be decoded as a Encoding a UUID raises an error by default .......................................... -The default uuid_representation for :class:`~bson.codec_options.CodecOptions`, +The default ``uuid_representation`` for :class:`~bson.codec_options.CodecOptions`, :class:`~bson.json_util.JSONOptions`, and :class:`~pymongo.mongo_client.MongoClient` has been changed from :data:`bson.binary.UuidRepresentation.PYTHON_LEGACY` to :data:`bson.binary.UuidRepresentation.UNSPECIFIED`. Attempting to encode a :class:`uuid.UUID` instance to BSON or JSON now produces an error by default. +If you were using UUIDs previously, you will need to set your ``uuid_representation`` to +:data:`bson.binary.UuidRepresentation.PYTHON_LEGACY` to avoid data corruption. If you do not have UUIDs, +then you should set :data:`bson.binary.UuidRepresentation.STANDARD`. If you do not explicitly set a value, +you will receive an error like this when attempting to encode a :class:`uuid.UUID`:: + + ValueError: cannot encode native uuid.UUID with UuidRepresentation.UNSPECIFIED. UUIDs can be manually converted... + See :ref:`handling-uuid-data-example` for details. Additional BSON classes implement ``__slots__`` From e85a84e3f4fb620bd5836b94a83a1e15d0b87951 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Fri, 31 Mar 2023 13:58:47 -0700 Subject: [PATCH 0360/1588] PYTHON-3643 Use mongodb+srv in MONGODB-AWS auth examples (#1177) --- doc/examples/authentication.rst | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/doc/examples/authentication.rst b/doc/examples/authentication.rst index a984d17fc0..a46f95c789 100644 --- a/doc/examples/authentication.rst +++ b/doc/examples/authentication.rst @@ -290,7 +290,7 @@ access key id and secret access key pair as the username and password, respectively, in the MongoDB URI. A sample URI would be:: >>> from pymongo import MongoClient - >>> uri = "mongodb://:@localhost/?authMechanism=MONGODB-AWS" + >>> uri = "mongodb+srv://:@example.mongodb.net/?authMechanism=MONGODB-AWS" >>> client = MongoClient(uri) .. note:: The access_key_id and secret_access_key passed into the URI MUST @@ -305,7 +305,7 @@ ID, a secret access key, and a security token passed into the URI. A sample URI would be:: >>> from pymongo import MongoClient - >>> uri = "mongodb://:@example.com/?authMechanism=MONGODB-AWS&authMechanismProperties=AWS_SESSION_TOKEN:" + >>> uri = "mongodb+srv://:@example.mongodb.net/?authMechanism=MONGODB-AWS&authMechanismProperties=AWS_SESSION_TOKEN:" >>> client = MongoClient(uri) .. 
note:: The access_key_id, secret_access_key, and session_token passed into @@ -325,7 +325,7 @@ for the access key ID, secret access key, and session token, respectively:: $ export AWS_SESSION_TOKEN= $ python >>> from pymongo import MongoClient - >>> uri = "mongodb://example.com/?authMechanism=MONGODB-AWS" + >>> uri = "mongodb+srv://example.mongodb.net/?authMechanism=MONGODB-AWS" >>> client = MongoClient(uri) .. note:: No username, password, or session token is passed into the URI. @@ -357,7 +357,7 @@ credentials assigned to the machine. A sample URI on an ECS container would be:: >>> from pymongo import MongoClient - >>> uri = "mongodb://localhost/?authMechanism=MONGODB-AWS" + >>> uri = "mongodb+srv://example.mongodb.net/?authMechanism=MONGODB-AWS" >>> client = MongoClient(uri) .. note:: No username, password, or session token is passed into the URI. @@ -372,7 +372,7 @@ credentials assigned to the machine. A sample URI on an EC2 machine would be:: >>> from pymongo import MongoClient - >>> uri = "mongodb://localhost/?authMechanism=MONGODB-AWS" + >>> uri = "mongodb+srv://example.mongodb.net/?authMechanism=MONGODB-AWS" >>> client = MongoClient(uri) .. note:: No username, password, or session token is passed into the URI. From 9bc70933554d7809a8169dfdea73b088d9271231 Mon Sep 17 00:00:00 2001 From: Julius Park Date: Mon, 3 Apr 2023 13:45:54 -0700 Subject: [PATCH 0361/1588] PYTHON-3634 Windows crypt shared rewrap many data key timing out sometimes (#1173) --- pymongo/encryption.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/pymongo/encryption.py b/pymongo/encryption.py index 2bd6880065..d94e1969b0 100644 --- a/pymongo/encryption.py +++ b/pymongo/encryption.py @@ -41,6 +41,7 @@ from bson.son import SON from pymongo import _csot from pymongo.collection import Collection +from pymongo.common import CONNECT_TIMEOUT from pymongo.cursor import Cursor from pymongo.daemon import _spawn_daemon from pymongo.database import Database @@ -64,7 +65,7 @@ from pymongo.write_concern import WriteConcern _HTTPS_PORT = 443 -_KMS_CONNECT_TIMEOUT = 10 # TODO: CDRIVER-3262 will define this value. +_KMS_CONNECT_TIMEOUT = CONNECT_TIMEOUT # CDRIVER-3262 redefined this value to CONNECT_TIMEOUT _MONGOCRYPTD_TIMEOUT_MS = 10000 From 148f7877cf568e93c8bc7c7c260617de03e2326f Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Mon, 3 Apr 2023 17:13:23 -0500 Subject: [PATCH 0362/1588] PYTHON-3644 Test encryption KMS connections with stdlib ssl, not just pyopenssl (#1178) --- .evergreen/config.yml | 55 ++++++++++++++++++++++++++++++++--------- .evergreen/run-tests.sh | 7 ++++-- test/test_encryption.py | 7 ++++-- 3 files changed, 54 insertions(+), 15 deletions(-) diff --git a/.evergreen/config.yml b/.evergreen/config.yml index b697074020..6825aac10a 100644 --- a/.evergreen/config.yml +++ b/.evergreen/config.yml @@ -457,6 +457,9 @@ functions: rm -f ./fle_creds.sh export LIBMONGOCRYPT_URL="${libmongocrypt_url}" export TEST_ENCRYPTION=1 + if [ -n "${test_encryption_pyopenssl}" ]; then + export TEST_ENCRYPTION_PYOPENSSL=1 + fi fi if [ -n "${test_crypt_shared}" ]; then export TEST_CRYPT_SHARED=1 @@ -2507,6 +2510,13 @@ axes: variables: test_encryption: true batchtime: 10080 # 7 days + - id: "encryption_pyopenssl" + display_name: "Encryption PyOpenSSL" + tags: ["encryption_tag"] + variables: + test_encryption: true + test_encryption_pyopenssl: true + batchtime: 10080 # 7 days # The path to crypt_shared is stored in the $CRYPT_SHARED_LIB_PATH expansion.
- id: "encryption_crypt_shared" display_name: "Encryption shared lib" @@ -2637,14 +2647,22 @@ buildvariants: ssl: "nossl" encryption: "*" display_name: "${encryption} ${platform} ${auth} ${ssl}" - tasks: &encryption-server-versions - - ".rapid" - - ".latest" - - ".6.0" - - ".5.0" - - ".4.4" - - ".4.2" - - ".4.0" + tasks: "test-latest-replica_set" + rules: + - if: + encryption: ["encryption", "encryption_crypt_shared"] + platform: macos-1100 + auth: "auth" + ssl: "nossl" + then: + add_tasks: &encryption-server-versions + - ".rapid" + - ".latest" + - ".6.0" + - ".5.0" + - ".4.4" + - ".4.2" + - ".4.0" # Test one server version with zSeries, POWER8, and ARM. - matrix_name: "test-different-cpu-architectures" @@ -2726,8 +2744,15 @@ buildvariants: # coverage: "*" encryption: "*" display_name: "${encryption} ${python-version} ${platform} ${auth-ssl}" - tasks: *encryption-server-versions - + tasks: "test-latest-replica_set" + rules: + - if: + encryption: ["encryption", "encryption_crypt_shared"] + platform: ubuntu-18.04 + auth-ssl: noauth-nossl + python-version: "*" + then: + add_tasks: *encryption-server-versions - matrix_name: "tests-python-version-ubuntu18-without-c-extensions" matrix_spec: @@ -2837,7 +2862,15 @@ buildvariants: auth-ssl: "*" encryption: "*" display_name: "${encryption} ${platform} ${python-version-windows} ${auth-ssl}" - tasks: *encryption-server-versions + tasks: "test-latest-replica_set" + rules: + - if: + encryption: ["encryption", "encryption_crypt_shared"] + platform: windows-64-vsMulti-small + python-version-windows: "*" + auth-ssl: "*" + then: + add_tasks: *encryption-server-versions # Storage engine tests on Ubuntu 18.04 (x86_64) with Python 3.7. - matrix_name: "tests-storage-engines" diff --git a/.evergreen/run-tests.sh b/.evergreen/run-tests.sh index 3a15163b63..556d60f07f 100755 --- a/.evergreen/run-tests.sh +++ b/.evergreen/run-tests.sh @@ -151,8 +151,11 @@ fi if [ -n "$TEST_ENCRYPTION" ]; then # Need aws dependency for On-Demand KMS Credentials. - # Need OSCP dependency to verify OCSP TSL args. 
- python -m pip install '.[aws,ocsp]' + if [ -n "$TEST_ENCRYPTION_PYOPENSSL" ]; then + python -m pip install '.[aws,ocsp]' + else + python -m pip install '.[aws]' + fi # Get access to the AWS temporary credentials: # CSFLE_AWS_TEMP_ACCESS_KEY_ID, CSFLE_AWS_TEMP_SECRET_ACCESS_KEY, CSFLE_AWS_TEMP_SESSION_TOKEN diff --git a/test/test_encryption.py b/test/test_encryption.py index b7d588e747..6cdc8da3b6 100644 --- a/test/test_encryption.py +++ b/test/test_encryption.py @@ -2159,8 +2159,11 @@ def test_05_tlsDisableOCSPEndpointCheck_is_permitted(self): encryption = ClientEncryption( providers, "keyvault.datakeys", self.client, OPTS, kms_tls_options=options ) - self.assertFalse(encryption._io_callbacks.opts._kms_ssl_contexts["aws"].check_ocsp_endpoint) - encryption.close() + self.addCleanup(encryption.close) + ctx = encryption._io_callbacks.opts._kms_ssl_contexts["aws"] + if not hasattr(ctx, "check_ocsp_endpoint"): + raise self.skipTest("OCSP not enabled") # type:ignore + self.assertFalse(ctx.check_ocsp_endpoint) # https://github.com/mongodb/specifications/blob/50e26fe/source/client-side-encryption/tests/README.rst#unique-index-on-keyaltnames From 30ebc1d0902cedfd408b2652aedf847bbf11b22a Mon Sep 17 00:00:00 2001 From: lilinjie <102012657+uniontech-lilinjie@users.noreply.github.com> Date: Tue, 4 Apr 2023 11:46:14 +0000 Subject: [PATCH 0363/1588] fix typo (#1179) --- pymongo/client_session.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pymongo/client_session.py b/pymongo/client_session.py index d73672c5b5..1ec0b16476 100644 --- a/pymongo/client_session.py +++ b/pymongo/client_session.py @@ -600,7 +600,7 @@ def callback(session, custom_arg, custom_kwarg=None): In the event of an exception, ``with_transaction`` may retry the commit or the entire transaction, therefore ``callback`` may be invoked multiple times by a single call to ``with_transaction``. Developers - should be mindful of this possiblity when writing a ``callback`` that + should be mindful of this possibility when writing a ``callback`` that modifies application state or has any other side-effects. 
Note that even when the ``callback`` is invoked multiple times, ``with_transaction`` ensures that the transaction will be committed From d8897fce3ef685ccda6d9ab671125a9a07558bf3 Mon Sep 17 00:00:00 2001 From: Julius Park Date: Wed, 5 Apr 2023 08:45:27 -0700 Subject: [PATCH 0364/1588] PYTHON-3066 Test against Apple silicon in Evergreen (#1180) --- .evergreen/config.yml | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) diff --git a/.evergreen/config.yml b/.evergreen/config.yml index 6825aac10a..37bc5751da 100644 --- a/.evergreen/config.yml +++ b/.evergreen/config.yml @@ -2176,6 +2176,15 @@ axes: skip_web_identity_auth_test: true python3_binary: /Library/Frameworks/Python.framework/Versions/3.8/bin/python3 libmongocrypt_url: https://s3.amazonaws.com/mciuploads/libmongocrypt/macos/master/latest/libmongocrypt.tar.gz + - id: macos-1100-arm64 + display_name: "macOS 11.00 Arm64" + run_on: macos-1100-arm64 + variables: + skip_EC2_auth_test: true + skip_ECS_auth_test: true + skip_web_identity_auth_test: true + python3_binary: /Library/Frameworks/Python.framework/Versions/3.8/bin/python3 + libmongocrypt_url: https://s3.amazonaws.com/mciuploads/libmongocrypt/macos/master/latest/libmongocrypt.tar.gz - id: rhel62 display_name: "RHEL 6.2 (x86_64)" run_on: rhel62-small @@ -2639,6 +2648,18 @@ buildvariants: - ".4.0" - ".3.6" +- matrix_name: "test-macos-arm64" + matrix_spec: + platform: + - macos-1100-arm64 + auth-ssl: "*" + display_name: "${platform} ${auth-ssl}" + tasks: + - ".latest" + - ".6.0" + - ".5.0" + - ".4.4" + - matrix_name: "test-macos-encryption" matrix_spec: platform: From acc6605ea119ca62b4d9dbff07b62bbc7c86c3d8 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Wed, 5 Apr 2023 11:24:27 -0700 Subject: [PATCH 0365/1588] PYTHON-3522 Increase test timeout for Windows (#1181) Temporarily skip CSOT GridFS tests on Windows. 
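For readers following along, the JSON change below tunes a server-side
failpoint. A minimal sketch of how such a failpoint is armed from PyMongo
(the client setup here is illustrative and not part of this patch)::

    from pymongo import MongoClient

    # Requires a test server started with enableTestCommands=1.
    client = MongoClient(appname="reduceMaxTimeMSTest")
    client.admin.command({
        "configureFailPoint": "failCommand",
        "mode": {"times": 1},
        "data": {
            "failCommands": ["insert"],
            "appName": "reduceMaxTimeMSTest",
            "blockConnection": True,
            # 50ms sat too close to the operation's CSOT budget on slow
            # Windows hosts; 75ms makes the intended timeout deterministic.
            "blockTimeMS": 75,
        },
    })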
--- test/csot/command-execution.json | 2 +- test/unified_format.py | 2 ++ 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/test/csot/command-execution.json b/test/csot/command-execution.json index 10f87d43ac..f0858791e9 100644 --- a/test/csot/command-execution.json +++ b/test/csot/command-execution.json @@ -52,7 +52,7 @@ ], "appName": "reduceMaxTimeMSTest", "blockConnection": true, - "blockTimeMS": 50 + "blockTimeMS": 75 } } } diff --git a/test/unified_format.py b/test/unified_format.py index 5afc746859..18130290b5 100644 --- a/test/unified_format.py +++ b/test/unified_format.py @@ -947,6 +947,8 @@ def maybe_skip_test(self, spec): class_name = self.__class__.__name__.lower() description = spec["description"].lower() if "csot" in class_name: + if "gridfs" in class_name and sys.platform == "win32": + self.skipTest("PYTHON-3522 CSOT GridFS tests are flaky on Windows") if client_context.storage_engine == "mmapv1": self.skipTest( "MMAPv1 does not support retryable writes which is required for CSOT tests" From 3077bbf1f946c2900a8a1026bc51d3ba4e208f5d Mon Sep 17 00:00:00 2001 From: Michael Pacheco Date: Fri, 7 Apr 2023 15:09:50 -0300 Subject: [PATCH 0366/1588] PYTHON-3657 Allow index name explicitly set to None (#1182) Co-authored-by: Michael Pacheco --- .gitignore | 1 + pymongo/operations.py | 2 +- test/test_collection.py | 4 ++++ 3 files changed, 6 insertions(+), 1 deletion(-) diff --git a/.gitignore b/.gitignore index f7ad6563ff..269a7e7081 100644 --- a/.gitignore +++ b/.gitignore @@ -16,3 +16,4 @@ pymongo.egg-info/ mongocryptd.pid .idea/ .nova/ +venv/ diff --git a/pymongo/operations.py b/pymongo/operations.py index f939cd479f..f73262074d 100644 --- a/pymongo/operations.py +++ b/pymongo/operations.py @@ -494,7 +494,7 @@ def __init__(self, keys: _IndexKeyHint, **kwargs: Any) -> None: .. _wildcard index: https://mongodb.com/docs/master/core/index-wildcard/ """ keys = _index_list(keys) - if "name" not in kwargs: + if kwargs.get("name") is None: kwargs["name"] = _gen_index_name(keys) kwargs["key"] = _index_document(keys) collation = validate_collation_or_none(kwargs.pop("collation", None)) diff --git a/test/test_collection.py b/test/test_collection.py index 881896c847..e36d6663f0 100644 --- a/test/test_collection.py +++ b/test/test_collection.py @@ -307,6 +307,10 @@ def test_create_index(self): db.test.create_index([("hello", DESCENDING), ("world", ASCENDING)]) self.assertTrue("hello_-1_world_1" in db.test.index_information()) + db.test.drop_indexes() + db.test.create_index([("hello", DESCENDING), ("world", ASCENDING)], name=None) + self.assertTrue("hello_-1_world_1" in db.test.index_information()) + db.test.drop() db.test.insert_one({"a": 1}) db.test.insert_one({"a": 1}) From 6088b5315259cc09420831d022b4ddb5f0be818c Mon Sep 17 00:00:00 2001 From: Kevin Albertson Date: Mon, 10 Apr 2023 17:24:44 -0400 Subject: [PATCH 0367/1588] PYTHON-3658 Reload expansions before deleting Azure resources (#1185) --- .evergreen/config.yml | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/.evergreen/config.yml b/.evergreen/config.yml index 37bc5751da..0b2aaa8d54 100644 --- a/.evergreen/config.yml +++ b/.evergreen/config.yml @@ -1181,6 +1181,10 @@ task_groups: params: file: testazurekms-expansions.yml teardown_group: + # Load expansions again. The setup task may have failed before running `expansions.update`. 
+ - command: expansions.update + params: + file: testazurekms-expansions.yml - command: shell.exec params: shell: bash From 1010ea62f5e2196c36e2a6eb61549fc6e884a100 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Mon, 10 Apr 2023 16:41:30 -0500 Subject: [PATCH 0368/1588] PYTHON-3649 Switch to Supported Build Hosts (#1184) --- .evergreen/config.yml | 132 +++++++++++++++++------------------------- 1 file changed, 54 insertions(+), 78 deletions(-) diff --git a/.evergreen/config.yml b/.evergreen/config.yml index 0b2aaa8d54..21c9992dd8 100644 --- a/.evergreen/config.yml +++ b/.evergreen/config.yml @@ -321,7 +321,7 @@ functions: ${PREPARE_SHELL} # The mongohouse build script needs to be passed the VARIANT variable, see # https://github.com/10gen/mongohouse/blob/973cc11/evergreen.yaml#L65 - VARIANT=ubuntu1804 bash ${DRIVERS_TOOLS}/.evergreen/atlas_data_lake/build-mongohouse-local.sh + VARIANT=rhel84-small bash ${DRIVERS_TOOLS}/.evergreen/atlas_data_lake/build-mongohouse-local.sh - command: shell.exec type: setup params: @@ -2077,7 +2077,7 @@ tasks: shell: "bash" script: | ${PREPARE_SHELL} - export LIBMONGOCRYPT_URL=https://s3.amazonaws.com/mciuploads/libmongocrypt/ubuntu1804-64/master/latest/libmongocrypt.tar.gz + export LIBMONGOCRYPT_URL=https://s3.amazonaws.com/mciuploads/libmongocrypt/ubuntu2004-64/master/latest/libmongocrypt.tar.gz SUCCESS=false TEST_FLE_GCP_AUTO=1 ./.evergreen/run-tests.sh - name: testazurekms-task @@ -2134,7 +2134,7 @@ tasks: PYTHON_BINARY= KEY_NAME='${testazurekms_keyname}' \ KEY_VAULT_ENDPOINT='${testazurekms_keyvaultendpoint}' \ - LIBMONGOCRYPT_URL=https://s3.amazonaws.com/mciuploads/libmongocrypt/ubuntu1804-64/master/latest/libmongocrypt.tar.gz \ + LIBMONGOCRYPT_URL=https://s3.amazonaws.com/mciuploads/libmongocrypt/ubuntu2004-64/master/latest/libmongocrypt.tar.gz \ SUCCESS=false TEST_FLE_AZURE_AUTO=1 \ ./.evergreen/run-tests.sh @@ -2189,41 +2189,18 @@ axes: skip_web_identity_auth_test: true python3_binary: /Library/Frameworks/Python.framework/Versions/3.8/bin/python3 libmongocrypt_url: https://s3.amazonaws.com/mciuploads/libmongocrypt/macos/master/latest/libmongocrypt.tar.gz - - id: rhel62 - display_name: "RHEL 6.2 (x86_64)" - run_on: rhel62-small + - id: rhel84 + display_name: "RHEL 8.4" + run_on: rhel84-small batchtime: 10080 # 7 days variables: - libmongocrypt_url: https://s3.amazonaws.com/mciuploads/libmongocrypt/rhel-62-64-bit/master/latest/libmongocrypt.tar.gz - # Note that rhel70 isn't currently used since it doesn't - # have a system Python 3. We'll switch to rhel70 as our main test - # system (using /opt/python) in a future change. 
- - id: rhel70 - display_name: "RHEL 7.0" - run_on: rhel70-small + libmongocrypt_url: https://s3.amazonaws.com/mciuploads/libmongocrypt/rhel-80-64-bit/master/latest/libmongocrypt.tar.gz + - id: rhel80-fips + display_name: "RHEL 8.0 FIPS" + run_on: rhel80-fips batchtime: 10080 # 7 days variables: - libmongocrypt_url: https://s3.amazonaws.com/mciuploads/libmongocrypt/rhel-70-64-bit/master/latest/libmongocrypt.tar.gz - - id: rhel70-fips - display_name: "RHEL 7.0 FIPS" - run_on: rhel70-fips - batchtime: 10080 # 7 days - variables: - libmongocrypt_url: https://s3.amazonaws.com/mciuploads/libmongocrypt/rhel-70-64-bit/master/latest/libmongocrypt.tar.gz - - id: ubuntu-16.04 - display_name: "Ubuntu 16.04" - run_on: ubuntu1604-test - batchtime: 10080 # 7 days - variables: - libmongocrypt_url: https://s3.amazonaws.com/mciuploads/libmongocrypt/ubuntu1604/master/latest/libmongocrypt.tar.gz - python3_binary: "/opt/python/3.8/bin/python3" - - id: ubuntu-18.04 - display_name: "Ubuntu 18.04" - run_on: ubuntu1804-small - batchtime: 10080 # 7 days - variables: - libmongocrypt_url: https://s3.amazonaws.com/mciuploads/libmongocrypt/ubuntu1804-64/master/latest/libmongocrypt.tar.gz - python3_binary: "/opt/python/3.8/bin/python3" + libmongocrypt_url: https://s3.amazonaws.com/mciuploads/libmongocrypt/rhel-80-64-bit/master/latest/libmongocrypt.tar.gz - id: ubuntu-20.04 display_name: "Ubuntu 20.04" run_on: ubuntu2004-small @@ -2243,7 +2220,7 @@ axes: run_on: rhel82-arm64-small batchtime: 10080 # 7 days variables: - libmongocrypt_url: https://s3.amazonaws.com/mciuploads/libmongocrypt/ubuntu1804-arm64/master/latest/libmongocrypt.tar.gz + libmongocrypt_url: https://s3.amazonaws.com/mciuploads/libmongocrypt/rhel-82-arm64/master/latest/libmongocrypt.tar.gz - id: windows-64-vsMulti-small display_name: "Windows 64" run_on: windows-64-vsMulti-small @@ -2595,7 +2572,7 @@ buildvariants: matrix_spec: platform: # OSes that support versions of MongoDB>=3.6 with SSL. - - ubuntu-18.04 + - rhel84 auth-ssl: "*" display_name: "${platform} ${auth-ssl}" tasks: @@ -2622,7 +2599,7 @@ buildvariants: - matrix_name: "tests-fips" matrix_spec: platform: - - rhel70-fips + - rhel80-fips auth: "auth" ssl: "ssl" display_name: "${platform} ${auth} ${ssl}" @@ -2701,9 +2678,9 @@ buildvariants: tasks: - ".6.0" -- matrix_name: "tests-python-version-ubuntu18-test-ssl" +- matrix_name: "tests-python-version-rhel8.4-test-ssl" matrix_spec: - platform: ubuntu-18.04 + platform: rhel84 python-version: "*" auth-ssl: "*" coverage: "*" @@ -2720,14 +2697,14 @@ buildvariants: - matrix_name: "tests-pyopenssl" matrix_spec: - platform: ubuntu-18.04 + platform: ubuntu-20.04 python-version: "*" auth: "*" ssl: "ssl" pyopenssl: "*" # Only test "noauth" with Python 3.7. 
exclude_spec: - platform: ubuntu-18.04 + platform: ubuntu-20.04 python-version: ["3.8", "3.9", "3.10", "pypy3.7", "pypy3.8"] auth: "noauth" ssl: "ssl" @@ -2759,9 +2736,9 @@ buildvariants: tasks: - '.replica_set' -- matrix_name: "tests-python-version-ubuntu18-test-encryption" +- matrix_name: "tests-python-version-rhel84-test-encryption" matrix_spec: - platform: ubuntu-18.04 + platform: rhel84 python-version: "*" auth-ssl: noauth-nossl # TODO: dependency error for 'coverage-report' task: @@ -2773,22 +2750,22 @@ buildvariants: rules: - if: encryption: ["encryption", "encryption_crypt_shared"] - platform: ubuntu-18.04 + platform: rhel84 auth-ssl: noauth-nossl python-version: "*" then: add_tasks: *encryption-server-versions -- matrix_name: "tests-python-version-ubuntu18-without-c-extensions" +- matrix_name: "tests-python-version-rhel84-without-c-extensions" matrix_spec: - platform: ubuntu-18.04 + platform: rhel84 python-version: "*" c-extensions: without-c-extensions auth-ssl: noauth-nossl coverage: "*" exclude_spec: # These interpreters are always tested without extensions. - - platform: ubuntu-18.04 + - platform: rhel84 python-version: ["pypy3.7", "pypy3.8"] c-extensions: "*" auth-ssl: "*" @@ -2796,15 +2773,15 @@ buildvariants: display_name: "${c-extensions} ${python-version} ${platform} ${auth} ${ssl} ${coverage}" tasks: *all-server-versions -- matrix_name: "tests-python-version-ubuntu18-compression" +- matrix_name: "tests-python-version-rhel84-compression" matrix_spec: - platform: ubuntu-18.04 + platform: rhel84 python-version: "*" c-extensions: "*" compression: "*" exclude_spec: # These interpreters are always tested without extensions. - - platform: ubuntu-18.04 + - platform: rhel84 python-version: ["pypy3.7", "pypy3.8"] c-extensions: "with-c-extensions" compression: "*" @@ -2825,15 +2802,15 @@ buildvariants: - "test-4.0-standalone" - "test-3.6-standalone" -- matrix_name: "tests-python-version-green-framework-ubuntu18" +- matrix_name: "tests-python-version-green-framework-rhel84" matrix_spec: - platform: ubuntu-18.04 + platform: rhel84 python-version: "*" green-framework: "*" auth-ssl: "*" exclude_spec: # Don't test green frameworks on these Python versions. - - platform: ubuntu-18.04 + - platform: rhel84 python-version: ["pypy3.7", "pypy3.8", "3.11"] green-framework: "*" auth-ssl: "*" @@ -2897,16 +2874,16 @@ buildvariants: then: add_tasks: *encryption-server-versions -# Storage engine tests on Ubuntu 18.04 (x86_64) with Python 3.7. +# Storage engine tests on RHEL 8.4 (x86_64) with Python 3.7. - matrix_name: "tests-storage-engines" matrix_spec: - platform: ubuntu-18.04 + platform: rhel84 storage-engine: "*" python-version: 3.7 display_name: "Storage ${storage-engine} ${python-version} ${platform}" rules: - if: - platform: ubuntu-18.04 + platform: rhel84 storage-engine: ["inmemory"] python-version: "*" then: @@ -2919,7 +2896,7 @@ buildvariants: - "test-3.6-standalone" - if: # MongoDB 4.2 drops support for MMAPv1 - platform: ubuntu-18.04 + platform: rhel84 storage-engine: ["mmapv1"] python-version: "*" then: @@ -2929,10 +2906,10 @@ buildvariants: - "test-3.6-standalone" - "test-3.6-replica_set" -# enableTestCommands=0 tests on Ubuntu18 (x86_64) with Python 3.7. +# enableTestCommands=0 tests on RHEL 8.4 (x86_64) with Python 3.7. 
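+# (With enableTestCommands=0 the server runs without test-only commands
+# such as configureFailPoint, so these variants exercise a plain,
+# production-like server configuration.)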
- matrix_name: "test-disableTestCommands" matrix_spec: - platform: ubuntu-18.04 + platform: rhel84 disableTestCommands: "*" python-version: "3.7" display_name: "Disable test commands ${python-version} ${platform}" @@ -2941,7 +2918,7 @@ buildvariants: - matrix_name: "test-linux-enterprise-auth" matrix_spec: - platform: ubuntu-18.04 + platform: rhel84 python-version: "*" auth: "auth" display_name: "Enterprise ${auth} ${platform} ${python-version}" @@ -2959,13 +2936,13 @@ buildvariants: - matrix_name: "tests-mod-wsgi" matrix_spec: - platform: ubuntu-18.04 - python-version: ["3.7", "3.8", "3.9", "3.10"] + platform: ubuntu-20.04 + python-version: ["3.7", "3.8", "3.9", "3.10", "3.11"] mod-wsgi-version: "*" exclude_spec: # mod-wsgi 3.5 won't build against CPython 3.8+ - - platform: ubuntu-18.04 - python-version: ["3.8", "3.9", "3.10"] + - platform: ubuntu-20.04 + python-version: ["3.8", "3.9", "3.10", "3.11"] mod-wsgi-version: "3" display_name: "${mod-wsgi-version} ${python-version} ${platform}" tasks: @@ -2974,7 +2951,7 @@ buildvariants: - matrix_name: "mockupdb-tests" matrix_spec: - platform: ubuntu-18.04 + platform: rhel84 python-version: 3.7 display_name: "MockupDB Tests" tasks: @@ -2982,7 +2959,7 @@ buildvariants: - matrix_name: "tests-doctests" matrix_spec: - platform: ubuntu-18.04 + platform: rhel84 python-version: ["3.8"] display_name: "Doctests ${python-version} ${platform}" tasks: @@ -2991,7 +2968,7 @@ buildvariants: - name: "no-server" display_name: "No server test" run_on: - - ubuntu1804-test + - rhel84-small tasks: - name: "no-server" expansions: @@ -3000,7 +2977,7 @@ buildvariants: - name: "Coverage Report" display_name: "Coverage Report" run_on: - - ubuntu1804-test + - rhel84-small tasks: - name: "coverage-report" expansions: @@ -3008,7 +2985,7 @@ buildvariants: - matrix_name: "atlas-connect" matrix_spec: - platform: ubuntu-18.04 + platform: rhel84 python-version: "*" display_name: "Atlas connect ${python-version} ${platform}" tasks: @@ -3016,7 +2993,7 @@ buildvariants: - matrix_name: "serverless" matrix_spec: - platform: ubuntu-18.04 + platform: rhel84 python-version: "*" auth-ssl: auth-ssl serverless: "*" @@ -3026,7 +3003,7 @@ buildvariants: - matrix_name: "data-lake-spec-tests" matrix_spec: - platform: ubuntu-18.04 + platform: rhel84 python-version: ["3.7", "3.10"] auth: "auth" c-extensions: "*" @@ -3036,7 +3013,7 @@ buildvariants: - matrix_name: "stable-api-tests" matrix_spec: - platform: ubuntu-18.04 + platform: rhel84 python-version: ["3.7", "3.10"] auth: "auth" versionedApi: "*" @@ -3049,8 +3026,6 @@ buildvariants: - matrix_name: "ocsp-test" matrix_spec: - # OCSP stapling is not supported on Ubuntu 18.04. - # See https://jira.mongodb.org/browse/SERVER-51364. 
platform: ubuntu-20.04 python-version: ["3.7", "3.10", "pypy3.7", "pypy3.8"] mongodb-version: ["4.4", "5.0", "6.0", "latest"] @@ -3088,7 +3063,7 @@ buildvariants: - matrix_name: "aws-auth-test" matrix_spec: - platform: [ubuntu-18.04] + platform: [ubuntu-20.04] python-version: ["3.7"] display_name: "MONGODB-AWS Auth ${platform} ${python-version}" tasks: @@ -3123,7 +3098,7 @@ buildvariants: - matrix_name: "load-balancer" matrix_spec: - platform: ubuntu-18.04 + platform: rhel84 mongodb-version: ["rapid", "latest", "6.0"] auth-ssl: "*" python-version: "*" @@ -3135,7 +3110,7 @@ buildvariants: - name: testgcpkms-variant display_name: "GCP KMS" run_on: - - ubuntu1804-test + - ubuntu2004-small tasks: - name: testgcpkms_task_group batchtime: 20160 # Use a batchtime of 14 days as suggested by the CSFLE test README @@ -3143,7 +3118,7 @@ buildvariants: - name: testazurekms-variant display_name: "Azure KMS" - run_on: ubuntu1804-test + run_on: ubuntu2004-small tasks: - name: testazurekms_task_group batchtime: 20160 # Use a batchtime of 14 days as suggested by the CSFLE test README @@ -3165,6 +3140,7 @@ buildvariants: # Debian 8.1 only supports MongoDB 3.4+ # SUSE12 s390x is only supported by MongoDB 3.4+ # No enterprise build for Archlinux, SSL not available + # RHEL 7.6 and RHEL 8.4 only supports 3.6+. # RHEL 7 only supports 2.6+ # RHEL 7.1 ppc64le is only supported by MongoDB 3.2+ # RHEL 7.2 s390x is only supported by MongoDB 3.4+ From 9256cb20afcbeaa2a1af06e579fc5349232864b7 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Tue, 11 Apr 2023 12:49:34 -0500 Subject: [PATCH 0369/1588] PYTHON-3649 Use RHEL8 for PyOpenSSL Builds (#1187) --- .evergreen/config.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.evergreen/config.yml b/.evergreen/config.yml index 21c9992dd8..f102668206 100644 --- a/.evergreen/config.yml +++ b/.evergreen/config.yml @@ -2697,14 +2697,14 @@ buildvariants: - matrix_name: "tests-pyopenssl" matrix_spec: - platform: ubuntu-20.04 + platform: rhel84 python-version: "*" auth: "*" ssl: "ssl" pyopenssl: "*" # Only test "noauth" with Python 3.7. exclude_spec: - platform: ubuntu-20.04 + platform: rhel84 python-version: ["3.8", "3.9", "3.10", "pypy3.7", "pypy3.8"] auth: "noauth" ssl: "ssl" From f7225fda55df81265c3284b2b159a610eb390539 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Wed, 12 Apr 2023 10:40:44 -0500 Subject: [PATCH 0370/1588] PYTHON-3652 Bump maxWireVersion for MongoDB 7.0 (#1188) --- pymongo/common.py | 2 +- test/test_topology.py | 6 +++--- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/pymongo/common.py b/pymongo/common.py index add70cfb5f..707cf5d23f 100644 --- a/pymongo/common.py +++ b/pymongo/common.py @@ -63,7 +63,7 @@ # What this version of PyMongo supports. MIN_SUPPORTED_SERVER_VERSION = "3.6" MIN_SUPPORTED_WIRE_VERSION = 6 -MAX_SUPPORTED_WIRE_VERSION = 17 +MAX_SUPPORTED_WIRE_VERSION = 21 # Frequency to call hello on servers, in seconds. HEARTBEAT_FREQUENCY = 10 diff --git a/test/test_topology.py b/test/test_topology.py index d7bae9229f..e09d7c3691 100644 --- a/test/test_topology.py +++ b/test/test_topology.py @@ -540,8 +540,8 @@ def test_wire_version(self): HelloCompat.LEGACY_CMD: True, "setName": "rs", "hosts": ["a"], - "minWireVersion": 21, - "maxWireVersion": 22, + "minWireVersion": 22, + "maxWireVersion": 24, }, ) @@ -551,7 +551,7 @@ def test_wire_version(self): # Error message should say which server failed and why. 
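         # (The fake server's minWireVersion of 22 is above PyMongo's new
         # MAX_SUPPORTED_WIRE_VERSION of 21, the MongoDB 7.0 wire version,
         # so server selection must fail with this message.)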
self.assertEqual( str(e), - "Server at a:27017 requires wire version 21, but this version " + "Server at a:27017 requires wire version 22, but this version " "of PyMongo only supports up to %d." % (common.MAX_SUPPORTED_WIRE_VERSION,), ) else: From b38a416836ca26957ddc8865db300c6f2178f648 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Wed, 12 Apr 2023 11:10:28 -0500 Subject: [PATCH 0371/1588] PYTHON-3162 Deprecate ServerDescription.election_tuple (#1189) --- pymongo/server_description.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/pymongo/server_description.py b/pymongo/server_description.py index 53f90cea25..46517ee95e 100644 --- a/pymongo/server_description.py +++ b/pymongo/server_description.py @@ -15,6 +15,7 @@ """Represent one server the driver is connected to.""" import time +import warnings from typing import Any, Dict, Mapping, Optional, Set, Tuple from bson import EPOCH_NAIVE @@ -180,6 +181,11 @@ def cluster_time(self) -> Optional[Mapping[str, Any]]: @property def election_tuple(self) -> Tuple[Optional[int], Optional[ObjectId]]: + warnings.warn( + "'election_tuple' is deprecated, use 'set_version' and 'election_id' instead", + DeprecationWarning, + stacklevel=2, + ) return self._set_version, self._election_id @property From be355e2bea995ad72c8d1fde6dedf9ec7a637352 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Thu, 13 Apr 2023 10:30:54 -0500 Subject: [PATCH 0372/1588] PYTHON-3604 Remove Duplicate API Docs (#1190) --- doc/api/bson/index.rst | 2 +- doc/api/gridfs/index.rst | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/doc/api/bson/index.rst b/doc/api/bson/index.rst index 72baae68a6..d5b69607de 100644 --- a/doc/api/bson/index.rst +++ b/doc/api/bson/index.rst @@ -3,7 +3,7 @@ .. automodule:: bson :synopsis: BSON (Binary JSON) Encoding and Decoding - :members: + :members: BSON, decode, decode_all, decode_file_iter, decode_iter, encode, gen_list_name, has_c, is_valid Sub-modules: diff --git a/doc/api/gridfs/index.rst b/doc/api/gridfs/index.rst index 6764ef622b..b81fbde782 100644 --- a/doc/api/gridfs/index.rst +++ b/doc/api/gridfs/index.rst @@ -3,7 +3,7 @@ .. 
automodule:: gridfs :synopsis: Tools for working with GridFS - :members: + :members: GridFS, GridFSBucket Sub-modules: From c5652336efe04a770fb316e7b48d5b5509c4f3f0 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Fri, 14 Apr 2023 14:45:57 -0700 Subject: [PATCH 0373/1588] PYTHON-3671 Use default server selection timeout in test setup (#1191) --- test/__init__.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/test/__init__.py b/test/__init__.py index 20b1d00ca8..dc324c6911 100644 --- a/test/__init__.py +++ b/test/__init__.py @@ -536,7 +536,6 @@ def _check_user_provided(self): port, username=db_user, password=db_pwd, - serverSelectionTimeoutMS=100, **self.default_client_options, ) @@ -550,6 +549,8 @@ def _check_user_provided(self): return False else: raise + finally: + client.close() def _server_started_with_auth(self): # MongoDB >= 2.0 From 2cc8fb1f2ea871860c1b3731bf374ca34f63712a Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Fri, 14 Apr 2023 15:35:20 -0700 Subject: [PATCH 0374/1588] PYTHON-3672 Increase server selection timeout in more tests (#1192) --- test/test_ssl.py | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/test/test_ssl.py b/test/test_ssl.py index 9b58c2251b..bf151578cb 100644 --- a/test/test_ssl.py +++ b/test/test_ssl.py @@ -174,7 +174,7 @@ def test_tlsCertificateKeyFilePassword(self): tlsCertificateKeyFile=CLIENT_ENCRYPTED_PEM, tlsCertificateKeyFilePassword="qwerty", tlsCAFile=CA_PEM, - serverSelectionTimeoutMS=100, + serverSelectionTimeoutMS=1000, ) else: connected( @@ -374,7 +374,7 @@ def test_tlsCRLFile_support(self): ssl=True, tlsCAFile=CA_PEM, tlsCRLFile=CRL_PEM, - serverSelectionTimeoutMS=100, + serverSelectionTimeoutMS=1000, ) else: connected( @@ -382,7 +382,7 @@ def test_tlsCRLFile_support(self): "localhost", ssl=True, tlsCAFile=CA_PEM, - serverSelectionTimeoutMS=100, + serverSelectionTimeoutMS=1000, **self.credentials # type: ignore[arg-type] ) ) @@ -394,17 +394,17 @@ def test_tlsCRLFile_support(self): ssl=True, tlsCAFile=CA_PEM, tlsCRLFile=CRL_PEM, - serverSelectionTimeoutMS=100, + serverSelectionTimeoutMS=1000, **self.credentials # type: ignore[arg-type] ) ) - uri_fmt = "mongodb://localhost/?ssl=true&tlsCAFile=%s&serverSelectionTimeoutMS=100" + uri_fmt = "mongodb://localhost/?ssl=true&tlsCAFile=%s&serverSelectionTimeoutMS=1000" connected(MongoClient(uri_fmt % (CA_PEM,), **self.credentials)) # type: ignore uri_fmt = ( "mongodb://localhost/?ssl=true&tlsCRLFile=%s" - "&tlsCAFile=%s&serverSelectionTimeoutMS=100" + "&tlsCAFile=%s&serverSelectionTimeoutMS=1000" ) with self.assertRaises(ConnectionFailure): connected( @@ -425,7 +425,7 @@ def test_validation_with_system_ca_certs(self): with self.assertRaises(ConnectionFailure): # Server cert is verified but hostname matching fails connected( - MongoClient("server", ssl=True, serverSelectionTimeoutMS=100, **self.credentials) # type: ignore[arg-type] + MongoClient("server", ssl=True, serverSelectionTimeoutMS=1000, **self.credentials) # type: ignore[arg-type] ) # Server cert is verified. Disable hostname matching. @@ -434,20 +434,20 @@ def test_validation_with_system_ca_certs(self): "server", ssl=True, tlsAllowInvalidHostnames=True, - serverSelectionTimeoutMS=100, + serverSelectionTimeoutMS=1000, **self.credentials # type: ignore[arg-type] ) ) # Server cert and hostname are verified. 
connected( - MongoClient("localhost", ssl=True, serverSelectionTimeoutMS=100, **self.credentials) # type: ignore[arg-type] + MongoClient("localhost", ssl=True, serverSelectionTimeoutMS=1000, **self.credentials) # type: ignore[arg-type] ) # Server cert and hostname are verified. connected( MongoClient( - "mongodb://localhost/?ssl=true&serverSelectionTimeoutMS=100", **self.credentials # type: ignore[arg-type] + "mongodb://localhost/?ssl=true&serverSelectionTimeoutMS=1000", **self.credentials # type: ignore[arg-type] ) ) @@ -622,7 +622,7 @@ def test_mongodb_x509_auth(self): ssl=True, tlsAllowInvalidCertificates=True, tlsCertificateKeyFile=CA_PEM, - serverSelectionTimeoutMS=100, + serverSelectionTimeoutMS=1000, ) ) except (ConnectionFailure, ConfigurationError): From 79488d95dbeac0a6d48256ecaba7c0ebdf90884d Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Tue, 25 Apr 2023 12:30:23 -0600 Subject: [PATCH 0375/1588] PYTHON-3678 Username/password needs to be escaped with quote_plus to account for '/' (#1193) --- doc/examples/authentication.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/examples/authentication.rst b/doc/examples/authentication.rst index a46f95c789..5bd3282146 100644 --- a/doc/examples/authentication.rst +++ b/doc/examples/authentication.rst @@ -11,7 +11,7 @@ Percent-Escaping Username and Password -------------------------------------- Username and password must be percent-escaped with -:py:func:`urllib.parse.quote`, to be used in a MongoDB URI. For example:: +:py:func:`urllib.parse.quote_plus`, to be used in a MongoDB URI. For example:: >>> from pymongo import MongoClient >>> import urllib.parse From dfd82d2375bd641f9d7c70ae34751c1db3fc673d Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Tue, 25 Apr 2023 17:28:10 -0500 Subject: [PATCH 0376/1588] PYTHON-3677 Update docs on Range Index (#1195) --- doc/changelog.rst | 1 - pymongo/encryption.py | 9 +++------ pymongo/encryption_options.py | 3 +-- 3 files changed, 4 insertions(+), 9 deletions(-) diff --git a/doc/changelog.rst b/doc/changelog.rst index 2ad33e41ec..3c0419f401 100644 --- a/doc/changelog.rst +++ b/doc/changelog.rst @@ -6,7 +6,6 @@ Changes in Version 4.4 - Added support for passing a list containing (key, direction) pairs or keys to :meth:`~pymongo.collection.Collection.create_index`. -- **BETA** Added support for range queries on client side field level encrypted collections. - pymongocrypt 1.5.0 or later is now required for client side field level encryption support. - Improved support for Pyright to improve typing support for IDEs like Visual Studio Code or Visual Studio. diff --git a/pymongo/encryption.py b/pymongo/encryption.py index d94e1969b0..4ad59d436e 100644 --- a/pymongo/encryption.py +++ b/pymongo/encryption.py @@ -799,9 +799,9 @@ def encrypt( when the algorithm is :attr:`Algorithm.INDEXED`. An integer value *must* be given when the :attr:`Algorithm.INDEXED` algorithm is used. - - `range_opts`: **(BETA)** An instance of RangeOpts. + - `range_opts`: Experimental only, not intended for public use. - .. note:: `query_type`, `contention_factor` and `range_opts` are part of the Queryable Encryption beta. + .. note:: `query_type`, and `contention_factor` are part of the Queryable Encryption beta. Backwards-breaking changes may be made before the final release. :Returns: @@ -851,10 +851,7 @@ def encrypt_expression( when the algorithm is :attr:`Algorithm.INDEXED`. An integer value *must* be given when the :attr:`Algorithm.INDEXED` algorithm is used. 
- - `range_opts`: **(BETA)** An instance of RangeOpts. - - .. note:: Support for range queries is in beta. - Backwards-breaking changes may be made before the final release. + - `range_opts`: Experimental only, not intended for public use. :Returns: The encrypted expression, a :class:`~bson.RawBSONDocument`. diff --git a/pymongo/encryption_options.py b/pymongo/encryption_options.py index 6c966e30cd..d8e9daad1f 100644 --- a/pymongo/encryption_options.py +++ b/pymongo/encryption_options.py @@ -233,8 +233,7 @@ def __init__( ) -> None: """Options to configure encrypted queries using the rangePreview algorithm. - .. note:: Support for Range queries is in beta. - Backwards-breaking changes may be made before the final release. + .. note:: This feature is experimental only, and not intended for public use. :Parameters: - `sparsity`: An integer. From e75cfec34f529c88e837f58267796ff8b13a4dc0 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Fri, 28 Apr 2023 16:11:27 -0500 Subject: [PATCH 0377/1588] PYTHON-3686 Consolidate CodecOptions Typings (#1199) --- .github/workflows/test-python.yml | 3 - bson/__init__.py | 2 +- bson/codec_options.py | 458 +++++++++++++++++------------- bson/codec_options.pyi | 106 ------- test/test_custom_types.py | 3 +- 5 files changed, 256 insertions(+), 316 deletions(-) delete mode 100644 bson/codec_options.pyi diff --git a/.github/workflows/test-python.yml b/.github/workflows/test-python.yml index 8dad68ab20..2941f9c3ab 100644 --- a/.github/workflows/test-python.yml +++ b/.github/workflows/test-python.yml @@ -68,9 +68,6 @@ jobs: mypy --install-types --non-interactive --disable-error-code var-annotated --disable-error-code attr-defined --disable-error-code union-attr --disable-error-code assignment --disable-error-code no-redef --disable-error-code index --allow-redefinition --allow-untyped-globals --exclude "test/mypy_fails/*.*" test python -m pip install -U typing_extensions mypy --install-types --non-interactive test/test_typing.py test/test_typing_strict.py - - name: Run mypy strict - run: | - mypy --strict test/test_typing_strict.py - name: Run pyright run: | python -m pip install -U pip pyright==1.1.290 diff --git a/bson/__init__.py b/bson/__init__.py index 2fe4aa173e..700a5d4cf8 100644 --- a/bson/__init__.py +++ b/bson/__init__.py @@ -1115,7 +1115,7 @@ def decode_all( if not isinstance(opts, CodecOptions): raise _CODEC_OPTIONS_TYPE_ERROR - return _decode_all(data, opts) # type: ignore[arg-type] + return _decode_all(data, opts) # type:ignore[arg-type] def _decode_selective(rawdoc: Any, fields: Any, codec_options: Any) -> Mapping[Any, Any]: diff --git a/bson/codec_options.py b/bson/codec_options.py index c09de8a931..096be85264 100644 --- a/bson/codec_options.py +++ b/bson/codec_options.py @@ -19,15 +19,17 @@ import enum from collections.abc import MutableMapping as _MutableMapping from typing import ( + TYPE_CHECKING, Any, Callable, Dict, + Generic, Iterable, Mapping, NamedTuple, Optional, + Tuple, Type, - TypeVar, Union, cast, ) @@ -37,11 +39,7 @@ UUID_REPRESENTATION_NAMES, UuidRepresentation, ) - - -def _abstractproperty(func: Callable[..., Any]) -> property: - return property(abc.abstractmethod(func)) - +from bson.typings import _DocumentType _RAW_BSON_DOCUMENT_MARKER = 101 @@ -62,7 +60,7 @@ class TypeEncoder(abc.ABC): See :ref:`custom-type-type-codec` documentation for an example. 
""" - @_abstractproperty + @abc.abstractproperty def python_type(self) -> Any: """The Python type to be converted into something serializable.""" pass @@ -83,7 +81,7 @@ class TypeDecoder(abc.ABC): See :ref:`custom-type-type-codec` documentation for an example. """ - @_abstractproperty + @abc.abstractproperty def bson_type(self) -> Any: """The BSON type to be converted into our own type.""" pass @@ -112,7 +110,6 @@ class TypeCodec(TypeEncoder, TypeDecoder): _Codec = Union[TypeEncoder, TypeDecoder, TypeCodec] _Fallback = Callable[[Any], Any] -_DocumentType = TypeVar("_DocumentType", bound=Mapping[str, Any]) class TypeRegistry(object): @@ -244,208 +241,259 @@ class _BaseCodecOptions(NamedTuple): datetime_conversion: Optional[DatetimeConversion] -class CodecOptions(_BaseCodecOptions): - """Encapsulates options used encoding and / or decoding BSON.""" - - def __init__(self, *args, **kwargs): - """Encapsulates options used encoding and / or decoding BSON. - - The `document_class` option is used to define a custom type for use - decoding BSON documents. Access to the underlying raw BSON bytes for - a document is available using the :class:`~bson.raw_bson.RawBSONDocument` - type:: - - >>> from bson.raw_bson import RawBSONDocument - >>> from bson.codec_options import CodecOptions - >>> codec_options = CodecOptions(document_class=RawBSONDocument) - >>> coll = db.get_collection('test', codec_options=codec_options) - >>> doc = coll.find_one() - >>> doc.raw - '\\x16\\x00\\x00\\x00\\x07_id\\x00[0\\x165\\x91\\x10\\xea\\x14\\xe8\\xc5\\x8b\\x93\\x00' - - The document class can be any type that inherits from - :class:`~collections.abc.MutableMapping`:: - - >>> class AttributeDict(dict): - ... # A dict that supports attribute access. - ... def __getattr__(self, key): - ... return self[key] - ... def __setattr__(self, key, value): - ... self[key] = value - ... - >>> codec_options = CodecOptions(document_class=AttributeDict) - >>> coll = db.get_collection('test', codec_options=codec_options) - >>> doc = coll.find_one() - >>> doc._id - ObjectId('5b3016359110ea14e8c58b93') - - See :doc:`/examples/datetimes` for examples using the `tz_aware` and - `tzinfo` options. - - See :doc:`/examples/uuid` for examples using the `uuid_representation` - option. - - :Parameters: - - `document_class`: BSON documents returned in queries will be decoded - to an instance of this class. Must be a subclass of - :class:`~collections.abc.MutableMapping`. Defaults to :class:`dict`. - - `tz_aware`: If ``True``, BSON datetimes will be decoded to timezone - aware instances of :class:`~datetime.datetime`. Otherwise they will be - naive. Defaults to ``False``. - - `uuid_representation`: The BSON representation to use when encoding - and decoding instances of :class:`~uuid.UUID`. Defaults to - :data:`~bson.binary.UuidRepresentation.UNSPECIFIED`. New - applications should consider setting this to - :data:`~bson.binary.UuidRepresentation.STANDARD` for cross language - compatibility. See :ref:`handling-uuid-data-example` for details. - - `unicode_decode_error_handler`: The error handler to apply when - a Unicode-related error occurs during BSON decoding that would - otherwise raise :exc:`UnicodeDecodeError`. Valid options include - 'strict', 'replace', 'backslashreplace', 'surrogateescape', and - 'ignore'. Defaults to 'strict'. - - `tzinfo`: A :class:`~datetime.tzinfo` subclass that specifies the - timezone to/from which :class:`~datetime.datetime` objects should be - encoded/decoded. 
- - `type_registry`: Instance of :class:`TypeRegistry` used to customize - encoding and decoding behavior. - - `datetime_conversion`: Specifies how UTC datetimes should be decoded - within BSON. Valid options include 'datetime_ms' to return as a - DatetimeMS, 'datetime' to return as a datetime.datetime and - raising a ValueError for out-of-range values, 'datetime_auto' to - return DatetimeMS objects when the underlying datetime is - out-of-range and 'datetime_clamp' to clamp to the minimum and - maximum possible datetimes. Defaults to 'datetime'. - - .. versionchanged:: 4.0 - The default for `uuid_representation` was changed from - :const:`~bson.binary.UuidRepresentation.PYTHON_LEGACY` to - :const:`~bson.binary.UuidRepresentation.UNSPECIFIED`. - - .. versionadded:: 3.8 - `type_registry` attribute. - - .. warning:: Care must be taken when changing - `unicode_decode_error_handler` from its default value ('strict'). - The 'replace' and 'ignore' modes should not be used when documents - retrieved from the server will be modified in the client application - and stored back to the server. - """ - super().__init__() - - def __new__( - cls: Type["CodecOptions"], - document_class: Optional[Type[Mapping[str, Any]]] = None, - tz_aware: bool = False, - uuid_representation: Optional[int] = UuidRepresentation.UNSPECIFIED, - unicode_decode_error_handler: str = "strict", - tzinfo: Optional[datetime.tzinfo] = None, - type_registry: Optional[TypeRegistry] = None, - datetime_conversion: Optional[DatetimeConversion] = DatetimeConversion.DATETIME, - ) -> "CodecOptions": - doc_class = document_class or dict - # issubclass can raise TypeError for generic aliases like SON[str, Any]. - # In that case we can use the base class for the comparison. - is_mapping = False - try: - is_mapping = issubclass(doc_class, _MutableMapping) - except TypeError: - if hasattr(doc_class, "__origin__"): - is_mapping = issubclass(doc_class.__origin__, _MutableMapping) # type: ignore[union-attr] - if not (is_mapping or _raw_document_class(doc_class)): - raise TypeError( - "document_class must be dict, bson.son.SON, " - "bson.raw_bson.RawBSONDocument, or a " - "subclass of collections.abc.MutableMapping" - ) - if not isinstance(tz_aware, bool): - raise TypeError("tz_aware must be True or False") - if uuid_representation not in ALL_UUID_REPRESENTATIONS: - raise ValueError( - "uuid_representation must be a value from bson.binary.UuidRepresentation" +if TYPE_CHECKING: + + class CodecOptions(Tuple, Generic[_DocumentType]): + document_class: Type[_DocumentType] + tz_aware: bool + uuid_representation: int + unicode_decode_error_handler: Optional[str] + tzinfo: Optional[datetime.tzinfo] + type_registry: TypeRegistry + datetime_conversion: Optional[int] + + def __new__( + cls: Type["CodecOptions"], + document_class: Optional[Type[_DocumentType]] = ..., + tz_aware: bool = ..., + uuid_representation: Optional[int] = ..., + unicode_decode_error_handler: Optional[str] = ..., + tzinfo: Optional[datetime.tzinfo] = ..., + type_registry: Optional[TypeRegistry] = ..., + datetime_conversion: Optional[int] = ..., + ) -> "CodecOptions[_DocumentType]": + ... + + # CodecOptions API + def with_options(self, **kwargs: Any) -> "CodecOptions[_DocumentType]": + ... + + def _arguments_repr(self) -> str: + ... + + def _options_dict(self) -> Dict[Any, Any]: + ... + + # NamedTuple API + @classmethod + def _make(cls, obj: Iterable) -> "CodecOptions[_DocumentType]": + ... + + def _asdict(self) -> Dict[str, Any]: + ... 
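+        # (This typing-only stub exists because a NamedTuple subclass cannot
+        # be made Generic at runtime before Python 3.9; see bpo-43923, as
+        # explained in the codec_options.pyi file this commit deletes.)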
+ + def _replace(self, **kwargs: Any) -> "CodecOptions[_DocumentType]": + ... + + _source: str + _fields: Tuple[str] + +else: + + class CodecOptions(_BaseCodecOptions): + """Encapsulates options used encoding and / or decoding BSON.""" + + def __init__(self, *args, **kwargs): + """Encapsulates options used encoding and / or decoding BSON. + + The `document_class` option is used to define a custom type for use + decoding BSON documents. Access to the underlying raw BSON bytes for + a document is available using the :class:`~bson.raw_bson.RawBSONDocument` + type:: + + >>> from bson.raw_bson import RawBSONDocument + >>> from bson.codec_options import CodecOptions + >>> codec_options = CodecOptions(document_class=RawBSONDocument) + >>> coll = db.get_collection('test', codec_options=codec_options) + >>> doc = coll.find_one() + >>> doc.raw + '\\x16\\x00\\x00\\x00\\x07_id\\x00[0\\x165\\x91\\x10\\xea\\x14\\xe8\\xc5\\x8b\\x93\\x00' + + The document class can be any type that inherits from + :class:`~collections.abc.MutableMapping`:: + + >>> class AttributeDict(dict): + ... # A dict that supports attribute access. + ... def __getattr__(self, key): + ... return self[key] + ... def __setattr__(self, key, value): + ... self[key] = value + ... + >>> codec_options = CodecOptions(document_class=AttributeDict) + >>> coll = db.get_collection('test', codec_options=codec_options) + >>> doc = coll.find_one() + >>> doc._id + ObjectId('5b3016359110ea14e8c58b93') + + See :doc:`/examples/datetimes` for examples using the `tz_aware` and + `tzinfo` options. + + See :doc:`/examples/uuid` for examples using the `uuid_representation` + option. + + :Parameters: + - `document_class`: BSON documents returned in queries will be decoded + to an instance of this class. Must be a subclass of + :class:`~collections.abc.MutableMapping`. Defaults to :class:`dict`. + - `tz_aware`: If ``True``, BSON datetimes will be decoded to timezone + aware instances of :class:`~datetime.datetime`. Otherwise they will be + naive. Defaults to ``False``. + - `uuid_representation`: The BSON representation to use when encoding + and decoding instances of :class:`~uuid.UUID`. Defaults to + :data:`~bson.binary.UuidRepresentation.UNSPECIFIED`. New + applications should consider setting this to + :data:`~bson.binary.UuidRepresentation.STANDARD` for cross language + compatibility. See :ref:`handling-uuid-data-example` for details. + - `unicode_decode_error_handler`: The error handler to apply when + a Unicode-related error occurs during BSON decoding that would + otherwise raise :exc:`UnicodeDecodeError`. Valid options include + 'strict', 'replace', 'backslashreplace', 'surrogateescape', and + 'ignore'. Defaults to 'strict'. + - `tzinfo`: A :class:`~datetime.tzinfo` subclass that specifies the + timezone to/from which :class:`~datetime.datetime` objects should be + encoded/decoded. + - `type_registry`: Instance of :class:`TypeRegistry` used to customize + encoding and decoding behavior. + - `datetime_conversion`: Specifies how UTC datetimes should be decoded + within BSON. Valid options include 'datetime_ms' to return as a + DatetimeMS, 'datetime' to return as a datetime.datetime and + raising a ValueError for out-of-range values, 'datetime_auto' to + return DatetimeMS objects when the underlying datetime is + out-of-range and 'datetime_clamp' to clamp to the minimum and + maximum possible datetimes. Defaults to 'datetime'. + + .. 
versionchanged:: 4.0 + The default for `uuid_representation` was changed from + :const:`~bson.binary.UuidRepresentation.PYTHON_LEGACY` to + :const:`~bson.binary.UuidRepresentation.UNSPECIFIED`. + + .. versionadded:: 3.8 + `type_registry` attribute. + + .. warning:: Care must be taken when changing + `unicode_decode_error_handler` from its default value ('strict'). + The 'replace' and 'ignore' modes should not be used when documents + retrieved from the server will be modified in the client application + and stored back to the server. + """ + super().__init__() + + def __new__( + cls: Type["CodecOptions"], + document_class: Optional[Type[Mapping[str, Any]]] = None, + tz_aware: bool = False, + uuid_representation: Optional[int] = UuidRepresentation.UNSPECIFIED, + unicode_decode_error_handler: str = "strict", + tzinfo: Optional[datetime.tzinfo] = None, + type_registry: Optional[TypeRegistry] = None, + datetime_conversion: Optional[DatetimeConversion] = DatetimeConversion.DATETIME, + ) -> "CodecOptions": + doc_class = document_class or dict + # issubclass can raise TypeError for generic aliases like SON[str, Any]. + # In that case we can use the base class for the comparison. + is_mapping = False + try: + is_mapping = issubclass(doc_class, _MutableMapping) + except TypeError: + if hasattr(doc_class, "__origin__"): + is_mapping = issubclass(doc_class.__origin__, _MutableMapping) + if not (is_mapping or _raw_document_class(doc_class)): + raise TypeError( + "document_class must be dict, bson.son.SON, " + "bson.raw_bson.RawBSONDocument, or a " + "subclass of collections.abc.MutableMapping" + ) + if not isinstance(tz_aware, bool): + raise TypeError("tz_aware must be True or False") + if uuid_representation not in ALL_UUID_REPRESENTATIONS: + raise ValueError( + "uuid_representation must be a value from bson.binary.UuidRepresentation" + ) + if not isinstance(unicode_decode_error_handler, str): + raise ValueError("unicode_decode_error_handler must be a string") + if tzinfo is not None: + if not isinstance(tzinfo, datetime.tzinfo): + raise TypeError("tzinfo must be an instance of datetime.tzinfo") + if not tz_aware: + raise ValueError("cannot specify tzinfo without also setting tz_aware=True") + + type_registry = type_registry or TypeRegistry() + + if not isinstance(type_registry, TypeRegistry): + raise TypeError("type_registry must be an instance of TypeRegistry") + + return tuple.__new__( + cls, + ( + doc_class, + tz_aware, + uuid_representation, + unicode_decode_error_handler, + tzinfo, + type_registry, + datetime_conversion, + ), ) - if not isinstance(unicode_decode_error_handler, str): - raise ValueError("unicode_decode_error_handler must be a string") - if tzinfo is not None: - if not isinstance(tzinfo, datetime.tzinfo): - raise TypeError("tzinfo must be an instance of datetime.tzinfo") - if not tz_aware: - raise ValueError("cannot specify tzinfo without also setting tz_aware=True") - - type_registry = type_registry or TypeRegistry() - - if not isinstance(type_registry, TypeRegistry): - raise TypeError("type_registry must be an instance of TypeRegistry") - - return tuple.__new__( - cls, - ( - doc_class, - tz_aware, - uuid_representation, - unicode_decode_error_handler, - tzinfo, - type_registry, - datetime_conversion, - ), - ) - - def _arguments_repr(self) -> str: - """Representation of the arguments used to create this object.""" - document_class_repr = "dict" if self.document_class is dict else repr(self.document_class) - - uuid_rep_repr = UUID_REPRESENTATION_NAMES.get( - 
self.uuid_representation, self.uuid_representation - ) - return ( - "document_class=%s, tz_aware=%r, uuid_representation=%s, " - "unicode_decode_error_handler=%r, tzinfo=%r, " - "type_registry=%r, datetime_conversion=%s" - % ( - document_class_repr, - self.tz_aware, - uuid_rep_repr, - self.unicode_decode_error_handler, - self.tzinfo, - self.type_registry, - self.datetime_conversion, + def _arguments_repr(self) -> str: + """Representation of the arguments used to create this object.""" + document_class_repr = ( + "dict" if self.document_class is dict else repr(self.document_class) ) - ) - - def _options_dict(self) -> Dict[str, Any]: - """Dictionary of the arguments used to create this object.""" - # TODO: PYTHON-2442 use _asdict() instead - return { - "document_class": self.document_class, - "tz_aware": self.tz_aware, - "uuid_representation": self.uuid_representation, - "unicode_decode_error_handler": self.unicode_decode_error_handler, - "tzinfo": self.tzinfo, - "type_registry": self.type_registry, - "datetime_conversion": self.datetime_conversion, - } - - def __repr__(self): - return "%s(%s)" % (self.__class__.__name__, self._arguments_repr()) - def with_options(self, **kwargs: Any) -> "CodecOptions": - """Make a copy of this CodecOptions, overriding some options:: - - >>> from bson.codec_options import DEFAULT_CODEC_OPTIONS - >>> DEFAULT_CODEC_OPTIONS.tz_aware - False - >>> options = DEFAULT_CODEC_OPTIONS.with_options(tz_aware=True) - >>> options.tz_aware - True - - .. versionadded:: 3.5 - """ - opts = self._options_dict() - opts.update(kwargs) - return CodecOptions(**opts) + uuid_rep_repr = UUID_REPRESENTATION_NAMES.get( + self.uuid_representation, self.uuid_representation + ) + return ( + "document_class=%s, tz_aware=%r, uuid_representation=%s, " + "unicode_decode_error_handler=%r, tzinfo=%r, " + "type_registry=%r, datetime_conversion=%s" + % ( + document_class_repr, + self.tz_aware, + uuid_rep_repr, + self.unicode_decode_error_handler, + self.tzinfo, + self.type_registry, + self.datetime_conversion, + ) + ) -DEFAULT_CODEC_OPTIONS = CodecOptions() + def _options_dict(self) -> Dict[str, Any]: + """Dictionary of the arguments used to create this object.""" + # TODO: PYTHON-2442 use _asdict() instead + return { + "document_class": self.document_class, + "tz_aware": self.tz_aware, + "uuid_representation": self.uuid_representation, + "unicode_decode_error_handler": self.unicode_decode_error_handler, + "tzinfo": self.tzinfo, + "type_registry": self.type_registry, + "datetime_conversion": self.datetime_conversion, + } + + def __repr__(self): + return "%s(%s)" % (self.__class__.__name__, self._arguments_repr()) + + def with_options(self, **kwargs: Any) -> "CodecOptions": + """Make a copy of this CodecOptions, overriding some options:: + + >>> from bson.codec_options import DEFAULT_CODEC_OPTIONS + >>> DEFAULT_CODEC_OPTIONS.tz_aware + False + >>> options = DEFAULT_CODEC_OPTIONS.with_options(tz_aware=True) + >>> options.tz_aware + True + + .. versionadded:: 3.5 + """ + opts = self._options_dict() + opts.update(kwargs) + return CodecOptions(**opts) + + +DEFAULT_CODEC_OPTIONS: "CodecOptions[Mapping[str, Any]]" = CodecOptions() def _parse_codec_options(options: Any) -> CodecOptions: diff --git a/bson/codec_options.pyi b/bson/codec_options.pyi deleted file mode 100644 index 8242bd4cb2..0000000000 --- a/bson/codec_options.pyi +++ /dev/null @@ -1,106 +0,0 @@ -# Copyright 2022-present MongoDB, Inc. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Workaround for https://bugs.python.org/issue43923. -Ideally we would have done this with a single class, but -generic subclasses *must* take a parameter, and prior to Python 3.9 -or in Python 3.7 and 3.8 with `from __future__ import annotations`, -you get the error: "TypeError: 'type' object is not subscriptable". -""" - -import datetime -import abc -import enum -from typing import Tuple, Generic, Optional, Mapping, Any, Type, Dict, Iterable, Tuple, Callable, Union -from bson.typings import _DocumentType, _DocumentTypeArg - - -class TypeEncoder(abc.ABC, metaclass=abc.ABCMeta): - @property - @abc.abstractmethod - def python_type(self) -> Any: ... - @abc.abstractmethod - def transform_python(self, value: Any) -> Any: ... - -class TypeDecoder(abc.ABC, metaclass=abc.ABCMeta): - @property - @abc.abstractmethod - def bson_type(self) -> Any: ... - @abc.abstractmethod - def transform_bson(self, value: Any) -> Any: ... - -class TypeCodec(TypeEncoder, TypeDecoder, metaclass=abc.ABCMeta): ... - -Codec = Union[TypeEncoder, TypeDecoder, TypeCodec] -Fallback = Callable[[Any], Any] - -class TypeRegistry: - _decoder_map: Dict[Any, Any] - _encoder_map: Dict[Any, Any] - _fallback_encoder: Optional[Fallback] - - def __init__(self, type_codecs: Optional[Iterable[Codec]] = ..., fallback_encoder: Optional[Fallback] = ...) -> None: ... - def __eq__(self, other: Any) -> Any: ... - -class DatetimeConversion(int, enum.Enum): - DATETIME = ... - DATETIME_CLAMP = ... - DATETIME_MS = ... - DATETIME_AUTO = ... - -class CodecOptions(Tuple, Generic[_DocumentType]): - document_class: Type[_DocumentType] - tz_aware: bool - uuid_representation: int - unicode_decode_error_handler: Optional[str] - tzinfo: Optional[datetime.tzinfo] - type_registry: TypeRegistry - datetime_conversion: Optional[int] - - def __new__( - cls: Type[CodecOptions], - document_class: Optional[Type[_DocumentType]] = ..., - tz_aware: bool = ..., - uuid_representation: Optional[int] = ..., - unicode_decode_error_handler: Optional[str] = ..., - tzinfo: Optional[datetime.tzinfo] = ..., - type_registry: Optional[TypeRegistry] = ..., - datetime_conversion: Optional[int] = ..., - ) -> CodecOptions[_DocumentType]: ... - - # CodecOptions API - def with_options(self, **kwargs: Any) -> CodecOptions[_DocumentTypeArg]: ... - - def _arguments_repr(self) -> str: ... - - def _options_dict(self) -> Dict[Any, Any]: ... - - # NamedTuple API - @classmethod - def _make(cls, obj: Iterable) -> CodecOptions[_DocumentType]: ... - - def _asdict(self) -> Dict[str, Any]: ... - - def _replace(self, **kwargs: Any) -> CodecOptions[_DocumentType]: ... - - _source: str - _fields: Tuple[str] - - -DEFAULT_CODEC_OPTIONS: "CodecOptions[Mapping[str, Any]]" -_RAW_BSON_DOCUMENT_MARKER: int - -def _raw_document_class(document_class: Any) -> bool: ... - -def _parse_codec_options(options: Any) -> CodecOptions: ... 
diff --git a/test/test_custom_types.py b/test/test_custom_types.py index 868756c67d..676b3b6af0 100644 --- a/test/test_custom_types.py +++ b/test/test_custom_types.py @@ -541,7 +541,8 @@ def transform_bson(self, value): {MyIntEncoder.python_type: codec_instances[1].transform_python}, ) self.assertEqual( - type_registry._decoder_map, {MyIntDecoder.bson_type: codec_instances[0].transform_bson} + type_registry._decoder_map, + {MyIntDecoder.bson_type: codec_instances[0].transform_bson}, ) def test_initialize_fail(self): From bc9029a22879c772e6049b962435fd563636ea92 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Fri, 28 Apr 2023 14:27:06 -0700 Subject: [PATCH 0378/1588] PYTHON-3679 Support mypy 1.2 (#1194) --- .github/workflows/test-python.yml | 2 +- bson/decimal128.py | 2 +- bson/son.py | 4 ++-- test/test_encryption.py | 2 +- 4 files changed, 5 insertions(+), 5 deletions(-) diff --git a/.github/workflows/test-python.yml b/.github/workflows/test-python.yml index 2941f9c3ab..bb0b836788 100644 --- a/.github/workflows/test-python.yml +++ b/.github/workflows/test-python.yml @@ -58,7 +58,7 @@ jobs: cache-dependency-path: 'setup.py' - name: Install dependencies run: | - python -m pip install -U pip mypy==0.990 + python -m pip install -U pip mypy==1.2 pip install -e ".[zstd, encryption, ocsp]" - name: Run mypy run: | diff --git a/bson/decimal128.py b/bson/decimal128.py index ab2d1a24ac..bce5b251e9 100644 --- a/bson/decimal128.py +++ b/bson/decimal128.py @@ -100,7 +100,7 @@ def _decimal_to_128(value: _VALUE_OPTIONS) -> Tuple[int, int]: if significand & (1 << i): high |= 1 << (i - 64) - biased_exponent = exponent + _EXPONENT_BIAS + biased_exponent = exponent + _EXPONENT_BIAS # type: ignore[operator] if high >> 49 == 1: high = high & 0x7FFFFFFFFFFF diff --git a/bson/son.py b/bson/son.py index e4238b4058..bba108aa80 100644 --- a/bson/son.py +++ b/bson/son.py @@ -66,7 +66,7 @@ def __init__( self.update(kwargs) def __new__(cls: Type["SON[_Key, _Value]"], *args: Any, **kwargs: Any) -> "SON[_Key, _Value]": - instance = super(SON, cls).__new__(cls, *args, **kwargs) + instance = super(SON, cls).__new__(cls, *args, **kwargs) # type: ignore[type-var] instance.__keys = [] return instance @@ -115,7 +115,7 @@ def clear(self) -> None: self.__keys = [] super(SON, self).clear() - def setdefault(self, key: _Key, default: _Value) -> _Value: # type: ignore[override] + def setdefault(self, key: _Key, default: _Value) -> _Value: try: return self[key] except KeyError: diff --git a/test/test_encryption.py b/test/test_encryption.py index 6cdc8da3b6..872e0356ad 100644 --- a/test/test_encryption.py +++ b/test/test_encryption.py @@ -2162,7 +2162,7 @@ def test_05_tlsDisableOCSPEndpointCheck_is_permitted(self): self.addCleanup(encryption.close) ctx = encryption._io_callbacks.opts._kms_ssl_contexts["aws"] if not hasattr(ctx, "check_ocsp_endpoint"): - raise self.skipTest("OCSP not enabled") # type:ignore + raise self.skipTest("OCSP not enabled") self.assertFalse(ctx.check_ocsp_endpoint) From deb0566c3e8ede09fa7f88fd173ef25b0e40c3a9 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Mon, 1 May 2023 09:16:28 -0700 Subject: [PATCH 0379/1588] PYTHON-3614 Support Queryable Encryption protocol v2 on 7.0+ (#1197) Resync FLE spec tests and update docs for new QE protocol on 7.0. Add client side error for createEncryptedCollection on MongoDB < 7.0. KMS timeout errors should always have exc.timeout==True. PYTHON-3583 Drivers should not create the ECC collection in v2 of queryable encryption. 
--- doc/changelog.rst | 7 +- doc/examples/encryption.rst | 8 +- pymongo/collection.py | 18 +- pymongo/common.py | 4 - pymongo/database.py | 7 +- pymongo/encryption.py | 36 +- pymongo/encryption_options.py | 1 - .../etc/data/encryptedFields-Range-Date.json | 36 -- .../data/encryptedFields-Range-Decimal.json | 26 - ...ncryptedFields-Range-DecimalPrecision.json | 35 - .../data/encryptedFields-Range-Double.json | 26 - ...encryptedFields-Range-DoublePrecision.json | 35 - .../etc/data/encryptedFields-Range-Int.json | 32 - .../etc/data/encryptedFields-Range-Long.json | 32 - .../etc/data/encryptedFields.json | 5 +- .../etc/data/range-encryptedFields-Date.json | 49 +- ...ge-encryptedFields-DecimalNoPrecision.json | 36 +- ...ange-encryptedFields-DecimalPrecision.json | 5 +- ...nge-encryptedFields-DoubleNoPrecision.json | 36 +- ...range-encryptedFields-DoublePrecision.json | 54 +- .../etc/data/range-encryptedFields-Int.json | 48 +- .../etc/data/range-encryptedFields-Long.json | 48 +- .../spec/legacy/bypassedCommand.json | 9 +- ...s.json => fle2v2-BypassQueryAnalysis.json} | 116 ++-- ...{fle2-Compact.json => fle2v2-Compact.json} | 6 +- .../fle2v2-CreateCollection-OldServer.json | 62 ++ ...tion.json => fle2v2-CreateCollection.json} | 609 +++--------------- ...a.json => fle2v2-DecryptExistingData.json} | 3 +- .../{fle2-Delete.json => fle2v2-Delete.json} | 29 +- ...ncryptedFields-vs-EncryptedFieldsMap.json} | 9 +- ...fle2v2-EncryptedFields-vs-jsonSchema.json} | 10 +- ...> fle2v2-EncryptedFieldsMap-defaults.json} | 8 +- ...date.json => fle2v2-FindOneAndUpdate.json} | 50 +- ...ed.json => fle2v2-InsertFind-Indexed.json} | 10 +- ....json => fle2v2-InsertFind-Unindexed.json} | 6 +- ...MissingKey.json => fle2v2-MissingKey.json} | 6 +- ...cryption.json => fle2v2-NoEncryption.json} | 3 +- ....json => fle2v2-Range-Date-Aggregate.json} | 11 +- ...son => fle2v2-Range-Date-Correctness.json} | 6 +- ...ete.json => fle2v2-Range-Date-Delete.json} | 30 +- ...> fle2v2-Range-Date-FindOneAndUpdate.json} | 29 +- ...json => fle2v2-Range-Date-InsertFind.json} | 11 +- ...ate.json => fle2v2-Range-Date-Update.json} | 29 +- ...on => fle2v2-Range-Decimal-Aggregate.json} | 37 +- ... => fle2v2-Range-Decimal-Correctness.json} | 144 ++--- ....json => fle2v2-Range-Decimal-Delete.json} | 52 +- ...le2v2-Range-Decimal-FindOneAndUpdate.json} | 59 +- ...n => fle2v2-Range-Decimal-InsertFind.json} | 37 +- ....json => fle2v2-Range-Decimal-Update.json} | 57 +- ...2v2-Range-DecimalPrecision-Aggregate.json} | 11 +- ...2-Range-DecimalPrecision-Correctness.json} | 6 +- ...fle2v2-Range-DecimalPrecision-Delete.json} | 30 +- ...ge-DecimalPrecision-FindOneAndUpdate.json} | 29 +- ...v2-Range-DecimalPrecision-InsertFind.json} | 11 +- ...fle2v2-Range-DecimalPrecision-Update.json} | 29 +- ...son => fle2v2-Range-Double-Aggregate.json} | 37 +- ...n => fle2v2-Range-Double-Correctness.json} | 144 ++--- ...e.json => fle2v2-Range-Double-Delete.json} | 52 +- ...fle2v2-Range-Double-FindOneAndUpdate.json} | 59 +- ...on => fle2v2-Range-Double-InsertFind.json} | 37 +- ...e.json => fle2v2-Range-Double-Update.json} | 57 +- ...e2v2-Range-DoublePrecision-Aggregate.json} | 11 +- ...v2-Range-DoublePrecision-Correctness.json} | 6 +- ... fle2v2-Range-DoublePrecision-Delete.json} | 30 +- ...nge-DoublePrecision-FindOneAndUpdate.json} | 29 +- ...2v2-Range-DoublePrecision-InsertFind.json} | 11 +- ... 
fle2v2-Range-DoublePrecision-Update.json} | 29 +- ...e.json => fle2v2-Range-Int-Aggregate.json} | 11 +- ...json => fle2v2-Range-Int-Correctness.json} | 6 +- ...lete.json => fle2v2-Range-Int-Delete.json} | 30 +- ...=> fle2v2-Range-Int-FindOneAndUpdate.json} | 29 +- ....json => fle2v2-Range-Int-InsertFind.json} | 11 +- ...date.json => fle2v2-Range-Int-Update.json} | 29 +- ....json => fle2v2-Range-Long-Aggregate.json} | 11 +- ...son => fle2v2-Range-Long-Correctness.json} | 6 +- ...ete.json => fle2v2-Range-Long-Delete.json} | 30 +- ...> fle2v2-Range-Long-FindOneAndUpdate.json} | 29 +- ...json => fle2v2-Range-Long-InsertFind.json} | 11 +- ...ate.json => fle2v2-Range-Long-Update.json} | 29 +- ...gType.json => fle2v2-Range-WrongType.json} | 6 +- .../{fle2-Update.json => fle2v2-Update.json} | 52 +- ...2-validatorAndPartialFieldExpression.json} | 21 +- .../spec/legacy/timeoutMS.json | 200 ++++++ .../spec/unified/rewrapManyDataKey.json | 30 +- test/test_encryption.py | 15 +- test/unified_format.py | 4 +- test/utils.py | 12 +- test/utils_spec_runner.py | 49 +- 88 files changed, 1086 insertions(+), 2175 deletions(-) delete mode 100644 test/client-side-encryption/etc/data/encryptedFields-Range-Date.json delete mode 100644 test/client-side-encryption/etc/data/encryptedFields-Range-Decimal.json delete mode 100644 test/client-side-encryption/etc/data/encryptedFields-Range-DecimalPrecision.json delete mode 100644 test/client-side-encryption/etc/data/encryptedFields-Range-Double.json delete mode 100644 test/client-side-encryption/etc/data/encryptedFields-Range-DoublePrecision.json delete mode 100644 test/client-side-encryption/etc/data/encryptedFields-Range-Int.json delete mode 100644 test/client-side-encryption/etc/data/encryptedFields-Range-Long.json rename test/client-side-encryption/spec/legacy/{fle2-BypassQueryAnalysis.json => fle2v2-BypassQueryAnalysis.json} (65%) rename test/client-side-encryption/spec/legacy/{fle2-Compact.json => fle2v2-Compact.json} (97%) create mode 100644 test/client-side-encryption/spec/legacy/fle2v2-CreateCollection-OldServer.json rename test/client-side-encryption/spec/legacy/{fle2-CreateCollection.json => fle2v2-CreateCollection.json} (74%) rename test/client-side-encryption/spec/legacy/{fle2-DecryptExistingData.json => fle2v2-DecryptExistingData.json} (98%) rename test/client-side-encryption/spec/legacy/{fle2-Delete.json => fle2v2-Delete.json} (86%) rename test/client-side-encryption/spec/legacy/{fle2-EncryptedFields-vs-EncryptedFieldsMap.json => fle2v2-EncryptedFields-vs-EncryptedFieldsMap.json} (94%) rename test/client-side-encryption/spec/legacy/{fle2-EncryptedFields-vs-jsonSchema.json => fle2v2-EncryptedFields-vs-jsonSchema.json} (94%) rename test/client-side-encryption/spec/legacy/{fle2-EncryptedFieldsMap-defaults.json => fle2v2-EncryptedFieldsMap-defaults.json} (93%) rename test/client-side-encryption/spec/legacy/{fle2-FindOneAndUpdate.json => fle2v2-FindOneAndUpdate.json} (87%) rename test/client-side-encryption/spec/legacy/{fle2-InsertFind-Indexed.json => fle2v2-InsertFind-Indexed.json} (94%) rename test/client-side-encryption/spec/legacy/{fle2-InsertFind-Unindexed.json => fle2v2-InsertFind-Unindexed.json} (97%) rename test/client-side-encryption/spec/legacy/{fle2-MissingKey.json => fle2v2-MissingKey.json} (94%) rename test/client-side-encryption/spec/legacy/{fle2-NoEncryption.json => fle2v2-NoEncryption.json} (96%) rename test/client-side-encryption/spec/legacy/{fle2-Range-Date-Aggregate.json => fle2v2-Range-Date-Aggregate.json} (89%) rename 
test/client-side-encryption/spec/legacy/{fle2-Range-Date-Correctness.json => fle2v2-Range-Date-Correctness.json} (99%) rename test/client-side-encryption/spec/legacy/{fle2-Range-Date-Delete.json => fle2v2-Range-Date-Delete.json} (83%) rename test/client-side-encryption/spec/legacy/{fle2-Range-Date-FindOneAndUpdate.json => fle2v2-Range-Date-FindOneAndUpdate.json} (86%) rename test/client-side-encryption/spec/legacy/{fle2-Range-Date-InsertFind.json => fle2v2-Range-Date-InsertFind.json} (89%) rename test/client-side-encryption/spec/legacy/{fle2-Range-Date-Update.json => fle2v2-Range-Date-Update.json} (86%) rename test/client-side-encryption/spec/legacy/{fle2-Range-Decimal-Aggregate.json => fle2v2-Range-Decimal-Aggregate.json} (75%) rename test/client-side-encryption/spec/legacy/{fle2-Range-Decimal-Correctness.json => fle2v2-Range-Decimal-Correctness.json} (88%) rename test/client-side-encryption/spec/legacy/{fle2-Range-Decimal-Delete.json => fle2v2-Range-Decimal-Delete.json} (65%) rename test/client-side-encryption/spec/legacy/{fle2-Range-Decimal-FindOneAndUpdate.json => fle2v2-Range-Decimal-FindOneAndUpdate.json} (75%) rename test/client-side-encryption/spec/legacy/{fle2-Range-Decimal-InsertFind.json => fle2v2-Range-Decimal-InsertFind.json} (75%) rename test/client-side-encryption/spec/legacy/{fle2-Range-Decimal-Update.json => fle2v2-Range-Decimal-Update.json} (75%) rename test/client-side-encryption/spec/legacy/{fle2-Range-DecimalPrecision-Aggregate.json => fle2v2-Range-DecimalPrecision-Aggregate.json} (86%) rename test/client-side-encryption/spec/legacy/{fle2-Range-DecimalPrecision-Correctness.json => fle2v2-Range-DecimalPrecision-Correctness.json} (99%) rename test/client-side-encryption/spec/legacy/{fle2-Range-DecimalPrecision-Delete.json => fle2v2-Range-DecimalPrecision-Delete.json} (81%) rename test/client-side-encryption/spec/legacy/{fle2-Range-DecimalPrecision-FindOneAndUpdate.json => fle2v2-Range-DecimalPrecision-FindOneAndUpdate.json} (84%) rename test/client-side-encryption/spec/legacy/{fle2-Range-DecimalPrecision-InsertFind.json => fle2v2-Range-DecimalPrecision-InsertFind.json} (86%) rename test/client-side-encryption/spec/legacy/{fle2-Range-DecimalPrecision-Update.json => fle2v2-Range-DecimalPrecision-Update.json} (84%) rename test/client-side-encryption/spec/legacy/{fle2-Range-Double-Aggregate.json => fle2v2-Range-Double-Aggregate.json} (81%) rename test/client-side-encryption/spec/legacy/{fle2-Range-Double-Correctness.json => fle2v2-Range-Double-Correctness.json} (88%) rename test/client-side-encryption/spec/legacy/{fle2-Range-Double-Delete.json => fle2v2-Range-Double-Delete.json} (72%) rename test/client-side-encryption/spec/legacy/{fle2-Range-Double-FindOneAndUpdate.json => fle2v2-Range-Double-FindOneAndUpdate.json} (79%) rename test/client-side-encryption/spec/legacy/{fle2-Range-Double-InsertFind.json => fle2v2-Range-Double-InsertFind.json} (80%) rename test/client-side-encryption/spec/legacy/{fle2-Range-Double-Update.json => fle2v2-Range-Double-Update.json} (80%) rename test/client-side-encryption/spec/legacy/{fle2-Range-DoublePrecision-Aggregate.json => fle2v2-Range-DoublePrecision-Aggregate.json} (86%) rename test/client-side-encryption/spec/legacy/{fle2-Range-DoublePrecision-Correctness.json => fle2v2-Range-DoublePrecision-Correctness.json} (99%) rename test/client-side-encryption/spec/legacy/{fle2-Range-DoublePrecision-Delete.json => fle2v2-Range-DoublePrecision-Delete.json} (80%) rename 
test/client-side-encryption/spec/legacy/{fle2-Range-DoublePrecision-FindOneAndUpdate.json => fle2v2-Range-DoublePrecision-FindOneAndUpdate.json} (84%) rename test/client-side-encryption/spec/legacy/{fle2-Range-DoublePrecision-InsertFind.json => fle2v2-Range-DoublePrecision-InsertFind.json} (86%) rename test/client-side-encryption/spec/legacy/{fle2-Range-DoublePrecision-Update.json => fle2v2-Range-DoublePrecision-Update.json} (84%) rename test/client-side-encryption/spec/legacy/{fle2-Range-Int-Aggregate.json => fle2v2-Range-Int-Aggregate.json} (88%) rename test/client-side-encryption/spec/legacy/{fle2-Range-Int-Correctness.json => fle2v2-Range-Int-Correctness.json} (99%) rename test/client-side-encryption/spec/legacy/{fle2-Range-Int-Delete.json => fle2v2-Range-Int-Delete.json} (83%) rename test/client-side-encryption/spec/legacy/{fle2-Range-Int-FindOneAndUpdate.json => fle2v2-Range-Int-FindOneAndUpdate.json} (85%) rename test/client-side-encryption/spec/legacy/{fle2-Range-Int-InsertFind.json => fle2v2-Range-Int-InsertFind.json} (88%) rename test/client-side-encryption/spec/legacy/{fle2-Range-Int-Update.json => fle2v2-Range-Int-Update.json} (85%) rename test/client-side-encryption/spec/legacy/{fle2-Range-Long-Aggregate.json => fle2v2-Range-Long-Aggregate.json} (88%) rename test/client-side-encryption/spec/legacy/{fle2-Range-Long-Correctness.json => fle2v2-Range-Long-Correctness.json} (99%) rename test/client-side-encryption/spec/legacy/{fle2-Range-Long-Delete.json => fle2v2-Range-Long-Delete.json} (83%) rename test/client-side-encryption/spec/legacy/{fle2-Range-Long-FindOneAndUpdate.json => fle2v2-Range-Long-FindOneAndUpdate.json} (85%) rename test/client-side-encryption/spec/legacy/{fle2-Range-Long-InsertFind.json => fle2v2-Range-Long-InsertFind.json} (88%) rename test/client-side-encryption/spec/legacy/{fle2-Range-Long-Update.json => fle2v2-Range-Long-Update.json} (85%) rename test/client-side-encryption/spec/legacy/{fle2-Range-WrongType.json => fle2v2-Range-WrongType.json} (95%) rename test/client-side-encryption/spec/legacy/{fle2-Update.json => fle2v2-Update.json} (87%) rename test/client-side-encryption/spec/legacy/{fle2-validatorAndPartialFieldExpression.json => fle2v2-validatorAndPartialFieldExpression.json} (93%) create mode 100644 test/client-side-encryption/spec/legacy/timeoutMS.json diff --git a/doc/changelog.rst b/doc/changelog.rst index 3c0419f401..19830b09ac 100644 --- a/doc/changelog.rst +++ b/doc/changelog.rst @@ -8,9 +8,14 @@ Changes in Version 4.4 or keys to :meth:`~pymongo.collection.Collection.create_index`. - pymongocrypt 1.5.0 or later is now required for client side field level encryption support. -- Improved support for Pyright to improve typing support for IDEs like Visual Studio Code or Visual Studio. +- Improved support for Pyright to improve typing support for IDEs like Visual Studio Code + or Visual Studio. - Improved support for type-checking with MyPy "strict" mode (`--strict`). - Added support for Python 3.11. +- pymongocrypt 1.6.0 or later is now required for Client Side Field Level Encryption (CSFLE) + and Queryable Encryption (QE) support. MongoDB Server 7.0 introduced a backwards breaking + change to the QE protocol. Users taking advantage of the QE beta must now upgrade to + MongoDB 7.0+ and PyMongo 4.4+. Issues Resolved ............... 
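As the new changelog entry above notes, QEv2 raises the minimum pymongocrypt
to 1.6.0, so an application can fail fast rather than hit a protocol error at
encryption time. A minimal sketch — the ``assert_qev2_capable`` helper is ours,
not PyMongo's — assuming pymongocrypt exposes ``__version__`` as its released
wheels do and that the version is a plain "X.Y.Z" string (pre-release suffixes
would need real version parsing)::

    def assert_qev2_capable(minimum=(1, 6, 0)):
        # Raise early if the installed pymongocrypt predates the QEv2
        # protocol required by PyMongo 4.4+ and MongoDB 7.0+.
        try:
            import pymongocrypt
        except ImportError as exc:
            raise RuntimeError(
                'install "pymongo[encryption]" to use CSFLE/QE') from exc
        found = tuple(
            int(part) for part in pymongocrypt.__version__.split(".")[:3])
        if found < minimum:
            raise RuntimeError(
                "pymongocrypt>=1.6.0 is required for Queryable Encryption "
                "v2; found %s" % (pymongocrypt.__version__,))

    assert_qev2_capable()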
diff --git a/doc/examples/encryption.rst b/doc/examples/encryption.rst index 9978cb6e36..57c1a84b0f 100644 --- a/doc/examples/encryption.rst +++ b/doc/examples/encryption.rst @@ -364,13 +364,13 @@ data key and create a collection with the Automatic Queryable Encryption (Beta) ````````````````````````````````````` -PyMongo 4.2 brings beta support for Queryable Encryption with MongoDB >=6.0. +PyMongo 4.4 brings beta support for Queryable Encryption with MongoDB >=7.0. Queryable Encryption is the second version of Client-Side Field Level Encryption. Data is encrypted client-side. Queryable Encryption supports indexed encrypted fields, which are further processed server-side. -You must have MongoDB 6.0 Enterprise to preview the capability. +You must have MongoDB 7.0 Enterprise to preview the capability. Automatic encryption in Queryable Encryption is configured with an ``encrypted_fields`` mapping, as demonstrated by the following example:: @@ -396,7 +396,6 @@ Automatic encryption in Queryable Encryption is configured with an ``encrypted_f encrypted_fields_map = { "default.encryptedCollection": { "escCollection": "encryptedCollection.esc", - "eccCollection": "encryptedCollection.ecc", "ecocCollection": "encryptedCollection.ecoc", "fields": [ { @@ -429,7 +428,7 @@ automatically encrypted and decrypted. Explicit Queryable Encryption (Beta) ```````````````````````````````````` -PyMongo 4.2 brings beta support for Queryable Encryption with MongoDB >=6.0. +PyMongo 4.4 brings beta support for Queryable Encryption with MongoDB >=7.0. Queryable Encryption is the second version of Client-Side Field Level Encryption. Data is encrypted client-side. Queryable Encryption supports indexed encrypted fields, @@ -487,7 +486,6 @@ using an ``encrypted_fields`` mapping, as demonstrated by the following example: encrypted_fields = { "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", "ecocCollection": "enxcol_.default.ecoc", "fields": [ { diff --git a/pymongo/collection.py b/pymongo/collection.py index 0ff56d10cd..a5d3be9e05 100644 --- a/pymongo/collection.py +++ b/pymongo/collection.py @@ -43,7 +43,7 @@ from pymongo.change_stream import CollectionChangeStream from pymongo.collation import validate_collation_or_none from pymongo.command_cursor import CommandCursor, RawBatchCommandCursor -from pymongo.common import _ecc_coll_name, _ecoc_coll_name, _esc_coll_name +from pymongo.common import _ecoc_coll_name, _esc_coll_name from pymongo.cursor import Cursor, RawBatchCursor from pymongo.errors import ( ConfigurationError, @@ -232,8 +232,9 @@ def __init__( if encrypted_fields: common.validate_is_mapping("encrypted_fields", encrypted_fields) opts = {"clusteredIndex": {"key": {"_id": 1}, "unique": True}} - self.__create(_esc_coll_name(encrypted_fields, name), opts, None, session) - self.__create(_ecc_coll_name(encrypted_fields, name), opts, None, session) + self.__create( + _esc_coll_name(encrypted_fields, name), opts, None, session, qev2_required=True + ) self.__create(_ecoc_coll_name(encrypted_fields, name), opts, None, session) self.__create(name, kwargs, collation, session, encrypted_fields=encrypted_fields) self.create_index([("__safeContent__", ASCENDING)], session) @@ -305,7 +306,9 @@ def _command( user_fields=user_fields, ) - def __create(self, name, options, collation, session, encrypted_fields=None): + def __create( + self, name, options, collation, session, encrypted_fields=None, qev2_required=False + ): """Sends a create command with the given options.""" cmd = SON([("create", 
name)]) if encrypted_fields: @@ -316,6 +319,13 @@ def __create(self, name, options, collation, session, encrypted_fields=None): options["size"] = float(options["size"]) cmd.update(options) with self._socket_for_writes(session) as sock_info: + if qev2_required and sock_info.max_wire_version < 21: + raise ConfigurationError( + "Driver support of Queryable Encryption is incompatible with server. " + "Upgrade server to use Queryable Encryption. " + f"Got maxWireVersion {sock_info.max_wire_version} but need maxWireVersion >= 21 (MongoDB >=7.0)" + ) + self._command( sock_info, cmd, diff --git a/pymongo/common.py b/pymongo/common.py index 707cf5d23f..ba861c1545 100644 --- a/pymongo/common.py +++ b/pymongo/common.py @@ -822,10 +822,6 @@ def _esc_coll_name(encrypted_fields, name): return encrypted_fields.get("escCollection", f"enxcol_.{name}.esc") -def _ecc_coll_name(encrypted_fields, name): - return encrypted_fields.get("eccCollection", f"enxcol_.{name}.ecc") - - def _ecoc_coll_name(encrypted_fields, name): return encrypted_fields.get("ecocCollection", f"enxcol_.{name}.ecoc") diff --git a/pymongo/database.py b/pymongo/database.py index 6a73f884c5..358b946201 100644 --- a/pymongo/database.py +++ b/pymongo/database.py @@ -39,7 +39,7 @@ from pymongo.change_stream import DatabaseChangeStream from pymongo.collection import Collection from pymongo.command_cursor import CommandCursor -from pymongo.common import _ecc_coll_name, _ecoc_coll_name, _esc_coll_name +from pymongo.common import _ecoc_coll_name, _esc_coll_name from pymongo.errors import CollectionInvalid, InvalidName from pymongo.read_preferences import ReadPreference, _ServerMode from pymongo.typings import _CollationIn, _DocumentType, _DocumentTypeArg, _Pipeline @@ -394,7 +394,6 @@ def create_collection( { "escCollection": "enxcol_.encryptedCollection.esc", - "eccCollection": "enxcol_.encryptedCollection.ecc", "ecocCollection": "enxcol_.encryptedCollection.ecoc", "fields": [ { @@ -1009,7 +1008,6 @@ def drop_collection( { "escCollection": "enxcol_.encryptedCollection.esc", - "eccCollection": "enxcol_.encryptedCollection.ecc", "ecocCollection": "enxcol_.encryptedCollection.ecoc", "fields": [ { @@ -1061,9 +1059,6 @@ def drop_collection( self._drop_helper( _esc_coll_name(encrypted_fields, name), session=session, comment=comment ) - self._drop_helper( - _ecc_coll_name(encrypted_fields, name), session=session, comment=comment - ) self._drop_helper( _ecoc_coll_name(encrypted_fields, name), session=session, comment=comment ) diff --git a/pymongo/encryption.py b/pymongo/encryption.py index 4ad59d436e..4c46bf56ae 100644 --- a/pymongo/encryption.py +++ b/pymongo/encryption.py @@ -51,12 +51,13 @@ EncryptedCollectionError, EncryptionError, InvalidOperation, + PyMongoError, ServerSelectionTimeoutError, ) from pymongo.mongo_client import MongoClient from pymongo.network import BLOCKING_IO_ERRORS from pymongo.operations import UpdateOne -from pymongo.pool import PoolOptions, _configured_socket +from pymongo.pool import PoolOptions, _configured_socket, _raise_connection_failure from pymongo.read_concern import ReadConcern from pymongo.results import BulkWriteResult, DeleteResult from pymongo.ssl_support import get_ssl_context @@ -139,20 +140,26 @@ def kms_request(self, kms_context): ssl_context=ctx, ) host, port = parse_host(endpoint, _HTTPS_PORT) - conn = _configured_socket((host, port), opts) try: - conn.sendall(message) - while kms_context.bytes_needed > 0: - # CSOT: update timeout. 
- conn.settimeout(max(_csot.clamp_remaining(_KMS_CONNECT_TIMEOUT), 0)) - data = conn.recv(kms_context.bytes_needed) - if not data: - raise OSError("KMS connection closed") - kms_context.feed(data) - except BLOCKING_IO_ERRORS: - raise socket.timeout("timed out") - finally: - conn.close() + conn = _configured_socket((host, port), opts) + try: + conn.sendall(message) + while kms_context.bytes_needed > 0: + # CSOT: update timeout. + conn.settimeout(max(_csot.clamp_remaining(_KMS_CONNECT_TIMEOUT), 0)) + data = conn.recv(kms_context.bytes_needed) + if not data: + raise OSError("KMS connection closed") + kms_context.feed(data) + except BLOCKING_IO_ERRORS: + raise socket.timeout("timed out") + finally: + conn.close() + except (PyMongoError, MongoCryptError): + raise # Propagate pymongo errors directly. + except Exception as error: + # Wrap I/O errors in PyMongo exceptions. + _raise_connection_failure((host, port), error) def collection_info(self, database, filter): """Get the collection info for a namespace. @@ -588,7 +595,6 @@ def create_encrypted_collection( { "escCollection": "enxcol_.encryptedCollection.esc", - "eccCollection": "enxcol_.encryptedCollection.ecc", "ecocCollection": "enxcol_.encryptedCollection.ecoc", "fields": [ { diff --git a/pymongo/encryption_options.py b/pymongo/encryption_options.py index d8e9daad1f..0cb96d7dad 100644 --- a/pymongo/encryption_options.py +++ b/pymongo/encryption_options.py @@ -158,7 +158,6 @@ def __init__( { "db.encryptedCollection": { "escCollection": "enxcol_.encryptedCollection.esc", - "eccCollection": "enxcol_.encryptedCollection.ecc", "ecocCollection": "enxcol_.encryptedCollection.ecoc", "fields": [ { diff --git a/test/client-side-encryption/etc/data/encryptedFields-Range-Date.json b/test/client-side-encryption/etc/data/encryptedFields-Range-Date.json deleted file mode 100644 index c9ad1ffdd4..0000000000 --- a/test/client-side-encryption/etc/data/encryptedFields-Range-Date.json +++ /dev/null @@ -1,36 +0,0 @@ -{ - "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", - "ecocCollection": "enxcol_.default.ecoc", - "fields": [ - { - "keyId": { - "$binary": { - "base64": "EjRWeBI0mHYSNBI0VniQEg==", - "subType": "04" - } - }, - "path": "encryptedDate", - "bsonType": "date", - "queries": { - "queryType": "rangePreview", - "contention": { - "$numberLong": "0" - }, - "sparsity": { - "$numberLong": "1" - }, - "min": { - "$date": { - "$numberLong": "0" - } - }, - "max": { - "$date": { - "$numberLong": "200" - } - } - } - } - ] -} diff --git a/test/client-side-encryption/etc/data/encryptedFields-Range-Decimal.json b/test/client-side-encryption/etc/data/encryptedFields-Range-Decimal.json deleted file mode 100644 index f209536c9c..0000000000 --- a/test/client-side-encryption/etc/data/encryptedFields-Range-Decimal.json +++ /dev/null @@ -1,26 +0,0 @@ -{ - "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", - "ecocCollection": "enxcol_.default.ecoc", - "fields": [ - { - "keyId": { - "$binary": { - "base64": "EjRWeBI0mHYSNBI0VniQEg==", - "subType": "04" - } - }, - "path": "encryptedDecimal", - "bsonType": "decimal", - "queries": { - "queryType": "rangePreview", - "contention": { - "$numberLong": "0" - }, - "sparsity": { - "$numberLong": "1" - } - } - } - ] -} diff --git a/test/client-side-encryption/etc/data/encryptedFields-Range-DecimalPrecision.json b/test/client-side-encryption/etc/data/encryptedFields-Range-DecimalPrecision.json deleted file mode 100644 index e7634152ba..0000000000 --- 
a/test/client-side-encryption/etc/data/encryptedFields-Range-DecimalPrecision.json +++ /dev/null @@ -1,35 +0,0 @@ -{ - "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", - "ecocCollection": "enxcol_.default.ecoc", - "fields": [ - { - "keyId": { - "$binary": { - "base64": "EjRWeBI0mHYSNBI0VniQEg==", - "subType": "04" - } - }, - "path": "encryptedDecimalPrecision", - "bsonType": "decimal", - "queries": { - "queryType": "rangePreview", - "contention": { - "$numberLong": "0" - }, - "sparsity": { - "$numberLong": "1" - }, - "min": { - "$numberDecimal": "0.0" - }, - "max": { - "$numberDecimal": "200.0" - }, - "precision": { - "$numberInt": "2" - } - } - } - ] -} diff --git a/test/client-side-encryption/etc/data/encryptedFields-Range-Double.json b/test/client-side-encryption/etc/data/encryptedFields-Range-Double.json deleted file mode 100644 index 4e9e8d6d81..0000000000 --- a/test/client-side-encryption/etc/data/encryptedFields-Range-Double.json +++ /dev/null @@ -1,26 +0,0 @@ -{ - "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", - "ecocCollection": "enxcol_.default.ecoc", - "fields": [ - { - "keyId": { - "$binary": { - "base64": "EjRWeBI0mHYSNBI0VniQEg==", - "subType": "04" - } - }, - "path": "encryptedDouble", - "bsonType": "double", - "queries": { - "queryType": "rangePreview", - "contention": { - "$numberLong": "0" - }, - "sparsity": { - "$numberLong": "1" - } - } - } - ] -} diff --git a/test/client-side-encryption/etc/data/encryptedFields-Range-DoublePrecision.json b/test/client-side-encryption/etc/data/encryptedFields-Range-DoublePrecision.json deleted file mode 100644 index 17c725ec44..0000000000 --- a/test/client-side-encryption/etc/data/encryptedFields-Range-DoublePrecision.json +++ /dev/null @@ -1,35 +0,0 @@ -{ - "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", - "ecocCollection": "enxcol_.default.ecoc", - "fields": [ - { - "keyId": { - "$binary": { - "base64": "EjRWeBI0mHYSNBI0VniQEg==", - "subType": "04" - } - }, - "path": "encryptedDoublePrecision", - "bsonType": "double", - "queries": { - "queryType": "rangePreview", - "contention": { - "$numberLong": "0" - }, - "sparsity": { - "$numberLong": "1" - }, - "min": { - "$numberDouble": "0.0" - }, - "max": { - "$numberDouble": "200.0" - }, - "precision": { - "$numberInt": "2" - } - } - } - ] -} diff --git a/test/client-side-encryption/etc/data/encryptedFields-Range-Int.json b/test/client-side-encryption/etc/data/encryptedFields-Range-Int.json deleted file mode 100644 index 661d7395c5..0000000000 --- a/test/client-side-encryption/etc/data/encryptedFields-Range-Int.json +++ /dev/null @@ -1,32 +0,0 @@ -{ - "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", - "ecocCollection": "enxcol_.default.ecoc", - "fields": [ - { - "keyId": { - "$binary": { - "base64": "EjRWeBI0mHYSNBI0VniQEg==", - "subType": "04" - } - }, - "path": "encryptedInt", - "bsonType": "int", - "queries": { - "queryType": "rangePreview", - "contention": { - "$numberLong": "0" - }, - "sparsity": { - "$numberLong": "1" - }, - "min": { - "$numberInt": "0" - }, - "max": { - "$numberInt": "200" - } - } - } - ] -} diff --git a/test/client-side-encryption/etc/data/encryptedFields-Range-Long.json b/test/client-side-encryption/etc/data/encryptedFields-Range-Long.json deleted file mode 100644 index b36bfb2c46..0000000000 --- a/test/client-side-encryption/etc/data/encryptedFields-Range-Long.json +++ /dev/null @@ -1,32 +0,0 @@ -{ - "escCollection": 
"enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", - "ecocCollection": "enxcol_.default.ecoc", - "fields": [ - { - "keyId": { - "$binary": { - "base64": "EjRWeBI0mHYSNBI0VniQEg==", - "subType": "04" - } - }, - "path": "encryptedLong", - "bsonType": "long", - "queries": { - "queryType": "rangePreview", - "contention": { - "$numberLong": "0" - }, - "sparsity": { - "$numberLong": "1" - }, - "min": { - "$numberLong": "0" - }, - "max": { - "$numberLong": "200" - } - } - } - ] -} diff --git a/test/client-side-encryption/etc/data/encryptedFields.json b/test/client-side-encryption/etc/data/encryptedFields.json index 2364590e4c..88abe5a604 100644 --- a/test/client-side-encryption/etc/data/encryptedFields.json +++ b/test/client-side-encryption/etc/data/encryptedFields.json @@ -1,7 +1,4 @@ { - "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", - "ecocCollection": "enxcol_.default.ecoc", "fields": [ { "keyId": { @@ -30,4 +27,4 @@ "bsonType": "string" } ] -} +} \ No newline at end of file diff --git a/test/client-side-encryption/etc/data/range-encryptedFields-Date.json b/test/client-side-encryption/etc/data/range-encryptedFields-Date.json index e19fc1e182..97a2b2d4e5 100644 --- a/test/client-side-encryption/etc/data/range-encryptedFields-Date.json +++ b/test/client-side-encryption/etc/data/range-encryptedFields-Date.json @@ -1,30 +1,33 @@ { - "fields": [ - { - "keyId": { - "$binary": { - "base64": "EjRWeBI0mHYSNBI0VniQEg==", - "subType": "04" - } + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDate", + "bsonType": "date", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" }, - "path": "encryptedDate", - "bsonType": "date", - "queries": { - "queryType": "rangePreview", - "sparsity": { - "$numberLong": "1" - }, - "min": { - "$date": { - "$numberLong": "0" - } - }, - "max": { - "$date": { - "$numberLong": "200" + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$date": { + "$numberLong": "0" } + }, + "max": { + "$date": { + "$numberLong": "200" } } } - ] + } + ] } diff --git a/test/client-side-encryption/etc/data/range-encryptedFields-DecimalNoPrecision.json b/test/client-side-encryption/etc/data/range-encryptedFields-DecimalNoPrecision.json index c6d129d4ca..4d284475f4 100644 --- a/test/client-side-encryption/etc/data/range-encryptedFields-DecimalNoPrecision.json +++ b/test/client-side-encryption/etc/data/range-encryptedFields-DecimalNoPrecision.json @@ -1,21 +1,23 @@ { - "fields": [ - { - "keyId": { - "$binary": { - "base64": "EjRWeBI0mHYSNBI0VniQEg==", - "subType": "04" - } + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalNoPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" }, - "path": "encryptedDecimalNoPrecision", - "bsonType": "decimal", - "queries": { - "queryType": "rangePreview", - "sparsity": { - "$numberInt": "1" - } + "sparsity": { + "$numberLong": "1" } } - ] - } - \ No newline at end of file + } + ] +} diff --git a/test/client-side-encryption/etc/data/range-encryptedFields-DecimalPrecision.json b/test/client-side-encryption/etc/data/range-encryptedFields-DecimalPrecision.json index c23c3fa923..53449182b2 100644 --- a/test/client-side-encryption/etc/data/range-encryptedFields-DecimalPrecision.json +++ 
b/test/client-side-encryption/etc/data/range-encryptedFields-DecimalPrecision.json @@ -11,8 +11,11 @@ "bsonType": "decimal", "queries": { "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, "sparsity": { - "$numberInt": "1" + "$numberLong": "1" }, "min": { "$numberDecimal": "0.0" diff --git a/test/client-side-encryption/etc/data/range-encryptedFields-DoubleNoPrecision.json b/test/client-side-encryption/etc/data/range-encryptedFields-DoubleNoPrecision.json index 4af6422714..b478a772d7 100644 --- a/test/client-side-encryption/etc/data/range-encryptedFields-DoubleNoPrecision.json +++ b/test/client-side-encryption/etc/data/range-encryptedFields-DoubleNoPrecision.json @@ -1,21 +1,23 @@ { - "fields": [ - { - "keyId": { - "$binary": { - "base64": "EjRWeBI0mHYSNBI0VniQEg==", - "subType": "04" - } + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoubleNoPrecision", + "bsonType": "double", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" }, - "path": "encryptedDoubleNoPrecision", - "bsonType": "double", - "queries": { - "queryType": "rangePreview", - "sparsity": { - "$numberLong": "1" - } + "sparsity": { + "$numberLong": "1" } } - ] - } - \ No newline at end of file + } + ] +} diff --git a/test/client-side-encryption/etc/data/range-encryptedFields-DoublePrecision.json b/test/client-side-encryption/etc/data/range-encryptedFields-DoublePrecision.json index c1f388219d..395a369680 100644 --- a/test/client-side-encryption/etc/data/range-encryptedFields-DoublePrecision.json +++ b/test/client-side-encryption/etc/data/range-encryptedFields-DoublePrecision.json @@ -1,30 +1,32 @@ { - "fields": [ - { - "keyId": { - "$binary": { - "base64": "EjRWeBI0mHYSNBI0VniQEg==", - "subType": "04" - } + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoublePrecision", + "bsonType": "double", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDouble": "0.0" + }, + "max": { + "$numberDouble": "200.0" }, - "path": "encryptedDoublePrecision", - "bsonType": "double", - "queries": { - "queryType": "rangePreview", - "sparsity": { - "$numberLong": "1" - }, - "min": { - "$numberDouble": "0.0" - }, - "max": { - "$numberDouble": "200.0" - }, - "precision": { - "$numberInt": "2" - } + "precision": { + "$numberInt": "2" } } - ] - } - \ No newline at end of file + } + ] +} diff --git a/test/client-side-encryption/etc/data/range-encryptedFields-Int.json b/test/client-side-encryption/etc/data/range-encryptedFields-Int.json index 217bf6743c..61b7082dff 100644 --- a/test/client-side-encryption/etc/data/range-encryptedFields-Int.json +++ b/test/client-side-encryption/etc/data/range-encryptedFields-Int.json @@ -1,27 +1,29 @@ { - "fields": [ - { - "keyId": { - "$binary": { - "base64": "EjRWeBI0mHYSNBI0VniQEg==", - "subType": "04" - } + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedInt", + "bsonType": "int", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberInt": "0" }, - "path": "encryptedInt", - "bsonType": "int", - "queries": { - "queryType": "rangePreview", - "sparsity": { - "$numberLong": "1" - }, - "min": { - "$numberInt": "0" - }, - 
"max": { - "$numberInt": "200" - } + "max": { + "$numberInt": "200" } } - ] - } - \ No newline at end of file + } + ] +} diff --git a/test/client-side-encryption/etc/data/range-encryptedFields-Long.json b/test/client-side-encryption/etc/data/range-encryptedFields-Long.json index 0fb87edaef..b18b84b6e8 100644 --- a/test/client-side-encryption/etc/data/range-encryptedFields-Long.json +++ b/test/client-side-encryption/etc/data/range-encryptedFields-Long.json @@ -1,27 +1,29 @@ { - "fields": [ - { - "keyId": { - "$binary": { - "base64": "EjRWeBI0mHYSNBI0VniQEg==", - "subType": "04" - } + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedLong", + "bsonType": "long", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberLong": "0" }, - "path": "encryptedLong", - "bsonType": "long", - "queries": { - "queryType": "rangePreview", - "sparsity": { - "$numberLong": "1" - }, - "min": { - "$numberLong": "0" - }, - "max": { - "$numberLong": "200" - } + "max": { + "$numberLong": "200" } } - ] - } - \ No newline at end of file + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/bypassedCommand.json b/test/client-side-encryption/spec/legacy/bypassedCommand.json index bd0b1c565d..18054a70cb 100644 --- a/test/client-side-encryption/spec/legacy/bypassedCommand.json +++ b/test/client-side-encryption/spec/legacy/bypassedCommand.json @@ -78,7 +78,7 @@ ] }, { - "description": "current op is not bypassed", + "description": "kill op is not bypassed", "clientOptions": { "autoEncryptOpts": { "kmsProviders": { @@ -90,14 +90,15 @@ { "name": "runCommand", "object": "database", - "command_name": "currentOp", + "command_name": "killOp", "arguments": { "command": { - "currentOp": 1 + "killOp": 1, + "op": 1234 } }, "result": { - "errorContains": "command not supported for auto encryption: currentOp" + "errorContains": "command not supported for auto encryption: killOp" } } ] diff --git a/test/client-side-encryption/spec/legacy/fle2-BypassQueryAnalysis.json b/test/client-side-encryption/spec/legacy/fle2v2-BypassQueryAnalysis.json similarity index 65% rename from test/client-side-encryption/spec/legacy/fle2-BypassQueryAnalysis.json rename to test/client-side-encryption/spec/legacy/fle2v2-BypassQueryAnalysis.json index b8d06e8bcd..dcc3983ae0 100644 --- a/test/client-side-encryption/spec/legacy/fle2-BypassQueryAnalysis.json +++ b/test/client-side-encryption/spec/legacy/fle2v2-BypassQueryAnalysis.json @@ -1,7 +1,8 @@ { "runOn": [ { - "minServerVersion": "6.0.0", + "minServerVersion": "7.0.0", + "serverless": "forbid", "topology": [ "replicaset", "sharded", @@ -13,9 +14,6 @@ "collection_name": "default", "data": [], "encrypted_fields": { - "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", - "ecocCollection": "enxcol_.default.ecoc", "fields": [ { "keyId": { @@ -75,36 +73,6 @@ "masterKey": { "provider": "local" } - }, - { - "_id": { - "$binary": { - "base64": "q83vqxI0mHYSNBI0VniQEg==", - "subType": "04" - } - }, - "keyMaterial": { - "$binary": { - "base64": "HBk9BWihXExNDvTp1lUxOuxuZK2Pe2ZdVdlsxPEBkiO1bS4mG5NNDsQ7zVxJAH8BtdOYp72Ku4Y3nwc0BUpIKsvAKX4eYXtlhv5zUQxWdeNFhg9qK7qb8nqhnnLeT0f25jFSqzWJoT379hfwDeu0bebJHr35QrJ8myZdPMTEDYF08QYQ48ShRBli0S+QzBHHAQiM2iJNr4svg2WR8JSeWQ==", - "subType": "00" - } - }, - "creationDate": { - "$date": { - "$numberLong": "1648914851981" - } - }, - "updateDate": { - "$date": { - 
"$numberLong": "1648914851981" - } - }, - "status": { - "$numberInt": "0" - }, - "masterKey": { - "provider": "local" - } } ], "tests": [ @@ -133,7 +101,7 @@ "_id": 1, "encryptedIndexed": { "$binary": { - "base64": "BHEBAAAFZAAgAAAAAHb62aV7+mqmaGcotPLdG3KP7S8diFwWMLM/5rYtqLrEBXMAIAAAAAAVJ6OWHRv3OtCozHpt3ZzfBhaxZirLv3B+G8PuaaO4EgVjACAAAAAAsZXWOWA+UiCBbrJNB6bHflB/cn7pWSvwWN2jw4FPeIUFcABQAAAAAMdD1nV2nqeI1eXEQNskDflCy8I7/HvvqDKJ6XxjhrPQWdLqjz+8GosGUsB7A8ee/uG9/guENuL25XD+Fxxkv1LLXtavHOlLF7iW0u9yabqqBXUAEAAAAAQSNFZ4EjSYdhI0EjRWeJASEHQAAgAAAAV2AE0AAAAAq83vqxI0mHYSNBI0VniQEkzZZBBDgeZh+h+gXEmOrSFtVvkUcnHWj/rfPW7iJ0G3UJ8zpuBmUM/VjOMJCY4+eDqdTiPIwX+/vNXegc8FZQAgAAAAAOuac/eRLYakKX6B0vZ1r3QodOQFfjqJD+xlGiPu4/PsAA==", + "base64": "C18BAAAFZAAgAAAAANnt+eLTkv4GdDPl8IAfJOvTzArOgFJQ2S/DcLza4W0DBXMAIAAAAAD2u+omZme3P2gBPehMQyQHQ153tPN1+z7bksYA9jKTpAVwADAAAAAAUnCOQqIvmR65YKyYnsiVfVrg9hwUVO3RhhKExo3RWOzgaS0QdsBL5xKFS0JhZSoWBXUAEAAAAAQSNFZ4EjSYdhI0EjRWeJASEHQAAgAAAAV2AFAAAAAAEjRWeBI0mHYSNBI0VniQEpQbp/ZJpWBKeDtKLiXb0P2E9wvc0g3f373jnYQYlJquOrlPOoEy3ngsHPJuSUijvWDsrQzqYa349K7G/66qaXEFZQAgAAAAAOuac/eRLYakKX6B0vZ1r3QodOQFfjqJD+xlGiPu4/PsBWwAIAAAAACkm0o9bj6j0HuADKc0svbqO2UHj6GrlNdF6yKNxh63xRJrAAAAAAAAAAAAAA==", "subType": "06" } } @@ -150,7 +118,7 @@ "result": [ { "_id": 1, - "encryptedIndexed": "value123" + "encryptedIndexed": "123" } ] } @@ -176,13 +144,50 @@ "_id": 1, "encryptedIndexed": { "$binary": { - "base64": "BHEBAAAFZAAgAAAAAHb62aV7+mqmaGcotPLdG3KP7S8diFwWMLM/5rYtqLrEBXMAIAAAAAAVJ6OWHRv3OtCozHpt3ZzfBhaxZirLv3B+G8PuaaO4EgVjACAAAAAAsZXWOWA+UiCBbrJNB6bHflB/cn7pWSvwWN2jw4FPeIUFcABQAAAAAMdD1nV2nqeI1eXEQNskDflCy8I7/HvvqDKJ6XxjhrPQWdLqjz+8GosGUsB7A8ee/uG9/guENuL25XD+Fxxkv1LLXtavHOlLF7iW0u9yabqqBXUAEAAAAAQSNFZ4EjSYdhI0EjRWeJASEHQAAgAAAAV2AE0AAAAAq83vqxI0mHYSNBI0VniQEkzZZBBDgeZh+h+gXEmOrSFtVvkUcnHWj/rfPW7iJ0G3UJ8zpuBmUM/VjOMJCY4+eDqdTiPIwX+/vNXegc8FZQAgAAAAAOuac/eRLYakKX6B0vZ1r3QodOQFfjqJD+xlGiPu4/PsAA==", + "base64": "C18BAAAFZAAgAAAAANnt+eLTkv4GdDPl8IAfJOvTzArOgFJQ2S/DcLza4W0DBXMAIAAAAAD2u+omZme3P2gBPehMQyQHQ153tPN1+z7bksYA9jKTpAVwADAAAAAAUnCOQqIvmR65YKyYnsiVfVrg9hwUVO3RhhKExo3RWOzgaS0QdsBL5xKFS0JhZSoWBXUAEAAAAAQSNFZ4EjSYdhI0EjRWeJASEHQAAgAAAAV2AFAAAAAAEjRWeBI0mHYSNBI0VniQEpQbp/ZJpWBKeDtKLiXb0P2E9wvc0g3f373jnYQYlJquOrlPOoEy3ngsHPJuSUijvWDsrQzqYa349K7G/66qaXEFZQAgAAAAAOuac/eRLYakKX6B0vZ1r3QodOQFfjqJD+xlGiPu4/PsBWwAIAAAAACkm0o9bj6j0HuADKc0svbqO2UHj6GrlNdF6yKNxh63xRJrAAAAAAAAAAAAAA==", "subType": "06" } } } ], - "ordered": true + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedIndexed", + "bsonType": "string", + "queries": { + "queryType": "equality", + "contention": { + "$numberLong": "0" + } + } + }, + { + "keyId": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedUnindexed", + "bsonType": "string" + } + ] + } + } + } }, "command_name": "insert" } @@ -230,39 +235,6 @@ }, "command_name": "find" } - }, - { - "command_started_event": { - "command": { - "find": "datakeys", - "filter": { - "$or": [ - { - "_id": { - "$in": [ - { - "$binary": { - "base64": "q83vqxI0mHYSNBI0VniQEg==", - "subType": "04" - } - } - ] - } - }, - { - "keyAltNames": { - "$in": [] - } - } - ] - }, - "$db": "keyvault", - "readConcern": { - "level": "majority" - } - }, - "command_name": "find" - } } ], "outcome": { @@ -276,7 +248,7 
@@ "__safeContent__": [ { "$binary": { - "base64": "ThpoKfQ8AkOzkFfNC1+9PF0pY2nIzfXvRdxQgjkNbBw=", + "base64": "31eCYlbQoVboc5zwC8IoyJVSkag9PxREka8dkmbXJeY=", "subType": "00" } } diff --git a/test/client-side-encryption/spec/legacy/fle2-Compact.json b/test/client-side-encryption/spec/legacy/fle2v2-Compact.json similarity index 97% rename from test/client-side-encryption/spec/legacy/fle2-Compact.json rename to test/client-side-encryption/spec/legacy/fle2v2-Compact.json index 6ca0f9ba02..e47c689bf0 100644 --- a/test/client-side-encryption/spec/legacy/fle2-Compact.json +++ b/test/client-side-encryption/spec/legacy/fle2v2-Compact.json @@ -1,7 +1,8 @@ { "runOn": [ { - "minServerVersion": "6.0.0", + "minServerVersion": "7.0.0", + "serverless": "forbid", "topology": [ "replicaset", "sharded", @@ -13,9 +14,6 @@ "collection_name": "default", "data": [], "encrypted_fields": { - "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", - "ecocCollection": "enxcol_.default.ecoc", "fields": [ { "keyId": { diff --git a/test/client-side-encryption/spec/legacy/fle2v2-CreateCollection-OldServer.json b/test/client-side-encryption/spec/legacy/fle2v2-CreateCollection-OldServer.json new file mode 100644 index 0000000000..d5b04b3ea5 --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2v2-CreateCollection-OldServer.json @@ -0,0 +1,62 @@ +{ + "runOn": [ + { + "minServerVersion": "6.0.0", + "maxServerVersion": "6.3.99", + "topology": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "tests": [ + { + "description": "driver returns an error if creating a QEv2 collection on unsupported server", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "aws": {} + }, + "encryptedFieldsMap": { + "default.encryptedCollection": { + "fields": [ + { + "path": "firstName", + "bsonType": "string", + "keyId": { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + } + ] + } + } + } + }, + "operations": [ + { + "name": "dropCollection", + "object": "database", + "arguments": { + "collection": "encryptedCollection" + } + }, + { + "name": "createCollection", + "object": "database", + "arguments": { + "collection": "encryptedCollection" + }, + "result": { + "errorContains": "Driver support of Queryable Encryption is incompatible with server. Upgrade server to use Queryable Encryption." 
+ } + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2-CreateCollection.json b/test/client-side-encryption/spec/legacy/fle2v2-CreateCollection.json similarity index 74% rename from test/client-side-encryption/spec/legacy/fle2-CreateCollection.json rename to test/client-side-encryption/spec/legacy/fle2v2-CreateCollection.json index 7f4f38161e..819d2eec3c 100644 --- a/test/client-side-encryption/spec/legacy/fle2-CreateCollection.json +++ b/test/client-side-encryption/spec/legacy/fle2v2-CreateCollection.json @@ -1,7 +1,8 @@ { "runOn": [ { - "minServerVersion": "6.0.0", + "minServerVersion": "7.0.0", + "serverless": "forbid", "topology": [ "replicaset", "sharded", @@ -21,9 +22,6 @@ }, "encryptedFieldsMap": { "default.encryptedCollection": { - "escCollection": "enxcol_.encryptedCollection.esc", - "eccCollection": "enxcol_.encryptedCollection.ecc", - "ecocCollection": "enxcol_.encryptedCollection.ecoc", "fields": [ { "path": "firstName", @@ -64,7 +62,7 @@ } }, { - "name": "assertCollectionExists", + "name": "assertCollectionNotExists", "object": "testRunner", "arguments": { "database": "default", @@ -107,15 +105,6 @@ "database_name": "default" } }, - { - "command_started_event": { - "command": { - "drop": "enxcol_.encryptedCollection.ecc" - }, - "command_name": "drop", - "database_name": "default" - } - }, { "command_started_event": { "command": { @@ -149,21 +138,6 @@ "database_name": "default" } }, - { - "command_started_event": { - "command": { - "create": "enxcol_.encryptedCollection.ecc", - "clusteredIndex": { - "key": { - "_id": 1 - }, - "unique": true - } - }, - "command_name": "create", - "database_name": "default" - } - }, { "command_started_event": { "command": { @@ -184,9 +158,9 @@ "command": { "create": "encryptedCollection", "encryptedFields": { - "escCollection": "enxcol_.encryptedCollection.esc", - "eccCollection": "enxcol_.encryptedCollection.ecc", - "ecocCollection": "enxcol_.encryptedCollection.ecoc", + "escCollection": null, + "ecocCollection": null, + "eccCollection": null, "fields": [ { "path": "firstName", @@ -242,12 +216,6 @@ "subType": "04", "base64": "AAAAAAAAAAAAAAAAAAAAAA==" } - }, - "queries": { - "queryType": "equality", - "contention": { - "$numberLong": "0" - } } } ] @@ -279,7 +247,7 @@ } }, { - "name": "assertCollectionExists", + "name": "assertCollectionNotExists", "object": "testRunner", "arguments": { "database": "default", @@ -322,15 +290,6 @@ "database_name": "default" } }, - { - "command_started_event": { - "command": { - "drop": "enxcol_.encryptedCollection.ecc" - }, - "command_name": "drop", - "database_name": "default" - } - }, { "command_started_event": { "command": { @@ -364,21 +323,6 @@ "database_name": "default" } }, - { - "command_started_event": { - "command": { - "create": "enxcol_.encryptedCollection.ecc", - "clusteredIndex": { - "key": { - "_id": 1 - }, - "unique": true - } - }, - "command_name": "create", - "database_name": "default" - } - }, { "command_started_event": { "command": { @@ -399,6 +343,9 @@ "command": { "create": "encryptedCollection", "encryptedFields": { + "escCollection": null, + "ecocCollection": null, + "eccCollection": null, "fields": [ { "path": "firstName", @@ -408,12 +355,6 @@ "subType": "04", "base64": "AAAAAAAAAAAAAAAAAAAAAA==" } - }, - "queries": { - "queryType": "equality", - "contention": { - "$numberLong": "0" - } } } ] @@ -460,12 +401,6 @@ "subType": "04", "base64": "AAAAAAAAAAAAAAAAAAAAAA==" } - }, - "queries": { - "queryType": "equality", - "contention": { - "$numberLong": "0" - } } } ] @@ 
-497,7 +432,7 @@ } }, { - "name": "assertCollectionExists", + "name": "assertCollectionNotExists", "object": "testRunner", "arguments": { "database": "default", @@ -536,14 +471,6 @@ "collection": "encryptedCollection" } }, - { - "name": "assertCollectionNotExists", - "object": "testRunner", - "arguments": { - "database": "default", - "collection": "enxcol_.encryptedCollection.ecc" - } - }, { "name": "assertCollectionNotExists", "object": "testRunner", @@ -580,15 +507,6 @@ "database_name": "default" } }, - { - "command_started_event": { - "command": { - "drop": "enxcol_.encryptedCollection.ecc" - }, - "command_name": "drop", - "database_name": "default" - } - }, { "command_started_event": { "command": { @@ -622,21 +540,6 @@ "database_name": "default" } }, - { - "command_started_event": { - "command": { - "create": "enxcol_.encryptedCollection.ecc", - "clusteredIndex": { - "key": { - "_id": 1 - }, - "unique": true - } - }, - "command_name": "create", - "database_name": "default" - } - }, { "command_started_event": { "command": { @@ -666,12 +569,6 @@ "subType": "04", "base64": "AAAAAAAAAAAAAAAAAAAAAA==" } - }, - "queries": { - "queryType": "equality", - "contention": { - "$numberLong": "0" - } } } ] @@ -707,156 +604,6 @@ "database_name": "default" } }, - { - "command_started_event": { - "command": { - "drop": "enxcol_.encryptedCollection.ecc" - }, - "command_name": "drop", - "database_name": "default" - } - }, - { - "command_started_event": { - "command": { - "drop": "enxcol_.encryptedCollection.ecoc" - }, - "command_name": "drop", - "database_name": "default" - } - }, - { - "command_started_event": { - "command": { - "drop": "encryptedCollection" - }, - "command_name": "drop", - "database_name": "default" - } - } - ] - }, - { - "description": "encryptedFieldsMap with cyclic entries does not loop", - "clientOptions": { - "autoEncryptOpts": { - "kmsProviders": { - "aws": {} - }, - "encryptedFieldsMap": { - "default.encryptedCollection": { - "escCollection": "enxcol_.encryptedCollection.esc", - "eccCollection": "enxcol_.encryptedCollection.ecc", - "ecocCollection": "enxcol_.encryptedCollection.ecoc", - "fields": [ - { - "path": "firstName", - "bsonType": "string", - "keyId": { - "$binary": { - "subType": "04", - "base64": "AAAAAAAAAAAAAAAAAAAAAA==" - } - } - } - ] - }, - "default.encryptedCollection.esc": { - "escCollection": "enxcol_.encryptedCollection.esc", - "eccCollection": "enxcol_.encryptedCollection.ecc", - "ecocCollection": "enxcol_.encryptedCollection.ecoc", - "fields": [ - { - "path": "firstName", - "bsonType": "string", - "keyId": { - "$binary": { - "subType": "04", - "base64": "AAAAAAAAAAAAAAAAAAAAAA==" - } - } - } - ] - } - } - } - }, - "operations": [ - { - "name": "dropCollection", - "object": "database", - "arguments": { - "collection": "encryptedCollection" - } - }, - { - "name": "createCollection", - "object": "database", - "arguments": { - "collection": "encryptedCollection" - } - }, - { - "name": "assertCollectionExists", - "object": "testRunner", - "arguments": { - "database": "default", - "collection": "enxcol_.encryptedCollection.esc" - } - }, - { - "name": "assertCollectionExists", - "object": "testRunner", - "arguments": { - "database": "default", - "collection": "enxcol_.encryptedCollection.ecc" - } - }, - { - "name": "assertCollectionExists", - "object": "testRunner", - "arguments": { - "database": "default", - "collection": "enxcol_.encryptedCollection.ecoc" - } - }, - { - "name": "assertCollectionExists", - "object": "testRunner", - "arguments": { - "database": 
"default", - "collection": "encryptedCollection" - } - }, - { - "name": "assertIndexExists", - "object": "testRunner", - "arguments": { - "database": "default", - "collection": "encryptedCollection", - "index": "__safeContent___1" - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "drop": "enxcol_.encryptedCollection.esc" - }, - "command_name": "drop", - "database_name": "default" - } - }, - { - "command_started_event": { - "command": { - "drop": "enxcol_.encryptedCollection.ecc" - }, - "command_name": "drop", - "database_name": "default" - } - }, { "command_started_event": { "command": { @@ -874,94 +621,6 @@ "command_name": "drop", "database_name": "default" } - }, - { - "command_started_event": { - "command": { - "create": "enxcol_.encryptedCollection.esc", - "clusteredIndex": { - "key": { - "_id": 1 - }, - "unique": true - } - }, - "command_name": "create", - "database_name": "default" - } - }, - { - "command_started_event": { - "command": { - "create": "enxcol_.encryptedCollection.ecc", - "clusteredIndex": { - "key": { - "_id": 1 - }, - "unique": true - } - }, - "command_name": "create", - "database_name": "default" - } - }, - { - "command_started_event": { - "command": { - "create": "enxcol_.encryptedCollection.ecoc", - "clusteredIndex": { - "key": { - "_id": 1 - }, - "unique": true - } - }, - "command_name": "create", - "database_name": "default" - } - }, - { - "command_started_event": { - "command": { - "create": "encryptedCollection", - "encryptedFields": { - "escCollection": "enxcol_.encryptedCollection.esc", - "eccCollection": "enxcol_.encryptedCollection.ecc", - "ecocCollection": "enxcol_.encryptedCollection.ecoc", - "fields": [ - { - "path": "firstName", - "bsonType": "string", - "keyId": { - "$binary": { - "subType": "04", - "base64": "AAAAAAAAAAAAAAAAAAAAAA==" - } - } - } - ] - } - }, - "command_name": "create", - "database_name": "default" - } - }, - { - "command_started_event": { - "command": { - "createIndexes": "encryptedCollection", - "indexes": [ - { - "name": "__safeContent___1", - "key": { - "__safeContent__": 1 - } - } - ] - }, - "command_name": "createIndexes", - "database_name": "default" - } } ] }, @@ -974,9 +633,6 @@ }, "encryptedFieldsMap": { "default.encryptedCollection": { - "escCollection": "enxcol_.encryptedCollection.esc", - "eccCollection": "enxcol_.encryptedCollection.ecc", - "ecocCollection": "enxcol_.encryptedCollection.ecoc", "fields": [ { "path": "firstName", @@ -1059,9 +715,6 @@ }, "encryptedFieldsMap": { "default.encryptedCollection": { - "escCollection": "enxcol_.encryptedCollection.esc", - "eccCollection": "enxcol_.encryptedCollection.ecc", - "ecocCollection": "enxcol_.encryptedCollection.ecoc", "fields": [ { "path": "firstName", @@ -1102,7 +755,7 @@ } }, { - "name": "assertCollectionExists", + "name": "assertCollectionNotExists", "object": "testRunner", "arguments": { "database": "default", @@ -1145,15 +798,6 @@ "database_name": "default" } }, - { - "command_started_event": { - "command": { - "drop": "enxcol_.encryptedCollection.ecc" - }, - "command_name": "drop", - "database_name": "default" - } - }, { "command_started_event": { "command": { @@ -1187,21 +831,6 @@ "database_name": "default" } }, - { - "command_started_event": { - "command": { - "create": "enxcol_.encryptedCollection.ecc", - "clusteredIndex": { - "key": { - "_id": 1 - }, - "unique": true - } - }, - "command_name": "create", - "database_name": "default" - } - }, { "command_started_event": { "command": { @@ -1222,9 +851,9 @@ "command": { "create": 
"encryptedCollection", "encryptedFields": { - "escCollection": "enxcol_.encryptedCollection.esc", - "eccCollection": "enxcol_.encryptedCollection.ecc", - "ecocCollection": "enxcol_.encryptedCollection.ecoc", + "escCollection": null, + "ecocCollection": null, + "eccCollection": null, "fields": [ { "path": "firstName", @@ -1278,9 +907,6 @@ "arguments": { "collection": "encryptedCollection", "encryptedFields": { - "escCollection": "enxcol_.encryptedCollection.esc", - "eccCollection": "enxcol_.encryptedCollection.ecc", - "ecocCollection": "enxcol_.encryptedCollection.ecoc", "fields": [ { "path": "firstName", @@ -1302,9 +928,6 @@ "arguments": { "collection": "encryptedCollection", "encryptedFields": { - "escCollection": "enxcol_.encryptedCollection.esc", - "eccCollection": "enxcol_.encryptedCollection.ecc", - "ecocCollection": "enxcol_.encryptedCollection.ecoc", "fields": [ { "path": "firstName", @@ -1329,7 +952,7 @@ } }, { - "name": "assertCollectionExists", + "name": "assertCollectionNotExists", "object": "testRunner", "arguments": { "database": "default", @@ -1372,15 +995,6 @@ "database_name": "default" } }, - { - "command_started_event": { - "command": { - "drop": "enxcol_.encryptedCollection.ecc" - }, - "command_name": "drop", - "database_name": "default" - } - }, { "command_started_event": { "command": { @@ -1414,21 +1028,6 @@ "database_name": "default" } }, - { - "command_started_event": { - "command": { - "create": "enxcol_.encryptedCollection.ecc", - "clusteredIndex": { - "key": { - "_id": 1 - }, - "unique": true - } - }, - "command_name": "create", - "database_name": "default" - } - }, { "command_started_event": { "command": { @@ -1449,9 +1048,9 @@ "command": { "create": "encryptedCollection", "encryptedFields": { - "escCollection": "enxcol_.encryptedCollection.esc", - "eccCollection": "enxcol_.encryptedCollection.ecc", - "ecocCollection": "enxcol_.encryptedCollection.ecoc", + "escCollection": null, + "ecocCollection": null, + "eccCollection": null, "fields": [ { "path": "firstName", @@ -1510,9 +1109,6 @@ }, "encryptedFieldsMap": { "default.encryptedCollection": { - "escCollection": "enxcol_.encryptedCollection.esc", - "eccCollection": "enxcol_.encryptedCollection.ecc", - "ecocCollection": "enxcol_.encryptedCollection.ecoc", "fields": [ { "path": "firstName", @@ -1548,15 +1144,6 @@ "database_name": "default" } }, - { - "command_started_event": { - "command": { - "drop": "enxcol_.encryptedCollection.ecc" - }, - "command_name": "drop", - "database_name": "default" - } - }, { "command_started_event": { "command": { @@ -1594,9 +1181,6 @@ "arguments": { "collection": "encryptedCollection", "encryptedFields": { - "escCollection": "enxcol_.encryptedCollection.esc", - "eccCollection": "enxcol_.encryptedCollection.ecc", - "ecocCollection": "enxcol_.encryptedCollection.ecoc", "fields": [ { "path": "firstName", @@ -1618,9 +1202,6 @@ "arguments": { "collection": "encryptedCollection", "encryptedFields": { - "escCollection": "enxcol_.encryptedCollection.esc", - "eccCollection": "enxcol_.encryptedCollection.ecc", - "ecocCollection": "enxcol_.encryptedCollection.ecoc", "fields": [ { "path": "firstName", @@ -1645,7 +1226,7 @@ } }, { - "name": "assertCollectionExists", + "name": "assertCollectionNotExists", "object": "testRunner", "arguments": { "database": "default", @@ -1683,9 +1264,6 @@ "arguments": { "collection": "encryptedCollection", "encryptedFields": { - "escCollection": "enxcol_.encryptedCollection.esc", - "eccCollection": "enxcol_.encryptedCollection.ecc", - "ecocCollection": 
"enxcol_.encryptedCollection.ecoc", "fields": [ { "path": "firstName", @@ -1709,14 +1287,6 @@ "collection": "enxcol_.encryptedCollection.esc" } }, - { - "name": "assertCollectionNotExists", - "object": "testRunner", - "arguments": { - "database": "default", - "collection": "enxcol_.encryptedCollection.ecc" - } - }, { "name": "assertCollectionNotExists", "object": "testRunner", @@ -1744,15 +1314,6 @@ "database_name": "default" } }, - { - "command_started_event": { - "command": { - "drop": "enxcol_.encryptedCollection.ecc" - }, - "command_name": "drop", - "database_name": "default" - } - }, { "command_started_event": { "command": { @@ -1786,21 +1347,6 @@ "database_name": "default" } }, - { - "command_started_event": { - "command": { - "create": "enxcol_.encryptedCollection.ecc", - "clusteredIndex": { - "key": { - "_id": 1 - }, - "unique": true - } - }, - "command_name": "create", - "database_name": "default" - } - }, { "command_started_event": { "command": { @@ -1821,9 +1367,9 @@ "command": { "create": "encryptedCollection", "encryptedFields": { - "escCollection": "enxcol_.encryptedCollection.esc", - "eccCollection": "enxcol_.encryptedCollection.ecc", - "ecocCollection": "enxcol_.encryptedCollection.ecoc", + "escCollection": null, + "ecocCollection": null, + "eccCollection": null, "fields": [ { "path": "firstName", @@ -1880,15 +1426,6 @@ "database_name": "default" } }, - { - "command_started_event": { - "command": { - "drop": "enxcol_.encryptedCollection.ecc" - }, - "command_name": "drop", - "database_name": "default" - } - }, { "command_started_event": { "command": { @@ -1926,9 +1463,6 @@ "arguments": { "collection": "encryptedCollection", "encryptedFields": { - "escCollection": "enxcol_.encryptedCollection.esc", - "eccCollection": "enxcol_.encryptedCollection.ecc", - "ecocCollection": "enxcol_.encryptedCollection.ecoc", "fields": [ { "path": "firstName", @@ -1950,9 +1484,6 @@ "arguments": { "collection": "encryptedCollection", "encryptedFields": { - "escCollection": "enxcol_.encryptedCollection.esc", - "eccCollection": "enxcol_.encryptedCollection.ecc", - "ecocCollection": "enxcol_.encryptedCollection.ecoc", "fields": [ { "path": "firstName", @@ -1977,7 +1508,7 @@ } }, { - "name": "assertCollectionExists", + "name": "assertCollectionNotExists", "object": "testRunner", "arguments": { "database": "default", @@ -2024,14 +1555,6 @@ "collection": "enxcol_.encryptedCollection.esc" } }, - { - "name": "assertCollectionNotExists", - "object": "testRunner", - "arguments": { - "database": "default", - "collection": "enxcol_.encryptedCollection.ecc" - } - }, { "name": "assertCollectionNotExists", "object": "testRunner", @@ -2059,15 +1582,6 @@ "database_name": "default" } }, - { - "command_started_event": { - "command": { - "drop": "enxcol_.encryptedCollection.ecc" - }, - "command_name": "drop", - "database_name": "default" - } - }, { "command_started_event": { "command": { @@ -2101,21 +1615,6 @@ "database_name": "default" } }, - { - "command_started_event": { - "command": { - "create": "enxcol_.encryptedCollection.ecc", - "clusteredIndex": { - "key": { - "_id": 1 - }, - "unique": true - } - }, - "command_name": "create", - "database_name": "default" - } - }, { "command_started_event": { "command": { @@ -2136,9 +1635,9 @@ "command": { "create": "encryptedCollection", "encryptedFields": { - "escCollection": "enxcol_.encryptedCollection.esc", - "eccCollection": "enxcol_.encryptedCollection.ecc", - "ecocCollection": "enxcol_.encryptedCollection.ecoc", + "escCollection": null, + "ecocCollection": null, + 
"eccCollection": null, "fields": [ { "path": "firstName", @@ -2210,7 +1709,7 @@ { "command_started_event": { "command": { - "drop": "enxcol_.encryptedCollection.ecc" + "drop": "enxcol_.encryptedCollection.ecoc" }, "command_name": "drop", "database_name": "default" @@ -2219,19 +1718,57 @@ { "command_started_event": { "command": { - "drop": "enxcol_.encryptedCollection.ecoc" + "drop": "encryptedCollection" }, "command_name": "drop", "database_name": "default" } + } + ] + }, + { + "description": "encryptedFields are consulted for metadata collection names", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "aws": {} + }, + "encryptedFieldsMap": { + "default.encryptedCollection": { + "escCollection": "invalid_esc_name", + "ecocCollection": "invalid_ecoc_name", + "fields": [ + { + "path": "firstName", + "bsonType": "string", + "keyId": { + "$binary": { + "subType": "04", + "base64": "AAAAAAAAAAAAAAAAAAAAAA==" + } + } + } + ] + } + } + } + }, + "operations": [ + { + "name": "dropCollection", + "object": "database", + "arguments": { + "collection": "encryptedCollection" + } }, { - "command_started_event": { - "command": { - "drop": "encryptedCollection" - }, - "command_name": "drop", - "database_name": "default" + "name": "createCollection", + "object": "database", + "arguments": { + "collection": "encryptedCollection" + }, + "result": { + "errorContains": "Encrypted State Collection name should follow" } } ] diff --git a/test/client-side-encryption/spec/legacy/fle2-DecryptExistingData.json b/test/client-side-encryption/spec/legacy/fle2v2-DecryptExistingData.json similarity index 98% rename from test/client-side-encryption/spec/legacy/fle2-DecryptExistingData.json rename to test/client-side-encryption/spec/legacy/fle2v2-DecryptExistingData.json index e622d3334d..905d3c9456 100644 --- a/test/client-side-encryption/spec/legacy/fle2-DecryptExistingData.json +++ b/test/client-side-encryption/spec/legacy/fle2v2-DecryptExistingData.json @@ -1,7 +1,8 @@ { "runOn": [ { - "minServerVersion": "6.0.0", + "minServerVersion": "7.0.0", + "serverless": "forbid", "topology": [ "replicaset", "sharded", diff --git a/test/client-side-encryption/spec/legacy/fle2-Delete.json b/test/client-side-encryption/spec/legacy/fle2v2-Delete.json similarity index 86% rename from test/client-side-encryption/spec/legacy/fle2-Delete.json rename to test/client-side-encryption/spec/legacy/fle2v2-Delete.json index 8687127748..e4150eab8e 100644 --- a/test/client-side-encryption/spec/legacy/fle2-Delete.json +++ b/test/client-side-encryption/spec/legacy/fle2v2-Delete.json @@ -1,7 +1,8 @@ { "runOn": [ { - "minServerVersion": "6.0.0", + "minServerVersion": "7.0.0", + "serverless": "forbid", "topology": [ "replicaset", "sharded", @@ -13,9 +14,6 @@ "collection_name": "default", "data": [], "encrypted_fields": { - "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", - "ecocCollection": "enxcol_.default.ecoc", "fields": [ { "keyId": { @@ -179,7 +177,6 @@ "schema": { "default.default": { "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", "ecocCollection": "enxcol_.default.ecoc", "fields": [ { @@ -226,7 +223,7 @@ "encryptedIndexed": { "$eq": { "$binary": { - "base64": "BbEAAAAFZAAgAAAAAPtVteJQAlgb2YMa/+7YWH00sbQPyt7L6Rb8OwBdMmL2BXMAIAAAAAAd44hgVKnEnTFlwNVC14oyc9OZOTspeymusqkRQj57nAVjACAAAAAA19X9v9NlWidu/wR5/C/7WUV54DfL5CkNmT5WYrhxdDcFZQAgAAAAAOuac/eRLYakKX6B0vZ1r3QodOQFfjqJD+xlGiPu4/PsEmNtAAAAAAAAAAAAAA==", + "base64": 
"DIkAAAAFZAAgAAAAAPtVteJQAlgb2YMa/+7YWH00sbQPyt7L6Rb8OwBdMmL2BXMAIAAAAAAd44hgVKnEnTFlwNVC14oyc9OZOTspeymusqkRQj57nAVsACAAAAAAaZ9s3G+4znfxStxeOZwcZy1OhzjMGc5hjmdMN+b/w6kSY20AAAAAAAAAAAAA", "subType": "06" } } @@ -235,12 +232,12 @@ "limit": 1 } ], + "ordered": true, "encryptionInformation": { "type": 1, "schema": { "default.default": { "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", "ecocCollection": "enxcol_.default.ecoc", "fields": [ { @@ -271,24 +268,6 @@ } ] } - }, - "deleteTokens": { - "default.default": { - "encryptedIndexed": { - "e": { - "$binary": { - "base64": "65pz95EthqQpfoHS9nWvdCh05AV+OokP7GUaI+7j8+w=", - "subType": "00" - } - }, - "o": { - "$binary": { - "base64": "noN+05JsuO1oDg59yypIGj45i+eFH6HOTXOPpeZ//Mk=", - "subType": "00" - } - } - } - } } } }, diff --git a/test/client-side-encryption/spec/legacy/fle2-EncryptedFields-vs-EncryptedFieldsMap.json b/test/client-side-encryption/spec/legacy/fle2v2-EncryptedFields-vs-EncryptedFieldsMap.json similarity index 94% rename from test/client-side-encryption/spec/legacy/fle2-EncryptedFields-vs-EncryptedFieldsMap.json rename to test/client-side-encryption/spec/legacy/fle2v2-EncryptedFields-vs-EncryptedFieldsMap.json index 42cd4bbc9c..b579979e94 100644 --- a/test/client-side-encryption/spec/legacy/fle2-EncryptedFields-vs-EncryptedFieldsMap.json +++ b/test/client-side-encryption/spec/legacy/fle2v2-EncryptedFields-vs-EncryptedFieldsMap.json @@ -1,7 +1,8 @@ { "runOn": [ { - "minServerVersion": "6.0.0", + "minServerVersion": "7.0.0", + "serverless": "forbid", "topology": [ "replicaset", "sharded", @@ -13,9 +14,6 @@ "collection_name": "default", "data": [], "encrypted_fields": { - "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", - "ecocCollection": "enxcol_.default.ecoc", "fields": [ { "keyId": { @@ -94,9 +92,6 @@ }, "encryptedFieldsMap": { "default.default": { - "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", - "ecocCollection": "enxcol_.default.ecoc", "fields": [] } } diff --git a/test/client-side-encryption/spec/legacy/fle2-EncryptedFields-vs-jsonSchema.json b/test/client-side-encryption/spec/legacy/fle2v2-EncryptedFields-vs-jsonSchema.json similarity index 94% rename from test/client-side-encryption/spec/legacy/fle2-EncryptedFields-vs-jsonSchema.json rename to test/client-side-encryption/spec/legacy/fle2v2-EncryptedFields-vs-jsonSchema.json index f4386483da..0a84d73650 100644 --- a/test/client-side-encryption/spec/legacy/fle2-EncryptedFields-vs-jsonSchema.json +++ b/test/client-side-encryption/spec/legacy/fle2v2-EncryptedFields-vs-jsonSchema.json @@ -1,7 +1,8 @@ { "runOn": [ { - "minServerVersion": "6.0.0", + "minServerVersion": "7.0.0", + "serverless": "forbid", "topology": [ "replicaset", "sharded", @@ -17,9 +18,6 @@ "bsonType": "object" }, "encrypted_fields": { - "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", - "ecocCollection": "enxcol_.default.ecoc", "fields": [ { "keyId": { @@ -186,7 +184,6 @@ "schema": { "default.default": { "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", "ecocCollection": "enxcol_.default.ecoc", "fields": [ { @@ -231,7 +228,7 @@ "encryptedIndexed": { "$eq": { "$binary": { - "base64": "BbEAAAAFZAAgAAAAAPGmZcUzdE/FPILvRSyAScGvZparGI2y9rJ/vSBxgCujBXMAIAAAAACi1RjmndKqgnXy7xb22RzUbnZl1sOZRXPOC0KcJkAxmQVjACAAAAAAWuidNu47c9A4Clic3DvFhn1AQJVC+FJtoE5bGZuz6PsFZQAgAAAAAOuac/eRLYakKX6B0vZ1r3QodOQFfjqJD+xlGiPu4/PsEmNtAAAAAAAAAAAAAA==", + "base64": 
"DIkAAAAFZAAgAAAAAPGmZcUzdE/FPILvRSyAScGvZparGI2y9rJ/vSBxgCujBXMAIAAAAACi1RjmndKqgnXy7xb22RzUbnZl1sOZRXPOC0KcJkAxmQVsACAAAAAApJtKPW4+o9B7gAynNLL26jtlB4+hq5TXResijcYet8USY20AAAAAAAAAAAAA", "subType": "06" } } @@ -242,7 +239,6 @@ "schema": { "default.default": { "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", "ecocCollection": "enxcol_.default.ecoc", "fields": [ { diff --git a/test/client-side-encryption/spec/legacy/fle2-EncryptedFieldsMap-defaults.json b/test/client-side-encryption/spec/legacy/fle2v2-EncryptedFieldsMap-defaults.json similarity index 93% rename from test/client-side-encryption/spec/legacy/fle2-EncryptedFieldsMap-defaults.json rename to test/client-side-encryption/spec/legacy/fle2v2-EncryptedFieldsMap-defaults.json index 60820aae95..3e0905eadf 100644 --- a/test/client-side-encryption/spec/legacy/fle2-EncryptedFieldsMap-defaults.json +++ b/test/client-side-encryption/spec/legacy/fle2v2-EncryptedFieldsMap-defaults.json @@ -1,7 +1,8 @@ { "runOn": [ { - "minServerVersion": "6.0.0", + "minServerVersion": "7.0.0", + "serverless": "forbid", "topology": [ "replicaset", "sharded", @@ -73,10 +74,9 @@ }, "schema": { "default.default": { - "fields": [], "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", - "ecocCollection": "enxcol_.default.ecoc" + "ecocCollection": "enxcol_.default.ecoc", + "fields": [] } } }, diff --git a/test/client-side-encryption/spec/legacy/fle2-FindOneAndUpdate.json b/test/client-side-encryption/spec/legacy/fle2v2-FindOneAndUpdate.json similarity index 87% rename from test/client-side-encryption/spec/legacy/fle2-FindOneAndUpdate.json rename to test/client-side-encryption/spec/legacy/fle2v2-FindOneAndUpdate.json index de1b5c5aad..4606fbb930 100644 --- a/test/client-side-encryption/spec/legacy/fle2-FindOneAndUpdate.json +++ b/test/client-side-encryption/spec/legacy/fle2v2-FindOneAndUpdate.json @@ -1,7 +1,8 @@ { "runOn": [ { - "minServerVersion": "6.0.0", + "minServerVersion": "7.0.0", + "serverless": "forbid", "topology": [ "replicaset", "sharded", @@ -13,9 +14,6 @@ "collection_name": "default", "data": [], "encrypted_fields": { - "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", - "ecocCollection": "enxcol_.default.ecoc", "fields": [ { "keyId": { @@ -186,7 +184,6 @@ "schema": { "default.default": { "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", "ecocCollection": "enxcol_.default.ecoc", "fields": [ { @@ -231,7 +228,7 @@ "encryptedIndexed": { "$eq": { "$binary": { - "base64": "BbEAAAAFZAAgAAAAAPtVteJQAlgb2YMa/+7YWH00sbQPyt7L6Rb8OwBdMmL2BXMAIAAAAAAd44hgVKnEnTFlwNVC14oyc9OZOTspeymusqkRQj57nAVjACAAAAAA19X9v9NlWidu/wR5/C/7WUV54DfL5CkNmT5WYrhxdDcFZQAgAAAAAOuac/eRLYakKX6B0vZ1r3QodOQFfjqJD+xlGiPu4/PsEmNtAAAAAAAAAAAAAA==", + "base64": "DIkAAAAFZAAgAAAAAPtVteJQAlgb2YMa/+7YWH00sbQPyt7L6Rb8OwBdMmL2BXMAIAAAAAAd44hgVKnEnTFlwNVC14oyc9OZOTspeymusqkRQj57nAVsACAAAAAAaZ9s3G+4znfxStxeOZwcZy1OhzjMGc5hjmdMN+b/w6kSY20AAAAAAAAAAAAA", "subType": "06" } } @@ -247,7 +244,6 @@ "schema": { "default.default": { "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", "ecocCollection": "enxcol_.default.ecoc", "fields": [ { @@ -278,24 +274,6 @@ } ] } - }, - "deleteTokens": { - "default.default": { - "encryptedIndexed": { - "e": { - "$binary": { - "base64": "65pz95EthqQpfoHS9nWvdCh05AV+OokP7GUaI+7j8+w=", - "subType": "00" - } - }, - "o": { - "$binary": { - "base64": "noN+05JsuO1oDg59yypIGj45i+eFH6HOTXOPpeZ//Mk=", - "subType": "00" - } - } - } - 
} } } }, @@ -446,7 +424,6 @@ "schema": { "default.default": { "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", "ecocCollection": "enxcol_.default.ecoc", "fields": [ { @@ -491,7 +468,7 @@ "encryptedIndexed": { "$eq": { "$binary": { - "base64": "BbEAAAAFZAAgAAAAAPtVteJQAlgb2YMa/+7YWH00sbQPyt7L6Rb8OwBdMmL2BXMAIAAAAAAd44hgVKnEnTFlwNVC14oyc9OZOTspeymusqkRQj57nAVjACAAAAAA19X9v9NlWidu/wR5/C/7WUV54DfL5CkNmT5WYrhxdDcFZQAgAAAAAOuac/eRLYakKX6B0vZ1r3QodOQFfjqJD+xlGiPu4/PsEmNtAAAAAAAAAAAAAA==", + "base64": "DIkAAAAFZAAgAAAAAPtVteJQAlgb2YMa/+7YWH00sbQPyt7L6Rb8OwBdMmL2BXMAIAAAAAAd44hgVKnEnTFlwNVC14oyc9OZOTspeymusqkRQj57nAVsACAAAAAAaZ9s3G+4znfxStxeOZwcZy1OhzjMGc5hjmdMN+b/w6kSY20AAAAAAAAAAAAA", "subType": "06" } } @@ -509,7 +486,6 @@ "schema": { "default.default": { "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", "ecocCollection": "enxcol_.default.ecoc", "fields": [ { @@ -540,24 +516,6 @@ } ] } - }, - "deleteTokens": { - "default.default": { - "encryptedIndexed": { - "e": { - "$binary": { - "base64": "65pz95EthqQpfoHS9nWvdCh05AV+OokP7GUaI+7j8+w=", - "subType": "00" - } - }, - "o": { - "$binary": { - "base64": "noN+05JsuO1oDg59yypIGj45i+eFH6HOTXOPpeZ//Mk=", - "subType": "00" - } - } - } - } } } }, diff --git a/test/client-side-encryption/spec/legacy/fle2-InsertFind-Indexed.json b/test/client-side-encryption/spec/legacy/fle2v2-InsertFind-Indexed.json similarity index 94% rename from test/client-side-encryption/spec/legacy/fle2-InsertFind-Indexed.json rename to test/client-side-encryption/spec/legacy/fle2v2-InsertFind-Indexed.json index 84b69d7de9..c7149d1f5c 100644 --- a/test/client-side-encryption/spec/legacy/fle2-InsertFind-Indexed.json +++ b/test/client-side-encryption/spec/legacy/fle2v2-InsertFind-Indexed.json @@ -1,7 +1,8 @@ { "runOn": [ { - "minServerVersion": "6.0.0", + "minServerVersion": "7.0.0", + "serverless": "forbid", "topology": [ "replicaset", "sharded", @@ -13,9 +14,6 @@ "collection_name": "default", "data": [], "encrypted_fields": { - "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", - "ecocCollection": "enxcol_.default.ecoc", "fields": [ { "keyId": { @@ -182,7 +180,6 @@ "schema": { "default.default": { "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", "ecocCollection": "enxcol_.default.ecoc", "fields": [ { @@ -227,7 +224,7 @@ "encryptedIndexed": { "$eq": { "$binary": { - "base64": "BbEAAAAFZAAgAAAAAPGmZcUzdE/FPILvRSyAScGvZparGI2y9rJ/vSBxgCujBXMAIAAAAACi1RjmndKqgnXy7xb22RzUbnZl1sOZRXPOC0KcJkAxmQVjACAAAAAAWuidNu47c9A4Clic3DvFhn1AQJVC+FJtoE5bGZuz6PsFZQAgAAAAAOuac/eRLYakKX6B0vZ1r3QodOQFfjqJD+xlGiPu4/PsEmNtAAAAAAAAAAAAAA==", + "base64": "DIkAAAAFZAAgAAAAAPGmZcUzdE/FPILvRSyAScGvZparGI2y9rJ/vSBxgCujBXMAIAAAAACi1RjmndKqgnXy7xb22RzUbnZl1sOZRXPOC0KcJkAxmQVsACAAAAAApJtKPW4+o9B7gAynNLL26jtlB4+hq5TXResijcYet8USY20AAAAAAAAAAAAA", "subType": "06" } } @@ -238,7 +235,6 @@ "schema": { "default.default": { "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", "ecocCollection": "enxcol_.default.ecoc", "fields": [ { diff --git a/test/client-side-encryption/spec/legacy/fle2-InsertFind-Unindexed.json b/test/client-side-encryption/spec/legacy/fle2v2-InsertFind-Unindexed.json similarity index 97% rename from test/client-side-encryption/spec/legacy/fle2-InsertFind-Unindexed.json rename to test/client-side-encryption/spec/legacy/fle2v2-InsertFind-Unindexed.json index 9b31438525..008b0c959f 100644 --- 
a/test/client-side-encryption/spec/legacy/fle2-InsertFind-Unindexed.json +++ b/test/client-side-encryption/spec/legacy/fle2v2-InsertFind-Unindexed.json @@ -1,7 +1,8 @@ { "runOn": [ { - "minServerVersion": "6.0.0", + "minServerVersion": "7.0.0", + "serverless": "forbid", "topology": [ "replicaset", "sharded", @@ -13,9 +14,6 @@ "collection_name": "default", "data": [], "encrypted_fields": { - "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", - "ecocCollection": "enxcol_.default.ecoc", "fields": [ { "keyId": { diff --git a/test/client-side-encryption/spec/legacy/fle2-MissingKey.json b/test/client-side-encryption/spec/legacy/fle2v2-MissingKey.json similarity index 94% rename from test/client-side-encryption/spec/legacy/fle2-MissingKey.json rename to test/client-side-encryption/spec/legacy/fle2v2-MissingKey.json index 4210da09e4..0b7e86bca3 100644 --- a/test/client-side-encryption/spec/legacy/fle2-MissingKey.json +++ b/test/client-side-encryption/spec/legacy/fle2v2-MissingKey.json @@ -1,7 +1,8 @@ { "runOn": [ { - "minServerVersion": "6.0.0", + "minServerVersion": "7.0.0", + "serverless": "forbid", "topology": [ "replicaset", "sharded", @@ -22,9 +23,6 @@ } ], "encrypted_fields": { - "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", - "ecocCollection": "enxcol_.default.ecoc", "fields": [ { "keyId": { diff --git a/test/client-side-encryption/spec/legacy/fle2-NoEncryption.json b/test/client-side-encryption/spec/legacy/fle2v2-NoEncryption.json similarity index 96% rename from test/client-side-encryption/spec/legacy/fle2-NoEncryption.json rename to test/client-side-encryption/spec/legacy/fle2v2-NoEncryption.json index 9d255bd493..185691d61c 100644 --- a/test/client-side-encryption/spec/legacy/fle2-NoEncryption.json +++ b/test/client-side-encryption/spec/legacy/fle2v2-NoEncryption.json @@ -1,7 +1,8 @@ { "runOn": [ { - "minServerVersion": "6.0.0", + "minServerVersion": "7.0.0", + "serverless": "forbid", "topology": [ "replicaset", "sharded", diff --git a/test/client-side-encryption/spec/legacy/fle2-Range-Date-Aggregate.json b/test/client-side-encryption/spec/legacy/fle2v2-Range-Date-Aggregate.json similarity index 89% rename from test/client-side-encryption/spec/legacy/fle2-Range-Date-Aggregate.json rename to test/client-side-encryption/spec/legacy/fle2v2-Range-Date-Aggregate.json index a35321cd35..dea821bd1e 100644 --- a/test/client-side-encryption/spec/legacy/fle2-Range-Date-Aggregate.json +++ b/test/client-side-encryption/spec/legacy/fle2v2-Range-Date-Aggregate.json @@ -1,7 +1,8 @@ { "runOn": [ { - "minServerVersion": "6.2.0", + "minServerVersion": "7.0.0", + "serverless": "forbid", "topology": [ "replicaset", "sharded", @@ -13,9 +14,6 @@ "collection_name": "default", "data": [], "encrypted_fields": { - "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", - "ecocCollection": "enxcol_.default.ecoc", "fields": [ { "keyId": { @@ -216,7 +214,6 @@ "schema": { "default.default": { "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", "ecocCollection": "enxcol_.default.ecoc", "fields": [ { @@ -274,7 +271,6 @@ "schema": { "default.default": { "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", "ecocCollection": "enxcol_.default.ecoc", "fields": [ { @@ -324,7 +320,7 @@ "encryptedDate": { "$gt": { "$binary": { - "base64": 
"CnEFAAADcGF5bG9hZABBBQAABGcABQUAAAMwAH0AAAAFZAAgAAAAALGGQ/CRD+pGLD53BZzWcCcYbuGLVEyjzXIx7b+ux/q2BXMAIAAAAACOC6mXEZL27P9hethZbtKYsTXKK+FpgQ9Axxmn9N/cCwVjACAAAAAAKs6X8gaa+AJ6PpxVicsk7iCXpSYIkQ5zB0KnXqlfCWgAAzEAfQAAAAVkACAAAAAAtL3QIvnZqCF72eS6lKr8ilff7R6kiNklokiTuaU5wNsFcwAgAAAAAEtqr3/X731VB+VrbFcY8ZrJKRo2E0Fd+C8L0EMNcvcCBWMAIAAAAABYTWOKsDZ7cgPhiKDR2bE5CFGz4MuEwZbzZ0z46eGzBQADMgB9AAAABWQAIAAAAAB99ZW/7KwXKzl5M3XQsAJ3JbEef90IoxFYBArNiYzlgQVzACAAAAAAYO/qaw0+92HAryxOUG7iK6hnIy3OaUA9jIqtHdvcq8YFYwAgAAAAABSqgKlxv5LXoxUO3zNmqGztlZhL2S150WiR/eycv7CWAAMzAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVjACAAAAAAfsofSP7nQHv8ic8ZW0aNlWxplS46Z+mywPR4rQk+wcgAAzQAfQAAAAVkACAAAAAAV22FGF7ZDwK/EYiGNMlm/QuT3saQdyJM/Fn+ZyQug1oFcwAgAAAAACo7GwCvbcs5UHQMgds9/1QMklEVdjZigpuOFGrDmmxtBWMAIAAAAADu3L42J/oqNLe/XNYpodf2fhmPdcLhqO4J24o0hiveXwADNQB9AAAABWQAIAAAAADFspsMG7yHjKppyllon1KqAsTrHaZ6JzNqnSz8o6iTvwVzACAAAAAAeiA5pqVIQQ9s6UY/P8v5Jjkl3I7iFNeLDYehikrINrsFYwAgAAAAAAUljtCIvjRnLB2UPTEeRIz11Snl5SOBUNaXMXig4rqKAAM2AH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVjACAAAAAAD51NYesiO4Fo7w7iWBfqAFxEqkaDVctpvzZ28nT4SE8AAzcAfQAAAAVkACAAAAAAYfLwnoxK6XAGQrJFy8+TIJoq38ldBaO75h4zA4ZX5tQFcwAgAAAAAC2wk8UcJH5X5XGnDBYmel6srpBkzBhHtt3Jw1u5TSJ1BWMAIAAAAABJDtFhJ2tPbowp1UUmOCN/rqSqHRL1dtMu0c47vIlK4wADOAB9AAAABWQAIAAAAADGvyrtKkIcaV17ynZA7b2k5Pz6OhvxdWNkDvDWJIja8wVzACAAAAAAOLypVKNxf/wR1G8OZjUUsTQzDYeNNhhITxGMSp7euS4FYwAgAAAAAE6kvmXPqTnYIH4EJmNhy8OLVJZFOmdiBXLMorhELjKWAAM5AH0AAAAFZAAgAAAAAEocREw1L0g+roFUchJI2Yd0M0ME2bnErNUYnpyJP1SqBXMAIAAAAAAcE2/JK/8MoSeOchIuAkKh1X3ImoA7p8ujAZIfvIDo6QVjACAAAAAA85AiE+bNFAYQTXQAFexgeczdVhf8FUnf16WzJlI/kmsAAAVlACAAAAAA65pz95EthqQpfoHS9nWvdCh05AV+OokP7GUaI+7j8+wSY20AAAAAAAAAAAAAEHBheWxvYWRJZAAAAAAAEGZpcnN0T3BlcmF0b3IAAQAAAAA=", + "base64": 
"DUkFAAADcGF5bG9hZAAZBQAABGcABQUAAAMwAH0AAAAFZAAgAAAAALGGQ/CRD+pGLD53BZzWcCcYbuGLVEyjzXIx7b+ux/q2BXMAIAAAAACOC6mXEZL27P9hethZbtKYsTXKK+FpgQ9Axxmn9N/cCwVsACAAAAAA+MFEd8XfZSpbXKqqPC2L3TEFswkaG5Ff6aSgf8p+XVIAAzEAfQAAAAVkACAAAAAAtL3QIvnZqCF72eS6lKr8ilff7R6kiNklokiTuaU5wNsFcwAgAAAAAEtqr3/X731VB+VrbFcY8ZrJKRo2E0Fd+C8L0EMNcvcCBWwAIAAAAABNPhSriux8W8qbwnhCczE3IzlhNEnGDpUwTFDZSL+eYQADMgB9AAAABWQAIAAAAAB99ZW/7KwXKzl5M3XQsAJ3JbEef90IoxFYBArNiYzlgQVzACAAAAAAYO/qaw0+92HAryxOUG7iK6hnIy3OaUA9jIqtHdvcq8YFbAAgAAAAAHrUYj8A0hVgc6VklpDiljOnykrUSfEsjm56XO/bsfKdAAMzAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzQAfQAAAAVkACAAAAAAV22FGF7ZDwK/EYiGNMlm/QuT3saQdyJM/Fn+ZyQug1oFcwAgAAAAACo7GwCvbcs5UHQMgds9/1QMklEVdjZigpuOFGrDmmxtBWwAIAAAAADQbYYPxlCMMGe2MulbiurApFLoeJSMvTeDU3pyEA2jNwADNQB9AAAABWQAIAAAAADFspsMG7yHjKppyllon1KqAsTrHaZ6JzNqnSz8o6iTvwVzACAAAAAAeiA5pqVIQQ9s6UY/P8v5Jjkl3I7iFNeLDYehikrINrsFbAAgAAAAAFjBTzTpNxDEkA0vSRj0jCED9KDRlboMVyilKyDz5YR4AAM2AH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzcAfQAAAAVkACAAAAAAYfLwnoxK6XAGQrJFy8+TIJoq38ldBaO75h4zA4ZX5tQFcwAgAAAAAC2wk8UcJH5X5XGnDBYmel6srpBkzBhHtt3Jw1u5TSJ1BWwAIAAAAAA9/YU9eI3D7QbXKIw/3/gzWJ6MZrCYhG0j1wNKgRQp5wADOAB9AAAABWQAIAAAAADGvyrtKkIcaV17ynZA7b2k5Pz6OhvxdWNkDvDWJIja8wVzACAAAAAAOLypVKNxf/wR1G8OZjUUsTQzDYeNNhhITxGMSp7euS4FbAAgAAAAAA9EsxoV1B2DcQ1NJRwuxXnvVR+vkD0wbbDYEI/zFEnDAAM5AH0AAAAFZAAgAAAAAEocREw1L0g+roFUchJI2Yd0M0ME2bnErNUYnpyJP1SqBXMAIAAAAAAcE2/JK/8MoSeOchIuAkKh1X3ImoA7p8ujAZIfvIDo6QVsACAAAAAA+W0+zgLr85/PD7P9a94wk6MgNgrizx/XU9aCxAkp1IwAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAAA==", "subType": "06" } } @@ -338,7 +334,6 @@ "schema": { "default.default": { "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", "ecocCollection": "enxcol_.default.ecoc", "fields": [ { diff --git a/test/client-side-encryption/spec/legacy/fle2-Range-Date-Correctness.json b/test/client-side-encryption/spec/legacy/fle2v2-Range-Date-Correctness.json similarity index 99% rename from test/client-side-encryption/spec/legacy/fle2-Range-Date-Correctness.json rename to test/client-side-encryption/spec/legacy/fle2v2-Range-Date-Correctness.json index 5832e85418..9e4f525877 100644 --- a/test/client-side-encryption/spec/legacy/fle2-Range-Date-Correctness.json +++ b/test/client-side-encryption/spec/legacy/fle2v2-Range-Date-Correctness.json @@ -1,7 +1,8 @@ { "runOn": [ { - "minServerVersion": "6.2.0", + "minServerVersion": "7.0.0", + "serverless": "forbid", "topology": [ "replicaset", "sharded", @@ -13,9 +14,6 @@ "collection_name": "default", "data": [], "encrypted_fields": { - "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", - "ecocCollection": "enxcol_.default.ecoc", "fields": [ { "keyId": { diff --git a/test/client-side-encryption/spec/legacy/fle2-Range-Date-Delete.json b/test/client-side-encryption/spec/legacy/fle2v2-Range-Date-Delete.json similarity index 83% rename from test/client-side-encryption/spec/legacy/fle2-Range-Date-Delete.json rename to test/client-side-encryption/spec/legacy/fle2v2-Range-Date-Delete.json index b5856e7620..7f4094f50c 100644 --- a/test/client-side-encryption/spec/legacy/fle2-Range-Date-Delete.json +++ b/test/client-side-encryption/spec/legacy/fle2v2-Range-Date-Delete.json @@ -1,7 +1,8 @@ { "runOn": [ { - "minServerVersion": "6.2.0", + 
"minServerVersion": "7.0.0", + "serverless": "forbid", "topology": [ "replicaset", "sharded", @@ -13,9 +14,6 @@ "collection_name": "default", "data": [], "encrypted_fields": { - "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", - "ecocCollection": "enxcol_.default.ecoc", "fields": [ { "keyId": { @@ -205,7 +203,6 @@ "schema": { "default.default": { "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", "ecocCollection": "enxcol_.default.ecoc", "fields": [ { @@ -263,7 +260,6 @@ "schema": { "default.default": { "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", "ecocCollection": "enxcol_.default.ecoc", "fields": [ { @@ -313,7 +309,7 @@ "encryptedDate": { "$gt": { "$binary": { - "base64": "CnEFAAADcGF5bG9hZABBBQAABGcABQUAAAMwAH0AAAAFZAAgAAAAALGGQ/CRD+pGLD53BZzWcCcYbuGLVEyjzXIx7b+ux/q2BXMAIAAAAACOC6mXEZL27P9hethZbtKYsTXKK+FpgQ9Axxmn9N/cCwVjACAAAAAAKs6X8gaa+AJ6PpxVicsk7iCXpSYIkQ5zB0KnXqlfCWgAAzEAfQAAAAVkACAAAAAAtL3QIvnZqCF72eS6lKr8ilff7R6kiNklokiTuaU5wNsFcwAgAAAAAEtqr3/X731VB+VrbFcY8ZrJKRo2E0Fd+C8L0EMNcvcCBWMAIAAAAABYTWOKsDZ7cgPhiKDR2bE5CFGz4MuEwZbzZ0z46eGzBQADMgB9AAAABWQAIAAAAAB99ZW/7KwXKzl5M3XQsAJ3JbEef90IoxFYBArNiYzlgQVzACAAAAAAYO/qaw0+92HAryxOUG7iK6hnIy3OaUA9jIqtHdvcq8YFYwAgAAAAABSqgKlxv5LXoxUO3zNmqGztlZhL2S150WiR/eycv7CWAAMzAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVjACAAAAAAfsofSP7nQHv8ic8ZW0aNlWxplS46Z+mywPR4rQk+wcgAAzQAfQAAAAVkACAAAAAAV22FGF7ZDwK/EYiGNMlm/QuT3saQdyJM/Fn+ZyQug1oFcwAgAAAAACo7GwCvbcs5UHQMgds9/1QMklEVdjZigpuOFGrDmmxtBWMAIAAAAADu3L42J/oqNLe/XNYpodf2fhmPdcLhqO4J24o0hiveXwADNQB9AAAABWQAIAAAAADFspsMG7yHjKppyllon1KqAsTrHaZ6JzNqnSz8o6iTvwVzACAAAAAAeiA5pqVIQQ9s6UY/P8v5Jjkl3I7iFNeLDYehikrINrsFYwAgAAAAAAUljtCIvjRnLB2UPTEeRIz11Snl5SOBUNaXMXig4rqKAAM2AH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVjACAAAAAAD51NYesiO4Fo7w7iWBfqAFxEqkaDVctpvzZ28nT4SE8AAzcAfQAAAAVkACAAAAAAYfLwnoxK6XAGQrJFy8+TIJoq38ldBaO75h4zA4ZX5tQFcwAgAAAAAC2wk8UcJH5X5XGnDBYmel6srpBkzBhHtt3Jw1u5TSJ1BWMAIAAAAABJDtFhJ2tPbowp1UUmOCN/rqSqHRL1dtMu0c47vIlK4wADOAB9AAAABWQAIAAAAADGvyrtKkIcaV17ynZA7b2k5Pz6OhvxdWNkDvDWJIja8wVzACAAAAAAOLypVKNxf/wR1G8OZjUUsTQzDYeNNhhITxGMSp7euS4FYwAgAAAAAE6kvmXPqTnYIH4EJmNhy8OLVJZFOmdiBXLMorhELjKWAAM5AH0AAAAFZAAgAAAAAEocREw1L0g+roFUchJI2Yd0M0ME2bnErNUYnpyJP1SqBXMAIAAAAAAcE2/JK/8MoSeOchIuAkKh1X3ImoA7p8ujAZIfvIDo6QVjACAAAAAA85AiE+bNFAYQTXQAFexgeczdVhf8FUnf16WzJlI/kmsAAAVlACAAAAAA65pz95EthqQpfoHS9nWvdCh05AV+OokP7GUaI+7j8+wSY20AAAAAAAAAAAAAEHBheWxvYWRJZAAAAAAAEGZpcnN0T3BlcmF0b3IAAQAAAAA=", + "base64": 
"DUkFAAADcGF5bG9hZAAZBQAABGcABQUAAAMwAH0AAAAFZAAgAAAAALGGQ/CRD+pGLD53BZzWcCcYbuGLVEyjzXIx7b+ux/q2BXMAIAAAAACOC6mXEZL27P9hethZbtKYsTXKK+FpgQ9Axxmn9N/cCwVsACAAAAAA+MFEd8XfZSpbXKqqPC2L3TEFswkaG5Ff6aSgf8p+XVIAAzEAfQAAAAVkACAAAAAAtL3QIvnZqCF72eS6lKr8ilff7R6kiNklokiTuaU5wNsFcwAgAAAAAEtqr3/X731VB+VrbFcY8ZrJKRo2E0Fd+C8L0EMNcvcCBWwAIAAAAABNPhSriux8W8qbwnhCczE3IzlhNEnGDpUwTFDZSL+eYQADMgB9AAAABWQAIAAAAAB99ZW/7KwXKzl5M3XQsAJ3JbEef90IoxFYBArNiYzlgQVzACAAAAAAYO/qaw0+92HAryxOUG7iK6hnIy3OaUA9jIqtHdvcq8YFbAAgAAAAAHrUYj8A0hVgc6VklpDiljOnykrUSfEsjm56XO/bsfKdAAMzAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzQAfQAAAAVkACAAAAAAV22FGF7ZDwK/EYiGNMlm/QuT3saQdyJM/Fn+ZyQug1oFcwAgAAAAACo7GwCvbcs5UHQMgds9/1QMklEVdjZigpuOFGrDmmxtBWwAIAAAAADQbYYPxlCMMGe2MulbiurApFLoeJSMvTeDU3pyEA2jNwADNQB9AAAABWQAIAAAAADFspsMG7yHjKppyllon1KqAsTrHaZ6JzNqnSz8o6iTvwVzACAAAAAAeiA5pqVIQQ9s6UY/P8v5Jjkl3I7iFNeLDYehikrINrsFbAAgAAAAAFjBTzTpNxDEkA0vSRj0jCED9KDRlboMVyilKyDz5YR4AAM2AH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzcAfQAAAAVkACAAAAAAYfLwnoxK6XAGQrJFy8+TIJoq38ldBaO75h4zA4ZX5tQFcwAgAAAAAC2wk8UcJH5X5XGnDBYmel6srpBkzBhHtt3Jw1u5TSJ1BWwAIAAAAAA9/YU9eI3D7QbXKIw/3/gzWJ6MZrCYhG0j1wNKgRQp5wADOAB9AAAABWQAIAAAAADGvyrtKkIcaV17ynZA7b2k5Pz6OhvxdWNkDvDWJIja8wVzACAAAAAAOLypVKNxf/wR1G8OZjUUsTQzDYeNNhhITxGMSp7euS4FbAAgAAAAAA9EsxoV1B2DcQ1NJRwuxXnvVR+vkD0wbbDYEI/zFEnDAAM5AH0AAAAFZAAgAAAAAEocREw1L0g+roFUchJI2Yd0M0ME2bnErNUYnpyJP1SqBXMAIAAAAAAcE2/JK/8MoSeOchIuAkKh1X3ImoA7p8ujAZIfvIDo6QVsACAAAAAA+W0+zgLr85/PD7P9a94wk6MgNgrizx/XU9aCxAkp1IwAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAAA==", "subType": "06" } } @@ -322,12 +318,12 @@ "limit": 1 } ], + "ordered": true, "encryptionInformation": { "type": 1, "schema": { "default.default": { "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", "ecocCollection": "enxcol_.default.ecoc", "fields": [ { @@ -361,24 +357,6 @@ } ] } - }, - "deleteTokens": { - "default.default": { - "encryptedDate": { - "e": { - "$binary": { - "base64": "65pz95EthqQpfoHS9nWvdCh05AV+OokP7GUaI+7j8+w=", - "subType": "00" - } - }, - "o": { - "$binary": { - "base64": "noN+05JsuO1oDg59yypIGj45i+eFH6HOTXOPpeZ//Mk=", - "subType": "00" - } - } - } - } } } }, diff --git a/test/client-side-encryption/spec/legacy/fle2-Range-Date-FindOneAndUpdate.json b/test/client-side-encryption/spec/legacy/fle2v2-Range-Date-FindOneAndUpdate.json similarity index 86% rename from test/client-side-encryption/spec/legacy/fle2-Range-Date-FindOneAndUpdate.json rename to test/client-side-encryption/spec/legacy/fle2v2-Range-Date-FindOneAndUpdate.json index a59258a466..5ec0601603 100644 --- a/test/client-side-encryption/spec/legacy/fle2-Range-Date-FindOneAndUpdate.json +++ b/test/client-side-encryption/spec/legacy/fle2v2-Range-Date-FindOneAndUpdate.json @@ -1,7 +1,8 @@ { "runOn": [ { - "minServerVersion": "6.2.0", + "minServerVersion": "7.0.0", + "serverless": "forbid", "topology": [ "replicaset", "sharded", @@ -13,9 +14,6 @@ "collection_name": "default", "data": [], "encrypted_fields": { - "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", - "ecocCollection": "enxcol_.default.ecoc", "fields": [ { "keyId": { @@ -220,7 +218,6 @@ "schema": { "default.default": { "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", 
"ecocCollection": "enxcol_.default.ecoc", "fields": [ { @@ -278,7 +275,6 @@ "schema": { "default.default": { "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", "ecocCollection": "enxcol_.default.ecoc", "fields": [ { @@ -326,7 +322,7 @@ "encryptedDate": { "$gt": { "$binary": { - "base64": "CnEFAAADcGF5bG9hZABBBQAABGcABQUAAAMwAH0AAAAFZAAgAAAAALGGQ/CRD+pGLD53BZzWcCcYbuGLVEyjzXIx7b+ux/q2BXMAIAAAAACOC6mXEZL27P9hethZbtKYsTXKK+FpgQ9Axxmn9N/cCwVjACAAAAAAKs6X8gaa+AJ6PpxVicsk7iCXpSYIkQ5zB0KnXqlfCWgAAzEAfQAAAAVkACAAAAAAtL3QIvnZqCF72eS6lKr8ilff7R6kiNklokiTuaU5wNsFcwAgAAAAAEtqr3/X731VB+VrbFcY8ZrJKRo2E0Fd+C8L0EMNcvcCBWMAIAAAAABYTWOKsDZ7cgPhiKDR2bE5CFGz4MuEwZbzZ0z46eGzBQADMgB9AAAABWQAIAAAAAB99ZW/7KwXKzl5M3XQsAJ3JbEef90IoxFYBArNiYzlgQVzACAAAAAAYO/qaw0+92HAryxOUG7iK6hnIy3OaUA9jIqtHdvcq8YFYwAgAAAAABSqgKlxv5LXoxUO3zNmqGztlZhL2S150WiR/eycv7CWAAMzAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVjACAAAAAAfsofSP7nQHv8ic8ZW0aNlWxplS46Z+mywPR4rQk+wcgAAzQAfQAAAAVkACAAAAAAV22FGF7ZDwK/EYiGNMlm/QuT3saQdyJM/Fn+ZyQug1oFcwAgAAAAACo7GwCvbcs5UHQMgds9/1QMklEVdjZigpuOFGrDmmxtBWMAIAAAAADu3L42J/oqNLe/XNYpodf2fhmPdcLhqO4J24o0hiveXwADNQB9AAAABWQAIAAAAADFspsMG7yHjKppyllon1KqAsTrHaZ6JzNqnSz8o6iTvwVzACAAAAAAeiA5pqVIQQ9s6UY/P8v5Jjkl3I7iFNeLDYehikrINrsFYwAgAAAAAAUljtCIvjRnLB2UPTEeRIz11Snl5SOBUNaXMXig4rqKAAM2AH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVjACAAAAAAD51NYesiO4Fo7w7iWBfqAFxEqkaDVctpvzZ28nT4SE8AAzcAfQAAAAVkACAAAAAAYfLwnoxK6XAGQrJFy8+TIJoq38ldBaO75h4zA4ZX5tQFcwAgAAAAAC2wk8UcJH5X5XGnDBYmel6srpBkzBhHtt3Jw1u5TSJ1BWMAIAAAAABJDtFhJ2tPbowp1UUmOCN/rqSqHRL1dtMu0c47vIlK4wADOAB9AAAABWQAIAAAAADGvyrtKkIcaV17ynZA7b2k5Pz6OhvxdWNkDvDWJIja8wVzACAAAAAAOLypVKNxf/wR1G8OZjUUsTQzDYeNNhhITxGMSp7euS4FYwAgAAAAAE6kvmXPqTnYIH4EJmNhy8OLVJZFOmdiBXLMorhELjKWAAM5AH0AAAAFZAAgAAAAAEocREw1L0g+roFUchJI2Yd0M0ME2bnErNUYnpyJP1SqBXMAIAAAAAAcE2/JK/8MoSeOchIuAkKh1X3ImoA7p8ujAZIfvIDo6QVjACAAAAAA85AiE+bNFAYQTXQAFexgeczdVhf8FUnf16WzJlI/kmsAAAVlACAAAAAA65pz95EthqQpfoHS9nWvdCh05AV+OokP7GUaI+7j8+wSY20AAAAAAAAAAAAAEHBheWxvYWRJZAAAAAAAEGZpcnN0T3BlcmF0b3IAAQAAAAA=", + "base64": 
"DUkFAAADcGF5bG9hZAAZBQAABGcABQUAAAMwAH0AAAAFZAAgAAAAALGGQ/CRD+pGLD53BZzWcCcYbuGLVEyjzXIx7b+ux/q2BXMAIAAAAACOC6mXEZL27P9hethZbtKYsTXKK+FpgQ9Axxmn9N/cCwVsACAAAAAA+MFEd8XfZSpbXKqqPC2L3TEFswkaG5Ff6aSgf8p+XVIAAzEAfQAAAAVkACAAAAAAtL3QIvnZqCF72eS6lKr8ilff7R6kiNklokiTuaU5wNsFcwAgAAAAAEtqr3/X731VB+VrbFcY8ZrJKRo2E0Fd+C8L0EMNcvcCBWwAIAAAAABNPhSriux8W8qbwnhCczE3IzlhNEnGDpUwTFDZSL+eYQADMgB9AAAABWQAIAAAAAB99ZW/7KwXKzl5M3XQsAJ3JbEef90IoxFYBArNiYzlgQVzACAAAAAAYO/qaw0+92HAryxOUG7iK6hnIy3OaUA9jIqtHdvcq8YFbAAgAAAAAHrUYj8A0hVgc6VklpDiljOnykrUSfEsjm56XO/bsfKdAAMzAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzQAfQAAAAVkACAAAAAAV22FGF7ZDwK/EYiGNMlm/QuT3saQdyJM/Fn+ZyQug1oFcwAgAAAAACo7GwCvbcs5UHQMgds9/1QMklEVdjZigpuOFGrDmmxtBWwAIAAAAADQbYYPxlCMMGe2MulbiurApFLoeJSMvTeDU3pyEA2jNwADNQB9AAAABWQAIAAAAADFspsMG7yHjKppyllon1KqAsTrHaZ6JzNqnSz8o6iTvwVzACAAAAAAeiA5pqVIQQ9s6UY/P8v5Jjkl3I7iFNeLDYehikrINrsFbAAgAAAAAFjBTzTpNxDEkA0vSRj0jCED9KDRlboMVyilKyDz5YR4AAM2AH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzcAfQAAAAVkACAAAAAAYfLwnoxK6XAGQrJFy8+TIJoq38ldBaO75h4zA4ZX5tQFcwAgAAAAAC2wk8UcJH5X5XGnDBYmel6srpBkzBhHtt3Jw1u5TSJ1BWwAIAAAAAA9/YU9eI3D7QbXKIw/3/gzWJ6MZrCYhG0j1wNKgRQp5wADOAB9AAAABWQAIAAAAADGvyrtKkIcaV17ynZA7b2k5Pz6OhvxdWNkDvDWJIja8wVzACAAAAAAOLypVKNxf/wR1G8OZjUUsTQzDYeNNhhITxGMSp7euS4FbAAgAAAAAA9EsxoV1B2DcQ1NJRwuxXnvVR+vkD0wbbDYEI/zFEnDAAM5AH0AAAAFZAAgAAAAAEocREw1L0g+roFUchJI2Yd0M0ME2bnErNUYnpyJP1SqBXMAIAAAAAAcE2/JK/8MoSeOchIuAkKh1X3ImoA7p8ujAZIfvIDo6QVsACAAAAAA+W0+zgLr85/PD7P9a94wk6MgNgrizx/XU9aCxAkp1IwAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAAA==", "subType": "06" } } @@ -344,7 +340,6 @@ "schema": { "default.default": { "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", "ecocCollection": "enxcol_.default.ecoc", "fields": [ { @@ -378,24 +373,6 @@ } ] } - }, - "deleteTokens": { - "default.default": { - "encryptedDate": { - "e": { - "$binary": { - "base64": "65pz95EthqQpfoHS9nWvdCh05AV+OokP7GUaI+7j8+w=", - "subType": "00" - } - }, - "o": { - "$binary": { - "base64": "noN+05JsuO1oDg59yypIGj45i+eFH6HOTXOPpeZ//Mk=", - "subType": "00" - } - } - } - } } } }, diff --git a/test/client-side-encryption/spec/legacy/fle2-Range-Date-InsertFind.json b/test/client-side-encryption/spec/legacy/fle2v2-Range-Date-InsertFind.json similarity index 89% rename from test/client-side-encryption/spec/legacy/fle2-Range-Date-InsertFind.json rename to test/client-side-encryption/spec/legacy/fle2v2-Range-Date-InsertFind.json index 4357fafeea..efce1511c0 100644 --- a/test/client-side-encryption/spec/legacy/fle2-Range-Date-InsertFind.json +++ b/test/client-side-encryption/spec/legacy/fle2v2-Range-Date-InsertFind.json @@ -1,7 +1,8 @@ { "runOn": [ { - "minServerVersion": "6.2.0", + "minServerVersion": "7.0.0", + "serverless": "forbid", "topology": [ "replicaset", "sharded", @@ -13,9 +14,6 @@ "collection_name": "default", "data": [], "encrypted_fields": { - "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", - "ecocCollection": "enxcol_.default.ecoc", "fields": [ { "keyId": { @@ -212,7 +210,6 @@ "schema": { "default.default": { "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", "ecocCollection": "enxcol_.default.ecoc", "fields": [ { @@ -270,7 +267,6 @@ "schema": { "default.default": { "escCollection": 
"enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", "ecocCollection": "enxcol_.default.ecoc", "fields": [ { @@ -318,7 +314,7 @@ "encryptedDate": { "$gt": { "$binary": { - "base64": "CnEFAAADcGF5bG9hZABBBQAABGcABQUAAAMwAH0AAAAFZAAgAAAAALGGQ/CRD+pGLD53BZzWcCcYbuGLVEyjzXIx7b+ux/q2BXMAIAAAAACOC6mXEZL27P9hethZbtKYsTXKK+FpgQ9Axxmn9N/cCwVjACAAAAAAKs6X8gaa+AJ6PpxVicsk7iCXpSYIkQ5zB0KnXqlfCWgAAzEAfQAAAAVkACAAAAAAtL3QIvnZqCF72eS6lKr8ilff7R6kiNklokiTuaU5wNsFcwAgAAAAAEtqr3/X731VB+VrbFcY8ZrJKRo2E0Fd+C8L0EMNcvcCBWMAIAAAAABYTWOKsDZ7cgPhiKDR2bE5CFGz4MuEwZbzZ0z46eGzBQADMgB9AAAABWQAIAAAAAB99ZW/7KwXKzl5M3XQsAJ3JbEef90IoxFYBArNiYzlgQVzACAAAAAAYO/qaw0+92HAryxOUG7iK6hnIy3OaUA9jIqtHdvcq8YFYwAgAAAAABSqgKlxv5LXoxUO3zNmqGztlZhL2S150WiR/eycv7CWAAMzAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVjACAAAAAAfsofSP7nQHv8ic8ZW0aNlWxplS46Z+mywPR4rQk+wcgAAzQAfQAAAAVkACAAAAAAV22FGF7ZDwK/EYiGNMlm/QuT3saQdyJM/Fn+ZyQug1oFcwAgAAAAACo7GwCvbcs5UHQMgds9/1QMklEVdjZigpuOFGrDmmxtBWMAIAAAAADu3L42J/oqNLe/XNYpodf2fhmPdcLhqO4J24o0hiveXwADNQB9AAAABWQAIAAAAADFspsMG7yHjKppyllon1KqAsTrHaZ6JzNqnSz8o6iTvwVzACAAAAAAeiA5pqVIQQ9s6UY/P8v5Jjkl3I7iFNeLDYehikrINrsFYwAgAAAAAAUljtCIvjRnLB2UPTEeRIz11Snl5SOBUNaXMXig4rqKAAM2AH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVjACAAAAAAD51NYesiO4Fo7w7iWBfqAFxEqkaDVctpvzZ28nT4SE8AAzcAfQAAAAVkACAAAAAAYfLwnoxK6XAGQrJFy8+TIJoq38ldBaO75h4zA4ZX5tQFcwAgAAAAAC2wk8UcJH5X5XGnDBYmel6srpBkzBhHtt3Jw1u5TSJ1BWMAIAAAAABJDtFhJ2tPbowp1UUmOCN/rqSqHRL1dtMu0c47vIlK4wADOAB9AAAABWQAIAAAAADGvyrtKkIcaV17ynZA7b2k5Pz6OhvxdWNkDvDWJIja8wVzACAAAAAAOLypVKNxf/wR1G8OZjUUsTQzDYeNNhhITxGMSp7euS4FYwAgAAAAAE6kvmXPqTnYIH4EJmNhy8OLVJZFOmdiBXLMorhELjKWAAM5AH0AAAAFZAAgAAAAAEocREw1L0g+roFUchJI2Yd0M0ME2bnErNUYnpyJP1SqBXMAIAAAAAAcE2/JK/8MoSeOchIuAkKh1X3ImoA7p8ujAZIfvIDo6QVjACAAAAAA85AiE+bNFAYQTXQAFexgeczdVhf8FUnf16WzJlI/kmsAAAVlACAAAAAA65pz95EthqQpfoHS9nWvdCh05AV+OokP7GUaI+7j8+wSY20AAAAAAAAAAAAAEHBheWxvYWRJZAAAAAAAEGZpcnN0T3BlcmF0b3IAAQAAAAA=", + "base64": 
"DUkFAAADcGF5bG9hZAAZBQAABGcABQUAAAMwAH0AAAAFZAAgAAAAALGGQ/CRD+pGLD53BZzWcCcYbuGLVEyjzXIx7b+ux/q2BXMAIAAAAACOC6mXEZL27P9hethZbtKYsTXKK+FpgQ9Axxmn9N/cCwVsACAAAAAA+MFEd8XfZSpbXKqqPC2L3TEFswkaG5Ff6aSgf8p+XVIAAzEAfQAAAAVkACAAAAAAtL3QIvnZqCF72eS6lKr8ilff7R6kiNklokiTuaU5wNsFcwAgAAAAAEtqr3/X731VB+VrbFcY8ZrJKRo2E0Fd+C8L0EMNcvcCBWwAIAAAAABNPhSriux8W8qbwnhCczE3IzlhNEnGDpUwTFDZSL+eYQADMgB9AAAABWQAIAAAAAB99ZW/7KwXKzl5M3XQsAJ3JbEef90IoxFYBArNiYzlgQVzACAAAAAAYO/qaw0+92HAryxOUG7iK6hnIy3OaUA9jIqtHdvcq8YFbAAgAAAAAHrUYj8A0hVgc6VklpDiljOnykrUSfEsjm56XO/bsfKdAAMzAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzQAfQAAAAVkACAAAAAAV22FGF7ZDwK/EYiGNMlm/QuT3saQdyJM/Fn+ZyQug1oFcwAgAAAAACo7GwCvbcs5UHQMgds9/1QMklEVdjZigpuOFGrDmmxtBWwAIAAAAADQbYYPxlCMMGe2MulbiurApFLoeJSMvTeDU3pyEA2jNwADNQB9AAAABWQAIAAAAADFspsMG7yHjKppyllon1KqAsTrHaZ6JzNqnSz8o6iTvwVzACAAAAAAeiA5pqVIQQ9s6UY/P8v5Jjkl3I7iFNeLDYehikrINrsFbAAgAAAAAFjBTzTpNxDEkA0vSRj0jCED9KDRlboMVyilKyDz5YR4AAM2AH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzcAfQAAAAVkACAAAAAAYfLwnoxK6XAGQrJFy8+TIJoq38ldBaO75h4zA4ZX5tQFcwAgAAAAAC2wk8UcJH5X5XGnDBYmel6srpBkzBhHtt3Jw1u5TSJ1BWwAIAAAAAA9/YU9eI3D7QbXKIw/3/gzWJ6MZrCYhG0j1wNKgRQp5wADOAB9AAAABWQAIAAAAADGvyrtKkIcaV17ynZA7b2k5Pz6OhvxdWNkDvDWJIja8wVzACAAAAAAOLypVKNxf/wR1G8OZjUUsTQzDYeNNhhITxGMSp7euS4FbAAgAAAAAA9EsxoV1B2DcQ1NJRwuxXnvVR+vkD0wbbDYEI/zFEnDAAM5AH0AAAAFZAAgAAAAAEocREw1L0g+roFUchJI2Yd0M0ME2bnErNUYnpyJP1SqBXMAIAAAAAAcE2/JK/8MoSeOchIuAkKh1X3ImoA7p8ujAZIfvIDo6QVsACAAAAAA+W0+zgLr85/PD7P9a94wk6MgNgrizx/XU9aCxAkp1IwAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAAA==", "subType": "06" } } @@ -329,7 +325,6 @@ "schema": { "default.default": { "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", "ecocCollection": "enxcol_.default.ecoc", "fields": [ { diff --git a/test/client-side-encryption/spec/legacy/fle2-Range-Date-Update.json b/test/client-side-encryption/spec/legacy/fle2v2-Range-Date-Update.json similarity index 86% rename from test/client-side-encryption/spec/legacy/fle2-Range-Date-Update.json rename to test/client-side-encryption/spec/legacy/fle2v2-Range-Date-Update.json index fd170554f6..7f9fadcda4 100644 --- a/test/client-side-encryption/spec/legacy/fle2-Range-Date-Update.json +++ b/test/client-side-encryption/spec/legacy/fle2v2-Range-Date-Update.json @@ -1,7 +1,8 @@ { "runOn": [ { - "minServerVersion": "6.2.0", + "minServerVersion": "7.0.0", + "serverless": "forbid", "topology": [ "replicaset", "sharded", @@ -13,9 +14,6 @@ "collection_name": "default", "data": [], "encrypted_fields": { - "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", - "ecocCollection": "enxcol_.default.ecoc", "fields": [ { "keyId": { @@ -216,7 +214,6 @@ "schema": { "default.default": { "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", "ecocCollection": "enxcol_.default.ecoc", "fields": [ { @@ -274,7 +271,6 @@ "schema": { "default.default": { "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", "ecocCollection": "enxcol_.default.ecoc", "fields": [ { @@ -326,7 +322,7 @@ "encryptedDate": { "$gt": { "$binary": { - "base64": 
"CnEFAAADcGF5bG9hZABBBQAABGcABQUAAAMwAH0AAAAFZAAgAAAAALGGQ/CRD+pGLD53BZzWcCcYbuGLVEyjzXIx7b+ux/q2BXMAIAAAAACOC6mXEZL27P9hethZbtKYsTXKK+FpgQ9Axxmn9N/cCwVjACAAAAAAKs6X8gaa+AJ6PpxVicsk7iCXpSYIkQ5zB0KnXqlfCWgAAzEAfQAAAAVkACAAAAAAtL3QIvnZqCF72eS6lKr8ilff7R6kiNklokiTuaU5wNsFcwAgAAAAAEtqr3/X731VB+VrbFcY8ZrJKRo2E0Fd+C8L0EMNcvcCBWMAIAAAAABYTWOKsDZ7cgPhiKDR2bE5CFGz4MuEwZbzZ0z46eGzBQADMgB9AAAABWQAIAAAAAB99ZW/7KwXKzl5M3XQsAJ3JbEef90IoxFYBArNiYzlgQVzACAAAAAAYO/qaw0+92HAryxOUG7iK6hnIy3OaUA9jIqtHdvcq8YFYwAgAAAAABSqgKlxv5LXoxUO3zNmqGztlZhL2S150WiR/eycv7CWAAMzAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVjACAAAAAAfsofSP7nQHv8ic8ZW0aNlWxplS46Z+mywPR4rQk+wcgAAzQAfQAAAAVkACAAAAAAV22FGF7ZDwK/EYiGNMlm/QuT3saQdyJM/Fn+ZyQug1oFcwAgAAAAACo7GwCvbcs5UHQMgds9/1QMklEVdjZigpuOFGrDmmxtBWMAIAAAAADu3L42J/oqNLe/XNYpodf2fhmPdcLhqO4J24o0hiveXwADNQB9AAAABWQAIAAAAADFspsMG7yHjKppyllon1KqAsTrHaZ6JzNqnSz8o6iTvwVzACAAAAAAeiA5pqVIQQ9s6UY/P8v5Jjkl3I7iFNeLDYehikrINrsFYwAgAAAAAAUljtCIvjRnLB2UPTEeRIz11Snl5SOBUNaXMXig4rqKAAM2AH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVjACAAAAAAD51NYesiO4Fo7w7iWBfqAFxEqkaDVctpvzZ28nT4SE8AAzcAfQAAAAVkACAAAAAAYfLwnoxK6XAGQrJFy8+TIJoq38ldBaO75h4zA4ZX5tQFcwAgAAAAAC2wk8UcJH5X5XGnDBYmel6srpBkzBhHtt3Jw1u5TSJ1BWMAIAAAAABJDtFhJ2tPbowp1UUmOCN/rqSqHRL1dtMu0c47vIlK4wADOAB9AAAABWQAIAAAAADGvyrtKkIcaV17ynZA7b2k5Pz6OhvxdWNkDvDWJIja8wVzACAAAAAAOLypVKNxf/wR1G8OZjUUsTQzDYeNNhhITxGMSp7euS4FYwAgAAAAAE6kvmXPqTnYIH4EJmNhy8OLVJZFOmdiBXLMorhELjKWAAM5AH0AAAAFZAAgAAAAAEocREw1L0g+roFUchJI2Yd0M0ME2bnErNUYnpyJP1SqBXMAIAAAAAAcE2/JK/8MoSeOchIuAkKh1X3ImoA7p8ujAZIfvIDo6QVjACAAAAAA85AiE+bNFAYQTXQAFexgeczdVhf8FUnf16WzJlI/kmsAAAVlACAAAAAA65pz95EthqQpfoHS9nWvdCh05AV+OokP7GUaI+7j8+wSY20AAAAAAAAAAAAAEHBheWxvYWRJZAAAAAAAEGZpcnN0T3BlcmF0b3IAAQAAAAA=", + "base64": 
"DUkFAAADcGF5bG9hZAAZBQAABGcABQUAAAMwAH0AAAAFZAAgAAAAALGGQ/CRD+pGLD53BZzWcCcYbuGLVEyjzXIx7b+ux/q2BXMAIAAAAACOC6mXEZL27P9hethZbtKYsTXKK+FpgQ9Axxmn9N/cCwVsACAAAAAA+MFEd8XfZSpbXKqqPC2L3TEFswkaG5Ff6aSgf8p+XVIAAzEAfQAAAAVkACAAAAAAtL3QIvnZqCF72eS6lKr8ilff7R6kiNklokiTuaU5wNsFcwAgAAAAAEtqr3/X731VB+VrbFcY8ZrJKRo2E0Fd+C8L0EMNcvcCBWwAIAAAAABNPhSriux8W8qbwnhCczE3IzlhNEnGDpUwTFDZSL+eYQADMgB9AAAABWQAIAAAAAB99ZW/7KwXKzl5M3XQsAJ3JbEef90IoxFYBArNiYzlgQVzACAAAAAAYO/qaw0+92HAryxOUG7iK6hnIy3OaUA9jIqtHdvcq8YFbAAgAAAAAHrUYj8A0hVgc6VklpDiljOnykrUSfEsjm56XO/bsfKdAAMzAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzQAfQAAAAVkACAAAAAAV22FGF7ZDwK/EYiGNMlm/QuT3saQdyJM/Fn+ZyQug1oFcwAgAAAAACo7GwCvbcs5UHQMgds9/1QMklEVdjZigpuOFGrDmmxtBWwAIAAAAADQbYYPxlCMMGe2MulbiurApFLoeJSMvTeDU3pyEA2jNwADNQB9AAAABWQAIAAAAADFspsMG7yHjKppyllon1KqAsTrHaZ6JzNqnSz8o6iTvwVzACAAAAAAeiA5pqVIQQ9s6UY/P8v5Jjkl3I7iFNeLDYehikrINrsFbAAgAAAAAFjBTzTpNxDEkA0vSRj0jCED9KDRlboMVyilKyDz5YR4AAM2AH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzcAfQAAAAVkACAAAAAAYfLwnoxK6XAGQrJFy8+TIJoq38ldBaO75h4zA4ZX5tQFcwAgAAAAAC2wk8UcJH5X5XGnDBYmel6srpBkzBhHtt3Jw1u5TSJ1BWwAIAAAAAA9/YU9eI3D7QbXKIw/3/gzWJ6MZrCYhG0j1wNKgRQp5wADOAB9AAAABWQAIAAAAADGvyrtKkIcaV17ynZA7b2k5Pz6OhvxdWNkDvDWJIja8wVzACAAAAAAOLypVKNxf/wR1G8OZjUUsTQzDYeNNhhITxGMSp7euS4FbAAgAAAAAA9EsxoV1B2DcQ1NJRwuxXnvVR+vkD0wbbDYEI/zFEnDAAM5AH0AAAAFZAAgAAAAAEocREw1L0g+roFUchJI2Yd0M0ME2bnErNUYnpyJP1SqBXMAIAAAAAAcE2/JK/8MoSeOchIuAkKh1X3ImoA7p8ujAZIfvIDo6QVsACAAAAAA+W0+zgLr85/PD7P9a94wk6MgNgrizx/XU9aCxAkp1IwAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAAA==", "subType": "06" } } @@ -346,7 +342,6 @@ "schema": { "default.default": { "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", "ecocCollection": "enxcol_.default.ecoc", "fields": [ { @@ -380,24 +375,6 @@ } ] } - }, - "deleteTokens": { - "default.default": { - "encryptedDate": { - "e": { - "$binary": { - "base64": "65pz95EthqQpfoHS9nWvdCh05AV+OokP7GUaI+7j8+w=", - "subType": "00" - } - }, - "o": { - "$binary": { - "base64": "noN+05JsuO1oDg59yypIGj45i+eFH6HOTXOPpeZ//Mk=", - "subType": "00" - } - } - } - } } }, "$db": "default" diff --git a/test/client-side-encryption/spec/legacy/fle2-Range-Decimal-Aggregate.json b/test/client-side-encryption/spec/legacy/fle2v2-Range-Decimal-Aggregate.json similarity index 75% rename from test/client-side-encryption/spec/legacy/fle2-Range-Decimal-Aggregate.json rename to test/client-side-encryption/spec/legacy/fle2v2-Range-Decimal-Aggregate.json index 73d2cf4892..fb129392b1 100644 --- a/test/client-side-encryption/spec/legacy/fle2-Range-Decimal-Aggregate.json +++ b/test/client-side-encryption/spec/legacy/fle2v2-Range-Decimal-Aggregate.json @@ -1,7 +1,8 @@ { "runOn": [ { - "minServerVersion": "6.2.0", + "minServerVersion": "7.0.0", + "serverless": "forbid", "topology": [ "replicaset" ] @@ -11,9 +12,6 @@ "collection_name": "default", "data": [], "encrypted_fields": { - "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", - "ecocCollection": "enxcol_.default.ecoc", "fields": [ { "keyId": { @@ -22,7 +20,7 @@ "subType": "04" } }, - "path": "encryptedDecimal", + "path": "encryptedDecimalNoPrecision", "bsonType": "decimal", "queries": { "queryType": "rangePreview", @@ -91,7 +89,7 @@ "arguments": { "document": { "_id": 0, - 
"encryptedDecimal": { + "encryptedDecimalNoPrecision": { "$numberDecimal": "0" } } @@ -102,7 +100,7 @@ "arguments": { "document": { "_id": 1, - "encryptedDecimal": { + "encryptedDecimalNoPrecision": { "$numberDecimal": "1" } } @@ -114,7 +112,7 @@ "pipeline": [ { "$match": { - "encryptedDecimal": { + "encryptedDecimalNoPrecision": { "$gt": { "$numberDecimal": "0" } @@ -126,7 +124,7 @@ "result": [ { "_id": 1, - "encryptedDecimal": { + "encryptedDecimalNoPrecision": { "$numberDecimal": "1" } } @@ -185,7 +183,7 @@ "documents": [ { "_id": 0, - "encryptedDecimal": { + "encryptedDecimalNoPrecision": { "$$type": "binData" } } @@ -196,7 +194,6 @@ "schema": { "default.default": { "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", "ecocCollection": "enxcol_.default.ecoc", "fields": [ { @@ -206,7 +203,7 @@ "subType": "04" } }, - "path": "encryptedDecimal", + "path": "encryptedDecimalNoPrecision", "bsonType": "decimal", "queries": { "queryType": "rangePreview", @@ -233,7 +230,7 @@ "documents": [ { "_id": 1, - "encryptedDecimal": { + "encryptedDecimalNoPrecision": { "$$type": "binData" } } @@ -244,7 +241,6 @@ "schema": { "default.default": { "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", "ecocCollection": "enxcol_.default.ecoc", "fields": [ { @@ -254,7 +250,7 @@ "subType": "04" } }, - "path": "encryptedDecimal", + "path": "encryptedDecimalNoPrecision", "bsonType": "decimal", "queries": { "queryType": "rangePreview", @@ -281,10 +277,10 @@ "pipeline": [ { "$match": { - "encryptedDecimal": { + "encryptedDecimalNoPrecision": { "$gt": { "$binary": { - "base64": "CgljAAADcGF5bG9hZADZYgAABGcAnWIAAAMwAH0AAAAFZAAgAAAAAJu2KgiI8vM+kz9qD3ZQzFQY5qbgYqCqHG5R4jAlnlwXBXMAIAAAAAAAUXxFXsz764T79sGCdhxvNd5b6E/9p61FonsHyEIhogVjACAAAAAAeEtIU/sp7zwjA/VArYkCzkUUiRiQOmlTaVZvfUqbEp0AAzEAfQAAAAVkACAAAAAAPQPvL0ARjujSv2Rkm8r7spVsgeC1K3FWcskGGZ3OdDIFcwAgAAAAACgNn660GmefR8jLqzgR1u5O+Uocx9GyEHiBqVGko5FZBWMAIAAAAACMKx1UyNAN4yVafFht8jp4w4LNaJcLhnozfSMzDHD3owADMgB9AAAABWQAIAAAAAD2Zi6kcxmaD2mY3VWrP+wYJMPg6cSBIYPapxaFQxYFdQVzACAAAAAAM/cV36BLBY3xFBXsXJY8M9EHHOc/qrmdc2CJmj3M89gFYwAgAAAAAHdek2wlAQ5BQORVjudyziRTV0EKx8qbsnoDiw4HG2xaAAMzAH0AAAAFZAAgAAAAAOaNqI9srQ/mI9gwbk+VkizGBBH/PPWOVusgnfPk3tY1BXMAIAAAAAAc96O/pwKCmHCagT6T/QV/wz4vqO+R22GsZ1dse2Vg6QVjACAAAAAA3tld+twIlVWnCTXElspau5k02CwxXDjh+3u1CQtV/W4AAzQAfQAAAAVkACAAAAAA0XlQgy/Yu97EQOjronl9b3dcR1DFn3deuVhtTLbJZHkFcwAgAAAAACoMnpVl6EFJak8A+t5N4RFnQhkQEBnNAx8wDqmq5U/dBWMAIAAAAABRmXIchytMNz4J439viiY5FZ1OB3AXJBkZ3udUqHpsqAADNQB9AAAABWQAIAAAAAAOtpNexRxfv0yRFvZO9DhlkpU4mDuAb8ykdLnE5Vf1VAVzACAAAAAAeblFKm/30orP16uQpZslvsoS8s0xfNPIBlw3VkHeekYFYwAgAAAAAJQdzGMO2s1AkdKz3ZPytivrQphUFVhvhMcXjvUGzHBDAAM2AH0AAAAFZAAgAAAAAIr8xAFm1zPmrvW4Vy5Ct0W8FxMmyPmFzdWVzesBhAJFBXMAIAAAAABYeeXjJEzTHwxab6pUiCRiZjxgtN59a1y8Szy3hfkg+gVjACAAAAAA161loizzKvXS1Po/3bxeNICmKpsPSK9Q+EpkISK3jVoAAzcAfQAAAAVkACAAAAAAl+ibLk0/+EwoqeC8S8cGgAtjtpQWGEZDsybMPnrrkwEFcwAgAAAAAHPPBudWgQ+HUorLDpJMqhS9VBF2VF5aLcxgrM1s+yU7BWMAIAAAAAACyakB3CZWEjbxK0u9Sflc3+EafBAQFbvBpCxKvxuEQAADOAB9AAAABWQAIAAAAACR9erwLTb+tcWFZgJ2MEfM0PKI9uuwIjDTHADRFgD+SQVzACAAAAAAcOop8TXsGUVQoKhzUllMYWxL93xCOkwtIpV8Q6hiSYYFYwAgAAAAAJ43998lzKoVqWm99ZzKHJLeVscGNCVoKDvpUtt2rDI9AAM5AH0AAAAFZAAgAAAAALv0vCPgh7QpmM8Ug6ad5ioZJCh7pLMdT8FYyQioBQ6KBXMAIAAAAADsCPyIG8t6ApQkRk1fX/sfc1kpuWCWP8gAEpnYoBSHrQVjACAAAAAAj2h1WLr0EFEFZ31djx3BtLIbAtdE05ax54opkJo4/bQAAzEwAH0AAAAFZAAgAAAAAIW4AxJgYoM0pcNTwk1RSbyjZGIqgKL1hcTJmNrnZmoPBXMAIAAAAAAZpfx3EFO0vY0f1eHnE0PazgqeNDTaj+pPJMUNW8lFrAVjACAAAAAAqFD83E8POmo6pdqg7D+jWtngbgcV99mbQBxkbpX7ds4AAzExAH0
AAAAFZAAgAAAAAKliO6L9zgeuufjj174hvmQGNRbmYYs9yAirL7OxwEW3BXMAIAAAAAAqU7vs3DWUQ95Eq8OejwWnD0GuXd+ASi/uD6S0l8MM1QVjACAAAAAAp0cwpAh7SiuStPwYNVvp83N5GWyWRWA7UGRxHDDj8g8AAzEyAH0AAAAFZAAgAAAAAOGQcCBkk+j/Kzjt/Cs6g3BZPJG81wIHBS8JewHGpgk+BXMAIAAAAABjrxZXWCkdzrExwCgyHaafuPSQ4V4x2k9kUCAqUaYKDQVjACAAAAAAJr/tEuxScrJJEMECy4itHf1y5+gO2l5sBjgM/EOLXw0AAzEzAH0AAAAFZAAgAAAAAPmCNEt4t97waOSd5hNi2fNCdWEkmcFJ37LI9k4Az4/5BXMAIAAAAABX7DuDPNg+duvELf3NbLWkPMFw2HGLgWGHyVWcPvSNCAVjACAAAAAAeNCEn5Z3A5CgCnIgaBl2N8p0t+5rRY06vGg6ePVMaTUAAzE0AH0AAAAFZAAgAAAAAD4vtVUYRNB+FD9yoQ2FVJH3nMeJeKbi6eZfth638YqbBXMAIAAAAAANCuUB4OdmuD6LaDK2f3vaqfgYYvg40wDXOBbcFjTqLwVjACAAAAAATozHsao4er/Uk5H/pKe/1JEo/3h4Pgah2RgJHXf2PeYAAzE1AH0AAAAFZAAgAAAAAJPIRzjmTjbdIvshG6UslbEOd797ZSIdjGAhGWxVQvK1BXMAIAAAAABgmJ0Jh8WLs9IYs/a7DBjDWd8J3thW/AGJK7zDnMeYOAVjACAAAAAAtD48LbnCkTvacfz8DvR1ixsbYceiJxJRV7tLqDUWvVwAAzE2AH0AAAAFZAAgAAAAAABQyKQPoW8wGPIqnsTv69+DzIdRkohRhOhDmyVHkw9WBXMAIAAAAAAqWA2X4tB/h3O1Xlawtz6ndI6WaTwgU1QYflL35opu5gVjACAAAAAAphx2eRFGSW/tm3HxilqRv5Zj5TtOy4KSGiCIm6yJANYAAzE3AH0AAAAFZAAgAAAAACB7NOyGQ1Id3MYnxtBXqyZ5Ul/lHH6p1b10U63DfT6bBXMAIAAAAADpOryIcndxztkHSfLN3Kzq29sD8djS0PspDSqERMqokQVjACAAAAAAIHYCljTfUu/Ox23Nrjz2Z6cqQpn4s0sJV/SxkC9UtIEAAzE4AH0AAAAFZAAgAAAAAKVfXLfs8XA14CRTB56oZwV+bFJN5BHraTXbqEXZDmTkBXMAIAAAAAASRWTsfGOpqdffiOodoqIgBzG/yzFyjR5CfUsIUIWGpgVjACAAAAAAGs7l0GsFNkQMcJBUuUsEOMuQupbEvi7MDcezspsCY3EAAzE5AH0AAAAFZAAgAAAAAH/aZr4EuS0/noQR9rcF8vwoaxnxrwgOsSJ0ys8PkHhGBXMAIAAAAACd7ObGQW7qfddcvyxRTkPuvq/PHu7+6I5dxwS1Lzy5XAVjACAAAAAAWnfsbdu5Qvb62rEpBR90qh+3jdYfp266dmuNPLoXys4AAzIwAH0AAAAFZAAgAAAAAKvlcpFFNq0oA+urq3w6d80PK1HHHw0H0yVWvU9aHijXBXMAIAAAAADWnAHQ5Fhlcjawki7kWzdqjM2f6IdGJblojrYElWjsZgVjACAAAAAAdNxswYIMjqVta8sLC8wniMm6gOgIgG5eWTPAxItBJJ4AAzIxAH0AAAAFZAAgAAAAAH/2aMezEOddrq+dNOkDrdqf13h2ttOnexZsJxG1G6PNBXMAIAAAAABNtgnibjC4VKy5poYjvdsBBnVvDTF/4mmEAxsXVgZVKgVjACAAAAAArWXr2Myg8T8/0eYBCUXDOsIR//zqZtlE6Lb1YZJkvYcAAzIyAH0AAAAFZAAgAAAAAF2wZoDL6/V59QqO8vdRZWDpXpkV4h4KOCSn5e7x7nmzBXMAIAAAAADLZBu7LCYjbThaVUqMK14H/elrVOYIKJQCx4C9Yjw37gVjACAAAAAAWeF6F8pBeCm27PWjQGrlyN7JHp9WqCNnwFzqZ+V51t0AAzIzAH0AAAAFZAAgAAAAAH27yYaLn9zh2CpvaoomUPercSfJRUmBY6XFqmhcXi9QBXMAIAAAAAAUwumVlIYIs9JhDhSj0R0+59psCMsFk94E62VxkPt42QVjACAAAAAAjbsmlVrfexmULer3RtGwnOf20pF0xv4Z46OHOaIwgzEAAzI0AH0AAAAFZAAgAAAAALMg2kNAO4AFFs/mW3In04yFeN4AP6Vo0klyUoT06RquBXMAIAAAAAAgGWJbeIdwlpqXCyVIYSs0dt54Rfc8JF4b8uYc+YUj0AVjACAAAAAAsNKKan8cMW1oYFmQiyPZbepuJlDD0/CQUS21++CP16kAAzI1AH0AAAAFZAAgAAAAALas/i1T2DFCEmrrLEi7O2ngJZyFHialOoedVXS+OjenBXMAIAAAAAA1kK0QxY4REcGxHeMkgumyF7iwlsRFtw9MlbSSoQY7uAVjACAAAAAAUGZ/BwF6Q9hcFBC8F5ed6RwYWJ1/OahRwljBloMgkFAAAzI2AH0AAAAFZAAgAAAAAP1TejmWg1CEuNSMt6NUgeQ5lT+oBoeyF7d2l5xQrbXWBXMAIAAAAABPX0kj6obggdJShmqtVfueKHplH4ZrXusiwrRDHMOKeQVjACAAAAAApU9zcPAQ2xOspmwhjWN90v2m2clQ4rk7WBmm/nUyOHQAAzI3AH0AAAAFZAAgAAAAAMrKn+QPa/NxYezNhlOX9nyEkN1kE/gW7EuZkVqYl0b8BXMAIAAAAABUoZMSPUywRGfX2EEencJEKH5x/P9ySUVrhStAwgR/LgVjACAAAAAAl/hDhauhBkda/I/kLr/XLxyso2guawC2isj+tDQhfMoAAzI4AH0AAAAFZAAgAAAAAMmD1+a+oVbiUZd1HuZqdgtdVsVKwuWAn3/M1B6QGBM3BXMAIAAAAACLyytOYuZ9WEsIrrtJbXUx4QgipbaAbmlJvSZVkGi0CAVjACAAAAAAVKbMdxvkfCBoqJCUVjLrtKF90a80LocckdFc7LvYuWIAAzI5AH0AAAAFZAAgAAAAAOVKV7IuFwmYP1qVv8h0NvJmfPICu8yQhzjG7oJdTLDoBXMAIAAAAABL70XLfQLKRsw1deJ2MUvxSWKxpF/Ez73jqtbLvqbuogVjACAAAAAARiC2ShVCKMjjiqB6xg+jDIYJNhhWqmceO07EC8rEXkoAAzMwAH0AAAAFZAAgAAAAAKS/1RSAQma+xV9rz04IcdzmavtrBDjOKPM+Z2NEyYfPBXMAIAAAAAAOJDWGORDgfRv8+w5nunh41wXb2hCA0MRzwnLnQtIqPgVjACAAAAAAWU2Vw2Rta8oHxPPfOWQa86tjYWndb0E3RNlZ9gthHt0AAzMxAH0AAAAFZAAgAAAAAFeq8o82uNY1X8cH6OhdTzHNBUnCChsEDs5tm0kPBz3qBXMAIAAAAABaxMBbsaeEj/EDtr8nZfrhhhirBRPJwVamDo5WwbgvTQVjAC
AAAAAAqcsCyJuLvxG8STrUI1Ccaga760+4fbmIPc6bXodea0gAAzMyAH0AAAAFZAAgAAAAAI8IKIfDrohHh2cjspJHCovqroSr5N3QyVtNzFvT5+FzBXMAIAAAAABXHXteKG0DoOMmECKp6ro1MZNQvXGzqTDdZ0DUc8QfFAVjACAAAAAA47u7VuqtYJ4jt5CcGrUacopqApwpqEpIpkalvCRtul0AAzMzAH0AAAAFZAAgAAAAAJkHvm15kIu1OtAiaByj5ieWqzxiu/epK6c/9+KYIrB0BXMAIAAAAACzg5TcyANk0nes/wCJudd1BwlkWWF6zw3nGclq5v3SJQVjACAAAAAAvTQRtEYdIj2+bfJNamvrgWRoBsYYjMQx9MyuFwSfKmYAAzM0AH0AAAAFZAAgAAAAAAYSOvEWWuSg1Aym7EssNLR+xsY7e9BcwsX4JKlnSHJcBXMAIAAAAABT48eY3PXVDOjw7JpNjOe1j2JyI3LjDnQoqZ8Je5B2KgVjACAAAAAA4eLwHqiSgM1pMqjK3QFgYauOnI3emnlOK3rgQPuec4AAAzM1AH0AAAAFZAAgAAAAAGQxC9L1e9DfO5XZvX1yvc3hTLtQEdKO9FPMkyg0Y9ZABXMAIAAAAADtmcMNJwdWLxQEArMGZQyzpnu+Z5yMmPAkvgq4eAKwNQVjACAAAAAAxA7XR9Cp6671TiZesvqE/EPKVPXfGDaYuDnce+MaZw0AAzM2AH0AAAAFZAAgAAAAADLHK2LNCNRO0pv8n4fAsxwtUqCNnVK8rRgNiQfXpHSdBXMAIAAAAACf16EBIHRKD3SzjRW+LMOl+47QXA3CJhMzlcqyFRW22AVjACAAAAAA9F9Ztm39rTapXoeRmzk6eUoa3MDPatwoQD5PvD+3xGEAAzM3AH0AAAAFZAAgAAAAAHiZJiXKNF8bbukQGsdYkEi95I+FSBHy1I5/hK2uEZruBXMAIAAAAADE+lZBa8HDUJPN+bF6xI9x4N7GF9pj3vBR7y0BcfFhBAVjACAAAAAArsw2CrSC2y02EiWJxViVxlYRIXtWARpUTwO/mfqS7O4AAzM4AH0AAAAFZAAgAAAAAI1oa2OIw5TvhT14tYCGmhanUoYcCZtNbrVbeoMldHNZBXMAIAAAAAAx2nS0Ipblf2XOgBiUOuJFBupBhe7nb6QPLZlA4aMPCgVjACAAAAAAaVoGxV5YH4urmcw3Zocg+QHoq+hA0u6nIkconI0MtZoAAzM5AH0AAAAFZAAgAAAAABgTWS3Yap7Q59hii/uPPimHWXsr+DUmsqfwt/X73qsOBXMAIAAAAACKK05liW5KrmEAvtpCB1WUltruzUylDDpjea//UlWoOAVjACAAAAAAAI3iOqWSA+DpgNr/neIRpt85Q8pLS/81ZKDnbC4io/0AAzQwAH0AAAAFZAAgAAAAABOAnBPXDp6i9TISQXvcNKwGDLepZTu3cKrB4vKnSCjBBXMAIAAAAADjjzZO7UowAAvpwyG8BNOVqLCccMFk3aDK4unUeft5ywVjACAAAAAAIZqKrUcbtze/K3CUKdchuVXQYkgj6jQfIeMZ3GrtYeEAAzQxAH0AAAAFZAAgAAAAAHN8hyvT1lYrAsdiV5GBdd5jhtrAYE/KnSjw2Ka9hjz9BXMAIAAAAAD794JK7EeXBs+D7yOVK7nWF8SbZ/7U8gZ7nnT9JFNwTAVjACAAAAAAQ4K5cDGLM/2L7KbMt537tsG1eiWrboKMj6pXVfBDIBoAAzQyAH0AAAAFZAAgAAAAAO93bPrq8bsnp1AtNd9ETnXIz0lH/2HYN/vuw9wA3fyFBXMAIAAAAABHlls5fbaF2oAGqptC481XQ4eYxInTC29aElfmVZgDUgVjACAAAAAAe0oLBk+ZaipcPgjjHvACAyA42cLY8i+s+V0EsarulZkAAzQzAH0AAAAFZAAgAAAAAL1YsAZm1SA0ztU6ySIrQgCCA74V6rr0/4iIygCcaJL6BXMAIAAAAADTXWTHWovGmUR1Zg9l/Aqq9H5mOCJQQrb/Dfae7e3wKAVjACAAAAAAAeXpcv0oCp9iY/U81w+WJjFjGn60KLQEURJd/pSoaJgAAzQ0AH0AAAAFZAAgAAAAAEoFAeHk0RZ9kD+cJRD3j7PcE5gzWKnyBrF1I/MDNp5mBXMAIAAAAACgHtc2hMBRSZjKw8RAdDHK+Pi1HeyjiBuAslGVNcW5tAVjACAAAAAAmSJiRfjmuwq5cYfadeYQRkPqbGYYrMNTQeyhBEAYClEAAzQ1AH0AAAAFZAAgAAAAAAW+7DmSN/LX+/0uBVJDHIc2dhxAGz4+ehyyz8fAnNGoBXMAIAAAAAA6Ilw42EvvfLJ3Eq8Afd+FjPoPcQutZO6ltmCLEr8kxQVjACAAAAAAUHZ9EwBIvDI4PUeCOzZCuVN9g9h5Zvq2mSpmejPLBE8AAzQ2AH0AAAAFZAAgAAAAANBdV7M7kuYO3EMoQItAbXv4t2cIhfaT9V6+s4cg9djlBXMAIAAAAABvz4MIvZWxxrcJCL5qxLfFhXiUYB1OLHdKEjco94SgDgVjACAAAAAAvz1koQRh+weAUstLWz/iHxFrLReN4KtQjdEU/zrKOFsAAzQ3AH0AAAAFZAAgAAAAAMoAoiAn1kc79j5oPZtlMWHMhhgwNhLUnvqkqIFvcH1NBXMAIAAAAADcJTW7WiCyW0Z9YDUYwppXhLj4Ac1povpJvcAq+i48MQVjACAAAAAARL0k3Uwe7OUwTejHMoNwK+twNHQznmo1vVUZaH7h8BIAAzQ4AH0AAAAFZAAgAAAAACI3j5QP7dWHpcT6WO/OhsWwRJNASBYqIBDNzW8IorEyBXMAIAAAAABxUpBSjXwCKDdGP9hYU+RvyR+96kChfvyyRC4jZmztqAVjACAAAAAA93/qZqyku1BJePH5hmMWsMfuMlF7TOFZp7z2wLJNdeYAAzQ5AH0AAAAFZAAgAAAAAKsbycEuQSeNrF8Qnxqw3x3og8JmQabwGqnDbqzFRVrrBXMAIAAAAACno/3ef2JZJS93SVVzmOZSN+jjJHT8s0XYq2M46d2sLAVjACAAAAAARUbN9bbz8Balyw8pmBLg3L5dy3aQ9s8IgqpVLeQzTkUAAzUwAH0AAAAFZAAgAAAAAPXIcoO8TiULqlxzb74NFg+I8kWX5uXIDUPnh2DobIoMBXMAIAAAAADR6/drkdTpnr9g1XNvKDwtBRBdKn7c2c4ZNUVK5CThdQVjACAAAAAAnJXYvnjljaE6ajRF+h90FXzJ9EJY26D0oG9t61ZMzqgAAzUxAH0AAAAFZAAgAAAAAEa03ZOJmfHT6/nVadvIw71jVxEuIloyvxXraYEW7u7pBXMAIAAAAADzRlBJK75FLiKjz3djqcgjCLo/e3yntI3MnPS48OORhgVjACAAAAAAbFS6SmTSDF3AsPceQrRwMMRoAnzL/k5avSHlIji5zYIAAzUyAH0AAAAFZAAgAAAAAKx8NLSZUU04pSSGmHa5fh2oLHsEN5mmNMNHL95/tuC9B
XMAIAAAAAA59hcXVaN3MNdHoo11OcH1aPRzHCwpVjO9mGfMz4xh3QVjACAAAAAAoIvo/iOb/eXJv7w4KSQo74DOyO4uAIO09Ba7JZw9ousAAzUzAH0AAAAFZAAgAAAAAHNKAUxUqBFNS9Ea9NgCZoXMWgwhP4x0/OvoaPRWMquXBXMAIAAAAABUZ551mnP4ZjX+PXU9ttomzuOpo427MVynpkyq+nsYCQVjACAAAAAAlCTphL00SYotztwI+txbHAFTusjh1nNbiya24rM1dUEAAzU0AH0AAAAFZAAgAAAAALfGXDlyDVcGaqtyHkLT0qpuRhJQLgCxtznazhFtuyn/BXMAIAAAAABipxlXDq14C62pXhwAeen5+syA+/C6bN4rtZYcO4zKwAVjACAAAAAA431CBTW4s3IQde024W2cz5iEnj8EJ7p07esepoVDxmQAAzU1AH0AAAAFZAAgAAAAANoEr8sheJjg4UCfBkuUzarU9NFoy1xwbXjs5ifVDeA9BXMAIAAAAABPoyTf6M+xeZVGES4aNzVlq7LgjqZXJ/QunjYVusGUEAVjACAAAAAAywuLdjJJuXewOXMAr25GMBEif8+P74+pGaENUn+XIB8AAzU2AH0AAAAFZAAgAAAAAKvDiK+xjlBe1uQ3SZTNQl2lClIIvpP/5CHwY6Kb3WlgBXMAIAAAAAANnxImq5MFbWaRBHdJp+yD09bVlcFtiFDYsy1eDZj+iQVjACAAAAAAvwntX1RgeNfp5oiD+r3sAKIbMGy1yEblODDYWec1xuIAAzU3AH0AAAAFZAAgAAAAAF49IlFH9RmSUSvUQpEPUedEksrQUcjsOv44nMkwXhjzBXMAIAAAAADJtWGbk0bZzmk20obz+mNsp86UCu/nLLlbg7ppxYn7PgVjACAAAAAAKW8QTERGXwh6fE4gJsULcmu80B4BMkMaZR9USn/C6RgAAzU4AH0AAAAFZAAgAAAAAOuSJyuvz50lp3BzXlFKnq62QkN2quNU1Gq1IDsnFoJCBXMAIAAAAAAqavH1d93XV3IzshWlMnzznucadBF0ND092/2ApI1AcAVjACAAAAAAmVcLt9hf9/q+2O3K4cvFAsvnCsSofHZ5iTzIaR5YKFIAAzU5AH0AAAAFZAAgAAAAALtgC4Whb4ZdkCiI30zY6fwlsxSa7lEaOAU3SfUXr02XBXMAIAAAAACgdZ6U1ZVgUaZZwbIaCdlANpCw6TZV0bwg3DS1NC/mnAVjACAAAAAAYJOnAwVW0gPl1jkCxAL1ZAhKkJ1+ShfwyaIaipflCegAAzYwAH0AAAAFZAAgAAAAAF6PfplcGp6vek1ThwenMHVkbZgrc/dHgdsgx1VdPqZ5BXMAIAAAAACha3qhWkqmuwJSEXPozDO8y1ZdRLyzt9Crt2vjGnT7AAVjACAAAAAA1JYhGDDvtf8b2Pup5jdzQmy325tGo27n/5thjZ/GI4sAAzYxAH0AAAAFZAAgAAAAAKoLEhLvLjKc7lhOJfx+VrGJCx9tXlOSa9bxQzGR6rfbBXMAIAAAAAAIDK5wNnjRMBzET7x/KAMExL/zi1IumJM92XTgXfoPoAVjACAAAAAAhoVLxo0hXbxAzz2BWbeRxAzk08TbVuHxJkdsbCISswkAAzYyAH0AAAAFZAAgAAAAADoQv6lutRmh5scQFvIW6K5JBquLxszuygM1tzBiGknIBXMAIAAAAADAD+JjW7FoBQ76/rsECmmcL76bmyfXpUU/awqIsZdO+wVjACAAAAAAwLIixR/svwADNhtiOZ9mDF2rUqEvuPx2tUSQbhOpersAAzYzAH0AAAAFZAAgAAAAAJNHUGAgn56KekghO19d11nai3lAh0JAlWfeP+6w4lJBBXMAIAAAAAD9XGJlvz59msJvA6St9fKW9CG4JoHV61rlWWnkdBRLzwVjACAAAAAAKMVXumaH5QOt5WVHFip9xWx9e5I/5Y9uEO0UAX6dy7QAAzY0AH0AAAAFZAAgAAAAAHgYoMGjEE6fAlAhICv0+doHcVX8CmMVxyq7+jlyGrvmBXMAIAAAAAC/5MQZgTHuIr/O5Z3mXPvqrom5JTQ8IeSpQGhO9sB+8gVjACAAAAAAPMgD8Rqnd94atKnMyPjlTwthUQ710MKJVqgtwNXLFWwAAzY1AH0AAAAFZAAgAAAAANpIljbxHOM7pydY877gpRQvYY2TGK7igqgGsavqGPBABXMAIAAAAAAqHyEu9gpurPOulApPnr0x9wrygY/7mXe9rAC+tPK80wVjACAAAAAAezZExYLAbjyMsVuSrWGQ+LsXYEcp6AFMANkQ496mTXcAAzY2AH0AAAAFZAAgAAAAAGR+gEaZTeGNgG9BuM1bX2R9ed4FCxBA9F9QvdQDAjZwBXMAIAAAAABSkrYFQ6pf8MZ1flgmeIRkxaSh/Eep4Btdx4QYnGGnwAVjACAAAAAAQ011358xXtVDOyGPpF2+zGoMdiQ8PGCB1QfDcluYUF4AAzY3AH0AAAAFZAAgAAAAAFNprhQ3ZwIcYbuzLolAT5n/vc14P9kUUQComDu6eFyKBXMAIAAAAAAcx9z9pk32YbPV/sfPZl9ALIEVsqoLXgqWLVK/tP+heAVjACAAAAAA3eqduo9CWE5BePqBq7KCh5++QqXnyjCwybB15Zoeiu4AAzY4AH0AAAAFZAAgAAAAADgyPqQdqQrgfmJjRFAILTHzXbdw5kpKyfeoEcy6YYG/BXMAIAAAAAAE+3XsBQ8VAxAkN81au+f3FDeCD/s7KoZD+fnM1MJSSAVjACAAAAAAWHAiQNo6HBkIhf72fVQxJLaCtNeTqn2OuMmYZjT7PRkAAzY5AH0AAAAFZAAgAAAAAI0CT7JNngTCTUSei1Arw7eHWCD0jumv2rb7imjWIlWABXMAIAAAAABSP8t6ya0SyCphXMwnru6ZUDXWElN0NfBvEOhDvW9bJQVjACAAAAAAbfZdoqMQ71hNIsycNHl6JpSmAFDPSfgzdWkGqFZNoScAAzcwAH0AAAAFZAAgAAAAAD/FIrGYFDjyYmVb7oTMVwweWP7A6F9LnyIuNO4MjBnXBXMAIAAAAACIZgJCQRZu7NhuNMyOqCn1tf+DfU1qm10TPCfj5JYV3wVjACAAAAAAZCaO06/DjOBjNrTxB8jdxM7X4XvNu9QTa2Y00kZJwgEAAzcxAH0AAAAFZAAgAAAAAHIkVuNDkSS1cHIThKc/O0r2/ubaABTOi8Q1r/dvBAsEBXMAIAAAAADdHYqchEiJLM340c3Q4vJABmmth3+MKzwLYlsG6GS7sQVjACAAAAAA4byh4Re83zUZxvEH4iJSsnFei+ohsWxjSpB5YoAPawIAAzcyAH0AAAAFZAAgAAAAAJmoDILNhC6kn3masElfnjIjP1VjsjRavGk1gSUIjh1NBXMAIAAAAAD97Ilvp3XF8T6MmVVcxMPcdL80RgQ09UoC6PnoOvZ1IQVjACAAAAAAx9wkqPvtzrL9EFDmFvktI129ti8IAdHn0oudubGPWAAAAzczAH0A
AAAFZAAgAAAAAI5bm3YO0Xgf0VT+qjVTTfvckecM3Cwqj7DTKZXf8/NXBXMAIAAAAAD/m+h8fBhWaHm6Ykuz0WX1xL4Eme3ErLObyEVJf8NCywVjACAAAAAAiGdTXD22l1zDxHeF4NXUTiBnNTsdpzJRwM6riTPuOogAAzc0AH0AAAAFZAAgAAAAANqo4+p6qdtCzcB4BX1wQ6llU7eFBnuu4MtZwp4B6mDlBXMAIAAAAAAGiz+VaukMZ+6IH4jtn4KWWdKK4/W+O+gRioQDrfzpMgVjACAAAAAAl/wlBjSJW/hKkll8HSBCGw4Ce1pJ5rZuhVE09hKq4jQAAzc1AH0AAAAFZAAgAAAAAPrFXmHP2Y4YAm7b/aqsdn/DPoDkv7B8egWkfe23XsM1BXMAIAAAAAAGhwpKAr7skeqHm3oseSbO7qKNhmYsuUrECBxJ5k+D2AVjACAAAAAAKSsxERy2OBhb5MiH9/H2BET2oeU6yVihiAoWi/16FroAAzc2AH0AAAAFZAAgAAAAABzjYxwAjXxXc0Uxv18rH8I3my0Aguow0kTwKyxbrm+cBXMAIAAAAADVbqJVr6IdokuhXkEtXF0C2gINLiAjMVN20lE20Vmp2QVjACAAAAAACJ+VAwl9X88mjC76yzkWeL4K6AamNYjbNkpS9B6fQE4AAzc3AH0AAAAFZAAgAAAAAFMm2feF2fFCm/UC6AfIyepX/xJDSmnnolQIBnHcPmb5BXMAIAAAAABLI11kFrQoaNVZFmq/38aRNImPOjdJh0Lo6irI8M/AaAVjACAAAAAAZFgYh4nV8YN+/AAqe/QhKDkNBPg8KraCG7y3bOzhPV4AAzc4AH0AAAAFZAAgAAAAAJvXB3KyNiNtQko4SSzo/9b2qmM2zU9CQTTDfLSBWMgRBXMAIAAAAAAvjuVP7KsLRDeqVqRziTKpBrjVyqKiIbO9Gw8Wl2wFTAVjACAAAAAARuBn3JdQ8so7Gy0XVH0CtlvtRoJWhSrJP6eRIqbyWh8AAzc5AH0AAAAFZAAgAAAAAPGdcxDiid8z8XYnfdDivNMYVPgBKdGOUw6UStU+48CdBXMAIAAAAAARj6g1Ap0eEfuCZ4X2TsEw+Djrhto3fA5nLwPaY0vCTgVjACAAAAAATxLY6SkcLPaoOBJpQsggEqoxgJgiY9seP3fBQM05PckAAzgwAH0AAAAFZAAgAAAAAP5rGPrYGt3aKob5f/ldP0qrW7bmWvqnKY4QwdDWz400BXMAIAAAAADTQkW2ymaaf/bhteOOGmSrIR97bAnJx+yN3yMj1bTeewVjACAAAAAA6O+wIMt3xMITuCxVrqOCNBPX5F122G+/Is+EYkzUvVYAAzgxAH0AAAAFZAAgAAAAAAlz6wJze5UkIxKpJOZFGCOf3v2KByWyI6NB6JM9wNcBBXMAIAAAAABUC7P/neUIHHoZtq0jFVBHY75tSFYr1Y5S16YN5XxC1QVjACAAAAAANv97jMd9JmZ7lMeCzK7jaZHZYsZJNl6N9g9WXzd2om0AAzgyAH0AAAAFZAAgAAAAAFJ8AtHcjia/9Y5pLEc3qVgH5xKiXw12G9Kn2A1EY8McBXMAIAAAAAAxe7Bdw7eUSBk/oAawa7uicTEDgXLymRNhBy1LAxhDvwVjACAAAAAAtOUZ2NeUWwESpq95O92dhqYBt8RtFnjT43E7k9mQF3oAAzgzAH0AAAAFZAAgAAAAAO8uwQUaKFb6vqR3Sv3Wn4QAonC2exOC9lGG1juqP5DtBXMAIAAAAABZf1KyJgQg8/Rf5c02DgDK2aQu0rNCOvaL60ohDHyY+gVjACAAAAAA5nAWj1Ek8p0MLYZEB3yoVFDfBYZ+/ZpIo71u+W9hprcAAzg0AH0AAAAFZAAgAAAAAE8YtqyRsGCeiR6hhiyisR/hccmK4nZqIMzO4lUBmEFzBXMAIAAAAAC1UYOSKqAeG1UJiKjWFVskRhuFKpj9Ezy+lICZvFlN5AVjACAAAAAA37oP30mTzVftI2FL9Uwyls/jqLqbmRDQk7p7nlx44uYAAzg1AH0AAAAFZAAgAAAAAPhCrMausDx1QUIEqp9rUdRKyM6a9AAx7jQ3ILIu8wNIBXMAIAAAAACmH8lotGCiF2q9VQxhsS+7LAZv79VUAsOUALaGxE/EpAVjACAAAAAAhVIkl8p19VZ0gpg32s++Jda3qsVSVB5tV4CKrtjhE3IAAzg2AH0AAAAFZAAgAAAAAOBi/GAYFcstMSJPgp3VkMiuuUUCrZytvqYaU8dwm8v2BXMAIAAAAACEZSZVyD3pKzGlbdwlYmWQhHHTV5SnNLknl2Gw8IaUTQVjACAAAAAAriBex2kK//RPhyVpCYJDBng4l5w8jD3m8BF7dVAP0p8AAzg3AH0AAAAFZAAgAAAAAIQuup+YGfH3mflzWopN8J1X8o8a0d9CSGIvrA5HOzraBXMAIAAAAADYvNLURXsC2ITMqK14LABQBI+hZZ5wNf24JMcKLW+84AVjACAAAAAAGeUzdG2kQrx5XypXJezZmPVzMYuqYZw7Bhbl4EPT0SMAAzg4AH0AAAAFZAAgAAAAAKsh0ADyOnVocFrOrf6MpTrNvAj8iaiE923DPryu124gBXMAIAAAAADg24a8NVE1GyScc6tmnTbmu5ulzO+896fE92lN08MeswVjACAAAAAAbO+DEBY3STVMQN7CbxmHDUBYrDR+80e797u/VmiK4vcAAzg5AH0AAAAFZAAgAAAAAKkVC2Y6HtRmv72tDnPUSjJBvse7SxLqnr09/Uuj9sVVBXMAIAAAAABYNFUkH7ylPMN+Bc3HWX1e0flGYNbtJNCY9SltJCW/UAVjACAAAAAAHlE3RLKcXpXto7Mr8nRgzEsmhjfGh4vcgPxashCSMg4AAzkwAH0AAAAFZAAgAAAAADDggcwcb/Yn1Kk39sOHsv7BO/MfP3m/AJzjGH506Wf9BXMAIAAAAAAYZIsdjICS0+BDyRUPnrSAZfPrwtuMaEDEn0/ijLNQmAVjACAAAAAA1c/sy3255NofBQrnBNSmVkSzMgsGPaaOUJShddVrnuQAAzkxAH0AAAAFZAAgAAAAAEWY7dEUOJBgjOoWVht1wLehsWAzB3rSOBtLgTuM2HC8BXMAIAAAAAAAoswiHRROurjwUW8u8D5EUT+67yvrgpB/j6PzBDAfVwVjACAAAAAALBvQumG8m7/bzJjGWN2cHSAncdN8jMtOSmEhEhGom24AAzkyAH0AAAAFZAAgAAAAAPZaapeAUUFPA7JTCMOWHJa9lnPFh0/gXfAPjA1ezm4ZBXMAIAAAAACmJvLY2nivw7/b3DOKH/X7bBXjJwoowqb1GtEFO3OYgAVjACAAAAAAGCDHuUJusGHKQ+r9nrFChmUUsRcqZKPGsRiLSk5gbFcAAzkzAH0AAAAFZAAgAAAAAK00u6jadxCZAiA+fTsPVDsnW5p5LCr4+kZZZOTDuZlfBXMAIAAAAAAote4zTEYMDgaaQbAdN8Dzv93ljPLdGjJzvnRn3KXgtQVjACA
AAAAApZ6l+OrocOqgFek7WOqOP9JruTWZ+iW+5zdL3DZwzhkAAzk0AH0AAAAFZAAgAAAAAFbgabdyymiEVYYwtJSWa7lfl/oYuj/SukzJeDOR6wPVBXMAIAAAAADAFGFjS1vPbN6mQEhkDYTD6V2V23Ys9gUEUMGNvMPkaAVjACAAAAAAeICKly5Xtwmfd4JFD+5e1UWpu4V3KoRim7jeBICDs+UAAzk1AH0AAAAFZAAgAAAAABNMR6UBv2E627CqLtQ/eDYx7OEwQ7JrR4mSHFa1N8tLBXMAIAAAAAAxH4gucI4UmNVB7625C6hFSVCuIpJO3lusJlPuL8H5EQVjACAAAAAA0/uh1hrAlGGna/njCVaCqBwubxkifzF0zjCDQrIJOtUAAzk2AH0AAAAFZAAgAAAAAG8cd6WBneNunlqrQ2EmNf35W7OGObGq9WL4ePX+LUDmBXMAIAAAAAAjJ2+sX87NSis9hBsgb1QprVRnO7Bf+GObCGoUqyPE4wVjACAAAAAAAoJ4Ufdq45T9Aun5FupHlaBCIUsmUn6dXgV9KorpFikAAzk3AH0AAAAFZAAgAAAAAFWOUGkUpy8yf6gB3dio/aOfRKh7XuhvsUj48iESFJrGBXMAIAAAAAAY7sCDMcrUXvNuL6dO0m11WyijzXZvPIcOKob6IpC4PQVjACAAAAAAO/QFjczoznH95Iu0YEVMsU1GA1yxSGL8bcwSweNzAtkAAzk4AH0AAAAFZAAgAAAAAGUrIdKxOihwNmo6B+aG+Ag1qa0+iqdksHOjQj+Oy9bZBXMAIAAAAABwa5dbI2KmzBDNBTQBEkjZv4sPaeRkRNejcjdVymRFKQVjACAAAAAAUtdJucZoQvvPJiy65RonQrxBCcvtfHslpbgLbtWirCYAAzk5AH0AAAAFZAAgAAAAAOx89xV/hRk64/CkM9N2EMK6aldII0c8smdcsZ46NbP8BXMAIAAAAADBF6tfQ+7q9kTuLyuyrSnDgmrdmrXkdhl980i1KHuGHgVjACAAAAAAVc5njbLX1afpsuG662U/OBo4LanEk06lKbQtd95fswgAAzEwMAB9AAAABWQAIAAAAADJDdC9aEFl4Y8J/awHbnXGHjfP+VXQilPHJg7ewaJI7AVzACAAAAAAE+tqRl6EcBMXvbr4GDiNIYObTsYpa1n6BJk9EjIJVicFYwAgAAAAAITXOaLyGsYLUvOemQXxvnAHBwM/t3sMwfQus2aGoII+AAMxMDEAfQAAAAVkACAAAAAA3F9rjEKhpoHuTULVGgfUsGGwJs3bISrXkFP1v6KoQLgFcwAgAAAAAIBf0tXw96Z/Ds0XSIHX/zk3MzUR/7WZR/J6FpxRWChtBWMAIAAAAABzRshE3O7JzVjSGQSabvFXpen8sGUCAyr8hIIevROrjgADMTAyAH0AAAAFZAAgAAAAAOYIYoWkX7dGuyKfi3XssUlc7u/gWzqrR9KMkikKVdmSBXMAIAAAAABVF2OYjRTGi9Tw8XCAwZWLpX35Yl271TlNWp6N/nROhAVjACAAAAAAyGLbmHhe8eTQC8L2eDRcezUWULLZ9E8DPic7Mfp8/+4AAzEwMwB9AAAABWQAIAAAAACMtPm12YtdEAvqu6Eji1yuRXnu1RJP6h0l7pH3lSH4MwVzACAAAAAAENyCFfyUAh1veQBGx+cxiB7Sasrj41jzCGflZkB5cRMFYwAgAAAAAA92s3DQ3aIVOrRzusqU+xdK7cnQGCDJiyLF+su3F1ZDAAMxMDQAfQAAAAVkACAAAAAAvlI4lDcs6GB1cnm/Tzo014CXWqidCdyE5t2lknWQd4QFcwAgAAAAAD60SpNc4O2KT7J0llKdSpcX1/Xxs97N715a1HsTFkmBBWMAIAAAAABR3y/ZE3VZqBhqfUttX4bzW9wkpDoexfYZIG2fMwftcQADMTA1AH0AAAAFZAAgAAAAAKl8zcHJRDjSjJeV/WvMxulW1zrTFtaeBy/aKKhadc6UBXMAIAAAAADBdWQl5SBIvtZZLIHszePwkO14W1mQ0izUk2Ov21cPNAVjACAAAAAAHvQLJ2X4ncjVv1BsOd/jbVouRPwf4222WlGSzPyAfnsAAzEwNgB9AAAABWQAIAAAAABb6LXDWqCp1beQgQjj8I3sRTtFhlrmiBi+h/+ikmrvugVzACAAAAAA9stpgTecT7uTyaGNs3K9Bp0A7R0QaIAOfscyMXHBPX8FYwAgAAAAANTp7TwMatTSeLH3GvsrB3IOvJo+0B8HRGUFcRzJVesdAAMxMDcAfQAAAAVkACAAAAAA97NTT+81PhDhgptNtp4epzA0tP4iNb9j1AWkiiiKGM8FcwAgAAAAAKPbHg7ise16vxmdPCzksA/2Mn/qST0L9Xe8vnQugVkcBWMAIAAAAACVl3ds8xRdGFPAtZ+WtC4ojC1q5OzB6dSJoOLaJsdhhgADMTA4AH0AAAAFZAAgAAAAAMqpayM2XotEFmm0gwQd9rIzApy0X+7HfOhNk6VU7F5lBXMAIAAAAACJR9+q5T9qFHXFNgGbZnPubG8rkO6cwWhzITQTmd6VgwVjACAAAAAAEEOCUo+ihRGl1kuHlabWBWUFyJPAXSXzpQB4od57cMEAAzEwOQB9AAAABWQAIAAAAADQnslvt6Hm2kJPmqsTVYQHE/wWeZ4bE1XSkt7TKy0r1gVzACAAAAAA8URTA4ZMrhHPvlp53TH6FDCzS+0+61qHm5XK6UiOrKEFYwAgAAAAANSmECEsZgXBpoAzAIwaU3H0K/6HIutCn+ELRGBFzykkAAMxMTAAfQAAAAVkACAAAAAAhujlgFPFczsdCGXtQ/002Ck8YWQHHzvWvUHrkbjv4rwFcwAgAAAAALbV0lLGcSGfE7mDM3n/fgEvi+ifjl7WZ5b3aqjDNvx9BWMAIAAAAACKzGyyRT/3KSmGlKMHpswxj7pNu3rCmOETZ2DUhQsCBgADMTExAH0AAAAFZAAgAAAAABm/6pF96j26Jm7z5KkY1y33zcAEXLx2n0DwC03bs/ixBXMAIAAAAAD01OMvTZI/mqMgxIhA5nLs068mW+GKl3OW3ilf2D8+LgVjACAAAAAAnPa8PTfcaSSAdzNAC54IMTicbShbtt/cSnFHz7u7g8wAAzExMgB9AAAABWQAIAAAAACfw9/te4GkHZAapC9sDMHHHZgmlTrccyJDPFciOMSOcwVzACAAAAAAIIC1ZpHObvmMwUfqDRPl4C1aeuHwujM1G/yJbvybMNAFYwAgAAAAAKeagivKdHk7wAFEPHyDhsm9mDNWH5UPB+oIIlmfbzSOAAMxMTMAfQAAAAVkACAAAAAAkxHJRbnShpPOylLoDdNShfILeA1hChKFQY9qQyZ5VmsFcwAgAAAAAKidrY+rC3hTY+YWu2a7fuMH2RD/XaiTIBW1hrxNCQOJBWMAIAAAAADH8s64PC+lo8Ul4oujBlb/irtJSwu12V0HbvNzj/9qUQADMTE0AH0AAAAFZAAgAAAAAByuYl8dBvfaZ0LO/81JW4hYyp
eNmvLMaxsIdvqMPrWoBXMAIAAAAABNddwobOUJzm9HOUD8BMZJqkNCUCqstHZkC76FIdNg9AVjACAAAAAA4SEPY+qEZ2dCu/9485IEVybiM3Z7szXYM+izxklNm14AAzExNQB9AAAABWQAIAAAAABxMy7X5hf7AXGDz3Y/POu1ZpkMlNcSvSP92NOO/Gs7wAVzACAAAAAAHJshWo2T5wU2zvqCyJzcJQKQaHFHpCpMc9oWBXkpUPoFYwAgAAAAANeYm2aAT9f6T9uwQyH4sw2DUQyKTpo/CHt+Bb67ao4TAAMxMTYAfQAAAAVkACAAAAAAlqbslixl7Zw3bRlibZbe/WmKw23k8uKeIzPKYEtbIy0FcwAgAAAAAHEKwpUxkxOfef5HYvulXPmdbzTivwdwrSYIHDeNRcpcBWMAIAAAAAASUKCgxGu2U42qfKiyMimLPiZBMurf5v8lQJUSrhAWIAADMTE3AH0AAAAFZAAgAAAAAMXrXx0saZ+5gORmwM2FLuZG6iuO2YS+1IGPoAtDKoKBBXMAIAAAAADIQsxCr8CfFKaBcx8kIeSywnGh7JHjKRJ9vJd9x79y7wVjACAAAAAAPzOTJ088k9QdGUpZ1ubUdkw3pTGkNF8bui4a9wqeo08AAzExOAB9AAAABWQAIAAAAAAm83FA9yDUpwkbKTihe7m53u+DivS9BU2b4vQMtCVQ2AVzACAAAAAAz3m1UB/AbZPa4QSKFDnUgHaT78+6iGOFAtouiBorEgEFYwAgAAAAAHuM0gIDED47c9rdQ4MVtVzpVfn5zTF7eokO4lew4EM+AAMxMTkAfQAAAAVkACAAAAAAJRzYK0PUwr9RPG2/7yID0WgcTJPB2Xjccp5LAPDYunkFcwAgAAAAAIIh24h3DrltAzNFhF+MEmPrZtzr1PhCofhChZqfCW+jBWMAIAAAAAD6LppOeSiLHFI7EOBAz9pZPKU7LWbRWIINTlqw4zmJ5gADMTIwAH0AAAAFZAAgAAAAAHSaHWs/dnmI9sc7nB50VB2Bzs0kHapMHCQdyVEYY30TBXMAIAAAAACkV22lhEjWv/9/DubfHBAcwJggKI5mIbSK5L2nyqloqQVjACAAAAAA1I5pLbnlFf/EfJlhE0RFIioDjzKrWNh4TO8H/ksrbMcAAzEyMQB9AAAABWQAIAAAAAAC/I4TQRtCl12YZmdGz17X4GqSQgfwCPgRBwdHmdwu+QVzACAAAAAAx8f3z2ut/RAZhleari4vCEE+tNIn4ikjoUwzitfQ588FYwAgAAAAADX1qVlsvYru6YDBMtmlQoIBhLSbVykBtft8wn/A+ohJAAMxMjIAfQAAAAVkACAAAAAADGB5yU2XT0fse/MPWgvBvZikVxrl5pf3S5K1hceKWooFcwAgAAAAAIxTmlLHMjNaVDEfJbXvRez0SEPWFREBJCT6qTHsrljoBWMAIAAAAAD+RiUk9IWfaDWXtzLp2NX/vCCeU9amFnQgpJjy56dQXQADMTIzAH0AAAAFZAAgAAAAABmO7QD9vxWMmFjIHz13lyOeV6vHT6mYCsWxF7hb/yOjBXMAIAAAAACT9lmgkiqzuWG24afuzYiCeK9gmJqacmxAruIukd0xEAVjACAAAAAAyiZxtuOZQNSit4wFMJS3jnqLbDzyJ0OtDTKs6r0EO0cAAzEyNAB9AAAABWQAIAAAAAAfPUoy7QyZKhIIURso+mkP9qr1izbjETqF5s22GwjCjAVzACAAAAAAvLMsIDQ/go4VUxeh50UHmsvMvfx51cwyONnRD2odvC0FYwAgAAAAAMwY/KvSIOLoSHOq4TQ1V9ZSCPZc34SdPooPL4gYXa7UAAMxMjUAfQAAAAVkACAAAAAAE3ZqUar0Bq2zWbARE0bAv98jBlK9UJ73/xcwdMWWlSkFcwAgAAAAAK4M+MmC+9sFiFsumMyJZQKxWmmJiuG9H7IzKw083xxkBWMAIAAAAACpywsi9vc979ApgH3BDzph7Y4VIAkQhji97sswzdLAYwADMTI2AH0AAAAFZAAgAAAAAMo8FJiOq63cAmyk2O7eI7GcbQh/1j4RrMTqly3rexftBXMAIAAAAADjVmpd0WiRGTw/gAqEgGolt2EI7Csv14vKdmYoMD0aAgVjACAAAAAAEPmYjDZnMZdkaYlLZLlRgist9kCrSQe0mSuKxPwwYbQAAzEyNwB9AAAABWQAIAAAAACu5IGaIx7A3Jvly/kzlCsSA4s3iJwuIl8jEdRH0k93NwVzACAAAAAA9NRUyxYE+t0Xyosyt6vIfMFW/vBoYg6sR+jBNs4JAxIFYwAgAAAAAKMbsr4JCCodHmkBvfhOe7BJigKiJik95wLeu3PjLSpXAAMxMjgAfQAAAAVkACAAAAAALkRy0GJInXYLA+cgjs6Myb0a+Gu9hgXhHvhLNoGWfckFcwAgAAAAANbALyt9zCSvwnLaWCd2/y2eoB7qkWTvv1Ldu8r40JPuBWMAIAAAAABi0dME7AUSoeMv4yIgnGMgrX9G+Uk6HV+I9/9zzABcZwADMTI5AH0AAAAFZAAgAAAAAGoUK/DSWhT8LZhszSUqDbTrp8cSA7rdqmADKL+MILtTBXMAIAAAAABHnEE9bVa6lvhfhEMkkV2kzSSxH/sMW/FIJuw3CzWs6wVjACAAAAAAagOaxIb7co+HxAXhfI9ahHLUNqmGuN/4JxeR9sm6PdkAAzEzMAB9AAAABWQAIAAAAAC922ZDQE3h2fQKibGMZ9hV0WNlmrPYYSdtaSyYxsWYqgVzACAAAAAAagMovciKK6WVjIc2cCj8nK5O/gVOFFVeVAJpRp89tmQFYwAgAAAAAAMsWoODZkTvKSNWzloogbp/AiFFspENp49RwKjWpMLHAAMxMzEAfQAAAAVkACAAAAAAtK+JqX3K/z2txjAU15DgX4y90DS2YLfIJFolCOkJJJwFcwAgAAAAAMnR5V7gfX7MNqqUdL5AkWlkhyFXaBRVNej+Rcn8lrQkBWMAIAAAAADOmDHuXwsu9uwKYYSN5XIb/TG3K5DB5aci/X1ATKTaSwADMTMyAH0AAAAFZAAgAAAAAAeGrIMK/bac6kPczxbvRYqKMkcpeI2FjdMpD91FDWIvBXMAIAAAAAAix62z1LeS8yvSXCl5gHSIomjyx76fF3S1lp9k900hygVjACAAAAAAUK1Gzlm0meDhYPL7ZgKaCD0qz7Zb5rtnPn1EEW3UZxoAAzEzMwB9AAAABWQAIAAAAACphf298InM0Us4HT8o1W1MGw0D/02vd7Jh+U0h7qaFaQVzACAAAAAAFXtk7YpqsOJxsqGWSIL+YcBE96G3Zz9D31gPqDW94y8FYwAgAAAAAEA5mnfyccI1SjyYftq3qCuORs/NxzzqUA+fxn3FRjVsAAMxMzQAfQAAAAVkACAAAAAATLDS2cuDVM3yDMuWNgk2iGKBTzPpfJMbvxVOSY39ZfcFcwAgAAAAAPT5wRi2cLHIUflXzm6EQB/m7xdThP80ir1VV/JBBqvxBWMAIAAAAABTj7i+C
xxNVkbnAkSYga/v2X3/NH3nD/FRiOo2FPkzvAADMTM1AH0AAAAFZAAgAAAAAJNjExiZVX7jfFGfYpQu16qxLN0YPqVU/5CQ/Y67YSinBXMAIAAAAABMpm2+6KrkRUlXzQoMPHrQmIO6dkQz66tYdfTeA3dKqQVjACAAAAAATkNGECGUFzo34ItVnTEmxbe8dbde8QzMgEEdjMhRLz8AAzEzNgB9AAAABWQAIAAAAABlFkYtLCx901X6QVVMkSn6Z7k30UF4xHaA0OZJJ9bdyQVzACAAAAAATez+F9GHcGzTp7jjv4feboUNb8JCkIp4EqcPFisnq7MFYwAgAAAAAOHfn3eVRhSWn61AfZuAnCGleSLluJ3sF9IFGUWrCsBaAAMxMzcAfQAAAAVkACAAAAAA8NJKN0IxZnruhswGQkiruv8Ih0EMwDcSZx/Xasup9dkFcwAgAAAAAKaJZRxzA+Igeydvuk6cSwUHXcrmT4PjhuPu//FslpdnBWMAIAAAAAC3Cs/mKg+sbU/BC9QWFglOau6FWAP1sJqqSZOEOdzh+gADMTM4AH0AAAAFZAAgAAAAABHxHdEClz7hbSSgE58+dWLlSMJnoPz+jFxp4bB1GmLQBXMAIAAAAAD3nSvT6aGD+A110J/NwEfp0nPutlmuB5B+wA3CC3noGAVjACAAAAAA56lpvjMjFzR6NMGiSF1es1X01dwINWqaxyq3UUL8XJ0AAzEzOQB9AAAABWQAIAAAAAC/o8qW/ifk3KuJ01VFkyNLgQafxB5/bGs2G5VyyVafOwVzACAAAAAA1bMqAFGDHSl6BYNLbxApvkAv2K1/oafywiX0MDz1dGUFYwAgAAAAAMjJrrd6E5DyUuXkWZsAqo020Qvom6xqAluQLod5SmtdAAMxNDAAfQAAAAVkACAAAAAAY2V8I1bz3a1AxTtmED6UhdhA09huFkuuEX8R+d/WDPUFcwAgAAAAAPTVoNRiI76tcRKqd+JBBVyy4+YcKST42p0QX2BtmQ2VBWMAIAAAAAD0kQXxOWWOfIzqhJk5Rnvp/h5VXcGxxLZ8HMvVVH++YQADMTQxAH0AAAAFZAAgAAAAAO2kSsW0WGN9AOtK4xK2SHrGhWiaAbMEKT4iZkRpaDN/BXMAIAAAAABKGzQcPM8LT2dwOggxoWjv/1imYWabbG/G4kBw8OWaxAVjACAAAAAAT3sD7PTrAPtOtiSRrG4IKmSNr/uWgX8TvIyZpZzj4wkAAzE0MgB9AAAABWQAIAAAAAAiCwzNEEaH/mDam68IdDftnhthyUFdb+ZCNSBQ91WlHQVzACAAAAAA7tHyHcxCzmbJeFYZyPm4mEgkTGKOvwY4MX82OvH0Jn8FYwAgAAAAAP4A3nd470CwvaGzyJLfzbrw7ePJV35V3Cw6tuiPFlsCAAMxNDMAfQAAAAVkACAAAAAAGuCHVNJSuoVkpPOnS5s89GuA+BLi2IPBUr2Bg1sWEPIFcwAgAAAAAEl1gncS5/xO7bQ/KQSstRV3rOT2SW6nV92ZANeG2SR6BWMAIAAAAADi6/zM3xSBXEfpRpvEdeycRDl08SmwwLey72qgZaY7FwADMTQ0AH0AAAAFZAAgAAAAAMfxz7gEaoCdPvXrubDhCZUS0ARLZc1svgbXgMDlVBPgBXMAIAAAAAB6a5dDA3fuT5Vz2KvAcbUEFX/+B7Nw2p1QqbPoQ5TTuAVjACAAAAAAB4Kp5ewWpFMJ2T+QCrfw2eJV70L4M+GM8khV1JwqOqQAAzE0NQB9AAAABWQAIAAAAACnYsqF/VzmjIImC9+dqrHO1TM6lJ6fRwM0mM6Wf6paOwVzACAAAAAA5tgZzch8uDCR1ky3SllVaKVpxAlbrhvlNDTazZZRZOAFYwAgAAAAALDJDWkmExE4L8hf2CDNLuF+dgivUGXb7ZvBp90EALm+AAMxNDYAfQAAAAVkACAAAAAAEC0sIVmadtW4YMuRXH7RpAhXclsd+3bmqGXCMeaT014FcwAgAAAAABPpXh0uzpsJJB+IRUNajmMB9WGwswfpw5T9xk3Xj6ANBWMAIAAAAABauth7HsI90juToIGY6149TbrxP8Emoa+5Izilj9zeWQADMTQ3AH0AAAAFZAAgAAAAABaIB3n49Xm9cOafSrQsE0WCcYp8rMIO/qVwIlMF5YLRBXMAIAAAAAC9EyWJV3xOu9bzgdJ/yX+ko7qLf1u3AxNMataW2C9EzQVjACAAAAAAy7IAYSgbqP7CyvurHMoEq1eO08TIPNO/XcS1L5RKXHkAAzE0OAB9AAAABWQAIAAAAAA5rZItA/cocRnngYqcJ3nBXQ+l688aKz3EQyLbYYunPAVzACAAAAAAwKyA+L7TgxztPClLrIMk2JXR+w7c04N3ZOqPgjvrIvsFYwAgAAAAABaAwqPDgkTaz4888/3PeF64+vtHbLtAZgFgEurWT5kbAAMxNDkAfQAAAAVkACAAAAAA3htn7oHJ0YYpIrs+Mzyh85Ys67HwAdv5LQl1mCdoMWkFcwAgAAAAAEHjCtNNLenHuSIYux6ezAHsXDaj2DlTF67ToDhDDe6HBWMAIAAAAAAVwUZIMifuPtmiFSEctLt+bOFo1T4fgGiWNsZAP0ddeQADMTUwAH0AAAAFZAAgAAAAAEnt18Km/nqggfIJWxzTr9r3hnXNaueG6XO9A5G11LnGBXMAIAAAAAD7QxzGMN/ard5TfFLecE6uusMmXG2+RBsBR+/NCQHUwAVjACAAAAAAIPTDniUskKj5mEzULdlKtIDquwjcqkwunJyhmBazNNcAAzE1MQB9AAAABWQAIAAAAAAVAKK/GoY8AACu/hyMpO4hdLq6JnEyWNzkyci9sbaD/wVzACAAAAAA2HmeqpMlvvBpV2zQTYIRmsc4MFlfHRwLof0ycJgMg/MFYwAgAAAAACHHCAszhxijRN9Es+XRQxXo7JQ6g2qM6f5sIdMy0VKbAAMxNTIAfQAAAAVkACAAAAAAhHyq1GQC/GiMwpYjcsfkNxolJ10ARKjIjfkW1Wipzi0FcwAgAAAAAD/uaGWxTDq87F8XZ6CrFI+RNa8yMqfSZdqK00Kj833BBWMAIAAAAAD1mJD7i29l2xloVHwS1n0CbkyodWeFfZM1KF1r4HqNIgADMTUzAH0AAAAFZAAgAAAAABAJJxHoZD+MQBWqm9UM9Dd3z5ZohIZGWRaRVRsMptKQBXMAIAAAAADrE/ca+gqj/SH4oao4wE4qn2ovoTydzcMbDbrfnUs3zAVjACAAAAAAAIUpF9mApVCMCckE7S+K9RL6TPtlTTYuzUsj9E6oIpUAAzE1NAB9AAAABWQAIAAAAAAv01wz7VG9mTepjXQi6Zma+7b/OVBaKVkWNbgDLr1mFgVzACAAAAAA0I5sxz8r6wkCp5Tgvr+iL4p6MxSOq5d3e1kZG+0b7NkFYwAgAAAAAKwOaUTAOw6Y1F5vFrbDf8yVua5Fm6hKhkdIrzejuaatAAMxNTUAfQAAAAVkACAAAAAAWXecRwxSon68xaa9THXnRDw5ZfzA
RKnvvjTjtbae6T0FcwAgAAAAAPh0UfUMEo7eILCMv2tiJQe1bF9qtXq7GJtC6H5Va4fIBWMAIAAAAAASanx14WR2rmomuZ2x/nuD8j75pYzz+ZAt/v7uRcoOEQADMTU2AH0AAAAFZAAgAAAAAEY5WL8/LpX36iAB1wlQrMO/xHVjoO9BePVzbUlBYo+bBXMAIAAAAABoKcpadDXUARedDvTmzUzWPe1jTuvD0z9oIcZmKuiSXwVjACAAAAAAWhdUWP186dYAIDx6RtC0o/lzwYNvYBXm4RWFMbcU3YkAAzE1NwB9AAAABWQAIAAAAADnJnWqsfx0xqNnqfFGCxIplVu8mXjaHTViJT9+y2RuTgVzACAAAAAAWAaSCwIXDwdYxWf2NZTly/iKVfG/KDjHUcA1BokN5sMFYwAgAAAAAPS3GDNsHgCuTgEWTHiEStoY0VMzlopZ8L7wPUquK7ayAAMxNTgAfQAAAAVkACAAAAAAsPG7LaIpJvcwqcbtfFUpIjj+vpNj70Zjaw3eV9T+QYsFcwAgAAAAAJQ71zi0NlCyY8ZQs3IasJ4gB1PmWx57HpnlCf3+hmhqBWMAIAAAAABmDfQAQeyeutCB+wSn4RmErwYEkYpyBeCYA+3iutFzAQADMTU5AH0AAAAFZAAgAAAAAAGiSqKaQDakMi1W87rFAhkogfRAevnwQ41onWNUJKtuBXMAIAAAAAASgiDpXfGh7E47KkOD8MAcX8+BnDShlnU5JAGdnPdqOAVjACAAAAAAucFM6KF1YHEhH9vn16ox+VSqNEykMGaJzhLPsLI7qk8AAzE2MAB9AAAABWQAIAAAAAB7L4EnhjKA5xJD3ORhH2wOA1BvpnQ+7IjRYi+jjVEaJAVzACAAAAAAuhBIm0nL3FJnVJId+7CKDASEo+l2E89Z9/5aWSITK4AFYwAgAAAAALxpb5qDek038/wz4E//KIrwwo0voaqwlB7cTXSQ44EHAAMxNjEAfQAAAAVkACAAAAAAaROn1NaDZFOGEWw724dsXBAm6bgmL5i0cki6QZQNrOoFcwAgAAAAANVT8R6UvhrAlyqYlxtmnvkR4uYK/hlvyQmBu/LP6/3ZBWMAIAAAAAC1ONheOA57vNof7iZM3b82xk1abqT0bppzjQnHCmnUIwADMTYyAH0AAAAFZAAgAAAAADqSR5e0/Th59LrauDA7OnGD1Xr3H3NokfVxzDWOFaN7BXMAIAAAAACt30faNwTWRbvmykDpiDYUOCwA6QDbBBYBFWS7rdOB4AVjACAAAAAAQZlvU/gzOwW2rcr5+W43Q6EkfX8oX1TpRJWEAZAULx4AAzE2MwB9AAAABWQAIAAAAADmtb1ZgpZjSeodPG/hIVlsnS8hoRRwRbrTVx89VwL62AVzACAAAAAAi38e1g6sEyVfSDkzZbaZXGxKI/zKNbMasOl2LYoWrq8FYwAgAAAAAFWkHkxPA28FjeUjeRILC0tX8OJcanhPP68QxM5s1kcMAAMxNjQAfQAAAAVkACAAAAAAf/f7AWVgBxoKjr7YsEQ4w/fqSvuQWV2HMiA3rQ7ur0sFcwAgAAAAADkkeJozP6FFhUdRIN74H4UhIHue+eVbOs1NvbdWYFQrBWMAIAAAAADS3qKdAEzPtGMFpsisEhnOfF0zfxmLXjHKKaNCzX4PhQADMTY1AH0AAAAFZAAgAAAAAI2WEOymtuFpdKi4ctanPLnlQud+yMKKb8p/nfKmIy56BXMAIAAAAADVKrJmhjr1rfF3p+T+tl7UFd1B7+BfJRk0e7a4im7ozgVjACAAAAAAxeqPjcgDwae+AgEXg3hU118dHoy6rcHd6jAJrGik+EcAAzE2NgB9AAAABWQAIAAAAAAuHU9Qd79hjyvKOujGanSGDIQlxzsql8JytTZhEnPw+AVzACAAAAAAjF2gV/4+sOHVgDd/oR5wDi9zL7NGpGD+NsEpGXy/a4QFYwAgAAAAAIFYquIAyUn3D2peptolq3s8oIgSLEIctqBYx6qEeQTxAAMxNjcAfQAAAAVkACAAAAAA7dQDkt3iyWYCT94d7yqUtPPwp4qkC0ddu+HFdHgVKEkFcwAgAAAAANuYvtvZBTEq4Rm9+5eb7VuFopowkrAuv86PGP8Q8/QvBWMAIAAAAABBO8poBKW/RWjdHBARy3Rzghz3frQIZEPE/nSKWYlVPgADMTY4AH0AAAAFZAAgAAAAAAwnZSDhL4tNGYxlHPhKYB8s28dY5ScSwiKZm3UhT8U3BXMAIAAAAABDoY6dhivufTURQExyC9Gx3ocpl09bgbbQLChj3qVGbgVjACAAAAAA4JWOs1XAUgX3rGBBGEDDIC9/i9khJ+FgIcwfvj/SnS0AAzE2OQB9AAAABWQAIAAAAAC2VuRdaC4ZJmLdNOvD6R2tnvkyARteqXouJmI46V306QVzACAAAAAAMn1Z6B35wFTX9mEYAPM+IiJ5hauEwfD0CyIvBrxHg7IFYwAgAAAAAMRQnV65wbGlT+7Wj/HvGPLKg8NBnhkQ8Hx/MIyWvynTAAMxNzAAfQAAAAVkACAAAAAAVerb7qVNy457rNOHOgDSKyWl5ojun7iWrv1uHPXrIZQFcwAgAAAAAIDcYS9j5z+gx0xdJj09L7876r/vjvKTi/d3bXDE3PhyBWMAIAAAAAANDuaZsI2eS6/qiSdG9pCLjcPdKS96xWUJr84V/1La7gADMTcxAH0AAAAFZAAgAAAAAH/E44yLxKCJjuSmU9A8SEhbmkDOx1PqqtYcZtgOzJdrBXMAIAAAAABgLh9v2HjBbogrRoQ82LS6KjZQnzjxyJH4PH+F3jupSAVjACAAAAAAd93miIlZ/uuhzjVBt0BPjzeewxih6TSzkzbsNhpMjA8AAzE3MgB9AAAABWQAIAAAAAAlNUdDL+f/SSQ5074mrq0JNh7CTXwTbbhsQyDwWeDVMwVzACAAAAAANIH2IlSNG0kUw4qz0budjcWn8mNR9cJlYUqPYdonucAFYwAgAAAAAF9OccClBu1j8BVMDmMtJyCxnhWCbg9FuY/8nhuFGJknAAMxNzMAfQAAAAVkACAAAAAASZ+CvUDtlk/R4HAQ3a+PHrKeY/8ifAfh0oXYFqliu80FcwAgAAAAAJelpzPgM65OZFt/mvGGpwibclQ49wH+1gbUGzd9OindBWMAIAAAAAC63uwNXVbd1VP5If++NprVRsHCbomU9Iq3GxCttNAweAADMTc0AH0AAAAFZAAgAAAAAN9fkoUVbvFV2vMNMAkak4gYfEnzwKI3eDM3pnDK5q3lBXMAIAAAAACnDkgVNVNUlbQ9RhR6Aot2nVy+U4km6+GHPkLr631jEAVjACAAAAAA5d2REAVBZI9rUfeqJ9jhePmRXvrGolUbMTFiC78k/OIAAzE3NQB9AAAABWQAIAAAAAASyK3b1nmNCMptVEGOjwoxYLLS9fYWm/Zxilqea0jpEQVzACAAAAAADDHsGrbqlKGEpxlvfyqOJKQJjwJrzsrB7k3HG0AUJbkFYwAgAAAAACZHzWGY3PE
L4uPSKPezGGJkyScYw/Vt3bKmX5inVMaOAAMxNzYAfQAAAAVkACAAAAAAlSP9iK31GlcG9MKGbLmq+VXMslURr+As736rrVNXcsUFcwAgAAAAAAvbj0zfq9zzi8XReheKFbCB+h9IsOLgXPPpI5vrEJNZBWMAIAAAAABb9Y2Ox3MeHCfGnMxpR7nk6hwIJg0J8/tT8fZHwxMCBQADMTc3AH0AAAAFZAAgAAAAAOJNORH8Bev97gVU7y6bznOxJ+E6Qoykur1QP76hG1/7BXMAIAAAAAC+C1PtOOrSZgzBAGhr+dPe/kR0JUw9GTwLVNr61xC1aAVjACAAAAAA4rDk/UlRHudzR12i5Ivcb5Pxwn63JX2Sjf2xrlALL7kAAzE3OAB9AAAABWQAIAAAAAAKcHzLUomavInN6upPkyWhAqYQACP/vdVCIYpiy6U6HgVzACAAAAAATsR4KItY6R2+U7Gg6sJdaEcf58gjd1OulyWovIqfxKcFYwAgAAAAAEac2NX3v0n87D/eUM/a3Gj+5Axs896SCidMyOMaOz6zAAMxNzkAfQAAAAVkACAAAAAAqTOLiMpCdR59tLZzzIPqJvbCNvz2XQL9ust0qYaehtcFcwAgAAAAAArefox/3k5xGOeiw2m6NUdzuGxmPwcu5IFcj+jMwHgHBWMAIAAAAABK08kOnYjDWhLJ70iVhd3XEj0Q0srEQttJTawBm6GuOwADMTgwAH0AAAAFZAAgAAAAAIPSqSeVzSRgNVNmrPYHmUMgykCY27NbdDUNhE5kx/SgBXMAIAAAAAAhX90nNfxyXmZe/+btZ7q6xMX4PFyj0paM1ccJ/5IUUQVjACAAAAAA/9XTbq8UVKJbIeYtfNg8pdDrpDqbCwVr5Rq3Tb4dFwwAAzE4MQB9AAAABWQAIAAAAACLn5DxiqAosHGXIAY96FwFKjeqrzXWf3VJIQMwx1fl4gVzACAAAAAAindvU27nveutopdvuHmzdENBbeGFtI3Qcsr07jxmvm8FYwAgAAAAAJ3GMHBeIaCyFV5zBgcUMpjfIDcdoaTYQSyQN2shVJMWAAMxODIAfQAAAAVkACAAAAAA7i2S6rHRSPBwZEn59yxaS7HiYBOmObIkeyCcFU42kf8FcwAgAAAAAGb3RSEyBmgarkTvyLWtOLJcPwCKbCRkESG4RZjVmY4iBWMAIAAAAABKIdcHRSJ3dnevw5Pj0BfULPqmDOA7KBDREMGv5rXUEAADMTgzAH0AAAAFZAAgAAAAAFz+M+H/Z94mdPW5oP51B4HWptp1rxcMWAjnlHvWJDWrBXMAIAAAAACBFEOQyL7ZHu4Cq33QvXkmKuH5ibG/Md3RaED9CtG5HwVjACAAAAAABf2EvIMumZohp2E/sHWEJW4VMU3ez95hwWU9pjKGRCYAAzE4NAB9AAAABWQAIAAAAAAE7c2x3Z3aM1XGfLNk/XQ9jCazNRbGhVm7H8c2NjS5ywVzACAAAAAARJ9h8fdcwA19velF3L/Wcvi2rCzewlKZ2nA0p8bT9uwFYwAgAAAAAPl1vH6JaJGZSJ8au0NK58X4gqleryub8slUhA0HRtzmAAMxODUAfQAAAAVkACAAAAAAVoRt9B9dNVvIMGN+ea5TzRzQC+lqSZ8dd/170zU5o9cFcwAgAAAAAEwM95XZin5mv2yhCI8+ugtKuvRVmNgzzIQN0yi1+9aIBWMAIAAAAAA+rgph1avVrhyHl/6G+HtASJSiJkv7u2UPYj+25fwtLAADMTg2AH0AAAAFZAAgAAAAAKRDkjyWv/etlYT4GyoXrmBED2FgZHnhc+l9Wsl06cH2BXMAIAAAAABohlpm3K850Vndf3NmNE0hHqDlNbSR8/IvMidQ3LnIZAVjACAAAAAAKw6WqWDrI7ZfyndtrftL3uh7BaF/FQxhkh8b0Zl547gAAzE4NwB9AAAABWQAIAAAAAC3DFh5oklLCNLY90bgWm68dFXz65JpAZSp1K99MBTPAQVzACAAAAAAQgZecmxEUZVHoptEQClDwAf8smI3WynQ/i+JBP0g+kQFYwAgAAAAADmUpVNJUwrydIllFvI/aau1703nk6tF2sfzkgFlWsKCAAMxODgAfQAAAAVkACAAAAAAQ5WKvWSB503qeNlOI2Tpjd5blheNr6OBO8pfJfPNstcFcwAgAAAAAKwHgQLSDJ5NwLBQbY5OnblQIsVDpGV7q3RCbFLD1U4/BWMAIAAAAADKOab9GTEiaybo9o4J72Pb9LiQU/CuKoQm6Jtlmo/IeQADMTg5AH0AAAAFZAAgAAAAAGfhFY3RGRm5ZgWRQef1tXxHBq5Y6fXaLAR4yJhrTBplBXMAIAAAAACKEF0ApLoB6lP2UqTFsTQYNc9OdDrs/vziPGzttGVLKQVjACAAAAAAvJcnqBNCZgHe+Q7Cqk+yiAaIuSJIRItXEsAlKy9PES4AAzE5MAB9AAAABWQAIAAAAAAIM73gPcgzgotYHLeMa2zAU4mFsr7CbILUZWfnuKSwagVzACAAAAAAJCSu98uV8xv88f2BIOWzt6p+6EjQStMBdkGPUkgN79cFYwAgAAAAABQqDP10DcaqEY7vxFS7U/9xcadDASONow1+xmVsPZ11AAMxOTEAfQAAAAVkACAAAAAAkWmHCUsiMy1pwZTHxVPBzPTrWFBUDqHNrVqcyyt7nO8FcwAgAAAAAMv2CebFRG/br7USELR98sIdgE9OQCRBGV5JZCO+uPMgBWMAIAAAAABxLEFq6nFVI3WaYhggo1d6oeDsBSm4wCW0T6D0yE/puwADMTkyAH0AAAAFZAAgAAAAAGInUYv0lP/rK7McM8taEHXRefk8Q2AunrvWqdfSV7UaBXMAIAAAAACE+WPxJ3gan7iRTbIxXXx+bKVcaf8kP4JD8DcwU0aL7wVjACAAAAAAiThzC5jAZ5D6xuCIsUD1aLy7KxmT2Fb8ybTkLxvj17MAAzE5MwB9AAAABWQAIAAAAACmHlg2ud3cplXlTsNTpvNnY6Qm1Fce0m899COamoDjaQVzACAAAAAArtJQeJIlepBWRU2aYar7+YGYVQ7dfDc1oxgTmA8r9q0FYwAgAAAAADZpcxtBFv18EdmhxsoxlDs253tNB5zqVY/h+nFD1isIAAMxOTQAfQAAAAVkACAAAAAApzcWSAbZWV/Rq+ylRNqqlJqNVR4fhXrz4633/MQOQgcFcwAgAAAAAN/jz/bsEleiuCl+li83EWlG6UMHA8CyaOMRKCkXkSCPBWMAIAAAAADSTHgALJugYsjIvSK1o0UwG6VizNokXQ/tnPNjOpGB1AAABWUAIAAAAADrmnP3kS2GpCl+gdL2da90KHTkBX46iQ/sZRoj7uPz7BJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAAA==", + "base64": 
"DeFiAAADcGF5bG9hZACxYgAABGcAnWIAAAMwAH0AAAAFZAAgAAAAAJu2KgiI8vM+kz9qD3ZQzFQY5qbgYqCqHG5R4jAlnlwXBXMAIAAAAAAAUXxFXsz764T79sGCdhxvNd5b6E/9p61FonsHyEIhogVsACAAAAAAt19RL3Oo5ni5L8kcvgOJYLgVYyXJExwP8pkuzLG7f/kAAzEAfQAAAAVkACAAAAAAPQPvL0ARjujSv2Rkm8r7spVsgeC1K3FWcskGGZ3OdDIFcwAgAAAAACgNn660GmefR8jLqzgR1u5O+Uocx9GyEHiBqVGko5FZBWwAIAAAAADflr+fsnZngm6KRWYgHa9JzK+bXogWl9evBU9sQUHPHQADMgB9AAAABWQAIAAAAAD2Zi6kcxmaD2mY3VWrP+wYJMPg6cSBIYPapxaFQxYFdQVzACAAAAAAM/cV36BLBY3xFBXsXJY8M9EHHOc/qrmdc2CJmj3M89gFbAAgAAAAAOpydOrKxx6m2gquSDV2Vv3w10GocmNCFeOo/fRhRH9JAAMzAH0AAAAFZAAgAAAAAOaNqI9srQ/mI9gwbk+VkizGBBH/PPWOVusgnfPk3tY1BXMAIAAAAAAc96O/pwKCmHCagT6T/QV/wz4vqO+R22GsZ1dse2Vg6QVsACAAAAAAgzIak+Q3UFLTHXPmJ+MuEklFtR3eLtvM+jdKkmGCV/YAAzQAfQAAAAVkACAAAAAA0XlQgy/Yu97EQOjronl9b3dcR1DFn3deuVhtTLbJZHkFcwAgAAAAACoMnpVl6EFJak8A+t5N4RFnQhkQEBnNAx8wDqmq5U/dBWwAIAAAAACR26FJif673qpwF1J1FEkQGJ1Ywcr/ZW6JQ7meGqzt1QADNQB9AAAABWQAIAAAAAAOtpNexRxfv0yRFvZO9DhlkpU4mDuAb8ykdLnE5Vf1VAVzACAAAAAAeblFKm/30orP16uQpZslvsoS8s0xfNPIBlw3VkHeekYFbAAgAAAAAPEoHj87sYE+nBut52/LPvleWQBzB/uaJFnosxp4NRO2AAM2AH0AAAAFZAAgAAAAAIr8xAFm1zPmrvW4Vy5Ct0W8FxMmyPmFzdWVzesBhAJFBXMAIAAAAABYeeXjJEzTHwxab6pUiCRiZjxgtN59a1y8Szy3hfkg+gVsACAAAAAAJuoY4rF8mbI+nKb+5XbZShJ8191o/e8ZCRHE0O4Ey8MAAzcAfQAAAAVkACAAAAAAl+ibLk0/+EwoqeC8S8cGgAtjtpQWGEZDsybMPnrrkwEFcwAgAAAAAHPPBudWgQ+HUorLDpJMqhS9VBF2VF5aLcxgrM1s+yU7BWwAIAAAAAAcCcBR2Vyv5pAFbaOU97yovuOi1+ATDnLLcAUqHecXcAADOAB9AAAABWQAIAAAAACR9erwLTb+tcWFZgJ2MEfM0PKI9uuwIjDTHADRFgD+SQVzACAAAAAAcOop8TXsGUVQoKhzUllMYWxL93xCOkwtIpV8Q6hiSYYFbAAgAAAAAKXKmh4V8veYwob1H03Q3p3PN8SRAaQwDT34KlNVUjiDAAM5AH0AAAAFZAAgAAAAALv0vCPgh7QpmM8Ug6ad5ioZJCh7pLMdT8FYyQioBQ6KBXMAIAAAAADsCPyIG8t6ApQkRk1fX/sfc1kpuWCWP8gAEpnYoBSHrQVsACAAAAAAJe/r67N6d8uTiogvfoR9rEXbIDjyLb9EVdqkayFFGaYAAzEwAH0AAAAFZAAgAAAAAIW4AxJgYoM0pcNTwk1RSbyjZGIqgKL1hcTJmNrnZmoPBXMAIAAAAAAZpfx3EFO0vY0f1eHnE0PazgqeNDTaj+pPJMUNW8lFrAVsACAAAAAAP+Um2vwW6Bj6vuz9DKz6+6aWkoKoEmFNoiz/xXm7lOsAAzExAH0AAAAFZAAgAAAAAKliO6L9zgeuufjj174hvmQGNRbmYYs9yAirL7OxwEW3BXMAIAAAAAAqU7vs3DWUQ95Eq8OejwWnD0GuXd+ASi/uD6S0l8MM1QVsACAAAAAAb9legYzsfctBPpHyl7YWpPmLr5QiNZFND/50N1vv2MUAAzEyAH0AAAAFZAAgAAAAAOGQcCBkk+j/Kzjt/Cs6g3BZPJG81wIHBS8JewHGpgk+BXMAIAAAAABjrxZXWCkdzrExwCgyHaafuPSQ4V4x2k9kUCAqUaYKDQVsACAAAAAADBU6KefT0v8zSmseaMNmQxKjJar72y7MojLFhkEHqrUAAzEzAH0AAAAFZAAgAAAAAPmCNEt4t97waOSd5hNi2fNCdWEkmcFJ37LI9k4Az4/5BXMAIAAAAABX7DuDPNg+duvELf3NbLWkPMFw2HGLgWGHyVWcPvSNCAVsACAAAAAAS7El1FtZ5STh8Q1FguvieyYX9b2DF1DFVsb9hzxXYRsAAzE0AH0AAAAFZAAgAAAAAD4vtVUYRNB+FD9yoQ2FVJH3nMeJeKbi6eZfth638YqbBXMAIAAAAAANCuUB4OdmuD6LaDK2f3vaqfgYYvg40wDXOBbcFjTqLwVsACAAAAAA9hqC2VoJBjwR7hcQ45xO8ZVojwC83jiRacCaDj6Px2gAAzE1AH0AAAAFZAAgAAAAAJPIRzjmTjbdIvshG6UslbEOd797ZSIdjGAhGWxVQvK1BXMAIAAAAABgmJ0Jh8WLs9IYs/a7DBjDWd8J3thW/AGJK7zDnMeYOAVsACAAAAAAi9zAsyAuou2oiCUHGc6QefLUkACa9IgeBhGu9W/r0X8AAzE2AH0AAAAFZAAgAAAAAABQyKQPoW8wGPIqnsTv69+DzIdRkohRhOhDmyVHkw9WBXMAIAAAAAAqWA2X4tB/h3O1Xlawtz6ndI6WaTwgU1QYflL35opu5gVsACAAAAAAWI/Gj5aZMwDIxztqmVL0g5LBcI8EdKEc2UA28pnekQoAAzE3AH0AAAAFZAAgAAAAACB7NOyGQ1Id3MYnxtBXqyZ5Ul/lHH6p1b10U63DfT6bBXMAIAAAAADpOryIcndxztkHSfLN3Kzq29sD8djS0PspDSqERMqokQVsACAAAAAADatsMW4ezgnyi1PiP7xk+gA4AFIN/fb5uJqfVkjg4UoAAzE4AH0AAAAFZAAgAAAAAKVfXLfs8XA14CRTB56oZwV+bFJN5BHraTXbqEXZDmTkBXMAIAAAAAASRWTsfGOpqdffiOodoqIgBzG/yzFyjR5CfUsIUIWGpgVsACAAAAAAkgCHbCwyX640/0Ni8+MoYxeHUiC+FSU4Mn9jTLYtgZgAAzE5AH0AAAAFZAAgAAAAAH/aZr4EuS0/noQR9rcF8vwoaxnxrwgOsSJ0ys8PkHhGBXMAIAAAAACd7ObGQW7qfddcvyxRTkPuvq/PHu7+6I5dxwS1Lzy5XAVsACAAAAAA3q0eKdV7KeU3pc+CtfypKR7BPxwaf30yu0j9FXeOOboAAzIwAH0AAAAFZAAgAAAAAKvlcpFFNq0oA+urq3w6d80PK1HHHw0H0yVWvU9aHijXBXMAIAAAAADWnAHQ5Fhlcjawki7kWz
dqjM2f6IdGJblojrYElWjsZgVsACAAAAAAO0wvY66l24gx8nRxyVGC0QcTztIi81Kx3ndRhuZr6W4AAzIxAH0AAAAFZAAgAAAAAH/2aMezEOddrq+dNOkDrdqf13h2ttOnexZsJxG1G6PNBXMAIAAAAABNtgnibjC4VKy5poYjvdsBBnVvDTF/4mmEAxsXVgZVKgVsACAAAAAAqvadzJFLqQbs8WxgZ2D2X+XnaPSDMLCVVgWxx5jnLcYAAzIyAH0AAAAFZAAgAAAAAF2wZoDL6/V59QqO8vdRZWDpXpkV4h4KOCSn5e7x7nmzBXMAIAAAAADLZBu7LCYjbThaVUqMK14H/elrVOYIKJQCx4C9Yjw37gVsACAAAAAAEh6Vs81jLU204aGpL90fmYTm5i5R8/RT1uIbg6VU3HwAAzIzAH0AAAAFZAAgAAAAAH27yYaLn9zh2CpvaoomUPercSfJRUmBY6XFqmhcXi9QBXMAIAAAAAAUwumVlIYIs9JhDhSj0R0+59psCMsFk94E62VxkPt42QVsACAAAAAAT5x2hCCd2bpmpnyWaxas8nSxTc8e4C9DfKaqr0ABEysAAzI0AH0AAAAFZAAgAAAAALMg2kNAO4AFFs/mW3In04yFeN4AP6Vo0klyUoT06RquBXMAIAAAAAAgGWJbeIdwlpqXCyVIYSs0dt54Rfc8JF4b8uYc+YUj0AVsACAAAAAAWHeWxIkyvXTOWvfZzqtPXjfGaWWKjGSIQENTU3zBCrsAAzI1AH0AAAAFZAAgAAAAALas/i1T2DFCEmrrLEi7O2ngJZyFHialOoedVXS+OjenBXMAIAAAAAA1kK0QxY4REcGxHeMkgumyF7iwlsRFtw9MlbSSoQY7uAVsACAAAAAAUNlpMJZs1p4HfsD4Q4WZ4TBEi6Oc2fX34rzyynqWCdwAAzI2AH0AAAAFZAAgAAAAAP1TejmWg1CEuNSMt6NUgeQ5lT+oBoeyF7d2l5xQrbXWBXMAIAAAAABPX0kj6obggdJShmqtVfueKHplH4ZrXusiwrRDHMOKeQVsACAAAAAAIYOsNwC3DA7fLcOzqdr0bOFdHCfmK8tLwPoaE9uKOosAAzI3AH0AAAAFZAAgAAAAAMrKn+QPa/NxYezNhlOX9nyEkN1kE/gW7EuZkVqYl0b8BXMAIAAAAABUoZMSPUywRGfX2EEencJEKH5x/P9ySUVrhStAwgR/LgVsACAAAAAAMgZFH6lQIIDrgHnFeslv3ld20ynwQjQJt3cAp4GgrFkAAzI4AH0AAAAFZAAgAAAAAMmD1+a+oVbiUZd1HuZqdgtdVsVKwuWAn3/M1B6QGBM3BXMAIAAAAACLyytOYuZ9WEsIrrtJbXUx4QgipbaAbmlJvSZVkGi0CAVsACAAAAAA4v1lSp5H9BB+HYJ4bH43tC8aeuPZMf78Ng1JOhJh190AAzI5AH0AAAAFZAAgAAAAAOVKV7IuFwmYP1qVv8h0NvJmfPICu8yQhzjG7oJdTLDoBXMAIAAAAABL70XLfQLKRsw1deJ2MUvxSWKxpF/Ez73jqtbLvqbuogVsACAAAAAAvfgzIorXxE91dDt4nQxYfntTsx0M8Gzdsao5naQqcRUAAzMwAH0AAAAFZAAgAAAAAKS/1RSAQma+xV9rz04IcdzmavtrBDjOKPM+Z2NEyYfPBXMAIAAAAAAOJDWGORDgfRv8+w5nunh41wXb2hCA0MRzwnLnQtIqPgVsACAAAAAAf42C1+T7xdHEFF83+c2mF5S8PuuL22ogXXELnRAZ4boAAzMxAH0AAAAFZAAgAAAAAFeq8o82uNY1X8cH6OhdTzHNBUnCChsEDs5tm0kPBz3qBXMAIAAAAABaxMBbsaeEj/EDtr8nZfrhhhirBRPJwVamDo5WwbgvTQVsACAAAAAAMbH453A+BYAaDOTo5kdhV1VdND1avNwvshEG/4MIJjQAAzMyAH0AAAAFZAAgAAAAAI8IKIfDrohHh2cjspJHCovqroSr5N3QyVtNzFvT5+FzBXMAIAAAAABXHXteKG0DoOMmECKp6ro1MZNQvXGzqTDdZ0DUc8QfFAVsACAAAAAA/w5s++XYmO+9TWTbtGc3n3ndV4T9JUribIbF4jmDLSMAAzMzAH0AAAAFZAAgAAAAAJkHvm15kIu1OtAiaByj5ieWqzxiu/epK6c/9+KYIrB0BXMAIAAAAACzg5TcyANk0nes/wCJudd1BwlkWWF6zw3nGclq5v3SJQVsACAAAAAAvruXHTT3irPJLyWpI1j/Xwf2FeIE/IV+6Z49pqRzISoAAzM0AH0AAAAFZAAgAAAAAAYSOvEWWuSg1Aym7EssNLR+xsY7e9BcwsX4JKlnSHJcBXMAIAAAAABT48eY3PXVDOjw7JpNjOe1j2JyI3LjDnQoqZ8Je5B2KgVsACAAAAAAU2815RR57TQ9uDg0XjWjBkAKvf8yssxDMzrM4+FqP6AAAzM1AH0AAAAFZAAgAAAAAGQxC9L1e9DfO5XZvX1yvc3hTLtQEdKO9FPMkyg0Y9ZABXMAIAAAAADtmcMNJwdWLxQEArMGZQyzpnu+Z5yMmPAkvgq4eAKwNQVsACAAAAAAJ88zt4Y/Hoqh+zrf6KCOiUwHbOzCxSfp6k/qsZaYGEgAAzM2AH0AAAAFZAAgAAAAADLHK2LNCNRO0pv8n4fAsxwtUqCNnVK8rRgNiQfXpHSdBXMAIAAAAACf16EBIHRKD3SzjRW+LMOl+47QXA3CJhMzlcqyFRW22AVsACAAAAAAMGz4fAOa0EoVv90fUffwLjBrQhHATf+NdlgCR65vujAAAzM3AH0AAAAFZAAgAAAAAHiZJiXKNF8bbukQGsdYkEi95I+FSBHy1I5/hK2uEZruBXMAIAAAAADE+lZBa8HDUJPN+bF6xI9x4N7GF9pj3vBR7y0BcfFhBAVsACAAAAAAGIEN6sfqq30nyxW4dxDgXr/jz5HmvA9T1jx/pKCn4zgAAzM4AH0AAAAFZAAgAAAAAI1oa2OIw5TvhT14tYCGmhanUoYcCZtNbrVbeoMldHNZBXMAIAAAAAAx2nS0Ipblf2XOgBiUOuJFBupBhe7nb6QPLZlA4aMPCgVsACAAAAAA9xu828hugIgo0E3de9dZD+gTpVUGlwtDba+tw/WcbUoAAzM5AH0AAAAFZAAgAAAAABgTWS3Yap7Q59hii/uPPimHWXsr+DUmsqfwt/X73qsOBXMAIAAAAACKK05liW5KrmEAvtpCB1WUltruzUylDDpjea//UlWoOAVsACAAAAAAcgN4P/wakJ5aJK5c1bvJBqpVGND221dli2YicPFfuAYAAzQwAH0AAAAFZAAgAAAAABOAnBPXDp6i9TISQXvcNKwGDLepZTu3cKrB4vKnSCjBBXMAIAAAAADjjzZO7UowAAvpwyG8BNOVqLCccMFk3aDK4unUeft5ywVsACAAAAAA4zkCd4k9gvfXoD1C7vwTjNcdVJwEARh8h/cxZ4PNMfgAAzQxAH0AAAAFZAAgAAAAAHN8hyvT1lYrAsdiV
5GBdd5jhtrAYE/KnSjw2Ka9hjz9BXMAIAAAAAD794JK7EeXBs+D7yOVK7nWF8SbZ/7U8gZ7nnT9JFNwTAVsACAAAAAAg8Wt1HO3NhByq2ggux2a4Lo6Gryr24rEFIqh2acrwWMAAzQyAH0AAAAFZAAgAAAAAO93bPrq8bsnp1AtNd9ETnXIz0lH/2HYN/vuw9wA3fyFBXMAIAAAAABHlls5fbaF2oAGqptC481XQ4eYxInTC29aElfmVZgDUgVsACAAAAAANoQXEWpXJpgrSNK/cKi/m7oYhuSRlp1IZBF0bqTEATcAAzQzAH0AAAAFZAAgAAAAAL1YsAZm1SA0ztU6ySIrQgCCA74V6rr0/4iIygCcaJL6BXMAIAAAAADTXWTHWovGmUR1Zg9l/Aqq9H5mOCJQQrb/Dfae7e3wKAVsACAAAAAA5dunyJK6/SVfDD0t9QlNBcFqoZnf9legRjHaLSKAoQMAAzQ0AH0AAAAFZAAgAAAAAEoFAeHk0RZ9kD+cJRD3j7PcE5gzWKnyBrF1I/MDNp5mBXMAIAAAAACgHtc2hMBRSZjKw8RAdDHK+Pi1HeyjiBuAslGVNcW5tAVsACAAAAAAXzBLfq+GxRtX4Wa9fazA49DBLG6AjZm2XODStJKH8D0AAzQ1AH0AAAAFZAAgAAAAAAW+7DmSN/LX+/0uBVJDHIc2dhxAGz4+ehyyz8fAnNGoBXMAIAAAAAA6Ilw42EvvfLJ3Eq8Afd+FjPoPcQutZO6ltmCLEr8kxQVsACAAAAAAbbZalyo07BbFjPFlYmbmv0z023eT9eLkHqeVUnfUAUAAAzQ2AH0AAAAFZAAgAAAAANBdV7M7kuYO3EMoQItAbXv4t2cIhfaT9V6+s4cg9djlBXMAIAAAAABvz4MIvZWxxrcJCL5qxLfFhXiUYB1OLHdKEjco94SgDgVsACAAAAAAK2GVGvyPIKolF/ECcmfmkVcf1/IZNcaTv96N92yGrkEAAzQ3AH0AAAAFZAAgAAAAAMoAoiAn1kc79j5oPZtlMWHMhhgwNhLUnvqkqIFvcH1NBXMAIAAAAADcJTW7WiCyW0Z9YDUYwppXhLj4Ac1povpJvcAq+i48MQVsACAAAAAAIGxGDzoeB3PTmudl4+j6piQB++e33EEzuzAiXcqGxvUAAzQ4AH0AAAAFZAAgAAAAACI3j5QP7dWHpcT6WO/OhsWwRJNASBYqIBDNzW8IorEyBXMAIAAAAABxUpBSjXwCKDdGP9hYU+RvyR+96kChfvyyRC4jZmztqAVsACAAAAAAvBCHguWswb4X0xdcAryCvZgQuthXzt7597bJ5VxAMdgAAzQ5AH0AAAAFZAAgAAAAAKsbycEuQSeNrF8Qnxqw3x3og8JmQabwGqnDbqzFRVrrBXMAIAAAAACno/3ef2JZJS93SVVzmOZSN+jjJHT8s0XYq2M46d2sLAVsACAAAAAAAt5zLJG+/j4K8rnkFtAn8IvdUVNefe6utJ3rdzgwudIAAzUwAH0AAAAFZAAgAAAAAPXIcoO8TiULqlxzb74NFg+I8kWX5uXIDUPnh2DobIoMBXMAIAAAAADR6/drkdTpnr9g1XNvKDwtBRBdKn7c2c4ZNUVK5CThdQVsACAAAAAAJqOA1c6KVog3F4Hb/GfDb3jCxXDRTqpXWSbMH4ePIJsAAzUxAH0AAAAFZAAgAAAAAEa03ZOJmfHT6/nVadvIw71jVxEuIloyvxXraYEW7u7pBXMAIAAAAADzRlBJK75FLiKjz3djqcgjCLo/e3yntI3MnPS48OORhgVsACAAAAAAnQhx4Rnyj081XrLRLD5NLpWmRWCsd0M9Hl7Jl19R0h8AAzUyAH0AAAAFZAAgAAAAAKx8NLSZUU04pSSGmHa5fh2oLHsEN5mmNMNHL95/tuC9BXMAIAAAAAA59hcXVaN3MNdHoo11OcH1aPRzHCwpVjO9mGfMz4xh3QVsACAAAAAAYIPdjV2XbPj7dBeHPwnwhVU7zMuJ+xtMUW5mIOYtmdAAAzUzAH0AAAAFZAAgAAAAAHNKAUxUqBFNS9Ea9NgCZoXMWgwhP4x0/OvoaPRWMquXBXMAIAAAAABUZ551mnP4ZjX+PXU9ttomzuOpo427MVynpkyq+nsYCQVsACAAAAAALnVK5p2tTTeZEh1zYt4iqKIQT9Z0si//Hy1L85oF+5IAAzU0AH0AAAAFZAAgAAAAALfGXDlyDVcGaqtyHkLT0qpuRhJQLgCxtznazhFtuyn/BXMAIAAAAABipxlXDq14C62pXhwAeen5+syA+/C6bN4rtZYcO4zKwAVsACAAAAAAXUf0pzUq0NhLYagWDap4uEiwq5rLpcx29rWbt1NYMsMAAzU1AH0AAAAFZAAgAAAAANoEr8sheJjg4UCfBkuUzarU9NFoy1xwbXjs5ifVDeA9BXMAIAAAAABPoyTf6M+xeZVGES4aNzVlq7LgjqZXJ/QunjYVusGUEAVsACAAAAAA1hA2gMeZZPUNytk9K+lB1RCqWRudRr7GtadJlExJf8oAAzU2AH0AAAAFZAAgAAAAAKvDiK+xjlBe1uQ3SZTNQl2lClIIvpP/5CHwY6Kb3WlgBXMAIAAAAAANnxImq5MFbWaRBHdJp+yD09bVlcFtiFDYsy1eDZj+iQVsACAAAAAAWtsyO+FxMPSIezwsV1TJD8ZrXAdRnQM6DJ+f+1V3qEkAAzU3AH0AAAAFZAAgAAAAAF49IlFH9RmSUSvUQpEPUedEksrQUcjsOv44nMkwXhjzBXMAIAAAAADJtWGbk0bZzmk20obz+mNsp86UCu/nLLlbg7ppxYn7PgVsACAAAAAA3k0Tj/XgPQtcYijH8cIlQoe/VXf15q1nrZNmg7yWYEgAAzU4AH0AAAAFZAAgAAAAAOuSJyuvz50lp3BzXlFKnq62QkN2quNU1Gq1IDsnFoJCBXMAIAAAAAAqavH1d93XV3IzshWlMnzznucadBF0ND092/2ApI1AcAVsACAAAAAAzUrK4kpoKCmcpdZlZNI13fddjdoAseVe67jaX1LobIIAAzU5AH0AAAAFZAAgAAAAALtgC4Whb4ZdkCiI30zY6fwlsxSa7lEaOAU3SfUXr02XBXMAIAAAAACgdZ6U1ZVgUaZZwbIaCdlANpCw6TZV0bwg3DS1NC/mnAVsACAAAAAAzI49hdpp0PbO7S2KexISxC16sE73EUAEyuqUFAC/J48AAzYwAH0AAAAFZAAgAAAAAF6PfplcGp6vek1ThwenMHVkbZgrc/dHgdsgx1VdPqZ5BXMAIAAAAACha3qhWkqmuwJSEXPozDO8y1ZdRLyzt9Crt2vjGnT7AAVsACAAAAAA7nvcU59+LwxGupSF21jAeAE0x7JE94tjRkJfgM1yKU8AAzYxAH0AAAAFZAAgAAAAAKoLEhLvLjKc7lhOJfx+VrGJCx9tXlOSa9bxQzGR6rfbBXMAIAAAAAAIDK5wNnjRMBzET7x/KAMExL/zi1IumJM92XTgXfoPoAVsACAAAAAAFkUYWFwNr815dEdFqp+TiIoz
Dcq5IBNVkyMoDjharDQAAzYyAH0AAAAFZAAgAAAAADoQv6lutRmh5scQFvIW6K5JBquLxszuygM1tzBiGknIBXMAIAAAAADAD+JjW7FoBQ76/rsECmmcL76bmyfXpUU/awqIsZdO+wVsACAAAAAAPFHdLw3jssmEXsgtvl/RBNaUCRA1kgSwsofG364VOvQAAzYzAH0AAAAFZAAgAAAAAJNHUGAgn56KekghO19d11nai3lAh0JAlWfeP+6w4lJBBXMAIAAAAAD9XGJlvz59msJvA6St9fKW9CG4JoHV61rlWWnkdBRLzwVsACAAAAAAxwP/X/InJJHmrjznvahIMgj6pQR30B62UtHCthSjrP0AAzY0AH0AAAAFZAAgAAAAAHgYoMGjEE6fAlAhICv0+doHcVX8CmMVxyq7+jlyGrvmBXMAIAAAAAC/5MQZgTHuIr/O5Z3mXPvqrom5JTQ8IeSpQGhO9sB+8gVsACAAAAAAuPSXVmJUAUpTQg/A9Bu1hYczZF58KEhVofakygbsvJQAAzY1AH0AAAAFZAAgAAAAANpIljbxHOM7pydY877gpRQvYY2TGK7igqgGsavqGPBABXMAIAAAAAAqHyEu9gpurPOulApPnr0x9wrygY/7mXe9rAC+tPK80wVsACAAAAAA7gkPzNsS3gCxdFBWbSW9tkBjoR5ib+saDvpGSB3A3ogAAzY2AH0AAAAFZAAgAAAAAGR+gEaZTeGNgG9BuM1bX2R9ed4FCxBA9F9QvdQDAjZwBXMAIAAAAABSkrYFQ6pf8MZ1flgmeIRkxaSh/Eep4Btdx4QYnGGnwAVsACAAAAAApRovMiV00hm/pEcT4XBsyPNw0eo8RLAX/fuabjdU+uwAAzY3AH0AAAAFZAAgAAAAAFNprhQ3ZwIcYbuzLolAT5n/vc14P9kUUQComDu6eFyKBXMAIAAAAAAcx9z9pk32YbPV/sfPZl9ALIEVsqoLXgqWLVK/tP+heAVsACAAAAAA/qxvuvJbAHwwhfrPVpmCFzNvg2cU/NXaWgqgYUZpgXwAAzY4AH0AAAAFZAAgAAAAADgyPqQdqQrgfmJjRFAILTHzXbdw5kpKyfeoEcy6YYG/BXMAIAAAAAAE+3XsBQ8VAxAkN81au+f3FDeCD/s7KoZD+fnM1MJSSAVsACAAAAAAhRnjrXecwV0yeCWKJ5J/x12Xx4qVJahsCEVHB/1U2rcAAzY5AH0AAAAFZAAgAAAAAI0CT7JNngTCTUSei1Arw7eHWCD0jumv2rb7imjWIlWABXMAIAAAAABSP8t6ya0SyCphXMwnru6ZUDXWElN0NfBvEOhDvW9bJQVsACAAAAAAGWeGmBNDRaMtvm7Rv+8TJ2sJ4WNXKcp3tqpv5Se9Ut4AAzcwAH0AAAAFZAAgAAAAAD/FIrGYFDjyYmVb7oTMVwweWP7A6F9LnyIuNO4MjBnXBXMAIAAAAACIZgJCQRZu7NhuNMyOqCn1tf+DfU1qm10TPCfj5JYV3wVsACAAAAAA5hmY4ptuNxULGf87SUFXQWGAONsL9U29duh8xqsHtxoAAzcxAH0AAAAFZAAgAAAAAHIkVuNDkSS1cHIThKc/O0r2/ubaABTOi8Q1r/dvBAsEBXMAIAAAAADdHYqchEiJLM340c3Q4vJABmmth3+MKzwLYlsG6GS7sQVsACAAAAAADa+KP/pdTiG22l+ZWd30P1iHjnBF4zSNRdFm0oEK82kAAzcyAH0AAAAFZAAgAAAAAJmoDILNhC6kn3masElfnjIjP1VjsjRavGk1gSUIjh1NBXMAIAAAAAD97Ilvp3XF8T6MmVVcxMPcdL80RgQ09UoC6PnoOvZ1IQVsACAAAAAA2RK3Xng6v8kpvfVW9tkVXjpE+BSnx9/+Fw85Evs+kUEAAzczAH0AAAAFZAAgAAAAAI5bm3YO0Xgf0VT+qjVTTfvckecM3Cwqj7DTKZXf8/NXBXMAIAAAAAD/m+h8fBhWaHm6Ykuz0WX1xL4Eme3ErLObyEVJf8NCywVsACAAAAAAfb1VZZCqs2ivYbRzX4p5CtaCkKW+g20Pr57FWXzEZi8AAzc0AH0AAAAFZAAgAAAAANqo4+p6qdtCzcB4BX1wQ6llU7eFBnuu4MtZwp4B6mDlBXMAIAAAAAAGiz+VaukMZ+6IH4jtn4KWWdKK4/W+O+gRioQDrfzpMgVsACAAAAAAG4YYkTp80EKo59mlHExDodRQFR7njhR5dmISwUJ6ukAAAzc1AH0AAAAFZAAgAAAAAPrFXmHP2Y4YAm7b/aqsdn/DPoDkv7B8egWkfe23XsM1BXMAIAAAAAAGhwpKAr7skeqHm3oseSbO7qKNhmYsuUrECBxJ5k+D2AVsACAAAAAAAqPQi9luYAu3GrFCEsVjd9z2zIDcp6SPTR2w6KQEr+IAAzc2AH0AAAAFZAAgAAAAABzjYxwAjXxXc0Uxv18rH8I3my0Aguow0kTwKyxbrm+cBXMAIAAAAADVbqJVr6IdokuhXkEtXF0C2gINLiAjMVN20lE20Vmp2QVsACAAAAAAD7K1Fx4gFaaizkIUrf+EGXQeG7QX1jadhGc6Ji471H8AAzc3AH0AAAAFZAAgAAAAAFMm2feF2fFCm/UC6AfIyepX/xJDSmnnolQIBnHcPmb5BXMAIAAAAABLI11kFrQoaNVZFmq/38aRNImPOjdJh0Lo6irI8M/AaAVsACAAAAAAOWul0oVqJ9CejD2RqphhTC98DJeRQy5EwbNerU2+4l8AAzc4AH0AAAAFZAAgAAAAAJvXB3KyNiNtQko4SSzo/9b2qmM2zU9CQTTDfLSBWMgRBXMAIAAAAAAvjuVP7KsLRDeqVqRziTKpBrjVyqKiIbO9Gw8Wl2wFTAVsACAAAAAADlE+oc1ins+paNcaOZJhBlKlObDJ4VQORWjFYocM4LgAAzc5AH0AAAAFZAAgAAAAAPGdcxDiid8z8XYnfdDivNMYVPgBKdGOUw6UStU+48CdBXMAIAAAAAARj6g1Ap0eEfuCZ4X2TsEw+Djrhto3fA5nLwPaY0vCTgVsACAAAAAAoHqiwGOUkBu8SX5U1yHho+UIFdSN2MdQN5s6bQ0EsJYAAzgwAH0AAAAFZAAgAAAAAP5rGPrYGt3aKob5f/ldP0qrW7bmWvqnKY4QwdDWz400BXMAIAAAAADTQkW2ymaaf/bhteOOGmSrIR97bAnJx+yN3yMj1bTeewVsACAAAAAADyQnHGH2gF4w4L8axUsSTf6Ubk7L5/eoFOJk12MtZAoAAzgxAH0AAAAFZAAgAAAAAAlz6wJze5UkIxKpJOZFGCOf3v2KByWyI6NB6JM9wNcBBXMAIAAAAABUC7P/neUIHHoZtq0jFVBHY75tSFYr1Y5S16YN5XxC1QVsACAAAAAAgvxRbXDisNnLY3pfsjDdnFLtkvYUC4lhA68eBXc7KAwAAzgyAH0AAAAFZAAgAAAAAFJ8AtHcjia/9Y5pLEc3qVgH5xKiXw12G9Kn2A1EY8McBXMAIAAAAAAxe7Bdw7eUSBk/oAawa7u
icTEDgXLymRNhBy1LAxhDvwVsACAAAAAAxKPaIBKVx3jTA+R/el7P7AZ7efrmTGjJs3Hj/YdMddwAAzgzAH0AAAAFZAAgAAAAAO8uwQUaKFb6vqR3Sv3Wn4QAonC2exOC9lGG1juqP5DtBXMAIAAAAABZf1KyJgQg8/Rf5c02DgDK2aQu0rNCOvaL60ohDHyY+gVsACAAAAAAqyEjfKC8lYoIfoXYHUqHZPoaA6EK5BAZy5dxXZmay4kAAzg0AH0AAAAFZAAgAAAAAE8YtqyRsGCeiR6hhiyisR/hccmK4nZqIMzO4lUBmEFzBXMAIAAAAAC1UYOSKqAeG1UJiKjWFVskRhuFKpj9Ezy+lICZvFlN5AVsACAAAAAA6Ct9nNMKyRazn1OKnRKagm746CGu+jyhbL1qJnZxGi0AAzg1AH0AAAAFZAAgAAAAAPhCrMausDx1QUIEqp9rUdRKyM6a9AAx7jQ3ILIu8wNIBXMAIAAAAACmH8lotGCiF2q9VQxhsS+7LAZv79VUAsOUALaGxE/EpAVsACAAAAAAnc1xCKfdvbUEc8F7XZqlNn1C+hZTtC0I9I3LL06iaNkAAzg2AH0AAAAFZAAgAAAAAOBi/GAYFcstMSJPgp3VkMiuuUUCrZytvqYaU8dwm8v2BXMAIAAAAACEZSZVyD3pKzGlbdwlYmWQhHHTV5SnNLknl2Gw8IaUTQVsACAAAAAAfsLZsEDcWSuNsIo/TD1ReyQW75HPMgmuKZuWFOLKRLoAAzg3AH0AAAAFZAAgAAAAAIQuup+YGfH3mflzWopN8J1X8o8a0d9CSGIvrA5HOzraBXMAIAAAAADYvNLURXsC2ITMqK14LABQBI+hZZ5wNf24JMcKLW+84AVsACAAAAAACzfjbTBH7IwDU91OqLAz94RFkoqBOkzKAqQb55gT4/MAAzg4AH0AAAAFZAAgAAAAAKsh0ADyOnVocFrOrf6MpTrNvAj8iaiE923DPryu124gBXMAIAAAAADg24a8NVE1GyScc6tmnTbmu5ulzO+896fE92lN08MeswVsACAAAAAAaPxcOIxnU7But88/yadOuDJDMcCywwrRitaxMODT4msAAzg5AH0AAAAFZAAgAAAAAKkVC2Y6HtRmv72tDnPUSjJBvse7SxLqnr09/Uuj9sVVBXMAIAAAAABYNFUkH7ylPMN+Bc3HWX1e0flGYNbtJNCY9SltJCW/UAVsACAAAAAAZYK/f9H4OeihmpiFMH7Wm7uLvs2s92zNA8wyrNZTsuMAAzkwAH0AAAAFZAAgAAAAADDggcwcb/Yn1Kk39sOHsv7BO/MfP3m/AJzjGH506Wf9BXMAIAAAAAAYZIsdjICS0+BDyRUPnrSAZfPrwtuMaEDEn0/ijLNQmAVsACAAAAAAGPnYVvo2ulO9z4LGd/69NAklfIcZqZvFX2KK0s+FcTUAAzkxAH0AAAAFZAAgAAAAAEWY7dEUOJBgjOoWVht1wLehsWAzB3rSOBtLgTuM2HC8BXMAIAAAAAAAoswiHRROurjwUW8u8D5EUT+67yvrgpB/j6PzBDAfVwVsACAAAAAA6NhRTYFL/Sz4tao7vpPjLNgAJ0FX6P/IyMW65qT6YsMAAzkyAH0AAAAFZAAgAAAAAPZaapeAUUFPA7JTCMOWHJa9lnPFh0/gXfAPjA1ezm4ZBXMAIAAAAACmJvLY2nivw7/b3DOKH/X7bBXjJwoowqb1GtEFO3OYgAVsACAAAAAA/JcUoyKacCB1NfmH8vYqC1f7rd13KShrQqV2r9QBP44AAzkzAH0AAAAFZAAgAAAAAK00u6jadxCZAiA+fTsPVDsnW5p5LCr4+kZZZOTDuZlfBXMAIAAAAAAote4zTEYMDgaaQbAdN8Dzv93ljPLdGjJzvnRn3KXgtQVsACAAAAAAxXd9Mh6R3mnJy8m7UfqMKi6oD5DlZpkaOz6bEjMOdiwAAzk0AH0AAAAFZAAgAAAAAFbgabdyymiEVYYwtJSWa7lfl/oYuj/SukzJeDOR6wPVBXMAIAAAAADAFGFjS1vPbN6mQEhkDYTD6V2V23Ys9gUEUMGNvMPkaAVsACAAAAAAL/D5Sze/ZoEanZLK0IeEkhgVkxEjMWVCfmJaD3a8uNIAAzk1AH0AAAAFZAAgAAAAABNMR6UBv2E627CqLtQ/eDYx7OEwQ7JrR4mSHFa1N8tLBXMAIAAAAAAxH4gucI4UmNVB7625C6hFSVCuIpJO3lusJlPuL8H5EQVsACAAAAAAVLHNg0OUVqZ7WGOP53BkTap9FOw9dr1P4J8HxqFqU04AAzk2AH0AAAAFZAAgAAAAAG8cd6WBneNunlqrQ2EmNf35W7OGObGq9WL4ePX+LUDmBXMAIAAAAAAjJ2+sX87NSis9hBsgb1QprVRnO7Bf+GObCGoUqyPE4wVsACAAAAAAs9c9SM49/pWmyUQKslpt3RTMBNSRppfNO0JBvUqHPg0AAzk3AH0AAAAFZAAgAAAAAFWOUGkUpy8yf6gB3dio/aOfRKh7XuhvsUj48iESFJrGBXMAIAAAAAAY7sCDMcrUXvNuL6dO0m11WyijzXZvPIcOKob6IpC4PQVsACAAAAAAJOP+EHz6awDb1qK2bZQ3kTV7wsj5Daj/IGAWh4g7omAAAzk4AH0AAAAFZAAgAAAAAGUrIdKxOihwNmo6B+aG+Ag1qa0+iqdksHOjQj+Oy9bZBXMAIAAAAABwa5dbI2KmzBDNBTQBEkjZv4sPaeRkRNejcjdVymRFKQVsACAAAAAA4ml/nm0gJNTcJ4vuD+T2Qfq2fQZlibJp/j6MOGDrbHMAAzk5AH0AAAAFZAAgAAAAAOx89xV/hRk64/CkM9N2EMK6aldII0c8smdcsZ46NbP8BXMAIAAAAADBF6tfQ+7q9kTuLyuyrSnDgmrdmrXkdhl980i1KHuGHgVsACAAAAAACUqiFqHZdGbwAA+hN0YUE5zFg+H+dabIB4dj5/75W/YAAzEwMAB9AAAABWQAIAAAAADJDdC9aEFl4Y8J/awHbnXGHjfP+VXQilPHJg7ewaJI7AVzACAAAAAAE+tqRl6EcBMXvbr4GDiNIYObTsYpa1n6BJk9EjIJVicFbAAgAAAAAJVc+HYYqa0m1Hq6OiRX8c0iRnJYOt6AJAJoG0sG3GMSAAMxMDEAfQAAAAVkACAAAAAA3F9rjEKhpoHuTULVGgfUsGGwJs3bISrXkFP1v6KoQLgFcwAgAAAAAIBf0tXw96Z/Ds0XSIHX/zk3MzUR/7WZR/J6FpxRWChtBWwAIAAAAABWrjGlvKYuTS2s8L9rYy8Hf0juFGJfwQmxVIjkTmFIGQADMTAyAH0AAAAFZAAgAAAAAOYIYoWkX7dGuyKfi3XssUlc7u/gWzqrR9KMkikKVdmSBXMAIAAAAABVF2OYjRTGi9Tw8XCAwZWLpX35Yl271TlNWp6N/nROhAVsACAAAAAA0nWwYzXQ1+EkDvnGq+SMlq20z+j32Su+i/A95SggPb4AAzEwMwB9AAAABWQAIAAAAACMtPm12YtdEA
vqu6Eji1yuRXnu1RJP6h0l7pH3lSH4MwVzACAAAAAAENyCFfyUAh1veQBGx+cxiB7Sasrj41jzCGflZkB5cRMFbAAgAAAAAKdI2LMqISr/T5vuJPg6ZRBm5fVi2aQCc4ra3A4+AjbDAAMxMDQAfQAAAAVkACAAAAAAvlI4lDcs6GB1cnm/Tzo014CXWqidCdyE5t2lknWQd4QFcwAgAAAAAD60SpNc4O2KT7J0llKdSpcX1/Xxs97N715a1HsTFkmBBWwAIAAAAABuuRkJWAH1CynggBt1/5sPh9PoGiqTlS24D/OE2uHXLQADMTA1AH0AAAAFZAAgAAAAAKl8zcHJRDjSjJeV/WvMxulW1zrTFtaeBy/aKKhadc6UBXMAIAAAAADBdWQl5SBIvtZZLIHszePwkO14W1mQ0izUk2Ov21cPNAVsACAAAAAAHErCYycpqiIcCZHdmPL1hi+ovLQk4TAvENpfLdTRamQAAzEwNgB9AAAABWQAIAAAAABb6LXDWqCp1beQgQjj8I3sRTtFhlrmiBi+h/+ikmrvugVzACAAAAAA9stpgTecT7uTyaGNs3K9Bp0A7R0QaIAOfscyMXHBPX8FbAAgAAAAAHUt+McyXrJ1H8SwnHNVO181Ki8vDAM1f7XI26mg95ZDAAMxMDcAfQAAAAVkACAAAAAA97NTT+81PhDhgptNtp4epzA0tP4iNb9j1AWkiiiKGM8FcwAgAAAAAKPbHg7ise16vxmdPCzksA/2Mn/qST0L9Xe8vnQugVkcBWwAIAAAAABB0EMXfvju4JU/mUH/OvxWbPEl9NJkcEp4iCbkXI41fAADMTA4AH0AAAAFZAAgAAAAAMqpayM2XotEFmm0gwQd9rIzApy0X+7HfOhNk6VU7F5lBXMAIAAAAACJR9+q5T9qFHXFNgGbZnPubG8rkO6cwWhzITQTmd6VgwVsACAAAAAAOZLQ6o7e4mVfDzbpQioa4d3RoTvqwgnbmc5Qh2wsZuoAAzEwOQB9AAAABWQAIAAAAADQnslvt6Hm2kJPmqsTVYQHE/wWeZ4bE1XSkt7TKy0r1gVzACAAAAAA8URTA4ZMrhHPvlp53TH6FDCzS+0+61qHm5XK6UiOrKEFbAAgAAAAAHQbgTCdZcbdA0avaTmZXUKnIS7Nwf1tNrcXDCw+PdBRAAMxMTAAfQAAAAVkACAAAAAAhujlgFPFczsdCGXtQ/002Ck8YWQHHzvWvUHrkbjv4rwFcwAgAAAAALbV0lLGcSGfE7mDM3n/fgEvi+ifjl7WZ5b3aqjDNvx9BWwAIAAAAACbceTZy8E3QA1pHmPN5kTlOx3EO8kJM5PUjTVftw1VpgADMTExAH0AAAAFZAAgAAAAABm/6pF96j26Jm7z5KkY1y33zcAEXLx2n0DwC03bs/ixBXMAIAAAAAD01OMvTZI/mqMgxIhA5nLs068mW+GKl3OW3ilf2D8+LgVsACAAAAAAaLvJDrqBESTNZSdcXsd+8GXPl8ZkUsGpeYuyYVv/kygAAzExMgB9AAAABWQAIAAAAACfw9/te4GkHZAapC9sDMHHHZgmlTrccyJDPFciOMSOcwVzACAAAAAAIIC1ZpHObvmMwUfqDRPl4C1aeuHwujM1G/yJbvybMNAFbAAgAAAAAAs9x1SnVpMfNv5Bm1aXGwHmbbI9keWa9HRD35XuCBK5AAMxMTMAfQAAAAVkACAAAAAAkxHJRbnShpPOylLoDdNShfILeA1hChKFQY9qQyZ5VmsFcwAgAAAAAKidrY+rC3hTY+YWu2a7fuMH2RD/XaiTIBW1hrxNCQOJBWwAIAAAAACW0kkqMIzIFMn7g+R0MI8l15fr3k/w/mHtY5n6SYTEwAADMTE0AH0AAAAFZAAgAAAAAByuYl8dBvfaZ0LO/81JW4hYypeNmvLMaxsIdvqMPrWoBXMAIAAAAABNddwobOUJzm9HOUD8BMZJqkNCUCqstHZkC76FIdNg9AVsACAAAAAAQQOkIQtkyNavqCnhQbNg3HfqrJdsAGaoxSJePJl1qXsAAzExNQB9AAAABWQAIAAAAABxMy7X5hf7AXGDz3Y/POu1ZpkMlNcSvSP92NOO/Gs7wAVzACAAAAAAHJshWo2T5wU2zvqCyJzcJQKQaHFHpCpMc9oWBXkpUPoFbAAgAAAAAGeiJKzlUXAvL0gOlW+Hz1mSa2HsV4RGmyLmCHlzbAkoAAMxMTYAfQAAAAVkACAAAAAAlqbslixl7Zw3bRlibZbe/WmKw23k8uKeIzPKYEtbIy0FcwAgAAAAAHEKwpUxkxOfef5HYvulXPmdbzTivwdwrSYIHDeNRcpcBWwAIAAAAADuPckac21Hrg/h0kt5ShJwVEZ9rx6SOHd2+HDjqxEWTQADMTE3AH0AAAAFZAAgAAAAAMXrXx0saZ+5gORmwM2FLuZG6iuO2YS+1IGPoAtDKoKBBXMAIAAAAADIQsxCr8CfFKaBcx8kIeSywnGh7JHjKRJ9vJd9x79y7wVsACAAAAAAcvBV+SykDYhmRFyVYwFYB9oBKBSHr55Jdz2cXeowsUQAAzExOAB9AAAABWQAIAAAAAAm83FA9yDUpwkbKTihe7m53u+DivS9BU2b4vQMtCVQ2AVzACAAAAAAz3m1UB/AbZPa4QSKFDnUgHaT78+6iGOFAtouiBorEgEFbAAgAAAAAIgbpyYtJj5513Z5XYqviH/HXG/5+mqR52iBbfqMmDtZAAMxMTkAfQAAAAVkACAAAAAAJRzYK0PUwr9RPG2/7yID0WgcTJPB2Xjccp5LAPDYunkFcwAgAAAAAIIh24h3DrltAzNFhF+MEmPrZtzr1PhCofhChZqfCW+jBWwAIAAAAAAzRNXtL5o9VXMk5D5ylI0odPDJDSZZry1wfN+TedH70gADMTIwAH0AAAAFZAAgAAAAAHSaHWs/dnmI9sc7nB50VB2Bzs0kHapMHCQdyVEYY30TBXMAIAAAAACkV22lhEjWv/9/DubfHBAcwJggKI5mIbSK5L2nyqloqQVsACAAAAAAS19m7DccQxgryOsBJ3GsCs37yfQqNi1G+S6fCXpEhn4AAzEyMQB9AAAABWQAIAAAAAAC/I4TQRtCl12YZmdGz17X4GqSQgfwCPgRBwdHmdwu+QVzACAAAAAAx8f3z2ut/RAZhleari4vCEE+tNIn4ikjoUwzitfQ588FbAAgAAAAAJci0w1ZB8W2spJQ+kMpod6HSCtSR2jrabOH+B0fj3A4AAMxMjIAfQAAAAVkACAAAAAADGB5yU2XT0fse/MPWgvBvZikVxrl5pf3S5K1hceKWooFcwAgAAAAAIxTmlLHMjNaVDEfJbXvRez0SEPWFREBJCT6qTHsrljoBWwAIAAAAAAlswzAl81+0DteibwHD+CG5mZJrfHXa9NnEFRtXybzzwADMTIzAH0AAAAFZAAgAAAAABmO7QD9vxWMmFjIHz13lyOeV6vHT6mYCsWxF7hb/yOjBXMAIAAAAACT9lmgkiqzuWG24afuzYiCeK9gmJqacmxAruIukd0xEAVsA
CAAAAAAZa0/FI/GkZR7CtX18Xg9Tn9zfxkD0UoaSt+pIO5t1t4AAzEyNAB9AAAABWQAIAAAAAAfPUoy7QyZKhIIURso+mkP9qr1izbjETqF5s22GwjCjAVzACAAAAAAvLMsIDQ/go4VUxeh50UHmsvMvfx51cwyONnRD2odvC0FbAAgAAAAAKMb+1CodEalAFnDrEL1Ndt8ztamZ+9134m9Kp3GQgd+AAMxMjUAfQAAAAVkACAAAAAAE3ZqUar0Bq2zWbARE0bAv98jBlK9UJ73/xcwdMWWlSkFcwAgAAAAAK4M+MmC+9sFiFsumMyJZQKxWmmJiuG9H7IzKw083xxkBWwAIAAAAAAqkAONzhvMhkyL1D/6h7QQxEkdhC3p2WjXH+VGq5qCqQADMTI2AH0AAAAFZAAgAAAAAMo8FJiOq63cAmyk2O7eI7GcbQh/1j4RrMTqly3rexftBXMAIAAAAADjVmpd0WiRGTw/gAqEgGolt2EI7Csv14vKdmYoMD0aAgVsACAAAAAA07XQBzBUQMNw7F2/YxJjZNuPVpHTTgbLd1oGk77+bygAAzEyNwB9AAAABWQAIAAAAACu5IGaIx7A3Jvly/kzlCsSA4s3iJwuIl8jEdRH0k93NwVzACAAAAAA9NRUyxYE+t0Xyosyt6vIfMFW/vBoYg6sR+jBNs4JAxIFbAAgAAAAAAzyZ91dx+0oMlOVAjRGiMrPySikY/U9eMEB4WJb3uWtAAMxMjgAfQAAAAVkACAAAAAALkRy0GJInXYLA+cgjs6Myb0a+Gu9hgXhHvhLNoGWfckFcwAgAAAAANbALyt9zCSvwnLaWCd2/y2eoB7qkWTvv1Ldu8r40JPuBWwAIAAAAAD4Fl5bV5sz4isIE9bX+lmAp+aAKaZgVYVZeVfrItkCZAADMTI5AH0AAAAFZAAgAAAAAGoUK/DSWhT8LZhszSUqDbTrp8cSA7rdqmADKL+MILtTBXMAIAAAAABHnEE9bVa6lvhfhEMkkV2kzSSxH/sMW/FIJuw3CzWs6wVsACAAAAAAanavcBdqZxgRGKvEK95wTmeL1K1CeDSXZsXUAs81uOgAAzEzMAB9AAAABWQAIAAAAAC922ZDQE3h2fQKibGMZ9hV0WNlmrPYYSdtaSyYxsWYqgVzACAAAAAAagMovciKK6WVjIc2cCj8nK5O/gVOFFVeVAJpRp89tmQFbAAgAAAAAKcTFfPQzaFiAtSFhqbN02sCE1BKWJSrRfGN5L6oZwzkAAMxMzEAfQAAAAVkACAAAAAAtK+JqX3K/z2txjAU15DgX4y90DS2YLfIJFolCOkJJJwFcwAgAAAAAMnR5V7gfX7MNqqUdL5AkWlkhyFXaBRVNej+Rcn8lrQkBWwAIAAAAAA2cDNRXZuiC241TGRvdFyctJnrNcdbZOP9zHio81tkngADMTMyAH0AAAAFZAAgAAAAAAeGrIMK/bac6kPczxbvRYqKMkcpeI2FjdMpD91FDWIvBXMAIAAAAAAix62z1LeS8yvSXCl5gHSIomjyx76fF3S1lp9k900hygVsACAAAAAAiYwzf2m71aWFD5ajcXyW2JX2EzQOkBroTGMg29nLPYIAAzEzMwB9AAAABWQAIAAAAACphf298InM0Us4HT8o1W1MGw0D/02vd7Jh+U0h7qaFaQVzACAAAAAAFXtk7YpqsOJxsqGWSIL+YcBE96G3Zz9D31gPqDW94y8FbAAgAAAAAAOrS1KVA94rjB1jZ1pPocpCeBG+B14RzWoHqVDpp7JbAAMxMzQAfQAAAAVkACAAAAAATLDS2cuDVM3yDMuWNgk2iGKBTzPpfJMbvxVOSY39ZfcFcwAgAAAAAPT5wRi2cLHIUflXzm6EQB/m7xdThP80ir1VV/JBBqvxBWwAIAAAAAB9lEtZS0aXCFbCtSbhnis27S5IPcfWGygHW8AHn3QqzwADMTM1AH0AAAAFZAAgAAAAAJNjExiZVX7jfFGfYpQu16qxLN0YPqVU/5CQ/Y67YSinBXMAIAAAAABMpm2+6KrkRUlXzQoMPHrQmIO6dkQz66tYdfTeA3dKqQVsACAAAAAAFXobHiMLvNZuEPr8jtewCX2J93EZG3JNeyVg92fue6YAAzEzNgB9AAAABWQAIAAAAABlFkYtLCx901X6QVVMkSn6Z7k30UF4xHaA0OZJJ9bdyQVzACAAAAAATez+F9GHcGzTp7jjv4feboUNb8JCkIp4EqcPFisnq7MFbAAgAAAAACE7JvOpBgMoZ7kRd4QbxIhxukPTUxXpzhjnBHiR7XoRAAMxMzcAfQAAAAVkACAAAAAA8NJKN0IxZnruhswGQkiruv8Ih0EMwDcSZx/Xasup9dkFcwAgAAAAAKaJZRxzA+Igeydvuk6cSwUHXcrmT4PjhuPu//FslpdnBWwAIAAAAAD53Rok1Vq/PMAnXmarqoHJ0PEyYUBmVESa9hIpCv/G9QADMTM4AH0AAAAFZAAgAAAAABHxHdEClz7hbSSgE58+dWLlSMJnoPz+jFxp4bB1GmLQBXMAIAAAAAD3nSvT6aGD+A110J/NwEfp0nPutlmuB5B+wA3CC3noGAVsACAAAAAA3Apjd+TapONB7k5wBVwTWgn8t+Sq2oyyU5/+as109RcAAzEzOQB9AAAABWQAIAAAAAC/o8qW/ifk3KuJ01VFkyNLgQafxB5/bGs2G5VyyVafOwVzACAAAAAA1bMqAFGDHSl6BYNLbxApvkAv2K1/oafywiX0MDz1dGUFbAAgAAAAAHJXLlId3edFoniLD/9K2A5973MeP2Ro31flDyqm3l5QAAMxNDAAfQAAAAVkACAAAAAAY2V8I1bz3a1AxTtmED6UhdhA09huFkuuEX8R+d/WDPUFcwAgAAAAAPTVoNRiI76tcRKqd+JBBVyy4+YcKST42p0QX2BtmQ2VBWwAIAAAAACcxt9hg14WqPNiDv1MkqVljM2e2KJEv53lA17LhV6ZigADMTQxAH0AAAAFZAAgAAAAAO2kSsW0WGN9AOtK4xK2SHrGhWiaAbMEKT4iZkRpaDN/BXMAIAAAAABKGzQcPM8LT2dwOggxoWjv/1imYWabbG/G4kBw8OWaxAVsACAAAAAAC9hLK1dScQTAqg+YAG3ObdPzg2Xet57HmOFpGmyUR9UAAzE0MgB9AAAABWQAIAAAAAAiCwzNEEaH/mDam68IdDftnhthyUFdb+ZCNSBQ91WlHQVzACAAAAAA7tHyHcxCzmbJeFYZyPm4mEgkTGKOvwY4MX82OvH0Jn8FbAAgAAAAAAb5IAbZ1hXCNegQ+S+C9i/Z8y6sS8KeU04V6hXa2ml6AAMxNDMAfQAAAAVkACAAAAAAGuCHVNJSuoVkpPOnS5s89GuA+BLi2IPBUr2Bg1sWEPIFcwAgAAAAAEl1gncS5/xO7bQ/KQSstRV3rOT2SW6nV92ZANeG2SR6BWwAIAAAAAA9LOcKmhek8F2wAh8yvT/vjp2gaouuO+Hmv10lwAeWPAADMTQ0AH0AAAAFZAAgAAAAAMfxz7gEaoCdPvXr
ubDhCZUS0ARLZc1svgbXgMDlVBPgBXMAIAAAAAB6a5dDA3fuT5Vz2KvAcbUEFX/+B7Nw2p1QqbPoQ5TTuAVsACAAAAAAcf/y75UOuI62A6vWH7bYr/5Jz+nirZVYK/81trN6XOQAAzE0NQB9AAAABWQAIAAAAACnYsqF/VzmjIImC9+dqrHO1TM6lJ6fRwM0mM6Wf6paOwVzACAAAAAA5tgZzch8uDCR1ky3SllVaKVpxAlbrhvlNDTazZZRZOAFbAAgAAAAALeGiLJS4z2zhgVpxzyPdRYyACP9QzQBOob34YrIZumCAAMxNDYAfQAAAAVkACAAAAAAEC0sIVmadtW4YMuRXH7RpAhXclsd+3bmqGXCMeaT014FcwAgAAAAABPpXh0uzpsJJB+IRUNajmMB9WGwswfpw5T9xk3Xj6ANBWwAIAAAAAAmf+NYh9TZ/QRu3w/GQz66n7DtfbJijN3G7KzeL8lstAADMTQ3AH0AAAAFZAAgAAAAABaIB3n49Xm9cOafSrQsE0WCcYp8rMIO/qVwIlMF5YLRBXMAIAAAAAC9EyWJV3xOu9bzgdJ/yX+ko7qLf1u3AxNMataW2C9EzQVsACAAAAAAvVbDkLxXx2DcMLifIQ3K0IIJcLcAG9DUrNfI6aoUjNcAAzE0OAB9AAAABWQAIAAAAAA5rZItA/cocRnngYqcJ3nBXQ+l688aKz3EQyLbYYunPAVzACAAAAAAwKyA+L7TgxztPClLrIMk2JXR+w7c04N3ZOqPgjvrIvsFbAAgAAAAACzvZ33h6aWEe8hmo+1f6OXJ72FY5hvWaUuha64ZV3KFAAMxNDkAfQAAAAVkACAAAAAA3htn7oHJ0YYpIrs+Mzyh85Ys67HwAdv5LQl1mCdoMWkFcwAgAAAAAEHjCtNNLenHuSIYux6ezAHsXDaj2DlTF67ToDhDDe6HBWwAIAAAAAD+P4H0sk9jOd+7vOANt2/1Ectb+4ZRGPE8GkHWNXW3MgADMTUwAH0AAAAFZAAgAAAAAEnt18Km/nqggfIJWxzTr9r3hnXNaueG6XO9A5G11LnGBXMAIAAAAAD7QxzGMN/ard5TfFLecE6uusMmXG2+RBsBR+/NCQHUwAVsACAAAAAAQEZ1ZZ8GC8rdbg7s87OM5Gr9qkTXS9+P5DuAZxj5Gl4AAzE1MQB9AAAABWQAIAAAAAAVAKK/GoY8AACu/hyMpO4hdLq6JnEyWNzkyci9sbaD/wVzACAAAAAA2HmeqpMlvvBpV2zQTYIRmsc4MFlfHRwLof0ycJgMg/MFbAAgAAAAACdltCeWi5E/q1Li1eXLChpM2D9QQSGLBZ82NklQSc0oAAMxNTIAfQAAAAVkACAAAAAAhHyq1GQC/GiMwpYjcsfkNxolJ10ARKjIjfkW1Wipzi0FcwAgAAAAAD/uaGWxTDq87F8XZ6CrFI+RNa8yMqfSZdqK00Kj833BBWwAIAAAAAD6aEdOO0CsQGagioOCvANPCEHSpJ8BSixlPBq5ERhB7AADMTUzAH0AAAAFZAAgAAAAABAJJxHoZD+MQBWqm9UM9Dd3z5ZohIZGWRaRVRsMptKQBXMAIAAAAADrE/ca+gqj/SH4oao4wE4qn2ovoTydzcMbDbrfnUs3zAVsACAAAAAAeNCIQN6hVnGJinytQRFGlQ2ocoprXNqpia+BSxzl+uwAAzE1NAB9AAAABWQAIAAAAAAv01wz7VG9mTepjXQi6Zma+7b/OVBaKVkWNbgDLr1mFgVzACAAAAAA0I5sxz8r6wkCp5Tgvr+iL4p6MxSOq5d3e1kZG+0b7NkFbAAgAAAAAIA32v6oGkAOS96HexGouNTex+tLahtx9QF2dgGClk6WAAMxNTUAfQAAAAVkACAAAAAAWXecRwxSon68xaa9THXnRDw5ZfzARKnvvjTjtbae6T0FcwAgAAAAAPh0UfUMEo7eILCMv2tiJQe1bF9qtXq7GJtC6H5Va4fIBWwAIAAAAADqFr1ThRrTXNgIOrJWScO9mk86Ufi95IDu5gi4vP+HWQADMTU2AH0AAAAFZAAgAAAAAEY5WL8/LpX36iAB1wlQrMO/xHVjoO9BePVzbUlBYo+bBXMAIAAAAABoKcpadDXUARedDvTmzUzWPe1jTuvD0z9oIcZmKuiSXwVsACAAAAAAJuJbwuaMrAFoI+jU/IYr+k4RzAqITrOjAd3HWCpJHqEAAzE1NwB9AAAABWQAIAAAAADnJnWqsfx0xqNnqfFGCxIplVu8mXjaHTViJT9+y2RuTgVzACAAAAAAWAaSCwIXDwdYxWf2NZTly/iKVfG/KDjHUcA1BokN5sMFbAAgAAAAAJVxavipE0H4/JQvhagdytXBZ8qGooeXpkbPQ1RfYMVHAAMxNTgAfQAAAAVkACAAAAAAsPG7LaIpJvcwqcbtfFUpIjj+vpNj70Zjaw3eV9T+QYsFcwAgAAAAAJQ71zi0NlCyY8ZQs3IasJ4gB1PmWx57HpnlCf3+hmhqBWwAIAAAAACD58TO6d+71GaOoS+r73rAxliAO9GMs4Uc8JbOTmC0OwADMTU5AH0AAAAFZAAgAAAAAAGiSqKaQDakMi1W87rFAhkogfRAevnwQ41onWNUJKtuBXMAIAAAAAASgiDpXfGh7E47KkOD8MAcX8+BnDShlnU5JAGdnPdqOAVsACAAAAAAI+2TTQIgbFq4Yr3lkzGwhG/tqChP7hRAx2W0fNaH6jcAAzE2MAB9AAAABWQAIAAAAAB7L4EnhjKA5xJD3ORhH2wOA1BvpnQ+7IjRYi+jjVEaJAVzACAAAAAAuhBIm0nL3FJnVJId+7CKDASEo+l2E89Z9/5aWSITK4AFbAAgAAAAALtSICOzQDfV9d+gZuYxpEj6cCeHnKTT+2G3ceP2H65kAAMxNjEAfQAAAAVkACAAAAAAaROn1NaDZFOGEWw724dsXBAm6bgmL5i0cki6QZQNrOoFcwAgAAAAANVT8R6UvhrAlyqYlxtmnvkR4uYK/hlvyQmBu/LP6/3ZBWwAIAAAAAD+aHNMP/X+jcRHyUtrCNkk1KfMtoD3GTmShS8pWGLt+AADMTYyAH0AAAAFZAAgAAAAADqSR5e0/Th59LrauDA7OnGD1Xr3H3NokfVxzDWOFaN7BXMAIAAAAACt30faNwTWRbvmykDpiDYUOCwA6QDbBBYBFWS7rdOB4AVsACAAAAAAF7SvnjjRk5v2flFOKaBAEDvjXaL1cpjsQLtK2fv9zdQAAzE2MwB9AAAABWQAIAAAAADmtb1ZgpZjSeodPG/hIVlsnS8hoRRwRbrTVx89VwL62AVzACAAAAAAi38e1g6sEyVfSDkzZbaZXGxKI/zKNbMasOl2LYoWrq8FbAAgAAAAAALACk0KcCDN/Kv8WuazY8ORtUGkOZ5Dsm0ys1oOppp/AAMxNjQAfQAAAAVkACAAAAAAf/f7AWVgBxoKjr7YsEQ4w/fqSvuQWV2HMiA3rQ7ur0sFcwAgAAAAADkkeJozP6FFhUdRIN74H4UhIHue+eVbOs1NvbdWYFQrBWwAIAA
AAAB55FlHAkmTzAYj/TWrGkRJw2EhrVWUnZXDoMYjyfB/ZwADMTY1AH0AAAAFZAAgAAAAAI2WEOymtuFpdKi4ctanPLnlQud+yMKKb8p/nfKmIy56BXMAIAAAAADVKrJmhjr1rfF3p+T+tl7UFd1B7+BfJRk0e7a4im7ozgVsACAAAAAA5E7Ti3PnFiBQoCcb/DN7V1uM3Xd6VKiexPKntssFL7kAAzE2NgB9AAAABWQAIAAAAAAuHU9Qd79hjyvKOujGanSGDIQlxzsql8JytTZhEnPw+AVzACAAAAAAjF2gV/4+sOHVgDd/oR5wDi9zL7NGpGD+NsEpGXy/a4QFbAAgAAAAAJzMoyojYV6Ed/LpVN5zge93Odv3U7JgP7wxeRaJZGTdAAMxNjcAfQAAAAVkACAAAAAA7dQDkt3iyWYCT94d7yqUtPPwp4qkC0ddu+HFdHgVKEkFcwAgAAAAANuYvtvZBTEq4Rm9+5eb7VuFopowkrAuv86PGP8Q8/QvBWwAIAAAAACeqXoAOQOE4j0zRMlkVd8plaW0RX1npsFvB38Xmzv7sAADMTY4AH0AAAAFZAAgAAAAAAwnZSDhL4tNGYxlHPhKYB8s28dY5ScSwiKZm3UhT8U3BXMAIAAAAABDoY6dhivufTURQExyC9Gx3ocpl09bgbbQLChj3qVGbgVsACAAAAAAF+1nS7O0v85s3CCy+9HkdeoEfm2C6ZiNbPMMnSfsMHUAAzE2OQB9AAAABWQAIAAAAAC2VuRdaC4ZJmLdNOvD6R2tnvkyARteqXouJmI46V306QVzACAAAAAAMn1Z6B35wFTX9mEYAPM+IiJ5hauEwfD0CyIvBrxHg7IFbAAgAAAAAOG6DvDZkT9B/xZWmjao2AevN7MMbs3Oh9YJeSd/hZ+hAAMxNzAAfQAAAAVkACAAAAAAVerb7qVNy457rNOHOgDSKyWl5ojun7iWrv1uHPXrIZQFcwAgAAAAAIDcYS9j5z+gx0xdJj09L7876r/vjvKTi/d3bXDE3PhyBWwAIAAAAADuhVLqb1Bkrx8aNymS+bx2cL8GvLFNH4SAi690DUgnWQADMTcxAH0AAAAFZAAgAAAAAH/E44yLxKCJjuSmU9A8SEhbmkDOx1PqqtYcZtgOzJdrBXMAIAAAAABgLh9v2HjBbogrRoQ82LS6KjZQnzjxyJH4PH+F3jupSAVsACAAAAAAIlO46ehXp4TqpDV0t6op++KO+uWBFh8iFORZjmx2IjkAAzE3MgB9AAAABWQAIAAAAAAlNUdDL+f/SSQ5074mrq0JNh7CTXwTbbhsQyDwWeDVMwVzACAAAAAANIH2IlSNG0kUw4qz0budjcWn8mNR9cJlYUqPYdonucAFbAAgAAAAAJMrOUOyiu5Y3sV76zwEFct8L7+i8WGlQI2+8z2W2kzaAAMxNzMAfQAAAAVkACAAAAAASZ+CvUDtlk/R4HAQ3a+PHrKeY/8ifAfh0oXYFqliu80FcwAgAAAAAJelpzPgM65OZFt/mvGGpwibclQ49wH+1gbUGzd9OindBWwAIAAAAAD9qeDchteEpVXWcycmD9kl9449C1dOw0r60TBm5jK+cQADMTc0AH0AAAAFZAAgAAAAAN9fkoUVbvFV2vMNMAkak4gYfEnzwKI3eDM3pnDK5q3lBXMAIAAAAACnDkgVNVNUlbQ9RhR6Aot2nVy+U4km6+GHPkLr631jEAVsACAAAAAANzg/BnkvkmvOr8nS4omF+q9EG/4oisB+ul4YHi938hwAAzE3NQB9AAAABWQAIAAAAAASyK3b1nmNCMptVEGOjwoxYLLS9fYWm/Zxilqea0jpEQVzACAAAAAADDHsGrbqlKGEpxlvfyqOJKQJjwJrzsrB7k3HG0AUJbkFbAAgAAAAAKwx3S4XfDZh4+LuI9jf7XgUh5qiefNv87JD4qvVRfPSAAMxNzYAfQAAAAVkACAAAAAAlSP9iK31GlcG9MKGbLmq+VXMslURr+As736rrVNXcsUFcwAgAAAAAAvbj0zfq9zzi8XReheKFbCB+h9IsOLgXPPpI5vrEJNZBWwAIAAAAABXvoZhaQE7ogWjeBjceVkp03N20cKYP3TA8vuNsgpfAgADMTc3AH0AAAAFZAAgAAAAAOJNORH8Bev97gVU7y6bznOxJ+E6Qoykur1QP76hG1/7BXMAIAAAAAC+C1PtOOrSZgzBAGhr+dPe/kR0JUw9GTwLVNr61xC1aAVsACAAAAAAeA/L8MQIXkamaObtMPLpoDoi5FypA5WAPtMeMrgi0eQAAzE3OAB9AAAABWQAIAAAAAAKcHzLUomavInN6upPkyWhAqYQACP/vdVCIYpiy6U6HgVzACAAAAAATsR4KItY6R2+U7Gg6sJdaEcf58gjd1OulyWovIqfxKcFbAAgAAAAAFbm10ko67ahboAejQdAV0U2uA5OhZYdb8XUFJ8OL46LAAMxNzkAfQAAAAVkACAAAAAAqTOLiMpCdR59tLZzzIPqJvbCNvz2XQL9ust0qYaehtcFcwAgAAAAAArefox/3k5xGOeiw2m6NUdzuGxmPwcu5IFcj+jMwHgHBWwAIAAAAADLZGFJ7MQd5JXMgMXjqZO5LDLxcFClcXPlnRMWRn+1oAADMTgwAH0AAAAFZAAgAAAAAIPSqSeVzSRgNVNmrPYHmUMgykCY27NbdDUNhE5kx/SgBXMAIAAAAAAhX90nNfxyXmZe/+btZ7q6xMX4PFyj0paM1ccJ/5IUUQVsACAAAAAA419oHmD2W0SYoOMwhrhrp8jf68fg9hTkaRdCuVd3CN0AAzE4MQB9AAAABWQAIAAAAACLn5DxiqAosHGXIAY96FwFKjeqrzXWf3VJIQMwx1fl4gVzACAAAAAAindvU27nveutopdvuHmzdENBbeGFtI3Qcsr07jxmvm8FbAAgAAAAAPvl9pBStQvP4OGkN5v0MghUY6djm9n7XdKKfrW0l1sMAAMxODIAfQAAAAVkACAAAAAA7i2S6rHRSPBwZEn59yxaS7HiYBOmObIkeyCcFU42kf8FcwAgAAAAAGb3RSEyBmgarkTvyLWtOLJcPwCKbCRkESG4RZjVmY4iBWwAIAAAAADB2/wo5CSHR4ANtifY6ZRXNTO5+O8qP82DfAiAeanpZwADMTgzAH0AAAAFZAAgAAAAAFz+M+H/Z94mdPW5oP51B4HWptp1rxcMWAjnlHvWJDWrBXMAIAAAAACBFEOQyL7ZHu4Cq33QvXkmKuH5ibG/Md3RaED9CtG5HwVsACAAAAAAfggtJTprQ/yZzj7y5z9KvXsdeXMWP0yUXMMJqpOwI88AAzE4NAB9AAAABWQAIAAAAAAE7c2x3Z3aM1XGfLNk/XQ9jCazNRbGhVm7H8c2NjS5ywVzACAAAAAARJ9h8fdcwA19velF3L/Wcvi2rCzewlKZ2nA0p8bT9uwFbAAgAAAAAJtWe6b4wK2Hae2dZm/OEpYQnvoZjz4Sz5IgJC2wInecAAMxODUAfQAAAAVkACAAAAAAVoRt9B9dNVvIMGN+ea
5TzRzQC+lqSZ8dd/170zU5o9cFcwAgAAAAAEwM95XZin5mv2yhCI8+ugtKuvRVmNgzzIQN0yi1+9aIBWwAIAAAAAAMGBq72n00rox3uqhxSB98mkenTGCdbbUF1gXrgottzgADMTg2AH0AAAAFZAAgAAAAAKRDkjyWv/etlYT4GyoXrmBED2FgZHnhc+l9Wsl06cH2BXMAIAAAAABohlpm3K850Vndf3NmNE0hHqDlNbSR8/IvMidQ3LnIZAVsACAAAAAAW42nGHa6q2MCAaaPVwaIDfr8QLyQwjKq23onZJYsqVsAAzE4NwB9AAAABWQAIAAAAAC3DFh5oklLCNLY90bgWm68dFXz65JpAZSp1K99MBTPAQVzACAAAAAAQgZecmxEUZVHoptEQClDwAf8smI3WynQ/i+JBP0g+kQFbAAgAAAAAEUSQGVnAPISD6voD0DiBUqyWKgt2rta0tjmoe+LNt6IAAMxODgAfQAAAAVkACAAAAAAQ5WKvWSB503qeNlOI2Tpjd5blheNr6OBO8pfJfPNstcFcwAgAAAAAKwHgQLSDJ5NwLBQbY5OnblQIsVDpGV7q3RCbFLD1U4/BWwAIAAAAACQ5nED99LnpbqXZuUOUjnO2HTphEAFBjLD4OZeDEYybgADMTg5AH0AAAAFZAAgAAAAAGfhFY3RGRm5ZgWRQef1tXxHBq5Y6fXaLAR4yJhrTBplBXMAIAAAAACKEF0ApLoB6lP2UqTFsTQYNc9OdDrs/vziPGzttGVLKQVsACAAAAAArOO6FyfNRyBi0sPT5iye7M8d16MTLcwRfodZq4uCYKEAAzE5MAB9AAAABWQAIAAAAAAIM73gPcgzgotYHLeMa2zAU4mFsr7CbILUZWfnuKSwagVzACAAAAAAJCSu98uV8xv88f2BIOWzt6p+6EjQStMBdkGPUkgN79cFbAAgAAAAAMGqPGMPxXbmYbVfSa/japvUljht1zZT33TY7ZjAiuPfAAMxOTEAfQAAAAVkACAAAAAAkWmHCUsiMy1pwZTHxVPBzPTrWFBUDqHNrVqcyyt7nO8FcwAgAAAAAMv2CebFRG/br7USELR98sIdgE9OQCRBGV5JZCO+uPMgBWwAIAAAAABt7qSmn3gxJu7aswsbUiwvO+G6lXj/Xhx+J/zQyZxzLAADMTkyAH0AAAAFZAAgAAAAAGInUYv0lP/rK7McM8taEHXRefk8Q2AunrvWqdfSV7UaBXMAIAAAAACE+WPxJ3gan7iRTbIxXXx+bKVcaf8kP4JD8DcwU0aL7wVsACAAAAAAUC4eTprX4DUZn2X+UXYU6QjtiXk+u57yoOPBbPQUmDkAAzE5MwB9AAAABWQAIAAAAACmHlg2ud3cplXlTsNTpvNnY6Qm1Fce0m899COamoDjaQVzACAAAAAArtJQeJIlepBWRU2aYar7+YGYVQ7dfDc1oxgTmA8r9q0FbAAgAAAAAOk45vg5VqZHAFCO3i0Z52SZi5RADf8NXwf68T5yad/DAAMxOTQAfQAAAAVkACAAAAAApzcWSAbZWV/Rq+ylRNqqlJqNVR4fhXrz4633/MQOQgcFcwAgAAAAAN/jz/bsEleiuCl+li83EWlG6UMHA8CyaOMRKCkXkSCPBWwAIAAAAAC3Sd+Qg+uFDKpGZHbrQgokXHQ1az1aFl4YK343OB6hcQAAEmNtAAAAAAAAAAAAABBwYXlsb2FkSWQAAAAAABBmaXJzdE9wZXJhdG9yAAEAAAAA", "subType": "06" } } @@ -298,7 +294,6 @@ "schema": { "default.default": { "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", "ecocCollection": "enxcol_.default.ecoc", "fields": [ { @@ -308,7 +303,7 @@ "subType": "04" } }, - "path": "encryptedDecimal", + "path": "encryptedDecimalNoPrecision", "bsonType": "decimal", "queries": { "queryType": "rangePreview", @@ -336,7 +331,7 @@ "_id": { "$numberInt": "0" }, - "encryptedDecimal": { + "encryptedDecimalNoPrecision": { "$$type": "binData" }, "__safeContent__": [ @@ -1120,7 +1115,7 @@ "_id": { "$numberInt": "1" }, - "encryptedDecimal": { + "encryptedDecimalNoPrecision": { "$$type": "binData" }, "__safeContent__": [ diff --git a/test/client-side-encryption/spec/legacy/fle2-Range-Decimal-Correctness.json b/test/client-side-encryption/spec/legacy/fle2v2-Range-Decimal-Correctness.json similarity index 88% rename from test/client-side-encryption/spec/legacy/fle2-Range-Decimal-Correctness.json rename to test/client-side-encryption/spec/legacy/fle2v2-Range-Decimal-Correctness.json index 89b7bd3118..5120aecb7a 100644 --- a/test/client-side-encryption/spec/legacy/fle2-Range-Decimal-Correctness.json +++ b/test/client-side-encryption/spec/legacy/fle2v2-Range-Decimal-Correctness.json @@ -1,7 +1,8 @@ { "runOn": [ { - "minServerVersion": "6.2.0", + "minServerVersion": "7.0.0", + "serverless": "forbid", "topology": [ "replicaset" ] @@ -11,9 +12,6 @@ "collection_name": "default", "data": [], "encrypted_fields": { - "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", - "ecocCollection": "enxcol_.default.ecoc", "fields": [ { "keyId": { @@ -22,7 +20,7 @@ "subType": "04" } }, - "path": "encryptedDecimal", + "path": "encryptedDecimalNoPrecision", "bsonType": "decimal", "queries": { 
"queryType": "rangePreview", @@ -91,7 +89,7 @@ "arguments": { "document": { "_id": 0, - "encryptedDecimal": { + "encryptedDecimalNoPrecision": { "$numberDecimal": "0.0" } } @@ -102,7 +100,7 @@ "arguments": { "document": { "_id": 1, - "encryptedDecimal": { + "encryptedDecimalNoPrecision": { "$numberDecimal": "1.0" } } @@ -112,7 +110,7 @@ "name": "find", "arguments": { "filter": { - "encryptedDecimal": { + "encryptedDecimalNoPrecision": { "$gt": { "$numberDecimal": "0.0" } @@ -122,7 +120,7 @@ "result": [ { "_id": 1, - "encryptedDecimal": { + "encryptedDecimalNoPrecision": { "$numberDecimal": "1.0" } } @@ -152,7 +150,7 @@ "arguments": { "document": { "_id": 0, - "encryptedDecimal": { + "encryptedDecimalNoPrecision": { "$numberDecimal": "0.0" } } @@ -163,7 +161,7 @@ "arguments": { "document": { "_id": 1, - "encryptedDecimal": { + "encryptedDecimalNoPrecision": { "$numberDecimal": "1.0" } } @@ -173,7 +171,7 @@ "name": "find", "arguments": { "filter": { - "encryptedDecimal": { + "encryptedDecimalNoPrecision": { "$gte": { "$numberDecimal": "0.0" } @@ -186,13 +184,13 @@ "result": [ { "_id": 0, - "encryptedDecimal": { + "encryptedDecimalNoPrecision": { "$numberDecimal": "0.0" } }, { "_id": 1, - "encryptedDecimal": { + "encryptedDecimalNoPrecision": { "$numberDecimal": "1.0" } } @@ -222,7 +220,7 @@ "arguments": { "document": { "_id": 0, - "encryptedDecimal": { + "encryptedDecimalNoPrecision": { "$numberDecimal": "0.0" } } @@ -233,7 +231,7 @@ "arguments": { "document": { "_id": 1, - "encryptedDecimal": { + "encryptedDecimalNoPrecision": { "$numberDecimal": "1.0" } } @@ -243,7 +241,7 @@ "name": "find", "arguments": { "filter": { - "encryptedDecimal": { + "encryptedDecimalNoPrecision": { "$gt": { "$numberDecimal": "1.0" } @@ -276,7 +274,7 @@ "arguments": { "document": { "_id": 0, - "encryptedDecimal": { + "encryptedDecimalNoPrecision": { "$numberDecimal": "0.0" } } @@ -287,7 +285,7 @@ "arguments": { "document": { "_id": 1, - "encryptedDecimal": { + "encryptedDecimalNoPrecision": { "$numberDecimal": "1.0" } } @@ -297,7 +295,7 @@ "name": "find", "arguments": { "filter": { - "encryptedDecimal": { + "encryptedDecimalNoPrecision": { "$lt": { "$numberDecimal": "1.0" } @@ -307,7 +305,7 @@ "result": [ { "_id": 0, - "encryptedDecimal": { + "encryptedDecimalNoPrecision": { "$numberDecimal": "0.0" } } @@ -337,7 +335,7 @@ "arguments": { "document": { "_id": 0, - "encryptedDecimal": { + "encryptedDecimalNoPrecision": { "$numberDecimal": "0.0" } } @@ -348,7 +346,7 @@ "arguments": { "document": { "_id": 1, - "encryptedDecimal": { + "encryptedDecimalNoPrecision": { "$numberDecimal": "1.0" } } @@ -358,7 +356,7 @@ "name": "find", "arguments": { "filter": { - "encryptedDecimal": { + "encryptedDecimalNoPrecision": { "$lte": { "$numberDecimal": "1.0" } @@ -371,13 +369,13 @@ "result": [ { "_id": 0, - "encryptedDecimal": { + "encryptedDecimalNoPrecision": { "$numberDecimal": "0.0" } }, { "_id": 1, - "encryptedDecimal": { + "encryptedDecimalNoPrecision": { "$numberDecimal": "1.0" } } @@ -407,7 +405,7 @@ "arguments": { "document": { "_id": 0, - "encryptedDecimal": { + "encryptedDecimalNoPrecision": { "$numberDecimal": "0.0" } } @@ -418,7 +416,7 @@ "arguments": { "document": { "_id": 1, - "encryptedDecimal": { + "encryptedDecimalNoPrecision": { "$numberDecimal": "1.0" } } @@ -428,7 +426,7 @@ "name": "find", "arguments": { "filter": { - "encryptedDecimal": { + "encryptedDecimalNoPrecision": { "$gt": { "$numberDecimal": "0.0" }, @@ -441,7 +439,7 @@ "result": [ { "_id": 1, - "encryptedDecimal": { + "encryptedDecimalNoPrecision": { 
"$numberDecimal": "1.0" } } @@ -471,7 +469,7 @@ "arguments": { "document": { "_id": 0, - "encryptedDecimal": { + "encryptedDecimalNoPrecision": { "$numberDecimal": "0.0" } } @@ -482,7 +480,7 @@ "arguments": { "document": { "_id": 1, - "encryptedDecimal": { + "encryptedDecimalNoPrecision": { "$numberDecimal": "1.0" } } @@ -492,7 +490,7 @@ "name": "find", "arguments": { "filter": { - "encryptedDecimal": { + "encryptedDecimalNoPrecision": { "$numberDecimal": "0.0" } } @@ -500,7 +498,7 @@ "result": [ { "_id": 0, - "encryptedDecimal": { + "encryptedDecimalNoPrecision": { "$numberDecimal": "0.0" } } @@ -510,7 +508,7 @@ "name": "find", "arguments": { "filter": { - "encryptedDecimal": { + "encryptedDecimalNoPrecision": { "$numberDecimal": "1.0" } } @@ -518,7 +516,7 @@ "result": [ { "_id": 1, - "encryptedDecimal": { + "encryptedDecimalNoPrecision": { "$numberDecimal": "1.0" } } @@ -548,7 +546,7 @@ "arguments": { "document": { "_id": 0, - "encryptedDecimal": { + "encryptedDecimalNoPrecision": { "$numberDecimal": "0.0" } } @@ -559,7 +557,7 @@ "arguments": { "document": { "_id": 1, - "encryptedDecimal": { + "encryptedDecimalNoPrecision": { "$numberDecimal": "1.0" } } @@ -569,7 +567,7 @@ "name": "find", "arguments": { "filter": { - "encryptedDecimal": { + "encryptedDecimalNoPrecision": { "$in": [ { "$numberDecimal": "0.0" @@ -581,7 +579,7 @@ "result": [ { "_id": 0, - "encryptedDecimal": { + "encryptedDecimalNoPrecision": { "$numberDecimal": "0.0" } } @@ -611,7 +609,7 @@ "arguments": { "document": { "_id": 0, - "encryptedDecimal": { + "encryptedDecimalNoPrecision": { "$numberDecimal": "0.0" } } @@ -622,7 +620,7 @@ "arguments": { "document": { "_id": 1, - "encryptedDecimal": { + "encryptedDecimalNoPrecision": { "$numberDecimal": "1.0" } } @@ -634,7 +632,7 @@ "pipeline": [ { "$match": { - "encryptedDecimal": { + "encryptedDecimalNoPrecision": { "$gte": { "$numberDecimal": "0.0" } @@ -651,13 +649,13 @@ "result": [ { "_id": 0, - "encryptedDecimal": { + "encryptedDecimalNoPrecision": { "$numberDecimal": "0.0" } }, { "_id": 1, - "encryptedDecimal": { + "encryptedDecimalNoPrecision": { "$numberDecimal": "1.0" } } @@ -687,7 +685,7 @@ "arguments": { "document": { "_id": 0, - "encryptedDecimal": { + "encryptedDecimalNoPrecision": { "$numberDecimal": "0.0" } } @@ -698,7 +696,7 @@ "arguments": { "document": { "_id": 1, - "encryptedDecimal": { + "encryptedDecimalNoPrecision": { "$numberDecimal": "1.0" } } @@ -710,7 +708,7 @@ "pipeline": [ { "$match": { - "encryptedDecimal": { + "encryptedDecimalNoPrecision": { "$gt": { "$numberDecimal": "1.0" } @@ -745,7 +743,7 @@ "arguments": { "document": { "_id": 0, - "encryptedDecimal": { + "encryptedDecimalNoPrecision": { "$numberDecimal": "0.0" } } @@ -756,7 +754,7 @@ "arguments": { "document": { "_id": 1, - "encryptedDecimal": { + "encryptedDecimalNoPrecision": { "$numberDecimal": "1.0" } } @@ -768,7 +766,7 @@ "pipeline": [ { "$match": { - "encryptedDecimal": { + "encryptedDecimalNoPrecision": { "$lt": { "$numberDecimal": "1.0" } @@ -780,7 +778,7 @@ "result": [ { "_id": 0, - "encryptedDecimal": { + "encryptedDecimalNoPrecision": { "$numberDecimal": "0.0" } } @@ -810,7 +808,7 @@ "arguments": { "document": { "_id": 0, - "encryptedDecimal": { + "encryptedDecimalNoPrecision": { "$numberDecimal": "0.0" } } @@ -821,7 +819,7 @@ "arguments": { "document": { "_id": 1, - "encryptedDecimal": { + "encryptedDecimalNoPrecision": { "$numberDecimal": "1.0" } } @@ -833,7 +831,7 @@ "pipeline": [ { "$match": { - "encryptedDecimal": { + "encryptedDecimalNoPrecision": { "$lte": { 
"$numberDecimal": "1.0" } @@ -850,13 +848,13 @@ "result": [ { "_id": 0, - "encryptedDecimal": { + "encryptedDecimalNoPrecision": { "$numberDecimal": "0.0" } }, { "_id": 1, - "encryptedDecimal": { + "encryptedDecimalNoPrecision": { "$numberDecimal": "1.0" } } @@ -886,7 +884,7 @@ "arguments": { "document": { "_id": 0, - "encryptedDecimal": { + "encryptedDecimalNoPrecision": { "$numberDecimal": "0.0" } } @@ -897,7 +895,7 @@ "arguments": { "document": { "_id": 1, - "encryptedDecimal": { + "encryptedDecimalNoPrecision": { "$numberDecimal": "1.0" } } @@ -909,7 +907,7 @@ "pipeline": [ { "$match": { - "encryptedDecimal": { + "encryptedDecimalNoPrecision": { "$gt": { "$numberDecimal": "0.0" }, @@ -924,7 +922,7 @@ "result": [ { "_id": 1, - "encryptedDecimal": { + "encryptedDecimalNoPrecision": { "$numberDecimal": "1.0" } } @@ -954,7 +952,7 @@ "arguments": { "document": { "_id": 0, - "encryptedDecimal": { + "encryptedDecimalNoPrecision": { "$numberDecimal": "0.0" } } @@ -965,7 +963,7 @@ "arguments": { "document": { "_id": 1, - "encryptedDecimal": { + "encryptedDecimalNoPrecision": { "$numberDecimal": "1.0" } } @@ -977,7 +975,7 @@ "pipeline": [ { "$match": { - "encryptedDecimal": { + "encryptedDecimalNoPrecision": { "$numberDecimal": "0.0" } } @@ -987,7 +985,7 @@ "result": [ { "_id": 0, - "encryptedDecimal": { + "encryptedDecimalNoPrecision": { "$numberDecimal": "0.0" } } @@ -999,7 +997,7 @@ "pipeline": [ { "$match": { - "encryptedDecimal": { + "encryptedDecimalNoPrecision": { "$numberDecimal": "1.0" } } @@ -1009,7 +1007,7 @@ "result": [ { "_id": 1, - "encryptedDecimal": { + "encryptedDecimalNoPrecision": { "$numberDecimal": "1.0" } } @@ -1039,7 +1037,7 @@ "arguments": { "document": { "_id": 0, - "encryptedDecimal": { + "encryptedDecimalNoPrecision": { "$numberDecimal": "0.0" } } @@ -1050,7 +1048,7 @@ "arguments": { "document": { "_id": 1, - "encryptedDecimal": { + "encryptedDecimalNoPrecision": { "$numberDecimal": "1.0" } } @@ -1062,7 +1060,7 @@ "pipeline": [ { "$match": { - "encryptedDecimal": { + "encryptedDecimalNoPrecision": { "$in": [ { "$numberDecimal": "0.0" @@ -1076,7 +1074,7 @@ "result": [ { "_id": 0, - "encryptedDecimal": { + "encryptedDecimalNoPrecision": { "$numberDecimal": "0.0" } } @@ -1106,7 +1104,7 @@ "arguments": { "document": { "_id": 0, - "encryptedDecimal": { + "encryptedDecimalNoPrecision": { "$numberInt": "0" } } @@ -1138,7 +1136,7 @@ "name": "find", "arguments": { "filter": { - "encryptedDecimal": { + "encryptedDecimalNoPrecision": { "$gte": { "$numberInt": "0" } diff --git a/test/client-side-encryption/spec/legacy/fle2-Range-Decimal-Delete.json b/test/client-side-encryption/spec/legacy/fle2v2-Range-Decimal-Delete.json similarity index 65% rename from test/client-side-encryption/spec/legacy/fle2-Range-Decimal-Delete.json rename to test/client-side-encryption/spec/legacy/fle2v2-Range-Decimal-Delete.json index 0463be1c69..de81159b43 100644 --- a/test/client-side-encryption/spec/legacy/fle2-Range-Decimal-Delete.json +++ b/test/client-side-encryption/spec/legacy/fle2v2-Range-Decimal-Delete.json @@ -1,7 +1,8 @@ { "runOn": [ { - "minServerVersion": "6.2.0", + "minServerVersion": "7.0.0", + "serverless": "forbid", "topology": [ "replicaset" ] @@ -11,9 +12,6 @@ "collection_name": "default", "data": [], "encrypted_fields": { - "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", - "ecocCollection": "enxcol_.default.ecoc", "fields": [ { "keyId": { @@ -22,7 +20,7 @@ "subType": "04" } }, - "path": "encryptedDecimal", + "path": "encryptedDecimalNoPrecision", 
"bsonType": "decimal", "queries": { "queryType": "rangePreview", @@ -91,7 +89,7 @@ "arguments": { "document": { "_id": 0, - "encryptedDecimal": { + "encryptedDecimalNoPrecision": { "$numberDecimal": "0" } } @@ -102,7 +100,7 @@ "arguments": { "document": { "_id": 1, - "encryptedDecimal": { + "encryptedDecimalNoPrecision": { "$numberDecimal": "1" } } @@ -112,7 +110,7 @@ "name": "deleteOne", "arguments": { "filter": { - "encryptedDecimal": { + "encryptedDecimalNoPrecision": { "$gt": { "$numberDecimal": "0" } @@ -176,7 +174,7 @@ "documents": [ { "_id": 0, - "encryptedDecimal": { + "encryptedDecimalNoPrecision": { "$$type": "binData" } } @@ -187,7 +185,6 @@ "schema": { "default.default": { "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", "ecocCollection": "enxcol_.default.ecoc", "fields": [ { @@ -197,7 +194,7 @@ "subType": "04" } }, - "path": "encryptedDecimal", + "path": "encryptedDecimalNoPrecision", "bsonType": "decimal", "queries": { "queryType": "rangePreview", @@ -224,7 +221,7 @@ "documents": [ { "_id": 1, - "encryptedDecimal": { + "encryptedDecimalNoPrecision": { "$$type": "binData" } } @@ -235,7 +232,6 @@ "schema": { "default.default": { "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", "ecocCollection": "enxcol_.default.ecoc", "fields": [ { @@ -245,7 +241,7 @@ "subType": "04" } }, - "path": "encryptedDecimal", + "path": "encryptedDecimalNoPrecision", "bsonType": "decimal", "queries": { "queryType": "rangePreview", @@ -272,10 +268,10 @@ "deletes": [ { "q": { - "encryptedDecimal": { + "encryptedDecimalNoPrecision": { "$gt": { "$binary": { - "base64": "CgljAAADcGF5bG9hZADZYgAABGcAnWIAAAMwAH0AAAAFZAAgAAAAAJu2KgiI8vM+kz9qD3ZQzFQY5qbgYqCqHG5R4jAlnlwXBXMAIAAAAAAAUXxFXsz764T79sGCdhxvNd5b6E/9p61FonsHyEIhogVjACAAAAAAeEtIU/sp7zwjA/VArYkCzkUUiRiQOmlTaVZvfUqbEp0AAzEAfQAAAAVkACAAAAAAPQPvL0ARjujSv2Rkm8r7spVsgeC1K3FWcskGGZ3OdDIFcwAgAAAAACgNn660GmefR8jLqzgR1u5O+Uocx9GyEHiBqVGko5FZBWMAIAAAAACMKx1UyNAN4yVafFht8jp4w4LNaJcLhnozfSMzDHD3owADMgB9AAAABWQAIAAAAAD2Zi6kcxmaD2mY3VWrP+wYJMPg6cSBIYPapxaFQxYFdQVzACAAAAAAM/cV36BLBY3xFBXsXJY8M9EHHOc/qrmdc2CJmj3M89gFYwAgAAAAAHdek2wlAQ5BQORVjudyziRTV0EKx8qbsnoDiw4HG2xaAAMzAH0AAAAFZAAgAAAAAOaNqI9srQ/mI9gwbk+VkizGBBH/PPWOVusgnfPk3tY1BXMAIAAAAAAc96O/pwKCmHCagT6T/QV/wz4vqO+R22GsZ1dse2Vg6QVjACAAAAAA3tld+twIlVWnCTXElspau5k02CwxXDjh+3u1CQtV/W4AAzQAfQAAAAVkACAAAAAA0XlQgy/Yu97EQOjronl9b3dcR1DFn3deuVhtTLbJZHkFcwAgAAAAACoMnpVl6EFJak8A+t5N4RFnQhkQEBnNAx8wDqmq5U/dBWMAIAAAAABRmXIchytMNz4J439viiY5FZ1OB3AXJBkZ3udUqHpsqAADNQB9AAAABWQAIAAAAAAOtpNexRxfv0yRFvZO9DhlkpU4mDuAb8ykdLnE5Vf1VAVzACAAAAAAeblFKm/30orP16uQpZslvsoS8s0xfNPIBlw3VkHeekYFYwAgAAAAAJQdzGMO2s1AkdKz3ZPytivrQphUFVhvhMcXjvUGzHBDAAM2AH0AAAAFZAAgAAAAAIr8xAFm1zPmrvW4Vy5Ct0W8FxMmyPmFzdWVzesBhAJFBXMAIAAAAABYeeXjJEzTHwxab6pUiCRiZjxgtN59a1y8Szy3hfkg+gVjACAAAAAA161loizzKvXS1Po/3bxeNICmKpsPSK9Q+EpkISK3jVoAAzcAfQAAAAVkACAAAAAAl+ibLk0/+EwoqeC8S8cGgAtjtpQWGEZDsybMPnrrkwEFcwAgAAAAAHPPBudWgQ+HUorLDpJMqhS9VBF2VF5aLcxgrM1s+yU7BWMAIAAAAAACyakB3CZWEjbxK0u9Sflc3+EafBAQFbvBpCxKvxuEQAADOAB9AAAABWQAIAAAAACR9erwLTb+tcWFZgJ2MEfM0PKI9uuwIjDTHADRFgD+SQVzACAAAAAAcOop8TXsGUVQoKhzUllMYWxL93xCOkwtIpV8Q6hiSYYFYwAgAAAAAJ43998lzKoVqWm99ZzKHJLeVscGNCVoKDvpUtt2rDI9AAM5AH0AAAAFZAAgAAAAALv0vCPgh7QpmM8Ug6ad5ioZJCh7pLMdT8FYyQioBQ6KBXMAIAAAAADsCPyIG8t6ApQkRk1fX/sfc1kpuWCWP8gAEpnYoBSHrQVjACAAAAAAj2h1WLr0EFEFZ31djx3BtLIbAtdE05ax54opkJo4/bQAAzEwAH0AAAAFZAAgAAAAAIW4AxJgYoM0pcNTwk1RSbyjZGIqgKL1hcTJmNrnZmoPBXMAIAAAAAAZpfx3EFO0vY0f1eHnE0PazgqeNDTaj+pPJMUNW8lFrAVjACAAAAAAqFD83E8POmo6pdqg7D+jWtngbgcV99mbQBxkbpX7ds
4AAzExAH0AAAAFZAAgAAAAAKliO6L9zgeuufjj174hvmQGNRbmYYs9yAirL7OxwEW3BXMAIAAAAAAqU7vs3DWUQ95Eq8OejwWnD0GuXd+ASi/uD6S0l8MM1QVjACAAAAAAp0cwpAh7SiuStPwYNVvp83N5GWyWRWA7UGRxHDDj8g8AAzEyAH0AAAAFZAAgAAAAAOGQcCBkk+j/Kzjt/Cs6g3BZPJG81wIHBS8JewHGpgk+BXMAIAAAAABjrxZXWCkdzrExwCgyHaafuPSQ4V4x2k9kUCAqUaYKDQVjACAAAAAAJr/tEuxScrJJEMECy4itHf1y5+gO2l5sBjgM/EOLXw0AAzEzAH0AAAAFZAAgAAAAAPmCNEt4t97waOSd5hNi2fNCdWEkmcFJ37LI9k4Az4/5BXMAIAAAAABX7DuDPNg+duvELf3NbLWkPMFw2HGLgWGHyVWcPvSNCAVjACAAAAAAeNCEn5Z3A5CgCnIgaBl2N8p0t+5rRY06vGg6ePVMaTUAAzE0AH0AAAAFZAAgAAAAAD4vtVUYRNB+FD9yoQ2FVJH3nMeJeKbi6eZfth638YqbBXMAIAAAAAANCuUB4OdmuD6LaDK2f3vaqfgYYvg40wDXOBbcFjTqLwVjACAAAAAATozHsao4er/Uk5H/pKe/1JEo/3h4Pgah2RgJHXf2PeYAAzE1AH0AAAAFZAAgAAAAAJPIRzjmTjbdIvshG6UslbEOd797ZSIdjGAhGWxVQvK1BXMAIAAAAABgmJ0Jh8WLs9IYs/a7DBjDWd8J3thW/AGJK7zDnMeYOAVjACAAAAAAtD48LbnCkTvacfz8DvR1ixsbYceiJxJRV7tLqDUWvVwAAzE2AH0AAAAFZAAgAAAAAABQyKQPoW8wGPIqnsTv69+DzIdRkohRhOhDmyVHkw9WBXMAIAAAAAAqWA2X4tB/h3O1Xlawtz6ndI6WaTwgU1QYflL35opu5gVjACAAAAAAphx2eRFGSW/tm3HxilqRv5Zj5TtOy4KSGiCIm6yJANYAAzE3AH0AAAAFZAAgAAAAACB7NOyGQ1Id3MYnxtBXqyZ5Ul/lHH6p1b10U63DfT6bBXMAIAAAAADpOryIcndxztkHSfLN3Kzq29sD8djS0PspDSqERMqokQVjACAAAAAAIHYCljTfUu/Ox23Nrjz2Z6cqQpn4s0sJV/SxkC9UtIEAAzE4AH0AAAAFZAAgAAAAAKVfXLfs8XA14CRTB56oZwV+bFJN5BHraTXbqEXZDmTkBXMAIAAAAAASRWTsfGOpqdffiOodoqIgBzG/yzFyjR5CfUsIUIWGpgVjACAAAAAAGs7l0GsFNkQMcJBUuUsEOMuQupbEvi7MDcezspsCY3EAAzE5AH0AAAAFZAAgAAAAAH/aZr4EuS0/noQR9rcF8vwoaxnxrwgOsSJ0ys8PkHhGBXMAIAAAAACd7ObGQW7qfddcvyxRTkPuvq/PHu7+6I5dxwS1Lzy5XAVjACAAAAAAWnfsbdu5Qvb62rEpBR90qh+3jdYfp266dmuNPLoXys4AAzIwAH0AAAAFZAAgAAAAAKvlcpFFNq0oA+urq3w6d80PK1HHHw0H0yVWvU9aHijXBXMAIAAAAADWnAHQ5Fhlcjawki7kWzdqjM2f6IdGJblojrYElWjsZgVjACAAAAAAdNxswYIMjqVta8sLC8wniMm6gOgIgG5eWTPAxItBJJ4AAzIxAH0AAAAFZAAgAAAAAH/2aMezEOddrq+dNOkDrdqf13h2ttOnexZsJxG1G6PNBXMAIAAAAABNtgnibjC4VKy5poYjvdsBBnVvDTF/4mmEAxsXVgZVKgVjACAAAAAArWXr2Myg8T8/0eYBCUXDOsIR//zqZtlE6Lb1YZJkvYcAAzIyAH0AAAAFZAAgAAAAAF2wZoDL6/V59QqO8vdRZWDpXpkV4h4KOCSn5e7x7nmzBXMAIAAAAADLZBu7LCYjbThaVUqMK14H/elrVOYIKJQCx4C9Yjw37gVjACAAAAAAWeF6F8pBeCm27PWjQGrlyN7JHp9WqCNnwFzqZ+V51t0AAzIzAH0AAAAFZAAgAAAAAH27yYaLn9zh2CpvaoomUPercSfJRUmBY6XFqmhcXi9QBXMAIAAAAAAUwumVlIYIs9JhDhSj0R0+59psCMsFk94E62VxkPt42QVjACAAAAAAjbsmlVrfexmULer3RtGwnOf20pF0xv4Z46OHOaIwgzEAAzI0AH0AAAAFZAAgAAAAALMg2kNAO4AFFs/mW3In04yFeN4AP6Vo0klyUoT06RquBXMAIAAAAAAgGWJbeIdwlpqXCyVIYSs0dt54Rfc8JF4b8uYc+YUj0AVjACAAAAAAsNKKan8cMW1oYFmQiyPZbepuJlDD0/CQUS21++CP16kAAzI1AH0AAAAFZAAgAAAAALas/i1T2DFCEmrrLEi7O2ngJZyFHialOoedVXS+OjenBXMAIAAAAAA1kK0QxY4REcGxHeMkgumyF7iwlsRFtw9MlbSSoQY7uAVjACAAAAAAUGZ/BwF6Q9hcFBC8F5ed6RwYWJ1/OahRwljBloMgkFAAAzI2AH0AAAAFZAAgAAAAAP1TejmWg1CEuNSMt6NUgeQ5lT+oBoeyF7d2l5xQrbXWBXMAIAAAAABPX0kj6obggdJShmqtVfueKHplH4ZrXusiwrRDHMOKeQVjACAAAAAApU9zcPAQ2xOspmwhjWN90v2m2clQ4rk7WBmm/nUyOHQAAzI3AH0AAAAFZAAgAAAAAMrKn+QPa/NxYezNhlOX9nyEkN1kE/gW7EuZkVqYl0b8BXMAIAAAAABUoZMSPUywRGfX2EEencJEKH5x/P9ySUVrhStAwgR/LgVjACAAAAAAl/hDhauhBkda/I/kLr/XLxyso2guawC2isj+tDQhfMoAAzI4AH0AAAAFZAAgAAAAAMmD1+a+oVbiUZd1HuZqdgtdVsVKwuWAn3/M1B6QGBM3BXMAIAAAAACLyytOYuZ9WEsIrrtJbXUx4QgipbaAbmlJvSZVkGi0CAVjACAAAAAAVKbMdxvkfCBoqJCUVjLrtKF90a80LocckdFc7LvYuWIAAzI5AH0AAAAFZAAgAAAAAOVKV7IuFwmYP1qVv8h0NvJmfPICu8yQhzjG7oJdTLDoBXMAIAAAAABL70XLfQLKRsw1deJ2MUvxSWKxpF/Ez73jqtbLvqbuogVjACAAAAAARiC2ShVCKMjjiqB6xg+jDIYJNhhWqmceO07EC8rEXkoAAzMwAH0AAAAFZAAgAAAAAKS/1RSAQma+xV9rz04IcdzmavtrBDjOKPM+Z2NEyYfPBXMAIAAAAAAOJDWGORDgfRv8+w5nunh41wXb2hCA0MRzwnLnQtIqPgVjACAAAAAAWU2Vw2Rta8oHxPPfOWQa86tjYWndb0E3RNlZ9gthHt0AAzMxAH0AAAAFZAAgAAAAAFeq8o82uNY1X8cH6OhdTzHNBUnCChsEDs5tm0kPBz3qBXMAIAAAAABaxMBbsaeEj/EDtr8nZfrhhhirBRPJwVamDo5Ww
bgvTQVjACAAAAAAqcsCyJuLvxG8STrUI1Ccaga760+4fbmIPc6bXodea0gAAzMyAH0AAAAFZAAgAAAAAI8IKIfDrohHh2cjspJHCovqroSr5N3QyVtNzFvT5+FzBXMAIAAAAABXHXteKG0DoOMmECKp6ro1MZNQvXGzqTDdZ0DUc8QfFAVjACAAAAAA47u7VuqtYJ4jt5CcGrUacopqApwpqEpIpkalvCRtul0AAzMzAH0AAAAFZAAgAAAAAJkHvm15kIu1OtAiaByj5ieWqzxiu/epK6c/9+KYIrB0BXMAIAAAAACzg5TcyANk0nes/wCJudd1BwlkWWF6zw3nGclq5v3SJQVjACAAAAAAvTQRtEYdIj2+bfJNamvrgWRoBsYYjMQx9MyuFwSfKmYAAzM0AH0AAAAFZAAgAAAAAAYSOvEWWuSg1Aym7EssNLR+xsY7e9BcwsX4JKlnSHJcBXMAIAAAAABT48eY3PXVDOjw7JpNjOe1j2JyI3LjDnQoqZ8Je5B2KgVjACAAAAAA4eLwHqiSgM1pMqjK3QFgYauOnI3emnlOK3rgQPuec4AAAzM1AH0AAAAFZAAgAAAAAGQxC9L1e9DfO5XZvX1yvc3hTLtQEdKO9FPMkyg0Y9ZABXMAIAAAAADtmcMNJwdWLxQEArMGZQyzpnu+Z5yMmPAkvgq4eAKwNQVjACAAAAAAxA7XR9Cp6671TiZesvqE/EPKVPXfGDaYuDnce+MaZw0AAzM2AH0AAAAFZAAgAAAAADLHK2LNCNRO0pv8n4fAsxwtUqCNnVK8rRgNiQfXpHSdBXMAIAAAAACf16EBIHRKD3SzjRW+LMOl+47QXA3CJhMzlcqyFRW22AVjACAAAAAA9F9Ztm39rTapXoeRmzk6eUoa3MDPatwoQD5PvD+3xGEAAzM3AH0AAAAFZAAgAAAAAHiZJiXKNF8bbukQGsdYkEi95I+FSBHy1I5/hK2uEZruBXMAIAAAAADE+lZBa8HDUJPN+bF6xI9x4N7GF9pj3vBR7y0BcfFhBAVjACAAAAAArsw2CrSC2y02EiWJxViVxlYRIXtWARpUTwO/mfqS7O4AAzM4AH0AAAAFZAAgAAAAAI1oa2OIw5TvhT14tYCGmhanUoYcCZtNbrVbeoMldHNZBXMAIAAAAAAx2nS0Ipblf2XOgBiUOuJFBupBhe7nb6QPLZlA4aMPCgVjACAAAAAAaVoGxV5YH4urmcw3Zocg+QHoq+hA0u6nIkconI0MtZoAAzM5AH0AAAAFZAAgAAAAABgTWS3Yap7Q59hii/uPPimHWXsr+DUmsqfwt/X73qsOBXMAIAAAAACKK05liW5KrmEAvtpCB1WUltruzUylDDpjea//UlWoOAVjACAAAAAAAI3iOqWSA+DpgNr/neIRpt85Q8pLS/81ZKDnbC4io/0AAzQwAH0AAAAFZAAgAAAAABOAnBPXDp6i9TISQXvcNKwGDLepZTu3cKrB4vKnSCjBBXMAIAAAAADjjzZO7UowAAvpwyG8BNOVqLCccMFk3aDK4unUeft5ywVjACAAAAAAIZqKrUcbtze/K3CUKdchuVXQYkgj6jQfIeMZ3GrtYeEAAzQxAH0AAAAFZAAgAAAAAHN8hyvT1lYrAsdiV5GBdd5jhtrAYE/KnSjw2Ka9hjz9BXMAIAAAAAD794JK7EeXBs+D7yOVK7nWF8SbZ/7U8gZ7nnT9JFNwTAVjACAAAAAAQ4K5cDGLM/2L7KbMt537tsG1eiWrboKMj6pXVfBDIBoAAzQyAH0AAAAFZAAgAAAAAO93bPrq8bsnp1AtNd9ETnXIz0lH/2HYN/vuw9wA3fyFBXMAIAAAAABHlls5fbaF2oAGqptC481XQ4eYxInTC29aElfmVZgDUgVjACAAAAAAe0oLBk+ZaipcPgjjHvACAyA42cLY8i+s+V0EsarulZkAAzQzAH0AAAAFZAAgAAAAAL1YsAZm1SA0ztU6ySIrQgCCA74V6rr0/4iIygCcaJL6BXMAIAAAAADTXWTHWovGmUR1Zg9l/Aqq9H5mOCJQQrb/Dfae7e3wKAVjACAAAAAAAeXpcv0oCp9iY/U81w+WJjFjGn60KLQEURJd/pSoaJgAAzQ0AH0AAAAFZAAgAAAAAEoFAeHk0RZ9kD+cJRD3j7PcE5gzWKnyBrF1I/MDNp5mBXMAIAAAAACgHtc2hMBRSZjKw8RAdDHK+Pi1HeyjiBuAslGVNcW5tAVjACAAAAAAmSJiRfjmuwq5cYfadeYQRkPqbGYYrMNTQeyhBEAYClEAAzQ1AH0AAAAFZAAgAAAAAAW+7DmSN/LX+/0uBVJDHIc2dhxAGz4+ehyyz8fAnNGoBXMAIAAAAAA6Ilw42EvvfLJ3Eq8Afd+FjPoPcQutZO6ltmCLEr8kxQVjACAAAAAAUHZ9EwBIvDI4PUeCOzZCuVN9g9h5Zvq2mSpmejPLBE8AAzQ2AH0AAAAFZAAgAAAAANBdV7M7kuYO3EMoQItAbXv4t2cIhfaT9V6+s4cg9djlBXMAIAAAAABvz4MIvZWxxrcJCL5qxLfFhXiUYB1OLHdKEjco94SgDgVjACAAAAAAvz1koQRh+weAUstLWz/iHxFrLReN4KtQjdEU/zrKOFsAAzQ3AH0AAAAFZAAgAAAAAMoAoiAn1kc79j5oPZtlMWHMhhgwNhLUnvqkqIFvcH1NBXMAIAAAAADcJTW7WiCyW0Z9YDUYwppXhLj4Ac1povpJvcAq+i48MQVjACAAAAAARL0k3Uwe7OUwTejHMoNwK+twNHQznmo1vVUZaH7h8BIAAzQ4AH0AAAAFZAAgAAAAACI3j5QP7dWHpcT6WO/OhsWwRJNASBYqIBDNzW8IorEyBXMAIAAAAABxUpBSjXwCKDdGP9hYU+RvyR+96kChfvyyRC4jZmztqAVjACAAAAAA93/qZqyku1BJePH5hmMWsMfuMlF7TOFZp7z2wLJNdeYAAzQ5AH0AAAAFZAAgAAAAAKsbycEuQSeNrF8Qnxqw3x3og8JmQabwGqnDbqzFRVrrBXMAIAAAAACno/3ef2JZJS93SVVzmOZSN+jjJHT8s0XYq2M46d2sLAVjACAAAAAARUbN9bbz8Balyw8pmBLg3L5dy3aQ9s8IgqpVLeQzTkUAAzUwAH0AAAAFZAAgAAAAAPXIcoO8TiULqlxzb74NFg+I8kWX5uXIDUPnh2DobIoMBXMAIAAAAADR6/drkdTpnr9g1XNvKDwtBRBdKn7c2c4ZNUVK5CThdQVjACAAAAAAnJXYvnjljaE6ajRF+h90FXzJ9EJY26D0oG9t61ZMzqgAAzUxAH0AAAAFZAAgAAAAAEa03ZOJmfHT6/nVadvIw71jVxEuIloyvxXraYEW7u7pBXMAIAAAAADzRlBJK75FLiKjz3djqcgjCLo/e3yntI3MnPS48OORhgVjACAAAAAAbFS6SmTSDF3AsPceQrRwMMRoAnzL/k5avSHlIji5zYIAAzUyAH0AAAAFZAAgAAAAAKx8NLSZUU04pSSGmHa5fh2oLHsEN5mmNMNH
L95/tuC9BXMAIAAAAAA59hcXVaN3MNdHoo11OcH1aPRzHCwpVjO9mGfMz4xh3QVjACAAAAAAoIvo/iOb/eXJv7w4KSQo74DOyO4uAIO09Ba7JZw9ousAAzUzAH0AAAAFZAAgAAAAAHNKAUxUqBFNS9Ea9NgCZoXMWgwhP4x0/OvoaPRWMquXBXMAIAAAAABUZ551mnP4ZjX+PXU9ttomzuOpo427MVynpkyq+nsYCQVjACAAAAAAlCTphL00SYotztwI+txbHAFTusjh1nNbiya24rM1dUEAAzU0AH0AAAAFZAAgAAAAALfGXDlyDVcGaqtyHkLT0qpuRhJQLgCxtznazhFtuyn/BXMAIAAAAABipxlXDq14C62pXhwAeen5+syA+/C6bN4rtZYcO4zKwAVjACAAAAAA431CBTW4s3IQde024W2cz5iEnj8EJ7p07esepoVDxmQAAzU1AH0AAAAFZAAgAAAAANoEr8sheJjg4UCfBkuUzarU9NFoy1xwbXjs5ifVDeA9BXMAIAAAAABPoyTf6M+xeZVGES4aNzVlq7LgjqZXJ/QunjYVusGUEAVjACAAAAAAywuLdjJJuXewOXMAr25GMBEif8+P74+pGaENUn+XIB8AAzU2AH0AAAAFZAAgAAAAAKvDiK+xjlBe1uQ3SZTNQl2lClIIvpP/5CHwY6Kb3WlgBXMAIAAAAAANnxImq5MFbWaRBHdJp+yD09bVlcFtiFDYsy1eDZj+iQVjACAAAAAAvwntX1RgeNfp5oiD+r3sAKIbMGy1yEblODDYWec1xuIAAzU3AH0AAAAFZAAgAAAAAF49IlFH9RmSUSvUQpEPUedEksrQUcjsOv44nMkwXhjzBXMAIAAAAADJtWGbk0bZzmk20obz+mNsp86UCu/nLLlbg7ppxYn7PgVjACAAAAAAKW8QTERGXwh6fE4gJsULcmu80B4BMkMaZR9USn/C6RgAAzU4AH0AAAAFZAAgAAAAAOuSJyuvz50lp3BzXlFKnq62QkN2quNU1Gq1IDsnFoJCBXMAIAAAAAAqavH1d93XV3IzshWlMnzznucadBF0ND092/2ApI1AcAVjACAAAAAAmVcLt9hf9/q+2O3K4cvFAsvnCsSofHZ5iTzIaR5YKFIAAzU5AH0AAAAFZAAgAAAAALtgC4Whb4ZdkCiI30zY6fwlsxSa7lEaOAU3SfUXr02XBXMAIAAAAACgdZ6U1ZVgUaZZwbIaCdlANpCw6TZV0bwg3DS1NC/mnAVjACAAAAAAYJOnAwVW0gPl1jkCxAL1ZAhKkJ1+ShfwyaIaipflCegAAzYwAH0AAAAFZAAgAAAAAF6PfplcGp6vek1ThwenMHVkbZgrc/dHgdsgx1VdPqZ5BXMAIAAAAACha3qhWkqmuwJSEXPozDO8y1ZdRLyzt9Crt2vjGnT7AAVjACAAAAAA1JYhGDDvtf8b2Pup5jdzQmy325tGo27n/5thjZ/GI4sAAzYxAH0AAAAFZAAgAAAAAKoLEhLvLjKc7lhOJfx+VrGJCx9tXlOSa9bxQzGR6rfbBXMAIAAAAAAIDK5wNnjRMBzET7x/KAMExL/zi1IumJM92XTgXfoPoAVjACAAAAAAhoVLxo0hXbxAzz2BWbeRxAzk08TbVuHxJkdsbCISswkAAzYyAH0AAAAFZAAgAAAAADoQv6lutRmh5scQFvIW6K5JBquLxszuygM1tzBiGknIBXMAIAAAAADAD+JjW7FoBQ76/rsECmmcL76bmyfXpUU/awqIsZdO+wVjACAAAAAAwLIixR/svwADNhtiOZ9mDF2rUqEvuPx2tUSQbhOpersAAzYzAH0AAAAFZAAgAAAAAJNHUGAgn56KekghO19d11nai3lAh0JAlWfeP+6w4lJBBXMAIAAAAAD9XGJlvz59msJvA6St9fKW9CG4JoHV61rlWWnkdBRLzwVjACAAAAAAKMVXumaH5QOt5WVHFip9xWx9e5I/5Y9uEO0UAX6dy7QAAzY0AH0AAAAFZAAgAAAAAHgYoMGjEE6fAlAhICv0+doHcVX8CmMVxyq7+jlyGrvmBXMAIAAAAAC/5MQZgTHuIr/O5Z3mXPvqrom5JTQ8IeSpQGhO9sB+8gVjACAAAAAAPMgD8Rqnd94atKnMyPjlTwthUQ710MKJVqgtwNXLFWwAAzY1AH0AAAAFZAAgAAAAANpIljbxHOM7pydY877gpRQvYY2TGK7igqgGsavqGPBABXMAIAAAAAAqHyEu9gpurPOulApPnr0x9wrygY/7mXe9rAC+tPK80wVjACAAAAAAezZExYLAbjyMsVuSrWGQ+LsXYEcp6AFMANkQ496mTXcAAzY2AH0AAAAFZAAgAAAAAGR+gEaZTeGNgG9BuM1bX2R9ed4FCxBA9F9QvdQDAjZwBXMAIAAAAABSkrYFQ6pf8MZ1flgmeIRkxaSh/Eep4Btdx4QYnGGnwAVjACAAAAAAQ011358xXtVDOyGPpF2+zGoMdiQ8PGCB1QfDcluYUF4AAzY3AH0AAAAFZAAgAAAAAFNprhQ3ZwIcYbuzLolAT5n/vc14P9kUUQComDu6eFyKBXMAIAAAAAAcx9z9pk32YbPV/sfPZl9ALIEVsqoLXgqWLVK/tP+heAVjACAAAAAA3eqduo9CWE5BePqBq7KCh5++QqXnyjCwybB15Zoeiu4AAzY4AH0AAAAFZAAgAAAAADgyPqQdqQrgfmJjRFAILTHzXbdw5kpKyfeoEcy6YYG/BXMAIAAAAAAE+3XsBQ8VAxAkN81au+f3FDeCD/s7KoZD+fnM1MJSSAVjACAAAAAAWHAiQNo6HBkIhf72fVQxJLaCtNeTqn2OuMmYZjT7PRkAAzY5AH0AAAAFZAAgAAAAAI0CT7JNngTCTUSei1Arw7eHWCD0jumv2rb7imjWIlWABXMAIAAAAABSP8t6ya0SyCphXMwnru6ZUDXWElN0NfBvEOhDvW9bJQVjACAAAAAAbfZdoqMQ71hNIsycNHl6JpSmAFDPSfgzdWkGqFZNoScAAzcwAH0AAAAFZAAgAAAAAD/FIrGYFDjyYmVb7oTMVwweWP7A6F9LnyIuNO4MjBnXBXMAIAAAAACIZgJCQRZu7NhuNMyOqCn1tf+DfU1qm10TPCfj5JYV3wVjACAAAAAAZCaO06/DjOBjNrTxB8jdxM7X4XvNu9QTa2Y00kZJwgEAAzcxAH0AAAAFZAAgAAAAAHIkVuNDkSS1cHIThKc/O0r2/ubaABTOi8Q1r/dvBAsEBXMAIAAAAADdHYqchEiJLM340c3Q4vJABmmth3+MKzwLYlsG6GS7sQVjACAAAAAA4byh4Re83zUZxvEH4iJSsnFei+ohsWxjSpB5YoAPawIAAzcyAH0AAAAFZAAgAAAAAJmoDILNhC6kn3masElfnjIjP1VjsjRavGk1gSUIjh1NBXMAIAAAAAD97Ilvp3XF8T6MmVVcxMPcdL80RgQ09UoC6PnoOvZ1IQVjACAAAAAAx9wkqPvtzrL9EFDmFvktI129ti8IAdHn0oudubGPWAA
AAzczAH0AAAAFZAAgAAAAAI5bm3YO0Xgf0VT+qjVTTfvckecM3Cwqj7DTKZXf8/NXBXMAIAAAAAD/m+h8fBhWaHm6Ykuz0WX1xL4Eme3ErLObyEVJf8NCywVjACAAAAAAiGdTXD22l1zDxHeF4NXUTiBnNTsdpzJRwM6riTPuOogAAzc0AH0AAAAFZAAgAAAAANqo4+p6qdtCzcB4BX1wQ6llU7eFBnuu4MtZwp4B6mDlBXMAIAAAAAAGiz+VaukMZ+6IH4jtn4KWWdKK4/W+O+gRioQDrfzpMgVjACAAAAAAl/wlBjSJW/hKkll8HSBCGw4Ce1pJ5rZuhVE09hKq4jQAAzc1AH0AAAAFZAAgAAAAAPrFXmHP2Y4YAm7b/aqsdn/DPoDkv7B8egWkfe23XsM1BXMAIAAAAAAGhwpKAr7skeqHm3oseSbO7qKNhmYsuUrECBxJ5k+D2AVjACAAAAAAKSsxERy2OBhb5MiH9/H2BET2oeU6yVihiAoWi/16FroAAzc2AH0AAAAFZAAgAAAAABzjYxwAjXxXc0Uxv18rH8I3my0Aguow0kTwKyxbrm+cBXMAIAAAAADVbqJVr6IdokuhXkEtXF0C2gINLiAjMVN20lE20Vmp2QVjACAAAAAACJ+VAwl9X88mjC76yzkWeL4K6AamNYjbNkpS9B6fQE4AAzc3AH0AAAAFZAAgAAAAAFMm2feF2fFCm/UC6AfIyepX/xJDSmnnolQIBnHcPmb5BXMAIAAAAABLI11kFrQoaNVZFmq/38aRNImPOjdJh0Lo6irI8M/AaAVjACAAAAAAZFgYh4nV8YN+/AAqe/QhKDkNBPg8KraCG7y3bOzhPV4AAzc4AH0AAAAFZAAgAAAAAJvXB3KyNiNtQko4SSzo/9b2qmM2zU9CQTTDfLSBWMgRBXMAIAAAAAAvjuVP7KsLRDeqVqRziTKpBrjVyqKiIbO9Gw8Wl2wFTAVjACAAAAAARuBn3JdQ8so7Gy0XVH0CtlvtRoJWhSrJP6eRIqbyWh8AAzc5AH0AAAAFZAAgAAAAAPGdcxDiid8z8XYnfdDivNMYVPgBKdGOUw6UStU+48CdBXMAIAAAAAARj6g1Ap0eEfuCZ4X2TsEw+Djrhto3fA5nLwPaY0vCTgVjACAAAAAATxLY6SkcLPaoOBJpQsggEqoxgJgiY9seP3fBQM05PckAAzgwAH0AAAAFZAAgAAAAAP5rGPrYGt3aKob5f/ldP0qrW7bmWvqnKY4QwdDWz400BXMAIAAAAADTQkW2ymaaf/bhteOOGmSrIR97bAnJx+yN3yMj1bTeewVjACAAAAAA6O+wIMt3xMITuCxVrqOCNBPX5F122G+/Is+EYkzUvVYAAzgxAH0AAAAFZAAgAAAAAAlz6wJze5UkIxKpJOZFGCOf3v2KByWyI6NB6JM9wNcBBXMAIAAAAABUC7P/neUIHHoZtq0jFVBHY75tSFYr1Y5S16YN5XxC1QVjACAAAAAANv97jMd9JmZ7lMeCzK7jaZHZYsZJNl6N9g9WXzd2om0AAzgyAH0AAAAFZAAgAAAAAFJ8AtHcjia/9Y5pLEc3qVgH5xKiXw12G9Kn2A1EY8McBXMAIAAAAAAxe7Bdw7eUSBk/oAawa7uicTEDgXLymRNhBy1LAxhDvwVjACAAAAAAtOUZ2NeUWwESpq95O92dhqYBt8RtFnjT43E7k9mQF3oAAzgzAH0AAAAFZAAgAAAAAO8uwQUaKFb6vqR3Sv3Wn4QAonC2exOC9lGG1juqP5DtBXMAIAAAAABZf1KyJgQg8/Rf5c02DgDK2aQu0rNCOvaL60ohDHyY+gVjACAAAAAA5nAWj1Ek8p0MLYZEB3yoVFDfBYZ+/ZpIo71u+W9hprcAAzg0AH0AAAAFZAAgAAAAAE8YtqyRsGCeiR6hhiyisR/hccmK4nZqIMzO4lUBmEFzBXMAIAAAAAC1UYOSKqAeG1UJiKjWFVskRhuFKpj9Ezy+lICZvFlN5AVjACAAAAAA37oP30mTzVftI2FL9Uwyls/jqLqbmRDQk7p7nlx44uYAAzg1AH0AAAAFZAAgAAAAAPhCrMausDx1QUIEqp9rUdRKyM6a9AAx7jQ3ILIu8wNIBXMAIAAAAACmH8lotGCiF2q9VQxhsS+7LAZv79VUAsOUALaGxE/EpAVjACAAAAAAhVIkl8p19VZ0gpg32s++Jda3qsVSVB5tV4CKrtjhE3IAAzg2AH0AAAAFZAAgAAAAAOBi/GAYFcstMSJPgp3VkMiuuUUCrZytvqYaU8dwm8v2BXMAIAAAAACEZSZVyD3pKzGlbdwlYmWQhHHTV5SnNLknl2Gw8IaUTQVjACAAAAAAriBex2kK//RPhyVpCYJDBng4l5w8jD3m8BF7dVAP0p8AAzg3AH0AAAAFZAAgAAAAAIQuup+YGfH3mflzWopN8J1X8o8a0d9CSGIvrA5HOzraBXMAIAAAAADYvNLURXsC2ITMqK14LABQBI+hZZ5wNf24JMcKLW+84AVjACAAAAAAGeUzdG2kQrx5XypXJezZmPVzMYuqYZw7Bhbl4EPT0SMAAzg4AH0AAAAFZAAgAAAAAKsh0ADyOnVocFrOrf6MpTrNvAj8iaiE923DPryu124gBXMAIAAAAADg24a8NVE1GyScc6tmnTbmu5ulzO+896fE92lN08MeswVjACAAAAAAbO+DEBY3STVMQN7CbxmHDUBYrDR+80e797u/VmiK4vcAAzg5AH0AAAAFZAAgAAAAAKkVC2Y6HtRmv72tDnPUSjJBvse7SxLqnr09/Uuj9sVVBXMAIAAAAABYNFUkH7ylPMN+Bc3HWX1e0flGYNbtJNCY9SltJCW/UAVjACAAAAAAHlE3RLKcXpXto7Mr8nRgzEsmhjfGh4vcgPxashCSMg4AAzkwAH0AAAAFZAAgAAAAADDggcwcb/Yn1Kk39sOHsv7BO/MfP3m/AJzjGH506Wf9BXMAIAAAAAAYZIsdjICS0+BDyRUPnrSAZfPrwtuMaEDEn0/ijLNQmAVjACAAAAAA1c/sy3255NofBQrnBNSmVkSzMgsGPaaOUJShddVrnuQAAzkxAH0AAAAFZAAgAAAAAEWY7dEUOJBgjOoWVht1wLehsWAzB3rSOBtLgTuM2HC8BXMAIAAAAAAAoswiHRROurjwUW8u8D5EUT+67yvrgpB/j6PzBDAfVwVjACAAAAAALBvQumG8m7/bzJjGWN2cHSAncdN8jMtOSmEhEhGom24AAzkyAH0AAAAFZAAgAAAAAPZaapeAUUFPA7JTCMOWHJa9lnPFh0/gXfAPjA1ezm4ZBXMAIAAAAACmJvLY2nivw7/b3DOKH/X7bBXjJwoowqb1GtEFO3OYgAVjACAAAAAAGCDHuUJusGHKQ+r9nrFChmUUsRcqZKPGsRiLSk5gbFcAAzkzAH0AAAAFZAAgAAAAAK00u6jadxCZAiA+fTsPVDsnW5p5LCr4+kZZZOTDuZlfBXMAIAAAAAAote4zTEYMDgaaQbAdN8Dzv93ljPLdGjJzvnRn3K
XgtQVjACAAAAAApZ6l+OrocOqgFek7WOqOP9JruTWZ+iW+5zdL3DZwzhkAAzk0AH0AAAAFZAAgAAAAAFbgabdyymiEVYYwtJSWa7lfl/oYuj/SukzJeDOR6wPVBXMAIAAAAADAFGFjS1vPbN6mQEhkDYTD6V2V23Ys9gUEUMGNvMPkaAVjACAAAAAAeICKly5Xtwmfd4JFD+5e1UWpu4V3KoRim7jeBICDs+UAAzk1AH0AAAAFZAAgAAAAABNMR6UBv2E627CqLtQ/eDYx7OEwQ7JrR4mSHFa1N8tLBXMAIAAAAAAxH4gucI4UmNVB7625C6hFSVCuIpJO3lusJlPuL8H5EQVjACAAAAAA0/uh1hrAlGGna/njCVaCqBwubxkifzF0zjCDQrIJOtUAAzk2AH0AAAAFZAAgAAAAAG8cd6WBneNunlqrQ2EmNf35W7OGObGq9WL4ePX+LUDmBXMAIAAAAAAjJ2+sX87NSis9hBsgb1QprVRnO7Bf+GObCGoUqyPE4wVjACAAAAAAAoJ4Ufdq45T9Aun5FupHlaBCIUsmUn6dXgV9KorpFikAAzk3AH0AAAAFZAAgAAAAAFWOUGkUpy8yf6gB3dio/aOfRKh7XuhvsUj48iESFJrGBXMAIAAAAAAY7sCDMcrUXvNuL6dO0m11WyijzXZvPIcOKob6IpC4PQVjACAAAAAAO/QFjczoznH95Iu0YEVMsU1GA1yxSGL8bcwSweNzAtkAAzk4AH0AAAAFZAAgAAAAAGUrIdKxOihwNmo6B+aG+Ag1qa0+iqdksHOjQj+Oy9bZBXMAIAAAAABwa5dbI2KmzBDNBTQBEkjZv4sPaeRkRNejcjdVymRFKQVjACAAAAAAUtdJucZoQvvPJiy65RonQrxBCcvtfHslpbgLbtWirCYAAzk5AH0AAAAFZAAgAAAAAOx89xV/hRk64/CkM9N2EMK6aldII0c8smdcsZ46NbP8BXMAIAAAAADBF6tfQ+7q9kTuLyuyrSnDgmrdmrXkdhl980i1KHuGHgVjACAAAAAAVc5njbLX1afpsuG662U/OBo4LanEk06lKbQtd95fswgAAzEwMAB9AAAABWQAIAAAAADJDdC9aEFl4Y8J/awHbnXGHjfP+VXQilPHJg7ewaJI7AVzACAAAAAAE+tqRl6EcBMXvbr4GDiNIYObTsYpa1n6BJk9EjIJVicFYwAgAAAAAITXOaLyGsYLUvOemQXxvnAHBwM/t3sMwfQus2aGoII+AAMxMDEAfQAAAAVkACAAAAAA3F9rjEKhpoHuTULVGgfUsGGwJs3bISrXkFP1v6KoQLgFcwAgAAAAAIBf0tXw96Z/Ds0XSIHX/zk3MzUR/7WZR/J6FpxRWChtBWMAIAAAAABzRshE3O7JzVjSGQSabvFXpen8sGUCAyr8hIIevROrjgADMTAyAH0AAAAFZAAgAAAAAOYIYoWkX7dGuyKfi3XssUlc7u/gWzqrR9KMkikKVdmSBXMAIAAAAABVF2OYjRTGi9Tw8XCAwZWLpX35Yl271TlNWp6N/nROhAVjACAAAAAAyGLbmHhe8eTQC8L2eDRcezUWULLZ9E8DPic7Mfp8/+4AAzEwMwB9AAAABWQAIAAAAACMtPm12YtdEAvqu6Eji1yuRXnu1RJP6h0l7pH3lSH4MwVzACAAAAAAENyCFfyUAh1veQBGx+cxiB7Sasrj41jzCGflZkB5cRMFYwAgAAAAAA92s3DQ3aIVOrRzusqU+xdK7cnQGCDJiyLF+su3F1ZDAAMxMDQAfQAAAAVkACAAAAAAvlI4lDcs6GB1cnm/Tzo014CXWqidCdyE5t2lknWQd4QFcwAgAAAAAD60SpNc4O2KT7J0llKdSpcX1/Xxs97N715a1HsTFkmBBWMAIAAAAABR3y/ZE3VZqBhqfUttX4bzW9wkpDoexfYZIG2fMwftcQADMTA1AH0AAAAFZAAgAAAAAKl8zcHJRDjSjJeV/WvMxulW1zrTFtaeBy/aKKhadc6UBXMAIAAAAADBdWQl5SBIvtZZLIHszePwkO14W1mQ0izUk2Ov21cPNAVjACAAAAAAHvQLJ2X4ncjVv1BsOd/jbVouRPwf4222WlGSzPyAfnsAAzEwNgB9AAAABWQAIAAAAABb6LXDWqCp1beQgQjj8I3sRTtFhlrmiBi+h/+ikmrvugVzACAAAAAA9stpgTecT7uTyaGNs3K9Bp0A7R0QaIAOfscyMXHBPX8FYwAgAAAAANTp7TwMatTSeLH3GvsrB3IOvJo+0B8HRGUFcRzJVesdAAMxMDcAfQAAAAVkACAAAAAA97NTT+81PhDhgptNtp4epzA0tP4iNb9j1AWkiiiKGM8FcwAgAAAAAKPbHg7ise16vxmdPCzksA/2Mn/qST0L9Xe8vnQugVkcBWMAIAAAAACVl3ds8xRdGFPAtZ+WtC4ojC1q5OzB6dSJoOLaJsdhhgADMTA4AH0AAAAFZAAgAAAAAMqpayM2XotEFmm0gwQd9rIzApy0X+7HfOhNk6VU7F5lBXMAIAAAAACJR9+q5T9qFHXFNgGbZnPubG8rkO6cwWhzITQTmd6VgwVjACAAAAAAEEOCUo+ihRGl1kuHlabWBWUFyJPAXSXzpQB4od57cMEAAzEwOQB9AAAABWQAIAAAAADQnslvt6Hm2kJPmqsTVYQHE/wWeZ4bE1XSkt7TKy0r1gVzACAAAAAA8URTA4ZMrhHPvlp53TH6FDCzS+0+61qHm5XK6UiOrKEFYwAgAAAAANSmECEsZgXBpoAzAIwaU3H0K/6HIutCn+ELRGBFzykkAAMxMTAAfQAAAAVkACAAAAAAhujlgFPFczsdCGXtQ/002Ck8YWQHHzvWvUHrkbjv4rwFcwAgAAAAALbV0lLGcSGfE7mDM3n/fgEvi+ifjl7WZ5b3aqjDNvx9BWMAIAAAAACKzGyyRT/3KSmGlKMHpswxj7pNu3rCmOETZ2DUhQsCBgADMTExAH0AAAAFZAAgAAAAABm/6pF96j26Jm7z5KkY1y33zcAEXLx2n0DwC03bs/ixBXMAIAAAAAD01OMvTZI/mqMgxIhA5nLs068mW+GKl3OW3ilf2D8+LgVjACAAAAAAnPa8PTfcaSSAdzNAC54IMTicbShbtt/cSnFHz7u7g8wAAzExMgB9AAAABWQAIAAAAACfw9/te4GkHZAapC9sDMHHHZgmlTrccyJDPFciOMSOcwVzACAAAAAAIIC1ZpHObvmMwUfqDRPl4C1aeuHwujM1G/yJbvybMNAFYwAgAAAAAKeagivKdHk7wAFEPHyDhsm9mDNWH5UPB+oIIlmfbzSOAAMxMTMAfQAAAAVkACAAAAAAkxHJRbnShpPOylLoDdNShfILeA1hChKFQY9qQyZ5VmsFcwAgAAAAAKidrY+rC3hTY+YWu2a7fuMH2RD/XaiTIBW1hrxNCQOJBWMAIAAAAADH8s64PC+lo8Ul4oujBlb/irtJSwu12V0HbvNzj/9qUQADMTE0AH0AAAAFZAAgAAAAAByuYl8dBvfaZ0LO/
81JW4hYypeNmvLMaxsIdvqMPrWoBXMAIAAAAABNddwobOUJzm9HOUD8BMZJqkNCUCqstHZkC76FIdNg9AVjACAAAAAA4SEPY+qEZ2dCu/9485IEVybiM3Z7szXYM+izxklNm14AAzExNQB9AAAABWQAIAAAAABxMy7X5hf7AXGDz3Y/POu1ZpkMlNcSvSP92NOO/Gs7wAVzACAAAAAAHJshWo2T5wU2zvqCyJzcJQKQaHFHpCpMc9oWBXkpUPoFYwAgAAAAANeYm2aAT9f6T9uwQyH4sw2DUQyKTpo/CHt+Bb67ao4TAAMxMTYAfQAAAAVkACAAAAAAlqbslixl7Zw3bRlibZbe/WmKw23k8uKeIzPKYEtbIy0FcwAgAAAAAHEKwpUxkxOfef5HYvulXPmdbzTivwdwrSYIHDeNRcpcBWMAIAAAAAASUKCgxGu2U42qfKiyMimLPiZBMurf5v8lQJUSrhAWIAADMTE3AH0AAAAFZAAgAAAAAMXrXx0saZ+5gORmwM2FLuZG6iuO2YS+1IGPoAtDKoKBBXMAIAAAAADIQsxCr8CfFKaBcx8kIeSywnGh7JHjKRJ9vJd9x79y7wVjACAAAAAAPzOTJ088k9QdGUpZ1ubUdkw3pTGkNF8bui4a9wqeo08AAzExOAB9AAAABWQAIAAAAAAm83FA9yDUpwkbKTihe7m53u+DivS9BU2b4vQMtCVQ2AVzACAAAAAAz3m1UB/AbZPa4QSKFDnUgHaT78+6iGOFAtouiBorEgEFYwAgAAAAAHuM0gIDED47c9rdQ4MVtVzpVfn5zTF7eokO4lew4EM+AAMxMTkAfQAAAAVkACAAAAAAJRzYK0PUwr9RPG2/7yID0WgcTJPB2Xjccp5LAPDYunkFcwAgAAAAAIIh24h3DrltAzNFhF+MEmPrZtzr1PhCofhChZqfCW+jBWMAIAAAAAD6LppOeSiLHFI7EOBAz9pZPKU7LWbRWIINTlqw4zmJ5gADMTIwAH0AAAAFZAAgAAAAAHSaHWs/dnmI9sc7nB50VB2Bzs0kHapMHCQdyVEYY30TBXMAIAAAAACkV22lhEjWv/9/DubfHBAcwJggKI5mIbSK5L2nyqloqQVjACAAAAAA1I5pLbnlFf/EfJlhE0RFIioDjzKrWNh4TO8H/ksrbMcAAzEyMQB9AAAABWQAIAAAAAAC/I4TQRtCl12YZmdGz17X4GqSQgfwCPgRBwdHmdwu+QVzACAAAAAAx8f3z2ut/RAZhleari4vCEE+tNIn4ikjoUwzitfQ588FYwAgAAAAADX1qVlsvYru6YDBMtmlQoIBhLSbVykBtft8wn/A+ohJAAMxMjIAfQAAAAVkACAAAAAADGB5yU2XT0fse/MPWgvBvZikVxrl5pf3S5K1hceKWooFcwAgAAAAAIxTmlLHMjNaVDEfJbXvRez0SEPWFREBJCT6qTHsrljoBWMAIAAAAAD+RiUk9IWfaDWXtzLp2NX/vCCeU9amFnQgpJjy56dQXQADMTIzAH0AAAAFZAAgAAAAABmO7QD9vxWMmFjIHz13lyOeV6vHT6mYCsWxF7hb/yOjBXMAIAAAAACT9lmgkiqzuWG24afuzYiCeK9gmJqacmxAruIukd0xEAVjACAAAAAAyiZxtuOZQNSit4wFMJS3jnqLbDzyJ0OtDTKs6r0EO0cAAzEyNAB9AAAABWQAIAAAAAAfPUoy7QyZKhIIURso+mkP9qr1izbjETqF5s22GwjCjAVzACAAAAAAvLMsIDQ/go4VUxeh50UHmsvMvfx51cwyONnRD2odvC0FYwAgAAAAAMwY/KvSIOLoSHOq4TQ1V9ZSCPZc34SdPooPL4gYXa7UAAMxMjUAfQAAAAVkACAAAAAAE3ZqUar0Bq2zWbARE0bAv98jBlK9UJ73/xcwdMWWlSkFcwAgAAAAAK4M+MmC+9sFiFsumMyJZQKxWmmJiuG9H7IzKw083xxkBWMAIAAAAACpywsi9vc979ApgH3BDzph7Y4VIAkQhji97sswzdLAYwADMTI2AH0AAAAFZAAgAAAAAMo8FJiOq63cAmyk2O7eI7GcbQh/1j4RrMTqly3rexftBXMAIAAAAADjVmpd0WiRGTw/gAqEgGolt2EI7Csv14vKdmYoMD0aAgVjACAAAAAAEPmYjDZnMZdkaYlLZLlRgist9kCrSQe0mSuKxPwwYbQAAzEyNwB9AAAABWQAIAAAAACu5IGaIx7A3Jvly/kzlCsSA4s3iJwuIl8jEdRH0k93NwVzACAAAAAA9NRUyxYE+t0Xyosyt6vIfMFW/vBoYg6sR+jBNs4JAxIFYwAgAAAAAKMbsr4JCCodHmkBvfhOe7BJigKiJik95wLeu3PjLSpXAAMxMjgAfQAAAAVkACAAAAAALkRy0GJInXYLA+cgjs6Myb0a+Gu9hgXhHvhLNoGWfckFcwAgAAAAANbALyt9zCSvwnLaWCd2/y2eoB7qkWTvv1Ldu8r40JPuBWMAIAAAAABi0dME7AUSoeMv4yIgnGMgrX9G+Uk6HV+I9/9zzABcZwADMTI5AH0AAAAFZAAgAAAAAGoUK/DSWhT8LZhszSUqDbTrp8cSA7rdqmADKL+MILtTBXMAIAAAAABHnEE9bVa6lvhfhEMkkV2kzSSxH/sMW/FIJuw3CzWs6wVjACAAAAAAagOaxIb7co+HxAXhfI9ahHLUNqmGuN/4JxeR9sm6PdkAAzEzMAB9AAAABWQAIAAAAAC922ZDQE3h2fQKibGMZ9hV0WNlmrPYYSdtaSyYxsWYqgVzACAAAAAAagMovciKK6WVjIc2cCj8nK5O/gVOFFVeVAJpRp89tmQFYwAgAAAAAAMsWoODZkTvKSNWzloogbp/AiFFspENp49RwKjWpMLHAAMxMzEAfQAAAAVkACAAAAAAtK+JqX3K/z2txjAU15DgX4y90DS2YLfIJFolCOkJJJwFcwAgAAAAAMnR5V7gfX7MNqqUdL5AkWlkhyFXaBRVNej+Rcn8lrQkBWMAIAAAAADOmDHuXwsu9uwKYYSN5XIb/TG3K5DB5aci/X1ATKTaSwADMTMyAH0AAAAFZAAgAAAAAAeGrIMK/bac6kPczxbvRYqKMkcpeI2FjdMpD91FDWIvBXMAIAAAAAAix62z1LeS8yvSXCl5gHSIomjyx76fF3S1lp9k900hygVjACAAAAAAUK1Gzlm0meDhYPL7ZgKaCD0qz7Zb5rtnPn1EEW3UZxoAAzEzMwB9AAAABWQAIAAAAACphf298InM0Us4HT8o1W1MGw0D/02vd7Jh+U0h7qaFaQVzACAAAAAAFXtk7YpqsOJxsqGWSIL+YcBE96G3Zz9D31gPqDW94y8FYwAgAAAAAEA5mnfyccI1SjyYftq3qCuORs/NxzzqUA+fxn3FRjVsAAMxMzQAfQAAAAVkACAAAAAATLDS2cuDVM3yDMuWNgk2iGKBTzPpfJMbvxVOSY39ZfcFcwAgAAAAAPT5wRi2cLHIUflXzm6EQB/m7xdThP80ir1VV/JBBqvxBWMAIAAA
AABTj7i+CxxNVkbnAkSYga/v2X3/NH3nD/FRiOo2FPkzvAADMTM1AH0AAAAFZAAgAAAAAJNjExiZVX7jfFGfYpQu16qxLN0YPqVU/5CQ/Y67YSinBXMAIAAAAABMpm2+6KrkRUlXzQoMPHrQmIO6dkQz66tYdfTeA3dKqQVjACAAAAAATkNGECGUFzo34ItVnTEmxbe8dbde8QzMgEEdjMhRLz8AAzEzNgB9AAAABWQAIAAAAABlFkYtLCx901X6QVVMkSn6Z7k30UF4xHaA0OZJJ9bdyQVzACAAAAAATez+F9GHcGzTp7jjv4feboUNb8JCkIp4EqcPFisnq7MFYwAgAAAAAOHfn3eVRhSWn61AfZuAnCGleSLluJ3sF9IFGUWrCsBaAAMxMzcAfQAAAAVkACAAAAAA8NJKN0IxZnruhswGQkiruv8Ih0EMwDcSZx/Xasup9dkFcwAgAAAAAKaJZRxzA+Igeydvuk6cSwUHXcrmT4PjhuPu//FslpdnBWMAIAAAAAC3Cs/mKg+sbU/BC9QWFglOau6FWAP1sJqqSZOEOdzh+gADMTM4AH0AAAAFZAAgAAAAABHxHdEClz7hbSSgE58+dWLlSMJnoPz+jFxp4bB1GmLQBXMAIAAAAAD3nSvT6aGD+A110J/NwEfp0nPutlmuB5B+wA3CC3noGAVjACAAAAAA56lpvjMjFzR6NMGiSF1es1X01dwINWqaxyq3UUL8XJ0AAzEzOQB9AAAABWQAIAAAAAC/o8qW/ifk3KuJ01VFkyNLgQafxB5/bGs2G5VyyVafOwVzACAAAAAA1bMqAFGDHSl6BYNLbxApvkAv2K1/oafywiX0MDz1dGUFYwAgAAAAAMjJrrd6E5DyUuXkWZsAqo020Qvom6xqAluQLod5SmtdAAMxNDAAfQAAAAVkACAAAAAAY2V8I1bz3a1AxTtmED6UhdhA09huFkuuEX8R+d/WDPUFcwAgAAAAAPTVoNRiI76tcRKqd+JBBVyy4+YcKST42p0QX2BtmQ2VBWMAIAAAAAD0kQXxOWWOfIzqhJk5Rnvp/h5VXcGxxLZ8HMvVVH++YQADMTQxAH0AAAAFZAAgAAAAAO2kSsW0WGN9AOtK4xK2SHrGhWiaAbMEKT4iZkRpaDN/BXMAIAAAAABKGzQcPM8LT2dwOggxoWjv/1imYWabbG/G4kBw8OWaxAVjACAAAAAAT3sD7PTrAPtOtiSRrG4IKmSNr/uWgX8TvIyZpZzj4wkAAzE0MgB9AAAABWQAIAAAAAAiCwzNEEaH/mDam68IdDftnhthyUFdb+ZCNSBQ91WlHQVzACAAAAAA7tHyHcxCzmbJeFYZyPm4mEgkTGKOvwY4MX82OvH0Jn8FYwAgAAAAAP4A3nd470CwvaGzyJLfzbrw7ePJV35V3Cw6tuiPFlsCAAMxNDMAfQAAAAVkACAAAAAAGuCHVNJSuoVkpPOnS5s89GuA+BLi2IPBUr2Bg1sWEPIFcwAgAAAAAEl1gncS5/xO7bQ/KQSstRV3rOT2SW6nV92ZANeG2SR6BWMAIAAAAADi6/zM3xSBXEfpRpvEdeycRDl08SmwwLey72qgZaY7FwADMTQ0AH0AAAAFZAAgAAAAAMfxz7gEaoCdPvXrubDhCZUS0ARLZc1svgbXgMDlVBPgBXMAIAAAAAB6a5dDA3fuT5Vz2KvAcbUEFX/+B7Nw2p1QqbPoQ5TTuAVjACAAAAAAB4Kp5ewWpFMJ2T+QCrfw2eJV70L4M+GM8khV1JwqOqQAAzE0NQB9AAAABWQAIAAAAACnYsqF/VzmjIImC9+dqrHO1TM6lJ6fRwM0mM6Wf6paOwVzACAAAAAA5tgZzch8uDCR1ky3SllVaKVpxAlbrhvlNDTazZZRZOAFYwAgAAAAALDJDWkmExE4L8hf2CDNLuF+dgivUGXb7ZvBp90EALm+AAMxNDYAfQAAAAVkACAAAAAAEC0sIVmadtW4YMuRXH7RpAhXclsd+3bmqGXCMeaT014FcwAgAAAAABPpXh0uzpsJJB+IRUNajmMB9WGwswfpw5T9xk3Xj6ANBWMAIAAAAABauth7HsI90juToIGY6149TbrxP8Emoa+5Izilj9zeWQADMTQ3AH0AAAAFZAAgAAAAABaIB3n49Xm9cOafSrQsE0WCcYp8rMIO/qVwIlMF5YLRBXMAIAAAAAC9EyWJV3xOu9bzgdJ/yX+ko7qLf1u3AxNMataW2C9EzQVjACAAAAAAy7IAYSgbqP7CyvurHMoEq1eO08TIPNO/XcS1L5RKXHkAAzE0OAB9AAAABWQAIAAAAAA5rZItA/cocRnngYqcJ3nBXQ+l688aKz3EQyLbYYunPAVzACAAAAAAwKyA+L7TgxztPClLrIMk2JXR+w7c04N3ZOqPgjvrIvsFYwAgAAAAABaAwqPDgkTaz4888/3PeF64+vtHbLtAZgFgEurWT5kbAAMxNDkAfQAAAAVkACAAAAAA3htn7oHJ0YYpIrs+Mzyh85Ys67HwAdv5LQl1mCdoMWkFcwAgAAAAAEHjCtNNLenHuSIYux6ezAHsXDaj2DlTF67ToDhDDe6HBWMAIAAAAAAVwUZIMifuPtmiFSEctLt+bOFo1T4fgGiWNsZAP0ddeQADMTUwAH0AAAAFZAAgAAAAAEnt18Km/nqggfIJWxzTr9r3hnXNaueG6XO9A5G11LnGBXMAIAAAAAD7QxzGMN/ard5TfFLecE6uusMmXG2+RBsBR+/NCQHUwAVjACAAAAAAIPTDniUskKj5mEzULdlKtIDquwjcqkwunJyhmBazNNcAAzE1MQB9AAAABWQAIAAAAAAVAKK/GoY8AACu/hyMpO4hdLq6JnEyWNzkyci9sbaD/wVzACAAAAAA2HmeqpMlvvBpV2zQTYIRmsc4MFlfHRwLof0ycJgMg/MFYwAgAAAAACHHCAszhxijRN9Es+XRQxXo7JQ6g2qM6f5sIdMy0VKbAAMxNTIAfQAAAAVkACAAAAAAhHyq1GQC/GiMwpYjcsfkNxolJ10ARKjIjfkW1Wipzi0FcwAgAAAAAD/uaGWxTDq87F8XZ6CrFI+RNa8yMqfSZdqK00Kj833BBWMAIAAAAAD1mJD7i29l2xloVHwS1n0CbkyodWeFfZM1KF1r4HqNIgADMTUzAH0AAAAFZAAgAAAAABAJJxHoZD+MQBWqm9UM9Dd3z5ZohIZGWRaRVRsMptKQBXMAIAAAAADrE/ca+gqj/SH4oao4wE4qn2ovoTydzcMbDbrfnUs3zAVjACAAAAAAAIUpF9mApVCMCckE7S+K9RL6TPtlTTYuzUsj9E6oIpUAAzE1NAB9AAAABWQAIAAAAAAv01wz7VG9mTepjXQi6Zma+7b/OVBaKVkWNbgDLr1mFgVzACAAAAAA0I5sxz8r6wkCp5Tgvr+iL4p6MxSOq5d3e1kZG+0b7NkFYwAgAAAAAKwOaUTAOw6Y1F5vFrbDf8yVua5Fm6hKhkdIrzejuaatAAMxNTUAfQAAAAVkACAAAAAAWXecRwxSon68xaa9THX
nRDw5ZfzARKnvvjTjtbae6T0FcwAgAAAAAPh0UfUMEo7eILCMv2tiJQe1bF9qtXq7GJtC6H5Va4fIBWMAIAAAAAASanx14WR2rmomuZ2x/nuD8j75pYzz+ZAt/v7uRcoOEQADMTU2AH0AAAAFZAAgAAAAAEY5WL8/LpX36iAB1wlQrMO/xHVjoO9BePVzbUlBYo+bBXMAIAAAAABoKcpadDXUARedDvTmzUzWPe1jTuvD0z9oIcZmKuiSXwVjACAAAAAAWhdUWP186dYAIDx6RtC0o/lzwYNvYBXm4RWFMbcU3YkAAzE1NwB9AAAABWQAIAAAAADnJnWqsfx0xqNnqfFGCxIplVu8mXjaHTViJT9+y2RuTgVzACAAAAAAWAaSCwIXDwdYxWf2NZTly/iKVfG/KDjHUcA1BokN5sMFYwAgAAAAAPS3GDNsHgCuTgEWTHiEStoY0VMzlopZ8L7wPUquK7ayAAMxNTgAfQAAAAVkACAAAAAAsPG7LaIpJvcwqcbtfFUpIjj+vpNj70Zjaw3eV9T+QYsFcwAgAAAAAJQ71zi0NlCyY8ZQs3IasJ4gB1PmWx57HpnlCf3+hmhqBWMAIAAAAABmDfQAQeyeutCB+wSn4RmErwYEkYpyBeCYA+3iutFzAQADMTU5AH0AAAAFZAAgAAAAAAGiSqKaQDakMi1W87rFAhkogfRAevnwQ41onWNUJKtuBXMAIAAAAAASgiDpXfGh7E47KkOD8MAcX8+BnDShlnU5JAGdnPdqOAVjACAAAAAAucFM6KF1YHEhH9vn16ox+VSqNEykMGaJzhLPsLI7qk8AAzE2MAB9AAAABWQAIAAAAAB7L4EnhjKA5xJD3ORhH2wOA1BvpnQ+7IjRYi+jjVEaJAVzACAAAAAAuhBIm0nL3FJnVJId+7CKDASEo+l2E89Z9/5aWSITK4AFYwAgAAAAALxpb5qDek038/wz4E//KIrwwo0voaqwlB7cTXSQ44EHAAMxNjEAfQAAAAVkACAAAAAAaROn1NaDZFOGEWw724dsXBAm6bgmL5i0cki6QZQNrOoFcwAgAAAAANVT8R6UvhrAlyqYlxtmnvkR4uYK/hlvyQmBu/LP6/3ZBWMAIAAAAAC1ONheOA57vNof7iZM3b82xk1abqT0bppzjQnHCmnUIwADMTYyAH0AAAAFZAAgAAAAADqSR5e0/Th59LrauDA7OnGD1Xr3H3NokfVxzDWOFaN7BXMAIAAAAACt30faNwTWRbvmykDpiDYUOCwA6QDbBBYBFWS7rdOB4AVjACAAAAAAQZlvU/gzOwW2rcr5+W43Q6EkfX8oX1TpRJWEAZAULx4AAzE2MwB9AAAABWQAIAAAAADmtb1ZgpZjSeodPG/hIVlsnS8hoRRwRbrTVx89VwL62AVzACAAAAAAi38e1g6sEyVfSDkzZbaZXGxKI/zKNbMasOl2LYoWrq8FYwAgAAAAAFWkHkxPA28FjeUjeRILC0tX8OJcanhPP68QxM5s1kcMAAMxNjQAfQAAAAVkACAAAAAAf/f7AWVgBxoKjr7YsEQ4w/fqSvuQWV2HMiA3rQ7ur0sFcwAgAAAAADkkeJozP6FFhUdRIN74H4UhIHue+eVbOs1NvbdWYFQrBWMAIAAAAADS3qKdAEzPtGMFpsisEhnOfF0zfxmLXjHKKaNCzX4PhQADMTY1AH0AAAAFZAAgAAAAAI2WEOymtuFpdKi4ctanPLnlQud+yMKKb8p/nfKmIy56BXMAIAAAAADVKrJmhjr1rfF3p+T+tl7UFd1B7+BfJRk0e7a4im7ozgVjACAAAAAAxeqPjcgDwae+AgEXg3hU118dHoy6rcHd6jAJrGik+EcAAzE2NgB9AAAABWQAIAAAAAAuHU9Qd79hjyvKOujGanSGDIQlxzsql8JytTZhEnPw+AVzACAAAAAAjF2gV/4+sOHVgDd/oR5wDi9zL7NGpGD+NsEpGXy/a4QFYwAgAAAAAIFYquIAyUn3D2peptolq3s8oIgSLEIctqBYx6qEeQTxAAMxNjcAfQAAAAVkACAAAAAA7dQDkt3iyWYCT94d7yqUtPPwp4qkC0ddu+HFdHgVKEkFcwAgAAAAANuYvtvZBTEq4Rm9+5eb7VuFopowkrAuv86PGP8Q8/QvBWMAIAAAAABBO8poBKW/RWjdHBARy3Rzghz3frQIZEPE/nSKWYlVPgADMTY4AH0AAAAFZAAgAAAAAAwnZSDhL4tNGYxlHPhKYB8s28dY5ScSwiKZm3UhT8U3BXMAIAAAAABDoY6dhivufTURQExyC9Gx3ocpl09bgbbQLChj3qVGbgVjACAAAAAA4JWOs1XAUgX3rGBBGEDDIC9/i9khJ+FgIcwfvj/SnS0AAzE2OQB9AAAABWQAIAAAAAC2VuRdaC4ZJmLdNOvD6R2tnvkyARteqXouJmI46V306QVzACAAAAAAMn1Z6B35wFTX9mEYAPM+IiJ5hauEwfD0CyIvBrxHg7IFYwAgAAAAAMRQnV65wbGlT+7Wj/HvGPLKg8NBnhkQ8Hx/MIyWvynTAAMxNzAAfQAAAAVkACAAAAAAVerb7qVNy457rNOHOgDSKyWl5ojun7iWrv1uHPXrIZQFcwAgAAAAAIDcYS9j5z+gx0xdJj09L7876r/vjvKTi/d3bXDE3PhyBWMAIAAAAAANDuaZsI2eS6/qiSdG9pCLjcPdKS96xWUJr84V/1La7gADMTcxAH0AAAAFZAAgAAAAAH/E44yLxKCJjuSmU9A8SEhbmkDOx1PqqtYcZtgOzJdrBXMAIAAAAABgLh9v2HjBbogrRoQ82LS6KjZQnzjxyJH4PH+F3jupSAVjACAAAAAAd93miIlZ/uuhzjVBt0BPjzeewxih6TSzkzbsNhpMjA8AAzE3MgB9AAAABWQAIAAAAAAlNUdDL+f/SSQ5074mrq0JNh7CTXwTbbhsQyDwWeDVMwVzACAAAAAANIH2IlSNG0kUw4qz0budjcWn8mNR9cJlYUqPYdonucAFYwAgAAAAAF9OccClBu1j8BVMDmMtJyCxnhWCbg9FuY/8nhuFGJknAAMxNzMAfQAAAAVkACAAAAAASZ+CvUDtlk/R4HAQ3a+PHrKeY/8ifAfh0oXYFqliu80FcwAgAAAAAJelpzPgM65OZFt/mvGGpwibclQ49wH+1gbUGzd9OindBWMAIAAAAAC63uwNXVbd1VP5If++NprVRsHCbomU9Iq3GxCttNAweAADMTc0AH0AAAAFZAAgAAAAAN9fkoUVbvFV2vMNMAkak4gYfEnzwKI3eDM3pnDK5q3lBXMAIAAAAACnDkgVNVNUlbQ9RhR6Aot2nVy+U4km6+GHPkLr631jEAVjACAAAAAA5d2REAVBZI9rUfeqJ9jhePmRXvrGolUbMTFiC78k/OIAAzE3NQB9AAAABWQAIAAAAAASyK3b1nmNCMptVEGOjwoxYLLS9fYWm/Zxilqea0jpEQVzACAAAAAADDHsGrbqlKGEpxlvfyqOJKQJjwJrzsrB7k3HG0AUJbkFYwAgAAAAAC
ZHzWGY3PEL4uPSKPezGGJkyScYw/Vt3bKmX5inVMaOAAMxNzYAfQAAAAVkACAAAAAAlSP9iK31GlcG9MKGbLmq+VXMslURr+As736rrVNXcsUFcwAgAAAAAAvbj0zfq9zzi8XReheKFbCB+h9IsOLgXPPpI5vrEJNZBWMAIAAAAABb9Y2Ox3MeHCfGnMxpR7nk6hwIJg0J8/tT8fZHwxMCBQADMTc3AH0AAAAFZAAgAAAAAOJNORH8Bev97gVU7y6bznOxJ+E6Qoykur1QP76hG1/7BXMAIAAAAAC+C1PtOOrSZgzBAGhr+dPe/kR0JUw9GTwLVNr61xC1aAVjACAAAAAA4rDk/UlRHudzR12i5Ivcb5Pxwn63JX2Sjf2xrlALL7kAAzE3OAB9AAAABWQAIAAAAAAKcHzLUomavInN6upPkyWhAqYQACP/vdVCIYpiy6U6HgVzACAAAAAATsR4KItY6R2+U7Gg6sJdaEcf58gjd1OulyWovIqfxKcFYwAgAAAAAEac2NX3v0n87D/eUM/a3Gj+5Axs896SCidMyOMaOz6zAAMxNzkAfQAAAAVkACAAAAAAqTOLiMpCdR59tLZzzIPqJvbCNvz2XQL9ust0qYaehtcFcwAgAAAAAArefox/3k5xGOeiw2m6NUdzuGxmPwcu5IFcj+jMwHgHBWMAIAAAAABK08kOnYjDWhLJ70iVhd3XEj0Q0srEQttJTawBm6GuOwADMTgwAH0AAAAFZAAgAAAAAIPSqSeVzSRgNVNmrPYHmUMgykCY27NbdDUNhE5kx/SgBXMAIAAAAAAhX90nNfxyXmZe/+btZ7q6xMX4PFyj0paM1ccJ/5IUUQVjACAAAAAA/9XTbq8UVKJbIeYtfNg8pdDrpDqbCwVr5Rq3Tb4dFwwAAzE4MQB9AAAABWQAIAAAAACLn5DxiqAosHGXIAY96FwFKjeqrzXWf3VJIQMwx1fl4gVzACAAAAAAindvU27nveutopdvuHmzdENBbeGFtI3Qcsr07jxmvm8FYwAgAAAAAJ3GMHBeIaCyFV5zBgcUMpjfIDcdoaTYQSyQN2shVJMWAAMxODIAfQAAAAVkACAAAAAA7i2S6rHRSPBwZEn59yxaS7HiYBOmObIkeyCcFU42kf8FcwAgAAAAAGb3RSEyBmgarkTvyLWtOLJcPwCKbCRkESG4RZjVmY4iBWMAIAAAAABKIdcHRSJ3dnevw5Pj0BfULPqmDOA7KBDREMGv5rXUEAADMTgzAH0AAAAFZAAgAAAAAFz+M+H/Z94mdPW5oP51B4HWptp1rxcMWAjnlHvWJDWrBXMAIAAAAACBFEOQyL7ZHu4Cq33QvXkmKuH5ibG/Md3RaED9CtG5HwVjACAAAAAABf2EvIMumZohp2E/sHWEJW4VMU3ez95hwWU9pjKGRCYAAzE4NAB9AAAABWQAIAAAAAAE7c2x3Z3aM1XGfLNk/XQ9jCazNRbGhVm7H8c2NjS5ywVzACAAAAAARJ9h8fdcwA19velF3L/Wcvi2rCzewlKZ2nA0p8bT9uwFYwAgAAAAAPl1vH6JaJGZSJ8au0NK58X4gqleryub8slUhA0HRtzmAAMxODUAfQAAAAVkACAAAAAAVoRt9B9dNVvIMGN+ea5TzRzQC+lqSZ8dd/170zU5o9cFcwAgAAAAAEwM95XZin5mv2yhCI8+ugtKuvRVmNgzzIQN0yi1+9aIBWMAIAAAAAA+rgph1avVrhyHl/6G+HtASJSiJkv7u2UPYj+25fwtLAADMTg2AH0AAAAFZAAgAAAAAKRDkjyWv/etlYT4GyoXrmBED2FgZHnhc+l9Wsl06cH2BXMAIAAAAABohlpm3K850Vndf3NmNE0hHqDlNbSR8/IvMidQ3LnIZAVjACAAAAAAKw6WqWDrI7ZfyndtrftL3uh7BaF/FQxhkh8b0Zl547gAAzE4NwB9AAAABWQAIAAAAAC3DFh5oklLCNLY90bgWm68dFXz65JpAZSp1K99MBTPAQVzACAAAAAAQgZecmxEUZVHoptEQClDwAf8smI3WynQ/i+JBP0g+kQFYwAgAAAAADmUpVNJUwrydIllFvI/aau1703nk6tF2sfzkgFlWsKCAAMxODgAfQAAAAVkACAAAAAAQ5WKvWSB503qeNlOI2Tpjd5blheNr6OBO8pfJfPNstcFcwAgAAAAAKwHgQLSDJ5NwLBQbY5OnblQIsVDpGV7q3RCbFLD1U4/BWMAIAAAAADKOab9GTEiaybo9o4J72Pb9LiQU/CuKoQm6Jtlmo/IeQADMTg5AH0AAAAFZAAgAAAAAGfhFY3RGRm5ZgWRQef1tXxHBq5Y6fXaLAR4yJhrTBplBXMAIAAAAACKEF0ApLoB6lP2UqTFsTQYNc9OdDrs/vziPGzttGVLKQVjACAAAAAAvJcnqBNCZgHe+Q7Cqk+yiAaIuSJIRItXEsAlKy9PES4AAzE5MAB9AAAABWQAIAAAAAAIM73gPcgzgotYHLeMa2zAU4mFsr7CbILUZWfnuKSwagVzACAAAAAAJCSu98uV8xv88f2BIOWzt6p+6EjQStMBdkGPUkgN79cFYwAgAAAAABQqDP10DcaqEY7vxFS7U/9xcadDASONow1+xmVsPZ11AAMxOTEAfQAAAAVkACAAAAAAkWmHCUsiMy1pwZTHxVPBzPTrWFBUDqHNrVqcyyt7nO8FcwAgAAAAAMv2CebFRG/br7USELR98sIdgE9OQCRBGV5JZCO+uPMgBWMAIAAAAABxLEFq6nFVI3WaYhggo1d6oeDsBSm4wCW0T6D0yE/puwADMTkyAH0AAAAFZAAgAAAAAGInUYv0lP/rK7McM8taEHXRefk8Q2AunrvWqdfSV7UaBXMAIAAAAACE+WPxJ3gan7iRTbIxXXx+bKVcaf8kP4JD8DcwU0aL7wVjACAAAAAAiThzC5jAZ5D6xuCIsUD1aLy7KxmT2Fb8ybTkLxvj17MAAzE5MwB9AAAABWQAIAAAAACmHlg2ud3cplXlTsNTpvNnY6Qm1Fce0m899COamoDjaQVzACAAAAAArtJQeJIlepBWRU2aYar7+YGYVQ7dfDc1oxgTmA8r9q0FYwAgAAAAADZpcxtBFv18EdmhxsoxlDs253tNB5zqVY/h+nFD1isIAAMxOTQAfQAAAAVkACAAAAAApzcWSAbZWV/Rq+ylRNqqlJqNVR4fhXrz4633/MQOQgcFcwAgAAAAAN/jz/bsEleiuCl+li83EWlG6UMHA8CyaOMRKCkXkSCPBWMAIAAAAADSTHgALJugYsjIvSK1o0UwG6VizNokXQ/tnPNjOpGB1AAABWUAIAAAAADrmnP3kS2GpCl+gdL2da90KHTkBX46iQ/sZRoj7uPz7BJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAAA==", + "base64": 
"DeFiAAADcGF5bG9hZACxYgAABGcAnWIAAAMwAH0AAAAFZAAgAAAAAJu2KgiI8vM+kz9qD3ZQzFQY5qbgYqCqHG5R4jAlnlwXBXMAIAAAAAAAUXxFXsz764T79sGCdhxvNd5b6E/9p61FonsHyEIhogVsACAAAAAAt19RL3Oo5ni5L8kcvgOJYLgVYyXJExwP8pkuzLG7f/kAAzEAfQAAAAVkACAAAAAAPQPvL0ARjujSv2Rkm8r7spVsgeC1K3FWcskGGZ3OdDIFcwAgAAAAACgNn660GmefR8jLqzgR1u5O+Uocx9GyEHiBqVGko5FZBWwAIAAAAADflr+fsnZngm6KRWYgHa9JzK+bXogWl9evBU9sQUHPHQADMgB9AAAABWQAIAAAAAD2Zi6kcxmaD2mY3VWrP+wYJMPg6cSBIYPapxaFQxYFdQVzACAAAAAAM/cV36BLBY3xFBXsXJY8M9EHHOc/qrmdc2CJmj3M89gFbAAgAAAAAOpydOrKxx6m2gquSDV2Vv3w10GocmNCFeOo/fRhRH9JAAMzAH0AAAAFZAAgAAAAAOaNqI9srQ/mI9gwbk+VkizGBBH/PPWOVusgnfPk3tY1BXMAIAAAAAAc96O/pwKCmHCagT6T/QV/wz4vqO+R22GsZ1dse2Vg6QVsACAAAAAAgzIak+Q3UFLTHXPmJ+MuEklFtR3eLtvM+jdKkmGCV/YAAzQAfQAAAAVkACAAAAAA0XlQgy/Yu97EQOjronl9b3dcR1DFn3deuVhtTLbJZHkFcwAgAAAAACoMnpVl6EFJak8A+t5N4RFnQhkQEBnNAx8wDqmq5U/dBWwAIAAAAACR26FJif673qpwF1J1FEkQGJ1Ywcr/ZW6JQ7meGqzt1QADNQB9AAAABWQAIAAAAAAOtpNexRxfv0yRFvZO9DhlkpU4mDuAb8ykdLnE5Vf1VAVzACAAAAAAeblFKm/30orP16uQpZslvsoS8s0xfNPIBlw3VkHeekYFbAAgAAAAAPEoHj87sYE+nBut52/LPvleWQBzB/uaJFnosxp4NRO2AAM2AH0AAAAFZAAgAAAAAIr8xAFm1zPmrvW4Vy5Ct0W8FxMmyPmFzdWVzesBhAJFBXMAIAAAAABYeeXjJEzTHwxab6pUiCRiZjxgtN59a1y8Szy3hfkg+gVsACAAAAAAJuoY4rF8mbI+nKb+5XbZShJ8191o/e8ZCRHE0O4Ey8MAAzcAfQAAAAVkACAAAAAAl+ibLk0/+EwoqeC8S8cGgAtjtpQWGEZDsybMPnrrkwEFcwAgAAAAAHPPBudWgQ+HUorLDpJMqhS9VBF2VF5aLcxgrM1s+yU7BWwAIAAAAAAcCcBR2Vyv5pAFbaOU97yovuOi1+ATDnLLcAUqHecXcAADOAB9AAAABWQAIAAAAACR9erwLTb+tcWFZgJ2MEfM0PKI9uuwIjDTHADRFgD+SQVzACAAAAAAcOop8TXsGUVQoKhzUllMYWxL93xCOkwtIpV8Q6hiSYYFbAAgAAAAAKXKmh4V8veYwob1H03Q3p3PN8SRAaQwDT34KlNVUjiDAAM5AH0AAAAFZAAgAAAAALv0vCPgh7QpmM8Ug6ad5ioZJCh7pLMdT8FYyQioBQ6KBXMAIAAAAADsCPyIG8t6ApQkRk1fX/sfc1kpuWCWP8gAEpnYoBSHrQVsACAAAAAAJe/r67N6d8uTiogvfoR9rEXbIDjyLb9EVdqkayFFGaYAAzEwAH0AAAAFZAAgAAAAAIW4AxJgYoM0pcNTwk1RSbyjZGIqgKL1hcTJmNrnZmoPBXMAIAAAAAAZpfx3EFO0vY0f1eHnE0PazgqeNDTaj+pPJMUNW8lFrAVsACAAAAAAP+Um2vwW6Bj6vuz9DKz6+6aWkoKoEmFNoiz/xXm7lOsAAzExAH0AAAAFZAAgAAAAAKliO6L9zgeuufjj174hvmQGNRbmYYs9yAirL7OxwEW3BXMAIAAAAAAqU7vs3DWUQ95Eq8OejwWnD0GuXd+ASi/uD6S0l8MM1QVsACAAAAAAb9legYzsfctBPpHyl7YWpPmLr5QiNZFND/50N1vv2MUAAzEyAH0AAAAFZAAgAAAAAOGQcCBkk+j/Kzjt/Cs6g3BZPJG81wIHBS8JewHGpgk+BXMAIAAAAABjrxZXWCkdzrExwCgyHaafuPSQ4V4x2k9kUCAqUaYKDQVsACAAAAAADBU6KefT0v8zSmseaMNmQxKjJar72y7MojLFhkEHqrUAAzEzAH0AAAAFZAAgAAAAAPmCNEt4t97waOSd5hNi2fNCdWEkmcFJ37LI9k4Az4/5BXMAIAAAAABX7DuDPNg+duvELf3NbLWkPMFw2HGLgWGHyVWcPvSNCAVsACAAAAAAS7El1FtZ5STh8Q1FguvieyYX9b2DF1DFVsb9hzxXYRsAAzE0AH0AAAAFZAAgAAAAAD4vtVUYRNB+FD9yoQ2FVJH3nMeJeKbi6eZfth638YqbBXMAIAAAAAANCuUB4OdmuD6LaDK2f3vaqfgYYvg40wDXOBbcFjTqLwVsACAAAAAA9hqC2VoJBjwR7hcQ45xO8ZVojwC83jiRacCaDj6Px2gAAzE1AH0AAAAFZAAgAAAAAJPIRzjmTjbdIvshG6UslbEOd797ZSIdjGAhGWxVQvK1BXMAIAAAAABgmJ0Jh8WLs9IYs/a7DBjDWd8J3thW/AGJK7zDnMeYOAVsACAAAAAAi9zAsyAuou2oiCUHGc6QefLUkACa9IgeBhGu9W/r0X8AAzE2AH0AAAAFZAAgAAAAAABQyKQPoW8wGPIqnsTv69+DzIdRkohRhOhDmyVHkw9WBXMAIAAAAAAqWA2X4tB/h3O1Xlawtz6ndI6WaTwgU1QYflL35opu5gVsACAAAAAAWI/Gj5aZMwDIxztqmVL0g5LBcI8EdKEc2UA28pnekQoAAzE3AH0AAAAFZAAgAAAAACB7NOyGQ1Id3MYnxtBXqyZ5Ul/lHH6p1b10U63DfT6bBXMAIAAAAADpOryIcndxztkHSfLN3Kzq29sD8djS0PspDSqERMqokQVsACAAAAAADatsMW4ezgnyi1PiP7xk+gA4AFIN/fb5uJqfVkjg4UoAAzE4AH0AAAAFZAAgAAAAAKVfXLfs8XA14CRTB56oZwV+bFJN5BHraTXbqEXZDmTkBXMAIAAAAAASRWTsfGOpqdffiOodoqIgBzG/yzFyjR5CfUsIUIWGpgVsACAAAAAAkgCHbCwyX640/0Ni8+MoYxeHUiC+FSU4Mn9jTLYtgZgAAzE5AH0AAAAFZAAgAAAAAH/aZr4EuS0/noQR9rcF8vwoaxnxrwgOsSJ0ys8PkHhGBXMAIAAAAACd7ObGQW7qfddcvyxRTkPuvq/PHu7+6I5dxwS1Lzy5XAVsACAAAAAA3q0eKdV7KeU3pc+CtfypKR7BPxwaf30yu0j9FXeOOboAAzIwAH0AAAAFZAAgAAAAAKvlcpFFNq0oA+urq3w6d80PK1HHHw0H0yVWvU9aHijXBXMAIAAAAADWnAHQ5Fhlcjawki7kWz
dqjM2f6IdGJblojrYElWjsZgVsACAAAAAAO0wvY66l24gx8nRxyVGC0QcTztIi81Kx3ndRhuZr6W4AAzIxAH0AAAAFZAAgAAAAAH/2aMezEOddrq+dNOkDrdqf13h2ttOnexZsJxG1G6PNBXMAIAAAAABNtgnibjC4VKy5poYjvdsBBnVvDTF/4mmEAxsXVgZVKgVsACAAAAAAqvadzJFLqQbs8WxgZ2D2X+XnaPSDMLCVVgWxx5jnLcYAAzIyAH0AAAAFZAAgAAAAAF2wZoDL6/V59QqO8vdRZWDpXpkV4h4KOCSn5e7x7nmzBXMAIAAAAADLZBu7LCYjbThaVUqMK14H/elrVOYIKJQCx4C9Yjw37gVsACAAAAAAEh6Vs81jLU204aGpL90fmYTm5i5R8/RT1uIbg6VU3HwAAzIzAH0AAAAFZAAgAAAAAH27yYaLn9zh2CpvaoomUPercSfJRUmBY6XFqmhcXi9QBXMAIAAAAAAUwumVlIYIs9JhDhSj0R0+59psCMsFk94E62VxkPt42QVsACAAAAAAT5x2hCCd2bpmpnyWaxas8nSxTc8e4C9DfKaqr0ABEysAAzI0AH0AAAAFZAAgAAAAALMg2kNAO4AFFs/mW3In04yFeN4AP6Vo0klyUoT06RquBXMAIAAAAAAgGWJbeIdwlpqXCyVIYSs0dt54Rfc8JF4b8uYc+YUj0AVsACAAAAAAWHeWxIkyvXTOWvfZzqtPXjfGaWWKjGSIQENTU3zBCrsAAzI1AH0AAAAFZAAgAAAAALas/i1T2DFCEmrrLEi7O2ngJZyFHialOoedVXS+OjenBXMAIAAAAAA1kK0QxY4REcGxHeMkgumyF7iwlsRFtw9MlbSSoQY7uAVsACAAAAAAUNlpMJZs1p4HfsD4Q4WZ4TBEi6Oc2fX34rzyynqWCdwAAzI2AH0AAAAFZAAgAAAAAP1TejmWg1CEuNSMt6NUgeQ5lT+oBoeyF7d2l5xQrbXWBXMAIAAAAABPX0kj6obggdJShmqtVfueKHplH4ZrXusiwrRDHMOKeQVsACAAAAAAIYOsNwC3DA7fLcOzqdr0bOFdHCfmK8tLwPoaE9uKOosAAzI3AH0AAAAFZAAgAAAAAMrKn+QPa/NxYezNhlOX9nyEkN1kE/gW7EuZkVqYl0b8BXMAIAAAAABUoZMSPUywRGfX2EEencJEKH5x/P9ySUVrhStAwgR/LgVsACAAAAAAMgZFH6lQIIDrgHnFeslv3ld20ynwQjQJt3cAp4GgrFkAAzI4AH0AAAAFZAAgAAAAAMmD1+a+oVbiUZd1HuZqdgtdVsVKwuWAn3/M1B6QGBM3BXMAIAAAAACLyytOYuZ9WEsIrrtJbXUx4QgipbaAbmlJvSZVkGi0CAVsACAAAAAA4v1lSp5H9BB+HYJ4bH43tC8aeuPZMf78Ng1JOhJh190AAzI5AH0AAAAFZAAgAAAAAOVKV7IuFwmYP1qVv8h0NvJmfPICu8yQhzjG7oJdTLDoBXMAIAAAAABL70XLfQLKRsw1deJ2MUvxSWKxpF/Ez73jqtbLvqbuogVsACAAAAAAvfgzIorXxE91dDt4nQxYfntTsx0M8Gzdsao5naQqcRUAAzMwAH0AAAAFZAAgAAAAAKS/1RSAQma+xV9rz04IcdzmavtrBDjOKPM+Z2NEyYfPBXMAIAAAAAAOJDWGORDgfRv8+w5nunh41wXb2hCA0MRzwnLnQtIqPgVsACAAAAAAf42C1+T7xdHEFF83+c2mF5S8PuuL22ogXXELnRAZ4boAAzMxAH0AAAAFZAAgAAAAAFeq8o82uNY1X8cH6OhdTzHNBUnCChsEDs5tm0kPBz3qBXMAIAAAAABaxMBbsaeEj/EDtr8nZfrhhhirBRPJwVamDo5WwbgvTQVsACAAAAAAMbH453A+BYAaDOTo5kdhV1VdND1avNwvshEG/4MIJjQAAzMyAH0AAAAFZAAgAAAAAI8IKIfDrohHh2cjspJHCovqroSr5N3QyVtNzFvT5+FzBXMAIAAAAABXHXteKG0DoOMmECKp6ro1MZNQvXGzqTDdZ0DUc8QfFAVsACAAAAAA/w5s++XYmO+9TWTbtGc3n3ndV4T9JUribIbF4jmDLSMAAzMzAH0AAAAFZAAgAAAAAJkHvm15kIu1OtAiaByj5ieWqzxiu/epK6c/9+KYIrB0BXMAIAAAAACzg5TcyANk0nes/wCJudd1BwlkWWF6zw3nGclq5v3SJQVsACAAAAAAvruXHTT3irPJLyWpI1j/Xwf2FeIE/IV+6Z49pqRzISoAAzM0AH0AAAAFZAAgAAAAAAYSOvEWWuSg1Aym7EssNLR+xsY7e9BcwsX4JKlnSHJcBXMAIAAAAABT48eY3PXVDOjw7JpNjOe1j2JyI3LjDnQoqZ8Je5B2KgVsACAAAAAAU2815RR57TQ9uDg0XjWjBkAKvf8yssxDMzrM4+FqP6AAAzM1AH0AAAAFZAAgAAAAAGQxC9L1e9DfO5XZvX1yvc3hTLtQEdKO9FPMkyg0Y9ZABXMAIAAAAADtmcMNJwdWLxQEArMGZQyzpnu+Z5yMmPAkvgq4eAKwNQVsACAAAAAAJ88zt4Y/Hoqh+zrf6KCOiUwHbOzCxSfp6k/qsZaYGEgAAzM2AH0AAAAFZAAgAAAAADLHK2LNCNRO0pv8n4fAsxwtUqCNnVK8rRgNiQfXpHSdBXMAIAAAAACf16EBIHRKD3SzjRW+LMOl+47QXA3CJhMzlcqyFRW22AVsACAAAAAAMGz4fAOa0EoVv90fUffwLjBrQhHATf+NdlgCR65vujAAAzM3AH0AAAAFZAAgAAAAAHiZJiXKNF8bbukQGsdYkEi95I+FSBHy1I5/hK2uEZruBXMAIAAAAADE+lZBa8HDUJPN+bF6xI9x4N7GF9pj3vBR7y0BcfFhBAVsACAAAAAAGIEN6sfqq30nyxW4dxDgXr/jz5HmvA9T1jx/pKCn4zgAAzM4AH0AAAAFZAAgAAAAAI1oa2OIw5TvhT14tYCGmhanUoYcCZtNbrVbeoMldHNZBXMAIAAAAAAx2nS0Ipblf2XOgBiUOuJFBupBhe7nb6QPLZlA4aMPCgVsACAAAAAA9xu828hugIgo0E3de9dZD+gTpVUGlwtDba+tw/WcbUoAAzM5AH0AAAAFZAAgAAAAABgTWS3Yap7Q59hii/uPPimHWXsr+DUmsqfwt/X73qsOBXMAIAAAAACKK05liW5KrmEAvtpCB1WUltruzUylDDpjea//UlWoOAVsACAAAAAAcgN4P/wakJ5aJK5c1bvJBqpVGND221dli2YicPFfuAYAAzQwAH0AAAAFZAAgAAAAABOAnBPXDp6i9TISQXvcNKwGDLepZTu3cKrB4vKnSCjBBXMAIAAAAADjjzZO7UowAAvpwyG8BNOVqLCccMFk3aDK4unUeft5ywVsACAAAAAA4zkCd4k9gvfXoD1C7vwTjNcdVJwEARh8h/cxZ4PNMfgAAzQxAH0AAAAFZAAgAAAAAHN8hyvT1lYrAsdiV
5GBdd5jhtrAYE/KnSjw2Ka9hjz9BXMAIAAAAAD794JK7EeXBs+D7yOVK7nWF8SbZ/7U8gZ7nnT9JFNwTAVsACAAAAAAg8Wt1HO3NhByq2ggux2a4Lo6Gryr24rEFIqh2acrwWMAAzQyAH0AAAAFZAAgAAAAAO93bPrq8bsnp1AtNd9ETnXIz0lH/2HYN/vuw9wA3fyFBXMAIAAAAABHlls5fbaF2oAGqptC481XQ4eYxInTC29aElfmVZgDUgVsACAAAAAANoQXEWpXJpgrSNK/cKi/m7oYhuSRlp1IZBF0bqTEATcAAzQzAH0AAAAFZAAgAAAAAL1YsAZm1SA0ztU6ySIrQgCCA74V6rr0/4iIygCcaJL6BXMAIAAAAADTXWTHWovGmUR1Zg9l/Aqq9H5mOCJQQrb/Dfae7e3wKAVsACAAAAAA5dunyJK6/SVfDD0t9QlNBcFqoZnf9legRjHaLSKAoQMAAzQ0AH0AAAAFZAAgAAAAAEoFAeHk0RZ9kD+cJRD3j7PcE5gzWKnyBrF1I/MDNp5mBXMAIAAAAACgHtc2hMBRSZjKw8RAdDHK+Pi1HeyjiBuAslGVNcW5tAVsACAAAAAAXzBLfq+GxRtX4Wa9fazA49DBLG6AjZm2XODStJKH8D0AAzQ1AH0AAAAFZAAgAAAAAAW+7DmSN/LX+/0uBVJDHIc2dhxAGz4+ehyyz8fAnNGoBXMAIAAAAAA6Ilw42EvvfLJ3Eq8Afd+FjPoPcQutZO6ltmCLEr8kxQVsACAAAAAAbbZalyo07BbFjPFlYmbmv0z023eT9eLkHqeVUnfUAUAAAzQ2AH0AAAAFZAAgAAAAANBdV7M7kuYO3EMoQItAbXv4t2cIhfaT9V6+s4cg9djlBXMAIAAAAABvz4MIvZWxxrcJCL5qxLfFhXiUYB1OLHdKEjco94SgDgVsACAAAAAAK2GVGvyPIKolF/ECcmfmkVcf1/IZNcaTv96N92yGrkEAAzQ3AH0AAAAFZAAgAAAAAMoAoiAn1kc79j5oPZtlMWHMhhgwNhLUnvqkqIFvcH1NBXMAIAAAAADcJTW7WiCyW0Z9YDUYwppXhLj4Ac1povpJvcAq+i48MQVsACAAAAAAIGxGDzoeB3PTmudl4+j6piQB++e33EEzuzAiXcqGxvUAAzQ4AH0AAAAFZAAgAAAAACI3j5QP7dWHpcT6WO/OhsWwRJNASBYqIBDNzW8IorEyBXMAIAAAAABxUpBSjXwCKDdGP9hYU+RvyR+96kChfvyyRC4jZmztqAVsACAAAAAAvBCHguWswb4X0xdcAryCvZgQuthXzt7597bJ5VxAMdgAAzQ5AH0AAAAFZAAgAAAAAKsbycEuQSeNrF8Qnxqw3x3og8JmQabwGqnDbqzFRVrrBXMAIAAAAACno/3ef2JZJS93SVVzmOZSN+jjJHT8s0XYq2M46d2sLAVsACAAAAAAAt5zLJG+/j4K8rnkFtAn8IvdUVNefe6utJ3rdzgwudIAAzUwAH0AAAAFZAAgAAAAAPXIcoO8TiULqlxzb74NFg+I8kWX5uXIDUPnh2DobIoMBXMAIAAAAADR6/drkdTpnr9g1XNvKDwtBRBdKn7c2c4ZNUVK5CThdQVsACAAAAAAJqOA1c6KVog3F4Hb/GfDb3jCxXDRTqpXWSbMH4ePIJsAAzUxAH0AAAAFZAAgAAAAAEa03ZOJmfHT6/nVadvIw71jVxEuIloyvxXraYEW7u7pBXMAIAAAAADzRlBJK75FLiKjz3djqcgjCLo/e3yntI3MnPS48OORhgVsACAAAAAAnQhx4Rnyj081XrLRLD5NLpWmRWCsd0M9Hl7Jl19R0h8AAzUyAH0AAAAFZAAgAAAAAKx8NLSZUU04pSSGmHa5fh2oLHsEN5mmNMNHL95/tuC9BXMAIAAAAAA59hcXVaN3MNdHoo11OcH1aPRzHCwpVjO9mGfMz4xh3QVsACAAAAAAYIPdjV2XbPj7dBeHPwnwhVU7zMuJ+xtMUW5mIOYtmdAAAzUzAH0AAAAFZAAgAAAAAHNKAUxUqBFNS9Ea9NgCZoXMWgwhP4x0/OvoaPRWMquXBXMAIAAAAABUZ551mnP4ZjX+PXU9ttomzuOpo427MVynpkyq+nsYCQVsACAAAAAALnVK5p2tTTeZEh1zYt4iqKIQT9Z0si//Hy1L85oF+5IAAzU0AH0AAAAFZAAgAAAAALfGXDlyDVcGaqtyHkLT0qpuRhJQLgCxtznazhFtuyn/BXMAIAAAAABipxlXDq14C62pXhwAeen5+syA+/C6bN4rtZYcO4zKwAVsACAAAAAAXUf0pzUq0NhLYagWDap4uEiwq5rLpcx29rWbt1NYMsMAAzU1AH0AAAAFZAAgAAAAANoEr8sheJjg4UCfBkuUzarU9NFoy1xwbXjs5ifVDeA9BXMAIAAAAABPoyTf6M+xeZVGES4aNzVlq7LgjqZXJ/QunjYVusGUEAVsACAAAAAA1hA2gMeZZPUNytk9K+lB1RCqWRudRr7GtadJlExJf8oAAzU2AH0AAAAFZAAgAAAAAKvDiK+xjlBe1uQ3SZTNQl2lClIIvpP/5CHwY6Kb3WlgBXMAIAAAAAANnxImq5MFbWaRBHdJp+yD09bVlcFtiFDYsy1eDZj+iQVsACAAAAAAWtsyO+FxMPSIezwsV1TJD8ZrXAdRnQM6DJ+f+1V3qEkAAzU3AH0AAAAFZAAgAAAAAF49IlFH9RmSUSvUQpEPUedEksrQUcjsOv44nMkwXhjzBXMAIAAAAADJtWGbk0bZzmk20obz+mNsp86UCu/nLLlbg7ppxYn7PgVsACAAAAAA3k0Tj/XgPQtcYijH8cIlQoe/VXf15q1nrZNmg7yWYEgAAzU4AH0AAAAFZAAgAAAAAOuSJyuvz50lp3BzXlFKnq62QkN2quNU1Gq1IDsnFoJCBXMAIAAAAAAqavH1d93XV3IzshWlMnzznucadBF0ND092/2ApI1AcAVsACAAAAAAzUrK4kpoKCmcpdZlZNI13fddjdoAseVe67jaX1LobIIAAzU5AH0AAAAFZAAgAAAAALtgC4Whb4ZdkCiI30zY6fwlsxSa7lEaOAU3SfUXr02XBXMAIAAAAACgdZ6U1ZVgUaZZwbIaCdlANpCw6TZV0bwg3DS1NC/mnAVsACAAAAAAzI49hdpp0PbO7S2KexISxC16sE73EUAEyuqUFAC/J48AAzYwAH0AAAAFZAAgAAAAAF6PfplcGp6vek1ThwenMHVkbZgrc/dHgdsgx1VdPqZ5BXMAIAAAAACha3qhWkqmuwJSEXPozDO8y1ZdRLyzt9Crt2vjGnT7AAVsACAAAAAA7nvcU59+LwxGupSF21jAeAE0x7JE94tjRkJfgM1yKU8AAzYxAH0AAAAFZAAgAAAAAKoLEhLvLjKc7lhOJfx+VrGJCx9tXlOSa9bxQzGR6rfbBXMAIAAAAAAIDK5wNnjRMBzET7x/KAMExL/zi1IumJM92XTgXfoPoAVsACAAAAAAFkUYWFwNr815dEdFqp+TiIoz
Dcq5IBNVkyMoDjharDQAAzYyAH0AAAAFZAAgAAAAADoQv6lutRmh5scQFvIW6K5JBquLxszuygM1tzBiGknIBXMAIAAAAADAD+JjW7FoBQ76/rsECmmcL76bmyfXpUU/awqIsZdO+wVsACAAAAAAPFHdLw3jssmEXsgtvl/RBNaUCRA1kgSwsofG364VOvQAAzYzAH0AAAAFZAAgAAAAAJNHUGAgn56KekghO19d11nai3lAh0JAlWfeP+6w4lJBBXMAIAAAAAD9XGJlvz59msJvA6St9fKW9CG4JoHV61rlWWnkdBRLzwVsACAAAAAAxwP/X/InJJHmrjznvahIMgj6pQR30B62UtHCthSjrP0AAzY0AH0AAAAFZAAgAAAAAHgYoMGjEE6fAlAhICv0+doHcVX8CmMVxyq7+jlyGrvmBXMAIAAAAAC/5MQZgTHuIr/O5Z3mXPvqrom5JTQ8IeSpQGhO9sB+8gVsACAAAAAAuPSXVmJUAUpTQg/A9Bu1hYczZF58KEhVofakygbsvJQAAzY1AH0AAAAFZAAgAAAAANpIljbxHOM7pydY877gpRQvYY2TGK7igqgGsavqGPBABXMAIAAAAAAqHyEu9gpurPOulApPnr0x9wrygY/7mXe9rAC+tPK80wVsACAAAAAA7gkPzNsS3gCxdFBWbSW9tkBjoR5ib+saDvpGSB3A3ogAAzY2AH0AAAAFZAAgAAAAAGR+gEaZTeGNgG9BuM1bX2R9ed4FCxBA9F9QvdQDAjZwBXMAIAAAAABSkrYFQ6pf8MZ1flgmeIRkxaSh/Eep4Btdx4QYnGGnwAVsACAAAAAApRovMiV00hm/pEcT4XBsyPNw0eo8RLAX/fuabjdU+uwAAzY3AH0AAAAFZAAgAAAAAFNprhQ3ZwIcYbuzLolAT5n/vc14P9kUUQComDu6eFyKBXMAIAAAAAAcx9z9pk32YbPV/sfPZl9ALIEVsqoLXgqWLVK/tP+heAVsACAAAAAA/qxvuvJbAHwwhfrPVpmCFzNvg2cU/NXaWgqgYUZpgXwAAzY4AH0AAAAFZAAgAAAAADgyPqQdqQrgfmJjRFAILTHzXbdw5kpKyfeoEcy6YYG/BXMAIAAAAAAE+3XsBQ8VAxAkN81au+f3FDeCD/s7KoZD+fnM1MJSSAVsACAAAAAAhRnjrXecwV0yeCWKJ5J/x12Xx4qVJahsCEVHB/1U2rcAAzY5AH0AAAAFZAAgAAAAAI0CT7JNngTCTUSei1Arw7eHWCD0jumv2rb7imjWIlWABXMAIAAAAABSP8t6ya0SyCphXMwnru6ZUDXWElN0NfBvEOhDvW9bJQVsACAAAAAAGWeGmBNDRaMtvm7Rv+8TJ2sJ4WNXKcp3tqpv5Se9Ut4AAzcwAH0AAAAFZAAgAAAAAD/FIrGYFDjyYmVb7oTMVwweWP7A6F9LnyIuNO4MjBnXBXMAIAAAAACIZgJCQRZu7NhuNMyOqCn1tf+DfU1qm10TPCfj5JYV3wVsACAAAAAA5hmY4ptuNxULGf87SUFXQWGAONsL9U29duh8xqsHtxoAAzcxAH0AAAAFZAAgAAAAAHIkVuNDkSS1cHIThKc/O0r2/ubaABTOi8Q1r/dvBAsEBXMAIAAAAADdHYqchEiJLM340c3Q4vJABmmth3+MKzwLYlsG6GS7sQVsACAAAAAADa+KP/pdTiG22l+ZWd30P1iHjnBF4zSNRdFm0oEK82kAAzcyAH0AAAAFZAAgAAAAAJmoDILNhC6kn3masElfnjIjP1VjsjRavGk1gSUIjh1NBXMAIAAAAAD97Ilvp3XF8T6MmVVcxMPcdL80RgQ09UoC6PnoOvZ1IQVsACAAAAAA2RK3Xng6v8kpvfVW9tkVXjpE+BSnx9/+Fw85Evs+kUEAAzczAH0AAAAFZAAgAAAAAI5bm3YO0Xgf0VT+qjVTTfvckecM3Cwqj7DTKZXf8/NXBXMAIAAAAAD/m+h8fBhWaHm6Ykuz0WX1xL4Eme3ErLObyEVJf8NCywVsACAAAAAAfb1VZZCqs2ivYbRzX4p5CtaCkKW+g20Pr57FWXzEZi8AAzc0AH0AAAAFZAAgAAAAANqo4+p6qdtCzcB4BX1wQ6llU7eFBnuu4MtZwp4B6mDlBXMAIAAAAAAGiz+VaukMZ+6IH4jtn4KWWdKK4/W+O+gRioQDrfzpMgVsACAAAAAAG4YYkTp80EKo59mlHExDodRQFR7njhR5dmISwUJ6ukAAAzc1AH0AAAAFZAAgAAAAAPrFXmHP2Y4YAm7b/aqsdn/DPoDkv7B8egWkfe23XsM1BXMAIAAAAAAGhwpKAr7skeqHm3oseSbO7qKNhmYsuUrECBxJ5k+D2AVsACAAAAAAAqPQi9luYAu3GrFCEsVjd9z2zIDcp6SPTR2w6KQEr+IAAzc2AH0AAAAFZAAgAAAAABzjYxwAjXxXc0Uxv18rH8I3my0Aguow0kTwKyxbrm+cBXMAIAAAAADVbqJVr6IdokuhXkEtXF0C2gINLiAjMVN20lE20Vmp2QVsACAAAAAAD7K1Fx4gFaaizkIUrf+EGXQeG7QX1jadhGc6Ji471H8AAzc3AH0AAAAFZAAgAAAAAFMm2feF2fFCm/UC6AfIyepX/xJDSmnnolQIBnHcPmb5BXMAIAAAAABLI11kFrQoaNVZFmq/38aRNImPOjdJh0Lo6irI8M/AaAVsACAAAAAAOWul0oVqJ9CejD2RqphhTC98DJeRQy5EwbNerU2+4l8AAzc4AH0AAAAFZAAgAAAAAJvXB3KyNiNtQko4SSzo/9b2qmM2zU9CQTTDfLSBWMgRBXMAIAAAAAAvjuVP7KsLRDeqVqRziTKpBrjVyqKiIbO9Gw8Wl2wFTAVsACAAAAAADlE+oc1ins+paNcaOZJhBlKlObDJ4VQORWjFYocM4LgAAzc5AH0AAAAFZAAgAAAAAPGdcxDiid8z8XYnfdDivNMYVPgBKdGOUw6UStU+48CdBXMAIAAAAAARj6g1Ap0eEfuCZ4X2TsEw+Djrhto3fA5nLwPaY0vCTgVsACAAAAAAoHqiwGOUkBu8SX5U1yHho+UIFdSN2MdQN5s6bQ0EsJYAAzgwAH0AAAAFZAAgAAAAAP5rGPrYGt3aKob5f/ldP0qrW7bmWvqnKY4QwdDWz400BXMAIAAAAADTQkW2ymaaf/bhteOOGmSrIR97bAnJx+yN3yMj1bTeewVsACAAAAAADyQnHGH2gF4w4L8axUsSTf6Ubk7L5/eoFOJk12MtZAoAAzgxAH0AAAAFZAAgAAAAAAlz6wJze5UkIxKpJOZFGCOf3v2KByWyI6NB6JM9wNcBBXMAIAAAAABUC7P/neUIHHoZtq0jFVBHY75tSFYr1Y5S16YN5XxC1QVsACAAAAAAgvxRbXDisNnLY3pfsjDdnFLtkvYUC4lhA68eBXc7KAwAAzgyAH0AAAAFZAAgAAAAAFJ8AtHcjia/9Y5pLEc3qVgH5xKiXw12G9Kn2A1EY8McBXMAIAAAAAAxe7Bdw7eUSBk/oAawa7u
icTEDgXLymRNhBy1LAxhDvwVsACAAAAAAxKPaIBKVx3jTA+R/el7P7AZ7efrmTGjJs3Hj/YdMddwAAzgzAH0AAAAFZAAgAAAAAO8uwQUaKFb6vqR3Sv3Wn4QAonC2exOC9lGG1juqP5DtBXMAIAAAAABZf1KyJgQg8/Rf5c02DgDK2aQu0rNCOvaL60ohDHyY+gVsACAAAAAAqyEjfKC8lYoIfoXYHUqHZPoaA6EK5BAZy5dxXZmay4kAAzg0AH0AAAAFZAAgAAAAAE8YtqyRsGCeiR6hhiyisR/hccmK4nZqIMzO4lUBmEFzBXMAIAAAAAC1UYOSKqAeG1UJiKjWFVskRhuFKpj9Ezy+lICZvFlN5AVsACAAAAAA6Ct9nNMKyRazn1OKnRKagm746CGu+jyhbL1qJnZxGi0AAzg1AH0AAAAFZAAgAAAAAPhCrMausDx1QUIEqp9rUdRKyM6a9AAx7jQ3ILIu8wNIBXMAIAAAAACmH8lotGCiF2q9VQxhsS+7LAZv79VUAsOUALaGxE/EpAVsACAAAAAAnc1xCKfdvbUEc8F7XZqlNn1C+hZTtC0I9I3LL06iaNkAAzg2AH0AAAAFZAAgAAAAAOBi/GAYFcstMSJPgp3VkMiuuUUCrZytvqYaU8dwm8v2BXMAIAAAAACEZSZVyD3pKzGlbdwlYmWQhHHTV5SnNLknl2Gw8IaUTQVsACAAAAAAfsLZsEDcWSuNsIo/TD1ReyQW75HPMgmuKZuWFOLKRLoAAzg3AH0AAAAFZAAgAAAAAIQuup+YGfH3mflzWopN8J1X8o8a0d9CSGIvrA5HOzraBXMAIAAAAADYvNLURXsC2ITMqK14LABQBI+hZZ5wNf24JMcKLW+84AVsACAAAAAACzfjbTBH7IwDU91OqLAz94RFkoqBOkzKAqQb55gT4/MAAzg4AH0AAAAFZAAgAAAAAKsh0ADyOnVocFrOrf6MpTrNvAj8iaiE923DPryu124gBXMAIAAAAADg24a8NVE1GyScc6tmnTbmu5ulzO+896fE92lN08MeswVsACAAAAAAaPxcOIxnU7But88/yadOuDJDMcCywwrRitaxMODT4msAAzg5AH0AAAAFZAAgAAAAAKkVC2Y6HtRmv72tDnPUSjJBvse7SxLqnr09/Uuj9sVVBXMAIAAAAABYNFUkH7ylPMN+Bc3HWX1e0flGYNbtJNCY9SltJCW/UAVsACAAAAAAZYK/f9H4OeihmpiFMH7Wm7uLvs2s92zNA8wyrNZTsuMAAzkwAH0AAAAFZAAgAAAAADDggcwcb/Yn1Kk39sOHsv7BO/MfP3m/AJzjGH506Wf9BXMAIAAAAAAYZIsdjICS0+BDyRUPnrSAZfPrwtuMaEDEn0/ijLNQmAVsACAAAAAAGPnYVvo2ulO9z4LGd/69NAklfIcZqZvFX2KK0s+FcTUAAzkxAH0AAAAFZAAgAAAAAEWY7dEUOJBgjOoWVht1wLehsWAzB3rSOBtLgTuM2HC8BXMAIAAAAAAAoswiHRROurjwUW8u8D5EUT+67yvrgpB/j6PzBDAfVwVsACAAAAAA6NhRTYFL/Sz4tao7vpPjLNgAJ0FX6P/IyMW65qT6YsMAAzkyAH0AAAAFZAAgAAAAAPZaapeAUUFPA7JTCMOWHJa9lnPFh0/gXfAPjA1ezm4ZBXMAIAAAAACmJvLY2nivw7/b3DOKH/X7bBXjJwoowqb1GtEFO3OYgAVsACAAAAAA/JcUoyKacCB1NfmH8vYqC1f7rd13KShrQqV2r9QBP44AAzkzAH0AAAAFZAAgAAAAAK00u6jadxCZAiA+fTsPVDsnW5p5LCr4+kZZZOTDuZlfBXMAIAAAAAAote4zTEYMDgaaQbAdN8Dzv93ljPLdGjJzvnRn3KXgtQVsACAAAAAAxXd9Mh6R3mnJy8m7UfqMKi6oD5DlZpkaOz6bEjMOdiwAAzk0AH0AAAAFZAAgAAAAAFbgabdyymiEVYYwtJSWa7lfl/oYuj/SukzJeDOR6wPVBXMAIAAAAADAFGFjS1vPbN6mQEhkDYTD6V2V23Ys9gUEUMGNvMPkaAVsACAAAAAAL/D5Sze/ZoEanZLK0IeEkhgVkxEjMWVCfmJaD3a8uNIAAzk1AH0AAAAFZAAgAAAAABNMR6UBv2E627CqLtQ/eDYx7OEwQ7JrR4mSHFa1N8tLBXMAIAAAAAAxH4gucI4UmNVB7625C6hFSVCuIpJO3lusJlPuL8H5EQVsACAAAAAAVLHNg0OUVqZ7WGOP53BkTap9FOw9dr1P4J8HxqFqU04AAzk2AH0AAAAFZAAgAAAAAG8cd6WBneNunlqrQ2EmNf35W7OGObGq9WL4ePX+LUDmBXMAIAAAAAAjJ2+sX87NSis9hBsgb1QprVRnO7Bf+GObCGoUqyPE4wVsACAAAAAAs9c9SM49/pWmyUQKslpt3RTMBNSRppfNO0JBvUqHPg0AAzk3AH0AAAAFZAAgAAAAAFWOUGkUpy8yf6gB3dio/aOfRKh7XuhvsUj48iESFJrGBXMAIAAAAAAY7sCDMcrUXvNuL6dO0m11WyijzXZvPIcOKob6IpC4PQVsACAAAAAAJOP+EHz6awDb1qK2bZQ3kTV7wsj5Daj/IGAWh4g7omAAAzk4AH0AAAAFZAAgAAAAAGUrIdKxOihwNmo6B+aG+Ag1qa0+iqdksHOjQj+Oy9bZBXMAIAAAAABwa5dbI2KmzBDNBTQBEkjZv4sPaeRkRNejcjdVymRFKQVsACAAAAAA4ml/nm0gJNTcJ4vuD+T2Qfq2fQZlibJp/j6MOGDrbHMAAzk5AH0AAAAFZAAgAAAAAOx89xV/hRk64/CkM9N2EMK6aldII0c8smdcsZ46NbP8BXMAIAAAAADBF6tfQ+7q9kTuLyuyrSnDgmrdmrXkdhl980i1KHuGHgVsACAAAAAACUqiFqHZdGbwAA+hN0YUE5zFg+H+dabIB4dj5/75W/YAAzEwMAB9AAAABWQAIAAAAADJDdC9aEFl4Y8J/awHbnXGHjfP+VXQilPHJg7ewaJI7AVzACAAAAAAE+tqRl6EcBMXvbr4GDiNIYObTsYpa1n6BJk9EjIJVicFbAAgAAAAAJVc+HYYqa0m1Hq6OiRX8c0iRnJYOt6AJAJoG0sG3GMSAAMxMDEAfQAAAAVkACAAAAAA3F9rjEKhpoHuTULVGgfUsGGwJs3bISrXkFP1v6KoQLgFcwAgAAAAAIBf0tXw96Z/Ds0XSIHX/zk3MzUR/7WZR/J6FpxRWChtBWwAIAAAAABWrjGlvKYuTS2s8L9rYy8Hf0juFGJfwQmxVIjkTmFIGQADMTAyAH0AAAAFZAAgAAAAAOYIYoWkX7dGuyKfi3XssUlc7u/gWzqrR9KMkikKVdmSBXMAIAAAAABVF2OYjRTGi9Tw8XCAwZWLpX35Yl271TlNWp6N/nROhAVsACAAAAAA0nWwYzXQ1+EkDvnGq+SMlq20z+j32Su+i/A95SggPb4AAzEwMwB9AAAABWQAIAAAAACMtPm12YtdEA
vqu6Eji1yuRXnu1RJP6h0l7pH3lSH4MwVzACAAAAAAENyCFfyUAh1veQBGx+cxiB7Sasrj41jzCGflZkB5cRMFbAAgAAAAAKdI2LMqISr/T5vuJPg6ZRBm5fVi2aQCc4ra3A4+AjbDAAMxMDQAfQAAAAVkACAAAAAAvlI4lDcs6GB1cnm/Tzo014CXWqidCdyE5t2lknWQd4QFcwAgAAAAAD60SpNc4O2KT7J0llKdSpcX1/Xxs97N715a1HsTFkmBBWwAIAAAAABuuRkJWAH1CynggBt1/5sPh9PoGiqTlS24D/OE2uHXLQADMTA1AH0AAAAFZAAgAAAAAKl8zcHJRDjSjJeV/WvMxulW1zrTFtaeBy/aKKhadc6UBXMAIAAAAADBdWQl5SBIvtZZLIHszePwkO14W1mQ0izUk2Ov21cPNAVsACAAAAAAHErCYycpqiIcCZHdmPL1hi+ovLQk4TAvENpfLdTRamQAAzEwNgB9AAAABWQAIAAAAABb6LXDWqCp1beQgQjj8I3sRTtFhlrmiBi+h/+ikmrvugVzACAAAAAA9stpgTecT7uTyaGNs3K9Bp0A7R0QaIAOfscyMXHBPX8FbAAgAAAAAHUt+McyXrJ1H8SwnHNVO181Ki8vDAM1f7XI26mg95ZDAAMxMDcAfQAAAAVkACAAAAAA97NTT+81PhDhgptNtp4epzA0tP4iNb9j1AWkiiiKGM8FcwAgAAAAAKPbHg7ise16vxmdPCzksA/2Mn/qST0L9Xe8vnQugVkcBWwAIAAAAABB0EMXfvju4JU/mUH/OvxWbPEl9NJkcEp4iCbkXI41fAADMTA4AH0AAAAFZAAgAAAAAMqpayM2XotEFmm0gwQd9rIzApy0X+7HfOhNk6VU7F5lBXMAIAAAAACJR9+q5T9qFHXFNgGbZnPubG8rkO6cwWhzITQTmd6VgwVsACAAAAAAOZLQ6o7e4mVfDzbpQioa4d3RoTvqwgnbmc5Qh2wsZuoAAzEwOQB9AAAABWQAIAAAAADQnslvt6Hm2kJPmqsTVYQHE/wWeZ4bE1XSkt7TKy0r1gVzACAAAAAA8URTA4ZMrhHPvlp53TH6FDCzS+0+61qHm5XK6UiOrKEFbAAgAAAAAHQbgTCdZcbdA0avaTmZXUKnIS7Nwf1tNrcXDCw+PdBRAAMxMTAAfQAAAAVkACAAAAAAhujlgFPFczsdCGXtQ/002Ck8YWQHHzvWvUHrkbjv4rwFcwAgAAAAALbV0lLGcSGfE7mDM3n/fgEvi+ifjl7WZ5b3aqjDNvx9BWwAIAAAAACbceTZy8E3QA1pHmPN5kTlOx3EO8kJM5PUjTVftw1VpgADMTExAH0AAAAFZAAgAAAAABm/6pF96j26Jm7z5KkY1y33zcAEXLx2n0DwC03bs/ixBXMAIAAAAAD01OMvTZI/mqMgxIhA5nLs068mW+GKl3OW3ilf2D8+LgVsACAAAAAAaLvJDrqBESTNZSdcXsd+8GXPl8ZkUsGpeYuyYVv/kygAAzExMgB9AAAABWQAIAAAAACfw9/te4GkHZAapC9sDMHHHZgmlTrccyJDPFciOMSOcwVzACAAAAAAIIC1ZpHObvmMwUfqDRPl4C1aeuHwujM1G/yJbvybMNAFbAAgAAAAAAs9x1SnVpMfNv5Bm1aXGwHmbbI9keWa9HRD35XuCBK5AAMxMTMAfQAAAAVkACAAAAAAkxHJRbnShpPOylLoDdNShfILeA1hChKFQY9qQyZ5VmsFcwAgAAAAAKidrY+rC3hTY+YWu2a7fuMH2RD/XaiTIBW1hrxNCQOJBWwAIAAAAACW0kkqMIzIFMn7g+R0MI8l15fr3k/w/mHtY5n6SYTEwAADMTE0AH0AAAAFZAAgAAAAAByuYl8dBvfaZ0LO/81JW4hYypeNmvLMaxsIdvqMPrWoBXMAIAAAAABNddwobOUJzm9HOUD8BMZJqkNCUCqstHZkC76FIdNg9AVsACAAAAAAQQOkIQtkyNavqCnhQbNg3HfqrJdsAGaoxSJePJl1qXsAAzExNQB9AAAABWQAIAAAAABxMy7X5hf7AXGDz3Y/POu1ZpkMlNcSvSP92NOO/Gs7wAVzACAAAAAAHJshWo2T5wU2zvqCyJzcJQKQaHFHpCpMc9oWBXkpUPoFbAAgAAAAAGeiJKzlUXAvL0gOlW+Hz1mSa2HsV4RGmyLmCHlzbAkoAAMxMTYAfQAAAAVkACAAAAAAlqbslixl7Zw3bRlibZbe/WmKw23k8uKeIzPKYEtbIy0FcwAgAAAAAHEKwpUxkxOfef5HYvulXPmdbzTivwdwrSYIHDeNRcpcBWwAIAAAAADuPckac21Hrg/h0kt5ShJwVEZ9rx6SOHd2+HDjqxEWTQADMTE3AH0AAAAFZAAgAAAAAMXrXx0saZ+5gORmwM2FLuZG6iuO2YS+1IGPoAtDKoKBBXMAIAAAAADIQsxCr8CfFKaBcx8kIeSywnGh7JHjKRJ9vJd9x79y7wVsACAAAAAAcvBV+SykDYhmRFyVYwFYB9oBKBSHr55Jdz2cXeowsUQAAzExOAB9AAAABWQAIAAAAAAm83FA9yDUpwkbKTihe7m53u+DivS9BU2b4vQMtCVQ2AVzACAAAAAAz3m1UB/AbZPa4QSKFDnUgHaT78+6iGOFAtouiBorEgEFbAAgAAAAAIgbpyYtJj5513Z5XYqviH/HXG/5+mqR52iBbfqMmDtZAAMxMTkAfQAAAAVkACAAAAAAJRzYK0PUwr9RPG2/7yID0WgcTJPB2Xjccp5LAPDYunkFcwAgAAAAAIIh24h3DrltAzNFhF+MEmPrZtzr1PhCofhChZqfCW+jBWwAIAAAAAAzRNXtL5o9VXMk5D5ylI0odPDJDSZZry1wfN+TedH70gADMTIwAH0AAAAFZAAgAAAAAHSaHWs/dnmI9sc7nB50VB2Bzs0kHapMHCQdyVEYY30TBXMAIAAAAACkV22lhEjWv/9/DubfHBAcwJggKI5mIbSK5L2nyqloqQVsACAAAAAAS19m7DccQxgryOsBJ3GsCs37yfQqNi1G+S6fCXpEhn4AAzEyMQB9AAAABWQAIAAAAAAC/I4TQRtCl12YZmdGz17X4GqSQgfwCPgRBwdHmdwu+QVzACAAAAAAx8f3z2ut/RAZhleari4vCEE+tNIn4ikjoUwzitfQ588FbAAgAAAAAJci0w1ZB8W2spJQ+kMpod6HSCtSR2jrabOH+B0fj3A4AAMxMjIAfQAAAAVkACAAAAAADGB5yU2XT0fse/MPWgvBvZikVxrl5pf3S5K1hceKWooFcwAgAAAAAIxTmlLHMjNaVDEfJbXvRez0SEPWFREBJCT6qTHsrljoBWwAIAAAAAAlswzAl81+0DteibwHD+CG5mZJrfHXa9NnEFRtXybzzwADMTIzAH0AAAAFZAAgAAAAABmO7QD9vxWMmFjIHz13lyOeV6vHT6mYCsWxF7hb/yOjBXMAIAAAAACT9lmgkiqzuWG24afuzYiCeK9gmJqacmxAruIukd0xEAVsA
CAAAAAAZa0/FI/GkZR7CtX18Xg9Tn9zfxkD0UoaSt+pIO5t1t4AAzEyNAB9AAAABWQAIAAAAAAfPUoy7QyZKhIIURso+mkP9qr1izbjETqF5s22GwjCjAVzACAAAAAAvLMsIDQ/go4VUxeh50UHmsvMvfx51cwyONnRD2odvC0FbAAgAAAAAKMb+1CodEalAFnDrEL1Ndt8ztamZ+9134m9Kp3GQgd+AAMxMjUAfQAAAAVkACAAAAAAE3ZqUar0Bq2zWbARE0bAv98jBlK9UJ73/xcwdMWWlSkFcwAgAAAAAK4M+MmC+9sFiFsumMyJZQKxWmmJiuG9H7IzKw083xxkBWwAIAAAAAAqkAONzhvMhkyL1D/6h7QQxEkdhC3p2WjXH+VGq5qCqQADMTI2AH0AAAAFZAAgAAAAAMo8FJiOq63cAmyk2O7eI7GcbQh/1j4RrMTqly3rexftBXMAIAAAAADjVmpd0WiRGTw/gAqEgGolt2EI7Csv14vKdmYoMD0aAgVsACAAAAAA07XQBzBUQMNw7F2/YxJjZNuPVpHTTgbLd1oGk77+bygAAzEyNwB9AAAABWQAIAAAAACu5IGaIx7A3Jvly/kzlCsSA4s3iJwuIl8jEdRH0k93NwVzACAAAAAA9NRUyxYE+t0Xyosyt6vIfMFW/vBoYg6sR+jBNs4JAxIFbAAgAAAAAAzyZ91dx+0oMlOVAjRGiMrPySikY/U9eMEB4WJb3uWtAAMxMjgAfQAAAAVkACAAAAAALkRy0GJInXYLA+cgjs6Myb0a+Gu9hgXhHvhLNoGWfckFcwAgAAAAANbALyt9zCSvwnLaWCd2/y2eoB7qkWTvv1Ldu8r40JPuBWwAIAAAAAD4Fl5bV5sz4isIE9bX+lmAp+aAKaZgVYVZeVfrItkCZAADMTI5AH0AAAAFZAAgAAAAAGoUK/DSWhT8LZhszSUqDbTrp8cSA7rdqmADKL+MILtTBXMAIAAAAABHnEE9bVa6lvhfhEMkkV2kzSSxH/sMW/FIJuw3CzWs6wVsACAAAAAAanavcBdqZxgRGKvEK95wTmeL1K1CeDSXZsXUAs81uOgAAzEzMAB9AAAABWQAIAAAAAC922ZDQE3h2fQKibGMZ9hV0WNlmrPYYSdtaSyYxsWYqgVzACAAAAAAagMovciKK6WVjIc2cCj8nK5O/gVOFFVeVAJpRp89tmQFbAAgAAAAAKcTFfPQzaFiAtSFhqbN02sCE1BKWJSrRfGN5L6oZwzkAAMxMzEAfQAAAAVkACAAAAAAtK+JqX3K/z2txjAU15DgX4y90DS2YLfIJFolCOkJJJwFcwAgAAAAAMnR5V7gfX7MNqqUdL5AkWlkhyFXaBRVNej+Rcn8lrQkBWwAIAAAAAA2cDNRXZuiC241TGRvdFyctJnrNcdbZOP9zHio81tkngADMTMyAH0AAAAFZAAgAAAAAAeGrIMK/bac6kPczxbvRYqKMkcpeI2FjdMpD91FDWIvBXMAIAAAAAAix62z1LeS8yvSXCl5gHSIomjyx76fF3S1lp9k900hygVsACAAAAAAiYwzf2m71aWFD5ajcXyW2JX2EzQOkBroTGMg29nLPYIAAzEzMwB9AAAABWQAIAAAAACphf298InM0Us4HT8o1W1MGw0D/02vd7Jh+U0h7qaFaQVzACAAAAAAFXtk7YpqsOJxsqGWSIL+YcBE96G3Zz9D31gPqDW94y8FbAAgAAAAAAOrS1KVA94rjB1jZ1pPocpCeBG+B14RzWoHqVDpp7JbAAMxMzQAfQAAAAVkACAAAAAATLDS2cuDVM3yDMuWNgk2iGKBTzPpfJMbvxVOSY39ZfcFcwAgAAAAAPT5wRi2cLHIUflXzm6EQB/m7xdThP80ir1VV/JBBqvxBWwAIAAAAAB9lEtZS0aXCFbCtSbhnis27S5IPcfWGygHW8AHn3QqzwADMTM1AH0AAAAFZAAgAAAAAJNjExiZVX7jfFGfYpQu16qxLN0YPqVU/5CQ/Y67YSinBXMAIAAAAABMpm2+6KrkRUlXzQoMPHrQmIO6dkQz66tYdfTeA3dKqQVsACAAAAAAFXobHiMLvNZuEPr8jtewCX2J93EZG3JNeyVg92fue6YAAzEzNgB9AAAABWQAIAAAAABlFkYtLCx901X6QVVMkSn6Z7k30UF4xHaA0OZJJ9bdyQVzACAAAAAATez+F9GHcGzTp7jjv4feboUNb8JCkIp4EqcPFisnq7MFbAAgAAAAACE7JvOpBgMoZ7kRd4QbxIhxukPTUxXpzhjnBHiR7XoRAAMxMzcAfQAAAAVkACAAAAAA8NJKN0IxZnruhswGQkiruv8Ih0EMwDcSZx/Xasup9dkFcwAgAAAAAKaJZRxzA+Igeydvuk6cSwUHXcrmT4PjhuPu//FslpdnBWwAIAAAAAD53Rok1Vq/PMAnXmarqoHJ0PEyYUBmVESa9hIpCv/G9QADMTM4AH0AAAAFZAAgAAAAABHxHdEClz7hbSSgE58+dWLlSMJnoPz+jFxp4bB1GmLQBXMAIAAAAAD3nSvT6aGD+A110J/NwEfp0nPutlmuB5B+wA3CC3noGAVsACAAAAAA3Apjd+TapONB7k5wBVwTWgn8t+Sq2oyyU5/+as109RcAAzEzOQB9AAAABWQAIAAAAAC/o8qW/ifk3KuJ01VFkyNLgQafxB5/bGs2G5VyyVafOwVzACAAAAAA1bMqAFGDHSl6BYNLbxApvkAv2K1/oafywiX0MDz1dGUFbAAgAAAAAHJXLlId3edFoniLD/9K2A5973MeP2Ro31flDyqm3l5QAAMxNDAAfQAAAAVkACAAAAAAY2V8I1bz3a1AxTtmED6UhdhA09huFkuuEX8R+d/WDPUFcwAgAAAAAPTVoNRiI76tcRKqd+JBBVyy4+YcKST42p0QX2BtmQ2VBWwAIAAAAACcxt9hg14WqPNiDv1MkqVljM2e2KJEv53lA17LhV6ZigADMTQxAH0AAAAFZAAgAAAAAO2kSsW0WGN9AOtK4xK2SHrGhWiaAbMEKT4iZkRpaDN/BXMAIAAAAABKGzQcPM8LT2dwOggxoWjv/1imYWabbG/G4kBw8OWaxAVsACAAAAAAC9hLK1dScQTAqg+YAG3ObdPzg2Xet57HmOFpGmyUR9UAAzE0MgB9AAAABWQAIAAAAAAiCwzNEEaH/mDam68IdDftnhthyUFdb+ZCNSBQ91WlHQVzACAAAAAA7tHyHcxCzmbJeFYZyPm4mEgkTGKOvwY4MX82OvH0Jn8FbAAgAAAAAAb5IAbZ1hXCNegQ+S+C9i/Z8y6sS8KeU04V6hXa2ml6AAMxNDMAfQAAAAVkACAAAAAAGuCHVNJSuoVkpPOnS5s89GuA+BLi2IPBUr2Bg1sWEPIFcwAgAAAAAEl1gncS5/xO7bQ/KQSstRV3rOT2SW6nV92ZANeG2SR6BWwAIAAAAAA9LOcKmhek8F2wAh8yvT/vjp2gaouuO+Hmv10lwAeWPAADMTQ0AH0AAAAFZAAgAAAAAMfxz7gEaoCdPvXr
ubDhCZUS0ARLZc1svgbXgMDlVBPgBXMAIAAAAAB6a5dDA3fuT5Vz2KvAcbUEFX/+B7Nw2p1QqbPoQ5TTuAVsACAAAAAAcf/y75UOuI62A6vWH7bYr/5Jz+nirZVYK/81trN6XOQAAzE0NQB9AAAABWQAIAAAAACnYsqF/VzmjIImC9+dqrHO1TM6lJ6fRwM0mM6Wf6paOwVzACAAAAAA5tgZzch8uDCR1ky3SllVaKVpxAlbrhvlNDTazZZRZOAFbAAgAAAAALeGiLJS4z2zhgVpxzyPdRYyACP9QzQBOob34YrIZumCAAMxNDYAfQAAAAVkACAAAAAAEC0sIVmadtW4YMuRXH7RpAhXclsd+3bmqGXCMeaT014FcwAgAAAAABPpXh0uzpsJJB+IRUNajmMB9WGwswfpw5T9xk3Xj6ANBWwAIAAAAAAmf+NYh9TZ/QRu3w/GQz66n7DtfbJijN3G7KzeL8lstAADMTQ3AH0AAAAFZAAgAAAAABaIB3n49Xm9cOafSrQsE0WCcYp8rMIO/qVwIlMF5YLRBXMAIAAAAAC9EyWJV3xOu9bzgdJ/yX+ko7qLf1u3AxNMataW2C9EzQVsACAAAAAAvVbDkLxXx2DcMLifIQ3K0IIJcLcAG9DUrNfI6aoUjNcAAzE0OAB9AAAABWQAIAAAAAA5rZItA/cocRnngYqcJ3nBXQ+l688aKz3EQyLbYYunPAVzACAAAAAAwKyA+L7TgxztPClLrIMk2JXR+w7c04N3ZOqPgjvrIvsFbAAgAAAAACzvZ33h6aWEe8hmo+1f6OXJ72FY5hvWaUuha64ZV3KFAAMxNDkAfQAAAAVkACAAAAAA3htn7oHJ0YYpIrs+Mzyh85Ys67HwAdv5LQl1mCdoMWkFcwAgAAAAAEHjCtNNLenHuSIYux6ezAHsXDaj2DlTF67ToDhDDe6HBWwAIAAAAAD+P4H0sk9jOd+7vOANt2/1Ectb+4ZRGPE8GkHWNXW3MgADMTUwAH0AAAAFZAAgAAAAAEnt18Km/nqggfIJWxzTr9r3hnXNaueG6XO9A5G11LnGBXMAIAAAAAD7QxzGMN/ard5TfFLecE6uusMmXG2+RBsBR+/NCQHUwAVsACAAAAAAQEZ1ZZ8GC8rdbg7s87OM5Gr9qkTXS9+P5DuAZxj5Gl4AAzE1MQB9AAAABWQAIAAAAAAVAKK/GoY8AACu/hyMpO4hdLq6JnEyWNzkyci9sbaD/wVzACAAAAAA2HmeqpMlvvBpV2zQTYIRmsc4MFlfHRwLof0ycJgMg/MFbAAgAAAAACdltCeWi5E/q1Li1eXLChpM2D9QQSGLBZ82NklQSc0oAAMxNTIAfQAAAAVkACAAAAAAhHyq1GQC/GiMwpYjcsfkNxolJ10ARKjIjfkW1Wipzi0FcwAgAAAAAD/uaGWxTDq87F8XZ6CrFI+RNa8yMqfSZdqK00Kj833BBWwAIAAAAAD6aEdOO0CsQGagioOCvANPCEHSpJ8BSixlPBq5ERhB7AADMTUzAH0AAAAFZAAgAAAAABAJJxHoZD+MQBWqm9UM9Dd3z5ZohIZGWRaRVRsMptKQBXMAIAAAAADrE/ca+gqj/SH4oao4wE4qn2ovoTydzcMbDbrfnUs3zAVsACAAAAAAeNCIQN6hVnGJinytQRFGlQ2ocoprXNqpia+BSxzl+uwAAzE1NAB9AAAABWQAIAAAAAAv01wz7VG9mTepjXQi6Zma+7b/OVBaKVkWNbgDLr1mFgVzACAAAAAA0I5sxz8r6wkCp5Tgvr+iL4p6MxSOq5d3e1kZG+0b7NkFbAAgAAAAAIA32v6oGkAOS96HexGouNTex+tLahtx9QF2dgGClk6WAAMxNTUAfQAAAAVkACAAAAAAWXecRwxSon68xaa9THXnRDw5ZfzARKnvvjTjtbae6T0FcwAgAAAAAPh0UfUMEo7eILCMv2tiJQe1bF9qtXq7GJtC6H5Va4fIBWwAIAAAAADqFr1ThRrTXNgIOrJWScO9mk86Ufi95IDu5gi4vP+HWQADMTU2AH0AAAAFZAAgAAAAAEY5WL8/LpX36iAB1wlQrMO/xHVjoO9BePVzbUlBYo+bBXMAIAAAAABoKcpadDXUARedDvTmzUzWPe1jTuvD0z9oIcZmKuiSXwVsACAAAAAAJuJbwuaMrAFoI+jU/IYr+k4RzAqITrOjAd3HWCpJHqEAAzE1NwB9AAAABWQAIAAAAADnJnWqsfx0xqNnqfFGCxIplVu8mXjaHTViJT9+y2RuTgVzACAAAAAAWAaSCwIXDwdYxWf2NZTly/iKVfG/KDjHUcA1BokN5sMFbAAgAAAAAJVxavipE0H4/JQvhagdytXBZ8qGooeXpkbPQ1RfYMVHAAMxNTgAfQAAAAVkACAAAAAAsPG7LaIpJvcwqcbtfFUpIjj+vpNj70Zjaw3eV9T+QYsFcwAgAAAAAJQ71zi0NlCyY8ZQs3IasJ4gB1PmWx57HpnlCf3+hmhqBWwAIAAAAACD58TO6d+71GaOoS+r73rAxliAO9GMs4Uc8JbOTmC0OwADMTU5AH0AAAAFZAAgAAAAAAGiSqKaQDakMi1W87rFAhkogfRAevnwQ41onWNUJKtuBXMAIAAAAAASgiDpXfGh7E47KkOD8MAcX8+BnDShlnU5JAGdnPdqOAVsACAAAAAAI+2TTQIgbFq4Yr3lkzGwhG/tqChP7hRAx2W0fNaH6jcAAzE2MAB9AAAABWQAIAAAAAB7L4EnhjKA5xJD3ORhH2wOA1BvpnQ+7IjRYi+jjVEaJAVzACAAAAAAuhBIm0nL3FJnVJId+7CKDASEo+l2E89Z9/5aWSITK4AFbAAgAAAAALtSICOzQDfV9d+gZuYxpEj6cCeHnKTT+2G3ceP2H65kAAMxNjEAfQAAAAVkACAAAAAAaROn1NaDZFOGEWw724dsXBAm6bgmL5i0cki6QZQNrOoFcwAgAAAAANVT8R6UvhrAlyqYlxtmnvkR4uYK/hlvyQmBu/LP6/3ZBWwAIAAAAAD+aHNMP/X+jcRHyUtrCNkk1KfMtoD3GTmShS8pWGLt+AADMTYyAH0AAAAFZAAgAAAAADqSR5e0/Th59LrauDA7OnGD1Xr3H3NokfVxzDWOFaN7BXMAIAAAAACt30faNwTWRbvmykDpiDYUOCwA6QDbBBYBFWS7rdOB4AVsACAAAAAAF7SvnjjRk5v2flFOKaBAEDvjXaL1cpjsQLtK2fv9zdQAAzE2MwB9AAAABWQAIAAAAADmtb1ZgpZjSeodPG/hIVlsnS8hoRRwRbrTVx89VwL62AVzACAAAAAAi38e1g6sEyVfSDkzZbaZXGxKI/zKNbMasOl2LYoWrq8FbAAgAAAAAALACk0KcCDN/Kv8WuazY8ORtUGkOZ5Dsm0ys1oOppp/AAMxNjQAfQAAAAVkACAAAAAAf/f7AWVgBxoKjr7YsEQ4w/fqSvuQWV2HMiA3rQ7ur0sFcwAgAAAAADkkeJozP6FFhUdRIN74H4UhIHue+eVbOs1NvbdWYFQrBWwAIAA
AAAB55FlHAkmTzAYj/TWrGkRJw2EhrVWUnZXDoMYjyfB/ZwADMTY1AH0AAAAFZAAgAAAAAI2WEOymtuFpdKi4ctanPLnlQud+yMKKb8p/nfKmIy56BXMAIAAAAADVKrJmhjr1rfF3p+T+tl7UFd1B7+BfJRk0e7a4im7ozgVsACAAAAAA5E7Ti3PnFiBQoCcb/DN7V1uM3Xd6VKiexPKntssFL7kAAzE2NgB9AAAABWQAIAAAAAAuHU9Qd79hjyvKOujGanSGDIQlxzsql8JytTZhEnPw+AVzACAAAAAAjF2gV/4+sOHVgDd/oR5wDi9zL7NGpGD+NsEpGXy/a4QFbAAgAAAAAJzMoyojYV6Ed/LpVN5zge93Odv3U7JgP7wxeRaJZGTdAAMxNjcAfQAAAAVkACAAAAAA7dQDkt3iyWYCT94d7yqUtPPwp4qkC0ddu+HFdHgVKEkFcwAgAAAAANuYvtvZBTEq4Rm9+5eb7VuFopowkrAuv86PGP8Q8/QvBWwAIAAAAACeqXoAOQOE4j0zRMlkVd8plaW0RX1npsFvB38Xmzv7sAADMTY4AH0AAAAFZAAgAAAAAAwnZSDhL4tNGYxlHPhKYB8s28dY5ScSwiKZm3UhT8U3BXMAIAAAAABDoY6dhivufTURQExyC9Gx3ocpl09bgbbQLChj3qVGbgVsACAAAAAAF+1nS7O0v85s3CCy+9HkdeoEfm2C6ZiNbPMMnSfsMHUAAzE2OQB9AAAABWQAIAAAAAC2VuRdaC4ZJmLdNOvD6R2tnvkyARteqXouJmI46V306QVzACAAAAAAMn1Z6B35wFTX9mEYAPM+IiJ5hauEwfD0CyIvBrxHg7IFbAAgAAAAAOG6DvDZkT9B/xZWmjao2AevN7MMbs3Oh9YJeSd/hZ+hAAMxNzAAfQAAAAVkACAAAAAAVerb7qVNy457rNOHOgDSKyWl5ojun7iWrv1uHPXrIZQFcwAgAAAAAIDcYS9j5z+gx0xdJj09L7876r/vjvKTi/d3bXDE3PhyBWwAIAAAAADuhVLqb1Bkrx8aNymS+bx2cL8GvLFNH4SAi690DUgnWQADMTcxAH0AAAAFZAAgAAAAAH/E44yLxKCJjuSmU9A8SEhbmkDOx1PqqtYcZtgOzJdrBXMAIAAAAABgLh9v2HjBbogrRoQ82LS6KjZQnzjxyJH4PH+F3jupSAVsACAAAAAAIlO46ehXp4TqpDV0t6op++KO+uWBFh8iFORZjmx2IjkAAzE3MgB9AAAABWQAIAAAAAAlNUdDL+f/SSQ5074mrq0JNh7CTXwTbbhsQyDwWeDVMwVzACAAAAAANIH2IlSNG0kUw4qz0budjcWn8mNR9cJlYUqPYdonucAFbAAgAAAAAJMrOUOyiu5Y3sV76zwEFct8L7+i8WGlQI2+8z2W2kzaAAMxNzMAfQAAAAVkACAAAAAASZ+CvUDtlk/R4HAQ3a+PHrKeY/8ifAfh0oXYFqliu80FcwAgAAAAAJelpzPgM65OZFt/mvGGpwibclQ49wH+1gbUGzd9OindBWwAIAAAAAD9qeDchteEpVXWcycmD9kl9449C1dOw0r60TBm5jK+cQADMTc0AH0AAAAFZAAgAAAAAN9fkoUVbvFV2vMNMAkak4gYfEnzwKI3eDM3pnDK5q3lBXMAIAAAAACnDkgVNVNUlbQ9RhR6Aot2nVy+U4km6+GHPkLr631jEAVsACAAAAAANzg/BnkvkmvOr8nS4omF+q9EG/4oisB+ul4YHi938hwAAzE3NQB9AAAABWQAIAAAAAASyK3b1nmNCMptVEGOjwoxYLLS9fYWm/Zxilqea0jpEQVzACAAAAAADDHsGrbqlKGEpxlvfyqOJKQJjwJrzsrB7k3HG0AUJbkFbAAgAAAAAKwx3S4XfDZh4+LuI9jf7XgUh5qiefNv87JD4qvVRfPSAAMxNzYAfQAAAAVkACAAAAAAlSP9iK31GlcG9MKGbLmq+VXMslURr+As736rrVNXcsUFcwAgAAAAAAvbj0zfq9zzi8XReheKFbCB+h9IsOLgXPPpI5vrEJNZBWwAIAAAAABXvoZhaQE7ogWjeBjceVkp03N20cKYP3TA8vuNsgpfAgADMTc3AH0AAAAFZAAgAAAAAOJNORH8Bev97gVU7y6bznOxJ+E6Qoykur1QP76hG1/7BXMAIAAAAAC+C1PtOOrSZgzBAGhr+dPe/kR0JUw9GTwLVNr61xC1aAVsACAAAAAAeA/L8MQIXkamaObtMPLpoDoi5FypA5WAPtMeMrgi0eQAAzE3OAB9AAAABWQAIAAAAAAKcHzLUomavInN6upPkyWhAqYQACP/vdVCIYpiy6U6HgVzACAAAAAATsR4KItY6R2+U7Gg6sJdaEcf58gjd1OulyWovIqfxKcFbAAgAAAAAFbm10ko67ahboAejQdAV0U2uA5OhZYdb8XUFJ8OL46LAAMxNzkAfQAAAAVkACAAAAAAqTOLiMpCdR59tLZzzIPqJvbCNvz2XQL9ust0qYaehtcFcwAgAAAAAArefox/3k5xGOeiw2m6NUdzuGxmPwcu5IFcj+jMwHgHBWwAIAAAAADLZGFJ7MQd5JXMgMXjqZO5LDLxcFClcXPlnRMWRn+1oAADMTgwAH0AAAAFZAAgAAAAAIPSqSeVzSRgNVNmrPYHmUMgykCY27NbdDUNhE5kx/SgBXMAIAAAAAAhX90nNfxyXmZe/+btZ7q6xMX4PFyj0paM1ccJ/5IUUQVsACAAAAAA419oHmD2W0SYoOMwhrhrp8jf68fg9hTkaRdCuVd3CN0AAzE4MQB9AAAABWQAIAAAAACLn5DxiqAosHGXIAY96FwFKjeqrzXWf3VJIQMwx1fl4gVzACAAAAAAindvU27nveutopdvuHmzdENBbeGFtI3Qcsr07jxmvm8FbAAgAAAAAPvl9pBStQvP4OGkN5v0MghUY6djm9n7XdKKfrW0l1sMAAMxODIAfQAAAAVkACAAAAAA7i2S6rHRSPBwZEn59yxaS7HiYBOmObIkeyCcFU42kf8FcwAgAAAAAGb3RSEyBmgarkTvyLWtOLJcPwCKbCRkESG4RZjVmY4iBWwAIAAAAADB2/wo5CSHR4ANtifY6ZRXNTO5+O8qP82DfAiAeanpZwADMTgzAH0AAAAFZAAgAAAAAFz+M+H/Z94mdPW5oP51B4HWptp1rxcMWAjnlHvWJDWrBXMAIAAAAACBFEOQyL7ZHu4Cq33QvXkmKuH5ibG/Md3RaED9CtG5HwVsACAAAAAAfggtJTprQ/yZzj7y5z9KvXsdeXMWP0yUXMMJqpOwI88AAzE4NAB9AAAABWQAIAAAAAAE7c2x3Z3aM1XGfLNk/XQ9jCazNRbGhVm7H8c2NjS5ywVzACAAAAAARJ9h8fdcwA19velF3L/Wcvi2rCzewlKZ2nA0p8bT9uwFbAAgAAAAAJtWe6b4wK2Hae2dZm/OEpYQnvoZjz4Sz5IgJC2wInecAAMxODUAfQAAAAVkACAAAAAAVoRt9B9dNVvIMGN+ea
5TzRzQC+lqSZ8dd/170zU5o9cFcwAgAAAAAEwM95XZin5mv2yhCI8+ugtKuvRVmNgzzIQN0yi1+9aIBWwAIAAAAAAMGBq72n00rox3uqhxSB98mkenTGCdbbUF1gXrgottzgADMTg2AH0AAAAFZAAgAAAAAKRDkjyWv/etlYT4GyoXrmBED2FgZHnhc+l9Wsl06cH2BXMAIAAAAABohlpm3K850Vndf3NmNE0hHqDlNbSR8/IvMidQ3LnIZAVsACAAAAAAW42nGHa6q2MCAaaPVwaIDfr8QLyQwjKq23onZJYsqVsAAzE4NwB9AAAABWQAIAAAAAC3DFh5oklLCNLY90bgWm68dFXz65JpAZSp1K99MBTPAQVzACAAAAAAQgZecmxEUZVHoptEQClDwAf8smI3WynQ/i+JBP0g+kQFbAAgAAAAAEUSQGVnAPISD6voD0DiBUqyWKgt2rta0tjmoe+LNt6IAAMxODgAfQAAAAVkACAAAAAAQ5WKvWSB503qeNlOI2Tpjd5blheNr6OBO8pfJfPNstcFcwAgAAAAAKwHgQLSDJ5NwLBQbY5OnblQIsVDpGV7q3RCbFLD1U4/BWwAIAAAAACQ5nED99LnpbqXZuUOUjnO2HTphEAFBjLD4OZeDEYybgADMTg5AH0AAAAFZAAgAAAAAGfhFY3RGRm5ZgWRQef1tXxHBq5Y6fXaLAR4yJhrTBplBXMAIAAAAACKEF0ApLoB6lP2UqTFsTQYNc9OdDrs/vziPGzttGVLKQVsACAAAAAArOO6FyfNRyBi0sPT5iye7M8d16MTLcwRfodZq4uCYKEAAzE5MAB9AAAABWQAIAAAAAAIM73gPcgzgotYHLeMa2zAU4mFsr7CbILUZWfnuKSwagVzACAAAAAAJCSu98uV8xv88f2BIOWzt6p+6EjQStMBdkGPUkgN79cFbAAgAAAAAMGqPGMPxXbmYbVfSa/japvUljht1zZT33TY7ZjAiuPfAAMxOTEAfQAAAAVkACAAAAAAkWmHCUsiMy1pwZTHxVPBzPTrWFBUDqHNrVqcyyt7nO8FcwAgAAAAAMv2CebFRG/br7USELR98sIdgE9OQCRBGV5JZCO+uPMgBWwAIAAAAABt7qSmn3gxJu7aswsbUiwvO+G6lXj/Xhx+J/zQyZxzLAADMTkyAH0AAAAFZAAgAAAAAGInUYv0lP/rK7McM8taEHXRefk8Q2AunrvWqdfSV7UaBXMAIAAAAACE+WPxJ3gan7iRTbIxXXx+bKVcaf8kP4JD8DcwU0aL7wVsACAAAAAAUC4eTprX4DUZn2X+UXYU6QjtiXk+u57yoOPBbPQUmDkAAzE5MwB9AAAABWQAIAAAAACmHlg2ud3cplXlTsNTpvNnY6Qm1Fce0m899COamoDjaQVzACAAAAAArtJQeJIlepBWRU2aYar7+YGYVQ7dfDc1oxgTmA8r9q0FbAAgAAAAAOk45vg5VqZHAFCO3i0Z52SZi5RADf8NXwf68T5yad/DAAMxOTQAfQAAAAVkACAAAAAApzcWSAbZWV/Rq+ylRNqqlJqNVR4fhXrz4633/MQOQgcFcwAgAAAAAN/jz/bsEleiuCl+li83EWlG6UMHA8CyaOMRKCkXkSCPBWwAIAAAAAC3Sd+Qg+uFDKpGZHbrQgokXHQ1az1aFl4YK343OB6hcQAAEmNtAAAAAAAAAAAAABBwYXlsb2FkSWQAAAAAABBmaXJzdE9wZXJhdG9yAAEAAAAA", "subType": "06" } } @@ -284,12 +280,12 @@ "limit": 1 } ], + "ordered": true, "encryptionInformation": { "type": 1, "schema": { "default.default": { "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", "ecocCollection": "enxcol_.default.ecoc", "fields": [ { @@ -299,7 +295,7 @@ "subType": "04" } }, - "path": "encryptedDecimal", + "path": "encryptedDecimalNoPrecision", "bsonType": "decimal", "queries": { "queryType": "rangePreview", @@ -313,24 +309,6 @@ } ] } - }, - "deleteTokens": { - "default.default": { - "encryptedDecimal": { - "e": { - "$binary": { - "base64": "65pz95EthqQpfoHS9nWvdCh05AV+OokP7GUaI+7j8+w=", - "subType": "00" - } - }, - "o": { - "$binary": { - "base64": "noN+05JsuO1oDg59yypIGj45i+eFH6HOTXOPpeZ//Mk=", - "subType": "00" - } - } - } - } } } }, @@ -345,7 +323,7 @@ "_id": { "$numberInt": "0" }, - "encryptedDecimal": { + "encryptedDecimalNoPrecision": { "$$type": "binData" }, "__safeContent__": [ diff --git a/test/client-side-encryption/spec/legacy/fle2-Range-Decimal-FindOneAndUpdate.json b/test/client-side-encryption/spec/legacy/fle2v2-Range-Decimal-FindOneAndUpdate.json similarity index 75% rename from test/client-side-encryption/spec/legacy/fle2-Range-Decimal-FindOneAndUpdate.json rename to test/client-side-encryption/spec/legacy/fle2v2-Range-Decimal-FindOneAndUpdate.json index d0e2967771..36cf91c88c 100644 --- a/test/client-side-encryption/spec/legacy/fle2-Range-Decimal-FindOneAndUpdate.json +++ b/test/client-side-encryption/spec/legacy/fle2v2-Range-Decimal-FindOneAndUpdate.json @@ -1,7 +1,8 @@ { "runOn": [ { - "minServerVersion": "6.2.0", + "minServerVersion": "7.0.0", + "serverless": "forbid", "topology": [ "replicaset" ] @@ -11,9 +12,6 @@ "collection_name": "default", "data": [], "encrypted_fields": { - 
"escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", - "ecocCollection": "enxcol_.default.ecoc", "fields": [ { "keyId": { @@ -22,7 +20,7 @@ "subType": "04" } }, - "path": "encryptedDecimal", + "path": "encryptedDecimalNoPrecision", "bsonType": "decimal", "queries": { "queryType": "rangePreview", @@ -91,7 +89,7 @@ "arguments": { "document": { "_id": 0, - "encryptedDecimal": { + "encryptedDecimalNoPrecision": { "$numberDecimal": "0" } } @@ -102,7 +100,7 @@ "arguments": { "document": { "_id": 1, - "encryptedDecimal": { + "encryptedDecimalNoPrecision": { "$numberDecimal": "1" } } @@ -112,7 +110,7 @@ "name": "findOneAndUpdate", "arguments": { "filter": { - "encryptedDecimal": { + "encryptedDecimalNoPrecision": { "$gt": { "$numberDecimal": "0" } @@ -120,7 +118,7 @@ }, "update": { "$set": { - "encryptedDecimal": { + "encryptedDecimalNoPrecision": { "$numberDecimal": "2" } } @@ -129,7 +127,7 @@ }, "result": { "_id": 1, - "encryptedDecimal": { + "encryptedDecimalNoPrecision": { "$numberDecimal": "1" } } @@ -187,7 +185,7 @@ "documents": [ { "_id": 0, - "encryptedDecimal": { + "encryptedDecimalNoPrecision": { "$$type": "binData" } } @@ -198,7 +196,6 @@ "schema": { "default.default": { "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", "ecocCollection": "enxcol_.default.ecoc", "fields": [ { @@ -208,7 +205,7 @@ "subType": "04" } }, - "path": "encryptedDecimal", + "path": "encryptedDecimalNoPrecision", "bsonType": "decimal", "queries": { "queryType": "rangePreview", @@ -235,7 +232,7 @@ "documents": [ { "_id": 1, - "encryptedDecimal": { + "encryptedDecimalNoPrecision": { "$$type": "binData" } } @@ -246,7 +243,6 @@ "schema": { "default.default": { "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", "ecocCollection": "enxcol_.default.ecoc", "fields": [ { @@ -256,7 +252,7 @@ "subType": "04" } }, - "path": "encryptedDecimal", + "path": "encryptedDecimalNoPrecision", "bsonType": "decimal", "queries": { "queryType": "rangePreview", @@ -281,10 +277,10 @@ "command": { "findAndModify": "default", "query": { - "encryptedDecimal": { + "encryptedDecimalNoPrecision": { "$gt": { "$binary": { - "base64": 
"CgljAAADcGF5bG9hZADZYgAABGcAnWIAAAMwAH0AAAAFZAAgAAAAAJu2KgiI8vM+kz9qD3ZQzFQY5qbgYqCqHG5R4jAlnlwXBXMAIAAAAAAAUXxFXsz764T79sGCdhxvNd5b6E/9p61FonsHyEIhogVjACAAAAAAeEtIU/sp7zwjA/VArYkCzkUUiRiQOmlTaVZvfUqbEp0AAzEAfQAAAAVkACAAAAAAPQPvL0ARjujSv2Rkm8r7spVsgeC1K3FWcskGGZ3OdDIFcwAgAAAAACgNn660GmefR8jLqzgR1u5O+Uocx9GyEHiBqVGko5FZBWMAIAAAAACMKx1UyNAN4yVafFht8jp4w4LNaJcLhnozfSMzDHD3owADMgB9AAAABWQAIAAAAAD2Zi6kcxmaD2mY3VWrP+wYJMPg6cSBIYPapxaFQxYFdQVzACAAAAAAM/cV36BLBY3xFBXsXJY8M9EHHOc/qrmdc2CJmj3M89gFYwAgAAAAAHdek2wlAQ5BQORVjudyziRTV0EKx8qbsnoDiw4HG2xaAAMzAH0AAAAFZAAgAAAAAOaNqI9srQ/mI9gwbk+VkizGBBH/PPWOVusgnfPk3tY1BXMAIAAAAAAc96O/pwKCmHCagT6T/QV/wz4vqO+R22GsZ1dse2Vg6QVjACAAAAAA3tld+twIlVWnCTXElspau5k02CwxXDjh+3u1CQtV/W4AAzQAfQAAAAVkACAAAAAA0XlQgy/Yu97EQOjronl9b3dcR1DFn3deuVhtTLbJZHkFcwAgAAAAACoMnpVl6EFJak8A+t5N4RFnQhkQEBnNAx8wDqmq5U/dBWMAIAAAAABRmXIchytMNz4J439viiY5FZ1OB3AXJBkZ3udUqHpsqAADNQB9AAAABWQAIAAAAAAOtpNexRxfv0yRFvZO9DhlkpU4mDuAb8ykdLnE5Vf1VAVzACAAAAAAeblFKm/30orP16uQpZslvsoS8s0xfNPIBlw3VkHeekYFYwAgAAAAAJQdzGMO2s1AkdKz3ZPytivrQphUFVhvhMcXjvUGzHBDAAM2AH0AAAAFZAAgAAAAAIr8xAFm1zPmrvW4Vy5Ct0W8FxMmyPmFzdWVzesBhAJFBXMAIAAAAABYeeXjJEzTHwxab6pUiCRiZjxgtN59a1y8Szy3hfkg+gVjACAAAAAA161loizzKvXS1Po/3bxeNICmKpsPSK9Q+EpkISK3jVoAAzcAfQAAAAVkACAAAAAAl+ibLk0/+EwoqeC8S8cGgAtjtpQWGEZDsybMPnrrkwEFcwAgAAAAAHPPBudWgQ+HUorLDpJMqhS9VBF2VF5aLcxgrM1s+yU7BWMAIAAAAAACyakB3CZWEjbxK0u9Sflc3+EafBAQFbvBpCxKvxuEQAADOAB9AAAABWQAIAAAAACR9erwLTb+tcWFZgJ2MEfM0PKI9uuwIjDTHADRFgD+SQVzACAAAAAAcOop8TXsGUVQoKhzUllMYWxL93xCOkwtIpV8Q6hiSYYFYwAgAAAAAJ43998lzKoVqWm99ZzKHJLeVscGNCVoKDvpUtt2rDI9AAM5AH0AAAAFZAAgAAAAALv0vCPgh7QpmM8Ug6ad5ioZJCh7pLMdT8FYyQioBQ6KBXMAIAAAAADsCPyIG8t6ApQkRk1fX/sfc1kpuWCWP8gAEpnYoBSHrQVjACAAAAAAj2h1WLr0EFEFZ31djx3BtLIbAtdE05ax54opkJo4/bQAAzEwAH0AAAAFZAAgAAAAAIW4AxJgYoM0pcNTwk1RSbyjZGIqgKL1hcTJmNrnZmoPBXMAIAAAAAAZpfx3EFO0vY0f1eHnE0PazgqeNDTaj+pPJMUNW8lFrAVjACAAAAAAqFD83E8POmo6pdqg7D+jWtngbgcV99mbQBxkbpX7ds4AAzExAH0AAAAFZAAgAAAAAKliO6L9zgeuufjj174hvmQGNRbmYYs9yAirL7OxwEW3BXMAIAAAAAAqU7vs3DWUQ95Eq8OejwWnD0GuXd+ASi/uD6S0l8MM1QVjACAAAAAAp0cwpAh7SiuStPwYNVvp83N5GWyWRWA7UGRxHDDj8g8AAzEyAH0AAAAFZAAgAAAAAOGQcCBkk+j/Kzjt/Cs6g3BZPJG81wIHBS8JewHGpgk+BXMAIAAAAABjrxZXWCkdzrExwCgyHaafuPSQ4V4x2k9kUCAqUaYKDQVjACAAAAAAJr/tEuxScrJJEMECy4itHf1y5+gO2l5sBjgM/EOLXw0AAzEzAH0AAAAFZAAgAAAAAPmCNEt4t97waOSd5hNi2fNCdWEkmcFJ37LI9k4Az4/5BXMAIAAAAABX7DuDPNg+duvELf3NbLWkPMFw2HGLgWGHyVWcPvSNCAVjACAAAAAAeNCEn5Z3A5CgCnIgaBl2N8p0t+5rRY06vGg6ePVMaTUAAzE0AH0AAAAFZAAgAAAAAD4vtVUYRNB+FD9yoQ2FVJH3nMeJeKbi6eZfth638YqbBXMAIAAAAAANCuUB4OdmuD6LaDK2f3vaqfgYYvg40wDXOBbcFjTqLwVjACAAAAAATozHsao4er/Uk5H/pKe/1JEo/3h4Pgah2RgJHXf2PeYAAzE1AH0AAAAFZAAgAAAAAJPIRzjmTjbdIvshG6UslbEOd797ZSIdjGAhGWxVQvK1BXMAIAAAAABgmJ0Jh8WLs9IYs/a7DBjDWd8J3thW/AGJK7zDnMeYOAVjACAAAAAAtD48LbnCkTvacfz8DvR1ixsbYceiJxJRV7tLqDUWvVwAAzE2AH0AAAAFZAAgAAAAAABQyKQPoW8wGPIqnsTv69+DzIdRkohRhOhDmyVHkw9WBXMAIAAAAAAqWA2X4tB/h3O1Xlawtz6ndI6WaTwgU1QYflL35opu5gVjACAAAAAAphx2eRFGSW/tm3HxilqRv5Zj5TtOy4KSGiCIm6yJANYAAzE3AH0AAAAFZAAgAAAAACB7NOyGQ1Id3MYnxtBXqyZ5Ul/lHH6p1b10U63DfT6bBXMAIAAAAADpOryIcndxztkHSfLN3Kzq29sD8djS0PspDSqERMqokQVjACAAAAAAIHYCljTfUu/Ox23Nrjz2Z6cqQpn4s0sJV/SxkC9UtIEAAzE4AH0AAAAFZAAgAAAAAKVfXLfs8XA14CRTB56oZwV+bFJN5BHraTXbqEXZDmTkBXMAIAAAAAASRWTsfGOpqdffiOodoqIgBzG/yzFyjR5CfUsIUIWGpgVjACAAAAAAGs7l0GsFNkQMcJBUuUsEOMuQupbEvi7MDcezspsCY3EAAzE5AH0AAAAFZAAgAAAAAH/aZr4EuS0/noQR9rcF8vwoaxnxrwgOsSJ0ys8PkHhGBXMAIAAAAACd7ObGQW7qfddcvyxRTkPuvq/PHu7+6I5dxwS1Lzy5XAVjACAAAAAAWnfsbdu5Qvb62rEpBR90qh+3jdYfp266dmuNPLoXys4AAzIwAH0AAAAFZAAgAAAAAKvlcpFFNq0oA+urq3w6d80PK1HHHw0H0yVWvU9aHijXBXMAIAAAAADWnAHQ5Fhlcjawki7kWz
dqjM2f6IdGJblojrYElWjsZgVjACAAAAAAdNxswYIMjqVta8sLC8wniMm6gOgIgG5eWTPAxItBJJ4AAzIxAH0AAAAFZAAgAAAAAH/2aMezEOddrq+dNOkDrdqf13h2ttOnexZsJxG1G6PNBXMAIAAAAABNtgnibjC4VKy5poYjvdsBBnVvDTF/4mmEAxsXVgZVKgVjACAAAAAArWXr2Myg8T8/0eYBCUXDOsIR//zqZtlE6Lb1YZJkvYcAAzIyAH0AAAAFZAAgAAAAAF2wZoDL6/V59QqO8vdRZWDpXpkV4h4KOCSn5e7x7nmzBXMAIAAAAADLZBu7LCYjbThaVUqMK14H/elrVOYIKJQCx4C9Yjw37gVjACAAAAAAWeF6F8pBeCm27PWjQGrlyN7JHp9WqCNnwFzqZ+V51t0AAzIzAH0AAAAFZAAgAAAAAH27yYaLn9zh2CpvaoomUPercSfJRUmBY6XFqmhcXi9QBXMAIAAAAAAUwumVlIYIs9JhDhSj0R0+59psCMsFk94E62VxkPt42QVjACAAAAAAjbsmlVrfexmULer3RtGwnOf20pF0xv4Z46OHOaIwgzEAAzI0AH0AAAAFZAAgAAAAALMg2kNAO4AFFs/mW3In04yFeN4AP6Vo0klyUoT06RquBXMAIAAAAAAgGWJbeIdwlpqXCyVIYSs0dt54Rfc8JF4b8uYc+YUj0AVjACAAAAAAsNKKan8cMW1oYFmQiyPZbepuJlDD0/CQUS21++CP16kAAzI1AH0AAAAFZAAgAAAAALas/i1T2DFCEmrrLEi7O2ngJZyFHialOoedVXS+OjenBXMAIAAAAAA1kK0QxY4REcGxHeMkgumyF7iwlsRFtw9MlbSSoQY7uAVjACAAAAAAUGZ/BwF6Q9hcFBC8F5ed6RwYWJ1/OahRwljBloMgkFAAAzI2AH0AAAAFZAAgAAAAAP1TejmWg1CEuNSMt6NUgeQ5lT+oBoeyF7d2l5xQrbXWBXMAIAAAAABPX0kj6obggdJShmqtVfueKHplH4ZrXusiwrRDHMOKeQVjACAAAAAApU9zcPAQ2xOspmwhjWN90v2m2clQ4rk7WBmm/nUyOHQAAzI3AH0AAAAFZAAgAAAAAMrKn+QPa/NxYezNhlOX9nyEkN1kE/gW7EuZkVqYl0b8BXMAIAAAAABUoZMSPUywRGfX2EEencJEKH5x/P9ySUVrhStAwgR/LgVjACAAAAAAl/hDhauhBkda/I/kLr/XLxyso2guawC2isj+tDQhfMoAAzI4AH0AAAAFZAAgAAAAAMmD1+a+oVbiUZd1HuZqdgtdVsVKwuWAn3/M1B6QGBM3BXMAIAAAAACLyytOYuZ9WEsIrrtJbXUx4QgipbaAbmlJvSZVkGi0CAVjACAAAAAAVKbMdxvkfCBoqJCUVjLrtKF90a80LocckdFc7LvYuWIAAzI5AH0AAAAFZAAgAAAAAOVKV7IuFwmYP1qVv8h0NvJmfPICu8yQhzjG7oJdTLDoBXMAIAAAAABL70XLfQLKRsw1deJ2MUvxSWKxpF/Ez73jqtbLvqbuogVjACAAAAAARiC2ShVCKMjjiqB6xg+jDIYJNhhWqmceO07EC8rEXkoAAzMwAH0AAAAFZAAgAAAAAKS/1RSAQma+xV9rz04IcdzmavtrBDjOKPM+Z2NEyYfPBXMAIAAAAAAOJDWGORDgfRv8+w5nunh41wXb2hCA0MRzwnLnQtIqPgVjACAAAAAAWU2Vw2Rta8oHxPPfOWQa86tjYWndb0E3RNlZ9gthHt0AAzMxAH0AAAAFZAAgAAAAAFeq8o82uNY1X8cH6OhdTzHNBUnCChsEDs5tm0kPBz3qBXMAIAAAAABaxMBbsaeEj/EDtr8nZfrhhhirBRPJwVamDo5WwbgvTQVjACAAAAAAqcsCyJuLvxG8STrUI1Ccaga760+4fbmIPc6bXodea0gAAzMyAH0AAAAFZAAgAAAAAI8IKIfDrohHh2cjspJHCovqroSr5N3QyVtNzFvT5+FzBXMAIAAAAABXHXteKG0DoOMmECKp6ro1MZNQvXGzqTDdZ0DUc8QfFAVjACAAAAAA47u7VuqtYJ4jt5CcGrUacopqApwpqEpIpkalvCRtul0AAzMzAH0AAAAFZAAgAAAAAJkHvm15kIu1OtAiaByj5ieWqzxiu/epK6c/9+KYIrB0BXMAIAAAAACzg5TcyANk0nes/wCJudd1BwlkWWF6zw3nGclq5v3SJQVjACAAAAAAvTQRtEYdIj2+bfJNamvrgWRoBsYYjMQx9MyuFwSfKmYAAzM0AH0AAAAFZAAgAAAAAAYSOvEWWuSg1Aym7EssNLR+xsY7e9BcwsX4JKlnSHJcBXMAIAAAAABT48eY3PXVDOjw7JpNjOe1j2JyI3LjDnQoqZ8Je5B2KgVjACAAAAAA4eLwHqiSgM1pMqjK3QFgYauOnI3emnlOK3rgQPuec4AAAzM1AH0AAAAFZAAgAAAAAGQxC9L1e9DfO5XZvX1yvc3hTLtQEdKO9FPMkyg0Y9ZABXMAIAAAAADtmcMNJwdWLxQEArMGZQyzpnu+Z5yMmPAkvgq4eAKwNQVjACAAAAAAxA7XR9Cp6671TiZesvqE/EPKVPXfGDaYuDnce+MaZw0AAzM2AH0AAAAFZAAgAAAAADLHK2LNCNRO0pv8n4fAsxwtUqCNnVK8rRgNiQfXpHSdBXMAIAAAAACf16EBIHRKD3SzjRW+LMOl+47QXA3CJhMzlcqyFRW22AVjACAAAAAA9F9Ztm39rTapXoeRmzk6eUoa3MDPatwoQD5PvD+3xGEAAzM3AH0AAAAFZAAgAAAAAHiZJiXKNF8bbukQGsdYkEi95I+FSBHy1I5/hK2uEZruBXMAIAAAAADE+lZBa8HDUJPN+bF6xI9x4N7GF9pj3vBR7y0BcfFhBAVjACAAAAAArsw2CrSC2y02EiWJxViVxlYRIXtWARpUTwO/mfqS7O4AAzM4AH0AAAAFZAAgAAAAAI1oa2OIw5TvhT14tYCGmhanUoYcCZtNbrVbeoMldHNZBXMAIAAAAAAx2nS0Ipblf2XOgBiUOuJFBupBhe7nb6QPLZlA4aMPCgVjACAAAAAAaVoGxV5YH4urmcw3Zocg+QHoq+hA0u6nIkconI0MtZoAAzM5AH0AAAAFZAAgAAAAABgTWS3Yap7Q59hii/uPPimHWXsr+DUmsqfwt/X73qsOBXMAIAAAAACKK05liW5KrmEAvtpCB1WUltruzUylDDpjea//UlWoOAVjACAAAAAAAI3iOqWSA+DpgNr/neIRpt85Q8pLS/81ZKDnbC4io/0AAzQwAH0AAAAFZAAgAAAAABOAnBPXDp6i9TISQXvcNKwGDLepZTu3cKrB4vKnSCjBBXMAIAAAAADjjzZO7UowAAvpwyG8BNOVqLCccMFk3aDK4unUeft5ywVjACAAAAAAIZqKrUcbtze/K3CUKdchuVXQYkgj6jQfIeMZ3GrtYeEAAzQxAH0AAAAFZAAgAAAAAHN8hyvT1lYrAsdiV
5GBdd5jhtrAYE/KnSjw2Ka9hjz9BXMAIAAAAAD794JK7EeXBs+D7yOVK7nWF8SbZ/7U8gZ7nnT9JFNwTAVjACAAAAAAQ4K5cDGLM/2L7KbMt537tsG1eiWrboKMj6pXVfBDIBoAAzQyAH0AAAAFZAAgAAAAAO93bPrq8bsnp1AtNd9ETnXIz0lH/2HYN/vuw9wA3fyFBXMAIAAAAABHlls5fbaF2oAGqptC481XQ4eYxInTC29aElfmVZgDUgVjACAAAAAAe0oLBk+ZaipcPgjjHvACAyA42cLY8i+s+V0EsarulZkAAzQzAH0AAAAFZAAgAAAAAL1YsAZm1SA0ztU6ySIrQgCCA74V6rr0/4iIygCcaJL6BXMAIAAAAADTXWTHWovGmUR1Zg9l/Aqq9H5mOCJQQrb/Dfae7e3wKAVjACAAAAAAAeXpcv0oCp9iY/U81w+WJjFjGn60KLQEURJd/pSoaJgAAzQ0AH0AAAAFZAAgAAAAAEoFAeHk0RZ9kD+cJRD3j7PcE5gzWKnyBrF1I/MDNp5mBXMAIAAAAACgHtc2hMBRSZjKw8RAdDHK+Pi1HeyjiBuAslGVNcW5tAVjACAAAAAAmSJiRfjmuwq5cYfadeYQRkPqbGYYrMNTQeyhBEAYClEAAzQ1AH0AAAAFZAAgAAAAAAW+7DmSN/LX+/0uBVJDHIc2dhxAGz4+ehyyz8fAnNGoBXMAIAAAAAA6Ilw42EvvfLJ3Eq8Afd+FjPoPcQutZO6ltmCLEr8kxQVjACAAAAAAUHZ9EwBIvDI4PUeCOzZCuVN9g9h5Zvq2mSpmejPLBE8AAzQ2AH0AAAAFZAAgAAAAANBdV7M7kuYO3EMoQItAbXv4t2cIhfaT9V6+s4cg9djlBXMAIAAAAABvz4MIvZWxxrcJCL5qxLfFhXiUYB1OLHdKEjco94SgDgVjACAAAAAAvz1koQRh+weAUstLWz/iHxFrLReN4KtQjdEU/zrKOFsAAzQ3AH0AAAAFZAAgAAAAAMoAoiAn1kc79j5oPZtlMWHMhhgwNhLUnvqkqIFvcH1NBXMAIAAAAADcJTW7WiCyW0Z9YDUYwppXhLj4Ac1povpJvcAq+i48MQVjACAAAAAARL0k3Uwe7OUwTejHMoNwK+twNHQznmo1vVUZaH7h8BIAAzQ4AH0AAAAFZAAgAAAAACI3j5QP7dWHpcT6WO/OhsWwRJNASBYqIBDNzW8IorEyBXMAIAAAAABxUpBSjXwCKDdGP9hYU+RvyR+96kChfvyyRC4jZmztqAVjACAAAAAA93/qZqyku1BJePH5hmMWsMfuMlF7TOFZp7z2wLJNdeYAAzQ5AH0AAAAFZAAgAAAAAKsbycEuQSeNrF8Qnxqw3x3og8JmQabwGqnDbqzFRVrrBXMAIAAAAACno/3ef2JZJS93SVVzmOZSN+jjJHT8s0XYq2M46d2sLAVjACAAAAAARUbN9bbz8Balyw8pmBLg3L5dy3aQ9s8IgqpVLeQzTkUAAzUwAH0AAAAFZAAgAAAAAPXIcoO8TiULqlxzb74NFg+I8kWX5uXIDUPnh2DobIoMBXMAIAAAAADR6/drkdTpnr9g1XNvKDwtBRBdKn7c2c4ZNUVK5CThdQVjACAAAAAAnJXYvnjljaE6ajRF+h90FXzJ9EJY26D0oG9t61ZMzqgAAzUxAH0AAAAFZAAgAAAAAEa03ZOJmfHT6/nVadvIw71jVxEuIloyvxXraYEW7u7pBXMAIAAAAADzRlBJK75FLiKjz3djqcgjCLo/e3yntI3MnPS48OORhgVjACAAAAAAbFS6SmTSDF3AsPceQrRwMMRoAnzL/k5avSHlIji5zYIAAzUyAH0AAAAFZAAgAAAAAKx8NLSZUU04pSSGmHa5fh2oLHsEN5mmNMNHL95/tuC9BXMAIAAAAAA59hcXVaN3MNdHoo11OcH1aPRzHCwpVjO9mGfMz4xh3QVjACAAAAAAoIvo/iOb/eXJv7w4KSQo74DOyO4uAIO09Ba7JZw9ousAAzUzAH0AAAAFZAAgAAAAAHNKAUxUqBFNS9Ea9NgCZoXMWgwhP4x0/OvoaPRWMquXBXMAIAAAAABUZ551mnP4ZjX+PXU9ttomzuOpo427MVynpkyq+nsYCQVjACAAAAAAlCTphL00SYotztwI+txbHAFTusjh1nNbiya24rM1dUEAAzU0AH0AAAAFZAAgAAAAALfGXDlyDVcGaqtyHkLT0qpuRhJQLgCxtznazhFtuyn/BXMAIAAAAABipxlXDq14C62pXhwAeen5+syA+/C6bN4rtZYcO4zKwAVjACAAAAAA431CBTW4s3IQde024W2cz5iEnj8EJ7p07esepoVDxmQAAzU1AH0AAAAFZAAgAAAAANoEr8sheJjg4UCfBkuUzarU9NFoy1xwbXjs5ifVDeA9BXMAIAAAAABPoyTf6M+xeZVGES4aNzVlq7LgjqZXJ/QunjYVusGUEAVjACAAAAAAywuLdjJJuXewOXMAr25GMBEif8+P74+pGaENUn+XIB8AAzU2AH0AAAAFZAAgAAAAAKvDiK+xjlBe1uQ3SZTNQl2lClIIvpP/5CHwY6Kb3WlgBXMAIAAAAAANnxImq5MFbWaRBHdJp+yD09bVlcFtiFDYsy1eDZj+iQVjACAAAAAAvwntX1RgeNfp5oiD+r3sAKIbMGy1yEblODDYWec1xuIAAzU3AH0AAAAFZAAgAAAAAF49IlFH9RmSUSvUQpEPUedEksrQUcjsOv44nMkwXhjzBXMAIAAAAADJtWGbk0bZzmk20obz+mNsp86UCu/nLLlbg7ppxYn7PgVjACAAAAAAKW8QTERGXwh6fE4gJsULcmu80B4BMkMaZR9USn/C6RgAAzU4AH0AAAAFZAAgAAAAAOuSJyuvz50lp3BzXlFKnq62QkN2quNU1Gq1IDsnFoJCBXMAIAAAAAAqavH1d93XV3IzshWlMnzznucadBF0ND092/2ApI1AcAVjACAAAAAAmVcLt9hf9/q+2O3K4cvFAsvnCsSofHZ5iTzIaR5YKFIAAzU5AH0AAAAFZAAgAAAAALtgC4Whb4ZdkCiI30zY6fwlsxSa7lEaOAU3SfUXr02XBXMAIAAAAACgdZ6U1ZVgUaZZwbIaCdlANpCw6TZV0bwg3DS1NC/mnAVjACAAAAAAYJOnAwVW0gPl1jkCxAL1ZAhKkJ1+ShfwyaIaipflCegAAzYwAH0AAAAFZAAgAAAAAF6PfplcGp6vek1ThwenMHVkbZgrc/dHgdsgx1VdPqZ5BXMAIAAAAACha3qhWkqmuwJSEXPozDO8y1ZdRLyzt9Crt2vjGnT7AAVjACAAAAAA1JYhGDDvtf8b2Pup5jdzQmy325tGo27n/5thjZ/GI4sAAzYxAH0AAAAFZAAgAAAAAKoLEhLvLjKc7lhOJfx+VrGJCx9tXlOSa9bxQzGR6rfbBXMAIAAAAAAIDK5wNnjRMBzET7x/KAMExL/zi1IumJM92XTgXfoPoAVjACAAAAAAhoVLxo0hXbxAzz2BWbeRxAzk
08TbVuHxJkdsbCISswkAAzYyAH0AAAAFZAAgAAAAADoQv6lutRmh5scQFvIW6K5JBquLxszuygM1tzBiGknIBXMAIAAAAADAD+JjW7FoBQ76/rsECmmcL76bmyfXpUU/awqIsZdO+wVjACAAAAAAwLIixR/svwADNhtiOZ9mDF2rUqEvuPx2tUSQbhOpersAAzYzAH0AAAAFZAAgAAAAAJNHUGAgn56KekghO19d11nai3lAh0JAlWfeP+6w4lJBBXMAIAAAAAD9XGJlvz59msJvA6St9fKW9CG4JoHV61rlWWnkdBRLzwVjACAAAAAAKMVXumaH5QOt5WVHFip9xWx9e5I/5Y9uEO0UAX6dy7QAAzY0AH0AAAAFZAAgAAAAAHgYoMGjEE6fAlAhICv0+doHcVX8CmMVxyq7+jlyGrvmBXMAIAAAAAC/5MQZgTHuIr/O5Z3mXPvqrom5JTQ8IeSpQGhO9sB+8gVjACAAAAAAPMgD8Rqnd94atKnMyPjlTwthUQ710MKJVqgtwNXLFWwAAzY1AH0AAAAFZAAgAAAAANpIljbxHOM7pydY877gpRQvYY2TGK7igqgGsavqGPBABXMAIAAAAAAqHyEu9gpurPOulApPnr0x9wrygY/7mXe9rAC+tPK80wVjACAAAAAAezZExYLAbjyMsVuSrWGQ+LsXYEcp6AFMANkQ496mTXcAAzY2AH0AAAAFZAAgAAAAAGR+gEaZTeGNgG9BuM1bX2R9ed4FCxBA9F9QvdQDAjZwBXMAIAAAAABSkrYFQ6pf8MZ1flgmeIRkxaSh/Eep4Btdx4QYnGGnwAVjACAAAAAAQ011358xXtVDOyGPpF2+zGoMdiQ8PGCB1QfDcluYUF4AAzY3AH0AAAAFZAAgAAAAAFNprhQ3ZwIcYbuzLolAT5n/vc14P9kUUQComDu6eFyKBXMAIAAAAAAcx9z9pk32YbPV/sfPZl9ALIEVsqoLXgqWLVK/tP+heAVjACAAAAAA3eqduo9CWE5BePqBq7KCh5++QqXnyjCwybB15Zoeiu4AAzY4AH0AAAAFZAAgAAAAADgyPqQdqQrgfmJjRFAILTHzXbdw5kpKyfeoEcy6YYG/BXMAIAAAAAAE+3XsBQ8VAxAkN81au+f3FDeCD/s7KoZD+fnM1MJSSAVjACAAAAAAWHAiQNo6HBkIhf72fVQxJLaCtNeTqn2OuMmYZjT7PRkAAzY5AH0AAAAFZAAgAAAAAI0CT7JNngTCTUSei1Arw7eHWCD0jumv2rb7imjWIlWABXMAIAAAAABSP8t6ya0SyCphXMwnru6ZUDXWElN0NfBvEOhDvW9bJQVjACAAAAAAbfZdoqMQ71hNIsycNHl6JpSmAFDPSfgzdWkGqFZNoScAAzcwAH0AAAAFZAAgAAAAAD/FIrGYFDjyYmVb7oTMVwweWP7A6F9LnyIuNO4MjBnXBXMAIAAAAACIZgJCQRZu7NhuNMyOqCn1tf+DfU1qm10TPCfj5JYV3wVjACAAAAAAZCaO06/DjOBjNrTxB8jdxM7X4XvNu9QTa2Y00kZJwgEAAzcxAH0AAAAFZAAgAAAAAHIkVuNDkSS1cHIThKc/O0r2/ubaABTOi8Q1r/dvBAsEBXMAIAAAAADdHYqchEiJLM340c3Q4vJABmmth3+MKzwLYlsG6GS7sQVjACAAAAAA4byh4Re83zUZxvEH4iJSsnFei+ohsWxjSpB5YoAPawIAAzcyAH0AAAAFZAAgAAAAAJmoDILNhC6kn3masElfnjIjP1VjsjRavGk1gSUIjh1NBXMAIAAAAAD97Ilvp3XF8T6MmVVcxMPcdL80RgQ09UoC6PnoOvZ1IQVjACAAAAAAx9wkqPvtzrL9EFDmFvktI129ti8IAdHn0oudubGPWAAAAzczAH0AAAAFZAAgAAAAAI5bm3YO0Xgf0VT+qjVTTfvckecM3Cwqj7DTKZXf8/NXBXMAIAAAAAD/m+h8fBhWaHm6Ykuz0WX1xL4Eme3ErLObyEVJf8NCywVjACAAAAAAiGdTXD22l1zDxHeF4NXUTiBnNTsdpzJRwM6riTPuOogAAzc0AH0AAAAFZAAgAAAAANqo4+p6qdtCzcB4BX1wQ6llU7eFBnuu4MtZwp4B6mDlBXMAIAAAAAAGiz+VaukMZ+6IH4jtn4KWWdKK4/W+O+gRioQDrfzpMgVjACAAAAAAl/wlBjSJW/hKkll8HSBCGw4Ce1pJ5rZuhVE09hKq4jQAAzc1AH0AAAAFZAAgAAAAAPrFXmHP2Y4YAm7b/aqsdn/DPoDkv7B8egWkfe23XsM1BXMAIAAAAAAGhwpKAr7skeqHm3oseSbO7qKNhmYsuUrECBxJ5k+D2AVjACAAAAAAKSsxERy2OBhb5MiH9/H2BET2oeU6yVihiAoWi/16FroAAzc2AH0AAAAFZAAgAAAAABzjYxwAjXxXc0Uxv18rH8I3my0Aguow0kTwKyxbrm+cBXMAIAAAAADVbqJVr6IdokuhXkEtXF0C2gINLiAjMVN20lE20Vmp2QVjACAAAAAACJ+VAwl9X88mjC76yzkWeL4K6AamNYjbNkpS9B6fQE4AAzc3AH0AAAAFZAAgAAAAAFMm2feF2fFCm/UC6AfIyepX/xJDSmnnolQIBnHcPmb5BXMAIAAAAABLI11kFrQoaNVZFmq/38aRNImPOjdJh0Lo6irI8M/AaAVjACAAAAAAZFgYh4nV8YN+/AAqe/QhKDkNBPg8KraCG7y3bOzhPV4AAzc4AH0AAAAFZAAgAAAAAJvXB3KyNiNtQko4SSzo/9b2qmM2zU9CQTTDfLSBWMgRBXMAIAAAAAAvjuVP7KsLRDeqVqRziTKpBrjVyqKiIbO9Gw8Wl2wFTAVjACAAAAAARuBn3JdQ8so7Gy0XVH0CtlvtRoJWhSrJP6eRIqbyWh8AAzc5AH0AAAAFZAAgAAAAAPGdcxDiid8z8XYnfdDivNMYVPgBKdGOUw6UStU+48CdBXMAIAAAAAARj6g1Ap0eEfuCZ4X2TsEw+Djrhto3fA5nLwPaY0vCTgVjACAAAAAATxLY6SkcLPaoOBJpQsggEqoxgJgiY9seP3fBQM05PckAAzgwAH0AAAAFZAAgAAAAAP5rGPrYGt3aKob5f/ldP0qrW7bmWvqnKY4QwdDWz400BXMAIAAAAADTQkW2ymaaf/bhteOOGmSrIR97bAnJx+yN3yMj1bTeewVjACAAAAAA6O+wIMt3xMITuCxVrqOCNBPX5F122G+/Is+EYkzUvVYAAzgxAH0AAAAFZAAgAAAAAAlz6wJze5UkIxKpJOZFGCOf3v2KByWyI6NB6JM9wNcBBXMAIAAAAABUC7P/neUIHHoZtq0jFVBHY75tSFYr1Y5S16YN5XxC1QVjACAAAAAANv97jMd9JmZ7lMeCzK7jaZHZYsZJNl6N9g9WXzd2om0AAzgyAH0AAAAFZAAgAAAAAFJ8AtHcjia/9Y5pLEc3qVgH5xKiXw12G9Kn2A1EY8McBXMAIAAAAAAxe7Bdw7eUSBk/oAawa7u
icTEDgXLymRNhBy1LAxhDvwVjACAAAAAAtOUZ2NeUWwESpq95O92dhqYBt8RtFnjT43E7k9mQF3oAAzgzAH0AAAAFZAAgAAAAAO8uwQUaKFb6vqR3Sv3Wn4QAonC2exOC9lGG1juqP5DtBXMAIAAAAABZf1KyJgQg8/Rf5c02DgDK2aQu0rNCOvaL60ohDHyY+gVjACAAAAAA5nAWj1Ek8p0MLYZEB3yoVFDfBYZ+/ZpIo71u+W9hprcAAzg0AH0AAAAFZAAgAAAAAE8YtqyRsGCeiR6hhiyisR/hccmK4nZqIMzO4lUBmEFzBXMAIAAAAAC1UYOSKqAeG1UJiKjWFVskRhuFKpj9Ezy+lICZvFlN5AVjACAAAAAA37oP30mTzVftI2FL9Uwyls/jqLqbmRDQk7p7nlx44uYAAzg1AH0AAAAFZAAgAAAAAPhCrMausDx1QUIEqp9rUdRKyM6a9AAx7jQ3ILIu8wNIBXMAIAAAAACmH8lotGCiF2q9VQxhsS+7LAZv79VUAsOUALaGxE/EpAVjACAAAAAAhVIkl8p19VZ0gpg32s++Jda3qsVSVB5tV4CKrtjhE3IAAzg2AH0AAAAFZAAgAAAAAOBi/GAYFcstMSJPgp3VkMiuuUUCrZytvqYaU8dwm8v2BXMAIAAAAACEZSZVyD3pKzGlbdwlYmWQhHHTV5SnNLknl2Gw8IaUTQVjACAAAAAAriBex2kK//RPhyVpCYJDBng4l5w8jD3m8BF7dVAP0p8AAzg3AH0AAAAFZAAgAAAAAIQuup+YGfH3mflzWopN8J1X8o8a0d9CSGIvrA5HOzraBXMAIAAAAADYvNLURXsC2ITMqK14LABQBI+hZZ5wNf24JMcKLW+84AVjACAAAAAAGeUzdG2kQrx5XypXJezZmPVzMYuqYZw7Bhbl4EPT0SMAAzg4AH0AAAAFZAAgAAAAAKsh0ADyOnVocFrOrf6MpTrNvAj8iaiE923DPryu124gBXMAIAAAAADg24a8NVE1GyScc6tmnTbmu5ulzO+896fE92lN08MeswVjACAAAAAAbO+DEBY3STVMQN7CbxmHDUBYrDR+80e797u/VmiK4vcAAzg5AH0AAAAFZAAgAAAAAKkVC2Y6HtRmv72tDnPUSjJBvse7SxLqnr09/Uuj9sVVBXMAIAAAAABYNFUkH7ylPMN+Bc3HWX1e0flGYNbtJNCY9SltJCW/UAVjACAAAAAAHlE3RLKcXpXto7Mr8nRgzEsmhjfGh4vcgPxashCSMg4AAzkwAH0AAAAFZAAgAAAAADDggcwcb/Yn1Kk39sOHsv7BO/MfP3m/AJzjGH506Wf9BXMAIAAAAAAYZIsdjICS0+BDyRUPnrSAZfPrwtuMaEDEn0/ijLNQmAVjACAAAAAA1c/sy3255NofBQrnBNSmVkSzMgsGPaaOUJShddVrnuQAAzkxAH0AAAAFZAAgAAAAAEWY7dEUOJBgjOoWVht1wLehsWAzB3rSOBtLgTuM2HC8BXMAIAAAAAAAoswiHRROurjwUW8u8D5EUT+67yvrgpB/j6PzBDAfVwVjACAAAAAALBvQumG8m7/bzJjGWN2cHSAncdN8jMtOSmEhEhGom24AAzkyAH0AAAAFZAAgAAAAAPZaapeAUUFPA7JTCMOWHJa9lnPFh0/gXfAPjA1ezm4ZBXMAIAAAAACmJvLY2nivw7/b3DOKH/X7bBXjJwoowqb1GtEFO3OYgAVjACAAAAAAGCDHuUJusGHKQ+r9nrFChmUUsRcqZKPGsRiLSk5gbFcAAzkzAH0AAAAFZAAgAAAAAK00u6jadxCZAiA+fTsPVDsnW5p5LCr4+kZZZOTDuZlfBXMAIAAAAAAote4zTEYMDgaaQbAdN8Dzv93ljPLdGjJzvnRn3KXgtQVjACAAAAAApZ6l+OrocOqgFek7WOqOP9JruTWZ+iW+5zdL3DZwzhkAAzk0AH0AAAAFZAAgAAAAAFbgabdyymiEVYYwtJSWa7lfl/oYuj/SukzJeDOR6wPVBXMAIAAAAADAFGFjS1vPbN6mQEhkDYTD6V2V23Ys9gUEUMGNvMPkaAVjACAAAAAAeICKly5Xtwmfd4JFD+5e1UWpu4V3KoRim7jeBICDs+UAAzk1AH0AAAAFZAAgAAAAABNMR6UBv2E627CqLtQ/eDYx7OEwQ7JrR4mSHFa1N8tLBXMAIAAAAAAxH4gucI4UmNVB7625C6hFSVCuIpJO3lusJlPuL8H5EQVjACAAAAAA0/uh1hrAlGGna/njCVaCqBwubxkifzF0zjCDQrIJOtUAAzk2AH0AAAAFZAAgAAAAAG8cd6WBneNunlqrQ2EmNf35W7OGObGq9WL4ePX+LUDmBXMAIAAAAAAjJ2+sX87NSis9hBsgb1QprVRnO7Bf+GObCGoUqyPE4wVjACAAAAAAAoJ4Ufdq45T9Aun5FupHlaBCIUsmUn6dXgV9KorpFikAAzk3AH0AAAAFZAAgAAAAAFWOUGkUpy8yf6gB3dio/aOfRKh7XuhvsUj48iESFJrGBXMAIAAAAAAY7sCDMcrUXvNuL6dO0m11WyijzXZvPIcOKob6IpC4PQVjACAAAAAAO/QFjczoznH95Iu0YEVMsU1GA1yxSGL8bcwSweNzAtkAAzk4AH0AAAAFZAAgAAAAAGUrIdKxOihwNmo6B+aG+Ag1qa0+iqdksHOjQj+Oy9bZBXMAIAAAAABwa5dbI2KmzBDNBTQBEkjZv4sPaeRkRNejcjdVymRFKQVjACAAAAAAUtdJucZoQvvPJiy65RonQrxBCcvtfHslpbgLbtWirCYAAzk5AH0AAAAFZAAgAAAAAOx89xV/hRk64/CkM9N2EMK6aldII0c8smdcsZ46NbP8BXMAIAAAAADBF6tfQ+7q9kTuLyuyrSnDgmrdmrXkdhl980i1KHuGHgVjACAAAAAAVc5njbLX1afpsuG662U/OBo4LanEk06lKbQtd95fswgAAzEwMAB9AAAABWQAIAAAAADJDdC9aEFl4Y8J/awHbnXGHjfP+VXQilPHJg7ewaJI7AVzACAAAAAAE+tqRl6EcBMXvbr4GDiNIYObTsYpa1n6BJk9EjIJVicFYwAgAAAAAITXOaLyGsYLUvOemQXxvnAHBwM/t3sMwfQus2aGoII+AAMxMDEAfQAAAAVkACAAAAAA3F9rjEKhpoHuTULVGgfUsGGwJs3bISrXkFP1v6KoQLgFcwAgAAAAAIBf0tXw96Z/Ds0XSIHX/zk3MzUR/7WZR/J6FpxRWChtBWMAIAAAAABzRshE3O7JzVjSGQSabvFXpen8sGUCAyr8hIIevROrjgADMTAyAH0AAAAFZAAgAAAAAOYIYoWkX7dGuyKfi3XssUlc7u/gWzqrR9KMkikKVdmSBXMAIAAAAABVF2OYjRTGi9Tw8XCAwZWLpX35Yl271TlNWp6N/nROhAVjACAAAAAAyGLbmHhe8eTQC8L2eDRcezUWULLZ9E8DPic7Mfp8/+4AAzEwMwB9AAAABWQAIAAAAACMtPm12YtdEA
vqu6Eji1yuRXnu1RJP6h0l7pH3lSH4MwVzACAAAAAAENyCFfyUAh1veQBGx+cxiB7Sasrj41jzCGflZkB5cRMFYwAgAAAAAA92s3DQ3aIVOrRzusqU+xdK7cnQGCDJiyLF+su3F1ZDAAMxMDQAfQAAAAVkACAAAAAAvlI4lDcs6GB1cnm/Tzo014CXWqidCdyE5t2lknWQd4QFcwAgAAAAAD60SpNc4O2KT7J0llKdSpcX1/Xxs97N715a1HsTFkmBBWMAIAAAAABR3y/ZE3VZqBhqfUttX4bzW9wkpDoexfYZIG2fMwftcQADMTA1AH0AAAAFZAAgAAAAAKl8zcHJRDjSjJeV/WvMxulW1zrTFtaeBy/aKKhadc6UBXMAIAAAAADBdWQl5SBIvtZZLIHszePwkO14W1mQ0izUk2Ov21cPNAVjACAAAAAAHvQLJ2X4ncjVv1BsOd/jbVouRPwf4222WlGSzPyAfnsAAzEwNgB9AAAABWQAIAAAAABb6LXDWqCp1beQgQjj8I3sRTtFhlrmiBi+h/+ikmrvugVzACAAAAAA9stpgTecT7uTyaGNs3K9Bp0A7R0QaIAOfscyMXHBPX8FYwAgAAAAANTp7TwMatTSeLH3GvsrB3IOvJo+0B8HRGUFcRzJVesdAAMxMDcAfQAAAAVkACAAAAAA97NTT+81PhDhgptNtp4epzA0tP4iNb9j1AWkiiiKGM8FcwAgAAAAAKPbHg7ise16vxmdPCzksA/2Mn/qST0L9Xe8vnQugVkcBWMAIAAAAACVl3ds8xRdGFPAtZ+WtC4ojC1q5OzB6dSJoOLaJsdhhgADMTA4AH0AAAAFZAAgAAAAAMqpayM2XotEFmm0gwQd9rIzApy0X+7HfOhNk6VU7F5lBXMAIAAAAACJR9+q5T9qFHXFNgGbZnPubG8rkO6cwWhzITQTmd6VgwVjACAAAAAAEEOCUo+ihRGl1kuHlabWBWUFyJPAXSXzpQB4od57cMEAAzEwOQB9AAAABWQAIAAAAADQnslvt6Hm2kJPmqsTVYQHE/wWeZ4bE1XSkt7TKy0r1gVzACAAAAAA8URTA4ZMrhHPvlp53TH6FDCzS+0+61qHm5XK6UiOrKEFYwAgAAAAANSmECEsZgXBpoAzAIwaU3H0K/6HIutCn+ELRGBFzykkAAMxMTAAfQAAAAVkACAAAAAAhujlgFPFczsdCGXtQ/002Ck8YWQHHzvWvUHrkbjv4rwFcwAgAAAAALbV0lLGcSGfE7mDM3n/fgEvi+ifjl7WZ5b3aqjDNvx9BWMAIAAAAACKzGyyRT/3KSmGlKMHpswxj7pNu3rCmOETZ2DUhQsCBgADMTExAH0AAAAFZAAgAAAAABm/6pF96j26Jm7z5KkY1y33zcAEXLx2n0DwC03bs/ixBXMAIAAAAAD01OMvTZI/mqMgxIhA5nLs068mW+GKl3OW3ilf2D8+LgVjACAAAAAAnPa8PTfcaSSAdzNAC54IMTicbShbtt/cSnFHz7u7g8wAAzExMgB9AAAABWQAIAAAAACfw9/te4GkHZAapC9sDMHHHZgmlTrccyJDPFciOMSOcwVzACAAAAAAIIC1ZpHObvmMwUfqDRPl4C1aeuHwujM1G/yJbvybMNAFYwAgAAAAAKeagivKdHk7wAFEPHyDhsm9mDNWH5UPB+oIIlmfbzSOAAMxMTMAfQAAAAVkACAAAAAAkxHJRbnShpPOylLoDdNShfILeA1hChKFQY9qQyZ5VmsFcwAgAAAAAKidrY+rC3hTY+YWu2a7fuMH2RD/XaiTIBW1hrxNCQOJBWMAIAAAAADH8s64PC+lo8Ul4oujBlb/irtJSwu12V0HbvNzj/9qUQADMTE0AH0AAAAFZAAgAAAAAByuYl8dBvfaZ0LO/81JW4hYypeNmvLMaxsIdvqMPrWoBXMAIAAAAABNddwobOUJzm9HOUD8BMZJqkNCUCqstHZkC76FIdNg9AVjACAAAAAA4SEPY+qEZ2dCu/9485IEVybiM3Z7szXYM+izxklNm14AAzExNQB9AAAABWQAIAAAAABxMy7X5hf7AXGDz3Y/POu1ZpkMlNcSvSP92NOO/Gs7wAVzACAAAAAAHJshWo2T5wU2zvqCyJzcJQKQaHFHpCpMc9oWBXkpUPoFYwAgAAAAANeYm2aAT9f6T9uwQyH4sw2DUQyKTpo/CHt+Bb67ao4TAAMxMTYAfQAAAAVkACAAAAAAlqbslixl7Zw3bRlibZbe/WmKw23k8uKeIzPKYEtbIy0FcwAgAAAAAHEKwpUxkxOfef5HYvulXPmdbzTivwdwrSYIHDeNRcpcBWMAIAAAAAASUKCgxGu2U42qfKiyMimLPiZBMurf5v8lQJUSrhAWIAADMTE3AH0AAAAFZAAgAAAAAMXrXx0saZ+5gORmwM2FLuZG6iuO2YS+1IGPoAtDKoKBBXMAIAAAAADIQsxCr8CfFKaBcx8kIeSywnGh7JHjKRJ9vJd9x79y7wVjACAAAAAAPzOTJ088k9QdGUpZ1ubUdkw3pTGkNF8bui4a9wqeo08AAzExOAB9AAAABWQAIAAAAAAm83FA9yDUpwkbKTihe7m53u+DivS9BU2b4vQMtCVQ2AVzACAAAAAAz3m1UB/AbZPa4QSKFDnUgHaT78+6iGOFAtouiBorEgEFYwAgAAAAAHuM0gIDED47c9rdQ4MVtVzpVfn5zTF7eokO4lew4EM+AAMxMTkAfQAAAAVkACAAAAAAJRzYK0PUwr9RPG2/7yID0WgcTJPB2Xjccp5LAPDYunkFcwAgAAAAAIIh24h3DrltAzNFhF+MEmPrZtzr1PhCofhChZqfCW+jBWMAIAAAAAD6LppOeSiLHFI7EOBAz9pZPKU7LWbRWIINTlqw4zmJ5gADMTIwAH0AAAAFZAAgAAAAAHSaHWs/dnmI9sc7nB50VB2Bzs0kHapMHCQdyVEYY30TBXMAIAAAAACkV22lhEjWv/9/DubfHBAcwJggKI5mIbSK5L2nyqloqQVjACAAAAAA1I5pLbnlFf/EfJlhE0RFIioDjzKrWNh4TO8H/ksrbMcAAzEyMQB9AAAABWQAIAAAAAAC/I4TQRtCl12YZmdGz17X4GqSQgfwCPgRBwdHmdwu+QVzACAAAAAAx8f3z2ut/RAZhleari4vCEE+tNIn4ikjoUwzitfQ588FYwAgAAAAADX1qVlsvYru6YDBMtmlQoIBhLSbVykBtft8wn/A+ohJAAMxMjIAfQAAAAVkACAAAAAADGB5yU2XT0fse/MPWgvBvZikVxrl5pf3S5K1hceKWooFcwAgAAAAAIxTmlLHMjNaVDEfJbXvRez0SEPWFREBJCT6qTHsrljoBWMAIAAAAAD+RiUk9IWfaDWXtzLp2NX/vCCeU9amFnQgpJjy56dQXQADMTIzAH0AAAAFZAAgAAAAABmO7QD9vxWMmFjIHz13lyOeV6vHT6mYCsWxF7hb/yOjBXMAIAAAAACT9lmgkiqzuWG24afuzYiCeK9gmJqacmxAruIukd0xEAVjA
CAAAAAAyiZxtuOZQNSit4wFMJS3jnqLbDzyJ0OtDTKs6r0EO0cAAzEyNAB9AAAABWQAIAAAAAAfPUoy7QyZKhIIURso+mkP9qr1izbjETqF5s22GwjCjAVzACAAAAAAvLMsIDQ/go4VUxeh50UHmsvMvfx51cwyONnRD2odvC0FYwAgAAAAAMwY/KvSIOLoSHOq4TQ1V9ZSCPZc34SdPooPL4gYXa7UAAMxMjUAfQAAAAVkACAAAAAAE3ZqUar0Bq2zWbARE0bAv98jBlK9UJ73/xcwdMWWlSkFcwAgAAAAAK4M+MmC+9sFiFsumMyJZQKxWmmJiuG9H7IzKw083xxkBWMAIAAAAACpywsi9vc979ApgH3BDzph7Y4VIAkQhji97sswzdLAYwADMTI2AH0AAAAFZAAgAAAAAMo8FJiOq63cAmyk2O7eI7GcbQh/1j4RrMTqly3rexftBXMAIAAAAADjVmpd0WiRGTw/gAqEgGolt2EI7Csv14vKdmYoMD0aAgVjACAAAAAAEPmYjDZnMZdkaYlLZLlRgist9kCrSQe0mSuKxPwwYbQAAzEyNwB9AAAABWQAIAAAAACu5IGaIx7A3Jvly/kzlCsSA4s3iJwuIl8jEdRH0k93NwVzACAAAAAA9NRUyxYE+t0Xyosyt6vIfMFW/vBoYg6sR+jBNs4JAxIFYwAgAAAAAKMbsr4JCCodHmkBvfhOe7BJigKiJik95wLeu3PjLSpXAAMxMjgAfQAAAAVkACAAAAAALkRy0GJInXYLA+cgjs6Myb0a+Gu9hgXhHvhLNoGWfckFcwAgAAAAANbALyt9zCSvwnLaWCd2/y2eoB7qkWTvv1Ldu8r40JPuBWMAIAAAAABi0dME7AUSoeMv4yIgnGMgrX9G+Uk6HV+I9/9zzABcZwADMTI5AH0AAAAFZAAgAAAAAGoUK/DSWhT8LZhszSUqDbTrp8cSA7rdqmADKL+MILtTBXMAIAAAAABHnEE9bVa6lvhfhEMkkV2kzSSxH/sMW/FIJuw3CzWs6wVjACAAAAAAagOaxIb7co+HxAXhfI9ahHLUNqmGuN/4JxeR9sm6PdkAAzEzMAB9AAAABWQAIAAAAAC922ZDQE3h2fQKibGMZ9hV0WNlmrPYYSdtaSyYxsWYqgVzACAAAAAAagMovciKK6WVjIc2cCj8nK5O/gVOFFVeVAJpRp89tmQFYwAgAAAAAAMsWoODZkTvKSNWzloogbp/AiFFspENp49RwKjWpMLHAAMxMzEAfQAAAAVkACAAAAAAtK+JqX3K/z2txjAU15DgX4y90DS2YLfIJFolCOkJJJwFcwAgAAAAAMnR5V7gfX7MNqqUdL5AkWlkhyFXaBRVNej+Rcn8lrQkBWMAIAAAAADOmDHuXwsu9uwKYYSN5XIb/TG3K5DB5aci/X1ATKTaSwADMTMyAH0AAAAFZAAgAAAAAAeGrIMK/bac6kPczxbvRYqKMkcpeI2FjdMpD91FDWIvBXMAIAAAAAAix62z1LeS8yvSXCl5gHSIomjyx76fF3S1lp9k900hygVjACAAAAAAUK1Gzlm0meDhYPL7ZgKaCD0qz7Zb5rtnPn1EEW3UZxoAAzEzMwB9AAAABWQAIAAAAACphf298InM0Us4HT8o1W1MGw0D/02vd7Jh+U0h7qaFaQVzACAAAAAAFXtk7YpqsOJxsqGWSIL+YcBE96G3Zz9D31gPqDW94y8FYwAgAAAAAEA5mnfyccI1SjyYftq3qCuORs/NxzzqUA+fxn3FRjVsAAMxMzQAfQAAAAVkACAAAAAATLDS2cuDVM3yDMuWNgk2iGKBTzPpfJMbvxVOSY39ZfcFcwAgAAAAAPT5wRi2cLHIUflXzm6EQB/m7xdThP80ir1VV/JBBqvxBWMAIAAAAABTj7i+CxxNVkbnAkSYga/v2X3/NH3nD/FRiOo2FPkzvAADMTM1AH0AAAAFZAAgAAAAAJNjExiZVX7jfFGfYpQu16qxLN0YPqVU/5CQ/Y67YSinBXMAIAAAAABMpm2+6KrkRUlXzQoMPHrQmIO6dkQz66tYdfTeA3dKqQVjACAAAAAATkNGECGUFzo34ItVnTEmxbe8dbde8QzMgEEdjMhRLz8AAzEzNgB9AAAABWQAIAAAAABlFkYtLCx901X6QVVMkSn6Z7k30UF4xHaA0OZJJ9bdyQVzACAAAAAATez+F9GHcGzTp7jjv4feboUNb8JCkIp4EqcPFisnq7MFYwAgAAAAAOHfn3eVRhSWn61AfZuAnCGleSLluJ3sF9IFGUWrCsBaAAMxMzcAfQAAAAVkACAAAAAA8NJKN0IxZnruhswGQkiruv8Ih0EMwDcSZx/Xasup9dkFcwAgAAAAAKaJZRxzA+Igeydvuk6cSwUHXcrmT4PjhuPu//FslpdnBWMAIAAAAAC3Cs/mKg+sbU/BC9QWFglOau6FWAP1sJqqSZOEOdzh+gADMTM4AH0AAAAFZAAgAAAAABHxHdEClz7hbSSgE58+dWLlSMJnoPz+jFxp4bB1GmLQBXMAIAAAAAD3nSvT6aGD+A110J/NwEfp0nPutlmuB5B+wA3CC3noGAVjACAAAAAA56lpvjMjFzR6NMGiSF1es1X01dwINWqaxyq3UUL8XJ0AAzEzOQB9AAAABWQAIAAAAAC/o8qW/ifk3KuJ01VFkyNLgQafxB5/bGs2G5VyyVafOwVzACAAAAAA1bMqAFGDHSl6BYNLbxApvkAv2K1/oafywiX0MDz1dGUFYwAgAAAAAMjJrrd6E5DyUuXkWZsAqo020Qvom6xqAluQLod5SmtdAAMxNDAAfQAAAAVkACAAAAAAY2V8I1bz3a1AxTtmED6UhdhA09huFkuuEX8R+d/WDPUFcwAgAAAAAPTVoNRiI76tcRKqd+JBBVyy4+YcKST42p0QX2BtmQ2VBWMAIAAAAAD0kQXxOWWOfIzqhJk5Rnvp/h5VXcGxxLZ8HMvVVH++YQADMTQxAH0AAAAFZAAgAAAAAO2kSsW0WGN9AOtK4xK2SHrGhWiaAbMEKT4iZkRpaDN/BXMAIAAAAABKGzQcPM8LT2dwOggxoWjv/1imYWabbG/G4kBw8OWaxAVjACAAAAAAT3sD7PTrAPtOtiSRrG4IKmSNr/uWgX8TvIyZpZzj4wkAAzE0MgB9AAAABWQAIAAAAAAiCwzNEEaH/mDam68IdDftnhthyUFdb+ZCNSBQ91WlHQVzACAAAAAA7tHyHcxCzmbJeFYZyPm4mEgkTGKOvwY4MX82OvH0Jn8FYwAgAAAAAP4A3nd470CwvaGzyJLfzbrw7ePJV35V3Cw6tuiPFlsCAAMxNDMAfQAAAAVkACAAAAAAGuCHVNJSuoVkpPOnS5s89GuA+BLi2IPBUr2Bg1sWEPIFcwAgAAAAAEl1gncS5/xO7bQ/KQSstRV3rOT2SW6nV92ZANeG2SR6BWMAIAAAAADi6/zM3xSBXEfpRpvEdeycRDl08SmwwLey72qgZaY7FwADMTQ0AH0AAAAFZAAgAAAAAMfxz7gEaoCdPvXr
ubDhCZUS0ARLZc1svgbXgMDlVBPgBXMAIAAAAAB6a5dDA3fuT5Vz2KvAcbUEFX/+B7Nw2p1QqbPoQ5TTuAVjACAAAAAAB4Kp5ewWpFMJ2T+QCrfw2eJV70L4M+GM8khV1JwqOqQAAzE0NQB9AAAABWQAIAAAAACnYsqF/VzmjIImC9+dqrHO1TM6lJ6fRwM0mM6Wf6paOwVzACAAAAAA5tgZzch8uDCR1ky3SllVaKVpxAlbrhvlNDTazZZRZOAFYwAgAAAAALDJDWkmExE4L8hf2CDNLuF+dgivUGXb7ZvBp90EALm+AAMxNDYAfQAAAAVkACAAAAAAEC0sIVmadtW4YMuRXH7RpAhXclsd+3bmqGXCMeaT014FcwAgAAAAABPpXh0uzpsJJB+IRUNajmMB9WGwswfpw5T9xk3Xj6ANBWMAIAAAAABauth7HsI90juToIGY6149TbrxP8Emoa+5Izilj9zeWQADMTQ3AH0AAAAFZAAgAAAAABaIB3n49Xm9cOafSrQsE0WCcYp8rMIO/qVwIlMF5YLRBXMAIAAAAAC9EyWJV3xOu9bzgdJ/yX+ko7qLf1u3AxNMataW2C9EzQVjACAAAAAAy7IAYSgbqP7CyvurHMoEq1eO08TIPNO/XcS1L5RKXHkAAzE0OAB9AAAABWQAIAAAAAA5rZItA/cocRnngYqcJ3nBXQ+l688aKz3EQyLbYYunPAVzACAAAAAAwKyA+L7TgxztPClLrIMk2JXR+w7c04N3ZOqPgjvrIvsFYwAgAAAAABaAwqPDgkTaz4888/3PeF64+vtHbLtAZgFgEurWT5kbAAMxNDkAfQAAAAVkACAAAAAA3htn7oHJ0YYpIrs+Mzyh85Ys67HwAdv5LQl1mCdoMWkFcwAgAAAAAEHjCtNNLenHuSIYux6ezAHsXDaj2DlTF67ToDhDDe6HBWMAIAAAAAAVwUZIMifuPtmiFSEctLt+bOFo1T4fgGiWNsZAP0ddeQADMTUwAH0AAAAFZAAgAAAAAEnt18Km/nqggfIJWxzTr9r3hnXNaueG6XO9A5G11LnGBXMAIAAAAAD7QxzGMN/ard5TfFLecE6uusMmXG2+RBsBR+/NCQHUwAVjACAAAAAAIPTDniUskKj5mEzULdlKtIDquwjcqkwunJyhmBazNNcAAzE1MQB9AAAABWQAIAAAAAAVAKK/GoY8AACu/hyMpO4hdLq6JnEyWNzkyci9sbaD/wVzACAAAAAA2HmeqpMlvvBpV2zQTYIRmsc4MFlfHRwLof0ycJgMg/MFYwAgAAAAACHHCAszhxijRN9Es+XRQxXo7JQ6g2qM6f5sIdMy0VKbAAMxNTIAfQAAAAVkACAAAAAAhHyq1GQC/GiMwpYjcsfkNxolJ10ARKjIjfkW1Wipzi0FcwAgAAAAAD/uaGWxTDq87F8XZ6CrFI+RNa8yMqfSZdqK00Kj833BBWMAIAAAAAD1mJD7i29l2xloVHwS1n0CbkyodWeFfZM1KF1r4HqNIgADMTUzAH0AAAAFZAAgAAAAABAJJxHoZD+MQBWqm9UM9Dd3z5ZohIZGWRaRVRsMptKQBXMAIAAAAADrE/ca+gqj/SH4oao4wE4qn2ovoTydzcMbDbrfnUs3zAVjACAAAAAAAIUpF9mApVCMCckE7S+K9RL6TPtlTTYuzUsj9E6oIpUAAzE1NAB9AAAABWQAIAAAAAAv01wz7VG9mTepjXQi6Zma+7b/OVBaKVkWNbgDLr1mFgVzACAAAAAA0I5sxz8r6wkCp5Tgvr+iL4p6MxSOq5d3e1kZG+0b7NkFYwAgAAAAAKwOaUTAOw6Y1F5vFrbDf8yVua5Fm6hKhkdIrzejuaatAAMxNTUAfQAAAAVkACAAAAAAWXecRwxSon68xaa9THXnRDw5ZfzARKnvvjTjtbae6T0FcwAgAAAAAPh0UfUMEo7eILCMv2tiJQe1bF9qtXq7GJtC6H5Va4fIBWMAIAAAAAASanx14WR2rmomuZ2x/nuD8j75pYzz+ZAt/v7uRcoOEQADMTU2AH0AAAAFZAAgAAAAAEY5WL8/LpX36iAB1wlQrMO/xHVjoO9BePVzbUlBYo+bBXMAIAAAAABoKcpadDXUARedDvTmzUzWPe1jTuvD0z9oIcZmKuiSXwVjACAAAAAAWhdUWP186dYAIDx6RtC0o/lzwYNvYBXm4RWFMbcU3YkAAzE1NwB9AAAABWQAIAAAAADnJnWqsfx0xqNnqfFGCxIplVu8mXjaHTViJT9+y2RuTgVzACAAAAAAWAaSCwIXDwdYxWf2NZTly/iKVfG/KDjHUcA1BokN5sMFYwAgAAAAAPS3GDNsHgCuTgEWTHiEStoY0VMzlopZ8L7wPUquK7ayAAMxNTgAfQAAAAVkACAAAAAAsPG7LaIpJvcwqcbtfFUpIjj+vpNj70Zjaw3eV9T+QYsFcwAgAAAAAJQ71zi0NlCyY8ZQs3IasJ4gB1PmWx57HpnlCf3+hmhqBWMAIAAAAABmDfQAQeyeutCB+wSn4RmErwYEkYpyBeCYA+3iutFzAQADMTU5AH0AAAAFZAAgAAAAAAGiSqKaQDakMi1W87rFAhkogfRAevnwQ41onWNUJKtuBXMAIAAAAAASgiDpXfGh7E47KkOD8MAcX8+BnDShlnU5JAGdnPdqOAVjACAAAAAAucFM6KF1YHEhH9vn16ox+VSqNEykMGaJzhLPsLI7qk8AAzE2MAB9AAAABWQAIAAAAAB7L4EnhjKA5xJD3ORhH2wOA1BvpnQ+7IjRYi+jjVEaJAVzACAAAAAAuhBIm0nL3FJnVJId+7CKDASEo+l2E89Z9/5aWSITK4AFYwAgAAAAALxpb5qDek038/wz4E//KIrwwo0voaqwlB7cTXSQ44EHAAMxNjEAfQAAAAVkACAAAAAAaROn1NaDZFOGEWw724dsXBAm6bgmL5i0cki6QZQNrOoFcwAgAAAAANVT8R6UvhrAlyqYlxtmnvkR4uYK/hlvyQmBu/LP6/3ZBWMAIAAAAAC1ONheOA57vNof7iZM3b82xk1abqT0bppzjQnHCmnUIwADMTYyAH0AAAAFZAAgAAAAADqSR5e0/Th59LrauDA7OnGD1Xr3H3NokfVxzDWOFaN7BXMAIAAAAACt30faNwTWRbvmykDpiDYUOCwA6QDbBBYBFWS7rdOB4AVjACAAAAAAQZlvU/gzOwW2rcr5+W43Q6EkfX8oX1TpRJWEAZAULx4AAzE2MwB9AAAABWQAIAAAAADmtb1ZgpZjSeodPG/hIVlsnS8hoRRwRbrTVx89VwL62AVzACAAAAAAi38e1g6sEyVfSDkzZbaZXGxKI/zKNbMasOl2LYoWrq8FYwAgAAAAAFWkHkxPA28FjeUjeRILC0tX8OJcanhPP68QxM5s1kcMAAMxNjQAfQAAAAVkACAAAAAAf/f7AWVgBxoKjr7YsEQ4w/fqSvuQWV2HMiA3rQ7ur0sFcwAgAAAAADkkeJozP6FFhUdRIN74H4UhIHue+eVbOs1NvbdWYFQrBWMAIAA
AAADS3qKdAEzPtGMFpsisEhnOfF0zfxmLXjHKKaNCzX4PhQADMTY1AH0AAAAFZAAgAAAAAI2WEOymtuFpdKi4ctanPLnlQud+yMKKb8p/nfKmIy56BXMAIAAAAADVKrJmhjr1rfF3p+T+tl7UFd1B7+BfJRk0e7a4im7ozgVjACAAAAAAxeqPjcgDwae+AgEXg3hU118dHoy6rcHd6jAJrGik+EcAAzE2NgB9AAAABWQAIAAAAAAuHU9Qd79hjyvKOujGanSGDIQlxzsql8JytTZhEnPw+AVzACAAAAAAjF2gV/4+sOHVgDd/oR5wDi9zL7NGpGD+NsEpGXy/a4QFYwAgAAAAAIFYquIAyUn3D2peptolq3s8oIgSLEIctqBYx6qEeQTxAAMxNjcAfQAAAAVkACAAAAAA7dQDkt3iyWYCT94d7yqUtPPwp4qkC0ddu+HFdHgVKEkFcwAgAAAAANuYvtvZBTEq4Rm9+5eb7VuFopowkrAuv86PGP8Q8/QvBWMAIAAAAABBO8poBKW/RWjdHBARy3Rzghz3frQIZEPE/nSKWYlVPgADMTY4AH0AAAAFZAAgAAAAAAwnZSDhL4tNGYxlHPhKYB8s28dY5ScSwiKZm3UhT8U3BXMAIAAAAABDoY6dhivufTURQExyC9Gx3ocpl09bgbbQLChj3qVGbgVjACAAAAAA4JWOs1XAUgX3rGBBGEDDIC9/i9khJ+FgIcwfvj/SnS0AAzE2OQB9AAAABWQAIAAAAAC2VuRdaC4ZJmLdNOvD6R2tnvkyARteqXouJmI46V306QVzACAAAAAAMn1Z6B35wFTX9mEYAPM+IiJ5hauEwfD0CyIvBrxHg7IFYwAgAAAAAMRQnV65wbGlT+7Wj/HvGPLKg8NBnhkQ8Hx/MIyWvynTAAMxNzAAfQAAAAVkACAAAAAAVerb7qVNy457rNOHOgDSKyWl5ojun7iWrv1uHPXrIZQFcwAgAAAAAIDcYS9j5z+gx0xdJj09L7876r/vjvKTi/d3bXDE3PhyBWMAIAAAAAANDuaZsI2eS6/qiSdG9pCLjcPdKS96xWUJr84V/1La7gADMTcxAH0AAAAFZAAgAAAAAH/E44yLxKCJjuSmU9A8SEhbmkDOx1PqqtYcZtgOzJdrBXMAIAAAAABgLh9v2HjBbogrRoQ82LS6KjZQnzjxyJH4PH+F3jupSAVjACAAAAAAd93miIlZ/uuhzjVBt0BPjzeewxih6TSzkzbsNhpMjA8AAzE3MgB9AAAABWQAIAAAAAAlNUdDL+f/SSQ5074mrq0JNh7CTXwTbbhsQyDwWeDVMwVzACAAAAAANIH2IlSNG0kUw4qz0budjcWn8mNR9cJlYUqPYdonucAFYwAgAAAAAF9OccClBu1j8BVMDmMtJyCxnhWCbg9FuY/8nhuFGJknAAMxNzMAfQAAAAVkACAAAAAASZ+CvUDtlk/R4HAQ3a+PHrKeY/8ifAfh0oXYFqliu80FcwAgAAAAAJelpzPgM65OZFt/mvGGpwibclQ49wH+1gbUGzd9OindBWMAIAAAAAC63uwNXVbd1VP5If++NprVRsHCbomU9Iq3GxCttNAweAADMTc0AH0AAAAFZAAgAAAAAN9fkoUVbvFV2vMNMAkak4gYfEnzwKI3eDM3pnDK5q3lBXMAIAAAAACnDkgVNVNUlbQ9RhR6Aot2nVy+U4km6+GHPkLr631jEAVjACAAAAAA5d2REAVBZI9rUfeqJ9jhePmRXvrGolUbMTFiC78k/OIAAzE3NQB9AAAABWQAIAAAAAASyK3b1nmNCMptVEGOjwoxYLLS9fYWm/Zxilqea0jpEQVzACAAAAAADDHsGrbqlKGEpxlvfyqOJKQJjwJrzsrB7k3HG0AUJbkFYwAgAAAAACZHzWGY3PEL4uPSKPezGGJkyScYw/Vt3bKmX5inVMaOAAMxNzYAfQAAAAVkACAAAAAAlSP9iK31GlcG9MKGbLmq+VXMslURr+As736rrVNXcsUFcwAgAAAAAAvbj0zfq9zzi8XReheKFbCB+h9IsOLgXPPpI5vrEJNZBWMAIAAAAABb9Y2Ox3MeHCfGnMxpR7nk6hwIJg0J8/tT8fZHwxMCBQADMTc3AH0AAAAFZAAgAAAAAOJNORH8Bev97gVU7y6bznOxJ+E6Qoykur1QP76hG1/7BXMAIAAAAAC+C1PtOOrSZgzBAGhr+dPe/kR0JUw9GTwLVNr61xC1aAVjACAAAAAA4rDk/UlRHudzR12i5Ivcb5Pxwn63JX2Sjf2xrlALL7kAAzE3OAB9AAAABWQAIAAAAAAKcHzLUomavInN6upPkyWhAqYQACP/vdVCIYpiy6U6HgVzACAAAAAATsR4KItY6R2+U7Gg6sJdaEcf58gjd1OulyWovIqfxKcFYwAgAAAAAEac2NX3v0n87D/eUM/a3Gj+5Axs896SCidMyOMaOz6zAAMxNzkAfQAAAAVkACAAAAAAqTOLiMpCdR59tLZzzIPqJvbCNvz2XQL9ust0qYaehtcFcwAgAAAAAArefox/3k5xGOeiw2m6NUdzuGxmPwcu5IFcj+jMwHgHBWMAIAAAAABK08kOnYjDWhLJ70iVhd3XEj0Q0srEQttJTawBm6GuOwADMTgwAH0AAAAFZAAgAAAAAIPSqSeVzSRgNVNmrPYHmUMgykCY27NbdDUNhE5kx/SgBXMAIAAAAAAhX90nNfxyXmZe/+btZ7q6xMX4PFyj0paM1ccJ/5IUUQVjACAAAAAA/9XTbq8UVKJbIeYtfNg8pdDrpDqbCwVr5Rq3Tb4dFwwAAzE4MQB9AAAABWQAIAAAAACLn5DxiqAosHGXIAY96FwFKjeqrzXWf3VJIQMwx1fl4gVzACAAAAAAindvU27nveutopdvuHmzdENBbeGFtI3Qcsr07jxmvm8FYwAgAAAAAJ3GMHBeIaCyFV5zBgcUMpjfIDcdoaTYQSyQN2shVJMWAAMxODIAfQAAAAVkACAAAAAA7i2S6rHRSPBwZEn59yxaS7HiYBOmObIkeyCcFU42kf8FcwAgAAAAAGb3RSEyBmgarkTvyLWtOLJcPwCKbCRkESG4RZjVmY4iBWMAIAAAAABKIdcHRSJ3dnevw5Pj0BfULPqmDOA7KBDREMGv5rXUEAADMTgzAH0AAAAFZAAgAAAAAFz+M+H/Z94mdPW5oP51B4HWptp1rxcMWAjnlHvWJDWrBXMAIAAAAACBFEOQyL7ZHu4Cq33QvXkmKuH5ibG/Md3RaED9CtG5HwVjACAAAAAABf2EvIMumZohp2E/sHWEJW4VMU3ez95hwWU9pjKGRCYAAzE4NAB9AAAABWQAIAAAAAAE7c2x3Z3aM1XGfLNk/XQ9jCazNRbGhVm7H8c2NjS5ywVzACAAAAAARJ9h8fdcwA19velF3L/Wcvi2rCzewlKZ2nA0p8bT9uwFYwAgAAAAAPl1vH6JaJGZSJ8au0NK58X4gqleryub8slUhA0HRtzmAAMxODUAfQAAAAVkACAAAAAAVoRt9B9dNVvIMGN+ea
5TzRzQC+lqSZ8dd/170zU5o9cFcwAgAAAAAEwM95XZin5mv2yhCI8+ugtKuvRVmNgzzIQN0yi1+9aIBWMAIAAAAAA+rgph1avVrhyHl/6G+HtASJSiJkv7u2UPYj+25fwtLAADMTg2AH0AAAAFZAAgAAAAAKRDkjyWv/etlYT4GyoXrmBED2FgZHnhc+l9Wsl06cH2BXMAIAAAAABohlpm3K850Vndf3NmNE0hHqDlNbSR8/IvMidQ3LnIZAVjACAAAAAAKw6WqWDrI7ZfyndtrftL3uh7BaF/FQxhkh8b0Zl547gAAzE4NwB9AAAABWQAIAAAAAC3DFh5oklLCNLY90bgWm68dFXz65JpAZSp1K99MBTPAQVzACAAAAAAQgZecmxEUZVHoptEQClDwAf8smI3WynQ/i+JBP0g+kQFYwAgAAAAADmUpVNJUwrydIllFvI/aau1703nk6tF2sfzkgFlWsKCAAMxODgAfQAAAAVkACAAAAAAQ5WKvWSB503qeNlOI2Tpjd5blheNr6OBO8pfJfPNstcFcwAgAAAAAKwHgQLSDJ5NwLBQbY5OnblQIsVDpGV7q3RCbFLD1U4/BWMAIAAAAADKOab9GTEiaybo9o4J72Pb9LiQU/CuKoQm6Jtlmo/IeQADMTg5AH0AAAAFZAAgAAAAAGfhFY3RGRm5ZgWRQef1tXxHBq5Y6fXaLAR4yJhrTBplBXMAIAAAAACKEF0ApLoB6lP2UqTFsTQYNc9OdDrs/vziPGzttGVLKQVjACAAAAAAvJcnqBNCZgHe+Q7Cqk+yiAaIuSJIRItXEsAlKy9PES4AAzE5MAB9AAAABWQAIAAAAAAIM73gPcgzgotYHLeMa2zAU4mFsr7CbILUZWfnuKSwagVzACAAAAAAJCSu98uV8xv88f2BIOWzt6p+6EjQStMBdkGPUkgN79cFYwAgAAAAABQqDP10DcaqEY7vxFS7U/9xcadDASONow1+xmVsPZ11AAMxOTEAfQAAAAVkACAAAAAAkWmHCUsiMy1pwZTHxVPBzPTrWFBUDqHNrVqcyyt7nO8FcwAgAAAAAMv2CebFRG/br7USELR98sIdgE9OQCRBGV5JZCO+uPMgBWMAIAAAAABxLEFq6nFVI3WaYhggo1d6oeDsBSm4wCW0T6D0yE/puwADMTkyAH0AAAAFZAAgAAAAAGInUYv0lP/rK7McM8taEHXRefk8Q2AunrvWqdfSV7UaBXMAIAAAAACE+WPxJ3gan7iRTbIxXXx+bKVcaf8kP4JD8DcwU0aL7wVjACAAAAAAiThzC5jAZ5D6xuCIsUD1aLy7KxmT2Fb8ybTkLxvj17MAAzE5MwB9AAAABWQAIAAAAACmHlg2ud3cplXlTsNTpvNnY6Qm1Fce0m899COamoDjaQVzACAAAAAArtJQeJIlepBWRU2aYar7+YGYVQ7dfDc1oxgTmA8r9q0FYwAgAAAAADZpcxtBFv18EdmhxsoxlDs253tNB5zqVY/h+nFD1isIAAMxOTQAfQAAAAVkACAAAAAApzcWSAbZWV/Rq+ylRNqqlJqNVR4fhXrz4633/MQOQgcFcwAgAAAAAN/jz/bsEleiuCl+li83EWlG6UMHA8CyaOMRKCkXkSCPBWMAIAAAAADSTHgALJugYsjIvSK1o0UwG6VizNokXQ/tnPNjOpGB1AAABWUAIAAAAADrmnP3kS2GpCl+gdL2da90KHTkBX46iQ/sZRoj7uPz7BJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAAA==", + "base64": 
"DeFiAAADcGF5bG9hZACxYgAABGcAnWIAAAMwAH0AAAAFZAAgAAAAAJu2KgiI8vM+kz9qD3ZQzFQY5qbgYqCqHG5R4jAlnlwXBXMAIAAAAAAAUXxFXsz764T79sGCdhxvNd5b6E/9p61FonsHyEIhogVsACAAAAAAt19RL3Oo5ni5L8kcvgOJYLgVYyXJExwP8pkuzLG7f/kAAzEAfQAAAAVkACAAAAAAPQPvL0ARjujSv2Rkm8r7spVsgeC1K3FWcskGGZ3OdDIFcwAgAAAAACgNn660GmefR8jLqzgR1u5O+Uocx9GyEHiBqVGko5FZBWwAIAAAAADflr+fsnZngm6KRWYgHa9JzK+bXogWl9evBU9sQUHPHQADMgB9AAAABWQAIAAAAAD2Zi6kcxmaD2mY3VWrP+wYJMPg6cSBIYPapxaFQxYFdQVzACAAAAAAM/cV36BLBY3xFBXsXJY8M9EHHOc/qrmdc2CJmj3M89gFbAAgAAAAAOpydOrKxx6m2gquSDV2Vv3w10GocmNCFeOo/fRhRH9JAAMzAH0AAAAFZAAgAAAAAOaNqI9srQ/mI9gwbk+VkizGBBH/PPWOVusgnfPk3tY1BXMAIAAAAAAc96O/pwKCmHCagT6T/QV/wz4vqO+R22GsZ1dse2Vg6QVsACAAAAAAgzIak+Q3UFLTHXPmJ+MuEklFtR3eLtvM+jdKkmGCV/YAAzQAfQAAAAVkACAAAAAA0XlQgy/Yu97EQOjronl9b3dcR1DFn3deuVhtTLbJZHkFcwAgAAAAACoMnpVl6EFJak8A+t5N4RFnQhkQEBnNAx8wDqmq5U/dBWwAIAAAAACR26FJif673qpwF1J1FEkQGJ1Ywcr/ZW6JQ7meGqzt1QADNQB9AAAABWQAIAAAAAAOtpNexRxfv0yRFvZO9DhlkpU4mDuAb8ykdLnE5Vf1VAVzACAAAAAAeblFKm/30orP16uQpZslvsoS8s0xfNPIBlw3VkHeekYFbAAgAAAAAPEoHj87sYE+nBut52/LPvleWQBzB/uaJFnosxp4NRO2AAM2AH0AAAAFZAAgAAAAAIr8xAFm1zPmrvW4Vy5Ct0W8FxMmyPmFzdWVzesBhAJFBXMAIAAAAABYeeXjJEzTHwxab6pUiCRiZjxgtN59a1y8Szy3hfkg+gVsACAAAAAAJuoY4rF8mbI+nKb+5XbZShJ8191o/e8ZCRHE0O4Ey8MAAzcAfQAAAAVkACAAAAAAl+ibLk0/+EwoqeC8S8cGgAtjtpQWGEZDsybMPnrrkwEFcwAgAAAAAHPPBudWgQ+HUorLDpJMqhS9VBF2VF5aLcxgrM1s+yU7BWwAIAAAAAAcCcBR2Vyv5pAFbaOU97yovuOi1+ATDnLLcAUqHecXcAADOAB9AAAABWQAIAAAAACR9erwLTb+tcWFZgJ2MEfM0PKI9uuwIjDTHADRFgD+SQVzACAAAAAAcOop8TXsGUVQoKhzUllMYWxL93xCOkwtIpV8Q6hiSYYFbAAgAAAAAKXKmh4V8veYwob1H03Q3p3PN8SRAaQwDT34KlNVUjiDAAM5AH0AAAAFZAAgAAAAALv0vCPgh7QpmM8Ug6ad5ioZJCh7pLMdT8FYyQioBQ6KBXMAIAAAAADsCPyIG8t6ApQkRk1fX/sfc1kpuWCWP8gAEpnYoBSHrQVsACAAAAAAJe/r67N6d8uTiogvfoR9rEXbIDjyLb9EVdqkayFFGaYAAzEwAH0AAAAFZAAgAAAAAIW4AxJgYoM0pcNTwk1RSbyjZGIqgKL1hcTJmNrnZmoPBXMAIAAAAAAZpfx3EFO0vY0f1eHnE0PazgqeNDTaj+pPJMUNW8lFrAVsACAAAAAAP+Um2vwW6Bj6vuz9DKz6+6aWkoKoEmFNoiz/xXm7lOsAAzExAH0AAAAFZAAgAAAAAKliO6L9zgeuufjj174hvmQGNRbmYYs9yAirL7OxwEW3BXMAIAAAAAAqU7vs3DWUQ95Eq8OejwWnD0GuXd+ASi/uD6S0l8MM1QVsACAAAAAAb9legYzsfctBPpHyl7YWpPmLr5QiNZFND/50N1vv2MUAAzEyAH0AAAAFZAAgAAAAAOGQcCBkk+j/Kzjt/Cs6g3BZPJG81wIHBS8JewHGpgk+BXMAIAAAAABjrxZXWCkdzrExwCgyHaafuPSQ4V4x2k9kUCAqUaYKDQVsACAAAAAADBU6KefT0v8zSmseaMNmQxKjJar72y7MojLFhkEHqrUAAzEzAH0AAAAFZAAgAAAAAPmCNEt4t97waOSd5hNi2fNCdWEkmcFJ37LI9k4Az4/5BXMAIAAAAABX7DuDPNg+duvELf3NbLWkPMFw2HGLgWGHyVWcPvSNCAVsACAAAAAAS7El1FtZ5STh8Q1FguvieyYX9b2DF1DFVsb9hzxXYRsAAzE0AH0AAAAFZAAgAAAAAD4vtVUYRNB+FD9yoQ2FVJH3nMeJeKbi6eZfth638YqbBXMAIAAAAAANCuUB4OdmuD6LaDK2f3vaqfgYYvg40wDXOBbcFjTqLwVsACAAAAAA9hqC2VoJBjwR7hcQ45xO8ZVojwC83jiRacCaDj6Px2gAAzE1AH0AAAAFZAAgAAAAAJPIRzjmTjbdIvshG6UslbEOd797ZSIdjGAhGWxVQvK1BXMAIAAAAABgmJ0Jh8WLs9IYs/a7DBjDWd8J3thW/AGJK7zDnMeYOAVsACAAAAAAi9zAsyAuou2oiCUHGc6QefLUkACa9IgeBhGu9W/r0X8AAzE2AH0AAAAFZAAgAAAAAABQyKQPoW8wGPIqnsTv69+DzIdRkohRhOhDmyVHkw9WBXMAIAAAAAAqWA2X4tB/h3O1Xlawtz6ndI6WaTwgU1QYflL35opu5gVsACAAAAAAWI/Gj5aZMwDIxztqmVL0g5LBcI8EdKEc2UA28pnekQoAAzE3AH0AAAAFZAAgAAAAACB7NOyGQ1Id3MYnxtBXqyZ5Ul/lHH6p1b10U63DfT6bBXMAIAAAAADpOryIcndxztkHSfLN3Kzq29sD8djS0PspDSqERMqokQVsACAAAAAADatsMW4ezgnyi1PiP7xk+gA4AFIN/fb5uJqfVkjg4UoAAzE4AH0AAAAFZAAgAAAAAKVfXLfs8XA14CRTB56oZwV+bFJN5BHraTXbqEXZDmTkBXMAIAAAAAASRWTsfGOpqdffiOodoqIgBzG/yzFyjR5CfUsIUIWGpgVsACAAAAAAkgCHbCwyX640/0Ni8+MoYxeHUiC+FSU4Mn9jTLYtgZgAAzE5AH0AAAAFZAAgAAAAAH/aZr4EuS0/noQR9rcF8vwoaxnxrwgOsSJ0ys8PkHhGBXMAIAAAAACd7ObGQW7qfddcvyxRTkPuvq/PHu7+6I5dxwS1Lzy5XAVsACAAAAAA3q0eKdV7KeU3pc+CtfypKR7BPxwaf30yu0j9FXeOOboAAzIwAH0AAAAFZAAgAAAAAKvlcpFFNq0oA+urq3w6d80PK1HHHw0H0yVWvU9aHijXBXMAIAAAAADWnAHQ5Fhlcjawki7kWz
dqjM2f6IdGJblojrYElWjsZgVsACAAAAAAO0wvY66l24gx8nRxyVGC0QcTztIi81Kx3ndRhuZr6W4AAzIxAH0AAAAFZAAgAAAAAH/2aMezEOddrq+dNOkDrdqf13h2ttOnexZsJxG1G6PNBXMAIAAAAABNtgnibjC4VKy5poYjvdsBBnVvDTF/4mmEAxsXVgZVKgVsACAAAAAAqvadzJFLqQbs8WxgZ2D2X+XnaPSDMLCVVgWxx5jnLcYAAzIyAH0AAAAFZAAgAAAAAF2wZoDL6/V59QqO8vdRZWDpXpkV4h4KOCSn5e7x7nmzBXMAIAAAAADLZBu7LCYjbThaVUqMK14H/elrVOYIKJQCx4C9Yjw37gVsACAAAAAAEh6Vs81jLU204aGpL90fmYTm5i5R8/RT1uIbg6VU3HwAAzIzAH0AAAAFZAAgAAAAAH27yYaLn9zh2CpvaoomUPercSfJRUmBY6XFqmhcXi9QBXMAIAAAAAAUwumVlIYIs9JhDhSj0R0+59psCMsFk94E62VxkPt42QVsACAAAAAAT5x2hCCd2bpmpnyWaxas8nSxTc8e4C9DfKaqr0ABEysAAzI0AH0AAAAFZAAgAAAAALMg2kNAO4AFFs/mW3In04yFeN4AP6Vo0klyUoT06RquBXMAIAAAAAAgGWJbeIdwlpqXCyVIYSs0dt54Rfc8JF4b8uYc+YUj0AVsACAAAAAAWHeWxIkyvXTOWvfZzqtPXjfGaWWKjGSIQENTU3zBCrsAAzI1AH0AAAAFZAAgAAAAALas/i1T2DFCEmrrLEi7O2ngJZyFHialOoedVXS+OjenBXMAIAAAAAA1kK0QxY4REcGxHeMkgumyF7iwlsRFtw9MlbSSoQY7uAVsACAAAAAAUNlpMJZs1p4HfsD4Q4WZ4TBEi6Oc2fX34rzyynqWCdwAAzI2AH0AAAAFZAAgAAAAAP1TejmWg1CEuNSMt6NUgeQ5lT+oBoeyF7d2l5xQrbXWBXMAIAAAAABPX0kj6obggdJShmqtVfueKHplH4ZrXusiwrRDHMOKeQVsACAAAAAAIYOsNwC3DA7fLcOzqdr0bOFdHCfmK8tLwPoaE9uKOosAAzI3AH0AAAAFZAAgAAAAAMrKn+QPa/NxYezNhlOX9nyEkN1kE/gW7EuZkVqYl0b8BXMAIAAAAABUoZMSPUywRGfX2EEencJEKH5x/P9ySUVrhStAwgR/LgVsACAAAAAAMgZFH6lQIIDrgHnFeslv3ld20ynwQjQJt3cAp4GgrFkAAzI4AH0AAAAFZAAgAAAAAMmD1+a+oVbiUZd1HuZqdgtdVsVKwuWAn3/M1B6QGBM3BXMAIAAAAACLyytOYuZ9WEsIrrtJbXUx4QgipbaAbmlJvSZVkGi0CAVsACAAAAAA4v1lSp5H9BB+HYJ4bH43tC8aeuPZMf78Ng1JOhJh190AAzI5AH0AAAAFZAAgAAAAAOVKV7IuFwmYP1qVv8h0NvJmfPICu8yQhzjG7oJdTLDoBXMAIAAAAABL70XLfQLKRsw1deJ2MUvxSWKxpF/Ez73jqtbLvqbuogVsACAAAAAAvfgzIorXxE91dDt4nQxYfntTsx0M8Gzdsao5naQqcRUAAzMwAH0AAAAFZAAgAAAAAKS/1RSAQma+xV9rz04IcdzmavtrBDjOKPM+Z2NEyYfPBXMAIAAAAAAOJDWGORDgfRv8+w5nunh41wXb2hCA0MRzwnLnQtIqPgVsACAAAAAAf42C1+T7xdHEFF83+c2mF5S8PuuL22ogXXELnRAZ4boAAzMxAH0AAAAFZAAgAAAAAFeq8o82uNY1X8cH6OhdTzHNBUnCChsEDs5tm0kPBz3qBXMAIAAAAABaxMBbsaeEj/EDtr8nZfrhhhirBRPJwVamDo5WwbgvTQVsACAAAAAAMbH453A+BYAaDOTo5kdhV1VdND1avNwvshEG/4MIJjQAAzMyAH0AAAAFZAAgAAAAAI8IKIfDrohHh2cjspJHCovqroSr5N3QyVtNzFvT5+FzBXMAIAAAAABXHXteKG0DoOMmECKp6ro1MZNQvXGzqTDdZ0DUc8QfFAVsACAAAAAA/w5s++XYmO+9TWTbtGc3n3ndV4T9JUribIbF4jmDLSMAAzMzAH0AAAAFZAAgAAAAAJkHvm15kIu1OtAiaByj5ieWqzxiu/epK6c/9+KYIrB0BXMAIAAAAACzg5TcyANk0nes/wCJudd1BwlkWWF6zw3nGclq5v3SJQVsACAAAAAAvruXHTT3irPJLyWpI1j/Xwf2FeIE/IV+6Z49pqRzISoAAzM0AH0AAAAFZAAgAAAAAAYSOvEWWuSg1Aym7EssNLR+xsY7e9BcwsX4JKlnSHJcBXMAIAAAAABT48eY3PXVDOjw7JpNjOe1j2JyI3LjDnQoqZ8Je5B2KgVsACAAAAAAU2815RR57TQ9uDg0XjWjBkAKvf8yssxDMzrM4+FqP6AAAzM1AH0AAAAFZAAgAAAAAGQxC9L1e9DfO5XZvX1yvc3hTLtQEdKO9FPMkyg0Y9ZABXMAIAAAAADtmcMNJwdWLxQEArMGZQyzpnu+Z5yMmPAkvgq4eAKwNQVsACAAAAAAJ88zt4Y/Hoqh+zrf6KCOiUwHbOzCxSfp6k/qsZaYGEgAAzM2AH0AAAAFZAAgAAAAADLHK2LNCNRO0pv8n4fAsxwtUqCNnVK8rRgNiQfXpHSdBXMAIAAAAACf16EBIHRKD3SzjRW+LMOl+47QXA3CJhMzlcqyFRW22AVsACAAAAAAMGz4fAOa0EoVv90fUffwLjBrQhHATf+NdlgCR65vujAAAzM3AH0AAAAFZAAgAAAAAHiZJiXKNF8bbukQGsdYkEi95I+FSBHy1I5/hK2uEZruBXMAIAAAAADE+lZBa8HDUJPN+bF6xI9x4N7GF9pj3vBR7y0BcfFhBAVsACAAAAAAGIEN6sfqq30nyxW4dxDgXr/jz5HmvA9T1jx/pKCn4zgAAzM4AH0AAAAFZAAgAAAAAI1oa2OIw5TvhT14tYCGmhanUoYcCZtNbrVbeoMldHNZBXMAIAAAAAAx2nS0Ipblf2XOgBiUOuJFBupBhe7nb6QPLZlA4aMPCgVsACAAAAAA9xu828hugIgo0E3de9dZD+gTpVUGlwtDba+tw/WcbUoAAzM5AH0AAAAFZAAgAAAAABgTWS3Yap7Q59hii/uPPimHWXsr+DUmsqfwt/X73qsOBXMAIAAAAACKK05liW5KrmEAvtpCB1WUltruzUylDDpjea//UlWoOAVsACAAAAAAcgN4P/wakJ5aJK5c1bvJBqpVGND221dli2YicPFfuAYAAzQwAH0AAAAFZAAgAAAAABOAnBPXDp6i9TISQXvcNKwGDLepZTu3cKrB4vKnSCjBBXMAIAAAAADjjzZO7UowAAvpwyG8BNOVqLCccMFk3aDK4unUeft5ywVsACAAAAAA4zkCd4k9gvfXoD1C7vwTjNcdVJwEARh8h/cxZ4PNMfgAAzQxAH0AAAAFZAAgAAAAAHN8hyvT1lYrAsdiV
5GBdd5jhtrAYE/KnSjw2Ka9hjz9BXMAIAAAAAD794JK7EeXBs+D7yOVK7nWF8SbZ/7U8gZ7nnT9JFNwTAVsACAAAAAAg8Wt1HO3NhByq2ggux2a4Lo6Gryr24rEFIqh2acrwWMAAzQyAH0AAAAFZAAgAAAAAO93bPrq8bsnp1AtNd9ETnXIz0lH/2HYN/vuw9wA3fyFBXMAIAAAAABHlls5fbaF2oAGqptC481XQ4eYxInTC29aElfmVZgDUgVsACAAAAAANoQXEWpXJpgrSNK/cKi/m7oYhuSRlp1IZBF0bqTEATcAAzQzAH0AAAAFZAAgAAAAAL1YsAZm1SA0ztU6ySIrQgCCA74V6rr0/4iIygCcaJL6BXMAIAAAAADTXWTHWovGmUR1Zg9l/Aqq9H5mOCJQQrb/Dfae7e3wKAVsACAAAAAA5dunyJK6/SVfDD0t9QlNBcFqoZnf9legRjHaLSKAoQMAAzQ0AH0AAAAFZAAgAAAAAEoFAeHk0RZ9kD+cJRD3j7PcE5gzWKnyBrF1I/MDNp5mBXMAIAAAAACgHtc2hMBRSZjKw8RAdDHK+Pi1HeyjiBuAslGVNcW5tAVsACAAAAAAXzBLfq+GxRtX4Wa9fazA49DBLG6AjZm2XODStJKH8D0AAzQ1AH0AAAAFZAAgAAAAAAW+7DmSN/LX+/0uBVJDHIc2dhxAGz4+ehyyz8fAnNGoBXMAIAAAAAA6Ilw42EvvfLJ3Eq8Afd+FjPoPcQutZO6ltmCLEr8kxQVsACAAAAAAbbZalyo07BbFjPFlYmbmv0z023eT9eLkHqeVUnfUAUAAAzQ2AH0AAAAFZAAgAAAAANBdV7M7kuYO3EMoQItAbXv4t2cIhfaT9V6+s4cg9djlBXMAIAAAAABvz4MIvZWxxrcJCL5qxLfFhXiUYB1OLHdKEjco94SgDgVsACAAAAAAK2GVGvyPIKolF/ECcmfmkVcf1/IZNcaTv96N92yGrkEAAzQ3AH0AAAAFZAAgAAAAAMoAoiAn1kc79j5oPZtlMWHMhhgwNhLUnvqkqIFvcH1NBXMAIAAAAADcJTW7WiCyW0Z9YDUYwppXhLj4Ac1povpJvcAq+i48MQVsACAAAAAAIGxGDzoeB3PTmudl4+j6piQB++e33EEzuzAiXcqGxvUAAzQ4AH0AAAAFZAAgAAAAACI3j5QP7dWHpcT6WO/OhsWwRJNASBYqIBDNzW8IorEyBXMAIAAAAABxUpBSjXwCKDdGP9hYU+RvyR+96kChfvyyRC4jZmztqAVsACAAAAAAvBCHguWswb4X0xdcAryCvZgQuthXzt7597bJ5VxAMdgAAzQ5AH0AAAAFZAAgAAAAAKsbycEuQSeNrF8Qnxqw3x3og8JmQabwGqnDbqzFRVrrBXMAIAAAAACno/3ef2JZJS93SVVzmOZSN+jjJHT8s0XYq2M46d2sLAVsACAAAAAAAt5zLJG+/j4K8rnkFtAn8IvdUVNefe6utJ3rdzgwudIAAzUwAH0AAAAFZAAgAAAAAPXIcoO8TiULqlxzb74NFg+I8kWX5uXIDUPnh2DobIoMBXMAIAAAAADR6/drkdTpnr9g1XNvKDwtBRBdKn7c2c4ZNUVK5CThdQVsACAAAAAAJqOA1c6KVog3F4Hb/GfDb3jCxXDRTqpXWSbMH4ePIJsAAzUxAH0AAAAFZAAgAAAAAEa03ZOJmfHT6/nVadvIw71jVxEuIloyvxXraYEW7u7pBXMAIAAAAADzRlBJK75FLiKjz3djqcgjCLo/e3yntI3MnPS48OORhgVsACAAAAAAnQhx4Rnyj081XrLRLD5NLpWmRWCsd0M9Hl7Jl19R0h8AAzUyAH0AAAAFZAAgAAAAAKx8NLSZUU04pSSGmHa5fh2oLHsEN5mmNMNHL95/tuC9BXMAIAAAAAA59hcXVaN3MNdHoo11OcH1aPRzHCwpVjO9mGfMz4xh3QVsACAAAAAAYIPdjV2XbPj7dBeHPwnwhVU7zMuJ+xtMUW5mIOYtmdAAAzUzAH0AAAAFZAAgAAAAAHNKAUxUqBFNS9Ea9NgCZoXMWgwhP4x0/OvoaPRWMquXBXMAIAAAAABUZ551mnP4ZjX+PXU9ttomzuOpo427MVynpkyq+nsYCQVsACAAAAAALnVK5p2tTTeZEh1zYt4iqKIQT9Z0si//Hy1L85oF+5IAAzU0AH0AAAAFZAAgAAAAALfGXDlyDVcGaqtyHkLT0qpuRhJQLgCxtznazhFtuyn/BXMAIAAAAABipxlXDq14C62pXhwAeen5+syA+/C6bN4rtZYcO4zKwAVsACAAAAAAXUf0pzUq0NhLYagWDap4uEiwq5rLpcx29rWbt1NYMsMAAzU1AH0AAAAFZAAgAAAAANoEr8sheJjg4UCfBkuUzarU9NFoy1xwbXjs5ifVDeA9BXMAIAAAAABPoyTf6M+xeZVGES4aNzVlq7LgjqZXJ/QunjYVusGUEAVsACAAAAAA1hA2gMeZZPUNytk9K+lB1RCqWRudRr7GtadJlExJf8oAAzU2AH0AAAAFZAAgAAAAAKvDiK+xjlBe1uQ3SZTNQl2lClIIvpP/5CHwY6Kb3WlgBXMAIAAAAAANnxImq5MFbWaRBHdJp+yD09bVlcFtiFDYsy1eDZj+iQVsACAAAAAAWtsyO+FxMPSIezwsV1TJD8ZrXAdRnQM6DJ+f+1V3qEkAAzU3AH0AAAAFZAAgAAAAAF49IlFH9RmSUSvUQpEPUedEksrQUcjsOv44nMkwXhjzBXMAIAAAAADJtWGbk0bZzmk20obz+mNsp86UCu/nLLlbg7ppxYn7PgVsACAAAAAA3k0Tj/XgPQtcYijH8cIlQoe/VXf15q1nrZNmg7yWYEgAAzU4AH0AAAAFZAAgAAAAAOuSJyuvz50lp3BzXlFKnq62QkN2quNU1Gq1IDsnFoJCBXMAIAAAAAAqavH1d93XV3IzshWlMnzznucadBF0ND092/2ApI1AcAVsACAAAAAAzUrK4kpoKCmcpdZlZNI13fddjdoAseVe67jaX1LobIIAAzU5AH0AAAAFZAAgAAAAALtgC4Whb4ZdkCiI30zY6fwlsxSa7lEaOAU3SfUXr02XBXMAIAAAAACgdZ6U1ZVgUaZZwbIaCdlANpCw6TZV0bwg3DS1NC/mnAVsACAAAAAAzI49hdpp0PbO7S2KexISxC16sE73EUAEyuqUFAC/J48AAzYwAH0AAAAFZAAgAAAAAF6PfplcGp6vek1ThwenMHVkbZgrc/dHgdsgx1VdPqZ5BXMAIAAAAACha3qhWkqmuwJSEXPozDO8y1ZdRLyzt9Crt2vjGnT7AAVsACAAAAAA7nvcU59+LwxGupSF21jAeAE0x7JE94tjRkJfgM1yKU8AAzYxAH0AAAAFZAAgAAAAAKoLEhLvLjKc7lhOJfx+VrGJCx9tXlOSa9bxQzGR6rfbBXMAIAAAAAAIDK5wNnjRMBzET7x/KAMExL/zi1IumJM92XTgXfoPoAVsACAAAAAAFkUYWFwNr815dEdFqp+TiIoz
Dcq5IBNVkyMoDjharDQAAzYyAH0AAAAFZAAgAAAAADoQv6lutRmh5scQFvIW6K5JBquLxszuygM1tzBiGknIBXMAIAAAAADAD+JjW7FoBQ76/rsECmmcL76bmyfXpUU/awqIsZdO+wVsACAAAAAAPFHdLw3jssmEXsgtvl/RBNaUCRA1kgSwsofG364VOvQAAzYzAH0AAAAFZAAgAAAAAJNHUGAgn56KekghO19d11nai3lAh0JAlWfeP+6w4lJBBXMAIAAAAAD9XGJlvz59msJvA6St9fKW9CG4JoHV61rlWWnkdBRLzwVsACAAAAAAxwP/X/InJJHmrjznvahIMgj6pQR30B62UtHCthSjrP0AAzY0AH0AAAAFZAAgAAAAAHgYoMGjEE6fAlAhICv0+doHcVX8CmMVxyq7+jlyGrvmBXMAIAAAAAC/5MQZgTHuIr/O5Z3mXPvqrom5JTQ8IeSpQGhO9sB+8gVsACAAAAAAuPSXVmJUAUpTQg/A9Bu1hYczZF58KEhVofakygbsvJQAAzY1AH0AAAAFZAAgAAAAANpIljbxHOM7pydY877gpRQvYY2TGK7igqgGsavqGPBABXMAIAAAAAAqHyEu9gpurPOulApPnr0x9wrygY/7mXe9rAC+tPK80wVsACAAAAAA7gkPzNsS3gCxdFBWbSW9tkBjoR5ib+saDvpGSB3A3ogAAzY2AH0AAAAFZAAgAAAAAGR+gEaZTeGNgG9BuM1bX2R9ed4FCxBA9F9QvdQDAjZwBXMAIAAAAABSkrYFQ6pf8MZ1flgmeIRkxaSh/Eep4Btdx4QYnGGnwAVsACAAAAAApRovMiV00hm/pEcT4XBsyPNw0eo8RLAX/fuabjdU+uwAAzY3AH0AAAAFZAAgAAAAAFNprhQ3ZwIcYbuzLolAT5n/vc14P9kUUQComDu6eFyKBXMAIAAAAAAcx9z9pk32YbPV/sfPZl9ALIEVsqoLXgqWLVK/tP+heAVsACAAAAAA/qxvuvJbAHwwhfrPVpmCFzNvg2cU/NXaWgqgYUZpgXwAAzY4AH0AAAAFZAAgAAAAADgyPqQdqQrgfmJjRFAILTHzXbdw5kpKyfeoEcy6YYG/BXMAIAAAAAAE+3XsBQ8VAxAkN81au+f3FDeCD/s7KoZD+fnM1MJSSAVsACAAAAAAhRnjrXecwV0yeCWKJ5J/x12Xx4qVJahsCEVHB/1U2rcAAzY5AH0AAAAFZAAgAAAAAI0CT7JNngTCTUSei1Arw7eHWCD0jumv2rb7imjWIlWABXMAIAAAAABSP8t6ya0SyCphXMwnru6ZUDXWElN0NfBvEOhDvW9bJQVsACAAAAAAGWeGmBNDRaMtvm7Rv+8TJ2sJ4WNXKcp3tqpv5Se9Ut4AAzcwAH0AAAAFZAAgAAAAAD/FIrGYFDjyYmVb7oTMVwweWP7A6F9LnyIuNO4MjBnXBXMAIAAAAACIZgJCQRZu7NhuNMyOqCn1tf+DfU1qm10TPCfj5JYV3wVsACAAAAAA5hmY4ptuNxULGf87SUFXQWGAONsL9U29duh8xqsHtxoAAzcxAH0AAAAFZAAgAAAAAHIkVuNDkSS1cHIThKc/O0r2/ubaABTOi8Q1r/dvBAsEBXMAIAAAAADdHYqchEiJLM340c3Q4vJABmmth3+MKzwLYlsG6GS7sQVsACAAAAAADa+KP/pdTiG22l+ZWd30P1iHjnBF4zSNRdFm0oEK82kAAzcyAH0AAAAFZAAgAAAAAJmoDILNhC6kn3masElfnjIjP1VjsjRavGk1gSUIjh1NBXMAIAAAAAD97Ilvp3XF8T6MmVVcxMPcdL80RgQ09UoC6PnoOvZ1IQVsACAAAAAA2RK3Xng6v8kpvfVW9tkVXjpE+BSnx9/+Fw85Evs+kUEAAzczAH0AAAAFZAAgAAAAAI5bm3YO0Xgf0VT+qjVTTfvckecM3Cwqj7DTKZXf8/NXBXMAIAAAAAD/m+h8fBhWaHm6Ykuz0WX1xL4Eme3ErLObyEVJf8NCywVsACAAAAAAfb1VZZCqs2ivYbRzX4p5CtaCkKW+g20Pr57FWXzEZi8AAzc0AH0AAAAFZAAgAAAAANqo4+p6qdtCzcB4BX1wQ6llU7eFBnuu4MtZwp4B6mDlBXMAIAAAAAAGiz+VaukMZ+6IH4jtn4KWWdKK4/W+O+gRioQDrfzpMgVsACAAAAAAG4YYkTp80EKo59mlHExDodRQFR7njhR5dmISwUJ6ukAAAzc1AH0AAAAFZAAgAAAAAPrFXmHP2Y4YAm7b/aqsdn/DPoDkv7B8egWkfe23XsM1BXMAIAAAAAAGhwpKAr7skeqHm3oseSbO7qKNhmYsuUrECBxJ5k+D2AVsACAAAAAAAqPQi9luYAu3GrFCEsVjd9z2zIDcp6SPTR2w6KQEr+IAAzc2AH0AAAAFZAAgAAAAABzjYxwAjXxXc0Uxv18rH8I3my0Aguow0kTwKyxbrm+cBXMAIAAAAADVbqJVr6IdokuhXkEtXF0C2gINLiAjMVN20lE20Vmp2QVsACAAAAAAD7K1Fx4gFaaizkIUrf+EGXQeG7QX1jadhGc6Ji471H8AAzc3AH0AAAAFZAAgAAAAAFMm2feF2fFCm/UC6AfIyepX/xJDSmnnolQIBnHcPmb5BXMAIAAAAABLI11kFrQoaNVZFmq/38aRNImPOjdJh0Lo6irI8M/AaAVsACAAAAAAOWul0oVqJ9CejD2RqphhTC98DJeRQy5EwbNerU2+4l8AAzc4AH0AAAAFZAAgAAAAAJvXB3KyNiNtQko4SSzo/9b2qmM2zU9CQTTDfLSBWMgRBXMAIAAAAAAvjuVP7KsLRDeqVqRziTKpBrjVyqKiIbO9Gw8Wl2wFTAVsACAAAAAADlE+oc1ins+paNcaOZJhBlKlObDJ4VQORWjFYocM4LgAAzc5AH0AAAAFZAAgAAAAAPGdcxDiid8z8XYnfdDivNMYVPgBKdGOUw6UStU+48CdBXMAIAAAAAARj6g1Ap0eEfuCZ4X2TsEw+Djrhto3fA5nLwPaY0vCTgVsACAAAAAAoHqiwGOUkBu8SX5U1yHho+UIFdSN2MdQN5s6bQ0EsJYAAzgwAH0AAAAFZAAgAAAAAP5rGPrYGt3aKob5f/ldP0qrW7bmWvqnKY4QwdDWz400BXMAIAAAAADTQkW2ymaaf/bhteOOGmSrIR97bAnJx+yN3yMj1bTeewVsACAAAAAADyQnHGH2gF4w4L8axUsSTf6Ubk7L5/eoFOJk12MtZAoAAzgxAH0AAAAFZAAgAAAAAAlz6wJze5UkIxKpJOZFGCOf3v2KByWyI6NB6JM9wNcBBXMAIAAAAABUC7P/neUIHHoZtq0jFVBHY75tSFYr1Y5S16YN5XxC1QVsACAAAAAAgvxRbXDisNnLY3pfsjDdnFLtkvYUC4lhA68eBXc7KAwAAzgyAH0AAAAFZAAgAAAAAFJ8AtHcjia/9Y5pLEc3qVgH5xKiXw12G9Kn2A1EY8McBXMAIAAAAAAxe7Bdw7eUSBk/oAawa7u
icTEDgXLymRNhBy1LAxhDvwVsACAAAAAAxKPaIBKVx3jTA+R/el7P7AZ7efrmTGjJs3Hj/YdMddwAAzgzAH0AAAAFZAAgAAAAAO8uwQUaKFb6vqR3Sv3Wn4QAonC2exOC9lGG1juqP5DtBXMAIAAAAABZf1KyJgQg8/Rf5c02DgDK2aQu0rNCOvaL60ohDHyY+gVsACAAAAAAqyEjfKC8lYoIfoXYHUqHZPoaA6EK5BAZy5dxXZmay4kAAzg0AH0AAAAFZAAgAAAAAE8YtqyRsGCeiR6hhiyisR/hccmK4nZqIMzO4lUBmEFzBXMAIAAAAAC1UYOSKqAeG1UJiKjWFVskRhuFKpj9Ezy+lICZvFlN5AVsACAAAAAA6Ct9nNMKyRazn1OKnRKagm746CGu+jyhbL1qJnZxGi0AAzg1AH0AAAAFZAAgAAAAAPhCrMausDx1QUIEqp9rUdRKyM6a9AAx7jQ3ILIu8wNIBXMAIAAAAACmH8lotGCiF2q9VQxhsS+7LAZv79VUAsOUALaGxE/EpAVsACAAAAAAnc1xCKfdvbUEc8F7XZqlNn1C+hZTtC0I9I3LL06iaNkAAzg2AH0AAAAFZAAgAAAAAOBi/GAYFcstMSJPgp3VkMiuuUUCrZytvqYaU8dwm8v2BXMAIAAAAACEZSZVyD3pKzGlbdwlYmWQhHHTV5SnNLknl2Gw8IaUTQVsACAAAAAAfsLZsEDcWSuNsIo/TD1ReyQW75HPMgmuKZuWFOLKRLoAAzg3AH0AAAAFZAAgAAAAAIQuup+YGfH3mflzWopN8J1X8o8a0d9CSGIvrA5HOzraBXMAIAAAAADYvNLURXsC2ITMqK14LABQBI+hZZ5wNf24JMcKLW+84AVsACAAAAAACzfjbTBH7IwDU91OqLAz94RFkoqBOkzKAqQb55gT4/MAAzg4AH0AAAAFZAAgAAAAAKsh0ADyOnVocFrOrf6MpTrNvAj8iaiE923DPryu124gBXMAIAAAAADg24a8NVE1GyScc6tmnTbmu5ulzO+896fE92lN08MeswVsACAAAAAAaPxcOIxnU7But88/yadOuDJDMcCywwrRitaxMODT4msAAzg5AH0AAAAFZAAgAAAAAKkVC2Y6HtRmv72tDnPUSjJBvse7SxLqnr09/Uuj9sVVBXMAIAAAAABYNFUkH7ylPMN+Bc3HWX1e0flGYNbtJNCY9SltJCW/UAVsACAAAAAAZYK/f9H4OeihmpiFMH7Wm7uLvs2s92zNA8wyrNZTsuMAAzkwAH0AAAAFZAAgAAAAADDggcwcb/Yn1Kk39sOHsv7BO/MfP3m/AJzjGH506Wf9BXMAIAAAAAAYZIsdjICS0+BDyRUPnrSAZfPrwtuMaEDEn0/ijLNQmAVsACAAAAAAGPnYVvo2ulO9z4LGd/69NAklfIcZqZvFX2KK0s+FcTUAAzkxAH0AAAAFZAAgAAAAAEWY7dEUOJBgjOoWVht1wLehsWAzB3rSOBtLgTuM2HC8BXMAIAAAAAAAoswiHRROurjwUW8u8D5EUT+67yvrgpB/j6PzBDAfVwVsACAAAAAA6NhRTYFL/Sz4tao7vpPjLNgAJ0FX6P/IyMW65qT6YsMAAzkyAH0AAAAFZAAgAAAAAPZaapeAUUFPA7JTCMOWHJa9lnPFh0/gXfAPjA1ezm4ZBXMAIAAAAACmJvLY2nivw7/b3DOKH/X7bBXjJwoowqb1GtEFO3OYgAVsACAAAAAA/JcUoyKacCB1NfmH8vYqC1f7rd13KShrQqV2r9QBP44AAzkzAH0AAAAFZAAgAAAAAK00u6jadxCZAiA+fTsPVDsnW5p5LCr4+kZZZOTDuZlfBXMAIAAAAAAote4zTEYMDgaaQbAdN8Dzv93ljPLdGjJzvnRn3KXgtQVsACAAAAAAxXd9Mh6R3mnJy8m7UfqMKi6oD5DlZpkaOz6bEjMOdiwAAzk0AH0AAAAFZAAgAAAAAFbgabdyymiEVYYwtJSWa7lfl/oYuj/SukzJeDOR6wPVBXMAIAAAAADAFGFjS1vPbN6mQEhkDYTD6V2V23Ys9gUEUMGNvMPkaAVsACAAAAAAL/D5Sze/ZoEanZLK0IeEkhgVkxEjMWVCfmJaD3a8uNIAAzk1AH0AAAAFZAAgAAAAABNMR6UBv2E627CqLtQ/eDYx7OEwQ7JrR4mSHFa1N8tLBXMAIAAAAAAxH4gucI4UmNVB7625C6hFSVCuIpJO3lusJlPuL8H5EQVsACAAAAAAVLHNg0OUVqZ7WGOP53BkTap9FOw9dr1P4J8HxqFqU04AAzk2AH0AAAAFZAAgAAAAAG8cd6WBneNunlqrQ2EmNf35W7OGObGq9WL4ePX+LUDmBXMAIAAAAAAjJ2+sX87NSis9hBsgb1QprVRnO7Bf+GObCGoUqyPE4wVsACAAAAAAs9c9SM49/pWmyUQKslpt3RTMBNSRppfNO0JBvUqHPg0AAzk3AH0AAAAFZAAgAAAAAFWOUGkUpy8yf6gB3dio/aOfRKh7XuhvsUj48iESFJrGBXMAIAAAAAAY7sCDMcrUXvNuL6dO0m11WyijzXZvPIcOKob6IpC4PQVsACAAAAAAJOP+EHz6awDb1qK2bZQ3kTV7wsj5Daj/IGAWh4g7omAAAzk4AH0AAAAFZAAgAAAAAGUrIdKxOihwNmo6B+aG+Ag1qa0+iqdksHOjQj+Oy9bZBXMAIAAAAABwa5dbI2KmzBDNBTQBEkjZv4sPaeRkRNejcjdVymRFKQVsACAAAAAA4ml/nm0gJNTcJ4vuD+T2Qfq2fQZlibJp/j6MOGDrbHMAAzk5AH0AAAAFZAAgAAAAAOx89xV/hRk64/CkM9N2EMK6aldII0c8smdcsZ46NbP8BXMAIAAAAADBF6tfQ+7q9kTuLyuyrSnDgmrdmrXkdhl980i1KHuGHgVsACAAAAAACUqiFqHZdGbwAA+hN0YUE5zFg+H+dabIB4dj5/75W/YAAzEwMAB9AAAABWQAIAAAAADJDdC9aEFl4Y8J/awHbnXGHjfP+VXQilPHJg7ewaJI7AVzACAAAAAAE+tqRl6EcBMXvbr4GDiNIYObTsYpa1n6BJk9EjIJVicFbAAgAAAAAJVc+HYYqa0m1Hq6OiRX8c0iRnJYOt6AJAJoG0sG3GMSAAMxMDEAfQAAAAVkACAAAAAA3F9rjEKhpoHuTULVGgfUsGGwJs3bISrXkFP1v6KoQLgFcwAgAAAAAIBf0tXw96Z/Ds0XSIHX/zk3MzUR/7WZR/J6FpxRWChtBWwAIAAAAABWrjGlvKYuTS2s8L9rYy8Hf0juFGJfwQmxVIjkTmFIGQADMTAyAH0AAAAFZAAgAAAAAOYIYoWkX7dGuyKfi3XssUlc7u/gWzqrR9KMkikKVdmSBXMAIAAAAABVF2OYjRTGi9Tw8XCAwZWLpX35Yl271TlNWp6N/nROhAVsACAAAAAA0nWwYzXQ1+EkDvnGq+SMlq20z+j32Su+i/A95SggPb4AAzEwMwB9AAAABWQAIAAAAACMtPm12YtdEA
vqu6Eji1yuRXnu1RJP6h0l7pH3lSH4MwVzACAAAAAAENyCFfyUAh1veQBGx+cxiB7Sasrj41jzCGflZkB5cRMFbAAgAAAAAKdI2LMqISr/T5vuJPg6ZRBm5fVi2aQCc4ra3A4+AjbDAAMxMDQAfQAAAAVkACAAAAAAvlI4lDcs6GB1cnm/Tzo014CXWqidCdyE5t2lknWQd4QFcwAgAAAAAD60SpNc4O2KT7J0llKdSpcX1/Xxs97N715a1HsTFkmBBWwAIAAAAABuuRkJWAH1CynggBt1/5sPh9PoGiqTlS24D/OE2uHXLQADMTA1AH0AAAAFZAAgAAAAAKl8zcHJRDjSjJeV/WvMxulW1zrTFtaeBy/aKKhadc6UBXMAIAAAAADBdWQl5SBIvtZZLIHszePwkO14W1mQ0izUk2Ov21cPNAVsACAAAAAAHErCYycpqiIcCZHdmPL1hi+ovLQk4TAvENpfLdTRamQAAzEwNgB9AAAABWQAIAAAAABb6LXDWqCp1beQgQjj8I3sRTtFhlrmiBi+h/+ikmrvugVzACAAAAAA9stpgTecT7uTyaGNs3K9Bp0A7R0QaIAOfscyMXHBPX8FbAAgAAAAAHUt+McyXrJ1H8SwnHNVO181Ki8vDAM1f7XI26mg95ZDAAMxMDcAfQAAAAVkACAAAAAA97NTT+81PhDhgptNtp4epzA0tP4iNb9j1AWkiiiKGM8FcwAgAAAAAKPbHg7ise16vxmdPCzksA/2Mn/qST0L9Xe8vnQugVkcBWwAIAAAAABB0EMXfvju4JU/mUH/OvxWbPEl9NJkcEp4iCbkXI41fAADMTA4AH0AAAAFZAAgAAAAAMqpayM2XotEFmm0gwQd9rIzApy0X+7HfOhNk6VU7F5lBXMAIAAAAACJR9+q5T9qFHXFNgGbZnPubG8rkO6cwWhzITQTmd6VgwVsACAAAAAAOZLQ6o7e4mVfDzbpQioa4d3RoTvqwgnbmc5Qh2wsZuoAAzEwOQB9AAAABWQAIAAAAADQnslvt6Hm2kJPmqsTVYQHE/wWeZ4bE1XSkt7TKy0r1gVzACAAAAAA8URTA4ZMrhHPvlp53TH6FDCzS+0+61qHm5XK6UiOrKEFbAAgAAAAAHQbgTCdZcbdA0avaTmZXUKnIS7Nwf1tNrcXDCw+PdBRAAMxMTAAfQAAAAVkACAAAAAAhujlgFPFczsdCGXtQ/002Ck8YWQHHzvWvUHrkbjv4rwFcwAgAAAAALbV0lLGcSGfE7mDM3n/fgEvi+ifjl7WZ5b3aqjDNvx9BWwAIAAAAACbceTZy8E3QA1pHmPN5kTlOx3EO8kJM5PUjTVftw1VpgADMTExAH0AAAAFZAAgAAAAABm/6pF96j26Jm7z5KkY1y33zcAEXLx2n0DwC03bs/ixBXMAIAAAAAD01OMvTZI/mqMgxIhA5nLs068mW+GKl3OW3ilf2D8+LgVsACAAAAAAaLvJDrqBESTNZSdcXsd+8GXPl8ZkUsGpeYuyYVv/kygAAzExMgB9AAAABWQAIAAAAACfw9/te4GkHZAapC9sDMHHHZgmlTrccyJDPFciOMSOcwVzACAAAAAAIIC1ZpHObvmMwUfqDRPl4C1aeuHwujM1G/yJbvybMNAFbAAgAAAAAAs9x1SnVpMfNv5Bm1aXGwHmbbI9keWa9HRD35XuCBK5AAMxMTMAfQAAAAVkACAAAAAAkxHJRbnShpPOylLoDdNShfILeA1hChKFQY9qQyZ5VmsFcwAgAAAAAKidrY+rC3hTY+YWu2a7fuMH2RD/XaiTIBW1hrxNCQOJBWwAIAAAAACW0kkqMIzIFMn7g+R0MI8l15fr3k/w/mHtY5n6SYTEwAADMTE0AH0AAAAFZAAgAAAAAByuYl8dBvfaZ0LO/81JW4hYypeNmvLMaxsIdvqMPrWoBXMAIAAAAABNddwobOUJzm9HOUD8BMZJqkNCUCqstHZkC76FIdNg9AVsACAAAAAAQQOkIQtkyNavqCnhQbNg3HfqrJdsAGaoxSJePJl1qXsAAzExNQB9AAAABWQAIAAAAABxMy7X5hf7AXGDz3Y/POu1ZpkMlNcSvSP92NOO/Gs7wAVzACAAAAAAHJshWo2T5wU2zvqCyJzcJQKQaHFHpCpMc9oWBXkpUPoFbAAgAAAAAGeiJKzlUXAvL0gOlW+Hz1mSa2HsV4RGmyLmCHlzbAkoAAMxMTYAfQAAAAVkACAAAAAAlqbslixl7Zw3bRlibZbe/WmKw23k8uKeIzPKYEtbIy0FcwAgAAAAAHEKwpUxkxOfef5HYvulXPmdbzTivwdwrSYIHDeNRcpcBWwAIAAAAADuPckac21Hrg/h0kt5ShJwVEZ9rx6SOHd2+HDjqxEWTQADMTE3AH0AAAAFZAAgAAAAAMXrXx0saZ+5gORmwM2FLuZG6iuO2YS+1IGPoAtDKoKBBXMAIAAAAADIQsxCr8CfFKaBcx8kIeSywnGh7JHjKRJ9vJd9x79y7wVsACAAAAAAcvBV+SykDYhmRFyVYwFYB9oBKBSHr55Jdz2cXeowsUQAAzExOAB9AAAABWQAIAAAAAAm83FA9yDUpwkbKTihe7m53u+DivS9BU2b4vQMtCVQ2AVzACAAAAAAz3m1UB/AbZPa4QSKFDnUgHaT78+6iGOFAtouiBorEgEFbAAgAAAAAIgbpyYtJj5513Z5XYqviH/HXG/5+mqR52iBbfqMmDtZAAMxMTkAfQAAAAVkACAAAAAAJRzYK0PUwr9RPG2/7yID0WgcTJPB2Xjccp5LAPDYunkFcwAgAAAAAIIh24h3DrltAzNFhF+MEmPrZtzr1PhCofhChZqfCW+jBWwAIAAAAAAzRNXtL5o9VXMk5D5ylI0odPDJDSZZry1wfN+TedH70gADMTIwAH0AAAAFZAAgAAAAAHSaHWs/dnmI9sc7nB50VB2Bzs0kHapMHCQdyVEYY30TBXMAIAAAAACkV22lhEjWv/9/DubfHBAcwJggKI5mIbSK5L2nyqloqQVsACAAAAAAS19m7DccQxgryOsBJ3GsCs37yfQqNi1G+S6fCXpEhn4AAzEyMQB9AAAABWQAIAAAAAAC/I4TQRtCl12YZmdGz17X4GqSQgfwCPgRBwdHmdwu+QVzACAAAAAAx8f3z2ut/RAZhleari4vCEE+tNIn4ikjoUwzitfQ588FbAAgAAAAAJci0w1ZB8W2spJQ+kMpod6HSCtSR2jrabOH+B0fj3A4AAMxMjIAfQAAAAVkACAAAAAADGB5yU2XT0fse/MPWgvBvZikVxrl5pf3S5K1hceKWooFcwAgAAAAAIxTmlLHMjNaVDEfJbXvRez0SEPWFREBJCT6qTHsrljoBWwAIAAAAAAlswzAl81+0DteibwHD+CG5mZJrfHXa9NnEFRtXybzzwADMTIzAH0AAAAFZAAgAAAAABmO7QD9vxWMmFjIHz13lyOeV6vHT6mYCsWxF7hb/yOjBXMAIAAAAACT9lmgkiqzuWG24afuzYiCeK9gmJqacmxAruIukd0xEAVsA
CAAAAAAZa0/FI/GkZR7CtX18Xg9Tn9zfxkD0UoaSt+pIO5t1t4AAzEyNAB9AAAABWQAIAAAAAAfPUoy7QyZKhIIURso+mkP9qr1izbjETqF5s22GwjCjAVzACAAAAAAvLMsIDQ/go4VUxeh50UHmsvMvfx51cwyONnRD2odvC0FbAAgAAAAAKMb+1CodEalAFnDrEL1Ndt8ztamZ+9134m9Kp3GQgd+AAMxMjUAfQAAAAVkACAAAAAAE3ZqUar0Bq2zWbARE0bAv98jBlK9UJ73/xcwdMWWlSkFcwAgAAAAAK4M+MmC+9sFiFsumMyJZQKxWmmJiuG9H7IzKw083xxkBWwAIAAAAAAqkAONzhvMhkyL1D/6h7QQxEkdhC3p2WjXH+VGq5qCqQADMTI2AH0AAAAFZAAgAAAAAMo8FJiOq63cAmyk2O7eI7GcbQh/1j4RrMTqly3rexftBXMAIAAAAADjVmpd0WiRGTw/gAqEgGolt2EI7Csv14vKdmYoMD0aAgVsACAAAAAA07XQBzBUQMNw7F2/YxJjZNuPVpHTTgbLd1oGk77+bygAAzEyNwB9AAAABWQAIAAAAACu5IGaIx7A3Jvly/kzlCsSA4s3iJwuIl8jEdRH0k93NwVzACAAAAAA9NRUyxYE+t0Xyosyt6vIfMFW/vBoYg6sR+jBNs4JAxIFbAAgAAAAAAzyZ91dx+0oMlOVAjRGiMrPySikY/U9eMEB4WJb3uWtAAMxMjgAfQAAAAVkACAAAAAALkRy0GJInXYLA+cgjs6Myb0a+Gu9hgXhHvhLNoGWfckFcwAgAAAAANbALyt9zCSvwnLaWCd2/y2eoB7qkWTvv1Ldu8r40JPuBWwAIAAAAAD4Fl5bV5sz4isIE9bX+lmAp+aAKaZgVYVZeVfrItkCZAADMTI5AH0AAAAFZAAgAAAAAGoUK/DSWhT8LZhszSUqDbTrp8cSA7rdqmADKL+MILtTBXMAIAAAAABHnEE9bVa6lvhfhEMkkV2kzSSxH/sMW/FIJuw3CzWs6wVsACAAAAAAanavcBdqZxgRGKvEK95wTmeL1K1CeDSXZsXUAs81uOgAAzEzMAB9AAAABWQAIAAAAAC922ZDQE3h2fQKibGMZ9hV0WNlmrPYYSdtaSyYxsWYqgVzACAAAAAAagMovciKK6WVjIc2cCj8nK5O/gVOFFVeVAJpRp89tmQFbAAgAAAAAKcTFfPQzaFiAtSFhqbN02sCE1BKWJSrRfGN5L6oZwzkAAMxMzEAfQAAAAVkACAAAAAAtK+JqX3K/z2txjAU15DgX4y90DS2YLfIJFolCOkJJJwFcwAgAAAAAMnR5V7gfX7MNqqUdL5AkWlkhyFXaBRVNej+Rcn8lrQkBWwAIAAAAAA2cDNRXZuiC241TGRvdFyctJnrNcdbZOP9zHio81tkngADMTMyAH0AAAAFZAAgAAAAAAeGrIMK/bac6kPczxbvRYqKMkcpeI2FjdMpD91FDWIvBXMAIAAAAAAix62z1LeS8yvSXCl5gHSIomjyx76fF3S1lp9k900hygVsACAAAAAAiYwzf2m71aWFD5ajcXyW2JX2EzQOkBroTGMg29nLPYIAAzEzMwB9AAAABWQAIAAAAACphf298InM0Us4HT8o1W1MGw0D/02vd7Jh+U0h7qaFaQVzACAAAAAAFXtk7YpqsOJxsqGWSIL+YcBE96G3Zz9D31gPqDW94y8FbAAgAAAAAAOrS1KVA94rjB1jZ1pPocpCeBG+B14RzWoHqVDpp7JbAAMxMzQAfQAAAAVkACAAAAAATLDS2cuDVM3yDMuWNgk2iGKBTzPpfJMbvxVOSY39ZfcFcwAgAAAAAPT5wRi2cLHIUflXzm6EQB/m7xdThP80ir1VV/JBBqvxBWwAIAAAAAB9lEtZS0aXCFbCtSbhnis27S5IPcfWGygHW8AHn3QqzwADMTM1AH0AAAAFZAAgAAAAAJNjExiZVX7jfFGfYpQu16qxLN0YPqVU/5CQ/Y67YSinBXMAIAAAAABMpm2+6KrkRUlXzQoMPHrQmIO6dkQz66tYdfTeA3dKqQVsACAAAAAAFXobHiMLvNZuEPr8jtewCX2J93EZG3JNeyVg92fue6YAAzEzNgB9AAAABWQAIAAAAABlFkYtLCx901X6QVVMkSn6Z7k30UF4xHaA0OZJJ9bdyQVzACAAAAAATez+F9GHcGzTp7jjv4feboUNb8JCkIp4EqcPFisnq7MFbAAgAAAAACE7JvOpBgMoZ7kRd4QbxIhxukPTUxXpzhjnBHiR7XoRAAMxMzcAfQAAAAVkACAAAAAA8NJKN0IxZnruhswGQkiruv8Ih0EMwDcSZx/Xasup9dkFcwAgAAAAAKaJZRxzA+Igeydvuk6cSwUHXcrmT4PjhuPu//FslpdnBWwAIAAAAAD53Rok1Vq/PMAnXmarqoHJ0PEyYUBmVESa9hIpCv/G9QADMTM4AH0AAAAFZAAgAAAAABHxHdEClz7hbSSgE58+dWLlSMJnoPz+jFxp4bB1GmLQBXMAIAAAAAD3nSvT6aGD+A110J/NwEfp0nPutlmuB5B+wA3CC3noGAVsACAAAAAA3Apjd+TapONB7k5wBVwTWgn8t+Sq2oyyU5/+as109RcAAzEzOQB9AAAABWQAIAAAAAC/o8qW/ifk3KuJ01VFkyNLgQafxB5/bGs2G5VyyVafOwVzACAAAAAA1bMqAFGDHSl6BYNLbxApvkAv2K1/oafywiX0MDz1dGUFbAAgAAAAAHJXLlId3edFoniLD/9K2A5973MeP2Ro31flDyqm3l5QAAMxNDAAfQAAAAVkACAAAAAAY2V8I1bz3a1AxTtmED6UhdhA09huFkuuEX8R+d/WDPUFcwAgAAAAAPTVoNRiI76tcRKqd+JBBVyy4+YcKST42p0QX2BtmQ2VBWwAIAAAAACcxt9hg14WqPNiDv1MkqVljM2e2KJEv53lA17LhV6ZigADMTQxAH0AAAAFZAAgAAAAAO2kSsW0WGN9AOtK4xK2SHrGhWiaAbMEKT4iZkRpaDN/BXMAIAAAAABKGzQcPM8LT2dwOggxoWjv/1imYWabbG/G4kBw8OWaxAVsACAAAAAAC9hLK1dScQTAqg+YAG3ObdPzg2Xet57HmOFpGmyUR9UAAzE0MgB9AAAABWQAIAAAAAAiCwzNEEaH/mDam68IdDftnhthyUFdb+ZCNSBQ91WlHQVzACAAAAAA7tHyHcxCzmbJeFYZyPm4mEgkTGKOvwY4MX82OvH0Jn8FbAAgAAAAAAb5IAbZ1hXCNegQ+S+C9i/Z8y6sS8KeU04V6hXa2ml6AAMxNDMAfQAAAAVkACAAAAAAGuCHVNJSuoVkpPOnS5s89GuA+BLi2IPBUr2Bg1sWEPIFcwAgAAAAAEl1gncS5/xO7bQ/KQSstRV3rOT2SW6nV92ZANeG2SR6BWwAIAAAAAA9LOcKmhek8F2wAh8yvT/vjp2gaouuO+Hmv10lwAeWPAADMTQ0AH0AAAAFZAAgAAAAAMfxz7gEaoCdPvXr
ubDhCZUS0ARLZc1svgbXgMDlVBPgBXMAIAAAAAB6a5dDA3fuT5Vz2KvAcbUEFX/+B7Nw2p1QqbPoQ5TTuAVsACAAAAAAcf/y75UOuI62A6vWH7bYr/5Jz+nirZVYK/81trN6XOQAAzE0NQB9AAAABWQAIAAAAACnYsqF/VzmjIImC9+dqrHO1TM6lJ6fRwM0mM6Wf6paOwVzACAAAAAA5tgZzch8uDCR1ky3SllVaKVpxAlbrhvlNDTazZZRZOAFbAAgAAAAALeGiLJS4z2zhgVpxzyPdRYyACP9QzQBOob34YrIZumCAAMxNDYAfQAAAAVkACAAAAAAEC0sIVmadtW4YMuRXH7RpAhXclsd+3bmqGXCMeaT014FcwAgAAAAABPpXh0uzpsJJB+IRUNajmMB9WGwswfpw5T9xk3Xj6ANBWwAIAAAAAAmf+NYh9TZ/QRu3w/GQz66n7DtfbJijN3G7KzeL8lstAADMTQ3AH0AAAAFZAAgAAAAABaIB3n49Xm9cOafSrQsE0WCcYp8rMIO/qVwIlMF5YLRBXMAIAAAAAC9EyWJV3xOu9bzgdJ/yX+ko7qLf1u3AxNMataW2C9EzQVsACAAAAAAvVbDkLxXx2DcMLifIQ3K0IIJcLcAG9DUrNfI6aoUjNcAAzE0OAB9AAAABWQAIAAAAAA5rZItA/cocRnngYqcJ3nBXQ+l688aKz3EQyLbYYunPAVzACAAAAAAwKyA+L7TgxztPClLrIMk2JXR+w7c04N3ZOqPgjvrIvsFbAAgAAAAACzvZ33h6aWEe8hmo+1f6OXJ72FY5hvWaUuha64ZV3KFAAMxNDkAfQAAAAVkACAAAAAA3htn7oHJ0YYpIrs+Mzyh85Ys67HwAdv5LQl1mCdoMWkFcwAgAAAAAEHjCtNNLenHuSIYux6ezAHsXDaj2DlTF67ToDhDDe6HBWwAIAAAAAD+P4H0sk9jOd+7vOANt2/1Ectb+4ZRGPE8GkHWNXW3MgADMTUwAH0AAAAFZAAgAAAAAEnt18Km/nqggfIJWxzTr9r3hnXNaueG6XO9A5G11LnGBXMAIAAAAAD7QxzGMN/ard5TfFLecE6uusMmXG2+RBsBR+/NCQHUwAVsACAAAAAAQEZ1ZZ8GC8rdbg7s87OM5Gr9qkTXS9+P5DuAZxj5Gl4AAzE1MQB9AAAABWQAIAAAAAAVAKK/GoY8AACu/hyMpO4hdLq6JnEyWNzkyci9sbaD/wVzACAAAAAA2HmeqpMlvvBpV2zQTYIRmsc4MFlfHRwLof0ycJgMg/MFbAAgAAAAACdltCeWi5E/q1Li1eXLChpM2D9QQSGLBZ82NklQSc0oAAMxNTIAfQAAAAVkACAAAAAAhHyq1GQC/GiMwpYjcsfkNxolJ10ARKjIjfkW1Wipzi0FcwAgAAAAAD/uaGWxTDq87F8XZ6CrFI+RNa8yMqfSZdqK00Kj833BBWwAIAAAAAD6aEdOO0CsQGagioOCvANPCEHSpJ8BSixlPBq5ERhB7AADMTUzAH0AAAAFZAAgAAAAABAJJxHoZD+MQBWqm9UM9Dd3z5ZohIZGWRaRVRsMptKQBXMAIAAAAADrE/ca+gqj/SH4oao4wE4qn2ovoTydzcMbDbrfnUs3zAVsACAAAAAAeNCIQN6hVnGJinytQRFGlQ2ocoprXNqpia+BSxzl+uwAAzE1NAB9AAAABWQAIAAAAAAv01wz7VG9mTepjXQi6Zma+7b/OVBaKVkWNbgDLr1mFgVzACAAAAAA0I5sxz8r6wkCp5Tgvr+iL4p6MxSOq5d3e1kZG+0b7NkFbAAgAAAAAIA32v6oGkAOS96HexGouNTex+tLahtx9QF2dgGClk6WAAMxNTUAfQAAAAVkACAAAAAAWXecRwxSon68xaa9THXnRDw5ZfzARKnvvjTjtbae6T0FcwAgAAAAAPh0UfUMEo7eILCMv2tiJQe1bF9qtXq7GJtC6H5Va4fIBWwAIAAAAADqFr1ThRrTXNgIOrJWScO9mk86Ufi95IDu5gi4vP+HWQADMTU2AH0AAAAFZAAgAAAAAEY5WL8/LpX36iAB1wlQrMO/xHVjoO9BePVzbUlBYo+bBXMAIAAAAABoKcpadDXUARedDvTmzUzWPe1jTuvD0z9oIcZmKuiSXwVsACAAAAAAJuJbwuaMrAFoI+jU/IYr+k4RzAqITrOjAd3HWCpJHqEAAzE1NwB9AAAABWQAIAAAAADnJnWqsfx0xqNnqfFGCxIplVu8mXjaHTViJT9+y2RuTgVzACAAAAAAWAaSCwIXDwdYxWf2NZTly/iKVfG/KDjHUcA1BokN5sMFbAAgAAAAAJVxavipE0H4/JQvhagdytXBZ8qGooeXpkbPQ1RfYMVHAAMxNTgAfQAAAAVkACAAAAAAsPG7LaIpJvcwqcbtfFUpIjj+vpNj70Zjaw3eV9T+QYsFcwAgAAAAAJQ71zi0NlCyY8ZQs3IasJ4gB1PmWx57HpnlCf3+hmhqBWwAIAAAAACD58TO6d+71GaOoS+r73rAxliAO9GMs4Uc8JbOTmC0OwADMTU5AH0AAAAFZAAgAAAAAAGiSqKaQDakMi1W87rFAhkogfRAevnwQ41onWNUJKtuBXMAIAAAAAASgiDpXfGh7E47KkOD8MAcX8+BnDShlnU5JAGdnPdqOAVsACAAAAAAI+2TTQIgbFq4Yr3lkzGwhG/tqChP7hRAx2W0fNaH6jcAAzE2MAB9AAAABWQAIAAAAAB7L4EnhjKA5xJD3ORhH2wOA1BvpnQ+7IjRYi+jjVEaJAVzACAAAAAAuhBIm0nL3FJnVJId+7CKDASEo+l2E89Z9/5aWSITK4AFbAAgAAAAALtSICOzQDfV9d+gZuYxpEj6cCeHnKTT+2G3ceP2H65kAAMxNjEAfQAAAAVkACAAAAAAaROn1NaDZFOGEWw724dsXBAm6bgmL5i0cki6QZQNrOoFcwAgAAAAANVT8R6UvhrAlyqYlxtmnvkR4uYK/hlvyQmBu/LP6/3ZBWwAIAAAAAD+aHNMP/X+jcRHyUtrCNkk1KfMtoD3GTmShS8pWGLt+AADMTYyAH0AAAAFZAAgAAAAADqSR5e0/Th59LrauDA7OnGD1Xr3H3NokfVxzDWOFaN7BXMAIAAAAACt30faNwTWRbvmykDpiDYUOCwA6QDbBBYBFWS7rdOB4AVsACAAAAAAF7SvnjjRk5v2flFOKaBAEDvjXaL1cpjsQLtK2fv9zdQAAzE2MwB9AAAABWQAIAAAAADmtb1ZgpZjSeodPG/hIVlsnS8hoRRwRbrTVx89VwL62AVzACAAAAAAi38e1g6sEyVfSDkzZbaZXGxKI/zKNbMasOl2LYoWrq8FbAAgAAAAAALACk0KcCDN/Kv8WuazY8ORtUGkOZ5Dsm0ys1oOppp/AAMxNjQAfQAAAAVkACAAAAAAf/f7AWVgBxoKjr7YsEQ4w/fqSvuQWV2HMiA3rQ7ur0sFcwAgAAAAADkkeJozP6FFhUdRIN74H4UhIHue+eVbOs1NvbdWYFQrBWwAIAA
AAAB55FlHAkmTzAYj/TWrGkRJw2EhrVWUnZXDoMYjyfB/ZwADMTY1AH0AAAAFZAAgAAAAAI2WEOymtuFpdKi4ctanPLnlQud+yMKKb8p/nfKmIy56BXMAIAAAAADVKrJmhjr1rfF3p+T+tl7UFd1B7+BfJRk0e7a4im7ozgVsACAAAAAA5E7Ti3PnFiBQoCcb/DN7V1uM3Xd6VKiexPKntssFL7kAAzE2NgB9AAAABWQAIAAAAAAuHU9Qd79hjyvKOujGanSGDIQlxzsql8JytTZhEnPw+AVzACAAAAAAjF2gV/4+sOHVgDd/oR5wDi9zL7NGpGD+NsEpGXy/a4QFbAAgAAAAAJzMoyojYV6Ed/LpVN5zge93Odv3U7JgP7wxeRaJZGTdAAMxNjcAfQAAAAVkACAAAAAA7dQDkt3iyWYCT94d7yqUtPPwp4qkC0ddu+HFdHgVKEkFcwAgAAAAANuYvtvZBTEq4Rm9+5eb7VuFopowkrAuv86PGP8Q8/QvBWwAIAAAAACeqXoAOQOE4j0zRMlkVd8plaW0RX1npsFvB38Xmzv7sAADMTY4AH0AAAAFZAAgAAAAAAwnZSDhL4tNGYxlHPhKYB8s28dY5ScSwiKZm3UhT8U3BXMAIAAAAABDoY6dhivufTURQExyC9Gx3ocpl09bgbbQLChj3qVGbgVsACAAAAAAF+1nS7O0v85s3CCy+9HkdeoEfm2C6ZiNbPMMnSfsMHUAAzE2OQB9AAAABWQAIAAAAAC2VuRdaC4ZJmLdNOvD6R2tnvkyARteqXouJmI46V306QVzACAAAAAAMn1Z6B35wFTX9mEYAPM+IiJ5hauEwfD0CyIvBrxHg7IFbAAgAAAAAOG6DvDZkT9B/xZWmjao2AevN7MMbs3Oh9YJeSd/hZ+hAAMxNzAAfQAAAAVkACAAAAAAVerb7qVNy457rNOHOgDSKyWl5ojun7iWrv1uHPXrIZQFcwAgAAAAAIDcYS9j5z+gx0xdJj09L7876r/vjvKTi/d3bXDE3PhyBWwAIAAAAADuhVLqb1Bkrx8aNymS+bx2cL8GvLFNH4SAi690DUgnWQADMTcxAH0AAAAFZAAgAAAAAH/E44yLxKCJjuSmU9A8SEhbmkDOx1PqqtYcZtgOzJdrBXMAIAAAAABgLh9v2HjBbogrRoQ82LS6KjZQnzjxyJH4PH+F3jupSAVsACAAAAAAIlO46ehXp4TqpDV0t6op++KO+uWBFh8iFORZjmx2IjkAAzE3MgB9AAAABWQAIAAAAAAlNUdDL+f/SSQ5074mrq0JNh7CTXwTbbhsQyDwWeDVMwVzACAAAAAANIH2IlSNG0kUw4qz0budjcWn8mNR9cJlYUqPYdonucAFbAAgAAAAAJMrOUOyiu5Y3sV76zwEFct8L7+i8WGlQI2+8z2W2kzaAAMxNzMAfQAAAAVkACAAAAAASZ+CvUDtlk/R4HAQ3a+PHrKeY/8ifAfh0oXYFqliu80FcwAgAAAAAJelpzPgM65OZFt/mvGGpwibclQ49wH+1gbUGzd9OindBWwAIAAAAAD9qeDchteEpVXWcycmD9kl9449C1dOw0r60TBm5jK+cQADMTc0AH0AAAAFZAAgAAAAAN9fkoUVbvFV2vMNMAkak4gYfEnzwKI3eDM3pnDK5q3lBXMAIAAAAACnDkgVNVNUlbQ9RhR6Aot2nVy+U4km6+GHPkLr631jEAVsACAAAAAANzg/BnkvkmvOr8nS4omF+q9EG/4oisB+ul4YHi938hwAAzE3NQB9AAAABWQAIAAAAAASyK3b1nmNCMptVEGOjwoxYLLS9fYWm/Zxilqea0jpEQVzACAAAAAADDHsGrbqlKGEpxlvfyqOJKQJjwJrzsrB7k3HG0AUJbkFbAAgAAAAAKwx3S4XfDZh4+LuI9jf7XgUh5qiefNv87JD4qvVRfPSAAMxNzYAfQAAAAVkACAAAAAAlSP9iK31GlcG9MKGbLmq+VXMslURr+As736rrVNXcsUFcwAgAAAAAAvbj0zfq9zzi8XReheKFbCB+h9IsOLgXPPpI5vrEJNZBWwAIAAAAABXvoZhaQE7ogWjeBjceVkp03N20cKYP3TA8vuNsgpfAgADMTc3AH0AAAAFZAAgAAAAAOJNORH8Bev97gVU7y6bznOxJ+E6Qoykur1QP76hG1/7BXMAIAAAAAC+C1PtOOrSZgzBAGhr+dPe/kR0JUw9GTwLVNr61xC1aAVsACAAAAAAeA/L8MQIXkamaObtMPLpoDoi5FypA5WAPtMeMrgi0eQAAzE3OAB9AAAABWQAIAAAAAAKcHzLUomavInN6upPkyWhAqYQACP/vdVCIYpiy6U6HgVzACAAAAAATsR4KItY6R2+U7Gg6sJdaEcf58gjd1OulyWovIqfxKcFbAAgAAAAAFbm10ko67ahboAejQdAV0U2uA5OhZYdb8XUFJ8OL46LAAMxNzkAfQAAAAVkACAAAAAAqTOLiMpCdR59tLZzzIPqJvbCNvz2XQL9ust0qYaehtcFcwAgAAAAAArefox/3k5xGOeiw2m6NUdzuGxmPwcu5IFcj+jMwHgHBWwAIAAAAADLZGFJ7MQd5JXMgMXjqZO5LDLxcFClcXPlnRMWRn+1oAADMTgwAH0AAAAFZAAgAAAAAIPSqSeVzSRgNVNmrPYHmUMgykCY27NbdDUNhE5kx/SgBXMAIAAAAAAhX90nNfxyXmZe/+btZ7q6xMX4PFyj0paM1ccJ/5IUUQVsACAAAAAA419oHmD2W0SYoOMwhrhrp8jf68fg9hTkaRdCuVd3CN0AAzE4MQB9AAAABWQAIAAAAACLn5DxiqAosHGXIAY96FwFKjeqrzXWf3VJIQMwx1fl4gVzACAAAAAAindvU27nveutopdvuHmzdENBbeGFtI3Qcsr07jxmvm8FbAAgAAAAAPvl9pBStQvP4OGkN5v0MghUY6djm9n7XdKKfrW0l1sMAAMxODIAfQAAAAVkACAAAAAA7i2S6rHRSPBwZEn59yxaS7HiYBOmObIkeyCcFU42kf8FcwAgAAAAAGb3RSEyBmgarkTvyLWtOLJcPwCKbCRkESG4RZjVmY4iBWwAIAAAAADB2/wo5CSHR4ANtifY6ZRXNTO5+O8qP82DfAiAeanpZwADMTgzAH0AAAAFZAAgAAAAAFz+M+H/Z94mdPW5oP51B4HWptp1rxcMWAjnlHvWJDWrBXMAIAAAAACBFEOQyL7ZHu4Cq33QvXkmKuH5ibG/Md3RaED9CtG5HwVsACAAAAAAfggtJTprQ/yZzj7y5z9KvXsdeXMWP0yUXMMJqpOwI88AAzE4NAB9AAAABWQAIAAAAAAE7c2x3Z3aM1XGfLNk/XQ9jCazNRbGhVm7H8c2NjS5ywVzACAAAAAARJ9h8fdcwA19velF3L/Wcvi2rCzewlKZ2nA0p8bT9uwFbAAgAAAAAJtWe6b4wK2Hae2dZm/OEpYQnvoZjz4Sz5IgJC2wInecAAMxODUAfQAAAAVkACAAAAAAVoRt9B9dNVvIMGN+ea
5TzRzQC+lqSZ8dd/170zU5o9cFcwAgAAAAAEwM95XZin5mv2yhCI8+ugtKuvRVmNgzzIQN0yi1+9aIBWwAIAAAAAAMGBq72n00rox3uqhxSB98mkenTGCdbbUF1gXrgottzgADMTg2AH0AAAAFZAAgAAAAAKRDkjyWv/etlYT4GyoXrmBED2FgZHnhc+l9Wsl06cH2BXMAIAAAAABohlpm3K850Vndf3NmNE0hHqDlNbSR8/IvMidQ3LnIZAVsACAAAAAAW42nGHa6q2MCAaaPVwaIDfr8QLyQwjKq23onZJYsqVsAAzE4NwB9AAAABWQAIAAAAAC3DFh5oklLCNLY90bgWm68dFXz65JpAZSp1K99MBTPAQVzACAAAAAAQgZecmxEUZVHoptEQClDwAf8smI3WynQ/i+JBP0g+kQFbAAgAAAAAEUSQGVnAPISD6voD0DiBUqyWKgt2rta0tjmoe+LNt6IAAMxODgAfQAAAAVkACAAAAAAQ5WKvWSB503qeNlOI2Tpjd5blheNr6OBO8pfJfPNstcFcwAgAAAAAKwHgQLSDJ5NwLBQbY5OnblQIsVDpGV7q3RCbFLD1U4/BWwAIAAAAACQ5nED99LnpbqXZuUOUjnO2HTphEAFBjLD4OZeDEYybgADMTg5AH0AAAAFZAAgAAAAAGfhFY3RGRm5ZgWRQef1tXxHBq5Y6fXaLAR4yJhrTBplBXMAIAAAAACKEF0ApLoB6lP2UqTFsTQYNc9OdDrs/vziPGzttGVLKQVsACAAAAAArOO6FyfNRyBi0sPT5iye7M8d16MTLcwRfodZq4uCYKEAAzE5MAB9AAAABWQAIAAAAAAIM73gPcgzgotYHLeMa2zAU4mFsr7CbILUZWfnuKSwagVzACAAAAAAJCSu98uV8xv88f2BIOWzt6p+6EjQStMBdkGPUkgN79cFbAAgAAAAAMGqPGMPxXbmYbVfSa/japvUljht1zZT33TY7ZjAiuPfAAMxOTEAfQAAAAVkACAAAAAAkWmHCUsiMy1pwZTHxVPBzPTrWFBUDqHNrVqcyyt7nO8FcwAgAAAAAMv2CebFRG/br7USELR98sIdgE9OQCRBGV5JZCO+uPMgBWwAIAAAAABt7qSmn3gxJu7aswsbUiwvO+G6lXj/Xhx+J/zQyZxzLAADMTkyAH0AAAAFZAAgAAAAAGInUYv0lP/rK7McM8taEHXRefk8Q2AunrvWqdfSV7UaBXMAIAAAAACE+WPxJ3gan7iRTbIxXXx+bKVcaf8kP4JD8DcwU0aL7wVsACAAAAAAUC4eTprX4DUZn2X+UXYU6QjtiXk+u57yoOPBbPQUmDkAAzE5MwB9AAAABWQAIAAAAACmHlg2ud3cplXlTsNTpvNnY6Qm1Fce0m899COamoDjaQVzACAAAAAArtJQeJIlepBWRU2aYar7+YGYVQ7dfDc1oxgTmA8r9q0FbAAgAAAAAOk45vg5VqZHAFCO3i0Z52SZi5RADf8NXwf68T5yad/DAAMxOTQAfQAAAAVkACAAAAAApzcWSAbZWV/Rq+ylRNqqlJqNVR4fhXrz4633/MQOQgcFcwAgAAAAAN/jz/bsEleiuCl+li83EWlG6UMHA8CyaOMRKCkXkSCPBWwAIAAAAAC3Sd+Qg+uFDKpGZHbrQgokXHQ1az1aFl4YK343OB6hcQAAEmNtAAAAAAAAAAAAABBwYXlsb2FkSWQAAAAAABBmaXJzdE9wZXJhdG9yAAEAAAAA",
                      "subType": "06"
                    }
                  }
@@ -292,7 +288,7 @@
               },
               "update": {
                 "$set": {
-                  "encryptedDecimal": {
+                  "encryptedDecimalNoPrecision": {
                     "$$type": "binData"
                   }
                 }
@@ -302,7 +298,6 @@
                 "schema": {
                   "default.default": {
                     "escCollection": "enxcol_.default.esc",
-                    "eccCollection": "enxcol_.default.ecc",
                     "ecocCollection": "enxcol_.default.ecoc",
                     "fields": [
                       {
@@ -312,7 +307,7 @@
                             "subType": "04"
                           }
                         },
-                        "path": "encryptedDecimal",
+                        "path": "encryptedDecimalNoPrecision",
                         "bsonType": "decimal",
                         "queries": {
                           "queryType": "rangePreview",
@@ -326,24 +321,6 @@
                       }
                     ]
                   }
-                },
-                "deleteTokens": {
-                  "default.default": {
-                    "encryptedDecimal": {
-                      "e": {
-                        "$binary": {
-                          "base64": "65pz95EthqQpfoHS9nWvdCh05AV+OokP7GUaI+7j8+w=",
-                          "subType": "00"
-                        }
-                      },
-                      "o": {
-                        "$binary": {
-                          "base64": "noN+05JsuO1oDg59yypIGj45i+eFH6HOTXOPpeZ//Mk=",
-                          "subType": "00"
-                        }
-                      }
-                    }
-                  }
-                }
                 }
               }
             },
@@ -358,7 +335,7 @@
           "_id": {
             "$numberInt": "0"
           },
-          "encryptedDecimal": {
+          "encryptedDecimalNoPrecision": {
             "$$type": "binData"
           },
           "__safeContent__": [
@@ -1142,7 +1119,7 @@
           "_id": {
             "$numberInt": "1"
           },
-          "encryptedDecimal": {
+          "encryptedDecimalNoPrecision": {
             "$$type": "binData"
           },
           "__safeContent__": [
diff --git a/test/client-side-encryption/spec/legacy/fle2-Range-Decimal-InsertFind.json b/test/client-side-encryption/spec/legacy/fle2v2-Range-Decimal-InsertFind.json
similarity index 75%
rename from test/client-side-encryption/spec/legacy/fle2-Range-Decimal-InsertFind.json
rename to test/client-side-encryption/spec/legacy/fle2v2-Range-Decimal-InsertFind.json
index cea03e23fe..6b5a642aa8 100644
--- a/test/client-side-encryption/spec/legacy/fle2-Range-Decimal-InsertFind.json
+++ b/test/client-side-encryption/spec/legacy/fle2v2-Range-Decimal-InsertFind.json
@@ -1,7 +1,8 @@
 {
   "runOn": [
     {
-      "minServerVersion": "6.2.0",
+      "minServerVersion": "7.0.0",
+      "serverless": "forbid",
       "topology": [
         "replicaset"
       ]
@@ -11,9 +12,6 @@
   "collection_name": "default",
   "data": [],
   "encrypted_fields": {
-    "escCollection": "enxcol_.default.esc",
-    "eccCollection": "enxcol_.default.ecc",
-    "ecocCollection": "enxcol_.default.ecoc",
     "fields": [
       {
         "keyId": {
@@ -22,7 +20,7 @@
             "subType": "04"
           }
         },
-        "path": "encryptedDecimal",
+        "path": "encryptedDecimalNoPrecision",
         "bsonType": "decimal",
         "queries": {
           "queryType": "rangePreview",
@@ -91,7 +89,7 @@
         "arguments": {
           "document": {
             "_id": 0,
-            "encryptedDecimal": {
+            "encryptedDecimalNoPrecision": {
               "$numberDecimal": "0"
             }
           }
@@ -102,7 +100,7 @@
         "arguments": {
           "document": {
             "_id": 1,
-            "encryptedDecimal": {
+            "encryptedDecimalNoPrecision": {
              "$numberDecimal": "1"
            }
          }
@@ -112,7 +110,7 @@
       "name": "find",
       "arguments": {
         "filter": {
-          "encryptedDecimal": {
+          "encryptedDecimalNoPrecision": {
             "$gt": {
               "$numberDecimal": "0"
             }
@@ -122,7 +120,7 @@
       "result": [
         {
           "_id": 1,
-          "encryptedDecimal": {
+          "encryptedDecimalNoPrecision": {
            "$numberDecimal": "1"
          }
        }
@@ -181,7 +179,7 @@
             "documents": [
               {
                 "_id": 0,
-                "encryptedDecimal": {
+                "encryptedDecimalNoPrecision": {
                   "$$type": "binData"
                 }
               }
@@ -192,7 +190,6 @@
               "schema": {
                 "default.default": {
                   "escCollection": "enxcol_.default.esc",
-                  "eccCollection": "enxcol_.default.ecc",
                   "ecocCollection": "enxcol_.default.ecoc",
                   "fields": [
                     {
@@ -202,7 +199,7 @@
                         "subType": "04"
                       }
                     },
-                    "path": "encryptedDecimal",
+                    "path": "encryptedDecimalNoPrecision",
                     "bsonType": "decimal",
                     "queries": {
                       "queryType": "rangePreview",
@@ -229,7 +226,7 @@
             "documents": [
               {
                 "_id": 1,
-                "encryptedDecimal": {
+                "encryptedDecimalNoPrecision": {
                   "$$type": "binData"
                 }
               }
@@ -240,7 +237,6 @@
               "schema": {
                 "default.default": {
                   "escCollection": "enxcol_.default.esc",
-                  "eccCollection": "enxcol_.default.ecc",
                   "ecocCollection": "enxcol_.default.ecoc",
                   "fields": [
                     {
@@ -250,7 +246,7 @@
                         "subType": "04"
                       }
                     },
-                    "path": "encryptedDecimal",
+                    "path": "encryptedDecimalNoPrecision",
                     "bsonType": "decimal",
                     "queries": {
                       "queryType": "rangePreview",
@@ -275,10 +271,10 @@
             "command": {
               "find": "default",
               "filter": {
-                "encryptedDecimal": {
+                "encryptedDecimalNoPrecision": {
                   "$gt": {
                     "$binary": {
-                      "base64":
"CgljAAADcGF5bG9hZADZYgAABGcAnWIAAAMwAH0AAAAFZAAgAAAAAJu2KgiI8vM+kz9qD3ZQzFQY5qbgYqCqHG5R4jAlnlwXBXMAIAAAAAAAUXxFXsz764T79sGCdhxvNd5b6E/9p61FonsHyEIhogVjACAAAAAAeEtIU/sp7zwjA/VArYkCzkUUiRiQOmlTaVZvfUqbEp0AAzEAfQAAAAVkACAAAAAAPQPvL0ARjujSv2Rkm8r7spVsgeC1K3FWcskGGZ3OdDIFcwAgAAAAACgNn660GmefR8jLqzgR1u5O+Uocx9GyEHiBqVGko5FZBWMAIAAAAACMKx1UyNAN4yVafFht8jp4w4LNaJcLhnozfSMzDHD3owADMgB9AAAABWQAIAAAAAD2Zi6kcxmaD2mY3VWrP+wYJMPg6cSBIYPapxaFQxYFdQVzACAAAAAAM/cV36BLBY3xFBXsXJY8M9EHHOc/qrmdc2CJmj3M89gFYwAgAAAAAHdek2wlAQ5BQORVjudyziRTV0EKx8qbsnoDiw4HG2xaAAMzAH0AAAAFZAAgAAAAAOaNqI9srQ/mI9gwbk+VkizGBBH/PPWOVusgnfPk3tY1BXMAIAAAAAAc96O/pwKCmHCagT6T/QV/wz4vqO+R22GsZ1dse2Vg6QVjACAAAAAA3tld+twIlVWnCTXElspau5k02CwxXDjh+3u1CQtV/W4AAzQAfQAAAAVkACAAAAAA0XlQgy/Yu97EQOjronl9b3dcR1DFn3deuVhtTLbJZHkFcwAgAAAAACoMnpVl6EFJak8A+t5N4RFnQhkQEBnNAx8wDqmq5U/dBWMAIAAAAABRmXIchytMNz4J439viiY5FZ1OB3AXJBkZ3udUqHpsqAADNQB9AAAABWQAIAAAAAAOtpNexRxfv0yRFvZO9DhlkpU4mDuAb8ykdLnE5Vf1VAVzACAAAAAAeblFKm/30orP16uQpZslvsoS8s0xfNPIBlw3VkHeekYFYwAgAAAAAJQdzGMO2s1AkdKz3ZPytivrQphUFVhvhMcXjvUGzHBDAAM2AH0AAAAFZAAgAAAAAIr8xAFm1zPmrvW4Vy5Ct0W8FxMmyPmFzdWVzesBhAJFBXMAIAAAAABYeeXjJEzTHwxab6pUiCRiZjxgtN59a1y8Szy3hfkg+gVjACAAAAAA161loizzKvXS1Po/3bxeNICmKpsPSK9Q+EpkISK3jVoAAzcAfQAAAAVkACAAAAAAl+ibLk0/+EwoqeC8S8cGgAtjtpQWGEZDsybMPnrrkwEFcwAgAAAAAHPPBudWgQ+HUorLDpJMqhS9VBF2VF5aLcxgrM1s+yU7BWMAIAAAAAACyakB3CZWEjbxK0u9Sflc3+EafBAQFbvBpCxKvxuEQAADOAB9AAAABWQAIAAAAACR9erwLTb+tcWFZgJ2MEfM0PKI9uuwIjDTHADRFgD+SQVzACAAAAAAcOop8TXsGUVQoKhzUllMYWxL93xCOkwtIpV8Q6hiSYYFYwAgAAAAAJ43998lzKoVqWm99ZzKHJLeVscGNCVoKDvpUtt2rDI9AAM5AH0AAAAFZAAgAAAAALv0vCPgh7QpmM8Ug6ad5ioZJCh7pLMdT8FYyQioBQ6KBXMAIAAAAADsCPyIG8t6ApQkRk1fX/sfc1kpuWCWP8gAEpnYoBSHrQVjACAAAAAAj2h1WLr0EFEFZ31djx3BtLIbAtdE05ax54opkJo4/bQAAzEwAH0AAAAFZAAgAAAAAIW4AxJgYoM0pcNTwk1RSbyjZGIqgKL1hcTJmNrnZmoPBXMAIAAAAAAZpfx3EFO0vY0f1eHnE0PazgqeNDTaj+pPJMUNW8lFrAVjACAAAAAAqFD83E8POmo6pdqg7D+jWtngbgcV99mbQBxkbpX7ds4AAzExAH0AAAAFZAAgAAAAAKliO6L9zgeuufjj174hvmQGNRbmYYs9yAirL7OxwEW3BXMAIAAAAAAqU7vs3DWUQ95Eq8OejwWnD0GuXd+ASi/uD6S0l8MM1QVjACAAAAAAp0cwpAh7SiuStPwYNVvp83N5GWyWRWA7UGRxHDDj8g8AAzEyAH0AAAAFZAAgAAAAAOGQcCBkk+j/Kzjt/Cs6g3BZPJG81wIHBS8JewHGpgk+BXMAIAAAAABjrxZXWCkdzrExwCgyHaafuPSQ4V4x2k9kUCAqUaYKDQVjACAAAAAAJr/tEuxScrJJEMECy4itHf1y5+gO2l5sBjgM/EOLXw0AAzEzAH0AAAAFZAAgAAAAAPmCNEt4t97waOSd5hNi2fNCdWEkmcFJ37LI9k4Az4/5BXMAIAAAAABX7DuDPNg+duvELf3NbLWkPMFw2HGLgWGHyVWcPvSNCAVjACAAAAAAeNCEn5Z3A5CgCnIgaBl2N8p0t+5rRY06vGg6ePVMaTUAAzE0AH0AAAAFZAAgAAAAAD4vtVUYRNB+FD9yoQ2FVJH3nMeJeKbi6eZfth638YqbBXMAIAAAAAANCuUB4OdmuD6LaDK2f3vaqfgYYvg40wDXOBbcFjTqLwVjACAAAAAATozHsao4er/Uk5H/pKe/1JEo/3h4Pgah2RgJHXf2PeYAAzE1AH0AAAAFZAAgAAAAAJPIRzjmTjbdIvshG6UslbEOd797ZSIdjGAhGWxVQvK1BXMAIAAAAABgmJ0Jh8WLs9IYs/a7DBjDWd8J3thW/AGJK7zDnMeYOAVjACAAAAAAtD48LbnCkTvacfz8DvR1ixsbYceiJxJRV7tLqDUWvVwAAzE2AH0AAAAFZAAgAAAAAABQyKQPoW8wGPIqnsTv69+DzIdRkohRhOhDmyVHkw9WBXMAIAAAAAAqWA2X4tB/h3O1Xlawtz6ndI6WaTwgU1QYflL35opu5gVjACAAAAAAphx2eRFGSW/tm3HxilqRv5Zj5TtOy4KSGiCIm6yJANYAAzE3AH0AAAAFZAAgAAAAACB7NOyGQ1Id3MYnxtBXqyZ5Ul/lHH6p1b10U63DfT6bBXMAIAAAAADpOryIcndxztkHSfLN3Kzq29sD8djS0PspDSqERMqokQVjACAAAAAAIHYCljTfUu/Ox23Nrjz2Z6cqQpn4s0sJV/SxkC9UtIEAAzE4AH0AAAAFZAAgAAAAAKVfXLfs8XA14CRTB56oZwV+bFJN5BHraTXbqEXZDmTkBXMAIAAAAAASRWTsfGOpqdffiOodoqIgBzG/yzFyjR5CfUsIUIWGpgVjACAAAAAAGs7l0GsFNkQMcJBUuUsEOMuQupbEvi7MDcezspsCY3EAAzE5AH0AAAAFZAAgAAAAAH/aZr4EuS0/noQR9rcF8vwoaxnxrwgOsSJ0ys8PkHhGBXMAIAAAAACd7ObGQW7qfddcvyxRTkPuvq/PHu7+6I5dxwS1Lzy5XAVjACAAAAAAWnfsbdu5Qvb62rEpBR90qh+3jdYfp266dmuNPLoXys4AAzIwAH0AAAAFZAAgAAAAAKvlcpFFNq0oA+urq3w6d80PK1HHHw0H0yVWvU9aHijXBXMAIAAAAADWnAHQ5Fhlcjawki7kWz
dqjM2f6IdGJblojrYElWjsZgVjACAAAAAAdNxswYIMjqVta8sLC8wniMm6gOgIgG5eWTPAxItBJJ4AAzIxAH0AAAAFZAAgAAAAAH/2aMezEOddrq+dNOkDrdqf13h2ttOnexZsJxG1G6PNBXMAIAAAAABNtgnibjC4VKy5poYjvdsBBnVvDTF/4mmEAxsXVgZVKgVjACAAAAAArWXr2Myg8T8/0eYBCUXDOsIR//zqZtlE6Lb1YZJkvYcAAzIyAH0AAAAFZAAgAAAAAF2wZoDL6/V59QqO8vdRZWDpXpkV4h4KOCSn5e7x7nmzBXMAIAAAAADLZBu7LCYjbThaVUqMK14H/elrVOYIKJQCx4C9Yjw37gVjACAAAAAAWeF6F8pBeCm27PWjQGrlyN7JHp9WqCNnwFzqZ+V51t0AAzIzAH0AAAAFZAAgAAAAAH27yYaLn9zh2CpvaoomUPercSfJRUmBY6XFqmhcXi9QBXMAIAAAAAAUwumVlIYIs9JhDhSj0R0+59psCMsFk94E62VxkPt42QVjACAAAAAAjbsmlVrfexmULer3RtGwnOf20pF0xv4Z46OHOaIwgzEAAzI0AH0AAAAFZAAgAAAAALMg2kNAO4AFFs/mW3In04yFeN4AP6Vo0klyUoT06RquBXMAIAAAAAAgGWJbeIdwlpqXCyVIYSs0dt54Rfc8JF4b8uYc+YUj0AVjACAAAAAAsNKKan8cMW1oYFmQiyPZbepuJlDD0/CQUS21++CP16kAAzI1AH0AAAAFZAAgAAAAALas/i1T2DFCEmrrLEi7O2ngJZyFHialOoedVXS+OjenBXMAIAAAAAA1kK0QxY4REcGxHeMkgumyF7iwlsRFtw9MlbSSoQY7uAVjACAAAAAAUGZ/BwF6Q9hcFBC8F5ed6RwYWJ1/OahRwljBloMgkFAAAzI2AH0AAAAFZAAgAAAAAP1TejmWg1CEuNSMt6NUgeQ5lT+oBoeyF7d2l5xQrbXWBXMAIAAAAABPX0kj6obggdJShmqtVfueKHplH4ZrXusiwrRDHMOKeQVjACAAAAAApU9zcPAQ2xOspmwhjWN90v2m2clQ4rk7WBmm/nUyOHQAAzI3AH0AAAAFZAAgAAAAAMrKn+QPa/NxYezNhlOX9nyEkN1kE/gW7EuZkVqYl0b8BXMAIAAAAABUoZMSPUywRGfX2EEencJEKH5x/P9ySUVrhStAwgR/LgVjACAAAAAAl/hDhauhBkda/I/kLr/XLxyso2guawC2isj+tDQhfMoAAzI4AH0AAAAFZAAgAAAAAMmD1+a+oVbiUZd1HuZqdgtdVsVKwuWAn3/M1B6QGBM3BXMAIAAAAACLyytOYuZ9WEsIrrtJbXUx4QgipbaAbmlJvSZVkGi0CAVjACAAAAAAVKbMdxvkfCBoqJCUVjLrtKF90a80LocckdFc7LvYuWIAAzI5AH0AAAAFZAAgAAAAAOVKV7IuFwmYP1qVv8h0NvJmfPICu8yQhzjG7oJdTLDoBXMAIAAAAABL70XLfQLKRsw1deJ2MUvxSWKxpF/Ez73jqtbLvqbuogVjACAAAAAARiC2ShVCKMjjiqB6xg+jDIYJNhhWqmceO07EC8rEXkoAAzMwAH0AAAAFZAAgAAAAAKS/1RSAQma+xV9rz04IcdzmavtrBDjOKPM+Z2NEyYfPBXMAIAAAAAAOJDWGORDgfRv8+w5nunh41wXb2hCA0MRzwnLnQtIqPgVjACAAAAAAWU2Vw2Rta8oHxPPfOWQa86tjYWndb0E3RNlZ9gthHt0AAzMxAH0AAAAFZAAgAAAAAFeq8o82uNY1X8cH6OhdTzHNBUnCChsEDs5tm0kPBz3qBXMAIAAAAABaxMBbsaeEj/EDtr8nZfrhhhirBRPJwVamDo5WwbgvTQVjACAAAAAAqcsCyJuLvxG8STrUI1Ccaga760+4fbmIPc6bXodea0gAAzMyAH0AAAAFZAAgAAAAAI8IKIfDrohHh2cjspJHCovqroSr5N3QyVtNzFvT5+FzBXMAIAAAAABXHXteKG0DoOMmECKp6ro1MZNQvXGzqTDdZ0DUc8QfFAVjACAAAAAA47u7VuqtYJ4jt5CcGrUacopqApwpqEpIpkalvCRtul0AAzMzAH0AAAAFZAAgAAAAAJkHvm15kIu1OtAiaByj5ieWqzxiu/epK6c/9+KYIrB0BXMAIAAAAACzg5TcyANk0nes/wCJudd1BwlkWWF6zw3nGclq5v3SJQVjACAAAAAAvTQRtEYdIj2+bfJNamvrgWRoBsYYjMQx9MyuFwSfKmYAAzM0AH0AAAAFZAAgAAAAAAYSOvEWWuSg1Aym7EssNLR+xsY7e9BcwsX4JKlnSHJcBXMAIAAAAABT48eY3PXVDOjw7JpNjOe1j2JyI3LjDnQoqZ8Je5B2KgVjACAAAAAA4eLwHqiSgM1pMqjK3QFgYauOnI3emnlOK3rgQPuec4AAAzM1AH0AAAAFZAAgAAAAAGQxC9L1e9DfO5XZvX1yvc3hTLtQEdKO9FPMkyg0Y9ZABXMAIAAAAADtmcMNJwdWLxQEArMGZQyzpnu+Z5yMmPAkvgq4eAKwNQVjACAAAAAAxA7XR9Cp6671TiZesvqE/EPKVPXfGDaYuDnce+MaZw0AAzM2AH0AAAAFZAAgAAAAADLHK2LNCNRO0pv8n4fAsxwtUqCNnVK8rRgNiQfXpHSdBXMAIAAAAACf16EBIHRKD3SzjRW+LMOl+47QXA3CJhMzlcqyFRW22AVjACAAAAAA9F9Ztm39rTapXoeRmzk6eUoa3MDPatwoQD5PvD+3xGEAAzM3AH0AAAAFZAAgAAAAAHiZJiXKNF8bbukQGsdYkEi95I+FSBHy1I5/hK2uEZruBXMAIAAAAADE+lZBa8HDUJPN+bF6xI9x4N7GF9pj3vBR7y0BcfFhBAVjACAAAAAArsw2CrSC2y02EiWJxViVxlYRIXtWARpUTwO/mfqS7O4AAzM4AH0AAAAFZAAgAAAAAI1oa2OIw5TvhT14tYCGmhanUoYcCZtNbrVbeoMldHNZBXMAIAAAAAAx2nS0Ipblf2XOgBiUOuJFBupBhe7nb6QPLZlA4aMPCgVjACAAAAAAaVoGxV5YH4urmcw3Zocg+QHoq+hA0u6nIkconI0MtZoAAzM5AH0AAAAFZAAgAAAAABgTWS3Yap7Q59hii/uPPimHWXsr+DUmsqfwt/X73qsOBXMAIAAAAACKK05liW5KrmEAvtpCB1WUltruzUylDDpjea//UlWoOAVjACAAAAAAAI3iOqWSA+DpgNr/neIRpt85Q8pLS/81ZKDnbC4io/0AAzQwAH0AAAAFZAAgAAAAABOAnBPXDp6i9TISQXvcNKwGDLepZTu3cKrB4vKnSCjBBXMAIAAAAADjjzZO7UowAAvpwyG8BNOVqLCccMFk3aDK4unUeft5ywVjACAAAAAAIZqKrUcbtze/K3CUKdchuVXQYkgj6jQfIeMZ3GrtYeEAAzQxAH0AAAAFZAAgAAAAAHN8hyvT1lYrAsdiV
5GBdd5jhtrAYE/KnSjw2Ka9hjz9BXMAIAAAAAD794JK7EeXBs+D7yOVK7nWF8SbZ/7U8gZ7nnT9JFNwTAVjACAAAAAAQ4K5cDGLM/2L7KbMt537tsG1eiWrboKMj6pXVfBDIBoAAzQyAH0AAAAFZAAgAAAAAO93bPrq8bsnp1AtNd9ETnXIz0lH/2HYN/vuw9wA3fyFBXMAIAAAAABHlls5fbaF2oAGqptC481XQ4eYxInTC29aElfmVZgDUgVjACAAAAAAe0oLBk+ZaipcPgjjHvACAyA42cLY8i+s+V0EsarulZkAAzQzAH0AAAAFZAAgAAAAAL1YsAZm1SA0ztU6ySIrQgCCA74V6rr0/4iIygCcaJL6BXMAIAAAAADTXWTHWovGmUR1Zg9l/Aqq9H5mOCJQQrb/Dfae7e3wKAVjACAAAAAAAeXpcv0oCp9iY/U81w+WJjFjGn60KLQEURJd/pSoaJgAAzQ0AH0AAAAFZAAgAAAAAEoFAeHk0RZ9kD+cJRD3j7PcE5gzWKnyBrF1I/MDNp5mBXMAIAAAAACgHtc2hMBRSZjKw8RAdDHK+Pi1HeyjiBuAslGVNcW5tAVjACAAAAAAmSJiRfjmuwq5cYfadeYQRkPqbGYYrMNTQeyhBEAYClEAAzQ1AH0AAAAFZAAgAAAAAAW+7DmSN/LX+/0uBVJDHIc2dhxAGz4+ehyyz8fAnNGoBXMAIAAAAAA6Ilw42EvvfLJ3Eq8Afd+FjPoPcQutZO6ltmCLEr8kxQVjACAAAAAAUHZ9EwBIvDI4PUeCOzZCuVN9g9h5Zvq2mSpmejPLBE8AAzQ2AH0AAAAFZAAgAAAAANBdV7M7kuYO3EMoQItAbXv4t2cIhfaT9V6+s4cg9djlBXMAIAAAAABvz4MIvZWxxrcJCL5qxLfFhXiUYB1OLHdKEjco94SgDgVjACAAAAAAvz1koQRh+weAUstLWz/iHxFrLReN4KtQjdEU/zrKOFsAAzQ3AH0AAAAFZAAgAAAAAMoAoiAn1kc79j5oPZtlMWHMhhgwNhLUnvqkqIFvcH1NBXMAIAAAAADcJTW7WiCyW0Z9YDUYwppXhLj4Ac1povpJvcAq+i48MQVjACAAAAAARL0k3Uwe7OUwTejHMoNwK+twNHQznmo1vVUZaH7h8BIAAzQ4AH0AAAAFZAAgAAAAACI3j5QP7dWHpcT6WO/OhsWwRJNASBYqIBDNzW8IorEyBXMAIAAAAABxUpBSjXwCKDdGP9hYU+RvyR+96kChfvyyRC4jZmztqAVjACAAAAAA93/qZqyku1BJePH5hmMWsMfuMlF7TOFZp7z2wLJNdeYAAzQ5AH0AAAAFZAAgAAAAAKsbycEuQSeNrF8Qnxqw3x3og8JmQabwGqnDbqzFRVrrBXMAIAAAAACno/3ef2JZJS93SVVzmOZSN+jjJHT8s0XYq2M46d2sLAVjACAAAAAARUbN9bbz8Balyw8pmBLg3L5dy3aQ9s8IgqpVLeQzTkUAAzUwAH0AAAAFZAAgAAAAAPXIcoO8TiULqlxzb74NFg+I8kWX5uXIDUPnh2DobIoMBXMAIAAAAADR6/drkdTpnr9g1XNvKDwtBRBdKn7c2c4ZNUVK5CThdQVjACAAAAAAnJXYvnjljaE6ajRF+h90FXzJ9EJY26D0oG9t61ZMzqgAAzUxAH0AAAAFZAAgAAAAAEa03ZOJmfHT6/nVadvIw71jVxEuIloyvxXraYEW7u7pBXMAIAAAAADzRlBJK75FLiKjz3djqcgjCLo/e3yntI3MnPS48OORhgVjACAAAAAAbFS6SmTSDF3AsPceQrRwMMRoAnzL/k5avSHlIji5zYIAAzUyAH0AAAAFZAAgAAAAAKx8NLSZUU04pSSGmHa5fh2oLHsEN5mmNMNHL95/tuC9BXMAIAAAAAA59hcXVaN3MNdHoo11OcH1aPRzHCwpVjO9mGfMz4xh3QVjACAAAAAAoIvo/iOb/eXJv7w4KSQo74DOyO4uAIO09Ba7JZw9ousAAzUzAH0AAAAFZAAgAAAAAHNKAUxUqBFNS9Ea9NgCZoXMWgwhP4x0/OvoaPRWMquXBXMAIAAAAABUZ551mnP4ZjX+PXU9ttomzuOpo427MVynpkyq+nsYCQVjACAAAAAAlCTphL00SYotztwI+txbHAFTusjh1nNbiya24rM1dUEAAzU0AH0AAAAFZAAgAAAAALfGXDlyDVcGaqtyHkLT0qpuRhJQLgCxtznazhFtuyn/BXMAIAAAAABipxlXDq14C62pXhwAeen5+syA+/C6bN4rtZYcO4zKwAVjACAAAAAA431CBTW4s3IQde024W2cz5iEnj8EJ7p07esepoVDxmQAAzU1AH0AAAAFZAAgAAAAANoEr8sheJjg4UCfBkuUzarU9NFoy1xwbXjs5ifVDeA9BXMAIAAAAABPoyTf6M+xeZVGES4aNzVlq7LgjqZXJ/QunjYVusGUEAVjACAAAAAAywuLdjJJuXewOXMAr25GMBEif8+P74+pGaENUn+XIB8AAzU2AH0AAAAFZAAgAAAAAKvDiK+xjlBe1uQ3SZTNQl2lClIIvpP/5CHwY6Kb3WlgBXMAIAAAAAANnxImq5MFbWaRBHdJp+yD09bVlcFtiFDYsy1eDZj+iQVjACAAAAAAvwntX1RgeNfp5oiD+r3sAKIbMGy1yEblODDYWec1xuIAAzU3AH0AAAAFZAAgAAAAAF49IlFH9RmSUSvUQpEPUedEksrQUcjsOv44nMkwXhjzBXMAIAAAAADJtWGbk0bZzmk20obz+mNsp86UCu/nLLlbg7ppxYn7PgVjACAAAAAAKW8QTERGXwh6fE4gJsULcmu80B4BMkMaZR9USn/C6RgAAzU4AH0AAAAFZAAgAAAAAOuSJyuvz50lp3BzXlFKnq62QkN2quNU1Gq1IDsnFoJCBXMAIAAAAAAqavH1d93XV3IzshWlMnzznucadBF0ND092/2ApI1AcAVjACAAAAAAmVcLt9hf9/q+2O3K4cvFAsvnCsSofHZ5iTzIaR5YKFIAAzU5AH0AAAAFZAAgAAAAALtgC4Whb4ZdkCiI30zY6fwlsxSa7lEaOAU3SfUXr02XBXMAIAAAAACgdZ6U1ZVgUaZZwbIaCdlANpCw6TZV0bwg3DS1NC/mnAVjACAAAAAAYJOnAwVW0gPl1jkCxAL1ZAhKkJ1+ShfwyaIaipflCegAAzYwAH0AAAAFZAAgAAAAAF6PfplcGp6vek1ThwenMHVkbZgrc/dHgdsgx1VdPqZ5BXMAIAAAAACha3qhWkqmuwJSEXPozDO8y1ZdRLyzt9Crt2vjGnT7AAVjACAAAAAA1JYhGDDvtf8b2Pup5jdzQmy325tGo27n/5thjZ/GI4sAAzYxAH0AAAAFZAAgAAAAAKoLEhLvLjKc7lhOJfx+VrGJCx9tXlOSa9bxQzGR6rfbBXMAIAAAAAAIDK5wNnjRMBzET7x/KAMExL/zi1IumJM92XTgXfoPoAVjACAAAAAAhoVLxo0hXbxAzz2BWbeRxAzk
08TbVuHxJkdsbCISswkAAzYyAH0AAAAFZAAgAAAAADoQv6lutRmh5scQFvIW6K5JBquLxszuygM1tzBiGknIBXMAIAAAAADAD+JjW7FoBQ76/rsECmmcL76bmyfXpUU/awqIsZdO+wVjACAAAAAAwLIixR/svwADNhtiOZ9mDF2rUqEvuPx2tUSQbhOpersAAzYzAH0AAAAFZAAgAAAAAJNHUGAgn56KekghO19d11nai3lAh0JAlWfeP+6w4lJBBXMAIAAAAAD9XGJlvz59msJvA6St9fKW9CG4JoHV61rlWWnkdBRLzwVjACAAAAAAKMVXumaH5QOt5WVHFip9xWx9e5I/5Y9uEO0UAX6dy7QAAzY0AH0AAAAFZAAgAAAAAHgYoMGjEE6fAlAhICv0+doHcVX8CmMVxyq7+jlyGrvmBXMAIAAAAAC/5MQZgTHuIr/O5Z3mXPvqrom5JTQ8IeSpQGhO9sB+8gVjACAAAAAAPMgD8Rqnd94atKnMyPjlTwthUQ710MKJVqgtwNXLFWwAAzY1AH0AAAAFZAAgAAAAANpIljbxHOM7pydY877gpRQvYY2TGK7igqgGsavqGPBABXMAIAAAAAAqHyEu9gpurPOulApPnr0x9wrygY/7mXe9rAC+tPK80wVjACAAAAAAezZExYLAbjyMsVuSrWGQ+LsXYEcp6AFMANkQ496mTXcAAzY2AH0AAAAFZAAgAAAAAGR+gEaZTeGNgG9BuM1bX2R9ed4FCxBA9F9QvdQDAjZwBXMAIAAAAABSkrYFQ6pf8MZ1flgmeIRkxaSh/Eep4Btdx4QYnGGnwAVjACAAAAAAQ011358xXtVDOyGPpF2+zGoMdiQ8PGCB1QfDcluYUF4AAzY3AH0AAAAFZAAgAAAAAFNprhQ3ZwIcYbuzLolAT5n/vc14P9kUUQComDu6eFyKBXMAIAAAAAAcx9z9pk32YbPV/sfPZl9ALIEVsqoLXgqWLVK/tP+heAVjACAAAAAA3eqduo9CWE5BePqBq7KCh5++QqXnyjCwybB15Zoeiu4AAzY4AH0AAAAFZAAgAAAAADgyPqQdqQrgfmJjRFAILTHzXbdw5kpKyfeoEcy6YYG/BXMAIAAAAAAE+3XsBQ8VAxAkN81au+f3FDeCD/s7KoZD+fnM1MJSSAVjACAAAAAAWHAiQNo6HBkIhf72fVQxJLaCtNeTqn2OuMmYZjT7PRkAAzY5AH0AAAAFZAAgAAAAAI0CT7JNngTCTUSei1Arw7eHWCD0jumv2rb7imjWIlWABXMAIAAAAABSP8t6ya0SyCphXMwnru6ZUDXWElN0NfBvEOhDvW9bJQVjACAAAAAAbfZdoqMQ71hNIsycNHl6JpSmAFDPSfgzdWkGqFZNoScAAzcwAH0AAAAFZAAgAAAAAD/FIrGYFDjyYmVb7oTMVwweWP7A6F9LnyIuNO4MjBnXBXMAIAAAAACIZgJCQRZu7NhuNMyOqCn1tf+DfU1qm10TPCfj5JYV3wVjACAAAAAAZCaO06/DjOBjNrTxB8jdxM7X4XvNu9QTa2Y00kZJwgEAAzcxAH0AAAAFZAAgAAAAAHIkVuNDkSS1cHIThKc/O0r2/ubaABTOi8Q1r/dvBAsEBXMAIAAAAADdHYqchEiJLM340c3Q4vJABmmth3+MKzwLYlsG6GS7sQVjACAAAAAA4byh4Re83zUZxvEH4iJSsnFei+ohsWxjSpB5YoAPawIAAzcyAH0AAAAFZAAgAAAAAJmoDILNhC6kn3masElfnjIjP1VjsjRavGk1gSUIjh1NBXMAIAAAAAD97Ilvp3XF8T6MmVVcxMPcdL80RgQ09UoC6PnoOvZ1IQVjACAAAAAAx9wkqPvtzrL9EFDmFvktI129ti8IAdHn0oudubGPWAAAAzczAH0AAAAFZAAgAAAAAI5bm3YO0Xgf0VT+qjVTTfvckecM3Cwqj7DTKZXf8/NXBXMAIAAAAAD/m+h8fBhWaHm6Ykuz0WX1xL4Eme3ErLObyEVJf8NCywVjACAAAAAAiGdTXD22l1zDxHeF4NXUTiBnNTsdpzJRwM6riTPuOogAAzc0AH0AAAAFZAAgAAAAANqo4+p6qdtCzcB4BX1wQ6llU7eFBnuu4MtZwp4B6mDlBXMAIAAAAAAGiz+VaukMZ+6IH4jtn4KWWdKK4/W+O+gRioQDrfzpMgVjACAAAAAAl/wlBjSJW/hKkll8HSBCGw4Ce1pJ5rZuhVE09hKq4jQAAzc1AH0AAAAFZAAgAAAAAPrFXmHP2Y4YAm7b/aqsdn/DPoDkv7B8egWkfe23XsM1BXMAIAAAAAAGhwpKAr7skeqHm3oseSbO7qKNhmYsuUrECBxJ5k+D2AVjACAAAAAAKSsxERy2OBhb5MiH9/H2BET2oeU6yVihiAoWi/16FroAAzc2AH0AAAAFZAAgAAAAABzjYxwAjXxXc0Uxv18rH8I3my0Aguow0kTwKyxbrm+cBXMAIAAAAADVbqJVr6IdokuhXkEtXF0C2gINLiAjMVN20lE20Vmp2QVjACAAAAAACJ+VAwl9X88mjC76yzkWeL4K6AamNYjbNkpS9B6fQE4AAzc3AH0AAAAFZAAgAAAAAFMm2feF2fFCm/UC6AfIyepX/xJDSmnnolQIBnHcPmb5BXMAIAAAAABLI11kFrQoaNVZFmq/38aRNImPOjdJh0Lo6irI8M/AaAVjACAAAAAAZFgYh4nV8YN+/AAqe/QhKDkNBPg8KraCG7y3bOzhPV4AAzc4AH0AAAAFZAAgAAAAAJvXB3KyNiNtQko4SSzo/9b2qmM2zU9CQTTDfLSBWMgRBXMAIAAAAAAvjuVP7KsLRDeqVqRziTKpBrjVyqKiIbO9Gw8Wl2wFTAVjACAAAAAARuBn3JdQ8so7Gy0XVH0CtlvtRoJWhSrJP6eRIqbyWh8AAzc5AH0AAAAFZAAgAAAAAPGdcxDiid8z8XYnfdDivNMYVPgBKdGOUw6UStU+48CdBXMAIAAAAAARj6g1Ap0eEfuCZ4X2TsEw+Djrhto3fA5nLwPaY0vCTgVjACAAAAAATxLY6SkcLPaoOBJpQsggEqoxgJgiY9seP3fBQM05PckAAzgwAH0AAAAFZAAgAAAAAP5rGPrYGt3aKob5f/ldP0qrW7bmWvqnKY4QwdDWz400BXMAIAAAAADTQkW2ymaaf/bhteOOGmSrIR97bAnJx+yN3yMj1bTeewVjACAAAAAA6O+wIMt3xMITuCxVrqOCNBPX5F122G+/Is+EYkzUvVYAAzgxAH0AAAAFZAAgAAAAAAlz6wJze5UkIxKpJOZFGCOf3v2KByWyI6NB6JM9wNcBBXMAIAAAAABUC7P/neUIHHoZtq0jFVBHY75tSFYr1Y5S16YN5XxC1QVjACAAAAAANv97jMd9JmZ7lMeCzK7jaZHZYsZJNl6N9g9WXzd2om0AAzgyAH0AAAAFZAAgAAAAAFJ8AtHcjia/9Y5pLEc3qVgH5xKiXw12G9Kn2A1EY8McBXMAIAAAAAAxe7Bdw7eUSBk/oAawa7u
icTEDgXLymRNhBy1LAxhDvwVjACAAAAAAtOUZ2NeUWwESpq95O92dhqYBt8RtFnjT43E7k9mQF3oAAzgzAH0AAAAFZAAgAAAAAO8uwQUaKFb6vqR3Sv3Wn4QAonC2exOC9lGG1juqP5DtBXMAIAAAAABZf1KyJgQg8/Rf5c02DgDK2aQu0rNCOvaL60ohDHyY+gVjACAAAAAA5nAWj1Ek8p0MLYZEB3yoVFDfBYZ+/ZpIo71u+W9hprcAAzg0AH0AAAAFZAAgAAAAAE8YtqyRsGCeiR6hhiyisR/hccmK4nZqIMzO4lUBmEFzBXMAIAAAAAC1UYOSKqAeG1UJiKjWFVskRhuFKpj9Ezy+lICZvFlN5AVjACAAAAAA37oP30mTzVftI2FL9Uwyls/jqLqbmRDQk7p7nlx44uYAAzg1AH0AAAAFZAAgAAAAAPhCrMausDx1QUIEqp9rUdRKyM6a9AAx7jQ3ILIu8wNIBXMAIAAAAACmH8lotGCiF2q9VQxhsS+7LAZv79VUAsOUALaGxE/EpAVjACAAAAAAhVIkl8p19VZ0gpg32s++Jda3qsVSVB5tV4CKrtjhE3IAAzg2AH0AAAAFZAAgAAAAAOBi/GAYFcstMSJPgp3VkMiuuUUCrZytvqYaU8dwm8v2BXMAIAAAAACEZSZVyD3pKzGlbdwlYmWQhHHTV5SnNLknl2Gw8IaUTQVjACAAAAAAriBex2kK//RPhyVpCYJDBng4l5w8jD3m8BF7dVAP0p8AAzg3AH0AAAAFZAAgAAAAAIQuup+YGfH3mflzWopN8J1X8o8a0d9CSGIvrA5HOzraBXMAIAAAAADYvNLURXsC2ITMqK14LABQBI+hZZ5wNf24JMcKLW+84AVjACAAAAAAGeUzdG2kQrx5XypXJezZmPVzMYuqYZw7Bhbl4EPT0SMAAzg4AH0AAAAFZAAgAAAAAKsh0ADyOnVocFrOrf6MpTrNvAj8iaiE923DPryu124gBXMAIAAAAADg24a8NVE1GyScc6tmnTbmu5ulzO+896fE92lN08MeswVjACAAAAAAbO+DEBY3STVMQN7CbxmHDUBYrDR+80e797u/VmiK4vcAAzg5AH0AAAAFZAAgAAAAAKkVC2Y6HtRmv72tDnPUSjJBvse7SxLqnr09/Uuj9sVVBXMAIAAAAABYNFUkH7ylPMN+Bc3HWX1e0flGYNbtJNCY9SltJCW/UAVjACAAAAAAHlE3RLKcXpXto7Mr8nRgzEsmhjfGh4vcgPxashCSMg4AAzkwAH0AAAAFZAAgAAAAADDggcwcb/Yn1Kk39sOHsv7BO/MfP3m/AJzjGH506Wf9BXMAIAAAAAAYZIsdjICS0+BDyRUPnrSAZfPrwtuMaEDEn0/ijLNQmAVjACAAAAAA1c/sy3255NofBQrnBNSmVkSzMgsGPaaOUJShddVrnuQAAzkxAH0AAAAFZAAgAAAAAEWY7dEUOJBgjOoWVht1wLehsWAzB3rSOBtLgTuM2HC8BXMAIAAAAAAAoswiHRROurjwUW8u8D5EUT+67yvrgpB/j6PzBDAfVwVjACAAAAAALBvQumG8m7/bzJjGWN2cHSAncdN8jMtOSmEhEhGom24AAzkyAH0AAAAFZAAgAAAAAPZaapeAUUFPA7JTCMOWHJa9lnPFh0/gXfAPjA1ezm4ZBXMAIAAAAACmJvLY2nivw7/b3DOKH/X7bBXjJwoowqb1GtEFO3OYgAVjACAAAAAAGCDHuUJusGHKQ+r9nrFChmUUsRcqZKPGsRiLSk5gbFcAAzkzAH0AAAAFZAAgAAAAAK00u6jadxCZAiA+fTsPVDsnW5p5LCr4+kZZZOTDuZlfBXMAIAAAAAAote4zTEYMDgaaQbAdN8Dzv93ljPLdGjJzvnRn3KXgtQVjACAAAAAApZ6l+OrocOqgFek7WOqOP9JruTWZ+iW+5zdL3DZwzhkAAzk0AH0AAAAFZAAgAAAAAFbgabdyymiEVYYwtJSWa7lfl/oYuj/SukzJeDOR6wPVBXMAIAAAAADAFGFjS1vPbN6mQEhkDYTD6V2V23Ys9gUEUMGNvMPkaAVjACAAAAAAeICKly5Xtwmfd4JFD+5e1UWpu4V3KoRim7jeBICDs+UAAzk1AH0AAAAFZAAgAAAAABNMR6UBv2E627CqLtQ/eDYx7OEwQ7JrR4mSHFa1N8tLBXMAIAAAAAAxH4gucI4UmNVB7625C6hFSVCuIpJO3lusJlPuL8H5EQVjACAAAAAA0/uh1hrAlGGna/njCVaCqBwubxkifzF0zjCDQrIJOtUAAzk2AH0AAAAFZAAgAAAAAG8cd6WBneNunlqrQ2EmNf35W7OGObGq9WL4ePX+LUDmBXMAIAAAAAAjJ2+sX87NSis9hBsgb1QprVRnO7Bf+GObCGoUqyPE4wVjACAAAAAAAoJ4Ufdq45T9Aun5FupHlaBCIUsmUn6dXgV9KorpFikAAzk3AH0AAAAFZAAgAAAAAFWOUGkUpy8yf6gB3dio/aOfRKh7XuhvsUj48iESFJrGBXMAIAAAAAAY7sCDMcrUXvNuL6dO0m11WyijzXZvPIcOKob6IpC4PQVjACAAAAAAO/QFjczoznH95Iu0YEVMsU1GA1yxSGL8bcwSweNzAtkAAzk4AH0AAAAFZAAgAAAAAGUrIdKxOihwNmo6B+aG+Ag1qa0+iqdksHOjQj+Oy9bZBXMAIAAAAABwa5dbI2KmzBDNBTQBEkjZv4sPaeRkRNejcjdVymRFKQVjACAAAAAAUtdJucZoQvvPJiy65RonQrxBCcvtfHslpbgLbtWirCYAAzk5AH0AAAAFZAAgAAAAAOx89xV/hRk64/CkM9N2EMK6aldII0c8smdcsZ46NbP8BXMAIAAAAADBF6tfQ+7q9kTuLyuyrSnDgmrdmrXkdhl980i1KHuGHgVjACAAAAAAVc5njbLX1afpsuG662U/OBo4LanEk06lKbQtd95fswgAAzEwMAB9AAAABWQAIAAAAADJDdC9aEFl4Y8J/awHbnXGHjfP+VXQilPHJg7ewaJI7AVzACAAAAAAE+tqRl6EcBMXvbr4GDiNIYObTsYpa1n6BJk9EjIJVicFYwAgAAAAAITXOaLyGsYLUvOemQXxvnAHBwM/t3sMwfQus2aGoII+AAMxMDEAfQAAAAVkACAAAAAA3F9rjEKhpoHuTULVGgfUsGGwJs3bISrXkFP1v6KoQLgFcwAgAAAAAIBf0tXw96Z/Ds0XSIHX/zk3MzUR/7WZR/J6FpxRWChtBWMAIAAAAABzRshE3O7JzVjSGQSabvFXpen8sGUCAyr8hIIevROrjgADMTAyAH0AAAAFZAAgAAAAAOYIYoWkX7dGuyKfi3XssUlc7u/gWzqrR9KMkikKVdmSBXMAIAAAAABVF2OYjRTGi9Tw8XCAwZWLpX35Yl271TlNWp6N/nROhAVjACAAAAAAyGLbmHhe8eTQC8L2eDRcezUWULLZ9E8DPic7Mfp8/+4AAzEwMwB9AAAABWQAIAAAAACMtPm12YtdEA
vqu6Eji1yuRXnu1RJP6h0l7pH3lSH4MwVzACAAAAAAENyCFfyUAh1veQBGx+cxiB7Sasrj41jzCGflZkB5cRMFYwAgAAAAAA92s3DQ3aIVOrRzusqU+xdK7cnQGCDJiyLF+su3F1ZDAAMxMDQAfQAAAAVkACAAAAAAvlI4lDcs6GB1cnm/Tzo014CXWqidCdyE5t2lknWQd4QFcwAgAAAAAD60SpNc4O2KT7J0llKdSpcX1/Xxs97N715a1HsTFkmBBWMAIAAAAABR3y/ZE3VZqBhqfUttX4bzW9wkpDoexfYZIG2fMwftcQADMTA1AH0AAAAFZAAgAAAAAKl8zcHJRDjSjJeV/WvMxulW1zrTFtaeBy/aKKhadc6UBXMAIAAAAADBdWQl5SBIvtZZLIHszePwkO14W1mQ0izUk2Ov21cPNAVjACAAAAAAHvQLJ2X4ncjVv1BsOd/jbVouRPwf4222WlGSzPyAfnsAAzEwNgB9AAAABWQAIAAAAABb6LXDWqCp1beQgQjj8I3sRTtFhlrmiBi+h/+ikmrvugVzACAAAAAA9stpgTecT7uTyaGNs3K9Bp0A7R0QaIAOfscyMXHBPX8FYwAgAAAAANTp7TwMatTSeLH3GvsrB3IOvJo+0B8HRGUFcRzJVesdAAMxMDcAfQAAAAVkACAAAAAA97NTT+81PhDhgptNtp4epzA0tP4iNb9j1AWkiiiKGM8FcwAgAAAAAKPbHg7ise16vxmdPCzksA/2Mn/qST0L9Xe8vnQugVkcBWMAIAAAAACVl3ds8xRdGFPAtZ+WtC4ojC1q5OzB6dSJoOLaJsdhhgADMTA4AH0AAAAFZAAgAAAAAMqpayM2XotEFmm0gwQd9rIzApy0X+7HfOhNk6VU7F5lBXMAIAAAAACJR9+q5T9qFHXFNgGbZnPubG8rkO6cwWhzITQTmd6VgwVjACAAAAAAEEOCUo+ihRGl1kuHlabWBWUFyJPAXSXzpQB4od57cMEAAzEwOQB9AAAABWQAIAAAAADQnslvt6Hm2kJPmqsTVYQHE/wWeZ4bE1XSkt7TKy0r1gVzACAAAAAA8URTA4ZMrhHPvlp53TH6FDCzS+0+61qHm5XK6UiOrKEFYwAgAAAAANSmECEsZgXBpoAzAIwaU3H0K/6HIutCn+ELRGBFzykkAAMxMTAAfQAAAAVkACAAAAAAhujlgFPFczsdCGXtQ/002Ck8YWQHHzvWvUHrkbjv4rwFcwAgAAAAALbV0lLGcSGfE7mDM3n/fgEvi+ifjl7WZ5b3aqjDNvx9BWMAIAAAAACKzGyyRT/3KSmGlKMHpswxj7pNu3rCmOETZ2DUhQsCBgADMTExAH0AAAAFZAAgAAAAABm/6pF96j26Jm7z5KkY1y33zcAEXLx2n0DwC03bs/ixBXMAIAAAAAD01OMvTZI/mqMgxIhA5nLs068mW+GKl3OW3ilf2D8+LgVjACAAAAAAnPa8PTfcaSSAdzNAC54IMTicbShbtt/cSnFHz7u7g8wAAzExMgB9AAAABWQAIAAAAACfw9/te4GkHZAapC9sDMHHHZgmlTrccyJDPFciOMSOcwVzACAAAAAAIIC1ZpHObvmMwUfqDRPl4C1aeuHwujM1G/yJbvybMNAFYwAgAAAAAKeagivKdHk7wAFEPHyDhsm9mDNWH5UPB+oIIlmfbzSOAAMxMTMAfQAAAAVkACAAAAAAkxHJRbnShpPOylLoDdNShfILeA1hChKFQY9qQyZ5VmsFcwAgAAAAAKidrY+rC3hTY+YWu2a7fuMH2RD/XaiTIBW1hrxNCQOJBWMAIAAAAADH8s64PC+lo8Ul4oujBlb/irtJSwu12V0HbvNzj/9qUQADMTE0AH0AAAAFZAAgAAAAAByuYl8dBvfaZ0LO/81JW4hYypeNmvLMaxsIdvqMPrWoBXMAIAAAAABNddwobOUJzm9HOUD8BMZJqkNCUCqstHZkC76FIdNg9AVjACAAAAAA4SEPY+qEZ2dCu/9485IEVybiM3Z7szXYM+izxklNm14AAzExNQB9AAAABWQAIAAAAABxMy7X5hf7AXGDz3Y/POu1ZpkMlNcSvSP92NOO/Gs7wAVzACAAAAAAHJshWo2T5wU2zvqCyJzcJQKQaHFHpCpMc9oWBXkpUPoFYwAgAAAAANeYm2aAT9f6T9uwQyH4sw2DUQyKTpo/CHt+Bb67ao4TAAMxMTYAfQAAAAVkACAAAAAAlqbslixl7Zw3bRlibZbe/WmKw23k8uKeIzPKYEtbIy0FcwAgAAAAAHEKwpUxkxOfef5HYvulXPmdbzTivwdwrSYIHDeNRcpcBWMAIAAAAAASUKCgxGu2U42qfKiyMimLPiZBMurf5v8lQJUSrhAWIAADMTE3AH0AAAAFZAAgAAAAAMXrXx0saZ+5gORmwM2FLuZG6iuO2YS+1IGPoAtDKoKBBXMAIAAAAADIQsxCr8CfFKaBcx8kIeSywnGh7JHjKRJ9vJd9x79y7wVjACAAAAAAPzOTJ088k9QdGUpZ1ubUdkw3pTGkNF8bui4a9wqeo08AAzExOAB9AAAABWQAIAAAAAAm83FA9yDUpwkbKTihe7m53u+DivS9BU2b4vQMtCVQ2AVzACAAAAAAz3m1UB/AbZPa4QSKFDnUgHaT78+6iGOFAtouiBorEgEFYwAgAAAAAHuM0gIDED47c9rdQ4MVtVzpVfn5zTF7eokO4lew4EM+AAMxMTkAfQAAAAVkACAAAAAAJRzYK0PUwr9RPG2/7yID0WgcTJPB2Xjccp5LAPDYunkFcwAgAAAAAIIh24h3DrltAzNFhF+MEmPrZtzr1PhCofhChZqfCW+jBWMAIAAAAAD6LppOeSiLHFI7EOBAz9pZPKU7LWbRWIINTlqw4zmJ5gADMTIwAH0AAAAFZAAgAAAAAHSaHWs/dnmI9sc7nB50VB2Bzs0kHapMHCQdyVEYY30TBXMAIAAAAACkV22lhEjWv/9/DubfHBAcwJggKI5mIbSK5L2nyqloqQVjACAAAAAA1I5pLbnlFf/EfJlhE0RFIioDjzKrWNh4TO8H/ksrbMcAAzEyMQB9AAAABWQAIAAAAAAC/I4TQRtCl12YZmdGz17X4GqSQgfwCPgRBwdHmdwu+QVzACAAAAAAx8f3z2ut/RAZhleari4vCEE+tNIn4ikjoUwzitfQ588FYwAgAAAAADX1qVlsvYru6YDBMtmlQoIBhLSbVykBtft8wn/A+ohJAAMxMjIAfQAAAAVkACAAAAAADGB5yU2XT0fse/MPWgvBvZikVxrl5pf3S5K1hceKWooFcwAgAAAAAIxTmlLHMjNaVDEfJbXvRez0SEPWFREBJCT6qTHsrljoBWMAIAAAAAD+RiUk9IWfaDWXtzLp2NX/vCCeU9amFnQgpJjy56dQXQADMTIzAH0AAAAFZAAgAAAAABmO7QD9vxWMmFjIHz13lyOeV6vHT6mYCsWxF7hb/yOjBXMAIAAAAACT9lmgkiqzuWG24afuzYiCeK9gmJqacmxAruIukd0xEAVjA
CAAAAAAyiZxtuOZQNSit4wFMJS3jnqLbDzyJ0OtDTKs6r0EO0cAAzEyNAB9AAAABWQAIAAAAAAfPUoy7QyZKhIIURso+mkP9qr1izbjETqF5s22GwjCjAVzACAAAAAAvLMsIDQ/go4VUxeh50UHmsvMvfx51cwyONnRD2odvC0FYwAgAAAAAMwY/KvSIOLoSHOq4TQ1V9ZSCPZc34SdPooPL4gYXa7UAAMxMjUAfQAAAAVkACAAAAAAE3ZqUar0Bq2zWbARE0bAv98jBlK9UJ73/xcwdMWWlSkFcwAgAAAAAK4M+MmC+9sFiFsumMyJZQKxWmmJiuG9H7IzKw083xxkBWMAIAAAAACpywsi9vc979ApgH3BDzph7Y4VIAkQhji97sswzdLAYwADMTI2AH0AAAAFZAAgAAAAAMo8FJiOq63cAmyk2O7eI7GcbQh/1j4RrMTqly3rexftBXMAIAAAAADjVmpd0WiRGTw/gAqEgGolt2EI7Csv14vKdmYoMD0aAgVjACAAAAAAEPmYjDZnMZdkaYlLZLlRgist9kCrSQe0mSuKxPwwYbQAAzEyNwB9AAAABWQAIAAAAACu5IGaIx7A3Jvly/kzlCsSA4s3iJwuIl8jEdRH0k93NwVzACAAAAAA9NRUyxYE+t0Xyosyt6vIfMFW/vBoYg6sR+jBNs4JAxIFYwAgAAAAAKMbsr4JCCodHmkBvfhOe7BJigKiJik95wLeu3PjLSpXAAMxMjgAfQAAAAVkACAAAAAALkRy0GJInXYLA+cgjs6Myb0a+Gu9hgXhHvhLNoGWfckFcwAgAAAAANbALyt9zCSvwnLaWCd2/y2eoB7qkWTvv1Ldu8r40JPuBWMAIAAAAABi0dME7AUSoeMv4yIgnGMgrX9G+Uk6HV+I9/9zzABcZwADMTI5AH0AAAAFZAAgAAAAAGoUK/DSWhT8LZhszSUqDbTrp8cSA7rdqmADKL+MILtTBXMAIAAAAABHnEE9bVa6lvhfhEMkkV2kzSSxH/sMW/FIJuw3CzWs6wVjACAAAAAAagOaxIb7co+HxAXhfI9ahHLUNqmGuN/4JxeR9sm6PdkAAzEzMAB9AAAABWQAIAAAAAC922ZDQE3h2fQKibGMZ9hV0WNlmrPYYSdtaSyYxsWYqgVzACAAAAAAagMovciKK6WVjIc2cCj8nK5O/gVOFFVeVAJpRp89tmQFYwAgAAAAAAMsWoODZkTvKSNWzloogbp/AiFFspENp49RwKjWpMLHAAMxMzEAfQAAAAVkACAAAAAAtK+JqX3K/z2txjAU15DgX4y90DS2YLfIJFolCOkJJJwFcwAgAAAAAMnR5V7gfX7MNqqUdL5AkWlkhyFXaBRVNej+Rcn8lrQkBWMAIAAAAADOmDHuXwsu9uwKYYSN5XIb/TG3K5DB5aci/X1ATKTaSwADMTMyAH0AAAAFZAAgAAAAAAeGrIMK/bac6kPczxbvRYqKMkcpeI2FjdMpD91FDWIvBXMAIAAAAAAix62z1LeS8yvSXCl5gHSIomjyx76fF3S1lp9k900hygVjACAAAAAAUK1Gzlm0meDhYPL7ZgKaCD0qz7Zb5rtnPn1EEW3UZxoAAzEzMwB9AAAABWQAIAAAAACphf298InM0Us4HT8o1W1MGw0D/02vd7Jh+U0h7qaFaQVzACAAAAAAFXtk7YpqsOJxsqGWSIL+YcBE96G3Zz9D31gPqDW94y8FYwAgAAAAAEA5mnfyccI1SjyYftq3qCuORs/NxzzqUA+fxn3FRjVsAAMxMzQAfQAAAAVkACAAAAAATLDS2cuDVM3yDMuWNgk2iGKBTzPpfJMbvxVOSY39ZfcFcwAgAAAAAPT5wRi2cLHIUflXzm6EQB/m7xdThP80ir1VV/JBBqvxBWMAIAAAAABTj7i+CxxNVkbnAkSYga/v2X3/NH3nD/FRiOo2FPkzvAADMTM1AH0AAAAFZAAgAAAAAJNjExiZVX7jfFGfYpQu16qxLN0YPqVU/5CQ/Y67YSinBXMAIAAAAABMpm2+6KrkRUlXzQoMPHrQmIO6dkQz66tYdfTeA3dKqQVjACAAAAAATkNGECGUFzo34ItVnTEmxbe8dbde8QzMgEEdjMhRLz8AAzEzNgB9AAAABWQAIAAAAABlFkYtLCx901X6QVVMkSn6Z7k30UF4xHaA0OZJJ9bdyQVzACAAAAAATez+F9GHcGzTp7jjv4feboUNb8JCkIp4EqcPFisnq7MFYwAgAAAAAOHfn3eVRhSWn61AfZuAnCGleSLluJ3sF9IFGUWrCsBaAAMxMzcAfQAAAAVkACAAAAAA8NJKN0IxZnruhswGQkiruv8Ih0EMwDcSZx/Xasup9dkFcwAgAAAAAKaJZRxzA+Igeydvuk6cSwUHXcrmT4PjhuPu//FslpdnBWMAIAAAAAC3Cs/mKg+sbU/BC9QWFglOau6FWAP1sJqqSZOEOdzh+gADMTM4AH0AAAAFZAAgAAAAABHxHdEClz7hbSSgE58+dWLlSMJnoPz+jFxp4bB1GmLQBXMAIAAAAAD3nSvT6aGD+A110J/NwEfp0nPutlmuB5B+wA3CC3noGAVjACAAAAAA56lpvjMjFzR6NMGiSF1es1X01dwINWqaxyq3UUL8XJ0AAzEzOQB9AAAABWQAIAAAAAC/o8qW/ifk3KuJ01VFkyNLgQafxB5/bGs2G5VyyVafOwVzACAAAAAA1bMqAFGDHSl6BYNLbxApvkAv2K1/oafywiX0MDz1dGUFYwAgAAAAAMjJrrd6E5DyUuXkWZsAqo020Qvom6xqAluQLod5SmtdAAMxNDAAfQAAAAVkACAAAAAAY2V8I1bz3a1AxTtmED6UhdhA09huFkuuEX8R+d/WDPUFcwAgAAAAAPTVoNRiI76tcRKqd+JBBVyy4+YcKST42p0QX2BtmQ2VBWMAIAAAAAD0kQXxOWWOfIzqhJk5Rnvp/h5VXcGxxLZ8HMvVVH++YQADMTQxAH0AAAAFZAAgAAAAAO2kSsW0WGN9AOtK4xK2SHrGhWiaAbMEKT4iZkRpaDN/BXMAIAAAAABKGzQcPM8LT2dwOggxoWjv/1imYWabbG/G4kBw8OWaxAVjACAAAAAAT3sD7PTrAPtOtiSRrG4IKmSNr/uWgX8TvIyZpZzj4wkAAzE0MgB9AAAABWQAIAAAAAAiCwzNEEaH/mDam68IdDftnhthyUFdb+ZCNSBQ91WlHQVzACAAAAAA7tHyHcxCzmbJeFYZyPm4mEgkTGKOvwY4MX82OvH0Jn8FYwAgAAAAAP4A3nd470CwvaGzyJLfzbrw7ePJV35V3Cw6tuiPFlsCAAMxNDMAfQAAAAVkACAAAAAAGuCHVNJSuoVkpPOnS5s89GuA+BLi2IPBUr2Bg1sWEPIFcwAgAAAAAEl1gncS5/xO7bQ/KQSstRV3rOT2SW6nV92ZANeG2SR6BWMAIAAAAADi6/zM3xSBXEfpRpvEdeycRDl08SmwwLey72qgZaY7FwADMTQ0AH0AAAAFZAAgAAAAAMfxz7gEaoCdPvXr
ubDhCZUS0ARLZc1svgbXgMDlVBPgBXMAIAAAAAB6a5dDA3fuT5Vz2KvAcbUEFX/+B7Nw2p1QqbPoQ5TTuAVjACAAAAAAB4Kp5ewWpFMJ2T+QCrfw2eJV70L4M+GM8khV1JwqOqQAAzE0NQB9AAAABWQAIAAAAACnYsqF/VzmjIImC9+dqrHO1TM6lJ6fRwM0mM6Wf6paOwVzACAAAAAA5tgZzch8uDCR1ky3SllVaKVpxAlbrhvlNDTazZZRZOAFYwAgAAAAALDJDWkmExE4L8hf2CDNLuF+dgivUGXb7ZvBp90EALm+AAMxNDYAfQAAAAVkACAAAAAAEC0sIVmadtW4YMuRXH7RpAhXclsd+3bmqGXCMeaT014FcwAgAAAAABPpXh0uzpsJJB+IRUNajmMB9WGwswfpw5T9xk3Xj6ANBWMAIAAAAABauth7HsI90juToIGY6149TbrxP8Emoa+5Izilj9zeWQADMTQ3AH0AAAAFZAAgAAAAABaIB3n49Xm9cOafSrQsE0WCcYp8rMIO/qVwIlMF5YLRBXMAIAAAAAC9EyWJV3xOu9bzgdJ/yX+ko7qLf1u3AxNMataW2C9EzQVjACAAAAAAy7IAYSgbqP7CyvurHMoEq1eO08TIPNO/XcS1L5RKXHkAAzE0OAB9AAAABWQAIAAAAAA5rZItA/cocRnngYqcJ3nBXQ+l688aKz3EQyLbYYunPAVzACAAAAAAwKyA+L7TgxztPClLrIMk2JXR+w7c04N3ZOqPgjvrIvsFYwAgAAAAABaAwqPDgkTaz4888/3PeF64+vtHbLtAZgFgEurWT5kbAAMxNDkAfQAAAAVkACAAAAAA3htn7oHJ0YYpIrs+Mzyh85Ys67HwAdv5LQl1mCdoMWkFcwAgAAAAAEHjCtNNLenHuSIYux6ezAHsXDaj2DlTF67ToDhDDe6HBWMAIAAAAAAVwUZIMifuPtmiFSEctLt+bOFo1T4fgGiWNsZAP0ddeQADMTUwAH0AAAAFZAAgAAAAAEnt18Km/nqggfIJWxzTr9r3hnXNaueG6XO9A5G11LnGBXMAIAAAAAD7QxzGMN/ard5TfFLecE6uusMmXG2+RBsBR+/NCQHUwAVjACAAAAAAIPTDniUskKj5mEzULdlKtIDquwjcqkwunJyhmBazNNcAAzE1MQB9AAAABWQAIAAAAAAVAKK/GoY8AACu/hyMpO4hdLq6JnEyWNzkyci9sbaD/wVzACAAAAAA2HmeqpMlvvBpV2zQTYIRmsc4MFlfHRwLof0ycJgMg/MFYwAgAAAAACHHCAszhxijRN9Es+XRQxXo7JQ6g2qM6f5sIdMy0VKbAAMxNTIAfQAAAAVkACAAAAAAhHyq1GQC/GiMwpYjcsfkNxolJ10ARKjIjfkW1Wipzi0FcwAgAAAAAD/uaGWxTDq87F8XZ6CrFI+RNa8yMqfSZdqK00Kj833BBWMAIAAAAAD1mJD7i29l2xloVHwS1n0CbkyodWeFfZM1KF1r4HqNIgADMTUzAH0AAAAFZAAgAAAAABAJJxHoZD+MQBWqm9UM9Dd3z5ZohIZGWRaRVRsMptKQBXMAIAAAAADrE/ca+gqj/SH4oao4wE4qn2ovoTydzcMbDbrfnUs3zAVjACAAAAAAAIUpF9mApVCMCckE7S+K9RL6TPtlTTYuzUsj9E6oIpUAAzE1NAB9AAAABWQAIAAAAAAv01wz7VG9mTepjXQi6Zma+7b/OVBaKVkWNbgDLr1mFgVzACAAAAAA0I5sxz8r6wkCp5Tgvr+iL4p6MxSOq5d3e1kZG+0b7NkFYwAgAAAAAKwOaUTAOw6Y1F5vFrbDf8yVua5Fm6hKhkdIrzejuaatAAMxNTUAfQAAAAVkACAAAAAAWXecRwxSon68xaa9THXnRDw5ZfzARKnvvjTjtbae6T0FcwAgAAAAAPh0UfUMEo7eILCMv2tiJQe1bF9qtXq7GJtC6H5Va4fIBWMAIAAAAAASanx14WR2rmomuZ2x/nuD8j75pYzz+ZAt/v7uRcoOEQADMTU2AH0AAAAFZAAgAAAAAEY5WL8/LpX36iAB1wlQrMO/xHVjoO9BePVzbUlBYo+bBXMAIAAAAABoKcpadDXUARedDvTmzUzWPe1jTuvD0z9oIcZmKuiSXwVjACAAAAAAWhdUWP186dYAIDx6RtC0o/lzwYNvYBXm4RWFMbcU3YkAAzE1NwB9AAAABWQAIAAAAADnJnWqsfx0xqNnqfFGCxIplVu8mXjaHTViJT9+y2RuTgVzACAAAAAAWAaSCwIXDwdYxWf2NZTly/iKVfG/KDjHUcA1BokN5sMFYwAgAAAAAPS3GDNsHgCuTgEWTHiEStoY0VMzlopZ8L7wPUquK7ayAAMxNTgAfQAAAAVkACAAAAAAsPG7LaIpJvcwqcbtfFUpIjj+vpNj70Zjaw3eV9T+QYsFcwAgAAAAAJQ71zi0NlCyY8ZQs3IasJ4gB1PmWx57HpnlCf3+hmhqBWMAIAAAAABmDfQAQeyeutCB+wSn4RmErwYEkYpyBeCYA+3iutFzAQADMTU5AH0AAAAFZAAgAAAAAAGiSqKaQDakMi1W87rFAhkogfRAevnwQ41onWNUJKtuBXMAIAAAAAASgiDpXfGh7E47KkOD8MAcX8+BnDShlnU5JAGdnPdqOAVjACAAAAAAucFM6KF1YHEhH9vn16ox+VSqNEykMGaJzhLPsLI7qk8AAzE2MAB9AAAABWQAIAAAAAB7L4EnhjKA5xJD3ORhH2wOA1BvpnQ+7IjRYi+jjVEaJAVzACAAAAAAuhBIm0nL3FJnVJId+7CKDASEo+l2E89Z9/5aWSITK4AFYwAgAAAAALxpb5qDek038/wz4E//KIrwwo0voaqwlB7cTXSQ44EHAAMxNjEAfQAAAAVkACAAAAAAaROn1NaDZFOGEWw724dsXBAm6bgmL5i0cki6QZQNrOoFcwAgAAAAANVT8R6UvhrAlyqYlxtmnvkR4uYK/hlvyQmBu/LP6/3ZBWMAIAAAAAC1ONheOA57vNof7iZM3b82xk1abqT0bppzjQnHCmnUIwADMTYyAH0AAAAFZAAgAAAAADqSR5e0/Th59LrauDA7OnGD1Xr3H3NokfVxzDWOFaN7BXMAIAAAAACt30faNwTWRbvmykDpiDYUOCwA6QDbBBYBFWS7rdOB4AVjACAAAAAAQZlvU/gzOwW2rcr5+W43Q6EkfX8oX1TpRJWEAZAULx4AAzE2MwB9AAAABWQAIAAAAADmtb1ZgpZjSeodPG/hIVlsnS8hoRRwRbrTVx89VwL62AVzACAAAAAAi38e1g6sEyVfSDkzZbaZXGxKI/zKNbMasOl2LYoWrq8FYwAgAAAAAFWkHkxPA28FjeUjeRILC0tX8OJcanhPP68QxM5s1kcMAAMxNjQAfQAAAAVkACAAAAAAf/f7AWVgBxoKjr7YsEQ4w/fqSvuQWV2HMiA3rQ7ur0sFcwAgAAAAADkkeJozP6FFhUdRIN74H4UhIHue+eVbOs1NvbdWYFQrBWMAIAA
AAADS3qKdAEzPtGMFpsisEhnOfF0zfxmLXjHKKaNCzX4PhQADMTY1AH0AAAAFZAAgAAAAAI2WEOymtuFpdKi4ctanPLnlQud+yMKKb8p/nfKmIy56BXMAIAAAAADVKrJmhjr1rfF3p+T+tl7UFd1B7+BfJRk0e7a4im7ozgVjACAAAAAAxeqPjcgDwae+AgEXg3hU118dHoy6rcHd6jAJrGik+EcAAzE2NgB9AAAABWQAIAAAAAAuHU9Qd79hjyvKOujGanSGDIQlxzsql8JytTZhEnPw+AVzACAAAAAAjF2gV/4+sOHVgDd/oR5wDi9zL7NGpGD+NsEpGXy/a4QFYwAgAAAAAIFYquIAyUn3D2peptolq3s8oIgSLEIctqBYx6qEeQTxAAMxNjcAfQAAAAVkACAAAAAA7dQDkt3iyWYCT94d7yqUtPPwp4qkC0ddu+HFdHgVKEkFcwAgAAAAANuYvtvZBTEq4Rm9+5eb7VuFopowkrAuv86PGP8Q8/QvBWMAIAAAAABBO8poBKW/RWjdHBARy3Rzghz3frQIZEPE/nSKWYlVPgADMTY4AH0AAAAFZAAgAAAAAAwnZSDhL4tNGYxlHPhKYB8s28dY5ScSwiKZm3UhT8U3BXMAIAAAAABDoY6dhivufTURQExyC9Gx3ocpl09bgbbQLChj3qVGbgVjACAAAAAA4JWOs1XAUgX3rGBBGEDDIC9/i9khJ+FgIcwfvj/SnS0AAzE2OQB9AAAABWQAIAAAAAC2VuRdaC4ZJmLdNOvD6R2tnvkyARteqXouJmI46V306QVzACAAAAAAMn1Z6B35wFTX9mEYAPM+IiJ5hauEwfD0CyIvBrxHg7IFYwAgAAAAAMRQnV65wbGlT+7Wj/HvGPLKg8NBnhkQ8Hx/MIyWvynTAAMxNzAAfQAAAAVkACAAAAAAVerb7qVNy457rNOHOgDSKyWl5ojun7iWrv1uHPXrIZQFcwAgAAAAAIDcYS9j5z+gx0xdJj09L7876r/vjvKTi/d3bXDE3PhyBWMAIAAAAAANDuaZsI2eS6/qiSdG9pCLjcPdKS96xWUJr84V/1La7gADMTcxAH0AAAAFZAAgAAAAAH/E44yLxKCJjuSmU9A8SEhbmkDOx1PqqtYcZtgOzJdrBXMAIAAAAABgLh9v2HjBbogrRoQ82LS6KjZQnzjxyJH4PH+F3jupSAVjACAAAAAAd93miIlZ/uuhzjVBt0BPjzeewxih6TSzkzbsNhpMjA8AAzE3MgB9AAAABWQAIAAAAAAlNUdDL+f/SSQ5074mrq0JNh7CTXwTbbhsQyDwWeDVMwVzACAAAAAANIH2IlSNG0kUw4qz0budjcWn8mNR9cJlYUqPYdonucAFYwAgAAAAAF9OccClBu1j8BVMDmMtJyCxnhWCbg9FuY/8nhuFGJknAAMxNzMAfQAAAAVkACAAAAAASZ+CvUDtlk/R4HAQ3a+PHrKeY/8ifAfh0oXYFqliu80FcwAgAAAAAJelpzPgM65OZFt/mvGGpwibclQ49wH+1gbUGzd9OindBWMAIAAAAAC63uwNXVbd1VP5If++NprVRsHCbomU9Iq3GxCttNAweAADMTc0AH0AAAAFZAAgAAAAAN9fkoUVbvFV2vMNMAkak4gYfEnzwKI3eDM3pnDK5q3lBXMAIAAAAACnDkgVNVNUlbQ9RhR6Aot2nVy+U4km6+GHPkLr631jEAVjACAAAAAA5d2REAVBZI9rUfeqJ9jhePmRXvrGolUbMTFiC78k/OIAAzE3NQB9AAAABWQAIAAAAAASyK3b1nmNCMptVEGOjwoxYLLS9fYWm/Zxilqea0jpEQVzACAAAAAADDHsGrbqlKGEpxlvfyqOJKQJjwJrzsrB7k3HG0AUJbkFYwAgAAAAACZHzWGY3PEL4uPSKPezGGJkyScYw/Vt3bKmX5inVMaOAAMxNzYAfQAAAAVkACAAAAAAlSP9iK31GlcG9MKGbLmq+VXMslURr+As736rrVNXcsUFcwAgAAAAAAvbj0zfq9zzi8XReheKFbCB+h9IsOLgXPPpI5vrEJNZBWMAIAAAAABb9Y2Ox3MeHCfGnMxpR7nk6hwIJg0J8/tT8fZHwxMCBQADMTc3AH0AAAAFZAAgAAAAAOJNORH8Bev97gVU7y6bznOxJ+E6Qoykur1QP76hG1/7BXMAIAAAAAC+C1PtOOrSZgzBAGhr+dPe/kR0JUw9GTwLVNr61xC1aAVjACAAAAAA4rDk/UlRHudzR12i5Ivcb5Pxwn63JX2Sjf2xrlALL7kAAzE3OAB9AAAABWQAIAAAAAAKcHzLUomavInN6upPkyWhAqYQACP/vdVCIYpiy6U6HgVzACAAAAAATsR4KItY6R2+U7Gg6sJdaEcf58gjd1OulyWovIqfxKcFYwAgAAAAAEac2NX3v0n87D/eUM/a3Gj+5Axs896SCidMyOMaOz6zAAMxNzkAfQAAAAVkACAAAAAAqTOLiMpCdR59tLZzzIPqJvbCNvz2XQL9ust0qYaehtcFcwAgAAAAAArefox/3k5xGOeiw2m6NUdzuGxmPwcu5IFcj+jMwHgHBWMAIAAAAABK08kOnYjDWhLJ70iVhd3XEj0Q0srEQttJTawBm6GuOwADMTgwAH0AAAAFZAAgAAAAAIPSqSeVzSRgNVNmrPYHmUMgykCY27NbdDUNhE5kx/SgBXMAIAAAAAAhX90nNfxyXmZe/+btZ7q6xMX4PFyj0paM1ccJ/5IUUQVjACAAAAAA/9XTbq8UVKJbIeYtfNg8pdDrpDqbCwVr5Rq3Tb4dFwwAAzE4MQB9AAAABWQAIAAAAACLn5DxiqAosHGXIAY96FwFKjeqrzXWf3VJIQMwx1fl4gVzACAAAAAAindvU27nveutopdvuHmzdENBbeGFtI3Qcsr07jxmvm8FYwAgAAAAAJ3GMHBeIaCyFV5zBgcUMpjfIDcdoaTYQSyQN2shVJMWAAMxODIAfQAAAAVkACAAAAAA7i2S6rHRSPBwZEn59yxaS7HiYBOmObIkeyCcFU42kf8FcwAgAAAAAGb3RSEyBmgarkTvyLWtOLJcPwCKbCRkESG4RZjVmY4iBWMAIAAAAABKIdcHRSJ3dnevw5Pj0BfULPqmDOA7KBDREMGv5rXUEAADMTgzAH0AAAAFZAAgAAAAAFz+M+H/Z94mdPW5oP51B4HWptp1rxcMWAjnlHvWJDWrBXMAIAAAAACBFEOQyL7ZHu4Cq33QvXkmKuH5ibG/Md3RaED9CtG5HwVjACAAAAAABf2EvIMumZohp2E/sHWEJW4VMU3ez95hwWU9pjKGRCYAAzE4NAB9AAAABWQAIAAAAAAE7c2x3Z3aM1XGfLNk/XQ9jCazNRbGhVm7H8c2NjS5ywVzACAAAAAARJ9h8fdcwA19velF3L/Wcvi2rCzewlKZ2nA0p8bT9uwFYwAgAAAAAPl1vH6JaJGZSJ8au0NK58X4gqleryub8slUhA0HRtzmAAMxODUAfQAAAAVkACAAAAAAVoRt9B9dNVvIMGN+ea
5TzRzQC+lqSZ8dd/170zU5o9cFcwAgAAAAAEwM95XZin5mv2yhCI8+ugtKuvRVmNgzzIQN0yi1+9aIBWMAIAAAAAA+rgph1avVrhyHl/6G+HtASJSiJkv7u2UPYj+25fwtLAADMTg2AH0AAAAFZAAgAAAAAKRDkjyWv/etlYT4GyoXrmBED2FgZHnhc+l9Wsl06cH2BXMAIAAAAABohlpm3K850Vndf3NmNE0hHqDlNbSR8/IvMidQ3LnIZAVjACAAAAAAKw6WqWDrI7ZfyndtrftL3uh7BaF/FQxhkh8b0Zl547gAAzE4NwB9AAAABWQAIAAAAAC3DFh5oklLCNLY90bgWm68dFXz65JpAZSp1K99MBTPAQVzACAAAAAAQgZecmxEUZVHoptEQClDwAf8smI3WynQ/i+JBP0g+kQFYwAgAAAAADmUpVNJUwrydIllFvI/aau1703nk6tF2sfzkgFlWsKCAAMxODgAfQAAAAVkACAAAAAAQ5WKvWSB503qeNlOI2Tpjd5blheNr6OBO8pfJfPNstcFcwAgAAAAAKwHgQLSDJ5NwLBQbY5OnblQIsVDpGV7q3RCbFLD1U4/BWMAIAAAAADKOab9GTEiaybo9o4J72Pb9LiQU/CuKoQm6Jtlmo/IeQADMTg5AH0AAAAFZAAgAAAAAGfhFY3RGRm5ZgWRQef1tXxHBq5Y6fXaLAR4yJhrTBplBXMAIAAAAACKEF0ApLoB6lP2UqTFsTQYNc9OdDrs/vziPGzttGVLKQVjACAAAAAAvJcnqBNCZgHe+Q7Cqk+yiAaIuSJIRItXEsAlKy9PES4AAzE5MAB9AAAABWQAIAAAAAAIM73gPcgzgotYHLeMa2zAU4mFsr7CbILUZWfnuKSwagVzACAAAAAAJCSu98uV8xv88f2BIOWzt6p+6EjQStMBdkGPUkgN79cFYwAgAAAAABQqDP10DcaqEY7vxFS7U/9xcadDASONow1+xmVsPZ11AAMxOTEAfQAAAAVkACAAAAAAkWmHCUsiMy1pwZTHxVPBzPTrWFBUDqHNrVqcyyt7nO8FcwAgAAAAAMv2CebFRG/br7USELR98sIdgE9OQCRBGV5JZCO+uPMgBWMAIAAAAABxLEFq6nFVI3WaYhggo1d6oeDsBSm4wCW0T6D0yE/puwADMTkyAH0AAAAFZAAgAAAAAGInUYv0lP/rK7McM8taEHXRefk8Q2AunrvWqdfSV7UaBXMAIAAAAACE+WPxJ3gan7iRTbIxXXx+bKVcaf8kP4JD8DcwU0aL7wVjACAAAAAAiThzC5jAZ5D6xuCIsUD1aLy7KxmT2Fb8ybTkLxvj17MAAzE5MwB9AAAABWQAIAAAAACmHlg2ud3cplXlTsNTpvNnY6Qm1Fce0m899COamoDjaQVzACAAAAAArtJQeJIlepBWRU2aYar7+YGYVQ7dfDc1oxgTmA8r9q0FYwAgAAAAADZpcxtBFv18EdmhxsoxlDs253tNB5zqVY/h+nFD1isIAAMxOTQAfQAAAAVkACAAAAAApzcWSAbZWV/Rq+ylRNqqlJqNVR4fhXrz4633/MQOQgcFcwAgAAAAAN/jz/bsEleiuCl+li83EWlG6UMHA8CyaOMRKCkXkSCPBWMAIAAAAADSTHgALJugYsjIvSK1o0UwG6VizNokXQ/tnPNjOpGB1AAABWUAIAAAAADrmnP3kS2GpCl+gdL2da90KHTkBX46iQ/sZRoj7uPz7BJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAAA==", + "base64": 
"DeFiAAADcGF5bG9hZACxYgAABGcAnWIAAAMwAH0AAAAFZAAgAAAAAJu2KgiI8vM+kz9qD3ZQzFQY5qbgYqCqHG5R4jAlnlwXBXMAIAAAAAAAUXxFXsz764T79sGCdhxvNd5b6E/9p61FonsHyEIhogVsACAAAAAAt19RL3Oo5ni5L8kcvgOJYLgVYyXJExwP8pkuzLG7f/kAAzEAfQAAAAVkACAAAAAAPQPvL0ARjujSv2Rkm8r7spVsgeC1K3FWcskGGZ3OdDIFcwAgAAAAACgNn660GmefR8jLqzgR1u5O+Uocx9GyEHiBqVGko5FZBWwAIAAAAADflr+fsnZngm6KRWYgHa9JzK+bXogWl9evBU9sQUHPHQADMgB9AAAABWQAIAAAAAD2Zi6kcxmaD2mY3VWrP+wYJMPg6cSBIYPapxaFQxYFdQVzACAAAAAAM/cV36BLBY3xFBXsXJY8M9EHHOc/qrmdc2CJmj3M89gFbAAgAAAAAOpydOrKxx6m2gquSDV2Vv3w10GocmNCFeOo/fRhRH9JAAMzAH0AAAAFZAAgAAAAAOaNqI9srQ/mI9gwbk+VkizGBBH/PPWOVusgnfPk3tY1BXMAIAAAAAAc96O/pwKCmHCagT6T/QV/wz4vqO+R22GsZ1dse2Vg6QVsACAAAAAAgzIak+Q3UFLTHXPmJ+MuEklFtR3eLtvM+jdKkmGCV/YAAzQAfQAAAAVkACAAAAAA0XlQgy/Yu97EQOjronl9b3dcR1DFn3deuVhtTLbJZHkFcwAgAAAAACoMnpVl6EFJak8A+t5N4RFnQhkQEBnNAx8wDqmq5U/dBWwAIAAAAACR26FJif673qpwF1J1FEkQGJ1Ywcr/ZW6JQ7meGqzt1QADNQB9AAAABWQAIAAAAAAOtpNexRxfv0yRFvZO9DhlkpU4mDuAb8ykdLnE5Vf1VAVzACAAAAAAeblFKm/30orP16uQpZslvsoS8s0xfNPIBlw3VkHeekYFbAAgAAAAAPEoHj87sYE+nBut52/LPvleWQBzB/uaJFnosxp4NRO2AAM2AH0AAAAFZAAgAAAAAIr8xAFm1zPmrvW4Vy5Ct0W8FxMmyPmFzdWVzesBhAJFBXMAIAAAAABYeeXjJEzTHwxab6pUiCRiZjxgtN59a1y8Szy3hfkg+gVsACAAAAAAJuoY4rF8mbI+nKb+5XbZShJ8191o/e8ZCRHE0O4Ey8MAAzcAfQAAAAVkACAAAAAAl+ibLk0/+EwoqeC8S8cGgAtjtpQWGEZDsybMPnrrkwEFcwAgAAAAAHPPBudWgQ+HUorLDpJMqhS9VBF2VF5aLcxgrM1s+yU7BWwAIAAAAAAcCcBR2Vyv5pAFbaOU97yovuOi1+ATDnLLcAUqHecXcAADOAB9AAAABWQAIAAAAACR9erwLTb+tcWFZgJ2MEfM0PKI9uuwIjDTHADRFgD+SQVzACAAAAAAcOop8TXsGUVQoKhzUllMYWxL93xCOkwtIpV8Q6hiSYYFbAAgAAAAAKXKmh4V8veYwob1H03Q3p3PN8SRAaQwDT34KlNVUjiDAAM5AH0AAAAFZAAgAAAAALv0vCPgh7QpmM8Ug6ad5ioZJCh7pLMdT8FYyQioBQ6KBXMAIAAAAADsCPyIG8t6ApQkRk1fX/sfc1kpuWCWP8gAEpnYoBSHrQVsACAAAAAAJe/r67N6d8uTiogvfoR9rEXbIDjyLb9EVdqkayFFGaYAAzEwAH0AAAAFZAAgAAAAAIW4AxJgYoM0pcNTwk1RSbyjZGIqgKL1hcTJmNrnZmoPBXMAIAAAAAAZpfx3EFO0vY0f1eHnE0PazgqeNDTaj+pPJMUNW8lFrAVsACAAAAAAP+Um2vwW6Bj6vuz9DKz6+6aWkoKoEmFNoiz/xXm7lOsAAzExAH0AAAAFZAAgAAAAAKliO6L9zgeuufjj174hvmQGNRbmYYs9yAirL7OxwEW3BXMAIAAAAAAqU7vs3DWUQ95Eq8OejwWnD0GuXd+ASi/uD6S0l8MM1QVsACAAAAAAb9legYzsfctBPpHyl7YWpPmLr5QiNZFND/50N1vv2MUAAzEyAH0AAAAFZAAgAAAAAOGQcCBkk+j/Kzjt/Cs6g3BZPJG81wIHBS8JewHGpgk+BXMAIAAAAABjrxZXWCkdzrExwCgyHaafuPSQ4V4x2k9kUCAqUaYKDQVsACAAAAAADBU6KefT0v8zSmseaMNmQxKjJar72y7MojLFhkEHqrUAAzEzAH0AAAAFZAAgAAAAAPmCNEt4t97waOSd5hNi2fNCdWEkmcFJ37LI9k4Az4/5BXMAIAAAAABX7DuDPNg+duvELf3NbLWkPMFw2HGLgWGHyVWcPvSNCAVsACAAAAAAS7El1FtZ5STh8Q1FguvieyYX9b2DF1DFVsb9hzxXYRsAAzE0AH0AAAAFZAAgAAAAAD4vtVUYRNB+FD9yoQ2FVJH3nMeJeKbi6eZfth638YqbBXMAIAAAAAANCuUB4OdmuD6LaDK2f3vaqfgYYvg40wDXOBbcFjTqLwVsACAAAAAA9hqC2VoJBjwR7hcQ45xO8ZVojwC83jiRacCaDj6Px2gAAzE1AH0AAAAFZAAgAAAAAJPIRzjmTjbdIvshG6UslbEOd797ZSIdjGAhGWxVQvK1BXMAIAAAAABgmJ0Jh8WLs9IYs/a7DBjDWd8J3thW/AGJK7zDnMeYOAVsACAAAAAAi9zAsyAuou2oiCUHGc6QefLUkACa9IgeBhGu9W/r0X8AAzE2AH0AAAAFZAAgAAAAAABQyKQPoW8wGPIqnsTv69+DzIdRkohRhOhDmyVHkw9WBXMAIAAAAAAqWA2X4tB/h3O1Xlawtz6ndI6WaTwgU1QYflL35opu5gVsACAAAAAAWI/Gj5aZMwDIxztqmVL0g5LBcI8EdKEc2UA28pnekQoAAzE3AH0AAAAFZAAgAAAAACB7NOyGQ1Id3MYnxtBXqyZ5Ul/lHH6p1b10U63DfT6bBXMAIAAAAADpOryIcndxztkHSfLN3Kzq29sD8djS0PspDSqERMqokQVsACAAAAAADatsMW4ezgnyi1PiP7xk+gA4AFIN/fb5uJqfVkjg4UoAAzE4AH0AAAAFZAAgAAAAAKVfXLfs8XA14CRTB56oZwV+bFJN5BHraTXbqEXZDmTkBXMAIAAAAAASRWTsfGOpqdffiOodoqIgBzG/yzFyjR5CfUsIUIWGpgVsACAAAAAAkgCHbCwyX640/0Ni8+MoYxeHUiC+FSU4Mn9jTLYtgZgAAzE5AH0AAAAFZAAgAAAAAH/aZr4EuS0/noQR9rcF8vwoaxnxrwgOsSJ0ys8PkHhGBXMAIAAAAACd7ObGQW7qfddcvyxRTkPuvq/PHu7+6I5dxwS1Lzy5XAVsACAAAAAA3q0eKdV7KeU3pc+CtfypKR7BPxwaf30yu0j9FXeOOboAAzIwAH0AAAAFZAAgAAAAAKvlcpFFNq0oA+urq3w6d80PK1HHHw0H0yVWvU9aHijXBXMAIAAAAADWnAHQ5Fhlcjawki7kWz
dqjM2f6IdGJblojrYElWjsZgVsACAAAAAAO0wvY66l24gx8nRxyVGC0QcTztIi81Kx3ndRhuZr6W4AAzIxAH0AAAAFZAAgAAAAAH/2aMezEOddrq+dNOkDrdqf13h2ttOnexZsJxG1G6PNBXMAIAAAAABNtgnibjC4VKy5poYjvdsBBnVvDTF/4mmEAxsXVgZVKgVsACAAAAAAqvadzJFLqQbs8WxgZ2D2X+XnaPSDMLCVVgWxx5jnLcYAAzIyAH0AAAAFZAAgAAAAAF2wZoDL6/V59QqO8vdRZWDpXpkV4h4KOCSn5e7x7nmzBXMAIAAAAADLZBu7LCYjbThaVUqMK14H/elrVOYIKJQCx4C9Yjw37gVsACAAAAAAEh6Vs81jLU204aGpL90fmYTm5i5R8/RT1uIbg6VU3HwAAzIzAH0AAAAFZAAgAAAAAH27yYaLn9zh2CpvaoomUPercSfJRUmBY6XFqmhcXi9QBXMAIAAAAAAUwumVlIYIs9JhDhSj0R0+59psCMsFk94E62VxkPt42QVsACAAAAAAT5x2hCCd2bpmpnyWaxas8nSxTc8e4C9DfKaqr0ABEysAAzI0AH0AAAAFZAAgAAAAALMg2kNAO4AFFs/mW3In04yFeN4AP6Vo0klyUoT06RquBXMAIAAAAAAgGWJbeIdwlpqXCyVIYSs0dt54Rfc8JF4b8uYc+YUj0AVsACAAAAAAWHeWxIkyvXTOWvfZzqtPXjfGaWWKjGSIQENTU3zBCrsAAzI1AH0AAAAFZAAgAAAAALas/i1T2DFCEmrrLEi7O2ngJZyFHialOoedVXS+OjenBXMAIAAAAAA1kK0QxY4REcGxHeMkgumyF7iwlsRFtw9MlbSSoQY7uAVsACAAAAAAUNlpMJZs1p4HfsD4Q4WZ4TBEi6Oc2fX34rzyynqWCdwAAzI2AH0AAAAFZAAgAAAAAP1TejmWg1CEuNSMt6NUgeQ5lT+oBoeyF7d2l5xQrbXWBXMAIAAAAABPX0kj6obggdJShmqtVfueKHplH4ZrXusiwrRDHMOKeQVsACAAAAAAIYOsNwC3DA7fLcOzqdr0bOFdHCfmK8tLwPoaE9uKOosAAzI3AH0AAAAFZAAgAAAAAMrKn+QPa/NxYezNhlOX9nyEkN1kE/gW7EuZkVqYl0b8BXMAIAAAAABUoZMSPUywRGfX2EEencJEKH5x/P9ySUVrhStAwgR/LgVsACAAAAAAMgZFH6lQIIDrgHnFeslv3ld20ynwQjQJt3cAp4GgrFkAAzI4AH0AAAAFZAAgAAAAAMmD1+a+oVbiUZd1HuZqdgtdVsVKwuWAn3/M1B6QGBM3BXMAIAAAAACLyytOYuZ9WEsIrrtJbXUx4QgipbaAbmlJvSZVkGi0CAVsACAAAAAA4v1lSp5H9BB+HYJ4bH43tC8aeuPZMf78Ng1JOhJh190AAzI5AH0AAAAFZAAgAAAAAOVKV7IuFwmYP1qVv8h0NvJmfPICu8yQhzjG7oJdTLDoBXMAIAAAAABL70XLfQLKRsw1deJ2MUvxSWKxpF/Ez73jqtbLvqbuogVsACAAAAAAvfgzIorXxE91dDt4nQxYfntTsx0M8Gzdsao5naQqcRUAAzMwAH0AAAAFZAAgAAAAAKS/1RSAQma+xV9rz04IcdzmavtrBDjOKPM+Z2NEyYfPBXMAIAAAAAAOJDWGORDgfRv8+w5nunh41wXb2hCA0MRzwnLnQtIqPgVsACAAAAAAf42C1+T7xdHEFF83+c2mF5S8PuuL22ogXXELnRAZ4boAAzMxAH0AAAAFZAAgAAAAAFeq8o82uNY1X8cH6OhdTzHNBUnCChsEDs5tm0kPBz3qBXMAIAAAAABaxMBbsaeEj/EDtr8nZfrhhhirBRPJwVamDo5WwbgvTQVsACAAAAAAMbH453A+BYAaDOTo5kdhV1VdND1avNwvshEG/4MIJjQAAzMyAH0AAAAFZAAgAAAAAI8IKIfDrohHh2cjspJHCovqroSr5N3QyVtNzFvT5+FzBXMAIAAAAABXHXteKG0DoOMmECKp6ro1MZNQvXGzqTDdZ0DUc8QfFAVsACAAAAAA/w5s++XYmO+9TWTbtGc3n3ndV4T9JUribIbF4jmDLSMAAzMzAH0AAAAFZAAgAAAAAJkHvm15kIu1OtAiaByj5ieWqzxiu/epK6c/9+KYIrB0BXMAIAAAAACzg5TcyANk0nes/wCJudd1BwlkWWF6zw3nGclq5v3SJQVsACAAAAAAvruXHTT3irPJLyWpI1j/Xwf2FeIE/IV+6Z49pqRzISoAAzM0AH0AAAAFZAAgAAAAAAYSOvEWWuSg1Aym7EssNLR+xsY7e9BcwsX4JKlnSHJcBXMAIAAAAABT48eY3PXVDOjw7JpNjOe1j2JyI3LjDnQoqZ8Je5B2KgVsACAAAAAAU2815RR57TQ9uDg0XjWjBkAKvf8yssxDMzrM4+FqP6AAAzM1AH0AAAAFZAAgAAAAAGQxC9L1e9DfO5XZvX1yvc3hTLtQEdKO9FPMkyg0Y9ZABXMAIAAAAADtmcMNJwdWLxQEArMGZQyzpnu+Z5yMmPAkvgq4eAKwNQVsACAAAAAAJ88zt4Y/Hoqh+zrf6KCOiUwHbOzCxSfp6k/qsZaYGEgAAzM2AH0AAAAFZAAgAAAAADLHK2LNCNRO0pv8n4fAsxwtUqCNnVK8rRgNiQfXpHSdBXMAIAAAAACf16EBIHRKD3SzjRW+LMOl+47QXA3CJhMzlcqyFRW22AVsACAAAAAAMGz4fAOa0EoVv90fUffwLjBrQhHATf+NdlgCR65vujAAAzM3AH0AAAAFZAAgAAAAAHiZJiXKNF8bbukQGsdYkEi95I+FSBHy1I5/hK2uEZruBXMAIAAAAADE+lZBa8HDUJPN+bF6xI9x4N7GF9pj3vBR7y0BcfFhBAVsACAAAAAAGIEN6sfqq30nyxW4dxDgXr/jz5HmvA9T1jx/pKCn4zgAAzM4AH0AAAAFZAAgAAAAAI1oa2OIw5TvhT14tYCGmhanUoYcCZtNbrVbeoMldHNZBXMAIAAAAAAx2nS0Ipblf2XOgBiUOuJFBupBhe7nb6QPLZlA4aMPCgVsACAAAAAA9xu828hugIgo0E3de9dZD+gTpVUGlwtDba+tw/WcbUoAAzM5AH0AAAAFZAAgAAAAABgTWS3Yap7Q59hii/uPPimHWXsr+DUmsqfwt/X73qsOBXMAIAAAAACKK05liW5KrmEAvtpCB1WUltruzUylDDpjea//UlWoOAVsACAAAAAAcgN4P/wakJ5aJK5c1bvJBqpVGND221dli2YicPFfuAYAAzQwAH0AAAAFZAAgAAAAABOAnBPXDp6i9TISQXvcNKwGDLepZTu3cKrB4vKnSCjBBXMAIAAAAADjjzZO7UowAAvpwyG8BNOVqLCccMFk3aDK4unUeft5ywVsACAAAAAA4zkCd4k9gvfXoD1C7vwTjNcdVJwEARh8h/cxZ4PNMfgAAzQxAH0AAAAFZAAgAAAAAHN8hyvT1lYrAsdiV
5GBdd5jhtrAYE/KnSjw2Ka9hjz9BXMAIAAAAAD794JK7EeXBs+D7yOVK7nWF8SbZ/7U8gZ7nnT9JFNwTAVsACAAAAAAg8Wt1HO3NhByq2ggux2a4Lo6Gryr24rEFIqh2acrwWMAAzQyAH0AAAAFZAAgAAAAAO93bPrq8bsnp1AtNd9ETnXIz0lH/2HYN/vuw9wA3fyFBXMAIAAAAABHlls5fbaF2oAGqptC481XQ4eYxInTC29aElfmVZgDUgVsACAAAAAANoQXEWpXJpgrSNK/cKi/m7oYhuSRlp1IZBF0bqTEATcAAzQzAH0AAAAFZAAgAAAAAL1YsAZm1SA0ztU6ySIrQgCCA74V6rr0/4iIygCcaJL6BXMAIAAAAADTXWTHWovGmUR1Zg9l/Aqq9H5mOCJQQrb/Dfae7e3wKAVsACAAAAAA5dunyJK6/SVfDD0t9QlNBcFqoZnf9legRjHaLSKAoQMAAzQ0AH0AAAAFZAAgAAAAAEoFAeHk0RZ9kD+cJRD3j7PcE5gzWKnyBrF1I/MDNp5mBXMAIAAAAACgHtc2hMBRSZjKw8RAdDHK+Pi1HeyjiBuAslGVNcW5tAVsACAAAAAAXzBLfq+GxRtX4Wa9fazA49DBLG6AjZm2XODStJKH8D0AAzQ1AH0AAAAFZAAgAAAAAAW+7DmSN/LX+/0uBVJDHIc2dhxAGz4+ehyyz8fAnNGoBXMAIAAAAAA6Ilw42EvvfLJ3Eq8Afd+FjPoPcQutZO6ltmCLEr8kxQVsACAAAAAAbbZalyo07BbFjPFlYmbmv0z023eT9eLkHqeVUnfUAUAAAzQ2AH0AAAAFZAAgAAAAANBdV7M7kuYO3EMoQItAbXv4t2cIhfaT9V6+s4cg9djlBXMAIAAAAABvz4MIvZWxxrcJCL5qxLfFhXiUYB1OLHdKEjco94SgDgVsACAAAAAAK2GVGvyPIKolF/ECcmfmkVcf1/IZNcaTv96N92yGrkEAAzQ3AH0AAAAFZAAgAAAAAMoAoiAn1kc79j5oPZtlMWHMhhgwNhLUnvqkqIFvcH1NBXMAIAAAAADcJTW7WiCyW0Z9YDUYwppXhLj4Ac1povpJvcAq+i48MQVsACAAAAAAIGxGDzoeB3PTmudl4+j6piQB++e33EEzuzAiXcqGxvUAAzQ4AH0AAAAFZAAgAAAAACI3j5QP7dWHpcT6WO/OhsWwRJNASBYqIBDNzW8IorEyBXMAIAAAAABxUpBSjXwCKDdGP9hYU+RvyR+96kChfvyyRC4jZmztqAVsACAAAAAAvBCHguWswb4X0xdcAryCvZgQuthXzt7597bJ5VxAMdgAAzQ5AH0AAAAFZAAgAAAAAKsbycEuQSeNrF8Qnxqw3x3og8JmQabwGqnDbqzFRVrrBXMAIAAAAACno/3ef2JZJS93SVVzmOZSN+jjJHT8s0XYq2M46d2sLAVsACAAAAAAAt5zLJG+/j4K8rnkFtAn8IvdUVNefe6utJ3rdzgwudIAAzUwAH0AAAAFZAAgAAAAAPXIcoO8TiULqlxzb74NFg+I8kWX5uXIDUPnh2DobIoMBXMAIAAAAADR6/drkdTpnr9g1XNvKDwtBRBdKn7c2c4ZNUVK5CThdQVsACAAAAAAJqOA1c6KVog3F4Hb/GfDb3jCxXDRTqpXWSbMH4ePIJsAAzUxAH0AAAAFZAAgAAAAAEa03ZOJmfHT6/nVadvIw71jVxEuIloyvxXraYEW7u7pBXMAIAAAAADzRlBJK75FLiKjz3djqcgjCLo/e3yntI3MnPS48OORhgVsACAAAAAAnQhx4Rnyj081XrLRLD5NLpWmRWCsd0M9Hl7Jl19R0h8AAzUyAH0AAAAFZAAgAAAAAKx8NLSZUU04pSSGmHa5fh2oLHsEN5mmNMNHL95/tuC9BXMAIAAAAAA59hcXVaN3MNdHoo11OcH1aPRzHCwpVjO9mGfMz4xh3QVsACAAAAAAYIPdjV2XbPj7dBeHPwnwhVU7zMuJ+xtMUW5mIOYtmdAAAzUzAH0AAAAFZAAgAAAAAHNKAUxUqBFNS9Ea9NgCZoXMWgwhP4x0/OvoaPRWMquXBXMAIAAAAABUZ551mnP4ZjX+PXU9ttomzuOpo427MVynpkyq+nsYCQVsACAAAAAALnVK5p2tTTeZEh1zYt4iqKIQT9Z0si//Hy1L85oF+5IAAzU0AH0AAAAFZAAgAAAAALfGXDlyDVcGaqtyHkLT0qpuRhJQLgCxtznazhFtuyn/BXMAIAAAAABipxlXDq14C62pXhwAeen5+syA+/C6bN4rtZYcO4zKwAVsACAAAAAAXUf0pzUq0NhLYagWDap4uEiwq5rLpcx29rWbt1NYMsMAAzU1AH0AAAAFZAAgAAAAANoEr8sheJjg4UCfBkuUzarU9NFoy1xwbXjs5ifVDeA9BXMAIAAAAABPoyTf6M+xeZVGES4aNzVlq7LgjqZXJ/QunjYVusGUEAVsACAAAAAA1hA2gMeZZPUNytk9K+lB1RCqWRudRr7GtadJlExJf8oAAzU2AH0AAAAFZAAgAAAAAKvDiK+xjlBe1uQ3SZTNQl2lClIIvpP/5CHwY6Kb3WlgBXMAIAAAAAANnxImq5MFbWaRBHdJp+yD09bVlcFtiFDYsy1eDZj+iQVsACAAAAAAWtsyO+FxMPSIezwsV1TJD8ZrXAdRnQM6DJ+f+1V3qEkAAzU3AH0AAAAFZAAgAAAAAF49IlFH9RmSUSvUQpEPUedEksrQUcjsOv44nMkwXhjzBXMAIAAAAADJtWGbk0bZzmk20obz+mNsp86UCu/nLLlbg7ppxYn7PgVsACAAAAAA3k0Tj/XgPQtcYijH8cIlQoe/VXf15q1nrZNmg7yWYEgAAzU4AH0AAAAFZAAgAAAAAOuSJyuvz50lp3BzXlFKnq62QkN2quNU1Gq1IDsnFoJCBXMAIAAAAAAqavH1d93XV3IzshWlMnzznucadBF0ND092/2ApI1AcAVsACAAAAAAzUrK4kpoKCmcpdZlZNI13fddjdoAseVe67jaX1LobIIAAzU5AH0AAAAFZAAgAAAAALtgC4Whb4ZdkCiI30zY6fwlsxSa7lEaOAU3SfUXr02XBXMAIAAAAACgdZ6U1ZVgUaZZwbIaCdlANpCw6TZV0bwg3DS1NC/mnAVsACAAAAAAzI49hdpp0PbO7S2KexISxC16sE73EUAEyuqUFAC/J48AAzYwAH0AAAAFZAAgAAAAAF6PfplcGp6vek1ThwenMHVkbZgrc/dHgdsgx1VdPqZ5BXMAIAAAAACha3qhWkqmuwJSEXPozDO8y1ZdRLyzt9Crt2vjGnT7AAVsACAAAAAA7nvcU59+LwxGupSF21jAeAE0x7JE94tjRkJfgM1yKU8AAzYxAH0AAAAFZAAgAAAAAKoLEhLvLjKc7lhOJfx+VrGJCx9tXlOSa9bxQzGR6rfbBXMAIAAAAAAIDK5wNnjRMBzET7x/KAMExL/zi1IumJM92XTgXfoPoAVsACAAAAAAFkUYWFwNr815dEdFqp+TiIoz
Dcq5IBNVkyMoDjharDQAAzYyAH0AAAAFZAAgAAAAADoQv6lutRmh5scQFvIW6K5JBquLxszuygM1tzBiGknIBXMAIAAAAADAD+JjW7FoBQ76/rsECmmcL76bmyfXpUU/awqIsZdO+wVsACAAAAAAPFHdLw3jssmEXsgtvl/RBNaUCRA1kgSwsofG364VOvQAAzYzAH0AAAAFZAAgAAAAAJNHUGAgn56KekghO19d11nai3lAh0JAlWfeP+6w4lJBBXMAIAAAAAD9XGJlvz59msJvA6St9fKW9CG4JoHV61rlWWnkdBRLzwVsACAAAAAAxwP/X/InJJHmrjznvahIMgj6pQR30B62UtHCthSjrP0AAzY0AH0AAAAFZAAgAAAAAHgYoMGjEE6fAlAhICv0+doHcVX8CmMVxyq7+jlyGrvmBXMAIAAAAAC/5MQZgTHuIr/O5Z3mXPvqrom5JTQ8IeSpQGhO9sB+8gVsACAAAAAAuPSXVmJUAUpTQg/A9Bu1hYczZF58KEhVofakygbsvJQAAzY1AH0AAAAFZAAgAAAAANpIljbxHOM7pydY877gpRQvYY2TGK7igqgGsavqGPBABXMAIAAAAAAqHyEu9gpurPOulApPnr0x9wrygY/7mXe9rAC+tPK80wVsACAAAAAA7gkPzNsS3gCxdFBWbSW9tkBjoR5ib+saDvpGSB3A3ogAAzY2AH0AAAAFZAAgAAAAAGR+gEaZTeGNgG9BuM1bX2R9ed4FCxBA9F9QvdQDAjZwBXMAIAAAAABSkrYFQ6pf8MZ1flgmeIRkxaSh/Eep4Btdx4QYnGGnwAVsACAAAAAApRovMiV00hm/pEcT4XBsyPNw0eo8RLAX/fuabjdU+uwAAzY3AH0AAAAFZAAgAAAAAFNprhQ3ZwIcYbuzLolAT5n/vc14P9kUUQComDu6eFyKBXMAIAAAAAAcx9z9pk32YbPV/sfPZl9ALIEVsqoLXgqWLVK/tP+heAVsACAAAAAA/qxvuvJbAHwwhfrPVpmCFzNvg2cU/NXaWgqgYUZpgXwAAzY4AH0AAAAFZAAgAAAAADgyPqQdqQrgfmJjRFAILTHzXbdw5kpKyfeoEcy6YYG/BXMAIAAAAAAE+3XsBQ8VAxAkN81au+f3FDeCD/s7KoZD+fnM1MJSSAVsACAAAAAAhRnjrXecwV0yeCWKJ5J/x12Xx4qVJahsCEVHB/1U2rcAAzY5AH0AAAAFZAAgAAAAAI0CT7JNngTCTUSei1Arw7eHWCD0jumv2rb7imjWIlWABXMAIAAAAABSP8t6ya0SyCphXMwnru6ZUDXWElN0NfBvEOhDvW9bJQVsACAAAAAAGWeGmBNDRaMtvm7Rv+8TJ2sJ4WNXKcp3tqpv5Se9Ut4AAzcwAH0AAAAFZAAgAAAAAD/FIrGYFDjyYmVb7oTMVwweWP7A6F9LnyIuNO4MjBnXBXMAIAAAAACIZgJCQRZu7NhuNMyOqCn1tf+DfU1qm10TPCfj5JYV3wVsACAAAAAA5hmY4ptuNxULGf87SUFXQWGAONsL9U29duh8xqsHtxoAAzcxAH0AAAAFZAAgAAAAAHIkVuNDkSS1cHIThKc/O0r2/ubaABTOi8Q1r/dvBAsEBXMAIAAAAADdHYqchEiJLM340c3Q4vJABmmth3+MKzwLYlsG6GS7sQVsACAAAAAADa+KP/pdTiG22l+ZWd30P1iHjnBF4zSNRdFm0oEK82kAAzcyAH0AAAAFZAAgAAAAAJmoDILNhC6kn3masElfnjIjP1VjsjRavGk1gSUIjh1NBXMAIAAAAAD97Ilvp3XF8T6MmVVcxMPcdL80RgQ09UoC6PnoOvZ1IQVsACAAAAAA2RK3Xng6v8kpvfVW9tkVXjpE+BSnx9/+Fw85Evs+kUEAAzczAH0AAAAFZAAgAAAAAI5bm3YO0Xgf0VT+qjVTTfvckecM3Cwqj7DTKZXf8/NXBXMAIAAAAAD/m+h8fBhWaHm6Ykuz0WX1xL4Eme3ErLObyEVJf8NCywVsACAAAAAAfb1VZZCqs2ivYbRzX4p5CtaCkKW+g20Pr57FWXzEZi8AAzc0AH0AAAAFZAAgAAAAANqo4+p6qdtCzcB4BX1wQ6llU7eFBnuu4MtZwp4B6mDlBXMAIAAAAAAGiz+VaukMZ+6IH4jtn4KWWdKK4/W+O+gRioQDrfzpMgVsACAAAAAAG4YYkTp80EKo59mlHExDodRQFR7njhR5dmISwUJ6ukAAAzc1AH0AAAAFZAAgAAAAAPrFXmHP2Y4YAm7b/aqsdn/DPoDkv7B8egWkfe23XsM1BXMAIAAAAAAGhwpKAr7skeqHm3oseSbO7qKNhmYsuUrECBxJ5k+D2AVsACAAAAAAAqPQi9luYAu3GrFCEsVjd9z2zIDcp6SPTR2w6KQEr+IAAzc2AH0AAAAFZAAgAAAAABzjYxwAjXxXc0Uxv18rH8I3my0Aguow0kTwKyxbrm+cBXMAIAAAAADVbqJVr6IdokuhXkEtXF0C2gINLiAjMVN20lE20Vmp2QVsACAAAAAAD7K1Fx4gFaaizkIUrf+EGXQeG7QX1jadhGc6Ji471H8AAzc3AH0AAAAFZAAgAAAAAFMm2feF2fFCm/UC6AfIyepX/xJDSmnnolQIBnHcPmb5BXMAIAAAAABLI11kFrQoaNVZFmq/38aRNImPOjdJh0Lo6irI8M/AaAVsACAAAAAAOWul0oVqJ9CejD2RqphhTC98DJeRQy5EwbNerU2+4l8AAzc4AH0AAAAFZAAgAAAAAJvXB3KyNiNtQko4SSzo/9b2qmM2zU9CQTTDfLSBWMgRBXMAIAAAAAAvjuVP7KsLRDeqVqRziTKpBrjVyqKiIbO9Gw8Wl2wFTAVsACAAAAAADlE+oc1ins+paNcaOZJhBlKlObDJ4VQORWjFYocM4LgAAzc5AH0AAAAFZAAgAAAAAPGdcxDiid8z8XYnfdDivNMYVPgBKdGOUw6UStU+48CdBXMAIAAAAAARj6g1Ap0eEfuCZ4X2TsEw+Djrhto3fA5nLwPaY0vCTgVsACAAAAAAoHqiwGOUkBu8SX5U1yHho+UIFdSN2MdQN5s6bQ0EsJYAAzgwAH0AAAAFZAAgAAAAAP5rGPrYGt3aKob5f/ldP0qrW7bmWvqnKY4QwdDWz400BXMAIAAAAADTQkW2ymaaf/bhteOOGmSrIR97bAnJx+yN3yMj1bTeewVsACAAAAAADyQnHGH2gF4w4L8axUsSTf6Ubk7L5/eoFOJk12MtZAoAAzgxAH0AAAAFZAAgAAAAAAlz6wJze5UkIxKpJOZFGCOf3v2KByWyI6NB6JM9wNcBBXMAIAAAAABUC7P/neUIHHoZtq0jFVBHY75tSFYr1Y5S16YN5XxC1QVsACAAAAAAgvxRbXDisNnLY3pfsjDdnFLtkvYUC4lhA68eBXc7KAwAAzgyAH0AAAAFZAAgAAAAAFJ8AtHcjia/9Y5pLEc3qVgH5xKiXw12G9Kn2A1EY8McBXMAIAAAAAAxe7Bdw7eUSBk/oAawa7u
icTEDgXLymRNhBy1LAxhDvwVsACAAAAAAxKPaIBKVx3jTA+R/el7P7AZ7efrmTGjJs3Hj/YdMddwAAzgzAH0AAAAFZAAgAAAAAO8uwQUaKFb6vqR3Sv3Wn4QAonC2exOC9lGG1juqP5DtBXMAIAAAAABZf1KyJgQg8/Rf5c02DgDK2aQu0rNCOvaL60ohDHyY+gVsACAAAAAAqyEjfKC8lYoIfoXYHUqHZPoaA6EK5BAZy5dxXZmay4kAAzg0AH0AAAAFZAAgAAAAAE8YtqyRsGCeiR6hhiyisR/hccmK4nZqIMzO4lUBmEFzBXMAIAAAAAC1UYOSKqAeG1UJiKjWFVskRhuFKpj9Ezy+lICZvFlN5AVsACAAAAAA6Ct9nNMKyRazn1OKnRKagm746CGu+jyhbL1qJnZxGi0AAzg1AH0AAAAFZAAgAAAAAPhCrMausDx1QUIEqp9rUdRKyM6a9AAx7jQ3ILIu8wNIBXMAIAAAAACmH8lotGCiF2q9VQxhsS+7LAZv79VUAsOUALaGxE/EpAVsACAAAAAAnc1xCKfdvbUEc8F7XZqlNn1C+hZTtC0I9I3LL06iaNkAAzg2AH0AAAAFZAAgAAAAAOBi/GAYFcstMSJPgp3VkMiuuUUCrZytvqYaU8dwm8v2BXMAIAAAAACEZSZVyD3pKzGlbdwlYmWQhHHTV5SnNLknl2Gw8IaUTQVsACAAAAAAfsLZsEDcWSuNsIo/TD1ReyQW75HPMgmuKZuWFOLKRLoAAzg3AH0AAAAFZAAgAAAAAIQuup+YGfH3mflzWopN8J1X8o8a0d9CSGIvrA5HOzraBXMAIAAAAADYvNLURXsC2ITMqK14LABQBI+hZZ5wNf24JMcKLW+84AVsACAAAAAACzfjbTBH7IwDU91OqLAz94RFkoqBOkzKAqQb55gT4/MAAzg4AH0AAAAFZAAgAAAAAKsh0ADyOnVocFrOrf6MpTrNvAj8iaiE923DPryu124gBXMAIAAAAADg24a8NVE1GyScc6tmnTbmu5ulzO+896fE92lN08MeswVsACAAAAAAaPxcOIxnU7But88/yadOuDJDMcCywwrRitaxMODT4msAAzg5AH0AAAAFZAAgAAAAAKkVC2Y6HtRmv72tDnPUSjJBvse7SxLqnr09/Uuj9sVVBXMAIAAAAABYNFUkH7ylPMN+Bc3HWX1e0flGYNbtJNCY9SltJCW/UAVsACAAAAAAZYK/f9H4OeihmpiFMH7Wm7uLvs2s92zNA8wyrNZTsuMAAzkwAH0AAAAFZAAgAAAAADDggcwcb/Yn1Kk39sOHsv7BO/MfP3m/AJzjGH506Wf9BXMAIAAAAAAYZIsdjICS0+BDyRUPnrSAZfPrwtuMaEDEn0/ijLNQmAVsACAAAAAAGPnYVvo2ulO9z4LGd/69NAklfIcZqZvFX2KK0s+FcTUAAzkxAH0AAAAFZAAgAAAAAEWY7dEUOJBgjOoWVht1wLehsWAzB3rSOBtLgTuM2HC8BXMAIAAAAAAAoswiHRROurjwUW8u8D5EUT+67yvrgpB/j6PzBDAfVwVsACAAAAAA6NhRTYFL/Sz4tao7vpPjLNgAJ0FX6P/IyMW65qT6YsMAAzkyAH0AAAAFZAAgAAAAAPZaapeAUUFPA7JTCMOWHJa9lnPFh0/gXfAPjA1ezm4ZBXMAIAAAAACmJvLY2nivw7/b3DOKH/X7bBXjJwoowqb1GtEFO3OYgAVsACAAAAAA/JcUoyKacCB1NfmH8vYqC1f7rd13KShrQqV2r9QBP44AAzkzAH0AAAAFZAAgAAAAAK00u6jadxCZAiA+fTsPVDsnW5p5LCr4+kZZZOTDuZlfBXMAIAAAAAAote4zTEYMDgaaQbAdN8Dzv93ljPLdGjJzvnRn3KXgtQVsACAAAAAAxXd9Mh6R3mnJy8m7UfqMKi6oD5DlZpkaOz6bEjMOdiwAAzk0AH0AAAAFZAAgAAAAAFbgabdyymiEVYYwtJSWa7lfl/oYuj/SukzJeDOR6wPVBXMAIAAAAADAFGFjS1vPbN6mQEhkDYTD6V2V23Ys9gUEUMGNvMPkaAVsACAAAAAAL/D5Sze/ZoEanZLK0IeEkhgVkxEjMWVCfmJaD3a8uNIAAzk1AH0AAAAFZAAgAAAAABNMR6UBv2E627CqLtQ/eDYx7OEwQ7JrR4mSHFa1N8tLBXMAIAAAAAAxH4gucI4UmNVB7625C6hFSVCuIpJO3lusJlPuL8H5EQVsACAAAAAAVLHNg0OUVqZ7WGOP53BkTap9FOw9dr1P4J8HxqFqU04AAzk2AH0AAAAFZAAgAAAAAG8cd6WBneNunlqrQ2EmNf35W7OGObGq9WL4ePX+LUDmBXMAIAAAAAAjJ2+sX87NSis9hBsgb1QprVRnO7Bf+GObCGoUqyPE4wVsACAAAAAAs9c9SM49/pWmyUQKslpt3RTMBNSRppfNO0JBvUqHPg0AAzk3AH0AAAAFZAAgAAAAAFWOUGkUpy8yf6gB3dio/aOfRKh7XuhvsUj48iESFJrGBXMAIAAAAAAY7sCDMcrUXvNuL6dO0m11WyijzXZvPIcOKob6IpC4PQVsACAAAAAAJOP+EHz6awDb1qK2bZQ3kTV7wsj5Daj/IGAWh4g7omAAAzk4AH0AAAAFZAAgAAAAAGUrIdKxOihwNmo6B+aG+Ag1qa0+iqdksHOjQj+Oy9bZBXMAIAAAAABwa5dbI2KmzBDNBTQBEkjZv4sPaeRkRNejcjdVymRFKQVsACAAAAAA4ml/nm0gJNTcJ4vuD+T2Qfq2fQZlibJp/j6MOGDrbHMAAzk5AH0AAAAFZAAgAAAAAOx89xV/hRk64/CkM9N2EMK6aldII0c8smdcsZ46NbP8BXMAIAAAAADBF6tfQ+7q9kTuLyuyrSnDgmrdmrXkdhl980i1KHuGHgVsACAAAAAACUqiFqHZdGbwAA+hN0YUE5zFg+H+dabIB4dj5/75W/YAAzEwMAB9AAAABWQAIAAAAADJDdC9aEFl4Y8J/awHbnXGHjfP+VXQilPHJg7ewaJI7AVzACAAAAAAE+tqRl6EcBMXvbr4GDiNIYObTsYpa1n6BJk9EjIJVicFbAAgAAAAAJVc+HYYqa0m1Hq6OiRX8c0iRnJYOt6AJAJoG0sG3GMSAAMxMDEAfQAAAAVkACAAAAAA3F9rjEKhpoHuTULVGgfUsGGwJs3bISrXkFP1v6KoQLgFcwAgAAAAAIBf0tXw96Z/Ds0XSIHX/zk3MzUR/7WZR/J6FpxRWChtBWwAIAAAAABWrjGlvKYuTS2s8L9rYy8Hf0juFGJfwQmxVIjkTmFIGQADMTAyAH0AAAAFZAAgAAAAAOYIYoWkX7dGuyKfi3XssUlc7u/gWzqrR9KMkikKVdmSBXMAIAAAAABVF2OYjRTGi9Tw8XCAwZWLpX35Yl271TlNWp6N/nROhAVsACAAAAAA0nWwYzXQ1+EkDvnGq+SMlq20z+j32Su+i/A95SggPb4AAzEwMwB9AAAABWQAIAAAAACMtPm12YtdEA
vqu6Eji1yuRXnu1RJP6h0l7pH3lSH4MwVzACAAAAAAENyCFfyUAh1veQBGx+cxiB7Sasrj41jzCGflZkB5cRMFbAAgAAAAAKdI2LMqISr/T5vuJPg6ZRBm5fVi2aQCc4ra3A4+AjbDAAMxMDQAfQAAAAVkACAAAAAAvlI4lDcs6GB1cnm/Tzo014CXWqidCdyE5t2lknWQd4QFcwAgAAAAAD60SpNc4O2KT7J0llKdSpcX1/Xxs97N715a1HsTFkmBBWwAIAAAAABuuRkJWAH1CynggBt1/5sPh9PoGiqTlS24D/OE2uHXLQADMTA1AH0AAAAFZAAgAAAAAKl8zcHJRDjSjJeV/WvMxulW1zrTFtaeBy/aKKhadc6UBXMAIAAAAADBdWQl5SBIvtZZLIHszePwkO14W1mQ0izUk2Ov21cPNAVsACAAAAAAHErCYycpqiIcCZHdmPL1hi+ovLQk4TAvENpfLdTRamQAAzEwNgB9AAAABWQAIAAAAABb6LXDWqCp1beQgQjj8I3sRTtFhlrmiBi+h/+ikmrvugVzACAAAAAA9stpgTecT7uTyaGNs3K9Bp0A7R0QaIAOfscyMXHBPX8FbAAgAAAAAHUt+McyXrJ1H8SwnHNVO181Ki8vDAM1f7XI26mg95ZDAAMxMDcAfQAAAAVkACAAAAAA97NTT+81PhDhgptNtp4epzA0tP4iNb9j1AWkiiiKGM8FcwAgAAAAAKPbHg7ise16vxmdPCzksA/2Mn/qST0L9Xe8vnQugVkcBWwAIAAAAABB0EMXfvju4JU/mUH/OvxWbPEl9NJkcEp4iCbkXI41fAADMTA4AH0AAAAFZAAgAAAAAMqpayM2XotEFmm0gwQd9rIzApy0X+7HfOhNk6VU7F5lBXMAIAAAAACJR9+q5T9qFHXFNgGbZnPubG8rkO6cwWhzITQTmd6VgwVsACAAAAAAOZLQ6o7e4mVfDzbpQioa4d3RoTvqwgnbmc5Qh2wsZuoAAzEwOQB9AAAABWQAIAAAAADQnslvt6Hm2kJPmqsTVYQHE/wWeZ4bE1XSkt7TKy0r1gVzACAAAAAA8URTA4ZMrhHPvlp53TH6FDCzS+0+61qHm5XK6UiOrKEFbAAgAAAAAHQbgTCdZcbdA0avaTmZXUKnIS7Nwf1tNrcXDCw+PdBRAAMxMTAAfQAAAAVkACAAAAAAhujlgFPFczsdCGXtQ/002Ck8YWQHHzvWvUHrkbjv4rwFcwAgAAAAALbV0lLGcSGfE7mDM3n/fgEvi+ifjl7WZ5b3aqjDNvx9BWwAIAAAAACbceTZy8E3QA1pHmPN5kTlOx3EO8kJM5PUjTVftw1VpgADMTExAH0AAAAFZAAgAAAAABm/6pF96j26Jm7z5KkY1y33zcAEXLx2n0DwC03bs/ixBXMAIAAAAAD01OMvTZI/mqMgxIhA5nLs068mW+GKl3OW3ilf2D8+LgVsACAAAAAAaLvJDrqBESTNZSdcXsd+8GXPl8ZkUsGpeYuyYVv/kygAAzExMgB9AAAABWQAIAAAAACfw9/te4GkHZAapC9sDMHHHZgmlTrccyJDPFciOMSOcwVzACAAAAAAIIC1ZpHObvmMwUfqDRPl4C1aeuHwujM1G/yJbvybMNAFbAAgAAAAAAs9x1SnVpMfNv5Bm1aXGwHmbbI9keWa9HRD35XuCBK5AAMxMTMAfQAAAAVkACAAAAAAkxHJRbnShpPOylLoDdNShfILeA1hChKFQY9qQyZ5VmsFcwAgAAAAAKidrY+rC3hTY+YWu2a7fuMH2RD/XaiTIBW1hrxNCQOJBWwAIAAAAACW0kkqMIzIFMn7g+R0MI8l15fr3k/w/mHtY5n6SYTEwAADMTE0AH0AAAAFZAAgAAAAAByuYl8dBvfaZ0LO/81JW4hYypeNmvLMaxsIdvqMPrWoBXMAIAAAAABNddwobOUJzm9HOUD8BMZJqkNCUCqstHZkC76FIdNg9AVsACAAAAAAQQOkIQtkyNavqCnhQbNg3HfqrJdsAGaoxSJePJl1qXsAAzExNQB9AAAABWQAIAAAAABxMy7X5hf7AXGDz3Y/POu1ZpkMlNcSvSP92NOO/Gs7wAVzACAAAAAAHJshWo2T5wU2zvqCyJzcJQKQaHFHpCpMc9oWBXkpUPoFbAAgAAAAAGeiJKzlUXAvL0gOlW+Hz1mSa2HsV4RGmyLmCHlzbAkoAAMxMTYAfQAAAAVkACAAAAAAlqbslixl7Zw3bRlibZbe/WmKw23k8uKeIzPKYEtbIy0FcwAgAAAAAHEKwpUxkxOfef5HYvulXPmdbzTivwdwrSYIHDeNRcpcBWwAIAAAAADuPckac21Hrg/h0kt5ShJwVEZ9rx6SOHd2+HDjqxEWTQADMTE3AH0AAAAFZAAgAAAAAMXrXx0saZ+5gORmwM2FLuZG6iuO2YS+1IGPoAtDKoKBBXMAIAAAAADIQsxCr8CfFKaBcx8kIeSywnGh7JHjKRJ9vJd9x79y7wVsACAAAAAAcvBV+SykDYhmRFyVYwFYB9oBKBSHr55Jdz2cXeowsUQAAzExOAB9AAAABWQAIAAAAAAm83FA9yDUpwkbKTihe7m53u+DivS9BU2b4vQMtCVQ2AVzACAAAAAAz3m1UB/AbZPa4QSKFDnUgHaT78+6iGOFAtouiBorEgEFbAAgAAAAAIgbpyYtJj5513Z5XYqviH/HXG/5+mqR52iBbfqMmDtZAAMxMTkAfQAAAAVkACAAAAAAJRzYK0PUwr9RPG2/7yID0WgcTJPB2Xjccp5LAPDYunkFcwAgAAAAAIIh24h3DrltAzNFhF+MEmPrZtzr1PhCofhChZqfCW+jBWwAIAAAAAAzRNXtL5o9VXMk5D5ylI0odPDJDSZZry1wfN+TedH70gADMTIwAH0AAAAFZAAgAAAAAHSaHWs/dnmI9sc7nB50VB2Bzs0kHapMHCQdyVEYY30TBXMAIAAAAACkV22lhEjWv/9/DubfHBAcwJggKI5mIbSK5L2nyqloqQVsACAAAAAAS19m7DccQxgryOsBJ3GsCs37yfQqNi1G+S6fCXpEhn4AAzEyMQB9AAAABWQAIAAAAAAC/I4TQRtCl12YZmdGz17X4GqSQgfwCPgRBwdHmdwu+QVzACAAAAAAx8f3z2ut/RAZhleari4vCEE+tNIn4ikjoUwzitfQ588FbAAgAAAAAJci0w1ZB8W2spJQ+kMpod6HSCtSR2jrabOH+B0fj3A4AAMxMjIAfQAAAAVkACAAAAAADGB5yU2XT0fse/MPWgvBvZikVxrl5pf3S5K1hceKWooFcwAgAAAAAIxTmlLHMjNaVDEfJbXvRez0SEPWFREBJCT6qTHsrljoBWwAIAAAAAAlswzAl81+0DteibwHD+CG5mZJrfHXa9NnEFRtXybzzwADMTIzAH0AAAAFZAAgAAAAABmO7QD9vxWMmFjIHz13lyOeV6vHT6mYCsWxF7hb/yOjBXMAIAAAAACT9lmgkiqzuWG24afuzYiCeK9gmJqacmxAruIukd0xEAVsA
CAAAAAAZa0/FI/GkZR7CtX18Xg9Tn9zfxkD0UoaSt+pIO5t1t4AAzEyNAB9AAAABWQAIAAAAAAfPUoy7QyZKhIIURso+mkP9qr1izbjETqF5s22GwjCjAVzACAAAAAAvLMsIDQ/go4VUxeh50UHmsvMvfx51cwyONnRD2odvC0FbAAgAAAAAKMb+1CodEalAFnDrEL1Ndt8ztamZ+9134m9Kp3GQgd+AAMxMjUAfQAAAAVkACAAAAAAE3ZqUar0Bq2zWbARE0bAv98jBlK9UJ73/xcwdMWWlSkFcwAgAAAAAK4M+MmC+9sFiFsumMyJZQKxWmmJiuG9H7IzKw083xxkBWwAIAAAAAAqkAONzhvMhkyL1D/6h7QQxEkdhC3p2WjXH+VGq5qCqQADMTI2AH0AAAAFZAAgAAAAAMo8FJiOq63cAmyk2O7eI7GcbQh/1j4RrMTqly3rexftBXMAIAAAAADjVmpd0WiRGTw/gAqEgGolt2EI7Csv14vKdmYoMD0aAgVsACAAAAAA07XQBzBUQMNw7F2/YxJjZNuPVpHTTgbLd1oGk77+bygAAzEyNwB9AAAABWQAIAAAAACu5IGaIx7A3Jvly/kzlCsSA4s3iJwuIl8jEdRH0k93NwVzACAAAAAA9NRUyxYE+t0Xyosyt6vIfMFW/vBoYg6sR+jBNs4JAxIFbAAgAAAAAAzyZ91dx+0oMlOVAjRGiMrPySikY/U9eMEB4WJb3uWtAAMxMjgAfQAAAAVkACAAAAAALkRy0GJInXYLA+cgjs6Myb0a+Gu9hgXhHvhLNoGWfckFcwAgAAAAANbALyt9zCSvwnLaWCd2/y2eoB7qkWTvv1Ldu8r40JPuBWwAIAAAAAD4Fl5bV5sz4isIE9bX+lmAp+aAKaZgVYVZeVfrItkCZAADMTI5AH0AAAAFZAAgAAAAAGoUK/DSWhT8LZhszSUqDbTrp8cSA7rdqmADKL+MILtTBXMAIAAAAABHnEE9bVa6lvhfhEMkkV2kzSSxH/sMW/FIJuw3CzWs6wVsACAAAAAAanavcBdqZxgRGKvEK95wTmeL1K1CeDSXZsXUAs81uOgAAzEzMAB9AAAABWQAIAAAAAC922ZDQE3h2fQKibGMZ9hV0WNlmrPYYSdtaSyYxsWYqgVzACAAAAAAagMovciKK6WVjIc2cCj8nK5O/gVOFFVeVAJpRp89tmQFbAAgAAAAAKcTFfPQzaFiAtSFhqbN02sCE1BKWJSrRfGN5L6oZwzkAAMxMzEAfQAAAAVkACAAAAAAtK+JqX3K/z2txjAU15DgX4y90DS2YLfIJFolCOkJJJwFcwAgAAAAAMnR5V7gfX7MNqqUdL5AkWlkhyFXaBRVNej+Rcn8lrQkBWwAIAAAAAA2cDNRXZuiC241TGRvdFyctJnrNcdbZOP9zHio81tkngADMTMyAH0AAAAFZAAgAAAAAAeGrIMK/bac6kPczxbvRYqKMkcpeI2FjdMpD91FDWIvBXMAIAAAAAAix62z1LeS8yvSXCl5gHSIomjyx76fF3S1lp9k900hygVsACAAAAAAiYwzf2m71aWFD5ajcXyW2JX2EzQOkBroTGMg29nLPYIAAzEzMwB9AAAABWQAIAAAAACphf298InM0Us4HT8o1W1MGw0D/02vd7Jh+U0h7qaFaQVzACAAAAAAFXtk7YpqsOJxsqGWSIL+YcBE96G3Zz9D31gPqDW94y8FbAAgAAAAAAOrS1KVA94rjB1jZ1pPocpCeBG+B14RzWoHqVDpp7JbAAMxMzQAfQAAAAVkACAAAAAATLDS2cuDVM3yDMuWNgk2iGKBTzPpfJMbvxVOSY39ZfcFcwAgAAAAAPT5wRi2cLHIUflXzm6EQB/m7xdThP80ir1VV/JBBqvxBWwAIAAAAAB9lEtZS0aXCFbCtSbhnis27S5IPcfWGygHW8AHn3QqzwADMTM1AH0AAAAFZAAgAAAAAJNjExiZVX7jfFGfYpQu16qxLN0YPqVU/5CQ/Y67YSinBXMAIAAAAABMpm2+6KrkRUlXzQoMPHrQmIO6dkQz66tYdfTeA3dKqQVsACAAAAAAFXobHiMLvNZuEPr8jtewCX2J93EZG3JNeyVg92fue6YAAzEzNgB9AAAABWQAIAAAAABlFkYtLCx901X6QVVMkSn6Z7k30UF4xHaA0OZJJ9bdyQVzACAAAAAATez+F9GHcGzTp7jjv4feboUNb8JCkIp4EqcPFisnq7MFbAAgAAAAACE7JvOpBgMoZ7kRd4QbxIhxukPTUxXpzhjnBHiR7XoRAAMxMzcAfQAAAAVkACAAAAAA8NJKN0IxZnruhswGQkiruv8Ih0EMwDcSZx/Xasup9dkFcwAgAAAAAKaJZRxzA+Igeydvuk6cSwUHXcrmT4PjhuPu//FslpdnBWwAIAAAAAD53Rok1Vq/PMAnXmarqoHJ0PEyYUBmVESa9hIpCv/G9QADMTM4AH0AAAAFZAAgAAAAABHxHdEClz7hbSSgE58+dWLlSMJnoPz+jFxp4bB1GmLQBXMAIAAAAAD3nSvT6aGD+A110J/NwEfp0nPutlmuB5B+wA3CC3noGAVsACAAAAAA3Apjd+TapONB7k5wBVwTWgn8t+Sq2oyyU5/+as109RcAAzEzOQB9AAAABWQAIAAAAAC/o8qW/ifk3KuJ01VFkyNLgQafxB5/bGs2G5VyyVafOwVzACAAAAAA1bMqAFGDHSl6BYNLbxApvkAv2K1/oafywiX0MDz1dGUFbAAgAAAAAHJXLlId3edFoniLD/9K2A5973MeP2Ro31flDyqm3l5QAAMxNDAAfQAAAAVkACAAAAAAY2V8I1bz3a1AxTtmED6UhdhA09huFkuuEX8R+d/WDPUFcwAgAAAAAPTVoNRiI76tcRKqd+JBBVyy4+YcKST42p0QX2BtmQ2VBWwAIAAAAACcxt9hg14WqPNiDv1MkqVljM2e2KJEv53lA17LhV6ZigADMTQxAH0AAAAFZAAgAAAAAO2kSsW0WGN9AOtK4xK2SHrGhWiaAbMEKT4iZkRpaDN/BXMAIAAAAABKGzQcPM8LT2dwOggxoWjv/1imYWabbG/G4kBw8OWaxAVsACAAAAAAC9hLK1dScQTAqg+YAG3ObdPzg2Xet57HmOFpGmyUR9UAAzE0MgB9AAAABWQAIAAAAAAiCwzNEEaH/mDam68IdDftnhthyUFdb+ZCNSBQ91WlHQVzACAAAAAA7tHyHcxCzmbJeFYZyPm4mEgkTGKOvwY4MX82OvH0Jn8FbAAgAAAAAAb5IAbZ1hXCNegQ+S+C9i/Z8y6sS8KeU04V6hXa2ml6AAMxNDMAfQAAAAVkACAAAAAAGuCHVNJSuoVkpPOnS5s89GuA+BLi2IPBUr2Bg1sWEPIFcwAgAAAAAEl1gncS5/xO7bQ/KQSstRV3rOT2SW6nV92ZANeG2SR6BWwAIAAAAAA9LOcKmhek8F2wAh8yvT/vjp2gaouuO+Hmv10lwAeWPAADMTQ0AH0AAAAFZAAgAAAAAMfxz7gEaoCdPvXr
ubDhCZUS0ARLZc1svgbXgMDlVBPgBXMAIAAAAAB6a5dDA3fuT5Vz2KvAcbUEFX/+B7Nw2p1QqbPoQ5TTuAVsACAAAAAAcf/y75UOuI62A6vWH7bYr/5Jz+nirZVYK/81trN6XOQAAzE0NQB9AAAABWQAIAAAAACnYsqF/VzmjIImC9+dqrHO1TM6lJ6fRwM0mM6Wf6paOwVzACAAAAAA5tgZzch8uDCR1ky3SllVaKVpxAlbrhvlNDTazZZRZOAFbAAgAAAAALeGiLJS4z2zhgVpxzyPdRYyACP9QzQBOob34YrIZumCAAMxNDYAfQAAAAVkACAAAAAAEC0sIVmadtW4YMuRXH7RpAhXclsd+3bmqGXCMeaT014FcwAgAAAAABPpXh0uzpsJJB+IRUNajmMB9WGwswfpw5T9xk3Xj6ANBWwAIAAAAAAmf+NYh9TZ/QRu3w/GQz66n7DtfbJijN3G7KzeL8lstAADMTQ3AH0AAAAFZAAgAAAAABaIB3n49Xm9cOafSrQsE0WCcYp8rMIO/qVwIlMF5YLRBXMAIAAAAAC9EyWJV3xOu9bzgdJ/yX+ko7qLf1u3AxNMataW2C9EzQVsACAAAAAAvVbDkLxXx2DcMLifIQ3K0IIJcLcAG9DUrNfI6aoUjNcAAzE0OAB9AAAABWQAIAAAAAA5rZItA/cocRnngYqcJ3nBXQ+l688aKz3EQyLbYYunPAVzACAAAAAAwKyA+L7TgxztPClLrIMk2JXR+w7c04N3ZOqPgjvrIvsFbAAgAAAAACzvZ33h6aWEe8hmo+1f6OXJ72FY5hvWaUuha64ZV3KFAAMxNDkAfQAAAAVkACAAAAAA3htn7oHJ0YYpIrs+Mzyh85Ys67HwAdv5LQl1mCdoMWkFcwAgAAAAAEHjCtNNLenHuSIYux6ezAHsXDaj2DlTF67ToDhDDe6HBWwAIAAAAAD+P4H0sk9jOd+7vOANt2/1Ectb+4ZRGPE8GkHWNXW3MgADMTUwAH0AAAAFZAAgAAAAAEnt18Km/nqggfIJWxzTr9r3hnXNaueG6XO9A5G11LnGBXMAIAAAAAD7QxzGMN/ard5TfFLecE6uusMmXG2+RBsBR+/NCQHUwAVsACAAAAAAQEZ1ZZ8GC8rdbg7s87OM5Gr9qkTXS9+P5DuAZxj5Gl4AAzE1MQB9AAAABWQAIAAAAAAVAKK/GoY8AACu/hyMpO4hdLq6JnEyWNzkyci9sbaD/wVzACAAAAAA2HmeqpMlvvBpV2zQTYIRmsc4MFlfHRwLof0ycJgMg/MFbAAgAAAAACdltCeWi5E/q1Li1eXLChpM2D9QQSGLBZ82NklQSc0oAAMxNTIAfQAAAAVkACAAAAAAhHyq1GQC/GiMwpYjcsfkNxolJ10ARKjIjfkW1Wipzi0FcwAgAAAAAD/uaGWxTDq87F8XZ6CrFI+RNa8yMqfSZdqK00Kj833BBWwAIAAAAAD6aEdOO0CsQGagioOCvANPCEHSpJ8BSixlPBq5ERhB7AADMTUzAH0AAAAFZAAgAAAAABAJJxHoZD+MQBWqm9UM9Dd3z5ZohIZGWRaRVRsMptKQBXMAIAAAAADrE/ca+gqj/SH4oao4wE4qn2ovoTydzcMbDbrfnUs3zAVsACAAAAAAeNCIQN6hVnGJinytQRFGlQ2ocoprXNqpia+BSxzl+uwAAzE1NAB9AAAABWQAIAAAAAAv01wz7VG9mTepjXQi6Zma+7b/OVBaKVkWNbgDLr1mFgVzACAAAAAA0I5sxz8r6wkCp5Tgvr+iL4p6MxSOq5d3e1kZG+0b7NkFbAAgAAAAAIA32v6oGkAOS96HexGouNTex+tLahtx9QF2dgGClk6WAAMxNTUAfQAAAAVkACAAAAAAWXecRwxSon68xaa9THXnRDw5ZfzARKnvvjTjtbae6T0FcwAgAAAAAPh0UfUMEo7eILCMv2tiJQe1bF9qtXq7GJtC6H5Va4fIBWwAIAAAAADqFr1ThRrTXNgIOrJWScO9mk86Ufi95IDu5gi4vP+HWQADMTU2AH0AAAAFZAAgAAAAAEY5WL8/LpX36iAB1wlQrMO/xHVjoO9BePVzbUlBYo+bBXMAIAAAAABoKcpadDXUARedDvTmzUzWPe1jTuvD0z9oIcZmKuiSXwVsACAAAAAAJuJbwuaMrAFoI+jU/IYr+k4RzAqITrOjAd3HWCpJHqEAAzE1NwB9AAAABWQAIAAAAADnJnWqsfx0xqNnqfFGCxIplVu8mXjaHTViJT9+y2RuTgVzACAAAAAAWAaSCwIXDwdYxWf2NZTly/iKVfG/KDjHUcA1BokN5sMFbAAgAAAAAJVxavipE0H4/JQvhagdytXBZ8qGooeXpkbPQ1RfYMVHAAMxNTgAfQAAAAVkACAAAAAAsPG7LaIpJvcwqcbtfFUpIjj+vpNj70Zjaw3eV9T+QYsFcwAgAAAAAJQ71zi0NlCyY8ZQs3IasJ4gB1PmWx57HpnlCf3+hmhqBWwAIAAAAACD58TO6d+71GaOoS+r73rAxliAO9GMs4Uc8JbOTmC0OwADMTU5AH0AAAAFZAAgAAAAAAGiSqKaQDakMi1W87rFAhkogfRAevnwQ41onWNUJKtuBXMAIAAAAAASgiDpXfGh7E47KkOD8MAcX8+BnDShlnU5JAGdnPdqOAVsACAAAAAAI+2TTQIgbFq4Yr3lkzGwhG/tqChP7hRAx2W0fNaH6jcAAzE2MAB9AAAABWQAIAAAAAB7L4EnhjKA5xJD3ORhH2wOA1BvpnQ+7IjRYi+jjVEaJAVzACAAAAAAuhBIm0nL3FJnVJId+7CKDASEo+l2E89Z9/5aWSITK4AFbAAgAAAAALtSICOzQDfV9d+gZuYxpEj6cCeHnKTT+2G3ceP2H65kAAMxNjEAfQAAAAVkACAAAAAAaROn1NaDZFOGEWw724dsXBAm6bgmL5i0cki6QZQNrOoFcwAgAAAAANVT8R6UvhrAlyqYlxtmnvkR4uYK/hlvyQmBu/LP6/3ZBWwAIAAAAAD+aHNMP/X+jcRHyUtrCNkk1KfMtoD3GTmShS8pWGLt+AADMTYyAH0AAAAFZAAgAAAAADqSR5e0/Th59LrauDA7OnGD1Xr3H3NokfVxzDWOFaN7BXMAIAAAAACt30faNwTWRbvmykDpiDYUOCwA6QDbBBYBFWS7rdOB4AVsACAAAAAAF7SvnjjRk5v2flFOKaBAEDvjXaL1cpjsQLtK2fv9zdQAAzE2MwB9AAAABWQAIAAAAADmtb1ZgpZjSeodPG/hIVlsnS8hoRRwRbrTVx89VwL62AVzACAAAAAAi38e1g6sEyVfSDkzZbaZXGxKI/zKNbMasOl2LYoWrq8FbAAgAAAAAALACk0KcCDN/Kv8WuazY8ORtUGkOZ5Dsm0ys1oOppp/AAMxNjQAfQAAAAVkACAAAAAAf/f7AWVgBxoKjr7YsEQ4w/fqSvuQWV2HMiA3rQ7ur0sFcwAgAAAAADkkeJozP6FFhUdRIN74H4UhIHue+eVbOs1NvbdWYFQrBWwAIAA
AAAB55FlHAkmTzAYj/TWrGkRJw2EhrVWUnZXDoMYjyfB/ZwADMTY1AH0AAAAFZAAgAAAAAI2WEOymtuFpdKi4ctanPLnlQud+yMKKb8p/nfKmIy56BXMAIAAAAADVKrJmhjr1rfF3p+T+tl7UFd1B7+BfJRk0e7a4im7ozgVsACAAAAAA5E7Ti3PnFiBQoCcb/DN7V1uM3Xd6VKiexPKntssFL7kAAzE2NgB9AAAABWQAIAAAAAAuHU9Qd79hjyvKOujGanSGDIQlxzsql8JytTZhEnPw+AVzACAAAAAAjF2gV/4+sOHVgDd/oR5wDi9zL7NGpGD+NsEpGXy/a4QFbAAgAAAAAJzMoyojYV6Ed/LpVN5zge93Odv3U7JgP7wxeRaJZGTdAAMxNjcAfQAAAAVkACAAAAAA7dQDkt3iyWYCT94d7yqUtPPwp4qkC0ddu+HFdHgVKEkFcwAgAAAAANuYvtvZBTEq4Rm9+5eb7VuFopowkrAuv86PGP8Q8/QvBWwAIAAAAACeqXoAOQOE4j0zRMlkVd8plaW0RX1npsFvB38Xmzv7sAADMTY4AH0AAAAFZAAgAAAAAAwnZSDhL4tNGYxlHPhKYB8s28dY5ScSwiKZm3UhT8U3BXMAIAAAAABDoY6dhivufTURQExyC9Gx3ocpl09bgbbQLChj3qVGbgVsACAAAAAAF+1nS7O0v85s3CCy+9HkdeoEfm2C6ZiNbPMMnSfsMHUAAzE2OQB9AAAABWQAIAAAAAC2VuRdaC4ZJmLdNOvD6R2tnvkyARteqXouJmI46V306QVzACAAAAAAMn1Z6B35wFTX9mEYAPM+IiJ5hauEwfD0CyIvBrxHg7IFbAAgAAAAAOG6DvDZkT9B/xZWmjao2AevN7MMbs3Oh9YJeSd/hZ+hAAMxNzAAfQAAAAVkACAAAAAAVerb7qVNy457rNOHOgDSKyWl5ojun7iWrv1uHPXrIZQFcwAgAAAAAIDcYS9j5z+gx0xdJj09L7876r/vjvKTi/d3bXDE3PhyBWwAIAAAAADuhVLqb1Bkrx8aNymS+bx2cL8GvLFNH4SAi690DUgnWQADMTcxAH0AAAAFZAAgAAAAAH/E44yLxKCJjuSmU9A8SEhbmkDOx1PqqtYcZtgOzJdrBXMAIAAAAABgLh9v2HjBbogrRoQ82LS6KjZQnzjxyJH4PH+F3jupSAVsACAAAAAAIlO46ehXp4TqpDV0t6op++KO+uWBFh8iFORZjmx2IjkAAzE3MgB9AAAABWQAIAAAAAAlNUdDL+f/SSQ5074mrq0JNh7CTXwTbbhsQyDwWeDVMwVzACAAAAAANIH2IlSNG0kUw4qz0budjcWn8mNR9cJlYUqPYdonucAFbAAgAAAAAJMrOUOyiu5Y3sV76zwEFct8L7+i8WGlQI2+8z2W2kzaAAMxNzMAfQAAAAVkACAAAAAASZ+CvUDtlk/R4HAQ3a+PHrKeY/8ifAfh0oXYFqliu80FcwAgAAAAAJelpzPgM65OZFt/mvGGpwibclQ49wH+1gbUGzd9OindBWwAIAAAAAD9qeDchteEpVXWcycmD9kl9449C1dOw0r60TBm5jK+cQADMTc0AH0AAAAFZAAgAAAAAN9fkoUVbvFV2vMNMAkak4gYfEnzwKI3eDM3pnDK5q3lBXMAIAAAAACnDkgVNVNUlbQ9RhR6Aot2nVy+U4km6+GHPkLr631jEAVsACAAAAAANzg/BnkvkmvOr8nS4omF+q9EG/4oisB+ul4YHi938hwAAzE3NQB9AAAABWQAIAAAAAASyK3b1nmNCMptVEGOjwoxYLLS9fYWm/Zxilqea0jpEQVzACAAAAAADDHsGrbqlKGEpxlvfyqOJKQJjwJrzsrB7k3HG0AUJbkFbAAgAAAAAKwx3S4XfDZh4+LuI9jf7XgUh5qiefNv87JD4qvVRfPSAAMxNzYAfQAAAAVkACAAAAAAlSP9iK31GlcG9MKGbLmq+VXMslURr+As736rrVNXcsUFcwAgAAAAAAvbj0zfq9zzi8XReheKFbCB+h9IsOLgXPPpI5vrEJNZBWwAIAAAAABXvoZhaQE7ogWjeBjceVkp03N20cKYP3TA8vuNsgpfAgADMTc3AH0AAAAFZAAgAAAAAOJNORH8Bev97gVU7y6bznOxJ+E6Qoykur1QP76hG1/7BXMAIAAAAAC+C1PtOOrSZgzBAGhr+dPe/kR0JUw9GTwLVNr61xC1aAVsACAAAAAAeA/L8MQIXkamaObtMPLpoDoi5FypA5WAPtMeMrgi0eQAAzE3OAB9AAAABWQAIAAAAAAKcHzLUomavInN6upPkyWhAqYQACP/vdVCIYpiy6U6HgVzACAAAAAATsR4KItY6R2+U7Gg6sJdaEcf58gjd1OulyWovIqfxKcFbAAgAAAAAFbm10ko67ahboAejQdAV0U2uA5OhZYdb8XUFJ8OL46LAAMxNzkAfQAAAAVkACAAAAAAqTOLiMpCdR59tLZzzIPqJvbCNvz2XQL9ust0qYaehtcFcwAgAAAAAArefox/3k5xGOeiw2m6NUdzuGxmPwcu5IFcj+jMwHgHBWwAIAAAAADLZGFJ7MQd5JXMgMXjqZO5LDLxcFClcXPlnRMWRn+1oAADMTgwAH0AAAAFZAAgAAAAAIPSqSeVzSRgNVNmrPYHmUMgykCY27NbdDUNhE5kx/SgBXMAIAAAAAAhX90nNfxyXmZe/+btZ7q6xMX4PFyj0paM1ccJ/5IUUQVsACAAAAAA419oHmD2W0SYoOMwhrhrp8jf68fg9hTkaRdCuVd3CN0AAzE4MQB9AAAABWQAIAAAAACLn5DxiqAosHGXIAY96FwFKjeqrzXWf3VJIQMwx1fl4gVzACAAAAAAindvU27nveutopdvuHmzdENBbeGFtI3Qcsr07jxmvm8FbAAgAAAAAPvl9pBStQvP4OGkN5v0MghUY6djm9n7XdKKfrW0l1sMAAMxODIAfQAAAAVkACAAAAAA7i2S6rHRSPBwZEn59yxaS7HiYBOmObIkeyCcFU42kf8FcwAgAAAAAGb3RSEyBmgarkTvyLWtOLJcPwCKbCRkESG4RZjVmY4iBWwAIAAAAADB2/wo5CSHR4ANtifY6ZRXNTO5+O8qP82DfAiAeanpZwADMTgzAH0AAAAFZAAgAAAAAFz+M+H/Z94mdPW5oP51B4HWptp1rxcMWAjnlHvWJDWrBXMAIAAAAACBFEOQyL7ZHu4Cq33QvXkmKuH5ibG/Md3RaED9CtG5HwVsACAAAAAAfggtJTprQ/yZzj7y5z9KvXsdeXMWP0yUXMMJqpOwI88AAzE4NAB9AAAABWQAIAAAAAAE7c2x3Z3aM1XGfLNk/XQ9jCazNRbGhVm7H8c2NjS5ywVzACAAAAAARJ9h8fdcwA19velF3L/Wcvi2rCzewlKZ2nA0p8bT9uwFbAAgAAAAAJtWe6b4wK2Hae2dZm/OEpYQnvoZjz4Sz5IgJC2wInecAAMxODUAfQAAAAVkACAAAAAAVoRt9B9dNVvIMGN+ea
5TzRzQC+lqSZ8dd/170zU5o9cFcwAgAAAAAEwM95XZin5mv2yhCI8+ugtKuvRVmNgzzIQN0yi1+9aIBWwAIAAAAAAMGBq72n00rox3uqhxSB98mkenTGCdbbUF1gXrgottzgADMTg2AH0AAAAFZAAgAAAAAKRDkjyWv/etlYT4GyoXrmBED2FgZHnhc+l9Wsl06cH2BXMAIAAAAABohlpm3K850Vndf3NmNE0hHqDlNbSR8/IvMidQ3LnIZAVsACAAAAAAW42nGHa6q2MCAaaPVwaIDfr8QLyQwjKq23onZJYsqVsAAzE4NwB9AAAABWQAIAAAAAC3DFh5oklLCNLY90bgWm68dFXz65JpAZSp1K99MBTPAQVzACAAAAAAQgZecmxEUZVHoptEQClDwAf8smI3WynQ/i+JBP0g+kQFbAAgAAAAAEUSQGVnAPISD6voD0DiBUqyWKgt2rta0tjmoe+LNt6IAAMxODgAfQAAAAVkACAAAAAAQ5WKvWSB503qeNlOI2Tpjd5blheNr6OBO8pfJfPNstcFcwAgAAAAAKwHgQLSDJ5NwLBQbY5OnblQIsVDpGV7q3RCbFLD1U4/BWwAIAAAAACQ5nED99LnpbqXZuUOUjnO2HTphEAFBjLD4OZeDEYybgADMTg5AH0AAAAFZAAgAAAAAGfhFY3RGRm5ZgWRQef1tXxHBq5Y6fXaLAR4yJhrTBplBXMAIAAAAACKEF0ApLoB6lP2UqTFsTQYNc9OdDrs/vziPGzttGVLKQVsACAAAAAArOO6FyfNRyBi0sPT5iye7M8d16MTLcwRfodZq4uCYKEAAzE5MAB9AAAABWQAIAAAAAAIM73gPcgzgotYHLeMa2zAU4mFsr7CbILUZWfnuKSwagVzACAAAAAAJCSu98uV8xv88f2BIOWzt6p+6EjQStMBdkGPUkgN79cFbAAgAAAAAMGqPGMPxXbmYbVfSa/japvUljht1zZT33TY7ZjAiuPfAAMxOTEAfQAAAAVkACAAAAAAkWmHCUsiMy1pwZTHxVPBzPTrWFBUDqHNrVqcyyt7nO8FcwAgAAAAAMv2CebFRG/br7USELR98sIdgE9OQCRBGV5JZCO+uPMgBWwAIAAAAABt7qSmn3gxJu7aswsbUiwvO+G6lXj/Xhx+J/zQyZxzLAADMTkyAH0AAAAFZAAgAAAAAGInUYv0lP/rK7McM8taEHXRefk8Q2AunrvWqdfSV7UaBXMAIAAAAACE+WPxJ3gan7iRTbIxXXx+bKVcaf8kP4JD8DcwU0aL7wVsACAAAAAAUC4eTprX4DUZn2X+UXYU6QjtiXk+u57yoOPBbPQUmDkAAzE5MwB9AAAABWQAIAAAAACmHlg2ud3cplXlTsNTpvNnY6Qm1Fce0m899COamoDjaQVzACAAAAAArtJQeJIlepBWRU2aYar7+YGYVQ7dfDc1oxgTmA8r9q0FbAAgAAAAAOk45vg5VqZHAFCO3i0Z52SZi5RADf8NXwf68T5yad/DAAMxOTQAfQAAAAVkACAAAAAApzcWSAbZWV/Rq+ylRNqqlJqNVR4fhXrz4633/MQOQgcFcwAgAAAAAN/jz/bsEleiuCl+li83EWlG6UMHA8CyaOMRKCkXkSCPBWwAIAAAAAC3Sd+Qg+uFDKpGZHbrQgokXHQ1az1aFl4YK343OB6hcQAAEmNtAAAAAAAAAAAAABBwYXlsb2FkSWQAAAAAABBmaXJzdE9wZXJhdG9yAAEAAAAA",
                "subType": "06"
              }
            }
@@ -289,7 +285,6 @@
           "schema": {
             "default.default": {
               "escCollection": "enxcol_.default.esc",
-              "eccCollection": "enxcol_.default.ecc",
               "ecocCollection": "enxcol_.default.ecoc",
               "fields": [
                 {
@@ -299,7 +294,7 @@
                     "subType": "04"
                   }
                 },
-                "path": "encryptedDecimal",
+                "path": "encryptedDecimalNoPrecision",
                 "bsonType": "decimal",
                 "queries": {
                   "queryType": "rangePreview",
@@ -327,7 +322,7 @@
             "_id": {
               "$numberInt": "0"
             },
-            "encryptedDecimal": {
+            "encryptedDecimalNoPrecision": {
               "$$type": "binData"
             },
             "__safeContent__": [
@@ -1111,7 +1106,7 @@
             "_id": {
               "$numberInt": "1"
             },
-            "encryptedDecimal": {
+            "encryptedDecimalNoPrecision": {
               "$$type": "binData"
             },
             "__safeContent__": [
diff --git a/test/client-side-encryption/spec/legacy/fle2-Range-Decimal-Update.json b/test/client-side-encryption/spec/legacy/fle2v2-Range-Decimal-Update.json
similarity index 75%
rename from test/client-side-encryption/spec/legacy/fle2-Range-Decimal-Update.json
rename to test/client-side-encryption/spec/legacy/fle2v2-Range-Decimal-Update.json
index 2f8b991cf7..8cfb7b525b 100644
--- a/test/client-side-encryption/spec/legacy/fle2-Range-Decimal-Update.json
+++ b/test/client-side-encryption/spec/legacy/fle2v2-Range-Decimal-Update.json
@@ -1,7 +1,8 @@
 {
   "runOn": [
     {
-      "minServerVersion": "6.2.0",
+      "minServerVersion": "7.0.0",
+      "serverless": "forbid",
       "topology": [
         "replicaset"
       ]
@@ -11,9 +12,6 @@
   "collection_name": "default",
   "data": [],
   "encrypted_fields": {
-    "escCollection": "enxcol_.default.esc",
-    "eccCollection": "enxcol_.default.ecc",
-    "ecocCollection": "enxcol_.default.ecoc",
     "fields": [
       {
         "keyId": {
@@ -22,7 +20,7 @@
           "subType": "04"
         }
       },
-        "path": "encryptedDecimal",
+        "path": "encryptedDecimalNoPrecision",
         "bsonType": "decimal",
         "queries": {
           "queryType": "rangePreview",
@@ -91,7 +89,7 @@
       "arguments": {
         "document": {
           "_id": 0,
-          "encryptedDecimal": {
+          "encryptedDecimalNoPrecision": {
             "$numberDecimal": "0"
           }
         }
@@ -102,7 +100,7 @@
       "arguments": {
         "document": {
           "_id": 1,
-          "encryptedDecimal": {
+          "encryptedDecimalNoPrecision": {
             "$numberDecimal": "1"
           }
         }
@@ -112,7 +110,7 @@
       "name": "updateOne",
       "arguments": {
         "filter": {
-          "encryptedDecimal": {
+          "encryptedDecimalNoPrecision": {
             "$gt": {
               "$numberDecimal": "0"
             }
@@ -120,7 +118,7 @@
         },
         "update": {
           "$set": {
-            "encryptedDecimal": {
+            "encryptedDecimalNoPrecision": {
               "$numberDecimal": "2"
             }
           }
@@ -185,7 +183,7 @@
         "documents": [
           {
             "_id": 0,
-            "encryptedDecimal": {
+            "encryptedDecimalNoPrecision": {
               "$$type": "binData"
             }
           }
@@ -196,7 +194,6 @@
         "schema": {
           "default.default": {
             "escCollection": "enxcol_.default.esc",
-            "eccCollection": "enxcol_.default.ecc",
             "ecocCollection": "enxcol_.default.ecoc",
             "fields": [
               {
@@ -206,7 +203,7 @@
                 "subType": "04"
               }
             },
-              "path": "encryptedDecimal",
+              "path": "encryptedDecimalNoPrecision",
               "bsonType": "decimal",
               "queries": {
                 "queryType": "rangePreview",
@@ -233,7 +230,7 @@
         "documents": [
           {
             "_id": 1,
-            "encryptedDecimal": {
+            "encryptedDecimalNoPrecision": {
               "$$type": "binData"
             }
           }
@@ -244,7 +241,6 @@
         "schema": {
           "default.default": {
             "escCollection": "enxcol_.default.esc",
-            "eccCollection": "enxcol_.default.ecc",
             "ecocCollection": "enxcol_.default.ecoc",
             "fields": [
               {
@@ -254,7 +250,7 @@
                 "subType": "04"
               }
             },
-              "path": "encryptedDecimal",
+              "path": "encryptedDecimalNoPrecision",
               "bsonType": "decimal",
               "queries": {
                 "queryType": "rangePreview",
@@ -283,10 +279,10 @@
           "updates": [
             {
               "q": {
-                "encryptedDecimal": {
+                "encryptedDecimalNoPrecision": {
                   "$gt": {
                     "$binary": {
-                      "base64": "CgljAAADcGF5bG9hZADZYgAABGcAnWIAAAMwAH0AAAAFZAAgAAAAAJu2KgiI8vM+kz9qD3ZQzFQY5qbgYqCqHG5R4jAlnlwXBXMAIAAAAAAAUXxFXsz764T79sGCdhxvNd5b6E/9p61FonsHyEIhogVjACAAAAAAeEtIU/sp7zwjA/VArYkCzkUUiRiQOmlTaVZvfUqbEp0AAzEAfQAAAAVkACAAAAAAPQPvL0ARjujSv2Rkm8r7spVsgeC1K3FWcskGGZ3OdDIFcwAgAAAAACgNn660GmefR8jLqzgR1u5O+Uocx9GyEHiBqVGko5FZBWMAIAAAAACMKx1UyNAN4yVafFht8jp4w4LNaJcLhnozfSMzDHD3owADMgB9AAAABWQAIAAAAAD2Zi6kcxmaD2mY3VWrP+wYJMPg6cSBIYPapxaFQxYFdQVzACAAAAAAM/cV36BLBY3xFBXsXJY8M9EHHOc/qrmdc2CJmj3M89gFYwAgAAAAAHdek2wlAQ5BQORVjudyziRTV0EKx8qbsnoDiw4HG2xaAAMzAH0AAAAFZAAgAAAAAOaNqI9srQ/mI9gwbk+VkizGBBH/PPWOVusgnfPk3tY1BXMAIAAAAAAc96O/pwKCmHCagT6T/QV/wz4vqO+R22GsZ1dse2Vg6QVjACAAAAAA3tld+twIlVWnCTXElspau5k02CwxXDjh+3u1CQtV/W4AAzQAfQAAAAVkACAAAAAA0XlQgy/Yu97EQOjronl9b3dcR1DFn3deuVhtTLbJZHkFcwAgAAAAACoMnpVl6EFJak8A+t5N4RFnQhkQEBnNAx8wDqmq5U/dBWMAIAAAAABRmXIchytMNz4J439viiY5FZ1OB3AXJBkZ3udUqHpsqAADNQB9AAAABWQAIAAAAAAOtpNexRxfv0yRFvZO9DhlkpU4mDuAb8ykdLnE5Vf1VAVzACAAAAAAeblFKm/30orP16uQpZslvsoS8s0xfNPIBlw3VkHeekYFYwAgAAAAAJQdzGMO2s1AkdKz3ZPytivrQphUFVhvhMcXjvUGzHBDAAM2AH0AAAAFZAAgAAAAAIr8xAFm1zPmrvW4Vy5Ct0W8FxMmyPmFzdWVzesBhAJFBXMAIAAAAABYeeXjJEzTHwxab6pUiCRiZjxgtN59a1y8Szy3hfkg+gVjACAAAAAA161loizzKvXS1Po/3bxeNICmKpsPSK9Q+EpkISK3jVoAAzcAfQAAAAVkACAAAAAAl+ibLk0/+EwoqeC8S8cGgAtjtpQWGEZDsybMPnrrkwEFcwAgAAAAAHPPBudWgQ+HUorLDpJMqhS9VBF2VF5aLcxgrM1s+yU7BWMAIAAAAAACyakB3CZWEjbxK0u9Sflc3+EafBAQFbvBpCxKvxuEQAADOAB9AAAABWQAIAAAAACR9erwLTb+tcWFZgJ2MEfM0PKI9uuwIjDTHADRFgD+SQVzACAAAAAAcOop8TXsGUVQoKhzUllMYWxL93xCOkwtIpV8Q6hiSYYFYwAgAAAAAJ43998lzKoVqWm99ZzKHJLeVscGNCVoKDvpUtt2rDI9AAM5AH0AAAAFZAAgAAAAALv0vCPgh7QpmM8Ug6ad5ioZJCh7pLMdT8FYyQioBQ6KBXMAIAAAAADsCPyIG8t6ApQkRk1fX/sfc1kpuWCWP8gAEpnYoBSHrQVjACAAAAAAj2h1WLr0EFEFZ31djx3BtLIbAtdE05ax54opkJo4/bQAAzEwAH0AAAAFZAAgAAAAAIW4AxJgYoM0pcNTwk1RSbyjZGIqgKL1hcTJmNrnZmoPBXMAIAAAAAAZpfx3EFO0vY0f1eHnE0PazgqeNDTaj
+pPJMUNW8lFrAVjACAAAAAAqFD83E8POmo6pdqg7D+jWtngbgcV99mbQBxkbpX7ds4AAzExAH0AAAAFZAAgAAAAAKliO6L9zgeuufjj174hvmQGNRbmYYs9yAirL7OxwEW3BXMAIAAAAAAqU7vs3DWUQ95Eq8OejwWnD0GuXd+ASi/uD6S0l8MM1QVjACAAAAAAp0cwpAh7SiuStPwYNVvp83N5GWyWRWA7UGRxHDDj8g8AAzEyAH0AAAAFZAAgAAAAAOGQcCBkk+j/Kzjt/Cs6g3BZPJG81wIHBS8JewHGpgk+BXMAIAAAAABjrxZXWCkdzrExwCgyHaafuPSQ4V4x2k9kUCAqUaYKDQVjACAAAAAAJr/tEuxScrJJEMECy4itHf1y5+gO2l5sBjgM/EOLXw0AAzEzAH0AAAAFZAAgAAAAAPmCNEt4t97waOSd5hNi2fNCdWEkmcFJ37LI9k4Az4/5BXMAIAAAAABX7DuDPNg+duvELf3NbLWkPMFw2HGLgWGHyVWcPvSNCAVjACAAAAAAeNCEn5Z3A5CgCnIgaBl2N8p0t+5rRY06vGg6ePVMaTUAAzE0AH0AAAAFZAAgAAAAAD4vtVUYRNB+FD9yoQ2FVJH3nMeJeKbi6eZfth638YqbBXMAIAAAAAANCuUB4OdmuD6LaDK2f3vaqfgYYvg40wDXOBbcFjTqLwVjACAAAAAATozHsao4er/Uk5H/pKe/1JEo/3h4Pgah2RgJHXf2PeYAAzE1AH0AAAAFZAAgAAAAAJPIRzjmTjbdIvshG6UslbEOd797ZSIdjGAhGWxVQvK1BXMAIAAAAABgmJ0Jh8WLs9IYs/a7DBjDWd8J3thW/AGJK7zDnMeYOAVjACAAAAAAtD48LbnCkTvacfz8DvR1ixsbYceiJxJRV7tLqDUWvVwAAzE2AH0AAAAFZAAgAAAAAABQyKQPoW8wGPIqnsTv69+DzIdRkohRhOhDmyVHkw9WBXMAIAAAAAAqWA2X4tB/h3O1Xlawtz6ndI6WaTwgU1QYflL35opu5gVjACAAAAAAphx2eRFGSW/tm3HxilqRv5Zj5TtOy4KSGiCIm6yJANYAAzE3AH0AAAAFZAAgAAAAACB7NOyGQ1Id3MYnxtBXqyZ5Ul/lHH6p1b10U63DfT6bBXMAIAAAAADpOryIcndxztkHSfLN3Kzq29sD8djS0PspDSqERMqokQVjACAAAAAAIHYCljTfUu/Ox23Nrjz2Z6cqQpn4s0sJV/SxkC9UtIEAAzE4AH0AAAAFZAAgAAAAAKVfXLfs8XA14CRTB56oZwV+bFJN5BHraTXbqEXZDmTkBXMAIAAAAAASRWTsfGOpqdffiOodoqIgBzG/yzFyjR5CfUsIUIWGpgVjACAAAAAAGs7l0GsFNkQMcJBUuUsEOMuQupbEvi7MDcezspsCY3EAAzE5AH0AAAAFZAAgAAAAAH/aZr4EuS0/noQR9rcF8vwoaxnxrwgOsSJ0ys8PkHhGBXMAIAAAAACd7ObGQW7qfddcvyxRTkPuvq/PHu7+6I5dxwS1Lzy5XAVjACAAAAAAWnfsbdu5Qvb62rEpBR90qh+3jdYfp266dmuNPLoXys4AAzIwAH0AAAAFZAAgAAAAAKvlcpFFNq0oA+urq3w6d80PK1HHHw0H0yVWvU9aHijXBXMAIAAAAADWnAHQ5Fhlcjawki7kWzdqjM2f6IdGJblojrYElWjsZgVjACAAAAAAdNxswYIMjqVta8sLC8wniMm6gOgIgG5eWTPAxItBJJ4AAzIxAH0AAAAFZAAgAAAAAH/2aMezEOddrq+dNOkDrdqf13h2ttOnexZsJxG1G6PNBXMAIAAAAABNtgnibjC4VKy5poYjvdsBBnVvDTF/4mmEAxsXVgZVKgVjACAAAAAArWXr2Myg8T8/0eYBCUXDOsIR//zqZtlE6Lb1YZJkvYcAAzIyAH0AAAAFZAAgAAAAAF2wZoDL6/V59QqO8vdRZWDpXpkV4h4KOCSn5e7x7nmzBXMAIAAAAADLZBu7LCYjbThaVUqMK14H/elrVOYIKJQCx4C9Yjw37gVjACAAAAAAWeF6F8pBeCm27PWjQGrlyN7JHp9WqCNnwFzqZ+V51t0AAzIzAH0AAAAFZAAgAAAAAH27yYaLn9zh2CpvaoomUPercSfJRUmBY6XFqmhcXi9QBXMAIAAAAAAUwumVlIYIs9JhDhSj0R0+59psCMsFk94E62VxkPt42QVjACAAAAAAjbsmlVrfexmULer3RtGwnOf20pF0xv4Z46OHOaIwgzEAAzI0AH0AAAAFZAAgAAAAALMg2kNAO4AFFs/mW3In04yFeN4AP6Vo0klyUoT06RquBXMAIAAAAAAgGWJbeIdwlpqXCyVIYSs0dt54Rfc8JF4b8uYc+YUj0AVjACAAAAAAsNKKan8cMW1oYFmQiyPZbepuJlDD0/CQUS21++CP16kAAzI1AH0AAAAFZAAgAAAAALas/i1T2DFCEmrrLEi7O2ngJZyFHialOoedVXS+OjenBXMAIAAAAAA1kK0QxY4REcGxHeMkgumyF7iwlsRFtw9MlbSSoQY7uAVjACAAAAAAUGZ/BwF6Q9hcFBC8F5ed6RwYWJ1/OahRwljBloMgkFAAAzI2AH0AAAAFZAAgAAAAAP1TejmWg1CEuNSMt6NUgeQ5lT+oBoeyF7d2l5xQrbXWBXMAIAAAAABPX0kj6obggdJShmqtVfueKHplH4ZrXusiwrRDHMOKeQVjACAAAAAApU9zcPAQ2xOspmwhjWN90v2m2clQ4rk7WBmm/nUyOHQAAzI3AH0AAAAFZAAgAAAAAMrKn+QPa/NxYezNhlOX9nyEkN1kE/gW7EuZkVqYl0b8BXMAIAAAAABUoZMSPUywRGfX2EEencJEKH5x/P9ySUVrhStAwgR/LgVjACAAAAAAl/hDhauhBkda/I/kLr/XLxyso2guawC2isj+tDQhfMoAAzI4AH0AAAAFZAAgAAAAAMmD1+a+oVbiUZd1HuZqdgtdVsVKwuWAn3/M1B6QGBM3BXMAIAAAAACLyytOYuZ9WEsIrrtJbXUx4QgipbaAbmlJvSZVkGi0CAVjACAAAAAAVKbMdxvkfCBoqJCUVjLrtKF90a80LocckdFc7LvYuWIAAzI5AH0AAAAFZAAgAAAAAOVKV7IuFwmYP1qVv8h0NvJmfPICu8yQhzjG7oJdTLDoBXMAIAAAAABL70XLfQLKRsw1deJ2MUvxSWKxpF/Ez73jqtbLvqbuogVjACAAAAAARiC2ShVCKMjjiqB6xg+jDIYJNhhWqmceO07EC8rEXkoAAzMwAH0AAAAFZAAgAAAAAKS/1RSAQma+xV9rz04IcdzmavtrBDjOKPM+Z2NEyYfPBXMAIAAAAAAOJDWGORDgfRv8+w5nunh41wXb2hCA0MRzwnLnQtIqPgVjACAAAAAAWU2Vw2Rta8oHxPPfOWQa86tjYWndb0E3RNlZ9gthHt0AAzMxAH0AAAAFZAAgAAAAAFeq8o82uNY1X8cH6OhdTzHNBUnC
ChsEDs5tm0kPBz3qBXMAIAAAAABaxMBbsaeEj/EDtr8nZfrhhhirBRPJwVamDo5WwbgvTQVjACAAAAAAqcsCyJuLvxG8STrUI1Ccaga760+4fbmIPc6bXodea0gAAzMyAH0AAAAFZAAgAAAAAI8IKIfDrohHh2cjspJHCovqroSr5N3QyVtNzFvT5+FzBXMAIAAAAABXHXteKG0DoOMmECKp6ro1MZNQvXGzqTDdZ0DUc8QfFAVjACAAAAAA47u7VuqtYJ4jt5CcGrUacopqApwpqEpIpkalvCRtul0AAzMzAH0AAAAFZAAgAAAAAJkHvm15kIu1OtAiaByj5ieWqzxiu/epK6c/9+KYIrB0BXMAIAAAAACzg5TcyANk0nes/wCJudd1BwlkWWF6zw3nGclq5v3SJQVjACAAAAAAvTQRtEYdIj2+bfJNamvrgWRoBsYYjMQx9MyuFwSfKmYAAzM0AH0AAAAFZAAgAAAAAAYSOvEWWuSg1Aym7EssNLR+xsY7e9BcwsX4JKlnSHJcBXMAIAAAAABT48eY3PXVDOjw7JpNjOe1j2JyI3LjDnQoqZ8Je5B2KgVjACAAAAAA4eLwHqiSgM1pMqjK3QFgYauOnI3emnlOK3rgQPuec4AAAzM1AH0AAAAFZAAgAAAAAGQxC9L1e9DfO5XZvX1yvc3hTLtQEdKO9FPMkyg0Y9ZABXMAIAAAAADtmcMNJwdWLxQEArMGZQyzpnu+Z5yMmPAkvgq4eAKwNQVjACAAAAAAxA7XR9Cp6671TiZesvqE/EPKVPXfGDaYuDnce+MaZw0AAzM2AH0AAAAFZAAgAAAAADLHK2LNCNRO0pv8n4fAsxwtUqCNnVK8rRgNiQfXpHSdBXMAIAAAAACf16EBIHRKD3SzjRW+LMOl+47QXA3CJhMzlcqyFRW22AVjACAAAAAA9F9Ztm39rTapXoeRmzk6eUoa3MDPatwoQD5PvD+3xGEAAzM3AH0AAAAFZAAgAAAAAHiZJiXKNF8bbukQGsdYkEi95I+FSBHy1I5/hK2uEZruBXMAIAAAAADE+lZBa8HDUJPN+bF6xI9x4N7GF9pj3vBR7y0BcfFhBAVjACAAAAAArsw2CrSC2y02EiWJxViVxlYRIXtWARpUTwO/mfqS7O4AAzM4AH0AAAAFZAAgAAAAAI1oa2OIw5TvhT14tYCGmhanUoYcCZtNbrVbeoMldHNZBXMAIAAAAAAx2nS0Ipblf2XOgBiUOuJFBupBhe7nb6QPLZlA4aMPCgVjACAAAAAAaVoGxV5YH4urmcw3Zocg+QHoq+hA0u6nIkconI0MtZoAAzM5AH0AAAAFZAAgAAAAABgTWS3Yap7Q59hii/uPPimHWXsr+DUmsqfwt/X73qsOBXMAIAAAAACKK05liW5KrmEAvtpCB1WUltruzUylDDpjea//UlWoOAVjACAAAAAAAI3iOqWSA+DpgNr/neIRpt85Q8pLS/81ZKDnbC4io/0AAzQwAH0AAAAFZAAgAAAAABOAnBPXDp6i9TISQXvcNKwGDLepZTu3cKrB4vKnSCjBBXMAIAAAAADjjzZO7UowAAvpwyG8BNOVqLCccMFk3aDK4unUeft5ywVjACAAAAAAIZqKrUcbtze/K3CUKdchuVXQYkgj6jQfIeMZ3GrtYeEAAzQxAH0AAAAFZAAgAAAAAHN8hyvT1lYrAsdiV5GBdd5jhtrAYE/KnSjw2Ka9hjz9BXMAIAAAAAD794JK7EeXBs+D7yOVK7nWF8SbZ/7U8gZ7nnT9JFNwTAVjACAAAAAAQ4K5cDGLM/2L7KbMt537tsG1eiWrboKMj6pXVfBDIBoAAzQyAH0AAAAFZAAgAAAAAO93bPrq8bsnp1AtNd9ETnXIz0lH/2HYN/vuw9wA3fyFBXMAIAAAAABHlls5fbaF2oAGqptC481XQ4eYxInTC29aElfmVZgDUgVjACAAAAAAe0oLBk+ZaipcPgjjHvACAyA42cLY8i+s+V0EsarulZkAAzQzAH0AAAAFZAAgAAAAAL1YsAZm1SA0ztU6ySIrQgCCA74V6rr0/4iIygCcaJL6BXMAIAAAAADTXWTHWovGmUR1Zg9l/Aqq9H5mOCJQQrb/Dfae7e3wKAVjACAAAAAAAeXpcv0oCp9iY/U81w+WJjFjGn60KLQEURJd/pSoaJgAAzQ0AH0AAAAFZAAgAAAAAEoFAeHk0RZ9kD+cJRD3j7PcE5gzWKnyBrF1I/MDNp5mBXMAIAAAAACgHtc2hMBRSZjKw8RAdDHK+Pi1HeyjiBuAslGVNcW5tAVjACAAAAAAmSJiRfjmuwq5cYfadeYQRkPqbGYYrMNTQeyhBEAYClEAAzQ1AH0AAAAFZAAgAAAAAAW+7DmSN/LX+/0uBVJDHIc2dhxAGz4+ehyyz8fAnNGoBXMAIAAAAAA6Ilw42EvvfLJ3Eq8Afd+FjPoPcQutZO6ltmCLEr8kxQVjACAAAAAAUHZ9EwBIvDI4PUeCOzZCuVN9g9h5Zvq2mSpmejPLBE8AAzQ2AH0AAAAFZAAgAAAAANBdV7M7kuYO3EMoQItAbXv4t2cIhfaT9V6+s4cg9djlBXMAIAAAAABvz4MIvZWxxrcJCL5qxLfFhXiUYB1OLHdKEjco94SgDgVjACAAAAAAvz1koQRh+weAUstLWz/iHxFrLReN4KtQjdEU/zrKOFsAAzQ3AH0AAAAFZAAgAAAAAMoAoiAn1kc79j5oPZtlMWHMhhgwNhLUnvqkqIFvcH1NBXMAIAAAAADcJTW7WiCyW0Z9YDUYwppXhLj4Ac1povpJvcAq+i48MQVjACAAAAAARL0k3Uwe7OUwTejHMoNwK+twNHQznmo1vVUZaH7h8BIAAzQ4AH0AAAAFZAAgAAAAACI3j5QP7dWHpcT6WO/OhsWwRJNASBYqIBDNzW8IorEyBXMAIAAAAABxUpBSjXwCKDdGP9hYU+RvyR+96kChfvyyRC4jZmztqAVjACAAAAAA93/qZqyku1BJePH5hmMWsMfuMlF7TOFZp7z2wLJNdeYAAzQ5AH0AAAAFZAAgAAAAAKsbycEuQSeNrF8Qnxqw3x3og8JmQabwGqnDbqzFRVrrBXMAIAAAAACno/3ef2JZJS93SVVzmOZSN+jjJHT8s0XYq2M46d2sLAVjACAAAAAARUbN9bbz8Balyw8pmBLg3L5dy3aQ9s8IgqpVLeQzTkUAAzUwAH0AAAAFZAAgAAAAAPXIcoO8TiULqlxzb74NFg+I8kWX5uXIDUPnh2DobIoMBXMAIAAAAADR6/drkdTpnr9g1XNvKDwtBRBdKn7c2c4ZNUVK5CThdQVjACAAAAAAnJXYvnjljaE6ajRF+h90FXzJ9EJY26D0oG9t61ZMzqgAAzUxAH0AAAAFZAAgAAAAAEa03ZOJmfHT6/nVadvIw71jVxEuIloyvxXraYEW7u7pBXMAIAAAAADzRlBJK75FLiKjz3djqcgjCLo/e3yntI3MnPS48OORhgVjACAAAAAAbFS6SmTSDF3AsPceQrRwMMRoAnzL/k5avSH
lIji5zYIAAzUyAH0AAAAFZAAgAAAAAKx8NLSZUU04pSSGmHa5fh2oLHsEN5mmNMNHL95/tuC9BXMAIAAAAAA59hcXVaN3MNdHoo11OcH1aPRzHCwpVjO9mGfMz4xh3QVjACAAAAAAoIvo/iOb/eXJv7w4KSQo74DOyO4uAIO09Ba7JZw9ousAAzUzAH0AAAAFZAAgAAAAAHNKAUxUqBFNS9Ea9NgCZoXMWgwhP4x0/OvoaPRWMquXBXMAIAAAAABUZ551mnP4ZjX+PXU9ttomzuOpo427MVynpkyq+nsYCQVjACAAAAAAlCTphL00SYotztwI+txbHAFTusjh1nNbiya24rM1dUEAAzU0AH0AAAAFZAAgAAAAALfGXDlyDVcGaqtyHkLT0qpuRhJQLgCxtznazhFtuyn/BXMAIAAAAABipxlXDq14C62pXhwAeen5+syA+/C6bN4rtZYcO4zKwAVjACAAAAAA431CBTW4s3IQde024W2cz5iEnj8EJ7p07esepoVDxmQAAzU1AH0AAAAFZAAgAAAAANoEr8sheJjg4UCfBkuUzarU9NFoy1xwbXjs5ifVDeA9BXMAIAAAAABPoyTf6M+xeZVGES4aNzVlq7LgjqZXJ/QunjYVusGUEAVjACAAAAAAywuLdjJJuXewOXMAr25GMBEif8+P74+pGaENUn+XIB8AAzU2AH0AAAAFZAAgAAAAAKvDiK+xjlBe1uQ3SZTNQl2lClIIvpP/5CHwY6Kb3WlgBXMAIAAAAAANnxImq5MFbWaRBHdJp+yD09bVlcFtiFDYsy1eDZj+iQVjACAAAAAAvwntX1RgeNfp5oiD+r3sAKIbMGy1yEblODDYWec1xuIAAzU3AH0AAAAFZAAgAAAAAF49IlFH9RmSUSvUQpEPUedEksrQUcjsOv44nMkwXhjzBXMAIAAAAADJtWGbk0bZzmk20obz+mNsp86UCu/nLLlbg7ppxYn7PgVjACAAAAAAKW8QTERGXwh6fE4gJsULcmu80B4BMkMaZR9USn/C6RgAAzU4AH0AAAAFZAAgAAAAAOuSJyuvz50lp3BzXlFKnq62QkN2quNU1Gq1IDsnFoJCBXMAIAAAAAAqavH1d93XV3IzshWlMnzznucadBF0ND092/2ApI1AcAVjACAAAAAAmVcLt9hf9/q+2O3K4cvFAsvnCsSofHZ5iTzIaR5YKFIAAzU5AH0AAAAFZAAgAAAAALtgC4Whb4ZdkCiI30zY6fwlsxSa7lEaOAU3SfUXr02XBXMAIAAAAACgdZ6U1ZVgUaZZwbIaCdlANpCw6TZV0bwg3DS1NC/mnAVjACAAAAAAYJOnAwVW0gPl1jkCxAL1ZAhKkJ1+ShfwyaIaipflCegAAzYwAH0AAAAFZAAgAAAAAF6PfplcGp6vek1ThwenMHVkbZgrc/dHgdsgx1VdPqZ5BXMAIAAAAACha3qhWkqmuwJSEXPozDO8y1ZdRLyzt9Crt2vjGnT7AAVjACAAAAAA1JYhGDDvtf8b2Pup5jdzQmy325tGo27n/5thjZ/GI4sAAzYxAH0AAAAFZAAgAAAAAKoLEhLvLjKc7lhOJfx+VrGJCx9tXlOSa9bxQzGR6rfbBXMAIAAAAAAIDK5wNnjRMBzET7x/KAMExL/zi1IumJM92XTgXfoPoAVjACAAAAAAhoVLxo0hXbxAzz2BWbeRxAzk08TbVuHxJkdsbCISswkAAzYyAH0AAAAFZAAgAAAAADoQv6lutRmh5scQFvIW6K5JBquLxszuygM1tzBiGknIBXMAIAAAAADAD+JjW7FoBQ76/rsECmmcL76bmyfXpUU/awqIsZdO+wVjACAAAAAAwLIixR/svwADNhtiOZ9mDF2rUqEvuPx2tUSQbhOpersAAzYzAH0AAAAFZAAgAAAAAJNHUGAgn56KekghO19d11nai3lAh0JAlWfeP+6w4lJBBXMAIAAAAAD9XGJlvz59msJvA6St9fKW9CG4JoHV61rlWWnkdBRLzwVjACAAAAAAKMVXumaH5QOt5WVHFip9xWx9e5I/5Y9uEO0UAX6dy7QAAzY0AH0AAAAFZAAgAAAAAHgYoMGjEE6fAlAhICv0+doHcVX8CmMVxyq7+jlyGrvmBXMAIAAAAAC/5MQZgTHuIr/O5Z3mXPvqrom5JTQ8IeSpQGhO9sB+8gVjACAAAAAAPMgD8Rqnd94atKnMyPjlTwthUQ710MKJVqgtwNXLFWwAAzY1AH0AAAAFZAAgAAAAANpIljbxHOM7pydY877gpRQvYY2TGK7igqgGsavqGPBABXMAIAAAAAAqHyEu9gpurPOulApPnr0x9wrygY/7mXe9rAC+tPK80wVjACAAAAAAezZExYLAbjyMsVuSrWGQ+LsXYEcp6AFMANkQ496mTXcAAzY2AH0AAAAFZAAgAAAAAGR+gEaZTeGNgG9BuM1bX2R9ed4FCxBA9F9QvdQDAjZwBXMAIAAAAABSkrYFQ6pf8MZ1flgmeIRkxaSh/Eep4Btdx4QYnGGnwAVjACAAAAAAQ011358xXtVDOyGPpF2+zGoMdiQ8PGCB1QfDcluYUF4AAzY3AH0AAAAFZAAgAAAAAFNprhQ3ZwIcYbuzLolAT5n/vc14P9kUUQComDu6eFyKBXMAIAAAAAAcx9z9pk32YbPV/sfPZl9ALIEVsqoLXgqWLVK/tP+heAVjACAAAAAA3eqduo9CWE5BePqBq7KCh5++QqXnyjCwybB15Zoeiu4AAzY4AH0AAAAFZAAgAAAAADgyPqQdqQrgfmJjRFAILTHzXbdw5kpKyfeoEcy6YYG/BXMAIAAAAAAE+3XsBQ8VAxAkN81au+f3FDeCD/s7KoZD+fnM1MJSSAVjACAAAAAAWHAiQNo6HBkIhf72fVQxJLaCtNeTqn2OuMmYZjT7PRkAAzY5AH0AAAAFZAAgAAAAAI0CT7JNngTCTUSei1Arw7eHWCD0jumv2rb7imjWIlWABXMAIAAAAABSP8t6ya0SyCphXMwnru6ZUDXWElN0NfBvEOhDvW9bJQVjACAAAAAAbfZdoqMQ71hNIsycNHl6JpSmAFDPSfgzdWkGqFZNoScAAzcwAH0AAAAFZAAgAAAAAD/FIrGYFDjyYmVb7oTMVwweWP7A6F9LnyIuNO4MjBnXBXMAIAAAAACIZgJCQRZu7NhuNMyOqCn1tf+DfU1qm10TPCfj5JYV3wVjACAAAAAAZCaO06/DjOBjNrTxB8jdxM7X4XvNu9QTa2Y00kZJwgEAAzcxAH0AAAAFZAAgAAAAAHIkVuNDkSS1cHIThKc/O0r2/ubaABTOi8Q1r/dvBAsEBXMAIAAAAADdHYqchEiJLM340c3Q4vJABmmth3+MKzwLYlsG6GS7sQVjACAAAAAA4byh4Re83zUZxvEH4iJSsnFei+ohsWxjSpB5YoAPawIAAzcyAH0AAAAFZAAgAAAAAJmoDILNhC6kn3masElfnjIjP1VjsjRavGk1gSUIjh1NBXMAIAAAAAD97Ilvp3XF8T6MmVVcxMPcdL80RgQ09U
oC6PnoOvZ1IQVjACAAAAAAx9wkqPvtzrL9EFDmFvktI129ti8IAdHn0oudubGPWAAAAzczAH0AAAAFZAAgAAAAAI5bm3YO0Xgf0VT+qjVTTfvckecM3Cwqj7DTKZXf8/NXBXMAIAAAAAD/m+h8fBhWaHm6Ykuz0WX1xL4Eme3ErLObyEVJf8NCywVjACAAAAAAiGdTXD22l1zDxHeF4NXUTiBnNTsdpzJRwM6riTPuOogAAzc0AH0AAAAFZAAgAAAAANqo4+p6qdtCzcB4BX1wQ6llU7eFBnuu4MtZwp4B6mDlBXMAIAAAAAAGiz+VaukMZ+6IH4jtn4KWWdKK4/W+O+gRioQDrfzpMgVjACAAAAAAl/wlBjSJW/hKkll8HSBCGw4Ce1pJ5rZuhVE09hKq4jQAAzc1AH0AAAAFZAAgAAAAAPrFXmHP2Y4YAm7b/aqsdn/DPoDkv7B8egWkfe23XsM1BXMAIAAAAAAGhwpKAr7skeqHm3oseSbO7qKNhmYsuUrECBxJ5k+D2AVjACAAAAAAKSsxERy2OBhb5MiH9/H2BET2oeU6yVihiAoWi/16FroAAzc2AH0AAAAFZAAgAAAAABzjYxwAjXxXc0Uxv18rH8I3my0Aguow0kTwKyxbrm+cBXMAIAAAAADVbqJVr6IdokuhXkEtXF0C2gINLiAjMVN20lE20Vmp2QVjACAAAAAACJ+VAwl9X88mjC76yzkWeL4K6AamNYjbNkpS9B6fQE4AAzc3AH0AAAAFZAAgAAAAAFMm2feF2fFCm/UC6AfIyepX/xJDSmnnolQIBnHcPmb5BXMAIAAAAABLI11kFrQoaNVZFmq/38aRNImPOjdJh0Lo6irI8M/AaAVjACAAAAAAZFgYh4nV8YN+/AAqe/QhKDkNBPg8KraCG7y3bOzhPV4AAzc4AH0AAAAFZAAgAAAAAJvXB3KyNiNtQko4SSzo/9b2qmM2zU9CQTTDfLSBWMgRBXMAIAAAAAAvjuVP7KsLRDeqVqRziTKpBrjVyqKiIbO9Gw8Wl2wFTAVjACAAAAAARuBn3JdQ8so7Gy0XVH0CtlvtRoJWhSrJP6eRIqbyWh8AAzc5AH0AAAAFZAAgAAAAAPGdcxDiid8z8XYnfdDivNMYVPgBKdGOUw6UStU+48CdBXMAIAAAAAARj6g1Ap0eEfuCZ4X2TsEw+Djrhto3fA5nLwPaY0vCTgVjACAAAAAATxLY6SkcLPaoOBJpQsggEqoxgJgiY9seP3fBQM05PckAAzgwAH0AAAAFZAAgAAAAAP5rGPrYGt3aKob5f/ldP0qrW7bmWvqnKY4QwdDWz400BXMAIAAAAADTQkW2ymaaf/bhteOOGmSrIR97bAnJx+yN3yMj1bTeewVjACAAAAAA6O+wIMt3xMITuCxVrqOCNBPX5F122G+/Is+EYkzUvVYAAzgxAH0AAAAFZAAgAAAAAAlz6wJze5UkIxKpJOZFGCOf3v2KByWyI6NB6JM9wNcBBXMAIAAAAABUC7P/neUIHHoZtq0jFVBHY75tSFYr1Y5S16YN5XxC1QVjACAAAAAANv97jMd9JmZ7lMeCzK7jaZHZYsZJNl6N9g9WXzd2om0AAzgyAH0AAAAFZAAgAAAAAFJ8AtHcjia/9Y5pLEc3qVgH5xKiXw12G9Kn2A1EY8McBXMAIAAAAAAxe7Bdw7eUSBk/oAawa7uicTEDgXLymRNhBy1LAxhDvwVjACAAAAAAtOUZ2NeUWwESpq95O92dhqYBt8RtFnjT43E7k9mQF3oAAzgzAH0AAAAFZAAgAAAAAO8uwQUaKFb6vqR3Sv3Wn4QAonC2exOC9lGG1juqP5DtBXMAIAAAAABZf1KyJgQg8/Rf5c02DgDK2aQu0rNCOvaL60ohDHyY+gVjACAAAAAA5nAWj1Ek8p0MLYZEB3yoVFDfBYZ+/ZpIo71u+W9hprcAAzg0AH0AAAAFZAAgAAAAAE8YtqyRsGCeiR6hhiyisR/hccmK4nZqIMzO4lUBmEFzBXMAIAAAAAC1UYOSKqAeG1UJiKjWFVskRhuFKpj9Ezy+lICZvFlN5AVjACAAAAAA37oP30mTzVftI2FL9Uwyls/jqLqbmRDQk7p7nlx44uYAAzg1AH0AAAAFZAAgAAAAAPhCrMausDx1QUIEqp9rUdRKyM6a9AAx7jQ3ILIu8wNIBXMAIAAAAACmH8lotGCiF2q9VQxhsS+7LAZv79VUAsOUALaGxE/EpAVjACAAAAAAhVIkl8p19VZ0gpg32s++Jda3qsVSVB5tV4CKrtjhE3IAAzg2AH0AAAAFZAAgAAAAAOBi/GAYFcstMSJPgp3VkMiuuUUCrZytvqYaU8dwm8v2BXMAIAAAAACEZSZVyD3pKzGlbdwlYmWQhHHTV5SnNLknl2Gw8IaUTQVjACAAAAAAriBex2kK//RPhyVpCYJDBng4l5w8jD3m8BF7dVAP0p8AAzg3AH0AAAAFZAAgAAAAAIQuup+YGfH3mflzWopN8J1X8o8a0d9CSGIvrA5HOzraBXMAIAAAAADYvNLURXsC2ITMqK14LABQBI+hZZ5wNf24JMcKLW+84AVjACAAAAAAGeUzdG2kQrx5XypXJezZmPVzMYuqYZw7Bhbl4EPT0SMAAzg4AH0AAAAFZAAgAAAAAKsh0ADyOnVocFrOrf6MpTrNvAj8iaiE923DPryu124gBXMAIAAAAADg24a8NVE1GyScc6tmnTbmu5ulzO+896fE92lN08MeswVjACAAAAAAbO+DEBY3STVMQN7CbxmHDUBYrDR+80e797u/VmiK4vcAAzg5AH0AAAAFZAAgAAAAAKkVC2Y6HtRmv72tDnPUSjJBvse7SxLqnr09/Uuj9sVVBXMAIAAAAABYNFUkH7ylPMN+Bc3HWX1e0flGYNbtJNCY9SltJCW/UAVjACAAAAAAHlE3RLKcXpXto7Mr8nRgzEsmhjfGh4vcgPxashCSMg4AAzkwAH0AAAAFZAAgAAAAADDggcwcb/Yn1Kk39sOHsv7BO/MfP3m/AJzjGH506Wf9BXMAIAAAAAAYZIsdjICS0+BDyRUPnrSAZfPrwtuMaEDEn0/ijLNQmAVjACAAAAAA1c/sy3255NofBQrnBNSmVkSzMgsGPaaOUJShddVrnuQAAzkxAH0AAAAFZAAgAAAAAEWY7dEUOJBgjOoWVht1wLehsWAzB3rSOBtLgTuM2HC8BXMAIAAAAAAAoswiHRROurjwUW8u8D5EUT+67yvrgpB/j6PzBDAfVwVjACAAAAAALBvQumG8m7/bzJjGWN2cHSAncdN8jMtOSmEhEhGom24AAzkyAH0AAAAFZAAgAAAAAPZaapeAUUFPA7JTCMOWHJa9lnPFh0/gXfAPjA1ezm4ZBXMAIAAAAACmJvLY2nivw7/b3DOKH/X7bBXjJwoowqb1GtEFO3OYgAVjACAAAAAAGCDHuUJusGHKQ+r9nrFChmUUsRcqZKPGsRiLSk5gbFcAAzkzAH0AAAAFZAAgAAAAAK00u6jadxCZAiA+fTsPVDsnW5p5L
Cr4+kZZZOTDuZlfBXMAIAAAAAAote4zTEYMDgaaQbAdN8Dzv93ljPLdGjJzvnRn3KXgtQVjACAAAAAApZ6l+OrocOqgFek7WOqOP9JruTWZ+iW+5zdL3DZwzhkAAzk0AH0AAAAFZAAgAAAAAFbgabdyymiEVYYwtJSWa7lfl/oYuj/SukzJeDOR6wPVBXMAIAAAAADAFGFjS1vPbN6mQEhkDYTD6V2V23Ys9gUEUMGNvMPkaAVjACAAAAAAeICKly5Xtwmfd4JFD+5e1UWpu4V3KoRim7jeBICDs+UAAzk1AH0AAAAFZAAgAAAAABNMR6UBv2E627CqLtQ/eDYx7OEwQ7JrR4mSHFa1N8tLBXMAIAAAAAAxH4gucI4UmNVB7625C6hFSVCuIpJO3lusJlPuL8H5EQVjACAAAAAA0/uh1hrAlGGna/njCVaCqBwubxkifzF0zjCDQrIJOtUAAzk2AH0AAAAFZAAgAAAAAG8cd6WBneNunlqrQ2EmNf35W7OGObGq9WL4ePX+LUDmBXMAIAAAAAAjJ2+sX87NSis9hBsgb1QprVRnO7Bf+GObCGoUqyPE4wVjACAAAAAAAoJ4Ufdq45T9Aun5FupHlaBCIUsmUn6dXgV9KorpFikAAzk3AH0AAAAFZAAgAAAAAFWOUGkUpy8yf6gB3dio/aOfRKh7XuhvsUj48iESFJrGBXMAIAAAAAAY7sCDMcrUXvNuL6dO0m11WyijzXZvPIcOKob6IpC4PQVjACAAAAAAO/QFjczoznH95Iu0YEVMsU1GA1yxSGL8bcwSweNzAtkAAzk4AH0AAAAFZAAgAAAAAGUrIdKxOihwNmo6B+aG+Ag1qa0+iqdksHOjQj+Oy9bZBXMAIAAAAABwa5dbI2KmzBDNBTQBEkjZv4sPaeRkRNejcjdVymRFKQVjACAAAAAAUtdJucZoQvvPJiy65RonQrxBCcvtfHslpbgLbtWirCYAAzk5AH0AAAAFZAAgAAAAAOx89xV/hRk64/CkM9N2EMK6aldII0c8smdcsZ46NbP8BXMAIAAAAADBF6tfQ+7q9kTuLyuyrSnDgmrdmrXkdhl980i1KHuGHgVjACAAAAAAVc5njbLX1afpsuG662U/OBo4LanEk06lKbQtd95fswgAAzEwMAB9AAAABWQAIAAAAADJDdC9aEFl4Y8J/awHbnXGHjfP+VXQilPHJg7ewaJI7AVzACAAAAAAE+tqRl6EcBMXvbr4GDiNIYObTsYpa1n6BJk9EjIJVicFYwAgAAAAAITXOaLyGsYLUvOemQXxvnAHBwM/t3sMwfQus2aGoII+AAMxMDEAfQAAAAVkACAAAAAA3F9rjEKhpoHuTULVGgfUsGGwJs3bISrXkFP1v6KoQLgFcwAgAAAAAIBf0tXw96Z/Ds0XSIHX/zk3MzUR/7WZR/J6FpxRWChtBWMAIAAAAABzRshE3O7JzVjSGQSabvFXpen8sGUCAyr8hIIevROrjgADMTAyAH0AAAAFZAAgAAAAAOYIYoWkX7dGuyKfi3XssUlc7u/gWzqrR9KMkikKVdmSBXMAIAAAAABVF2OYjRTGi9Tw8XCAwZWLpX35Yl271TlNWp6N/nROhAVjACAAAAAAyGLbmHhe8eTQC8L2eDRcezUWULLZ9E8DPic7Mfp8/+4AAzEwMwB9AAAABWQAIAAAAACMtPm12YtdEAvqu6Eji1yuRXnu1RJP6h0l7pH3lSH4MwVzACAAAAAAENyCFfyUAh1veQBGx+cxiB7Sasrj41jzCGflZkB5cRMFYwAgAAAAAA92s3DQ3aIVOrRzusqU+xdK7cnQGCDJiyLF+su3F1ZDAAMxMDQAfQAAAAVkACAAAAAAvlI4lDcs6GB1cnm/Tzo014CXWqidCdyE5t2lknWQd4QFcwAgAAAAAD60SpNc4O2KT7J0llKdSpcX1/Xxs97N715a1HsTFkmBBWMAIAAAAABR3y/ZE3VZqBhqfUttX4bzW9wkpDoexfYZIG2fMwftcQADMTA1AH0AAAAFZAAgAAAAAKl8zcHJRDjSjJeV/WvMxulW1zrTFtaeBy/aKKhadc6UBXMAIAAAAADBdWQl5SBIvtZZLIHszePwkO14W1mQ0izUk2Ov21cPNAVjACAAAAAAHvQLJ2X4ncjVv1BsOd/jbVouRPwf4222WlGSzPyAfnsAAzEwNgB9AAAABWQAIAAAAABb6LXDWqCp1beQgQjj8I3sRTtFhlrmiBi+h/+ikmrvugVzACAAAAAA9stpgTecT7uTyaGNs3K9Bp0A7R0QaIAOfscyMXHBPX8FYwAgAAAAANTp7TwMatTSeLH3GvsrB3IOvJo+0B8HRGUFcRzJVesdAAMxMDcAfQAAAAVkACAAAAAA97NTT+81PhDhgptNtp4epzA0tP4iNb9j1AWkiiiKGM8FcwAgAAAAAKPbHg7ise16vxmdPCzksA/2Mn/qST0L9Xe8vnQugVkcBWMAIAAAAACVl3ds8xRdGFPAtZ+WtC4ojC1q5OzB6dSJoOLaJsdhhgADMTA4AH0AAAAFZAAgAAAAAMqpayM2XotEFmm0gwQd9rIzApy0X+7HfOhNk6VU7F5lBXMAIAAAAACJR9+q5T9qFHXFNgGbZnPubG8rkO6cwWhzITQTmd6VgwVjACAAAAAAEEOCUo+ihRGl1kuHlabWBWUFyJPAXSXzpQB4od57cMEAAzEwOQB9AAAABWQAIAAAAADQnslvt6Hm2kJPmqsTVYQHE/wWeZ4bE1XSkt7TKy0r1gVzACAAAAAA8URTA4ZMrhHPvlp53TH6FDCzS+0+61qHm5XK6UiOrKEFYwAgAAAAANSmECEsZgXBpoAzAIwaU3H0K/6HIutCn+ELRGBFzykkAAMxMTAAfQAAAAVkACAAAAAAhujlgFPFczsdCGXtQ/002Ck8YWQHHzvWvUHrkbjv4rwFcwAgAAAAALbV0lLGcSGfE7mDM3n/fgEvi+ifjl7WZ5b3aqjDNvx9BWMAIAAAAACKzGyyRT/3KSmGlKMHpswxj7pNu3rCmOETZ2DUhQsCBgADMTExAH0AAAAFZAAgAAAAABm/6pF96j26Jm7z5KkY1y33zcAEXLx2n0DwC03bs/ixBXMAIAAAAAD01OMvTZI/mqMgxIhA5nLs068mW+GKl3OW3ilf2D8+LgVjACAAAAAAnPa8PTfcaSSAdzNAC54IMTicbShbtt/cSnFHz7u7g8wAAzExMgB9AAAABWQAIAAAAACfw9/te4GkHZAapC9sDMHHHZgmlTrccyJDPFciOMSOcwVzACAAAAAAIIC1ZpHObvmMwUfqDRPl4C1aeuHwujM1G/yJbvybMNAFYwAgAAAAAKeagivKdHk7wAFEPHyDhsm9mDNWH5UPB+oIIlmfbzSOAAMxMTMAfQAAAAVkACAAAAAAkxHJRbnShpPOylLoDdNShfILeA1hChKFQY9qQyZ5VmsFcwAgAAAAAKidrY+rC3hTY+YWu2a7fuMH2RD/XaiTIBW1hrxNCQOJBWMAIAAAAADH8s64PC+lo8Ul4ouj
Blb/irtJSwu12V0HbvNzj/9qUQADMTE0AH0AAAAFZAAgAAAAAByuYl8dBvfaZ0LO/81JW4hYypeNmvLMaxsIdvqMPrWoBXMAIAAAAABNddwobOUJzm9HOUD8BMZJqkNCUCqstHZkC76FIdNg9AVjACAAAAAA4SEPY+qEZ2dCu/9485IEVybiM3Z7szXYM+izxklNm14AAzExNQB9AAAABWQAIAAAAABxMy7X5hf7AXGDz3Y/POu1ZpkMlNcSvSP92NOO/Gs7wAVzACAAAAAAHJshWo2T5wU2zvqCyJzcJQKQaHFHpCpMc9oWBXkpUPoFYwAgAAAAANeYm2aAT9f6T9uwQyH4sw2DUQyKTpo/CHt+Bb67ao4TAAMxMTYAfQAAAAVkACAAAAAAlqbslixl7Zw3bRlibZbe/WmKw23k8uKeIzPKYEtbIy0FcwAgAAAAAHEKwpUxkxOfef5HYvulXPmdbzTivwdwrSYIHDeNRcpcBWMAIAAAAAASUKCgxGu2U42qfKiyMimLPiZBMurf5v8lQJUSrhAWIAADMTE3AH0AAAAFZAAgAAAAAMXrXx0saZ+5gORmwM2FLuZG6iuO2YS+1IGPoAtDKoKBBXMAIAAAAADIQsxCr8CfFKaBcx8kIeSywnGh7JHjKRJ9vJd9x79y7wVjACAAAAAAPzOTJ088k9QdGUpZ1ubUdkw3pTGkNF8bui4a9wqeo08AAzExOAB9AAAABWQAIAAAAAAm83FA9yDUpwkbKTihe7m53u+DivS9BU2b4vQMtCVQ2AVzACAAAAAAz3m1UB/AbZPa4QSKFDnUgHaT78+6iGOFAtouiBorEgEFYwAgAAAAAHuM0gIDED47c9rdQ4MVtVzpVfn5zTF7eokO4lew4EM+AAMxMTkAfQAAAAVkACAAAAAAJRzYK0PUwr9RPG2/7yID0WgcTJPB2Xjccp5LAPDYunkFcwAgAAAAAIIh24h3DrltAzNFhF+MEmPrZtzr1PhCofhChZqfCW+jBWMAIAAAAAD6LppOeSiLHFI7EOBAz9pZPKU7LWbRWIINTlqw4zmJ5gADMTIwAH0AAAAFZAAgAAAAAHSaHWs/dnmI9sc7nB50VB2Bzs0kHapMHCQdyVEYY30TBXMAIAAAAACkV22lhEjWv/9/DubfHBAcwJggKI5mIbSK5L2nyqloqQVjACAAAAAA1I5pLbnlFf/EfJlhE0RFIioDjzKrWNh4TO8H/ksrbMcAAzEyMQB9AAAABWQAIAAAAAAC/I4TQRtCl12YZmdGz17X4GqSQgfwCPgRBwdHmdwu+QVzACAAAAAAx8f3z2ut/RAZhleari4vCEE+tNIn4ikjoUwzitfQ588FYwAgAAAAADX1qVlsvYru6YDBMtmlQoIBhLSbVykBtft8wn/A+ohJAAMxMjIAfQAAAAVkACAAAAAADGB5yU2XT0fse/MPWgvBvZikVxrl5pf3S5K1hceKWooFcwAgAAAAAIxTmlLHMjNaVDEfJbXvRez0SEPWFREBJCT6qTHsrljoBWMAIAAAAAD+RiUk9IWfaDWXtzLp2NX/vCCeU9amFnQgpJjy56dQXQADMTIzAH0AAAAFZAAgAAAAABmO7QD9vxWMmFjIHz13lyOeV6vHT6mYCsWxF7hb/yOjBXMAIAAAAACT9lmgkiqzuWG24afuzYiCeK9gmJqacmxAruIukd0xEAVjACAAAAAAyiZxtuOZQNSit4wFMJS3jnqLbDzyJ0OtDTKs6r0EO0cAAzEyNAB9AAAABWQAIAAAAAAfPUoy7QyZKhIIURso+mkP9qr1izbjETqF5s22GwjCjAVzACAAAAAAvLMsIDQ/go4VUxeh50UHmsvMvfx51cwyONnRD2odvC0FYwAgAAAAAMwY/KvSIOLoSHOq4TQ1V9ZSCPZc34SdPooPL4gYXa7UAAMxMjUAfQAAAAVkACAAAAAAE3ZqUar0Bq2zWbARE0bAv98jBlK9UJ73/xcwdMWWlSkFcwAgAAAAAK4M+MmC+9sFiFsumMyJZQKxWmmJiuG9H7IzKw083xxkBWMAIAAAAACpywsi9vc979ApgH3BDzph7Y4VIAkQhji97sswzdLAYwADMTI2AH0AAAAFZAAgAAAAAMo8FJiOq63cAmyk2O7eI7GcbQh/1j4RrMTqly3rexftBXMAIAAAAADjVmpd0WiRGTw/gAqEgGolt2EI7Csv14vKdmYoMD0aAgVjACAAAAAAEPmYjDZnMZdkaYlLZLlRgist9kCrSQe0mSuKxPwwYbQAAzEyNwB9AAAABWQAIAAAAACu5IGaIx7A3Jvly/kzlCsSA4s3iJwuIl8jEdRH0k93NwVzACAAAAAA9NRUyxYE+t0Xyosyt6vIfMFW/vBoYg6sR+jBNs4JAxIFYwAgAAAAAKMbsr4JCCodHmkBvfhOe7BJigKiJik95wLeu3PjLSpXAAMxMjgAfQAAAAVkACAAAAAALkRy0GJInXYLA+cgjs6Myb0a+Gu9hgXhHvhLNoGWfckFcwAgAAAAANbALyt9zCSvwnLaWCd2/y2eoB7qkWTvv1Ldu8r40JPuBWMAIAAAAABi0dME7AUSoeMv4yIgnGMgrX9G+Uk6HV+I9/9zzABcZwADMTI5AH0AAAAFZAAgAAAAAGoUK/DSWhT8LZhszSUqDbTrp8cSA7rdqmADKL+MILtTBXMAIAAAAABHnEE9bVa6lvhfhEMkkV2kzSSxH/sMW/FIJuw3CzWs6wVjACAAAAAAagOaxIb7co+HxAXhfI9ahHLUNqmGuN/4JxeR9sm6PdkAAzEzMAB9AAAABWQAIAAAAAC922ZDQE3h2fQKibGMZ9hV0WNlmrPYYSdtaSyYxsWYqgVzACAAAAAAagMovciKK6WVjIc2cCj8nK5O/gVOFFVeVAJpRp89tmQFYwAgAAAAAAMsWoODZkTvKSNWzloogbp/AiFFspENp49RwKjWpMLHAAMxMzEAfQAAAAVkACAAAAAAtK+JqX3K/z2txjAU15DgX4y90DS2YLfIJFolCOkJJJwFcwAgAAAAAMnR5V7gfX7MNqqUdL5AkWlkhyFXaBRVNej+Rcn8lrQkBWMAIAAAAADOmDHuXwsu9uwKYYSN5XIb/TG3K5DB5aci/X1ATKTaSwADMTMyAH0AAAAFZAAgAAAAAAeGrIMK/bac6kPczxbvRYqKMkcpeI2FjdMpD91FDWIvBXMAIAAAAAAix62z1LeS8yvSXCl5gHSIomjyx76fF3S1lp9k900hygVjACAAAAAAUK1Gzlm0meDhYPL7ZgKaCD0qz7Zb5rtnPn1EEW3UZxoAAzEzMwB9AAAABWQAIAAAAACphf298InM0Us4HT8o1W1MGw0D/02vd7Jh+U0h7qaFaQVzACAAAAAAFXtk7YpqsOJxsqGWSIL+YcBE96G3Zz9D31gPqDW94y8FYwAgAAAAAEA5mnfyccI1SjyYftq3qCuORs/NxzzqUA+fxn3FRjVsAAMxMzQAfQAAAAVkACAAAAAATLDS2cuDVM3yDMuWNgk2iGKBTzPpfJMbvxVOSY3
9ZfcFcwAgAAAAAPT5wRi2cLHIUflXzm6EQB/m7xdThP80ir1VV/JBBqvxBWMAIAAAAABTj7i+CxxNVkbnAkSYga/v2X3/NH3nD/FRiOo2FPkzvAADMTM1AH0AAAAFZAAgAAAAAJNjExiZVX7jfFGfYpQu16qxLN0YPqVU/5CQ/Y67YSinBXMAIAAAAABMpm2+6KrkRUlXzQoMPHrQmIO6dkQz66tYdfTeA3dKqQVjACAAAAAATkNGECGUFzo34ItVnTEmxbe8dbde8QzMgEEdjMhRLz8AAzEzNgB9AAAABWQAIAAAAABlFkYtLCx901X6QVVMkSn6Z7k30UF4xHaA0OZJJ9bdyQVzACAAAAAATez+F9GHcGzTp7jjv4feboUNb8JCkIp4EqcPFisnq7MFYwAgAAAAAOHfn3eVRhSWn61AfZuAnCGleSLluJ3sF9IFGUWrCsBaAAMxMzcAfQAAAAVkACAAAAAA8NJKN0IxZnruhswGQkiruv8Ih0EMwDcSZx/Xasup9dkFcwAgAAAAAKaJZRxzA+Igeydvuk6cSwUHXcrmT4PjhuPu//FslpdnBWMAIAAAAAC3Cs/mKg+sbU/BC9QWFglOau6FWAP1sJqqSZOEOdzh+gADMTM4AH0AAAAFZAAgAAAAABHxHdEClz7hbSSgE58+dWLlSMJnoPz+jFxp4bB1GmLQBXMAIAAAAAD3nSvT6aGD+A110J/NwEfp0nPutlmuB5B+wA3CC3noGAVjACAAAAAA56lpvjMjFzR6NMGiSF1es1X01dwINWqaxyq3UUL8XJ0AAzEzOQB9AAAABWQAIAAAAAC/o8qW/ifk3KuJ01VFkyNLgQafxB5/bGs2G5VyyVafOwVzACAAAAAA1bMqAFGDHSl6BYNLbxApvkAv2K1/oafywiX0MDz1dGUFYwAgAAAAAMjJrrd6E5DyUuXkWZsAqo020Qvom6xqAluQLod5SmtdAAMxNDAAfQAAAAVkACAAAAAAY2V8I1bz3a1AxTtmED6UhdhA09huFkuuEX8R+d/WDPUFcwAgAAAAAPTVoNRiI76tcRKqd+JBBVyy4+YcKST42p0QX2BtmQ2VBWMAIAAAAAD0kQXxOWWOfIzqhJk5Rnvp/h5VXcGxxLZ8HMvVVH++YQADMTQxAH0AAAAFZAAgAAAAAO2kSsW0WGN9AOtK4xK2SHrGhWiaAbMEKT4iZkRpaDN/BXMAIAAAAABKGzQcPM8LT2dwOggxoWjv/1imYWabbG/G4kBw8OWaxAVjACAAAAAAT3sD7PTrAPtOtiSRrG4IKmSNr/uWgX8TvIyZpZzj4wkAAzE0MgB9AAAABWQAIAAAAAAiCwzNEEaH/mDam68IdDftnhthyUFdb+ZCNSBQ91WlHQVzACAAAAAA7tHyHcxCzmbJeFYZyPm4mEgkTGKOvwY4MX82OvH0Jn8FYwAgAAAAAP4A3nd470CwvaGzyJLfzbrw7ePJV35V3Cw6tuiPFlsCAAMxNDMAfQAAAAVkACAAAAAAGuCHVNJSuoVkpPOnS5s89GuA+BLi2IPBUr2Bg1sWEPIFcwAgAAAAAEl1gncS5/xO7bQ/KQSstRV3rOT2SW6nV92ZANeG2SR6BWMAIAAAAADi6/zM3xSBXEfpRpvEdeycRDl08SmwwLey72qgZaY7FwADMTQ0AH0AAAAFZAAgAAAAAMfxz7gEaoCdPvXrubDhCZUS0ARLZc1svgbXgMDlVBPgBXMAIAAAAAB6a5dDA3fuT5Vz2KvAcbUEFX/+B7Nw2p1QqbPoQ5TTuAVjACAAAAAAB4Kp5ewWpFMJ2T+QCrfw2eJV70L4M+GM8khV1JwqOqQAAzE0NQB9AAAABWQAIAAAAACnYsqF/VzmjIImC9+dqrHO1TM6lJ6fRwM0mM6Wf6paOwVzACAAAAAA5tgZzch8uDCR1ky3SllVaKVpxAlbrhvlNDTazZZRZOAFYwAgAAAAALDJDWkmExE4L8hf2CDNLuF+dgivUGXb7ZvBp90EALm+AAMxNDYAfQAAAAVkACAAAAAAEC0sIVmadtW4YMuRXH7RpAhXclsd+3bmqGXCMeaT014FcwAgAAAAABPpXh0uzpsJJB+IRUNajmMB9WGwswfpw5T9xk3Xj6ANBWMAIAAAAABauth7HsI90juToIGY6149TbrxP8Emoa+5Izilj9zeWQADMTQ3AH0AAAAFZAAgAAAAABaIB3n49Xm9cOafSrQsE0WCcYp8rMIO/qVwIlMF5YLRBXMAIAAAAAC9EyWJV3xOu9bzgdJ/yX+ko7qLf1u3AxNMataW2C9EzQVjACAAAAAAy7IAYSgbqP7CyvurHMoEq1eO08TIPNO/XcS1L5RKXHkAAzE0OAB9AAAABWQAIAAAAAA5rZItA/cocRnngYqcJ3nBXQ+l688aKz3EQyLbYYunPAVzACAAAAAAwKyA+L7TgxztPClLrIMk2JXR+w7c04N3ZOqPgjvrIvsFYwAgAAAAABaAwqPDgkTaz4888/3PeF64+vtHbLtAZgFgEurWT5kbAAMxNDkAfQAAAAVkACAAAAAA3htn7oHJ0YYpIrs+Mzyh85Ys67HwAdv5LQl1mCdoMWkFcwAgAAAAAEHjCtNNLenHuSIYux6ezAHsXDaj2DlTF67ToDhDDe6HBWMAIAAAAAAVwUZIMifuPtmiFSEctLt+bOFo1T4fgGiWNsZAP0ddeQADMTUwAH0AAAAFZAAgAAAAAEnt18Km/nqggfIJWxzTr9r3hnXNaueG6XO9A5G11LnGBXMAIAAAAAD7QxzGMN/ard5TfFLecE6uusMmXG2+RBsBR+/NCQHUwAVjACAAAAAAIPTDniUskKj5mEzULdlKtIDquwjcqkwunJyhmBazNNcAAzE1MQB9AAAABWQAIAAAAAAVAKK/GoY8AACu/hyMpO4hdLq6JnEyWNzkyci9sbaD/wVzACAAAAAA2HmeqpMlvvBpV2zQTYIRmsc4MFlfHRwLof0ycJgMg/MFYwAgAAAAACHHCAszhxijRN9Es+XRQxXo7JQ6g2qM6f5sIdMy0VKbAAMxNTIAfQAAAAVkACAAAAAAhHyq1GQC/GiMwpYjcsfkNxolJ10ARKjIjfkW1Wipzi0FcwAgAAAAAD/uaGWxTDq87F8XZ6CrFI+RNa8yMqfSZdqK00Kj833BBWMAIAAAAAD1mJD7i29l2xloVHwS1n0CbkyodWeFfZM1KF1r4HqNIgADMTUzAH0AAAAFZAAgAAAAABAJJxHoZD+MQBWqm9UM9Dd3z5ZohIZGWRaRVRsMptKQBXMAIAAAAADrE/ca+gqj/SH4oao4wE4qn2ovoTydzcMbDbrfnUs3zAVjACAAAAAAAIUpF9mApVCMCckE7S+K9RL6TPtlTTYuzUsj9E6oIpUAAzE1NAB9AAAABWQAIAAAAAAv01wz7VG9mTepjXQi6Zma+7b/OVBaKVkWNbgDLr1mFgVzACAAAAAA0I5sxz8r6wkCp5Tgvr+iL4p6MxSOq5d3e1kZG+0b7NkFYwAgAAAAAKwOaUTAOw6Y1F5vFrbDf8
yVua5Fm6hKhkdIrzejuaatAAMxNTUAfQAAAAVkACAAAAAAWXecRwxSon68xaa9THXnRDw5ZfzARKnvvjTjtbae6T0FcwAgAAAAAPh0UfUMEo7eILCMv2tiJQe1bF9qtXq7GJtC6H5Va4fIBWMAIAAAAAASanx14WR2rmomuZ2x/nuD8j75pYzz+ZAt/v7uRcoOEQADMTU2AH0AAAAFZAAgAAAAAEY5WL8/LpX36iAB1wlQrMO/xHVjoO9BePVzbUlBYo+bBXMAIAAAAABoKcpadDXUARedDvTmzUzWPe1jTuvD0z9oIcZmKuiSXwVjACAAAAAAWhdUWP186dYAIDx6RtC0o/lzwYNvYBXm4RWFMbcU3YkAAzE1NwB9AAAABWQAIAAAAADnJnWqsfx0xqNnqfFGCxIplVu8mXjaHTViJT9+y2RuTgVzACAAAAAAWAaSCwIXDwdYxWf2NZTly/iKVfG/KDjHUcA1BokN5sMFYwAgAAAAAPS3GDNsHgCuTgEWTHiEStoY0VMzlopZ8L7wPUquK7ayAAMxNTgAfQAAAAVkACAAAAAAsPG7LaIpJvcwqcbtfFUpIjj+vpNj70Zjaw3eV9T+QYsFcwAgAAAAAJQ71zi0NlCyY8ZQs3IasJ4gB1PmWx57HpnlCf3+hmhqBWMAIAAAAABmDfQAQeyeutCB+wSn4RmErwYEkYpyBeCYA+3iutFzAQADMTU5AH0AAAAFZAAgAAAAAAGiSqKaQDakMi1W87rFAhkogfRAevnwQ41onWNUJKtuBXMAIAAAAAASgiDpXfGh7E47KkOD8MAcX8+BnDShlnU5JAGdnPdqOAVjACAAAAAAucFM6KF1YHEhH9vn16ox+VSqNEykMGaJzhLPsLI7qk8AAzE2MAB9AAAABWQAIAAAAAB7L4EnhjKA5xJD3ORhH2wOA1BvpnQ+7IjRYi+jjVEaJAVzACAAAAAAuhBIm0nL3FJnVJId+7CKDASEo+l2E89Z9/5aWSITK4AFYwAgAAAAALxpb5qDek038/wz4E//KIrwwo0voaqwlB7cTXSQ44EHAAMxNjEAfQAAAAVkACAAAAAAaROn1NaDZFOGEWw724dsXBAm6bgmL5i0cki6QZQNrOoFcwAgAAAAANVT8R6UvhrAlyqYlxtmnvkR4uYK/hlvyQmBu/LP6/3ZBWMAIAAAAAC1ONheOA57vNof7iZM3b82xk1abqT0bppzjQnHCmnUIwADMTYyAH0AAAAFZAAgAAAAADqSR5e0/Th59LrauDA7OnGD1Xr3H3NokfVxzDWOFaN7BXMAIAAAAACt30faNwTWRbvmykDpiDYUOCwA6QDbBBYBFWS7rdOB4AVjACAAAAAAQZlvU/gzOwW2rcr5+W43Q6EkfX8oX1TpRJWEAZAULx4AAzE2MwB9AAAABWQAIAAAAADmtb1ZgpZjSeodPG/hIVlsnS8hoRRwRbrTVx89VwL62AVzACAAAAAAi38e1g6sEyVfSDkzZbaZXGxKI/zKNbMasOl2LYoWrq8FYwAgAAAAAFWkHkxPA28FjeUjeRILC0tX8OJcanhPP68QxM5s1kcMAAMxNjQAfQAAAAVkACAAAAAAf/f7AWVgBxoKjr7YsEQ4w/fqSvuQWV2HMiA3rQ7ur0sFcwAgAAAAADkkeJozP6FFhUdRIN74H4UhIHue+eVbOs1NvbdWYFQrBWMAIAAAAADS3qKdAEzPtGMFpsisEhnOfF0zfxmLXjHKKaNCzX4PhQADMTY1AH0AAAAFZAAgAAAAAI2WEOymtuFpdKi4ctanPLnlQud+yMKKb8p/nfKmIy56BXMAIAAAAADVKrJmhjr1rfF3p+T+tl7UFd1B7+BfJRk0e7a4im7ozgVjACAAAAAAxeqPjcgDwae+AgEXg3hU118dHoy6rcHd6jAJrGik+EcAAzE2NgB9AAAABWQAIAAAAAAuHU9Qd79hjyvKOujGanSGDIQlxzsql8JytTZhEnPw+AVzACAAAAAAjF2gV/4+sOHVgDd/oR5wDi9zL7NGpGD+NsEpGXy/a4QFYwAgAAAAAIFYquIAyUn3D2peptolq3s8oIgSLEIctqBYx6qEeQTxAAMxNjcAfQAAAAVkACAAAAAA7dQDkt3iyWYCT94d7yqUtPPwp4qkC0ddu+HFdHgVKEkFcwAgAAAAANuYvtvZBTEq4Rm9+5eb7VuFopowkrAuv86PGP8Q8/QvBWMAIAAAAABBO8poBKW/RWjdHBARy3Rzghz3frQIZEPE/nSKWYlVPgADMTY4AH0AAAAFZAAgAAAAAAwnZSDhL4tNGYxlHPhKYB8s28dY5ScSwiKZm3UhT8U3BXMAIAAAAABDoY6dhivufTURQExyC9Gx3ocpl09bgbbQLChj3qVGbgVjACAAAAAA4JWOs1XAUgX3rGBBGEDDIC9/i9khJ+FgIcwfvj/SnS0AAzE2OQB9AAAABWQAIAAAAAC2VuRdaC4ZJmLdNOvD6R2tnvkyARteqXouJmI46V306QVzACAAAAAAMn1Z6B35wFTX9mEYAPM+IiJ5hauEwfD0CyIvBrxHg7IFYwAgAAAAAMRQnV65wbGlT+7Wj/HvGPLKg8NBnhkQ8Hx/MIyWvynTAAMxNzAAfQAAAAVkACAAAAAAVerb7qVNy457rNOHOgDSKyWl5ojun7iWrv1uHPXrIZQFcwAgAAAAAIDcYS9j5z+gx0xdJj09L7876r/vjvKTi/d3bXDE3PhyBWMAIAAAAAANDuaZsI2eS6/qiSdG9pCLjcPdKS96xWUJr84V/1La7gADMTcxAH0AAAAFZAAgAAAAAH/E44yLxKCJjuSmU9A8SEhbmkDOx1PqqtYcZtgOzJdrBXMAIAAAAABgLh9v2HjBbogrRoQ82LS6KjZQnzjxyJH4PH+F3jupSAVjACAAAAAAd93miIlZ/uuhzjVBt0BPjzeewxih6TSzkzbsNhpMjA8AAzE3MgB9AAAABWQAIAAAAAAlNUdDL+f/SSQ5074mrq0JNh7CTXwTbbhsQyDwWeDVMwVzACAAAAAANIH2IlSNG0kUw4qz0budjcWn8mNR9cJlYUqPYdonucAFYwAgAAAAAF9OccClBu1j8BVMDmMtJyCxnhWCbg9FuY/8nhuFGJknAAMxNzMAfQAAAAVkACAAAAAASZ+CvUDtlk/R4HAQ3a+PHrKeY/8ifAfh0oXYFqliu80FcwAgAAAAAJelpzPgM65OZFt/mvGGpwibclQ49wH+1gbUGzd9OindBWMAIAAAAAC63uwNXVbd1VP5If++NprVRsHCbomU9Iq3GxCttNAweAADMTc0AH0AAAAFZAAgAAAAAN9fkoUVbvFV2vMNMAkak4gYfEnzwKI3eDM3pnDK5q3lBXMAIAAAAACnDkgVNVNUlbQ9RhR6Aot2nVy+U4km6+GHPkLr631jEAVjACAAAAAA5d2REAVBZI9rUfeqJ9jhePmRXvrGolUbMTFiC78k/OIAAzE3NQB9AAAABWQAIAAAAAASyK3b1nmNCMptVEGOjwoxYLLS9fYWm/Zxilqea0jpE
QVzACAAAAAADDHsGrbqlKGEpxlvfyqOJKQJjwJrzsrB7k3HG0AUJbkFYwAgAAAAACZHzWGY3PEL4uPSKPezGGJkyScYw/Vt3bKmX5inVMaOAAMxNzYAfQAAAAVkACAAAAAAlSP9iK31GlcG9MKGbLmq+VXMslURr+As736rrVNXcsUFcwAgAAAAAAvbj0zfq9zzi8XReheKFbCB+h9IsOLgXPPpI5vrEJNZBWMAIAAAAABb9Y2Ox3MeHCfGnMxpR7nk6hwIJg0J8/tT8fZHwxMCBQADMTc3AH0AAAAFZAAgAAAAAOJNORH8Bev97gVU7y6bznOxJ+E6Qoykur1QP76hG1/7BXMAIAAAAAC+C1PtOOrSZgzBAGhr+dPe/kR0JUw9GTwLVNr61xC1aAVjACAAAAAA4rDk/UlRHudzR12i5Ivcb5Pxwn63JX2Sjf2xrlALL7kAAzE3OAB9AAAABWQAIAAAAAAKcHzLUomavInN6upPkyWhAqYQACP/vdVCIYpiy6U6HgVzACAAAAAATsR4KItY6R2+U7Gg6sJdaEcf58gjd1OulyWovIqfxKcFYwAgAAAAAEac2NX3v0n87D/eUM/a3Gj+5Axs896SCidMyOMaOz6zAAMxNzkAfQAAAAVkACAAAAAAqTOLiMpCdR59tLZzzIPqJvbCNvz2XQL9ust0qYaehtcFcwAgAAAAAArefox/3k5xGOeiw2m6NUdzuGxmPwcu5IFcj+jMwHgHBWMAIAAAAABK08kOnYjDWhLJ70iVhd3XEj0Q0srEQttJTawBm6GuOwADMTgwAH0AAAAFZAAgAAAAAIPSqSeVzSRgNVNmrPYHmUMgykCY27NbdDUNhE5kx/SgBXMAIAAAAAAhX90nNfxyXmZe/+btZ7q6xMX4PFyj0paM1ccJ/5IUUQVjACAAAAAA/9XTbq8UVKJbIeYtfNg8pdDrpDqbCwVr5Rq3Tb4dFwwAAzE4MQB9AAAABWQAIAAAAACLn5DxiqAosHGXIAY96FwFKjeqrzXWf3VJIQMwx1fl4gVzACAAAAAAindvU27nveutopdvuHmzdENBbeGFtI3Qcsr07jxmvm8FYwAgAAAAAJ3GMHBeIaCyFV5zBgcUMpjfIDcdoaTYQSyQN2shVJMWAAMxODIAfQAAAAVkACAAAAAA7i2S6rHRSPBwZEn59yxaS7HiYBOmObIkeyCcFU42kf8FcwAgAAAAAGb3RSEyBmgarkTvyLWtOLJcPwCKbCRkESG4RZjVmY4iBWMAIAAAAABKIdcHRSJ3dnevw5Pj0BfULPqmDOA7KBDREMGv5rXUEAADMTgzAH0AAAAFZAAgAAAAAFz+M+H/Z94mdPW5oP51B4HWptp1rxcMWAjnlHvWJDWrBXMAIAAAAACBFEOQyL7ZHu4Cq33QvXkmKuH5ibG/Md3RaED9CtG5HwVjACAAAAAABf2EvIMumZohp2E/sHWEJW4VMU3ez95hwWU9pjKGRCYAAzE4NAB9AAAABWQAIAAAAAAE7c2x3Z3aM1XGfLNk/XQ9jCazNRbGhVm7H8c2NjS5ywVzACAAAAAARJ9h8fdcwA19velF3L/Wcvi2rCzewlKZ2nA0p8bT9uwFYwAgAAAAAPl1vH6JaJGZSJ8au0NK58X4gqleryub8slUhA0HRtzmAAMxODUAfQAAAAVkACAAAAAAVoRt9B9dNVvIMGN+ea5TzRzQC+lqSZ8dd/170zU5o9cFcwAgAAAAAEwM95XZin5mv2yhCI8+ugtKuvRVmNgzzIQN0yi1+9aIBWMAIAAAAAA+rgph1avVrhyHl/6G+HtASJSiJkv7u2UPYj+25fwtLAADMTg2AH0AAAAFZAAgAAAAAKRDkjyWv/etlYT4GyoXrmBED2FgZHnhc+l9Wsl06cH2BXMAIAAAAABohlpm3K850Vndf3NmNE0hHqDlNbSR8/IvMidQ3LnIZAVjACAAAAAAKw6WqWDrI7ZfyndtrftL3uh7BaF/FQxhkh8b0Zl547gAAzE4NwB9AAAABWQAIAAAAAC3DFh5oklLCNLY90bgWm68dFXz65JpAZSp1K99MBTPAQVzACAAAAAAQgZecmxEUZVHoptEQClDwAf8smI3WynQ/i+JBP0g+kQFYwAgAAAAADmUpVNJUwrydIllFvI/aau1703nk6tF2sfzkgFlWsKCAAMxODgAfQAAAAVkACAAAAAAQ5WKvWSB503qeNlOI2Tpjd5blheNr6OBO8pfJfPNstcFcwAgAAAAAKwHgQLSDJ5NwLBQbY5OnblQIsVDpGV7q3RCbFLD1U4/BWMAIAAAAADKOab9GTEiaybo9o4J72Pb9LiQU/CuKoQm6Jtlmo/IeQADMTg5AH0AAAAFZAAgAAAAAGfhFY3RGRm5ZgWRQef1tXxHBq5Y6fXaLAR4yJhrTBplBXMAIAAAAACKEF0ApLoB6lP2UqTFsTQYNc9OdDrs/vziPGzttGVLKQVjACAAAAAAvJcnqBNCZgHe+Q7Cqk+yiAaIuSJIRItXEsAlKy9PES4AAzE5MAB9AAAABWQAIAAAAAAIM73gPcgzgotYHLeMa2zAU4mFsr7CbILUZWfnuKSwagVzACAAAAAAJCSu98uV8xv88f2BIOWzt6p+6EjQStMBdkGPUkgN79cFYwAgAAAAABQqDP10DcaqEY7vxFS7U/9xcadDASONow1+xmVsPZ11AAMxOTEAfQAAAAVkACAAAAAAkWmHCUsiMy1pwZTHxVPBzPTrWFBUDqHNrVqcyyt7nO8FcwAgAAAAAMv2CebFRG/br7USELR98sIdgE9OQCRBGV5JZCO+uPMgBWMAIAAAAABxLEFq6nFVI3WaYhggo1d6oeDsBSm4wCW0T6D0yE/puwADMTkyAH0AAAAFZAAgAAAAAGInUYv0lP/rK7McM8taEHXRefk8Q2AunrvWqdfSV7UaBXMAIAAAAACE+WPxJ3gan7iRTbIxXXx+bKVcaf8kP4JD8DcwU0aL7wVjACAAAAAAiThzC5jAZ5D6xuCIsUD1aLy7KxmT2Fb8ybTkLxvj17MAAzE5MwB9AAAABWQAIAAAAACmHlg2ud3cplXlTsNTpvNnY6Qm1Fce0m899COamoDjaQVzACAAAAAArtJQeJIlepBWRU2aYar7+YGYVQ7dfDc1oxgTmA8r9q0FYwAgAAAAADZpcxtBFv18EdmhxsoxlDs253tNB5zqVY/h+nFD1isIAAMxOTQAfQAAAAVkACAAAAAApzcWSAbZWV/Rq+ylRNqqlJqNVR4fhXrz4633/MQOQgcFcwAgAAAAAN/jz/bsEleiuCl+li83EWlG6UMHA8CyaOMRKCkXkSCPBWMAIAAAAADSTHgALJugYsjIvSK1o0UwG6VizNokXQ/tnPNjOpGB1AAABWUAIAAAAADrmnP3kS2GpCl+gdL2da90KHTkBX46iQ/sZRoj7uPz7BJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAAA==",
+                    "base64": 
"DeFiAAADcGF5bG9hZACxYgAABGcAnWIAAAMwAH0AAAAFZAAgAAAAAJu2KgiI8vM+kz9qD3ZQzFQY5qbgYqCqHG5R4jAlnlwXBXMAIAAAAAAAUXxFXsz764T79sGCdhxvNd5b6E/9p61FonsHyEIhogVsACAAAAAAt19RL3Oo5ni5L8kcvgOJYLgVYyXJExwP8pkuzLG7f/kAAzEAfQAAAAVkACAAAAAAPQPvL0ARjujSv2Rkm8r7spVsgeC1K3FWcskGGZ3OdDIFcwAgAAAAACgNn660GmefR8jLqzgR1u5O+Uocx9GyEHiBqVGko5FZBWwAIAAAAADflr+fsnZngm6KRWYgHa9JzK+bXogWl9evBU9sQUHPHQADMgB9AAAABWQAIAAAAAD2Zi6kcxmaD2mY3VWrP+wYJMPg6cSBIYPapxaFQxYFdQVzACAAAAAAM/cV36BLBY3xFBXsXJY8M9EHHOc/qrmdc2CJmj3M89gFbAAgAAAAAOpydOrKxx6m2gquSDV2Vv3w10GocmNCFeOo/fRhRH9JAAMzAH0AAAAFZAAgAAAAAOaNqI9srQ/mI9gwbk+VkizGBBH/PPWOVusgnfPk3tY1BXMAIAAAAAAc96O/pwKCmHCagT6T/QV/wz4vqO+R22GsZ1dse2Vg6QVsACAAAAAAgzIak+Q3UFLTHXPmJ+MuEklFtR3eLtvM+jdKkmGCV/YAAzQAfQAAAAVkACAAAAAA0XlQgy/Yu97EQOjronl9b3dcR1DFn3deuVhtTLbJZHkFcwAgAAAAACoMnpVl6EFJak8A+t5N4RFnQhkQEBnNAx8wDqmq5U/dBWwAIAAAAACR26FJif673qpwF1J1FEkQGJ1Ywcr/ZW6JQ7meGqzt1QADNQB9AAAABWQAIAAAAAAOtpNexRxfv0yRFvZO9DhlkpU4mDuAb8ykdLnE5Vf1VAVzACAAAAAAeblFKm/30orP16uQpZslvsoS8s0xfNPIBlw3VkHeekYFbAAgAAAAAPEoHj87sYE+nBut52/LPvleWQBzB/uaJFnosxp4NRO2AAM2AH0AAAAFZAAgAAAAAIr8xAFm1zPmrvW4Vy5Ct0W8FxMmyPmFzdWVzesBhAJFBXMAIAAAAABYeeXjJEzTHwxab6pUiCRiZjxgtN59a1y8Szy3hfkg+gVsACAAAAAAJuoY4rF8mbI+nKb+5XbZShJ8191o/e8ZCRHE0O4Ey8MAAzcAfQAAAAVkACAAAAAAl+ibLk0/+EwoqeC8S8cGgAtjtpQWGEZDsybMPnrrkwEFcwAgAAAAAHPPBudWgQ+HUorLDpJMqhS9VBF2VF5aLcxgrM1s+yU7BWwAIAAAAAAcCcBR2Vyv5pAFbaOU97yovuOi1+ATDnLLcAUqHecXcAADOAB9AAAABWQAIAAAAACR9erwLTb+tcWFZgJ2MEfM0PKI9uuwIjDTHADRFgD+SQVzACAAAAAAcOop8TXsGUVQoKhzUllMYWxL93xCOkwtIpV8Q6hiSYYFbAAgAAAAAKXKmh4V8veYwob1H03Q3p3PN8SRAaQwDT34KlNVUjiDAAM5AH0AAAAFZAAgAAAAALv0vCPgh7QpmM8Ug6ad5ioZJCh7pLMdT8FYyQioBQ6KBXMAIAAAAADsCPyIG8t6ApQkRk1fX/sfc1kpuWCWP8gAEpnYoBSHrQVsACAAAAAAJe/r67N6d8uTiogvfoR9rEXbIDjyLb9EVdqkayFFGaYAAzEwAH0AAAAFZAAgAAAAAIW4AxJgYoM0pcNTwk1RSbyjZGIqgKL1hcTJmNrnZmoPBXMAIAAAAAAZpfx3EFO0vY0f1eHnE0PazgqeNDTaj+pPJMUNW8lFrAVsACAAAAAAP+Um2vwW6Bj6vuz9DKz6+6aWkoKoEmFNoiz/xXm7lOsAAzExAH0AAAAFZAAgAAAAAKliO6L9zgeuufjj174hvmQGNRbmYYs9yAirL7OxwEW3BXMAIAAAAAAqU7vs3DWUQ95Eq8OejwWnD0GuXd+ASi/uD6S0l8MM1QVsACAAAAAAb9legYzsfctBPpHyl7YWpPmLr5QiNZFND/50N1vv2MUAAzEyAH0AAAAFZAAgAAAAAOGQcCBkk+j/Kzjt/Cs6g3BZPJG81wIHBS8JewHGpgk+BXMAIAAAAABjrxZXWCkdzrExwCgyHaafuPSQ4V4x2k9kUCAqUaYKDQVsACAAAAAADBU6KefT0v8zSmseaMNmQxKjJar72y7MojLFhkEHqrUAAzEzAH0AAAAFZAAgAAAAAPmCNEt4t97waOSd5hNi2fNCdWEkmcFJ37LI9k4Az4/5BXMAIAAAAABX7DuDPNg+duvELf3NbLWkPMFw2HGLgWGHyVWcPvSNCAVsACAAAAAAS7El1FtZ5STh8Q1FguvieyYX9b2DF1DFVsb9hzxXYRsAAzE0AH0AAAAFZAAgAAAAAD4vtVUYRNB+FD9yoQ2FVJH3nMeJeKbi6eZfth638YqbBXMAIAAAAAANCuUB4OdmuD6LaDK2f3vaqfgYYvg40wDXOBbcFjTqLwVsACAAAAAA9hqC2VoJBjwR7hcQ45xO8ZVojwC83jiRacCaDj6Px2gAAzE1AH0AAAAFZAAgAAAAAJPIRzjmTjbdIvshG6UslbEOd797ZSIdjGAhGWxVQvK1BXMAIAAAAABgmJ0Jh8WLs9IYs/a7DBjDWd8J3thW/AGJK7zDnMeYOAVsACAAAAAAi9zAsyAuou2oiCUHGc6QefLUkACa9IgeBhGu9W/r0X8AAzE2AH0AAAAFZAAgAAAAAABQyKQPoW8wGPIqnsTv69+DzIdRkohRhOhDmyVHkw9WBXMAIAAAAAAqWA2X4tB/h3O1Xlawtz6ndI6WaTwgU1QYflL35opu5gVsACAAAAAAWI/Gj5aZMwDIxztqmVL0g5LBcI8EdKEc2UA28pnekQoAAzE3AH0AAAAFZAAgAAAAACB7NOyGQ1Id3MYnxtBXqyZ5Ul/lHH6p1b10U63DfT6bBXMAIAAAAADpOryIcndxztkHSfLN3Kzq29sD8djS0PspDSqERMqokQVsACAAAAAADatsMW4ezgnyi1PiP7xk+gA4AFIN/fb5uJqfVkjg4UoAAzE4AH0AAAAFZAAgAAAAAKVfXLfs8XA14CRTB56oZwV+bFJN5BHraTXbqEXZDmTkBXMAIAAAAAASRWTsfGOpqdffiOodoqIgBzG/yzFyjR5CfUsIUIWGpgVsACAAAAAAkgCHbCwyX640/0Ni8+MoYxeHUiC+FSU4Mn9jTLYtgZgAAzE5AH0AAAAFZAAgAAAAAH/aZr4EuS0/noQR9rcF8vwoaxnxrwgOsSJ0ys8PkHhGBXMAIAAAAACd7ObGQW7qfddcvyxRTkPuvq/PHu7+6I5dxwS1Lzy5XAVsACAAAAAA3q0eKdV7KeU3pc+CtfypKR7BPxwaf30yu0j9FXeOOboAAzIwAH0AAAAFZAAgAAAAAKvlcpFFNq0oA+urq3w6d80PK1HHHw0H0yVWvU9aHijXBXMAIAAAAADWnAHQ5Fhlcjawki7kWz
dqjM2f6IdGJblojrYElWjsZgVsACAAAAAAO0wvY66l24gx8nRxyVGC0QcTztIi81Kx3ndRhuZr6W4AAzIxAH0AAAAFZAAgAAAAAH/2aMezEOddrq+dNOkDrdqf13h2ttOnexZsJxG1G6PNBXMAIAAAAABNtgnibjC4VKy5poYjvdsBBnVvDTF/4mmEAxsXVgZVKgVsACAAAAAAqvadzJFLqQbs8WxgZ2D2X+XnaPSDMLCVVgWxx5jnLcYAAzIyAH0AAAAFZAAgAAAAAF2wZoDL6/V59QqO8vdRZWDpXpkV4h4KOCSn5e7x7nmzBXMAIAAAAADLZBu7LCYjbThaVUqMK14H/elrVOYIKJQCx4C9Yjw37gVsACAAAAAAEh6Vs81jLU204aGpL90fmYTm5i5R8/RT1uIbg6VU3HwAAzIzAH0AAAAFZAAgAAAAAH27yYaLn9zh2CpvaoomUPercSfJRUmBY6XFqmhcXi9QBXMAIAAAAAAUwumVlIYIs9JhDhSj0R0+59psCMsFk94E62VxkPt42QVsACAAAAAAT5x2hCCd2bpmpnyWaxas8nSxTc8e4C9DfKaqr0ABEysAAzI0AH0AAAAFZAAgAAAAALMg2kNAO4AFFs/mW3In04yFeN4AP6Vo0klyUoT06RquBXMAIAAAAAAgGWJbeIdwlpqXCyVIYSs0dt54Rfc8JF4b8uYc+YUj0AVsACAAAAAAWHeWxIkyvXTOWvfZzqtPXjfGaWWKjGSIQENTU3zBCrsAAzI1AH0AAAAFZAAgAAAAALas/i1T2DFCEmrrLEi7O2ngJZyFHialOoedVXS+OjenBXMAIAAAAAA1kK0QxY4REcGxHeMkgumyF7iwlsRFtw9MlbSSoQY7uAVsACAAAAAAUNlpMJZs1p4HfsD4Q4WZ4TBEi6Oc2fX34rzyynqWCdwAAzI2AH0AAAAFZAAgAAAAAP1TejmWg1CEuNSMt6NUgeQ5lT+oBoeyF7d2l5xQrbXWBXMAIAAAAABPX0kj6obggdJShmqtVfueKHplH4ZrXusiwrRDHMOKeQVsACAAAAAAIYOsNwC3DA7fLcOzqdr0bOFdHCfmK8tLwPoaE9uKOosAAzI3AH0AAAAFZAAgAAAAAMrKn+QPa/NxYezNhlOX9nyEkN1kE/gW7EuZkVqYl0b8BXMAIAAAAABUoZMSPUywRGfX2EEencJEKH5x/P9ySUVrhStAwgR/LgVsACAAAAAAMgZFH6lQIIDrgHnFeslv3ld20ynwQjQJt3cAp4GgrFkAAzI4AH0AAAAFZAAgAAAAAMmD1+a+oVbiUZd1HuZqdgtdVsVKwuWAn3/M1B6QGBM3BXMAIAAAAACLyytOYuZ9WEsIrrtJbXUx4QgipbaAbmlJvSZVkGi0CAVsACAAAAAA4v1lSp5H9BB+HYJ4bH43tC8aeuPZMf78Ng1JOhJh190AAzI5AH0AAAAFZAAgAAAAAOVKV7IuFwmYP1qVv8h0NvJmfPICu8yQhzjG7oJdTLDoBXMAIAAAAABL70XLfQLKRsw1deJ2MUvxSWKxpF/Ez73jqtbLvqbuogVsACAAAAAAvfgzIorXxE91dDt4nQxYfntTsx0M8Gzdsao5naQqcRUAAzMwAH0AAAAFZAAgAAAAAKS/1RSAQma+xV9rz04IcdzmavtrBDjOKPM+Z2NEyYfPBXMAIAAAAAAOJDWGORDgfRv8+w5nunh41wXb2hCA0MRzwnLnQtIqPgVsACAAAAAAf42C1+T7xdHEFF83+c2mF5S8PuuL22ogXXELnRAZ4boAAzMxAH0AAAAFZAAgAAAAAFeq8o82uNY1X8cH6OhdTzHNBUnCChsEDs5tm0kPBz3qBXMAIAAAAABaxMBbsaeEj/EDtr8nZfrhhhirBRPJwVamDo5WwbgvTQVsACAAAAAAMbH453A+BYAaDOTo5kdhV1VdND1avNwvshEG/4MIJjQAAzMyAH0AAAAFZAAgAAAAAI8IKIfDrohHh2cjspJHCovqroSr5N3QyVtNzFvT5+FzBXMAIAAAAABXHXteKG0DoOMmECKp6ro1MZNQvXGzqTDdZ0DUc8QfFAVsACAAAAAA/w5s++XYmO+9TWTbtGc3n3ndV4T9JUribIbF4jmDLSMAAzMzAH0AAAAFZAAgAAAAAJkHvm15kIu1OtAiaByj5ieWqzxiu/epK6c/9+KYIrB0BXMAIAAAAACzg5TcyANk0nes/wCJudd1BwlkWWF6zw3nGclq5v3SJQVsACAAAAAAvruXHTT3irPJLyWpI1j/Xwf2FeIE/IV+6Z49pqRzISoAAzM0AH0AAAAFZAAgAAAAAAYSOvEWWuSg1Aym7EssNLR+xsY7e9BcwsX4JKlnSHJcBXMAIAAAAABT48eY3PXVDOjw7JpNjOe1j2JyI3LjDnQoqZ8Je5B2KgVsACAAAAAAU2815RR57TQ9uDg0XjWjBkAKvf8yssxDMzrM4+FqP6AAAzM1AH0AAAAFZAAgAAAAAGQxC9L1e9DfO5XZvX1yvc3hTLtQEdKO9FPMkyg0Y9ZABXMAIAAAAADtmcMNJwdWLxQEArMGZQyzpnu+Z5yMmPAkvgq4eAKwNQVsACAAAAAAJ88zt4Y/Hoqh+zrf6KCOiUwHbOzCxSfp6k/qsZaYGEgAAzM2AH0AAAAFZAAgAAAAADLHK2LNCNRO0pv8n4fAsxwtUqCNnVK8rRgNiQfXpHSdBXMAIAAAAACf16EBIHRKD3SzjRW+LMOl+47QXA3CJhMzlcqyFRW22AVsACAAAAAAMGz4fAOa0EoVv90fUffwLjBrQhHATf+NdlgCR65vujAAAzM3AH0AAAAFZAAgAAAAAHiZJiXKNF8bbukQGsdYkEi95I+FSBHy1I5/hK2uEZruBXMAIAAAAADE+lZBa8HDUJPN+bF6xI9x4N7GF9pj3vBR7y0BcfFhBAVsACAAAAAAGIEN6sfqq30nyxW4dxDgXr/jz5HmvA9T1jx/pKCn4zgAAzM4AH0AAAAFZAAgAAAAAI1oa2OIw5TvhT14tYCGmhanUoYcCZtNbrVbeoMldHNZBXMAIAAAAAAx2nS0Ipblf2XOgBiUOuJFBupBhe7nb6QPLZlA4aMPCgVsACAAAAAA9xu828hugIgo0E3de9dZD+gTpVUGlwtDba+tw/WcbUoAAzM5AH0AAAAFZAAgAAAAABgTWS3Yap7Q59hii/uPPimHWXsr+DUmsqfwt/X73qsOBXMAIAAAAACKK05liW5KrmEAvtpCB1WUltruzUylDDpjea//UlWoOAVsACAAAAAAcgN4P/wakJ5aJK5c1bvJBqpVGND221dli2YicPFfuAYAAzQwAH0AAAAFZAAgAAAAABOAnBPXDp6i9TISQXvcNKwGDLepZTu3cKrB4vKnSCjBBXMAIAAAAADjjzZO7UowAAvpwyG8BNOVqLCccMFk3aDK4unUeft5ywVsACAAAAAA4zkCd4k9gvfXoD1C7vwTjNcdVJwEARh8h/cxZ4PNMfgAAzQxAH0AAAAFZAAgAAAAAHN8hyvT1lYrAsdiV
5GBdd5jhtrAYE/KnSjw2Ka9hjz9BXMAIAAAAAD794JK7EeXBs+D7yOVK7nWF8SbZ/7U8gZ7nnT9JFNwTAVsACAAAAAAg8Wt1HO3NhByq2ggux2a4Lo6Gryr24rEFIqh2acrwWMAAzQyAH0AAAAFZAAgAAAAAO93bPrq8bsnp1AtNd9ETnXIz0lH/2HYN/vuw9wA3fyFBXMAIAAAAABHlls5fbaF2oAGqptC481XQ4eYxInTC29aElfmVZgDUgVsACAAAAAANoQXEWpXJpgrSNK/cKi/m7oYhuSRlp1IZBF0bqTEATcAAzQzAH0AAAAFZAAgAAAAAL1YsAZm1SA0ztU6ySIrQgCCA74V6rr0/4iIygCcaJL6BXMAIAAAAADTXWTHWovGmUR1Zg9l/Aqq9H5mOCJQQrb/Dfae7e3wKAVsACAAAAAA5dunyJK6/SVfDD0t9QlNBcFqoZnf9legRjHaLSKAoQMAAzQ0AH0AAAAFZAAgAAAAAEoFAeHk0RZ9kD+cJRD3j7PcE5gzWKnyBrF1I/MDNp5mBXMAIAAAAACgHtc2hMBRSZjKw8RAdDHK+Pi1HeyjiBuAslGVNcW5tAVsACAAAAAAXzBLfq+GxRtX4Wa9fazA49DBLG6AjZm2XODStJKH8D0AAzQ1AH0AAAAFZAAgAAAAAAW+7DmSN/LX+/0uBVJDHIc2dhxAGz4+ehyyz8fAnNGoBXMAIAAAAAA6Ilw42EvvfLJ3Eq8Afd+FjPoPcQutZO6ltmCLEr8kxQVsACAAAAAAbbZalyo07BbFjPFlYmbmv0z023eT9eLkHqeVUnfUAUAAAzQ2AH0AAAAFZAAgAAAAANBdV7M7kuYO3EMoQItAbXv4t2cIhfaT9V6+s4cg9djlBXMAIAAAAABvz4MIvZWxxrcJCL5qxLfFhXiUYB1OLHdKEjco94SgDgVsACAAAAAAK2GVGvyPIKolF/ECcmfmkVcf1/IZNcaTv96N92yGrkEAAzQ3AH0AAAAFZAAgAAAAAMoAoiAn1kc79j5oPZtlMWHMhhgwNhLUnvqkqIFvcH1NBXMAIAAAAADcJTW7WiCyW0Z9YDUYwppXhLj4Ac1povpJvcAq+i48MQVsACAAAAAAIGxGDzoeB3PTmudl4+j6piQB++e33EEzuzAiXcqGxvUAAzQ4AH0AAAAFZAAgAAAAACI3j5QP7dWHpcT6WO/OhsWwRJNASBYqIBDNzW8IorEyBXMAIAAAAABxUpBSjXwCKDdGP9hYU+RvyR+96kChfvyyRC4jZmztqAVsACAAAAAAvBCHguWswb4X0xdcAryCvZgQuthXzt7597bJ5VxAMdgAAzQ5AH0AAAAFZAAgAAAAAKsbycEuQSeNrF8Qnxqw3x3og8JmQabwGqnDbqzFRVrrBXMAIAAAAACno/3ef2JZJS93SVVzmOZSN+jjJHT8s0XYq2M46d2sLAVsACAAAAAAAt5zLJG+/j4K8rnkFtAn8IvdUVNefe6utJ3rdzgwudIAAzUwAH0AAAAFZAAgAAAAAPXIcoO8TiULqlxzb74NFg+I8kWX5uXIDUPnh2DobIoMBXMAIAAAAADR6/drkdTpnr9g1XNvKDwtBRBdKn7c2c4ZNUVK5CThdQVsACAAAAAAJqOA1c6KVog3F4Hb/GfDb3jCxXDRTqpXWSbMH4ePIJsAAzUxAH0AAAAFZAAgAAAAAEa03ZOJmfHT6/nVadvIw71jVxEuIloyvxXraYEW7u7pBXMAIAAAAADzRlBJK75FLiKjz3djqcgjCLo/e3yntI3MnPS48OORhgVsACAAAAAAnQhx4Rnyj081XrLRLD5NLpWmRWCsd0M9Hl7Jl19R0h8AAzUyAH0AAAAFZAAgAAAAAKx8NLSZUU04pSSGmHa5fh2oLHsEN5mmNMNHL95/tuC9BXMAIAAAAAA59hcXVaN3MNdHoo11OcH1aPRzHCwpVjO9mGfMz4xh3QVsACAAAAAAYIPdjV2XbPj7dBeHPwnwhVU7zMuJ+xtMUW5mIOYtmdAAAzUzAH0AAAAFZAAgAAAAAHNKAUxUqBFNS9Ea9NgCZoXMWgwhP4x0/OvoaPRWMquXBXMAIAAAAABUZ551mnP4ZjX+PXU9ttomzuOpo427MVynpkyq+nsYCQVsACAAAAAALnVK5p2tTTeZEh1zYt4iqKIQT9Z0si//Hy1L85oF+5IAAzU0AH0AAAAFZAAgAAAAALfGXDlyDVcGaqtyHkLT0qpuRhJQLgCxtznazhFtuyn/BXMAIAAAAABipxlXDq14C62pXhwAeen5+syA+/C6bN4rtZYcO4zKwAVsACAAAAAAXUf0pzUq0NhLYagWDap4uEiwq5rLpcx29rWbt1NYMsMAAzU1AH0AAAAFZAAgAAAAANoEr8sheJjg4UCfBkuUzarU9NFoy1xwbXjs5ifVDeA9BXMAIAAAAABPoyTf6M+xeZVGES4aNzVlq7LgjqZXJ/QunjYVusGUEAVsACAAAAAA1hA2gMeZZPUNytk9K+lB1RCqWRudRr7GtadJlExJf8oAAzU2AH0AAAAFZAAgAAAAAKvDiK+xjlBe1uQ3SZTNQl2lClIIvpP/5CHwY6Kb3WlgBXMAIAAAAAANnxImq5MFbWaRBHdJp+yD09bVlcFtiFDYsy1eDZj+iQVsACAAAAAAWtsyO+FxMPSIezwsV1TJD8ZrXAdRnQM6DJ+f+1V3qEkAAzU3AH0AAAAFZAAgAAAAAF49IlFH9RmSUSvUQpEPUedEksrQUcjsOv44nMkwXhjzBXMAIAAAAADJtWGbk0bZzmk20obz+mNsp86UCu/nLLlbg7ppxYn7PgVsACAAAAAA3k0Tj/XgPQtcYijH8cIlQoe/VXf15q1nrZNmg7yWYEgAAzU4AH0AAAAFZAAgAAAAAOuSJyuvz50lp3BzXlFKnq62QkN2quNU1Gq1IDsnFoJCBXMAIAAAAAAqavH1d93XV3IzshWlMnzznucadBF0ND092/2ApI1AcAVsACAAAAAAzUrK4kpoKCmcpdZlZNI13fddjdoAseVe67jaX1LobIIAAzU5AH0AAAAFZAAgAAAAALtgC4Whb4ZdkCiI30zY6fwlsxSa7lEaOAU3SfUXr02XBXMAIAAAAACgdZ6U1ZVgUaZZwbIaCdlANpCw6TZV0bwg3DS1NC/mnAVsACAAAAAAzI49hdpp0PbO7S2KexISxC16sE73EUAEyuqUFAC/J48AAzYwAH0AAAAFZAAgAAAAAF6PfplcGp6vek1ThwenMHVkbZgrc/dHgdsgx1VdPqZ5BXMAIAAAAACha3qhWkqmuwJSEXPozDO8y1ZdRLyzt9Crt2vjGnT7AAVsACAAAAAA7nvcU59+LwxGupSF21jAeAE0x7JE94tjRkJfgM1yKU8AAzYxAH0AAAAFZAAgAAAAAKoLEhLvLjKc7lhOJfx+VrGJCx9tXlOSa9bxQzGR6rfbBXMAIAAAAAAIDK5wNnjRMBzET7x/KAMExL/zi1IumJM92XTgXfoPoAVsACAAAAAAFkUYWFwNr815dEdFqp+TiIoz
Dcq5IBNVkyMoDjharDQAAzYyAH0AAAAFZAAgAAAAADoQv6lutRmh5scQFvIW6K5JBquLxszuygM1tzBiGknIBXMAIAAAAADAD+JjW7FoBQ76/rsECmmcL76bmyfXpUU/awqIsZdO+wVsACAAAAAAPFHdLw3jssmEXsgtvl/RBNaUCRA1kgSwsofG364VOvQAAzYzAH0AAAAFZAAgAAAAAJNHUGAgn56KekghO19d11nai3lAh0JAlWfeP+6w4lJBBXMAIAAAAAD9XGJlvz59msJvA6St9fKW9CG4JoHV61rlWWnkdBRLzwVsACAAAAAAxwP/X/InJJHmrjznvahIMgj6pQR30B62UtHCthSjrP0AAzY0AH0AAAAFZAAgAAAAAHgYoMGjEE6fAlAhICv0+doHcVX8CmMVxyq7+jlyGrvmBXMAIAAAAAC/5MQZgTHuIr/O5Z3mXPvqrom5JTQ8IeSpQGhO9sB+8gVsACAAAAAAuPSXVmJUAUpTQg/A9Bu1hYczZF58KEhVofakygbsvJQAAzY1AH0AAAAFZAAgAAAAANpIljbxHOM7pydY877gpRQvYY2TGK7igqgGsavqGPBABXMAIAAAAAAqHyEu9gpurPOulApPnr0x9wrygY/7mXe9rAC+tPK80wVsACAAAAAA7gkPzNsS3gCxdFBWbSW9tkBjoR5ib+saDvpGSB3A3ogAAzY2AH0AAAAFZAAgAAAAAGR+gEaZTeGNgG9BuM1bX2R9ed4FCxBA9F9QvdQDAjZwBXMAIAAAAABSkrYFQ6pf8MZ1flgmeIRkxaSh/Eep4Btdx4QYnGGnwAVsACAAAAAApRovMiV00hm/pEcT4XBsyPNw0eo8RLAX/fuabjdU+uwAAzY3AH0AAAAFZAAgAAAAAFNprhQ3ZwIcYbuzLolAT5n/vc14P9kUUQComDu6eFyKBXMAIAAAAAAcx9z9pk32YbPV/sfPZl9ALIEVsqoLXgqWLVK/tP+heAVsACAAAAAA/qxvuvJbAHwwhfrPVpmCFzNvg2cU/NXaWgqgYUZpgXwAAzY4AH0AAAAFZAAgAAAAADgyPqQdqQrgfmJjRFAILTHzXbdw5kpKyfeoEcy6YYG/BXMAIAAAAAAE+3XsBQ8VAxAkN81au+f3FDeCD/s7KoZD+fnM1MJSSAVsACAAAAAAhRnjrXecwV0yeCWKJ5J/x12Xx4qVJahsCEVHB/1U2rcAAzY5AH0AAAAFZAAgAAAAAI0CT7JNngTCTUSei1Arw7eHWCD0jumv2rb7imjWIlWABXMAIAAAAABSP8t6ya0SyCphXMwnru6ZUDXWElN0NfBvEOhDvW9bJQVsACAAAAAAGWeGmBNDRaMtvm7Rv+8TJ2sJ4WNXKcp3tqpv5Se9Ut4AAzcwAH0AAAAFZAAgAAAAAD/FIrGYFDjyYmVb7oTMVwweWP7A6F9LnyIuNO4MjBnXBXMAIAAAAACIZgJCQRZu7NhuNMyOqCn1tf+DfU1qm10TPCfj5JYV3wVsACAAAAAA5hmY4ptuNxULGf87SUFXQWGAONsL9U29duh8xqsHtxoAAzcxAH0AAAAFZAAgAAAAAHIkVuNDkSS1cHIThKc/O0r2/ubaABTOi8Q1r/dvBAsEBXMAIAAAAADdHYqchEiJLM340c3Q4vJABmmth3+MKzwLYlsG6GS7sQVsACAAAAAADa+KP/pdTiG22l+ZWd30P1iHjnBF4zSNRdFm0oEK82kAAzcyAH0AAAAFZAAgAAAAAJmoDILNhC6kn3masElfnjIjP1VjsjRavGk1gSUIjh1NBXMAIAAAAAD97Ilvp3XF8T6MmVVcxMPcdL80RgQ09UoC6PnoOvZ1IQVsACAAAAAA2RK3Xng6v8kpvfVW9tkVXjpE+BSnx9/+Fw85Evs+kUEAAzczAH0AAAAFZAAgAAAAAI5bm3YO0Xgf0VT+qjVTTfvckecM3Cwqj7DTKZXf8/NXBXMAIAAAAAD/m+h8fBhWaHm6Ykuz0WX1xL4Eme3ErLObyEVJf8NCywVsACAAAAAAfb1VZZCqs2ivYbRzX4p5CtaCkKW+g20Pr57FWXzEZi8AAzc0AH0AAAAFZAAgAAAAANqo4+p6qdtCzcB4BX1wQ6llU7eFBnuu4MtZwp4B6mDlBXMAIAAAAAAGiz+VaukMZ+6IH4jtn4KWWdKK4/W+O+gRioQDrfzpMgVsACAAAAAAG4YYkTp80EKo59mlHExDodRQFR7njhR5dmISwUJ6ukAAAzc1AH0AAAAFZAAgAAAAAPrFXmHP2Y4YAm7b/aqsdn/DPoDkv7B8egWkfe23XsM1BXMAIAAAAAAGhwpKAr7skeqHm3oseSbO7qKNhmYsuUrECBxJ5k+D2AVsACAAAAAAAqPQi9luYAu3GrFCEsVjd9z2zIDcp6SPTR2w6KQEr+IAAzc2AH0AAAAFZAAgAAAAABzjYxwAjXxXc0Uxv18rH8I3my0Aguow0kTwKyxbrm+cBXMAIAAAAADVbqJVr6IdokuhXkEtXF0C2gINLiAjMVN20lE20Vmp2QVsACAAAAAAD7K1Fx4gFaaizkIUrf+EGXQeG7QX1jadhGc6Ji471H8AAzc3AH0AAAAFZAAgAAAAAFMm2feF2fFCm/UC6AfIyepX/xJDSmnnolQIBnHcPmb5BXMAIAAAAABLI11kFrQoaNVZFmq/38aRNImPOjdJh0Lo6irI8M/AaAVsACAAAAAAOWul0oVqJ9CejD2RqphhTC98DJeRQy5EwbNerU2+4l8AAzc4AH0AAAAFZAAgAAAAAJvXB3KyNiNtQko4SSzo/9b2qmM2zU9CQTTDfLSBWMgRBXMAIAAAAAAvjuVP7KsLRDeqVqRziTKpBrjVyqKiIbO9Gw8Wl2wFTAVsACAAAAAADlE+oc1ins+paNcaOZJhBlKlObDJ4VQORWjFYocM4LgAAzc5AH0AAAAFZAAgAAAAAPGdcxDiid8z8XYnfdDivNMYVPgBKdGOUw6UStU+48CdBXMAIAAAAAARj6g1Ap0eEfuCZ4X2TsEw+Djrhto3fA5nLwPaY0vCTgVsACAAAAAAoHqiwGOUkBu8SX5U1yHho+UIFdSN2MdQN5s6bQ0EsJYAAzgwAH0AAAAFZAAgAAAAAP5rGPrYGt3aKob5f/ldP0qrW7bmWvqnKY4QwdDWz400BXMAIAAAAADTQkW2ymaaf/bhteOOGmSrIR97bAnJx+yN3yMj1bTeewVsACAAAAAADyQnHGH2gF4w4L8axUsSTf6Ubk7L5/eoFOJk12MtZAoAAzgxAH0AAAAFZAAgAAAAAAlz6wJze5UkIxKpJOZFGCOf3v2KByWyI6NB6JM9wNcBBXMAIAAAAABUC7P/neUIHHoZtq0jFVBHY75tSFYr1Y5S16YN5XxC1QVsACAAAAAAgvxRbXDisNnLY3pfsjDdnFLtkvYUC4lhA68eBXc7KAwAAzgyAH0AAAAFZAAgAAAAAFJ8AtHcjia/9Y5pLEc3qVgH5xKiXw12G9Kn2A1EY8McBXMAIAAAAAAxe7Bdw7eUSBk/oAawa7u
icTEDgXLymRNhBy1LAxhDvwVsACAAAAAAxKPaIBKVx3jTA+R/el7P7AZ7efrmTGjJs3Hj/YdMddwAAzgzAH0AAAAFZAAgAAAAAO8uwQUaKFb6vqR3Sv3Wn4QAonC2exOC9lGG1juqP5DtBXMAIAAAAABZf1KyJgQg8/Rf5c02DgDK2aQu0rNCOvaL60ohDHyY+gVsACAAAAAAqyEjfKC8lYoIfoXYHUqHZPoaA6EK5BAZy5dxXZmay4kAAzg0AH0AAAAFZAAgAAAAAE8YtqyRsGCeiR6hhiyisR/hccmK4nZqIMzO4lUBmEFzBXMAIAAAAAC1UYOSKqAeG1UJiKjWFVskRhuFKpj9Ezy+lICZvFlN5AVsACAAAAAA6Ct9nNMKyRazn1OKnRKagm746CGu+jyhbL1qJnZxGi0AAzg1AH0AAAAFZAAgAAAAAPhCrMausDx1QUIEqp9rUdRKyM6a9AAx7jQ3ILIu8wNIBXMAIAAAAACmH8lotGCiF2q9VQxhsS+7LAZv79VUAsOUALaGxE/EpAVsACAAAAAAnc1xCKfdvbUEc8F7XZqlNn1C+hZTtC0I9I3LL06iaNkAAzg2AH0AAAAFZAAgAAAAAOBi/GAYFcstMSJPgp3VkMiuuUUCrZytvqYaU8dwm8v2BXMAIAAAAACEZSZVyD3pKzGlbdwlYmWQhHHTV5SnNLknl2Gw8IaUTQVsACAAAAAAfsLZsEDcWSuNsIo/TD1ReyQW75HPMgmuKZuWFOLKRLoAAzg3AH0AAAAFZAAgAAAAAIQuup+YGfH3mflzWopN8J1X8o8a0d9CSGIvrA5HOzraBXMAIAAAAADYvNLURXsC2ITMqK14LABQBI+hZZ5wNf24JMcKLW+84AVsACAAAAAACzfjbTBH7IwDU91OqLAz94RFkoqBOkzKAqQb55gT4/MAAzg4AH0AAAAFZAAgAAAAAKsh0ADyOnVocFrOrf6MpTrNvAj8iaiE923DPryu124gBXMAIAAAAADg24a8NVE1GyScc6tmnTbmu5ulzO+896fE92lN08MeswVsACAAAAAAaPxcOIxnU7But88/yadOuDJDMcCywwrRitaxMODT4msAAzg5AH0AAAAFZAAgAAAAAKkVC2Y6HtRmv72tDnPUSjJBvse7SxLqnr09/Uuj9sVVBXMAIAAAAABYNFUkH7ylPMN+Bc3HWX1e0flGYNbtJNCY9SltJCW/UAVsACAAAAAAZYK/f9H4OeihmpiFMH7Wm7uLvs2s92zNA8wyrNZTsuMAAzkwAH0AAAAFZAAgAAAAADDggcwcb/Yn1Kk39sOHsv7BO/MfP3m/AJzjGH506Wf9BXMAIAAAAAAYZIsdjICS0+BDyRUPnrSAZfPrwtuMaEDEn0/ijLNQmAVsACAAAAAAGPnYVvo2ulO9z4LGd/69NAklfIcZqZvFX2KK0s+FcTUAAzkxAH0AAAAFZAAgAAAAAEWY7dEUOJBgjOoWVht1wLehsWAzB3rSOBtLgTuM2HC8BXMAIAAAAAAAoswiHRROurjwUW8u8D5EUT+67yvrgpB/j6PzBDAfVwVsACAAAAAA6NhRTYFL/Sz4tao7vpPjLNgAJ0FX6P/IyMW65qT6YsMAAzkyAH0AAAAFZAAgAAAAAPZaapeAUUFPA7JTCMOWHJa9lnPFh0/gXfAPjA1ezm4ZBXMAIAAAAACmJvLY2nivw7/b3DOKH/X7bBXjJwoowqb1GtEFO3OYgAVsACAAAAAA/JcUoyKacCB1NfmH8vYqC1f7rd13KShrQqV2r9QBP44AAzkzAH0AAAAFZAAgAAAAAK00u6jadxCZAiA+fTsPVDsnW5p5LCr4+kZZZOTDuZlfBXMAIAAAAAAote4zTEYMDgaaQbAdN8Dzv93ljPLdGjJzvnRn3KXgtQVsACAAAAAAxXd9Mh6R3mnJy8m7UfqMKi6oD5DlZpkaOz6bEjMOdiwAAzk0AH0AAAAFZAAgAAAAAFbgabdyymiEVYYwtJSWa7lfl/oYuj/SukzJeDOR6wPVBXMAIAAAAADAFGFjS1vPbN6mQEhkDYTD6V2V23Ys9gUEUMGNvMPkaAVsACAAAAAAL/D5Sze/ZoEanZLK0IeEkhgVkxEjMWVCfmJaD3a8uNIAAzk1AH0AAAAFZAAgAAAAABNMR6UBv2E627CqLtQ/eDYx7OEwQ7JrR4mSHFa1N8tLBXMAIAAAAAAxH4gucI4UmNVB7625C6hFSVCuIpJO3lusJlPuL8H5EQVsACAAAAAAVLHNg0OUVqZ7WGOP53BkTap9FOw9dr1P4J8HxqFqU04AAzk2AH0AAAAFZAAgAAAAAG8cd6WBneNunlqrQ2EmNf35W7OGObGq9WL4ePX+LUDmBXMAIAAAAAAjJ2+sX87NSis9hBsgb1QprVRnO7Bf+GObCGoUqyPE4wVsACAAAAAAs9c9SM49/pWmyUQKslpt3RTMBNSRppfNO0JBvUqHPg0AAzk3AH0AAAAFZAAgAAAAAFWOUGkUpy8yf6gB3dio/aOfRKh7XuhvsUj48iESFJrGBXMAIAAAAAAY7sCDMcrUXvNuL6dO0m11WyijzXZvPIcOKob6IpC4PQVsACAAAAAAJOP+EHz6awDb1qK2bZQ3kTV7wsj5Daj/IGAWh4g7omAAAzk4AH0AAAAFZAAgAAAAAGUrIdKxOihwNmo6B+aG+Ag1qa0+iqdksHOjQj+Oy9bZBXMAIAAAAABwa5dbI2KmzBDNBTQBEkjZv4sPaeRkRNejcjdVymRFKQVsACAAAAAA4ml/nm0gJNTcJ4vuD+T2Qfq2fQZlibJp/j6MOGDrbHMAAzk5AH0AAAAFZAAgAAAAAOx89xV/hRk64/CkM9N2EMK6aldII0c8smdcsZ46NbP8BXMAIAAAAADBF6tfQ+7q9kTuLyuyrSnDgmrdmrXkdhl980i1KHuGHgVsACAAAAAACUqiFqHZdGbwAA+hN0YUE5zFg+H+dabIB4dj5/75W/YAAzEwMAB9AAAABWQAIAAAAADJDdC9aEFl4Y8J/awHbnXGHjfP+VXQilPHJg7ewaJI7AVzACAAAAAAE+tqRl6EcBMXvbr4GDiNIYObTsYpa1n6BJk9EjIJVicFbAAgAAAAAJVc+HYYqa0m1Hq6OiRX8c0iRnJYOt6AJAJoG0sG3GMSAAMxMDEAfQAAAAVkACAAAAAA3F9rjEKhpoHuTULVGgfUsGGwJs3bISrXkFP1v6KoQLgFcwAgAAAAAIBf0tXw96Z/Ds0XSIHX/zk3MzUR/7WZR/J6FpxRWChtBWwAIAAAAABWrjGlvKYuTS2s8L9rYy8Hf0juFGJfwQmxVIjkTmFIGQADMTAyAH0AAAAFZAAgAAAAAOYIYoWkX7dGuyKfi3XssUlc7u/gWzqrR9KMkikKVdmSBXMAIAAAAABVF2OYjRTGi9Tw8XCAwZWLpX35Yl271TlNWp6N/nROhAVsACAAAAAA0nWwYzXQ1+EkDvnGq+SMlq20z+j32Su+i/A95SggPb4AAzEwMwB9AAAABWQAIAAAAACMtPm12YtdEA
vqu6Eji1yuRXnu1RJP6h0l7pH3lSH4MwVzACAAAAAAENyCFfyUAh1veQBGx+cxiB7Sasrj41jzCGflZkB5cRMFbAAgAAAAAKdI2LMqISr/T5vuJPg6ZRBm5fVi2aQCc4ra3A4+AjbDAAMxMDQAfQAAAAVkACAAAAAAvlI4lDcs6GB1cnm/Tzo014CXWqidCdyE5t2lknWQd4QFcwAgAAAAAD60SpNc4O2KT7J0llKdSpcX1/Xxs97N715a1HsTFkmBBWwAIAAAAABuuRkJWAH1CynggBt1/5sPh9PoGiqTlS24D/OE2uHXLQADMTA1AH0AAAAFZAAgAAAAAKl8zcHJRDjSjJeV/WvMxulW1zrTFtaeBy/aKKhadc6UBXMAIAAAAADBdWQl5SBIvtZZLIHszePwkO14W1mQ0izUk2Ov21cPNAVsACAAAAAAHErCYycpqiIcCZHdmPL1hi+ovLQk4TAvENpfLdTRamQAAzEwNgB9AAAABWQAIAAAAABb6LXDWqCp1beQgQjj8I3sRTtFhlrmiBi+h/+ikmrvugVzACAAAAAA9stpgTecT7uTyaGNs3K9Bp0A7R0QaIAOfscyMXHBPX8FbAAgAAAAAHUt+McyXrJ1H8SwnHNVO181Ki8vDAM1f7XI26mg95ZDAAMxMDcAfQAAAAVkACAAAAAA97NTT+81PhDhgptNtp4epzA0tP4iNb9j1AWkiiiKGM8FcwAgAAAAAKPbHg7ise16vxmdPCzksA/2Mn/qST0L9Xe8vnQugVkcBWwAIAAAAABB0EMXfvju4JU/mUH/OvxWbPEl9NJkcEp4iCbkXI41fAADMTA4AH0AAAAFZAAgAAAAAMqpayM2XotEFmm0gwQd9rIzApy0X+7HfOhNk6VU7F5lBXMAIAAAAACJR9+q5T9qFHXFNgGbZnPubG8rkO6cwWhzITQTmd6VgwVsACAAAAAAOZLQ6o7e4mVfDzbpQioa4d3RoTvqwgnbmc5Qh2wsZuoAAzEwOQB9AAAABWQAIAAAAADQnslvt6Hm2kJPmqsTVYQHE/wWeZ4bE1XSkt7TKy0r1gVzACAAAAAA8URTA4ZMrhHPvlp53TH6FDCzS+0+61qHm5XK6UiOrKEFbAAgAAAAAHQbgTCdZcbdA0avaTmZXUKnIS7Nwf1tNrcXDCw+PdBRAAMxMTAAfQAAAAVkACAAAAAAhujlgFPFczsdCGXtQ/002Ck8YWQHHzvWvUHrkbjv4rwFcwAgAAAAALbV0lLGcSGfE7mDM3n/fgEvi+ifjl7WZ5b3aqjDNvx9BWwAIAAAAACbceTZy8E3QA1pHmPN5kTlOx3EO8kJM5PUjTVftw1VpgADMTExAH0AAAAFZAAgAAAAABm/6pF96j26Jm7z5KkY1y33zcAEXLx2n0DwC03bs/ixBXMAIAAAAAD01OMvTZI/mqMgxIhA5nLs068mW+GKl3OW3ilf2D8+LgVsACAAAAAAaLvJDrqBESTNZSdcXsd+8GXPl8ZkUsGpeYuyYVv/kygAAzExMgB9AAAABWQAIAAAAACfw9/te4GkHZAapC9sDMHHHZgmlTrccyJDPFciOMSOcwVzACAAAAAAIIC1ZpHObvmMwUfqDRPl4C1aeuHwujM1G/yJbvybMNAFbAAgAAAAAAs9x1SnVpMfNv5Bm1aXGwHmbbI9keWa9HRD35XuCBK5AAMxMTMAfQAAAAVkACAAAAAAkxHJRbnShpPOylLoDdNShfILeA1hChKFQY9qQyZ5VmsFcwAgAAAAAKidrY+rC3hTY+YWu2a7fuMH2RD/XaiTIBW1hrxNCQOJBWwAIAAAAACW0kkqMIzIFMn7g+R0MI8l15fr3k/w/mHtY5n6SYTEwAADMTE0AH0AAAAFZAAgAAAAAByuYl8dBvfaZ0LO/81JW4hYypeNmvLMaxsIdvqMPrWoBXMAIAAAAABNddwobOUJzm9HOUD8BMZJqkNCUCqstHZkC76FIdNg9AVsACAAAAAAQQOkIQtkyNavqCnhQbNg3HfqrJdsAGaoxSJePJl1qXsAAzExNQB9AAAABWQAIAAAAABxMy7X5hf7AXGDz3Y/POu1ZpkMlNcSvSP92NOO/Gs7wAVzACAAAAAAHJshWo2T5wU2zvqCyJzcJQKQaHFHpCpMc9oWBXkpUPoFbAAgAAAAAGeiJKzlUXAvL0gOlW+Hz1mSa2HsV4RGmyLmCHlzbAkoAAMxMTYAfQAAAAVkACAAAAAAlqbslixl7Zw3bRlibZbe/WmKw23k8uKeIzPKYEtbIy0FcwAgAAAAAHEKwpUxkxOfef5HYvulXPmdbzTivwdwrSYIHDeNRcpcBWwAIAAAAADuPckac21Hrg/h0kt5ShJwVEZ9rx6SOHd2+HDjqxEWTQADMTE3AH0AAAAFZAAgAAAAAMXrXx0saZ+5gORmwM2FLuZG6iuO2YS+1IGPoAtDKoKBBXMAIAAAAADIQsxCr8CfFKaBcx8kIeSywnGh7JHjKRJ9vJd9x79y7wVsACAAAAAAcvBV+SykDYhmRFyVYwFYB9oBKBSHr55Jdz2cXeowsUQAAzExOAB9AAAABWQAIAAAAAAm83FA9yDUpwkbKTihe7m53u+DivS9BU2b4vQMtCVQ2AVzACAAAAAAz3m1UB/AbZPa4QSKFDnUgHaT78+6iGOFAtouiBorEgEFbAAgAAAAAIgbpyYtJj5513Z5XYqviH/HXG/5+mqR52iBbfqMmDtZAAMxMTkAfQAAAAVkACAAAAAAJRzYK0PUwr9RPG2/7yID0WgcTJPB2Xjccp5LAPDYunkFcwAgAAAAAIIh24h3DrltAzNFhF+MEmPrZtzr1PhCofhChZqfCW+jBWwAIAAAAAAzRNXtL5o9VXMk5D5ylI0odPDJDSZZry1wfN+TedH70gADMTIwAH0AAAAFZAAgAAAAAHSaHWs/dnmI9sc7nB50VB2Bzs0kHapMHCQdyVEYY30TBXMAIAAAAACkV22lhEjWv/9/DubfHBAcwJggKI5mIbSK5L2nyqloqQVsACAAAAAAS19m7DccQxgryOsBJ3GsCs37yfQqNi1G+S6fCXpEhn4AAzEyMQB9AAAABWQAIAAAAAAC/I4TQRtCl12YZmdGz17X4GqSQgfwCPgRBwdHmdwu+QVzACAAAAAAx8f3z2ut/RAZhleari4vCEE+tNIn4ikjoUwzitfQ588FbAAgAAAAAJci0w1ZB8W2spJQ+kMpod6HSCtSR2jrabOH+B0fj3A4AAMxMjIAfQAAAAVkACAAAAAADGB5yU2XT0fse/MPWgvBvZikVxrl5pf3S5K1hceKWooFcwAgAAAAAIxTmlLHMjNaVDEfJbXvRez0SEPWFREBJCT6qTHsrljoBWwAIAAAAAAlswzAl81+0DteibwHD+CG5mZJrfHXa9NnEFRtXybzzwADMTIzAH0AAAAFZAAgAAAAABmO7QD9vxWMmFjIHz13lyOeV6vHT6mYCsWxF7hb/yOjBXMAIAAAAACT9lmgkiqzuWG24afuzYiCeK9gmJqacmxAruIukd0xEAVsA
CAAAAAAZa0/FI/GkZR7CtX18Xg9Tn9zfxkD0UoaSt+pIO5t1t4AAzEyNAB9AAAABWQAIAAAAAAfPUoy7QyZKhIIURso+mkP9qr1izbjETqF5s22GwjCjAVzACAAAAAAvLMsIDQ/go4VUxeh50UHmsvMvfx51cwyONnRD2odvC0FbAAgAAAAAKMb+1CodEalAFnDrEL1Ndt8ztamZ+9134m9Kp3GQgd+AAMxMjUAfQAAAAVkACAAAAAAE3ZqUar0Bq2zWbARE0bAv98jBlK9UJ73/xcwdMWWlSkFcwAgAAAAAK4M+MmC+9sFiFsumMyJZQKxWmmJiuG9H7IzKw083xxkBWwAIAAAAAAqkAONzhvMhkyL1D/6h7QQxEkdhC3p2WjXH+VGq5qCqQADMTI2AH0AAAAFZAAgAAAAAMo8FJiOq63cAmyk2O7eI7GcbQh/1j4RrMTqly3rexftBXMAIAAAAADjVmpd0WiRGTw/gAqEgGolt2EI7Csv14vKdmYoMD0aAgVsACAAAAAA07XQBzBUQMNw7F2/YxJjZNuPVpHTTgbLd1oGk77+bygAAzEyNwB9AAAABWQAIAAAAACu5IGaIx7A3Jvly/kzlCsSA4s3iJwuIl8jEdRH0k93NwVzACAAAAAA9NRUyxYE+t0Xyosyt6vIfMFW/vBoYg6sR+jBNs4JAxIFbAAgAAAAAAzyZ91dx+0oMlOVAjRGiMrPySikY/U9eMEB4WJb3uWtAAMxMjgAfQAAAAVkACAAAAAALkRy0GJInXYLA+cgjs6Myb0a+Gu9hgXhHvhLNoGWfckFcwAgAAAAANbALyt9zCSvwnLaWCd2/y2eoB7qkWTvv1Ldu8r40JPuBWwAIAAAAAD4Fl5bV5sz4isIE9bX+lmAp+aAKaZgVYVZeVfrItkCZAADMTI5AH0AAAAFZAAgAAAAAGoUK/DSWhT8LZhszSUqDbTrp8cSA7rdqmADKL+MILtTBXMAIAAAAABHnEE9bVa6lvhfhEMkkV2kzSSxH/sMW/FIJuw3CzWs6wVsACAAAAAAanavcBdqZxgRGKvEK95wTmeL1K1CeDSXZsXUAs81uOgAAzEzMAB9AAAABWQAIAAAAAC922ZDQE3h2fQKibGMZ9hV0WNlmrPYYSdtaSyYxsWYqgVzACAAAAAAagMovciKK6WVjIc2cCj8nK5O/gVOFFVeVAJpRp89tmQFbAAgAAAAAKcTFfPQzaFiAtSFhqbN02sCE1BKWJSrRfGN5L6oZwzkAAMxMzEAfQAAAAVkACAAAAAAtK+JqX3K/z2txjAU15DgX4y90DS2YLfIJFolCOkJJJwFcwAgAAAAAMnR5V7gfX7MNqqUdL5AkWlkhyFXaBRVNej+Rcn8lrQkBWwAIAAAAAA2cDNRXZuiC241TGRvdFyctJnrNcdbZOP9zHio81tkngADMTMyAH0AAAAFZAAgAAAAAAeGrIMK/bac6kPczxbvRYqKMkcpeI2FjdMpD91FDWIvBXMAIAAAAAAix62z1LeS8yvSXCl5gHSIomjyx76fF3S1lp9k900hygVsACAAAAAAiYwzf2m71aWFD5ajcXyW2JX2EzQOkBroTGMg29nLPYIAAzEzMwB9AAAABWQAIAAAAACphf298InM0Us4HT8o1W1MGw0D/02vd7Jh+U0h7qaFaQVzACAAAAAAFXtk7YpqsOJxsqGWSIL+YcBE96G3Zz9D31gPqDW94y8FbAAgAAAAAAOrS1KVA94rjB1jZ1pPocpCeBG+B14RzWoHqVDpp7JbAAMxMzQAfQAAAAVkACAAAAAATLDS2cuDVM3yDMuWNgk2iGKBTzPpfJMbvxVOSY39ZfcFcwAgAAAAAPT5wRi2cLHIUflXzm6EQB/m7xdThP80ir1VV/JBBqvxBWwAIAAAAAB9lEtZS0aXCFbCtSbhnis27S5IPcfWGygHW8AHn3QqzwADMTM1AH0AAAAFZAAgAAAAAJNjExiZVX7jfFGfYpQu16qxLN0YPqVU/5CQ/Y67YSinBXMAIAAAAABMpm2+6KrkRUlXzQoMPHrQmIO6dkQz66tYdfTeA3dKqQVsACAAAAAAFXobHiMLvNZuEPr8jtewCX2J93EZG3JNeyVg92fue6YAAzEzNgB9AAAABWQAIAAAAABlFkYtLCx901X6QVVMkSn6Z7k30UF4xHaA0OZJJ9bdyQVzACAAAAAATez+F9GHcGzTp7jjv4feboUNb8JCkIp4EqcPFisnq7MFbAAgAAAAACE7JvOpBgMoZ7kRd4QbxIhxukPTUxXpzhjnBHiR7XoRAAMxMzcAfQAAAAVkACAAAAAA8NJKN0IxZnruhswGQkiruv8Ih0EMwDcSZx/Xasup9dkFcwAgAAAAAKaJZRxzA+Igeydvuk6cSwUHXcrmT4PjhuPu//FslpdnBWwAIAAAAAD53Rok1Vq/PMAnXmarqoHJ0PEyYUBmVESa9hIpCv/G9QADMTM4AH0AAAAFZAAgAAAAABHxHdEClz7hbSSgE58+dWLlSMJnoPz+jFxp4bB1GmLQBXMAIAAAAAD3nSvT6aGD+A110J/NwEfp0nPutlmuB5B+wA3CC3noGAVsACAAAAAA3Apjd+TapONB7k5wBVwTWgn8t+Sq2oyyU5/+as109RcAAzEzOQB9AAAABWQAIAAAAAC/o8qW/ifk3KuJ01VFkyNLgQafxB5/bGs2G5VyyVafOwVzACAAAAAA1bMqAFGDHSl6BYNLbxApvkAv2K1/oafywiX0MDz1dGUFbAAgAAAAAHJXLlId3edFoniLD/9K2A5973MeP2Ro31flDyqm3l5QAAMxNDAAfQAAAAVkACAAAAAAY2V8I1bz3a1AxTtmED6UhdhA09huFkuuEX8R+d/WDPUFcwAgAAAAAPTVoNRiI76tcRKqd+JBBVyy4+YcKST42p0QX2BtmQ2VBWwAIAAAAACcxt9hg14WqPNiDv1MkqVljM2e2KJEv53lA17LhV6ZigADMTQxAH0AAAAFZAAgAAAAAO2kSsW0WGN9AOtK4xK2SHrGhWiaAbMEKT4iZkRpaDN/BXMAIAAAAABKGzQcPM8LT2dwOggxoWjv/1imYWabbG/G4kBw8OWaxAVsACAAAAAAC9hLK1dScQTAqg+YAG3ObdPzg2Xet57HmOFpGmyUR9UAAzE0MgB9AAAABWQAIAAAAAAiCwzNEEaH/mDam68IdDftnhthyUFdb+ZCNSBQ91WlHQVzACAAAAAA7tHyHcxCzmbJeFYZyPm4mEgkTGKOvwY4MX82OvH0Jn8FbAAgAAAAAAb5IAbZ1hXCNegQ+S+C9i/Z8y6sS8KeU04V6hXa2ml6AAMxNDMAfQAAAAVkACAAAAAAGuCHVNJSuoVkpPOnS5s89GuA+BLi2IPBUr2Bg1sWEPIFcwAgAAAAAEl1gncS5/xO7bQ/KQSstRV3rOT2SW6nV92ZANeG2SR6BWwAIAAAAAA9LOcKmhek8F2wAh8yvT/vjp2gaouuO+Hmv10lwAeWPAADMTQ0AH0AAAAFZAAgAAAAAMfxz7gEaoCdPvXr
ubDhCZUS0ARLZc1svgbXgMDlVBPgBXMAIAAAAAB6a5dDA3fuT5Vz2KvAcbUEFX/+B7Nw2p1QqbPoQ5TTuAVsACAAAAAAcf/y75UOuI62A6vWH7bYr/5Jz+nirZVYK/81trN6XOQAAzE0NQB9AAAABWQAIAAAAACnYsqF/VzmjIImC9+dqrHO1TM6lJ6fRwM0mM6Wf6paOwVzACAAAAAA5tgZzch8uDCR1ky3SllVaKVpxAlbrhvlNDTazZZRZOAFbAAgAAAAALeGiLJS4z2zhgVpxzyPdRYyACP9QzQBOob34YrIZumCAAMxNDYAfQAAAAVkACAAAAAAEC0sIVmadtW4YMuRXH7RpAhXclsd+3bmqGXCMeaT014FcwAgAAAAABPpXh0uzpsJJB+IRUNajmMB9WGwswfpw5T9xk3Xj6ANBWwAIAAAAAAmf+NYh9TZ/QRu3w/GQz66n7DtfbJijN3G7KzeL8lstAADMTQ3AH0AAAAFZAAgAAAAABaIB3n49Xm9cOafSrQsE0WCcYp8rMIO/qVwIlMF5YLRBXMAIAAAAAC9EyWJV3xOu9bzgdJ/yX+ko7qLf1u3AxNMataW2C9EzQVsACAAAAAAvVbDkLxXx2DcMLifIQ3K0IIJcLcAG9DUrNfI6aoUjNcAAzE0OAB9AAAABWQAIAAAAAA5rZItA/cocRnngYqcJ3nBXQ+l688aKz3EQyLbYYunPAVzACAAAAAAwKyA+L7TgxztPClLrIMk2JXR+w7c04N3ZOqPgjvrIvsFbAAgAAAAACzvZ33h6aWEe8hmo+1f6OXJ72FY5hvWaUuha64ZV3KFAAMxNDkAfQAAAAVkACAAAAAA3htn7oHJ0YYpIrs+Mzyh85Ys67HwAdv5LQl1mCdoMWkFcwAgAAAAAEHjCtNNLenHuSIYux6ezAHsXDaj2DlTF67ToDhDDe6HBWwAIAAAAAD+P4H0sk9jOd+7vOANt2/1Ectb+4ZRGPE8GkHWNXW3MgADMTUwAH0AAAAFZAAgAAAAAEnt18Km/nqggfIJWxzTr9r3hnXNaueG6XO9A5G11LnGBXMAIAAAAAD7QxzGMN/ard5TfFLecE6uusMmXG2+RBsBR+/NCQHUwAVsACAAAAAAQEZ1ZZ8GC8rdbg7s87OM5Gr9qkTXS9+P5DuAZxj5Gl4AAzE1MQB9AAAABWQAIAAAAAAVAKK/GoY8AACu/hyMpO4hdLq6JnEyWNzkyci9sbaD/wVzACAAAAAA2HmeqpMlvvBpV2zQTYIRmsc4MFlfHRwLof0ycJgMg/MFbAAgAAAAACdltCeWi5E/q1Li1eXLChpM2D9QQSGLBZ82NklQSc0oAAMxNTIAfQAAAAVkACAAAAAAhHyq1GQC/GiMwpYjcsfkNxolJ10ARKjIjfkW1Wipzi0FcwAgAAAAAD/uaGWxTDq87F8XZ6CrFI+RNa8yMqfSZdqK00Kj833BBWwAIAAAAAD6aEdOO0CsQGagioOCvANPCEHSpJ8BSixlPBq5ERhB7AADMTUzAH0AAAAFZAAgAAAAABAJJxHoZD+MQBWqm9UM9Dd3z5ZohIZGWRaRVRsMptKQBXMAIAAAAADrE/ca+gqj/SH4oao4wE4qn2ovoTydzcMbDbrfnUs3zAVsACAAAAAAeNCIQN6hVnGJinytQRFGlQ2ocoprXNqpia+BSxzl+uwAAzE1NAB9AAAABWQAIAAAAAAv01wz7VG9mTepjXQi6Zma+7b/OVBaKVkWNbgDLr1mFgVzACAAAAAA0I5sxz8r6wkCp5Tgvr+iL4p6MxSOq5d3e1kZG+0b7NkFbAAgAAAAAIA32v6oGkAOS96HexGouNTex+tLahtx9QF2dgGClk6WAAMxNTUAfQAAAAVkACAAAAAAWXecRwxSon68xaa9THXnRDw5ZfzARKnvvjTjtbae6T0FcwAgAAAAAPh0UfUMEo7eILCMv2tiJQe1bF9qtXq7GJtC6H5Va4fIBWwAIAAAAADqFr1ThRrTXNgIOrJWScO9mk86Ufi95IDu5gi4vP+HWQADMTU2AH0AAAAFZAAgAAAAAEY5WL8/LpX36iAB1wlQrMO/xHVjoO9BePVzbUlBYo+bBXMAIAAAAABoKcpadDXUARedDvTmzUzWPe1jTuvD0z9oIcZmKuiSXwVsACAAAAAAJuJbwuaMrAFoI+jU/IYr+k4RzAqITrOjAd3HWCpJHqEAAzE1NwB9AAAABWQAIAAAAADnJnWqsfx0xqNnqfFGCxIplVu8mXjaHTViJT9+y2RuTgVzACAAAAAAWAaSCwIXDwdYxWf2NZTly/iKVfG/KDjHUcA1BokN5sMFbAAgAAAAAJVxavipE0H4/JQvhagdytXBZ8qGooeXpkbPQ1RfYMVHAAMxNTgAfQAAAAVkACAAAAAAsPG7LaIpJvcwqcbtfFUpIjj+vpNj70Zjaw3eV9T+QYsFcwAgAAAAAJQ71zi0NlCyY8ZQs3IasJ4gB1PmWx57HpnlCf3+hmhqBWwAIAAAAACD58TO6d+71GaOoS+r73rAxliAO9GMs4Uc8JbOTmC0OwADMTU5AH0AAAAFZAAgAAAAAAGiSqKaQDakMi1W87rFAhkogfRAevnwQ41onWNUJKtuBXMAIAAAAAASgiDpXfGh7E47KkOD8MAcX8+BnDShlnU5JAGdnPdqOAVsACAAAAAAI+2TTQIgbFq4Yr3lkzGwhG/tqChP7hRAx2W0fNaH6jcAAzE2MAB9AAAABWQAIAAAAAB7L4EnhjKA5xJD3ORhH2wOA1BvpnQ+7IjRYi+jjVEaJAVzACAAAAAAuhBIm0nL3FJnVJId+7CKDASEo+l2E89Z9/5aWSITK4AFbAAgAAAAALtSICOzQDfV9d+gZuYxpEj6cCeHnKTT+2G3ceP2H65kAAMxNjEAfQAAAAVkACAAAAAAaROn1NaDZFOGEWw724dsXBAm6bgmL5i0cki6QZQNrOoFcwAgAAAAANVT8R6UvhrAlyqYlxtmnvkR4uYK/hlvyQmBu/LP6/3ZBWwAIAAAAAD+aHNMP/X+jcRHyUtrCNkk1KfMtoD3GTmShS8pWGLt+AADMTYyAH0AAAAFZAAgAAAAADqSR5e0/Th59LrauDA7OnGD1Xr3H3NokfVxzDWOFaN7BXMAIAAAAACt30faNwTWRbvmykDpiDYUOCwA6QDbBBYBFWS7rdOB4AVsACAAAAAAF7SvnjjRk5v2flFOKaBAEDvjXaL1cpjsQLtK2fv9zdQAAzE2MwB9AAAABWQAIAAAAADmtb1ZgpZjSeodPG/hIVlsnS8hoRRwRbrTVx89VwL62AVzACAAAAAAi38e1g6sEyVfSDkzZbaZXGxKI/zKNbMasOl2LYoWrq8FbAAgAAAAAALACk0KcCDN/Kv8WuazY8ORtUGkOZ5Dsm0ys1oOppp/AAMxNjQAfQAAAAVkACAAAAAAf/f7AWVgBxoKjr7YsEQ4w/fqSvuQWV2HMiA3rQ7ur0sFcwAgAAAAADkkeJozP6FFhUdRIN74H4UhIHue+eVbOs1NvbdWYFQrBWwAIAA
AAAB55FlHAkmTzAYj/TWrGkRJw2EhrVWUnZXDoMYjyfB/ZwADMTY1AH0AAAAFZAAgAAAAAI2WEOymtuFpdKi4ctanPLnlQud+yMKKb8p/nfKmIy56BXMAIAAAAADVKrJmhjr1rfF3p+T+tl7UFd1B7+BfJRk0e7a4im7ozgVsACAAAAAA5E7Ti3PnFiBQoCcb/DN7V1uM3Xd6VKiexPKntssFL7kAAzE2NgB9AAAABWQAIAAAAAAuHU9Qd79hjyvKOujGanSGDIQlxzsql8JytTZhEnPw+AVzACAAAAAAjF2gV/4+sOHVgDd/oR5wDi9zL7NGpGD+NsEpGXy/a4QFbAAgAAAAAJzMoyojYV6Ed/LpVN5zge93Odv3U7JgP7wxeRaJZGTdAAMxNjcAfQAAAAVkACAAAAAA7dQDkt3iyWYCT94d7yqUtPPwp4qkC0ddu+HFdHgVKEkFcwAgAAAAANuYvtvZBTEq4Rm9+5eb7VuFopowkrAuv86PGP8Q8/QvBWwAIAAAAACeqXoAOQOE4j0zRMlkVd8plaW0RX1npsFvB38Xmzv7sAADMTY4AH0AAAAFZAAgAAAAAAwnZSDhL4tNGYxlHPhKYB8s28dY5ScSwiKZm3UhT8U3BXMAIAAAAABDoY6dhivufTURQExyC9Gx3ocpl09bgbbQLChj3qVGbgVsACAAAAAAF+1nS7O0v85s3CCy+9HkdeoEfm2C6ZiNbPMMnSfsMHUAAzE2OQB9AAAABWQAIAAAAAC2VuRdaC4ZJmLdNOvD6R2tnvkyARteqXouJmI46V306QVzACAAAAAAMn1Z6B35wFTX9mEYAPM+IiJ5hauEwfD0CyIvBrxHg7IFbAAgAAAAAOG6DvDZkT9B/xZWmjao2AevN7MMbs3Oh9YJeSd/hZ+hAAMxNzAAfQAAAAVkACAAAAAAVerb7qVNy457rNOHOgDSKyWl5ojun7iWrv1uHPXrIZQFcwAgAAAAAIDcYS9j5z+gx0xdJj09L7876r/vjvKTi/d3bXDE3PhyBWwAIAAAAADuhVLqb1Bkrx8aNymS+bx2cL8GvLFNH4SAi690DUgnWQADMTcxAH0AAAAFZAAgAAAAAH/E44yLxKCJjuSmU9A8SEhbmkDOx1PqqtYcZtgOzJdrBXMAIAAAAABgLh9v2HjBbogrRoQ82LS6KjZQnzjxyJH4PH+F3jupSAVsACAAAAAAIlO46ehXp4TqpDV0t6op++KO+uWBFh8iFORZjmx2IjkAAzE3MgB9AAAABWQAIAAAAAAlNUdDL+f/SSQ5074mrq0JNh7CTXwTbbhsQyDwWeDVMwVzACAAAAAANIH2IlSNG0kUw4qz0budjcWn8mNR9cJlYUqPYdonucAFbAAgAAAAAJMrOUOyiu5Y3sV76zwEFct8L7+i8WGlQI2+8z2W2kzaAAMxNzMAfQAAAAVkACAAAAAASZ+CvUDtlk/R4HAQ3a+PHrKeY/8ifAfh0oXYFqliu80FcwAgAAAAAJelpzPgM65OZFt/mvGGpwibclQ49wH+1gbUGzd9OindBWwAIAAAAAD9qeDchteEpVXWcycmD9kl9449C1dOw0r60TBm5jK+cQADMTc0AH0AAAAFZAAgAAAAAN9fkoUVbvFV2vMNMAkak4gYfEnzwKI3eDM3pnDK5q3lBXMAIAAAAACnDkgVNVNUlbQ9RhR6Aot2nVy+U4km6+GHPkLr631jEAVsACAAAAAANzg/BnkvkmvOr8nS4omF+q9EG/4oisB+ul4YHi938hwAAzE3NQB9AAAABWQAIAAAAAASyK3b1nmNCMptVEGOjwoxYLLS9fYWm/Zxilqea0jpEQVzACAAAAAADDHsGrbqlKGEpxlvfyqOJKQJjwJrzsrB7k3HG0AUJbkFbAAgAAAAAKwx3S4XfDZh4+LuI9jf7XgUh5qiefNv87JD4qvVRfPSAAMxNzYAfQAAAAVkACAAAAAAlSP9iK31GlcG9MKGbLmq+VXMslURr+As736rrVNXcsUFcwAgAAAAAAvbj0zfq9zzi8XReheKFbCB+h9IsOLgXPPpI5vrEJNZBWwAIAAAAABXvoZhaQE7ogWjeBjceVkp03N20cKYP3TA8vuNsgpfAgADMTc3AH0AAAAFZAAgAAAAAOJNORH8Bev97gVU7y6bznOxJ+E6Qoykur1QP76hG1/7BXMAIAAAAAC+C1PtOOrSZgzBAGhr+dPe/kR0JUw9GTwLVNr61xC1aAVsACAAAAAAeA/L8MQIXkamaObtMPLpoDoi5FypA5WAPtMeMrgi0eQAAzE3OAB9AAAABWQAIAAAAAAKcHzLUomavInN6upPkyWhAqYQACP/vdVCIYpiy6U6HgVzACAAAAAATsR4KItY6R2+U7Gg6sJdaEcf58gjd1OulyWovIqfxKcFbAAgAAAAAFbm10ko67ahboAejQdAV0U2uA5OhZYdb8XUFJ8OL46LAAMxNzkAfQAAAAVkACAAAAAAqTOLiMpCdR59tLZzzIPqJvbCNvz2XQL9ust0qYaehtcFcwAgAAAAAArefox/3k5xGOeiw2m6NUdzuGxmPwcu5IFcj+jMwHgHBWwAIAAAAADLZGFJ7MQd5JXMgMXjqZO5LDLxcFClcXPlnRMWRn+1oAADMTgwAH0AAAAFZAAgAAAAAIPSqSeVzSRgNVNmrPYHmUMgykCY27NbdDUNhE5kx/SgBXMAIAAAAAAhX90nNfxyXmZe/+btZ7q6xMX4PFyj0paM1ccJ/5IUUQVsACAAAAAA419oHmD2W0SYoOMwhrhrp8jf68fg9hTkaRdCuVd3CN0AAzE4MQB9AAAABWQAIAAAAACLn5DxiqAosHGXIAY96FwFKjeqrzXWf3VJIQMwx1fl4gVzACAAAAAAindvU27nveutopdvuHmzdENBbeGFtI3Qcsr07jxmvm8FbAAgAAAAAPvl9pBStQvP4OGkN5v0MghUY6djm9n7XdKKfrW0l1sMAAMxODIAfQAAAAVkACAAAAAA7i2S6rHRSPBwZEn59yxaS7HiYBOmObIkeyCcFU42kf8FcwAgAAAAAGb3RSEyBmgarkTvyLWtOLJcPwCKbCRkESG4RZjVmY4iBWwAIAAAAADB2/wo5CSHR4ANtifY6ZRXNTO5+O8qP82DfAiAeanpZwADMTgzAH0AAAAFZAAgAAAAAFz+M+H/Z94mdPW5oP51B4HWptp1rxcMWAjnlHvWJDWrBXMAIAAAAACBFEOQyL7ZHu4Cq33QvXkmKuH5ibG/Md3RaED9CtG5HwVsACAAAAAAfggtJTprQ/yZzj7y5z9KvXsdeXMWP0yUXMMJqpOwI88AAzE4NAB9AAAABWQAIAAAAAAE7c2x3Z3aM1XGfLNk/XQ9jCazNRbGhVm7H8c2NjS5ywVzACAAAAAARJ9h8fdcwA19velF3L/Wcvi2rCzewlKZ2nA0p8bT9uwFbAAgAAAAAJtWe6b4wK2Hae2dZm/OEpYQnvoZjz4Sz5IgJC2wInecAAMxODUAfQAAAAVkACAAAAAAVoRt9B9dNVvIMGN+ea
5TzRzQC+lqSZ8dd/170zU5o9cFcwAgAAAAAEwM95XZin5mv2yhCI8+ugtKuvRVmNgzzIQN0yi1+9aIBWwAIAAAAAAMGBq72n00rox3uqhxSB98mkenTGCdbbUF1gXrgottzgADMTg2AH0AAAAFZAAgAAAAAKRDkjyWv/etlYT4GyoXrmBED2FgZHnhc+l9Wsl06cH2BXMAIAAAAABohlpm3K850Vndf3NmNE0hHqDlNbSR8/IvMidQ3LnIZAVsACAAAAAAW42nGHa6q2MCAaaPVwaIDfr8QLyQwjKq23onZJYsqVsAAzE4NwB9AAAABWQAIAAAAAC3DFh5oklLCNLY90bgWm68dFXz65JpAZSp1K99MBTPAQVzACAAAAAAQgZecmxEUZVHoptEQClDwAf8smI3WynQ/i+JBP0g+kQFbAAgAAAAAEUSQGVnAPISD6voD0DiBUqyWKgt2rta0tjmoe+LNt6IAAMxODgAfQAAAAVkACAAAAAAQ5WKvWSB503qeNlOI2Tpjd5blheNr6OBO8pfJfPNstcFcwAgAAAAAKwHgQLSDJ5NwLBQbY5OnblQIsVDpGV7q3RCbFLD1U4/BWwAIAAAAACQ5nED99LnpbqXZuUOUjnO2HTphEAFBjLD4OZeDEYybgADMTg5AH0AAAAFZAAgAAAAAGfhFY3RGRm5ZgWRQef1tXxHBq5Y6fXaLAR4yJhrTBplBXMAIAAAAACKEF0ApLoB6lP2UqTFsTQYNc9OdDrs/vziPGzttGVLKQVsACAAAAAArOO6FyfNRyBi0sPT5iye7M8d16MTLcwRfodZq4uCYKEAAzE5MAB9AAAABWQAIAAAAAAIM73gPcgzgotYHLeMa2zAU4mFsr7CbILUZWfnuKSwagVzACAAAAAAJCSu98uV8xv88f2BIOWzt6p+6EjQStMBdkGPUkgN79cFbAAgAAAAAMGqPGMPxXbmYbVfSa/japvUljht1zZT33TY7ZjAiuPfAAMxOTEAfQAAAAVkACAAAAAAkWmHCUsiMy1pwZTHxVPBzPTrWFBUDqHNrVqcyyt7nO8FcwAgAAAAAMv2CebFRG/br7USELR98sIdgE9OQCRBGV5JZCO+uPMgBWwAIAAAAABt7qSmn3gxJu7aswsbUiwvO+G6lXj/Xhx+J/zQyZxzLAADMTkyAH0AAAAFZAAgAAAAAGInUYv0lP/rK7McM8taEHXRefk8Q2AunrvWqdfSV7UaBXMAIAAAAACE+WPxJ3gan7iRTbIxXXx+bKVcaf8kP4JD8DcwU0aL7wVsACAAAAAAUC4eTprX4DUZn2X+UXYU6QjtiXk+u57yoOPBbPQUmDkAAzE5MwB9AAAABWQAIAAAAACmHlg2ud3cplXlTsNTpvNnY6Qm1Fce0m899COamoDjaQVzACAAAAAArtJQeJIlepBWRU2aYar7+YGYVQ7dfDc1oxgTmA8r9q0FbAAgAAAAAOk45vg5VqZHAFCO3i0Z52SZi5RADf8NXwf68T5yad/DAAMxOTQAfQAAAAVkACAAAAAApzcWSAbZWV/Rq+ylRNqqlJqNVR4fhXrz4633/MQOQgcFcwAgAAAAAN/jz/bsEleiuCl+li83EWlG6UMHA8CyaOMRKCkXkSCPBWwAIAAAAAC3Sd+Qg+uFDKpGZHbrQgokXHQ1az1aFl4YK343OB6hcQAAEmNtAAAAAAAAAAAAABBwYXlsb2FkSWQAAAAAABBmaXJzdE9wZXJhdG9yAAEAAAAA",
                    "subType": "06"
                  }
                }
@@ -294,7 +290,7 @@
             },
             "u": {
               "$set": {
-                "encryptedDecimal": {
+                "encryptedDecimalNoPrecision": {
                   "$$type": "binData"
                 }
               }
@@ -306,7 +302,6 @@
       "schema": {
         "default.default": {
           "escCollection": "enxcol_.default.esc",
-          "eccCollection": "enxcol_.default.ecc",
           "ecocCollection": "enxcol_.default.ecoc",
           "fields": [
             {
@@ -316,7 +311,7 @@
                   "subType": "04"
                 }
               },
-              "path": "encryptedDecimal",
+              "path": "encryptedDecimalNoPrecision",
              "bsonType": "decimal",
              "queries": {
                "queryType": "rangePreview",
@@ -330,24 +325,6 @@
             }
           ]
         }
-      },
-      "deleteTokens": {
-        "default.default": {
-          "encryptedDecimal": {
-            "e": {
-              "$binary": {
-                "base64": "65pz95EthqQpfoHS9nWvdCh05AV+OokP7GUaI+7j8+w=",
-                "subType": "00"
-              }
-            },
-            "o": {
-              "$binary": {
-                "base64": "noN+05JsuO1oDg59yypIGj45i+eFH6HOTXOPpeZ//Mk=",
-                "subType": "00"
-              }
-            }
-          }
-        }
-      }
     }
   },
   "$db": "default"
@@ -362,7 +339,7 @@
         "_id": {
           "$numberInt": "0"
         },
-        "encryptedDecimal": {
+        "encryptedDecimalNoPrecision": {
           "$$type": "binData"
         },
         "__safeContent__": [
@@ -1146,7 +1123,7 @@
         "_id": {
           "$numberInt": "1"
         },
-        "encryptedDecimal": {
+        "encryptedDecimalNoPrecision": {
           "$$type": "binData"
         },
         "__safeContent__": [
diff --git a/test/client-side-encryption/spec/legacy/fle2-Range-DecimalPrecision-Aggregate.json b/test/client-side-encryption/spec/legacy/fle2v2-Range-DecimalPrecision-Aggregate.json
similarity index 86%
rename from test/client-side-encryption/spec/legacy/fle2-Range-DecimalPrecision-Aggregate.json
rename to test/client-side-encryption/spec/legacy/fle2v2-Range-DecimalPrecision-Aggregate.json
index a3e605d1bb..801beefe18 100644
--- a/test/client-side-encryption/spec/legacy/fle2-Range-DecimalPrecision-Aggregate.json
+++ b/test/client-side-encryption/spec/legacy/fle2v2-Range-DecimalPrecision-Aggregate.json
@@ -1,7 +1,8 @@
 {
   "runOn": [
     {
-      "minServerVersion": "6.2.0",
+      "minServerVersion": "7.0.0",
+      "serverless": "forbid",
       "topology": [
         "replicaset",
         "sharded",
@@ -13,9 +14,6 @@
   "collection_name": "default",
   "data": [],
   "encrypted_fields": {
-    "escCollection": "enxcol_.default.esc",
-    "eccCollection": "enxcol_.default.ecc",
-    "ecocCollection": "enxcol_.default.ecoc",
     "fields": [
       {
         "keyId": {
@@ -207,7 +205,6 @@
       "schema": {
         "default.default": {
           "escCollection": "enxcol_.default.esc",
-          "eccCollection": "enxcol_.default.ecc",
           "ecocCollection": "enxcol_.default.ecoc",
           "fields": [
             {
@@ -264,7 +261,6 @@
       "schema": {
         "default.default": {
           "escCollection": "enxcol_.default.esc",
-          "eccCollection": "enxcol_.default.ecc",
           "ecocCollection": "enxcol_.default.ecoc",
           "fields": [
             {
@@ -313,7 +309,7 @@
               "encryptedDecimalPrecision": {
                 "$gt": {
                   "$binary": {
-                    "base64": "CvoJAAADcGF5bG9hZADKCQAABGcAjgkAAAMwAH0AAAAFZAAgAAAAAHdJ2Vnb4MMzqVYVssjSdDy8XU4GVzMTfGifGETgQ2mYBXMAIAAAAAD7cFfKJGIXo6PjyeX2ria02CckW7dWFDoY/3FyBdm1NQVjACAAAAAAqZO+/+gRWlPaMOvuiXizSmBe7lp1VWg1vJ4UmW8o3bQAAzEAfQAAAAVkACAAAAAAlmvfDrZoydUet4eCVMq7z6a58Ea+1HLJOWxN5lNcrWEFcwAgAAAAAEBo5AWZyC41b9ayjWNQSL4iYEAIwR/JG+ssN8bdoK9RBWMAIAAAAAD4FTKJ6CTzKBAyAwZCLUoDEfnZTRZmhF1q/2hnDzmG9gADMgB9AAAABWQAIAAAAABbiLaoxAA6rinMJw1hC8ZUiq6UU1AQaPFn/py/Y06WuQVzACAAAAAAhtDasFkvYE7SCNu1je/hxdE9TJtAvvH3NtdEbKzNbCUFYwAgAAAAAHHy019aPatHTST+0wGsmukUcsQNQj6KpoS9b7iGeThAAAMzAH0AAAAFZAAgAAAAALORWwSr+tYNxcil2KIGSbNhTHvcPbdj+rLVQNx21S/KBXMAIAAAAAD6diZBkPEJ1cQy06LAxdbNK8Nlxbb44fH4Wk3Y3260nQVjACAAAAAAvUc1q7pyhjU0ilgmwiKkHIY3V4/LxO+Y2uT7eSpBOs8AAzQAfQAAAAVkACAAAAAAG5XMK96PjClNlUvg82j4pMY1YxsznZfj4uNweD394FoFcwAgAAAAAKHgQLdGJHkrfFg9nB93Ac+3VgBw6aU44MTkKIQ91dZoBWMAIAAAAACtbNc1DCoUUyzlkrYmJi4NlwOqLYmb6au4pDc8clXVXwADNQB9AAAABWQAIAAAAAA6M++b9I0YFemmWBAWAE3glu2Ah3Ta1FBxAQEIWS0toAVzACAAAAAANXYTqPf1Y6X3Ns6YQIX0C3FKCyWUo+Kk+fNcQvc0WSoFYwAgAAAAAAaqju6Dv8wqXxcsIbP67V1QGaD5kNTFofZ9Zuf1LGnKAAM2AH0AAAAFZAAgAAAAAKl8bV1riH/uyJ+X0HHd3+18k2cJl2dQFXCdoagutFcaBXMAIAAAAABm8F2Ew9f0VOABdcF+lP0Bi+zWvEUPniWgrxPq/Sx3uwVjACAAAAAAQd2pWVqlmmLg8m8xbs7yLewmR0Z6UQgXofbCsMHaGSoAAzcAfQAAAAVkACAAAAAAsYZD8JEP6kYsPncFnNZwJxhu4YtUTKPNcjHtv67H+rYFcwAgAAAAAI4LqZcRkvbs/2F62Flu0pixNcor4WmBD0DHGaf039wLBWMAIAAAAAAqzpfyBpr4Ano+nFWJyyTuIJelJgiRDnMHQqdeqV8JaAADOAB9AAAABWQAIAAAAAC0vdAi+dmoIXvZ5LqUqvyKV9/tHqSI2SWiSJO5pTnA2wVzACAAAAAAS2qvf9fvfVUH5WtsVxjxmskpGjYTQV34LwvQQw1y9wIFYwAgAAAAAFhNY4qwNntyA+GIoNHZsTkIUbPgy4TBlvNnTPjp4bMFAAM5AH0AAAAFZAAgAAAAAH31lb/srBcrOXkzddCwAnclsR5/3QijEVgECs2JjOWBBXMAIAAAAABg7+prDT73YcCvLE5QbuIrqGcjLc5pQD2Miq0d29yrxgVjACAAAAAAFKqAqXG/ktejFQ7fM2aobO2VmEvZLXnRaJH97Jy/sJYAAzEwAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVjACAAAAAAfsofSP7nQHv8ic8ZW0aNlWxplS46Z+mywPR4rQk+wcgAAzExAH0AAAAFZAAgAAAAAFdthRhe2Q8CvxGIhjTJZv0Lk97GkHciTPxZ/mckLoNaBXMAIAAAAAAqOxsAr23LOVB0DIHbPf9UDJJRFXY2YoKbjhRqw5psbQVjACAAAAAA7ty+Nif6KjS3v1zWKaHX9n4Zj3XC4ajuCduKNIYr3l8AAzEyAH0AAAAFZAAgAAAAAMWymwwbvIeMqmnKWWifUqoCxOsdpnonM2qdLPyjqJO/BXMAIAAAAAB6IDmmpUhBD2zpRj8/y/kmOSXcjuIU14sNh6GKSsg2uwVjACAAAAAABSWO0Ii+NGcsHZQ9MR5EjPXVKeXlI4FQ1pcxeKDiuooAAzEzAH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVjACAAAAAAD51NYesiO4Fo7w7iWBfqAFxEqkaDVctpvzZ28nT4SE8AAzE0AH0AAAAFZAAgAAAAAJaRYmo8zqI2BEUzdSwp4tVRpPmVWsfydkYN3UHh6TMuBXMAIAAAAAAeD6mDnQeLlbC9i0sVgE8+RH6y+e94OJQ0tJ0PvblVSgVjACAAAAAAKUhYSt4nvvUfbNgPJ2E79SciVZ0ZzbzoZ2nKr4ewNLsAAzE1AH0AAAAFZAAgAAAAAAZZ538coNPwyRjhEwr5P8Xw32oWOJF+R+nfCGgy2qO3BXMAIAAAAACOPLnJlKwGNPDBReRKnHfteq0wFb3ezhr
c7BVXs8RUHwVjACAAAAAAzCICkPZAkfTiD0MUt155dIPgLJ4/e0qFTM2FR0U261YAAzE2AH0AAAAFZAAgAAAAAFH9l9GGA1I52atJV5jNUf1lx8jBjoEoVoME97v5GFJiBXMAIAAAAAC1qH3Kd78Dr9NGbw7y9D/XYBwv5h1LLO8la5OU7g8UkQVjACAAAAAAn27H0Mpwatgc1R/4nXSRjsG2PzB0ol5YR9f3mCb2y/0AAzE3AH0AAAAFZAAgAAAAADtbVEI2tdkrowEMdkacD2w0Y3T3Ofi7PH6HmA6sP0c/BXMAIAAAAADuBSROnZHA+NgUPH8d0LnWFiDsM2bY8bzjC1+elSsIygVjACAAAAAAMinHEu4wkbeOpdZbXQ94q5o5pIEubqXUDrTRYOGmJC0AAzE4AH0AAAAFZAAgAAAAAAh3WpeMVlikPFYj9hLj+fmIqVt6omCSF75W3TPExyWpBXMAIAAAAAAsQkRmwqeVj2gGE03orb6PtrIzDt6dDU3hgSQi8E2wKgVjACAAAAAAvlZo8Qj3eAdxzZxN5sHKhxi+a9Npj7cZC5+pE6qrOawAAAVlACAAAAAA65pz95EthqQpfoHS9nWvdCh05AV+OokP7GUaI+7j8+wSY20AAAAAAAAAAAAAEHBheWxvYWRJZAAAAAAAEGZpcnN0T3BlcmF0b3IAAQAAAAA=",
+                    "base64": "DdIJAAADcGF5bG9hZACiCQAABGcAjgkAAAMwAH0AAAAFZAAgAAAAAHdJ2Vnb4MMzqVYVssjSdDy8XU4GVzMTfGifGETgQ2mYBXMAIAAAAAD7cFfKJGIXo6PjyeX2ria02CckW7dWFDoY/3FyBdm1NQVsACAAAAAAhEPSNv4M023A3hzNFuy83+hIKuZ2mKRY954N++aEOBUAAzEAfQAAAAVkACAAAAAAlmvfDrZoydUet4eCVMq7z6a58Ea+1HLJOWxN5lNcrWEFcwAgAAAAAEBo5AWZyC41b9ayjWNQSL4iYEAIwR/JG+ssN8bdoK9RBWwAIAAAAACEndE0SLxFSElOrNnqeX0EPmgDio3udZjVREy4JLS3sQADMgB9AAAABWQAIAAAAABbiLaoxAA6rinMJw1hC8ZUiq6UU1AQaPFn/py/Y06WuQVzACAAAAAAhtDasFkvYE7SCNu1je/hxdE9TJtAvvH3NtdEbKzNbCUFbAAgAAAAAIGepU1RSCF8sWODHEpKglsoqw3VBBH4a/URGxgGzbq2AAMzAH0AAAAFZAAgAAAAALORWwSr+tYNxcil2KIGSbNhTHvcPbdj+rLVQNx21S/KBXMAIAAAAAD6diZBkPEJ1cQy06LAxdbNK8Nlxbb44fH4Wk3Y3260nQVsACAAAAAA1eYAZBFHlDiaDAljWi8blGQ2nvvZa5AO5doeo0SFZsgAAzQAfQAAAAVkACAAAAAAG5XMK96PjClNlUvg82j4pMY1YxsznZfj4uNweD394FoFcwAgAAAAAKHgQLdGJHkrfFg9nB93Ac+3VgBw6aU44MTkKIQ91dZoBWwAIAAAAAAPxXmi+SDJ+40A0KdwfRczexlZQrHjIA+D3oUB0EY9tAADNQB9AAAABWQAIAAAAAA6M++b9I0YFemmWBAWAE3glu2Ah3Ta1FBxAQEIWS0toAVzACAAAAAANXYTqPf1Y6X3Ns6YQIX0C3FKCyWUo+Kk+fNcQvc0WSoFbAAgAAAAAA+uJUw1ICYgyeygSRe206VTWVtUnhdci3iHbyP5YtEVAAM2AH0AAAAFZAAgAAAAAKl8bV1riH/uyJ+X0HHd3+18k2cJl2dQFXCdoagutFcaBXMAIAAAAABm8F2Ew9f0VOABdcF+lP0Bi+zWvEUPniWgrxPq/Sx3uwVsACAAAAAAJfFErjZ6BPhsw5LjJLqNtKDLJ4zV0eIZppQpd9b0wZoAAzcAfQAAAAVkACAAAAAAsYZD8JEP6kYsPncFnNZwJxhu4YtUTKPNcjHtv67H+rYFcwAgAAAAAI4LqZcRkvbs/2F62Flu0pixNcor4WmBD0DHGaf039wLBWwAIAAAAAD4wUR3xd9lKltcqqo8LYvdMQWzCRobkV/ppKB/yn5dUgADOAB9AAAABWQAIAAAAAC0vdAi+dmoIXvZ5LqUqvyKV9/tHqSI2SWiSJO5pTnA2wVzACAAAAAAS2qvf9fvfVUH5WtsVxjxmskpGjYTQV34LwvQQw1y9wIFbAAgAAAAAE0+FKuK7HxbypvCeEJzMTcjOWE0ScYOlTBMUNlIv55hAAM5AH0AAAAFZAAgAAAAAH31lb/srBcrOXkzddCwAnclsR5/3QijEVgECs2JjOWBBXMAIAAAAABg7+prDT73YcCvLE5QbuIrqGcjLc5pQD2Miq0d29yrxgVsACAAAAAAetRiPwDSFWBzpWSWkOKWM6fKStRJ8SyObnpc79ux8p0AAzEwAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzExAH0AAAAFZAAgAAAAAFdthRhe2Q8CvxGIhjTJZv0Lk97GkHciTPxZ/mckLoNaBXMAIAAAAAAqOxsAr23LOVB0DIHbPf9UDJJRFXY2YoKbjhRqw5psbQVsACAAAAAA0G2GD8ZQjDBntjLpW4rqwKRS6HiUjL03g1N6chANozcAAzEyAH0AAAAFZAAgAAAAAMWymwwbvIeMqmnKWWifUqoCxOsdpnonM2qdLPyjqJO/BXMAIAAAAAB6IDmmpUhBD2zpRj8/y/kmOSXcjuIU14sNh6GKSsg2uwVsACAAAAAAWMFPNOk3EMSQDS9JGPSMIQP0oNGVugxXKKUrIPPlhHgAAzEzAH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzE0AH0AAAAFZAAgAAAAAJaRYmo8zqI2BEUzdSwp4tVRpPmVWsfydkYN3UHh6TMuBXMAIAAAAAAeD6mDnQeLlbC9i0sVgE8+RH6y+e94OJQ0tJ0PvblVSgVsACAAAAAAWp4jvretbDEsqEMzP/WLTnwOiJwCtfrCiB6m8k+yEMoAAzE1AH0AAAAFZAAgAAAAAAZZ538coNPwyRjhEwr5P8Xw32oWOJF+R+nfCGgy2qO3BXMAIAAAAACOPLnJlKwGNPDBReRKnHfteq0wFb3ezhrc7BVXs8RUHwVsACAAAAAA+lGesNk3+SyB/60rSvdQ2aN2vfJPR7llJVhufGTNhHkAAzE2AH0AAAAFZAAgAAAAAFH9l9GGA1I52atJV5jNUf1lx8jBjoEoVoME97v5GFJiBX
MAIAAAAAC1qH3Kd78Dr9NGbw7y9D/XYBwv5h1LLO8la5OU7g8UkQVsACAAAAAArZ6atJCYrVfHB8dSNPOFf6nnDADBMJcIEj8ljPvxHp8AAzE3AH0AAAAFZAAgAAAAADtbVEI2tdkrowEMdkacD2w0Y3T3Ofi7PH6HmA6sP0c/BXMAIAAAAADuBSROnZHA+NgUPH8d0LnWFiDsM2bY8bzjC1+elSsIygVsACAAAAAAR0G2m+uANoWknkr/NerFcG+fECVxNIs0cqbY1t/U/0MAAzE4AH0AAAAFZAAgAAAAAAh3WpeMVlikPFYj9hLj+fmIqVt6omCSF75W3TPExyWpBXMAIAAAAAAsQkRmwqeVj2gGE03orb6PtrIzDt6dDU3hgSQi8E2wKgVsACAAAAAA3GHaRE2RAcaBRd8VzmYzWeBD2Gmy91eTK1k8YdWObZcAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAAA==", "subType": "06" } } @@ -327,7 +323,6 @@ "schema": { "default.default": { "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", "ecocCollection": "enxcol_.default.ecoc", "fields": [ { diff --git a/test/client-side-encryption/spec/legacy/fle2-Range-DecimalPrecision-Correctness.json b/test/client-side-encryption/spec/legacy/fle2v2-Range-DecimalPrecision-Correctness.json similarity index 99% rename from test/client-side-encryption/spec/legacy/fle2-Range-DecimalPrecision-Correctness.json rename to test/client-side-encryption/spec/legacy/fle2v2-Range-DecimalPrecision-Correctness.json index 9fafc243d6..b8a6953611 100644 --- a/test/client-side-encryption/spec/legacy/fle2-Range-DecimalPrecision-Correctness.json +++ b/test/client-side-encryption/spec/legacy/fle2v2-Range-DecimalPrecision-Correctness.json @@ -1,7 +1,8 @@ { "runOn": [ { - "minServerVersion": "6.2.0", + "minServerVersion": "7.0.0", + "serverless": "forbid", "topology": [ "replicaset", "sharded", @@ -13,9 +14,6 @@ "collection_name": "default", "data": [], "encrypted_fields": { - "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", - "ecocCollection": "enxcol_.default.ecoc", "fields": [ { "keyId": { diff --git a/test/client-side-encryption/spec/legacy/fle2-Range-DecimalPrecision-Delete.json b/test/client-side-encryption/spec/legacy/fle2v2-Range-DecimalPrecision-Delete.json similarity index 81% rename from test/client-side-encryption/spec/legacy/fle2-Range-DecimalPrecision-Delete.json rename to test/client-side-encryption/spec/legacy/fle2v2-Range-DecimalPrecision-Delete.json index 3d7d359af6..1abb59bfd1 100644 --- a/test/client-side-encryption/spec/legacy/fle2-Range-DecimalPrecision-Delete.json +++ b/test/client-side-encryption/spec/legacy/fle2v2-Range-DecimalPrecision-Delete.json @@ -1,7 +1,8 @@ { "runOn": [ { - "minServerVersion": "6.2.0", + "minServerVersion": "7.0.0", + "serverless": "forbid", "topology": [ "replicaset", "sharded", @@ -13,9 +14,6 @@ "collection_name": "default", "data": [], "encrypted_fields": { - "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", - "ecocCollection": "enxcol_.default.ecoc", "fields": [ { "keyId": { @@ -198,7 +196,6 @@ "schema": { "default.default": { "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", "ecocCollection": "enxcol_.default.ecoc", "fields": [ { @@ -255,7 +252,6 @@ "schema": { "default.default": { "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", "ecocCollection": "enxcol_.default.ecoc", "fields": [ { @@ -304,7 +300,7 @@ "encryptedDecimalPrecision": { "$gt": { "$binary": { - "base64": 
"CvoJAAADcGF5bG9hZADKCQAABGcAjgkAAAMwAH0AAAAFZAAgAAAAAHdJ2Vnb4MMzqVYVssjSdDy8XU4GVzMTfGifGETgQ2mYBXMAIAAAAAD7cFfKJGIXo6PjyeX2ria02CckW7dWFDoY/3FyBdm1NQVjACAAAAAAqZO+/+gRWlPaMOvuiXizSmBe7lp1VWg1vJ4UmW8o3bQAAzEAfQAAAAVkACAAAAAAlmvfDrZoydUet4eCVMq7z6a58Ea+1HLJOWxN5lNcrWEFcwAgAAAAAEBo5AWZyC41b9ayjWNQSL4iYEAIwR/JG+ssN8bdoK9RBWMAIAAAAAD4FTKJ6CTzKBAyAwZCLUoDEfnZTRZmhF1q/2hnDzmG9gADMgB9AAAABWQAIAAAAABbiLaoxAA6rinMJw1hC8ZUiq6UU1AQaPFn/py/Y06WuQVzACAAAAAAhtDasFkvYE7SCNu1je/hxdE9TJtAvvH3NtdEbKzNbCUFYwAgAAAAAHHy019aPatHTST+0wGsmukUcsQNQj6KpoS9b7iGeThAAAMzAH0AAAAFZAAgAAAAALORWwSr+tYNxcil2KIGSbNhTHvcPbdj+rLVQNx21S/KBXMAIAAAAAD6diZBkPEJ1cQy06LAxdbNK8Nlxbb44fH4Wk3Y3260nQVjACAAAAAAvUc1q7pyhjU0ilgmwiKkHIY3V4/LxO+Y2uT7eSpBOs8AAzQAfQAAAAVkACAAAAAAG5XMK96PjClNlUvg82j4pMY1YxsznZfj4uNweD394FoFcwAgAAAAAKHgQLdGJHkrfFg9nB93Ac+3VgBw6aU44MTkKIQ91dZoBWMAIAAAAACtbNc1DCoUUyzlkrYmJi4NlwOqLYmb6au4pDc8clXVXwADNQB9AAAABWQAIAAAAAA6M++b9I0YFemmWBAWAE3glu2Ah3Ta1FBxAQEIWS0toAVzACAAAAAANXYTqPf1Y6X3Ns6YQIX0C3FKCyWUo+Kk+fNcQvc0WSoFYwAgAAAAAAaqju6Dv8wqXxcsIbP67V1QGaD5kNTFofZ9Zuf1LGnKAAM2AH0AAAAFZAAgAAAAAKl8bV1riH/uyJ+X0HHd3+18k2cJl2dQFXCdoagutFcaBXMAIAAAAABm8F2Ew9f0VOABdcF+lP0Bi+zWvEUPniWgrxPq/Sx3uwVjACAAAAAAQd2pWVqlmmLg8m8xbs7yLewmR0Z6UQgXofbCsMHaGSoAAzcAfQAAAAVkACAAAAAAsYZD8JEP6kYsPncFnNZwJxhu4YtUTKPNcjHtv67H+rYFcwAgAAAAAI4LqZcRkvbs/2F62Flu0pixNcor4WmBD0DHGaf039wLBWMAIAAAAAAqzpfyBpr4Ano+nFWJyyTuIJelJgiRDnMHQqdeqV8JaAADOAB9AAAABWQAIAAAAAC0vdAi+dmoIXvZ5LqUqvyKV9/tHqSI2SWiSJO5pTnA2wVzACAAAAAAS2qvf9fvfVUH5WtsVxjxmskpGjYTQV34LwvQQw1y9wIFYwAgAAAAAFhNY4qwNntyA+GIoNHZsTkIUbPgy4TBlvNnTPjp4bMFAAM5AH0AAAAFZAAgAAAAAH31lb/srBcrOXkzddCwAnclsR5/3QijEVgECs2JjOWBBXMAIAAAAABg7+prDT73YcCvLE5QbuIrqGcjLc5pQD2Miq0d29yrxgVjACAAAAAAFKqAqXG/ktejFQ7fM2aobO2VmEvZLXnRaJH97Jy/sJYAAzEwAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVjACAAAAAAfsofSP7nQHv8ic8ZW0aNlWxplS46Z+mywPR4rQk+wcgAAzExAH0AAAAFZAAgAAAAAFdthRhe2Q8CvxGIhjTJZv0Lk97GkHciTPxZ/mckLoNaBXMAIAAAAAAqOxsAr23LOVB0DIHbPf9UDJJRFXY2YoKbjhRqw5psbQVjACAAAAAA7ty+Nif6KjS3v1zWKaHX9n4Zj3XC4ajuCduKNIYr3l8AAzEyAH0AAAAFZAAgAAAAAMWymwwbvIeMqmnKWWifUqoCxOsdpnonM2qdLPyjqJO/BXMAIAAAAAB6IDmmpUhBD2zpRj8/y/kmOSXcjuIU14sNh6GKSsg2uwVjACAAAAAABSWO0Ii+NGcsHZQ9MR5EjPXVKeXlI4FQ1pcxeKDiuooAAzEzAH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVjACAAAAAAD51NYesiO4Fo7w7iWBfqAFxEqkaDVctpvzZ28nT4SE8AAzE0AH0AAAAFZAAgAAAAAJaRYmo8zqI2BEUzdSwp4tVRpPmVWsfydkYN3UHh6TMuBXMAIAAAAAAeD6mDnQeLlbC9i0sVgE8+RH6y+e94OJQ0tJ0PvblVSgVjACAAAAAAKUhYSt4nvvUfbNgPJ2E79SciVZ0ZzbzoZ2nKr4ewNLsAAzE1AH0AAAAFZAAgAAAAAAZZ538coNPwyRjhEwr5P8Xw32oWOJF+R+nfCGgy2qO3BXMAIAAAAACOPLnJlKwGNPDBReRKnHfteq0wFb3ezhrc7BVXs8RUHwVjACAAAAAAzCICkPZAkfTiD0MUt155dIPgLJ4/e0qFTM2FR0U261YAAzE2AH0AAAAFZAAgAAAAAFH9l9GGA1I52atJV5jNUf1lx8jBjoEoVoME97v5GFJiBXMAIAAAAAC1qH3Kd78Dr9NGbw7y9D/XYBwv5h1LLO8la5OU7g8UkQVjACAAAAAAn27H0Mpwatgc1R/4nXSRjsG2PzB0ol5YR9f3mCb2y/0AAzE3AH0AAAAFZAAgAAAAADtbVEI2tdkrowEMdkacD2w0Y3T3Ofi7PH6HmA6sP0c/BXMAIAAAAADuBSROnZHA+NgUPH8d0LnWFiDsM2bY8bzjC1+elSsIygVjACAAAAAAMinHEu4wkbeOpdZbXQ94q5o5pIEubqXUDrTRYOGmJC0AAzE4AH0AAAAFZAAgAAAAAAh3WpeMVlikPFYj9hLj+fmIqVt6omCSF75W3TPExyWpBXMAIAAAAAAsQkRmwqeVj2gGE03orb6PtrIzDt6dDU3hgSQi8E2wKgVjACAAAAAAvlZo8Qj3eAdxzZxN5sHKhxi+a9Npj7cZC5+pE6qrOawAAAVlACAAAAAA65pz95EthqQpfoHS9nWvdCh05AV+OokP7GUaI+7j8+wSY20AAAAAAAAAAAAAEHBheWxvYWRJZAAAAAAAEGZpcnN0T3BlcmF0b3IAAQAAAAA=", + "base64": 
"DdIJAAADcGF5bG9hZACiCQAABGcAjgkAAAMwAH0AAAAFZAAgAAAAAHdJ2Vnb4MMzqVYVssjSdDy8XU4GVzMTfGifGETgQ2mYBXMAIAAAAAD7cFfKJGIXo6PjyeX2ria02CckW7dWFDoY/3FyBdm1NQVsACAAAAAAhEPSNv4M023A3hzNFuy83+hIKuZ2mKRY954N++aEOBUAAzEAfQAAAAVkACAAAAAAlmvfDrZoydUet4eCVMq7z6a58Ea+1HLJOWxN5lNcrWEFcwAgAAAAAEBo5AWZyC41b9ayjWNQSL4iYEAIwR/JG+ssN8bdoK9RBWwAIAAAAACEndE0SLxFSElOrNnqeX0EPmgDio3udZjVREy4JLS3sQADMgB9AAAABWQAIAAAAABbiLaoxAA6rinMJw1hC8ZUiq6UU1AQaPFn/py/Y06WuQVzACAAAAAAhtDasFkvYE7SCNu1je/hxdE9TJtAvvH3NtdEbKzNbCUFbAAgAAAAAIGepU1RSCF8sWODHEpKglsoqw3VBBH4a/URGxgGzbq2AAMzAH0AAAAFZAAgAAAAALORWwSr+tYNxcil2KIGSbNhTHvcPbdj+rLVQNx21S/KBXMAIAAAAAD6diZBkPEJ1cQy06LAxdbNK8Nlxbb44fH4Wk3Y3260nQVsACAAAAAA1eYAZBFHlDiaDAljWi8blGQ2nvvZa5AO5doeo0SFZsgAAzQAfQAAAAVkACAAAAAAG5XMK96PjClNlUvg82j4pMY1YxsznZfj4uNweD394FoFcwAgAAAAAKHgQLdGJHkrfFg9nB93Ac+3VgBw6aU44MTkKIQ91dZoBWwAIAAAAAAPxXmi+SDJ+40A0KdwfRczexlZQrHjIA+D3oUB0EY9tAADNQB9AAAABWQAIAAAAAA6M++b9I0YFemmWBAWAE3glu2Ah3Ta1FBxAQEIWS0toAVzACAAAAAANXYTqPf1Y6X3Ns6YQIX0C3FKCyWUo+Kk+fNcQvc0WSoFbAAgAAAAAA+uJUw1ICYgyeygSRe206VTWVtUnhdci3iHbyP5YtEVAAM2AH0AAAAFZAAgAAAAAKl8bV1riH/uyJ+X0HHd3+18k2cJl2dQFXCdoagutFcaBXMAIAAAAABm8F2Ew9f0VOABdcF+lP0Bi+zWvEUPniWgrxPq/Sx3uwVsACAAAAAAJfFErjZ6BPhsw5LjJLqNtKDLJ4zV0eIZppQpd9b0wZoAAzcAfQAAAAVkACAAAAAAsYZD8JEP6kYsPncFnNZwJxhu4YtUTKPNcjHtv67H+rYFcwAgAAAAAI4LqZcRkvbs/2F62Flu0pixNcor4WmBD0DHGaf039wLBWwAIAAAAAD4wUR3xd9lKltcqqo8LYvdMQWzCRobkV/ppKB/yn5dUgADOAB9AAAABWQAIAAAAAC0vdAi+dmoIXvZ5LqUqvyKV9/tHqSI2SWiSJO5pTnA2wVzACAAAAAAS2qvf9fvfVUH5WtsVxjxmskpGjYTQV34LwvQQw1y9wIFbAAgAAAAAE0+FKuK7HxbypvCeEJzMTcjOWE0ScYOlTBMUNlIv55hAAM5AH0AAAAFZAAgAAAAAH31lb/srBcrOXkzddCwAnclsR5/3QijEVgECs2JjOWBBXMAIAAAAABg7+prDT73YcCvLE5QbuIrqGcjLc5pQD2Miq0d29yrxgVsACAAAAAAetRiPwDSFWBzpWSWkOKWM6fKStRJ8SyObnpc79ux8p0AAzEwAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzExAH0AAAAFZAAgAAAAAFdthRhe2Q8CvxGIhjTJZv0Lk97GkHciTPxZ/mckLoNaBXMAIAAAAAAqOxsAr23LOVB0DIHbPf9UDJJRFXY2YoKbjhRqw5psbQVsACAAAAAA0G2GD8ZQjDBntjLpW4rqwKRS6HiUjL03g1N6chANozcAAzEyAH0AAAAFZAAgAAAAAMWymwwbvIeMqmnKWWifUqoCxOsdpnonM2qdLPyjqJO/BXMAIAAAAAB6IDmmpUhBD2zpRj8/y/kmOSXcjuIU14sNh6GKSsg2uwVsACAAAAAAWMFPNOk3EMSQDS9JGPSMIQP0oNGVugxXKKUrIPPlhHgAAzEzAH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzE0AH0AAAAFZAAgAAAAAJaRYmo8zqI2BEUzdSwp4tVRpPmVWsfydkYN3UHh6TMuBXMAIAAAAAAeD6mDnQeLlbC9i0sVgE8+RH6y+e94OJQ0tJ0PvblVSgVsACAAAAAAWp4jvretbDEsqEMzP/WLTnwOiJwCtfrCiB6m8k+yEMoAAzE1AH0AAAAFZAAgAAAAAAZZ538coNPwyRjhEwr5P8Xw32oWOJF+R+nfCGgy2qO3BXMAIAAAAACOPLnJlKwGNPDBReRKnHfteq0wFb3ezhrc7BVXs8RUHwVsACAAAAAA+lGesNk3+SyB/60rSvdQ2aN2vfJPR7llJVhufGTNhHkAAzE2AH0AAAAFZAAgAAAAAFH9l9GGA1I52atJV5jNUf1lx8jBjoEoVoME97v5GFJiBXMAIAAAAAC1qH3Kd78Dr9NGbw7y9D/XYBwv5h1LLO8la5OU7g8UkQVsACAAAAAArZ6atJCYrVfHB8dSNPOFf6nnDADBMJcIEj8ljPvxHp8AAzE3AH0AAAAFZAAgAAAAADtbVEI2tdkrowEMdkacD2w0Y3T3Ofi7PH6HmA6sP0c/BXMAIAAAAADuBSROnZHA+NgUPH8d0LnWFiDsM2bY8bzjC1+elSsIygVsACAAAAAAR0G2m+uANoWknkr/NerFcG+fECVxNIs0cqbY1t/U/0MAAzE4AH0AAAAFZAAgAAAAAAh3WpeMVlikPFYj9hLj+fmIqVt6omCSF75W3TPExyWpBXMAIAAAAAAsQkRmwqeVj2gGE03orb6PtrIzDt6dDU3hgSQi8E2wKgVsACAAAAAA3GHaRE2RAcaBRd8VzmYzWeBD2Gmy91eTK1k8YdWObZcAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAAA==", "subType": "06" } } @@ -313,12 +309,12 @@ "limit": 1 } ], + "ordered": true, "encryptionInformation": { "type": 1, "schema": { "default.default": { "escCollection": "enxcol_.default.esc", - 
"eccCollection": "enxcol_.default.ecc", "ecocCollection": "enxcol_.default.ecoc", "fields": [ { @@ -351,24 +347,6 @@ } ] } - }, - "deleteTokens": { - "default.default": { - "encryptedDecimalPrecision": { - "e": { - "$binary": { - "base64": "65pz95EthqQpfoHS9nWvdCh05AV+OokP7GUaI+7j8+w=", - "subType": "00" - } - }, - "o": { - "$binary": { - "base64": "noN+05JsuO1oDg59yypIGj45i+eFH6HOTXOPpeZ//Mk=", - "subType": "00" - } - } - } - } } } }, diff --git a/test/client-side-encryption/spec/legacy/fle2-Range-DecimalPrecision-FindOneAndUpdate.json b/test/client-side-encryption/spec/legacy/fle2v2-Range-DecimalPrecision-FindOneAndUpdate.json similarity index 84% rename from test/client-side-encryption/spec/legacy/fle2-Range-DecimalPrecision-FindOneAndUpdate.json rename to test/client-side-encryption/spec/legacy/fle2v2-Range-DecimalPrecision-FindOneAndUpdate.json index b1442c3a3c..8d763431fa 100644 --- a/test/client-side-encryption/spec/legacy/fle2-Range-DecimalPrecision-FindOneAndUpdate.json +++ b/test/client-side-encryption/spec/legacy/fle2v2-Range-DecimalPrecision-FindOneAndUpdate.json @@ -1,7 +1,8 @@ { "runOn": [ { - "minServerVersion": "6.2.0", + "minServerVersion": "7.0.0", + "serverless": "forbid", "topology": [ "replicaset", "sharded", @@ -13,9 +14,6 @@ "collection_name": "default", "data": [], "encrypted_fields": { - "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", - "ecocCollection": "enxcol_.default.ecoc", "fields": [ { "keyId": { @@ -209,7 +207,6 @@ "schema": { "default.default": { "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", "ecocCollection": "enxcol_.default.ecoc", "fields": [ { @@ -266,7 +263,6 @@ "schema": { "default.default": { "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", "ecocCollection": "enxcol_.default.ecoc", "fields": [ { @@ -313,7 +309,7 @@ "encryptedDecimalPrecision": { "$gt": { "$binary": { - "base64": 
"CvoJAAADcGF5bG9hZADKCQAABGcAjgkAAAMwAH0AAAAFZAAgAAAAAHdJ2Vnb4MMzqVYVssjSdDy8XU4GVzMTfGifGETgQ2mYBXMAIAAAAAD7cFfKJGIXo6PjyeX2ria02CckW7dWFDoY/3FyBdm1NQVjACAAAAAAqZO+/+gRWlPaMOvuiXizSmBe7lp1VWg1vJ4UmW8o3bQAAzEAfQAAAAVkACAAAAAAlmvfDrZoydUet4eCVMq7z6a58Ea+1HLJOWxN5lNcrWEFcwAgAAAAAEBo5AWZyC41b9ayjWNQSL4iYEAIwR/JG+ssN8bdoK9RBWMAIAAAAAD4FTKJ6CTzKBAyAwZCLUoDEfnZTRZmhF1q/2hnDzmG9gADMgB9AAAABWQAIAAAAABbiLaoxAA6rinMJw1hC8ZUiq6UU1AQaPFn/py/Y06WuQVzACAAAAAAhtDasFkvYE7SCNu1je/hxdE9TJtAvvH3NtdEbKzNbCUFYwAgAAAAAHHy019aPatHTST+0wGsmukUcsQNQj6KpoS9b7iGeThAAAMzAH0AAAAFZAAgAAAAALORWwSr+tYNxcil2KIGSbNhTHvcPbdj+rLVQNx21S/KBXMAIAAAAAD6diZBkPEJ1cQy06LAxdbNK8Nlxbb44fH4Wk3Y3260nQVjACAAAAAAvUc1q7pyhjU0ilgmwiKkHIY3V4/LxO+Y2uT7eSpBOs8AAzQAfQAAAAVkACAAAAAAG5XMK96PjClNlUvg82j4pMY1YxsznZfj4uNweD394FoFcwAgAAAAAKHgQLdGJHkrfFg9nB93Ac+3VgBw6aU44MTkKIQ91dZoBWMAIAAAAACtbNc1DCoUUyzlkrYmJi4NlwOqLYmb6au4pDc8clXVXwADNQB9AAAABWQAIAAAAAA6M++b9I0YFemmWBAWAE3glu2Ah3Ta1FBxAQEIWS0toAVzACAAAAAANXYTqPf1Y6X3Ns6YQIX0C3FKCyWUo+Kk+fNcQvc0WSoFYwAgAAAAAAaqju6Dv8wqXxcsIbP67V1QGaD5kNTFofZ9Zuf1LGnKAAM2AH0AAAAFZAAgAAAAAKl8bV1riH/uyJ+X0HHd3+18k2cJl2dQFXCdoagutFcaBXMAIAAAAABm8F2Ew9f0VOABdcF+lP0Bi+zWvEUPniWgrxPq/Sx3uwVjACAAAAAAQd2pWVqlmmLg8m8xbs7yLewmR0Z6UQgXofbCsMHaGSoAAzcAfQAAAAVkACAAAAAAsYZD8JEP6kYsPncFnNZwJxhu4YtUTKPNcjHtv67H+rYFcwAgAAAAAI4LqZcRkvbs/2F62Flu0pixNcor4WmBD0DHGaf039wLBWMAIAAAAAAqzpfyBpr4Ano+nFWJyyTuIJelJgiRDnMHQqdeqV8JaAADOAB9AAAABWQAIAAAAAC0vdAi+dmoIXvZ5LqUqvyKV9/tHqSI2SWiSJO5pTnA2wVzACAAAAAAS2qvf9fvfVUH5WtsVxjxmskpGjYTQV34LwvQQw1y9wIFYwAgAAAAAFhNY4qwNntyA+GIoNHZsTkIUbPgy4TBlvNnTPjp4bMFAAM5AH0AAAAFZAAgAAAAAH31lb/srBcrOXkzddCwAnclsR5/3QijEVgECs2JjOWBBXMAIAAAAABg7+prDT73YcCvLE5QbuIrqGcjLc5pQD2Miq0d29yrxgVjACAAAAAAFKqAqXG/ktejFQ7fM2aobO2VmEvZLXnRaJH97Jy/sJYAAzEwAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVjACAAAAAAfsofSP7nQHv8ic8ZW0aNlWxplS46Z+mywPR4rQk+wcgAAzExAH0AAAAFZAAgAAAAAFdthRhe2Q8CvxGIhjTJZv0Lk97GkHciTPxZ/mckLoNaBXMAIAAAAAAqOxsAr23LOVB0DIHbPf9UDJJRFXY2YoKbjhRqw5psbQVjACAAAAAA7ty+Nif6KjS3v1zWKaHX9n4Zj3XC4ajuCduKNIYr3l8AAzEyAH0AAAAFZAAgAAAAAMWymwwbvIeMqmnKWWifUqoCxOsdpnonM2qdLPyjqJO/BXMAIAAAAAB6IDmmpUhBD2zpRj8/y/kmOSXcjuIU14sNh6GKSsg2uwVjACAAAAAABSWO0Ii+NGcsHZQ9MR5EjPXVKeXlI4FQ1pcxeKDiuooAAzEzAH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVjACAAAAAAD51NYesiO4Fo7w7iWBfqAFxEqkaDVctpvzZ28nT4SE8AAzE0AH0AAAAFZAAgAAAAAJaRYmo8zqI2BEUzdSwp4tVRpPmVWsfydkYN3UHh6TMuBXMAIAAAAAAeD6mDnQeLlbC9i0sVgE8+RH6y+e94OJQ0tJ0PvblVSgVjACAAAAAAKUhYSt4nvvUfbNgPJ2E79SciVZ0ZzbzoZ2nKr4ewNLsAAzE1AH0AAAAFZAAgAAAAAAZZ538coNPwyRjhEwr5P8Xw32oWOJF+R+nfCGgy2qO3BXMAIAAAAACOPLnJlKwGNPDBReRKnHfteq0wFb3ezhrc7BVXs8RUHwVjACAAAAAAzCICkPZAkfTiD0MUt155dIPgLJ4/e0qFTM2FR0U261YAAzE2AH0AAAAFZAAgAAAAAFH9l9GGA1I52atJV5jNUf1lx8jBjoEoVoME97v5GFJiBXMAIAAAAAC1qH3Kd78Dr9NGbw7y9D/XYBwv5h1LLO8la5OU7g8UkQVjACAAAAAAn27H0Mpwatgc1R/4nXSRjsG2PzB0ol5YR9f3mCb2y/0AAzE3AH0AAAAFZAAgAAAAADtbVEI2tdkrowEMdkacD2w0Y3T3Ofi7PH6HmA6sP0c/BXMAIAAAAADuBSROnZHA+NgUPH8d0LnWFiDsM2bY8bzjC1+elSsIygVjACAAAAAAMinHEu4wkbeOpdZbXQ94q5o5pIEubqXUDrTRYOGmJC0AAzE4AH0AAAAFZAAgAAAAAAh3WpeMVlikPFYj9hLj+fmIqVt6omCSF75W3TPExyWpBXMAIAAAAAAsQkRmwqeVj2gGE03orb6PtrIzDt6dDU3hgSQi8E2wKgVjACAAAAAAvlZo8Qj3eAdxzZxN5sHKhxi+a9Npj7cZC5+pE6qrOawAAAVlACAAAAAA65pz95EthqQpfoHS9nWvdCh05AV+OokP7GUaI+7j8+wSY20AAAAAAAAAAAAAEHBheWxvYWRJZAAAAAAAEGZpcnN0T3BlcmF0b3IAAQAAAAA=", + "base64": 
"DdIJAAADcGF5bG9hZACiCQAABGcAjgkAAAMwAH0AAAAFZAAgAAAAAHdJ2Vnb4MMzqVYVssjSdDy8XU4GVzMTfGifGETgQ2mYBXMAIAAAAAD7cFfKJGIXo6PjyeX2ria02CckW7dWFDoY/3FyBdm1NQVsACAAAAAAhEPSNv4M023A3hzNFuy83+hIKuZ2mKRY954N++aEOBUAAzEAfQAAAAVkACAAAAAAlmvfDrZoydUet4eCVMq7z6a58Ea+1HLJOWxN5lNcrWEFcwAgAAAAAEBo5AWZyC41b9ayjWNQSL4iYEAIwR/JG+ssN8bdoK9RBWwAIAAAAACEndE0SLxFSElOrNnqeX0EPmgDio3udZjVREy4JLS3sQADMgB9AAAABWQAIAAAAABbiLaoxAA6rinMJw1hC8ZUiq6UU1AQaPFn/py/Y06WuQVzACAAAAAAhtDasFkvYE7SCNu1je/hxdE9TJtAvvH3NtdEbKzNbCUFbAAgAAAAAIGepU1RSCF8sWODHEpKglsoqw3VBBH4a/URGxgGzbq2AAMzAH0AAAAFZAAgAAAAALORWwSr+tYNxcil2KIGSbNhTHvcPbdj+rLVQNx21S/KBXMAIAAAAAD6diZBkPEJ1cQy06LAxdbNK8Nlxbb44fH4Wk3Y3260nQVsACAAAAAA1eYAZBFHlDiaDAljWi8blGQ2nvvZa5AO5doeo0SFZsgAAzQAfQAAAAVkACAAAAAAG5XMK96PjClNlUvg82j4pMY1YxsznZfj4uNweD394FoFcwAgAAAAAKHgQLdGJHkrfFg9nB93Ac+3VgBw6aU44MTkKIQ91dZoBWwAIAAAAAAPxXmi+SDJ+40A0KdwfRczexlZQrHjIA+D3oUB0EY9tAADNQB9AAAABWQAIAAAAAA6M++b9I0YFemmWBAWAE3glu2Ah3Ta1FBxAQEIWS0toAVzACAAAAAANXYTqPf1Y6X3Ns6YQIX0C3FKCyWUo+Kk+fNcQvc0WSoFbAAgAAAAAA+uJUw1ICYgyeygSRe206VTWVtUnhdci3iHbyP5YtEVAAM2AH0AAAAFZAAgAAAAAKl8bV1riH/uyJ+X0HHd3+18k2cJl2dQFXCdoagutFcaBXMAIAAAAABm8F2Ew9f0VOABdcF+lP0Bi+zWvEUPniWgrxPq/Sx3uwVsACAAAAAAJfFErjZ6BPhsw5LjJLqNtKDLJ4zV0eIZppQpd9b0wZoAAzcAfQAAAAVkACAAAAAAsYZD8JEP6kYsPncFnNZwJxhu4YtUTKPNcjHtv67H+rYFcwAgAAAAAI4LqZcRkvbs/2F62Flu0pixNcor4WmBD0DHGaf039wLBWwAIAAAAAD4wUR3xd9lKltcqqo8LYvdMQWzCRobkV/ppKB/yn5dUgADOAB9AAAABWQAIAAAAAC0vdAi+dmoIXvZ5LqUqvyKV9/tHqSI2SWiSJO5pTnA2wVzACAAAAAAS2qvf9fvfVUH5WtsVxjxmskpGjYTQV34LwvQQw1y9wIFbAAgAAAAAE0+FKuK7HxbypvCeEJzMTcjOWE0ScYOlTBMUNlIv55hAAM5AH0AAAAFZAAgAAAAAH31lb/srBcrOXkzddCwAnclsR5/3QijEVgECs2JjOWBBXMAIAAAAABg7+prDT73YcCvLE5QbuIrqGcjLc5pQD2Miq0d29yrxgVsACAAAAAAetRiPwDSFWBzpWSWkOKWM6fKStRJ8SyObnpc79ux8p0AAzEwAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzExAH0AAAAFZAAgAAAAAFdthRhe2Q8CvxGIhjTJZv0Lk97GkHciTPxZ/mckLoNaBXMAIAAAAAAqOxsAr23LOVB0DIHbPf9UDJJRFXY2YoKbjhRqw5psbQVsACAAAAAA0G2GD8ZQjDBntjLpW4rqwKRS6HiUjL03g1N6chANozcAAzEyAH0AAAAFZAAgAAAAAMWymwwbvIeMqmnKWWifUqoCxOsdpnonM2qdLPyjqJO/BXMAIAAAAAB6IDmmpUhBD2zpRj8/y/kmOSXcjuIU14sNh6GKSsg2uwVsACAAAAAAWMFPNOk3EMSQDS9JGPSMIQP0oNGVugxXKKUrIPPlhHgAAzEzAH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzE0AH0AAAAFZAAgAAAAAJaRYmo8zqI2BEUzdSwp4tVRpPmVWsfydkYN3UHh6TMuBXMAIAAAAAAeD6mDnQeLlbC9i0sVgE8+RH6y+e94OJQ0tJ0PvblVSgVsACAAAAAAWp4jvretbDEsqEMzP/WLTnwOiJwCtfrCiB6m8k+yEMoAAzE1AH0AAAAFZAAgAAAAAAZZ538coNPwyRjhEwr5P8Xw32oWOJF+R+nfCGgy2qO3BXMAIAAAAACOPLnJlKwGNPDBReRKnHfteq0wFb3ezhrc7BVXs8RUHwVsACAAAAAA+lGesNk3+SyB/60rSvdQ2aN2vfJPR7llJVhufGTNhHkAAzE2AH0AAAAFZAAgAAAAAFH9l9GGA1I52atJV5jNUf1lx8jBjoEoVoME97v5GFJiBXMAIAAAAAC1qH3Kd78Dr9NGbw7y9D/XYBwv5h1LLO8la5OU7g8UkQVsACAAAAAArZ6atJCYrVfHB8dSNPOFf6nnDADBMJcIEj8ljPvxHp8AAzE3AH0AAAAFZAAgAAAAADtbVEI2tdkrowEMdkacD2w0Y3T3Ofi7PH6HmA6sP0c/BXMAIAAAAADuBSROnZHA+NgUPH8d0LnWFiDsM2bY8bzjC1+elSsIygVsACAAAAAAR0G2m+uANoWknkr/NerFcG+fECVxNIs0cqbY1t/U/0MAAzE4AH0AAAAFZAAgAAAAAAh3WpeMVlikPFYj9hLj+fmIqVt6omCSF75W3TPExyWpBXMAIAAAAAAsQkRmwqeVj2gGE03orb6PtrIzDt6dDU3hgSQi8E2wKgVsACAAAAAA3GHaRE2RAcaBRd8VzmYzWeBD2Gmy91eTK1k8YdWObZcAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAAA==", "subType": "06" } } @@ -331,7 +327,6 @@ "schema": { "default.default": { "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", "ecocCollection": 
"enxcol_.default.ecoc", "fields": [ { @@ -364,24 +359,6 @@ } ] } - }, - "deleteTokens": { - "default.default": { - "encryptedDecimalPrecision": { - "e": { - "$binary": { - "base64": "65pz95EthqQpfoHS9nWvdCh05AV+OokP7GUaI+7j8+w=", - "subType": "00" - } - }, - "o": { - "$binary": { - "base64": "noN+05JsuO1oDg59yypIGj45i+eFH6HOTXOPpeZ//Mk=", - "subType": "00" - } - } - } - } } } }, diff --git a/test/client-side-encryption/spec/legacy/fle2-Range-DecimalPrecision-InsertFind.json b/test/client-side-encryption/spec/legacy/fle2v2-Range-DecimalPrecision-InsertFind.json similarity index 86% rename from test/client-side-encryption/spec/legacy/fle2-Range-DecimalPrecision-InsertFind.json rename to test/client-side-encryption/spec/legacy/fle2v2-Range-DecimalPrecision-InsertFind.json index 3b8202ff87..5407fba18b 100644 --- a/test/client-side-encryption/spec/legacy/fle2-Range-DecimalPrecision-InsertFind.json +++ b/test/client-side-encryption/spec/legacy/fle2v2-Range-DecimalPrecision-InsertFind.json @@ -1,7 +1,8 @@ { "runOn": [ { - "minServerVersion": "6.2.0", + "minServerVersion": "7.0.0", + "serverless": "forbid", "topology": [ "replicaset", "sharded", @@ -13,9 +14,6 @@ "collection_name": "default", "data": [], "encrypted_fields": { - "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", - "ecocCollection": "enxcol_.default.ecoc", "fields": [ { "keyId": { @@ -203,7 +201,6 @@ "schema": { "default.default": { "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", "ecocCollection": "enxcol_.default.ecoc", "fields": [ { @@ -260,7 +257,6 @@ "schema": { "default.default": { "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", "ecocCollection": "enxcol_.default.ecoc", "fields": [ { @@ -307,7 +303,7 @@ "encryptedDecimalPrecision": { "$gt": { "$binary": { - "base64": 
"CvoJAAADcGF5bG9hZADKCQAABGcAjgkAAAMwAH0AAAAFZAAgAAAAAHdJ2Vnb4MMzqVYVssjSdDy8XU4GVzMTfGifGETgQ2mYBXMAIAAAAAD7cFfKJGIXo6PjyeX2ria02CckW7dWFDoY/3FyBdm1NQVjACAAAAAAqZO+/+gRWlPaMOvuiXizSmBe7lp1VWg1vJ4UmW8o3bQAAzEAfQAAAAVkACAAAAAAlmvfDrZoydUet4eCVMq7z6a58Ea+1HLJOWxN5lNcrWEFcwAgAAAAAEBo5AWZyC41b9ayjWNQSL4iYEAIwR/JG+ssN8bdoK9RBWMAIAAAAAD4FTKJ6CTzKBAyAwZCLUoDEfnZTRZmhF1q/2hnDzmG9gADMgB9AAAABWQAIAAAAABbiLaoxAA6rinMJw1hC8ZUiq6UU1AQaPFn/py/Y06WuQVzACAAAAAAhtDasFkvYE7SCNu1je/hxdE9TJtAvvH3NtdEbKzNbCUFYwAgAAAAAHHy019aPatHTST+0wGsmukUcsQNQj6KpoS9b7iGeThAAAMzAH0AAAAFZAAgAAAAALORWwSr+tYNxcil2KIGSbNhTHvcPbdj+rLVQNx21S/KBXMAIAAAAAD6diZBkPEJ1cQy06LAxdbNK8Nlxbb44fH4Wk3Y3260nQVjACAAAAAAvUc1q7pyhjU0ilgmwiKkHIY3V4/LxO+Y2uT7eSpBOs8AAzQAfQAAAAVkACAAAAAAG5XMK96PjClNlUvg82j4pMY1YxsznZfj4uNweD394FoFcwAgAAAAAKHgQLdGJHkrfFg9nB93Ac+3VgBw6aU44MTkKIQ91dZoBWMAIAAAAACtbNc1DCoUUyzlkrYmJi4NlwOqLYmb6au4pDc8clXVXwADNQB9AAAABWQAIAAAAAA6M++b9I0YFemmWBAWAE3glu2Ah3Ta1FBxAQEIWS0toAVzACAAAAAANXYTqPf1Y6X3Ns6YQIX0C3FKCyWUo+Kk+fNcQvc0WSoFYwAgAAAAAAaqju6Dv8wqXxcsIbP67V1QGaD5kNTFofZ9Zuf1LGnKAAM2AH0AAAAFZAAgAAAAAKl8bV1riH/uyJ+X0HHd3+18k2cJl2dQFXCdoagutFcaBXMAIAAAAABm8F2Ew9f0VOABdcF+lP0Bi+zWvEUPniWgrxPq/Sx3uwVjACAAAAAAQd2pWVqlmmLg8m8xbs7yLewmR0Z6UQgXofbCsMHaGSoAAzcAfQAAAAVkACAAAAAAsYZD8JEP6kYsPncFnNZwJxhu4YtUTKPNcjHtv67H+rYFcwAgAAAAAI4LqZcRkvbs/2F62Flu0pixNcor4WmBD0DHGaf039wLBWMAIAAAAAAqzpfyBpr4Ano+nFWJyyTuIJelJgiRDnMHQqdeqV8JaAADOAB9AAAABWQAIAAAAAC0vdAi+dmoIXvZ5LqUqvyKV9/tHqSI2SWiSJO5pTnA2wVzACAAAAAAS2qvf9fvfVUH5WtsVxjxmskpGjYTQV34LwvQQw1y9wIFYwAgAAAAAFhNY4qwNntyA+GIoNHZsTkIUbPgy4TBlvNnTPjp4bMFAAM5AH0AAAAFZAAgAAAAAH31lb/srBcrOXkzddCwAnclsR5/3QijEVgECs2JjOWBBXMAIAAAAABg7+prDT73YcCvLE5QbuIrqGcjLc5pQD2Miq0d29yrxgVjACAAAAAAFKqAqXG/ktejFQ7fM2aobO2VmEvZLXnRaJH97Jy/sJYAAzEwAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVjACAAAAAAfsofSP7nQHv8ic8ZW0aNlWxplS46Z+mywPR4rQk+wcgAAzExAH0AAAAFZAAgAAAAAFdthRhe2Q8CvxGIhjTJZv0Lk97GkHciTPxZ/mckLoNaBXMAIAAAAAAqOxsAr23LOVB0DIHbPf9UDJJRFXY2YoKbjhRqw5psbQVjACAAAAAA7ty+Nif6KjS3v1zWKaHX9n4Zj3XC4ajuCduKNIYr3l8AAzEyAH0AAAAFZAAgAAAAAMWymwwbvIeMqmnKWWifUqoCxOsdpnonM2qdLPyjqJO/BXMAIAAAAAB6IDmmpUhBD2zpRj8/y/kmOSXcjuIU14sNh6GKSsg2uwVjACAAAAAABSWO0Ii+NGcsHZQ9MR5EjPXVKeXlI4FQ1pcxeKDiuooAAzEzAH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVjACAAAAAAD51NYesiO4Fo7w7iWBfqAFxEqkaDVctpvzZ28nT4SE8AAzE0AH0AAAAFZAAgAAAAAJaRYmo8zqI2BEUzdSwp4tVRpPmVWsfydkYN3UHh6TMuBXMAIAAAAAAeD6mDnQeLlbC9i0sVgE8+RH6y+e94OJQ0tJ0PvblVSgVjACAAAAAAKUhYSt4nvvUfbNgPJ2E79SciVZ0ZzbzoZ2nKr4ewNLsAAzE1AH0AAAAFZAAgAAAAAAZZ538coNPwyRjhEwr5P8Xw32oWOJF+R+nfCGgy2qO3BXMAIAAAAACOPLnJlKwGNPDBReRKnHfteq0wFb3ezhrc7BVXs8RUHwVjACAAAAAAzCICkPZAkfTiD0MUt155dIPgLJ4/e0qFTM2FR0U261YAAzE2AH0AAAAFZAAgAAAAAFH9l9GGA1I52atJV5jNUf1lx8jBjoEoVoME97v5GFJiBXMAIAAAAAC1qH3Kd78Dr9NGbw7y9D/XYBwv5h1LLO8la5OU7g8UkQVjACAAAAAAn27H0Mpwatgc1R/4nXSRjsG2PzB0ol5YR9f3mCb2y/0AAzE3AH0AAAAFZAAgAAAAADtbVEI2tdkrowEMdkacD2w0Y3T3Ofi7PH6HmA6sP0c/BXMAIAAAAADuBSROnZHA+NgUPH8d0LnWFiDsM2bY8bzjC1+elSsIygVjACAAAAAAMinHEu4wkbeOpdZbXQ94q5o5pIEubqXUDrTRYOGmJC0AAzE4AH0AAAAFZAAgAAAAAAh3WpeMVlikPFYj9hLj+fmIqVt6omCSF75W3TPExyWpBXMAIAAAAAAsQkRmwqeVj2gGE03orb6PtrIzDt6dDU3hgSQi8E2wKgVjACAAAAAAvlZo8Qj3eAdxzZxN5sHKhxi+a9Npj7cZC5+pE6qrOawAAAVlACAAAAAA65pz95EthqQpfoHS9nWvdCh05AV+OokP7GUaI+7j8+wSY20AAAAAAAAAAAAAEHBheWxvYWRJZAAAAAAAEGZpcnN0T3BlcmF0b3IAAQAAAAA=", + "base64": 
"DdIJAAADcGF5bG9hZACiCQAABGcAjgkAAAMwAH0AAAAFZAAgAAAAAHdJ2Vnb4MMzqVYVssjSdDy8XU4GVzMTfGifGETgQ2mYBXMAIAAAAAD7cFfKJGIXo6PjyeX2ria02CckW7dWFDoY/3FyBdm1NQVsACAAAAAAhEPSNv4M023A3hzNFuy83+hIKuZ2mKRY954N++aEOBUAAzEAfQAAAAVkACAAAAAAlmvfDrZoydUet4eCVMq7z6a58Ea+1HLJOWxN5lNcrWEFcwAgAAAAAEBo5AWZyC41b9ayjWNQSL4iYEAIwR/JG+ssN8bdoK9RBWwAIAAAAACEndE0SLxFSElOrNnqeX0EPmgDio3udZjVREy4JLS3sQADMgB9AAAABWQAIAAAAABbiLaoxAA6rinMJw1hC8ZUiq6UU1AQaPFn/py/Y06WuQVzACAAAAAAhtDasFkvYE7SCNu1je/hxdE9TJtAvvH3NtdEbKzNbCUFbAAgAAAAAIGepU1RSCF8sWODHEpKglsoqw3VBBH4a/URGxgGzbq2AAMzAH0AAAAFZAAgAAAAALORWwSr+tYNxcil2KIGSbNhTHvcPbdj+rLVQNx21S/KBXMAIAAAAAD6diZBkPEJ1cQy06LAxdbNK8Nlxbb44fH4Wk3Y3260nQVsACAAAAAA1eYAZBFHlDiaDAljWi8blGQ2nvvZa5AO5doeo0SFZsgAAzQAfQAAAAVkACAAAAAAG5XMK96PjClNlUvg82j4pMY1YxsznZfj4uNweD394FoFcwAgAAAAAKHgQLdGJHkrfFg9nB93Ac+3VgBw6aU44MTkKIQ91dZoBWwAIAAAAAAPxXmi+SDJ+40A0KdwfRczexlZQrHjIA+D3oUB0EY9tAADNQB9AAAABWQAIAAAAAA6M++b9I0YFemmWBAWAE3glu2Ah3Ta1FBxAQEIWS0toAVzACAAAAAANXYTqPf1Y6X3Ns6YQIX0C3FKCyWUo+Kk+fNcQvc0WSoFbAAgAAAAAA+uJUw1ICYgyeygSRe206VTWVtUnhdci3iHbyP5YtEVAAM2AH0AAAAFZAAgAAAAAKl8bV1riH/uyJ+X0HHd3+18k2cJl2dQFXCdoagutFcaBXMAIAAAAABm8F2Ew9f0VOABdcF+lP0Bi+zWvEUPniWgrxPq/Sx3uwVsACAAAAAAJfFErjZ6BPhsw5LjJLqNtKDLJ4zV0eIZppQpd9b0wZoAAzcAfQAAAAVkACAAAAAAsYZD8JEP6kYsPncFnNZwJxhu4YtUTKPNcjHtv67H+rYFcwAgAAAAAI4LqZcRkvbs/2F62Flu0pixNcor4WmBD0DHGaf039wLBWwAIAAAAAD4wUR3xd9lKltcqqo8LYvdMQWzCRobkV/ppKB/yn5dUgADOAB9AAAABWQAIAAAAAC0vdAi+dmoIXvZ5LqUqvyKV9/tHqSI2SWiSJO5pTnA2wVzACAAAAAAS2qvf9fvfVUH5WtsVxjxmskpGjYTQV34LwvQQw1y9wIFbAAgAAAAAE0+FKuK7HxbypvCeEJzMTcjOWE0ScYOlTBMUNlIv55hAAM5AH0AAAAFZAAgAAAAAH31lb/srBcrOXkzddCwAnclsR5/3QijEVgECs2JjOWBBXMAIAAAAABg7+prDT73YcCvLE5QbuIrqGcjLc5pQD2Miq0d29yrxgVsACAAAAAAetRiPwDSFWBzpWSWkOKWM6fKStRJ8SyObnpc79ux8p0AAzEwAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzExAH0AAAAFZAAgAAAAAFdthRhe2Q8CvxGIhjTJZv0Lk97GkHciTPxZ/mckLoNaBXMAIAAAAAAqOxsAr23LOVB0DIHbPf9UDJJRFXY2YoKbjhRqw5psbQVsACAAAAAA0G2GD8ZQjDBntjLpW4rqwKRS6HiUjL03g1N6chANozcAAzEyAH0AAAAFZAAgAAAAAMWymwwbvIeMqmnKWWifUqoCxOsdpnonM2qdLPyjqJO/BXMAIAAAAAB6IDmmpUhBD2zpRj8/y/kmOSXcjuIU14sNh6GKSsg2uwVsACAAAAAAWMFPNOk3EMSQDS9JGPSMIQP0oNGVugxXKKUrIPPlhHgAAzEzAH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzE0AH0AAAAFZAAgAAAAAJaRYmo8zqI2BEUzdSwp4tVRpPmVWsfydkYN3UHh6TMuBXMAIAAAAAAeD6mDnQeLlbC9i0sVgE8+RH6y+e94OJQ0tJ0PvblVSgVsACAAAAAAWp4jvretbDEsqEMzP/WLTnwOiJwCtfrCiB6m8k+yEMoAAzE1AH0AAAAFZAAgAAAAAAZZ538coNPwyRjhEwr5P8Xw32oWOJF+R+nfCGgy2qO3BXMAIAAAAACOPLnJlKwGNPDBReRKnHfteq0wFb3ezhrc7BVXs8RUHwVsACAAAAAA+lGesNk3+SyB/60rSvdQ2aN2vfJPR7llJVhufGTNhHkAAzE2AH0AAAAFZAAgAAAAAFH9l9GGA1I52atJV5jNUf1lx8jBjoEoVoME97v5GFJiBXMAIAAAAAC1qH3Kd78Dr9NGbw7y9D/XYBwv5h1LLO8la5OU7g8UkQVsACAAAAAArZ6atJCYrVfHB8dSNPOFf6nnDADBMJcIEj8ljPvxHp8AAzE3AH0AAAAFZAAgAAAAADtbVEI2tdkrowEMdkacD2w0Y3T3Ofi7PH6HmA6sP0c/BXMAIAAAAADuBSROnZHA+NgUPH8d0LnWFiDsM2bY8bzjC1+elSsIygVsACAAAAAAR0G2m+uANoWknkr/NerFcG+fECVxNIs0cqbY1t/U/0MAAzE4AH0AAAAFZAAgAAAAAAh3WpeMVlikPFYj9hLj+fmIqVt6omCSF75W3TPExyWpBXMAIAAAAAAsQkRmwqeVj2gGE03orb6PtrIzDt6dDU3hgSQi8E2wKgVsACAAAAAA3GHaRE2RAcaBRd8VzmYzWeBD2Gmy91eTK1k8YdWObZcAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAAA==", "subType": "06" } } @@ -318,7 +314,6 @@ "schema": { "default.default": { "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", "ecocCollection": 
"enxcol_.default.ecoc", "fields": [ { diff --git a/test/client-side-encryption/spec/legacy/fle2-Range-DecimalPrecision-Update.json b/test/client-side-encryption/spec/legacy/fle2v2-Range-DecimalPrecision-Update.json similarity index 84% rename from test/client-side-encryption/spec/legacy/fle2-Range-DecimalPrecision-Update.json rename to test/client-side-encryption/spec/legacy/fle2v2-Range-DecimalPrecision-Update.json index 3dc6631c61..e5d1a4e059 100644 --- a/test/client-side-encryption/spec/legacy/fle2-Range-DecimalPrecision-Update.json +++ b/test/client-side-encryption/spec/legacy/fle2v2-Range-DecimalPrecision-Update.json @@ -1,7 +1,8 @@ { "runOn": [ { - "minServerVersion": "6.2.0", + "minServerVersion": "7.0.0", + "serverless": "forbid", "topology": [ "replicaset", "sharded", @@ -13,9 +14,6 @@ "collection_name": "default", "data": [], "encrypted_fields": { - "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", - "ecocCollection": "enxcol_.default.ecoc", "fields": [ { "keyId": { @@ -207,7 +205,6 @@ "schema": { "default.default": { "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", "ecocCollection": "enxcol_.default.ecoc", "fields": [ { @@ -264,7 +261,6 @@ "schema": { "default.default": { "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", "ecocCollection": "enxcol_.default.ecoc", "fields": [ { @@ -315,7 +311,7 @@ "encryptedDecimalPrecision": { "$gt": { "$binary": { - "base64": "CvoJAAADcGF5bG9hZADKCQAABGcAjgkAAAMwAH0AAAAFZAAgAAAAAHdJ2Vnb4MMzqVYVssjSdDy8XU4GVzMTfGifGETgQ2mYBXMAIAAAAAD7cFfKJGIXo6PjyeX2ria02CckW7dWFDoY/3FyBdm1NQVjACAAAAAAqZO+/+gRWlPaMOvuiXizSmBe7lp1VWg1vJ4UmW8o3bQAAzEAfQAAAAVkACAAAAAAlmvfDrZoydUet4eCVMq7z6a58Ea+1HLJOWxN5lNcrWEFcwAgAAAAAEBo5AWZyC41b9ayjWNQSL4iYEAIwR/JG+ssN8bdoK9RBWMAIAAAAAD4FTKJ6CTzKBAyAwZCLUoDEfnZTRZmhF1q/2hnDzmG9gADMgB9AAAABWQAIAAAAABbiLaoxAA6rinMJw1hC8ZUiq6UU1AQaPFn/py/Y06WuQVzACAAAAAAhtDasFkvYE7SCNu1je/hxdE9TJtAvvH3NtdEbKzNbCUFYwAgAAAAAHHy019aPatHTST+0wGsmukUcsQNQj6KpoS9b7iGeThAAAMzAH0AAAAFZAAgAAAAALORWwSr+tYNxcil2KIGSbNhTHvcPbdj+rLVQNx21S/KBXMAIAAAAAD6diZBkPEJ1cQy06LAxdbNK8Nlxbb44fH4Wk3Y3260nQVjACAAAAAAvUc1q7pyhjU0ilgmwiKkHIY3V4/LxO+Y2uT7eSpBOs8AAzQAfQAAAAVkACAAAAAAG5XMK96PjClNlUvg82j4pMY1YxsznZfj4uNweD394FoFcwAgAAAAAKHgQLdGJHkrfFg9nB93Ac+3VgBw6aU44MTkKIQ91dZoBWMAIAAAAACtbNc1DCoUUyzlkrYmJi4NlwOqLYmb6au4pDc8clXVXwADNQB9AAAABWQAIAAAAAA6M++b9I0YFemmWBAWAE3glu2Ah3Ta1FBxAQEIWS0toAVzACAAAAAANXYTqPf1Y6X3Ns6YQIX0C3FKCyWUo+Kk+fNcQvc0WSoFYwAgAAAAAAaqju6Dv8wqXxcsIbP67V1QGaD5kNTFofZ9Zuf1LGnKAAM2AH0AAAAFZAAgAAAAAKl8bV1riH/uyJ+X0HHd3+18k2cJl2dQFXCdoagutFcaBXMAIAAAAABm8F2Ew9f0VOABdcF+lP0Bi+zWvEUPniWgrxPq/Sx3uwVjACAAAAAAQd2pWVqlmmLg8m8xbs7yLewmR0Z6UQgXofbCsMHaGSoAAzcAfQAAAAVkACAAAAAAsYZD8JEP6kYsPncFnNZwJxhu4YtUTKPNcjHtv67H+rYFcwAgAAAAAI4LqZcRkvbs/2F62Flu0pixNcor4WmBD0DHGaf039wLBWMAIAAAAAAqzpfyBpr4Ano+nFWJyyTuIJelJgiRDnMHQqdeqV8JaAADOAB9AAAABWQAIAAAAAC0vdAi+dmoIXvZ5LqUqvyKV9/tHqSI2SWiSJO5pTnA2wVzACAAAAAAS2qvf9fvfVUH5WtsVxjxmskpGjYTQV34LwvQQw1y9wIFYwAgAAAAAFhNY4qwNntyA+GIoNHZsTkIUbPgy4TBlvNnTPjp4bMFAAM5AH0AAAAFZAAgAAAAAH31lb/srBcrOXkzddCwAnclsR5/3QijEVgECs2JjOWBBXMAIAAAAABg7+prDT73YcCvLE5QbuIrqGcjLc5pQD2Miq0d29yrxgVjACAAAAAAFKqAqXG/ktejFQ7fM2aobO2VmEvZLXnRaJH97Jy/sJYAAzEwAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVjACAAAAAAfsofSP7nQHv8ic8ZW0aNlWxplS46Z+mywPR4rQk+wcgAAzExAH0AAAAFZAAgAAAAAFdthRhe2Q8CvxGIhjTJZv0Lk97GkHciTPxZ/mckLoNaBXMAIAAAAAAqOxsAr23LOVB0DIHbPf9UDJJRFXY2YoKbjhRqw5psbQVjACAAAAAA7ty+Nif6KjS3v1zWKaHX9n4Z
j3XC4ajuCduKNIYr3l8AAzEyAH0AAAAFZAAgAAAAAMWymwwbvIeMqmnKWWifUqoCxOsdpnonM2qdLPyjqJO/BXMAIAAAAAB6IDmmpUhBD2zpRj8/y/kmOSXcjuIU14sNh6GKSsg2uwVjACAAAAAABSWO0Ii+NGcsHZQ9MR5EjPXVKeXlI4FQ1pcxeKDiuooAAzEzAH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVjACAAAAAAD51NYesiO4Fo7w7iWBfqAFxEqkaDVctpvzZ28nT4SE8AAzE0AH0AAAAFZAAgAAAAAJaRYmo8zqI2BEUzdSwp4tVRpPmVWsfydkYN3UHh6TMuBXMAIAAAAAAeD6mDnQeLlbC9i0sVgE8+RH6y+e94OJQ0tJ0PvblVSgVjACAAAAAAKUhYSt4nvvUfbNgPJ2E79SciVZ0ZzbzoZ2nKr4ewNLsAAzE1AH0AAAAFZAAgAAAAAAZZ538coNPwyRjhEwr5P8Xw32oWOJF+R+nfCGgy2qO3BXMAIAAAAACOPLnJlKwGNPDBReRKnHfteq0wFb3ezhrc7BVXs8RUHwVjACAAAAAAzCICkPZAkfTiD0MUt155dIPgLJ4/e0qFTM2FR0U261YAAzE2AH0AAAAFZAAgAAAAAFH9l9GGA1I52atJV5jNUf1lx8jBjoEoVoME97v5GFJiBXMAIAAAAAC1qH3Kd78Dr9NGbw7y9D/XYBwv5h1LLO8la5OU7g8UkQVjACAAAAAAn27H0Mpwatgc1R/4nXSRjsG2PzB0ol5YR9f3mCb2y/0AAzE3AH0AAAAFZAAgAAAAADtbVEI2tdkrowEMdkacD2w0Y3T3Ofi7PH6HmA6sP0c/BXMAIAAAAADuBSROnZHA+NgUPH8d0LnWFiDsM2bY8bzjC1+elSsIygVjACAAAAAAMinHEu4wkbeOpdZbXQ94q5o5pIEubqXUDrTRYOGmJC0AAzE4AH0AAAAFZAAgAAAAAAh3WpeMVlikPFYj9hLj+fmIqVt6omCSF75W3TPExyWpBXMAIAAAAAAsQkRmwqeVj2gGE03orb6PtrIzDt6dDU3hgSQi8E2wKgVjACAAAAAAvlZo8Qj3eAdxzZxN5sHKhxi+a9Npj7cZC5+pE6qrOawAAAVlACAAAAAA65pz95EthqQpfoHS9nWvdCh05AV+OokP7GUaI+7j8+wSY20AAAAAAAAAAAAAEHBheWxvYWRJZAAAAAAAEGZpcnN0T3BlcmF0b3IAAQAAAAA=", + "base64": "DdIJAAADcGF5bG9hZACiCQAABGcAjgkAAAMwAH0AAAAFZAAgAAAAAHdJ2Vnb4MMzqVYVssjSdDy8XU4GVzMTfGifGETgQ2mYBXMAIAAAAAD7cFfKJGIXo6PjyeX2ria02CckW7dWFDoY/3FyBdm1NQVsACAAAAAAhEPSNv4M023A3hzNFuy83+hIKuZ2mKRY954N++aEOBUAAzEAfQAAAAVkACAAAAAAlmvfDrZoydUet4eCVMq7z6a58Ea+1HLJOWxN5lNcrWEFcwAgAAAAAEBo5AWZyC41b9ayjWNQSL4iYEAIwR/JG+ssN8bdoK9RBWwAIAAAAACEndE0SLxFSElOrNnqeX0EPmgDio3udZjVREy4JLS3sQADMgB9AAAABWQAIAAAAABbiLaoxAA6rinMJw1hC8ZUiq6UU1AQaPFn/py/Y06WuQVzACAAAAAAhtDasFkvYE7SCNu1je/hxdE9TJtAvvH3NtdEbKzNbCUFbAAgAAAAAIGepU1RSCF8sWODHEpKglsoqw3VBBH4a/URGxgGzbq2AAMzAH0AAAAFZAAgAAAAALORWwSr+tYNxcil2KIGSbNhTHvcPbdj+rLVQNx21S/KBXMAIAAAAAD6diZBkPEJ1cQy06LAxdbNK8Nlxbb44fH4Wk3Y3260nQVsACAAAAAA1eYAZBFHlDiaDAljWi8blGQ2nvvZa5AO5doeo0SFZsgAAzQAfQAAAAVkACAAAAAAG5XMK96PjClNlUvg82j4pMY1YxsznZfj4uNweD394FoFcwAgAAAAAKHgQLdGJHkrfFg9nB93Ac+3VgBw6aU44MTkKIQ91dZoBWwAIAAAAAAPxXmi+SDJ+40A0KdwfRczexlZQrHjIA+D3oUB0EY9tAADNQB9AAAABWQAIAAAAAA6M++b9I0YFemmWBAWAE3glu2Ah3Ta1FBxAQEIWS0toAVzACAAAAAANXYTqPf1Y6X3Ns6YQIX0C3FKCyWUo+Kk+fNcQvc0WSoFbAAgAAAAAA+uJUw1ICYgyeygSRe206VTWVtUnhdci3iHbyP5YtEVAAM2AH0AAAAFZAAgAAAAAKl8bV1riH/uyJ+X0HHd3+18k2cJl2dQFXCdoagutFcaBXMAIAAAAABm8F2Ew9f0VOABdcF+lP0Bi+zWvEUPniWgrxPq/Sx3uwVsACAAAAAAJfFErjZ6BPhsw5LjJLqNtKDLJ4zV0eIZppQpd9b0wZoAAzcAfQAAAAVkACAAAAAAsYZD8JEP6kYsPncFnNZwJxhu4YtUTKPNcjHtv67H+rYFcwAgAAAAAI4LqZcRkvbs/2F62Flu0pixNcor4WmBD0DHGaf039wLBWwAIAAAAAD4wUR3xd9lKltcqqo8LYvdMQWzCRobkV/ppKB/yn5dUgADOAB9AAAABWQAIAAAAAC0vdAi+dmoIXvZ5LqUqvyKV9/tHqSI2SWiSJO5pTnA2wVzACAAAAAAS2qvf9fvfVUH5WtsVxjxmskpGjYTQV34LwvQQw1y9wIFbAAgAAAAAE0+FKuK7HxbypvCeEJzMTcjOWE0ScYOlTBMUNlIv55hAAM5AH0AAAAFZAAgAAAAAH31lb/srBcrOXkzddCwAnclsR5/3QijEVgECs2JjOWBBXMAIAAAAABg7+prDT73YcCvLE5QbuIrqGcjLc5pQD2Miq0d29yrxgVsACAAAAAAetRiPwDSFWBzpWSWkOKWM6fKStRJ8SyObnpc79ux8p0AAzEwAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzExAH0AAAAFZAAgAAAAAFdthRhe2Q8CvxGIhjTJZv0Lk97GkHciTPxZ/mckLoNaBXMAIAAAAAAqOxsAr23LOVB0DIHbPf9UDJJRFXY2YoKbjhRqw5psbQVsACAAAAAA0G2GD8ZQjDBntjLpW4rqwKRS6HiUjL03g1N6chANozcAAzEyAH0AAAAFZAAgAAAAAMWymwwbvIeMqmnKWWifUqoCxOsdpnonM2qdLPyjqJO/BXMAIAAAAAB6IDmmpUhBD2zpRj8/y/kmOSXcjuIU14sNh6G
KSsg2uwVsACAAAAAAWMFPNOk3EMSQDS9JGPSMIQP0oNGVugxXKKUrIPPlhHgAAzEzAH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzE0AH0AAAAFZAAgAAAAAJaRYmo8zqI2BEUzdSwp4tVRpPmVWsfydkYN3UHh6TMuBXMAIAAAAAAeD6mDnQeLlbC9i0sVgE8+RH6y+e94OJQ0tJ0PvblVSgVsACAAAAAAWp4jvretbDEsqEMzP/WLTnwOiJwCtfrCiB6m8k+yEMoAAzE1AH0AAAAFZAAgAAAAAAZZ538coNPwyRjhEwr5P8Xw32oWOJF+R+nfCGgy2qO3BXMAIAAAAACOPLnJlKwGNPDBReRKnHfteq0wFb3ezhrc7BVXs8RUHwVsACAAAAAA+lGesNk3+SyB/60rSvdQ2aN2vfJPR7llJVhufGTNhHkAAzE2AH0AAAAFZAAgAAAAAFH9l9GGA1I52atJV5jNUf1lx8jBjoEoVoME97v5GFJiBXMAIAAAAAC1qH3Kd78Dr9NGbw7y9D/XYBwv5h1LLO8la5OU7g8UkQVsACAAAAAArZ6atJCYrVfHB8dSNPOFf6nnDADBMJcIEj8ljPvxHp8AAzE3AH0AAAAFZAAgAAAAADtbVEI2tdkrowEMdkacD2w0Y3T3Ofi7PH6HmA6sP0c/BXMAIAAAAADuBSROnZHA+NgUPH8d0LnWFiDsM2bY8bzjC1+elSsIygVsACAAAAAAR0G2m+uANoWknkr/NerFcG+fECVxNIs0cqbY1t/U/0MAAzE4AH0AAAAFZAAgAAAAAAh3WpeMVlikPFYj9hLj+fmIqVt6omCSF75W3TPExyWpBXMAIAAAAAAsQkRmwqeVj2gGE03orb6PtrIzDt6dDU3hgSQi8E2wKgVsACAAAAAA3GHaRE2RAcaBRd8VzmYzWeBD2Gmy91eTK1k8YdWObZcAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAAA==", "subType": "06" } } @@ -335,7 +331,6 @@ "schema": { "default.default": { "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", "ecocCollection": "enxcol_.default.ecoc", "fields": [ { @@ -368,24 +363,6 @@ } ] } - }, - "deleteTokens": { - "default.default": { - "encryptedDecimalPrecision": { - "e": { - "$binary": { - "base64": "65pz95EthqQpfoHS9nWvdCh05AV+OokP7GUaI+7j8+w=", - "subType": "00" - } - }, - "o": { - "$binary": { - "base64": "noN+05JsuO1oDg59yypIGj45i+eFH6HOTXOPpeZ//Mk=", - "subType": "00" - } - } - } - } } }, "$db": "default" diff --git a/test/client-side-encryption/spec/legacy/fle2-Range-Double-Aggregate.json b/test/client-side-encryption/spec/legacy/fle2v2-Range-Double-Aggregate.json similarity index 81% rename from test/client-side-encryption/spec/legacy/fle2-Range-Double-Aggregate.json rename to test/client-side-encryption/spec/legacy/fle2v2-Range-Double-Aggregate.json index 3d54be3d18..d8c9cacdcc 100644 --- a/test/client-side-encryption/spec/legacy/fle2-Range-Double-Aggregate.json +++ b/test/client-side-encryption/spec/legacy/fle2v2-Range-Double-Aggregate.json @@ -1,7 +1,8 @@ { "runOn": [ { - "minServerVersion": "6.2.0", + "minServerVersion": "7.0.0", + "serverless": "forbid", "topology": [ "replicaset", "sharded", @@ -13,9 +14,6 @@ "collection_name": "default", "data": [], "encrypted_fields": { - "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", - "ecocCollection": "enxcol_.default.ecoc", "fields": [ { "keyId": { @@ -24,7 +22,7 @@ "subType": "04" } }, - "path": "encryptedDouble", + "path": "encryptedDoubleNoPrecision", "bsonType": "double", "queries": { "queryType": "rangePreview", @@ -93,7 +91,7 @@ "arguments": { "document": { "_id": 0, - "encryptedDouble": { + "encryptedDoubleNoPrecision": { "$numberDouble": "0" } } @@ -104,7 +102,7 @@ "arguments": { "document": { "_id": 1, - "encryptedDouble": { + "encryptedDoubleNoPrecision": { "$numberDouble": "1" } } @@ -116,7 +114,7 @@ "pipeline": [ { "$match": { - "encryptedDouble": { + "encryptedDoubleNoPrecision": { "$gt": { "$numberDouble": "0" } @@ -128,7 +126,7 @@ "result": [ { "_id": 1, - "encryptedDouble": { + "encryptedDoubleNoPrecision": { "$numberDouble": "1" } } @@ -187,7 +185,7 @@ "documents": [ { "_id": 0, - "encryptedDouble": { + "encryptedDoubleNoPrecision": { "$$type": "binData" } } @@ -198,7 +196,6 @@ "schema": { 
"default.default": { "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", "ecocCollection": "enxcol_.default.ecoc", "fields": [ { @@ -208,7 +205,7 @@ "subType": "04" } }, - "path": "encryptedDouble", + "path": "encryptedDoubleNoPrecision", "bsonType": "double", "queries": { "queryType": "rangePreview", @@ -235,7 +232,7 @@ "documents": [ { "_id": 1, - "encryptedDouble": { + "encryptedDoubleNoPrecision": { "$$type": "binData" } } @@ -246,7 +243,6 @@ "schema": { "default.default": { "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", "ecocCollection": "enxcol_.default.ecoc", "fields": [ { @@ -256,7 +252,7 @@ "subType": "04" } }, - "path": "encryptedDouble", + "path": "encryptedDoubleNoPrecision", "bsonType": "double", "queries": { "queryType": "rangePreview", @@ -283,10 +279,10 @@ "pipeline": [ { "$match": { - "encryptedDouble": { + "encryptedDoubleNoPrecision": { "$gt": { "$binary": { - "base64": "Cq8kAAADcGF5bG9hZAB/JAAABGcAQyQAAAMwAH0AAAAFZAAgAAAAAHgYoMGjEE6fAlAhICv0+doHcVX8CmMVxyq7+jlyGrvmBXMAIAAAAAC/5MQZgTHuIr/O5Z3mXPvqrom5JTQ8IeSpQGhO9sB+8gVjACAAAAAAPMgD8Rqnd94atKnMyPjlTwthUQ710MKJVqgtwNXLFWwAAzEAfQAAAAVkACAAAAAA2kiWNvEc4zunJ1jzvuClFC9hjZMYruKCqAaxq+oY8EAFcwAgAAAAACofIS72Cm6s866UCk+evTH3CvKBj/uZd72sAL608rzTBWMAIAAAAAB7NkTFgsBuPIyxW5KtYZD4uxdgRynoAUwA2RDj3qZNdwADMgB9AAAABWQAIAAAAABkfoBGmU3hjYBvQbjNW19kfXneBQsQQPRfUL3UAwI2cAVzACAAAAAAUpK2BUOqX/DGdX5YJniEZMWkofxHqeAbXceEGJxhp8AFYwAgAAAAAENNdd+fMV7VQzshj6RdvsxqDHYkPDxggdUHw3JbmFBeAAMzAH0AAAAFZAAgAAAAAFNprhQ3ZwIcYbuzLolAT5n/vc14P9kUUQComDu6eFyKBXMAIAAAAAAcx9z9pk32YbPV/sfPZl9ALIEVsqoLXgqWLVK/tP+heAVjACAAAAAA3eqduo9CWE5BePqBq7KCh5++QqXnyjCwybB15Zoeiu4AAzQAfQAAAAVkACAAAAAAODI+pB2pCuB+YmNEUAgtMfNdt3DmSkrJ96gRzLphgb8FcwAgAAAAAAT7dewFDxUDECQ3zVq75/cUN4IP+zsqhkP5+czUwlJIBWMAIAAAAABYcCJA2jocGQiF/vZ9VDEktoK015OqfY64yZhmNPs9GQADNQB9AAAABWQAIAAAAACNAk+yTZ4Ewk1EnotQK8O3h1gg9I7pr9q2+4po1iJVgAVzACAAAAAAUj/LesmtEsgqYVzMJ67umVA11hJTdDXwbxDoQ71vWyUFYwAgAAAAAG32XaKjEO9YTSLMnDR5eiaUpgBQz0n4M3VpBqhWTaEnAAM2AH0AAAAFZAAgAAAAAD/FIrGYFDjyYmVb7oTMVwweWP7A6F9LnyIuNO4MjBnXBXMAIAAAAACIZgJCQRZu7NhuNMyOqCn1tf+DfU1qm10TPCfj5JYV3wVjACAAAAAAZCaO06/DjOBjNrTxB8jdxM7X4XvNu9QTa2Y00kZJwgEAAzcAfQAAAAVkACAAAAAAciRW40ORJLVwchOEpz87Svb+5toAFM6LxDWv928ECwQFcwAgAAAAAN0dipyESIkszfjRzdDi8kAGaa2Hf4wrPAtiWwboZLuxBWMAIAAAAADhvKHhF7zfNRnG8QfiIlKycV6L6iGxbGNKkHligA9rAgADOAB9AAAABWQAIAAAAACZqAyCzYQupJ95mrBJX54yIz9VY7I0WrxpNYElCI4dTQVzACAAAAAA/eyJb6d1xfE+jJlVXMTD3HS/NEYENPVKAuj56Dr2dSEFYwAgAAAAAMfcJKj77c6y/RBQ5hb5LSNdvbYvCAHR59KLnbmxj1gAAAM5AH0AAAAFZAAgAAAAAI5bm3YO0Xgf0VT+qjVTTfvckecM3Cwqj7DTKZXf8/NXBXMAIAAAAAD/m+h8fBhWaHm6Ykuz0WX1xL4Eme3ErLObyEVJf8NCywVjACAAAAAAiGdTXD22l1zDxHeF4NXUTiBnNTsdpzJRwM6riTPuOogAAzEwAH0AAAAFZAAgAAAAANqo4+p6qdtCzcB4BX1wQ6llU7eFBnuu4MtZwp4B6mDlBXMAIAAAAAAGiz+VaukMZ+6IH4jtn4KWWdKK4/W+O+gRioQDrfzpMgVjACAAAAAAl/wlBjSJW/hKkll8HSBCGw4Ce1pJ5rZuhVE09hKq4jQAAzExAH0AAAAFZAAgAAAAAPrFXmHP2Y4YAm7b/aqsdn/DPoDkv7B8egWkfe23XsM1BXMAIAAAAAAGhwpKAr7skeqHm3oseSbO7qKNhmYsuUrECBxJ5k+D2AVjACAAAAAAKSsxERy2OBhb5MiH9/H2BET2oeU6yVihiAoWi/16FroAAzEyAH0AAAAFZAAgAAAAABzjYxwAjXxXc0Uxv18rH8I3my0Aguow0kTwKyxbrm+cBXMAIAAAAADVbqJVr6IdokuhXkEtXF0C2gINLiAjMVN20lE20Vmp2QVjACAAAAAACJ+VAwl9X88mjC76yzkWeL4K6AamNYjbNkpS9B6fQE4AAzEzAH0AAAAFZAAgAAAAAFMm2feF2fFCm/UC6AfIyepX/xJDSmnnolQIBnHcPmb5BXMAIAAAAABLI11kFrQoaNVZFmq/38aRNImPOjdJh0Lo6irI8M/AaAVjACAAAAAAZFgYh4nV8YN+/AAqe/QhKDkNBPg8KraCG7y3bOzhPV4AAzE0AH0AAAAFZAAgAAAAAJvXB3KyNiNtQko4SSzo/9b2qmM2zU9CQTTDfLSBWMgRBXMAIAAAAAAvjuVP7KsLRDeqVqRziTKpBrjVyqKiIbO9Gw8Wl2wFTAVjACAAAAAARuBn3JdQ8so7Gy0XVH0CtlvtRoJWhSr
JP6eRIqbyWh8AAzE1AH0AAAAFZAAgAAAAAPGdcxDiid8z8XYnfdDivNMYVPgBKdGOUw6UStU+48CdBXMAIAAAAAARj6g1Ap0eEfuCZ4X2TsEw+Djrhto3fA5nLwPaY0vCTgVjACAAAAAATxLY6SkcLPaoOBJpQsggEqoxgJgiY9seP3fBQM05PckAAzE2AH0AAAAFZAAgAAAAAP5rGPrYGt3aKob5f/ldP0qrW7bmWvqnKY4QwdDWz400BXMAIAAAAADTQkW2ymaaf/bhteOOGmSrIR97bAnJx+yN3yMj1bTeewVjACAAAAAA6O+wIMt3xMITuCxVrqOCNBPX5F122G+/Is+EYkzUvVYAAzE3AH0AAAAFZAAgAAAAAAlz6wJze5UkIxKpJOZFGCOf3v2KByWyI6NB6JM9wNcBBXMAIAAAAABUC7P/neUIHHoZtq0jFVBHY75tSFYr1Y5S16YN5XxC1QVjACAAAAAANv97jMd9JmZ7lMeCzK7jaZHZYsZJNl6N9g9WXzd2om0AAzE4AH0AAAAFZAAgAAAAAFJ8AtHcjia/9Y5pLEc3qVgH5xKiXw12G9Kn2A1EY8McBXMAIAAAAAAxe7Bdw7eUSBk/oAawa7uicTEDgXLymRNhBy1LAxhDvwVjACAAAAAAtOUZ2NeUWwESpq95O92dhqYBt8RtFnjT43E7k9mQF3oAAzE5AH0AAAAFZAAgAAAAAO8uwQUaKFb6vqR3Sv3Wn4QAonC2exOC9lGG1juqP5DtBXMAIAAAAABZf1KyJgQg8/Rf5c02DgDK2aQu0rNCOvaL60ohDHyY+gVjACAAAAAA5nAWj1Ek8p0MLYZEB3yoVFDfBYZ+/ZpIo71u+W9hprcAAzIwAH0AAAAFZAAgAAAAAE8YtqyRsGCeiR6hhiyisR/hccmK4nZqIMzO4lUBmEFzBXMAIAAAAAC1UYOSKqAeG1UJiKjWFVskRhuFKpj9Ezy+lICZvFlN5AVjACAAAAAA37oP30mTzVftI2FL9Uwyls/jqLqbmRDQk7p7nlx44uYAAzIxAH0AAAAFZAAgAAAAAPhCrMausDx1QUIEqp9rUdRKyM6a9AAx7jQ3ILIu8wNIBXMAIAAAAACmH8lotGCiF2q9VQxhsS+7LAZv79VUAsOUALaGxE/EpAVjACAAAAAAhVIkl8p19VZ0gpg32s++Jda3qsVSVB5tV4CKrtjhE3IAAzIyAH0AAAAFZAAgAAAAAOBi/GAYFcstMSJPgp3VkMiuuUUCrZytvqYaU8dwm8v2BXMAIAAAAACEZSZVyD3pKzGlbdwlYmWQhHHTV5SnNLknl2Gw8IaUTQVjACAAAAAAriBex2kK//RPhyVpCYJDBng4l5w8jD3m8BF7dVAP0p8AAzIzAH0AAAAFZAAgAAAAAIQuup+YGfH3mflzWopN8J1X8o8a0d9CSGIvrA5HOzraBXMAIAAAAADYvNLURXsC2ITMqK14LABQBI+hZZ5wNf24JMcKLW+84AVjACAAAAAAGeUzdG2kQrx5XypXJezZmPVzMYuqYZw7Bhbl4EPT0SMAAzI0AH0AAAAFZAAgAAAAAKsh0ADyOnVocFrOrf6MpTrNvAj8iaiE923DPryu124gBXMAIAAAAADg24a8NVE1GyScc6tmnTbmu5ulzO+896fE92lN08MeswVjACAAAAAAbO+DEBY3STVMQN7CbxmHDUBYrDR+80e797u/VmiK4vcAAzI1AH0AAAAFZAAgAAAAAKkVC2Y6HtRmv72tDnPUSjJBvse7SxLqnr09/Uuj9sVVBXMAIAAAAABYNFUkH7ylPMN+Bc3HWX1e0flGYNbtJNCY9SltJCW/UAVjACAAAAAAHlE3RLKcXpXto7Mr8nRgzEsmhjfGh4vcgPxashCSMg4AAzI2AH0AAAAFZAAgAAAAADDggcwcb/Yn1Kk39sOHsv7BO/MfP3m/AJzjGH506Wf9BXMAIAAAAAAYZIsdjICS0+BDyRUPnrSAZfPrwtuMaEDEn0/ijLNQmAVjACAAAAAA1c/sy3255NofBQrnBNSmVkSzMgsGPaaOUJShddVrnuQAAzI3AH0AAAAFZAAgAAAAAEWY7dEUOJBgjOoWVht1wLehsWAzB3rSOBtLgTuM2HC8BXMAIAAAAAAAoswiHRROurjwUW8u8D5EUT+67yvrgpB/j6PzBDAfVwVjACAAAAAALBvQumG8m7/bzJjGWN2cHSAncdN8jMtOSmEhEhGom24AAzI4AH0AAAAFZAAgAAAAAPZaapeAUUFPA7JTCMOWHJa9lnPFh0/gXfAPjA1ezm4ZBXMAIAAAAACmJvLY2nivw7/b3DOKH/X7bBXjJwoowqb1GtEFO3OYgAVjACAAAAAAGCDHuUJusGHKQ+r9nrFChmUUsRcqZKPGsRiLSk5gbFcAAzI5AH0AAAAFZAAgAAAAAK00u6jadxCZAiA+fTsPVDsnW5p5LCr4+kZZZOTDuZlfBXMAIAAAAAAote4zTEYMDgaaQbAdN8Dzv93ljPLdGjJzvnRn3KXgtQVjACAAAAAApZ6l+OrocOqgFek7WOqOP9JruTWZ+iW+5zdL3DZwzhkAAzMwAH0AAAAFZAAgAAAAAFbgabdyymiEVYYwtJSWa7lfl/oYuj/SukzJeDOR6wPVBXMAIAAAAADAFGFjS1vPbN6mQEhkDYTD6V2V23Ys9gUEUMGNvMPkaAVjACAAAAAAeICKly5Xtwmfd4JFD+5e1UWpu4V3KoRim7jeBICDs+UAAzMxAH0AAAAFZAAgAAAAABNMR6UBv2E627CqLtQ/eDYx7OEwQ7JrR4mSHFa1N8tLBXMAIAAAAAAxH4gucI4UmNVB7625C6hFSVCuIpJO3lusJlPuL8H5EQVjACAAAAAA0/uh1hrAlGGna/njCVaCqBwubxkifzF0zjCDQrIJOtUAAzMyAH0AAAAFZAAgAAAAAG8cd6WBneNunlqrQ2EmNf35W7OGObGq9WL4ePX+LUDmBXMAIAAAAAAjJ2+sX87NSis9hBsgb1QprVRnO7Bf+GObCGoUqyPE4wVjACAAAAAAAoJ4Ufdq45T9Aun5FupHlaBCIUsmUn6dXgV9KorpFikAAzMzAH0AAAAFZAAgAAAAAFWOUGkUpy8yf6gB3dio/aOfRKh7XuhvsUj48iESFJrGBXMAIAAAAAAY7sCDMcrUXvNuL6dO0m11WyijzXZvPIcOKob6IpC4PQVjACAAAAAAO/QFjczoznH95Iu0YEVMsU1GA1yxSGL8bcwSweNzAtkAAzM0AH0AAAAFZAAgAAAAAGUrIdKxOihwNmo6B+aG+Ag1qa0+iqdksHOjQj+Oy9bZBXMAIAAAAABwa5dbI2KmzBDNBTQBEkjZv4sPaeRkRNejcjdVymRFKQVjACAAAAAAUtdJucZoQvvPJiy65RonQrxBCcvtfHslpbgLbtWirCYAAzM1AH0AAAAFZAAgAAAAAOx89xV/hRk64/CkM9N2EMK6aldII0c8smdcsZ46NbP8BXMAIAAAAADBF6tfQ+7q9kTuLyuyrSnDgmrdmr
Xkdhl980i1KHuGHgVjACAAAAAAVc5njbLX1afpsuG662U/OBo4LanEk06lKbQtd95fswgAAzM2AH0AAAAFZAAgAAAAAMkN0L1oQWXhjwn9rAdudcYeN8/5VdCKU8cmDt7BokjsBXMAIAAAAAAT62pGXoRwExe9uvgYOI0hg5tOxilrWfoEmT0SMglWJwVjACAAAAAAhNc5ovIaxgtS856ZBfG+cAcHAz+3ewzB9C6zZoaggj4AAzM3AH0AAAAFZAAgAAAAANxfa4xCoaaB7k1C1RoH1LBhsCbN2yEq15BT9b+iqEC4BXMAIAAAAACAX9LV8Pemfw7NF0iB1/85NzM1Ef+1mUfyehacUVgobQVjACAAAAAAc0bIRNzuyc1Y0hkEmm7xV6Xp/LBlAgMq/ISCHr0Tq44AAzM4AH0AAAAFZAAgAAAAAOYIYoWkX7dGuyKfi3XssUlc7u/gWzqrR9KMkikKVdmSBXMAIAAAAABVF2OYjRTGi9Tw8XCAwZWLpX35Yl271TlNWp6N/nROhAVjACAAAAAAyGLbmHhe8eTQC8L2eDRcezUWULLZ9E8DPic7Mfp8/+4AAzM5AH0AAAAFZAAgAAAAAIy0+bXZi10QC+q7oSOLXK5Fee7VEk/qHSXukfeVIfgzBXMAIAAAAAAQ3IIV/JQCHW95AEbH5zGIHtJqyuPjWPMIZ+VmQHlxEwVjACAAAAAAD3azcNDdohU6tHO6ypT7F0rtydAYIMmLIsX6y7cXVkMAAzQwAH0AAAAFZAAgAAAAAL5SOJQ3LOhgdXJ5v086NNeAl1qonQnchObdpZJ1kHeEBXMAIAAAAAA+tEqTXODtik+ydJZSnUqXF9f18bPeze9eWtR7ExZJgQVjACAAAAAAUd8v2RN1WagYan1LbV+G81vcJKQ6HsX2GSBtnzMH7XEAAzQxAH0AAAAFZAAgAAAAAKl8zcHJRDjSjJeV/WvMxulW1zrTFtaeBy/aKKhadc6UBXMAIAAAAADBdWQl5SBIvtZZLIHszePwkO14W1mQ0izUk2Ov21cPNAVjACAAAAAAHvQLJ2X4ncjVv1BsOd/jbVouRPwf4222WlGSzPyAfnsAAzQyAH0AAAAFZAAgAAAAAFvotcNaoKnVt5CBCOPwjexFO0WGWuaIGL6H/6KSau+6BXMAIAAAAAD2y2mBN5xPu5PJoY2zcr0GnQDtHRBogA5+xzIxccE9fwVjACAAAAAA1OntPAxq1NJ4sfca+ysHcg68mj7QHwdEZQVxHMlV6x0AAzQzAH0AAAAFZAAgAAAAAPezU0/vNT4Q4YKbTbaeHqcwNLT+IjW/Y9QFpIooihjPBXMAIAAAAACj2x4O4rHter8ZnTws5LAP9jJ/6kk9C/V3vL50LoFZHAVjACAAAAAAlZd3bPMUXRhTwLWflrQuKIwtauTswenUiaDi2ibHYYYAAzQ0AH0AAAAFZAAgAAAAAMqpayM2XotEFmm0gwQd9rIzApy0X+7HfOhNk6VU7F5lBXMAIAAAAACJR9+q5T9qFHXFNgGbZnPubG8rkO6cwWhzITQTmd6VgwVjACAAAAAAEEOCUo+ihRGl1kuHlabWBWUFyJPAXSXzpQB4od57cMEAAzQ1AH0AAAAFZAAgAAAAANCeyW+3oebaQk+aqxNVhAcT/BZ5nhsTVdKS3tMrLSvWBXMAIAAAAADxRFMDhkyuEc++WnndMfoUMLNL7T7rWoeblcrpSI6soQVjACAAAAAA1KYQISxmBcGmgDMAjBpTcfQr/oci60Kf4QtEYEXPKSQAAzQ2AH0AAAAFZAAgAAAAAIbo5YBTxXM7HQhl7UP9NNgpPGFkBx871r1B65G47+K8BXMAIAAAAAC21dJSxnEhnxO5gzN5/34BL4von45e1meW92qowzb8fQVjACAAAAAAisxsskU/9ykphpSjB6bMMY+6Tbt6wpjhE2dg1IULAgYAAzQ3AH0AAAAFZAAgAAAAABm/6pF96j26Jm7z5KkY1y33zcAEXLx2n0DwC03bs/ixBXMAIAAAAAD01OMvTZI/mqMgxIhA5nLs068mW+GKl3OW3ilf2D8+LgVjACAAAAAAnPa8PTfcaSSAdzNAC54IMTicbShbtt/cSnFHz7u7g8wAAzQ4AH0AAAAFZAAgAAAAAJ/D3+17gaQdkBqkL2wMwccdmCaVOtxzIkM8VyI4xI5zBXMAIAAAAAAggLVmkc5u+YzBR+oNE+XgLVp64fC6MzUb/Ilu/Jsw0AVjACAAAAAAp5qCK8p0eTvAAUQ8fIOGyb2YM1YflQ8H6ggiWZ9vNI4AAzQ5AH0AAAAFZAAgAAAAAJMRyUW50oaTzspS6A3TUoXyC3gNYQoShUGPakMmeVZrBXMAIAAAAACona2Pqwt4U2PmFrtmu37jB9kQ/12okyAVtYa8TQkDiQVjACAAAAAAx/LOuDwvpaPFJeKLowZW/4q7SUsLtdldB27zc4//alEAAzUwAH0AAAAFZAAgAAAAAByuYl8dBvfaZ0LO/81JW4hYypeNmvLMaxsIdvqMPrWoBXMAIAAAAABNddwobOUJzm9HOUD8BMZJqkNCUCqstHZkC76FIdNg9AVjACAAAAAA4SEPY+qEZ2dCu/9485IEVybiM3Z7szXYM+izxklNm14AAzUxAH0AAAAFZAAgAAAAAHEzLtfmF/sBcYPPdj8867VmmQyU1xK9I/3Y0478azvABXMAIAAAAAAcmyFajZPnBTbO+oLInNwlApBocUekKkxz2hYFeSlQ+gVjACAAAAAA15ibZoBP1/pP27BDIfizDYNRDIpOmj8Ie34FvrtqjhMAAzUyAH0AAAAFZAAgAAAAAJam7JYsZe2cN20ZYm2W3v1pisNt5PLiniMzymBLWyMtBXMAIAAAAABxCsKVMZMTn3n+R2L7pVz5nW804r8HcK0mCBw3jUXKXAVjACAAAAAAElCgoMRrtlONqnyosjIpiz4mQTLq3+b/JUCVEq4QFiAAAzUzAH0AAAAFZAAgAAAAAMXrXx0saZ+5gORmwM2FLuZG6iuO2YS+1IGPoAtDKoKBBXMAIAAAAADIQsxCr8CfFKaBcx8kIeSywnGh7JHjKRJ9vJd9x79y7wVjACAAAAAAPzOTJ088k9QdGUpZ1ubUdkw3pTGkNF8bui4a9wqeo08AAzU0AH0AAAAFZAAgAAAAACbzcUD3INSnCRspOKF7ubne74OK9L0FTZvi9Ay0JVDYBXMAIAAAAADPebVQH8Btk9rhBIoUOdSAdpPvz7qIY4UC2i6IGisSAQVjACAAAAAAe4zSAgMQPjtz2t1DgxW1XOlV+fnNMXt6iQ7iV7DgQz4AAzU1AH0AAAAFZAAgAAAAACUc2CtD1MK/UTxtv+8iA9FoHEyTwdl43HKeSwDw2Lp5BXMAIAAAAACCIduIdw65bQMzRYRfjBJj62bc69T4QqH4QoWanwlvowVjACAAAAAA+i6aTnkoixxSOxDgQM/aWTylOy1m0ViCDU5asOM5ieYAAzU2AH0AAAAFZAAgAAAAAHSaHWs/dnmI9sc7nB50VB2Bz
s0kHapMHCQdyVEYY30TBXMAIAAAAACkV22lhEjWv/9/DubfHBAcwJggKI5mIbSK5L2nyqloqQVjACAAAAAA1I5pLbnlFf/EfJlhE0RFIioDjzKrWNh4TO8H/ksrbMcAAzU3AH0AAAAFZAAgAAAAAAL8jhNBG0KXXZhmZ0bPXtfgapJCB/AI+BEHB0eZ3C75BXMAIAAAAADHx/fPa639EBmGV5quLi8IQT600ifiKSOhTDOK19DnzwVjACAAAAAANfWpWWy9iu7pgMEy2aVCggGEtJtXKQG1+3zCf8D6iEkAAzU4AH0AAAAFZAAgAAAAAAxgeclNl09H7HvzD1oLwb2YpFca5eaX90uStYXHilqKBXMAIAAAAACMU5pSxzIzWlQxHyW170Xs9EhD1hURASQk+qkx7K5Y6AVjACAAAAAA/kYlJPSFn2g1l7cy6djV/7wgnlPWphZ0IKSY8uenUF0AAzU5AH0AAAAFZAAgAAAAABmO7QD9vxWMmFjIHz13lyOeV6vHT6mYCsWxF7hb/yOjBXMAIAAAAACT9lmgkiqzuWG24afuzYiCeK9gmJqacmxAruIukd0xEAVjACAAAAAAyiZxtuOZQNSit4wFMJS3jnqLbDzyJ0OtDTKs6r0EO0cAAzYwAH0AAAAFZAAgAAAAAB89SjLtDJkqEghRGyj6aQ/2qvWLNuMROoXmzbYbCMKMBXMAIAAAAAC8sywgND+CjhVTF6HnRQeay8y9/HnVzDI42dEPah28LQVjACAAAAAAzBj8q9Ig4uhIc6rhNDVX1lII9lzfhJ0+ig8viBhdrtQAAzYxAH0AAAAFZAAgAAAAABN2alGq9Aats1mwERNGwL/fIwZSvVCe9/8XMHTFlpUpBXMAIAAAAACuDPjJgvvbBYhbLpjMiWUCsVppiYrhvR+yMysNPN8cZAVjACAAAAAAqcsLIvb3Pe/QKYB9wQ86Ye2OFSAJEIY4ve7LMM3SwGMAAzYyAH0AAAAFZAAgAAAAAL8YB6VAqGBiWD4CBv16IBscg5J7VQCTZu87n6pj+86KBXMAIAAAAAAmxm8e68geeyAdUjSMWBHzUjneVB0pG9TBXIoE6467hAVjACAAAAAA9Kd65vKIUckNPvxl/HHNPJVnFHXyErp4qibmD7BloukAAzYzAH0AAAAFZAAgAAAAAL4yLkCTV5Dmxa5toBu4JT8ge/cITAaURIOuFuOtFUkeBXMAIAAAAAAXoFNQOMGkAj7qEJP0wQafmFSXgWGeorDVbwyOxWLIsgVjACAAAAAAaFrBx0xXIryTe7V/kX+jfPHd0057x7k7MxzDAzi0RpcAAzY0AH0AAAAFZAAgAAAAAEU8DoUp46YtYjNFS9kNXwdYxQ9IW27vCTb+VcqqfnKNBXMAIAAAAADe7vBOgYReE8X78k5ARuUnv4GmzPZzg6SbConf4L2G3wVjACAAAAAAOl/tartywd/fJj5DNRsVH/ml9tJ8KkkCbKObsFe8lHcAAzY1AH0AAAAFZAAgAAAAAPa4yKTtkUtySuWo1ZQsp2QXtPb5SYqzA5vYDnS1P6c0BXMAIAAAAADKnF58R1sXlHlsHIvCBR3YWW/qk54z9CTDhZydkD1cOQVjACAAAAAAfn/jP8K3QUILngCNkydHARyBvBHIFdaJjzV0EXsFruMAAzY2AH0AAAAFZAAgAAAAAN5WJnMBmfgpuQPyonmY5X6OdRvuHw4nhsnGRnFAQ95VBXMAIAAAAACwftzu7KVV1rmGKwXtJjs3cJ1gE3apr8+N0SAg1F2cHwVjACAAAAAAf0XyQQLd/HIYaf9EeAV0o2h12K1AV5piLCpZihznBXoAAzY3AH0AAAAFZAAgAAAAACHvDsaPhoSb6DeGnKQ1QOpGYAgK82qpnqwcmzSeWaJHBXMAIAAAAABRq3C5+dOfnkAHM5Mg5hPB3O4jhwQlBgQWLA7Ph5bhgwVjACAAAAAAm2Ukk5kkQp+PDpBCCefQOqFKKZie4hINim3yvtypsEAAAzY4AH0AAAAFZAAgAAAAAOBePJvccPMJmy515KB1AkXF5Pi8NOG4V8psWy0SPRP+BXMAIAAAAAB3dOJG9xIDtEKCRzeNnPS3bFZepMj8UKBobKpSoCPqpgVjACAAAAAAnu33SngYPOtRgdJ3aBBuxWn80ti3OO6nZJcI6eh0VfQAAzY5AH0AAAAFZAAgAAAAABUvRrDQKEXLMdhnzXRdhiL6AGNs2TojPky+YVLXs+JnBXMAIAAAAAD1kYicbEEcPzD4QtuSYQQWDPq8fuUWGddpWayKn3dT9QVjACAAAAAAUc5dxcbNYY3qMO8+Xm2xis+pH9NjPLrHqHenwDEImAEAAzcwAH0AAAAFZAAgAAAAAOvSnpujeKNen4pqc2HR63C5s5oJ1Vf4CsbKoYQvkwl5BXMAIAAAAACw2+vAMdibzd2YVVNfk81yXkFZP0WLJ82JBxJmXnYE+QVjACAAAAAAnmej008JFQAgdImjk26PYvqdATjRYRufLi+vEVVwtucAAzcxAH0AAAAFZAAgAAAAAAo0xfGG7tJ3GWhgPVhW5Zn239nTD3PadShCNRc9TwdNBXMAIAAAAADZh243oOhenu0s/P/5KZLBDh9ADqKHtSWcXpO9D2sIjgVjACAAAAAAhpNHI4+aRdWHCvZfEjlgpRBz36g05wN9p/hj3NEwc7EAAAVlACAAAAAA65pz95EthqQpfoHS9nWvdCh05AV+OokP7GUaI+7j8+wSY20AAAAAAAAAAAAAEHBheWxvYWRJZAAAAAAAEGZpcnN0T3BlcmF0b3IAAQAAAAA=", + "base64": 
"DYckAAADcGF5bG9hZABXJAAABGcAQyQAAAMwAH0AAAAFZAAgAAAAAHgYoMGjEE6fAlAhICv0+doHcVX8CmMVxyq7+jlyGrvmBXMAIAAAAAC/5MQZgTHuIr/O5Z3mXPvqrom5JTQ8IeSpQGhO9sB+8gVsACAAAAAAuPSXVmJUAUpTQg/A9Bu1hYczZF58KEhVofakygbsvJQAAzEAfQAAAAVkACAAAAAA2kiWNvEc4zunJ1jzvuClFC9hjZMYruKCqAaxq+oY8EAFcwAgAAAAACofIS72Cm6s866UCk+evTH3CvKBj/uZd72sAL608rzTBWwAIAAAAADuCQ/M2xLeALF0UFZtJb22QGOhHmJv6xoO+kZIHcDeiAADMgB9AAAABWQAIAAAAABkfoBGmU3hjYBvQbjNW19kfXneBQsQQPRfUL3UAwI2cAVzACAAAAAAUpK2BUOqX/DGdX5YJniEZMWkofxHqeAbXceEGJxhp8AFbAAgAAAAAKUaLzIldNIZv6RHE+FwbMjzcNHqPESwF/37mm43VPrsAAMzAH0AAAAFZAAgAAAAAFNprhQ3ZwIcYbuzLolAT5n/vc14P9kUUQComDu6eFyKBXMAIAAAAAAcx9z9pk32YbPV/sfPZl9ALIEVsqoLXgqWLVK/tP+heAVsACAAAAAA/qxvuvJbAHwwhfrPVpmCFzNvg2cU/NXaWgqgYUZpgXwAAzQAfQAAAAVkACAAAAAAODI+pB2pCuB+YmNEUAgtMfNdt3DmSkrJ96gRzLphgb8FcwAgAAAAAAT7dewFDxUDECQ3zVq75/cUN4IP+zsqhkP5+czUwlJIBWwAIAAAAACFGeOtd5zBXTJ4JYonkn/HXZfHipUlqGwIRUcH/VTatwADNQB9AAAABWQAIAAAAACNAk+yTZ4Ewk1EnotQK8O3h1gg9I7pr9q2+4po1iJVgAVzACAAAAAAUj/LesmtEsgqYVzMJ67umVA11hJTdDXwbxDoQ71vWyUFbAAgAAAAABlnhpgTQ0WjLb5u0b/vEydrCeFjVynKd7aqb+UnvVLeAAM2AH0AAAAFZAAgAAAAAD/FIrGYFDjyYmVb7oTMVwweWP7A6F9LnyIuNO4MjBnXBXMAIAAAAACIZgJCQRZu7NhuNMyOqCn1tf+DfU1qm10TPCfj5JYV3wVsACAAAAAA5hmY4ptuNxULGf87SUFXQWGAONsL9U29duh8xqsHtxoAAzcAfQAAAAVkACAAAAAAciRW40ORJLVwchOEpz87Svb+5toAFM6LxDWv928ECwQFcwAgAAAAAN0dipyESIkszfjRzdDi8kAGaa2Hf4wrPAtiWwboZLuxBWwAIAAAAAANr4o/+l1OIbbaX5lZ3fQ/WIeOcEXjNI1F0WbSgQrzaQADOAB9AAAABWQAIAAAAACZqAyCzYQupJ95mrBJX54yIz9VY7I0WrxpNYElCI4dTQVzACAAAAAA/eyJb6d1xfE+jJlVXMTD3HS/NEYENPVKAuj56Dr2dSEFbAAgAAAAANkSt154Or/JKb31VvbZFV46RPgUp8ff/hcPORL7PpFBAAM5AH0AAAAFZAAgAAAAAI5bm3YO0Xgf0VT+qjVTTfvckecM3Cwqj7DTKZXf8/NXBXMAIAAAAAD/m+h8fBhWaHm6Ykuz0WX1xL4Eme3ErLObyEVJf8NCywVsACAAAAAAfb1VZZCqs2ivYbRzX4p5CtaCkKW+g20Pr57FWXzEZi8AAzEwAH0AAAAFZAAgAAAAANqo4+p6qdtCzcB4BX1wQ6llU7eFBnuu4MtZwp4B6mDlBXMAIAAAAAAGiz+VaukMZ+6IH4jtn4KWWdKK4/W+O+gRioQDrfzpMgVsACAAAAAAG4YYkTp80EKo59mlHExDodRQFR7njhR5dmISwUJ6ukAAAzExAH0AAAAFZAAgAAAAAPrFXmHP2Y4YAm7b/aqsdn/DPoDkv7B8egWkfe23XsM1BXMAIAAAAAAGhwpKAr7skeqHm3oseSbO7qKNhmYsuUrECBxJ5k+D2AVsACAAAAAAAqPQi9luYAu3GrFCEsVjd9z2zIDcp6SPTR2w6KQEr+IAAzEyAH0AAAAFZAAgAAAAABzjYxwAjXxXc0Uxv18rH8I3my0Aguow0kTwKyxbrm+cBXMAIAAAAADVbqJVr6IdokuhXkEtXF0C2gINLiAjMVN20lE20Vmp2QVsACAAAAAAD7K1Fx4gFaaizkIUrf+EGXQeG7QX1jadhGc6Ji471H8AAzEzAH0AAAAFZAAgAAAAAFMm2feF2fFCm/UC6AfIyepX/xJDSmnnolQIBnHcPmb5BXMAIAAAAABLI11kFrQoaNVZFmq/38aRNImPOjdJh0Lo6irI8M/AaAVsACAAAAAAOWul0oVqJ9CejD2RqphhTC98DJeRQy5EwbNerU2+4l8AAzE0AH0AAAAFZAAgAAAAAJvXB3KyNiNtQko4SSzo/9b2qmM2zU9CQTTDfLSBWMgRBXMAIAAAAAAvjuVP7KsLRDeqVqRziTKpBrjVyqKiIbO9Gw8Wl2wFTAVsACAAAAAADlE+oc1ins+paNcaOZJhBlKlObDJ4VQORWjFYocM4LgAAzE1AH0AAAAFZAAgAAAAAPGdcxDiid8z8XYnfdDivNMYVPgBKdGOUw6UStU+48CdBXMAIAAAAAARj6g1Ap0eEfuCZ4X2TsEw+Djrhto3fA5nLwPaY0vCTgVsACAAAAAAoHqiwGOUkBu8SX5U1yHho+UIFdSN2MdQN5s6bQ0EsJYAAzE2AH0AAAAFZAAgAAAAAP5rGPrYGt3aKob5f/ldP0qrW7bmWvqnKY4QwdDWz400BXMAIAAAAADTQkW2ymaaf/bhteOOGmSrIR97bAnJx+yN3yMj1bTeewVsACAAAAAADyQnHGH2gF4w4L8axUsSTf6Ubk7L5/eoFOJk12MtZAoAAzE3AH0AAAAFZAAgAAAAAAlz6wJze5UkIxKpJOZFGCOf3v2KByWyI6NB6JM9wNcBBXMAIAAAAABUC7P/neUIHHoZtq0jFVBHY75tSFYr1Y5S16YN5XxC1QVsACAAAAAAgvxRbXDisNnLY3pfsjDdnFLtkvYUC4lhA68eBXc7KAwAAzE4AH0AAAAFZAAgAAAAAFJ8AtHcjia/9Y5pLEc3qVgH5xKiXw12G9Kn2A1EY8McBXMAIAAAAAAxe7Bdw7eUSBk/oAawa7uicTEDgXLymRNhBy1LAxhDvwVsACAAAAAAxKPaIBKVx3jTA+R/el7P7AZ7efrmTGjJs3Hj/YdMddwAAzE5AH0AAAAFZAAgAAAAAO8uwQUaKFb6vqR3Sv3Wn4QAonC2exOC9lGG1juqP5DtBXMAIAAAAABZf1KyJgQg8/Rf5c02DgDK2aQu0rNCOvaL60ohDHyY+gVsACAAAAAAqyEjfKC8lYoIfoXYHUqHZPoaA6EK5BAZy5dxXZmay4kAAzIwAH0AAAAFZAAgAAAAAE8YtqyRsGCeiR6hhiyisR/hccmK4nZqIMzO4lUBmEFzBXMAIAAAAAC1UYOSKqAeG1UJiKjWFV
skRhuFKpj9Ezy+lICZvFlN5AVsACAAAAAA6Ct9nNMKyRazn1OKnRKagm746CGu+jyhbL1qJnZxGi0AAzIxAH0AAAAFZAAgAAAAAPhCrMausDx1QUIEqp9rUdRKyM6a9AAx7jQ3ILIu8wNIBXMAIAAAAACmH8lotGCiF2q9VQxhsS+7LAZv79VUAsOUALaGxE/EpAVsACAAAAAAnc1xCKfdvbUEc8F7XZqlNn1C+hZTtC0I9I3LL06iaNkAAzIyAH0AAAAFZAAgAAAAAOBi/GAYFcstMSJPgp3VkMiuuUUCrZytvqYaU8dwm8v2BXMAIAAAAACEZSZVyD3pKzGlbdwlYmWQhHHTV5SnNLknl2Gw8IaUTQVsACAAAAAAfsLZsEDcWSuNsIo/TD1ReyQW75HPMgmuKZuWFOLKRLoAAzIzAH0AAAAFZAAgAAAAAIQuup+YGfH3mflzWopN8J1X8o8a0d9CSGIvrA5HOzraBXMAIAAAAADYvNLURXsC2ITMqK14LABQBI+hZZ5wNf24JMcKLW+84AVsACAAAAAACzfjbTBH7IwDU91OqLAz94RFkoqBOkzKAqQb55gT4/MAAzI0AH0AAAAFZAAgAAAAAKsh0ADyOnVocFrOrf6MpTrNvAj8iaiE923DPryu124gBXMAIAAAAADg24a8NVE1GyScc6tmnTbmu5ulzO+896fE92lN08MeswVsACAAAAAAaPxcOIxnU7But88/yadOuDJDMcCywwrRitaxMODT4msAAzI1AH0AAAAFZAAgAAAAAKkVC2Y6HtRmv72tDnPUSjJBvse7SxLqnr09/Uuj9sVVBXMAIAAAAABYNFUkH7ylPMN+Bc3HWX1e0flGYNbtJNCY9SltJCW/UAVsACAAAAAAZYK/f9H4OeihmpiFMH7Wm7uLvs2s92zNA8wyrNZTsuMAAzI2AH0AAAAFZAAgAAAAADDggcwcb/Yn1Kk39sOHsv7BO/MfP3m/AJzjGH506Wf9BXMAIAAAAAAYZIsdjICS0+BDyRUPnrSAZfPrwtuMaEDEn0/ijLNQmAVsACAAAAAAGPnYVvo2ulO9z4LGd/69NAklfIcZqZvFX2KK0s+FcTUAAzI3AH0AAAAFZAAgAAAAAEWY7dEUOJBgjOoWVht1wLehsWAzB3rSOBtLgTuM2HC8BXMAIAAAAAAAoswiHRROurjwUW8u8D5EUT+67yvrgpB/j6PzBDAfVwVsACAAAAAA6NhRTYFL/Sz4tao7vpPjLNgAJ0FX6P/IyMW65qT6YsMAAzI4AH0AAAAFZAAgAAAAAPZaapeAUUFPA7JTCMOWHJa9lnPFh0/gXfAPjA1ezm4ZBXMAIAAAAACmJvLY2nivw7/b3DOKH/X7bBXjJwoowqb1GtEFO3OYgAVsACAAAAAA/JcUoyKacCB1NfmH8vYqC1f7rd13KShrQqV2r9QBP44AAzI5AH0AAAAFZAAgAAAAAK00u6jadxCZAiA+fTsPVDsnW5p5LCr4+kZZZOTDuZlfBXMAIAAAAAAote4zTEYMDgaaQbAdN8Dzv93ljPLdGjJzvnRn3KXgtQVsACAAAAAAxXd9Mh6R3mnJy8m7UfqMKi6oD5DlZpkaOz6bEjMOdiwAAzMwAH0AAAAFZAAgAAAAAFbgabdyymiEVYYwtJSWa7lfl/oYuj/SukzJeDOR6wPVBXMAIAAAAADAFGFjS1vPbN6mQEhkDYTD6V2V23Ys9gUEUMGNvMPkaAVsACAAAAAAL/D5Sze/ZoEanZLK0IeEkhgVkxEjMWVCfmJaD3a8uNIAAzMxAH0AAAAFZAAgAAAAABNMR6UBv2E627CqLtQ/eDYx7OEwQ7JrR4mSHFa1N8tLBXMAIAAAAAAxH4gucI4UmNVB7625C6hFSVCuIpJO3lusJlPuL8H5EQVsACAAAAAAVLHNg0OUVqZ7WGOP53BkTap9FOw9dr1P4J8HxqFqU04AAzMyAH0AAAAFZAAgAAAAAG8cd6WBneNunlqrQ2EmNf35W7OGObGq9WL4ePX+LUDmBXMAIAAAAAAjJ2+sX87NSis9hBsgb1QprVRnO7Bf+GObCGoUqyPE4wVsACAAAAAAs9c9SM49/pWmyUQKslpt3RTMBNSRppfNO0JBvUqHPg0AAzMzAH0AAAAFZAAgAAAAAFWOUGkUpy8yf6gB3dio/aOfRKh7XuhvsUj48iESFJrGBXMAIAAAAAAY7sCDMcrUXvNuL6dO0m11WyijzXZvPIcOKob6IpC4PQVsACAAAAAAJOP+EHz6awDb1qK2bZQ3kTV7wsj5Daj/IGAWh4g7omAAAzM0AH0AAAAFZAAgAAAAAGUrIdKxOihwNmo6B+aG+Ag1qa0+iqdksHOjQj+Oy9bZBXMAIAAAAABwa5dbI2KmzBDNBTQBEkjZv4sPaeRkRNejcjdVymRFKQVsACAAAAAA4ml/nm0gJNTcJ4vuD+T2Qfq2fQZlibJp/j6MOGDrbHMAAzM1AH0AAAAFZAAgAAAAAOx89xV/hRk64/CkM9N2EMK6aldII0c8smdcsZ46NbP8BXMAIAAAAADBF6tfQ+7q9kTuLyuyrSnDgmrdmrXkdhl980i1KHuGHgVsACAAAAAACUqiFqHZdGbwAA+hN0YUE5zFg+H+dabIB4dj5/75W/YAAzM2AH0AAAAFZAAgAAAAAMkN0L1oQWXhjwn9rAdudcYeN8/5VdCKU8cmDt7BokjsBXMAIAAAAAAT62pGXoRwExe9uvgYOI0hg5tOxilrWfoEmT0SMglWJwVsACAAAAAAlVz4dhiprSbUero6JFfxzSJGclg63oAkAmgbSwbcYxIAAzM3AH0AAAAFZAAgAAAAANxfa4xCoaaB7k1C1RoH1LBhsCbN2yEq15BT9b+iqEC4BXMAIAAAAACAX9LV8Pemfw7NF0iB1/85NzM1Ef+1mUfyehacUVgobQVsACAAAAAAVq4xpbymLk0trPC/a2MvB39I7hRiX8EJsVSI5E5hSBkAAzM4AH0AAAAFZAAgAAAAAOYIYoWkX7dGuyKfi3XssUlc7u/gWzqrR9KMkikKVdmSBXMAIAAAAABVF2OYjRTGi9Tw8XCAwZWLpX35Yl271TlNWp6N/nROhAVsACAAAAAA0nWwYzXQ1+EkDvnGq+SMlq20z+j32Su+i/A95SggPb4AAzM5AH0AAAAFZAAgAAAAAIy0+bXZi10QC+q7oSOLXK5Fee7VEk/qHSXukfeVIfgzBXMAIAAAAAAQ3IIV/JQCHW95AEbH5zGIHtJqyuPjWPMIZ+VmQHlxEwVsACAAAAAAp0jYsyohKv9Pm+4k+DplEGbl9WLZpAJzitrcDj4CNsMAAzQwAH0AAAAFZAAgAAAAAL5SOJQ3LOhgdXJ5v086NNeAl1qonQnchObdpZJ1kHeEBXMAIAAAAAA+tEqTXODtik+ydJZSnUqXF9f18bPeze9eWtR7ExZJgQVsACAAAAAAbrkZCVgB9Qsp4IAbdf+bD4fT6Boqk5UtuA/zhNrh1y0AAzQxAH0AAAAFZAAgAAAAAKl8zcHJRDjSjJeV/
WvMxulW1zrTFtaeBy/aKKhadc6UBXMAIAAAAADBdWQl5SBIvtZZLIHszePwkO14W1mQ0izUk2Ov21cPNAVsACAAAAAAHErCYycpqiIcCZHdmPL1hi+ovLQk4TAvENpfLdTRamQAAzQyAH0AAAAFZAAgAAAAAFvotcNaoKnVt5CBCOPwjexFO0WGWuaIGL6H/6KSau+6BXMAIAAAAAD2y2mBN5xPu5PJoY2zcr0GnQDtHRBogA5+xzIxccE9fwVsACAAAAAAdS34xzJesnUfxLCcc1U7XzUqLy8MAzV/tcjbqaD3lkMAAzQzAH0AAAAFZAAgAAAAAPezU0/vNT4Q4YKbTbaeHqcwNLT+IjW/Y9QFpIooihjPBXMAIAAAAACj2x4O4rHter8ZnTws5LAP9jJ/6kk9C/V3vL50LoFZHAVsACAAAAAAQdBDF3747uCVP5lB/zr8VmzxJfTSZHBKeIgm5FyONXwAAzQ0AH0AAAAFZAAgAAAAAMqpayM2XotEFmm0gwQd9rIzApy0X+7HfOhNk6VU7F5lBXMAIAAAAACJR9+q5T9qFHXFNgGbZnPubG8rkO6cwWhzITQTmd6VgwVsACAAAAAAOZLQ6o7e4mVfDzbpQioa4d3RoTvqwgnbmc5Qh2wsZuoAAzQ1AH0AAAAFZAAgAAAAANCeyW+3oebaQk+aqxNVhAcT/BZ5nhsTVdKS3tMrLSvWBXMAIAAAAADxRFMDhkyuEc++WnndMfoUMLNL7T7rWoeblcrpSI6soQVsACAAAAAAdBuBMJ1lxt0DRq9pOZldQqchLs3B/W02txcMLD490FEAAzQ2AH0AAAAFZAAgAAAAAIbo5YBTxXM7HQhl7UP9NNgpPGFkBx871r1B65G47+K8BXMAIAAAAAC21dJSxnEhnxO5gzN5/34BL4von45e1meW92qowzb8fQVsACAAAAAAm3Hk2cvBN0ANaR5jzeZE5TsdxDvJCTOT1I01X7cNVaYAAzQ3AH0AAAAFZAAgAAAAABm/6pF96j26Jm7z5KkY1y33zcAEXLx2n0DwC03bs/ixBXMAIAAAAAD01OMvTZI/mqMgxIhA5nLs068mW+GKl3OW3ilf2D8+LgVsACAAAAAAaLvJDrqBESTNZSdcXsd+8GXPl8ZkUsGpeYuyYVv/kygAAzQ4AH0AAAAFZAAgAAAAAJ/D3+17gaQdkBqkL2wMwccdmCaVOtxzIkM8VyI4xI5zBXMAIAAAAAAggLVmkc5u+YzBR+oNE+XgLVp64fC6MzUb/Ilu/Jsw0AVsACAAAAAACz3HVKdWkx82/kGbVpcbAeZtsj2R5Zr0dEPfle4IErkAAzQ5AH0AAAAFZAAgAAAAAJMRyUW50oaTzspS6A3TUoXyC3gNYQoShUGPakMmeVZrBXMAIAAAAACona2Pqwt4U2PmFrtmu37jB9kQ/12okyAVtYa8TQkDiQVsACAAAAAAltJJKjCMyBTJ+4PkdDCPJdeX695P8P5h7WOZ+kmExMAAAzUwAH0AAAAFZAAgAAAAAByuYl8dBvfaZ0LO/81JW4hYypeNmvLMaxsIdvqMPrWoBXMAIAAAAABNddwobOUJzm9HOUD8BMZJqkNCUCqstHZkC76FIdNg9AVsACAAAAAAQQOkIQtkyNavqCnhQbNg3HfqrJdsAGaoxSJePJl1qXsAAzUxAH0AAAAFZAAgAAAAAHEzLtfmF/sBcYPPdj8867VmmQyU1xK9I/3Y0478azvABXMAIAAAAAAcmyFajZPnBTbO+oLInNwlApBocUekKkxz2hYFeSlQ+gVsACAAAAAAZ6IkrOVRcC8vSA6Vb4fPWZJrYexXhEabIuYIeXNsCSgAAzUyAH0AAAAFZAAgAAAAAJam7JYsZe2cN20ZYm2W3v1pisNt5PLiniMzymBLWyMtBXMAIAAAAABxCsKVMZMTn3n+R2L7pVz5nW804r8HcK0mCBw3jUXKXAVsACAAAAAA7j3JGnNtR64P4dJLeUoScFRGfa8ekjh3dvhw46sRFk0AAzUzAH0AAAAFZAAgAAAAAMXrXx0saZ+5gORmwM2FLuZG6iuO2YS+1IGPoAtDKoKBBXMAIAAAAADIQsxCr8CfFKaBcx8kIeSywnGh7JHjKRJ9vJd9x79y7wVsACAAAAAAcvBV+SykDYhmRFyVYwFYB9oBKBSHr55Jdz2cXeowsUQAAzU0AH0AAAAFZAAgAAAAACbzcUD3INSnCRspOKF7ubne74OK9L0FTZvi9Ay0JVDYBXMAIAAAAADPebVQH8Btk9rhBIoUOdSAdpPvz7qIY4UC2i6IGisSAQVsACAAAAAAiBunJi0mPnnXdnldiq+If8dcb/n6apHnaIFt+oyYO1kAAzU1AH0AAAAFZAAgAAAAACUc2CtD1MK/UTxtv+8iA9FoHEyTwdl43HKeSwDw2Lp5BXMAIAAAAACCIduIdw65bQMzRYRfjBJj62bc69T4QqH4QoWanwlvowVsACAAAAAAM0TV7S+aPVVzJOQ+cpSNKHTwyQ0mWa8tcHzfk3nR+9IAAzU2AH0AAAAFZAAgAAAAAHSaHWs/dnmI9sc7nB50VB2Bzs0kHapMHCQdyVEYY30TBXMAIAAAAACkV22lhEjWv/9/DubfHBAcwJggKI5mIbSK5L2nyqloqQVsACAAAAAAS19m7DccQxgryOsBJ3GsCs37yfQqNi1G+S6fCXpEhn4AAzU3AH0AAAAFZAAgAAAAAAL8jhNBG0KXXZhmZ0bPXtfgapJCB/AI+BEHB0eZ3C75BXMAIAAAAADHx/fPa639EBmGV5quLi8IQT600ifiKSOhTDOK19DnzwVsACAAAAAAlyLTDVkHxbayklD6Qymh3odIK1JHaOtps4f4HR+PcDgAAzU4AH0AAAAFZAAgAAAAAAxgeclNl09H7HvzD1oLwb2YpFca5eaX90uStYXHilqKBXMAIAAAAACMU5pSxzIzWlQxHyW170Xs9EhD1hURASQk+qkx7K5Y6AVsACAAAAAAJbMMwJfNftA7Xom8Bw/ghuZmSa3x12vTZxBUbV8m888AAzU5AH0AAAAFZAAgAAAAABmO7QD9vxWMmFjIHz13lyOeV6vHT6mYCsWxF7hb/yOjBXMAIAAAAACT9lmgkiqzuWG24afuzYiCeK9gmJqacmxAruIukd0xEAVsACAAAAAAZa0/FI/GkZR7CtX18Xg9Tn9zfxkD0UoaSt+pIO5t1t4AAzYwAH0AAAAFZAAgAAAAAB89SjLtDJkqEghRGyj6aQ/2qvWLNuMROoXmzbYbCMKMBXMAIAAAAAC8sywgND+CjhVTF6HnRQeay8y9/HnVzDI42dEPah28LQVsACAAAAAAoxv7UKh0RqUAWcOsQvU123zO1qZn73Xfib0qncZCB34AAzYxAH0AAAAFZAAgAAAAABN2alGq9Aats1mwERNGwL/fIwZSvVCe9/8XMHTFlpUpBXMAIAAAAACuDPjJgvvbBYhbLpjMiWUCsVppiYrhvR+yMysNPN8cZAVsACAAAAAAKpADjc4bzIZMi9Q/+oe0EMRJ
HYQt6dlo1x/lRquagqkAAzYyAH0AAAAFZAAgAAAAAL8YB6VAqGBiWD4CBv16IBscg5J7VQCTZu87n6pj+86KBXMAIAAAAAAmxm8e68geeyAdUjSMWBHzUjneVB0pG9TBXIoE6467hAVsACAAAAAAV76JZAlYpgC/Zl8awx2ArCg1uuyy2XVTSkp0wUMi/7UAAzYzAH0AAAAFZAAgAAAAAL4yLkCTV5Dmxa5toBu4JT8ge/cITAaURIOuFuOtFUkeBXMAIAAAAAAXoFNQOMGkAj7qEJP0wQafmFSXgWGeorDVbwyOxWLIsgVsACAAAAAAc4Un6dtIFe+AQ+RSfNWs3q63RTHhmyc+5GKRRdpWRv8AAzY0AH0AAAAFZAAgAAAAAEU8DoUp46YtYjNFS9kNXwdYxQ9IW27vCTb+VcqqfnKNBXMAIAAAAADe7vBOgYReE8X78k5ARuUnv4GmzPZzg6SbConf4L2G3wVsACAAAAAA78YHWVkp6HbZ0zS4UL2z/2pj9vPDcMDt7zTv6NcRsVsAAzY1AH0AAAAFZAAgAAAAAPa4yKTtkUtySuWo1ZQsp2QXtPb5SYqzA5vYDnS1P6c0BXMAIAAAAADKnF58R1sXlHlsHIvCBR3YWW/qk54z9CTDhZydkD1cOQVsACAAAAAAHW3ERalTFWKMzjuXF3nFh0pSrQxM/ojnPbPhc4v5MaQAAzY2AH0AAAAFZAAgAAAAAN5WJnMBmfgpuQPyonmY5X6OdRvuHw4nhsnGRnFAQ95VBXMAIAAAAACwftzu7KVV1rmGKwXtJjs3cJ1gE3apr8+N0SAg1F2cHwVsACAAAAAATDW0reyaCjbJuVLJzbSLx1OBuBoQu+090kgW4RurVacAAzY3AH0AAAAFZAAgAAAAACHvDsaPhoSb6DeGnKQ1QOpGYAgK82qpnqwcmzSeWaJHBXMAIAAAAABRq3C5+dOfnkAHM5Mg5hPB3O4jhwQlBgQWLA7Ph5bhgwVsACAAAAAAqkC8zYASvkVrp0pqmDyFCkPaDmD/ePAJpMuNOCBhni8AAzY4AH0AAAAFZAAgAAAAAOBePJvccPMJmy515KB1AkXF5Pi8NOG4V8psWy0SPRP+BXMAIAAAAAB3dOJG9xIDtEKCRzeNnPS3bFZepMj8UKBobKpSoCPqpgVsACAAAAAAPG3IxQVOdZrr509ggm5FKizWWoZPuVtOgOIGZ3m+pdEAAzY5AH0AAAAFZAAgAAAAABUvRrDQKEXLMdhnzXRdhiL6AGNs2TojPky+YVLXs+JnBXMAIAAAAAD1kYicbEEcPzD4QtuSYQQWDPq8fuUWGddpWayKn3dT9QVsACAAAAAA9+Sf7PbyFcY45hP9oTfjQiOUS3vEIAT8C0vOHymwYSUAAzcwAH0AAAAFZAAgAAAAAOvSnpujeKNen4pqc2HR63C5s5oJ1Vf4CsbKoYQvkwl5BXMAIAAAAACw2+vAMdibzd2YVVNfk81yXkFZP0WLJ82JBxJmXnYE+QVsACAAAAAArQ/E1ACyhK4ZyLqH9mNkCU7WClqRQTGyW9tciSGG/EMAAzcxAH0AAAAFZAAgAAAAAAo0xfGG7tJ3GWhgPVhW5Zn239nTD3PadShCNRc9TwdNBXMAIAAAAADZh243oOhenu0s/P/5KZLBDh9ADqKHtSWcXpO9D2sIjgVsACAAAAAAlgTPaoQKz+saU8rwCT3UiNOdG6hdpjzFx9GBn08ZkBEAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAAA==", "subType": "06" } } @@ -300,7 +296,6 @@ "schema": { "default.default": { "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", "ecocCollection": "enxcol_.default.ecoc", "fields": [ { @@ -310,7 +305,7 @@ "subType": "04" } }, - "path": "encryptedDouble", + "path": "encryptedDoubleNoPrecision", "bsonType": "double", "queries": { "queryType": "rangePreview", @@ -336,7 +331,7 @@ "data": [ { "_id": 0, - "encryptedDouble": { + "encryptedDoubleNoPrecision": { "$$type": "binData" }, "__safeContent__": [ @@ -734,7 +729,7 @@ }, { "_id": 1, - "encryptedDouble": { + "encryptedDoubleNoPrecision": { "$$type": "binData" }, "__safeContent__": [ diff --git a/test/client-side-encryption/spec/legacy/fle2-Range-Double-Correctness.json b/test/client-side-encryption/spec/legacy/fle2v2-Range-Double-Correctness.json similarity index 88% rename from test/client-side-encryption/spec/legacy/fle2-Range-Double-Correctness.json rename to test/client-side-encryption/spec/legacy/fle2v2-Range-Double-Correctness.json index b09e966324..65594bcb11 100644 --- a/test/client-side-encryption/spec/legacy/fle2-Range-Double-Correctness.json +++ b/test/client-side-encryption/spec/legacy/fle2v2-Range-Double-Correctness.json @@ -1,7 +1,8 @@ { "runOn": [ { - "minServerVersion": "6.2.0", + "minServerVersion": "7.0.0", + "serverless": "forbid", "topology": [ "replicaset", "sharded", @@ -13,9 +14,6 @@ "collection_name": "default", "data": [], "encrypted_fields": { - "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", - "ecocCollection": "enxcol_.default.ecoc", "fields": [ { "keyId": { @@ -24,7 +22,7 @@ "subType": "04" } }, - "path": "encryptedDouble", + "path": "encryptedDoubleNoPrecision", "bsonType": 
"double", "queries": { "queryType": "rangePreview", @@ -93,7 +91,7 @@ "arguments": { "document": { "_id": 0, - "encryptedDouble": { + "encryptedDoubleNoPrecision": { "$numberDouble": "0.0" } } @@ -104,7 +102,7 @@ "arguments": { "document": { "_id": 1, - "encryptedDouble": { + "encryptedDoubleNoPrecision": { "$numberDouble": "1.0" } } @@ -114,7 +112,7 @@ "name": "find", "arguments": { "filter": { - "encryptedDouble": { + "encryptedDoubleNoPrecision": { "$gt": { "$numberDouble": "0.0" } @@ -124,7 +122,7 @@ "result": [ { "_id": 1, - "encryptedDouble": { + "encryptedDoubleNoPrecision": { "$numberDouble": "1.0" } } @@ -154,7 +152,7 @@ "arguments": { "document": { "_id": 0, - "encryptedDouble": { + "encryptedDoubleNoPrecision": { "$numberDouble": "0.0" } } @@ -165,7 +163,7 @@ "arguments": { "document": { "_id": 1, - "encryptedDouble": { + "encryptedDoubleNoPrecision": { "$numberDouble": "1.0" } } @@ -175,7 +173,7 @@ "name": "find", "arguments": { "filter": { - "encryptedDouble": { + "encryptedDoubleNoPrecision": { "$gte": { "$numberDouble": "0.0" } @@ -188,13 +186,13 @@ "result": [ { "_id": 0, - "encryptedDouble": { + "encryptedDoubleNoPrecision": { "$numberDouble": "0.0" } }, { "_id": 1, - "encryptedDouble": { + "encryptedDoubleNoPrecision": { "$numberDouble": "1.0" } } @@ -224,7 +222,7 @@ "arguments": { "document": { "_id": 0, - "encryptedDouble": { + "encryptedDoubleNoPrecision": { "$numberDouble": "0.0" } } @@ -235,7 +233,7 @@ "arguments": { "document": { "_id": 1, - "encryptedDouble": { + "encryptedDoubleNoPrecision": { "$numberDouble": "1.0" } } @@ -245,7 +243,7 @@ "name": "find", "arguments": { "filter": { - "encryptedDouble": { + "encryptedDoubleNoPrecision": { "$gt": { "$numberDouble": "1.0" } @@ -278,7 +276,7 @@ "arguments": { "document": { "_id": 0, - "encryptedDouble": { + "encryptedDoubleNoPrecision": { "$numberDouble": "0.0" } } @@ -289,7 +287,7 @@ "arguments": { "document": { "_id": 1, - "encryptedDouble": { + "encryptedDoubleNoPrecision": { "$numberDouble": "1.0" } } @@ -299,7 +297,7 @@ "name": "find", "arguments": { "filter": { - "encryptedDouble": { + "encryptedDoubleNoPrecision": { "$lt": { "$numberDouble": "1.0" } @@ -309,7 +307,7 @@ "result": [ { "_id": 0, - "encryptedDouble": { + "encryptedDoubleNoPrecision": { "$numberDouble": "0.0" } } @@ -339,7 +337,7 @@ "arguments": { "document": { "_id": 0, - "encryptedDouble": { + "encryptedDoubleNoPrecision": { "$numberDouble": "0.0" } } @@ -350,7 +348,7 @@ "arguments": { "document": { "_id": 1, - "encryptedDouble": { + "encryptedDoubleNoPrecision": { "$numberDouble": "1.0" } } @@ -360,7 +358,7 @@ "name": "find", "arguments": { "filter": { - "encryptedDouble": { + "encryptedDoubleNoPrecision": { "$lte": { "$numberDouble": "1.0" } @@ -373,13 +371,13 @@ "result": [ { "_id": 0, - "encryptedDouble": { + "encryptedDoubleNoPrecision": { "$numberDouble": "0.0" } }, { "_id": 1, - "encryptedDouble": { + "encryptedDoubleNoPrecision": { "$numberDouble": "1.0" } } @@ -409,7 +407,7 @@ "arguments": { "document": { "_id": 0, - "encryptedDouble": { + "encryptedDoubleNoPrecision": { "$numberDouble": "0.0" } } @@ -420,7 +418,7 @@ "arguments": { "document": { "_id": 1, - "encryptedDouble": { + "encryptedDoubleNoPrecision": { "$numberDouble": "1.0" } } @@ -430,7 +428,7 @@ "name": "find", "arguments": { "filter": { - "encryptedDouble": { + "encryptedDoubleNoPrecision": { "$gt": { "$numberDouble": "0.0" }, @@ -443,7 +441,7 @@ "result": [ { "_id": 1, - "encryptedDouble": { + "encryptedDoubleNoPrecision": { "$numberDouble": "1.0" } } @@ -473,7 +471,7 @@ 
"arguments": { "document": { "_id": 0, - "encryptedDouble": { + "encryptedDoubleNoPrecision": { "$numberDouble": "0.0" } } @@ -484,7 +482,7 @@ "arguments": { "document": { "_id": 1, - "encryptedDouble": { + "encryptedDoubleNoPrecision": { "$numberDouble": "1.0" } } @@ -494,7 +492,7 @@ "name": "find", "arguments": { "filter": { - "encryptedDouble": { + "encryptedDoubleNoPrecision": { "$numberDouble": "0.0" } } @@ -502,7 +500,7 @@ "result": [ { "_id": 0, - "encryptedDouble": { + "encryptedDoubleNoPrecision": { "$numberDouble": "0.0" } } @@ -512,7 +510,7 @@ "name": "find", "arguments": { "filter": { - "encryptedDouble": { + "encryptedDoubleNoPrecision": { "$numberDouble": "1.0" } } @@ -520,7 +518,7 @@ "result": [ { "_id": 1, - "encryptedDouble": { + "encryptedDoubleNoPrecision": { "$numberDouble": "1.0" } } @@ -550,7 +548,7 @@ "arguments": { "document": { "_id": 0, - "encryptedDouble": { + "encryptedDoubleNoPrecision": { "$numberDouble": "0.0" } } @@ -561,7 +559,7 @@ "arguments": { "document": { "_id": 1, - "encryptedDouble": { + "encryptedDoubleNoPrecision": { "$numberDouble": "1.0" } } @@ -571,7 +569,7 @@ "name": "find", "arguments": { "filter": { - "encryptedDouble": { + "encryptedDoubleNoPrecision": { "$in": [ { "$numberDouble": "0.0" @@ -583,7 +581,7 @@ "result": [ { "_id": 0, - "encryptedDouble": { + "encryptedDoubleNoPrecision": { "$numberDouble": "0.0" } } @@ -613,7 +611,7 @@ "arguments": { "document": { "_id": 0, - "encryptedDouble": { + "encryptedDoubleNoPrecision": { "$numberDouble": "0.0" } } @@ -624,7 +622,7 @@ "arguments": { "document": { "_id": 1, - "encryptedDouble": { + "encryptedDoubleNoPrecision": { "$numberDouble": "1.0" } } @@ -636,7 +634,7 @@ "pipeline": [ { "$match": { - "encryptedDouble": { + "encryptedDoubleNoPrecision": { "$gte": { "$numberDouble": "0.0" } @@ -653,13 +651,13 @@ "result": [ { "_id": 0, - "encryptedDouble": { + "encryptedDoubleNoPrecision": { "$numberDouble": "0.0" } }, { "_id": 1, - "encryptedDouble": { + "encryptedDoubleNoPrecision": { "$numberDouble": "1.0" } } @@ -689,7 +687,7 @@ "arguments": { "document": { "_id": 0, - "encryptedDouble": { + "encryptedDoubleNoPrecision": { "$numberDouble": "0.0" } } @@ -700,7 +698,7 @@ "arguments": { "document": { "_id": 1, - "encryptedDouble": { + "encryptedDoubleNoPrecision": { "$numberDouble": "1.0" } } @@ -712,7 +710,7 @@ "pipeline": [ { "$match": { - "encryptedDouble": { + "encryptedDoubleNoPrecision": { "$gt": { "$numberDouble": "1.0" } @@ -747,7 +745,7 @@ "arguments": { "document": { "_id": 0, - "encryptedDouble": { + "encryptedDoubleNoPrecision": { "$numberDouble": "0.0" } } @@ -758,7 +756,7 @@ "arguments": { "document": { "_id": 1, - "encryptedDouble": { + "encryptedDoubleNoPrecision": { "$numberDouble": "1.0" } } @@ -770,7 +768,7 @@ "pipeline": [ { "$match": { - "encryptedDouble": { + "encryptedDoubleNoPrecision": { "$lt": { "$numberDouble": "1.0" } @@ -782,7 +780,7 @@ "result": [ { "_id": 0, - "encryptedDouble": { + "encryptedDoubleNoPrecision": { "$numberDouble": "0.0" } } @@ -812,7 +810,7 @@ "arguments": { "document": { "_id": 0, - "encryptedDouble": { + "encryptedDoubleNoPrecision": { "$numberDouble": "0.0" } } @@ -823,7 +821,7 @@ "arguments": { "document": { "_id": 1, - "encryptedDouble": { + "encryptedDoubleNoPrecision": { "$numberDouble": "1.0" } } @@ -835,7 +833,7 @@ "pipeline": [ { "$match": { - "encryptedDouble": { + "encryptedDoubleNoPrecision": { "$lte": { "$numberDouble": "1.0" } @@ -852,13 +850,13 @@ "result": [ { "_id": 0, - "encryptedDouble": { + "encryptedDoubleNoPrecision": { 
"$numberDouble": "0.0" } }, { "_id": 1, - "encryptedDouble": { + "encryptedDoubleNoPrecision": { "$numberDouble": "1.0" } } @@ -888,7 +886,7 @@ "arguments": { "document": { "_id": 0, - "encryptedDouble": { + "encryptedDoubleNoPrecision": { "$numberDouble": "0.0" } } @@ -899,7 +897,7 @@ "arguments": { "document": { "_id": 1, - "encryptedDouble": { + "encryptedDoubleNoPrecision": { "$numberDouble": "1.0" } } @@ -911,7 +909,7 @@ "pipeline": [ { "$match": { - "encryptedDouble": { + "encryptedDoubleNoPrecision": { "$gt": { "$numberDouble": "0.0" }, @@ -926,7 +924,7 @@ "result": [ { "_id": 1, - "encryptedDouble": { + "encryptedDoubleNoPrecision": { "$numberDouble": "1.0" } } @@ -956,7 +954,7 @@ "arguments": { "document": { "_id": 0, - "encryptedDouble": { + "encryptedDoubleNoPrecision": { "$numberDouble": "0.0" } } @@ -967,7 +965,7 @@ "arguments": { "document": { "_id": 1, - "encryptedDouble": { + "encryptedDoubleNoPrecision": { "$numberDouble": "1.0" } } @@ -979,7 +977,7 @@ "pipeline": [ { "$match": { - "encryptedDouble": { + "encryptedDoubleNoPrecision": { "$numberDouble": "0.0" } } @@ -989,7 +987,7 @@ "result": [ { "_id": 0, - "encryptedDouble": { + "encryptedDoubleNoPrecision": { "$numberDouble": "0.0" } } @@ -1001,7 +999,7 @@ "pipeline": [ { "$match": { - "encryptedDouble": { + "encryptedDoubleNoPrecision": { "$numberDouble": "1.0" } } @@ -1011,7 +1009,7 @@ "result": [ { "_id": 1, - "encryptedDouble": { + "encryptedDoubleNoPrecision": { "$numberDouble": "1.0" } } @@ -1041,7 +1039,7 @@ "arguments": { "document": { "_id": 0, - "encryptedDouble": { + "encryptedDoubleNoPrecision": { "$numberDouble": "0.0" } } @@ -1052,7 +1050,7 @@ "arguments": { "document": { "_id": 1, - "encryptedDouble": { + "encryptedDoubleNoPrecision": { "$numberDouble": "1.0" } } @@ -1064,7 +1062,7 @@ "pipeline": [ { "$match": { - "encryptedDouble": { + "encryptedDoubleNoPrecision": { "$in": [ { "$numberDouble": "0.0" @@ -1078,7 +1076,7 @@ "result": [ { "_id": 0, - "encryptedDouble": { + "encryptedDoubleNoPrecision": { "$numberDouble": "0.0" } } @@ -1108,7 +1106,7 @@ "arguments": { "document": { "_id": 0, - "encryptedDouble": { + "encryptedDoubleNoPrecision": { "$numberInt": "0" } } @@ -1140,7 +1138,7 @@ "name": "find", "arguments": { "filter": { - "encryptedDouble": { + "encryptedDoubleNoPrecision": { "$gte": { "$numberInt": "0" } diff --git a/test/client-side-encryption/spec/legacy/fle2-Range-Double-Delete.json b/test/client-side-encryption/spec/legacy/fle2v2-Range-Double-Delete.json similarity index 72% rename from test/client-side-encryption/spec/legacy/fle2-Range-Double-Delete.json rename to test/client-side-encryption/spec/legacy/fle2v2-Range-Double-Delete.json index fa09cb87df..392e722f1f 100644 --- a/test/client-side-encryption/spec/legacy/fle2-Range-Double-Delete.json +++ b/test/client-side-encryption/spec/legacy/fle2v2-Range-Double-Delete.json @@ -1,7 +1,8 @@ { "runOn": [ { - "minServerVersion": "6.2.0", + "minServerVersion": "7.0.0", + "serverless": "forbid", "topology": [ "replicaset", "sharded", @@ -13,9 +14,6 @@ "collection_name": "default", "data": [], "encrypted_fields": { - "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", - "ecocCollection": "enxcol_.default.ecoc", "fields": [ { "keyId": { @@ -24,7 +22,7 @@ "subType": "04" } }, - "path": "encryptedDouble", + "path": "encryptedDoubleNoPrecision", "bsonType": "double", "queries": { "queryType": "rangePreview", @@ -93,7 +91,7 @@ "arguments": { "document": { "_id": 0, - "encryptedDouble": { + "encryptedDoubleNoPrecision": { 
"$numberDouble": "0" } } @@ -104,7 +102,7 @@ "arguments": { "document": { "_id": 1, - "encryptedDouble": { + "encryptedDoubleNoPrecision": { "$numberDouble": "1" } } @@ -114,7 +112,7 @@ "name": "deleteOne", "arguments": { "filter": { - "encryptedDouble": { + "encryptedDoubleNoPrecision": { "$gt": { "$numberDouble": "0" } @@ -178,7 +176,7 @@ "documents": [ { "_id": 0, - "encryptedDouble": { + "encryptedDoubleNoPrecision": { "$$type": "binData" } } @@ -189,7 +187,6 @@ "schema": { "default.default": { "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", "ecocCollection": "enxcol_.default.ecoc", "fields": [ { @@ -199,7 +196,7 @@ "subType": "04" } }, - "path": "encryptedDouble", + "path": "encryptedDoubleNoPrecision", "bsonType": "double", "queries": { "queryType": "rangePreview", @@ -226,7 +223,7 @@ "documents": [ { "_id": 1, - "encryptedDouble": { + "encryptedDoubleNoPrecision": { "$$type": "binData" } } @@ -237,7 +234,6 @@ "schema": { "default.default": { "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", "ecocCollection": "enxcol_.default.ecoc", "fields": [ { @@ -247,7 +243,7 @@ "subType": "04" } }, - "path": "encryptedDouble", + "path": "encryptedDoubleNoPrecision", "bsonType": "double", "queries": { "queryType": "rangePreview", @@ -274,10 +270,10 @@ "deletes": [ { "q": { - "encryptedDouble": { + "encryptedDoubleNoPrecision": { "$gt": { "$binary": { - "base64": "Cq8kAAADcGF5bG9hZAB/JAAABGcAQyQAAAMwAH0AAAAFZAAgAAAAAHgYoMGjEE6fAlAhICv0+doHcVX8CmMVxyq7+jlyGrvmBXMAIAAAAAC/5MQZgTHuIr/O5Z3mXPvqrom5JTQ8IeSpQGhO9sB+8gVjACAAAAAAPMgD8Rqnd94atKnMyPjlTwthUQ710MKJVqgtwNXLFWwAAzEAfQAAAAVkACAAAAAA2kiWNvEc4zunJ1jzvuClFC9hjZMYruKCqAaxq+oY8EAFcwAgAAAAACofIS72Cm6s866UCk+evTH3CvKBj/uZd72sAL608rzTBWMAIAAAAAB7NkTFgsBuPIyxW5KtYZD4uxdgRynoAUwA2RDj3qZNdwADMgB9AAAABWQAIAAAAABkfoBGmU3hjYBvQbjNW19kfXneBQsQQPRfUL3UAwI2cAVzACAAAAAAUpK2BUOqX/DGdX5YJniEZMWkofxHqeAbXceEGJxhp8AFYwAgAAAAAENNdd+fMV7VQzshj6RdvsxqDHYkPDxggdUHw3JbmFBeAAMzAH0AAAAFZAAgAAAAAFNprhQ3ZwIcYbuzLolAT5n/vc14P9kUUQComDu6eFyKBXMAIAAAAAAcx9z9pk32YbPV/sfPZl9ALIEVsqoLXgqWLVK/tP+heAVjACAAAAAA3eqduo9CWE5BePqBq7KCh5++QqXnyjCwybB15Zoeiu4AAzQAfQAAAAVkACAAAAAAODI+pB2pCuB+YmNEUAgtMfNdt3DmSkrJ96gRzLphgb8FcwAgAAAAAAT7dewFDxUDECQ3zVq75/cUN4IP+zsqhkP5+czUwlJIBWMAIAAAAABYcCJA2jocGQiF/vZ9VDEktoK015OqfY64yZhmNPs9GQADNQB9AAAABWQAIAAAAACNAk+yTZ4Ewk1EnotQK8O3h1gg9I7pr9q2+4po1iJVgAVzACAAAAAAUj/LesmtEsgqYVzMJ67umVA11hJTdDXwbxDoQ71vWyUFYwAgAAAAAG32XaKjEO9YTSLMnDR5eiaUpgBQz0n4M3VpBqhWTaEnAAM2AH0AAAAFZAAgAAAAAD/FIrGYFDjyYmVb7oTMVwweWP7A6F9LnyIuNO4MjBnXBXMAIAAAAACIZgJCQRZu7NhuNMyOqCn1tf+DfU1qm10TPCfj5JYV3wVjACAAAAAAZCaO06/DjOBjNrTxB8jdxM7X4XvNu9QTa2Y00kZJwgEAAzcAfQAAAAVkACAAAAAAciRW40ORJLVwchOEpz87Svb+5toAFM6LxDWv928ECwQFcwAgAAAAAN0dipyESIkszfjRzdDi8kAGaa2Hf4wrPAtiWwboZLuxBWMAIAAAAADhvKHhF7zfNRnG8QfiIlKycV6L6iGxbGNKkHligA9rAgADOAB9AAAABWQAIAAAAACZqAyCzYQupJ95mrBJX54yIz9VY7I0WrxpNYElCI4dTQVzACAAAAAA/eyJb6d1xfE+jJlVXMTD3HS/NEYENPVKAuj56Dr2dSEFYwAgAAAAAMfcJKj77c6y/RBQ5hb5LSNdvbYvCAHR59KLnbmxj1gAAAM5AH0AAAAFZAAgAAAAAI5bm3YO0Xgf0VT+qjVTTfvckecM3Cwqj7DTKZXf8/NXBXMAIAAAAAD/m+h8fBhWaHm6Ykuz0WX1xL4Eme3ErLObyEVJf8NCywVjACAAAAAAiGdTXD22l1zDxHeF4NXUTiBnNTsdpzJRwM6riTPuOogAAzEwAH0AAAAFZAAgAAAAANqo4+p6qdtCzcB4BX1wQ6llU7eFBnuu4MtZwp4B6mDlBXMAIAAAAAAGiz+VaukMZ+6IH4jtn4KWWdKK4/W+O+gRioQDrfzpMgVjACAAAAAAl/wlBjSJW/hKkll8HSBCGw4Ce1pJ5rZuhVE09hKq4jQAAzExAH0AAAAFZAAgAAAAAPrFXmHP2Y4YAm7b/aqsdn/DPoDkv7B8egWkfe23XsM1BXMAIAAAAAAGhwpKAr7skeqHm3oseSbO7qKNhmYsuUrECBxJ5k+D2AVjACAAAAAAKSsxERy2OBhb5MiH9/H2BET2oeU6yVihiAoWi/16FroAAzEyAH0AAAAFZAAgAAAAABzjYx
wAjXxXc0Uxv18rH8I3my0Aguow0kTwKyxbrm+cBXMAIAAAAADVbqJVr6IdokuhXkEtXF0C2gINLiAjMVN20lE20Vmp2QVjACAAAAAACJ+VAwl9X88mjC76yzkWeL4K6AamNYjbNkpS9B6fQE4AAzEzAH0AAAAFZAAgAAAAAFMm2feF2fFCm/UC6AfIyepX/xJDSmnnolQIBnHcPmb5BXMAIAAAAABLI11kFrQoaNVZFmq/38aRNImPOjdJh0Lo6irI8M/AaAVjACAAAAAAZFgYh4nV8YN+/AAqe/QhKDkNBPg8KraCG7y3bOzhPV4AAzE0AH0AAAAFZAAgAAAAAJvXB3KyNiNtQko4SSzo/9b2qmM2zU9CQTTDfLSBWMgRBXMAIAAAAAAvjuVP7KsLRDeqVqRziTKpBrjVyqKiIbO9Gw8Wl2wFTAVjACAAAAAARuBn3JdQ8so7Gy0XVH0CtlvtRoJWhSrJP6eRIqbyWh8AAzE1AH0AAAAFZAAgAAAAAPGdcxDiid8z8XYnfdDivNMYVPgBKdGOUw6UStU+48CdBXMAIAAAAAARj6g1Ap0eEfuCZ4X2TsEw+Djrhto3fA5nLwPaY0vCTgVjACAAAAAATxLY6SkcLPaoOBJpQsggEqoxgJgiY9seP3fBQM05PckAAzE2AH0AAAAFZAAgAAAAAP5rGPrYGt3aKob5f/ldP0qrW7bmWvqnKY4QwdDWz400BXMAIAAAAADTQkW2ymaaf/bhteOOGmSrIR97bAnJx+yN3yMj1bTeewVjACAAAAAA6O+wIMt3xMITuCxVrqOCNBPX5F122G+/Is+EYkzUvVYAAzE3AH0AAAAFZAAgAAAAAAlz6wJze5UkIxKpJOZFGCOf3v2KByWyI6NB6JM9wNcBBXMAIAAAAABUC7P/neUIHHoZtq0jFVBHY75tSFYr1Y5S16YN5XxC1QVjACAAAAAANv97jMd9JmZ7lMeCzK7jaZHZYsZJNl6N9g9WXzd2om0AAzE4AH0AAAAFZAAgAAAAAFJ8AtHcjia/9Y5pLEc3qVgH5xKiXw12G9Kn2A1EY8McBXMAIAAAAAAxe7Bdw7eUSBk/oAawa7uicTEDgXLymRNhBy1LAxhDvwVjACAAAAAAtOUZ2NeUWwESpq95O92dhqYBt8RtFnjT43E7k9mQF3oAAzE5AH0AAAAFZAAgAAAAAO8uwQUaKFb6vqR3Sv3Wn4QAonC2exOC9lGG1juqP5DtBXMAIAAAAABZf1KyJgQg8/Rf5c02DgDK2aQu0rNCOvaL60ohDHyY+gVjACAAAAAA5nAWj1Ek8p0MLYZEB3yoVFDfBYZ+/ZpIo71u+W9hprcAAzIwAH0AAAAFZAAgAAAAAE8YtqyRsGCeiR6hhiyisR/hccmK4nZqIMzO4lUBmEFzBXMAIAAAAAC1UYOSKqAeG1UJiKjWFVskRhuFKpj9Ezy+lICZvFlN5AVjACAAAAAA37oP30mTzVftI2FL9Uwyls/jqLqbmRDQk7p7nlx44uYAAzIxAH0AAAAFZAAgAAAAAPhCrMausDx1QUIEqp9rUdRKyM6a9AAx7jQ3ILIu8wNIBXMAIAAAAACmH8lotGCiF2q9VQxhsS+7LAZv79VUAsOUALaGxE/EpAVjACAAAAAAhVIkl8p19VZ0gpg32s++Jda3qsVSVB5tV4CKrtjhE3IAAzIyAH0AAAAFZAAgAAAAAOBi/GAYFcstMSJPgp3VkMiuuUUCrZytvqYaU8dwm8v2BXMAIAAAAACEZSZVyD3pKzGlbdwlYmWQhHHTV5SnNLknl2Gw8IaUTQVjACAAAAAAriBex2kK//RPhyVpCYJDBng4l5w8jD3m8BF7dVAP0p8AAzIzAH0AAAAFZAAgAAAAAIQuup+YGfH3mflzWopN8J1X8o8a0d9CSGIvrA5HOzraBXMAIAAAAADYvNLURXsC2ITMqK14LABQBI+hZZ5wNf24JMcKLW+84AVjACAAAAAAGeUzdG2kQrx5XypXJezZmPVzMYuqYZw7Bhbl4EPT0SMAAzI0AH0AAAAFZAAgAAAAAKsh0ADyOnVocFrOrf6MpTrNvAj8iaiE923DPryu124gBXMAIAAAAADg24a8NVE1GyScc6tmnTbmu5ulzO+896fE92lN08MeswVjACAAAAAAbO+DEBY3STVMQN7CbxmHDUBYrDR+80e797u/VmiK4vcAAzI1AH0AAAAFZAAgAAAAAKkVC2Y6HtRmv72tDnPUSjJBvse7SxLqnr09/Uuj9sVVBXMAIAAAAABYNFUkH7ylPMN+Bc3HWX1e0flGYNbtJNCY9SltJCW/UAVjACAAAAAAHlE3RLKcXpXto7Mr8nRgzEsmhjfGh4vcgPxashCSMg4AAzI2AH0AAAAFZAAgAAAAADDggcwcb/Yn1Kk39sOHsv7BO/MfP3m/AJzjGH506Wf9BXMAIAAAAAAYZIsdjICS0+BDyRUPnrSAZfPrwtuMaEDEn0/ijLNQmAVjACAAAAAA1c/sy3255NofBQrnBNSmVkSzMgsGPaaOUJShddVrnuQAAzI3AH0AAAAFZAAgAAAAAEWY7dEUOJBgjOoWVht1wLehsWAzB3rSOBtLgTuM2HC8BXMAIAAAAAAAoswiHRROurjwUW8u8D5EUT+67yvrgpB/j6PzBDAfVwVjACAAAAAALBvQumG8m7/bzJjGWN2cHSAncdN8jMtOSmEhEhGom24AAzI4AH0AAAAFZAAgAAAAAPZaapeAUUFPA7JTCMOWHJa9lnPFh0/gXfAPjA1ezm4ZBXMAIAAAAACmJvLY2nivw7/b3DOKH/X7bBXjJwoowqb1GtEFO3OYgAVjACAAAAAAGCDHuUJusGHKQ+r9nrFChmUUsRcqZKPGsRiLSk5gbFcAAzI5AH0AAAAFZAAgAAAAAK00u6jadxCZAiA+fTsPVDsnW5p5LCr4+kZZZOTDuZlfBXMAIAAAAAAote4zTEYMDgaaQbAdN8Dzv93ljPLdGjJzvnRn3KXgtQVjACAAAAAApZ6l+OrocOqgFek7WOqOP9JruTWZ+iW+5zdL3DZwzhkAAzMwAH0AAAAFZAAgAAAAAFbgabdyymiEVYYwtJSWa7lfl/oYuj/SukzJeDOR6wPVBXMAIAAAAADAFGFjS1vPbN6mQEhkDYTD6V2V23Ys9gUEUMGNvMPkaAVjACAAAAAAeICKly5Xtwmfd4JFD+5e1UWpu4V3KoRim7jeBICDs+UAAzMxAH0AAAAFZAAgAAAAABNMR6UBv2E627CqLtQ/eDYx7OEwQ7JrR4mSHFa1N8tLBXMAIAAAAAAxH4gucI4UmNVB7625C6hFSVCuIpJO3lusJlPuL8H5EQVjACAAAAAA0/uh1hrAlGGna/njCVaCqBwubxkifzF0zjCDQrIJOtUAAzMyAH0AAAAFZAAgAAAAAG8cd6WBneNunlqrQ2EmNf35W7OGObGq9WL4ePX+LUDmBXMAIAAAAAAjJ2+sX87NSis9hBsgb1QprVRnO7Bf+GObCGoUqyPE4wVjACAAAAAAAoJ4Ufdq45T9A
un5FupHlaBCIUsmUn6dXgV9KorpFikAAzMzAH0AAAAFZAAgAAAAAFWOUGkUpy8yf6gB3dio/aOfRKh7XuhvsUj48iESFJrGBXMAIAAAAAAY7sCDMcrUXvNuL6dO0m11WyijzXZvPIcOKob6IpC4PQVjACAAAAAAO/QFjczoznH95Iu0YEVMsU1GA1yxSGL8bcwSweNzAtkAAzM0AH0AAAAFZAAgAAAAAGUrIdKxOihwNmo6B+aG+Ag1qa0+iqdksHOjQj+Oy9bZBXMAIAAAAABwa5dbI2KmzBDNBTQBEkjZv4sPaeRkRNejcjdVymRFKQVjACAAAAAAUtdJucZoQvvPJiy65RonQrxBCcvtfHslpbgLbtWirCYAAzM1AH0AAAAFZAAgAAAAAOx89xV/hRk64/CkM9N2EMK6aldII0c8smdcsZ46NbP8BXMAIAAAAADBF6tfQ+7q9kTuLyuyrSnDgmrdmrXkdhl980i1KHuGHgVjACAAAAAAVc5njbLX1afpsuG662U/OBo4LanEk06lKbQtd95fswgAAzM2AH0AAAAFZAAgAAAAAMkN0L1oQWXhjwn9rAdudcYeN8/5VdCKU8cmDt7BokjsBXMAIAAAAAAT62pGXoRwExe9uvgYOI0hg5tOxilrWfoEmT0SMglWJwVjACAAAAAAhNc5ovIaxgtS856ZBfG+cAcHAz+3ewzB9C6zZoaggj4AAzM3AH0AAAAFZAAgAAAAANxfa4xCoaaB7k1C1RoH1LBhsCbN2yEq15BT9b+iqEC4BXMAIAAAAACAX9LV8Pemfw7NF0iB1/85NzM1Ef+1mUfyehacUVgobQVjACAAAAAAc0bIRNzuyc1Y0hkEmm7xV6Xp/LBlAgMq/ISCHr0Tq44AAzM4AH0AAAAFZAAgAAAAAOYIYoWkX7dGuyKfi3XssUlc7u/gWzqrR9KMkikKVdmSBXMAIAAAAABVF2OYjRTGi9Tw8XCAwZWLpX35Yl271TlNWp6N/nROhAVjACAAAAAAyGLbmHhe8eTQC8L2eDRcezUWULLZ9E8DPic7Mfp8/+4AAzM5AH0AAAAFZAAgAAAAAIy0+bXZi10QC+q7oSOLXK5Fee7VEk/qHSXukfeVIfgzBXMAIAAAAAAQ3IIV/JQCHW95AEbH5zGIHtJqyuPjWPMIZ+VmQHlxEwVjACAAAAAAD3azcNDdohU6tHO6ypT7F0rtydAYIMmLIsX6y7cXVkMAAzQwAH0AAAAFZAAgAAAAAL5SOJQ3LOhgdXJ5v086NNeAl1qonQnchObdpZJ1kHeEBXMAIAAAAAA+tEqTXODtik+ydJZSnUqXF9f18bPeze9eWtR7ExZJgQVjACAAAAAAUd8v2RN1WagYan1LbV+G81vcJKQ6HsX2GSBtnzMH7XEAAzQxAH0AAAAFZAAgAAAAAKl8zcHJRDjSjJeV/WvMxulW1zrTFtaeBy/aKKhadc6UBXMAIAAAAADBdWQl5SBIvtZZLIHszePwkO14W1mQ0izUk2Ov21cPNAVjACAAAAAAHvQLJ2X4ncjVv1BsOd/jbVouRPwf4222WlGSzPyAfnsAAzQyAH0AAAAFZAAgAAAAAFvotcNaoKnVt5CBCOPwjexFO0WGWuaIGL6H/6KSau+6BXMAIAAAAAD2y2mBN5xPu5PJoY2zcr0GnQDtHRBogA5+xzIxccE9fwVjACAAAAAA1OntPAxq1NJ4sfca+ysHcg68mj7QHwdEZQVxHMlV6x0AAzQzAH0AAAAFZAAgAAAAAPezU0/vNT4Q4YKbTbaeHqcwNLT+IjW/Y9QFpIooihjPBXMAIAAAAACj2x4O4rHter8ZnTws5LAP9jJ/6kk9C/V3vL50LoFZHAVjACAAAAAAlZd3bPMUXRhTwLWflrQuKIwtauTswenUiaDi2ibHYYYAAzQ0AH0AAAAFZAAgAAAAAMqpayM2XotEFmm0gwQd9rIzApy0X+7HfOhNk6VU7F5lBXMAIAAAAACJR9+q5T9qFHXFNgGbZnPubG8rkO6cwWhzITQTmd6VgwVjACAAAAAAEEOCUo+ihRGl1kuHlabWBWUFyJPAXSXzpQB4od57cMEAAzQ1AH0AAAAFZAAgAAAAANCeyW+3oebaQk+aqxNVhAcT/BZ5nhsTVdKS3tMrLSvWBXMAIAAAAADxRFMDhkyuEc++WnndMfoUMLNL7T7rWoeblcrpSI6soQVjACAAAAAA1KYQISxmBcGmgDMAjBpTcfQr/oci60Kf4QtEYEXPKSQAAzQ2AH0AAAAFZAAgAAAAAIbo5YBTxXM7HQhl7UP9NNgpPGFkBx871r1B65G47+K8BXMAIAAAAAC21dJSxnEhnxO5gzN5/34BL4von45e1meW92qowzb8fQVjACAAAAAAisxsskU/9ykphpSjB6bMMY+6Tbt6wpjhE2dg1IULAgYAAzQ3AH0AAAAFZAAgAAAAABm/6pF96j26Jm7z5KkY1y33zcAEXLx2n0DwC03bs/ixBXMAIAAAAAD01OMvTZI/mqMgxIhA5nLs068mW+GKl3OW3ilf2D8+LgVjACAAAAAAnPa8PTfcaSSAdzNAC54IMTicbShbtt/cSnFHz7u7g8wAAzQ4AH0AAAAFZAAgAAAAAJ/D3+17gaQdkBqkL2wMwccdmCaVOtxzIkM8VyI4xI5zBXMAIAAAAAAggLVmkc5u+YzBR+oNE+XgLVp64fC6MzUb/Ilu/Jsw0AVjACAAAAAAp5qCK8p0eTvAAUQ8fIOGyb2YM1YflQ8H6ggiWZ9vNI4AAzQ5AH0AAAAFZAAgAAAAAJMRyUW50oaTzspS6A3TUoXyC3gNYQoShUGPakMmeVZrBXMAIAAAAACona2Pqwt4U2PmFrtmu37jB9kQ/12okyAVtYa8TQkDiQVjACAAAAAAx/LOuDwvpaPFJeKLowZW/4q7SUsLtdldB27zc4//alEAAzUwAH0AAAAFZAAgAAAAAByuYl8dBvfaZ0LO/81JW4hYypeNmvLMaxsIdvqMPrWoBXMAIAAAAABNddwobOUJzm9HOUD8BMZJqkNCUCqstHZkC76FIdNg9AVjACAAAAAA4SEPY+qEZ2dCu/9485IEVybiM3Z7szXYM+izxklNm14AAzUxAH0AAAAFZAAgAAAAAHEzLtfmF/sBcYPPdj8867VmmQyU1xK9I/3Y0478azvABXMAIAAAAAAcmyFajZPnBTbO+oLInNwlApBocUekKkxz2hYFeSlQ+gVjACAAAAAA15ibZoBP1/pP27BDIfizDYNRDIpOmj8Ie34FvrtqjhMAAzUyAH0AAAAFZAAgAAAAAJam7JYsZe2cN20ZYm2W3v1pisNt5PLiniMzymBLWyMtBXMAIAAAAABxCsKVMZMTn3n+R2L7pVz5nW804r8HcK0mCBw3jUXKXAVjACAAAAAAElCgoMRrtlONqnyosjIpiz4mQTLq3+b/JUCVEq4QFiAAAzUzAH0AAAAFZAAgAAAAAMXrXx0saZ+5gORmwM2FLuZG6iuO2YS+1IGPoAtDKoKBBXMAIAAAAADIQsxCr8Cf
FKaBcx8kIeSywnGh7JHjKRJ9vJd9x79y7wVjACAAAAAAPzOTJ088k9QdGUpZ1ubUdkw3pTGkNF8bui4a9wqeo08AAzU0AH0AAAAFZAAgAAAAACbzcUD3INSnCRspOKF7ubne74OK9L0FTZvi9Ay0JVDYBXMAIAAAAADPebVQH8Btk9rhBIoUOdSAdpPvz7qIY4UC2i6IGisSAQVjACAAAAAAe4zSAgMQPjtz2t1DgxW1XOlV+fnNMXt6iQ7iV7DgQz4AAzU1AH0AAAAFZAAgAAAAACUc2CtD1MK/UTxtv+8iA9FoHEyTwdl43HKeSwDw2Lp5BXMAIAAAAACCIduIdw65bQMzRYRfjBJj62bc69T4QqH4QoWanwlvowVjACAAAAAA+i6aTnkoixxSOxDgQM/aWTylOy1m0ViCDU5asOM5ieYAAzU2AH0AAAAFZAAgAAAAAHSaHWs/dnmI9sc7nB50VB2Bzs0kHapMHCQdyVEYY30TBXMAIAAAAACkV22lhEjWv/9/DubfHBAcwJggKI5mIbSK5L2nyqloqQVjACAAAAAA1I5pLbnlFf/EfJlhE0RFIioDjzKrWNh4TO8H/ksrbMcAAzU3AH0AAAAFZAAgAAAAAAL8jhNBG0KXXZhmZ0bPXtfgapJCB/AI+BEHB0eZ3C75BXMAIAAAAADHx/fPa639EBmGV5quLi8IQT600ifiKSOhTDOK19DnzwVjACAAAAAANfWpWWy9iu7pgMEy2aVCggGEtJtXKQG1+3zCf8D6iEkAAzU4AH0AAAAFZAAgAAAAAAxgeclNl09H7HvzD1oLwb2YpFca5eaX90uStYXHilqKBXMAIAAAAACMU5pSxzIzWlQxHyW170Xs9EhD1hURASQk+qkx7K5Y6AVjACAAAAAA/kYlJPSFn2g1l7cy6djV/7wgnlPWphZ0IKSY8uenUF0AAzU5AH0AAAAFZAAgAAAAABmO7QD9vxWMmFjIHz13lyOeV6vHT6mYCsWxF7hb/yOjBXMAIAAAAACT9lmgkiqzuWG24afuzYiCeK9gmJqacmxAruIukd0xEAVjACAAAAAAyiZxtuOZQNSit4wFMJS3jnqLbDzyJ0OtDTKs6r0EO0cAAzYwAH0AAAAFZAAgAAAAAB89SjLtDJkqEghRGyj6aQ/2qvWLNuMROoXmzbYbCMKMBXMAIAAAAAC8sywgND+CjhVTF6HnRQeay8y9/HnVzDI42dEPah28LQVjACAAAAAAzBj8q9Ig4uhIc6rhNDVX1lII9lzfhJ0+ig8viBhdrtQAAzYxAH0AAAAFZAAgAAAAABN2alGq9Aats1mwERNGwL/fIwZSvVCe9/8XMHTFlpUpBXMAIAAAAACuDPjJgvvbBYhbLpjMiWUCsVppiYrhvR+yMysNPN8cZAVjACAAAAAAqcsLIvb3Pe/QKYB9wQ86Ye2OFSAJEIY4ve7LMM3SwGMAAzYyAH0AAAAFZAAgAAAAAL8YB6VAqGBiWD4CBv16IBscg5J7VQCTZu87n6pj+86KBXMAIAAAAAAmxm8e68geeyAdUjSMWBHzUjneVB0pG9TBXIoE6467hAVjACAAAAAA9Kd65vKIUckNPvxl/HHNPJVnFHXyErp4qibmD7BloukAAzYzAH0AAAAFZAAgAAAAAL4yLkCTV5Dmxa5toBu4JT8ge/cITAaURIOuFuOtFUkeBXMAIAAAAAAXoFNQOMGkAj7qEJP0wQafmFSXgWGeorDVbwyOxWLIsgVjACAAAAAAaFrBx0xXIryTe7V/kX+jfPHd0057x7k7MxzDAzi0RpcAAzY0AH0AAAAFZAAgAAAAAEU8DoUp46YtYjNFS9kNXwdYxQ9IW27vCTb+VcqqfnKNBXMAIAAAAADe7vBOgYReE8X78k5ARuUnv4GmzPZzg6SbConf4L2G3wVjACAAAAAAOl/tartywd/fJj5DNRsVH/ml9tJ8KkkCbKObsFe8lHcAAzY1AH0AAAAFZAAgAAAAAPa4yKTtkUtySuWo1ZQsp2QXtPb5SYqzA5vYDnS1P6c0BXMAIAAAAADKnF58R1sXlHlsHIvCBR3YWW/qk54z9CTDhZydkD1cOQVjACAAAAAAfn/jP8K3QUILngCNkydHARyBvBHIFdaJjzV0EXsFruMAAzY2AH0AAAAFZAAgAAAAAN5WJnMBmfgpuQPyonmY5X6OdRvuHw4nhsnGRnFAQ95VBXMAIAAAAACwftzu7KVV1rmGKwXtJjs3cJ1gE3apr8+N0SAg1F2cHwVjACAAAAAAf0XyQQLd/HIYaf9EeAV0o2h12K1AV5piLCpZihznBXoAAzY3AH0AAAAFZAAgAAAAACHvDsaPhoSb6DeGnKQ1QOpGYAgK82qpnqwcmzSeWaJHBXMAIAAAAABRq3C5+dOfnkAHM5Mg5hPB3O4jhwQlBgQWLA7Ph5bhgwVjACAAAAAAm2Ukk5kkQp+PDpBCCefQOqFKKZie4hINim3yvtypsEAAAzY4AH0AAAAFZAAgAAAAAOBePJvccPMJmy515KB1AkXF5Pi8NOG4V8psWy0SPRP+BXMAIAAAAAB3dOJG9xIDtEKCRzeNnPS3bFZepMj8UKBobKpSoCPqpgVjACAAAAAAnu33SngYPOtRgdJ3aBBuxWn80ti3OO6nZJcI6eh0VfQAAzY5AH0AAAAFZAAgAAAAABUvRrDQKEXLMdhnzXRdhiL6AGNs2TojPky+YVLXs+JnBXMAIAAAAAD1kYicbEEcPzD4QtuSYQQWDPq8fuUWGddpWayKn3dT9QVjACAAAAAAUc5dxcbNYY3qMO8+Xm2xis+pH9NjPLrHqHenwDEImAEAAzcwAH0AAAAFZAAgAAAAAOvSnpujeKNen4pqc2HR63C5s5oJ1Vf4CsbKoYQvkwl5BXMAIAAAAACw2+vAMdibzd2YVVNfk81yXkFZP0WLJ82JBxJmXnYE+QVjACAAAAAAnmej008JFQAgdImjk26PYvqdATjRYRufLi+vEVVwtucAAzcxAH0AAAAFZAAgAAAAAAo0xfGG7tJ3GWhgPVhW5Zn239nTD3PadShCNRc9TwdNBXMAIAAAAADZh243oOhenu0s/P/5KZLBDh9ADqKHtSWcXpO9D2sIjgVjACAAAAAAhpNHI4+aRdWHCvZfEjlgpRBz36g05wN9p/hj3NEwc7EAAAVlACAAAAAA65pz95EthqQpfoHS9nWvdCh05AV+OokP7GUaI+7j8+wSY20AAAAAAAAAAAAAEHBheWxvYWRJZAAAAAAAEGZpcnN0T3BlcmF0b3IAAQAAAAA=", + "base64": 
"DYckAAADcGF5bG9hZABXJAAABGcAQyQAAAMwAH0AAAAFZAAgAAAAAHgYoMGjEE6fAlAhICv0+doHcVX8CmMVxyq7+jlyGrvmBXMAIAAAAAC/5MQZgTHuIr/O5Z3mXPvqrom5JTQ8IeSpQGhO9sB+8gVsACAAAAAAuPSXVmJUAUpTQg/A9Bu1hYczZF58KEhVofakygbsvJQAAzEAfQAAAAVkACAAAAAA2kiWNvEc4zunJ1jzvuClFC9hjZMYruKCqAaxq+oY8EAFcwAgAAAAACofIS72Cm6s866UCk+evTH3CvKBj/uZd72sAL608rzTBWwAIAAAAADuCQ/M2xLeALF0UFZtJb22QGOhHmJv6xoO+kZIHcDeiAADMgB9AAAABWQAIAAAAABkfoBGmU3hjYBvQbjNW19kfXneBQsQQPRfUL3UAwI2cAVzACAAAAAAUpK2BUOqX/DGdX5YJniEZMWkofxHqeAbXceEGJxhp8AFbAAgAAAAAKUaLzIldNIZv6RHE+FwbMjzcNHqPESwF/37mm43VPrsAAMzAH0AAAAFZAAgAAAAAFNprhQ3ZwIcYbuzLolAT5n/vc14P9kUUQComDu6eFyKBXMAIAAAAAAcx9z9pk32YbPV/sfPZl9ALIEVsqoLXgqWLVK/tP+heAVsACAAAAAA/qxvuvJbAHwwhfrPVpmCFzNvg2cU/NXaWgqgYUZpgXwAAzQAfQAAAAVkACAAAAAAODI+pB2pCuB+YmNEUAgtMfNdt3DmSkrJ96gRzLphgb8FcwAgAAAAAAT7dewFDxUDECQ3zVq75/cUN4IP+zsqhkP5+czUwlJIBWwAIAAAAACFGeOtd5zBXTJ4JYonkn/HXZfHipUlqGwIRUcH/VTatwADNQB9AAAABWQAIAAAAACNAk+yTZ4Ewk1EnotQK8O3h1gg9I7pr9q2+4po1iJVgAVzACAAAAAAUj/LesmtEsgqYVzMJ67umVA11hJTdDXwbxDoQ71vWyUFbAAgAAAAABlnhpgTQ0WjLb5u0b/vEydrCeFjVynKd7aqb+UnvVLeAAM2AH0AAAAFZAAgAAAAAD/FIrGYFDjyYmVb7oTMVwweWP7A6F9LnyIuNO4MjBnXBXMAIAAAAACIZgJCQRZu7NhuNMyOqCn1tf+DfU1qm10TPCfj5JYV3wVsACAAAAAA5hmY4ptuNxULGf87SUFXQWGAONsL9U29duh8xqsHtxoAAzcAfQAAAAVkACAAAAAAciRW40ORJLVwchOEpz87Svb+5toAFM6LxDWv928ECwQFcwAgAAAAAN0dipyESIkszfjRzdDi8kAGaa2Hf4wrPAtiWwboZLuxBWwAIAAAAAANr4o/+l1OIbbaX5lZ3fQ/WIeOcEXjNI1F0WbSgQrzaQADOAB9AAAABWQAIAAAAACZqAyCzYQupJ95mrBJX54yIz9VY7I0WrxpNYElCI4dTQVzACAAAAAA/eyJb6d1xfE+jJlVXMTD3HS/NEYENPVKAuj56Dr2dSEFbAAgAAAAANkSt154Or/JKb31VvbZFV46RPgUp8ff/hcPORL7PpFBAAM5AH0AAAAFZAAgAAAAAI5bm3YO0Xgf0VT+qjVTTfvckecM3Cwqj7DTKZXf8/NXBXMAIAAAAAD/m+h8fBhWaHm6Ykuz0WX1xL4Eme3ErLObyEVJf8NCywVsACAAAAAAfb1VZZCqs2ivYbRzX4p5CtaCkKW+g20Pr57FWXzEZi8AAzEwAH0AAAAFZAAgAAAAANqo4+p6qdtCzcB4BX1wQ6llU7eFBnuu4MtZwp4B6mDlBXMAIAAAAAAGiz+VaukMZ+6IH4jtn4KWWdKK4/W+O+gRioQDrfzpMgVsACAAAAAAG4YYkTp80EKo59mlHExDodRQFR7njhR5dmISwUJ6ukAAAzExAH0AAAAFZAAgAAAAAPrFXmHP2Y4YAm7b/aqsdn/DPoDkv7B8egWkfe23XsM1BXMAIAAAAAAGhwpKAr7skeqHm3oseSbO7qKNhmYsuUrECBxJ5k+D2AVsACAAAAAAAqPQi9luYAu3GrFCEsVjd9z2zIDcp6SPTR2w6KQEr+IAAzEyAH0AAAAFZAAgAAAAABzjYxwAjXxXc0Uxv18rH8I3my0Aguow0kTwKyxbrm+cBXMAIAAAAADVbqJVr6IdokuhXkEtXF0C2gINLiAjMVN20lE20Vmp2QVsACAAAAAAD7K1Fx4gFaaizkIUrf+EGXQeG7QX1jadhGc6Ji471H8AAzEzAH0AAAAFZAAgAAAAAFMm2feF2fFCm/UC6AfIyepX/xJDSmnnolQIBnHcPmb5BXMAIAAAAABLI11kFrQoaNVZFmq/38aRNImPOjdJh0Lo6irI8M/AaAVsACAAAAAAOWul0oVqJ9CejD2RqphhTC98DJeRQy5EwbNerU2+4l8AAzE0AH0AAAAFZAAgAAAAAJvXB3KyNiNtQko4SSzo/9b2qmM2zU9CQTTDfLSBWMgRBXMAIAAAAAAvjuVP7KsLRDeqVqRziTKpBrjVyqKiIbO9Gw8Wl2wFTAVsACAAAAAADlE+oc1ins+paNcaOZJhBlKlObDJ4VQORWjFYocM4LgAAzE1AH0AAAAFZAAgAAAAAPGdcxDiid8z8XYnfdDivNMYVPgBKdGOUw6UStU+48CdBXMAIAAAAAARj6g1Ap0eEfuCZ4X2TsEw+Djrhto3fA5nLwPaY0vCTgVsACAAAAAAoHqiwGOUkBu8SX5U1yHho+UIFdSN2MdQN5s6bQ0EsJYAAzE2AH0AAAAFZAAgAAAAAP5rGPrYGt3aKob5f/ldP0qrW7bmWvqnKY4QwdDWz400BXMAIAAAAADTQkW2ymaaf/bhteOOGmSrIR97bAnJx+yN3yMj1bTeewVsACAAAAAADyQnHGH2gF4w4L8axUsSTf6Ubk7L5/eoFOJk12MtZAoAAzE3AH0AAAAFZAAgAAAAAAlz6wJze5UkIxKpJOZFGCOf3v2KByWyI6NB6JM9wNcBBXMAIAAAAABUC7P/neUIHHoZtq0jFVBHY75tSFYr1Y5S16YN5XxC1QVsACAAAAAAgvxRbXDisNnLY3pfsjDdnFLtkvYUC4lhA68eBXc7KAwAAzE4AH0AAAAFZAAgAAAAAFJ8AtHcjia/9Y5pLEc3qVgH5xKiXw12G9Kn2A1EY8McBXMAIAAAAAAxe7Bdw7eUSBk/oAawa7uicTEDgXLymRNhBy1LAxhDvwVsACAAAAAAxKPaIBKVx3jTA+R/el7P7AZ7efrmTGjJs3Hj/YdMddwAAzE5AH0AAAAFZAAgAAAAAO8uwQUaKFb6vqR3Sv3Wn4QAonC2exOC9lGG1juqP5DtBXMAIAAAAABZf1KyJgQg8/Rf5c02DgDK2aQu0rNCOvaL60ohDHyY+gVsACAAAAAAqyEjfKC8lYoIfoXYHUqHZPoaA6EK5BAZy5dxXZmay4kAAzIwAH0AAAAFZAAgAAAAAE8YtqyRsGCeiR6hhiyisR/hccmK4nZqIMzO4lUBmEFzBXMAIAAAAAC1UYOSKqAeG1UJiKjWFV
skRhuFKpj9Ezy+lICZvFlN5AVsACAAAAAA6Ct9nNMKyRazn1OKnRKagm746CGu+jyhbL1qJnZxGi0AAzIxAH0AAAAFZAAgAAAAAPhCrMausDx1QUIEqp9rUdRKyM6a9AAx7jQ3ILIu8wNIBXMAIAAAAACmH8lotGCiF2q9VQxhsS+7LAZv79VUAsOUALaGxE/EpAVsACAAAAAAnc1xCKfdvbUEc8F7XZqlNn1C+hZTtC0I9I3LL06iaNkAAzIyAH0AAAAFZAAgAAAAAOBi/GAYFcstMSJPgp3VkMiuuUUCrZytvqYaU8dwm8v2BXMAIAAAAACEZSZVyD3pKzGlbdwlYmWQhHHTV5SnNLknl2Gw8IaUTQVsACAAAAAAfsLZsEDcWSuNsIo/TD1ReyQW75HPMgmuKZuWFOLKRLoAAzIzAH0AAAAFZAAgAAAAAIQuup+YGfH3mflzWopN8J1X8o8a0d9CSGIvrA5HOzraBXMAIAAAAADYvNLURXsC2ITMqK14LABQBI+hZZ5wNf24JMcKLW+84AVsACAAAAAACzfjbTBH7IwDU91OqLAz94RFkoqBOkzKAqQb55gT4/MAAzI0AH0AAAAFZAAgAAAAAKsh0ADyOnVocFrOrf6MpTrNvAj8iaiE923DPryu124gBXMAIAAAAADg24a8NVE1GyScc6tmnTbmu5ulzO+896fE92lN08MeswVsACAAAAAAaPxcOIxnU7But88/yadOuDJDMcCywwrRitaxMODT4msAAzI1AH0AAAAFZAAgAAAAAKkVC2Y6HtRmv72tDnPUSjJBvse7SxLqnr09/Uuj9sVVBXMAIAAAAABYNFUkH7ylPMN+Bc3HWX1e0flGYNbtJNCY9SltJCW/UAVsACAAAAAAZYK/f9H4OeihmpiFMH7Wm7uLvs2s92zNA8wyrNZTsuMAAzI2AH0AAAAFZAAgAAAAADDggcwcb/Yn1Kk39sOHsv7BO/MfP3m/AJzjGH506Wf9BXMAIAAAAAAYZIsdjICS0+BDyRUPnrSAZfPrwtuMaEDEn0/ijLNQmAVsACAAAAAAGPnYVvo2ulO9z4LGd/69NAklfIcZqZvFX2KK0s+FcTUAAzI3AH0AAAAFZAAgAAAAAEWY7dEUOJBgjOoWVht1wLehsWAzB3rSOBtLgTuM2HC8BXMAIAAAAAAAoswiHRROurjwUW8u8D5EUT+67yvrgpB/j6PzBDAfVwVsACAAAAAA6NhRTYFL/Sz4tao7vpPjLNgAJ0FX6P/IyMW65qT6YsMAAzI4AH0AAAAFZAAgAAAAAPZaapeAUUFPA7JTCMOWHJa9lnPFh0/gXfAPjA1ezm4ZBXMAIAAAAACmJvLY2nivw7/b3DOKH/X7bBXjJwoowqb1GtEFO3OYgAVsACAAAAAA/JcUoyKacCB1NfmH8vYqC1f7rd13KShrQqV2r9QBP44AAzI5AH0AAAAFZAAgAAAAAK00u6jadxCZAiA+fTsPVDsnW5p5LCr4+kZZZOTDuZlfBXMAIAAAAAAote4zTEYMDgaaQbAdN8Dzv93ljPLdGjJzvnRn3KXgtQVsACAAAAAAxXd9Mh6R3mnJy8m7UfqMKi6oD5DlZpkaOz6bEjMOdiwAAzMwAH0AAAAFZAAgAAAAAFbgabdyymiEVYYwtJSWa7lfl/oYuj/SukzJeDOR6wPVBXMAIAAAAADAFGFjS1vPbN6mQEhkDYTD6V2V23Ys9gUEUMGNvMPkaAVsACAAAAAAL/D5Sze/ZoEanZLK0IeEkhgVkxEjMWVCfmJaD3a8uNIAAzMxAH0AAAAFZAAgAAAAABNMR6UBv2E627CqLtQ/eDYx7OEwQ7JrR4mSHFa1N8tLBXMAIAAAAAAxH4gucI4UmNVB7625C6hFSVCuIpJO3lusJlPuL8H5EQVsACAAAAAAVLHNg0OUVqZ7WGOP53BkTap9FOw9dr1P4J8HxqFqU04AAzMyAH0AAAAFZAAgAAAAAG8cd6WBneNunlqrQ2EmNf35W7OGObGq9WL4ePX+LUDmBXMAIAAAAAAjJ2+sX87NSis9hBsgb1QprVRnO7Bf+GObCGoUqyPE4wVsACAAAAAAs9c9SM49/pWmyUQKslpt3RTMBNSRppfNO0JBvUqHPg0AAzMzAH0AAAAFZAAgAAAAAFWOUGkUpy8yf6gB3dio/aOfRKh7XuhvsUj48iESFJrGBXMAIAAAAAAY7sCDMcrUXvNuL6dO0m11WyijzXZvPIcOKob6IpC4PQVsACAAAAAAJOP+EHz6awDb1qK2bZQ3kTV7wsj5Daj/IGAWh4g7omAAAzM0AH0AAAAFZAAgAAAAAGUrIdKxOihwNmo6B+aG+Ag1qa0+iqdksHOjQj+Oy9bZBXMAIAAAAABwa5dbI2KmzBDNBTQBEkjZv4sPaeRkRNejcjdVymRFKQVsACAAAAAA4ml/nm0gJNTcJ4vuD+T2Qfq2fQZlibJp/j6MOGDrbHMAAzM1AH0AAAAFZAAgAAAAAOx89xV/hRk64/CkM9N2EMK6aldII0c8smdcsZ46NbP8BXMAIAAAAADBF6tfQ+7q9kTuLyuyrSnDgmrdmrXkdhl980i1KHuGHgVsACAAAAAACUqiFqHZdGbwAA+hN0YUE5zFg+H+dabIB4dj5/75W/YAAzM2AH0AAAAFZAAgAAAAAMkN0L1oQWXhjwn9rAdudcYeN8/5VdCKU8cmDt7BokjsBXMAIAAAAAAT62pGXoRwExe9uvgYOI0hg5tOxilrWfoEmT0SMglWJwVsACAAAAAAlVz4dhiprSbUero6JFfxzSJGclg63oAkAmgbSwbcYxIAAzM3AH0AAAAFZAAgAAAAANxfa4xCoaaB7k1C1RoH1LBhsCbN2yEq15BT9b+iqEC4BXMAIAAAAACAX9LV8Pemfw7NF0iB1/85NzM1Ef+1mUfyehacUVgobQVsACAAAAAAVq4xpbymLk0trPC/a2MvB39I7hRiX8EJsVSI5E5hSBkAAzM4AH0AAAAFZAAgAAAAAOYIYoWkX7dGuyKfi3XssUlc7u/gWzqrR9KMkikKVdmSBXMAIAAAAABVF2OYjRTGi9Tw8XCAwZWLpX35Yl271TlNWp6N/nROhAVsACAAAAAA0nWwYzXQ1+EkDvnGq+SMlq20z+j32Su+i/A95SggPb4AAzM5AH0AAAAFZAAgAAAAAIy0+bXZi10QC+q7oSOLXK5Fee7VEk/qHSXukfeVIfgzBXMAIAAAAAAQ3IIV/JQCHW95AEbH5zGIHtJqyuPjWPMIZ+VmQHlxEwVsACAAAAAAp0jYsyohKv9Pm+4k+DplEGbl9WLZpAJzitrcDj4CNsMAAzQwAH0AAAAFZAAgAAAAAL5SOJQ3LOhgdXJ5v086NNeAl1qonQnchObdpZJ1kHeEBXMAIAAAAAA+tEqTXODtik+ydJZSnUqXF9f18bPeze9eWtR7ExZJgQVsACAAAAAAbrkZCVgB9Qsp4IAbdf+bD4fT6Boqk5UtuA/zhNrh1y0AAzQxAH0AAAAFZAAgAAAAAKl8zcHJRDjSjJeV/
WvMxulW1zrTFtaeBy/aKKhadc6UBXMAIAAAAADBdWQl5SBIvtZZLIHszePwkO14W1mQ0izUk2Ov21cPNAVsACAAAAAAHErCYycpqiIcCZHdmPL1hi+ovLQk4TAvENpfLdTRamQAAzQyAH0AAAAFZAAgAAAAAFvotcNaoKnVt5CBCOPwjexFO0WGWuaIGL6H/6KSau+6BXMAIAAAAAD2y2mBN5xPu5PJoY2zcr0GnQDtHRBogA5+xzIxccE9fwVsACAAAAAAdS34xzJesnUfxLCcc1U7XzUqLy8MAzV/tcjbqaD3lkMAAzQzAH0AAAAFZAAgAAAAAPezU0/vNT4Q4YKbTbaeHqcwNLT+IjW/Y9QFpIooihjPBXMAIAAAAACj2x4O4rHter8ZnTws5LAP9jJ/6kk9C/V3vL50LoFZHAVsACAAAAAAQdBDF3747uCVP5lB/zr8VmzxJfTSZHBKeIgm5FyONXwAAzQ0AH0AAAAFZAAgAAAAAMqpayM2XotEFmm0gwQd9rIzApy0X+7HfOhNk6VU7F5lBXMAIAAAAACJR9+q5T9qFHXFNgGbZnPubG8rkO6cwWhzITQTmd6VgwVsACAAAAAAOZLQ6o7e4mVfDzbpQioa4d3RoTvqwgnbmc5Qh2wsZuoAAzQ1AH0AAAAFZAAgAAAAANCeyW+3oebaQk+aqxNVhAcT/BZ5nhsTVdKS3tMrLSvWBXMAIAAAAADxRFMDhkyuEc++WnndMfoUMLNL7T7rWoeblcrpSI6soQVsACAAAAAAdBuBMJ1lxt0DRq9pOZldQqchLs3B/W02txcMLD490FEAAzQ2AH0AAAAFZAAgAAAAAIbo5YBTxXM7HQhl7UP9NNgpPGFkBx871r1B65G47+K8BXMAIAAAAAC21dJSxnEhnxO5gzN5/34BL4von45e1meW92qowzb8fQVsACAAAAAAm3Hk2cvBN0ANaR5jzeZE5TsdxDvJCTOT1I01X7cNVaYAAzQ3AH0AAAAFZAAgAAAAABm/6pF96j26Jm7z5KkY1y33zcAEXLx2n0DwC03bs/ixBXMAIAAAAAD01OMvTZI/mqMgxIhA5nLs068mW+GKl3OW3ilf2D8+LgVsACAAAAAAaLvJDrqBESTNZSdcXsd+8GXPl8ZkUsGpeYuyYVv/kygAAzQ4AH0AAAAFZAAgAAAAAJ/D3+17gaQdkBqkL2wMwccdmCaVOtxzIkM8VyI4xI5zBXMAIAAAAAAggLVmkc5u+YzBR+oNE+XgLVp64fC6MzUb/Ilu/Jsw0AVsACAAAAAACz3HVKdWkx82/kGbVpcbAeZtsj2R5Zr0dEPfle4IErkAAzQ5AH0AAAAFZAAgAAAAAJMRyUW50oaTzspS6A3TUoXyC3gNYQoShUGPakMmeVZrBXMAIAAAAACona2Pqwt4U2PmFrtmu37jB9kQ/12okyAVtYa8TQkDiQVsACAAAAAAltJJKjCMyBTJ+4PkdDCPJdeX695P8P5h7WOZ+kmExMAAAzUwAH0AAAAFZAAgAAAAAByuYl8dBvfaZ0LO/81JW4hYypeNmvLMaxsIdvqMPrWoBXMAIAAAAABNddwobOUJzm9HOUD8BMZJqkNCUCqstHZkC76FIdNg9AVsACAAAAAAQQOkIQtkyNavqCnhQbNg3HfqrJdsAGaoxSJePJl1qXsAAzUxAH0AAAAFZAAgAAAAAHEzLtfmF/sBcYPPdj8867VmmQyU1xK9I/3Y0478azvABXMAIAAAAAAcmyFajZPnBTbO+oLInNwlApBocUekKkxz2hYFeSlQ+gVsACAAAAAAZ6IkrOVRcC8vSA6Vb4fPWZJrYexXhEabIuYIeXNsCSgAAzUyAH0AAAAFZAAgAAAAAJam7JYsZe2cN20ZYm2W3v1pisNt5PLiniMzymBLWyMtBXMAIAAAAABxCsKVMZMTn3n+R2L7pVz5nW804r8HcK0mCBw3jUXKXAVsACAAAAAA7j3JGnNtR64P4dJLeUoScFRGfa8ekjh3dvhw46sRFk0AAzUzAH0AAAAFZAAgAAAAAMXrXx0saZ+5gORmwM2FLuZG6iuO2YS+1IGPoAtDKoKBBXMAIAAAAADIQsxCr8CfFKaBcx8kIeSywnGh7JHjKRJ9vJd9x79y7wVsACAAAAAAcvBV+SykDYhmRFyVYwFYB9oBKBSHr55Jdz2cXeowsUQAAzU0AH0AAAAFZAAgAAAAACbzcUD3INSnCRspOKF7ubne74OK9L0FTZvi9Ay0JVDYBXMAIAAAAADPebVQH8Btk9rhBIoUOdSAdpPvz7qIY4UC2i6IGisSAQVsACAAAAAAiBunJi0mPnnXdnldiq+If8dcb/n6apHnaIFt+oyYO1kAAzU1AH0AAAAFZAAgAAAAACUc2CtD1MK/UTxtv+8iA9FoHEyTwdl43HKeSwDw2Lp5BXMAIAAAAACCIduIdw65bQMzRYRfjBJj62bc69T4QqH4QoWanwlvowVsACAAAAAAM0TV7S+aPVVzJOQ+cpSNKHTwyQ0mWa8tcHzfk3nR+9IAAzU2AH0AAAAFZAAgAAAAAHSaHWs/dnmI9sc7nB50VB2Bzs0kHapMHCQdyVEYY30TBXMAIAAAAACkV22lhEjWv/9/DubfHBAcwJggKI5mIbSK5L2nyqloqQVsACAAAAAAS19m7DccQxgryOsBJ3GsCs37yfQqNi1G+S6fCXpEhn4AAzU3AH0AAAAFZAAgAAAAAAL8jhNBG0KXXZhmZ0bPXtfgapJCB/AI+BEHB0eZ3C75BXMAIAAAAADHx/fPa639EBmGV5quLi8IQT600ifiKSOhTDOK19DnzwVsACAAAAAAlyLTDVkHxbayklD6Qymh3odIK1JHaOtps4f4HR+PcDgAAzU4AH0AAAAFZAAgAAAAAAxgeclNl09H7HvzD1oLwb2YpFca5eaX90uStYXHilqKBXMAIAAAAACMU5pSxzIzWlQxHyW170Xs9EhD1hURASQk+qkx7K5Y6AVsACAAAAAAJbMMwJfNftA7Xom8Bw/ghuZmSa3x12vTZxBUbV8m888AAzU5AH0AAAAFZAAgAAAAABmO7QD9vxWMmFjIHz13lyOeV6vHT6mYCsWxF7hb/yOjBXMAIAAAAACT9lmgkiqzuWG24afuzYiCeK9gmJqacmxAruIukd0xEAVsACAAAAAAZa0/FI/GkZR7CtX18Xg9Tn9zfxkD0UoaSt+pIO5t1t4AAzYwAH0AAAAFZAAgAAAAAB89SjLtDJkqEghRGyj6aQ/2qvWLNuMROoXmzbYbCMKMBXMAIAAAAAC8sywgND+CjhVTF6HnRQeay8y9/HnVzDI42dEPah28LQVsACAAAAAAoxv7UKh0RqUAWcOsQvU123zO1qZn73Xfib0qncZCB34AAzYxAH0AAAAFZAAgAAAAABN2alGq9Aats1mwERNGwL/fIwZSvVCe9/8XMHTFlpUpBXMAIAAAAACuDPjJgvvbBYhbLpjMiWUCsVppiYrhvR+yMysNPN8cZAVsACAAAAAAKpADjc4bzIZMi9Q/+oe0EMRJ
HYQt6dlo1x/lRquagqkAAzYyAH0AAAAFZAAgAAAAAL8YB6VAqGBiWD4CBv16IBscg5J7VQCTZu87n6pj+86KBXMAIAAAAAAmxm8e68geeyAdUjSMWBHzUjneVB0pG9TBXIoE6467hAVsACAAAAAAV76JZAlYpgC/Zl8awx2ArCg1uuyy2XVTSkp0wUMi/7UAAzYzAH0AAAAFZAAgAAAAAL4yLkCTV5Dmxa5toBu4JT8ge/cITAaURIOuFuOtFUkeBXMAIAAAAAAXoFNQOMGkAj7qEJP0wQafmFSXgWGeorDVbwyOxWLIsgVsACAAAAAAc4Un6dtIFe+AQ+RSfNWs3q63RTHhmyc+5GKRRdpWRv8AAzY0AH0AAAAFZAAgAAAAAEU8DoUp46YtYjNFS9kNXwdYxQ9IW27vCTb+VcqqfnKNBXMAIAAAAADe7vBOgYReE8X78k5ARuUnv4GmzPZzg6SbConf4L2G3wVsACAAAAAA78YHWVkp6HbZ0zS4UL2z/2pj9vPDcMDt7zTv6NcRsVsAAzY1AH0AAAAFZAAgAAAAAPa4yKTtkUtySuWo1ZQsp2QXtPb5SYqzA5vYDnS1P6c0BXMAIAAAAADKnF58R1sXlHlsHIvCBR3YWW/qk54z9CTDhZydkD1cOQVsACAAAAAAHW3ERalTFWKMzjuXF3nFh0pSrQxM/ojnPbPhc4v5MaQAAzY2AH0AAAAFZAAgAAAAAN5WJnMBmfgpuQPyonmY5X6OdRvuHw4nhsnGRnFAQ95VBXMAIAAAAACwftzu7KVV1rmGKwXtJjs3cJ1gE3apr8+N0SAg1F2cHwVsACAAAAAATDW0reyaCjbJuVLJzbSLx1OBuBoQu+090kgW4RurVacAAzY3AH0AAAAFZAAgAAAAACHvDsaPhoSb6DeGnKQ1QOpGYAgK82qpnqwcmzSeWaJHBXMAIAAAAABRq3C5+dOfnkAHM5Mg5hPB3O4jhwQlBgQWLA7Ph5bhgwVsACAAAAAAqkC8zYASvkVrp0pqmDyFCkPaDmD/ePAJpMuNOCBhni8AAzY4AH0AAAAFZAAgAAAAAOBePJvccPMJmy515KB1AkXF5Pi8NOG4V8psWy0SPRP+BXMAIAAAAAB3dOJG9xIDtEKCRzeNnPS3bFZepMj8UKBobKpSoCPqpgVsACAAAAAAPG3IxQVOdZrr509ggm5FKizWWoZPuVtOgOIGZ3m+pdEAAzY5AH0AAAAFZAAgAAAAABUvRrDQKEXLMdhnzXRdhiL6AGNs2TojPky+YVLXs+JnBXMAIAAAAAD1kYicbEEcPzD4QtuSYQQWDPq8fuUWGddpWayKn3dT9QVsACAAAAAA9+Sf7PbyFcY45hP9oTfjQiOUS3vEIAT8C0vOHymwYSUAAzcwAH0AAAAFZAAgAAAAAOvSnpujeKNen4pqc2HR63C5s5oJ1Vf4CsbKoYQvkwl5BXMAIAAAAACw2+vAMdibzd2YVVNfk81yXkFZP0WLJ82JBxJmXnYE+QVsACAAAAAArQ/E1ACyhK4ZyLqH9mNkCU7WClqRQTGyW9tciSGG/EMAAzcxAH0AAAAFZAAgAAAAAAo0xfGG7tJ3GWhgPVhW5Zn239nTD3PadShCNRc9TwdNBXMAIAAAAADZh243oOhenu0s/P/5KZLBDh9ADqKHtSWcXpO9D2sIjgVsACAAAAAAlgTPaoQKz+saU8rwCT3UiNOdG6hdpjzFx9GBn08ZkBEAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAAA==", "subType": "06" } } @@ -286,12 +282,12 @@ "limit": 1 } ], + "ordered": true, "encryptionInformation": { "type": 1, "schema": { "default.default": { "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", "ecocCollection": "enxcol_.default.ecoc", "fields": [ { @@ -301,7 +297,7 @@ "subType": "04" } }, - "path": "encryptedDouble", + "path": "encryptedDoubleNoPrecision", "bsonType": "double", "queries": { "queryType": "rangePreview", @@ -315,24 +311,6 @@ } ] } - }, - "deleteTokens": { - "default.default": { - "encryptedDouble": { - "e": { - "$binary": { - "base64": "65pz95EthqQpfoHS9nWvdCh05AV+OokP7GUaI+7j8+w=", - "subType": "00" - } - }, - "o": { - "$binary": { - "base64": "noN+05JsuO1oDg59yypIGj45i+eFH6HOTXOPpeZ//Mk=", - "subType": "00" - } - } - } - } } } }, @@ -345,7 +323,7 @@ "data": [ { "_id": 0, - "encryptedDouble": { + "encryptedDoubleNoPrecision": { "$$type": "binData" }, "__safeContent__": [ diff --git a/test/client-side-encryption/spec/legacy/fle2-Range-Double-FindOneAndUpdate.json b/test/client-side-encryption/spec/legacy/fle2v2-Range-Double-FindOneAndUpdate.json similarity index 79% rename from test/client-side-encryption/spec/legacy/fle2-Range-Double-FindOneAndUpdate.json rename to test/client-side-encryption/spec/legacy/fle2v2-Range-Double-FindOneAndUpdate.json index 59a304166b..bbcfb321f5 100644 --- a/test/client-side-encryption/spec/legacy/fle2-Range-Double-FindOneAndUpdate.json +++ b/test/client-side-encryption/spec/legacy/fle2v2-Range-Double-FindOneAndUpdate.json @@ -1,7 +1,8 @@ { "runOn": [ { - "minServerVersion": "6.2.0", + "minServerVersion": "7.0.0", + "serverless": "forbid", "topology": [ "replicaset", "sharded", @@ -13,9 +14,6 @@ "collection_name": "default", "data": 
[], "encrypted_fields": { - "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", - "ecocCollection": "enxcol_.default.ecoc", "fields": [ { "keyId": { @@ -24,7 +22,7 @@ "subType": "04" } }, - "path": "encryptedDouble", + "path": "encryptedDoubleNoPrecision", "bsonType": "double", "queries": { "queryType": "rangePreview", @@ -93,7 +91,7 @@ "arguments": { "document": { "_id": 0, - "encryptedDouble": { + "encryptedDoubleNoPrecision": { "$numberDouble": "0" } } @@ -104,7 +102,7 @@ "arguments": { "document": { "_id": 1, - "encryptedDouble": { + "encryptedDoubleNoPrecision": { "$numberDouble": "1" } } @@ -114,7 +112,7 @@ "name": "findOneAndUpdate", "arguments": { "filter": { - "encryptedDouble": { + "encryptedDoubleNoPrecision": { "$gt": { "$numberDouble": "0" } @@ -122,7 +120,7 @@ }, "update": { "$set": { - "encryptedDouble": { + "encryptedDoubleNoPrecision": { "$numberDouble": "2" } } @@ -131,7 +129,7 @@ }, "result": { "_id": 1, - "encryptedDouble": { + "encryptedDoubleNoPrecision": { "$numberDouble": "1" } } @@ -189,7 +187,7 @@ "documents": [ { "_id": 0, - "encryptedDouble": { + "encryptedDoubleNoPrecision": { "$$type": "binData" } } @@ -200,7 +198,6 @@ "schema": { "default.default": { "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", "ecocCollection": "enxcol_.default.ecoc", "fields": [ { @@ -210,7 +207,7 @@ "subType": "04" } }, - "path": "encryptedDouble", + "path": "encryptedDoubleNoPrecision", "bsonType": "double", "queries": { "queryType": "rangePreview", @@ -237,7 +234,7 @@ "documents": [ { "_id": 1, - "encryptedDouble": { + "encryptedDoubleNoPrecision": { "$$type": "binData" } } @@ -248,7 +245,6 @@ "schema": { "default.default": { "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", "ecocCollection": "enxcol_.default.ecoc", "fields": [ { @@ -258,7 +254,7 @@ "subType": "04" } }, - "path": "encryptedDouble", + "path": "encryptedDoubleNoPrecision", "bsonType": "double", "queries": { "queryType": "rangePreview", @@ -283,10 +279,10 @@ "command": { "findAndModify": "default", "query": { - "encryptedDouble": { + "encryptedDoubleNoPrecision": { "$gt": { "$binary": { - "base64": 
"Cq8kAAADcGF5bG9hZAB/JAAABGcAQyQAAAMwAH0AAAAFZAAgAAAAAHgYoMGjEE6fAlAhICv0+doHcVX8CmMVxyq7+jlyGrvmBXMAIAAAAAC/5MQZgTHuIr/O5Z3mXPvqrom5JTQ8IeSpQGhO9sB+8gVjACAAAAAAPMgD8Rqnd94atKnMyPjlTwthUQ710MKJVqgtwNXLFWwAAzEAfQAAAAVkACAAAAAA2kiWNvEc4zunJ1jzvuClFC9hjZMYruKCqAaxq+oY8EAFcwAgAAAAACofIS72Cm6s866UCk+evTH3CvKBj/uZd72sAL608rzTBWMAIAAAAAB7NkTFgsBuPIyxW5KtYZD4uxdgRynoAUwA2RDj3qZNdwADMgB9AAAABWQAIAAAAABkfoBGmU3hjYBvQbjNW19kfXneBQsQQPRfUL3UAwI2cAVzACAAAAAAUpK2BUOqX/DGdX5YJniEZMWkofxHqeAbXceEGJxhp8AFYwAgAAAAAENNdd+fMV7VQzshj6RdvsxqDHYkPDxggdUHw3JbmFBeAAMzAH0AAAAFZAAgAAAAAFNprhQ3ZwIcYbuzLolAT5n/vc14P9kUUQComDu6eFyKBXMAIAAAAAAcx9z9pk32YbPV/sfPZl9ALIEVsqoLXgqWLVK/tP+heAVjACAAAAAA3eqduo9CWE5BePqBq7KCh5++QqXnyjCwybB15Zoeiu4AAzQAfQAAAAVkACAAAAAAODI+pB2pCuB+YmNEUAgtMfNdt3DmSkrJ96gRzLphgb8FcwAgAAAAAAT7dewFDxUDECQ3zVq75/cUN4IP+zsqhkP5+czUwlJIBWMAIAAAAABYcCJA2jocGQiF/vZ9VDEktoK015OqfY64yZhmNPs9GQADNQB9AAAABWQAIAAAAACNAk+yTZ4Ewk1EnotQK8O3h1gg9I7pr9q2+4po1iJVgAVzACAAAAAAUj/LesmtEsgqYVzMJ67umVA11hJTdDXwbxDoQ71vWyUFYwAgAAAAAG32XaKjEO9YTSLMnDR5eiaUpgBQz0n4M3VpBqhWTaEnAAM2AH0AAAAFZAAgAAAAAD/FIrGYFDjyYmVb7oTMVwweWP7A6F9LnyIuNO4MjBnXBXMAIAAAAACIZgJCQRZu7NhuNMyOqCn1tf+DfU1qm10TPCfj5JYV3wVjACAAAAAAZCaO06/DjOBjNrTxB8jdxM7X4XvNu9QTa2Y00kZJwgEAAzcAfQAAAAVkACAAAAAAciRW40ORJLVwchOEpz87Svb+5toAFM6LxDWv928ECwQFcwAgAAAAAN0dipyESIkszfjRzdDi8kAGaa2Hf4wrPAtiWwboZLuxBWMAIAAAAADhvKHhF7zfNRnG8QfiIlKycV6L6iGxbGNKkHligA9rAgADOAB9AAAABWQAIAAAAACZqAyCzYQupJ95mrBJX54yIz9VY7I0WrxpNYElCI4dTQVzACAAAAAA/eyJb6d1xfE+jJlVXMTD3HS/NEYENPVKAuj56Dr2dSEFYwAgAAAAAMfcJKj77c6y/RBQ5hb5LSNdvbYvCAHR59KLnbmxj1gAAAM5AH0AAAAFZAAgAAAAAI5bm3YO0Xgf0VT+qjVTTfvckecM3Cwqj7DTKZXf8/NXBXMAIAAAAAD/m+h8fBhWaHm6Ykuz0WX1xL4Eme3ErLObyEVJf8NCywVjACAAAAAAiGdTXD22l1zDxHeF4NXUTiBnNTsdpzJRwM6riTPuOogAAzEwAH0AAAAFZAAgAAAAANqo4+p6qdtCzcB4BX1wQ6llU7eFBnuu4MtZwp4B6mDlBXMAIAAAAAAGiz+VaukMZ+6IH4jtn4KWWdKK4/W+O+gRioQDrfzpMgVjACAAAAAAl/wlBjSJW/hKkll8HSBCGw4Ce1pJ5rZuhVE09hKq4jQAAzExAH0AAAAFZAAgAAAAAPrFXmHP2Y4YAm7b/aqsdn/DPoDkv7B8egWkfe23XsM1BXMAIAAAAAAGhwpKAr7skeqHm3oseSbO7qKNhmYsuUrECBxJ5k+D2AVjACAAAAAAKSsxERy2OBhb5MiH9/H2BET2oeU6yVihiAoWi/16FroAAzEyAH0AAAAFZAAgAAAAABzjYxwAjXxXc0Uxv18rH8I3my0Aguow0kTwKyxbrm+cBXMAIAAAAADVbqJVr6IdokuhXkEtXF0C2gINLiAjMVN20lE20Vmp2QVjACAAAAAACJ+VAwl9X88mjC76yzkWeL4K6AamNYjbNkpS9B6fQE4AAzEzAH0AAAAFZAAgAAAAAFMm2feF2fFCm/UC6AfIyepX/xJDSmnnolQIBnHcPmb5BXMAIAAAAABLI11kFrQoaNVZFmq/38aRNImPOjdJh0Lo6irI8M/AaAVjACAAAAAAZFgYh4nV8YN+/AAqe/QhKDkNBPg8KraCG7y3bOzhPV4AAzE0AH0AAAAFZAAgAAAAAJvXB3KyNiNtQko4SSzo/9b2qmM2zU9CQTTDfLSBWMgRBXMAIAAAAAAvjuVP7KsLRDeqVqRziTKpBrjVyqKiIbO9Gw8Wl2wFTAVjACAAAAAARuBn3JdQ8so7Gy0XVH0CtlvtRoJWhSrJP6eRIqbyWh8AAzE1AH0AAAAFZAAgAAAAAPGdcxDiid8z8XYnfdDivNMYVPgBKdGOUw6UStU+48CdBXMAIAAAAAARj6g1Ap0eEfuCZ4X2TsEw+Djrhto3fA5nLwPaY0vCTgVjACAAAAAATxLY6SkcLPaoOBJpQsggEqoxgJgiY9seP3fBQM05PckAAzE2AH0AAAAFZAAgAAAAAP5rGPrYGt3aKob5f/ldP0qrW7bmWvqnKY4QwdDWz400BXMAIAAAAADTQkW2ymaaf/bhteOOGmSrIR97bAnJx+yN3yMj1bTeewVjACAAAAAA6O+wIMt3xMITuCxVrqOCNBPX5F122G+/Is+EYkzUvVYAAzE3AH0AAAAFZAAgAAAAAAlz6wJze5UkIxKpJOZFGCOf3v2KByWyI6NB6JM9wNcBBXMAIAAAAABUC7P/neUIHHoZtq0jFVBHY75tSFYr1Y5S16YN5XxC1QVjACAAAAAANv97jMd9JmZ7lMeCzK7jaZHZYsZJNl6N9g9WXzd2om0AAzE4AH0AAAAFZAAgAAAAAFJ8AtHcjia/9Y5pLEc3qVgH5xKiXw12G9Kn2A1EY8McBXMAIAAAAAAxe7Bdw7eUSBk/oAawa7uicTEDgXLymRNhBy1LAxhDvwVjACAAAAAAtOUZ2NeUWwESpq95O92dhqYBt8RtFnjT43E7k9mQF3oAAzE5AH0AAAAFZAAgAAAAAO8uwQUaKFb6vqR3Sv3Wn4QAonC2exOC9lGG1juqP5DtBXMAIAAAAABZf1KyJgQg8/Rf5c02DgDK2aQu0rNCOvaL60ohDHyY+gVjACAAAAAA5nAWj1Ek8p0MLYZEB3yoVFDfBYZ+/ZpIo71u+W9hprcAAzIwAH0AAAAFZAAgAAAAAE8YtqyRsGCeiR6hhiyisR/hccmK4nZqIMzO4lUBmEFzBXMAIAAAAAC1UYOSKqAeG1UJiKjWFV
skRhuFKpj9Ezy+lICZvFlN5AVjACAAAAAA37oP30mTzVftI2FL9Uwyls/jqLqbmRDQk7p7nlx44uYAAzIxAH0AAAAFZAAgAAAAAPhCrMausDx1QUIEqp9rUdRKyM6a9AAx7jQ3ILIu8wNIBXMAIAAAAACmH8lotGCiF2q9VQxhsS+7LAZv79VUAsOUALaGxE/EpAVjACAAAAAAhVIkl8p19VZ0gpg32s++Jda3qsVSVB5tV4CKrtjhE3IAAzIyAH0AAAAFZAAgAAAAAOBi/GAYFcstMSJPgp3VkMiuuUUCrZytvqYaU8dwm8v2BXMAIAAAAACEZSZVyD3pKzGlbdwlYmWQhHHTV5SnNLknl2Gw8IaUTQVjACAAAAAAriBex2kK//RPhyVpCYJDBng4l5w8jD3m8BF7dVAP0p8AAzIzAH0AAAAFZAAgAAAAAIQuup+YGfH3mflzWopN8J1X8o8a0d9CSGIvrA5HOzraBXMAIAAAAADYvNLURXsC2ITMqK14LABQBI+hZZ5wNf24JMcKLW+84AVjACAAAAAAGeUzdG2kQrx5XypXJezZmPVzMYuqYZw7Bhbl4EPT0SMAAzI0AH0AAAAFZAAgAAAAAKsh0ADyOnVocFrOrf6MpTrNvAj8iaiE923DPryu124gBXMAIAAAAADg24a8NVE1GyScc6tmnTbmu5ulzO+896fE92lN08MeswVjACAAAAAAbO+DEBY3STVMQN7CbxmHDUBYrDR+80e797u/VmiK4vcAAzI1AH0AAAAFZAAgAAAAAKkVC2Y6HtRmv72tDnPUSjJBvse7SxLqnr09/Uuj9sVVBXMAIAAAAABYNFUkH7ylPMN+Bc3HWX1e0flGYNbtJNCY9SltJCW/UAVjACAAAAAAHlE3RLKcXpXto7Mr8nRgzEsmhjfGh4vcgPxashCSMg4AAzI2AH0AAAAFZAAgAAAAADDggcwcb/Yn1Kk39sOHsv7BO/MfP3m/AJzjGH506Wf9BXMAIAAAAAAYZIsdjICS0+BDyRUPnrSAZfPrwtuMaEDEn0/ijLNQmAVjACAAAAAA1c/sy3255NofBQrnBNSmVkSzMgsGPaaOUJShddVrnuQAAzI3AH0AAAAFZAAgAAAAAEWY7dEUOJBgjOoWVht1wLehsWAzB3rSOBtLgTuM2HC8BXMAIAAAAAAAoswiHRROurjwUW8u8D5EUT+67yvrgpB/j6PzBDAfVwVjACAAAAAALBvQumG8m7/bzJjGWN2cHSAncdN8jMtOSmEhEhGom24AAzI4AH0AAAAFZAAgAAAAAPZaapeAUUFPA7JTCMOWHJa9lnPFh0/gXfAPjA1ezm4ZBXMAIAAAAACmJvLY2nivw7/b3DOKH/X7bBXjJwoowqb1GtEFO3OYgAVjACAAAAAAGCDHuUJusGHKQ+r9nrFChmUUsRcqZKPGsRiLSk5gbFcAAzI5AH0AAAAFZAAgAAAAAK00u6jadxCZAiA+fTsPVDsnW5p5LCr4+kZZZOTDuZlfBXMAIAAAAAAote4zTEYMDgaaQbAdN8Dzv93ljPLdGjJzvnRn3KXgtQVjACAAAAAApZ6l+OrocOqgFek7WOqOP9JruTWZ+iW+5zdL3DZwzhkAAzMwAH0AAAAFZAAgAAAAAFbgabdyymiEVYYwtJSWa7lfl/oYuj/SukzJeDOR6wPVBXMAIAAAAADAFGFjS1vPbN6mQEhkDYTD6V2V23Ys9gUEUMGNvMPkaAVjACAAAAAAeICKly5Xtwmfd4JFD+5e1UWpu4V3KoRim7jeBICDs+UAAzMxAH0AAAAFZAAgAAAAABNMR6UBv2E627CqLtQ/eDYx7OEwQ7JrR4mSHFa1N8tLBXMAIAAAAAAxH4gucI4UmNVB7625C6hFSVCuIpJO3lusJlPuL8H5EQVjACAAAAAA0/uh1hrAlGGna/njCVaCqBwubxkifzF0zjCDQrIJOtUAAzMyAH0AAAAFZAAgAAAAAG8cd6WBneNunlqrQ2EmNf35W7OGObGq9WL4ePX+LUDmBXMAIAAAAAAjJ2+sX87NSis9hBsgb1QprVRnO7Bf+GObCGoUqyPE4wVjACAAAAAAAoJ4Ufdq45T9Aun5FupHlaBCIUsmUn6dXgV9KorpFikAAzMzAH0AAAAFZAAgAAAAAFWOUGkUpy8yf6gB3dio/aOfRKh7XuhvsUj48iESFJrGBXMAIAAAAAAY7sCDMcrUXvNuL6dO0m11WyijzXZvPIcOKob6IpC4PQVjACAAAAAAO/QFjczoznH95Iu0YEVMsU1GA1yxSGL8bcwSweNzAtkAAzM0AH0AAAAFZAAgAAAAAGUrIdKxOihwNmo6B+aG+Ag1qa0+iqdksHOjQj+Oy9bZBXMAIAAAAABwa5dbI2KmzBDNBTQBEkjZv4sPaeRkRNejcjdVymRFKQVjACAAAAAAUtdJucZoQvvPJiy65RonQrxBCcvtfHslpbgLbtWirCYAAzM1AH0AAAAFZAAgAAAAAOx89xV/hRk64/CkM9N2EMK6aldII0c8smdcsZ46NbP8BXMAIAAAAADBF6tfQ+7q9kTuLyuyrSnDgmrdmrXkdhl980i1KHuGHgVjACAAAAAAVc5njbLX1afpsuG662U/OBo4LanEk06lKbQtd95fswgAAzM2AH0AAAAFZAAgAAAAAMkN0L1oQWXhjwn9rAdudcYeN8/5VdCKU8cmDt7BokjsBXMAIAAAAAAT62pGXoRwExe9uvgYOI0hg5tOxilrWfoEmT0SMglWJwVjACAAAAAAhNc5ovIaxgtS856ZBfG+cAcHAz+3ewzB9C6zZoaggj4AAzM3AH0AAAAFZAAgAAAAANxfa4xCoaaB7k1C1RoH1LBhsCbN2yEq15BT9b+iqEC4BXMAIAAAAACAX9LV8Pemfw7NF0iB1/85NzM1Ef+1mUfyehacUVgobQVjACAAAAAAc0bIRNzuyc1Y0hkEmm7xV6Xp/LBlAgMq/ISCHr0Tq44AAzM4AH0AAAAFZAAgAAAAAOYIYoWkX7dGuyKfi3XssUlc7u/gWzqrR9KMkikKVdmSBXMAIAAAAABVF2OYjRTGi9Tw8XCAwZWLpX35Yl271TlNWp6N/nROhAVjACAAAAAAyGLbmHhe8eTQC8L2eDRcezUWULLZ9E8DPic7Mfp8/+4AAzM5AH0AAAAFZAAgAAAAAIy0+bXZi10QC+q7oSOLXK5Fee7VEk/qHSXukfeVIfgzBXMAIAAAAAAQ3IIV/JQCHW95AEbH5zGIHtJqyuPjWPMIZ+VmQHlxEwVjACAAAAAAD3azcNDdohU6tHO6ypT7F0rtydAYIMmLIsX6y7cXVkMAAzQwAH0AAAAFZAAgAAAAAL5SOJQ3LOhgdXJ5v086NNeAl1qonQnchObdpZJ1kHeEBXMAIAAAAAA+tEqTXODtik+ydJZSnUqXF9f18bPeze9eWtR7ExZJgQVjACAAAAAAUd8v2RN1WagYan1LbV+G81vcJKQ6HsX2GSBtnzMH7XEAAzQxAH0AAAAFZAAgAAAAAKl8zcHJRDjSjJeV/
WvMxulW1zrTFtaeBy/aKKhadc6UBXMAIAAAAADBdWQl5SBIvtZZLIHszePwkO14W1mQ0izUk2Ov21cPNAVjACAAAAAAHvQLJ2X4ncjVv1BsOd/jbVouRPwf4222WlGSzPyAfnsAAzQyAH0AAAAFZAAgAAAAAFvotcNaoKnVt5CBCOPwjexFO0WGWuaIGL6H/6KSau+6BXMAIAAAAAD2y2mBN5xPu5PJoY2zcr0GnQDtHRBogA5+xzIxccE9fwVjACAAAAAA1OntPAxq1NJ4sfca+ysHcg68mj7QHwdEZQVxHMlV6x0AAzQzAH0AAAAFZAAgAAAAAPezU0/vNT4Q4YKbTbaeHqcwNLT+IjW/Y9QFpIooihjPBXMAIAAAAACj2x4O4rHter8ZnTws5LAP9jJ/6kk9C/V3vL50LoFZHAVjACAAAAAAlZd3bPMUXRhTwLWflrQuKIwtauTswenUiaDi2ibHYYYAAzQ0AH0AAAAFZAAgAAAAAMqpayM2XotEFmm0gwQd9rIzApy0X+7HfOhNk6VU7F5lBXMAIAAAAACJR9+q5T9qFHXFNgGbZnPubG8rkO6cwWhzITQTmd6VgwVjACAAAAAAEEOCUo+ihRGl1kuHlabWBWUFyJPAXSXzpQB4od57cMEAAzQ1AH0AAAAFZAAgAAAAANCeyW+3oebaQk+aqxNVhAcT/BZ5nhsTVdKS3tMrLSvWBXMAIAAAAADxRFMDhkyuEc++WnndMfoUMLNL7T7rWoeblcrpSI6soQVjACAAAAAA1KYQISxmBcGmgDMAjBpTcfQr/oci60Kf4QtEYEXPKSQAAzQ2AH0AAAAFZAAgAAAAAIbo5YBTxXM7HQhl7UP9NNgpPGFkBx871r1B65G47+K8BXMAIAAAAAC21dJSxnEhnxO5gzN5/34BL4von45e1meW92qowzb8fQVjACAAAAAAisxsskU/9ykphpSjB6bMMY+6Tbt6wpjhE2dg1IULAgYAAzQ3AH0AAAAFZAAgAAAAABm/6pF96j26Jm7z5KkY1y33zcAEXLx2n0DwC03bs/ixBXMAIAAAAAD01OMvTZI/mqMgxIhA5nLs068mW+GKl3OW3ilf2D8+LgVjACAAAAAAnPa8PTfcaSSAdzNAC54IMTicbShbtt/cSnFHz7u7g8wAAzQ4AH0AAAAFZAAgAAAAAJ/D3+17gaQdkBqkL2wMwccdmCaVOtxzIkM8VyI4xI5zBXMAIAAAAAAggLVmkc5u+YzBR+oNE+XgLVp64fC6MzUb/Ilu/Jsw0AVjACAAAAAAp5qCK8p0eTvAAUQ8fIOGyb2YM1YflQ8H6ggiWZ9vNI4AAzQ5AH0AAAAFZAAgAAAAAJMRyUW50oaTzspS6A3TUoXyC3gNYQoShUGPakMmeVZrBXMAIAAAAACona2Pqwt4U2PmFrtmu37jB9kQ/12okyAVtYa8TQkDiQVjACAAAAAAx/LOuDwvpaPFJeKLowZW/4q7SUsLtdldB27zc4//alEAAzUwAH0AAAAFZAAgAAAAAByuYl8dBvfaZ0LO/81JW4hYypeNmvLMaxsIdvqMPrWoBXMAIAAAAABNddwobOUJzm9HOUD8BMZJqkNCUCqstHZkC76FIdNg9AVjACAAAAAA4SEPY+qEZ2dCu/9485IEVybiM3Z7szXYM+izxklNm14AAzUxAH0AAAAFZAAgAAAAAHEzLtfmF/sBcYPPdj8867VmmQyU1xK9I/3Y0478azvABXMAIAAAAAAcmyFajZPnBTbO+oLInNwlApBocUekKkxz2hYFeSlQ+gVjACAAAAAA15ibZoBP1/pP27BDIfizDYNRDIpOmj8Ie34FvrtqjhMAAzUyAH0AAAAFZAAgAAAAAJam7JYsZe2cN20ZYm2W3v1pisNt5PLiniMzymBLWyMtBXMAIAAAAABxCsKVMZMTn3n+R2L7pVz5nW804r8HcK0mCBw3jUXKXAVjACAAAAAAElCgoMRrtlONqnyosjIpiz4mQTLq3+b/JUCVEq4QFiAAAzUzAH0AAAAFZAAgAAAAAMXrXx0saZ+5gORmwM2FLuZG6iuO2YS+1IGPoAtDKoKBBXMAIAAAAADIQsxCr8CfFKaBcx8kIeSywnGh7JHjKRJ9vJd9x79y7wVjACAAAAAAPzOTJ088k9QdGUpZ1ubUdkw3pTGkNF8bui4a9wqeo08AAzU0AH0AAAAFZAAgAAAAACbzcUD3INSnCRspOKF7ubne74OK9L0FTZvi9Ay0JVDYBXMAIAAAAADPebVQH8Btk9rhBIoUOdSAdpPvz7qIY4UC2i6IGisSAQVjACAAAAAAe4zSAgMQPjtz2t1DgxW1XOlV+fnNMXt6iQ7iV7DgQz4AAzU1AH0AAAAFZAAgAAAAACUc2CtD1MK/UTxtv+8iA9FoHEyTwdl43HKeSwDw2Lp5BXMAIAAAAACCIduIdw65bQMzRYRfjBJj62bc69T4QqH4QoWanwlvowVjACAAAAAA+i6aTnkoixxSOxDgQM/aWTylOy1m0ViCDU5asOM5ieYAAzU2AH0AAAAFZAAgAAAAAHSaHWs/dnmI9sc7nB50VB2Bzs0kHapMHCQdyVEYY30TBXMAIAAAAACkV22lhEjWv/9/DubfHBAcwJggKI5mIbSK5L2nyqloqQVjACAAAAAA1I5pLbnlFf/EfJlhE0RFIioDjzKrWNh4TO8H/ksrbMcAAzU3AH0AAAAFZAAgAAAAAAL8jhNBG0KXXZhmZ0bPXtfgapJCB/AI+BEHB0eZ3C75BXMAIAAAAADHx/fPa639EBmGV5quLi8IQT600ifiKSOhTDOK19DnzwVjACAAAAAANfWpWWy9iu7pgMEy2aVCggGEtJtXKQG1+3zCf8D6iEkAAzU4AH0AAAAFZAAgAAAAAAxgeclNl09H7HvzD1oLwb2YpFca5eaX90uStYXHilqKBXMAIAAAAACMU5pSxzIzWlQxHyW170Xs9EhD1hURASQk+qkx7K5Y6AVjACAAAAAA/kYlJPSFn2g1l7cy6djV/7wgnlPWphZ0IKSY8uenUF0AAzU5AH0AAAAFZAAgAAAAABmO7QD9vxWMmFjIHz13lyOeV6vHT6mYCsWxF7hb/yOjBXMAIAAAAACT9lmgkiqzuWG24afuzYiCeK9gmJqacmxAruIukd0xEAVjACAAAAAAyiZxtuOZQNSit4wFMJS3jnqLbDzyJ0OtDTKs6r0EO0cAAzYwAH0AAAAFZAAgAAAAAB89SjLtDJkqEghRGyj6aQ/2qvWLNuMROoXmzbYbCMKMBXMAIAAAAAC8sywgND+CjhVTF6HnRQeay8y9/HnVzDI42dEPah28LQVjACAAAAAAzBj8q9Ig4uhIc6rhNDVX1lII9lzfhJ0+ig8viBhdrtQAAzYxAH0AAAAFZAAgAAAAABN2alGq9Aats1mwERNGwL/fIwZSvVCe9/8XMHTFlpUpBXMAIAAAAACuDPjJgvvbBYhbLpjMiWUCsVppiYrhvR+yMysNPN8cZAVjACAAAAAAqcsLIvb3Pe/QKYB9wQ86Ye2O
FSAJEIY4ve7LMM3SwGMAAzYyAH0AAAAFZAAgAAAAAL8YB6VAqGBiWD4CBv16IBscg5J7VQCTZu87n6pj+86KBXMAIAAAAAAmxm8e68geeyAdUjSMWBHzUjneVB0pG9TBXIoE6467hAVjACAAAAAA9Kd65vKIUckNPvxl/HHNPJVnFHXyErp4qibmD7BloukAAzYzAH0AAAAFZAAgAAAAAL4yLkCTV5Dmxa5toBu4JT8ge/cITAaURIOuFuOtFUkeBXMAIAAAAAAXoFNQOMGkAj7qEJP0wQafmFSXgWGeorDVbwyOxWLIsgVjACAAAAAAaFrBx0xXIryTe7V/kX+jfPHd0057x7k7MxzDAzi0RpcAAzY0AH0AAAAFZAAgAAAAAEU8DoUp46YtYjNFS9kNXwdYxQ9IW27vCTb+VcqqfnKNBXMAIAAAAADe7vBOgYReE8X78k5ARuUnv4GmzPZzg6SbConf4L2G3wVjACAAAAAAOl/tartywd/fJj5DNRsVH/ml9tJ8KkkCbKObsFe8lHcAAzY1AH0AAAAFZAAgAAAAAPa4yKTtkUtySuWo1ZQsp2QXtPb5SYqzA5vYDnS1P6c0BXMAIAAAAADKnF58R1sXlHlsHIvCBR3YWW/qk54z9CTDhZydkD1cOQVjACAAAAAAfn/jP8K3QUILngCNkydHARyBvBHIFdaJjzV0EXsFruMAAzY2AH0AAAAFZAAgAAAAAN5WJnMBmfgpuQPyonmY5X6OdRvuHw4nhsnGRnFAQ95VBXMAIAAAAACwftzu7KVV1rmGKwXtJjs3cJ1gE3apr8+N0SAg1F2cHwVjACAAAAAAf0XyQQLd/HIYaf9EeAV0o2h12K1AV5piLCpZihznBXoAAzY3AH0AAAAFZAAgAAAAACHvDsaPhoSb6DeGnKQ1QOpGYAgK82qpnqwcmzSeWaJHBXMAIAAAAABRq3C5+dOfnkAHM5Mg5hPB3O4jhwQlBgQWLA7Ph5bhgwVjACAAAAAAm2Ukk5kkQp+PDpBCCefQOqFKKZie4hINim3yvtypsEAAAzY4AH0AAAAFZAAgAAAAAOBePJvccPMJmy515KB1AkXF5Pi8NOG4V8psWy0SPRP+BXMAIAAAAAB3dOJG9xIDtEKCRzeNnPS3bFZepMj8UKBobKpSoCPqpgVjACAAAAAAnu33SngYPOtRgdJ3aBBuxWn80ti3OO6nZJcI6eh0VfQAAzY5AH0AAAAFZAAgAAAAABUvRrDQKEXLMdhnzXRdhiL6AGNs2TojPky+YVLXs+JnBXMAIAAAAAD1kYicbEEcPzD4QtuSYQQWDPq8fuUWGddpWayKn3dT9QVjACAAAAAAUc5dxcbNYY3qMO8+Xm2xis+pH9NjPLrHqHenwDEImAEAAzcwAH0AAAAFZAAgAAAAAOvSnpujeKNen4pqc2HR63C5s5oJ1Vf4CsbKoYQvkwl5BXMAIAAAAACw2+vAMdibzd2YVVNfk81yXkFZP0WLJ82JBxJmXnYE+QVjACAAAAAAnmej008JFQAgdImjk26PYvqdATjRYRufLi+vEVVwtucAAzcxAH0AAAAFZAAgAAAAAAo0xfGG7tJ3GWhgPVhW5Zn239nTD3PadShCNRc9TwdNBXMAIAAAAADZh243oOhenu0s/P/5KZLBDh9ADqKHtSWcXpO9D2sIjgVjACAAAAAAhpNHI4+aRdWHCvZfEjlgpRBz36g05wN9p/hj3NEwc7EAAAVlACAAAAAA65pz95EthqQpfoHS9nWvdCh05AV+OokP7GUaI+7j8+wSY20AAAAAAAAAAAAAEHBheWxvYWRJZAAAAAAAEGZpcnN0T3BlcmF0b3IAAQAAAAA=", + "base64": 
"DYckAAADcGF5bG9hZABXJAAABGcAQyQAAAMwAH0AAAAFZAAgAAAAAHgYoMGjEE6fAlAhICv0+doHcVX8CmMVxyq7+jlyGrvmBXMAIAAAAAC/5MQZgTHuIr/O5Z3mXPvqrom5JTQ8IeSpQGhO9sB+8gVsACAAAAAAuPSXVmJUAUpTQg/A9Bu1hYczZF58KEhVofakygbsvJQAAzEAfQAAAAVkACAAAAAA2kiWNvEc4zunJ1jzvuClFC9hjZMYruKCqAaxq+oY8EAFcwAgAAAAACofIS72Cm6s866UCk+evTH3CvKBj/uZd72sAL608rzTBWwAIAAAAADuCQ/M2xLeALF0UFZtJb22QGOhHmJv6xoO+kZIHcDeiAADMgB9AAAABWQAIAAAAABkfoBGmU3hjYBvQbjNW19kfXneBQsQQPRfUL3UAwI2cAVzACAAAAAAUpK2BUOqX/DGdX5YJniEZMWkofxHqeAbXceEGJxhp8AFbAAgAAAAAKUaLzIldNIZv6RHE+FwbMjzcNHqPESwF/37mm43VPrsAAMzAH0AAAAFZAAgAAAAAFNprhQ3ZwIcYbuzLolAT5n/vc14P9kUUQComDu6eFyKBXMAIAAAAAAcx9z9pk32YbPV/sfPZl9ALIEVsqoLXgqWLVK/tP+heAVsACAAAAAA/qxvuvJbAHwwhfrPVpmCFzNvg2cU/NXaWgqgYUZpgXwAAzQAfQAAAAVkACAAAAAAODI+pB2pCuB+YmNEUAgtMfNdt3DmSkrJ96gRzLphgb8FcwAgAAAAAAT7dewFDxUDECQ3zVq75/cUN4IP+zsqhkP5+czUwlJIBWwAIAAAAACFGeOtd5zBXTJ4JYonkn/HXZfHipUlqGwIRUcH/VTatwADNQB9AAAABWQAIAAAAACNAk+yTZ4Ewk1EnotQK8O3h1gg9I7pr9q2+4po1iJVgAVzACAAAAAAUj/LesmtEsgqYVzMJ67umVA11hJTdDXwbxDoQ71vWyUFbAAgAAAAABlnhpgTQ0WjLb5u0b/vEydrCeFjVynKd7aqb+UnvVLeAAM2AH0AAAAFZAAgAAAAAD/FIrGYFDjyYmVb7oTMVwweWP7A6F9LnyIuNO4MjBnXBXMAIAAAAACIZgJCQRZu7NhuNMyOqCn1tf+DfU1qm10TPCfj5JYV3wVsACAAAAAA5hmY4ptuNxULGf87SUFXQWGAONsL9U29duh8xqsHtxoAAzcAfQAAAAVkACAAAAAAciRW40ORJLVwchOEpz87Svb+5toAFM6LxDWv928ECwQFcwAgAAAAAN0dipyESIkszfjRzdDi8kAGaa2Hf4wrPAtiWwboZLuxBWwAIAAAAAANr4o/+l1OIbbaX5lZ3fQ/WIeOcEXjNI1F0WbSgQrzaQADOAB9AAAABWQAIAAAAACZqAyCzYQupJ95mrBJX54yIz9VY7I0WrxpNYElCI4dTQVzACAAAAAA/eyJb6d1xfE+jJlVXMTD3HS/NEYENPVKAuj56Dr2dSEFbAAgAAAAANkSt154Or/JKb31VvbZFV46RPgUp8ff/hcPORL7PpFBAAM5AH0AAAAFZAAgAAAAAI5bm3YO0Xgf0VT+qjVTTfvckecM3Cwqj7DTKZXf8/NXBXMAIAAAAAD/m+h8fBhWaHm6Ykuz0WX1xL4Eme3ErLObyEVJf8NCywVsACAAAAAAfb1VZZCqs2ivYbRzX4p5CtaCkKW+g20Pr57FWXzEZi8AAzEwAH0AAAAFZAAgAAAAANqo4+p6qdtCzcB4BX1wQ6llU7eFBnuu4MtZwp4B6mDlBXMAIAAAAAAGiz+VaukMZ+6IH4jtn4KWWdKK4/W+O+gRioQDrfzpMgVsACAAAAAAG4YYkTp80EKo59mlHExDodRQFR7njhR5dmISwUJ6ukAAAzExAH0AAAAFZAAgAAAAAPrFXmHP2Y4YAm7b/aqsdn/DPoDkv7B8egWkfe23XsM1BXMAIAAAAAAGhwpKAr7skeqHm3oseSbO7qKNhmYsuUrECBxJ5k+D2AVsACAAAAAAAqPQi9luYAu3GrFCEsVjd9z2zIDcp6SPTR2w6KQEr+IAAzEyAH0AAAAFZAAgAAAAABzjYxwAjXxXc0Uxv18rH8I3my0Aguow0kTwKyxbrm+cBXMAIAAAAADVbqJVr6IdokuhXkEtXF0C2gINLiAjMVN20lE20Vmp2QVsACAAAAAAD7K1Fx4gFaaizkIUrf+EGXQeG7QX1jadhGc6Ji471H8AAzEzAH0AAAAFZAAgAAAAAFMm2feF2fFCm/UC6AfIyepX/xJDSmnnolQIBnHcPmb5BXMAIAAAAABLI11kFrQoaNVZFmq/38aRNImPOjdJh0Lo6irI8M/AaAVsACAAAAAAOWul0oVqJ9CejD2RqphhTC98DJeRQy5EwbNerU2+4l8AAzE0AH0AAAAFZAAgAAAAAJvXB3KyNiNtQko4SSzo/9b2qmM2zU9CQTTDfLSBWMgRBXMAIAAAAAAvjuVP7KsLRDeqVqRziTKpBrjVyqKiIbO9Gw8Wl2wFTAVsACAAAAAADlE+oc1ins+paNcaOZJhBlKlObDJ4VQORWjFYocM4LgAAzE1AH0AAAAFZAAgAAAAAPGdcxDiid8z8XYnfdDivNMYVPgBKdGOUw6UStU+48CdBXMAIAAAAAARj6g1Ap0eEfuCZ4X2TsEw+Djrhto3fA5nLwPaY0vCTgVsACAAAAAAoHqiwGOUkBu8SX5U1yHho+UIFdSN2MdQN5s6bQ0EsJYAAzE2AH0AAAAFZAAgAAAAAP5rGPrYGt3aKob5f/ldP0qrW7bmWvqnKY4QwdDWz400BXMAIAAAAADTQkW2ymaaf/bhteOOGmSrIR97bAnJx+yN3yMj1bTeewVsACAAAAAADyQnHGH2gF4w4L8axUsSTf6Ubk7L5/eoFOJk12MtZAoAAzE3AH0AAAAFZAAgAAAAAAlz6wJze5UkIxKpJOZFGCOf3v2KByWyI6NB6JM9wNcBBXMAIAAAAABUC7P/neUIHHoZtq0jFVBHY75tSFYr1Y5S16YN5XxC1QVsACAAAAAAgvxRbXDisNnLY3pfsjDdnFLtkvYUC4lhA68eBXc7KAwAAzE4AH0AAAAFZAAgAAAAAFJ8AtHcjia/9Y5pLEc3qVgH5xKiXw12G9Kn2A1EY8McBXMAIAAAAAAxe7Bdw7eUSBk/oAawa7uicTEDgXLymRNhBy1LAxhDvwVsACAAAAAAxKPaIBKVx3jTA+R/el7P7AZ7efrmTGjJs3Hj/YdMddwAAzE5AH0AAAAFZAAgAAAAAO8uwQUaKFb6vqR3Sv3Wn4QAonC2exOC9lGG1juqP5DtBXMAIAAAAABZf1KyJgQg8/Rf5c02DgDK2aQu0rNCOvaL60ohDHyY+gVsACAAAAAAqyEjfKC8lYoIfoXYHUqHZPoaA6EK5BAZy5dxXZmay4kAAzIwAH0AAAAFZAAgAAAAAE8YtqyRsGCeiR6hhiyisR/hccmK4nZqIMzO4lUBmEFzBXMAIAAAAAC1UYOSKqAeG1UJiKjWFV
skRhuFKpj9Ezy+lICZvFlN5AVsACAAAAAA6Ct9nNMKyRazn1OKnRKagm746CGu+jyhbL1qJnZxGi0AAzIxAH0AAAAFZAAgAAAAAPhCrMausDx1QUIEqp9rUdRKyM6a9AAx7jQ3ILIu8wNIBXMAIAAAAACmH8lotGCiF2q9VQxhsS+7LAZv79VUAsOUALaGxE/EpAVsACAAAAAAnc1xCKfdvbUEc8F7XZqlNn1C+hZTtC0I9I3LL06iaNkAAzIyAH0AAAAFZAAgAAAAAOBi/GAYFcstMSJPgp3VkMiuuUUCrZytvqYaU8dwm8v2BXMAIAAAAACEZSZVyD3pKzGlbdwlYmWQhHHTV5SnNLknl2Gw8IaUTQVsACAAAAAAfsLZsEDcWSuNsIo/TD1ReyQW75HPMgmuKZuWFOLKRLoAAzIzAH0AAAAFZAAgAAAAAIQuup+YGfH3mflzWopN8J1X8o8a0d9CSGIvrA5HOzraBXMAIAAAAADYvNLURXsC2ITMqK14LABQBI+hZZ5wNf24JMcKLW+84AVsACAAAAAACzfjbTBH7IwDU91OqLAz94RFkoqBOkzKAqQb55gT4/MAAzI0AH0AAAAFZAAgAAAAAKsh0ADyOnVocFrOrf6MpTrNvAj8iaiE923DPryu124gBXMAIAAAAADg24a8NVE1GyScc6tmnTbmu5ulzO+896fE92lN08MeswVsACAAAAAAaPxcOIxnU7But88/yadOuDJDMcCywwrRitaxMODT4msAAzI1AH0AAAAFZAAgAAAAAKkVC2Y6HtRmv72tDnPUSjJBvse7SxLqnr09/Uuj9sVVBXMAIAAAAABYNFUkH7ylPMN+Bc3HWX1e0flGYNbtJNCY9SltJCW/UAVsACAAAAAAZYK/f9H4OeihmpiFMH7Wm7uLvs2s92zNA8wyrNZTsuMAAzI2AH0AAAAFZAAgAAAAADDggcwcb/Yn1Kk39sOHsv7BO/MfP3m/AJzjGH506Wf9BXMAIAAAAAAYZIsdjICS0+BDyRUPnrSAZfPrwtuMaEDEn0/ijLNQmAVsACAAAAAAGPnYVvo2ulO9z4LGd/69NAklfIcZqZvFX2KK0s+FcTUAAzI3AH0AAAAFZAAgAAAAAEWY7dEUOJBgjOoWVht1wLehsWAzB3rSOBtLgTuM2HC8BXMAIAAAAAAAoswiHRROurjwUW8u8D5EUT+67yvrgpB/j6PzBDAfVwVsACAAAAAA6NhRTYFL/Sz4tao7vpPjLNgAJ0FX6P/IyMW65qT6YsMAAzI4AH0AAAAFZAAgAAAAAPZaapeAUUFPA7JTCMOWHJa9lnPFh0/gXfAPjA1ezm4ZBXMAIAAAAACmJvLY2nivw7/b3DOKH/X7bBXjJwoowqb1GtEFO3OYgAVsACAAAAAA/JcUoyKacCB1NfmH8vYqC1f7rd13KShrQqV2r9QBP44AAzI5AH0AAAAFZAAgAAAAAK00u6jadxCZAiA+fTsPVDsnW5p5LCr4+kZZZOTDuZlfBXMAIAAAAAAote4zTEYMDgaaQbAdN8Dzv93ljPLdGjJzvnRn3KXgtQVsACAAAAAAxXd9Mh6R3mnJy8m7UfqMKi6oD5DlZpkaOz6bEjMOdiwAAzMwAH0AAAAFZAAgAAAAAFbgabdyymiEVYYwtJSWa7lfl/oYuj/SukzJeDOR6wPVBXMAIAAAAADAFGFjS1vPbN6mQEhkDYTD6V2V23Ys9gUEUMGNvMPkaAVsACAAAAAAL/D5Sze/ZoEanZLK0IeEkhgVkxEjMWVCfmJaD3a8uNIAAzMxAH0AAAAFZAAgAAAAABNMR6UBv2E627CqLtQ/eDYx7OEwQ7JrR4mSHFa1N8tLBXMAIAAAAAAxH4gucI4UmNVB7625C6hFSVCuIpJO3lusJlPuL8H5EQVsACAAAAAAVLHNg0OUVqZ7WGOP53BkTap9FOw9dr1P4J8HxqFqU04AAzMyAH0AAAAFZAAgAAAAAG8cd6WBneNunlqrQ2EmNf35W7OGObGq9WL4ePX+LUDmBXMAIAAAAAAjJ2+sX87NSis9hBsgb1QprVRnO7Bf+GObCGoUqyPE4wVsACAAAAAAs9c9SM49/pWmyUQKslpt3RTMBNSRppfNO0JBvUqHPg0AAzMzAH0AAAAFZAAgAAAAAFWOUGkUpy8yf6gB3dio/aOfRKh7XuhvsUj48iESFJrGBXMAIAAAAAAY7sCDMcrUXvNuL6dO0m11WyijzXZvPIcOKob6IpC4PQVsACAAAAAAJOP+EHz6awDb1qK2bZQ3kTV7wsj5Daj/IGAWh4g7omAAAzM0AH0AAAAFZAAgAAAAAGUrIdKxOihwNmo6B+aG+Ag1qa0+iqdksHOjQj+Oy9bZBXMAIAAAAABwa5dbI2KmzBDNBTQBEkjZv4sPaeRkRNejcjdVymRFKQVsACAAAAAA4ml/nm0gJNTcJ4vuD+T2Qfq2fQZlibJp/j6MOGDrbHMAAzM1AH0AAAAFZAAgAAAAAOx89xV/hRk64/CkM9N2EMK6aldII0c8smdcsZ46NbP8BXMAIAAAAADBF6tfQ+7q9kTuLyuyrSnDgmrdmrXkdhl980i1KHuGHgVsACAAAAAACUqiFqHZdGbwAA+hN0YUE5zFg+H+dabIB4dj5/75W/YAAzM2AH0AAAAFZAAgAAAAAMkN0L1oQWXhjwn9rAdudcYeN8/5VdCKU8cmDt7BokjsBXMAIAAAAAAT62pGXoRwExe9uvgYOI0hg5tOxilrWfoEmT0SMglWJwVsACAAAAAAlVz4dhiprSbUero6JFfxzSJGclg63oAkAmgbSwbcYxIAAzM3AH0AAAAFZAAgAAAAANxfa4xCoaaB7k1C1RoH1LBhsCbN2yEq15BT9b+iqEC4BXMAIAAAAACAX9LV8Pemfw7NF0iB1/85NzM1Ef+1mUfyehacUVgobQVsACAAAAAAVq4xpbymLk0trPC/a2MvB39I7hRiX8EJsVSI5E5hSBkAAzM4AH0AAAAFZAAgAAAAAOYIYoWkX7dGuyKfi3XssUlc7u/gWzqrR9KMkikKVdmSBXMAIAAAAABVF2OYjRTGi9Tw8XCAwZWLpX35Yl271TlNWp6N/nROhAVsACAAAAAA0nWwYzXQ1+EkDvnGq+SMlq20z+j32Su+i/A95SggPb4AAzM5AH0AAAAFZAAgAAAAAIy0+bXZi10QC+q7oSOLXK5Fee7VEk/qHSXukfeVIfgzBXMAIAAAAAAQ3IIV/JQCHW95AEbH5zGIHtJqyuPjWPMIZ+VmQHlxEwVsACAAAAAAp0jYsyohKv9Pm+4k+DplEGbl9WLZpAJzitrcDj4CNsMAAzQwAH0AAAAFZAAgAAAAAL5SOJQ3LOhgdXJ5v086NNeAl1qonQnchObdpZJ1kHeEBXMAIAAAAAA+tEqTXODtik+ydJZSnUqXF9f18bPeze9eWtR7ExZJgQVsACAAAAAAbrkZCVgB9Qsp4IAbdf+bD4fT6Boqk5UtuA/zhNrh1y0AAzQxAH0AAAAFZAAgAAAAAKl8zcHJRDjSjJeV/
WvMxulW1zrTFtaeBy/aKKhadc6UBXMAIAAAAADBdWQl5SBIvtZZLIHszePwkO14W1mQ0izUk2Ov21cPNAVsACAAAAAAHErCYycpqiIcCZHdmPL1hi+ovLQk4TAvENpfLdTRamQAAzQyAH0AAAAFZAAgAAAAAFvotcNaoKnVt5CBCOPwjexFO0WGWuaIGL6H/6KSau+6BXMAIAAAAAD2y2mBN5xPu5PJoY2zcr0GnQDtHRBogA5+xzIxccE9fwVsACAAAAAAdS34xzJesnUfxLCcc1U7XzUqLy8MAzV/tcjbqaD3lkMAAzQzAH0AAAAFZAAgAAAAAPezU0/vNT4Q4YKbTbaeHqcwNLT+IjW/Y9QFpIooihjPBXMAIAAAAACj2x4O4rHter8ZnTws5LAP9jJ/6kk9C/V3vL50LoFZHAVsACAAAAAAQdBDF3747uCVP5lB/zr8VmzxJfTSZHBKeIgm5FyONXwAAzQ0AH0AAAAFZAAgAAAAAMqpayM2XotEFmm0gwQd9rIzApy0X+7HfOhNk6VU7F5lBXMAIAAAAACJR9+q5T9qFHXFNgGbZnPubG8rkO6cwWhzITQTmd6VgwVsACAAAAAAOZLQ6o7e4mVfDzbpQioa4d3RoTvqwgnbmc5Qh2wsZuoAAzQ1AH0AAAAFZAAgAAAAANCeyW+3oebaQk+aqxNVhAcT/BZ5nhsTVdKS3tMrLSvWBXMAIAAAAADxRFMDhkyuEc++WnndMfoUMLNL7T7rWoeblcrpSI6soQVsACAAAAAAdBuBMJ1lxt0DRq9pOZldQqchLs3B/W02txcMLD490FEAAzQ2AH0AAAAFZAAgAAAAAIbo5YBTxXM7HQhl7UP9NNgpPGFkBx871r1B65G47+K8BXMAIAAAAAC21dJSxnEhnxO5gzN5/34BL4von45e1meW92qowzb8fQVsACAAAAAAm3Hk2cvBN0ANaR5jzeZE5TsdxDvJCTOT1I01X7cNVaYAAzQ3AH0AAAAFZAAgAAAAABm/6pF96j26Jm7z5KkY1y33zcAEXLx2n0DwC03bs/ixBXMAIAAAAAD01OMvTZI/mqMgxIhA5nLs068mW+GKl3OW3ilf2D8+LgVsACAAAAAAaLvJDrqBESTNZSdcXsd+8GXPl8ZkUsGpeYuyYVv/kygAAzQ4AH0AAAAFZAAgAAAAAJ/D3+17gaQdkBqkL2wMwccdmCaVOtxzIkM8VyI4xI5zBXMAIAAAAAAggLVmkc5u+YzBR+oNE+XgLVp64fC6MzUb/Ilu/Jsw0AVsACAAAAAACz3HVKdWkx82/kGbVpcbAeZtsj2R5Zr0dEPfle4IErkAAzQ5AH0AAAAFZAAgAAAAAJMRyUW50oaTzspS6A3TUoXyC3gNYQoShUGPakMmeVZrBXMAIAAAAACona2Pqwt4U2PmFrtmu37jB9kQ/12okyAVtYa8TQkDiQVsACAAAAAAltJJKjCMyBTJ+4PkdDCPJdeX695P8P5h7WOZ+kmExMAAAzUwAH0AAAAFZAAgAAAAAByuYl8dBvfaZ0LO/81JW4hYypeNmvLMaxsIdvqMPrWoBXMAIAAAAABNddwobOUJzm9HOUD8BMZJqkNCUCqstHZkC76FIdNg9AVsACAAAAAAQQOkIQtkyNavqCnhQbNg3HfqrJdsAGaoxSJePJl1qXsAAzUxAH0AAAAFZAAgAAAAAHEzLtfmF/sBcYPPdj8867VmmQyU1xK9I/3Y0478azvABXMAIAAAAAAcmyFajZPnBTbO+oLInNwlApBocUekKkxz2hYFeSlQ+gVsACAAAAAAZ6IkrOVRcC8vSA6Vb4fPWZJrYexXhEabIuYIeXNsCSgAAzUyAH0AAAAFZAAgAAAAAJam7JYsZe2cN20ZYm2W3v1pisNt5PLiniMzymBLWyMtBXMAIAAAAABxCsKVMZMTn3n+R2L7pVz5nW804r8HcK0mCBw3jUXKXAVsACAAAAAA7j3JGnNtR64P4dJLeUoScFRGfa8ekjh3dvhw46sRFk0AAzUzAH0AAAAFZAAgAAAAAMXrXx0saZ+5gORmwM2FLuZG6iuO2YS+1IGPoAtDKoKBBXMAIAAAAADIQsxCr8CfFKaBcx8kIeSywnGh7JHjKRJ9vJd9x79y7wVsACAAAAAAcvBV+SykDYhmRFyVYwFYB9oBKBSHr55Jdz2cXeowsUQAAzU0AH0AAAAFZAAgAAAAACbzcUD3INSnCRspOKF7ubne74OK9L0FTZvi9Ay0JVDYBXMAIAAAAADPebVQH8Btk9rhBIoUOdSAdpPvz7qIY4UC2i6IGisSAQVsACAAAAAAiBunJi0mPnnXdnldiq+If8dcb/n6apHnaIFt+oyYO1kAAzU1AH0AAAAFZAAgAAAAACUc2CtD1MK/UTxtv+8iA9FoHEyTwdl43HKeSwDw2Lp5BXMAIAAAAACCIduIdw65bQMzRYRfjBJj62bc69T4QqH4QoWanwlvowVsACAAAAAAM0TV7S+aPVVzJOQ+cpSNKHTwyQ0mWa8tcHzfk3nR+9IAAzU2AH0AAAAFZAAgAAAAAHSaHWs/dnmI9sc7nB50VB2Bzs0kHapMHCQdyVEYY30TBXMAIAAAAACkV22lhEjWv/9/DubfHBAcwJggKI5mIbSK5L2nyqloqQVsACAAAAAAS19m7DccQxgryOsBJ3GsCs37yfQqNi1G+S6fCXpEhn4AAzU3AH0AAAAFZAAgAAAAAAL8jhNBG0KXXZhmZ0bPXtfgapJCB/AI+BEHB0eZ3C75BXMAIAAAAADHx/fPa639EBmGV5quLi8IQT600ifiKSOhTDOK19DnzwVsACAAAAAAlyLTDVkHxbayklD6Qymh3odIK1JHaOtps4f4HR+PcDgAAzU4AH0AAAAFZAAgAAAAAAxgeclNl09H7HvzD1oLwb2YpFca5eaX90uStYXHilqKBXMAIAAAAACMU5pSxzIzWlQxHyW170Xs9EhD1hURASQk+qkx7K5Y6AVsACAAAAAAJbMMwJfNftA7Xom8Bw/ghuZmSa3x12vTZxBUbV8m888AAzU5AH0AAAAFZAAgAAAAABmO7QD9vxWMmFjIHz13lyOeV6vHT6mYCsWxF7hb/yOjBXMAIAAAAACT9lmgkiqzuWG24afuzYiCeK9gmJqacmxAruIukd0xEAVsACAAAAAAZa0/FI/GkZR7CtX18Xg9Tn9zfxkD0UoaSt+pIO5t1t4AAzYwAH0AAAAFZAAgAAAAAB89SjLtDJkqEghRGyj6aQ/2qvWLNuMROoXmzbYbCMKMBXMAIAAAAAC8sywgND+CjhVTF6HnRQeay8y9/HnVzDI42dEPah28LQVsACAAAAAAoxv7UKh0RqUAWcOsQvU123zO1qZn73Xfib0qncZCB34AAzYxAH0AAAAFZAAgAAAAABN2alGq9Aats1mwERNGwL/fIwZSvVCe9/8XMHTFlpUpBXMAIAAAAACuDPjJgvvbBYhbLpjMiWUCsVppiYrhvR+yMysNPN8cZAVsACAAAAAAKpADjc4bzIZMi9Q/+oe0EMRJ
HYQt6dlo1x/lRquagqkAAzYyAH0AAAAFZAAgAAAAAL8YB6VAqGBiWD4CBv16IBscg5J7VQCTZu87n6pj+86KBXMAIAAAAAAmxm8e68geeyAdUjSMWBHzUjneVB0pG9TBXIoE6467hAVsACAAAAAAV76JZAlYpgC/Zl8awx2ArCg1uuyy2XVTSkp0wUMi/7UAAzYzAH0AAAAFZAAgAAAAAL4yLkCTV5Dmxa5toBu4JT8ge/cITAaURIOuFuOtFUkeBXMAIAAAAAAXoFNQOMGkAj7qEJP0wQafmFSXgWGeorDVbwyOxWLIsgVsACAAAAAAc4Un6dtIFe+AQ+RSfNWs3q63RTHhmyc+5GKRRdpWRv8AAzY0AH0AAAAFZAAgAAAAAEU8DoUp46YtYjNFS9kNXwdYxQ9IW27vCTb+VcqqfnKNBXMAIAAAAADe7vBOgYReE8X78k5ARuUnv4GmzPZzg6SbConf4L2G3wVsACAAAAAA78YHWVkp6HbZ0zS4UL2z/2pj9vPDcMDt7zTv6NcRsVsAAzY1AH0AAAAFZAAgAAAAAPa4yKTtkUtySuWo1ZQsp2QXtPb5SYqzA5vYDnS1P6c0BXMAIAAAAADKnF58R1sXlHlsHIvCBR3YWW/qk54z9CTDhZydkD1cOQVsACAAAAAAHW3ERalTFWKMzjuXF3nFh0pSrQxM/ojnPbPhc4v5MaQAAzY2AH0AAAAFZAAgAAAAAN5WJnMBmfgpuQPyonmY5X6OdRvuHw4nhsnGRnFAQ95VBXMAIAAAAACwftzu7KVV1rmGKwXtJjs3cJ1gE3apr8+N0SAg1F2cHwVsACAAAAAATDW0reyaCjbJuVLJzbSLx1OBuBoQu+090kgW4RurVacAAzY3AH0AAAAFZAAgAAAAACHvDsaPhoSb6DeGnKQ1QOpGYAgK82qpnqwcmzSeWaJHBXMAIAAAAABRq3C5+dOfnkAHM5Mg5hPB3O4jhwQlBgQWLA7Ph5bhgwVsACAAAAAAqkC8zYASvkVrp0pqmDyFCkPaDmD/ePAJpMuNOCBhni8AAzY4AH0AAAAFZAAgAAAAAOBePJvccPMJmy515KB1AkXF5Pi8NOG4V8psWy0SPRP+BXMAIAAAAAB3dOJG9xIDtEKCRzeNnPS3bFZepMj8UKBobKpSoCPqpgVsACAAAAAAPG3IxQVOdZrr509ggm5FKizWWoZPuVtOgOIGZ3m+pdEAAzY5AH0AAAAFZAAgAAAAABUvRrDQKEXLMdhnzXRdhiL6AGNs2TojPky+YVLXs+JnBXMAIAAAAAD1kYicbEEcPzD4QtuSYQQWDPq8fuUWGddpWayKn3dT9QVsACAAAAAA9+Sf7PbyFcY45hP9oTfjQiOUS3vEIAT8C0vOHymwYSUAAzcwAH0AAAAFZAAgAAAAAOvSnpujeKNen4pqc2HR63C5s5oJ1Vf4CsbKoYQvkwl5BXMAIAAAAACw2+vAMdibzd2YVVNfk81yXkFZP0WLJ82JBxJmXnYE+QVsACAAAAAArQ/E1ACyhK4ZyLqH9mNkCU7WClqRQTGyW9tciSGG/EMAAzcxAH0AAAAFZAAgAAAAAAo0xfGG7tJ3GWhgPVhW5Zn239nTD3PadShCNRc9TwdNBXMAIAAAAADZh243oOhenu0s/P/5KZLBDh9ADqKHtSWcXpO9D2sIjgVsACAAAAAAlgTPaoQKz+saU8rwCT3UiNOdG6hdpjzFx9GBn08ZkBEAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAAA==", "subType": "06" } } @@ -294,7 +290,7 @@ }, "update": { "$set": { - "encryptedDouble": { + "encryptedDoubleNoPrecision": { "$$type": "binData" } } @@ -304,7 +300,6 @@ "schema": { "default.default": { "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", "ecocCollection": "enxcol_.default.ecoc", "fields": [ { @@ -314,7 +309,7 @@ "subType": "04" } }, - "path": "encryptedDouble", + "path": "encryptedDoubleNoPrecision", "bsonType": "double", "queries": { "queryType": "rangePreview", @@ -328,24 +323,6 @@ } ] } - }, - "deleteTokens": { - "default.default": { - "encryptedDouble": { - "e": { - "$binary": { - "base64": "65pz95EthqQpfoHS9nWvdCh05AV+OokP7GUaI+7j8+w=", - "subType": "00" - } - }, - "o": { - "$binary": { - "base64": "noN+05JsuO1oDg59yypIGj45i+eFH6HOTXOPpeZ//Mk=", - "subType": "00" - } - } - } - } } } }, @@ -358,7 +335,7 @@ "data": [ { "_id": 0, - "encryptedDouble": { + "encryptedDoubleNoPrecision": { "$$type": "binData" }, "__safeContent__": [ @@ -756,7 +733,7 @@ }, { "_id": 1, - "encryptedDouble": { + "encryptedDoubleNoPrecision": { "$$type": "binData" }, "__safeContent__": [ diff --git a/test/client-side-encryption/spec/legacy/fle2-Range-Double-InsertFind.json b/test/client-side-encryption/spec/legacy/fle2v2-Range-Double-InsertFind.json similarity index 80% rename from test/client-side-encryption/spec/legacy/fle2-Range-Double-InsertFind.json rename to test/client-side-encryption/spec/legacy/fle2v2-Range-Double-InsertFind.json index 634230eaca..9f2c7c9911 100644 --- a/test/client-side-encryption/spec/legacy/fle2-Range-Double-InsertFind.json +++ b/test/client-side-encryption/spec/legacy/fle2v2-Range-Double-InsertFind.json @@ -1,7 +1,8 @@ { "runOn": [ { - "minServerVersion": "6.2.0", + 
"minServerVersion": "7.0.0", + "serverless": "forbid", "topology": [ "replicaset", "sharded", @@ -13,9 +14,6 @@ "collection_name": "default", "data": [], "encrypted_fields": { - "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", - "ecocCollection": "enxcol_.default.ecoc", "fields": [ { "keyId": { @@ -24,7 +22,7 @@ "subType": "04" } }, - "path": "encryptedDouble", + "path": "encryptedDoubleNoPrecision", "bsonType": "double", "queries": { "queryType": "rangePreview", @@ -93,7 +91,7 @@ "arguments": { "document": { "_id": 0, - "encryptedDouble": { + "encryptedDoubleNoPrecision": { "$numberDouble": "0" } } @@ -104,7 +102,7 @@ "arguments": { "document": { "_id": 1, - "encryptedDouble": { + "encryptedDoubleNoPrecision": { "$numberDouble": "1" } } @@ -114,7 +112,7 @@ "name": "find", "arguments": { "filter": { - "encryptedDouble": { + "encryptedDoubleNoPrecision": { "$gt": { "$numberDouble": "0" } @@ -124,7 +122,7 @@ "result": [ { "_id": 1, - "encryptedDouble": { + "encryptedDoubleNoPrecision": { "$numberDouble": "1" } } @@ -183,7 +181,7 @@ "documents": [ { "_id": 0, - "encryptedDouble": { + "encryptedDoubleNoPrecision": { "$$type": "binData" } } @@ -194,7 +192,6 @@ "schema": { "default.default": { "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", "ecocCollection": "enxcol_.default.ecoc", "fields": [ { @@ -204,7 +201,7 @@ "subType": "04" } }, - "path": "encryptedDouble", + "path": "encryptedDoubleNoPrecision", "bsonType": "double", "queries": { "queryType": "rangePreview", @@ -231,7 +228,7 @@ "documents": [ { "_id": 1, - "encryptedDouble": { + "encryptedDoubleNoPrecision": { "$$type": "binData" } } @@ -242,7 +239,6 @@ "schema": { "default.default": { "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", "ecocCollection": "enxcol_.default.ecoc", "fields": [ { @@ -252,7 +248,7 @@ "subType": "04" } }, - "path": "encryptedDouble", + "path": "encryptedDoubleNoPrecision", "bsonType": "double", "queries": { "queryType": "rangePreview", @@ -277,10 +273,10 @@ "command": { "find": "default", "filter": { - "encryptedDouble": { + "encryptedDoubleNoPrecision": { "$gt": { "$binary": { - "base64": 
"Cq8kAAADcGF5bG9hZAB/JAAABGcAQyQAAAMwAH0AAAAFZAAgAAAAAHgYoMGjEE6fAlAhICv0+doHcVX8CmMVxyq7+jlyGrvmBXMAIAAAAAC/5MQZgTHuIr/O5Z3mXPvqrom5JTQ8IeSpQGhO9sB+8gVjACAAAAAAPMgD8Rqnd94atKnMyPjlTwthUQ710MKJVqgtwNXLFWwAAzEAfQAAAAVkACAAAAAA2kiWNvEc4zunJ1jzvuClFC9hjZMYruKCqAaxq+oY8EAFcwAgAAAAACofIS72Cm6s866UCk+evTH3CvKBj/uZd72sAL608rzTBWMAIAAAAAB7NkTFgsBuPIyxW5KtYZD4uxdgRynoAUwA2RDj3qZNdwADMgB9AAAABWQAIAAAAABkfoBGmU3hjYBvQbjNW19kfXneBQsQQPRfUL3UAwI2cAVzACAAAAAAUpK2BUOqX/DGdX5YJniEZMWkofxHqeAbXceEGJxhp8AFYwAgAAAAAENNdd+fMV7VQzshj6RdvsxqDHYkPDxggdUHw3JbmFBeAAMzAH0AAAAFZAAgAAAAAFNprhQ3ZwIcYbuzLolAT5n/vc14P9kUUQComDu6eFyKBXMAIAAAAAAcx9z9pk32YbPV/sfPZl9ALIEVsqoLXgqWLVK/tP+heAVjACAAAAAA3eqduo9CWE5BePqBq7KCh5++QqXnyjCwybB15Zoeiu4AAzQAfQAAAAVkACAAAAAAODI+pB2pCuB+YmNEUAgtMfNdt3DmSkrJ96gRzLphgb8FcwAgAAAAAAT7dewFDxUDECQ3zVq75/cUN4IP+zsqhkP5+czUwlJIBWMAIAAAAABYcCJA2jocGQiF/vZ9VDEktoK015OqfY64yZhmNPs9GQADNQB9AAAABWQAIAAAAACNAk+yTZ4Ewk1EnotQK8O3h1gg9I7pr9q2+4po1iJVgAVzACAAAAAAUj/LesmtEsgqYVzMJ67umVA11hJTdDXwbxDoQ71vWyUFYwAgAAAAAG32XaKjEO9YTSLMnDR5eiaUpgBQz0n4M3VpBqhWTaEnAAM2AH0AAAAFZAAgAAAAAD/FIrGYFDjyYmVb7oTMVwweWP7A6F9LnyIuNO4MjBnXBXMAIAAAAACIZgJCQRZu7NhuNMyOqCn1tf+DfU1qm10TPCfj5JYV3wVjACAAAAAAZCaO06/DjOBjNrTxB8jdxM7X4XvNu9QTa2Y00kZJwgEAAzcAfQAAAAVkACAAAAAAciRW40ORJLVwchOEpz87Svb+5toAFM6LxDWv928ECwQFcwAgAAAAAN0dipyESIkszfjRzdDi8kAGaa2Hf4wrPAtiWwboZLuxBWMAIAAAAADhvKHhF7zfNRnG8QfiIlKycV6L6iGxbGNKkHligA9rAgADOAB9AAAABWQAIAAAAACZqAyCzYQupJ95mrBJX54yIz9VY7I0WrxpNYElCI4dTQVzACAAAAAA/eyJb6d1xfE+jJlVXMTD3HS/NEYENPVKAuj56Dr2dSEFYwAgAAAAAMfcJKj77c6y/RBQ5hb5LSNdvbYvCAHR59KLnbmxj1gAAAM5AH0AAAAFZAAgAAAAAI5bm3YO0Xgf0VT+qjVTTfvckecM3Cwqj7DTKZXf8/NXBXMAIAAAAAD/m+h8fBhWaHm6Ykuz0WX1xL4Eme3ErLObyEVJf8NCywVjACAAAAAAiGdTXD22l1zDxHeF4NXUTiBnNTsdpzJRwM6riTPuOogAAzEwAH0AAAAFZAAgAAAAANqo4+p6qdtCzcB4BX1wQ6llU7eFBnuu4MtZwp4B6mDlBXMAIAAAAAAGiz+VaukMZ+6IH4jtn4KWWdKK4/W+O+gRioQDrfzpMgVjACAAAAAAl/wlBjSJW/hKkll8HSBCGw4Ce1pJ5rZuhVE09hKq4jQAAzExAH0AAAAFZAAgAAAAAPrFXmHP2Y4YAm7b/aqsdn/DPoDkv7B8egWkfe23XsM1BXMAIAAAAAAGhwpKAr7skeqHm3oseSbO7qKNhmYsuUrECBxJ5k+D2AVjACAAAAAAKSsxERy2OBhb5MiH9/H2BET2oeU6yVihiAoWi/16FroAAzEyAH0AAAAFZAAgAAAAABzjYxwAjXxXc0Uxv18rH8I3my0Aguow0kTwKyxbrm+cBXMAIAAAAADVbqJVr6IdokuhXkEtXF0C2gINLiAjMVN20lE20Vmp2QVjACAAAAAACJ+VAwl9X88mjC76yzkWeL4K6AamNYjbNkpS9B6fQE4AAzEzAH0AAAAFZAAgAAAAAFMm2feF2fFCm/UC6AfIyepX/xJDSmnnolQIBnHcPmb5BXMAIAAAAABLI11kFrQoaNVZFmq/38aRNImPOjdJh0Lo6irI8M/AaAVjACAAAAAAZFgYh4nV8YN+/AAqe/QhKDkNBPg8KraCG7y3bOzhPV4AAzE0AH0AAAAFZAAgAAAAAJvXB3KyNiNtQko4SSzo/9b2qmM2zU9CQTTDfLSBWMgRBXMAIAAAAAAvjuVP7KsLRDeqVqRziTKpBrjVyqKiIbO9Gw8Wl2wFTAVjACAAAAAARuBn3JdQ8so7Gy0XVH0CtlvtRoJWhSrJP6eRIqbyWh8AAzE1AH0AAAAFZAAgAAAAAPGdcxDiid8z8XYnfdDivNMYVPgBKdGOUw6UStU+48CdBXMAIAAAAAARj6g1Ap0eEfuCZ4X2TsEw+Djrhto3fA5nLwPaY0vCTgVjACAAAAAATxLY6SkcLPaoOBJpQsggEqoxgJgiY9seP3fBQM05PckAAzE2AH0AAAAFZAAgAAAAAP5rGPrYGt3aKob5f/ldP0qrW7bmWvqnKY4QwdDWz400BXMAIAAAAADTQkW2ymaaf/bhteOOGmSrIR97bAnJx+yN3yMj1bTeewVjACAAAAAA6O+wIMt3xMITuCxVrqOCNBPX5F122G+/Is+EYkzUvVYAAzE3AH0AAAAFZAAgAAAAAAlz6wJze5UkIxKpJOZFGCOf3v2KByWyI6NB6JM9wNcBBXMAIAAAAABUC7P/neUIHHoZtq0jFVBHY75tSFYr1Y5S16YN5XxC1QVjACAAAAAANv97jMd9JmZ7lMeCzK7jaZHZYsZJNl6N9g9WXzd2om0AAzE4AH0AAAAFZAAgAAAAAFJ8AtHcjia/9Y5pLEc3qVgH5xKiXw12G9Kn2A1EY8McBXMAIAAAAAAxe7Bdw7eUSBk/oAawa7uicTEDgXLymRNhBy1LAxhDvwVjACAAAAAAtOUZ2NeUWwESpq95O92dhqYBt8RtFnjT43E7k9mQF3oAAzE5AH0AAAAFZAAgAAAAAO8uwQUaKFb6vqR3Sv3Wn4QAonC2exOC9lGG1juqP5DtBXMAIAAAAABZf1KyJgQg8/Rf5c02DgDK2aQu0rNCOvaL60ohDHyY+gVjACAAAAAA5nAWj1Ek8p0MLYZEB3yoVFDfBYZ+/ZpIo71u+W9hprcAAzIwAH0AAAAFZAAgAAAAAE8YtqyRsGCeiR6hhiyisR/hccmK4nZqIMzO4lUBmEFzBXMAIAAAAAC1UYOSKqAeG1UJiKjWFV
skRhuFKpj9Ezy+lICZvFlN5AVjACAAAAAA37oP30mTzVftI2FL9Uwyls/jqLqbmRDQk7p7nlx44uYAAzIxAH0AAAAFZAAgAAAAAPhCrMausDx1QUIEqp9rUdRKyM6a9AAx7jQ3ILIu8wNIBXMAIAAAAACmH8lotGCiF2q9VQxhsS+7LAZv79VUAsOUALaGxE/EpAVjACAAAAAAhVIkl8p19VZ0gpg32s++Jda3qsVSVB5tV4CKrtjhE3IAAzIyAH0AAAAFZAAgAAAAAOBi/GAYFcstMSJPgp3VkMiuuUUCrZytvqYaU8dwm8v2BXMAIAAAAACEZSZVyD3pKzGlbdwlYmWQhHHTV5SnNLknl2Gw8IaUTQVjACAAAAAAriBex2kK//RPhyVpCYJDBng4l5w8jD3m8BF7dVAP0p8AAzIzAH0AAAAFZAAgAAAAAIQuup+YGfH3mflzWopN8J1X8o8a0d9CSGIvrA5HOzraBXMAIAAAAADYvNLURXsC2ITMqK14LABQBI+hZZ5wNf24JMcKLW+84AVjACAAAAAAGeUzdG2kQrx5XypXJezZmPVzMYuqYZw7Bhbl4EPT0SMAAzI0AH0AAAAFZAAgAAAAAKsh0ADyOnVocFrOrf6MpTrNvAj8iaiE923DPryu124gBXMAIAAAAADg24a8NVE1GyScc6tmnTbmu5ulzO+896fE92lN08MeswVjACAAAAAAbO+DEBY3STVMQN7CbxmHDUBYrDR+80e797u/VmiK4vcAAzI1AH0AAAAFZAAgAAAAAKkVC2Y6HtRmv72tDnPUSjJBvse7SxLqnr09/Uuj9sVVBXMAIAAAAABYNFUkH7ylPMN+Bc3HWX1e0flGYNbtJNCY9SltJCW/UAVjACAAAAAAHlE3RLKcXpXto7Mr8nRgzEsmhjfGh4vcgPxashCSMg4AAzI2AH0AAAAFZAAgAAAAADDggcwcb/Yn1Kk39sOHsv7BO/MfP3m/AJzjGH506Wf9BXMAIAAAAAAYZIsdjICS0+BDyRUPnrSAZfPrwtuMaEDEn0/ijLNQmAVjACAAAAAA1c/sy3255NofBQrnBNSmVkSzMgsGPaaOUJShddVrnuQAAzI3AH0AAAAFZAAgAAAAAEWY7dEUOJBgjOoWVht1wLehsWAzB3rSOBtLgTuM2HC8BXMAIAAAAAAAoswiHRROurjwUW8u8D5EUT+67yvrgpB/j6PzBDAfVwVjACAAAAAALBvQumG8m7/bzJjGWN2cHSAncdN8jMtOSmEhEhGom24AAzI4AH0AAAAFZAAgAAAAAPZaapeAUUFPA7JTCMOWHJa9lnPFh0/gXfAPjA1ezm4ZBXMAIAAAAACmJvLY2nivw7/b3DOKH/X7bBXjJwoowqb1GtEFO3OYgAVjACAAAAAAGCDHuUJusGHKQ+r9nrFChmUUsRcqZKPGsRiLSk5gbFcAAzI5AH0AAAAFZAAgAAAAAK00u6jadxCZAiA+fTsPVDsnW5p5LCr4+kZZZOTDuZlfBXMAIAAAAAAote4zTEYMDgaaQbAdN8Dzv93ljPLdGjJzvnRn3KXgtQVjACAAAAAApZ6l+OrocOqgFek7WOqOP9JruTWZ+iW+5zdL3DZwzhkAAzMwAH0AAAAFZAAgAAAAAFbgabdyymiEVYYwtJSWa7lfl/oYuj/SukzJeDOR6wPVBXMAIAAAAADAFGFjS1vPbN6mQEhkDYTD6V2V23Ys9gUEUMGNvMPkaAVjACAAAAAAeICKly5Xtwmfd4JFD+5e1UWpu4V3KoRim7jeBICDs+UAAzMxAH0AAAAFZAAgAAAAABNMR6UBv2E627CqLtQ/eDYx7OEwQ7JrR4mSHFa1N8tLBXMAIAAAAAAxH4gucI4UmNVB7625C6hFSVCuIpJO3lusJlPuL8H5EQVjACAAAAAA0/uh1hrAlGGna/njCVaCqBwubxkifzF0zjCDQrIJOtUAAzMyAH0AAAAFZAAgAAAAAG8cd6WBneNunlqrQ2EmNf35W7OGObGq9WL4ePX+LUDmBXMAIAAAAAAjJ2+sX87NSis9hBsgb1QprVRnO7Bf+GObCGoUqyPE4wVjACAAAAAAAoJ4Ufdq45T9Aun5FupHlaBCIUsmUn6dXgV9KorpFikAAzMzAH0AAAAFZAAgAAAAAFWOUGkUpy8yf6gB3dio/aOfRKh7XuhvsUj48iESFJrGBXMAIAAAAAAY7sCDMcrUXvNuL6dO0m11WyijzXZvPIcOKob6IpC4PQVjACAAAAAAO/QFjczoznH95Iu0YEVMsU1GA1yxSGL8bcwSweNzAtkAAzM0AH0AAAAFZAAgAAAAAGUrIdKxOihwNmo6B+aG+Ag1qa0+iqdksHOjQj+Oy9bZBXMAIAAAAABwa5dbI2KmzBDNBTQBEkjZv4sPaeRkRNejcjdVymRFKQVjACAAAAAAUtdJucZoQvvPJiy65RonQrxBCcvtfHslpbgLbtWirCYAAzM1AH0AAAAFZAAgAAAAAOx89xV/hRk64/CkM9N2EMK6aldII0c8smdcsZ46NbP8BXMAIAAAAADBF6tfQ+7q9kTuLyuyrSnDgmrdmrXkdhl980i1KHuGHgVjACAAAAAAVc5njbLX1afpsuG662U/OBo4LanEk06lKbQtd95fswgAAzM2AH0AAAAFZAAgAAAAAMkN0L1oQWXhjwn9rAdudcYeN8/5VdCKU8cmDt7BokjsBXMAIAAAAAAT62pGXoRwExe9uvgYOI0hg5tOxilrWfoEmT0SMglWJwVjACAAAAAAhNc5ovIaxgtS856ZBfG+cAcHAz+3ewzB9C6zZoaggj4AAzM3AH0AAAAFZAAgAAAAANxfa4xCoaaB7k1C1RoH1LBhsCbN2yEq15BT9b+iqEC4BXMAIAAAAACAX9LV8Pemfw7NF0iB1/85NzM1Ef+1mUfyehacUVgobQVjACAAAAAAc0bIRNzuyc1Y0hkEmm7xV6Xp/LBlAgMq/ISCHr0Tq44AAzM4AH0AAAAFZAAgAAAAAOYIYoWkX7dGuyKfi3XssUlc7u/gWzqrR9KMkikKVdmSBXMAIAAAAABVF2OYjRTGi9Tw8XCAwZWLpX35Yl271TlNWp6N/nROhAVjACAAAAAAyGLbmHhe8eTQC8L2eDRcezUWULLZ9E8DPic7Mfp8/+4AAzM5AH0AAAAFZAAgAAAAAIy0+bXZi10QC+q7oSOLXK5Fee7VEk/qHSXukfeVIfgzBXMAIAAAAAAQ3IIV/JQCHW95AEbH5zGIHtJqyuPjWPMIZ+VmQHlxEwVjACAAAAAAD3azcNDdohU6tHO6ypT7F0rtydAYIMmLIsX6y7cXVkMAAzQwAH0AAAAFZAAgAAAAAL5SOJQ3LOhgdXJ5v086NNeAl1qonQnchObdpZJ1kHeEBXMAIAAAAAA+tEqTXODtik+ydJZSnUqXF9f18bPeze9eWtR7ExZJgQVjACAAAAAAUd8v2RN1WagYan1LbV+G81vcJKQ6HsX2GSBtnzMH7XEAAzQxAH0AAAAFZAAgAAAAAKl8zcHJRDjSjJeV/
WvMxulW1zrTFtaeBy/aKKhadc6UBXMAIAAAAADBdWQl5SBIvtZZLIHszePwkO14W1mQ0izUk2Ov21cPNAVjACAAAAAAHvQLJ2X4ncjVv1BsOd/jbVouRPwf4222WlGSzPyAfnsAAzQyAH0AAAAFZAAgAAAAAFvotcNaoKnVt5CBCOPwjexFO0WGWuaIGL6H/6KSau+6BXMAIAAAAAD2y2mBN5xPu5PJoY2zcr0GnQDtHRBogA5+xzIxccE9fwVjACAAAAAA1OntPAxq1NJ4sfca+ysHcg68mj7QHwdEZQVxHMlV6x0AAzQzAH0AAAAFZAAgAAAAAPezU0/vNT4Q4YKbTbaeHqcwNLT+IjW/Y9QFpIooihjPBXMAIAAAAACj2x4O4rHter8ZnTws5LAP9jJ/6kk9C/V3vL50LoFZHAVjACAAAAAAlZd3bPMUXRhTwLWflrQuKIwtauTswenUiaDi2ibHYYYAAzQ0AH0AAAAFZAAgAAAAAMqpayM2XotEFmm0gwQd9rIzApy0X+7HfOhNk6VU7F5lBXMAIAAAAACJR9+q5T9qFHXFNgGbZnPubG8rkO6cwWhzITQTmd6VgwVjACAAAAAAEEOCUo+ihRGl1kuHlabWBWUFyJPAXSXzpQB4od57cMEAAzQ1AH0AAAAFZAAgAAAAANCeyW+3oebaQk+aqxNVhAcT/BZ5nhsTVdKS3tMrLSvWBXMAIAAAAADxRFMDhkyuEc++WnndMfoUMLNL7T7rWoeblcrpSI6soQVjACAAAAAA1KYQISxmBcGmgDMAjBpTcfQr/oci60Kf4QtEYEXPKSQAAzQ2AH0AAAAFZAAgAAAAAIbo5YBTxXM7HQhl7UP9NNgpPGFkBx871r1B65G47+K8BXMAIAAAAAC21dJSxnEhnxO5gzN5/34BL4von45e1meW92qowzb8fQVjACAAAAAAisxsskU/9ykphpSjB6bMMY+6Tbt6wpjhE2dg1IULAgYAAzQ3AH0AAAAFZAAgAAAAABm/6pF96j26Jm7z5KkY1y33zcAEXLx2n0DwC03bs/ixBXMAIAAAAAD01OMvTZI/mqMgxIhA5nLs068mW+GKl3OW3ilf2D8+LgVjACAAAAAAnPa8PTfcaSSAdzNAC54IMTicbShbtt/cSnFHz7u7g8wAAzQ4AH0AAAAFZAAgAAAAAJ/D3+17gaQdkBqkL2wMwccdmCaVOtxzIkM8VyI4xI5zBXMAIAAAAAAggLVmkc5u+YzBR+oNE+XgLVp64fC6MzUb/Ilu/Jsw0AVjACAAAAAAp5qCK8p0eTvAAUQ8fIOGyb2YM1YflQ8H6ggiWZ9vNI4AAzQ5AH0AAAAFZAAgAAAAAJMRyUW50oaTzspS6A3TUoXyC3gNYQoShUGPakMmeVZrBXMAIAAAAACona2Pqwt4U2PmFrtmu37jB9kQ/12okyAVtYa8TQkDiQVjACAAAAAAx/LOuDwvpaPFJeKLowZW/4q7SUsLtdldB27zc4//alEAAzUwAH0AAAAFZAAgAAAAAByuYl8dBvfaZ0LO/81JW4hYypeNmvLMaxsIdvqMPrWoBXMAIAAAAABNddwobOUJzm9HOUD8BMZJqkNCUCqstHZkC76FIdNg9AVjACAAAAAA4SEPY+qEZ2dCu/9485IEVybiM3Z7szXYM+izxklNm14AAzUxAH0AAAAFZAAgAAAAAHEzLtfmF/sBcYPPdj8867VmmQyU1xK9I/3Y0478azvABXMAIAAAAAAcmyFajZPnBTbO+oLInNwlApBocUekKkxz2hYFeSlQ+gVjACAAAAAA15ibZoBP1/pP27BDIfizDYNRDIpOmj8Ie34FvrtqjhMAAzUyAH0AAAAFZAAgAAAAAJam7JYsZe2cN20ZYm2W3v1pisNt5PLiniMzymBLWyMtBXMAIAAAAABxCsKVMZMTn3n+R2L7pVz5nW804r8HcK0mCBw3jUXKXAVjACAAAAAAElCgoMRrtlONqnyosjIpiz4mQTLq3+b/JUCVEq4QFiAAAzUzAH0AAAAFZAAgAAAAAMXrXx0saZ+5gORmwM2FLuZG6iuO2YS+1IGPoAtDKoKBBXMAIAAAAADIQsxCr8CfFKaBcx8kIeSywnGh7JHjKRJ9vJd9x79y7wVjACAAAAAAPzOTJ088k9QdGUpZ1ubUdkw3pTGkNF8bui4a9wqeo08AAzU0AH0AAAAFZAAgAAAAACbzcUD3INSnCRspOKF7ubne74OK9L0FTZvi9Ay0JVDYBXMAIAAAAADPebVQH8Btk9rhBIoUOdSAdpPvz7qIY4UC2i6IGisSAQVjACAAAAAAe4zSAgMQPjtz2t1DgxW1XOlV+fnNMXt6iQ7iV7DgQz4AAzU1AH0AAAAFZAAgAAAAACUc2CtD1MK/UTxtv+8iA9FoHEyTwdl43HKeSwDw2Lp5BXMAIAAAAACCIduIdw65bQMzRYRfjBJj62bc69T4QqH4QoWanwlvowVjACAAAAAA+i6aTnkoixxSOxDgQM/aWTylOy1m0ViCDU5asOM5ieYAAzU2AH0AAAAFZAAgAAAAAHSaHWs/dnmI9sc7nB50VB2Bzs0kHapMHCQdyVEYY30TBXMAIAAAAACkV22lhEjWv/9/DubfHBAcwJggKI5mIbSK5L2nyqloqQVjACAAAAAA1I5pLbnlFf/EfJlhE0RFIioDjzKrWNh4TO8H/ksrbMcAAzU3AH0AAAAFZAAgAAAAAAL8jhNBG0KXXZhmZ0bPXtfgapJCB/AI+BEHB0eZ3C75BXMAIAAAAADHx/fPa639EBmGV5quLi8IQT600ifiKSOhTDOK19DnzwVjACAAAAAANfWpWWy9iu7pgMEy2aVCggGEtJtXKQG1+3zCf8D6iEkAAzU4AH0AAAAFZAAgAAAAAAxgeclNl09H7HvzD1oLwb2YpFca5eaX90uStYXHilqKBXMAIAAAAACMU5pSxzIzWlQxHyW170Xs9EhD1hURASQk+qkx7K5Y6AVjACAAAAAA/kYlJPSFn2g1l7cy6djV/7wgnlPWphZ0IKSY8uenUF0AAzU5AH0AAAAFZAAgAAAAABmO7QD9vxWMmFjIHz13lyOeV6vHT6mYCsWxF7hb/yOjBXMAIAAAAACT9lmgkiqzuWG24afuzYiCeK9gmJqacmxAruIukd0xEAVjACAAAAAAyiZxtuOZQNSit4wFMJS3jnqLbDzyJ0OtDTKs6r0EO0cAAzYwAH0AAAAFZAAgAAAAAB89SjLtDJkqEghRGyj6aQ/2qvWLNuMROoXmzbYbCMKMBXMAIAAAAAC8sywgND+CjhVTF6HnRQeay8y9/HnVzDI42dEPah28LQVjACAAAAAAzBj8q9Ig4uhIc6rhNDVX1lII9lzfhJ0+ig8viBhdrtQAAzYxAH0AAAAFZAAgAAAAABN2alGq9Aats1mwERNGwL/fIwZSvVCe9/8XMHTFlpUpBXMAIAAAAACuDPjJgvvbBYhbLpjMiWUCsVppiYrhvR+yMysNPN8cZAVjACAAAAAAqcsLIvb3Pe/QKYB9wQ86Ye2O
FSAJEIY4ve7LMM3SwGMAAzYyAH0AAAAFZAAgAAAAAL8YB6VAqGBiWD4CBv16IBscg5J7VQCTZu87n6pj+86KBXMAIAAAAAAmxm8e68geeyAdUjSMWBHzUjneVB0pG9TBXIoE6467hAVjACAAAAAA9Kd65vKIUckNPvxl/HHNPJVnFHXyErp4qibmD7BloukAAzYzAH0AAAAFZAAgAAAAAL4yLkCTV5Dmxa5toBu4JT8ge/cITAaURIOuFuOtFUkeBXMAIAAAAAAXoFNQOMGkAj7qEJP0wQafmFSXgWGeorDVbwyOxWLIsgVjACAAAAAAaFrBx0xXIryTe7V/kX+jfPHd0057x7k7MxzDAzi0RpcAAzY0AH0AAAAFZAAgAAAAAEU8DoUp46YtYjNFS9kNXwdYxQ9IW27vCTb+VcqqfnKNBXMAIAAAAADe7vBOgYReE8X78k5ARuUnv4GmzPZzg6SbConf4L2G3wVjACAAAAAAOl/tartywd/fJj5DNRsVH/ml9tJ8KkkCbKObsFe8lHcAAzY1AH0AAAAFZAAgAAAAAPa4yKTtkUtySuWo1ZQsp2QXtPb5SYqzA5vYDnS1P6c0BXMAIAAAAADKnF58R1sXlHlsHIvCBR3YWW/qk54z9CTDhZydkD1cOQVjACAAAAAAfn/jP8K3QUILngCNkydHARyBvBHIFdaJjzV0EXsFruMAAzY2AH0AAAAFZAAgAAAAAN5WJnMBmfgpuQPyonmY5X6OdRvuHw4nhsnGRnFAQ95VBXMAIAAAAACwftzu7KVV1rmGKwXtJjs3cJ1gE3apr8+N0SAg1F2cHwVjACAAAAAAf0XyQQLd/HIYaf9EeAV0o2h12K1AV5piLCpZihznBXoAAzY3AH0AAAAFZAAgAAAAACHvDsaPhoSb6DeGnKQ1QOpGYAgK82qpnqwcmzSeWaJHBXMAIAAAAABRq3C5+dOfnkAHM5Mg5hPB3O4jhwQlBgQWLA7Ph5bhgwVjACAAAAAAm2Ukk5kkQp+PDpBCCefQOqFKKZie4hINim3yvtypsEAAAzY4AH0AAAAFZAAgAAAAAOBePJvccPMJmy515KB1AkXF5Pi8NOG4V8psWy0SPRP+BXMAIAAAAAB3dOJG9xIDtEKCRzeNnPS3bFZepMj8UKBobKpSoCPqpgVjACAAAAAAnu33SngYPOtRgdJ3aBBuxWn80ti3OO6nZJcI6eh0VfQAAzY5AH0AAAAFZAAgAAAAABUvRrDQKEXLMdhnzXRdhiL6AGNs2TojPky+YVLXs+JnBXMAIAAAAAD1kYicbEEcPzD4QtuSYQQWDPq8fuUWGddpWayKn3dT9QVjACAAAAAAUc5dxcbNYY3qMO8+Xm2xis+pH9NjPLrHqHenwDEImAEAAzcwAH0AAAAFZAAgAAAAAOvSnpujeKNen4pqc2HR63C5s5oJ1Vf4CsbKoYQvkwl5BXMAIAAAAACw2+vAMdibzd2YVVNfk81yXkFZP0WLJ82JBxJmXnYE+QVjACAAAAAAnmej008JFQAgdImjk26PYvqdATjRYRufLi+vEVVwtucAAzcxAH0AAAAFZAAgAAAAAAo0xfGG7tJ3GWhgPVhW5Zn239nTD3PadShCNRc9TwdNBXMAIAAAAADZh243oOhenu0s/P/5KZLBDh9ADqKHtSWcXpO9D2sIjgVjACAAAAAAhpNHI4+aRdWHCvZfEjlgpRBz36g05wN9p/hj3NEwc7EAAAVlACAAAAAA65pz95EthqQpfoHS9nWvdCh05AV+OokP7GUaI+7j8+wSY20AAAAAAAAAAAAAEHBheWxvYWRJZAAAAAAAEGZpcnN0T3BlcmF0b3IAAQAAAAA=", + "base64": 
"DYckAAADcGF5bG9hZABXJAAABGcAQyQAAAMwAH0AAAAFZAAgAAAAAHgYoMGjEE6fAlAhICv0+doHcVX8CmMVxyq7+jlyGrvmBXMAIAAAAAC/5MQZgTHuIr/O5Z3mXPvqrom5JTQ8IeSpQGhO9sB+8gVsACAAAAAAuPSXVmJUAUpTQg/A9Bu1hYczZF58KEhVofakygbsvJQAAzEAfQAAAAVkACAAAAAA2kiWNvEc4zunJ1jzvuClFC9hjZMYruKCqAaxq+oY8EAFcwAgAAAAACofIS72Cm6s866UCk+evTH3CvKBj/uZd72sAL608rzTBWwAIAAAAADuCQ/M2xLeALF0UFZtJb22QGOhHmJv6xoO+kZIHcDeiAADMgB9AAAABWQAIAAAAABkfoBGmU3hjYBvQbjNW19kfXneBQsQQPRfUL3UAwI2cAVzACAAAAAAUpK2BUOqX/DGdX5YJniEZMWkofxHqeAbXceEGJxhp8AFbAAgAAAAAKUaLzIldNIZv6RHE+FwbMjzcNHqPESwF/37mm43VPrsAAMzAH0AAAAFZAAgAAAAAFNprhQ3ZwIcYbuzLolAT5n/vc14P9kUUQComDu6eFyKBXMAIAAAAAAcx9z9pk32YbPV/sfPZl9ALIEVsqoLXgqWLVK/tP+heAVsACAAAAAA/qxvuvJbAHwwhfrPVpmCFzNvg2cU/NXaWgqgYUZpgXwAAzQAfQAAAAVkACAAAAAAODI+pB2pCuB+YmNEUAgtMfNdt3DmSkrJ96gRzLphgb8FcwAgAAAAAAT7dewFDxUDECQ3zVq75/cUN4IP+zsqhkP5+czUwlJIBWwAIAAAAACFGeOtd5zBXTJ4JYonkn/HXZfHipUlqGwIRUcH/VTatwADNQB9AAAABWQAIAAAAACNAk+yTZ4Ewk1EnotQK8O3h1gg9I7pr9q2+4po1iJVgAVzACAAAAAAUj/LesmtEsgqYVzMJ67umVA11hJTdDXwbxDoQ71vWyUFbAAgAAAAABlnhpgTQ0WjLb5u0b/vEydrCeFjVynKd7aqb+UnvVLeAAM2AH0AAAAFZAAgAAAAAD/FIrGYFDjyYmVb7oTMVwweWP7A6F9LnyIuNO4MjBnXBXMAIAAAAACIZgJCQRZu7NhuNMyOqCn1tf+DfU1qm10TPCfj5JYV3wVsACAAAAAA5hmY4ptuNxULGf87SUFXQWGAONsL9U29duh8xqsHtxoAAzcAfQAAAAVkACAAAAAAciRW40ORJLVwchOEpz87Svb+5toAFM6LxDWv928ECwQFcwAgAAAAAN0dipyESIkszfjRzdDi8kAGaa2Hf4wrPAtiWwboZLuxBWwAIAAAAAANr4o/+l1OIbbaX5lZ3fQ/WIeOcEXjNI1F0WbSgQrzaQADOAB9AAAABWQAIAAAAACZqAyCzYQupJ95mrBJX54yIz9VY7I0WrxpNYElCI4dTQVzACAAAAAA/eyJb6d1xfE+jJlVXMTD3HS/NEYENPVKAuj56Dr2dSEFbAAgAAAAANkSt154Or/JKb31VvbZFV46RPgUp8ff/hcPORL7PpFBAAM5AH0AAAAFZAAgAAAAAI5bm3YO0Xgf0VT+qjVTTfvckecM3Cwqj7DTKZXf8/NXBXMAIAAAAAD/m+h8fBhWaHm6Ykuz0WX1xL4Eme3ErLObyEVJf8NCywVsACAAAAAAfb1VZZCqs2ivYbRzX4p5CtaCkKW+g20Pr57FWXzEZi8AAzEwAH0AAAAFZAAgAAAAANqo4+p6qdtCzcB4BX1wQ6llU7eFBnuu4MtZwp4B6mDlBXMAIAAAAAAGiz+VaukMZ+6IH4jtn4KWWdKK4/W+O+gRioQDrfzpMgVsACAAAAAAG4YYkTp80EKo59mlHExDodRQFR7njhR5dmISwUJ6ukAAAzExAH0AAAAFZAAgAAAAAPrFXmHP2Y4YAm7b/aqsdn/DPoDkv7B8egWkfe23XsM1BXMAIAAAAAAGhwpKAr7skeqHm3oseSbO7qKNhmYsuUrECBxJ5k+D2AVsACAAAAAAAqPQi9luYAu3GrFCEsVjd9z2zIDcp6SPTR2w6KQEr+IAAzEyAH0AAAAFZAAgAAAAABzjYxwAjXxXc0Uxv18rH8I3my0Aguow0kTwKyxbrm+cBXMAIAAAAADVbqJVr6IdokuhXkEtXF0C2gINLiAjMVN20lE20Vmp2QVsACAAAAAAD7K1Fx4gFaaizkIUrf+EGXQeG7QX1jadhGc6Ji471H8AAzEzAH0AAAAFZAAgAAAAAFMm2feF2fFCm/UC6AfIyepX/xJDSmnnolQIBnHcPmb5BXMAIAAAAABLI11kFrQoaNVZFmq/38aRNImPOjdJh0Lo6irI8M/AaAVsACAAAAAAOWul0oVqJ9CejD2RqphhTC98DJeRQy5EwbNerU2+4l8AAzE0AH0AAAAFZAAgAAAAAJvXB3KyNiNtQko4SSzo/9b2qmM2zU9CQTTDfLSBWMgRBXMAIAAAAAAvjuVP7KsLRDeqVqRziTKpBrjVyqKiIbO9Gw8Wl2wFTAVsACAAAAAADlE+oc1ins+paNcaOZJhBlKlObDJ4VQORWjFYocM4LgAAzE1AH0AAAAFZAAgAAAAAPGdcxDiid8z8XYnfdDivNMYVPgBKdGOUw6UStU+48CdBXMAIAAAAAARj6g1Ap0eEfuCZ4X2TsEw+Djrhto3fA5nLwPaY0vCTgVsACAAAAAAoHqiwGOUkBu8SX5U1yHho+UIFdSN2MdQN5s6bQ0EsJYAAzE2AH0AAAAFZAAgAAAAAP5rGPrYGt3aKob5f/ldP0qrW7bmWvqnKY4QwdDWz400BXMAIAAAAADTQkW2ymaaf/bhteOOGmSrIR97bAnJx+yN3yMj1bTeewVsACAAAAAADyQnHGH2gF4w4L8axUsSTf6Ubk7L5/eoFOJk12MtZAoAAzE3AH0AAAAFZAAgAAAAAAlz6wJze5UkIxKpJOZFGCOf3v2KByWyI6NB6JM9wNcBBXMAIAAAAABUC7P/neUIHHoZtq0jFVBHY75tSFYr1Y5S16YN5XxC1QVsACAAAAAAgvxRbXDisNnLY3pfsjDdnFLtkvYUC4lhA68eBXc7KAwAAzE4AH0AAAAFZAAgAAAAAFJ8AtHcjia/9Y5pLEc3qVgH5xKiXw12G9Kn2A1EY8McBXMAIAAAAAAxe7Bdw7eUSBk/oAawa7uicTEDgXLymRNhBy1LAxhDvwVsACAAAAAAxKPaIBKVx3jTA+R/el7P7AZ7efrmTGjJs3Hj/YdMddwAAzE5AH0AAAAFZAAgAAAAAO8uwQUaKFb6vqR3Sv3Wn4QAonC2exOC9lGG1juqP5DtBXMAIAAAAABZf1KyJgQg8/Rf5c02DgDK2aQu0rNCOvaL60ohDHyY+gVsACAAAAAAqyEjfKC8lYoIfoXYHUqHZPoaA6EK5BAZy5dxXZmay4kAAzIwAH0AAAAFZAAgAAAAAE8YtqyRsGCeiR6hhiyisR/hccmK4nZqIMzO4lUBmEFzBXMAIAAAAAC1UYOSKqAeG1UJiKjWFV
skRhuFKpj9Ezy+lICZvFlN5AVsACAAAAAA6Ct9nNMKyRazn1OKnRKagm746CGu+jyhbL1qJnZxGi0AAzIxAH0AAAAFZAAgAAAAAPhCrMausDx1QUIEqp9rUdRKyM6a9AAx7jQ3ILIu8wNIBXMAIAAAAACmH8lotGCiF2q9VQxhsS+7LAZv79VUAsOUALaGxE/EpAVsACAAAAAAnc1xCKfdvbUEc8F7XZqlNn1C+hZTtC0I9I3LL06iaNkAAzIyAH0AAAAFZAAgAAAAAOBi/GAYFcstMSJPgp3VkMiuuUUCrZytvqYaU8dwm8v2BXMAIAAAAACEZSZVyD3pKzGlbdwlYmWQhHHTV5SnNLknl2Gw8IaUTQVsACAAAAAAfsLZsEDcWSuNsIo/TD1ReyQW75HPMgmuKZuWFOLKRLoAAzIzAH0AAAAFZAAgAAAAAIQuup+YGfH3mflzWopN8J1X8o8a0d9CSGIvrA5HOzraBXMAIAAAAADYvNLURXsC2ITMqK14LABQBI+hZZ5wNf24JMcKLW+84AVsACAAAAAACzfjbTBH7IwDU91OqLAz94RFkoqBOkzKAqQb55gT4/MAAzI0AH0AAAAFZAAgAAAAAKsh0ADyOnVocFrOrf6MpTrNvAj8iaiE923DPryu124gBXMAIAAAAADg24a8NVE1GyScc6tmnTbmu5ulzO+896fE92lN08MeswVsACAAAAAAaPxcOIxnU7But88/yadOuDJDMcCywwrRitaxMODT4msAAzI1AH0AAAAFZAAgAAAAAKkVC2Y6HtRmv72tDnPUSjJBvse7SxLqnr09/Uuj9sVVBXMAIAAAAABYNFUkH7ylPMN+Bc3HWX1e0flGYNbtJNCY9SltJCW/UAVsACAAAAAAZYK/f9H4OeihmpiFMH7Wm7uLvs2s92zNA8wyrNZTsuMAAzI2AH0AAAAFZAAgAAAAADDggcwcb/Yn1Kk39sOHsv7BO/MfP3m/AJzjGH506Wf9BXMAIAAAAAAYZIsdjICS0+BDyRUPnrSAZfPrwtuMaEDEn0/ijLNQmAVsACAAAAAAGPnYVvo2ulO9z4LGd/69NAklfIcZqZvFX2KK0s+FcTUAAzI3AH0AAAAFZAAgAAAAAEWY7dEUOJBgjOoWVht1wLehsWAzB3rSOBtLgTuM2HC8BXMAIAAAAAAAoswiHRROurjwUW8u8D5EUT+67yvrgpB/j6PzBDAfVwVsACAAAAAA6NhRTYFL/Sz4tao7vpPjLNgAJ0FX6P/IyMW65qT6YsMAAzI4AH0AAAAFZAAgAAAAAPZaapeAUUFPA7JTCMOWHJa9lnPFh0/gXfAPjA1ezm4ZBXMAIAAAAACmJvLY2nivw7/b3DOKH/X7bBXjJwoowqb1GtEFO3OYgAVsACAAAAAA/JcUoyKacCB1NfmH8vYqC1f7rd13KShrQqV2r9QBP44AAzI5AH0AAAAFZAAgAAAAAK00u6jadxCZAiA+fTsPVDsnW5p5LCr4+kZZZOTDuZlfBXMAIAAAAAAote4zTEYMDgaaQbAdN8Dzv93ljPLdGjJzvnRn3KXgtQVsACAAAAAAxXd9Mh6R3mnJy8m7UfqMKi6oD5DlZpkaOz6bEjMOdiwAAzMwAH0AAAAFZAAgAAAAAFbgabdyymiEVYYwtJSWa7lfl/oYuj/SukzJeDOR6wPVBXMAIAAAAADAFGFjS1vPbN6mQEhkDYTD6V2V23Ys9gUEUMGNvMPkaAVsACAAAAAAL/D5Sze/ZoEanZLK0IeEkhgVkxEjMWVCfmJaD3a8uNIAAzMxAH0AAAAFZAAgAAAAABNMR6UBv2E627CqLtQ/eDYx7OEwQ7JrR4mSHFa1N8tLBXMAIAAAAAAxH4gucI4UmNVB7625C6hFSVCuIpJO3lusJlPuL8H5EQVsACAAAAAAVLHNg0OUVqZ7WGOP53BkTap9FOw9dr1P4J8HxqFqU04AAzMyAH0AAAAFZAAgAAAAAG8cd6WBneNunlqrQ2EmNf35W7OGObGq9WL4ePX+LUDmBXMAIAAAAAAjJ2+sX87NSis9hBsgb1QprVRnO7Bf+GObCGoUqyPE4wVsACAAAAAAs9c9SM49/pWmyUQKslpt3RTMBNSRppfNO0JBvUqHPg0AAzMzAH0AAAAFZAAgAAAAAFWOUGkUpy8yf6gB3dio/aOfRKh7XuhvsUj48iESFJrGBXMAIAAAAAAY7sCDMcrUXvNuL6dO0m11WyijzXZvPIcOKob6IpC4PQVsACAAAAAAJOP+EHz6awDb1qK2bZQ3kTV7wsj5Daj/IGAWh4g7omAAAzM0AH0AAAAFZAAgAAAAAGUrIdKxOihwNmo6B+aG+Ag1qa0+iqdksHOjQj+Oy9bZBXMAIAAAAABwa5dbI2KmzBDNBTQBEkjZv4sPaeRkRNejcjdVymRFKQVsACAAAAAA4ml/nm0gJNTcJ4vuD+T2Qfq2fQZlibJp/j6MOGDrbHMAAzM1AH0AAAAFZAAgAAAAAOx89xV/hRk64/CkM9N2EMK6aldII0c8smdcsZ46NbP8BXMAIAAAAADBF6tfQ+7q9kTuLyuyrSnDgmrdmrXkdhl980i1KHuGHgVsACAAAAAACUqiFqHZdGbwAA+hN0YUE5zFg+H+dabIB4dj5/75W/YAAzM2AH0AAAAFZAAgAAAAAMkN0L1oQWXhjwn9rAdudcYeN8/5VdCKU8cmDt7BokjsBXMAIAAAAAAT62pGXoRwExe9uvgYOI0hg5tOxilrWfoEmT0SMglWJwVsACAAAAAAlVz4dhiprSbUero6JFfxzSJGclg63oAkAmgbSwbcYxIAAzM3AH0AAAAFZAAgAAAAANxfa4xCoaaB7k1C1RoH1LBhsCbN2yEq15BT9b+iqEC4BXMAIAAAAACAX9LV8Pemfw7NF0iB1/85NzM1Ef+1mUfyehacUVgobQVsACAAAAAAVq4xpbymLk0trPC/a2MvB39I7hRiX8EJsVSI5E5hSBkAAzM4AH0AAAAFZAAgAAAAAOYIYoWkX7dGuyKfi3XssUlc7u/gWzqrR9KMkikKVdmSBXMAIAAAAABVF2OYjRTGi9Tw8XCAwZWLpX35Yl271TlNWp6N/nROhAVsACAAAAAA0nWwYzXQ1+EkDvnGq+SMlq20z+j32Su+i/A95SggPb4AAzM5AH0AAAAFZAAgAAAAAIy0+bXZi10QC+q7oSOLXK5Fee7VEk/qHSXukfeVIfgzBXMAIAAAAAAQ3IIV/JQCHW95AEbH5zGIHtJqyuPjWPMIZ+VmQHlxEwVsACAAAAAAp0jYsyohKv9Pm+4k+DplEGbl9WLZpAJzitrcDj4CNsMAAzQwAH0AAAAFZAAgAAAAAL5SOJQ3LOhgdXJ5v086NNeAl1qonQnchObdpZJ1kHeEBXMAIAAAAAA+tEqTXODtik+ydJZSnUqXF9f18bPeze9eWtR7ExZJgQVsACAAAAAAbrkZCVgB9Qsp4IAbdf+bD4fT6Boqk5UtuA/zhNrh1y0AAzQxAH0AAAAFZAAgAAAAAKl8zcHJRDjSjJeV/
WvMxulW1zrTFtaeBy/aKKhadc6UBXMAIAAAAADBdWQl5SBIvtZZLIHszePwkO14W1mQ0izUk2Ov21cPNAVsACAAAAAAHErCYycpqiIcCZHdmPL1hi+ovLQk4TAvENpfLdTRamQAAzQyAH0AAAAFZAAgAAAAAFvotcNaoKnVt5CBCOPwjexFO0WGWuaIGL6H/6KSau+6BXMAIAAAAAD2y2mBN5xPu5PJoY2zcr0GnQDtHRBogA5+xzIxccE9fwVsACAAAAAAdS34xzJesnUfxLCcc1U7XzUqLy8MAzV/tcjbqaD3lkMAAzQzAH0AAAAFZAAgAAAAAPezU0/vNT4Q4YKbTbaeHqcwNLT+IjW/Y9QFpIooihjPBXMAIAAAAACj2x4O4rHter8ZnTws5LAP9jJ/6kk9C/V3vL50LoFZHAVsACAAAAAAQdBDF3747uCVP5lB/zr8VmzxJfTSZHBKeIgm5FyONXwAAzQ0AH0AAAAFZAAgAAAAAMqpayM2XotEFmm0gwQd9rIzApy0X+7HfOhNk6VU7F5lBXMAIAAAAACJR9+q5T9qFHXFNgGbZnPubG8rkO6cwWhzITQTmd6VgwVsACAAAAAAOZLQ6o7e4mVfDzbpQioa4d3RoTvqwgnbmc5Qh2wsZuoAAzQ1AH0AAAAFZAAgAAAAANCeyW+3oebaQk+aqxNVhAcT/BZ5nhsTVdKS3tMrLSvWBXMAIAAAAADxRFMDhkyuEc++WnndMfoUMLNL7T7rWoeblcrpSI6soQVsACAAAAAAdBuBMJ1lxt0DRq9pOZldQqchLs3B/W02txcMLD490FEAAzQ2AH0AAAAFZAAgAAAAAIbo5YBTxXM7HQhl7UP9NNgpPGFkBx871r1B65G47+K8BXMAIAAAAAC21dJSxnEhnxO5gzN5/34BL4von45e1meW92qowzb8fQVsACAAAAAAm3Hk2cvBN0ANaR5jzeZE5TsdxDvJCTOT1I01X7cNVaYAAzQ3AH0AAAAFZAAgAAAAABm/6pF96j26Jm7z5KkY1y33zcAEXLx2n0DwC03bs/ixBXMAIAAAAAD01OMvTZI/mqMgxIhA5nLs068mW+GKl3OW3ilf2D8+LgVsACAAAAAAaLvJDrqBESTNZSdcXsd+8GXPl8ZkUsGpeYuyYVv/kygAAzQ4AH0AAAAFZAAgAAAAAJ/D3+17gaQdkBqkL2wMwccdmCaVOtxzIkM8VyI4xI5zBXMAIAAAAAAggLVmkc5u+YzBR+oNE+XgLVp64fC6MzUb/Ilu/Jsw0AVsACAAAAAACz3HVKdWkx82/kGbVpcbAeZtsj2R5Zr0dEPfle4IErkAAzQ5AH0AAAAFZAAgAAAAAJMRyUW50oaTzspS6A3TUoXyC3gNYQoShUGPakMmeVZrBXMAIAAAAACona2Pqwt4U2PmFrtmu37jB9kQ/12okyAVtYa8TQkDiQVsACAAAAAAltJJKjCMyBTJ+4PkdDCPJdeX695P8P5h7WOZ+kmExMAAAzUwAH0AAAAFZAAgAAAAAByuYl8dBvfaZ0LO/81JW4hYypeNmvLMaxsIdvqMPrWoBXMAIAAAAABNddwobOUJzm9HOUD8BMZJqkNCUCqstHZkC76FIdNg9AVsACAAAAAAQQOkIQtkyNavqCnhQbNg3HfqrJdsAGaoxSJePJl1qXsAAzUxAH0AAAAFZAAgAAAAAHEzLtfmF/sBcYPPdj8867VmmQyU1xK9I/3Y0478azvABXMAIAAAAAAcmyFajZPnBTbO+oLInNwlApBocUekKkxz2hYFeSlQ+gVsACAAAAAAZ6IkrOVRcC8vSA6Vb4fPWZJrYexXhEabIuYIeXNsCSgAAzUyAH0AAAAFZAAgAAAAAJam7JYsZe2cN20ZYm2W3v1pisNt5PLiniMzymBLWyMtBXMAIAAAAABxCsKVMZMTn3n+R2L7pVz5nW804r8HcK0mCBw3jUXKXAVsACAAAAAA7j3JGnNtR64P4dJLeUoScFRGfa8ekjh3dvhw46sRFk0AAzUzAH0AAAAFZAAgAAAAAMXrXx0saZ+5gORmwM2FLuZG6iuO2YS+1IGPoAtDKoKBBXMAIAAAAADIQsxCr8CfFKaBcx8kIeSywnGh7JHjKRJ9vJd9x79y7wVsACAAAAAAcvBV+SykDYhmRFyVYwFYB9oBKBSHr55Jdz2cXeowsUQAAzU0AH0AAAAFZAAgAAAAACbzcUD3INSnCRspOKF7ubne74OK9L0FTZvi9Ay0JVDYBXMAIAAAAADPebVQH8Btk9rhBIoUOdSAdpPvz7qIY4UC2i6IGisSAQVsACAAAAAAiBunJi0mPnnXdnldiq+If8dcb/n6apHnaIFt+oyYO1kAAzU1AH0AAAAFZAAgAAAAACUc2CtD1MK/UTxtv+8iA9FoHEyTwdl43HKeSwDw2Lp5BXMAIAAAAACCIduIdw65bQMzRYRfjBJj62bc69T4QqH4QoWanwlvowVsACAAAAAAM0TV7S+aPVVzJOQ+cpSNKHTwyQ0mWa8tcHzfk3nR+9IAAzU2AH0AAAAFZAAgAAAAAHSaHWs/dnmI9sc7nB50VB2Bzs0kHapMHCQdyVEYY30TBXMAIAAAAACkV22lhEjWv/9/DubfHBAcwJggKI5mIbSK5L2nyqloqQVsACAAAAAAS19m7DccQxgryOsBJ3GsCs37yfQqNi1G+S6fCXpEhn4AAzU3AH0AAAAFZAAgAAAAAAL8jhNBG0KXXZhmZ0bPXtfgapJCB/AI+BEHB0eZ3C75BXMAIAAAAADHx/fPa639EBmGV5quLi8IQT600ifiKSOhTDOK19DnzwVsACAAAAAAlyLTDVkHxbayklD6Qymh3odIK1JHaOtps4f4HR+PcDgAAzU4AH0AAAAFZAAgAAAAAAxgeclNl09H7HvzD1oLwb2YpFca5eaX90uStYXHilqKBXMAIAAAAACMU5pSxzIzWlQxHyW170Xs9EhD1hURASQk+qkx7K5Y6AVsACAAAAAAJbMMwJfNftA7Xom8Bw/ghuZmSa3x12vTZxBUbV8m888AAzU5AH0AAAAFZAAgAAAAABmO7QD9vxWMmFjIHz13lyOeV6vHT6mYCsWxF7hb/yOjBXMAIAAAAACT9lmgkiqzuWG24afuzYiCeK9gmJqacmxAruIukd0xEAVsACAAAAAAZa0/FI/GkZR7CtX18Xg9Tn9zfxkD0UoaSt+pIO5t1t4AAzYwAH0AAAAFZAAgAAAAAB89SjLtDJkqEghRGyj6aQ/2qvWLNuMROoXmzbYbCMKMBXMAIAAAAAC8sywgND+CjhVTF6HnRQeay8y9/HnVzDI42dEPah28LQVsACAAAAAAoxv7UKh0RqUAWcOsQvU123zO1qZn73Xfib0qncZCB34AAzYxAH0AAAAFZAAgAAAAABN2alGq9Aats1mwERNGwL/fIwZSvVCe9/8XMHTFlpUpBXMAIAAAAACuDPjJgvvbBYhbLpjMiWUCsVppiYrhvR+yMysNPN8cZAVsACAAAAAAKpADjc4bzIZMi9Q/+oe0EMRJ
HYQt6dlo1x/lRquagqkAAzYyAH0AAAAFZAAgAAAAAL8YB6VAqGBiWD4CBv16IBscg5J7VQCTZu87n6pj+86KBXMAIAAAAAAmxm8e68geeyAdUjSMWBHzUjneVB0pG9TBXIoE6467hAVsACAAAAAAV76JZAlYpgC/Zl8awx2ArCg1uuyy2XVTSkp0wUMi/7UAAzYzAH0AAAAFZAAgAAAAAL4yLkCTV5Dmxa5toBu4JT8ge/cITAaURIOuFuOtFUkeBXMAIAAAAAAXoFNQOMGkAj7qEJP0wQafmFSXgWGeorDVbwyOxWLIsgVsACAAAAAAc4Un6dtIFe+AQ+RSfNWs3q63RTHhmyc+5GKRRdpWRv8AAzY0AH0AAAAFZAAgAAAAAEU8DoUp46YtYjNFS9kNXwdYxQ9IW27vCTb+VcqqfnKNBXMAIAAAAADe7vBOgYReE8X78k5ARuUnv4GmzPZzg6SbConf4L2G3wVsACAAAAAA78YHWVkp6HbZ0zS4UL2z/2pj9vPDcMDt7zTv6NcRsVsAAzY1AH0AAAAFZAAgAAAAAPa4yKTtkUtySuWo1ZQsp2QXtPb5SYqzA5vYDnS1P6c0BXMAIAAAAADKnF58R1sXlHlsHIvCBR3YWW/qk54z9CTDhZydkD1cOQVsACAAAAAAHW3ERalTFWKMzjuXF3nFh0pSrQxM/ojnPbPhc4v5MaQAAzY2AH0AAAAFZAAgAAAAAN5WJnMBmfgpuQPyonmY5X6OdRvuHw4nhsnGRnFAQ95VBXMAIAAAAACwftzu7KVV1rmGKwXtJjs3cJ1gE3apr8+N0SAg1F2cHwVsACAAAAAATDW0reyaCjbJuVLJzbSLx1OBuBoQu+090kgW4RurVacAAzY3AH0AAAAFZAAgAAAAACHvDsaPhoSb6DeGnKQ1QOpGYAgK82qpnqwcmzSeWaJHBXMAIAAAAABRq3C5+dOfnkAHM5Mg5hPB3O4jhwQlBgQWLA7Ph5bhgwVsACAAAAAAqkC8zYASvkVrp0pqmDyFCkPaDmD/ePAJpMuNOCBhni8AAzY4AH0AAAAFZAAgAAAAAOBePJvccPMJmy515KB1AkXF5Pi8NOG4V8psWy0SPRP+BXMAIAAAAAB3dOJG9xIDtEKCRzeNnPS3bFZepMj8UKBobKpSoCPqpgVsACAAAAAAPG3IxQVOdZrr509ggm5FKizWWoZPuVtOgOIGZ3m+pdEAAzY5AH0AAAAFZAAgAAAAABUvRrDQKEXLMdhnzXRdhiL6AGNs2TojPky+YVLXs+JnBXMAIAAAAAD1kYicbEEcPzD4QtuSYQQWDPq8fuUWGddpWayKn3dT9QVsACAAAAAA9+Sf7PbyFcY45hP9oTfjQiOUS3vEIAT8C0vOHymwYSUAAzcwAH0AAAAFZAAgAAAAAOvSnpujeKNen4pqc2HR63C5s5oJ1Vf4CsbKoYQvkwl5BXMAIAAAAACw2+vAMdibzd2YVVNfk81yXkFZP0WLJ82JBxJmXnYE+QVsACAAAAAArQ/E1ACyhK4ZyLqH9mNkCU7WClqRQTGyW9tciSGG/EMAAzcxAH0AAAAFZAAgAAAAAAo0xfGG7tJ3GWhgPVhW5Zn239nTD3PadShCNRc9TwdNBXMAIAAAAADZh243oOhenu0s/P/5KZLBDh9ADqKHtSWcXpO9D2sIjgVsACAAAAAAlgTPaoQKz+saU8rwCT3UiNOdG6hdpjzFx9GBn08ZkBEAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAAA==", "subType": "06" } } @@ -291,7 +287,6 @@ "schema": { "default.default": { "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", "ecocCollection": "enxcol_.default.ecoc", "fields": [ { @@ -301,7 +296,7 @@ "subType": "04" } }, - "path": "encryptedDouble", + "path": "encryptedDoubleNoPrecision", "bsonType": "double", "queries": { "queryType": "rangePreview", @@ -327,7 +322,7 @@ "data": [ { "_id": 0, - "encryptedDouble": { + "encryptedDoubleNoPrecision": { "$$type": "binData" }, "__safeContent__": [ @@ -725,7 +720,7 @@ }, { "_id": 1, - "encryptedDouble": { + "encryptedDoubleNoPrecision": { "$$type": "binData" }, "__safeContent__": [ diff --git a/test/client-side-encryption/spec/legacy/fle2-Range-Double-Update.json b/test/client-side-encryption/spec/legacy/fle2v2-Range-Double-Update.json similarity index 80% rename from test/client-side-encryption/spec/legacy/fle2-Range-Double-Update.json rename to test/client-side-encryption/spec/legacy/fle2v2-Range-Double-Update.json index cdc9f28e76..ce03576f88 100644 --- a/test/client-side-encryption/spec/legacy/fle2-Range-Double-Update.json +++ b/test/client-side-encryption/spec/legacy/fle2v2-Range-Double-Update.json @@ -1,7 +1,8 @@ { "runOn": [ { - "minServerVersion": "6.2.0", + "minServerVersion": "7.0.0", + "serverless": "forbid", "topology": [ "replicaset", "sharded", @@ -13,9 +14,6 @@ "collection_name": "default", "data": [], "encrypted_fields": { - "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", - "ecocCollection": "enxcol_.default.ecoc", "fields": [ { "keyId": { @@ -24,7 +22,7 @@ "subType": "04" } }, - "path": "encryptedDouble", + "path": "encryptedDoubleNoPrecision", "bsonType": "double", "queries": { 
"queryType": "rangePreview", @@ -93,7 +91,7 @@ "arguments": { "document": { "_id": 0, - "encryptedDouble": { + "encryptedDoubleNoPrecision": { "$numberDouble": "0" } } @@ -104,7 +102,7 @@ "arguments": { "document": { "_id": 1, - "encryptedDouble": { + "encryptedDoubleNoPrecision": { "$numberDouble": "1" } } @@ -114,7 +112,7 @@ "name": "updateOne", "arguments": { "filter": { - "encryptedDouble": { + "encryptedDoubleNoPrecision": { "$gt": { "$numberDouble": "0" } @@ -122,7 +120,7 @@ }, "update": { "$set": { - "encryptedDouble": { + "encryptedDoubleNoPrecision": { "$numberDouble": "2" } } @@ -187,7 +185,7 @@ "documents": [ { "_id": 0, - "encryptedDouble": { + "encryptedDoubleNoPrecision": { "$$type": "binData" } } @@ -198,7 +196,6 @@ "schema": { "default.default": { "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", "ecocCollection": "enxcol_.default.ecoc", "fields": [ { @@ -208,7 +205,7 @@ "subType": "04" } }, - "path": "encryptedDouble", + "path": "encryptedDoubleNoPrecision", "bsonType": "double", "queries": { "queryType": "rangePreview", @@ -235,7 +232,7 @@ "documents": [ { "_id": 1, - "encryptedDouble": { + "encryptedDoubleNoPrecision": { "$$type": "binData" } } @@ -246,7 +243,6 @@ "schema": { "default.default": { "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", "ecocCollection": "enxcol_.default.ecoc", "fields": [ { @@ -256,7 +252,7 @@ "subType": "04" } }, - "path": "encryptedDouble", + "path": "encryptedDoubleNoPrecision", "bsonType": "double", "queries": { "queryType": "rangePreview", @@ -285,10 +281,10 @@ "updates": [ { "q": { - "encryptedDouble": { + "encryptedDoubleNoPrecision": { "$gt": { "$binary": { - "base64": "Cq8kAAADcGF5bG9hZAB/JAAABGcAQyQAAAMwAH0AAAAFZAAgAAAAAHgYoMGjEE6fAlAhICv0+doHcVX8CmMVxyq7+jlyGrvmBXMAIAAAAAC/5MQZgTHuIr/O5Z3mXPvqrom5JTQ8IeSpQGhO9sB+8gVjACAAAAAAPMgD8Rqnd94atKnMyPjlTwthUQ710MKJVqgtwNXLFWwAAzEAfQAAAAVkACAAAAAA2kiWNvEc4zunJ1jzvuClFC9hjZMYruKCqAaxq+oY8EAFcwAgAAAAACofIS72Cm6s866UCk+evTH3CvKBj/uZd72sAL608rzTBWMAIAAAAAB7NkTFgsBuPIyxW5KtYZD4uxdgRynoAUwA2RDj3qZNdwADMgB9AAAABWQAIAAAAABkfoBGmU3hjYBvQbjNW19kfXneBQsQQPRfUL3UAwI2cAVzACAAAAAAUpK2BUOqX/DGdX5YJniEZMWkofxHqeAbXceEGJxhp8AFYwAgAAAAAENNdd+fMV7VQzshj6RdvsxqDHYkPDxggdUHw3JbmFBeAAMzAH0AAAAFZAAgAAAAAFNprhQ3ZwIcYbuzLolAT5n/vc14P9kUUQComDu6eFyKBXMAIAAAAAAcx9z9pk32YbPV/sfPZl9ALIEVsqoLXgqWLVK/tP+heAVjACAAAAAA3eqduo9CWE5BePqBq7KCh5++QqXnyjCwybB15Zoeiu4AAzQAfQAAAAVkACAAAAAAODI+pB2pCuB+YmNEUAgtMfNdt3DmSkrJ96gRzLphgb8FcwAgAAAAAAT7dewFDxUDECQ3zVq75/cUN4IP+zsqhkP5+czUwlJIBWMAIAAAAABYcCJA2jocGQiF/vZ9VDEktoK015OqfY64yZhmNPs9GQADNQB9AAAABWQAIAAAAACNAk+yTZ4Ewk1EnotQK8O3h1gg9I7pr9q2+4po1iJVgAVzACAAAAAAUj/LesmtEsgqYVzMJ67umVA11hJTdDXwbxDoQ71vWyUFYwAgAAAAAG32XaKjEO9YTSLMnDR5eiaUpgBQz0n4M3VpBqhWTaEnAAM2AH0AAAAFZAAgAAAAAD/FIrGYFDjyYmVb7oTMVwweWP7A6F9LnyIuNO4MjBnXBXMAIAAAAACIZgJCQRZu7NhuNMyOqCn1tf+DfU1qm10TPCfj5JYV3wVjACAAAAAAZCaO06/DjOBjNrTxB8jdxM7X4XvNu9QTa2Y00kZJwgEAAzcAfQAAAAVkACAAAAAAciRW40ORJLVwchOEpz87Svb+5toAFM6LxDWv928ECwQFcwAgAAAAAN0dipyESIkszfjRzdDi8kAGaa2Hf4wrPAtiWwboZLuxBWMAIAAAAADhvKHhF7zfNRnG8QfiIlKycV6L6iGxbGNKkHligA9rAgADOAB9AAAABWQAIAAAAACZqAyCzYQupJ95mrBJX54yIz9VY7I0WrxpNYElCI4dTQVzACAAAAAA/eyJb6d1xfE+jJlVXMTD3HS/NEYENPVKAuj56Dr2dSEFYwAgAAAAAMfcJKj77c6y/RBQ5hb5LSNdvbYvCAHR59KLnbmxj1gAAAM5AH0AAAAFZAAgAAAAAI5bm3YO0Xgf0VT+qjVTTfvckecM3Cwqj7DTKZXf8/NXBXMAIAAAAAD/m+h8fBhWaHm6Ykuz0WX1xL4Eme3ErLObyEVJf8NCywVjACAAAAAAiGdTXD22l1zDxHeF4NXUTiBnNTsdpzJRwM6riTPuOogAAzEwAH0AAAAFZAAgAAAAANqo4+p6qdtCzcB4BX1wQ6llU7eFBnuu4MtZwp4B6mDlBXMAIAAAAAAGiz+VaukMZ+6IH4jtn4KWWdKK
4/W+O+gRioQDrfzpMgVjACAAAAAAl/wlBjSJW/hKkll8HSBCGw4Ce1pJ5rZuhVE09hKq4jQAAzExAH0AAAAFZAAgAAAAAPrFXmHP2Y4YAm7b/aqsdn/DPoDkv7B8egWkfe23XsM1BXMAIAAAAAAGhwpKAr7skeqHm3oseSbO7qKNhmYsuUrECBxJ5k+D2AVjACAAAAAAKSsxERy2OBhb5MiH9/H2BET2oeU6yVihiAoWi/16FroAAzEyAH0AAAAFZAAgAAAAABzjYxwAjXxXc0Uxv18rH8I3my0Aguow0kTwKyxbrm+cBXMAIAAAAADVbqJVr6IdokuhXkEtXF0C2gINLiAjMVN20lE20Vmp2QVjACAAAAAACJ+VAwl9X88mjC76yzkWeL4K6AamNYjbNkpS9B6fQE4AAzEzAH0AAAAFZAAgAAAAAFMm2feF2fFCm/UC6AfIyepX/xJDSmnnolQIBnHcPmb5BXMAIAAAAABLI11kFrQoaNVZFmq/38aRNImPOjdJh0Lo6irI8M/AaAVjACAAAAAAZFgYh4nV8YN+/AAqe/QhKDkNBPg8KraCG7y3bOzhPV4AAzE0AH0AAAAFZAAgAAAAAJvXB3KyNiNtQko4SSzo/9b2qmM2zU9CQTTDfLSBWMgRBXMAIAAAAAAvjuVP7KsLRDeqVqRziTKpBrjVyqKiIbO9Gw8Wl2wFTAVjACAAAAAARuBn3JdQ8so7Gy0XVH0CtlvtRoJWhSrJP6eRIqbyWh8AAzE1AH0AAAAFZAAgAAAAAPGdcxDiid8z8XYnfdDivNMYVPgBKdGOUw6UStU+48CdBXMAIAAAAAARj6g1Ap0eEfuCZ4X2TsEw+Djrhto3fA5nLwPaY0vCTgVjACAAAAAATxLY6SkcLPaoOBJpQsggEqoxgJgiY9seP3fBQM05PckAAzE2AH0AAAAFZAAgAAAAAP5rGPrYGt3aKob5f/ldP0qrW7bmWvqnKY4QwdDWz400BXMAIAAAAADTQkW2ymaaf/bhteOOGmSrIR97bAnJx+yN3yMj1bTeewVjACAAAAAA6O+wIMt3xMITuCxVrqOCNBPX5F122G+/Is+EYkzUvVYAAzE3AH0AAAAFZAAgAAAAAAlz6wJze5UkIxKpJOZFGCOf3v2KByWyI6NB6JM9wNcBBXMAIAAAAABUC7P/neUIHHoZtq0jFVBHY75tSFYr1Y5S16YN5XxC1QVjACAAAAAANv97jMd9JmZ7lMeCzK7jaZHZYsZJNl6N9g9WXzd2om0AAzE4AH0AAAAFZAAgAAAAAFJ8AtHcjia/9Y5pLEc3qVgH5xKiXw12G9Kn2A1EY8McBXMAIAAAAAAxe7Bdw7eUSBk/oAawa7uicTEDgXLymRNhBy1LAxhDvwVjACAAAAAAtOUZ2NeUWwESpq95O92dhqYBt8RtFnjT43E7k9mQF3oAAzE5AH0AAAAFZAAgAAAAAO8uwQUaKFb6vqR3Sv3Wn4QAonC2exOC9lGG1juqP5DtBXMAIAAAAABZf1KyJgQg8/Rf5c02DgDK2aQu0rNCOvaL60ohDHyY+gVjACAAAAAA5nAWj1Ek8p0MLYZEB3yoVFDfBYZ+/ZpIo71u+W9hprcAAzIwAH0AAAAFZAAgAAAAAE8YtqyRsGCeiR6hhiyisR/hccmK4nZqIMzO4lUBmEFzBXMAIAAAAAC1UYOSKqAeG1UJiKjWFVskRhuFKpj9Ezy+lICZvFlN5AVjACAAAAAA37oP30mTzVftI2FL9Uwyls/jqLqbmRDQk7p7nlx44uYAAzIxAH0AAAAFZAAgAAAAAPhCrMausDx1QUIEqp9rUdRKyM6a9AAx7jQ3ILIu8wNIBXMAIAAAAACmH8lotGCiF2q9VQxhsS+7LAZv79VUAsOUALaGxE/EpAVjACAAAAAAhVIkl8p19VZ0gpg32s++Jda3qsVSVB5tV4CKrtjhE3IAAzIyAH0AAAAFZAAgAAAAAOBi/GAYFcstMSJPgp3VkMiuuUUCrZytvqYaU8dwm8v2BXMAIAAAAACEZSZVyD3pKzGlbdwlYmWQhHHTV5SnNLknl2Gw8IaUTQVjACAAAAAAriBex2kK//RPhyVpCYJDBng4l5w8jD3m8BF7dVAP0p8AAzIzAH0AAAAFZAAgAAAAAIQuup+YGfH3mflzWopN8J1X8o8a0d9CSGIvrA5HOzraBXMAIAAAAADYvNLURXsC2ITMqK14LABQBI+hZZ5wNf24JMcKLW+84AVjACAAAAAAGeUzdG2kQrx5XypXJezZmPVzMYuqYZw7Bhbl4EPT0SMAAzI0AH0AAAAFZAAgAAAAAKsh0ADyOnVocFrOrf6MpTrNvAj8iaiE923DPryu124gBXMAIAAAAADg24a8NVE1GyScc6tmnTbmu5ulzO+896fE92lN08MeswVjACAAAAAAbO+DEBY3STVMQN7CbxmHDUBYrDR+80e797u/VmiK4vcAAzI1AH0AAAAFZAAgAAAAAKkVC2Y6HtRmv72tDnPUSjJBvse7SxLqnr09/Uuj9sVVBXMAIAAAAABYNFUkH7ylPMN+Bc3HWX1e0flGYNbtJNCY9SltJCW/UAVjACAAAAAAHlE3RLKcXpXto7Mr8nRgzEsmhjfGh4vcgPxashCSMg4AAzI2AH0AAAAFZAAgAAAAADDggcwcb/Yn1Kk39sOHsv7BO/MfP3m/AJzjGH506Wf9BXMAIAAAAAAYZIsdjICS0+BDyRUPnrSAZfPrwtuMaEDEn0/ijLNQmAVjACAAAAAA1c/sy3255NofBQrnBNSmVkSzMgsGPaaOUJShddVrnuQAAzI3AH0AAAAFZAAgAAAAAEWY7dEUOJBgjOoWVht1wLehsWAzB3rSOBtLgTuM2HC8BXMAIAAAAAAAoswiHRROurjwUW8u8D5EUT+67yvrgpB/j6PzBDAfVwVjACAAAAAALBvQumG8m7/bzJjGWN2cHSAncdN8jMtOSmEhEhGom24AAzI4AH0AAAAFZAAgAAAAAPZaapeAUUFPA7JTCMOWHJa9lnPFh0/gXfAPjA1ezm4ZBXMAIAAAAACmJvLY2nivw7/b3DOKH/X7bBXjJwoowqb1GtEFO3OYgAVjACAAAAAAGCDHuUJusGHKQ+r9nrFChmUUsRcqZKPGsRiLSk5gbFcAAzI5AH0AAAAFZAAgAAAAAK00u6jadxCZAiA+fTsPVDsnW5p5LCr4+kZZZOTDuZlfBXMAIAAAAAAote4zTEYMDgaaQbAdN8Dzv93ljPLdGjJzvnRn3KXgtQVjACAAAAAApZ6l+OrocOqgFek7WOqOP9JruTWZ+iW+5zdL3DZwzhkAAzMwAH0AAAAFZAAgAAAAAFbgabdyymiEVYYwtJSWa7lfl/oYuj/SukzJeDOR6wPVBXMAIAAAAADAFGFjS1vPbN6mQEhkDYTD6V2V23Ys9gUEUMGNvMPkaAVjACAAAAAAeICKly5Xtwmfd4JFD+5e1UWpu4V3KoRim7jeBICDs+UAAzMxAH0AAAAFZAAgAAAAABNMR6UBv2E627CqLtQ/eDY
x7OEwQ7JrR4mSHFa1N8tLBXMAIAAAAAAxH4gucI4UmNVB7625C6hFSVCuIpJO3lusJlPuL8H5EQVjACAAAAAA0/uh1hrAlGGna/njCVaCqBwubxkifzF0zjCDQrIJOtUAAzMyAH0AAAAFZAAgAAAAAG8cd6WBneNunlqrQ2EmNf35W7OGObGq9WL4ePX+LUDmBXMAIAAAAAAjJ2+sX87NSis9hBsgb1QprVRnO7Bf+GObCGoUqyPE4wVjACAAAAAAAoJ4Ufdq45T9Aun5FupHlaBCIUsmUn6dXgV9KorpFikAAzMzAH0AAAAFZAAgAAAAAFWOUGkUpy8yf6gB3dio/aOfRKh7XuhvsUj48iESFJrGBXMAIAAAAAAY7sCDMcrUXvNuL6dO0m11WyijzXZvPIcOKob6IpC4PQVjACAAAAAAO/QFjczoznH95Iu0YEVMsU1GA1yxSGL8bcwSweNzAtkAAzM0AH0AAAAFZAAgAAAAAGUrIdKxOihwNmo6B+aG+Ag1qa0+iqdksHOjQj+Oy9bZBXMAIAAAAABwa5dbI2KmzBDNBTQBEkjZv4sPaeRkRNejcjdVymRFKQVjACAAAAAAUtdJucZoQvvPJiy65RonQrxBCcvtfHslpbgLbtWirCYAAzM1AH0AAAAFZAAgAAAAAOx89xV/hRk64/CkM9N2EMK6aldII0c8smdcsZ46NbP8BXMAIAAAAADBF6tfQ+7q9kTuLyuyrSnDgmrdmrXkdhl980i1KHuGHgVjACAAAAAAVc5njbLX1afpsuG662U/OBo4LanEk06lKbQtd95fswgAAzM2AH0AAAAFZAAgAAAAAMkN0L1oQWXhjwn9rAdudcYeN8/5VdCKU8cmDt7BokjsBXMAIAAAAAAT62pGXoRwExe9uvgYOI0hg5tOxilrWfoEmT0SMglWJwVjACAAAAAAhNc5ovIaxgtS856ZBfG+cAcHAz+3ewzB9C6zZoaggj4AAzM3AH0AAAAFZAAgAAAAANxfa4xCoaaB7k1C1RoH1LBhsCbN2yEq15BT9b+iqEC4BXMAIAAAAACAX9LV8Pemfw7NF0iB1/85NzM1Ef+1mUfyehacUVgobQVjACAAAAAAc0bIRNzuyc1Y0hkEmm7xV6Xp/LBlAgMq/ISCHr0Tq44AAzM4AH0AAAAFZAAgAAAAAOYIYoWkX7dGuyKfi3XssUlc7u/gWzqrR9KMkikKVdmSBXMAIAAAAABVF2OYjRTGi9Tw8XCAwZWLpX35Yl271TlNWp6N/nROhAVjACAAAAAAyGLbmHhe8eTQC8L2eDRcezUWULLZ9E8DPic7Mfp8/+4AAzM5AH0AAAAFZAAgAAAAAIy0+bXZi10QC+q7oSOLXK5Fee7VEk/qHSXukfeVIfgzBXMAIAAAAAAQ3IIV/JQCHW95AEbH5zGIHtJqyuPjWPMIZ+VmQHlxEwVjACAAAAAAD3azcNDdohU6tHO6ypT7F0rtydAYIMmLIsX6y7cXVkMAAzQwAH0AAAAFZAAgAAAAAL5SOJQ3LOhgdXJ5v086NNeAl1qonQnchObdpZJ1kHeEBXMAIAAAAAA+tEqTXODtik+ydJZSnUqXF9f18bPeze9eWtR7ExZJgQVjACAAAAAAUd8v2RN1WagYan1LbV+G81vcJKQ6HsX2GSBtnzMH7XEAAzQxAH0AAAAFZAAgAAAAAKl8zcHJRDjSjJeV/WvMxulW1zrTFtaeBy/aKKhadc6UBXMAIAAAAADBdWQl5SBIvtZZLIHszePwkO14W1mQ0izUk2Ov21cPNAVjACAAAAAAHvQLJ2X4ncjVv1BsOd/jbVouRPwf4222WlGSzPyAfnsAAzQyAH0AAAAFZAAgAAAAAFvotcNaoKnVt5CBCOPwjexFO0WGWuaIGL6H/6KSau+6BXMAIAAAAAD2y2mBN5xPu5PJoY2zcr0GnQDtHRBogA5+xzIxccE9fwVjACAAAAAA1OntPAxq1NJ4sfca+ysHcg68mj7QHwdEZQVxHMlV6x0AAzQzAH0AAAAFZAAgAAAAAPezU0/vNT4Q4YKbTbaeHqcwNLT+IjW/Y9QFpIooihjPBXMAIAAAAACj2x4O4rHter8ZnTws5LAP9jJ/6kk9C/V3vL50LoFZHAVjACAAAAAAlZd3bPMUXRhTwLWflrQuKIwtauTswenUiaDi2ibHYYYAAzQ0AH0AAAAFZAAgAAAAAMqpayM2XotEFmm0gwQd9rIzApy0X+7HfOhNk6VU7F5lBXMAIAAAAACJR9+q5T9qFHXFNgGbZnPubG8rkO6cwWhzITQTmd6VgwVjACAAAAAAEEOCUo+ihRGl1kuHlabWBWUFyJPAXSXzpQB4od57cMEAAzQ1AH0AAAAFZAAgAAAAANCeyW+3oebaQk+aqxNVhAcT/BZ5nhsTVdKS3tMrLSvWBXMAIAAAAADxRFMDhkyuEc++WnndMfoUMLNL7T7rWoeblcrpSI6soQVjACAAAAAA1KYQISxmBcGmgDMAjBpTcfQr/oci60Kf4QtEYEXPKSQAAzQ2AH0AAAAFZAAgAAAAAIbo5YBTxXM7HQhl7UP9NNgpPGFkBx871r1B65G47+K8BXMAIAAAAAC21dJSxnEhnxO5gzN5/34BL4von45e1meW92qowzb8fQVjACAAAAAAisxsskU/9ykphpSjB6bMMY+6Tbt6wpjhE2dg1IULAgYAAzQ3AH0AAAAFZAAgAAAAABm/6pF96j26Jm7z5KkY1y33zcAEXLx2n0DwC03bs/ixBXMAIAAAAAD01OMvTZI/mqMgxIhA5nLs068mW+GKl3OW3ilf2D8+LgVjACAAAAAAnPa8PTfcaSSAdzNAC54IMTicbShbtt/cSnFHz7u7g8wAAzQ4AH0AAAAFZAAgAAAAAJ/D3+17gaQdkBqkL2wMwccdmCaVOtxzIkM8VyI4xI5zBXMAIAAAAAAggLVmkc5u+YzBR+oNE+XgLVp64fC6MzUb/Ilu/Jsw0AVjACAAAAAAp5qCK8p0eTvAAUQ8fIOGyb2YM1YflQ8H6ggiWZ9vNI4AAzQ5AH0AAAAFZAAgAAAAAJMRyUW50oaTzspS6A3TUoXyC3gNYQoShUGPakMmeVZrBXMAIAAAAACona2Pqwt4U2PmFrtmu37jB9kQ/12okyAVtYa8TQkDiQVjACAAAAAAx/LOuDwvpaPFJeKLowZW/4q7SUsLtdldB27zc4//alEAAzUwAH0AAAAFZAAgAAAAAByuYl8dBvfaZ0LO/81JW4hYypeNmvLMaxsIdvqMPrWoBXMAIAAAAABNddwobOUJzm9HOUD8BMZJqkNCUCqstHZkC76FIdNg9AVjACAAAAAA4SEPY+qEZ2dCu/9485IEVybiM3Z7szXYM+izxklNm14AAzUxAH0AAAAFZAAgAAAAAHEzLtfmF/sBcYPPdj8867VmmQyU1xK9I/3Y0478azvABXMAIAAAAAAcmyFajZPnBTbO+oLInNwlApBocUekKkxz2hYFeSlQ+gVjACAAAAAA15ibZoBP1/pP27BDIfizDYNRDIpOmj
8Ie34FvrtqjhMAAzUyAH0AAAAFZAAgAAAAAJam7JYsZe2cN20ZYm2W3v1pisNt5PLiniMzymBLWyMtBXMAIAAAAABxCsKVMZMTn3n+R2L7pVz5nW804r8HcK0mCBw3jUXKXAVjACAAAAAAElCgoMRrtlONqnyosjIpiz4mQTLq3+b/JUCVEq4QFiAAAzUzAH0AAAAFZAAgAAAAAMXrXx0saZ+5gORmwM2FLuZG6iuO2YS+1IGPoAtDKoKBBXMAIAAAAADIQsxCr8CfFKaBcx8kIeSywnGh7JHjKRJ9vJd9x79y7wVjACAAAAAAPzOTJ088k9QdGUpZ1ubUdkw3pTGkNF8bui4a9wqeo08AAzU0AH0AAAAFZAAgAAAAACbzcUD3INSnCRspOKF7ubne74OK9L0FTZvi9Ay0JVDYBXMAIAAAAADPebVQH8Btk9rhBIoUOdSAdpPvz7qIY4UC2i6IGisSAQVjACAAAAAAe4zSAgMQPjtz2t1DgxW1XOlV+fnNMXt6iQ7iV7DgQz4AAzU1AH0AAAAFZAAgAAAAACUc2CtD1MK/UTxtv+8iA9FoHEyTwdl43HKeSwDw2Lp5BXMAIAAAAACCIduIdw65bQMzRYRfjBJj62bc69T4QqH4QoWanwlvowVjACAAAAAA+i6aTnkoixxSOxDgQM/aWTylOy1m0ViCDU5asOM5ieYAAzU2AH0AAAAFZAAgAAAAAHSaHWs/dnmI9sc7nB50VB2Bzs0kHapMHCQdyVEYY30TBXMAIAAAAACkV22lhEjWv/9/DubfHBAcwJggKI5mIbSK5L2nyqloqQVjACAAAAAA1I5pLbnlFf/EfJlhE0RFIioDjzKrWNh4TO8H/ksrbMcAAzU3AH0AAAAFZAAgAAAAAAL8jhNBG0KXXZhmZ0bPXtfgapJCB/AI+BEHB0eZ3C75BXMAIAAAAADHx/fPa639EBmGV5quLi8IQT600ifiKSOhTDOK19DnzwVjACAAAAAANfWpWWy9iu7pgMEy2aVCggGEtJtXKQG1+3zCf8D6iEkAAzU4AH0AAAAFZAAgAAAAAAxgeclNl09H7HvzD1oLwb2YpFca5eaX90uStYXHilqKBXMAIAAAAACMU5pSxzIzWlQxHyW170Xs9EhD1hURASQk+qkx7K5Y6AVjACAAAAAA/kYlJPSFn2g1l7cy6djV/7wgnlPWphZ0IKSY8uenUF0AAzU5AH0AAAAFZAAgAAAAABmO7QD9vxWMmFjIHz13lyOeV6vHT6mYCsWxF7hb/yOjBXMAIAAAAACT9lmgkiqzuWG24afuzYiCeK9gmJqacmxAruIukd0xEAVjACAAAAAAyiZxtuOZQNSit4wFMJS3jnqLbDzyJ0OtDTKs6r0EO0cAAzYwAH0AAAAFZAAgAAAAAB89SjLtDJkqEghRGyj6aQ/2qvWLNuMROoXmzbYbCMKMBXMAIAAAAAC8sywgND+CjhVTF6HnRQeay8y9/HnVzDI42dEPah28LQVjACAAAAAAzBj8q9Ig4uhIc6rhNDVX1lII9lzfhJ0+ig8viBhdrtQAAzYxAH0AAAAFZAAgAAAAABN2alGq9Aats1mwERNGwL/fIwZSvVCe9/8XMHTFlpUpBXMAIAAAAACuDPjJgvvbBYhbLpjMiWUCsVppiYrhvR+yMysNPN8cZAVjACAAAAAAqcsLIvb3Pe/QKYB9wQ86Ye2OFSAJEIY4ve7LMM3SwGMAAzYyAH0AAAAFZAAgAAAAAL8YB6VAqGBiWD4CBv16IBscg5J7VQCTZu87n6pj+86KBXMAIAAAAAAmxm8e68geeyAdUjSMWBHzUjneVB0pG9TBXIoE6467hAVjACAAAAAA9Kd65vKIUckNPvxl/HHNPJVnFHXyErp4qibmD7BloukAAzYzAH0AAAAFZAAgAAAAAL4yLkCTV5Dmxa5toBu4JT8ge/cITAaURIOuFuOtFUkeBXMAIAAAAAAXoFNQOMGkAj7qEJP0wQafmFSXgWGeorDVbwyOxWLIsgVjACAAAAAAaFrBx0xXIryTe7V/kX+jfPHd0057x7k7MxzDAzi0RpcAAzY0AH0AAAAFZAAgAAAAAEU8DoUp46YtYjNFS9kNXwdYxQ9IW27vCTb+VcqqfnKNBXMAIAAAAADe7vBOgYReE8X78k5ARuUnv4GmzPZzg6SbConf4L2G3wVjACAAAAAAOl/tartywd/fJj5DNRsVH/ml9tJ8KkkCbKObsFe8lHcAAzY1AH0AAAAFZAAgAAAAAPa4yKTtkUtySuWo1ZQsp2QXtPb5SYqzA5vYDnS1P6c0BXMAIAAAAADKnF58R1sXlHlsHIvCBR3YWW/qk54z9CTDhZydkD1cOQVjACAAAAAAfn/jP8K3QUILngCNkydHARyBvBHIFdaJjzV0EXsFruMAAzY2AH0AAAAFZAAgAAAAAN5WJnMBmfgpuQPyonmY5X6OdRvuHw4nhsnGRnFAQ95VBXMAIAAAAACwftzu7KVV1rmGKwXtJjs3cJ1gE3apr8+N0SAg1F2cHwVjACAAAAAAf0XyQQLd/HIYaf9EeAV0o2h12K1AV5piLCpZihznBXoAAzY3AH0AAAAFZAAgAAAAACHvDsaPhoSb6DeGnKQ1QOpGYAgK82qpnqwcmzSeWaJHBXMAIAAAAABRq3C5+dOfnkAHM5Mg5hPB3O4jhwQlBgQWLA7Ph5bhgwVjACAAAAAAm2Ukk5kkQp+PDpBCCefQOqFKKZie4hINim3yvtypsEAAAzY4AH0AAAAFZAAgAAAAAOBePJvccPMJmy515KB1AkXF5Pi8NOG4V8psWy0SPRP+BXMAIAAAAAB3dOJG9xIDtEKCRzeNnPS3bFZepMj8UKBobKpSoCPqpgVjACAAAAAAnu33SngYPOtRgdJ3aBBuxWn80ti3OO6nZJcI6eh0VfQAAzY5AH0AAAAFZAAgAAAAABUvRrDQKEXLMdhnzXRdhiL6AGNs2TojPky+YVLXs+JnBXMAIAAAAAD1kYicbEEcPzD4QtuSYQQWDPq8fuUWGddpWayKn3dT9QVjACAAAAAAUc5dxcbNYY3qMO8+Xm2xis+pH9NjPLrHqHenwDEImAEAAzcwAH0AAAAFZAAgAAAAAOvSnpujeKNen4pqc2HR63C5s5oJ1Vf4CsbKoYQvkwl5BXMAIAAAAACw2+vAMdibzd2YVVNfk81yXkFZP0WLJ82JBxJmXnYE+QVjACAAAAAAnmej008JFQAgdImjk26PYvqdATjRYRufLi+vEVVwtucAAzcxAH0AAAAFZAAgAAAAAAo0xfGG7tJ3GWhgPVhW5Zn239nTD3PadShCNRc9TwdNBXMAIAAAAADZh243oOhenu0s/P/5KZLBDh9ADqKHtSWcXpO9D2sIjgVjACAAAAAAhpNHI4+aRdWHCvZfEjlgpRBz36g05wN9p/hj3NEwc7EAAAVlACAAAAAA65pz95EthqQpfoHS9nWvdCh05AV+OokP7GUaI+7j8+wSY20AAAAAAAAAAAAAEHBheWxvYWRJZAAAAAAAEGZpcnN0T
3BlcmF0b3IAAQAAAAA=", + "base64": "DYckAAADcGF5bG9hZABXJAAABGcAQyQAAAMwAH0AAAAFZAAgAAAAAHgYoMGjEE6fAlAhICv0+doHcVX8CmMVxyq7+jlyGrvmBXMAIAAAAAC/5MQZgTHuIr/O5Z3mXPvqrom5JTQ8IeSpQGhO9sB+8gVsACAAAAAAuPSXVmJUAUpTQg/A9Bu1hYczZF58KEhVofakygbsvJQAAzEAfQAAAAVkACAAAAAA2kiWNvEc4zunJ1jzvuClFC9hjZMYruKCqAaxq+oY8EAFcwAgAAAAACofIS72Cm6s866UCk+evTH3CvKBj/uZd72sAL608rzTBWwAIAAAAADuCQ/M2xLeALF0UFZtJb22QGOhHmJv6xoO+kZIHcDeiAADMgB9AAAABWQAIAAAAABkfoBGmU3hjYBvQbjNW19kfXneBQsQQPRfUL3UAwI2cAVzACAAAAAAUpK2BUOqX/DGdX5YJniEZMWkofxHqeAbXceEGJxhp8AFbAAgAAAAAKUaLzIldNIZv6RHE+FwbMjzcNHqPESwF/37mm43VPrsAAMzAH0AAAAFZAAgAAAAAFNprhQ3ZwIcYbuzLolAT5n/vc14P9kUUQComDu6eFyKBXMAIAAAAAAcx9z9pk32YbPV/sfPZl9ALIEVsqoLXgqWLVK/tP+heAVsACAAAAAA/qxvuvJbAHwwhfrPVpmCFzNvg2cU/NXaWgqgYUZpgXwAAzQAfQAAAAVkACAAAAAAODI+pB2pCuB+YmNEUAgtMfNdt3DmSkrJ96gRzLphgb8FcwAgAAAAAAT7dewFDxUDECQ3zVq75/cUN4IP+zsqhkP5+czUwlJIBWwAIAAAAACFGeOtd5zBXTJ4JYonkn/HXZfHipUlqGwIRUcH/VTatwADNQB9AAAABWQAIAAAAACNAk+yTZ4Ewk1EnotQK8O3h1gg9I7pr9q2+4po1iJVgAVzACAAAAAAUj/LesmtEsgqYVzMJ67umVA11hJTdDXwbxDoQ71vWyUFbAAgAAAAABlnhpgTQ0WjLb5u0b/vEydrCeFjVynKd7aqb+UnvVLeAAM2AH0AAAAFZAAgAAAAAD/FIrGYFDjyYmVb7oTMVwweWP7A6F9LnyIuNO4MjBnXBXMAIAAAAACIZgJCQRZu7NhuNMyOqCn1tf+DfU1qm10TPCfj5JYV3wVsACAAAAAA5hmY4ptuNxULGf87SUFXQWGAONsL9U29duh8xqsHtxoAAzcAfQAAAAVkACAAAAAAciRW40ORJLVwchOEpz87Svb+5toAFM6LxDWv928ECwQFcwAgAAAAAN0dipyESIkszfjRzdDi8kAGaa2Hf4wrPAtiWwboZLuxBWwAIAAAAAANr4o/+l1OIbbaX5lZ3fQ/WIeOcEXjNI1F0WbSgQrzaQADOAB9AAAABWQAIAAAAACZqAyCzYQupJ95mrBJX54yIz9VY7I0WrxpNYElCI4dTQVzACAAAAAA/eyJb6d1xfE+jJlVXMTD3HS/NEYENPVKAuj56Dr2dSEFbAAgAAAAANkSt154Or/JKb31VvbZFV46RPgUp8ff/hcPORL7PpFBAAM5AH0AAAAFZAAgAAAAAI5bm3YO0Xgf0VT+qjVTTfvckecM3Cwqj7DTKZXf8/NXBXMAIAAAAAD/m+h8fBhWaHm6Ykuz0WX1xL4Eme3ErLObyEVJf8NCywVsACAAAAAAfb1VZZCqs2ivYbRzX4p5CtaCkKW+g20Pr57FWXzEZi8AAzEwAH0AAAAFZAAgAAAAANqo4+p6qdtCzcB4BX1wQ6llU7eFBnuu4MtZwp4B6mDlBXMAIAAAAAAGiz+VaukMZ+6IH4jtn4KWWdKK4/W+O+gRioQDrfzpMgVsACAAAAAAG4YYkTp80EKo59mlHExDodRQFR7njhR5dmISwUJ6ukAAAzExAH0AAAAFZAAgAAAAAPrFXmHP2Y4YAm7b/aqsdn/DPoDkv7B8egWkfe23XsM1BXMAIAAAAAAGhwpKAr7skeqHm3oseSbO7qKNhmYsuUrECBxJ5k+D2AVsACAAAAAAAqPQi9luYAu3GrFCEsVjd9z2zIDcp6SPTR2w6KQEr+IAAzEyAH0AAAAFZAAgAAAAABzjYxwAjXxXc0Uxv18rH8I3my0Aguow0kTwKyxbrm+cBXMAIAAAAADVbqJVr6IdokuhXkEtXF0C2gINLiAjMVN20lE20Vmp2QVsACAAAAAAD7K1Fx4gFaaizkIUrf+EGXQeG7QX1jadhGc6Ji471H8AAzEzAH0AAAAFZAAgAAAAAFMm2feF2fFCm/UC6AfIyepX/xJDSmnnolQIBnHcPmb5BXMAIAAAAABLI11kFrQoaNVZFmq/38aRNImPOjdJh0Lo6irI8M/AaAVsACAAAAAAOWul0oVqJ9CejD2RqphhTC98DJeRQy5EwbNerU2+4l8AAzE0AH0AAAAFZAAgAAAAAJvXB3KyNiNtQko4SSzo/9b2qmM2zU9CQTTDfLSBWMgRBXMAIAAAAAAvjuVP7KsLRDeqVqRziTKpBrjVyqKiIbO9Gw8Wl2wFTAVsACAAAAAADlE+oc1ins+paNcaOZJhBlKlObDJ4VQORWjFYocM4LgAAzE1AH0AAAAFZAAgAAAAAPGdcxDiid8z8XYnfdDivNMYVPgBKdGOUw6UStU+48CdBXMAIAAAAAARj6g1Ap0eEfuCZ4X2TsEw+Djrhto3fA5nLwPaY0vCTgVsACAAAAAAoHqiwGOUkBu8SX5U1yHho+UIFdSN2MdQN5s6bQ0EsJYAAzE2AH0AAAAFZAAgAAAAAP5rGPrYGt3aKob5f/ldP0qrW7bmWvqnKY4QwdDWz400BXMAIAAAAADTQkW2ymaaf/bhteOOGmSrIR97bAnJx+yN3yMj1bTeewVsACAAAAAADyQnHGH2gF4w4L8axUsSTf6Ubk7L5/eoFOJk12MtZAoAAzE3AH0AAAAFZAAgAAAAAAlz6wJze5UkIxKpJOZFGCOf3v2KByWyI6NB6JM9wNcBBXMAIAAAAABUC7P/neUIHHoZtq0jFVBHY75tSFYr1Y5S16YN5XxC1QVsACAAAAAAgvxRbXDisNnLY3pfsjDdnFLtkvYUC4lhA68eBXc7KAwAAzE4AH0AAAAFZAAgAAAAAFJ8AtHcjia/9Y5pLEc3qVgH5xKiXw12G9Kn2A1EY8McBXMAIAAAAAAxe7Bdw7eUSBk/oAawa7uicTEDgXLymRNhBy1LAxhDvwVsACAAAAAAxKPaIBKVx3jTA+R/el7P7AZ7efrmTGjJs3Hj/YdMddwAAzE5AH0AAAAFZAAgAAAAAO8uwQUaKFb6vqR3Sv3Wn4QAonC2exOC9lGG1juqP5DtBXMAIAAAAABZf1KyJgQg8/Rf5c02DgDK2aQu0rNCOvaL60ohDHyY+gVsACAAAAAAqyEjfKC8lYoIfoXYHUqHZPoaA6EK5BAZy5dxXZmay4kAAzIwAH0AAAAFZAAgAAAAAE8YtqyRsGCeiR6hhiyisR/hccmK4nZqIMzO4lUB
mEFzBXMAIAAAAAC1UYOSKqAeG1UJiKjWFVskRhuFKpj9Ezy+lICZvFlN5AVsACAAAAAA6Ct9nNMKyRazn1OKnRKagm746CGu+jyhbL1qJnZxGi0AAzIxAH0AAAAFZAAgAAAAAPhCrMausDx1QUIEqp9rUdRKyM6a9AAx7jQ3ILIu8wNIBXMAIAAAAACmH8lotGCiF2q9VQxhsS+7LAZv79VUAsOUALaGxE/EpAVsACAAAAAAnc1xCKfdvbUEc8F7XZqlNn1C+hZTtC0I9I3LL06iaNkAAzIyAH0AAAAFZAAgAAAAAOBi/GAYFcstMSJPgp3VkMiuuUUCrZytvqYaU8dwm8v2BXMAIAAAAACEZSZVyD3pKzGlbdwlYmWQhHHTV5SnNLknl2Gw8IaUTQVsACAAAAAAfsLZsEDcWSuNsIo/TD1ReyQW75HPMgmuKZuWFOLKRLoAAzIzAH0AAAAFZAAgAAAAAIQuup+YGfH3mflzWopN8J1X8o8a0d9CSGIvrA5HOzraBXMAIAAAAADYvNLURXsC2ITMqK14LABQBI+hZZ5wNf24JMcKLW+84AVsACAAAAAACzfjbTBH7IwDU91OqLAz94RFkoqBOkzKAqQb55gT4/MAAzI0AH0AAAAFZAAgAAAAAKsh0ADyOnVocFrOrf6MpTrNvAj8iaiE923DPryu124gBXMAIAAAAADg24a8NVE1GyScc6tmnTbmu5ulzO+896fE92lN08MeswVsACAAAAAAaPxcOIxnU7But88/yadOuDJDMcCywwrRitaxMODT4msAAzI1AH0AAAAFZAAgAAAAAKkVC2Y6HtRmv72tDnPUSjJBvse7SxLqnr09/Uuj9sVVBXMAIAAAAABYNFUkH7ylPMN+Bc3HWX1e0flGYNbtJNCY9SltJCW/UAVsACAAAAAAZYK/f9H4OeihmpiFMH7Wm7uLvs2s92zNA8wyrNZTsuMAAzI2AH0AAAAFZAAgAAAAADDggcwcb/Yn1Kk39sOHsv7BO/MfP3m/AJzjGH506Wf9BXMAIAAAAAAYZIsdjICS0+BDyRUPnrSAZfPrwtuMaEDEn0/ijLNQmAVsACAAAAAAGPnYVvo2ulO9z4LGd/69NAklfIcZqZvFX2KK0s+FcTUAAzI3AH0AAAAFZAAgAAAAAEWY7dEUOJBgjOoWVht1wLehsWAzB3rSOBtLgTuM2HC8BXMAIAAAAAAAoswiHRROurjwUW8u8D5EUT+67yvrgpB/j6PzBDAfVwVsACAAAAAA6NhRTYFL/Sz4tao7vpPjLNgAJ0FX6P/IyMW65qT6YsMAAzI4AH0AAAAFZAAgAAAAAPZaapeAUUFPA7JTCMOWHJa9lnPFh0/gXfAPjA1ezm4ZBXMAIAAAAACmJvLY2nivw7/b3DOKH/X7bBXjJwoowqb1GtEFO3OYgAVsACAAAAAA/JcUoyKacCB1NfmH8vYqC1f7rd13KShrQqV2r9QBP44AAzI5AH0AAAAFZAAgAAAAAK00u6jadxCZAiA+fTsPVDsnW5p5LCr4+kZZZOTDuZlfBXMAIAAAAAAote4zTEYMDgaaQbAdN8Dzv93ljPLdGjJzvnRn3KXgtQVsACAAAAAAxXd9Mh6R3mnJy8m7UfqMKi6oD5DlZpkaOz6bEjMOdiwAAzMwAH0AAAAFZAAgAAAAAFbgabdyymiEVYYwtJSWa7lfl/oYuj/SukzJeDOR6wPVBXMAIAAAAADAFGFjS1vPbN6mQEhkDYTD6V2V23Ys9gUEUMGNvMPkaAVsACAAAAAAL/D5Sze/ZoEanZLK0IeEkhgVkxEjMWVCfmJaD3a8uNIAAzMxAH0AAAAFZAAgAAAAABNMR6UBv2E627CqLtQ/eDYx7OEwQ7JrR4mSHFa1N8tLBXMAIAAAAAAxH4gucI4UmNVB7625C6hFSVCuIpJO3lusJlPuL8H5EQVsACAAAAAAVLHNg0OUVqZ7WGOP53BkTap9FOw9dr1P4J8HxqFqU04AAzMyAH0AAAAFZAAgAAAAAG8cd6WBneNunlqrQ2EmNf35W7OGObGq9WL4ePX+LUDmBXMAIAAAAAAjJ2+sX87NSis9hBsgb1QprVRnO7Bf+GObCGoUqyPE4wVsACAAAAAAs9c9SM49/pWmyUQKslpt3RTMBNSRppfNO0JBvUqHPg0AAzMzAH0AAAAFZAAgAAAAAFWOUGkUpy8yf6gB3dio/aOfRKh7XuhvsUj48iESFJrGBXMAIAAAAAAY7sCDMcrUXvNuL6dO0m11WyijzXZvPIcOKob6IpC4PQVsACAAAAAAJOP+EHz6awDb1qK2bZQ3kTV7wsj5Daj/IGAWh4g7omAAAzM0AH0AAAAFZAAgAAAAAGUrIdKxOihwNmo6B+aG+Ag1qa0+iqdksHOjQj+Oy9bZBXMAIAAAAABwa5dbI2KmzBDNBTQBEkjZv4sPaeRkRNejcjdVymRFKQVsACAAAAAA4ml/nm0gJNTcJ4vuD+T2Qfq2fQZlibJp/j6MOGDrbHMAAzM1AH0AAAAFZAAgAAAAAOx89xV/hRk64/CkM9N2EMK6aldII0c8smdcsZ46NbP8BXMAIAAAAADBF6tfQ+7q9kTuLyuyrSnDgmrdmrXkdhl980i1KHuGHgVsACAAAAAACUqiFqHZdGbwAA+hN0YUE5zFg+H+dabIB4dj5/75W/YAAzM2AH0AAAAFZAAgAAAAAMkN0L1oQWXhjwn9rAdudcYeN8/5VdCKU8cmDt7BokjsBXMAIAAAAAAT62pGXoRwExe9uvgYOI0hg5tOxilrWfoEmT0SMglWJwVsACAAAAAAlVz4dhiprSbUero6JFfxzSJGclg63oAkAmgbSwbcYxIAAzM3AH0AAAAFZAAgAAAAANxfa4xCoaaB7k1C1RoH1LBhsCbN2yEq15BT9b+iqEC4BXMAIAAAAACAX9LV8Pemfw7NF0iB1/85NzM1Ef+1mUfyehacUVgobQVsACAAAAAAVq4xpbymLk0trPC/a2MvB39I7hRiX8EJsVSI5E5hSBkAAzM4AH0AAAAFZAAgAAAAAOYIYoWkX7dGuyKfi3XssUlc7u/gWzqrR9KMkikKVdmSBXMAIAAAAABVF2OYjRTGi9Tw8XCAwZWLpX35Yl271TlNWp6N/nROhAVsACAAAAAA0nWwYzXQ1+EkDvnGq+SMlq20z+j32Su+i/A95SggPb4AAzM5AH0AAAAFZAAgAAAAAIy0+bXZi10QC+q7oSOLXK5Fee7VEk/qHSXukfeVIfgzBXMAIAAAAAAQ3IIV/JQCHW95AEbH5zGIHtJqyuPjWPMIZ+VmQHlxEwVsACAAAAAAp0jYsyohKv9Pm+4k+DplEGbl9WLZpAJzitrcDj4CNsMAAzQwAH0AAAAFZAAgAAAAAL5SOJQ3LOhgdXJ5v086NNeAl1qonQnchObdpZJ1kHeEBXMAIAAAAAA+tEqTXODtik+ydJZSnUqXF9f18bPeze9eWtR7ExZJgQVsACAAAAAAbrkZCVgB9Qsp4IAbdf+bD4fT6Boqk5UtuA/zhNrh1y0AAzQ
xAH0AAAAFZAAgAAAAAKl8zcHJRDjSjJeV/WvMxulW1zrTFtaeBy/aKKhadc6UBXMAIAAAAADBdWQl5SBIvtZZLIHszePwkO14W1mQ0izUk2Ov21cPNAVsACAAAAAAHErCYycpqiIcCZHdmPL1hi+ovLQk4TAvENpfLdTRamQAAzQyAH0AAAAFZAAgAAAAAFvotcNaoKnVt5CBCOPwjexFO0WGWuaIGL6H/6KSau+6BXMAIAAAAAD2y2mBN5xPu5PJoY2zcr0GnQDtHRBogA5+xzIxccE9fwVsACAAAAAAdS34xzJesnUfxLCcc1U7XzUqLy8MAzV/tcjbqaD3lkMAAzQzAH0AAAAFZAAgAAAAAPezU0/vNT4Q4YKbTbaeHqcwNLT+IjW/Y9QFpIooihjPBXMAIAAAAACj2x4O4rHter8ZnTws5LAP9jJ/6kk9C/V3vL50LoFZHAVsACAAAAAAQdBDF3747uCVP5lB/zr8VmzxJfTSZHBKeIgm5FyONXwAAzQ0AH0AAAAFZAAgAAAAAMqpayM2XotEFmm0gwQd9rIzApy0X+7HfOhNk6VU7F5lBXMAIAAAAACJR9+q5T9qFHXFNgGbZnPubG8rkO6cwWhzITQTmd6VgwVsACAAAAAAOZLQ6o7e4mVfDzbpQioa4d3RoTvqwgnbmc5Qh2wsZuoAAzQ1AH0AAAAFZAAgAAAAANCeyW+3oebaQk+aqxNVhAcT/BZ5nhsTVdKS3tMrLSvWBXMAIAAAAADxRFMDhkyuEc++WnndMfoUMLNL7T7rWoeblcrpSI6soQVsACAAAAAAdBuBMJ1lxt0DRq9pOZldQqchLs3B/W02txcMLD490FEAAzQ2AH0AAAAFZAAgAAAAAIbo5YBTxXM7HQhl7UP9NNgpPGFkBx871r1B65G47+K8BXMAIAAAAAC21dJSxnEhnxO5gzN5/34BL4von45e1meW92qowzb8fQVsACAAAAAAm3Hk2cvBN0ANaR5jzeZE5TsdxDvJCTOT1I01X7cNVaYAAzQ3AH0AAAAFZAAgAAAAABm/6pF96j26Jm7z5KkY1y33zcAEXLx2n0DwC03bs/ixBXMAIAAAAAD01OMvTZI/mqMgxIhA5nLs068mW+GKl3OW3ilf2D8+LgVsACAAAAAAaLvJDrqBESTNZSdcXsd+8GXPl8ZkUsGpeYuyYVv/kygAAzQ4AH0AAAAFZAAgAAAAAJ/D3+17gaQdkBqkL2wMwccdmCaVOtxzIkM8VyI4xI5zBXMAIAAAAAAggLVmkc5u+YzBR+oNE+XgLVp64fC6MzUb/Ilu/Jsw0AVsACAAAAAACz3HVKdWkx82/kGbVpcbAeZtsj2R5Zr0dEPfle4IErkAAzQ5AH0AAAAFZAAgAAAAAJMRyUW50oaTzspS6A3TUoXyC3gNYQoShUGPakMmeVZrBXMAIAAAAACona2Pqwt4U2PmFrtmu37jB9kQ/12okyAVtYa8TQkDiQVsACAAAAAAltJJKjCMyBTJ+4PkdDCPJdeX695P8P5h7WOZ+kmExMAAAzUwAH0AAAAFZAAgAAAAAByuYl8dBvfaZ0LO/81JW4hYypeNmvLMaxsIdvqMPrWoBXMAIAAAAABNddwobOUJzm9HOUD8BMZJqkNCUCqstHZkC76FIdNg9AVsACAAAAAAQQOkIQtkyNavqCnhQbNg3HfqrJdsAGaoxSJePJl1qXsAAzUxAH0AAAAFZAAgAAAAAHEzLtfmF/sBcYPPdj8867VmmQyU1xK9I/3Y0478azvABXMAIAAAAAAcmyFajZPnBTbO+oLInNwlApBocUekKkxz2hYFeSlQ+gVsACAAAAAAZ6IkrOVRcC8vSA6Vb4fPWZJrYexXhEabIuYIeXNsCSgAAzUyAH0AAAAFZAAgAAAAAJam7JYsZe2cN20ZYm2W3v1pisNt5PLiniMzymBLWyMtBXMAIAAAAABxCsKVMZMTn3n+R2L7pVz5nW804r8HcK0mCBw3jUXKXAVsACAAAAAA7j3JGnNtR64P4dJLeUoScFRGfa8ekjh3dvhw46sRFk0AAzUzAH0AAAAFZAAgAAAAAMXrXx0saZ+5gORmwM2FLuZG6iuO2YS+1IGPoAtDKoKBBXMAIAAAAADIQsxCr8CfFKaBcx8kIeSywnGh7JHjKRJ9vJd9x79y7wVsACAAAAAAcvBV+SykDYhmRFyVYwFYB9oBKBSHr55Jdz2cXeowsUQAAzU0AH0AAAAFZAAgAAAAACbzcUD3INSnCRspOKF7ubne74OK9L0FTZvi9Ay0JVDYBXMAIAAAAADPebVQH8Btk9rhBIoUOdSAdpPvz7qIY4UC2i6IGisSAQVsACAAAAAAiBunJi0mPnnXdnldiq+If8dcb/n6apHnaIFt+oyYO1kAAzU1AH0AAAAFZAAgAAAAACUc2CtD1MK/UTxtv+8iA9FoHEyTwdl43HKeSwDw2Lp5BXMAIAAAAACCIduIdw65bQMzRYRfjBJj62bc69T4QqH4QoWanwlvowVsACAAAAAAM0TV7S+aPVVzJOQ+cpSNKHTwyQ0mWa8tcHzfk3nR+9IAAzU2AH0AAAAFZAAgAAAAAHSaHWs/dnmI9sc7nB50VB2Bzs0kHapMHCQdyVEYY30TBXMAIAAAAACkV22lhEjWv/9/DubfHBAcwJggKI5mIbSK5L2nyqloqQVsACAAAAAAS19m7DccQxgryOsBJ3GsCs37yfQqNi1G+S6fCXpEhn4AAzU3AH0AAAAFZAAgAAAAAAL8jhNBG0KXXZhmZ0bPXtfgapJCB/AI+BEHB0eZ3C75BXMAIAAAAADHx/fPa639EBmGV5quLi8IQT600ifiKSOhTDOK19DnzwVsACAAAAAAlyLTDVkHxbayklD6Qymh3odIK1JHaOtps4f4HR+PcDgAAzU4AH0AAAAFZAAgAAAAAAxgeclNl09H7HvzD1oLwb2YpFca5eaX90uStYXHilqKBXMAIAAAAACMU5pSxzIzWlQxHyW170Xs9EhD1hURASQk+qkx7K5Y6AVsACAAAAAAJbMMwJfNftA7Xom8Bw/ghuZmSa3x12vTZxBUbV8m888AAzU5AH0AAAAFZAAgAAAAABmO7QD9vxWMmFjIHz13lyOeV6vHT6mYCsWxF7hb/yOjBXMAIAAAAACT9lmgkiqzuWG24afuzYiCeK9gmJqacmxAruIukd0xEAVsACAAAAAAZa0/FI/GkZR7CtX18Xg9Tn9zfxkD0UoaSt+pIO5t1t4AAzYwAH0AAAAFZAAgAAAAAB89SjLtDJkqEghRGyj6aQ/2qvWLNuMROoXmzbYbCMKMBXMAIAAAAAC8sywgND+CjhVTF6HnRQeay8y9/HnVzDI42dEPah28LQVsACAAAAAAoxv7UKh0RqUAWcOsQvU123zO1qZn73Xfib0qncZCB34AAzYxAH0AAAAFZAAgAAAAABN2alGq9Aats1mwERNGwL/fIwZSvVCe9/8XMHTFlpUpBXMAIAAAAACuDPjJgvvbBYhbLpjMiWUCsVppiYrhvR+yMysNPN8cZA
VsACAAAAAAKpADjc4bzIZMi9Q/+oe0EMRJHYQt6dlo1x/lRquagqkAAzYyAH0AAAAFZAAgAAAAAL8YB6VAqGBiWD4CBv16IBscg5J7VQCTZu87n6pj+86KBXMAIAAAAAAmxm8e68geeyAdUjSMWBHzUjneVB0pG9TBXIoE6467hAVsACAAAAAAV76JZAlYpgC/Zl8awx2ArCg1uuyy2XVTSkp0wUMi/7UAAzYzAH0AAAAFZAAgAAAAAL4yLkCTV5Dmxa5toBu4JT8ge/cITAaURIOuFuOtFUkeBXMAIAAAAAAXoFNQOMGkAj7qEJP0wQafmFSXgWGeorDVbwyOxWLIsgVsACAAAAAAc4Un6dtIFe+AQ+RSfNWs3q63RTHhmyc+5GKRRdpWRv8AAzY0AH0AAAAFZAAgAAAAAEU8DoUp46YtYjNFS9kNXwdYxQ9IW27vCTb+VcqqfnKNBXMAIAAAAADe7vBOgYReE8X78k5ARuUnv4GmzPZzg6SbConf4L2G3wVsACAAAAAA78YHWVkp6HbZ0zS4UL2z/2pj9vPDcMDt7zTv6NcRsVsAAzY1AH0AAAAFZAAgAAAAAPa4yKTtkUtySuWo1ZQsp2QXtPb5SYqzA5vYDnS1P6c0BXMAIAAAAADKnF58R1sXlHlsHIvCBR3YWW/qk54z9CTDhZydkD1cOQVsACAAAAAAHW3ERalTFWKMzjuXF3nFh0pSrQxM/ojnPbPhc4v5MaQAAzY2AH0AAAAFZAAgAAAAAN5WJnMBmfgpuQPyonmY5X6OdRvuHw4nhsnGRnFAQ95VBXMAIAAAAACwftzu7KVV1rmGKwXtJjs3cJ1gE3apr8+N0SAg1F2cHwVsACAAAAAATDW0reyaCjbJuVLJzbSLx1OBuBoQu+090kgW4RurVacAAzY3AH0AAAAFZAAgAAAAACHvDsaPhoSb6DeGnKQ1QOpGYAgK82qpnqwcmzSeWaJHBXMAIAAAAABRq3C5+dOfnkAHM5Mg5hPB3O4jhwQlBgQWLA7Ph5bhgwVsACAAAAAAqkC8zYASvkVrp0pqmDyFCkPaDmD/ePAJpMuNOCBhni8AAzY4AH0AAAAFZAAgAAAAAOBePJvccPMJmy515KB1AkXF5Pi8NOG4V8psWy0SPRP+BXMAIAAAAAB3dOJG9xIDtEKCRzeNnPS3bFZepMj8UKBobKpSoCPqpgVsACAAAAAAPG3IxQVOdZrr509ggm5FKizWWoZPuVtOgOIGZ3m+pdEAAzY5AH0AAAAFZAAgAAAAABUvRrDQKEXLMdhnzXRdhiL6AGNs2TojPky+YVLXs+JnBXMAIAAAAAD1kYicbEEcPzD4QtuSYQQWDPq8fuUWGddpWayKn3dT9QVsACAAAAAA9+Sf7PbyFcY45hP9oTfjQiOUS3vEIAT8C0vOHymwYSUAAzcwAH0AAAAFZAAgAAAAAOvSnpujeKNen4pqc2HR63C5s5oJ1Vf4CsbKoYQvkwl5BXMAIAAAAACw2+vAMdibzd2YVVNfk81yXkFZP0WLJ82JBxJmXnYE+QVsACAAAAAArQ/E1ACyhK4ZyLqH9mNkCU7WClqRQTGyW9tciSGG/EMAAzcxAH0AAAAFZAAgAAAAAAo0xfGG7tJ3GWhgPVhW5Zn239nTD3PadShCNRc9TwdNBXMAIAAAAADZh243oOhenu0s/P/5KZLBDh9ADqKHtSWcXpO9D2sIjgVsACAAAAAAlgTPaoQKz+saU8rwCT3UiNOdG6hdpjzFx9GBn08ZkBEAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAAA==", "subType": "06" } } @@ -296,7 +292,7 @@ }, "u": { "$set": { - "encryptedDouble": { + "encryptedDoubleNoPrecision": { "$$type": "binData" } } @@ -308,7 +304,6 @@ "schema": { "default.default": { "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", "ecocCollection": "enxcol_.default.ecoc", "fields": [ { @@ -318,7 +313,7 @@ "subType": "04" } }, - "path": "encryptedDouble", + "path": "encryptedDoubleNoPrecision", "bsonType": "double", "queries": { "queryType": "rangePreview", @@ -332,24 +327,6 @@ } ] } - }, - "deleteTokens": { - "default.default": { - "encryptedDouble": { - "e": { - "$binary": { - "base64": "65pz95EthqQpfoHS9nWvdCh05AV+OokP7GUaI+7j8+w=", - "subType": "00" - } - }, - "o": { - "$binary": { - "base64": "noN+05JsuO1oDg59yypIGj45i+eFH6HOTXOPpeZ//Mk=", - "subType": "00" - } - } - } - } } }, "$db": "default" @@ -362,7 +339,7 @@ "data": [ { "_id": 0, - "encryptedDouble": { + "encryptedDoubleNoPrecision": { "$$type": "binData" }, "__safeContent__": [ @@ -760,7 +737,7 @@ }, { "_id": 1, - "encryptedDouble": { + "encryptedDoubleNoPrecision": { "$$type": "binData" }, "__safeContent__": [ diff --git a/test/client-side-encryption/spec/legacy/fle2-Range-DoublePrecision-Aggregate.json b/test/client-side-encryption/spec/legacy/fle2v2-Range-DoublePrecision-Aggregate.json similarity index 86% rename from test/client-side-encryption/spec/legacy/fle2-Range-DoublePrecision-Aggregate.json rename to test/client-side-encryption/spec/legacy/fle2v2-Range-DoublePrecision-Aggregate.json index f2ea49ad75..b121c72f14 100644 --- a/test/client-side-encryption/spec/legacy/fle2-Range-DoublePrecision-Aggregate.json +++ 
b/test/client-side-encryption/spec/legacy/fle2v2-Range-DoublePrecision-Aggregate.json @@ -1,7 +1,8 @@ { "runOn": [ { - "minServerVersion": "6.2.0", + "minServerVersion": "7.0.0", + "serverless": "forbid", "topology": [ "replicaset", "sharded", @@ -13,9 +14,6 @@ "collection_name": "default", "data": [], "encrypted_fields": { - "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", - "ecocCollection": "enxcol_.default.ecoc", "fields": [ { "keyId": { @@ -207,7 +205,6 @@ "schema": { "default.default": { "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", "ecocCollection": "enxcol_.default.ecoc", "fields": [ { @@ -264,7 +261,6 @@ "schema": { "default.default": { "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", "ecocCollection": "enxcol_.default.ecoc", "fields": [ { @@ -313,7 +309,7 @@ "encryptedDoublePrecision": { "$gt": { "$binary": { - "base64": "CvoJAAADcGF5bG9hZADKCQAABGcAjgkAAAMwAH0AAAAFZAAgAAAAAHdJ2Vnb4MMzqVYVssjSdDy8XU4GVzMTfGifGETgQ2mYBXMAIAAAAAD7cFfKJGIXo6PjyeX2ria02CckW7dWFDoY/3FyBdm1NQVjACAAAAAAqZO+/+gRWlPaMOvuiXizSmBe7lp1VWg1vJ4UmW8o3bQAAzEAfQAAAAVkACAAAAAAlmvfDrZoydUet4eCVMq7z6a58Ea+1HLJOWxN5lNcrWEFcwAgAAAAAEBo5AWZyC41b9ayjWNQSL4iYEAIwR/JG+ssN8bdoK9RBWMAIAAAAAD4FTKJ6CTzKBAyAwZCLUoDEfnZTRZmhF1q/2hnDzmG9gADMgB9AAAABWQAIAAAAABbiLaoxAA6rinMJw1hC8ZUiq6UU1AQaPFn/py/Y06WuQVzACAAAAAAhtDasFkvYE7SCNu1je/hxdE9TJtAvvH3NtdEbKzNbCUFYwAgAAAAAHHy019aPatHTST+0wGsmukUcsQNQj6KpoS9b7iGeThAAAMzAH0AAAAFZAAgAAAAALORWwSr+tYNxcil2KIGSbNhTHvcPbdj+rLVQNx21S/KBXMAIAAAAAD6diZBkPEJ1cQy06LAxdbNK8Nlxbb44fH4Wk3Y3260nQVjACAAAAAAvUc1q7pyhjU0ilgmwiKkHIY3V4/LxO+Y2uT7eSpBOs8AAzQAfQAAAAVkACAAAAAAG5XMK96PjClNlUvg82j4pMY1YxsznZfj4uNweD394FoFcwAgAAAAAKHgQLdGJHkrfFg9nB93Ac+3VgBw6aU44MTkKIQ91dZoBWMAIAAAAACtbNc1DCoUUyzlkrYmJi4NlwOqLYmb6au4pDc8clXVXwADNQB9AAAABWQAIAAAAAA6M++b9I0YFemmWBAWAE3glu2Ah3Ta1FBxAQEIWS0toAVzACAAAAAANXYTqPf1Y6X3Ns6YQIX0C3FKCyWUo+Kk+fNcQvc0WSoFYwAgAAAAAAaqju6Dv8wqXxcsIbP67V1QGaD5kNTFofZ9Zuf1LGnKAAM2AH0AAAAFZAAgAAAAAKl8bV1riH/uyJ+X0HHd3+18k2cJl2dQFXCdoagutFcaBXMAIAAAAABm8F2Ew9f0VOABdcF+lP0Bi+zWvEUPniWgrxPq/Sx3uwVjACAAAAAAQd2pWVqlmmLg8m8xbs7yLewmR0Z6UQgXofbCsMHaGSoAAzcAfQAAAAVkACAAAAAAsYZD8JEP6kYsPncFnNZwJxhu4YtUTKPNcjHtv67H+rYFcwAgAAAAAI4LqZcRkvbs/2F62Flu0pixNcor4WmBD0DHGaf039wLBWMAIAAAAAAqzpfyBpr4Ano+nFWJyyTuIJelJgiRDnMHQqdeqV8JaAADOAB9AAAABWQAIAAAAAC0vdAi+dmoIXvZ5LqUqvyKV9/tHqSI2SWiSJO5pTnA2wVzACAAAAAAS2qvf9fvfVUH5WtsVxjxmskpGjYTQV34LwvQQw1y9wIFYwAgAAAAAFhNY4qwNntyA+GIoNHZsTkIUbPgy4TBlvNnTPjp4bMFAAM5AH0AAAAFZAAgAAAAAH31lb/srBcrOXkzddCwAnclsR5/3QijEVgECs2JjOWBBXMAIAAAAABg7+prDT73YcCvLE5QbuIrqGcjLc5pQD2Miq0d29yrxgVjACAAAAAAFKqAqXG/ktejFQ7fM2aobO2VmEvZLXnRaJH97Jy/sJYAAzEwAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVjACAAAAAAfsofSP7nQHv8ic8ZW0aNlWxplS46Z+mywPR4rQk+wcgAAzExAH0AAAAFZAAgAAAAAFdthRhe2Q8CvxGIhjTJZv0Lk97GkHciTPxZ/mckLoNaBXMAIAAAAAAqOxsAr23LOVB0DIHbPf9UDJJRFXY2YoKbjhRqw5psbQVjACAAAAAA7ty+Nif6KjS3v1zWKaHX9n4Zj3XC4ajuCduKNIYr3l8AAzEyAH0AAAAFZAAgAAAAAMWymwwbvIeMqmnKWWifUqoCxOsdpnonM2qdLPyjqJO/BXMAIAAAAAB6IDmmpUhBD2zpRj8/y/kmOSXcjuIU14sNh6GKSsg2uwVjACAAAAAABSWO0Ii+NGcsHZQ9MR5EjPXVKeXlI4FQ1pcxeKDiuooAAzEzAH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVjACAAAAAAD51NYesiO4Fo7w7iWBfqAFxEqkaDVctpvzZ28nT4SE8AAzE0AH0AAAAFZAAgAAAAAJaRYmo8zqI2BEUzdSwp4tVRpPmVWsfydkYN3UHh6TMuBXMAIAAAAAAeD6mDnQeLlbC9i0sVgE8+RH6y+e94OJQ0tJ0PvblVSgVjACAAAAAAKUhYSt4nvvUfbNgPJ2E79SciVZ0ZzbzoZ2nKr4ewNLsAAzE1AH0AA
AAFZAAgAAAAAAZZ538coNPwyRjhEwr5P8Xw32oWOJF+R+nfCGgy2qO3BXMAIAAAAACOPLnJlKwGNPDBReRKnHfteq0wFb3ezhrc7BVXs8RUHwVjACAAAAAAzCICkPZAkfTiD0MUt155dIPgLJ4/e0qFTM2FR0U261YAAzE2AH0AAAAFZAAgAAAAAFH9l9GGA1I52atJV5jNUf1lx8jBjoEoVoME97v5GFJiBXMAIAAAAAC1qH3Kd78Dr9NGbw7y9D/XYBwv5h1LLO8la5OU7g8UkQVjACAAAAAAn27H0Mpwatgc1R/4nXSRjsG2PzB0ol5YR9f3mCb2y/0AAzE3AH0AAAAFZAAgAAAAADtbVEI2tdkrowEMdkacD2w0Y3T3Ofi7PH6HmA6sP0c/BXMAIAAAAADuBSROnZHA+NgUPH8d0LnWFiDsM2bY8bzjC1+elSsIygVjACAAAAAAMinHEu4wkbeOpdZbXQ94q5o5pIEubqXUDrTRYOGmJC0AAzE4AH0AAAAFZAAgAAAAAAh3WpeMVlikPFYj9hLj+fmIqVt6omCSF75W3TPExyWpBXMAIAAAAAAsQkRmwqeVj2gGE03orb6PtrIzDt6dDU3hgSQi8E2wKgVjACAAAAAAvlZo8Qj3eAdxzZxN5sHKhxi+a9Npj7cZC5+pE6qrOawAAAVlACAAAAAA65pz95EthqQpfoHS9nWvdCh05AV+OokP7GUaI+7j8+wSY20AAAAAAAAAAAAAEHBheWxvYWRJZAAAAAAAEGZpcnN0T3BlcmF0b3IAAQAAAAA=", + "base64": "DdIJAAADcGF5bG9hZACiCQAABGcAjgkAAAMwAH0AAAAFZAAgAAAAAHdJ2Vnb4MMzqVYVssjSdDy8XU4GVzMTfGifGETgQ2mYBXMAIAAAAAD7cFfKJGIXo6PjyeX2ria02CckW7dWFDoY/3FyBdm1NQVsACAAAAAAhEPSNv4M023A3hzNFuy83+hIKuZ2mKRY954N++aEOBUAAzEAfQAAAAVkACAAAAAAlmvfDrZoydUet4eCVMq7z6a58Ea+1HLJOWxN5lNcrWEFcwAgAAAAAEBo5AWZyC41b9ayjWNQSL4iYEAIwR/JG+ssN8bdoK9RBWwAIAAAAACEndE0SLxFSElOrNnqeX0EPmgDio3udZjVREy4JLS3sQADMgB9AAAABWQAIAAAAABbiLaoxAA6rinMJw1hC8ZUiq6UU1AQaPFn/py/Y06WuQVzACAAAAAAhtDasFkvYE7SCNu1je/hxdE9TJtAvvH3NtdEbKzNbCUFbAAgAAAAAIGepU1RSCF8sWODHEpKglsoqw3VBBH4a/URGxgGzbq2AAMzAH0AAAAFZAAgAAAAALORWwSr+tYNxcil2KIGSbNhTHvcPbdj+rLVQNx21S/KBXMAIAAAAAD6diZBkPEJ1cQy06LAxdbNK8Nlxbb44fH4Wk3Y3260nQVsACAAAAAA1eYAZBFHlDiaDAljWi8blGQ2nvvZa5AO5doeo0SFZsgAAzQAfQAAAAVkACAAAAAAG5XMK96PjClNlUvg82j4pMY1YxsznZfj4uNweD394FoFcwAgAAAAAKHgQLdGJHkrfFg9nB93Ac+3VgBw6aU44MTkKIQ91dZoBWwAIAAAAAAPxXmi+SDJ+40A0KdwfRczexlZQrHjIA+D3oUB0EY9tAADNQB9AAAABWQAIAAAAAA6M++b9I0YFemmWBAWAE3glu2Ah3Ta1FBxAQEIWS0toAVzACAAAAAANXYTqPf1Y6X3Ns6YQIX0C3FKCyWUo+Kk+fNcQvc0WSoFbAAgAAAAAA+uJUw1ICYgyeygSRe206VTWVtUnhdci3iHbyP5YtEVAAM2AH0AAAAFZAAgAAAAAKl8bV1riH/uyJ+X0HHd3+18k2cJl2dQFXCdoagutFcaBXMAIAAAAABm8F2Ew9f0VOABdcF+lP0Bi+zWvEUPniWgrxPq/Sx3uwVsACAAAAAAJfFErjZ6BPhsw5LjJLqNtKDLJ4zV0eIZppQpd9b0wZoAAzcAfQAAAAVkACAAAAAAsYZD8JEP6kYsPncFnNZwJxhu4YtUTKPNcjHtv67H+rYFcwAgAAAAAI4LqZcRkvbs/2F62Flu0pixNcor4WmBD0DHGaf039wLBWwAIAAAAAD4wUR3xd9lKltcqqo8LYvdMQWzCRobkV/ppKB/yn5dUgADOAB9AAAABWQAIAAAAAC0vdAi+dmoIXvZ5LqUqvyKV9/tHqSI2SWiSJO5pTnA2wVzACAAAAAAS2qvf9fvfVUH5WtsVxjxmskpGjYTQV34LwvQQw1y9wIFbAAgAAAAAE0+FKuK7HxbypvCeEJzMTcjOWE0ScYOlTBMUNlIv55hAAM5AH0AAAAFZAAgAAAAAH31lb/srBcrOXkzddCwAnclsR5/3QijEVgECs2JjOWBBXMAIAAAAABg7+prDT73YcCvLE5QbuIrqGcjLc5pQD2Miq0d29yrxgVsACAAAAAAetRiPwDSFWBzpWSWkOKWM6fKStRJ8SyObnpc79ux8p0AAzEwAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzExAH0AAAAFZAAgAAAAAFdthRhe2Q8CvxGIhjTJZv0Lk97GkHciTPxZ/mckLoNaBXMAIAAAAAAqOxsAr23LOVB0DIHbPf9UDJJRFXY2YoKbjhRqw5psbQVsACAAAAAA0G2GD8ZQjDBntjLpW4rqwKRS6HiUjL03g1N6chANozcAAzEyAH0AAAAFZAAgAAAAAMWymwwbvIeMqmnKWWifUqoCxOsdpnonM2qdLPyjqJO/BXMAIAAAAAB6IDmmpUhBD2zpRj8/y/kmOSXcjuIU14sNh6GKSsg2uwVsACAAAAAAWMFPNOk3EMSQDS9JGPSMIQP0oNGVugxXKKUrIPPlhHgAAzEzAH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzE0AH0AAAAFZAAgAAAAAJaRYmo8zqI2BEUzdSwp4tVRpPmVWsfydkYN3UHh6TMuBXMAIAAAAAAeD6mDnQeLlbC9i0sVgE8+RH6y+e94OJQ0tJ0PvblVSgVsACAAAAAAWp4jvretbDEsqEMzP/WLTnwOiJwCtfrCiB6m8k+yEMoAAzE1AH0AAAAFZAAgAAAAAAZZ538coNPwyRjhEwr5P8Xw32oWOJF+R+nfCGgy2qO3BXMAIAAAAACOPLnJlKwGNPDBReRKnHfteq0wFb3ezhrc7BVXs8RUHwVsACAAAAAA+lGesNk3+SyB
/60rSvdQ2aN2vfJPR7llJVhufGTNhHkAAzE2AH0AAAAFZAAgAAAAAFH9l9GGA1I52atJV5jNUf1lx8jBjoEoVoME97v5GFJiBXMAIAAAAAC1qH3Kd78Dr9NGbw7y9D/XYBwv5h1LLO8la5OU7g8UkQVsACAAAAAArZ6atJCYrVfHB8dSNPOFf6nnDADBMJcIEj8ljPvxHp8AAzE3AH0AAAAFZAAgAAAAADtbVEI2tdkrowEMdkacD2w0Y3T3Ofi7PH6HmA6sP0c/BXMAIAAAAADuBSROnZHA+NgUPH8d0LnWFiDsM2bY8bzjC1+elSsIygVsACAAAAAAR0G2m+uANoWknkr/NerFcG+fECVxNIs0cqbY1t/U/0MAAzE4AH0AAAAFZAAgAAAAAAh3WpeMVlikPFYj9hLj+fmIqVt6omCSF75W3TPExyWpBXMAIAAAAAAsQkRmwqeVj2gGE03orb6PtrIzDt6dDU3hgSQi8E2wKgVsACAAAAAA3GHaRE2RAcaBRd8VzmYzWeBD2Gmy91eTK1k8YdWObZcAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAAA==", "subType": "06" } } @@ -327,7 +323,6 @@ "schema": { "default.default": { "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", "ecocCollection": "enxcol_.default.ecoc", "fields": [ { diff --git a/test/client-side-encryption/spec/legacy/fle2-Range-DoublePrecision-Correctness.json b/test/client-side-encryption/spec/legacy/fle2v2-Range-DoublePrecision-Correctness.json similarity index 99% rename from test/client-side-encryption/spec/legacy/fle2-Range-DoublePrecision-Correctness.json rename to test/client-side-encryption/spec/legacy/fle2v2-Range-DoublePrecision-Correctness.json index e69d912694..6b42ecfe82 100644 --- a/test/client-side-encryption/spec/legacy/fle2-Range-DoublePrecision-Correctness.json +++ b/test/client-side-encryption/spec/legacy/fle2v2-Range-DoublePrecision-Correctness.json @@ -1,7 +1,8 @@ { "runOn": [ { - "minServerVersion": "6.2.0", + "minServerVersion": "7.0.0", + "serverless": "forbid", "topology": [ "replicaset", "sharded", @@ -13,9 +14,6 @@ "collection_name": "default", "data": [], "encrypted_fields": { - "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", - "ecocCollection": "enxcol_.default.ecoc", "fields": [ { "keyId": { diff --git a/test/client-side-encryption/spec/legacy/fle2-Range-DoublePrecision-Delete.json b/test/client-side-encryption/spec/legacy/fle2v2-Range-DoublePrecision-Delete.json similarity index 80% rename from test/client-side-encryption/spec/legacy/fle2-Range-DoublePrecision-Delete.json rename to test/client-side-encryption/spec/legacy/fle2v2-Range-DoublePrecision-Delete.json index d6a9c4b7e7..a5c397d0be 100644 --- a/test/client-side-encryption/spec/legacy/fle2-Range-DoublePrecision-Delete.json +++ b/test/client-side-encryption/spec/legacy/fle2v2-Range-DoublePrecision-Delete.json @@ -1,7 +1,8 @@ { "runOn": [ { - "minServerVersion": "6.2.0", + "minServerVersion": "7.0.0", + "serverless": "forbid", "topology": [ "replicaset", "sharded", @@ -13,9 +14,6 @@ "collection_name": "default", "data": [], "encrypted_fields": { - "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", - "ecocCollection": "enxcol_.default.ecoc", "fields": [ { "keyId": { @@ -198,7 +196,6 @@ "schema": { "default.default": { "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", "ecocCollection": "enxcol_.default.ecoc", "fields": [ { @@ -255,7 +252,6 @@ "schema": { "default.default": { "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", "ecocCollection": "enxcol_.default.ecoc", "fields": [ { @@ -304,7 +300,7 @@ "encryptedDoublePrecision": { "$gt": { "$binary": { - "base64": 
"CvoJAAADcGF5bG9hZADKCQAABGcAjgkAAAMwAH0AAAAFZAAgAAAAAHdJ2Vnb4MMzqVYVssjSdDy8XU4GVzMTfGifGETgQ2mYBXMAIAAAAAD7cFfKJGIXo6PjyeX2ria02CckW7dWFDoY/3FyBdm1NQVjACAAAAAAqZO+/+gRWlPaMOvuiXizSmBe7lp1VWg1vJ4UmW8o3bQAAzEAfQAAAAVkACAAAAAAlmvfDrZoydUet4eCVMq7z6a58Ea+1HLJOWxN5lNcrWEFcwAgAAAAAEBo5AWZyC41b9ayjWNQSL4iYEAIwR/JG+ssN8bdoK9RBWMAIAAAAAD4FTKJ6CTzKBAyAwZCLUoDEfnZTRZmhF1q/2hnDzmG9gADMgB9AAAABWQAIAAAAABbiLaoxAA6rinMJw1hC8ZUiq6UU1AQaPFn/py/Y06WuQVzACAAAAAAhtDasFkvYE7SCNu1je/hxdE9TJtAvvH3NtdEbKzNbCUFYwAgAAAAAHHy019aPatHTST+0wGsmukUcsQNQj6KpoS9b7iGeThAAAMzAH0AAAAFZAAgAAAAALORWwSr+tYNxcil2KIGSbNhTHvcPbdj+rLVQNx21S/KBXMAIAAAAAD6diZBkPEJ1cQy06LAxdbNK8Nlxbb44fH4Wk3Y3260nQVjACAAAAAAvUc1q7pyhjU0ilgmwiKkHIY3V4/LxO+Y2uT7eSpBOs8AAzQAfQAAAAVkACAAAAAAG5XMK96PjClNlUvg82j4pMY1YxsznZfj4uNweD394FoFcwAgAAAAAKHgQLdGJHkrfFg9nB93Ac+3VgBw6aU44MTkKIQ91dZoBWMAIAAAAACtbNc1DCoUUyzlkrYmJi4NlwOqLYmb6au4pDc8clXVXwADNQB9AAAABWQAIAAAAAA6M++b9I0YFemmWBAWAE3glu2Ah3Ta1FBxAQEIWS0toAVzACAAAAAANXYTqPf1Y6X3Ns6YQIX0C3FKCyWUo+Kk+fNcQvc0WSoFYwAgAAAAAAaqju6Dv8wqXxcsIbP67V1QGaD5kNTFofZ9Zuf1LGnKAAM2AH0AAAAFZAAgAAAAAKl8bV1riH/uyJ+X0HHd3+18k2cJl2dQFXCdoagutFcaBXMAIAAAAABm8F2Ew9f0VOABdcF+lP0Bi+zWvEUPniWgrxPq/Sx3uwVjACAAAAAAQd2pWVqlmmLg8m8xbs7yLewmR0Z6UQgXofbCsMHaGSoAAzcAfQAAAAVkACAAAAAAsYZD8JEP6kYsPncFnNZwJxhu4YtUTKPNcjHtv67H+rYFcwAgAAAAAI4LqZcRkvbs/2F62Flu0pixNcor4WmBD0DHGaf039wLBWMAIAAAAAAqzpfyBpr4Ano+nFWJyyTuIJelJgiRDnMHQqdeqV8JaAADOAB9AAAABWQAIAAAAAC0vdAi+dmoIXvZ5LqUqvyKV9/tHqSI2SWiSJO5pTnA2wVzACAAAAAAS2qvf9fvfVUH5WtsVxjxmskpGjYTQV34LwvQQw1y9wIFYwAgAAAAAFhNY4qwNntyA+GIoNHZsTkIUbPgy4TBlvNnTPjp4bMFAAM5AH0AAAAFZAAgAAAAAH31lb/srBcrOXkzddCwAnclsR5/3QijEVgECs2JjOWBBXMAIAAAAABg7+prDT73YcCvLE5QbuIrqGcjLc5pQD2Miq0d29yrxgVjACAAAAAAFKqAqXG/ktejFQ7fM2aobO2VmEvZLXnRaJH97Jy/sJYAAzEwAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVjACAAAAAAfsofSP7nQHv8ic8ZW0aNlWxplS46Z+mywPR4rQk+wcgAAzExAH0AAAAFZAAgAAAAAFdthRhe2Q8CvxGIhjTJZv0Lk97GkHciTPxZ/mckLoNaBXMAIAAAAAAqOxsAr23LOVB0DIHbPf9UDJJRFXY2YoKbjhRqw5psbQVjACAAAAAA7ty+Nif6KjS3v1zWKaHX9n4Zj3XC4ajuCduKNIYr3l8AAzEyAH0AAAAFZAAgAAAAAMWymwwbvIeMqmnKWWifUqoCxOsdpnonM2qdLPyjqJO/BXMAIAAAAAB6IDmmpUhBD2zpRj8/y/kmOSXcjuIU14sNh6GKSsg2uwVjACAAAAAABSWO0Ii+NGcsHZQ9MR5EjPXVKeXlI4FQ1pcxeKDiuooAAzEzAH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVjACAAAAAAD51NYesiO4Fo7w7iWBfqAFxEqkaDVctpvzZ28nT4SE8AAzE0AH0AAAAFZAAgAAAAAJaRYmo8zqI2BEUzdSwp4tVRpPmVWsfydkYN3UHh6TMuBXMAIAAAAAAeD6mDnQeLlbC9i0sVgE8+RH6y+e94OJQ0tJ0PvblVSgVjACAAAAAAKUhYSt4nvvUfbNgPJ2E79SciVZ0ZzbzoZ2nKr4ewNLsAAzE1AH0AAAAFZAAgAAAAAAZZ538coNPwyRjhEwr5P8Xw32oWOJF+R+nfCGgy2qO3BXMAIAAAAACOPLnJlKwGNPDBReRKnHfteq0wFb3ezhrc7BVXs8RUHwVjACAAAAAAzCICkPZAkfTiD0MUt155dIPgLJ4/e0qFTM2FR0U261YAAzE2AH0AAAAFZAAgAAAAAFH9l9GGA1I52atJV5jNUf1lx8jBjoEoVoME97v5GFJiBXMAIAAAAAC1qH3Kd78Dr9NGbw7y9D/XYBwv5h1LLO8la5OU7g8UkQVjACAAAAAAn27H0Mpwatgc1R/4nXSRjsG2PzB0ol5YR9f3mCb2y/0AAzE3AH0AAAAFZAAgAAAAADtbVEI2tdkrowEMdkacD2w0Y3T3Ofi7PH6HmA6sP0c/BXMAIAAAAADuBSROnZHA+NgUPH8d0LnWFiDsM2bY8bzjC1+elSsIygVjACAAAAAAMinHEu4wkbeOpdZbXQ94q5o5pIEubqXUDrTRYOGmJC0AAzE4AH0AAAAFZAAgAAAAAAh3WpeMVlikPFYj9hLj+fmIqVt6omCSF75W3TPExyWpBXMAIAAAAAAsQkRmwqeVj2gGE03orb6PtrIzDt6dDU3hgSQi8E2wKgVjACAAAAAAvlZo8Qj3eAdxzZxN5sHKhxi+a9Npj7cZC5+pE6qrOawAAAVlACAAAAAA65pz95EthqQpfoHS9nWvdCh05AV+OokP7GUaI+7j8+wSY20AAAAAAAAAAAAAEHBheWxvYWRJZAAAAAAAEGZpcnN0T3BlcmF0b3IAAQAAAAA=", + "base64": 
"DdIJAAADcGF5bG9hZACiCQAABGcAjgkAAAMwAH0AAAAFZAAgAAAAAHdJ2Vnb4MMzqVYVssjSdDy8XU4GVzMTfGifGETgQ2mYBXMAIAAAAAD7cFfKJGIXo6PjyeX2ria02CckW7dWFDoY/3FyBdm1NQVsACAAAAAAhEPSNv4M023A3hzNFuy83+hIKuZ2mKRY954N++aEOBUAAzEAfQAAAAVkACAAAAAAlmvfDrZoydUet4eCVMq7z6a58Ea+1HLJOWxN5lNcrWEFcwAgAAAAAEBo5AWZyC41b9ayjWNQSL4iYEAIwR/JG+ssN8bdoK9RBWwAIAAAAACEndE0SLxFSElOrNnqeX0EPmgDio3udZjVREy4JLS3sQADMgB9AAAABWQAIAAAAABbiLaoxAA6rinMJw1hC8ZUiq6UU1AQaPFn/py/Y06WuQVzACAAAAAAhtDasFkvYE7SCNu1je/hxdE9TJtAvvH3NtdEbKzNbCUFbAAgAAAAAIGepU1RSCF8sWODHEpKglsoqw3VBBH4a/URGxgGzbq2AAMzAH0AAAAFZAAgAAAAALORWwSr+tYNxcil2KIGSbNhTHvcPbdj+rLVQNx21S/KBXMAIAAAAAD6diZBkPEJ1cQy06LAxdbNK8Nlxbb44fH4Wk3Y3260nQVsACAAAAAA1eYAZBFHlDiaDAljWi8blGQ2nvvZa5AO5doeo0SFZsgAAzQAfQAAAAVkACAAAAAAG5XMK96PjClNlUvg82j4pMY1YxsznZfj4uNweD394FoFcwAgAAAAAKHgQLdGJHkrfFg9nB93Ac+3VgBw6aU44MTkKIQ91dZoBWwAIAAAAAAPxXmi+SDJ+40A0KdwfRczexlZQrHjIA+D3oUB0EY9tAADNQB9AAAABWQAIAAAAAA6M++b9I0YFemmWBAWAE3glu2Ah3Ta1FBxAQEIWS0toAVzACAAAAAANXYTqPf1Y6X3Ns6YQIX0C3FKCyWUo+Kk+fNcQvc0WSoFbAAgAAAAAA+uJUw1ICYgyeygSRe206VTWVtUnhdci3iHbyP5YtEVAAM2AH0AAAAFZAAgAAAAAKl8bV1riH/uyJ+X0HHd3+18k2cJl2dQFXCdoagutFcaBXMAIAAAAABm8F2Ew9f0VOABdcF+lP0Bi+zWvEUPniWgrxPq/Sx3uwVsACAAAAAAJfFErjZ6BPhsw5LjJLqNtKDLJ4zV0eIZppQpd9b0wZoAAzcAfQAAAAVkACAAAAAAsYZD8JEP6kYsPncFnNZwJxhu4YtUTKPNcjHtv67H+rYFcwAgAAAAAI4LqZcRkvbs/2F62Flu0pixNcor4WmBD0DHGaf039wLBWwAIAAAAAD4wUR3xd9lKltcqqo8LYvdMQWzCRobkV/ppKB/yn5dUgADOAB9AAAABWQAIAAAAAC0vdAi+dmoIXvZ5LqUqvyKV9/tHqSI2SWiSJO5pTnA2wVzACAAAAAAS2qvf9fvfVUH5WtsVxjxmskpGjYTQV34LwvQQw1y9wIFbAAgAAAAAE0+FKuK7HxbypvCeEJzMTcjOWE0ScYOlTBMUNlIv55hAAM5AH0AAAAFZAAgAAAAAH31lb/srBcrOXkzddCwAnclsR5/3QijEVgECs2JjOWBBXMAIAAAAABg7+prDT73YcCvLE5QbuIrqGcjLc5pQD2Miq0d29yrxgVsACAAAAAAetRiPwDSFWBzpWSWkOKWM6fKStRJ8SyObnpc79ux8p0AAzEwAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzExAH0AAAAFZAAgAAAAAFdthRhe2Q8CvxGIhjTJZv0Lk97GkHciTPxZ/mckLoNaBXMAIAAAAAAqOxsAr23LOVB0DIHbPf9UDJJRFXY2YoKbjhRqw5psbQVsACAAAAAA0G2GD8ZQjDBntjLpW4rqwKRS6HiUjL03g1N6chANozcAAzEyAH0AAAAFZAAgAAAAAMWymwwbvIeMqmnKWWifUqoCxOsdpnonM2qdLPyjqJO/BXMAIAAAAAB6IDmmpUhBD2zpRj8/y/kmOSXcjuIU14sNh6GKSsg2uwVsACAAAAAAWMFPNOk3EMSQDS9JGPSMIQP0oNGVugxXKKUrIPPlhHgAAzEzAH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzE0AH0AAAAFZAAgAAAAAJaRYmo8zqI2BEUzdSwp4tVRpPmVWsfydkYN3UHh6TMuBXMAIAAAAAAeD6mDnQeLlbC9i0sVgE8+RH6y+e94OJQ0tJ0PvblVSgVsACAAAAAAWp4jvretbDEsqEMzP/WLTnwOiJwCtfrCiB6m8k+yEMoAAzE1AH0AAAAFZAAgAAAAAAZZ538coNPwyRjhEwr5P8Xw32oWOJF+R+nfCGgy2qO3BXMAIAAAAACOPLnJlKwGNPDBReRKnHfteq0wFb3ezhrc7BVXs8RUHwVsACAAAAAA+lGesNk3+SyB/60rSvdQ2aN2vfJPR7llJVhufGTNhHkAAzE2AH0AAAAFZAAgAAAAAFH9l9GGA1I52atJV5jNUf1lx8jBjoEoVoME97v5GFJiBXMAIAAAAAC1qH3Kd78Dr9NGbw7y9D/XYBwv5h1LLO8la5OU7g8UkQVsACAAAAAArZ6atJCYrVfHB8dSNPOFf6nnDADBMJcIEj8ljPvxHp8AAzE3AH0AAAAFZAAgAAAAADtbVEI2tdkrowEMdkacD2w0Y3T3Ofi7PH6HmA6sP0c/BXMAIAAAAADuBSROnZHA+NgUPH8d0LnWFiDsM2bY8bzjC1+elSsIygVsACAAAAAAR0G2m+uANoWknkr/NerFcG+fECVxNIs0cqbY1t/U/0MAAzE4AH0AAAAFZAAgAAAAAAh3WpeMVlikPFYj9hLj+fmIqVt6omCSF75W3TPExyWpBXMAIAAAAAAsQkRmwqeVj2gGE03orb6PtrIzDt6dDU3hgSQi8E2wKgVsACAAAAAA3GHaRE2RAcaBRd8VzmYzWeBD2Gmy91eTK1k8YdWObZcAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAAA==", "subType": "06" } } @@ -313,12 +309,12 @@ "limit": 1 } ], + "ordered": true, "encryptionInformation": { "type": 1, "schema": { "default.default": { "escCollection": "enxcol_.default.esc", - 
"eccCollection": "enxcol_.default.ecc", "ecocCollection": "enxcol_.default.ecoc", "fields": [ { @@ -351,24 +347,6 @@ } ] } - }, - "deleteTokens": { - "default.default": { - "encryptedDoublePrecision": { - "e": { - "$binary": { - "base64": "65pz95EthqQpfoHS9nWvdCh05AV+OokP7GUaI+7j8+w=", - "subType": "00" - } - }, - "o": { - "$binary": { - "base64": "noN+05JsuO1oDg59yypIGj45i+eFH6HOTXOPpeZ//Mk=", - "subType": "00" - } - } - } - } } } }, diff --git a/test/client-side-encryption/spec/legacy/fle2-Range-DoublePrecision-FindOneAndUpdate.json b/test/client-side-encryption/spec/legacy/fle2v2-Range-DoublePrecision-FindOneAndUpdate.json similarity index 84% rename from test/client-side-encryption/spec/legacy/fle2-Range-DoublePrecision-FindOneAndUpdate.json rename to test/client-side-encryption/spec/legacy/fle2v2-Range-DoublePrecision-FindOneAndUpdate.json index 0511c2e37e..b6df9463e8 100644 --- a/test/client-side-encryption/spec/legacy/fle2-Range-DoublePrecision-FindOneAndUpdate.json +++ b/test/client-side-encryption/spec/legacy/fle2v2-Range-DoublePrecision-FindOneAndUpdate.json @@ -1,7 +1,8 @@ { "runOn": [ { - "minServerVersion": "6.2.0", + "minServerVersion": "7.0.0", + "serverless": "forbid", "topology": [ "replicaset", "sharded", @@ -13,9 +14,6 @@ "collection_name": "default", "data": [], "encrypted_fields": { - "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", - "ecocCollection": "enxcol_.default.ecoc", "fields": [ { "keyId": { @@ -209,7 +207,6 @@ "schema": { "default.default": { "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", "ecocCollection": "enxcol_.default.ecoc", "fields": [ { @@ -266,7 +263,6 @@ "schema": { "default.default": { "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", "ecocCollection": "enxcol_.default.ecoc", "fields": [ { @@ -313,7 +309,7 @@ "encryptedDoublePrecision": { "$gt": { "$binary": { - "base64": 
"CvoJAAADcGF5bG9hZADKCQAABGcAjgkAAAMwAH0AAAAFZAAgAAAAAHdJ2Vnb4MMzqVYVssjSdDy8XU4GVzMTfGifGETgQ2mYBXMAIAAAAAD7cFfKJGIXo6PjyeX2ria02CckW7dWFDoY/3FyBdm1NQVjACAAAAAAqZO+/+gRWlPaMOvuiXizSmBe7lp1VWg1vJ4UmW8o3bQAAzEAfQAAAAVkACAAAAAAlmvfDrZoydUet4eCVMq7z6a58Ea+1HLJOWxN5lNcrWEFcwAgAAAAAEBo5AWZyC41b9ayjWNQSL4iYEAIwR/JG+ssN8bdoK9RBWMAIAAAAAD4FTKJ6CTzKBAyAwZCLUoDEfnZTRZmhF1q/2hnDzmG9gADMgB9AAAABWQAIAAAAABbiLaoxAA6rinMJw1hC8ZUiq6UU1AQaPFn/py/Y06WuQVzACAAAAAAhtDasFkvYE7SCNu1je/hxdE9TJtAvvH3NtdEbKzNbCUFYwAgAAAAAHHy019aPatHTST+0wGsmukUcsQNQj6KpoS9b7iGeThAAAMzAH0AAAAFZAAgAAAAALORWwSr+tYNxcil2KIGSbNhTHvcPbdj+rLVQNx21S/KBXMAIAAAAAD6diZBkPEJ1cQy06LAxdbNK8Nlxbb44fH4Wk3Y3260nQVjACAAAAAAvUc1q7pyhjU0ilgmwiKkHIY3V4/LxO+Y2uT7eSpBOs8AAzQAfQAAAAVkACAAAAAAG5XMK96PjClNlUvg82j4pMY1YxsznZfj4uNweD394FoFcwAgAAAAAKHgQLdGJHkrfFg9nB93Ac+3VgBw6aU44MTkKIQ91dZoBWMAIAAAAACtbNc1DCoUUyzlkrYmJi4NlwOqLYmb6au4pDc8clXVXwADNQB9AAAABWQAIAAAAAA6M++b9I0YFemmWBAWAE3glu2Ah3Ta1FBxAQEIWS0toAVzACAAAAAANXYTqPf1Y6X3Ns6YQIX0C3FKCyWUo+Kk+fNcQvc0WSoFYwAgAAAAAAaqju6Dv8wqXxcsIbP67V1QGaD5kNTFofZ9Zuf1LGnKAAM2AH0AAAAFZAAgAAAAAKl8bV1riH/uyJ+X0HHd3+18k2cJl2dQFXCdoagutFcaBXMAIAAAAABm8F2Ew9f0VOABdcF+lP0Bi+zWvEUPniWgrxPq/Sx3uwVjACAAAAAAQd2pWVqlmmLg8m8xbs7yLewmR0Z6UQgXofbCsMHaGSoAAzcAfQAAAAVkACAAAAAAsYZD8JEP6kYsPncFnNZwJxhu4YtUTKPNcjHtv67H+rYFcwAgAAAAAI4LqZcRkvbs/2F62Flu0pixNcor4WmBD0DHGaf039wLBWMAIAAAAAAqzpfyBpr4Ano+nFWJyyTuIJelJgiRDnMHQqdeqV8JaAADOAB9AAAABWQAIAAAAAC0vdAi+dmoIXvZ5LqUqvyKV9/tHqSI2SWiSJO5pTnA2wVzACAAAAAAS2qvf9fvfVUH5WtsVxjxmskpGjYTQV34LwvQQw1y9wIFYwAgAAAAAFhNY4qwNntyA+GIoNHZsTkIUbPgy4TBlvNnTPjp4bMFAAM5AH0AAAAFZAAgAAAAAH31lb/srBcrOXkzddCwAnclsR5/3QijEVgECs2JjOWBBXMAIAAAAABg7+prDT73YcCvLE5QbuIrqGcjLc5pQD2Miq0d29yrxgVjACAAAAAAFKqAqXG/ktejFQ7fM2aobO2VmEvZLXnRaJH97Jy/sJYAAzEwAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVjACAAAAAAfsofSP7nQHv8ic8ZW0aNlWxplS46Z+mywPR4rQk+wcgAAzExAH0AAAAFZAAgAAAAAFdthRhe2Q8CvxGIhjTJZv0Lk97GkHciTPxZ/mckLoNaBXMAIAAAAAAqOxsAr23LOVB0DIHbPf9UDJJRFXY2YoKbjhRqw5psbQVjACAAAAAA7ty+Nif6KjS3v1zWKaHX9n4Zj3XC4ajuCduKNIYr3l8AAzEyAH0AAAAFZAAgAAAAAMWymwwbvIeMqmnKWWifUqoCxOsdpnonM2qdLPyjqJO/BXMAIAAAAAB6IDmmpUhBD2zpRj8/y/kmOSXcjuIU14sNh6GKSsg2uwVjACAAAAAABSWO0Ii+NGcsHZQ9MR5EjPXVKeXlI4FQ1pcxeKDiuooAAzEzAH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVjACAAAAAAD51NYesiO4Fo7w7iWBfqAFxEqkaDVctpvzZ28nT4SE8AAzE0AH0AAAAFZAAgAAAAAJaRYmo8zqI2BEUzdSwp4tVRpPmVWsfydkYN3UHh6TMuBXMAIAAAAAAeD6mDnQeLlbC9i0sVgE8+RH6y+e94OJQ0tJ0PvblVSgVjACAAAAAAKUhYSt4nvvUfbNgPJ2E79SciVZ0ZzbzoZ2nKr4ewNLsAAzE1AH0AAAAFZAAgAAAAAAZZ538coNPwyRjhEwr5P8Xw32oWOJF+R+nfCGgy2qO3BXMAIAAAAACOPLnJlKwGNPDBReRKnHfteq0wFb3ezhrc7BVXs8RUHwVjACAAAAAAzCICkPZAkfTiD0MUt155dIPgLJ4/e0qFTM2FR0U261YAAzE2AH0AAAAFZAAgAAAAAFH9l9GGA1I52atJV5jNUf1lx8jBjoEoVoME97v5GFJiBXMAIAAAAAC1qH3Kd78Dr9NGbw7y9D/XYBwv5h1LLO8la5OU7g8UkQVjACAAAAAAn27H0Mpwatgc1R/4nXSRjsG2PzB0ol5YR9f3mCb2y/0AAzE3AH0AAAAFZAAgAAAAADtbVEI2tdkrowEMdkacD2w0Y3T3Ofi7PH6HmA6sP0c/BXMAIAAAAADuBSROnZHA+NgUPH8d0LnWFiDsM2bY8bzjC1+elSsIygVjACAAAAAAMinHEu4wkbeOpdZbXQ94q5o5pIEubqXUDrTRYOGmJC0AAzE4AH0AAAAFZAAgAAAAAAh3WpeMVlikPFYj9hLj+fmIqVt6omCSF75W3TPExyWpBXMAIAAAAAAsQkRmwqeVj2gGE03orb6PtrIzDt6dDU3hgSQi8E2wKgVjACAAAAAAvlZo8Qj3eAdxzZxN5sHKhxi+a9Npj7cZC5+pE6qrOawAAAVlACAAAAAA65pz95EthqQpfoHS9nWvdCh05AV+OokP7GUaI+7j8+wSY20AAAAAAAAAAAAAEHBheWxvYWRJZAAAAAAAEGZpcnN0T3BlcmF0b3IAAQAAAAA=", + "base64": 
"DdIJAAADcGF5bG9hZACiCQAABGcAjgkAAAMwAH0AAAAFZAAgAAAAAHdJ2Vnb4MMzqVYVssjSdDy8XU4GVzMTfGifGETgQ2mYBXMAIAAAAAD7cFfKJGIXo6PjyeX2ria02CckW7dWFDoY/3FyBdm1NQVsACAAAAAAhEPSNv4M023A3hzNFuy83+hIKuZ2mKRY954N++aEOBUAAzEAfQAAAAVkACAAAAAAlmvfDrZoydUet4eCVMq7z6a58Ea+1HLJOWxN5lNcrWEFcwAgAAAAAEBo5AWZyC41b9ayjWNQSL4iYEAIwR/JG+ssN8bdoK9RBWwAIAAAAACEndE0SLxFSElOrNnqeX0EPmgDio3udZjVREy4JLS3sQADMgB9AAAABWQAIAAAAABbiLaoxAA6rinMJw1hC8ZUiq6UU1AQaPFn/py/Y06WuQVzACAAAAAAhtDasFkvYE7SCNu1je/hxdE9TJtAvvH3NtdEbKzNbCUFbAAgAAAAAIGepU1RSCF8sWODHEpKglsoqw3VBBH4a/URGxgGzbq2AAMzAH0AAAAFZAAgAAAAALORWwSr+tYNxcil2KIGSbNhTHvcPbdj+rLVQNx21S/KBXMAIAAAAAD6diZBkPEJ1cQy06LAxdbNK8Nlxbb44fH4Wk3Y3260nQVsACAAAAAA1eYAZBFHlDiaDAljWi8blGQ2nvvZa5AO5doeo0SFZsgAAzQAfQAAAAVkACAAAAAAG5XMK96PjClNlUvg82j4pMY1YxsznZfj4uNweD394FoFcwAgAAAAAKHgQLdGJHkrfFg9nB93Ac+3VgBw6aU44MTkKIQ91dZoBWwAIAAAAAAPxXmi+SDJ+40A0KdwfRczexlZQrHjIA+D3oUB0EY9tAADNQB9AAAABWQAIAAAAAA6M++b9I0YFemmWBAWAE3glu2Ah3Ta1FBxAQEIWS0toAVzACAAAAAANXYTqPf1Y6X3Ns6YQIX0C3FKCyWUo+Kk+fNcQvc0WSoFbAAgAAAAAA+uJUw1ICYgyeygSRe206VTWVtUnhdci3iHbyP5YtEVAAM2AH0AAAAFZAAgAAAAAKl8bV1riH/uyJ+X0HHd3+18k2cJl2dQFXCdoagutFcaBXMAIAAAAABm8F2Ew9f0VOABdcF+lP0Bi+zWvEUPniWgrxPq/Sx3uwVsACAAAAAAJfFErjZ6BPhsw5LjJLqNtKDLJ4zV0eIZppQpd9b0wZoAAzcAfQAAAAVkACAAAAAAsYZD8JEP6kYsPncFnNZwJxhu4YtUTKPNcjHtv67H+rYFcwAgAAAAAI4LqZcRkvbs/2F62Flu0pixNcor4WmBD0DHGaf039wLBWwAIAAAAAD4wUR3xd9lKltcqqo8LYvdMQWzCRobkV/ppKB/yn5dUgADOAB9AAAABWQAIAAAAAC0vdAi+dmoIXvZ5LqUqvyKV9/tHqSI2SWiSJO5pTnA2wVzACAAAAAAS2qvf9fvfVUH5WtsVxjxmskpGjYTQV34LwvQQw1y9wIFbAAgAAAAAE0+FKuK7HxbypvCeEJzMTcjOWE0ScYOlTBMUNlIv55hAAM5AH0AAAAFZAAgAAAAAH31lb/srBcrOXkzddCwAnclsR5/3QijEVgECs2JjOWBBXMAIAAAAABg7+prDT73YcCvLE5QbuIrqGcjLc5pQD2Miq0d29yrxgVsACAAAAAAetRiPwDSFWBzpWSWkOKWM6fKStRJ8SyObnpc79ux8p0AAzEwAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzExAH0AAAAFZAAgAAAAAFdthRhe2Q8CvxGIhjTJZv0Lk97GkHciTPxZ/mckLoNaBXMAIAAAAAAqOxsAr23LOVB0DIHbPf9UDJJRFXY2YoKbjhRqw5psbQVsACAAAAAA0G2GD8ZQjDBntjLpW4rqwKRS6HiUjL03g1N6chANozcAAzEyAH0AAAAFZAAgAAAAAMWymwwbvIeMqmnKWWifUqoCxOsdpnonM2qdLPyjqJO/BXMAIAAAAAB6IDmmpUhBD2zpRj8/y/kmOSXcjuIU14sNh6GKSsg2uwVsACAAAAAAWMFPNOk3EMSQDS9JGPSMIQP0oNGVugxXKKUrIPPlhHgAAzEzAH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzE0AH0AAAAFZAAgAAAAAJaRYmo8zqI2BEUzdSwp4tVRpPmVWsfydkYN3UHh6TMuBXMAIAAAAAAeD6mDnQeLlbC9i0sVgE8+RH6y+e94OJQ0tJ0PvblVSgVsACAAAAAAWp4jvretbDEsqEMzP/WLTnwOiJwCtfrCiB6m8k+yEMoAAzE1AH0AAAAFZAAgAAAAAAZZ538coNPwyRjhEwr5P8Xw32oWOJF+R+nfCGgy2qO3BXMAIAAAAACOPLnJlKwGNPDBReRKnHfteq0wFb3ezhrc7BVXs8RUHwVsACAAAAAA+lGesNk3+SyB/60rSvdQ2aN2vfJPR7llJVhufGTNhHkAAzE2AH0AAAAFZAAgAAAAAFH9l9GGA1I52atJV5jNUf1lx8jBjoEoVoME97v5GFJiBXMAIAAAAAC1qH3Kd78Dr9NGbw7y9D/XYBwv5h1LLO8la5OU7g8UkQVsACAAAAAArZ6atJCYrVfHB8dSNPOFf6nnDADBMJcIEj8ljPvxHp8AAzE3AH0AAAAFZAAgAAAAADtbVEI2tdkrowEMdkacD2w0Y3T3Ofi7PH6HmA6sP0c/BXMAIAAAAADuBSROnZHA+NgUPH8d0LnWFiDsM2bY8bzjC1+elSsIygVsACAAAAAAR0G2m+uANoWknkr/NerFcG+fECVxNIs0cqbY1t/U/0MAAzE4AH0AAAAFZAAgAAAAAAh3WpeMVlikPFYj9hLj+fmIqVt6omCSF75W3TPExyWpBXMAIAAAAAAsQkRmwqeVj2gGE03orb6PtrIzDt6dDU3hgSQi8E2wKgVsACAAAAAA3GHaRE2RAcaBRd8VzmYzWeBD2Gmy91eTK1k8YdWObZcAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAAA==", "subType": "06" } } @@ -331,7 +327,6 @@ "schema": { "default.default": { "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", "ecocCollection": 
"enxcol_.default.ecoc", "fields": [ { @@ -364,24 +359,6 @@ } ] } - }, - "deleteTokens": { - "default.default": { - "encryptedDoublePrecision": { - "e": { - "$binary": { - "base64": "65pz95EthqQpfoHS9nWvdCh05AV+OokP7GUaI+7j8+w=", - "subType": "00" - } - }, - "o": { - "$binary": { - "base64": "noN+05JsuO1oDg59yypIGj45i+eFH6HOTXOPpeZ//Mk=", - "subType": "00" - } - } - } - } } } }, diff --git a/test/client-side-encryption/spec/legacy/fle2-Range-DoublePrecision-InsertFind.json b/test/client-side-encryption/spec/legacy/fle2v2-Range-DoublePrecision-InsertFind.json similarity index 86% rename from test/client-side-encryption/spec/legacy/fle2-Range-DoublePrecision-InsertFind.json rename to test/client-side-encryption/spec/legacy/fle2v2-Range-DoublePrecision-InsertFind.json index 616101b4d4..1cea25545b 100644 --- a/test/client-side-encryption/spec/legacy/fle2-Range-DoublePrecision-InsertFind.json +++ b/test/client-side-encryption/spec/legacy/fle2v2-Range-DoublePrecision-InsertFind.json @@ -1,7 +1,8 @@ { "runOn": [ { - "minServerVersion": "6.2.0", + "minServerVersion": "7.0.0", + "serverless": "forbid", "topology": [ "replicaset", "sharded", @@ -13,9 +14,6 @@ "collection_name": "default", "data": [], "encrypted_fields": { - "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", - "ecocCollection": "enxcol_.default.ecoc", "fields": [ { "keyId": { @@ -203,7 +201,6 @@ "schema": { "default.default": { "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", "ecocCollection": "enxcol_.default.ecoc", "fields": [ { @@ -260,7 +257,6 @@ "schema": { "default.default": { "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", "ecocCollection": "enxcol_.default.ecoc", "fields": [ { @@ -307,7 +303,7 @@ "encryptedDoublePrecision": { "$gt": { "$binary": { - "base64": 
"CvoJAAADcGF5bG9hZADKCQAABGcAjgkAAAMwAH0AAAAFZAAgAAAAAHdJ2Vnb4MMzqVYVssjSdDy8XU4GVzMTfGifGETgQ2mYBXMAIAAAAAD7cFfKJGIXo6PjyeX2ria02CckW7dWFDoY/3FyBdm1NQVjACAAAAAAqZO+/+gRWlPaMOvuiXizSmBe7lp1VWg1vJ4UmW8o3bQAAzEAfQAAAAVkACAAAAAAlmvfDrZoydUet4eCVMq7z6a58Ea+1HLJOWxN5lNcrWEFcwAgAAAAAEBo5AWZyC41b9ayjWNQSL4iYEAIwR/JG+ssN8bdoK9RBWMAIAAAAAD4FTKJ6CTzKBAyAwZCLUoDEfnZTRZmhF1q/2hnDzmG9gADMgB9AAAABWQAIAAAAABbiLaoxAA6rinMJw1hC8ZUiq6UU1AQaPFn/py/Y06WuQVzACAAAAAAhtDasFkvYE7SCNu1je/hxdE9TJtAvvH3NtdEbKzNbCUFYwAgAAAAAHHy019aPatHTST+0wGsmukUcsQNQj6KpoS9b7iGeThAAAMzAH0AAAAFZAAgAAAAALORWwSr+tYNxcil2KIGSbNhTHvcPbdj+rLVQNx21S/KBXMAIAAAAAD6diZBkPEJ1cQy06LAxdbNK8Nlxbb44fH4Wk3Y3260nQVjACAAAAAAvUc1q7pyhjU0ilgmwiKkHIY3V4/LxO+Y2uT7eSpBOs8AAzQAfQAAAAVkACAAAAAAG5XMK96PjClNlUvg82j4pMY1YxsznZfj4uNweD394FoFcwAgAAAAAKHgQLdGJHkrfFg9nB93Ac+3VgBw6aU44MTkKIQ91dZoBWMAIAAAAACtbNc1DCoUUyzlkrYmJi4NlwOqLYmb6au4pDc8clXVXwADNQB9AAAABWQAIAAAAAA6M++b9I0YFemmWBAWAE3glu2Ah3Ta1FBxAQEIWS0toAVzACAAAAAANXYTqPf1Y6X3Ns6YQIX0C3FKCyWUo+Kk+fNcQvc0WSoFYwAgAAAAAAaqju6Dv8wqXxcsIbP67V1QGaD5kNTFofZ9Zuf1LGnKAAM2AH0AAAAFZAAgAAAAAKl8bV1riH/uyJ+X0HHd3+18k2cJl2dQFXCdoagutFcaBXMAIAAAAABm8F2Ew9f0VOABdcF+lP0Bi+zWvEUPniWgrxPq/Sx3uwVjACAAAAAAQd2pWVqlmmLg8m8xbs7yLewmR0Z6UQgXofbCsMHaGSoAAzcAfQAAAAVkACAAAAAAsYZD8JEP6kYsPncFnNZwJxhu4YtUTKPNcjHtv67H+rYFcwAgAAAAAI4LqZcRkvbs/2F62Flu0pixNcor4WmBD0DHGaf039wLBWMAIAAAAAAqzpfyBpr4Ano+nFWJyyTuIJelJgiRDnMHQqdeqV8JaAADOAB9AAAABWQAIAAAAAC0vdAi+dmoIXvZ5LqUqvyKV9/tHqSI2SWiSJO5pTnA2wVzACAAAAAAS2qvf9fvfVUH5WtsVxjxmskpGjYTQV34LwvQQw1y9wIFYwAgAAAAAFhNY4qwNntyA+GIoNHZsTkIUbPgy4TBlvNnTPjp4bMFAAM5AH0AAAAFZAAgAAAAAH31lb/srBcrOXkzddCwAnclsR5/3QijEVgECs2JjOWBBXMAIAAAAABg7+prDT73YcCvLE5QbuIrqGcjLc5pQD2Miq0d29yrxgVjACAAAAAAFKqAqXG/ktejFQ7fM2aobO2VmEvZLXnRaJH97Jy/sJYAAzEwAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVjACAAAAAAfsofSP7nQHv8ic8ZW0aNlWxplS46Z+mywPR4rQk+wcgAAzExAH0AAAAFZAAgAAAAAFdthRhe2Q8CvxGIhjTJZv0Lk97GkHciTPxZ/mckLoNaBXMAIAAAAAAqOxsAr23LOVB0DIHbPf9UDJJRFXY2YoKbjhRqw5psbQVjACAAAAAA7ty+Nif6KjS3v1zWKaHX9n4Zj3XC4ajuCduKNIYr3l8AAzEyAH0AAAAFZAAgAAAAAMWymwwbvIeMqmnKWWifUqoCxOsdpnonM2qdLPyjqJO/BXMAIAAAAAB6IDmmpUhBD2zpRj8/y/kmOSXcjuIU14sNh6GKSsg2uwVjACAAAAAABSWO0Ii+NGcsHZQ9MR5EjPXVKeXlI4FQ1pcxeKDiuooAAzEzAH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVjACAAAAAAD51NYesiO4Fo7w7iWBfqAFxEqkaDVctpvzZ28nT4SE8AAzE0AH0AAAAFZAAgAAAAAJaRYmo8zqI2BEUzdSwp4tVRpPmVWsfydkYN3UHh6TMuBXMAIAAAAAAeD6mDnQeLlbC9i0sVgE8+RH6y+e94OJQ0tJ0PvblVSgVjACAAAAAAKUhYSt4nvvUfbNgPJ2E79SciVZ0ZzbzoZ2nKr4ewNLsAAzE1AH0AAAAFZAAgAAAAAAZZ538coNPwyRjhEwr5P8Xw32oWOJF+R+nfCGgy2qO3BXMAIAAAAACOPLnJlKwGNPDBReRKnHfteq0wFb3ezhrc7BVXs8RUHwVjACAAAAAAzCICkPZAkfTiD0MUt155dIPgLJ4/e0qFTM2FR0U261YAAzE2AH0AAAAFZAAgAAAAAFH9l9GGA1I52atJV5jNUf1lx8jBjoEoVoME97v5GFJiBXMAIAAAAAC1qH3Kd78Dr9NGbw7y9D/XYBwv5h1LLO8la5OU7g8UkQVjACAAAAAAn27H0Mpwatgc1R/4nXSRjsG2PzB0ol5YR9f3mCb2y/0AAzE3AH0AAAAFZAAgAAAAADtbVEI2tdkrowEMdkacD2w0Y3T3Ofi7PH6HmA6sP0c/BXMAIAAAAADuBSROnZHA+NgUPH8d0LnWFiDsM2bY8bzjC1+elSsIygVjACAAAAAAMinHEu4wkbeOpdZbXQ94q5o5pIEubqXUDrTRYOGmJC0AAzE4AH0AAAAFZAAgAAAAAAh3WpeMVlikPFYj9hLj+fmIqVt6omCSF75W3TPExyWpBXMAIAAAAAAsQkRmwqeVj2gGE03orb6PtrIzDt6dDU3hgSQi8E2wKgVjACAAAAAAvlZo8Qj3eAdxzZxN5sHKhxi+a9Npj7cZC5+pE6qrOawAAAVlACAAAAAA65pz95EthqQpfoHS9nWvdCh05AV+OokP7GUaI+7j8+wSY20AAAAAAAAAAAAAEHBheWxvYWRJZAAAAAAAEGZpcnN0T3BlcmF0b3IAAQAAAAA=", + "base64": 
"DdIJAAADcGF5bG9hZACiCQAABGcAjgkAAAMwAH0AAAAFZAAgAAAAAHdJ2Vnb4MMzqVYVssjSdDy8XU4GVzMTfGifGETgQ2mYBXMAIAAAAAD7cFfKJGIXo6PjyeX2ria02CckW7dWFDoY/3FyBdm1NQVsACAAAAAAhEPSNv4M023A3hzNFuy83+hIKuZ2mKRY954N++aEOBUAAzEAfQAAAAVkACAAAAAAlmvfDrZoydUet4eCVMq7z6a58Ea+1HLJOWxN5lNcrWEFcwAgAAAAAEBo5AWZyC41b9ayjWNQSL4iYEAIwR/JG+ssN8bdoK9RBWwAIAAAAACEndE0SLxFSElOrNnqeX0EPmgDio3udZjVREy4JLS3sQADMgB9AAAABWQAIAAAAABbiLaoxAA6rinMJw1hC8ZUiq6UU1AQaPFn/py/Y06WuQVzACAAAAAAhtDasFkvYE7SCNu1je/hxdE9TJtAvvH3NtdEbKzNbCUFbAAgAAAAAIGepU1RSCF8sWODHEpKglsoqw3VBBH4a/URGxgGzbq2AAMzAH0AAAAFZAAgAAAAALORWwSr+tYNxcil2KIGSbNhTHvcPbdj+rLVQNx21S/KBXMAIAAAAAD6diZBkPEJ1cQy06LAxdbNK8Nlxbb44fH4Wk3Y3260nQVsACAAAAAA1eYAZBFHlDiaDAljWi8blGQ2nvvZa5AO5doeo0SFZsgAAzQAfQAAAAVkACAAAAAAG5XMK96PjClNlUvg82j4pMY1YxsznZfj4uNweD394FoFcwAgAAAAAKHgQLdGJHkrfFg9nB93Ac+3VgBw6aU44MTkKIQ91dZoBWwAIAAAAAAPxXmi+SDJ+40A0KdwfRczexlZQrHjIA+D3oUB0EY9tAADNQB9AAAABWQAIAAAAAA6M++b9I0YFemmWBAWAE3glu2Ah3Ta1FBxAQEIWS0toAVzACAAAAAANXYTqPf1Y6X3Ns6YQIX0C3FKCyWUo+Kk+fNcQvc0WSoFbAAgAAAAAA+uJUw1ICYgyeygSRe206VTWVtUnhdci3iHbyP5YtEVAAM2AH0AAAAFZAAgAAAAAKl8bV1riH/uyJ+X0HHd3+18k2cJl2dQFXCdoagutFcaBXMAIAAAAABm8F2Ew9f0VOABdcF+lP0Bi+zWvEUPniWgrxPq/Sx3uwVsACAAAAAAJfFErjZ6BPhsw5LjJLqNtKDLJ4zV0eIZppQpd9b0wZoAAzcAfQAAAAVkACAAAAAAsYZD8JEP6kYsPncFnNZwJxhu4YtUTKPNcjHtv67H+rYFcwAgAAAAAI4LqZcRkvbs/2F62Flu0pixNcor4WmBD0DHGaf039wLBWwAIAAAAAD4wUR3xd9lKltcqqo8LYvdMQWzCRobkV/ppKB/yn5dUgADOAB9AAAABWQAIAAAAAC0vdAi+dmoIXvZ5LqUqvyKV9/tHqSI2SWiSJO5pTnA2wVzACAAAAAAS2qvf9fvfVUH5WtsVxjxmskpGjYTQV34LwvQQw1y9wIFbAAgAAAAAE0+FKuK7HxbypvCeEJzMTcjOWE0ScYOlTBMUNlIv55hAAM5AH0AAAAFZAAgAAAAAH31lb/srBcrOXkzddCwAnclsR5/3QijEVgECs2JjOWBBXMAIAAAAABg7+prDT73YcCvLE5QbuIrqGcjLc5pQD2Miq0d29yrxgVsACAAAAAAetRiPwDSFWBzpWSWkOKWM6fKStRJ8SyObnpc79ux8p0AAzEwAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzExAH0AAAAFZAAgAAAAAFdthRhe2Q8CvxGIhjTJZv0Lk97GkHciTPxZ/mckLoNaBXMAIAAAAAAqOxsAr23LOVB0DIHbPf9UDJJRFXY2YoKbjhRqw5psbQVsACAAAAAA0G2GD8ZQjDBntjLpW4rqwKRS6HiUjL03g1N6chANozcAAzEyAH0AAAAFZAAgAAAAAMWymwwbvIeMqmnKWWifUqoCxOsdpnonM2qdLPyjqJO/BXMAIAAAAAB6IDmmpUhBD2zpRj8/y/kmOSXcjuIU14sNh6GKSsg2uwVsACAAAAAAWMFPNOk3EMSQDS9JGPSMIQP0oNGVugxXKKUrIPPlhHgAAzEzAH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzE0AH0AAAAFZAAgAAAAAJaRYmo8zqI2BEUzdSwp4tVRpPmVWsfydkYN3UHh6TMuBXMAIAAAAAAeD6mDnQeLlbC9i0sVgE8+RH6y+e94OJQ0tJ0PvblVSgVsACAAAAAAWp4jvretbDEsqEMzP/WLTnwOiJwCtfrCiB6m8k+yEMoAAzE1AH0AAAAFZAAgAAAAAAZZ538coNPwyRjhEwr5P8Xw32oWOJF+R+nfCGgy2qO3BXMAIAAAAACOPLnJlKwGNPDBReRKnHfteq0wFb3ezhrc7BVXs8RUHwVsACAAAAAA+lGesNk3+SyB/60rSvdQ2aN2vfJPR7llJVhufGTNhHkAAzE2AH0AAAAFZAAgAAAAAFH9l9GGA1I52atJV5jNUf1lx8jBjoEoVoME97v5GFJiBXMAIAAAAAC1qH3Kd78Dr9NGbw7y9D/XYBwv5h1LLO8la5OU7g8UkQVsACAAAAAArZ6atJCYrVfHB8dSNPOFf6nnDADBMJcIEj8ljPvxHp8AAzE3AH0AAAAFZAAgAAAAADtbVEI2tdkrowEMdkacD2w0Y3T3Ofi7PH6HmA6sP0c/BXMAIAAAAADuBSROnZHA+NgUPH8d0LnWFiDsM2bY8bzjC1+elSsIygVsACAAAAAAR0G2m+uANoWknkr/NerFcG+fECVxNIs0cqbY1t/U/0MAAzE4AH0AAAAFZAAgAAAAAAh3WpeMVlikPFYj9hLj+fmIqVt6omCSF75W3TPExyWpBXMAIAAAAAAsQkRmwqeVj2gGE03orb6PtrIzDt6dDU3hgSQi8E2wKgVsACAAAAAA3GHaRE2RAcaBRd8VzmYzWeBD2Gmy91eTK1k8YdWObZcAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAAA==", "subType": "06" } } @@ -318,7 +314,6 @@ "schema": { "default.default": { "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", "ecocCollection": 
"enxcol_.default.ecoc", "fields": [ { diff --git a/test/client-side-encryption/spec/legacy/fle2-Range-DoublePrecision-Update.json b/test/client-side-encryption/spec/legacy/fle2v2-Range-DoublePrecision-Update.json similarity index 84% rename from test/client-side-encryption/spec/legacy/fle2-Range-DoublePrecision-Update.json rename to test/client-side-encryption/spec/legacy/fle2v2-Range-DoublePrecision-Update.json index 300202e227..7703c9057d 100644 --- a/test/client-side-encryption/spec/legacy/fle2-Range-DoublePrecision-Update.json +++ b/test/client-side-encryption/spec/legacy/fle2v2-Range-DoublePrecision-Update.json @@ -1,7 +1,8 @@ { "runOn": [ { - "minServerVersion": "6.2.0", + "minServerVersion": "7.0.0", + "serverless": "forbid", "topology": [ "replicaset", "sharded", @@ -13,9 +14,6 @@ "collection_name": "default", "data": [], "encrypted_fields": { - "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", - "ecocCollection": "enxcol_.default.ecoc", "fields": [ { "keyId": { @@ -207,7 +205,6 @@ "schema": { "default.default": { "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", "ecocCollection": "enxcol_.default.ecoc", "fields": [ { @@ -264,7 +261,6 @@ "schema": { "default.default": { "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", "ecocCollection": "enxcol_.default.ecoc", "fields": [ { @@ -315,7 +311,7 @@ "encryptedDoublePrecision": { "$gt": { "$binary": { - "base64": "CvoJAAADcGF5bG9hZADKCQAABGcAjgkAAAMwAH0AAAAFZAAgAAAAAHdJ2Vnb4MMzqVYVssjSdDy8XU4GVzMTfGifGETgQ2mYBXMAIAAAAAD7cFfKJGIXo6PjyeX2ria02CckW7dWFDoY/3FyBdm1NQVjACAAAAAAqZO+/+gRWlPaMOvuiXizSmBe7lp1VWg1vJ4UmW8o3bQAAzEAfQAAAAVkACAAAAAAlmvfDrZoydUet4eCVMq7z6a58Ea+1HLJOWxN5lNcrWEFcwAgAAAAAEBo5AWZyC41b9ayjWNQSL4iYEAIwR/JG+ssN8bdoK9RBWMAIAAAAAD4FTKJ6CTzKBAyAwZCLUoDEfnZTRZmhF1q/2hnDzmG9gADMgB9AAAABWQAIAAAAABbiLaoxAA6rinMJw1hC8ZUiq6UU1AQaPFn/py/Y06WuQVzACAAAAAAhtDasFkvYE7SCNu1je/hxdE9TJtAvvH3NtdEbKzNbCUFYwAgAAAAAHHy019aPatHTST+0wGsmukUcsQNQj6KpoS9b7iGeThAAAMzAH0AAAAFZAAgAAAAALORWwSr+tYNxcil2KIGSbNhTHvcPbdj+rLVQNx21S/KBXMAIAAAAAD6diZBkPEJ1cQy06LAxdbNK8Nlxbb44fH4Wk3Y3260nQVjACAAAAAAvUc1q7pyhjU0ilgmwiKkHIY3V4/LxO+Y2uT7eSpBOs8AAzQAfQAAAAVkACAAAAAAG5XMK96PjClNlUvg82j4pMY1YxsznZfj4uNweD394FoFcwAgAAAAAKHgQLdGJHkrfFg9nB93Ac+3VgBw6aU44MTkKIQ91dZoBWMAIAAAAACtbNc1DCoUUyzlkrYmJi4NlwOqLYmb6au4pDc8clXVXwADNQB9AAAABWQAIAAAAAA6M++b9I0YFemmWBAWAE3glu2Ah3Ta1FBxAQEIWS0toAVzACAAAAAANXYTqPf1Y6X3Ns6YQIX0C3FKCyWUo+Kk+fNcQvc0WSoFYwAgAAAAAAaqju6Dv8wqXxcsIbP67V1QGaD5kNTFofZ9Zuf1LGnKAAM2AH0AAAAFZAAgAAAAAKl8bV1riH/uyJ+X0HHd3+18k2cJl2dQFXCdoagutFcaBXMAIAAAAABm8F2Ew9f0VOABdcF+lP0Bi+zWvEUPniWgrxPq/Sx3uwVjACAAAAAAQd2pWVqlmmLg8m8xbs7yLewmR0Z6UQgXofbCsMHaGSoAAzcAfQAAAAVkACAAAAAAsYZD8JEP6kYsPncFnNZwJxhu4YtUTKPNcjHtv67H+rYFcwAgAAAAAI4LqZcRkvbs/2F62Flu0pixNcor4WmBD0DHGaf039wLBWMAIAAAAAAqzpfyBpr4Ano+nFWJyyTuIJelJgiRDnMHQqdeqV8JaAADOAB9AAAABWQAIAAAAAC0vdAi+dmoIXvZ5LqUqvyKV9/tHqSI2SWiSJO5pTnA2wVzACAAAAAAS2qvf9fvfVUH5WtsVxjxmskpGjYTQV34LwvQQw1y9wIFYwAgAAAAAFhNY4qwNntyA+GIoNHZsTkIUbPgy4TBlvNnTPjp4bMFAAM5AH0AAAAFZAAgAAAAAH31lb/srBcrOXkzddCwAnclsR5/3QijEVgECs2JjOWBBXMAIAAAAABg7+prDT73YcCvLE5QbuIrqGcjLc5pQD2Miq0d29yrxgVjACAAAAAAFKqAqXG/ktejFQ7fM2aobO2VmEvZLXnRaJH97Jy/sJYAAzEwAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVjACAAAAAAfsofSP7nQHv8ic8ZW0aNlWxplS46Z+mywPR4rQk+wcgAAzExAH0AAAAFZAAgAAAAAFdthRhe2Q8CvxGIhjTJZv0Lk97GkHciTPxZ/mckLoNaBXMAIAAAAAAqOxsAr23LOVB0DIHbPf9UDJJRFXY2YoKbjhRqw5psbQVjACAAAAAA7ty+Nif6KjS3v1zWKaHX9n4Zj3XC4aj
uCduKNIYr3l8AAzEyAH0AAAAFZAAgAAAAAMWymwwbvIeMqmnKWWifUqoCxOsdpnonM2qdLPyjqJO/BXMAIAAAAAB6IDmmpUhBD2zpRj8/y/kmOSXcjuIU14sNh6GKSsg2uwVjACAAAAAABSWO0Ii+NGcsHZQ9MR5EjPXVKeXlI4FQ1pcxeKDiuooAAzEzAH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVjACAAAAAAD51NYesiO4Fo7w7iWBfqAFxEqkaDVctpvzZ28nT4SE8AAzE0AH0AAAAFZAAgAAAAAJaRYmo8zqI2BEUzdSwp4tVRpPmVWsfydkYN3UHh6TMuBXMAIAAAAAAeD6mDnQeLlbC9i0sVgE8+RH6y+e94OJQ0tJ0PvblVSgVjACAAAAAAKUhYSt4nvvUfbNgPJ2E79SciVZ0ZzbzoZ2nKr4ewNLsAAzE1AH0AAAAFZAAgAAAAAAZZ538coNPwyRjhEwr5P8Xw32oWOJF+R+nfCGgy2qO3BXMAIAAAAACOPLnJlKwGNPDBReRKnHfteq0wFb3ezhrc7BVXs8RUHwVjACAAAAAAzCICkPZAkfTiD0MUt155dIPgLJ4/e0qFTM2FR0U261YAAzE2AH0AAAAFZAAgAAAAAFH9l9GGA1I52atJV5jNUf1lx8jBjoEoVoME97v5GFJiBXMAIAAAAAC1qH3Kd78Dr9NGbw7y9D/XYBwv5h1LLO8la5OU7g8UkQVjACAAAAAAn27H0Mpwatgc1R/4nXSRjsG2PzB0ol5YR9f3mCb2y/0AAzE3AH0AAAAFZAAgAAAAADtbVEI2tdkrowEMdkacD2w0Y3T3Ofi7PH6HmA6sP0c/BXMAIAAAAADuBSROnZHA+NgUPH8d0LnWFiDsM2bY8bzjC1+elSsIygVjACAAAAAAMinHEu4wkbeOpdZbXQ94q5o5pIEubqXUDrTRYOGmJC0AAzE4AH0AAAAFZAAgAAAAAAh3WpeMVlikPFYj9hLj+fmIqVt6omCSF75W3TPExyWpBXMAIAAAAAAsQkRmwqeVj2gGE03orb6PtrIzDt6dDU3hgSQi8E2wKgVjACAAAAAAvlZo8Qj3eAdxzZxN5sHKhxi+a9Npj7cZC5+pE6qrOawAAAVlACAAAAAA65pz95EthqQpfoHS9nWvdCh05AV+OokP7GUaI+7j8+wSY20AAAAAAAAAAAAAEHBheWxvYWRJZAAAAAAAEGZpcnN0T3BlcmF0b3IAAQAAAAA=", + "base64": "DdIJAAADcGF5bG9hZACiCQAABGcAjgkAAAMwAH0AAAAFZAAgAAAAAHdJ2Vnb4MMzqVYVssjSdDy8XU4GVzMTfGifGETgQ2mYBXMAIAAAAAD7cFfKJGIXo6PjyeX2ria02CckW7dWFDoY/3FyBdm1NQVsACAAAAAAhEPSNv4M023A3hzNFuy83+hIKuZ2mKRY954N++aEOBUAAzEAfQAAAAVkACAAAAAAlmvfDrZoydUet4eCVMq7z6a58Ea+1HLJOWxN5lNcrWEFcwAgAAAAAEBo5AWZyC41b9ayjWNQSL4iYEAIwR/JG+ssN8bdoK9RBWwAIAAAAACEndE0SLxFSElOrNnqeX0EPmgDio3udZjVREy4JLS3sQADMgB9AAAABWQAIAAAAABbiLaoxAA6rinMJw1hC8ZUiq6UU1AQaPFn/py/Y06WuQVzACAAAAAAhtDasFkvYE7SCNu1je/hxdE9TJtAvvH3NtdEbKzNbCUFbAAgAAAAAIGepU1RSCF8sWODHEpKglsoqw3VBBH4a/URGxgGzbq2AAMzAH0AAAAFZAAgAAAAALORWwSr+tYNxcil2KIGSbNhTHvcPbdj+rLVQNx21S/KBXMAIAAAAAD6diZBkPEJ1cQy06LAxdbNK8Nlxbb44fH4Wk3Y3260nQVsACAAAAAA1eYAZBFHlDiaDAljWi8blGQ2nvvZa5AO5doeo0SFZsgAAzQAfQAAAAVkACAAAAAAG5XMK96PjClNlUvg82j4pMY1YxsznZfj4uNweD394FoFcwAgAAAAAKHgQLdGJHkrfFg9nB93Ac+3VgBw6aU44MTkKIQ91dZoBWwAIAAAAAAPxXmi+SDJ+40A0KdwfRczexlZQrHjIA+D3oUB0EY9tAADNQB9AAAABWQAIAAAAAA6M++b9I0YFemmWBAWAE3glu2Ah3Ta1FBxAQEIWS0toAVzACAAAAAANXYTqPf1Y6X3Ns6YQIX0C3FKCyWUo+Kk+fNcQvc0WSoFbAAgAAAAAA+uJUw1ICYgyeygSRe206VTWVtUnhdci3iHbyP5YtEVAAM2AH0AAAAFZAAgAAAAAKl8bV1riH/uyJ+X0HHd3+18k2cJl2dQFXCdoagutFcaBXMAIAAAAABm8F2Ew9f0VOABdcF+lP0Bi+zWvEUPniWgrxPq/Sx3uwVsACAAAAAAJfFErjZ6BPhsw5LjJLqNtKDLJ4zV0eIZppQpd9b0wZoAAzcAfQAAAAVkACAAAAAAsYZD8JEP6kYsPncFnNZwJxhu4YtUTKPNcjHtv67H+rYFcwAgAAAAAI4LqZcRkvbs/2F62Flu0pixNcor4WmBD0DHGaf039wLBWwAIAAAAAD4wUR3xd9lKltcqqo8LYvdMQWzCRobkV/ppKB/yn5dUgADOAB9AAAABWQAIAAAAAC0vdAi+dmoIXvZ5LqUqvyKV9/tHqSI2SWiSJO5pTnA2wVzACAAAAAAS2qvf9fvfVUH5WtsVxjxmskpGjYTQV34LwvQQw1y9wIFbAAgAAAAAE0+FKuK7HxbypvCeEJzMTcjOWE0ScYOlTBMUNlIv55hAAM5AH0AAAAFZAAgAAAAAH31lb/srBcrOXkzddCwAnclsR5/3QijEVgECs2JjOWBBXMAIAAAAABg7+prDT73YcCvLE5QbuIrqGcjLc5pQD2Miq0d29yrxgVsACAAAAAAetRiPwDSFWBzpWSWkOKWM6fKStRJ8SyObnpc79ux8p0AAzEwAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzExAH0AAAAFZAAgAAAAAFdthRhe2Q8CvxGIhjTJZv0Lk97GkHciTPxZ/mckLoNaBXMAIAAAAAAqOxsAr23LOVB0DIHbPf9UDJJRFXY2YoKbjhRqw5psbQVsACAAAAAA0G2GD8ZQjDBntjLpW4rqwKRS6HiUjL03g1N6chANozcAAzEyAH0AAAAFZAAgAAAAAMWymwwbvIeMqmnKWWifUqoCxOsdpnonM2qdLPyjqJO/BXMAIAAAAAB6IDmmpUhBD2zpRj8/y/kmOSXcjuIU14sNh6GKSsg2uw
VsACAAAAAAWMFPNOk3EMSQDS9JGPSMIQP0oNGVugxXKKUrIPPlhHgAAzEzAH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzE0AH0AAAAFZAAgAAAAAJaRYmo8zqI2BEUzdSwp4tVRpPmVWsfydkYN3UHh6TMuBXMAIAAAAAAeD6mDnQeLlbC9i0sVgE8+RH6y+e94OJQ0tJ0PvblVSgVsACAAAAAAWp4jvretbDEsqEMzP/WLTnwOiJwCtfrCiB6m8k+yEMoAAzE1AH0AAAAFZAAgAAAAAAZZ538coNPwyRjhEwr5P8Xw32oWOJF+R+nfCGgy2qO3BXMAIAAAAACOPLnJlKwGNPDBReRKnHfteq0wFb3ezhrc7BVXs8RUHwVsACAAAAAA+lGesNk3+SyB/60rSvdQ2aN2vfJPR7llJVhufGTNhHkAAzE2AH0AAAAFZAAgAAAAAFH9l9GGA1I52atJV5jNUf1lx8jBjoEoVoME97v5GFJiBXMAIAAAAAC1qH3Kd78Dr9NGbw7y9D/XYBwv5h1LLO8la5OU7g8UkQVsACAAAAAArZ6atJCYrVfHB8dSNPOFf6nnDADBMJcIEj8ljPvxHp8AAzE3AH0AAAAFZAAgAAAAADtbVEI2tdkrowEMdkacD2w0Y3T3Ofi7PH6HmA6sP0c/BXMAIAAAAADuBSROnZHA+NgUPH8d0LnWFiDsM2bY8bzjC1+elSsIygVsACAAAAAAR0G2m+uANoWknkr/NerFcG+fECVxNIs0cqbY1t/U/0MAAzE4AH0AAAAFZAAgAAAAAAh3WpeMVlikPFYj9hLj+fmIqVt6omCSF75W3TPExyWpBXMAIAAAAAAsQkRmwqeVj2gGE03orb6PtrIzDt6dDU3hgSQi8E2wKgVsACAAAAAA3GHaRE2RAcaBRd8VzmYzWeBD2Gmy91eTK1k8YdWObZcAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAAA==", "subType": "06" } } @@ -335,7 +331,6 @@ "schema": { "default.default": { "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", "ecocCollection": "enxcol_.default.ecoc", "fields": [ { @@ -368,24 +363,6 @@ } ] } - }, - "deleteTokens": { - "default.default": { - "encryptedDoublePrecision": { - "e": { - "$binary": { - "base64": "65pz95EthqQpfoHS9nWvdCh05AV+OokP7GUaI+7j8+w=", - "subType": "00" - } - }, - "o": { - "$binary": { - "base64": "noN+05JsuO1oDg59yypIGj45i+eFH6HOTXOPpeZ//Mk=", - "subType": "00" - } - } - } - } } }, "$db": "default" diff --git a/test/client-side-encryption/spec/legacy/fle2-Range-Int-Aggregate.json b/test/client-side-encryption/spec/legacy/fle2v2-Range-Int-Aggregate.json similarity index 88% rename from test/client-side-encryption/spec/legacy/fle2-Range-Int-Aggregate.json rename to test/client-side-encryption/spec/legacy/fle2v2-Range-Int-Aggregate.json index 536415f3fe..9c2536264d 100644 --- a/test/client-side-encryption/spec/legacy/fle2-Range-Int-Aggregate.json +++ b/test/client-side-encryption/spec/legacy/fle2v2-Range-Int-Aggregate.json @@ -1,7 +1,8 @@ { "runOn": [ { - "minServerVersion": "6.2.0", + "minServerVersion": "7.0.0", + "serverless": "forbid", "topology": [ "replicaset", "sharded", @@ -13,9 +14,6 @@ "collection_name": "default", "data": [], "encrypted_fields": { - "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", - "ecocCollection": "enxcol_.default.ecoc", "fields": [ { "keyId": { @@ -204,7 +202,6 @@ "schema": { "default.default": { "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", "ecocCollection": "enxcol_.default.ecoc", "fields": [ { @@ -258,7 +255,6 @@ "schema": { "default.default": { "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", "ecocCollection": "enxcol_.default.ecoc", "fields": [ { @@ -304,7 +300,7 @@ "encryptedInt": { "$gt": { "$binary": { - "base64": 
"CnEFAAADcGF5bG9hZABBBQAABGcABQUAAAMwAH0AAAAFZAAgAAAAALGGQ/CRD+pGLD53BZzWcCcYbuGLVEyjzXIx7b+ux/q2BXMAIAAAAACOC6mXEZL27P9hethZbtKYsTXKK+FpgQ9Axxmn9N/cCwVjACAAAAAAKs6X8gaa+AJ6PpxVicsk7iCXpSYIkQ5zB0KnXqlfCWgAAzEAfQAAAAVkACAAAAAAtL3QIvnZqCF72eS6lKr8ilff7R6kiNklokiTuaU5wNsFcwAgAAAAAEtqr3/X731VB+VrbFcY8ZrJKRo2E0Fd+C8L0EMNcvcCBWMAIAAAAABYTWOKsDZ7cgPhiKDR2bE5CFGz4MuEwZbzZ0z46eGzBQADMgB9AAAABWQAIAAAAAB99ZW/7KwXKzl5M3XQsAJ3JbEef90IoxFYBArNiYzlgQVzACAAAAAAYO/qaw0+92HAryxOUG7iK6hnIy3OaUA9jIqtHdvcq8YFYwAgAAAAABSqgKlxv5LXoxUO3zNmqGztlZhL2S150WiR/eycv7CWAAMzAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVjACAAAAAAfsofSP7nQHv8ic8ZW0aNlWxplS46Z+mywPR4rQk+wcgAAzQAfQAAAAVkACAAAAAAV22FGF7ZDwK/EYiGNMlm/QuT3saQdyJM/Fn+ZyQug1oFcwAgAAAAACo7GwCvbcs5UHQMgds9/1QMklEVdjZigpuOFGrDmmxtBWMAIAAAAADu3L42J/oqNLe/XNYpodf2fhmPdcLhqO4J24o0hiveXwADNQB9AAAABWQAIAAAAADFspsMG7yHjKppyllon1KqAsTrHaZ6JzNqnSz8o6iTvwVzACAAAAAAeiA5pqVIQQ9s6UY/P8v5Jjkl3I7iFNeLDYehikrINrsFYwAgAAAAAAUljtCIvjRnLB2UPTEeRIz11Snl5SOBUNaXMXig4rqKAAM2AH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVjACAAAAAAD51NYesiO4Fo7w7iWBfqAFxEqkaDVctpvzZ28nT4SE8AAzcAfQAAAAVkACAAAAAAYfLwnoxK6XAGQrJFy8+TIJoq38ldBaO75h4zA4ZX5tQFcwAgAAAAAC2wk8UcJH5X5XGnDBYmel6srpBkzBhHtt3Jw1u5TSJ1BWMAIAAAAABJDtFhJ2tPbowp1UUmOCN/rqSqHRL1dtMu0c47vIlK4wADOAB9AAAABWQAIAAAAADGvyrtKkIcaV17ynZA7b2k5Pz6OhvxdWNkDvDWJIja8wVzACAAAAAAOLypVKNxf/wR1G8OZjUUsTQzDYeNNhhITxGMSp7euS4FYwAgAAAAAE6kvmXPqTnYIH4EJmNhy8OLVJZFOmdiBXLMorhELjKWAAM5AH0AAAAFZAAgAAAAAEocREw1L0g+roFUchJI2Yd0M0ME2bnErNUYnpyJP1SqBXMAIAAAAAAcE2/JK/8MoSeOchIuAkKh1X3ImoA7p8ujAZIfvIDo6QVjACAAAAAA85AiE+bNFAYQTXQAFexgeczdVhf8FUnf16WzJlI/kmsAAAVlACAAAAAA65pz95EthqQpfoHS9nWvdCh05AV+OokP7GUaI+7j8+wSY20AAAAAAAAAAAAAEHBheWxvYWRJZAAAAAAAEGZpcnN0T3BlcmF0b3IAAQAAAAA=", + "base64": 
"DUkFAAADcGF5bG9hZAAZBQAABGcABQUAAAMwAH0AAAAFZAAgAAAAALGGQ/CRD+pGLD53BZzWcCcYbuGLVEyjzXIx7b+ux/q2BXMAIAAAAACOC6mXEZL27P9hethZbtKYsTXKK+FpgQ9Axxmn9N/cCwVsACAAAAAA+MFEd8XfZSpbXKqqPC2L3TEFswkaG5Ff6aSgf8p+XVIAAzEAfQAAAAVkACAAAAAAtL3QIvnZqCF72eS6lKr8ilff7R6kiNklokiTuaU5wNsFcwAgAAAAAEtqr3/X731VB+VrbFcY8ZrJKRo2E0Fd+C8L0EMNcvcCBWwAIAAAAABNPhSriux8W8qbwnhCczE3IzlhNEnGDpUwTFDZSL+eYQADMgB9AAAABWQAIAAAAAB99ZW/7KwXKzl5M3XQsAJ3JbEef90IoxFYBArNiYzlgQVzACAAAAAAYO/qaw0+92HAryxOUG7iK6hnIy3OaUA9jIqtHdvcq8YFbAAgAAAAAHrUYj8A0hVgc6VklpDiljOnykrUSfEsjm56XO/bsfKdAAMzAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzQAfQAAAAVkACAAAAAAV22FGF7ZDwK/EYiGNMlm/QuT3saQdyJM/Fn+ZyQug1oFcwAgAAAAACo7GwCvbcs5UHQMgds9/1QMklEVdjZigpuOFGrDmmxtBWwAIAAAAADQbYYPxlCMMGe2MulbiurApFLoeJSMvTeDU3pyEA2jNwADNQB9AAAABWQAIAAAAADFspsMG7yHjKppyllon1KqAsTrHaZ6JzNqnSz8o6iTvwVzACAAAAAAeiA5pqVIQQ9s6UY/P8v5Jjkl3I7iFNeLDYehikrINrsFbAAgAAAAAFjBTzTpNxDEkA0vSRj0jCED9KDRlboMVyilKyDz5YR4AAM2AH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzcAfQAAAAVkACAAAAAAYfLwnoxK6XAGQrJFy8+TIJoq38ldBaO75h4zA4ZX5tQFcwAgAAAAAC2wk8UcJH5X5XGnDBYmel6srpBkzBhHtt3Jw1u5TSJ1BWwAIAAAAAA9/YU9eI3D7QbXKIw/3/gzWJ6MZrCYhG0j1wNKgRQp5wADOAB9AAAABWQAIAAAAADGvyrtKkIcaV17ynZA7b2k5Pz6OhvxdWNkDvDWJIja8wVzACAAAAAAOLypVKNxf/wR1G8OZjUUsTQzDYeNNhhITxGMSp7euS4FbAAgAAAAAA9EsxoV1B2DcQ1NJRwuxXnvVR+vkD0wbbDYEI/zFEnDAAM5AH0AAAAFZAAgAAAAAEocREw1L0g+roFUchJI2Yd0M0ME2bnErNUYnpyJP1SqBXMAIAAAAAAcE2/JK/8MoSeOchIuAkKh1X3ImoA7p8ujAZIfvIDo6QVsACAAAAAA+W0+zgLr85/PD7P9a94wk6MgNgrizx/XU9aCxAkp1IwAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAAA==", "subType": "06" } } @@ -318,7 +314,6 @@ "schema": { "default.default": { "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", "ecocCollection": "enxcol_.default.ecoc", "fields": [ { diff --git a/test/client-side-encryption/spec/legacy/fle2-Range-Int-Correctness.json b/test/client-side-encryption/spec/legacy/fle2v2-Range-Int-Correctness.json similarity index 99% rename from test/client-side-encryption/spec/legacy/fle2-Range-Int-Correctness.json rename to test/client-side-encryption/spec/legacy/fle2v2-Range-Int-Correctness.json index 6abd773da8..58ccf3efc8 100644 --- a/test/client-side-encryption/spec/legacy/fle2-Range-Int-Correctness.json +++ b/test/client-side-encryption/spec/legacy/fle2v2-Range-Int-Correctness.json @@ -1,7 +1,8 @@ { "runOn": [ { - "minServerVersion": "6.2.0", + "minServerVersion": "7.0.0", + "serverless": "forbid", "topology": [ "replicaset", "sharded", @@ -13,9 +14,6 @@ "collection_name": "default", "data": [], "encrypted_fields": { - "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", - "ecocCollection": "enxcol_.default.ecoc", "fields": [ { "keyId": { diff --git a/test/client-side-encryption/spec/legacy/fle2-Range-Int-Delete.json b/test/client-side-encryption/spec/legacy/fle2v2-Range-Int-Delete.json similarity index 83% rename from test/client-side-encryption/spec/legacy/fle2-Range-Int-Delete.json rename to test/client-side-encryption/spec/legacy/fle2v2-Range-Int-Delete.json index 9d5bff1d19..b20b2750bb 100644 --- a/test/client-side-encryption/spec/legacy/fle2-Range-Int-Delete.json +++ b/test/client-side-encryption/spec/legacy/fle2v2-Range-Int-Delete.json @@ -1,7 +1,8 @@ { "runOn": [ { - "minServerVersion": "6.2.0", + 
"minServerVersion": "7.0.0", + "serverless": "forbid", "topology": [ "replicaset", "sharded", @@ -13,9 +14,6 @@ "collection_name": "default", "data": [], "encrypted_fields": { - "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", - "ecocCollection": "enxcol_.default.ecoc", "fields": [ { "keyId": { @@ -195,7 +193,6 @@ "schema": { "default.default": { "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", "ecocCollection": "enxcol_.default.ecoc", "fields": [ { @@ -249,7 +246,6 @@ "schema": { "default.default": { "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", "ecocCollection": "enxcol_.default.ecoc", "fields": [ { @@ -295,7 +291,7 @@ "encryptedInt": { "$gt": { "$binary": { - "base64": "CnEFAAADcGF5bG9hZABBBQAABGcABQUAAAMwAH0AAAAFZAAgAAAAALGGQ/CRD+pGLD53BZzWcCcYbuGLVEyjzXIx7b+ux/q2BXMAIAAAAACOC6mXEZL27P9hethZbtKYsTXKK+FpgQ9Axxmn9N/cCwVjACAAAAAAKs6X8gaa+AJ6PpxVicsk7iCXpSYIkQ5zB0KnXqlfCWgAAzEAfQAAAAVkACAAAAAAtL3QIvnZqCF72eS6lKr8ilff7R6kiNklokiTuaU5wNsFcwAgAAAAAEtqr3/X731VB+VrbFcY8ZrJKRo2E0Fd+C8L0EMNcvcCBWMAIAAAAABYTWOKsDZ7cgPhiKDR2bE5CFGz4MuEwZbzZ0z46eGzBQADMgB9AAAABWQAIAAAAAB99ZW/7KwXKzl5M3XQsAJ3JbEef90IoxFYBArNiYzlgQVzACAAAAAAYO/qaw0+92HAryxOUG7iK6hnIy3OaUA9jIqtHdvcq8YFYwAgAAAAABSqgKlxv5LXoxUO3zNmqGztlZhL2S150WiR/eycv7CWAAMzAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVjACAAAAAAfsofSP7nQHv8ic8ZW0aNlWxplS46Z+mywPR4rQk+wcgAAzQAfQAAAAVkACAAAAAAV22FGF7ZDwK/EYiGNMlm/QuT3saQdyJM/Fn+ZyQug1oFcwAgAAAAACo7GwCvbcs5UHQMgds9/1QMklEVdjZigpuOFGrDmmxtBWMAIAAAAADu3L42J/oqNLe/XNYpodf2fhmPdcLhqO4J24o0hiveXwADNQB9AAAABWQAIAAAAADFspsMG7yHjKppyllon1KqAsTrHaZ6JzNqnSz8o6iTvwVzACAAAAAAeiA5pqVIQQ9s6UY/P8v5Jjkl3I7iFNeLDYehikrINrsFYwAgAAAAAAUljtCIvjRnLB2UPTEeRIz11Snl5SOBUNaXMXig4rqKAAM2AH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVjACAAAAAAD51NYesiO4Fo7w7iWBfqAFxEqkaDVctpvzZ28nT4SE8AAzcAfQAAAAVkACAAAAAAYfLwnoxK6XAGQrJFy8+TIJoq38ldBaO75h4zA4ZX5tQFcwAgAAAAAC2wk8UcJH5X5XGnDBYmel6srpBkzBhHtt3Jw1u5TSJ1BWMAIAAAAABJDtFhJ2tPbowp1UUmOCN/rqSqHRL1dtMu0c47vIlK4wADOAB9AAAABWQAIAAAAADGvyrtKkIcaV17ynZA7b2k5Pz6OhvxdWNkDvDWJIja8wVzACAAAAAAOLypVKNxf/wR1G8OZjUUsTQzDYeNNhhITxGMSp7euS4FYwAgAAAAAE6kvmXPqTnYIH4EJmNhy8OLVJZFOmdiBXLMorhELjKWAAM5AH0AAAAFZAAgAAAAAEocREw1L0g+roFUchJI2Yd0M0ME2bnErNUYnpyJP1SqBXMAIAAAAAAcE2/JK/8MoSeOchIuAkKh1X3ImoA7p8ujAZIfvIDo6QVjACAAAAAA85AiE+bNFAYQTXQAFexgeczdVhf8FUnf16WzJlI/kmsAAAVlACAAAAAA65pz95EthqQpfoHS9nWvdCh05AV+OokP7GUaI+7j8+wSY20AAAAAAAAAAAAAEHBheWxvYWRJZAAAAAAAEGZpcnN0T3BlcmF0b3IAAQAAAAA=", + "base64": 
"DUkFAAADcGF5bG9hZAAZBQAABGcABQUAAAMwAH0AAAAFZAAgAAAAALGGQ/CRD+pGLD53BZzWcCcYbuGLVEyjzXIx7b+ux/q2BXMAIAAAAACOC6mXEZL27P9hethZbtKYsTXKK+FpgQ9Axxmn9N/cCwVsACAAAAAA+MFEd8XfZSpbXKqqPC2L3TEFswkaG5Ff6aSgf8p+XVIAAzEAfQAAAAVkACAAAAAAtL3QIvnZqCF72eS6lKr8ilff7R6kiNklokiTuaU5wNsFcwAgAAAAAEtqr3/X731VB+VrbFcY8ZrJKRo2E0Fd+C8L0EMNcvcCBWwAIAAAAABNPhSriux8W8qbwnhCczE3IzlhNEnGDpUwTFDZSL+eYQADMgB9AAAABWQAIAAAAAB99ZW/7KwXKzl5M3XQsAJ3JbEef90IoxFYBArNiYzlgQVzACAAAAAAYO/qaw0+92HAryxOUG7iK6hnIy3OaUA9jIqtHdvcq8YFbAAgAAAAAHrUYj8A0hVgc6VklpDiljOnykrUSfEsjm56XO/bsfKdAAMzAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzQAfQAAAAVkACAAAAAAV22FGF7ZDwK/EYiGNMlm/QuT3saQdyJM/Fn+ZyQug1oFcwAgAAAAACo7GwCvbcs5UHQMgds9/1QMklEVdjZigpuOFGrDmmxtBWwAIAAAAADQbYYPxlCMMGe2MulbiurApFLoeJSMvTeDU3pyEA2jNwADNQB9AAAABWQAIAAAAADFspsMG7yHjKppyllon1KqAsTrHaZ6JzNqnSz8o6iTvwVzACAAAAAAeiA5pqVIQQ9s6UY/P8v5Jjkl3I7iFNeLDYehikrINrsFbAAgAAAAAFjBTzTpNxDEkA0vSRj0jCED9KDRlboMVyilKyDz5YR4AAM2AH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzcAfQAAAAVkACAAAAAAYfLwnoxK6XAGQrJFy8+TIJoq38ldBaO75h4zA4ZX5tQFcwAgAAAAAC2wk8UcJH5X5XGnDBYmel6srpBkzBhHtt3Jw1u5TSJ1BWwAIAAAAAA9/YU9eI3D7QbXKIw/3/gzWJ6MZrCYhG0j1wNKgRQp5wADOAB9AAAABWQAIAAAAADGvyrtKkIcaV17ynZA7b2k5Pz6OhvxdWNkDvDWJIja8wVzACAAAAAAOLypVKNxf/wR1G8OZjUUsTQzDYeNNhhITxGMSp7euS4FbAAgAAAAAA9EsxoV1B2DcQ1NJRwuxXnvVR+vkD0wbbDYEI/zFEnDAAM5AH0AAAAFZAAgAAAAAEocREw1L0g+roFUchJI2Yd0M0ME2bnErNUYnpyJP1SqBXMAIAAAAAAcE2/JK/8MoSeOchIuAkKh1X3ImoA7p8ujAZIfvIDo6QVsACAAAAAA+W0+zgLr85/PD7P9a94wk6MgNgrizx/XU9aCxAkp1IwAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAAA==", "subType": "06" } } @@ -304,12 +300,12 @@ "limit": 1 } ], + "ordered": true, "encryptionInformation": { "type": 1, "schema": { "default.default": { "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", "ecocCollection": "enxcol_.default.ecoc", "fields": [ { @@ -339,24 +335,6 @@ } ] } - }, - "deleteTokens": { - "default.default": { - "encryptedInt": { - "e": { - "$binary": { - "base64": "65pz95EthqQpfoHS9nWvdCh05AV+OokP7GUaI+7j8+w=", - "subType": "00" - } - }, - "o": { - "$binary": { - "base64": "noN+05JsuO1oDg59yypIGj45i+eFH6HOTXOPpeZ//Mk=", - "subType": "00" - } - } - } - } } } }, diff --git a/test/client-side-encryption/spec/legacy/fle2-Range-Int-FindOneAndUpdate.json b/test/client-side-encryption/spec/legacy/fle2v2-Range-Int-FindOneAndUpdate.json similarity index 85% rename from test/client-side-encryption/spec/legacy/fle2-Range-Int-FindOneAndUpdate.json rename to test/client-side-encryption/spec/legacy/fle2v2-Range-Int-FindOneAndUpdate.json index 4bf57700c9..f9c189ace9 100644 --- a/test/client-side-encryption/spec/legacy/fle2-Range-Int-FindOneAndUpdate.json +++ b/test/client-side-encryption/spec/legacy/fle2v2-Range-Int-FindOneAndUpdate.json @@ -1,7 +1,8 @@ { "runOn": [ { - "minServerVersion": "6.2.0", + "minServerVersion": "7.0.0", + "serverless": "forbid", "topology": [ "replicaset", "sharded", @@ -13,9 +14,6 @@ "collection_name": "default", "data": [], "encrypted_fields": { - "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", - "ecocCollection": "enxcol_.default.ecoc", "fields": [ { "keyId": { @@ -206,7 +204,6 @@ "schema": { "default.default": { "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", "ecocCollection": 
"enxcol_.default.ecoc", "fields": [ { @@ -260,7 +257,6 @@ "schema": { "default.default": { "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", "ecocCollection": "enxcol_.default.ecoc", "fields": [ { @@ -304,7 +300,7 @@ "encryptedInt": { "$gt": { "$binary": { - "base64": "CnEFAAADcGF5bG9hZABBBQAABGcABQUAAAMwAH0AAAAFZAAgAAAAALGGQ/CRD+pGLD53BZzWcCcYbuGLVEyjzXIx7b+ux/q2BXMAIAAAAACOC6mXEZL27P9hethZbtKYsTXKK+FpgQ9Axxmn9N/cCwVjACAAAAAAKs6X8gaa+AJ6PpxVicsk7iCXpSYIkQ5zB0KnXqlfCWgAAzEAfQAAAAVkACAAAAAAtL3QIvnZqCF72eS6lKr8ilff7R6kiNklokiTuaU5wNsFcwAgAAAAAEtqr3/X731VB+VrbFcY8ZrJKRo2E0Fd+C8L0EMNcvcCBWMAIAAAAABYTWOKsDZ7cgPhiKDR2bE5CFGz4MuEwZbzZ0z46eGzBQADMgB9AAAABWQAIAAAAAB99ZW/7KwXKzl5M3XQsAJ3JbEef90IoxFYBArNiYzlgQVzACAAAAAAYO/qaw0+92HAryxOUG7iK6hnIy3OaUA9jIqtHdvcq8YFYwAgAAAAABSqgKlxv5LXoxUO3zNmqGztlZhL2S150WiR/eycv7CWAAMzAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVjACAAAAAAfsofSP7nQHv8ic8ZW0aNlWxplS46Z+mywPR4rQk+wcgAAzQAfQAAAAVkACAAAAAAV22FGF7ZDwK/EYiGNMlm/QuT3saQdyJM/Fn+ZyQug1oFcwAgAAAAACo7GwCvbcs5UHQMgds9/1QMklEVdjZigpuOFGrDmmxtBWMAIAAAAADu3L42J/oqNLe/XNYpodf2fhmPdcLhqO4J24o0hiveXwADNQB9AAAABWQAIAAAAADFspsMG7yHjKppyllon1KqAsTrHaZ6JzNqnSz8o6iTvwVzACAAAAAAeiA5pqVIQQ9s6UY/P8v5Jjkl3I7iFNeLDYehikrINrsFYwAgAAAAAAUljtCIvjRnLB2UPTEeRIz11Snl5SOBUNaXMXig4rqKAAM2AH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVjACAAAAAAD51NYesiO4Fo7w7iWBfqAFxEqkaDVctpvzZ28nT4SE8AAzcAfQAAAAVkACAAAAAAYfLwnoxK6XAGQrJFy8+TIJoq38ldBaO75h4zA4ZX5tQFcwAgAAAAAC2wk8UcJH5X5XGnDBYmel6srpBkzBhHtt3Jw1u5TSJ1BWMAIAAAAABJDtFhJ2tPbowp1UUmOCN/rqSqHRL1dtMu0c47vIlK4wADOAB9AAAABWQAIAAAAADGvyrtKkIcaV17ynZA7b2k5Pz6OhvxdWNkDvDWJIja8wVzACAAAAAAOLypVKNxf/wR1G8OZjUUsTQzDYeNNhhITxGMSp7euS4FYwAgAAAAAE6kvmXPqTnYIH4EJmNhy8OLVJZFOmdiBXLMorhELjKWAAM5AH0AAAAFZAAgAAAAAEocREw1L0g+roFUchJI2Yd0M0ME2bnErNUYnpyJP1SqBXMAIAAAAAAcE2/JK/8MoSeOchIuAkKh1X3ImoA7p8ujAZIfvIDo6QVjACAAAAAA85AiE+bNFAYQTXQAFexgeczdVhf8FUnf16WzJlI/kmsAAAVlACAAAAAA65pz95EthqQpfoHS9nWvdCh05AV+OokP7GUaI+7j8+wSY20AAAAAAAAAAAAAEHBheWxvYWRJZAAAAAAAEGZpcnN0T3BlcmF0b3IAAQAAAAA=", + "base64": 
"DUkFAAADcGF5bG9hZAAZBQAABGcABQUAAAMwAH0AAAAFZAAgAAAAALGGQ/CRD+pGLD53BZzWcCcYbuGLVEyjzXIx7b+ux/q2BXMAIAAAAACOC6mXEZL27P9hethZbtKYsTXKK+FpgQ9Axxmn9N/cCwVsACAAAAAA+MFEd8XfZSpbXKqqPC2L3TEFswkaG5Ff6aSgf8p+XVIAAzEAfQAAAAVkACAAAAAAtL3QIvnZqCF72eS6lKr8ilff7R6kiNklokiTuaU5wNsFcwAgAAAAAEtqr3/X731VB+VrbFcY8ZrJKRo2E0Fd+C8L0EMNcvcCBWwAIAAAAABNPhSriux8W8qbwnhCczE3IzlhNEnGDpUwTFDZSL+eYQADMgB9AAAABWQAIAAAAAB99ZW/7KwXKzl5M3XQsAJ3JbEef90IoxFYBArNiYzlgQVzACAAAAAAYO/qaw0+92HAryxOUG7iK6hnIy3OaUA9jIqtHdvcq8YFbAAgAAAAAHrUYj8A0hVgc6VklpDiljOnykrUSfEsjm56XO/bsfKdAAMzAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzQAfQAAAAVkACAAAAAAV22FGF7ZDwK/EYiGNMlm/QuT3saQdyJM/Fn+ZyQug1oFcwAgAAAAACo7GwCvbcs5UHQMgds9/1QMklEVdjZigpuOFGrDmmxtBWwAIAAAAADQbYYPxlCMMGe2MulbiurApFLoeJSMvTeDU3pyEA2jNwADNQB9AAAABWQAIAAAAADFspsMG7yHjKppyllon1KqAsTrHaZ6JzNqnSz8o6iTvwVzACAAAAAAeiA5pqVIQQ9s6UY/P8v5Jjkl3I7iFNeLDYehikrINrsFbAAgAAAAAFjBTzTpNxDEkA0vSRj0jCED9KDRlboMVyilKyDz5YR4AAM2AH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzcAfQAAAAVkACAAAAAAYfLwnoxK6XAGQrJFy8+TIJoq38ldBaO75h4zA4ZX5tQFcwAgAAAAAC2wk8UcJH5X5XGnDBYmel6srpBkzBhHtt3Jw1u5TSJ1BWwAIAAAAAA9/YU9eI3D7QbXKIw/3/gzWJ6MZrCYhG0j1wNKgRQp5wADOAB9AAAABWQAIAAAAADGvyrtKkIcaV17ynZA7b2k5Pz6OhvxdWNkDvDWJIja8wVzACAAAAAAOLypVKNxf/wR1G8OZjUUsTQzDYeNNhhITxGMSp7euS4FbAAgAAAAAA9EsxoV1B2DcQ1NJRwuxXnvVR+vkD0wbbDYEI/zFEnDAAM5AH0AAAAFZAAgAAAAAEocREw1L0g+roFUchJI2Yd0M0ME2bnErNUYnpyJP1SqBXMAIAAAAAAcE2/JK/8MoSeOchIuAkKh1X3ImoA7p8ujAZIfvIDo6QVsACAAAAAA+W0+zgLr85/PD7P9a94wk6MgNgrizx/XU9aCxAkp1IwAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAAA==", "subType": "06" } } @@ -322,7 +318,6 @@ "schema": { "default.default": { "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", "ecocCollection": "enxcol_.default.ecoc", "fields": [ { @@ -352,24 +347,6 @@ } ] } - }, - "deleteTokens": { - "default.default": { - "encryptedInt": { - "e": { - "$binary": { - "base64": "65pz95EthqQpfoHS9nWvdCh05AV+OokP7GUaI+7j8+w=", - "subType": "00" - } - }, - "o": { - "$binary": { - "base64": "noN+05JsuO1oDg59yypIGj45i+eFH6HOTXOPpeZ//Mk=", - "subType": "00" - } - } - } - } } } }, diff --git a/test/client-side-encryption/spec/legacy/fle2-Range-Int-InsertFind.json b/test/client-side-encryption/spec/legacy/fle2v2-Range-Int-InsertFind.json similarity index 88% rename from test/client-side-encryption/spec/legacy/fle2-Range-Int-InsertFind.json rename to test/client-side-encryption/spec/legacy/fle2v2-Range-Int-InsertFind.json index 6f6022e749..874d4760c8 100644 --- a/test/client-side-encryption/spec/legacy/fle2-Range-Int-InsertFind.json +++ b/test/client-side-encryption/spec/legacy/fle2v2-Range-Int-InsertFind.json @@ -1,7 +1,8 @@ { "runOn": [ { - "minServerVersion": "6.2.0", + "minServerVersion": "7.0.0", + "serverless": "forbid", "topology": [ "replicaset", "sharded", @@ -13,9 +14,6 @@ "collection_name": "default", "data": [], "encrypted_fields": { - "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", - "ecocCollection": "enxcol_.default.ecoc", "fields": [ { "keyId": { @@ -200,7 +198,6 @@ "schema": { "default.default": { "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", "ecocCollection": "enxcol_.default.ecoc", "fields": [ { @@ -254,7 +251,6 @@ "schema": { "default.default": { "escCollection": 
"enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", "ecocCollection": "enxcol_.default.ecoc", "fields": [ { @@ -298,7 +294,7 @@ "encryptedInt": { "$gt": { "$binary": { - "base64": "CnEFAAADcGF5bG9hZABBBQAABGcABQUAAAMwAH0AAAAFZAAgAAAAALGGQ/CRD+pGLD53BZzWcCcYbuGLVEyjzXIx7b+ux/q2BXMAIAAAAACOC6mXEZL27P9hethZbtKYsTXKK+FpgQ9Axxmn9N/cCwVjACAAAAAAKs6X8gaa+AJ6PpxVicsk7iCXpSYIkQ5zB0KnXqlfCWgAAzEAfQAAAAVkACAAAAAAtL3QIvnZqCF72eS6lKr8ilff7R6kiNklokiTuaU5wNsFcwAgAAAAAEtqr3/X731VB+VrbFcY8ZrJKRo2E0Fd+C8L0EMNcvcCBWMAIAAAAABYTWOKsDZ7cgPhiKDR2bE5CFGz4MuEwZbzZ0z46eGzBQADMgB9AAAABWQAIAAAAAB99ZW/7KwXKzl5M3XQsAJ3JbEef90IoxFYBArNiYzlgQVzACAAAAAAYO/qaw0+92HAryxOUG7iK6hnIy3OaUA9jIqtHdvcq8YFYwAgAAAAABSqgKlxv5LXoxUO3zNmqGztlZhL2S150WiR/eycv7CWAAMzAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVjACAAAAAAfsofSP7nQHv8ic8ZW0aNlWxplS46Z+mywPR4rQk+wcgAAzQAfQAAAAVkACAAAAAAV22FGF7ZDwK/EYiGNMlm/QuT3saQdyJM/Fn+ZyQug1oFcwAgAAAAACo7GwCvbcs5UHQMgds9/1QMklEVdjZigpuOFGrDmmxtBWMAIAAAAADu3L42J/oqNLe/XNYpodf2fhmPdcLhqO4J24o0hiveXwADNQB9AAAABWQAIAAAAADFspsMG7yHjKppyllon1KqAsTrHaZ6JzNqnSz8o6iTvwVzACAAAAAAeiA5pqVIQQ9s6UY/P8v5Jjkl3I7iFNeLDYehikrINrsFYwAgAAAAAAUljtCIvjRnLB2UPTEeRIz11Snl5SOBUNaXMXig4rqKAAM2AH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVjACAAAAAAD51NYesiO4Fo7w7iWBfqAFxEqkaDVctpvzZ28nT4SE8AAzcAfQAAAAVkACAAAAAAYfLwnoxK6XAGQrJFy8+TIJoq38ldBaO75h4zA4ZX5tQFcwAgAAAAAC2wk8UcJH5X5XGnDBYmel6srpBkzBhHtt3Jw1u5TSJ1BWMAIAAAAABJDtFhJ2tPbowp1UUmOCN/rqSqHRL1dtMu0c47vIlK4wADOAB9AAAABWQAIAAAAADGvyrtKkIcaV17ynZA7b2k5Pz6OhvxdWNkDvDWJIja8wVzACAAAAAAOLypVKNxf/wR1G8OZjUUsTQzDYeNNhhITxGMSp7euS4FYwAgAAAAAE6kvmXPqTnYIH4EJmNhy8OLVJZFOmdiBXLMorhELjKWAAM5AH0AAAAFZAAgAAAAAEocREw1L0g+roFUchJI2Yd0M0ME2bnErNUYnpyJP1SqBXMAIAAAAAAcE2/JK/8MoSeOchIuAkKh1X3ImoA7p8ujAZIfvIDo6QVjACAAAAAA85AiE+bNFAYQTXQAFexgeczdVhf8FUnf16WzJlI/kmsAAAVlACAAAAAA65pz95EthqQpfoHS9nWvdCh05AV+OokP7GUaI+7j8+wSY20AAAAAAAAAAAAAEHBheWxvYWRJZAAAAAAAEGZpcnN0T3BlcmF0b3IAAQAAAAA=", + "base64": 
"DUkFAAADcGF5bG9hZAAZBQAABGcABQUAAAMwAH0AAAAFZAAgAAAAALGGQ/CRD+pGLD53BZzWcCcYbuGLVEyjzXIx7b+ux/q2BXMAIAAAAACOC6mXEZL27P9hethZbtKYsTXKK+FpgQ9Axxmn9N/cCwVsACAAAAAA+MFEd8XfZSpbXKqqPC2L3TEFswkaG5Ff6aSgf8p+XVIAAzEAfQAAAAVkACAAAAAAtL3QIvnZqCF72eS6lKr8ilff7R6kiNklokiTuaU5wNsFcwAgAAAAAEtqr3/X731VB+VrbFcY8ZrJKRo2E0Fd+C8L0EMNcvcCBWwAIAAAAABNPhSriux8W8qbwnhCczE3IzlhNEnGDpUwTFDZSL+eYQADMgB9AAAABWQAIAAAAAB99ZW/7KwXKzl5M3XQsAJ3JbEef90IoxFYBArNiYzlgQVzACAAAAAAYO/qaw0+92HAryxOUG7iK6hnIy3OaUA9jIqtHdvcq8YFbAAgAAAAAHrUYj8A0hVgc6VklpDiljOnykrUSfEsjm56XO/bsfKdAAMzAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzQAfQAAAAVkACAAAAAAV22FGF7ZDwK/EYiGNMlm/QuT3saQdyJM/Fn+ZyQug1oFcwAgAAAAACo7GwCvbcs5UHQMgds9/1QMklEVdjZigpuOFGrDmmxtBWwAIAAAAADQbYYPxlCMMGe2MulbiurApFLoeJSMvTeDU3pyEA2jNwADNQB9AAAABWQAIAAAAADFspsMG7yHjKppyllon1KqAsTrHaZ6JzNqnSz8o6iTvwVzACAAAAAAeiA5pqVIQQ9s6UY/P8v5Jjkl3I7iFNeLDYehikrINrsFbAAgAAAAAFjBTzTpNxDEkA0vSRj0jCED9KDRlboMVyilKyDz5YR4AAM2AH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzcAfQAAAAVkACAAAAAAYfLwnoxK6XAGQrJFy8+TIJoq38ldBaO75h4zA4ZX5tQFcwAgAAAAAC2wk8UcJH5X5XGnDBYmel6srpBkzBhHtt3Jw1u5TSJ1BWwAIAAAAAA9/YU9eI3D7QbXKIw/3/gzWJ6MZrCYhG0j1wNKgRQp5wADOAB9AAAABWQAIAAAAADGvyrtKkIcaV17ynZA7b2k5Pz6OhvxdWNkDvDWJIja8wVzACAAAAAAOLypVKNxf/wR1G8OZjUUsTQzDYeNNhhITxGMSp7euS4FbAAgAAAAAA9EsxoV1B2DcQ1NJRwuxXnvVR+vkD0wbbDYEI/zFEnDAAM5AH0AAAAFZAAgAAAAAEocREw1L0g+roFUchJI2Yd0M0ME2bnErNUYnpyJP1SqBXMAIAAAAAAcE2/JK/8MoSeOchIuAkKh1X3ImoA7p8ujAZIfvIDo6QVsACAAAAAA+W0+zgLr85/PD7P9a94wk6MgNgrizx/XU9aCxAkp1IwAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAAA==", "subType": "06" } } @@ -309,7 +305,6 @@ "schema": { "default.default": { "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", "ecocCollection": "enxcol_.default.ecoc", "fields": [ { diff --git a/test/client-side-encryption/spec/legacy/fle2-Range-Int-Update.json b/test/client-side-encryption/spec/legacy/fle2v2-Range-Int-Update.json similarity index 85% rename from test/client-side-encryption/spec/legacy/fle2-Range-Int-Update.json rename to test/client-side-encryption/spec/legacy/fle2v2-Range-Int-Update.json index 17d23b957f..c2b62b4d1c 100644 --- a/test/client-side-encryption/spec/legacy/fle2-Range-Int-Update.json +++ b/test/client-side-encryption/spec/legacy/fle2v2-Range-Int-Update.json @@ -1,7 +1,8 @@ { "runOn": [ { - "minServerVersion": "6.2.0", + "minServerVersion": "7.0.0", + "serverless": "forbid", "topology": [ "replicaset", "sharded", @@ -13,9 +14,6 @@ "collection_name": "default", "data": [], "encrypted_fields": { - "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", - "ecocCollection": "enxcol_.default.ecoc", "fields": [ { "keyId": { @@ -204,7 +202,6 @@ "schema": { "default.default": { "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", "ecocCollection": "enxcol_.default.ecoc", "fields": [ { @@ -258,7 +255,6 @@ "schema": { "default.default": { "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", "ecocCollection": "enxcol_.default.ecoc", "fields": [ { @@ -306,7 +302,7 @@ "encryptedInt": { "$gt": { "$binary": { - "base64": 
"CnEFAAADcGF5bG9hZABBBQAABGcABQUAAAMwAH0AAAAFZAAgAAAAALGGQ/CRD+pGLD53BZzWcCcYbuGLVEyjzXIx7b+ux/q2BXMAIAAAAACOC6mXEZL27P9hethZbtKYsTXKK+FpgQ9Axxmn9N/cCwVjACAAAAAAKs6X8gaa+AJ6PpxVicsk7iCXpSYIkQ5zB0KnXqlfCWgAAzEAfQAAAAVkACAAAAAAtL3QIvnZqCF72eS6lKr8ilff7R6kiNklokiTuaU5wNsFcwAgAAAAAEtqr3/X731VB+VrbFcY8ZrJKRo2E0Fd+C8L0EMNcvcCBWMAIAAAAABYTWOKsDZ7cgPhiKDR2bE5CFGz4MuEwZbzZ0z46eGzBQADMgB9AAAABWQAIAAAAAB99ZW/7KwXKzl5M3XQsAJ3JbEef90IoxFYBArNiYzlgQVzACAAAAAAYO/qaw0+92HAryxOUG7iK6hnIy3OaUA9jIqtHdvcq8YFYwAgAAAAABSqgKlxv5LXoxUO3zNmqGztlZhL2S150WiR/eycv7CWAAMzAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVjACAAAAAAfsofSP7nQHv8ic8ZW0aNlWxplS46Z+mywPR4rQk+wcgAAzQAfQAAAAVkACAAAAAAV22FGF7ZDwK/EYiGNMlm/QuT3saQdyJM/Fn+ZyQug1oFcwAgAAAAACo7GwCvbcs5UHQMgds9/1QMklEVdjZigpuOFGrDmmxtBWMAIAAAAADu3L42J/oqNLe/XNYpodf2fhmPdcLhqO4J24o0hiveXwADNQB9AAAABWQAIAAAAADFspsMG7yHjKppyllon1KqAsTrHaZ6JzNqnSz8o6iTvwVzACAAAAAAeiA5pqVIQQ9s6UY/P8v5Jjkl3I7iFNeLDYehikrINrsFYwAgAAAAAAUljtCIvjRnLB2UPTEeRIz11Snl5SOBUNaXMXig4rqKAAM2AH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVjACAAAAAAD51NYesiO4Fo7w7iWBfqAFxEqkaDVctpvzZ28nT4SE8AAzcAfQAAAAVkACAAAAAAYfLwnoxK6XAGQrJFy8+TIJoq38ldBaO75h4zA4ZX5tQFcwAgAAAAAC2wk8UcJH5X5XGnDBYmel6srpBkzBhHtt3Jw1u5TSJ1BWMAIAAAAABJDtFhJ2tPbowp1UUmOCN/rqSqHRL1dtMu0c47vIlK4wADOAB9AAAABWQAIAAAAADGvyrtKkIcaV17ynZA7b2k5Pz6OhvxdWNkDvDWJIja8wVzACAAAAAAOLypVKNxf/wR1G8OZjUUsTQzDYeNNhhITxGMSp7euS4FYwAgAAAAAE6kvmXPqTnYIH4EJmNhy8OLVJZFOmdiBXLMorhELjKWAAM5AH0AAAAFZAAgAAAAAEocREw1L0g+roFUchJI2Yd0M0ME2bnErNUYnpyJP1SqBXMAIAAAAAAcE2/JK/8MoSeOchIuAkKh1X3ImoA7p8ujAZIfvIDo6QVjACAAAAAA85AiE+bNFAYQTXQAFexgeczdVhf8FUnf16WzJlI/kmsAAAVlACAAAAAA65pz95EthqQpfoHS9nWvdCh05AV+OokP7GUaI+7j8+wSY20AAAAAAAAAAAAAEHBheWxvYWRJZAAAAAAAEGZpcnN0T3BlcmF0b3IAAQAAAAA=", + "base64": 
"DUkFAAADcGF5bG9hZAAZBQAABGcABQUAAAMwAH0AAAAFZAAgAAAAALGGQ/CRD+pGLD53BZzWcCcYbuGLVEyjzXIx7b+ux/q2BXMAIAAAAACOC6mXEZL27P9hethZbtKYsTXKK+FpgQ9Axxmn9N/cCwVsACAAAAAA+MFEd8XfZSpbXKqqPC2L3TEFswkaG5Ff6aSgf8p+XVIAAzEAfQAAAAVkACAAAAAAtL3QIvnZqCF72eS6lKr8ilff7R6kiNklokiTuaU5wNsFcwAgAAAAAEtqr3/X731VB+VrbFcY8ZrJKRo2E0Fd+C8L0EMNcvcCBWwAIAAAAABNPhSriux8W8qbwnhCczE3IzlhNEnGDpUwTFDZSL+eYQADMgB9AAAABWQAIAAAAAB99ZW/7KwXKzl5M3XQsAJ3JbEef90IoxFYBArNiYzlgQVzACAAAAAAYO/qaw0+92HAryxOUG7iK6hnIy3OaUA9jIqtHdvcq8YFbAAgAAAAAHrUYj8A0hVgc6VklpDiljOnykrUSfEsjm56XO/bsfKdAAMzAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzQAfQAAAAVkACAAAAAAV22FGF7ZDwK/EYiGNMlm/QuT3saQdyJM/Fn+ZyQug1oFcwAgAAAAACo7GwCvbcs5UHQMgds9/1QMklEVdjZigpuOFGrDmmxtBWwAIAAAAADQbYYPxlCMMGe2MulbiurApFLoeJSMvTeDU3pyEA2jNwADNQB9AAAABWQAIAAAAADFspsMG7yHjKppyllon1KqAsTrHaZ6JzNqnSz8o6iTvwVzACAAAAAAeiA5pqVIQQ9s6UY/P8v5Jjkl3I7iFNeLDYehikrINrsFbAAgAAAAAFjBTzTpNxDEkA0vSRj0jCED9KDRlboMVyilKyDz5YR4AAM2AH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzcAfQAAAAVkACAAAAAAYfLwnoxK6XAGQrJFy8+TIJoq38ldBaO75h4zA4ZX5tQFcwAgAAAAAC2wk8UcJH5X5XGnDBYmel6srpBkzBhHtt3Jw1u5TSJ1BWwAIAAAAAA9/YU9eI3D7QbXKIw/3/gzWJ6MZrCYhG0j1wNKgRQp5wADOAB9AAAABWQAIAAAAADGvyrtKkIcaV17ynZA7b2k5Pz6OhvxdWNkDvDWJIja8wVzACAAAAAAOLypVKNxf/wR1G8OZjUUsTQzDYeNNhhITxGMSp7euS4FbAAgAAAAAA9EsxoV1B2DcQ1NJRwuxXnvVR+vkD0wbbDYEI/zFEnDAAM5AH0AAAAFZAAgAAAAAEocREw1L0g+roFUchJI2Yd0M0ME2bnErNUYnpyJP1SqBXMAIAAAAAAcE2/JK/8MoSeOchIuAkKh1X3ImoA7p8ujAZIfvIDo6QVsACAAAAAA+W0+zgLr85/PD7P9a94wk6MgNgrizx/XU9aCxAkp1IwAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAAA==", "subType": "06" } } @@ -326,7 +322,6 @@ "schema": { "default.default": { "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", "ecocCollection": "enxcol_.default.ecoc", "fields": [ { @@ -356,24 +351,6 @@ } ] } - }, - "deleteTokens": { - "default.default": { - "encryptedInt": { - "e": { - "$binary": { - "base64": "65pz95EthqQpfoHS9nWvdCh05AV+OokP7GUaI+7j8+w=", - "subType": "00" - } - }, - "o": { - "$binary": { - "base64": "noN+05JsuO1oDg59yypIGj45i+eFH6HOTXOPpeZ//Mk=", - "subType": "00" - } - } - } - } } }, "$db": "default" diff --git a/test/client-side-encryption/spec/legacy/fle2-Range-Long-Aggregate.json b/test/client-side-encryption/spec/legacy/fle2v2-Range-Long-Aggregate.json similarity index 88% rename from test/client-side-encryption/spec/legacy/fle2-Range-Long-Aggregate.json rename to test/client-side-encryption/spec/legacy/fle2v2-Range-Long-Aggregate.json index 3f1c723bd2..afc0f97be1 100644 --- a/test/client-side-encryption/spec/legacy/fle2-Range-Long-Aggregate.json +++ b/test/client-side-encryption/spec/legacy/fle2v2-Range-Long-Aggregate.json @@ -1,7 +1,8 @@ { "runOn": [ { - "minServerVersion": "6.2.0", + "minServerVersion": "7.0.0", + "serverless": "forbid", "topology": [ "replicaset", "sharded", @@ -13,9 +14,6 @@ "collection_name": "default", "data": [], "encrypted_fields": { - "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", - "ecocCollection": "enxcol_.default.ecoc", "fields": [ { "keyId": { @@ -204,7 +202,6 @@ "schema": { "default.default": { "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", "ecocCollection": "enxcol_.default.ecoc", "fields": [ { @@ -258,7 +255,6 @@ "schema": { "default.default": { 
"escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", "ecocCollection": "enxcol_.default.ecoc", "fields": [ { @@ -304,7 +300,7 @@ "encryptedLong": { "$gt": { "$binary": { - "base64": "CnEFAAADcGF5bG9hZABBBQAABGcABQUAAAMwAH0AAAAFZAAgAAAAALGGQ/CRD+pGLD53BZzWcCcYbuGLVEyjzXIx7b+ux/q2BXMAIAAAAACOC6mXEZL27P9hethZbtKYsTXKK+FpgQ9Axxmn9N/cCwVjACAAAAAAKs6X8gaa+AJ6PpxVicsk7iCXpSYIkQ5zB0KnXqlfCWgAAzEAfQAAAAVkACAAAAAAtL3QIvnZqCF72eS6lKr8ilff7R6kiNklokiTuaU5wNsFcwAgAAAAAEtqr3/X731VB+VrbFcY8ZrJKRo2E0Fd+C8L0EMNcvcCBWMAIAAAAABYTWOKsDZ7cgPhiKDR2bE5CFGz4MuEwZbzZ0z46eGzBQADMgB9AAAABWQAIAAAAAB99ZW/7KwXKzl5M3XQsAJ3JbEef90IoxFYBArNiYzlgQVzACAAAAAAYO/qaw0+92HAryxOUG7iK6hnIy3OaUA9jIqtHdvcq8YFYwAgAAAAABSqgKlxv5LXoxUO3zNmqGztlZhL2S150WiR/eycv7CWAAMzAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVjACAAAAAAfsofSP7nQHv8ic8ZW0aNlWxplS46Z+mywPR4rQk+wcgAAzQAfQAAAAVkACAAAAAAV22FGF7ZDwK/EYiGNMlm/QuT3saQdyJM/Fn+ZyQug1oFcwAgAAAAACo7GwCvbcs5UHQMgds9/1QMklEVdjZigpuOFGrDmmxtBWMAIAAAAADu3L42J/oqNLe/XNYpodf2fhmPdcLhqO4J24o0hiveXwADNQB9AAAABWQAIAAAAADFspsMG7yHjKppyllon1KqAsTrHaZ6JzNqnSz8o6iTvwVzACAAAAAAeiA5pqVIQQ9s6UY/P8v5Jjkl3I7iFNeLDYehikrINrsFYwAgAAAAAAUljtCIvjRnLB2UPTEeRIz11Snl5SOBUNaXMXig4rqKAAM2AH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVjACAAAAAAD51NYesiO4Fo7w7iWBfqAFxEqkaDVctpvzZ28nT4SE8AAzcAfQAAAAVkACAAAAAAYfLwnoxK6XAGQrJFy8+TIJoq38ldBaO75h4zA4ZX5tQFcwAgAAAAAC2wk8UcJH5X5XGnDBYmel6srpBkzBhHtt3Jw1u5TSJ1BWMAIAAAAABJDtFhJ2tPbowp1UUmOCN/rqSqHRL1dtMu0c47vIlK4wADOAB9AAAABWQAIAAAAADGvyrtKkIcaV17ynZA7b2k5Pz6OhvxdWNkDvDWJIja8wVzACAAAAAAOLypVKNxf/wR1G8OZjUUsTQzDYeNNhhITxGMSp7euS4FYwAgAAAAAE6kvmXPqTnYIH4EJmNhy8OLVJZFOmdiBXLMorhELjKWAAM5AH0AAAAFZAAgAAAAAEocREw1L0g+roFUchJI2Yd0M0ME2bnErNUYnpyJP1SqBXMAIAAAAAAcE2/JK/8MoSeOchIuAkKh1X3ImoA7p8ujAZIfvIDo6QVjACAAAAAA85AiE+bNFAYQTXQAFexgeczdVhf8FUnf16WzJlI/kmsAAAVlACAAAAAA65pz95EthqQpfoHS9nWvdCh05AV+OokP7GUaI+7j8+wSY20AAAAAAAAAAAAAEHBheWxvYWRJZAAAAAAAEGZpcnN0T3BlcmF0b3IAAQAAAAA=", + "base64": 
"DUkFAAADcGF5bG9hZAAZBQAABGcABQUAAAMwAH0AAAAFZAAgAAAAALGGQ/CRD+pGLD53BZzWcCcYbuGLVEyjzXIx7b+ux/q2BXMAIAAAAACOC6mXEZL27P9hethZbtKYsTXKK+FpgQ9Axxmn9N/cCwVsACAAAAAA+MFEd8XfZSpbXKqqPC2L3TEFswkaG5Ff6aSgf8p+XVIAAzEAfQAAAAVkACAAAAAAtL3QIvnZqCF72eS6lKr8ilff7R6kiNklokiTuaU5wNsFcwAgAAAAAEtqr3/X731VB+VrbFcY8ZrJKRo2E0Fd+C8L0EMNcvcCBWwAIAAAAABNPhSriux8W8qbwnhCczE3IzlhNEnGDpUwTFDZSL+eYQADMgB9AAAABWQAIAAAAAB99ZW/7KwXKzl5M3XQsAJ3JbEef90IoxFYBArNiYzlgQVzACAAAAAAYO/qaw0+92HAryxOUG7iK6hnIy3OaUA9jIqtHdvcq8YFbAAgAAAAAHrUYj8A0hVgc6VklpDiljOnykrUSfEsjm56XO/bsfKdAAMzAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzQAfQAAAAVkACAAAAAAV22FGF7ZDwK/EYiGNMlm/QuT3saQdyJM/Fn+ZyQug1oFcwAgAAAAACo7GwCvbcs5UHQMgds9/1QMklEVdjZigpuOFGrDmmxtBWwAIAAAAADQbYYPxlCMMGe2MulbiurApFLoeJSMvTeDU3pyEA2jNwADNQB9AAAABWQAIAAAAADFspsMG7yHjKppyllon1KqAsTrHaZ6JzNqnSz8o6iTvwVzACAAAAAAeiA5pqVIQQ9s6UY/P8v5Jjkl3I7iFNeLDYehikrINrsFbAAgAAAAAFjBTzTpNxDEkA0vSRj0jCED9KDRlboMVyilKyDz5YR4AAM2AH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzcAfQAAAAVkACAAAAAAYfLwnoxK6XAGQrJFy8+TIJoq38ldBaO75h4zA4ZX5tQFcwAgAAAAAC2wk8UcJH5X5XGnDBYmel6srpBkzBhHtt3Jw1u5TSJ1BWwAIAAAAAA9/YU9eI3D7QbXKIw/3/gzWJ6MZrCYhG0j1wNKgRQp5wADOAB9AAAABWQAIAAAAADGvyrtKkIcaV17ynZA7b2k5Pz6OhvxdWNkDvDWJIja8wVzACAAAAAAOLypVKNxf/wR1G8OZjUUsTQzDYeNNhhITxGMSp7euS4FbAAgAAAAAA9EsxoV1B2DcQ1NJRwuxXnvVR+vkD0wbbDYEI/zFEnDAAM5AH0AAAAFZAAgAAAAAEocREw1L0g+roFUchJI2Yd0M0ME2bnErNUYnpyJP1SqBXMAIAAAAAAcE2/JK/8MoSeOchIuAkKh1X3ImoA7p8ujAZIfvIDo6QVsACAAAAAA+W0+zgLr85/PD7P9a94wk6MgNgrizx/XU9aCxAkp1IwAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAAA==", "subType": "06" } } @@ -318,7 +314,6 @@ "schema": { "default.default": { "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", "ecocCollection": "enxcol_.default.ecoc", "fields": [ { diff --git a/test/client-side-encryption/spec/legacy/fle2-Range-Long-Correctness.json b/test/client-side-encryption/spec/legacy/fle2v2-Range-Long-Correctness.json similarity index 99% rename from test/client-side-encryption/spec/legacy/fle2-Range-Long-Correctness.json rename to test/client-side-encryption/spec/legacy/fle2v2-Range-Long-Correctness.json index 972388c6c4..cda941de8a 100644 --- a/test/client-side-encryption/spec/legacy/fle2-Range-Long-Correctness.json +++ b/test/client-side-encryption/spec/legacy/fle2v2-Range-Long-Correctness.json @@ -1,7 +1,8 @@ { "runOn": [ { - "minServerVersion": "6.2.0", + "minServerVersion": "7.0.0", + "serverless": "forbid", "topology": [ "replicaset", "sharded", @@ -13,9 +14,6 @@ "collection_name": "default", "data": [], "encrypted_fields": { - "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", - "ecocCollection": "enxcol_.default.ecoc", "fields": [ { "keyId": { diff --git a/test/client-side-encryption/spec/legacy/fle2-Range-Long-Delete.json b/test/client-side-encryption/spec/legacy/fle2v2-Range-Long-Delete.json similarity index 83% rename from test/client-side-encryption/spec/legacy/fle2-Range-Long-Delete.json rename to test/client-side-encryption/spec/legacy/fle2v2-Range-Long-Delete.json index 89e1898406..ad344e21b4 100644 --- a/test/client-side-encryption/spec/legacy/fle2-Range-Long-Delete.json +++ b/test/client-side-encryption/spec/legacy/fle2v2-Range-Long-Delete.json @@ -1,7 +1,8 @@ { "runOn": [ { - "minServerVersion": "6.2.0", + 
"minServerVersion": "7.0.0", + "serverless": "forbid", "topology": [ "replicaset", "sharded", @@ -13,9 +14,6 @@ "collection_name": "default", "data": [], "encrypted_fields": { - "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", - "ecocCollection": "enxcol_.default.ecoc", "fields": [ { "keyId": { @@ -195,7 +193,6 @@ "schema": { "default.default": { "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", "ecocCollection": "enxcol_.default.ecoc", "fields": [ { @@ -249,7 +246,6 @@ "schema": { "default.default": { "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", "ecocCollection": "enxcol_.default.ecoc", "fields": [ { @@ -295,7 +291,7 @@ "encryptedLong": { "$gt": { "$binary": { - "base64": "CnEFAAADcGF5bG9hZABBBQAABGcABQUAAAMwAH0AAAAFZAAgAAAAALGGQ/CRD+pGLD53BZzWcCcYbuGLVEyjzXIx7b+ux/q2BXMAIAAAAACOC6mXEZL27P9hethZbtKYsTXKK+FpgQ9Axxmn9N/cCwVjACAAAAAAKs6X8gaa+AJ6PpxVicsk7iCXpSYIkQ5zB0KnXqlfCWgAAzEAfQAAAAVkACAAAAAAtL3QIvnZqCF72eS6lKr8ilff7R6kiNklokiTuaU5wNsFcwAgAAAAAEtqr3/X731VB+VrbFcY8ZrJKRo2E0Fd+C8L0EMNcvcCBWMAIAAAAABYTWOKsDZ7cgPhiKDR2bE5CFGz4MuEwZbzZ0z46eGzBQADMgB9AAAABWQAIAAAAAB99ZW/7KwXKzl5M3XQsAJ3JbEef90IoxFYBArNiYzlgQVzACAAAAAAYO/qaw0+92HAryxOUG7iK6hnIy3OaUA9jIqtHdvcq8YFYwAgAAAAABSqgKlxv5LXoxUO3zNmqGztlZhL2S150WiR/eycv7CWAAMzAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVjACAAAAAAfsofSP7nQHv8ic8ZW0aNlWxplS46Z+mywPR4rQk+wcgAAzQAfQAAAAVkACAAAAAAV22FGF7ZDwK/EYiGNMlm/QuT3saQdyJM/Fn+ZyQug1oFcwAgAAAAACo7GwCvbcs5UHQMgds9/1QMklEVdjZigpuOFGrDmmxtBWMAIAAAAADu3L42J/oqNLe/XNYpodf2fhmPdcLhqO4J24o0hiveXwADNQB9AAAABWQAIAAAAADFspsMG7yHjKppyllon1KqAsTrHaZ6JzNqnSz8o6iTvwVzACAAAAAAeiA5pqVIQQ9s6UY/P8v5Jjkl3I7iFNeLDYehikrINrsFYwAgAAAAAAUljtCIvjRnLB2UPTEeRIz11Snl5SOBUNaXMXig4rqKAAM2AH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVjACAAAAAAD51NYesiO4Fo7w7iWBfqAFxEqkaDVctpvzZ28nT4SE8AAzcAfQAAAAVkACAAAAAAYfLwnoxK6XAGQrJFy8+TIJoq38ldBaO75h4zA4ZX5tQFcwAgAAAAAC2wk8UcJH5X5XGnDBYmel6srpBkzBhHtt3Jw1u5TSJ1BWMAIAAAAABJDtFhJ2tPbowp1UUmOCN/rqSqHRL1dtMu0c47vIlK4wADOAB9AAAABWQAIAAAAADGvyrtKkIcaV17ynZA7b2k5Pz6OhvxdWNkDvDWJIja8wVzACAAAAAAOLypVKNxf/wR1G8OZjUUsTQzDYeNNhhITxGMSp7euS4FYwAgAAAAAE6kvmXPqTnYIH4EJmNhy8OLVJZFOmdiBXLMorhELjKWAAM5AH0AAAAFZAAgAAAAAEocREw1L0g+roFUchJI2Yd0M0ME2bnErNUYnpyJP1SqBXMAIAAAAAAcE2/JK/8MoSeOchIuAkKh1X3ImoA7p8ujAZIfvIDo6QVjACAAAAAA85AiE+bNFAYQTXQAFexgeczdVhf8FUnf16WzJlI/kmsAAAVlACAAAAAA65pz95EthqQpfoHS9nWvdCh05AV+OokP7GUaI+7j8+wSY20AAAAAAAAAAAAAEHBheWxvYWRJZAAAAAAAEGZpcnN0T3BlcmF0b3IAAQAAAAA=", + "base64": 
"DUkFAAADcGF5bG9hZAAZBQAABGcABQUAAAMwAH0AAAAFZAAgAAAAALGGQ/CRD+pGLD53BZzWcCcYbuGLVEyjzXIx7b+ux/q2BXMAIAAAAACOC6mXEZL27P9hethZbtKYsTXKK+FpgQ9Axxmn9N/cCwVsACAAAAAA+MFEd8XfZSpbXKqqPC2L3TEFswkaG5Ff6aSgf8p+XVIAAzEAfQAAAAVkACAAAAAAtL3QIvnZqCF72eS6lKr8ilff7R6kiNklokiTuaU5wNsFcwAgAAAAAEtqr3/X731VB+VrbFcY8ZrJKRo2E0Fd+C8L0EMNcvcCBWwAIAAAAABNPhSriux8W8qbwnhCczE3IzlhNEnGDpUwTFDZSL+eYQADMgB9AAAABWQAIAAAAAB99ZW/7KwXKzl5M3XQsAJ3JbEef90IoxFYBArNiYzlgQVzACAAAAAAYO/qaw0+92HAryxOUG7iK6hnIy3OaUA9jIqtHdvcq8YFbAAgAAAAAHrUYj8A0hVgc6VklpDiljOnykrUSfEsjm56XO/bsfKdAAMzAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzQAfQAAAAVkACAAAAAAV22FGF7ZDwK/EYiGNMlm/QuT3saQdyJM/Fn+ZyQug1oFcwAgAAAAACo7GwCvbcs5UHQMgds9/1QMklEVdjZigpuOFGrDmmxtBWwAIAAAAADQbYYPxlCMMGe2MulbiurApFLoeJSMvTeDU3pyEA2jNwADNQB9AAAABWQAIAAAAADFspsMG7yHjKppyllon1KqAsTrHaZ6JzNqnSz8o6iTvwVzACAAAAAAeiA5pqVIQQ9s6UY/P8v5Jjkl3I7iFNeLDYehikrINrsFbAAgAAAAAFjBTzTpNxDEkA0vSRj0jCED9KDRlboMVyilKyDz5YR4AAM2AH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzcAfQAAAAVkACAAAAAAYfLwnoxK6XAGQrJFy8+TIJoq38ldBaO75h4zA4ZX5tQFcwAgAAAAAC2wk8UcJH5X5XGnDBYmel6srpBkzBhHtt3Jw1u5TSJ1BWwAIAAAAAA9/YU9eI3D7QbXKIw/3/gzWJ6MZrCYhG0j1wNKgRQp5wADOAB9AAAABWQAIAAAAADGvyrtKkIcaV17ynZA7b2k5Pz6OhvxdWNkDvDWJIja8wVzACAAAAAAOLypVKNxf/wR1G8OZjUUsTQzDYeNNhhITxGMSp7euS4FbAAgAAAAAA9EsxoV1B2DcQ1NJRwuxXnvVR+vkD0wbbDYEI/zFEnDAAM5AH0AAAAFZAAgAAAAAEocREw1L0g+roFUchJI2Yd0M0ME2bnErNUYnpyJP1SqBXMAIAAAAAAcE2/JK/8MoSeOchIuAkKh1X3ImoA7p8ujAZIfvIDo6QVsACAAAAAA+W0+zgLr85/PD7P9a94wk6MgNgrizx/XU9aCxAkp1IwAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAAA==", "subType": "06" } } @@ -304,12 +300,12 @@ "limit": 1 } ], + "ordered": true, "encryptionInformation": { "type": 1, "schema": { "default.default": { "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", "ecocCollection": "enxcol_.default.ecoc", "fields": [ { @@ -339,24 +335,6 @@ } ] } - }, - "deleteTokens": { - "default.default": { - "encryptedLong": { - "e": { - "$binary": { - "base64": "65pz95EthqQpfoHS9nWvdCh05AV+OokP7GUaI+7j8+w=", - "subType": "00" - } - }, - "o": { - "$binary": { - "base64": "noN+05JsuO1oDg59yypIGj45i+eFH6HOTXOPpeZ//Mk=", - "subType": "00" - } - } - } - } } } }, diff --git a/test/client-side-encryption/spec/legacy/fle2-Range-Long-FindOneAndUpdate.json b/test/client-side-encryption/spec/legacy/fle2v2-Range-Long-FindOneAndUpdate.json similarity index 85% rename from test/client-side-encryption/spec/legacy/fle2-Range-Long-FindOneAndUpdate.json rename to test/client-side-encryption/spec/legacy/fle2v2-Range-Long-FindOneAndUpdate.json index 59342a343a..d447200468 100644 --- a/test/client-side-encryption/spec/legacy/fle2-Range-Long-FindOneAndUpdate.json +++ b/test/client-side-encryption/spec/legacy/fle2v2-Range-Long-FindOneAndUpdate.json @@ -1,7 +1,8 @@ { "runOn": [ { - "minServerVersion": "6.2.0", + "minServerVersion": "7.0.0", + "serverless": "forbid", "topology": [ "replicaset", "sharded", @@ -13,9 +14,6 @@ "collection_name": "default", "data": [], "encrypted_fields": { - "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", - "ecocCollection": "enxcol_.default.ecoc", "fields": [ { "keyId": { @@ -206,7 +204,6 @@ "schema": { "default.default": { "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", 
"ecocCollection": "enxcol_.default.ecoc", "fields": [ { @@ -260,7 +257,6 @@ "schema": { "default.default": { "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", "ecocCollection": "enxcol_.default.ecoc", "fields": [ { @@ -304,7 +300,7 @@ "encryptedLong": { "$gt": { "$binary": { - "base64": "CnEFAAADcGF5bG9hZABBBQAABGcABQUAAAMwAH0AAAAFZAAgAAAAALGGQ/CRD+pGLD53BZzWcCcYbuGLVEyjzXIx7b+ux/q2BXMAIAAAAACOC6mXEZL27P9hethZbtKYsTXKK+FpgQ9Axxmn9N/cCwVjACAAAAAAKs6X8gaa+AJ6PpxVicsk7iCXpSYIkQ5zB0KnXqlfCWgAAzEAfQAAAAVkACAAAAAAtL3QIvnZqCF72eS6lKr8ilff7R6kiNklokiTuaU5wNsFcwAgAAAAAEtqr3/X731VB+VrbFcY8ZrJKRo2E0Fd+C8L0EMNcvcCBWMAIAAAAABYTWOKsDZ7cgPhiKDR2bE5CFGz4MuEwZbzZ0z46eGzBQADMgB9AAAABWQAIAAAAAB99ZW/7KwXKzl5M3XQsAJ3JbEef90IoxFYBArNiYzlgQVzACAAAAAAYO/qaw0+92HAryxOUG7iK6hnIy3OaUA9jIqtHdvcq8YFYwAgAAAAABSqgKlxv5LXoxUO3zNmqGztlZhL2S150WiR/eycv7CWAAMzAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVjACAAAAAAfsofSP7nQHv8ic8ZW0aNlWxplS46Z+mywPR4rQk+wcgAAzQAfQAAAAVkACAAAAAAV22FGF7ZDwK/EYiGNMlm/QuT3saQdyJM/Fn+ZyQug1oFcwAgAAAAACo7GwCvbcs5UHQMgds9/1QMklEVdjZigpuOFGrDmmxtBWMAIAAAAADu3L42J/oqNLe/XNYpodf2fhmPdcLhqO4J24o0hiveXwADNQB9AAAABWQAIAAAAADFspsMG7yHjKppyllon1KqAsTrHaZ6JzNqnSz8o6iTvwVzACAAAAAAeiA5pqVIQQ9s6UY/P8v5Jjkl3I7iFNeLDYehikrINrsFYwAgAAAAAAUljtCIvjRnLB2UPTEeRIz11Snl5SOBUNaXMXig4rqKAAM2AH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVjACAAAAAAD51NYesiO4Fo7w7iWBfqAFxEqkaDVctpvzZ28nT4SE8AAzcAfQAAAAVkACAAAAAAYfLwnoxK6XAGQrJFy8+TIJoq38ldBaO75h4zA4ZX5tQFcwAgAAAAAC2wk8UcJH5X5XGnDBYmel6srpBkzBhHtt3Jw1u5TSJ1BWMAIAAAAABJDtFhJ2tPbowp1UUmOCN/rqSqHRL1dtMu0c47vIlK4wADOAB9AAAABWQAIAAAAADGvyrtKkIcaV17ynZA7b2k5Pz6OhvxdWNkDvDWJIja8wVzACAAAAAAOLypVKNxf/wR1G8OZjUUsTQzDYeNNhhITxGMSp7euS4FYwAgAAAAAE6kvmXPqTnYIH4EJmNhy8OLVJZFOmdiBXLMorhELjKWAAM5AH0AAAAFZAAgAAAAAEocREw1L0g+roFUchJI2Yd0M0ME2bnErNUYnpyJP1SqBXMAIAAAAAAcE2/JK/8MoSeOchIuAkKh1X3ImoA7p8ujAZIfvIDo6QVjACAAAAAA85AiE+bNFAYQTXQAFexgeczdVhf8FUnf16WzJlI/kmsAAAVlACAAAAAA65pz95EthqQpfoHS9nWvdCh05AV+OokP7GUaI+7j8+wSY20AAAAAAAAAAAAAEHBheWxvYWRJZAAAAAAAEGZpcnN0T3BlcmF0b3IAAQAAAAA=", + "base64": 
"DUkFAAADcGF5bG9hZAAZBQAABGcABQUAAAMwAH0AAAAFZAAgAAAAALGGQ/CRD+pGLD53BZzWcCcYbuGLVEyjzXIx7b+ux/q2BXMAIAAAAACOC6mXEZL27P9hethZbtKYsTXKK+FpgQ9Axxmn9N/cCwVsACAAAAAA+MFEd8XfZSpbXKqqPC2L3TEFswkaG5Ff6aSgf8p+XVIAAzEAfQAAAAVkACAAAAAAtL3QIvnZqCF72eS6lKr8ilff7R6kiNklokiTuaU5wNsFcwAgAAAAAEtqr3/X731VB+VrbFcY8ZrJKRo2E0Fd+C8L0EMNcvcCBWwAIAAAAABNPhSriux8W8qbwnhCczE3IzlhNEnGDpUwTFDZSL+eYQADMgB9AAAABWQAIAAAAAB99ZW/7KwXKzl5M3XQsAJ3JbEef90IoxFYBArNiYzlgQVzACAAAAAAYO/qaw0+92HAryxOUG7iK6hnIy3OaUA9jIqtHdvcq8YFbAAgAAAAAHrUYj8A0hVgc6VklpDiljOnykrUSfEsjm56XO/bsfKdAAMzAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzQAfQAAAAVkACAAAAAAV22FGF7ZDwK/EYiGNMlm/QuT3saQdyJM/Fn+ZyQug1oFcwAgAAAAACo7GwCvbcs5UHQMgds9/1QMklEVdjZigpuOFGrDmmxtBWwAIAAAAADQbYYPxlCMMGe2MulbiurApFLoeJSMvTeDU3pyEA2jNwADNQB9AAAABWQAIAAAAADFspsMG7yHjKppyllon1KqAsTrHaZ6JzNqnSz8o6iTvwVzACAAAAAAeiA5pqVIQQ9s6UY/P8v5Jjkl3I7iFNeLDYehikrINrsFbAAgAAAAAFjBTzTpNxDEkA0vSRj0jCED9KDRlboMVyilKyDz5YR4AAM2AH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzcAfQAAAAVkACAAAAAAYfLwnoxK6XAGQrJFy8+TIJoq38ldBaO75h4zA4ZX5tQFcwAgAAAAAC2wk8UcJH5X5XGnDBYmel6srpBkzBhHtt3Jw1u5TSJ1BWwAIAAAAAA9/YU9eI3D7QbXKIw/3/gzWJ6MZrCYhG0j1wNKgRQp5wADOAB9AAAABWQAIAAAAADGvyrtKkIcaV17ynZA7b2k5Pz6OhvxdWNkDvDWJIja8wVzACAAAAAAOLypVKNxf/wR1G8OZjUUsTQzDYeNNhhITxGMSp7euS4FbAAgAAAAAA9EsxoV1B2DcQ1NJRwuxXnvVR+vkD0wbbDYEI/zFEnDAAM5AH0AAAAFZAAgAAAAAEocREw1L0g+roFUchJI2Yd0M0ME2bnErNUYnpyJP1SqBXMAIAAAAAAcE2/JK/8MoSeOchIuAkKh1X3ImoA7p8ujAZIfvIDo6QVsACAAAAAA+W0+zgLr85/PD7P9a94wk6MgNgrizx/XU9aCxAkp1IwAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAAA==", "subType": "06" } } @@ -322,7 +318,6 @@ "schema": { "default.default": { "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", "ecocCollection": "enxcol_.default.ecoc", "fields": [ { @@ -352,24 +347,6 @@ } ] } - }, - "deleteTokens": { - "default.default": { - "encryptedLong": { - "e": { - "$binary": { - "base64": "65pz95EthqQpfoHS9nWvdCh05AV+OokP7GUaI+7j8+w=", - "subType": "00" - } - }, - "o": { - "$binary": { - "base64": "noN+05JsuO1oDg59yypIGj45i+eFH6HOTXOPpeZ//Mk=", - "subType": "00" - } - } - } - } } } }, diff --git a/test/client-side-encryption/spec/legacy/fle2-Range-Long-InsertFind.json b/test/client-side-encryption/spec/legacy/fle2v2-Range-Long-InsertFind.json similarity index 88% rename from test/client-side-encryption/spec/legacy/fle2-Range-Long-InsertFind.json rename to test/client-side-encryption/spec/legacy/fle2v2-Range-Long-InsertFind.json index 882e52170d..4eb837f28b 100644 --- a/test/client-side-encryption/spec/legacy/fle2-Range-Long-InsertFind.json +++ b/test/client-side-encryption/spec/legacy/fle2v2-Range-Long-InsertFind.json @@ -1,7 +1,8 @@ { "runOn": [ { - "minServerVersion": "6.2.0", + "minServerVersion": "7.0.0", + "serverless": "forbid", "topology": [ "replicaset", "sharded", @@ -13,9 +14,6 @@ "collection_name": "default", "data": [], "encrypted_fields": { - "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", - "ecocCollection": "enxcol_.default.ecoc", "fields": [ { "keyId": { @@ -200,7 +198,6 @@ "schema": { "default.default": { "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", "ecocCollection": "enxcol_.default.ecoc", "fields": [ { @@ -254,7 +251,6 @@ "schema": { "default.default": { "escCollection": 
"enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", "ecocCollection": "enxcol_.default.ecoc", "fields": [ { @@ -298,7 +294,7 @@ "encryptedLong": { "$gt": { "$binary": { - "base64": "CnEFAAADcGF5bG9hZABBBQAABGcABQUAAAMwAH0AAAAFZAAgAAAAALGGQ/CRD+pGLD53BZzWcCcYbuGLVEyjzXIx7b+ux/q2BXMAIAAAAACOC6mXEZL27P9hethZbtKYsTXKK+FpgQ9Axxmn9N/cCwVjACAAAAAAKs6X8gaa+AJ6PpxVicsk7iCXpSYIkQ5zB0KnXqlfCWgAAzEAfQAAAAVkACAAAAAAtL3QIvnZqCF72eS6lKr8ilff7R6kiNklokiTuaU5wNsFcwAgAAAAAEtqr3/X731VB+VrbFcY8ZrJKRo2E0Fd+C8L0EMNcvcCBWMAIAAAAABYTWOKsDZ7cgPhiKDR2bE5CFGz4MuEwZbzZ0z46eGzBQADMgB9AAAABWQAIAAAAAB99ZW/7KwXKzl5M3XQsAJ3JbEef90IoxFYBArNiYzlgQVzACAAAAAAYO/qaw0+92HAryxOUG7iK6hnIy3OaUA9jIqtHdvcq8YFYwAgAAAAABSqgKlxv5LXoxUO3zNmqGztlZhL2S150WiR/eycv7CWAAMzAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVjACAAAAAAfsofSP7nQHv8ic8ZW0aNlWxplS46Z+mywPR4rQk+wcgAAzQAfQAAAAVkACAAAAAAV22FGF7ZDwK/EYiGNMlm/QuT3saQdyJM/Fn+ZyQug1oFcwAgAAAAACo7GwCvbcs5UHQMgds9/1QMklEVdjZigpuOFGrDmmxtBWMAIAAAAADu3L42J/oqNLe/XNYpodf2fhmPdcLhqO4J24o0hiveXwADNQB9AAAABWQAIAAAAADFspsMG7yHjKppyllon1KqAsTrHaZ6JzNqnSz8o6iTvwVzACAAAAAAeiA5pqVIQQ9s6UY/P8v5Jjkl3I7iFNeLDYehikrINrsFYwAgAAAAAAUljtCIvjRnLB2UPTEeRIz11Snl5SOBUNaXMXig4rqKAAM2AH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVjACAAAAAAD51NYesiO4Fo7w7iWBfqAFxEqkaDVctpvzZ28nT4SE8AAzcAfQAAAAVkACAAAAAAYfLwnoxK6XAGQrJFy8+TIJoq38ldBaO75h4zA4ZX5tQFcwAgAAAAAC2wk8UcJH5X5XGnDBYmel6srpBkzBhHtt3Jw1u5TSJ1BWMAIAAAAABJDtFhJ2tPbowp1UUmOCN/rqSqHRL1dtMu0c47vIlK4wADOAB9AAAABWQAIAAAAADGvyrtKkIcaV17ynZA7b2k5Pz6OhvxdWNkDvDWJIja8wVzACAAAAAAOLypVKNxf/wR1G8OZjUUsTQzDYeNNhhITxGMSp7euS4FYwAgAAAAAE6kvmXPqTnYIH4EJmNhy8OLVJZFOmdiBXLMorhELjKWAAM5AH0AAAAFZAAgAAAAAEocREw1L0g+roFUchJI2Yd0M0ME2bnErNUYnpyJP1SqBXMAIAAAAAAcE2/JK/8MoSeOchIuAkKh1X3ImoA7p8ujAZIfvIDo6QVjACAAAAAA85AiE+bNFAYQTXQAFexgeczdVhf8FUnf16WzJlI/kmsAAAVlACAAAAAA65pz95EthqQpfoHS9nWvdCh05AV+OokP7GUaI+7j8+wSY20AAAAAAAAAAAAAEHBheWxvYWRJZAAAAAAAEGZpcnN0T3BlcmF0b3IAAQAAAAA=", + "base64": 
"DUkFAAADcGF5bG9hZAAZBQAABGcABQUAAAMwAH0AAAAFZAAgAAAAALGGQ/CRD+pGLD53BZzWcCcYbuGLVEyjzXIx7b+ux/q2BXMAIAAAAACOC6mXEZL27P9hethZbtKYsTXKK+FpgQ9Axxmn9N/cCwVsACAAAAAA+MFEd8XfZSpbXKqqPC2L3TEFswkaG5Ff6aSgf8p+XVIAAzEAfQAAAAVkACAAAAAAtL3QIvnZqCF72eS6lKr8ilff7R6kiNklokiTuaU5wNsFcwAgAAAAAEtqr3/X731VB+VrbFcY8ZrJKRo2E0Fd+C8L0EMNcvcCBWwAIAAAAABNPhSriux8W8qbwnhCczE3IzlhNEnGDpUwTFDZSL+eYQADMgB9AAAABWQAIAAAAAB99ZW/7KwXKzl5M3XQsAJ3JbEef90IoxFYBArNiYzlgQVzACAAAAAAYO/qaw0+92HAryxOUG7iK6hnIy3OaUA9jIqtHdvcq8YFbAAgAAAAAHrUYj8A0hVgc6VklpDiljOnykrUSfEsjm56XO/bsfKdAAMzAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzQAfQAAAAVkACAAAAAAV22FGF7ZDwK/EYiGNMlm/QuT3saQdyJM/Fn+ZyQug1oFcwAgAAAAACo7GwCvbcs5UHQMgds9/1QMklEVdjZigpuOFGrDmmxtBWwAIAAAAADQbYYPxlCMMGe2MulbiurApFLoeJSMvTeDU3pyEA2jNwADNQB9AAAABWQAIAAAAADFspsMG7yHjKppyllon1KqAsTrHaZ6JzNqnSz8o6iTvwVzACAAAAAAeiA5pqVIQQ9s6UY/P8v5Jjkl3I7iFNeLDYehikrINrsFbAAgAAAAAFjBTzTpNxDEkA0vSRj0jCED9KDRlboMVyilKyDz5YR4AAM2AH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzcAfQAAAAVkACAAAAAAYfLwnoxK6XAGQrJFy8+TIJoq38ldBaO75h4zA4ZX5tQFcwAgAAAAAC2wk8UcJH5X5XGnDBYmel6srpBkzBhHtt3Jw1u5TSJ1BWwAIAAAAAA9/YU9eI3D7QbXKIw/3/gzWJ6MZrCYhG0j1wNKgRQp5wADOAB9AAAABWQAIAAAAADGvyrtKkIcaV17ynZA7b2k5Pz6OhvxdWNkDvDWJIja8wVzACAAAAAAOLypVKNxf/wR1G8OZjUUsTQzDYeNNhhITxGMSp7euS4FbAAgAAAAAA9EsxoV1B2DcQ1NJRwuxXnvVR+vkD0wbbDYEI/zFEnDAAM5AH0AAAAFZAAgAAAAAEocREw1L0g+roFUchJI2Yd0M0ME2bnErNUYnpyJP1SqBXMAIAAAAAAcE2/JK/8MoSeOchIuAkKh1X3ImoA7p8ujAZIfvIDo6QVsACAAAAAA+W0+zgLr85/PD7P9a94wk6MgNgrizx/XU9aCxAkp1IwAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAAA==", "subType": "06" } } @@ -309,7 +305,6 @@ "schema": { "default.default": { "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", "ecocCollection": "enxcol_.default.ecoc", "fields": [ { diff --git a/test/client-side-encryption/spec/legacy/fle2-Range-Long-Update.json b/test/client-side-encryption/spec/legacy/fle2v2-Range-Long-Update.json similarity index 85% rename from test/client-side-encryption/spec/legacy/fle2-Range-Long-Update.json rename to test/client-side-encryption/spec/legacy/fle2v2-Range-Long-Update.json index 92e3e390a5..3ba7f17c14 100644 --- a/test/client-side-encryption/spec/legacy/fle2-Range-Long-Update.json +++ b/test/client-side-encryption/spec/legacy/fle2v2-Range-Long-Update.json @@ -1,7 +1,8 @@ { "runOn": [ { - "minServerVersion": "6.2.0", + "minServerVersion": "7.0.0", + "serverless": "forbid", "topology": [ "replicaset", "sharded", @@ -13,9 +14,6 @@ "collection_name": "default", "data": [], "encrypted_fields": { - "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", - "ecocCollection": "enxcol_.default.ecoc", "fields": [ { "keyId": { @@ -204,7 +202,6 @@ "schema": { "default.default": { "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", "ecocCollection": "enxcol_.default.ecoc", "fields": [ { @@ -258,7 +255,6 @@ "schema": { "default.default": { "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", "ecocCollection": "enxcol_.default.ecoc", "fields": [ { @@ -306,7 +302,7 @@ "encryptedLong": { "$gt": { "$binary": { - "base64": 
"CnEFAAADcGF5bG9hZABBBQAABGcABQUAAAMwAH0AAAAFZAAgAAAAALGGQ/CRD+pGLD53BZzWcCcYbuGLVEyjzXIx7b+ux/q2BXMAIAAAAACOC6mXEZL27P9hethZbtKYsTXKK+FpgQ9Axxmn9N/cCwVjACAAAAAAKs6X8gaa+AJ6PpxVicsk7iCXpSYIkQ5zB0KnXqlfCWgAAzEAfQAAAAVkACAAAAAAtL3QIvnZqCF72eS6lKr8ilff7R6kiNklokiTuaU5wNsFcwAgAAAAAEtqr3/X731VB+VrbFcY8ZrJKRo2E0Fd+C8L0EMNcvcCBWMAIAAAAABYTWOKsDZ7cgPhiKDR2bE5CFGz4MuEwZbzZ0z46eGzBQADMgB9AAAABWQAIAAAAAB99ZW/7KwXKzl5M3XQsAJ3JbEef90IoxFYBArNiYzlgQVzACAAAAAAYO/qaw0+92HAryxOUG7iK6hnIy3OaUA9jIqtHdvcq8YFYwAgAAAAABSqgKlxv5LXoxUO3zNmqGztlZhL2S150WiR/eycv7CWAAMzAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVjACAAAAAAfsofSP7nQHv8ic8ZW0aNlWxplS46Z+mywPR4rQk+wcgAAzQAfQAAAAVkACAAAAAAV22FGF7ZDwK/EYiGNMlm/QuT3saQdyJM/Fn+ZyQug1oFcwAgAAAAACo7GwCvbcs5UHQMgds9/1QMklEVdjZigpuOFGrDmmxtBWMAIAAAAADu3L42J/oqNLe/XNYpodf2fhmPdcLhqO4J24o0hiveXwADNQB9AAAABWQAIAAAAADFspsMG7yHjKppyllon1KqAsTrHaZ6JzNqnSz8o6iTvwVzACAAAAAAeiA5pqVIQQ9s6UY/P8v5Jjkl3I7iFNeLDYehikrINrsFYwAgAAAAAAUljtCIvjRnLB2UPTEeRIz11Snl5SOBUNaXMXig4rqKAAM2AH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVjACAAAAAAD51NYesiO4Fo7w7iWBfqAFxEqkaDVctpvzZ28nT4SE8AAzcAfQAAAAVkACAAAAAAYfLwnoxK6XAGQrJFy8+TIJoq38ldBaO75h4zA4ZX5tQFcwAgAAAAAC2wk8UcJH5X5XGnDBYmel6srpBkzBhHtt3Jw1u5TSJ1BWMAIAAAAABJDtFhJ2tPbowp1UUmOCN/rqSqHRL1dtMu0c47vIlK4wADOAB9AAAABWQAIAAAAADGvyrtKkIcaV17ynZA7b2k5Pz6OhvxdWNkDvDWJIja8wVzACAAAAAAOLypVKNxf/wR1G8OZjUUsTQzDYeNNhhITxGMSp7euS4FYwAgAAAAAE6kvmXPqTnYIH4EJmNhy8OLVJZFOmdiBXLMorhELjKWAAM5AH0AAAAFZAAgAAAAAEocREw1L0g+roFUchJI2Yd0M0ME2bnErNUYnpyJP1SqBXMAIAAAAAAcE2/JK/8MoSeOchIuAkKh1X3ImoA7p8ujAZIfvIDo6QVjACAAAAAA85AiE+bNFAYQTXQAFexgeczdVhf8FUnf16WzJlI/kmsAAAVlACAAAAAA65pz95EthqQpfoHS9nWvdCh05AV+OokP7GUaI+7j8+wSY20AAAAAAAAAAAAAEHBheWxvYWRJZAAAAAAAEGZpcnN0T3BlcmF0b3IAAQAAAAA=", + "base64": 
"DUkFAAADcGF5bG9hZAAZBQAABGcABQUAAAMwAH0AAAAFZAAgAAAAALGGQ/CRD+pGLD53BZzWcCcYbuGLVEyjzXIx7b+ux/q2BXMAIAAAAACOC6mXEZL27P9hethZbtKYsTXKK+FpgQ9Axxmn9N/cCwVsACAAAAAA+MFEd8XfZSpbXKqqPC2L3TEFswkaG5Ff6aSgf8p+XVIAAzEAfQAAAAVkACAAAAAAtL3QIvnZqCF72eS6lKr8ilff7R6kiNklokiTuaU5wNsFcwAgAAAAAEtqr3/X731VB+VrbFcY8ZrJKRo2E0Fd+C8L0EMNcvcCBWwAIAAAAABNPhSriux8W8qbwnhCczE3IzlhNEnGDpUwTFDZSL+eYQADMgB9AAAABWQAIAAAAAB99ZW/7KwXKzl5M3XQsAJ3JbEef90IoxFYBArNiYzlgQVzACAAAAAAYO/qaw0+92HAryxOUG7iK6hnIy3OaUA9jIqtHdvcq8YFbAAgAAAAAHrUYj8A0hVgc6VklpDiljOnykrUSfEsjm56XO/bsfKdAAMzAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzQAfQAAAAVkACAAAAAAV22FGF7ZDwK/EYiGNMlm/QuT3saQdyJM/Fn+ZyQug1oFcwAgAAAAACo7GwCvbcs5UHQMgds9/1QMklEVdjZigpuOFGrDmmxtBWwAIAAAAADQbYYPxlCMMGe2MulbiurApFLoeJSMvTeDU3pyEA2jNwADNQB9AAAABWQAIAAAAADFspsMG7yHjKppyllon1KqAsTrHaZ6JzNqnSz8o6iTvwVzACAAAAAAeiA5pqVIQQ9s6UY/P8v5Jjkl3I7iFNeLDYehikrINrsFbAAgAAAAAFjBTzTpNxDEkA0vSRj0jCED9KDRlboMVyilKyDz5YR4AAM2AH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzcAfQAAAAVkACAAAAAAYfLwnoxK6XAGQrJFy8+TIJoq38ldBaO75h4zA4ZX5tQFcwAgAAAAAC2wk8UcJH5X5XGnDBYmel6srpBkzBhHtt3Jw1u5TSJ1BWwAIAAAAAA9/YU9eI3D7QbXKIw/3/gzWJ6MZrCYhG0j1wNKgRQp5wADOAB9AAAABWQAIAAAAADGvyrtKkIcaV17ynZA7b2k5Pz6OhvxdWNkDvDWJIja8wVzACAAAAAAOLypVKNxf/wR1G8OZjUUsTQzDYeNNhhITxGMSp7euS4FbAAgAAAAAA9EsxoV1B2DcQ1NJRwuxXnvVR+vkD0wbbDYEI/zFEnDAAM5AH0AAAAFZAAgAAAAAEocREw1L0g+roFUchJI2Yd0M0ME2bnErNUYnpyJP1SqBXMAIAAAAAAcE2/JK/8MoSeOchIuAkKh1X3ImoA7p8ujAZIfvIDo6QVsACAAAAAA+W0+zgLr85/PD7P9a94wk6MgNgrizx/XU9aCxAkp1IwAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAAA==", "subType": "06" } } @@ -326,7 +322,6 @@ "schema": { "default.default": { "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", "ecocCollection": "enxcol_.default.ecoc", "fields": [ { @@ -356,24 +351,6 @@ } ] } - }, - "deleteTokens": { - "default.default": { - "encryptedLong": { - "e": { - "$binary": { - "base64": "65pz95EthqQpfoHS9nWvdCh05AV+OokP7GUaI+7j8+w=", - "subType": "00" - } - }, - "o": { - "$binary": { - "base64": "noN+05JsuO1oDg59yypIGj45i+eFH6HOTXOPpeZ//Mk=", - "subType": "00" - } - } - } - } } }, "$db": "default" diff --git a/test/client-side-encryption/spec/legacy/fle2-Range-WrongType.json b/test/client-side-encryption/spec/legacy/fle2v2-Range-WrongType.json similarity index 95% rename from test/client-side-encryption/spec/legacy/fle2-Range-WrongType.json rename to test/client-side-encryption/spec/legacy/fle2v2-Range-WrongType.json index 9eddf1c99c..e5e9ddc821 100644 --- a/test/client-side-encryption/spec/legacy/fle2-Range-WrongType.json +++ b/test/client-side-encryption/spec/legacy/fle2v2-Range-WrongType.json @@ -1,7 +1,8 @@ { "runOn": [ { - "minServerVersion": "6.2.0", + "minServerVersion": "7.0.0", + "serverless": "forbid", "topology": [ "replicaset", "sharded", @@ -13,9 +14,6 @@ "collection_name": "default", "data": [], "encrypted_fields": { - "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", - "ecocCollection": "enxcol_.default.ecoc", "fields": [ { "keyId": { diff --git a/test/client-side-encryption/spec/legacy/fle2-Update.json b/test/client-side-encryption/spec/legacy/fle2v2-Update.json similarity index 87% rename from test/client-side-encryption/spec/legacy/fle2-Update.json rename to 
test/client-side-encryption/spec/legacy/fle2v2-Update.json index 090f44f9ac..14104e2cd8 100644 --- a/test/client-side-encryption/spec/legacy/fle2-Update.json +++ b/test/client-side-encryption/spec/legacy/fle2v2-Update.json @@ -1,7 +1,8 @@ { "runOn": [ { - "minServerVersion": "6.0.0", + "minServerVersion": "7.0.0", + "serverless": "forbid", "topology": [ "replicaset", "sharded", @@ -13,9 +14,6 @@ "collection_name": "default", "data": [], "encrypted_fields": { - "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", - "ecocCollection": "enxcol_.default.ecoc", "fields": [ { "keyId": { @@ -186,7 +184,6 @@ "schema": { "default.default": { "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", "ecocCollection": "enxcol_.default.ecoc", "fields": [ { @@ -233,7 +230,7 @@ "encryptedIndexed": { "$eq": { "$binary": { - "base64": "BbEAAAAFZAAgAAAAAPtVteJQAlgb2YMa/+7YWH00sbQPyt7L6Rb8OwBdMmL2BXMAIAAAAAAd44hgVKnEnTFlwNVC14oyc9OZOTspeymusqkRQj57nAVjACAAAAAA19X9v9NlWidu/wR5/C/7WUV54DfL5CkNmT5WYrhxdDcFZQAgAAAAAOuac/eRLYakKX6B0vZ1r3QodOQFfjqJD+xlGiPu4/PsEmNtAAAAAAAAAAAAAA==", + "base64": "DIkAAAAFZAAgAAAAAPtVteJQAlgb2YMa/+7YWH00sbQPyt7L6Rb8OwBdMmL2BXMAIAAAAAAd44hgVKnEnTFlwNVC14oyc9OZOTspeymusqkRQj57nAVsACAAAAAAaZ9s3G+4znfxStxeOZwcZy1OhzjMGc5hjmdMN+b/w6kSY20AAAAAAAAAAAAA", "subType": "06" } } @@ -246,12 +243,12 @@ } } ], + "ordered": true, "encryptionInformation": { "type": 1, "schema": { "default.default": { "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", "ecocCollection": "enxcol_.default.ecoc", "fields": [ { @@ -282,24 +279,6 @@ } ] } - }, - "deleteTokens": { - "default.default": { - "encryptedIndexed": { - "e": { - "$binary": { - "base64": "65pz95EthqQpfoHS9nWvdCh05AV+OokP7GUaI+7j8+w=", - "subType": "00" - } - }, - "o": { - "$binary": { - "base64": "noN+05JsuO1oDg59yypIGj45i+eFH6HOTXOPpeZ//Mk=", - "subType": "00" - } - } - } - } } } }, @@ -450,7 +429,6 @@ "schema": { "default.default": { "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", "ecocCollection": "enxcol_.default.ecoc", "fields": [ { @@ -497,7 +475,7 @@ "encryptedIndexed": { "$eq": { "$binary": { - "base64": "BbEAAAAFZAAgAAAAAPtVteJQAlgb2YMa/+7YWH00sbQPyt7L6Rb8OwBdMmL2BXMAIAAAAAAd44hgVKnEnTFlwNVC14oyc9OZOTspeymusqkRQj57nAVjACAAAAAA19X9v9NlWidu/wR5/C/7WUV54DfL5CkNmT5WYrhxdDcFZQAgAAAAAOuac/eRLYakKX6B0vZ1r3QodOQFfjqJD+xlGiPu4/PsEmNtAAAAAAAAAAAAAA==", + "base64": "DIkAAAAFZAAgAAAAAPtVteJQAlgb2YMa/+7YWH00sbQPyt7L6Rb8OwBdMmL2BXMAIAAAAAAd44hgVKnEnTFlwNVC14oyc9OZOTspeymusqkRQj57nAVsACAAAAAAaZ9s3G+4znfxStxeOZwcZy1OhzjMGc5hjmdMN+b/w6kSY20AAAAAAAAAAAAA", "subType": "06" } } @@ -512,12 +490,12 @@ } } ], + "ordered": true, "encryptionInformation": { "type": 1, "schema": { "default.default": { "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", "ecocCollection": "enxcol_.default.ecoc", "fields": [ { @@ -548,24 +526,6 @@ } ] } - }, - "deleteTokens": { - "default.default": { - "encryptedIndexed": { - "e": { - "$binary": { - "base64": "65pz95EthqQpfoHS9nWvdCh05AV+OokP7GUaI+7j8+w=", - "subType": "00" - } - }, - "o": { - "$binary": { - "base64": "noN+05JsuO1oDg59yypIGj45i+eFH6HOTXOPpeZ//Mk=", - "subType": "00" - } - } - } - } } } }, diff --git a/test/client-side-encryption/spec/legacy/fle2-validatorAndPartialFieldExpression.json b/test/client-side-encryption/spec/legacy/fle2v2-validatorAndPartialFieldExpression.json similarity index 93% rename from 
test/client-side-encryption/spec/legacy/fle2-validatorAndPartialFieldExpression.json rename to test/client-side-encryption/spec/legacy/fle2v2-validatorAndPartialFieldExpression.json index e70ca7c72d..4adf6fc07d 100644 --- a/test/client-side-encryption/spec/legacy/fle2-validatorAndPartialFieldExpression.json +++ b/test/client-side-encryption/spec/legacy/fle2v2-validatorAndPartialFieldExpression.json @@ -1,7 +1,8 @@ { "runOn": [ { - "minServerVersion": "6.0.0", + "minServerVersion": "7.0.0", + "serverless": "forbid", "topology": [ "replicaset", "sharded", @@ -29,9 +30,6 @@ }, "encryptedFieldsMap": { "default.encryptedCollection": { - "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", - "ecocCollection": "enxcol_.default.ecoc", "fields": [ { "keyId": { @@ -108,9 +106,6 @@ }, "encryptedFieldsMap": { "default.encryptedCollection": { - "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", - "ecocCollection": "enxcol_.default.ecoc", "fields": [ { "keyId": { @@ -182,9 +177,6 @@ }, "encryptedFieldsMap": { "default.encryptedCollection": { - "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", - "ecocCollection": "enxcol_.default.ecoc", "fields": [ { "keyId": { @@ -262,9 +254,6 @@ }, "encryptedFieldsMap": { "default.encryptedCollection": { - "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", - "ecocCollection": "enxcol_.default.ecoc", "fields": [ { "keyId": { @@ -345,9 +334,6 @@ }, "encryptedFieldsMap": { "default.encryptedCollection": { - "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", - "ecocCollection": "enxcol_.default.ecoc", "fields": [ { "keyId": { @@ -442,9 +428,6 @@ }, "encryptedFieldsMap": { "default.encryptedCollection": { - "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", - "ecocCollection": "enxcol_.default.ecoc", "fields": [ { "keyId": { diff --git a/test/client-side-encryption/spec/legacy/timeoutMS.json b/test/client-side-encryption/spec/legacy/timeoutMS.json new file mode 100644 index 0000000000..443aa0aa23 --- /dev/null +++ b/test/client-side-encryption/spec/legacy/timeoutMS.json @@ -0,0 +1,200 @@ +{ + "runOn": [ + { + "minServerVersion": "4.4" + } + ], + "database_name": "cse-timeouts-db", + "collection_name": "cse-timeouts-coll", + "data": [], + "json_schema": { + "properties": { + "encrypted_w_altname": { + "encrypt": { + "keyId": "/altname", + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "random": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string_equivalent": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + } + }, + "bsonType": "object" + }, + "key_vault_data": [ + { + "status": 1, + "_id": { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + }, + "masterKey": { + "provider": "aws", + "key": 
"arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0", + "region": "us-east-1" + }, + "updateDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "keyMaterial": { + "$binary": { + "base64": "AQICAHhQNmWG2CzOm1dq3kWLM+iDUZhEqnhJwH9wZVpuZ94A8gEqnsxXlR51T5EbEVezUqqKAAAAwjCBvwYJKoZIhvcNAQcGoIGxMIGuAgEAMIGoBgkqhkiG9w0BBwEwHgYJYIZIAWUDBAEuMBEEDHa4jo6yp0Z18KgbUgIBEIB74sKxWtV8/YHje5lv5THTl0HIbhSwM6EqRlmBiFFatmEWaeMk4tO4xBX65eq670I5TWPSLMzpp8ncGHMmvHqRajNBnmFtbYxN3E3/WjxmdbOOe+OXpnGJPcGsftc7cB2shRfA4lICPnE26+oVNXT6p0Lo20nY5XC7jyCO", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "keyAltNames": [ + "altname", + "another_altname" + ] + } + ], + "tests": [ + { + "description": "timeoutMS applied to listCollections to get collection schema", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listCollections" + ], + "blockConnection": true, + "blockTimeMS": 60 + } + }, + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "aws": {} + } + }, + "timeoutMS": 50 + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encrypted_string": "string0", + "random": "abc" + } + }, + "result": { + "isTimeoutError": true + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "cse-timeouts-coll" + }, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + }, + "command_name": "listCollections" + } + } + ] + }, + { + "description": "remaining timeoutMS applied to find to get keyvault data", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 3 + }, + "data": { + "failCommands": [ + "listCollections", + "find" + ], + "blockConnection": true, + "blockTimeMS": 20 + } + }, + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "aws": {} + } + }, + "timeoutMS": 50 + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encrypted_string": "string0", + "random": "abc" + } + }, + "result": { + "isTimeoutError": true + } + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/rewrapManyDataKey.json b/test/client-side-encryption/spec/unified/rewrapManyDataKey.json index 89860de0c0..6b3c9664a9 100644 --- a/test/client-side-encryption/spec/unified/rewrapManyDataKey.json +++ b/test/client-side-encryption/spec/unified/rewrapManyDataKey.json @@ -321,7 +321,10 @@ "modifiedCount": 4, "deletedCount": 0, "upsertedCount": 0, - "upsertedIds": {} + "upsertedIds": {}, + "insertedIds": { + "$$unsetOrMatches": {} + } } } } @@ -503,7 +506,10 @@ "modifiedCount": 4, "deletedCount": 0, "upsertedCount": 0, - "upsertedIds": {} + "upsertedIds": {}, + "insertedIds": { + "$$unsetOrMatches": {} + } } } } @@ -687,7 +693,10 @@ "modifiedCount": 4, "deletedCount": 0, "upsertedCount": 0, - "upsertedIds": {} + "upsertedIds": {}, + "insertedIds": { + "$$unsetOrMatches": {} + } } } } @@ -873,7 +882,10 @@ "modifiedCount": 4, "deletedCount": 0, "upsertedCount": 0, - "upsertedIds": {} + "upsertedIds": {}, + "insertedIds": { + "$$unsetOrMatches": {} + } } } } @@ -1055,7 +1067,10 @@ "modifiedCount": 4, "deletedCount": 0, "upsertedCount": 0, - "upsertedIds": {} + "upsertedIds": {}, + "insertedIds": { + "$$unsetOrMatches": {} + } } } } @@ -1218,7 +1233,10 @@ "modifiedCount": 5, "deletedCount": 0, "upsertedCount": 0, - "upsertedIds": {} + "upsertedIds": {}, + "insertedIds": { + 
"$$unsetOrMatches": {} + } } } }, diff --git a/test/test_encryption.py b/test/test_encryption.py index 872e0356ad..af8f54cd07 100644 --- a/test/test_encryption.py +++ b/test/test_encryption.py @@ -1329,9 +1329,9 @@ def test_04_aws_endpoint_invalid_port(self): "key": ("arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0"), "endpoint": "kms.us-east-1.amazonaws.com:12345", } - with self.assertRaises(EncryptionError) as ctx: + with self.assertRaisesRegex(EncryptionError, "kms.us-east-1.amazonaws.com:12345") as ctx: self.client_encryption.create_data_key("aws", master_key=master_key) - self.assertIsInstance(ctx.exception.cause, socket.error) + self.assertIsInstance(ctx.exception.cause, AutoReconnect) @unittest.skipUnless(any(AWS_CREDS.values()), "AWS environment credentials are not set") def test_05_aws_endpoint_wrong_region(self): @@ -2198,7 +2198,7 @@ def test_02_add_key_alt_name(self): # https://github.com/mongodb/specifications/blob/d4c9432/source/client-side-encryption/tests/README.rst#explicit-encryption class TestExplicitQueryableEncryption(EncryptionIntegrationTest): @client_context.require_no_standalone - @client_context.require_version_min(6, 0, -1) + @client_context.require_version_min(7, 0, -1) def setUp(self): super().setUp() self.encrypted_fields = json_data("etc", "data", "encryptedFields.json") @@ -2206,9 +2206,6 @@ def setUp(self): self.key1_id = self.key1_document["_id"] self.db = self.client.test_queryable_encryption self.client.drop_database(self.db) - self.db.command("create", self.encrypted_fields["escCollection"]) - self.db.command("create", self.encrypted_fields["eccCollection"]) - self.db.command("create", self.encrypted_fields["ecocCollection"]) self.db.command("create", "explicit_encryption", encryptedFields=self.encrypted_fields) key_vault = create_key_vault(self.client.keyvault.datakeys, self.key1_document) self.addCleanup(key_vault.drop) @@ -2425,7 +2422,7 @@ def test_02_success(self): class TestQueryableEncryptionDocsExample(EncryptionIntegrationTest): # Queryable Encryption is not supported on Standalone topology. 
@client_context.require_no_standalone - @client_context.require_version_min(6, 0, -1) + @client_context.require_version_min(7, 0, -1) def setUp(self): super().setUp() @@ -2517,7 +2514,7 @@ def MongoClient(**kwargs): # https://github.com/mongodb/specifications/blob/master/source/client-side-encryption/tests/README.rst#range-explicit-encryption class TestRangeQueryProse(EncryptionIntegrationTest): @client_context.require_no_standalone - @client_context.require_version_min(6, 2, -1) + @client_context.require_version_min(7, 0, -1) def setUp(self): super().setUp() self.key1_document = json_data("etc", "data", "keys", "key1-document.json") @@ -2710,7 +2707,7 @@ def test_int(self): # https://github.com/mongodb/specifications/blob/master/source/client-side-encryption/tests/README.rst#automatic-data-encryption-keys class TestAutomaticDecryptionKeys(EncryptionIntegrationTest): @client_context.require_no_standalone - @client_context.require_version_min(6, 0, -1) + @client_context.require_version_min(7, 0, -1) def setUp(self): super().setUp() self.key1_document = json_data("etc", "data", "keys", "key1-document.json") diff --git a/test/unified_format.py b/test/unified_format.py index 18130290b5..584ee04ddd 100644 --- a/test/unified_format.py +++ b/test/unified_format.py @@ -1016,7 +1016,9 @@ def process_error(self, exception, spec): if is_timeout_error: self.assertIsInstance(exception, PyMongoError) - self.assertTrue(exception.timeout, msg=exception) + if not exception.timeout: + # Re-raise the exception for better diagnostics. + raise exception if error_contains: if isinstance(exception, BulkWriteError): diff --git a/test/utils.py b/test/utils.py index 842e9e3a7b..b39375925c 100644 --- a/test/utils.py +++ b/test/utils.py @@ -358,17 +358,13 @@ def __getitem__(self, item): class CompareType(object): - """Class that compares equal to any object of the given type.""" + """Class that compares equal to any object of the given type(s).""" - def __init__(self, type): - self.type = type + def __init__(self, types): + self.types = types def __eq__(self, other): - return isinstance(other, self.type) - - def __ne__(self, other): - """Needed for Python 2.""" - return not self.__eq__(other) + return isinstance(other, self.types) class FunctionCallRecorder(object): diff --git a/test/utils_spec_runner.py b/test/utils_spec_runner.py index 4252420909..6530f39da6 100644 --- a/test/utils_spec_runner.py +++ b/test/utils_spec_runner.py @@ -32,7 +32,7 @@ ) from typing import List -from bson import decode, encode +from bson import ObjectId, decode, encode from bson.binary import Binary from bson.int64 import Int64 from bson.son import SON @@ -336,22 +336,24 @@ def _run_op(self, sessions, collection, op, in_with_transaction): if expect_error(op): with self.assertRaises(self.allowable_errors(op), msg=op["name"]) as context: out = self.run_operation(sessions, collection, op.copy()) + exc = context.exception if expect_error_message(expected_result): - if isinstance(context.exception, BulkWriteError): - errmsg = str(context.exception.details).lower() + if isinstance(exc, BulkWriteError): + errmsg = str(exc.details).lower() else: - errmsg = str(context.exception).lower() + errmsg = str(exc).lower() self.assertIn(expected_result["errorContains"].lower(), errmsg) if expect_error_code(expected_result): - self.assertEqual( - expected_result["errorCodeName"], context.exception.details.get("codeName") - ) + self.assertEqual(expected_result["errorCodeName"], exc.details.get("codeName")) if 
expect_error_labels_contain(expected_result): - self.assertErrorLabelsContain( - context.exception, expected_result["errorLabelsContain"] - ) + self.assertErrorLabelsContain(exc, expected_result["errorLabelsContain"]) if expect_error_labels_omit(expected_result): - self.assertErrorLabelsOmit(context.exception, expected_result["errorLabelsOmit"]) + self.assertErrorLabelsOmit(exc, expected_result["errorLabelsOmit"]) + if expect_timeout_error(expected_result): + self.assertIsInstance(exc, PyMongoError) + if not exc.timeout: + # Re-raise the exception for better diagnostics. + raise exc # Reraise the exception if we're in the with_transaction # callback. @@ -427,6 +429,12 @@ def check_events(self, test, listener, session_ids): elif key not in actual: self.fail("Expected key [%s] in %r" % (key, actual)) else: + # Workaround an incorrect command started event in fle2v2-CreateCollection.yml + # added in DRIVERS-2524. + if key == "encryptedFields": + for n in ("eccCollection", "ecocCollection", "escCollection"): + if val.get(n) is None: + val.pop(n, None) self.assertEqual( val, decode_raw(actual[key]), "Key [%s] in %s" % (key, actual) ) @@ -617,6 +625,13 @@ def expect_error_labels_omit(expected_result): return False +def expect_timeout_error(expected_result): + if isinstance(expected_result, dict): + return expected_result["isTimeoutError"] + + return False + + def expect_error(op): expected_result = op.get("result") return ( @@ -625,6 +640,7 @@ def expect_error(op): or expect_error_code(expected_result) or expect_error_labels_contain(expected_result) or expect_error_labels_omit(expected_result) + or expect_timeout_error(expected_result) ) @@ -644,6 +660,11 @@ def decode_raw(val): TYPES = { "binData": Binary, "long": Int64, + "int": int, + "string": str, + "objectId": ObjectId, + "object": dict, + "array": list, } @@ -654,7 +675,11 @@ def wrap_types(val): if isinstance(val, abc.Mapping): typ = val.get("$$type") if typ: - return CompareType(TYPES[typ]) + if isinstance(typ, str): + types = TYPES[typ] + else: + types = tuple(TYPES[t] for t in typ) + return CompareType(types) d = {} for key in val: d[key] = wrap_types(val[key]) From 3f1e960c4bb97b2864beee79ec42eb1483a79a3b Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Mon, 1 May 2023 12:24:18 -0500 Subject: [PATCH 0380/1588] PYTHON-3690 Do not install unittest-xml-reporting on MacOS EG Hosts (#1200) --- .evergreen/utils.sh | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/.evergreen/utils.sh b/.evergreen/utils.sh index a474ce545e..c97cc34362 100755 --- a/.evergreen/utils.sh +++ b/.evergreen/utils.sh @@ -30,7 +30,10 @@ createvirtualenv () { fi python -m pip install --upgrade pip - python -m pip install --upgrade setuptools wheel unittest-xml-reporting + python -m pip install --upgrade setuptools wheel + # lxml only has wheels for macos 10.15+ + python -m pip install unittest-xml-reporting || true + } # Usage: From eb137fdf5cdf61788a9d490a095361e16b6a3f7a Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Fri, 28 Apr 2023 15:01:25 -0700 Subject: [PATCH 0381/1588] PYTHON-3686 codec_options is no longer shadowed --- .github/workflows/test-python.yml | 2 -- 1 file changed, 2 deletions(-) diff --git a/.github/workflows/test-python.yml b/.github/workflows/test-python.yml index bb0b836788..b4a8177fda 100644 --- a/.github/workflows/test-python.yml +++ b/.github/workflows/test-python.yml @@ -63,8 +63,6 @@ jobs: - name: Run mypy run: | mypy --install-types --non-interactive bson gridfs tools pymongo - # Test overshadowed 
codec_options.py file - mypy --install-types --non-interactive bson/codec_options.py mypy --install-types --non-interactive --disable-error-code var-annotated --disable-error-code attr-defined --disable-error-code union-attr --disable-error-code assignment --disable-error-code no-redef --disable-error-code index --allow-redefinition --allow-untyped-globals --exclude "test/mypy_fails/*.*" test python -m pip install -U typing_extensions mypy --install-types --non-interactive test/test_typing.py test/test_typing_strict.py From 14e8b011c20aaf88a579c08b27a4be5e8eac0b89 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Tue, 2 May 2023 13:45:55 -0700 Subject: [PATCH 0382/1588] PYTHON-3700 Clean up docs for create_index/drop_indexes (#1201) --- pymongo/collection.py | 6 +----- pymongo/operations.py | 4 ++-- 2 files changed, 3 insertions(+), 7 deletions(-) diff --git a/pymongo/collection.py b/pymongo/collection.py index a5d3be9e05..ac78b6878d 100644 --- a/pymongo/collection.py +++ b/pymongo/collection.py @@ -2030,11 +2030,11 @@ def create_index( pairs specifying the index to create - `session` (optional): a :class:`~pymongo.client_session.ClientSession`. - arguments - `comment` (optional): A user-provided comment to attach to this command. - `**kwargs` (optional): any additional index creation options (see the above list) should be passed as keyword + arguments. .. versionchanged:: 4.4 Allow passing a list containing (key, direction) pairs @@ -2082,14 +2082,11 @@ def drop_indexes( :Parameters: - `session` (optional): a :class:`~pymongo.client_session.ClientSession`. - arguments - `comment` (optional): A user-provided comment to attach to this command. - `**kwargs` (optional): optional arguments to the createIndexes command (like maxTimeMS) can be passed as keyword arguments. - - .. note:: The :attr:`~pymongo.collection.Collection.write_concern` of this collection is automatically applied to this operation. @@ -2100,7 +2097,6 @@ def drop_indexes( .. versionchanged:: 3.4 Apply this collection's write concern automatically to this operation when connected to MongoDB >= 3.4. - """ if comment is not None: kwargs["comment"] = comment diff --git a/pymongo/operations.py b/pymongo/operations.py index f73262074d..ad119f2ecc 100644 --- a/pymongo/operations.py +++ b/pymongo/operations.py @@ -480,10 +480,10 @@ def __init__(self, keys: _IndexKeyHint, **kwargs: Any) -> None: :Parameters: - `keys`: a single key or a list containing (key, direction) pairs - or keys specifying the index to create + or keys specifying the index to create. - `**kwargs` (optional): any additional index creation options (see the above list) should be passed as keyword - arguments + arguments. .. versionchanged:: 3.11 Added the ``hidden`` option. 
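The docstrings cleaned up in this patch all share one calling convention. As a minimal sketch of the documented usage (the connection, database, collection, and field names below are illustrative assumptions, not taken from the patch)::

    from pymongo import ASCENDING, DESCENDING, MongoClient
    from pymongo.operations import IndexModel

    coll = MongoClient().test.docs_demo  # illustrative namespace

    # keys: a single key or a list of (key, direction) pairs; any
    # additional index creation options are passed as keyword arguments.
    coll.create_index([("user_id", ASCENDING), ("created", DESCENDING)], unique=True)

    # IndexModel accepts the same keys/kwargs shape, for use with create_indexes.
    coll.create_indexes([IndexModel("nickname", hidden=True)])

    # drop_indexes can be used on non-existent collections and passes
    # extra command options (like maxTimeMS) as keyword arguments.
    coll.drop_indexes(maxTimeMS=1000)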
From d340710e3d488f3906d0e3fa4d4f25e56779fc3a Mon Sep 17 00:00:00 2001 From: Jean-Christophe Fillion-Robin Date: Wed, 3 May 2023 17:47:24 -0400 Subject: [PATCH 0383/1588] PYTHON-3703 Fix typos and add codespell pre-commit hook (#1203) Update pre-commit config adding "codespell" hook --- .pre-commit-config.yaml | 13 ++++++ bson/__init__.py | 2 +- bson/_cbsonmodule.c | 2 +- bson/objectid.py | 2 +- bson/time64.c | 4 +- doc/changelog.rst | 4 +- doc/contributors.rst | 1 + doc/examples/bulk.rst | 2 +- doc/examples/type_hints.rst | 10 ++-- gridfs/grid_file.py | 2 +- pymongo/collection.py | 6 +-- pymongo/common.py | 2 +- pymongo/message.py | 2 +- pymongo/ocsp_support.py | 2 +- pymongo/results.py | 2 +- pymongo/uri_parser.py | 4 +- .../spec/legacy/fle2v2-MissingKey.json | 4 +- test/csot/deprecated-options.json | 2 +- test/mod_wsgi_test/mod_wsgi_test.conf | 2 +- test/mypy_fails/raw_bson_document.py | 8 ++-- test/mypy_fails/typedict_client.py | 8 ++-- test/test_read_write_concern_spec.py | 2 +- test/test_typing.py | 46 +++++++++---------- 23 files changed, 73 insertions(+), 59 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index d8455981f0..f19f15682c 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -83,3 +83,16 @@ repos: files: \.py$ exclude: "^(test|tools)/" stages: [manual] + +- repo: https://github.com/codespell-project/codespell + rev: "v2.2.4" + hooks: + - id: codespell + # Examples of errors or updates to justify the exceptions: + # - test/test_on_demand_csfle.py:44: FLE ==> FILE + # - test/test_bson.py:1043: fo ==> of, for, to, do, go + # - test/bson_corpus/decimal128-4.json:98: Infinit ==> Infinite + # - test/test_bson.py:267: isnt ==> isn't + # - test/versioned-api/crud-api-version-1-strict.json:514: nin ==> inn, min, bin, nine + # - test/test_client.py:188: te ==> the, be, we, to + args: ["-L", "fle,fo,infinit,isnt,nin,te"] diff --git a/bson/__init__.py b/bson/__init__.py index 700a5d4cf8..d95c511fc7 100644 --- a/bson/__init__.py +++ b/bson/__init__.py @@ -1282,7 +1282,7 @@ def decode_file_iter( # Read size of next object. size_data = file_obj.read(4) if not size_data: - break # Finished with file normaly. + break # Finished with file normally. elif len(size_data) != 4: raise InvalidBSON("cut off in middle of objsize") obj_size = _UNPACK_INT_FROM(size_data, 0)[0] - 4 diff --git a/bson/_cbsonmodule.c b/bson/_cbsonmodule.c index 8678e8050b..e45a11be32 100644 --- a/bson/_cbsonmodule.c +++ b/bson/_cbsonmodule.c @@ -968,7 +968,7 @@ static int _write_element_to_buffer(PyObject* self, buffer_t buffer, } } - /* No _type_marker attibute or not one of our types. */ + /* No _type_marker attribute or not one of our types. */ if (PyBool_Check(value)) { const char c = (value == Py_True) ? 0x01 : 0x00; diff --git a/bson/objectid.py b/bson/objectid.py index 4bc0243532..1fab986b8b 100644 --- a/bson/objectid.py +++ b/bson/objectid.py @@ -231,7 +231,7 @@ def __getstate__(self) -> bytes: def __setstate__(self, value: Any) -> None: """explicit state set from pickling""" - # Provide backwards compatability with OIDs + # Provide backwards compatibility with OIDs # pickled with pymongo-1.9 or older. 
if isinstance(value, dict): oid = value["_ObjectId__id"] diff --git a/bson/time64.c b/bson/time64.c index 8d2886592e..a21fbb90bd 100644 --- a/bson/time64.c +++ b/bson/time64.c @@ -73,7 +73,7 @@ static const Year years_in_gregorian_cycle = 400; #define days_in_gregorian_cycle ((365 * 400) + 100 - 4 + 1) static const Time64_T seconds_in_gregorian_cycle = days_in_gregorian_cycle * 60LL * 60LL * 24LL; -/* Year range we can trust the time funcitons with */ +/* Year range we can trust the time functions with */ #define MAX_SAFE_YEAR 2037 #define MIN_SAFE_YEAR 1971 @@ -739,7 +739,7 @@ struct TM *cbson_localtime64_r (const Time64_T *time, struct TM *local_tm) /* GMT is Jan 1st, xx01 year, but localtime is still Dec 31st in a non-leap xx00. There is one point in the cycle we can't account for which the safe xx00 year is a leap - year. So we need to correct for Dec 31st comming out as + year. So we need to correct for Dec 31st coming out as the 366th day of the year. */ if( !IS_LEAP(local_tm->tm_year) && local_tm->tm_yday == 365 ) diff --git a/doc/changelog.rst b/doc/changelog.rst index 19830b09ac..db2259f95f 100644 --- a/doc/changelog.rst +++ b/doc/changelog.rst @@ -2805,7 +2805,7 @@ Important New Features: - The URI parser has been moved into its own module and can be used directly by application code. - AutoReconnect exception now provides information about the error that - actually occured instead of a generic failure message. + actually occurred instead of a generic failure message. - A number of new helper methods have been added with options for setting and unsetting cursor flags, re-indexing a collection, fsync and locking a server, and getting the server's current operations. @@ -2930,7 +2930,7 @@ Issues resolved - `PYTHON-186 `_: When storing integers, type is selected according to value instead of type - `PYTHON-173 `_: - as_class option is not propogated by Cursor.clone + as_class option is not propagated by Cursor.clone - `PYTHON-113 `_: Redunducy in MasterSlaveConnection diff --git a/doc/contributors.rst b/doc/contributors.rst index 7ab87f7790..7efda5b20d 100644 --- a/doc/contributors.rst +++ b/doc/contributors.rst @@ -93,3 +93,4 @@ The following is a list of people who have contributed to - Ishmum Jawad Khan (ishmum123) - Arie Bovenberg (ariebovenberg) - Ben Warner (bcwarner) +- Jean-Christophe Fillion-Robin (jcfr) diff --git a/doc/examples/bulk.rst b/doc/examples/bulk.rst index c2c5acc687..3ed8e09645 100644 --- a/doc/examples/bulk.rst +++ b/doc/examples/bulk.rst @@ -80,7 +80,7 @@ of operations performed. The first write failure that occurs (e.g. duplicate key error) aborts the remaining operations, and PyMongo raises -:class:`~pymongo.errors.BulkWriteError`. The :attr:`details` attibute of +:class:`~pymongo.errors.BulkWriteError`. The :attr:`details` attribute of the exception instance provides the execution results up until the failure occurred and details about the failure - including the operation that caused the failure. 
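As a minimal sketch of catching the error described in the paragraph above and inspecting its ``details`` attribute (the collection name and documents are illustrative)::

    from pymongo import MongoClient
    from pymongo.errors import BulkWriteError

    coll = MongoClient().test.bulk_demo  # illustrative namespace
    coll.drop()
    try:
        # The second insert hits a duplicate key error; because the bulk
        # is ordered (the default), the third operation is never attempted.
        coll.insert_many([{"_id": 1}, {"_id": 1}, {"_id": 2}])
    except BulkWriteError as exc:
        # Execution results up to the failure, plus the failing operation.
        print(exc.details["nInserted"])  # 1
        print(exc.details["writeErrors"][0]["op"])  # {'_id': 1}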
diff --git a/doc/examples/type_hints.rst b/doc/examples/type_hints.rst index f202ab32e1..8aaaff81eb 100644 --- a/doc/examples/type_hints.rst +++ b/doc/examples/type_hints.rst @@ -264,7 +264,7 @@ Troubleshooting Client Type Annotation ~~~~~~~~~~~~~~~~~~~~~~ -If you forget to add a type annotation for a :class:`~pymongo.mongo_client.MongoClient` object you may get the followig ``mypy`` error:: +If you forget to add a type annotation for a :class:`~pymongo.mongo_client.MongoClient` object you may get the following ``mypy`` error:: from pymongo import MongoClient client = MongoClient() # error: Need type annotation for "client" @@ -313,10 +313,10 @@ Another example is trying to set a value on a :class:`~bson.raw_bson.RawBSONDocu coll = client.test.test doc = {"my": "doc"} coll.insert_one(doc) - retreived = coll.find_one({"_id": doc["_id"]}) - assert retreived is not None - assert len(retreived.raw) > 0 - retreived[ + retrieved = coll.find_one({"_id": doc["_id"]}) + assert retrieved is not None + assert len(retrieved.raw) > 0 + retrieved[ "foo" ] = "bar" # error: Unsupported target for indexed assignment # ("RawBSONDocument") [index] diff --git a/gridfs/grid_file.py b/gridfs/grid_file.py index 50efc0cd23..5ec6352684 100644 --- a/gridfs/grid_file.py +++ b/gridfs/grid_file.py @@ -381,7 +381,7 @@ def write(self, data: Any) -> None: def writelines(self, sequence: Iterable[Any]) -> None: """Write a sequence of strings to the file. - Does not add seperators. + Does not add separators. """ for line in sequence: self.write(line) diff --git a/pymongo/collection.py b/pymongo/collection.py index ac78b6878d..91b4013ee8 100644 --- a/pymongo/collection.py +++ b/pymongo/collection.py @@ -2076,7 +2076,7 @@ def drop_indexes( ) -> None: """Drops all indexes on this collection. - Can be used on non-existant collections or collections with no indexes. + Can be used on non-existent collections or collections with no indexes. Raises OperationFailure on an error. :Parameters: @@ -2112,7 +2112,7 @@ def drop_index( ) -> None: """Drops the specified index on this collection. - Can be used on non-existant collections or collections with no + Can be used on non-existent collections or collections with no indexes. Raises OperationFailure on an error (e.g. trying to drop an index that does not exist). `index_or_name` can be either an index name (as returned by `create_index`), @@ -2683,7 +2683,7 @@ def rename( if not new_name or ".." in new_name: raise InvalidName("collection names cannot be empty") if new_name[0] == "." or new_name[-1] == ".": - raise InvalidName("collecion names must not start or end with '.'") + raise InvalidName("collection names must not start or end with '.'") if "$" in new_name and not new_name.startswith("oplog.$main"): raise InvalidName("collection names must not contain '$'") diff --git a/pymongo/common.py b/pymongo/common.py index ba861c1545..4b8aeb020c 100644 --- a/pymongo/common.py +++ b/pymongo/common.py @@ -469,7 +469,7 @@ def validate_document_class( raise TypeError( "%s must be dict, bson.son.SON, " "bson.raw_bson.RawBSONDocument, or a " - "sublass of collections.MutableMapping" % (option,) + "subclass of collections.MutableMapping" % (option,) ) return value diff --git a/pymongo/message.py b/pymongo/message.py index 9fa64a875a..f7a173ca8a 100644 --- a/pymongo/message.py +++ b/pymongo/message.py @@ -1077,7 +1077,7 @@ def _batched_op_msg_impl(operation, command, docs, ack, opts, ctx, buf): new_message_size = buf.tell() + doc_length # Does first document exceed max_message_size? 
doc_too_large = idx == 0 and (new_message_size > max_message_size) - # When OP_MSG is used unacknowleged we have to check + # When OP_MSG is used unacknowledged we have to check # document size client side or applications won't be notified. # Otherwise we let the server deal with documents that are too large # since ordered=False causes those documents to be skipped instead of diff --git a/pymongo/ocsp_support.py b/pymongo/ocsp_support.py index 3a201f1f5e..e7f4a15d84 100644 --- a/pymongo/ocsp_support.py +++ b/pymongo/ocsp_support.py @@ -312,7 +312,7 @@ def _ocsp_callback(conn, ocsp_bytes, user_data): return 0 if not user_data.check_ocsp_endpoint: _LOGGER.debug("OCSP endpoint checking is disabled, soft fail.") - # No stapled OCSP response, checking responder URI diabled, soft fail. + # No stapled OCSP response, checking responder URI disabled, soft fail. return 1 # https://tools.ietf.org/html/rfc6960#section-3.1 ext = _get_extension(cert, _AuthorityInformationAccess) diff --git a/pymongo/results.py b/pymongo/results.py index 5803900398..b072979499 100644 --- a/pymongo/results.py +++ b/pymongo/results.py @@ -45,7 +45,7 @@ def acknowledged(self) -> bool: .. note:: If the :attr:`acknowledged` attribute is ``False`` all other - attibutes of this class will raise + attributes of this class will raise :class:`~pymongo.errors.InvalidOperation` when accessed. Values for other attributes cannot be determined if the write operation was unacknowledged. diff --git a/pymongo/uri_parser.py b/pymongo/uri_parser.py index 398dfbff00..e3aeee399e 100644 --- a/pymongo/uri_parser.py +++ b/pymongo/uri_parser.py @@ -42,7 +42,7 @@ def _unquoted_percent(s): """Check for unescaped percent signs. - :Paramaters: + :Parameters: - `s`: A string. `s` can have things like '%25', '%2525', and '%E2%85%A8' but cannot have unquoted percent like '%foo'. """ @@ -64,7 +64,7 @@ def parse_userinfo(userinfo: str) -> Tuple[str, str]: Returns a 2-tuple containing the unescaped username followed by the unescaped password. 
- :Paramaters: + :Parameters: - `userinfo`: A string of the form : """ if "@" in userinfo or userinfo.count(":") > 1 or _unquoted_percent(userinfo): diff --git a/test/client-side-encryption/spec/legacy/fle2v2-MissingKey.json b/test/client-side-encryption/spec/legacy/fle2v2-MissingKey.json index 0b7e86bca3..a072454112 100644 --- a/test/client-side-encryption/spec/legacy/fle2v2-MissingKey.json +++ b/test/client-side-encryption/spec/legacy/fle2v2-MissingKey.json @@ -55,7 +55,7 @@ "key_vault_data": [], "tests": [ { - "description": "FLE2 encrypt fails with mising key", + "description": "FLE2 encrypt fails with missing key", "clientOptions": { "autoEncryptOpts": { "kmsProviders": { @@ -86,7 +86,7 @@ ] }, { - "description": "FLE2 decrypt fails with mising key", + "description": "FLE2 decrypt fails with missing key", "clientOptions": { "autoEncryptOpts": { "kmsProviders": { diff --git a/test/csot/deprecated-options.json b/test/csot/deprecated-options.json index 0e2bdefd73..9c9b9a2288 100644 --- a/test/csot/deprecated-options.json +++ b/test/csot/deprecated-options.json @@ -1,5 +1,5 @@ { - "description": "operations ignore deprected timeout options if timeoutMS is set", + "description": "operations ignore deprecated timeout options if timeoutMS is set", "schemaVersion": "1.9", "runOnRequirements": [ { diff --git a/test/mod_wsgi_test/mod_wsgi_test.conf b/test/mod_wsgi_test/mod_wsgi_test.conf index 9505933e96..6a77c675d5 100644 --- a/test/mod_wsgi_test/mod_wsgi_test.conf +++ b/test/mod_wsgi_test/mod_wsgi_test.conf @@ -27,7 +27,7 @@ WSGISocketPrefix /tmp/ WSGIProcessGroup mod_wsgi_test - # For the convienience of unittests, rather than hard-code the location of + # For the convenience of unittests, rather than hard-code the location of # mod_wsgi_test.wsgi, include it in the URL, so # http://localhost/location-of-pymongo-checkout will work: diff --git a/test/mypy_fails/raw_bson_document.py b/test/mypy_fails/raw_bson_document.py index 427140dfac..0e17224874 100644 --- a/test/mypy_fails/raw_bson_document.py +++ b/test/mypy_fails/raw_bson_document.py @@ -5,9 +5,9 @@ coll = client.test.test doc = {"my": "doc"} coll.insert_one(doc) -retreived = coll.find_one({"_id": doc["_id"]}) -assert retreived is not None -assert len(retreived.raw) > 0 -retreived[ +retrieved = coll.find_one({"_id": doc["_id"]}) +assert retrieved is not None +assert len(retrieved.raw) > 0 +retrieved[ "foo" ] = "bar" # error: Unsupported target for indexed assignment ("RawBSONDocument") [index] diff --git a/test/mypy_fails/typedict_client.py b/test/mypy_fails/typedict_client.py index 24dd84ee28..6619df10fd 100644 --- a/test/mypy_fails/typedict_client.py +++ b/test/mypy_fails/typedict_client.py @@ -10,9 +10,9 @@ class Movie(TypedDict): client: MongoClient[Movie] = MongoClient() coll = client.test.test -retreived = coll.find_one({"_id": "foo"}) -assert retreived is not None -assert retreived["year"] == 1 +retrieved = coll.find_one({"_id": "foo"}) +assert retrieved is not None +assert retrieved["year"] == 1 assert ( - retreived["name"] == 2 + retrieved["name"] == 2 ) # error: Non-overlapping equality check (left operand type: "str", right operand type: "Literal[2]") [comparison-overlap] diff --git a/test/test_read_write_concern_spec.py b/test/test_read_write_concern_spec.py index 5cc4845e32..26bc111f00 100644 --- a/test/test_read_write_concern_spec.py +++ b/test/test_read_write_concern_spec.py @@ -280,7 +280,7 @@ def run_test(self): self.assertEqual(write_concern.acknowledged, test_case["isAcknowledged"]) 
self.assertEqual(write_concern.is_server_default, test_case["isServerDefault"]) if "readConcern" in test_case: - # Any string for 'level' is equaly valid + # Any string for 'level' is equally valid read_concern = ReadConcern(**test_case["readConcern"]) self.assertEqual(read_concern.document, test_case["readConcernDocument"]) self.assertEqual(not bool(read_concern.level), test_case["isServerDefault"]) diff --git a/test/test_typing.py b/test/test_typing.py index 8fc0f5a23e..0aebc707cd 100644 --- a/test/test_typing.py +++ b/test/test_typing.py @@ -111,11 +111,11 @@ def test_insert_find(self) -> None: coll2 = self.client.test.test2 result = self.coll.insert_one(doc) self.assertEqual(result.inserted_id, doc["_id"]) - retreived = self.coll.find_one({"_id": doc["_id"]}) - if retreived: + retrieved = self.coll.find_one({"_id": doc["_id"]}) + if retrieved: # Documents returned from find are mutable. - retreived["new_field"] = 1 - result2 = coll2.insert_one(retreived) + retrieved["new_field"] = 1 + result2 = coll2.insert_one(retrieved) self.assertEqual(result2.inserted_id, result.inserted_id) def test_cursor_iterable(self) -> None: @@ -182,9 +182,9 @@ def test_default_document_type(self) -> None: coll = client.test.test doc = {"my": "doc"} coll.insert_one(doc) - retreived = coll.find_one({"_id": doc["_id"]}) - assert retreived is not None - retreived["a"] = 1 + retrieved = coll.find_one({"_id": doc["_id"]}) + assert retrieved is not None + retrieved["a"] = 1 def test_aggregate_pipeline(self) -> None: coll3 = self.client.test.test3 @@ -329,26 +329,26 @@ class TestDocumentType(unittest.TestCase): def test_default(self) -> None: client: MongoClient = MongoClient() coll = client.test.test - retreived = coll.find_one({"_id": "foo"}) - assert retreived is not None - retreived["a"] = 1 + retrieved = coll.find_one({"_id": "foo"}) + assert retrieved is not None + retrieved["a"] = 1 @only_type_check def test_explicit_document_type(self) -> None: client: MongoClient[Dict[str, Any]] = MongoClient() coll = client.test.test - retreived = coll.find_one({"_id": "foo"}) - assert retreived is not None - retreived["a"] = 1 + retrieved = coll.find_one({"_id": "foo"}) + assert retrieved is not None + retrieved["a"] = 1 @only_type_check def test_typeddict_document_type(self) -> None: client: MongoClient[Movie] = MongoClient() coll = client.test.test - retreived = coll.find_one({"_id": "foo"}) - assert retreived is not None - assert retreived["year"] == 1 - assert retreived["name"] == "a" + retrieved = coll.find_one({"_id": "foo"}) + assert retrieved is not None + assert retrieved["year"] == 1 + assert retrieved["name"] == "a" @only_type_check def test_typeddict_document_type_insertion(self) -> None: @@ -450,17 +450,17 @@ def test_typeddict_find_notrequired(self): def test_raw_bson_document_type(self) -> None: client = MongoClient(document_class=RawBSONDocument) coll = client.test.test - retreived = coll.find_one({"_id": "foo"}) - assert retreived is not None - assert len(retreived.raw) > 0 + retrieved = coll.find_one({"_id": "foo"}) + assert retrieved is not None + assert len(retrieved.raw) > 0 @only_type_check def test_son_document_type(self) -> None: client = MongoClient(document_class=SON[str, Any]) coll = client.test.test - retreived = coll.find_one({"_id": "foo"}) - assert retreived is not None - retreived["a"] = 1 + retrieved = coll.find_one({"_id": "foo"}) + assert retrieved is not None + retrieved["a"] = 1 def test_son_document_type_runtime(self) -> None: client = MongoClient(document_class=SON[str, Any], 
connect=False) From 3d3e4dc2384606412448fad9103b9e4e296c4faa Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Thu, 4 May 2023 18:24:14 -0700 Subject: [PATCH 0384/1588] PYTHON-3464 Add FaaS platform to handshake metadata (#1204) Truncate metadata env, os, and platform fields if needed. --- pymongo/pool.py | 111 +++++++++++++++++++- test/test_client.py | 244 +++++++++++++++++++++++++++++--------------- 2 files changed, 272 insertions(+), 83 deletions(-) diff --git a/pymongo/pool.py b/pymongo/pool.py index 6355692ac9..42e6a642a4 100644 --- a/pymongo/pool.py +++ b/pymongo/pool.py @@ -23,8 +23,9 @@ import threading import time import weakref -from typing import Any, NoReturn, Optional +from typing import Any, Dict, NoReturn, Optional +import bson from bson import DEFAULT_CODEC_OPTIONS from bson.son import SON from pymongo import __version__, _csot, auth, helpers @@ -231,6 +232,108 @@ def _set_keepalive_times(sock): ) +def _is_lambda() -> bool: + if os.getenv("AWS_LAMBDA_RUNTIME_API"): + return True + env = os.getenv("AWS_EXECUTION_ENV") + if env: + return env.startswith("AWS_Lambda_") + return False + + +def _is_azure_func() -> bool: + return bool(os.getenv("FUNCTIONS_WORKER_RUNTIME")) + + +def _is_gcp_func() -> bool: + return bool(os.getenv("K_SERVICE") or os.getenv("FUNCTION_NAME")) + + +def _is_vercel() -> bool: + return bool(os.getenv("VERCEL")) + + +def _getenv_int(key: str) -> Optional[int]: + """Like os.getenv but returns an int, or None if the value is missing/malformed.""" + val = os.getenv(key) + if not val: + return None + try: + return int(val) + except ValueError: + return None + + +def _metadata_env() -> Dict[str, Any]: + env: Dict[str, Any] = {} + # Skip if multiple (or no) envs are matched. + if (_is_lambda(), _is_azure_func(), _is_gcp_func(), _is_vercel()).count(True) != 1: + return env + if _is_lambda(): + env["name"] = "aws.lambda" + region = os.getenv("AWS_REGION") + if region: + env["region"] = region + memory_mb = _getenv_int("AWS_LAMBDA_FUNCTION_MEMORY_SIZE") + if memory_mb is not None: + env["memory_mb"] = memory_mb + elif _is_azure_func(): + env["name"] = "azure.func" + elif _is_gcp_func(): + env["name"] = "gcp.func" + region = os.getenv("FUNCTION_REGION") + if region: + env["region"] = region + memory_mb = _getenv_int("FUNCTION_MEMORY_MB") + if memory_mb is not None: + env["memory_mb"] = memory_mb + timeout_sec = _getenv_int("FUNCTION_TIMEOUT_SEC") + if timeout_sec is not None: + env["timeout_sec"] = timeout_sec + elif _is_vercel(): + env["name"] = "vercel" + region = os.getenv("VERCEL_REGION") + if region: + env["region"] = region + return env + + +_MAX_METADATA_SIZE = 512 + + +# See: https://github.com/mongodb/specifications/blob/5112bcc/source/mongodb-handshake/handshake.rst#limitations +def _truncate_metadata(metadata): + """Perform metadata truncation.""" + if len(bson.encode(metadata)) <= _MAX_METADATA_SIZE: + return + # 1. Omit fields from env except env.name. + env_name = metadata.get("env", {}).get("name") + if env_name: + metadata["env"] = {"name": env_name} + if len(bson.encode(metadata)) <= _MAX_METADATA_SIZE: + return + # 2. Omit fields from os except os.type. + os_type = metadata.get("os", {}).get("type") + if os_type: + metadata["os"] = {"type": os_type} + if len(bson.encode(metadata)) <= _MAX_METADATA_SIZE: + return + # 3. Omit the env document entirely. + metadata.pop("env", None) + encoded_size = len(bson.encode(metadata)) + if encoded_size <= _MAX_METADATA_SIZE: + return + # 4. Truncate platform. 
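+    # (Note: the slice below assumes one byte per character, which holds
+    # for the ASCII platform strings assembled in PoolOptions; multi-byte
+    # UTF-8 characters would merely over-truncate slightly.)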
+ overflow = encoded_size - _MAX_METADATA_SIZE + plat = metadata.get("platform", "") + if plat: + plat = plat[:-overflow] + if plat: + metadata["platform"] = plat + else: + metadata.pop("platform", None) + + # If the first getaddrinfo call of this interpreter's life is on a thread, # while the main thread holds the import lock, getaddrinfo deadlocks trying # to import the IDNA codec. Import it here, where presumably we're on the @@ -364,6 +467,12 @@ def __init__( if driver.platform: self.__metadata["platform"] = "%s|%s" % (_METADATA["platform"], driver.platform) + env = _metadata_env() + if env: + self.__metadata["env"] = env + + _truncate_metadata(self.__metadata) + @property def _credentials(self): """A :class:`~pymongo.auth.MongoCredentials` instance or None.""" diff --git a/test/test_client.py b/test/test_client.py index b2f128f11a..624c460c08 100644 --- a/test/test_client.py +++ b/test/test_client.py @@ -28,6 +28,7 @@ import threading import time from typing import Iterable, Type, no_type_check +from unittest.mock import patch sys.path[0:0] = [""] @@ -113,7 +114,6 @@ class ClientUnitTest(unittest.TestCase): client: MongoClient @classmethod - @client_context.require_connection def setUpClass(cls): cls.client = rs_or_single_client(connect=False, serverSelectionTimeoutMS=100) @@ -1751,6 +1751,86 @@ def test_sigstop_sigcont(self): self.assertIn("TEST COMPLETED", log_output) self.assertNotIn("ServerHeartbeatFailedEvent", log_output) + def _test_handshake(self, env_vars, expected_env): + with patch.dict("os.environ", env_vars): + metadata = copy.deepcopy(_METADATA) + if expected_env is not None: + metadata["env"] = expected_env + with rs_or_single_client(serverSelectionTimeoutMS=10000) as client: + client.admin.command("ping") + options = client._MongoClient__options + self.assertEqual(options.pool_options.metadata, metadata) + + def test_handshake_01_aws(self): + self._test_handshake( + { + "AWS_EXECUTION_ENV": "AWS_Lambda_python3.9", + "AWS_REGION": "us-east-2", + "AWS_LAMBDA_FUNCTION_MEMORY_SIZE": "1024", + }, + {"name": "aws.lambda", "region": "us-east-2", "memory_mb": 1024}, + ) + + def test_handshake_02_azure(self): + self._test_handshake({"FUNCTIONS_WORKER_RUNTIME": "python"}, {"name": "azure.func"}) + + def test_handshake_03_gcp(self): + self._test_handshake( + { + "K_SERVICE": "servicename", + "FUNCTION_MEMORY_MB": "1024", + "FUNCTION_TIMEOUT_SEC": "60", + "FUNCTION_REGION": "us-central1", + }, + {"name": "gcp.func", "region": "us-central1", "memory_mb": 1024, "timeout_sec": 60}, + ) + # Extra case for FUNCTION_NAME. + self._test_handshake( + { + "FUNCTION_NAME": "funcname", + "FUNCTION_MEMORY_MB": "1024", + "FUNCTION_TIMEOUT_SEC": "60", + "FUNCTION_REGION": "us-central1", + }, + {"name": "gcp.func", "region": "us-central1", "memory_mb": 1024, "timeout_sec": 60}, + ) + + def test_handshake_04_vercel(self): + self._test_handshake( + {"VERCEL": "1", "VERCEL_REGION": "cdg1"}, {"name": "vercel", "region": "cdg1"} + ) + + def test_handshake_05_multiple(self): + self._test_handshake( + {"AWS_EXECUTION_ENV": "AWS_Lambda_python3.9", "FUNCTIONS_WORKER_RUNTIME": "python"}, + None, + ) + # Extra cases for other combos. 
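+        # (expected_env=None makes _test_handshake assert that no "env"
+        # field is added to the handshake metadata at all.)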
+ self._test_handshake( + {"FUNCTIONS_WORKER_RUNTIME": "python", "K_SERVICE": "servicename"}, + None, + ) + self._test_handshake({"K_SERVICE": "servicename", "VERCEL": "1"}, None) + + def test_handshake_06_region_too_long(self): + self._test_handshake( + {"AWS_EXECUTION_ENV": "AWS_Lambda_python3.9", "AWS_REGION": "a" * 512}, + {"name": "aws.lambda"}, + ) + + def test_handshake_07_memory_invalid_int(self): + self._test_handshake( + {"AWS_EXECUTION_ENV": "AWS_Lambda_python3.9", "AWS_LAMBDA_FUNCTION_MEMORY_SIZE": "big"}, + {"name": "aws.lambda"}, + ) + + def test_handshake_08_invalid_aws_ec2(self): + # AWS_EXECUTION_ENV needs to start with "AWS_Lambda_". + self._test_handshake( + {"AWS_EXECUTION_ENV": "EC2"}, + None, + ) + class TestExhaustCursor(IntegrationTest): """Test that clients properly handle errors from exhaust cursors.""" @@ -1867,6 +1947,87 @@ def test_exhaust_getmore_network_error(self): self.assertNotIn(sock_info, pool.sockets) self.assertEqual(0, pool.requests) + def test_gevent_task(self): + if not gevent_monkey_patched(): + raise SkipTest("Must be running monkey patched by gevent") + from gevent import spawn + + def poller(): + while True: + client_context.client.pymongo_test.test.insert_one({}) + + task = spawn(poller) + task.kill() + self.assertTrue(task.dead) + + def test_gevent_timeout(self): + if not gevent_monkey_patched(): + raise SkipTest("Must be running monkey patched by gevent") + from gevent import Timeout, spawn + + client = rs_or_single_client(maxPoolSize=1) + coll = client.pymongo_test.test + coll.insert_one({}) + + def contentious_task(): + # The 10 second timeout causes this test to fail without blocking + # forever if a bug like PYTHON-2334 is reintroduced. + with Timeout(10): + coll.find_one({"$where": delay(1)}) + + def timeout_task(): + with Timeout(0.5): + try: + coll.find_one({}) + except Timeout: + pass + + ct = spawn(contentious_task) + tt = spawn(timeout_task) + tt.join(15) + ct.join(15) + self.assertTrue(tt.dead) + self.assertTrue(ct.dead) + self.assertIsNone(tt.get()) + self.assertIsNone(ct.get()) + + def test_gevent_timeout_when_creating_connection(self): + if not gevent_monkey_patched(): + raise SkipTest("Must be running monkey patched by gevent") + from gevent import Timeout, spawn + + client = rs_or_single_client() + self.addCleanup(client.close) + coll = client.pymongo_test.test + pool = get_pool(client) + + # Patch the pool to delay the connect method. + def delayed_connect(*args, **kwargs): + time.sleep(3) + return pool.__class__.connect(pool, *args, **kwargs) + + pool.connect = delayed_connect + + def timeout_task(): + with Timeout(1): + try: + coll.find_one({}) + return False + except Timeout: + return True + + tt = spawn(timeout_task) + tt.join(10) + + # Assert that we got our active_sockets count back + self.assertEqual(pool.active_sockets, 0) + # Assert the greenlet is dead + self.assertTrue(tt.dead) + # Assert that the Timeout was raised all the way to the try + self.assertTrue(tt.get()) + # Unpatch the instance. 
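+        # (Deleting the instance attribute lets attribute lookup fall back
+        # to the class-level Pool.connect, undoing the monkey patch above.)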
+ del pool.connect + class TestClientLazyConnect(IntegrationTest): """Test concurrent operations on a lazily-connecting MongoClient.""" @@ -2046,87 +2207,6 @@ def test_network_error_on_delete(self): callback = lambda client: client.db.collection.delete_many({}) self._test_network_error(callback) - def test_gevent_task(self): - if not gevent_monkey_patched(): - raise SkipTest("Must be running monkey patched by gevent") - from gevent import spawn - - def poller(): - while True: - client_context.client.pymongo_test.test.insert_one({}) - - task = spawn(poller) - task.kill() - self.assertTrue(task.dead) - - def test_gevent_timeout(self): - if not gevent_monkey_patched(): - raise SkipTest("Must be running monkey patched by gevent") - from gevent import Timeout, spawn - - client = rs_or_single_client(maxPoolSize=1) - coll = client.pymongo_test.test - coll.insert_one({}) - - def contentious_task(): - # The 10 second timeout causes this test to fail without blocking - # forever if a bug like PYTHON-2334 is reintroduced. - with Timeout(10): - coll.find_one({"$where": delay(1)}) - - def timeout_task(): - with Timeout(0.5): - try: - coll.find_one({}) - except Timeout: - pass - - ct = spawn(contentious_task) - tt = spawn(timeout_task) - tt.join(15) - ct.join(15) - self.assertTrue(tt.dead) - self.assertTrue(ct.dead) - self.assertIsNone(tt.get()) - self.assertIsNone(ct.get()) - - def test_gevent_timeout_when_creating_connection(self): - if not gevent_monkey_patched(): - raise SkipTest("Must be running monkey patched by gevent") - from gevent import Timeout, spawn - - client = rs_or_single_client() - self.addCleanup(client.close) - coll = client.pymongo_test.test - pool = get_pool(client) - - # Patch the pool to delay the connect method. - def delayed_connect(*args, **kwargs): - time.sleep(3) - return pool.__class__.connect(pool, *args, **kwargs) - - pool.connect = delayed_connect - - def timeout_task(): - with Timeout(1): - try: - coll.find_one({}) - return False - except Timeout: - return True - - tt = spawn(timeout_task) - tt.join(10) - - # Assert that we got our active_sockets count back - self.assertEqual(pool.active_sockets, 0) - # Assert the greenlet is dead - self.assertTrue(tt.dead) - # Assert that the Timeout was raised all the way to the try - self.assertTrue(tt.get()) - # Unpatch the instance. - del pool.connect - class TestClientPool(MockClientTest): @client_context.require_connection From ae83a0b8be9bbcff2d6f251fa8bf1cdf545c99b1 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Mon, 8 May 2023 11:49:05 -0500 Subject: [PATCH 0385/1588] PYTHON-3570 Deprecate currentOp/collStats commands by 7.0 (#1205) --- pymongo/database.py | 4 +-- test/test_examples.py | 2 +- .../collectionData-createOptions.json | 32 ++++++++++++------- 3 files changed, 24 insertions(+), 14 deletions(-) diff --git a/pymongo/database.py b/pymongo/database.py index 358b946201..1e19d860e3 100644 --- a/pymongo/database.py +++ b/pymongo/database.py @@ -740,10 +740,10 @@ def command( >>> db.command("buildinfo") - For a command where the value matters, like ``{collstats: + For a command where the value matters, like ``{count: collection_name}`` we can do: - >>> db.command("collstats", collection_name) + >>> db.command("count", collection_name) For commands that take additional arguments we can use kwargs. 
So ``{filemd5: object_id, root: file_root}`` becomes: diff --git a/test/test_examples.py b/test/test_examples.py index 9c1adda69c..c08cb17e20 100644 --- a/test/test_examples.py +++ b/test/test_examples.py @@ -869,7 +869,7 @@ def test_commands(self): # End runCommand Example 1 # Start runCommand Example 2 - db.command("collStats", "restaurants") + db.command("count", "restaurants") # End runCommand Example 2 def test_index_management(self): diff --git a/test/unified-test-format/valid-pass/collectionData-createOptions.json b/test/unified-test-format/valid-pass/collectionData-createOptions.json index 64f8fb02ff..19edc2247b 100644 --- a/test/unified-test-format/valid-pass/collectionData-createOptions.json +++ b/test/unified-test-format/valid-pass/collectionData-createOptions.json @@ -49,19 +49,29 @@ "description": "collection is created with the correct options", "operations": [ { - "name": "runCommand", - "object": "database0", + "object": "collection0", + "name": "aggregate", "arguments": { - "commandName": "collStats", - "command": { - "collStats": "coll0", - "scale": 1 - } + "pipeline": [ + { + "$collStats": { + "storageStats": {} + } + }, + { + "$project": { + "capped": "$storageStats.capped", + "maxSize": "$storageStats.maxSize" + } + } + ] }, - "expectResult": { - "capped": true, - "maxSize": 4096 - } + "expectResult": [ + { + "capped": true, + "maxSize": 4096 + } + ] } ] } From 873032660bf22d09cbf0013f6b077196d0b95f40 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Mon, 8 May 2023 12:33:28 -0500 Subject: [PATCH 0386/1588] PYTHON-3708 Fix ReadTheDocs Build Failure (#1206) --- .readthedocs.yaml | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/.readthedocs.yaml b/.readthedocs.yaml index e2956c122b..39c86fff03 100644 --- a/.readthedocs.yaml +++ b/.readthedocs.yaml @@ -12,9 +12,13 @@ sphinx: # Set the version of Python and requirements required to build the docs. python: - version: 3.8 install: # Install pymongo itself. - method: pip path: . - requirements: doc/docs-requirements.txt + +build: + os: ubuntu-22.04 + tools: + python: "3.11" From 2752a7dd306d9793d449f035473351dd04be6917 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Tue, 9 May 2023 20:19:57 -0500 Subject: [PATCH 0387/1588] PYTHON-3456 CSFLE/QE Naming (#1208) --- doc/changelog.rst | 6 +- doc/examples/encryption.rst | 399 ++++++++++++++++++------------------ doc/index.rst | 2 +- 3 files changed, 208 insertions(+), 199 deletions(-) diff --git a/doc/changelog.rst b/doc/changelog.rst index db2259f95f..7b35e4cd61 100644 --- a/doc/changelog.rst +++ b/doc/changelog.rst @@ -12,9 +12,8 @@ Changes in Version 4.4 or Visual Studio. - Improved support for type-checking with MyPy "strict" mode (`--strict`). - Added support for Python 3.11. -- pymongocrypt 1.6.0 or later is now required for Client Side Field Level Encryption (CSFLE) - and Queryable Encryption (QE) support. MongoDB Server 7.0 introduced a backwards breaking - change to the QE protocol. Users taking advantage of the QE beta must now upgrade to +- pymongocrypt 1.6.0 or later is now required for :ref:`In-Use Encryption` support. MongoDB Server 7.0 introduced a backwards breaking + change to the QE protocol. Users taking advantage of the Queryable Encryption beta must now upgrade to MongoDB 7.0+ and PyMongo 4.4+. Issues Resolved @@ -198,7 +197,6 @@ in this release. .. _PYTHON-3311: https://jira.mongodb.org/browse/PYTHON-3311 .. _PYTHON-3187: https://jira.mongodb.org/browse/PYTHON-3187 .. 
_PyMongo 4.2 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=33196 -.. _Queryable Encryption: automatic-queryable-client-side-encryption Changes in Version 4.1.1 ------------------------- diff --git a/doc/examples/encryption.rst b/doc/examples/encryption.rst index 57c1a84b0f..2823d3f9bc 100644 --- a/doc/examples/encryption.rst +++ b/doc/examples/encryption.rst @@ -1,7 +1,12 @@ +.. _In-Use Encryption: + +In-Use Encryption +================= + .. _Client-Side Field Level Encryption: Client-Side Field Level Encryption -================================== +---------------------------------- New in MongoDB 4.2, client-side field level encryption allows an application to encrypt specific data fields in addition to pre-existing MongoDB @@ -359,199 +364,6 @@ data key and create a collection with the if __name__ == "__main__": main() -.. _automatic-queryable-client-side-encryption: - -Automatic Queryable Encryption (Beta) -````````````````````````````````````` - -PyMongo 4.4 brings beta support for Queryable Encryption with MongoDB >=7.0. - -Queryable Encryption is the second version of Client-Side Field Level Encryption. -Data is encrypted client-side. Queryable Encryption supports indexed encrypted fields, -which are further processed server-side. - -You must have MongoDB 7.0 Enterprise to preview the capability. - -Automatic encryption in Queryable Encryption is configured with an ``encrypted_fields`` mapping, as demonstrated by the following example:: - - import os - from bson.codec_options import CodecOptions - from pymongo import MongoClient - from pymongo.encryption import Algorithm, ClientEncryption, QueryType - from pymongo.encryption_options import AutoEncryptionOpts - - - local_master_key = os.urandom(96) - kms_providers = {"local": {"key": local_master_key}} - key_vault_namespace = "keyvault.datakeys" - key_vault_client = MongoClient() - client_encryption = ClientEncryption( - kms_providers, key_vault_namespace, key_vault_client, CodecOptions() - ) - key_vault = key_vault_client["keyvault"]["datakeys"] - key_vault.drop() - key1_id = client_encryption.create_data_key("local", key_alt_names=["firstName"]) - key2_id = client_encryption.create_data_key("local", key_alt_names=["lastName"]) - - encrypted_fields_map = { - "default.encryptedCollection": { - "escCollection": "encryptedCollection.esc", - "ecocCollection": "encryptedCollection.ecoc", - "fields": [ - { - "path": "firstName", - "bsonType": "string", - "keyId": key1_id, - "queries": [{"queryType": "equality"}], - }, - { - "path": "lastName", - "bsonType": "string", - "keyId": key2_id, - } - ] - } - } - - auto_encryption_opts = AutoEncryptionOpts( - kms_providers, key_vault_namespace, encrypted_fields_map=encrypted_fields_map) - client = MongoClient(auto_encryption_opts=auto_encryption_opts) - client.default.drop_collection('encryptedCollection') - coll = client.default.create_collection('encryptedCollection') - coll.insert_one({ "_id": 1, "firstName": "Jane", "lastName": "Doe" }) - docs = list(coll.find({"firstName": "Jane"})) - print(docs) - -In the above example, the ``firstName`` and ``lastName`` fields are -automatically encrypted and decrypted. - -Explicit Queryable Encryption (Beta) -```````````````````````````````````` - -PyMongo 4.4 brings beta support for Queryable Encryption with MongoDB >=7.0. - -Queryable Encryption is the second version of Client-Side Field Level Encryption. -Data is encrypted client-side. 
Queryable Encryption supports indexed encrypted fields, -which are further processed server-side. - -Explicit encryption in Queryable Encryption is performed using the ``encrypt`` and ``decrypt`` -methods. Automatic encryption (to allow the ``find_one`` to automatically decrypt) is configured -using an ``encrypted_fields`` mapping, as demonstrated by the following example:: - - import os - - from pymongo import MongoClient - from pymongo.encryption import (Algorithm, AutoEncryptionOpts, - ClientEncryption, QueryType) - - - def main(): - # This must be the same master key that was used to create - # the encryption key. - local_master_key = os.urandom(96) - kms_providers = {"local": {"key": local_master_key}} - - # The MongoDB namespace (db.collection) used to store - # the encryption data keys. - key_vault_namespace = "encryption.__pymongoTestKeyVault" - key_vault_db_name, key_vault_coll_name = key_vault_namespace.split(".", 1) - - # Set up the key vault (key_vault_namespace) for this example. - client = MongoClient() - key_vault = client[key_vault_db_name][key_vault_coll_name] - - # Ensure that two data keys cannot share the same keyAltName. - key_vault.drop() - key_vault.create_index( - "keyAltNames", - unique=True, - partialFilterExpression={"keyAltNames": {"$exists": True}}) - - client_encryption = ClientEncryption( - kms_providers, - key_vault_namespace, - # The MongoClient to use for reading/writing to the key vault. - # This can be the same MongoClient used by the main application. - client, - # The CodecOptions class used for encrypting and decrypting. - # This should be the same CodecOptions instance you have configured - # on MongoClient, Database, or Collection. - client.codec_options) - - # Create a new data key for the encryptedField. - indexed_key_id = client_encryption.create_data_key( - 'local') - unindexed_key_id = client_encryption.create_data_key( - 'local') - - encrypted_fields = { - "escCollection": "enxcol_.default.esc", - "ecocCollection": "enxcol_.default.ecoc", - "fields": [ - { - "keyId": indexed_key_id, - "path": "encryptedIndexed", - "bsonType": "string", - "queries": { - "queryType": "equality" - } - }, - { - "keyId": unindexed_key_id, - "path": "encryptedUnindexed", - "bsonType": "string", - } - ] - } - - opts = AutoEncryptionOpts( - {"local": {"key": local_master_key}}, - key_vault.full_name, - bypass_query_analysis=True, - key_vault_client=client, - ) - - # The MongoClient used to read/write application data. - encrypted_client = MongoClient(auto_encryption_opts=opts) - encrypted_client.drop_database("test") - db = encrypted_client.test - - # Create the collection with encrypted fields. - coll = db.create_collection("coll", encryptedFields=encrypted_fields) - - # Create and encrypt an indexed and unindexed value. - val = "encrypted indexed value" - unindexed_val = "encrypted unindexed value" - insert_payload_indexed = client_encryption.encrypt(val, Algorithm.INDEXED, indexed_key_id, contention_factor=1) - insert_payload_unindexed = client_encryption.encrypt(unindexed_val, Algorithm.UNINDEXED, - unindexed_key_id) - - # Insert the payloads. - coll.insert_one({ - "encryptedIndexed": insert_payload_indexed, - "encryptedUnindexed": insert_payload_unindexed - }) - - # Encrypt our find payload using QueryType.EQUALITY. - # The value of "data_key_id" must be the same as used to encrypt the values - # above. 
- find_payload = client_encryption.encrypt( - val, Algorithm.INDEXED, indexed_key_id, query_type=QueryType.EQUALITY, contention_factor=1 - ) - - # Find the document we inserted using the encrypted payload. - # The returned document is automatically decrypted. - doc = coll.find_one({"encryptedIndexed": find_payload}) - print('Returned document: %s' % (doc,)) - - # Cleanup resources. - client_encryption.close() - encrypted_client.close() - client.close() - - - if __name__ == "__main__": - main() .. _explicit-client-side-encryption: @@ -785,3 +597,202 @@ An application using Azure credentials would look like, this time using coll.insert_one({"encryptedField": "123456789"}) The driver will `acquire an access token `_ from the Azure VM. + +.. _Queryable Encryption: + +Queryable Encryption +-------------------- + +.. _automatic-queryable-client-side-encryption: + +Automatic Queryable Encryption (Beta) +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +PyMongo 4.4 brings beta support for Queryable Encryption with MongoDB >=7.0. + +Queryable Encryption is the second version of Client-Side Field Level Encryption. +Data is encrypted client-side. Queryable Encryption supports indexed encrypted fields, +which are further processed server-side. + +You must have MongoDB 7.0 Enterprise to preview the capability. + +Automatic encryption in Queryable Encryption is configured with an ``encrypted_fields`` mapping, as demonstrated by the following example:: + + import os + from bson.codec_options import CodecOptions + from pymongo import MongoClient + from pymongo.encryption import Algorithm, ClientEncryption, QueryType + from pymongo.encryption_options import AutoEncryptionOpts + + + local_master_key = os.urandom(96) + kms_providers = {"local": {"key": local_master_key}} + key_vault_namespace = "keyvault.datakeys" + key_vault_client = MongoClient() + client_encryption = ClientEncryption( + kms_providers, key_vault_namespace, key_vault_client, CodecOptions() + ) + key_vault = key_vault_client["keyvault"]["datakeys"] + key_vault.drop() + key1_id = client_encryption.create_data_key("local", key_alt_names=["firstName"]) + key2_id = client_encryption.create_data_key("local", key_alt_names=["lastName"]) + + encrypted_fields_map = { + "default.encryptedCollection": { + "escCollection": "encryptedCollection.esc", + "ecocCollection": "encryptedCollection.ecoc", + "fields": [ + { + "path": "firstName", + "bsonType": "string", + "keyId": key1_id, + "queries": [{"queryType": "equality"}], + }, + { + "path": "lastName", + "bsonType": "string", + "keyId": key2_id, + } + ] + } + } + + auto_encryption_opts = AutoEncryptionOpts( + kms_providers, key_vault_namespace, encrypted_fields_map=encrypted_fields_map) + client = MongoClient(auto_encryption_opts=auto_encryption_opts) + client.default.drop_collection('encryptedCollection') + coll = client.default.create_collection('encryptedCollection') + coll.insert_one({ "_id": 1, "firstName": "Jane", "lastName": "Doe" }) + docs = list(coll.find({"firstName": "Jane"})) + print(docs) + +In the above example, the ``firstName`` and ``lastName`` fields are +automatically encrypted and decrypted. + +Explicit Queryable Encryption (Beta) +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +PyMongo 4.4 brings beta support for Queryable Encryption with MongoDB >=7.0. + +Queryable Encryption is the second version of Client-Side Field Level Encryption. +Data is encrypted client-side. Queryable Encryption supports indexed encrypted fields, +which are further processed server-side. 
+ +Explicit encryption in Queryable Encryption is performed using the ``encrypt`` and ``decrypt`` +methods. Automatic encryption (to allow the ``find_one`` to automatically decrypt) is configured +using an ``encrypted_fields`` mapping, as demonstrated by the following example:: + + import os + + from pymongo import MongoClient + from pymongo.encryption import (Algorithm, AutoEncryptionOpts, + ClientEncryption, QueryType) + + + def main(): + # This must be the same master key that was used to create + # the encryption key. + local_master_key = os.urandom(96) + kms_providers = {"local": {"key": local_master_key}} + + # The MongoDB namespace (db.collection) used to store + # the encryption data keys. + key_vault_namespace = "encryption.__pymongoTestKeyVault" + key_vault_db_name, key_vault_coll_name = key_vault_namespace.split(".", 1) + + # Set up the key vault (key_vault_namespace) for this example. + client = MongoClient() + key_vault = client[key_vault_db_name][key_vault_coll_name] + + # Ensure that two data keys cannot share the same keyAltName. + key_vault.drop() + key_vault.create_index( + "keyAltNames", + unique=True, + partialFilterExpression={"keyAltNames": {"$exists": True}}) + + client_encryption = ClientEncryption( + kms_providers, + key_vault_namespace, + # The MongoClient to use for reading/writing to the key vault. + # This can be the same MongoClient used by the main application. + client, + # The CodecOptions class used for encrypting and decrypting. + # This should be the same CodecOptions instance you have configured + # on MongoClient, Database, or Collection. + client.codec_options) + + # Create a new data key for the encryptedField. + indexed_key_id = client_encryption.create_data_key( + 'local') + unindexed_key_id = client_encryption.create_data_key( + 'local') + + encrypted_fields = { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": indexed_key_id, + "path": "encryptedIndexed", + "bsonType": "string", + "queries": { + "queryType": "equality" + } + }, + { + "keyId": unindexed_key_id, + "path": "encryptedUnindexed", + "bsonType": "string", + } + ] + } + + opts = AutoEncryptionOpts( + {"local": {"key": local_master_key}}, + key_vault.full_name, + bypass_query_analysis=True, + key_vault_client=client, + ) + + # The MongoClient used to read/write application data. + encrypted_client = MongoClient(auto_encryption_opts=opts) + encrypted_client.drop_database("test") + db = encrypted_client.test + + # Create the collection with encrypted fields. + coll = db.create_collection("coll", encryptedFields=encrypted_fields) + + # Create and encrypt an indexed and unindexed value. + val = "encrypted indexed value" + unindexed_val = "encrypted unindexed value" + insert_payload_indexed = client_encryption.encrypt(val, Algorithm.INDEXED, indexed_key_id, contention_factor=1) + insert_payload_unindexed = client_encryption.encrypt(unindexed_val, Algorithm.UNINDEXED, + unindexed_key_id) + + # Insert the payloads. + coll.insert_one({ + "encryptedIndexed": insert_payload_indexed, + "encryptedUnindexed": insert_payload_unindexed + }) + + # Encrypt our find payload using QueryType.EQUALITY. + # The value of "data_key_id" must be the same as used to encrypt the values + # above. + find_payload = client_encryption.encrypt( + val, Algorithm.INDEXED, indexed_key_id, query_type=QueryType.EQUALITY, contention_factor=1 + ) + + # Find the document we inserted using the encrypted payload. + # The returned document is automatically decrypted. 
+ doc = coll.find_one({"encryptedIndexed": find_payload}) + print('Returned document: %s' % (doc,)) + + # Cleanup resources. + client_encryption.close() + encrypted_client.close() + client.close() + + + if __name__ == "__main__": + main() diff --git a/doc/index.rst b/doc/index.rst index b43f5cf580..e474d27d8f 100644 --- a/doc/index.rst +++ b/doc/index.rst @@ -26,7 +26,7 @@ everything you need to know to use **PyMongo**. Using PyMongo with TLS / SSL. :doc:`examples/encryption` - Using PyMongo with client side encryption. + Using PyMongo with In-Use Encryption. :doc:`examples/type_hints` Using PyMongo with type hints. From d504322a740701fb465d36bb7c2ff5bcb1f02557 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Tue, 9 May 2023 23:19:44 -0700 Subject: [PATCH 0388/1588] PYTHON-3694 Test with MongoDB 7.0 (#1207) --- .evergreen/config.yml | 98 +++++++++++++++++++++++++++++++------------ README.rst | 2 +- doc/changelog.rst | 1 + 3 files changed, 74 insertions(+), 27 deletions(-) diff --git a/.evergreen/config.yml b/.evergreen/config.yml index f102668206..8398244071 100644 --- a/.evergreen/config.yml +++ b/.evergreen/config.yml @@ -1448,6 +1448,7 @@ tasks: VERSION: "5.0" TOPOLOGY: "sharded_cluster" - func: "run tests" + - name: "test-6.0-standalone" tags: ["6.0", "standalone"] commands: @@ -1475,6 +1476,33 @@ tasks: TOPOLOGY: "sharded_cluster" - func: "run tests" + - name: "test-7.0-standalone" + tags: ["7.0", "standalone"] + commands: + - func: "bootstrap mongo-orchestration" + vars: + VERSION: "7.0" + TOPOLOGY: "server" + - func: "run tests" + + - name: "test-7.0-replica_set" + tags: ["7.0", "replica_set"] + commands: + - func: "bootstrap mongo-orchestration" + vars: + VERSION: "7.0" + TOPOLOGY: "replica_set" + - func: "run tests" + + - name: "test-7.0-sharded_cluster" + tags: ["7.0", "sharded_cluster"] + commands: + - func: "bootstrap mongo-orchestration" + vars: + VERSION: "7.0" + TOPOLOGY: "sharded_cluster" + - func: "run tests" + - name: "test-latest-standalone" tags: ["latest", "standalone"] commands: @@ -1955,14 +1983,14 @@ tasks: - func: "run aws auth test with aws web identity credentials" - func: "run aws ECS auth test" - - name: "aws-auth-test-latest" + - name: "aws-auth-test-7.0" commands: - func: "bootstrap mongo-orchestration" vars: AUTH: "auth" ORCHESTRATION_FILE: "auth-aws.json" TOPOLOGY: "server" - VERSION: "latest" + VERSION: "7.0" - func: "add aws auth variables to file" - func: "run aws auth test with regular aws credentials" - func: "run aws auth test with assume role credentials" @@ -1971,6 +1999,7 @@ tasks: - func: "run aws auth test with aws EC2 credentials" - func: "run aws auth test with aws web identity credentials" - func: "run aws ECS auth test" + - name: "aws-auth-test-rapid" commands: - func: "bootstrap mongo-orchestration" @@ -1988,6 +2017,23 @@ tasks: - func: "run aws auth test with aws web identity credentials" - func: "run aws ECS auth test" + - name: "aws-auth-test-latest" + commands: + - func: "bootstrap mongo-orchestration" + vars: + AUTH: "auth" + ORCHESTRATION_FILE: "auth-aws.json" + TOPOLOGY: "server" + VERSION: "latest" + - func: "add aws auth variables to file" + - func: "run aws auth test with regular aws credentials" + - func: "run aws auth test with assume role credentials" + - func: "run aws auth test with aws credentials as environment variables" + - func: "run aws auth test with aws credentials and session token as environment variables" + - func: "run aws auth test with aws EC2 credentials" + - func: "run aws auth test with aws web identity 
credentials" + - func: "run aws ECS auth test" + - name: load-balancer-test commands: - func: "bootstrap mongo-orchestration" @@ -2319,6 +2365,10 @@ axes: display_name: "MongoDB 6.0" variables: VERSION: "6.0" + - id: "7.0" + display_name: "MongoDB 7.0" + variables: + VERSION: "7.0" - id: "latest" display_name: "MongoDB latest" variables: @@ -2568,21 +2618,6 @@ axes: batchtime: 10080 # 7 days buildvariants: -- matrix_name: "tests-all" - matrix_spec: - platform: - # OSes that support versions of MongoDB>=3.6 with SSL. - - rhel84 - auth-ssl: "*" - display_name: "${platform} ${auth-ssl}" - tasks: - - ".6.0" - - ".5.0" - - ".4.4" - - ".4.2" - - ".4.0" - - ".3.6" - - matrix_name: "tests-archlinux" matrix_spec: platform: @@ -2622,6 +2657,7 @@ buildvariants: display_name: "${platform} ${auth} ${ssl}" tasks: - ".latest" + - ".7.0" - ".6.0" - ".5.0" - ".4.4" @@ -2637,6 +2673,7 @@ buildvariants: display_name: "${platform} ${auth-ssl}" tasks: - ".latest" + - ".7.0" - ".6.0" - ".5.0" - ".4.4" @@ -2660,6 +2697,7 @@ buildvariants: add_tasks: &encryption-server-versions - ".rapid" - ".latest" + - ".7.0" - ".6.0" - ".5.0" - ".4.4" @@ -2688,6 +2726,7 @@ buildvariants: tasks: &all-server-versions - ".rapid" - ".latest" + - ".7.0" - ".6.0" - ".5.0" - ".4.4" @@ -2712,8 +2751,8 @@ buildvariants: display_name: "PyOpenSSL ${platform} ${python-version} ${auth}" tasks: - '.replica_set' - # Test standalone and sharded only on 5.0 and later. - - '.5.0' + # Test standalone and sharded only on 7.0. + - '.7.0' - matrix_name: "tests-pyopenssl-macOS" matrix_spec: @@ -2827,6 +2866,7 @@ buildvariants: tasks: - ".rapid" - ".latest" + - ".7.0" - ".6.0" - ".5.0" - ".4.4" @@ -2889,6 +2929,8 @@ buildvariants: then: add_tasks: - "test-latest-standalone" + - "test-7.0-standalone" + - "test-6.0-standalone" - "test-5.0-standalone" - "test-4.4-standalone" - "test-4.2-standalone" @@ -3028,7 +3070,7 @@ buildvariants: matrix_spec: platform: ubuntu-20.04 python-version: ["3.7", "3.10", "pypy3.7", "pypy3.8"] - mongodb-version: ["4.4", "5.0", "6.0", "latest"] + mongodb-version: ["4.4", "5.0", "6.0", "7.0", "latest"] auth: "noauth" ssl: "ssl" display_name: "OCSP test ${platform} ${python-version} ${mongodb-version}" @@ -3040,7 +3082,7 @@ buildvariants: matrix_spec: platform: windows-64-vsMulti-small python-version-windows: ["3.7", "3.10"] - mongodb-version: ["4.4", "5.0", "6.0", "latest"] + mongodb-version: ["4.4", "5.0", "6.0", "7.0", "latest"] auth: "noauth" ssl: "ssl" display_name: "OCSP test ${platform} ${python-version-windows} ${mongodb-version}" @@ -3052,7 +3094,7 @@ buildvariants: - matrix_name: "ocsp-test-macos" matrix_spec: platform: macos-1014 - mongodb-version: ["4.4", "5.0", "6.0", "latest"] + mongodb-version: ["4.4", "5.0", "6.0", "7.0", "latest"] auth: "noauth" ssl: "ssl" display_name: "OCSP test ${platform} ${mongodb-version}" @@ -3069,9 +3111,10 @@ buildvariants: tasks: - name: "aws-auth-test-4.4" - name: "aws-auth-test-5.0" - - name: "aws-auth-test-latest" - name: "aws-auth-test-6.0" + - name: "aws-auth-test-7.0" - name: "aws-auth-test-rapid" + - name: "aws-auth-test-latest" - matrix_name: "aws-auth-test-mac" matrix_spec: @@ -3081,9 +3124,11 @@ buildvariants: tasks: - name: "aws-auth-test-4.4" - name: "aws-auth-test-5.0" - - name: "aws-auth-test-latest" - name: "aws-auth-test-6.0" + - name: "aws-auth-test-7.0" - name: "aws-auth-test-rapid" + - name: "aws-auth-test-latest" + - matrix_name: "aws-auth-test-windows" matrix_spec: platform: [windows-64-vsMulti-small] @@ -3092,14 +3137,15 @@ buildvariants: tasks: - name: 
"aws-auth-test-4.4" - name: "aws-auth-test-5.0" - - name: "aws-auth-test-latest" - name: "aws-auth-test-6.0" + - name: "aws-auth-test-7.0" - name: "aws-auth-test-rapid" + - name: "aws-auth-test-latest" - matrix_name: "load-balancer" matrix_spec: platform: rhel84 - mongodb-version: ["rapid", "latest", "6.0"] + mongodb-version: ["6.0", "7.0", "rapid", "latest"] auth-ssl: "*" python-version: "*" loadbalancer: "*" diff --git a/README.rst b/README.rst index bb409a94ff..cc2b79d842 100644 --- a/README.rst +++ b/README.rst @@ -16,7 +16,7 @@ is a `gridfs `_ implementation on top of ``pymongo``. -PyMongo supports MongoDB 3.6, 4.0, 4.2, 4.4, 5.0, and 6.0. +PyMongo supports MongoDB 3.6, 4.0, 4.2, 4.4, 5.0, 6.0, and 7.0. Support / Feedback ================== diff --git a/doc/changelog.rst b/doc/changelog.rst index 7b35e4cd61..a0d73eb4de 100644 --- a/doc/changelog.rst +++ b/doc/changelog.rst @@ -4,6 +4,7 @@ Changelog Changes in Version 4.4 ----------------------- +- Added support for MongoDB 7.0. - Added support for passing a list containing (key, direction) pairs or keys to :meth:`~pymongo.collection.Collection.create_index`. - pymongocrypt 1.5.0 or later is now required for client side field level From afd7e1c2cdeb7bf33a9e21036450ff0a56fcc39a Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Thu, 11 May 2023 14:35:30 -0500 Subject: [PATCH 0389/1588] PYTHON-3460 Implement OIDC SASL mechanism (#1138) --- .evergreen/config.yml | 83 ++ .evergreen/resync-specs.sh | 3 + .evergreen/run-mongodb-oidc-test.sh | 85 ++ pymongo/auth.py | 62 +- pymongo/auth_oidc.py | 299 +++++++ pymongo/common.py | 40 +- pymongo/helpers.py | 35 + pymongo/message.py | 2 + pymongo/pool.py | 13 +- pymongo/server.py | 3 +- test/auth/{ => legacy}/connection-string.json | 129 ++- .../unified/reauthenticate_with_retry.json | 191 ++++ .../unified/reauthenticate_without_retry.json | 191 ++++ test/auth_aws/test_auth_oidc.py | 821 ++++++++++++++++++ test/test_auth_spec.py | 31 +- 15 files changed, 1970 insertions(+), 18 deletions(-) create mode 100755 .evergreen/run-mongodb-oidc-test.sh create mode 100644 pymongo/auth_oidc.py rename test/auth/{ => legacy}/connection-string.json (76%) create mode 100644 test/auth/unified/reauthenticate_with_retry.json create mode 100644 test/auth/unified/reauthenticate_without_retry.json create mode 100644 test/auth_aws/test_auth_oidc.py diff --git a/.evergreen/config.yml b/.evergreen/config.yml index 8398244071..3f06fc1a03 100644 --- a/.evergreen/config.yml +++ b/.evergreen/config.yml @@ -749,6 +749,68 @@ functions: fi PYTHON_BINARY=${PYTHON_BINARY} ASSERT_NO_URI_CREDS=true .evergreen/run-mongodb-aws-test.sh + "bootstrap oidc": + - command: ec2.assume_role + params: + role_arn: ${aws_test_secrets_role} + - command: shell.exec + type: test + params: + working_dir: "src" + shell: bash + script: | + ${PREPARE_SHELL} + if [ "${skip_EC2_auth_test}" = "true" ]; then + echo "This platform does not support the oidc auth test, skipping..." + exit 0 + fi + + cd ${DRIVERS_TOOLS}/.evergreen/auth_oidc + export AWS_ACCESS_KEY_ID=${AWS_ACCESS_KEY_ID} + export AWS_SECRET_ACCESS_KEY=${AWS_SECRET_ACCESS_KEY} + export AWS_SESSION_TOKEN=${AWS_SESSION_TOKEN} + export OIDC_TOKEN_DIR=/tmp/tokens + + . 
./activate-authoidcvenv.sh + python oidc_write_orchestration.py + python oidc_get_tokens.py + + "run oidc auth test with aws credentials": + - command: shell.exec + type: test + params: + working_dir: "src" + shell: bash + script: | + ${PREPARE_SHELL} + if [ "${skip_EC2_auth_test}" = "true" ]; then + echo "This platform does not support the oidc auth test, skipping..." + exit 0 + fi + cd ${DRIVERS_TOOLS}/.evergreen/auth_oidc + mongosh setup_oidc.js + - command: shell.exec + type: test + params: + working_dir: "src" + silent: true + script: | + # DO NOT ECHO WITH XTRACE (which PREPARE_SHELL does) + cat <<'EOF' > "${PROJECT_DIRECTORY}/prepare_mongodb_oidc.sh" + export OIDC_TOKEN_DIR=/tmp/tokens + EOF + - command: shell.exec + type: test + params: + working_dir: "src" + script: | + ${PREPARE_SHELL} + if [ "${skip_web_identity_auth_test}" = "true" ]; then + echo "This platform does not support the oidc auth test, skipping..." + exit 0 + fi + PYTHON_BINARY=${PYTHON_BINARY} ASSERT_NO_URI_CREDS=true .evergreen/run-mongodb-oidc-test.sh + "run aws auth test with aws credentials as environment variables": - command: shell.exec type: test @@ -2034,6 +2096,19 @@ tasks: - func: "run aws auth test with aws web identity credentials" - func: "run aws ECS auth test" + - name: "oidc-auth-test-latest" + commands: + - func: "bootstrap oidc" + - func: "bootstrap mongo-orchestration" + vars: + AUTH: "auth" + ORCHESTRATION_FILE: "auth-oidc.json" + TOPOLOGY: "replica_set" + VERSION: "latest" + - func: "run oidc auth test with aws credentials" + vars: + AWS_WEB_IDENTITY_TOKEN_FILE: /tmp/tokens/test1 + - name: load-balancer-test commands: - func: "bootstrap mongo-orchestration" @@ -3103,6 +3178,14 @@ buildvariants: # macOS MongoDB servers do not staple OCSP responses and only support RSA. - name: ".ocsp-rsa !.ocsp-staple" +- matrix_name: "oidc-auth-test" + matrix_spec: + platform: [ ubuntu-20.04 ] + python-version: ["3.9"] + display_name: "MONGODB-OIDC Auth ${platform} ${python-version}" + tasks: + - name: "oidc-auth-test-latest" + - matrix_name: "aws-auth-test" matrix_spec: platform: [ubuntu-20.04] diff --git a/.evergreen/resync-specs.sh b/.evergreen/resync-specs.sh index 489ff28b3a..817a2d96bc 100755 --- a/.evergreen/resync-specs.sh +++ b/.evergreen/resync-specs.sh @@ -70,6 +70,9 @@ for spec in "$@" do # Match the spec dir name, the python test dir name, and/or common abbreviations. case "$spec" in + auth) + cpjson auth/tests/ auth + ;; atlas-data-lake-testing|data_lake) cpjson atlas-data-lake-testing/tests/ data_lake ;; diff --git a/.evergreen/run-mongodb-oidc-test.sh b/.evergreen/run-mongodb-oidc-test.sh new file mode 100755 index 0000000000..46bb779578 --- /dev/null +++ b/.evergreen/run-mongodb-oidc-test.sh @@ -0,0 +1,85 @@ +#!/bin/bash + +set -o xtrace +set -o errexit # Exit the script with error if any of the commands fail + +############################################ +# Main Program # +############################################ + +# Supported/used environment variables: +# MONGODB_URI Set the URI, including an optional username/password to use +# to connect to the server via MONGODB-OIDC authentication +# mechanism. +# PYTHON_BINARY The Python version to use. 
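+#       OIDC_TOKEN_DIR          Directory containing the OIDC token files (required; the
+#                               script exits below if it is unset).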
+ +echo "Running MONGODB-OIDC authentication tests" +# ensure no secrets are printed in log files +set +x + +# load the script +shopt -s expand_aliases # needed for `urlencode` alias +[ -s "${PROJECT_DIRECTORY}/prepare_mongodb_oidc.sh" ] && source "${PROJECT_DIRECTORY}/prepare_mongodb_oidc.sh" + +MONGODB_URI=${MONGODB_URI:-"mongodb://localhost"} +MONGODB_URI_SINGLE="${MONGODB_URI}/?authMechanism=MONGODB-OIDC" +MONGODB_URI_MULTIPLE="${MONGODB_URI}:27018/?authMechanism=MONGODB-OIDC&directConnection=true" + +if [ -z "${OIDC_TOKEN_DIR}" ]; then + echo "Must specify OIDC_TOKEN_DIR" + exit 1 +fi + +export MONGODB_URI_SINGLE="$MONGODB_URI_SINGLE" +export MONGODB_URI_MULTIPLE="$MONGODB_URI_MULTIPLE" +export MONGODB_URI="$MONGODB_URI" + +echo $MONGODB_URI_SINGLE +echo $MONGODB_URI_MULTIPLE +echo $MONGODB_URI + +if [ "$ASSERT_NO_URI_CREDS" = "true" ]; then + if echo "$MONGODB_URI" | grep -q "@"; then + echo "MONGODB_URI unexpectedly contains user credentials!"; + exit 1 + fi +fi + +# show test output +set -x + +# Workaround macOS python 3.9 incompatibility with system virtualenv. +if [ "$(uname -s)" = "Darwin" ]; then + VIRTUALENV="/Library/Frameworks/Python.framework/Versions/3.9/bin/python3 -m virtualenv" +else + VIRTUALENV=$(command -v virtualenv) +fi + +authtest () { + if [ "Windows_NT" = "$OS" ]; then + PYTHON=$(cygpath -m $PYTHON) + fi + + echo "Running MONGODB-OIDC authentication tests with $PYTHON" + $PYTHON --version + + $VIRTUALENV -p $PYTHON --never-download venvoidc + if [ "Windows_NT" = "$OS" ]; then + . venvoidc/Scripts/activate + else + . venvoidc/bin/activate + fi + python -m pip install -U pip setuptools + python -m pip install '.[aws]' + python test/auth_aws/test_auth_oidc.py -v + deactivate + rm -rf venvoidc +} + +PYTHON=${PYTHON_BINARY:-} +if [ -z "$PYTHON" ]; then + echo "Cannot test without specifying PYTHON_BINARY" + exit 1 +fi + +authtest diff --git a/pymongo/auth.py b/pymongo/auth.py index 3d259335b0..4bc31ee97b 100644 --- a/pymongo/auth.py +++ b/pymongo/auth.py @@ -27,6 +27,7 @@ from bson.binary import Binary from bson.son import SON from pymongo.auth_aws import _authenticate_aws +from pymongo.auth_oidc import _authenticate_oidc, _get_authenticator, _OIDCProperties from pymongo.errors import ConfigurationError, OperationFailure from pymongo.saslprep import saslprep @@ -48,6 +49,7 @@ [ "GSSAPI", "MONGODB-CR", + "MONGODB-OIDC", "MONGODB-X509", "MONGODB-AWS", "PLAIN", @@ -101,7 +103,7 @@ def __hash__(self): def _build_credentials_tuple(mech, source, user, passwd, extra, database): """Build and return a mechanism specific credentials tuple.""" - if mech not in ("MONGODB-X509", "MONGODB-AWS") and user is None: + if mech not in ("MONGODB-X509", "MONGODB-AWS", "MONGODB-OIDC") and user is None: raise ConfigurationError("%s requires a username." % (mech,)) if mech == "GSSAPI": if source is not None and source != "$external": @@ -137,6 +139,32 @@ def _build_credentials_tuple(mech, source, user, passwd, extra, database): aws_props = _AWSProperties(aws_session_token=aws_session_token) # user can be None for temporary link-local EC2 credentials. 
         return MongoCredential(mech, "$external", user, passwd, aws_props, None)
+    elif mech == "MONGODB-OIDC":
+        properties = extra.get("authmechanismproperties", {})
+        request_token_callback = properties.get("request_token_callback")
+        refresh_token_callback = properties.get("refresh_token_callback", None)
+        provider_name = properties.get("PROVIDER_NAME", "")
+        default_allowed = [
+            "*.mongodb.net",
+            "*.mongodb-dev.net",
+            "*.mongodbgov.net",
+            "localhost",
+            "127.0.0.1",
+            "::1",
+        ]
+        allowed_hosts = properties.get("allowed_hosts", default_allowed)
+        if not request_token_callback and provider_name != "aws":
+            raise ConfigurationError(
+                "authentication with MONGODB-OIDC requires providing a request_token_callback or a provider_name of 'aws'"
+            )
+        oidc_props = _OIDCProperties(
+            request_token_callback=request_token_callback,
+            refresh_token_callback=refresh_token_callback,
+            provider_name=provider_name,
+            allowed_hosts=allowed_hosts,
+        )
+        return MongoCredential(mech, "$external", user, passwd, oidc_props, None)
+
     elif mech == "PLAIN":
         source_database = source or database or "$external"
         return MongoCredential(mech, source_database, user, passwd, None, None)
@@ -439,7 +467,7 @@ def _authenticate_x509(credentials, sock_info):
         # MONGODB-X509 is done after the speculative auth step.
         return

-    cmd = _X509Context(credentials).speculate_command()
+    cmd = _X509Context(credentials, sock_info.address).speculate_command()
     sock_info.command("$external", cmd)


@@ -482,6 +510,7 @@ def _authenticate_default(credentials, sock_info):
     "MONGODB-CR": _authenticate_mongo_cr,
     "MONGODB-X509": _authenticate_x509,
     "MONGODB-AWS": _authenticate_aws,
+    "MONGODB-OIDC": _authenticate_oidc,
     "PLAIN": _authenticate_plain,
     "SCRAM-SHA-1": functools.partial(_authenticate_scram, mechanism="SCRAM-SHA-1"),
     "SCRAM-SHA-256": functools.partial(_authenticate_scram, mechanism="SCRAM-SHA-256"),
@@ -490,15 +519,16 @@
 class _AuthContext(object):
-    def __init__(self, credentials):
+    def __init__(self, credentials, address):
         self.credentials = credentials
         self.speculative_authenticate = None
+        self.address = address

     @staticmethod
-    def from_credentials(creds):
+    def from_credentials(creds, address):
         spec_cls = _SPECULATIVE_AUTH_MAP.get(creds.mechanism)
         if spec_cls:
-            return spec_cls(creds)
+            return spec_cls(creds, address)
         return None

     def speculate_command(self):
@@ -512,8 +542,8 @@ def speculate_succeeded(self):

 class _ScramContext(_AuthContext):
-    def __init__(self, credentials, mechanism):
-        super(_ScramContext, self).__init__(credentials)
+    def __init__(self, credentials, address, mechanism):
+        super(_ScramContext, self).__init__(credentials, address)
         self.scram_data = None
         self.mechanism = mechanism

@@ -534,16 +564,30 @@ def speculate_command(self):
         return cmd


+class _OIDCContext(_AuthContext):
+    def speculate_command(self):
+        authenticator = _get_authenticator(self.credentials, self.address)
+        cmd = authenticator.auth_start_cmd(False)
+        if cmd is None:
+            return
+        cmd["db"] = self.credentials.source
+        return cmd
+
+
 _SPECULATIVE_AUTH_MAP: Mapping[str, Callable] = {
     "MONGODB-X509": _X509Context,
     "SCRAM-SHA-1": functools.partial(_ScramContext, mechanism="SCRAM-SHA-1"),
     "SCRAM-SHA-256": functools.partial(_ScramContext, mechanism="SCRAM-SHA-256"),
+    "MONGODB-OIDC": _OIDCContext,
     "DEFAULT": functools.partial(_ScramContext, mechanism="SCRAM-SHA-256"),
 }


-def authenticate(credentials, sock_info):
+def authenticate(credentials, sock_info, reauthenticate=False):
     """Authenticate sock_info."""
mechanism = credentials.mechanism auth_func = _AUTH_MAP[mechanism] - auth_func(credentials, sock_info) + if mechanism == "MONGODB-OIDC": + _authenticate_oidc(credentials, sock_info, reauthenticate) + else: + auth_func(credentials, sock_info) diff --git a/pymongo/auth_oidc.py b/pymongo/auth_oidc.py new file mode 100644 index 0000000000..530b1bb068 --- /dev/null +++ b/pymongo/auth_oidc.py @@ -0,0 +1,299 @@ +# Copyright 2023-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""MONGODB-OIDC Authentication helpers.""" +import os +import threading +from dataclasses import dataclass, field +from datetime import datetime, timedelta, timezone +from typing import Callable, Dict, List, Optional + +import bson +from bson.binary import Binary +from bson.son import SON +from pymongo.errors import ConfigurationError, OperationFailure +from pymongo.helpers import _REAUTHENTICATION_REQUIRED_CODE + + +@dataclass +class _OIDCProperties: + request_token_callback: Optional[Callable[..., Dict]] + refresh_token_callback: Optional[Callable[..., Dict]] + provider_name: Optional[str] + allowed_hosts: List[str] + + +"""Mechanism properties for MONGODB-OIDC authentication.""" + +TOKEN_BUFFER_MINUTES = 5 +CALLBACK_TIMEOUT_SECONDS = 5 * 60 +CACHE_TIMEOUT_MINUTES = 60 * 5 +CALLBACK_VERSION = 0 + +_CACHE: Dict[str, "_OIDCAuthenticator"] = {} + + +def _get_authenticator(credentials, address): + # Clear out old items in the cache. + now_utc = datetime.now(timezone.utc) + to_remove = [] + for key, value in _CACHE.items(): + if value.cache_exp_utc is not None and value.cache_exp_utc < now_utc: + to_remove.append(key) + for key in to_remove: + del _CACHE[key] + + # Extract values. + principal_name = credentials.username + properties = credentials.mechanism_properties + request_cb = properties.request_token_callback + refresh_cb = properties.refresh_token_callback + + # Validate that the address is allowed. + if not properties.provider_name: + found = False + allowed_hosts = properties.allowed_hosts + for patt in allowed_hosts: + if patt == address[0]: + found = True + elif patt.startswith("*.") and address[0].endswith(patt[1:]): + found = True + if not found: + raise ConfigurationError( + f"Refusing to connect to {address[0]}, which is not in authOIDCAllowedHosts: {allowed_hosts}" + ) + + # Get or create the cache item. 
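+    # The key incorporates the address and the id() of each callback, so a
+    # different server or a different pair of callbacks gets its own authenticator.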
+    cache_key = f"{principal_name}{address[0]}{address[1]}{id(request_cb)}{id(refresh_cb)}"
+    _CACHE.setdefault(cache_key, _OIDCAuthenticator(username=principal_name, properties=properties))
+
+    return _CACHE[cache_key]
+
+
+def _get_cache_exp():
+    return datetime.now(timezone.utc) + timedelta(minutes=CACHE_TIMEOUT_MINUTES)
+
+
+@dataclass
+class _OIDCAuthenticator:
+    username: str
+    properties: _OIDCProperties
+    idp_info: Optional[Dict] = field(default=None)
+    idp_resp: Optional[Dict] = field(default=None)
+    reauth_gen_id: int = field(default=0)
+    idp_info_gen_id: int = field(default=0)
+    token_gen_id: int = field(default=0)
+    token_exp_utc: Optional[datetime] = field(default=None)
+    cache_exp_utc: datetime = field(default_factory=_get_cache_exp)
+    lock: threading.Lock = field(default_factory=threading.Lock)
+
+    def get_current_token(self, use_callbacks=True):
+        properties = self.properties
+
+        request_cb = properties.request_token_callback
+        refresh_cb = properties.refresh_token_callback
+        if not use_callbacks:
+            request_cb = None
+            refresh_cb = None
+
+        current_valid_token = False
+        if self.token_exp_utc is not None:
+            now_utc = datetime.now(timezone.utc)
+            exp_utc = self.token_exp_utc
+            buffer_seconds = TOKEN_BUFFER_MINUTES * 60
+            if (exp_utc - now_utc).total_seconds() >= buffer_seconds:
+                current_valid_token = True
+
+        timeout = CALLBACK_TIMEOUT_SECONDS
+
+        if not use_callbacks and not current_valid_token:
+            return None
+
+        if not current_valid_token and request_cb is not None:
+            prev_token = self.idp_resp and self.idp_resp["access_token"]
+            with self.lock:
+                # See if the token was changed while we were waiting for the
+                # lock.
+                new_token = self.idp_resp and self.idp_resp["access_token"]
+                if new_token != prev_token:
+                    return new_token
+
+                refresh_token = self.idp_resp and self.idp_resp.get("refresh_token")
+                refresh_token = refresh_token or ""
+                context = dict(
+                    timeout_seconds=timeout,
+                    version=CALLBACK_VERSION,
+                    refresh_token=refresh_token,
+                )
+
+                if self.idp_resp is None or refresh_cb is None:
+                    self.idp_resp = request_cb(self.idp_info, context)
+                elif request_cb is not None:
+                    self.idp_resp = refresh_cb(self.idp_info, context)
+                cache_exp_utc = datetime.now(timezone.utc) + timedelta(
+                    minutes=CACHE_TIMEOUT_MINUTES
+                )
+                self.cache_exp_utc = cache_exp_utc
+                self.token_gen_id += 1
+
+        token_result = self.idp_resp
+
+        # Validate callback return value.
+        if not isinstance(token_result, dict):
+            raise ValueError("OIDC callback returned invalid result")
+
+        if "access_token" not in token_result:
+            raise ValueError("OIDC callback did not return an access_token")
+
+        expected = ["access_token", "expires_in_seconds", "refresh_token"]
+        for key in token_result:
+            if key not in expected:
+                raise ValueError(f'Unexpected field in callback result "{key}"')
+
+        token = token_result["access_token"]
+
+        if "expires_in_seconds" in token_result:
+            expires_in = int(token_result["expires_in_seconds"])
+            buffer_seconds = TOKEN_BUFFER_MINUTES * 60
+            if expires_in >= buffer_seconds:
+                now_utc = datetime.now(timezone.utc)
+                exp_utc = now_utc + timedelta(seconds=expires_in)
+                self.token_exp_utc = exp_utc
+
+        return token
+
+    def auth_start_cmd(self, use_callbacks=True):
+        properties = self.properties
+
+        # Handle aws provider credentials.
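+        # The web identity token is read from the file named by
+        # AWS_WEB_IDENTITY_TOKEN_FILE and sent directly as the "jwt" payload field.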
+ if properties.provider_name == "aws": + aws_identity_file = os.environ["AWS_WEB_IDENTITY_TOKEN_FILE"] + with open(aws_identity_file) as fid: + token = fid.read().strip() + payload = dict(jwt=token) + cmd = SON( + [ + ("saslStart", 1), + ("mechanism", "MONGODB-OIDC"), + ("payload", Binary(bson.encode(payload))), + ] + ) + return cmd + + principal_name = self.username + + if self.idp_info is not None: + self.cache_exp_utc = datetime.now(timezone.utc) + timedelta( + minutes=CACHE_TIMEOUT_MINUTES + ) + + if self.idp_info is None: + self.cache_exp_utc = _get_cache_exp() + + if self.idp_info is None: + # Send the SASL start with the optional principal name. + payload = dict() + + if principal_name: + payload["n"] = principal_name + + cmd = SON( + [ + ("saslStart", 1), + ("mechanism", "MONGODB-OIDC"), + ("payload", Binary(bson.encode(payload))), + ("autoAuthorize", 1), + ] + ) + return cmd + + token = self.get_current_token(use_callbacks) + if not token: + return None + bin_payload = Binary(bson.encode(dict(jwt=token))) + return SON( + [ + ("saslStart", 1), + ("mechanism", "MONGODB-OIDC"), + ("payload", bin_payload), + ] + ) + + def clear(self): + self.idp_info = None + self.idp_resp = None + self.token_exp_utc = None + + def run_command(self, sock_info, cmd): + try: + return sock_info.command("$external", cmd, no_reauth=True) + except OperationFailure as exc: + self.clear() + if exc.code == _REAUTHENTICATION_REQUIRED_CODE: + if "jwt" in bson.decode(cmd["payload"]): # type:ignore[attr-defined] + if self.idp_info_gen_id > self.reauth_gen_id: + raise + return self.authenticate(sock_info, reauthenticate=True) + raise + + def authenticate(self, sock_info, reauthenticate=False): + if reauthenticate: + prev_id = getattr(sock_info, "oidc_token_gen_id", None) + # Check if we've already changed tokens. 
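+            # Only invalidate the cached token if this connection was still
+            # using it; otherwise another thread has already refreshed it.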
+ if prev_id == self.token_gen_id: + self.reauth_gen_id = self.idp_info_gen_id + self.token_exp_utc = None + if not self.properties.refresh_token_callback: + self.clear() + + ctx = sock_info.auth_ctx + cmd = None + + if ctx and ctx.speculate_succeeded(): + resp = ctx.speculative_authenticate + else: + cmd = self.auth_start_cmd() + resp = self.run_command(sock_info, cmd) + + if resp["done"]: + sock_info.oidc_token_gen_id = self.token_gen_id + return + + server_resp: Dict = bson.decode(resp["payload"]) + if "issuer" in server_resp: + self.idp_info = server_resp + self.idp_info_gen_id += 1 + + conversation_id = resp["conversationId"] + token = self.get_current_token() + sock_info.oidc_token_gen_id = self.token_gen_id + bin_payload = Binary(bson.encode(dict(jwt=token))) + cmd = SON( + [ + ("saslContinue", 1), + ("conversationId", conversation_id), + ("payload", bin_payload), + ] + ) + resp = self.run_command(sock_info, cmd) + if not resp["done"]: + self.clear() + raise OperationFailure("SASL conversation failed to complete.") + return resp + + +def _authenticate_oidc(credentials, sock_info, reauthenticate): + """Authenticate using MONGODB-OIDC.""" + authenticator = _get_authenticator(credentials, sock_info.address) + return authenticator.authenticate(sock_info, reauthenticate=reauthenticate) diff --git a/pymongo/common.py b/pymongo/common.py index 4b8aeb020c..4e39c8e514 100644 --- a/pymongo/common.py +++ b/pymongo/common.py @@ -16,6 +16,7 @@ """Functions and classes common to multiple pymongo modules.""" import datetime +import inspect import warnings from collections import OrderedDict, abc from typing import ( @@ -416,14 +417,48 @@ def validate_read_preference_tags(name: str, value: Any) -> List[Dict[str, str]] _MECHANISM_PROPS = frozenset( - ["SERVICE_NAME", "CANONICALIZE_HOST_NAME", "SERVICE_REALM", "AWS_SESSION_TOKEN"] + [ + "SERVICE_NAME", + "CANONICALIZE_HOST_NAME", + "SERVICE_REALM", + "AWS_SESSION_TOKEN", + "PROVIDER_NAME", + ] ) def validate_auth_mechanism_properties(option: str, value: Any) -> Dict[str, Union[bool, str]]: """Validate authMechanismProperties.""" - value = validate_string(option, value) props: Dict[str, Any] = {} + if not isinstance(value, str): + if not isinstance(value, dict): + raise ValueError("Auth mechanism properties must be given as a string or a dictionary") + for key, value in value.items(): + if isinstance(value, str): + props[key] = value + elif isinstance(value, bool): + props[key] = str(value).lower() + elif key in ["allowed_hosts"] and isinstance(value, list): + props[key] = value + elif inspect.isfunction(value): + signature = inspect.signature(value) + if key == "request_token_callback": + expected_params = 2 + elif key == "refresh_token_callback": + expected_params = 2 + else: + raise ValueError(f"Unrecognized Auth mechanism function {key}") + if len(signature.parameters) != expected_params: + msg = f"{key} must accept {expected_params} parameters" + raise ValueError(msg) + props[key] = value + else: + raise ValueError( + "Auth mechanism property values must be strings or callback functions" + ) + return props + + value = validate_string(option, value) for opt in value.split(","): try: key, val = opt.split(":") @@ -715,6 +750,7 @@ def validate_datetime_conversion(option: Any, value: Any) -> Optional[DatetimeCo "password": validate_string_or_none, "server_selector": validate_is_callable_or_none, "auto_encryption_opts": validate_auto_encryption_opts_or_none, + "authoidcallowedhosts": validate_list, } # Dictionary where keys are any URI option name, and 
values are the diff --git a/pymongo/helpers.py b/pymongo/helpers.py index 31325c8af2..1a753c66f4 100644 --- a/pymongo/helpers.py +++ b/pymongo/helpers.py @@ -68,6 +68,9 @@ ] ) +# Server code raised when re-authentication is required +_REAUTHENTICATION_REQUIRED_CODE = 391 + def _gen_index_name(keys): """Generate an index name from the set of fields it is over.""" @@ -267,3 +270,35 @@ def _handle_exception(): pass finally: del einfo + + +def _handle_reauth(func): + def inner(*args, **kwargs): + no_reauth = kwargs.pop("no_reauth", False) + from pymongo.pool import SocketInfo + + try: + return func(*args, **kwargs) + except OperationFailure as exc: + if no_reauth: + raise + if exc.code == _REAUTHENTICATION_REQUIRED_CODE: + # Look for an argument that either is a SocketInfo + # or has a socket_info attribute, so we can trigger + # a reauth. + sock_info = None + for arg in args: + if isinstance(arg, SocketInfo): + sock_info = arg + break + if hasattr(arg, "sock_info"): + sock_info = arg.sock_info + break + if sock_info: + sock_info.authenticate(reauthenticate=True) + else: + raise + return func(*args, **kwargs) + raise + + return inner diff --git a/pymongo/message.py b/pymongo/message.py index f7a173ca8a..3510d210a5 100644 --- a/pymongo/message.py +++ b/pymongo/message.py @@ -54,6 +54,7 @@ ProtocolError, ) from pymongo.hello import HelloCompat +from pymongo.helpers import _handle_reauth from pymongo.read_preferences import ReadPreference from pymongo.write_concern import WriteConcern @@ -909,6 +910,7 @@ def unack_write(self, cmd, request_id, msg, max_doc_size, docs): self.start_time = datetime.datetime.now() return result + @_handle_reauth def write_command(self, cmd, request_id, msg, docs): """A proxy for SocketInfo.write_command that handles event publishing.""" if self.publish: diff --git a/pymongo/pool.py b/pymongo/pool.py index 42e6a642a4..6ba1554231 100644 --- a/pymongo/pool.py +++ b/pymongo/pool.py @@ -57,6 +57,7 @@ _CertificateError, ) from pymongo.hello import Hello, HelloCompat +from pymongo.helpers import _handle_reauth from pymongo.lock import _create_lock from pymongo.monitoring import ConnectionCheckOutFailedReason, ConnectionClosedReason from pymongo.network import command, receive_message @@ -756,7 +757,7 @@ def _hello(self, cluster_time, topology_version, heartbeat_frequency): if creds: if creds.mechanism == "DEFAULT" and creds.username: cmd["saslSupportedMechs"] = creds.source + "." + creds.username - auth_ctx = auth._AuthContext.from_credentials(creds) + auth_ctx = auth._AuthContext.from_credentials(creds, self.address) if auth_ctx: cmd["speculativeAuthenticate"] = auth_ctx.speculate_command() else: @@ -813,6 +814,7 @@ def _next_reply(self): helpers._check_command_response(response_doc, self.max_wire_version) return response_doc + @_handle_reauth def command( self, dbname, @@ -966,17 +968,22 @@ def write_command(self, request_id, msg, codec_options): helpers._check_command_response(result, self.max_wire_version) return result - def authenticate(self): + def authenticate(self, reauthenticate=False): """Authenticate to the server if needed. Can raise ConnectionFailure or OperationFailure. """ # CMAP spec says to publish the ready event only after authenticating # the connection. + if reauthenticate: + if self.performed_handshake: + # Existing auth_ctx is stale, remove it. 
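+                # Clearing it forces a full reauthentication below instead of
+                # reusing the speculative handshake result.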
+ self.auth_ctx = None + self.ready = False if not self.ready: creds = self.opts._credentials if creds: - auth.authenticate(creds, self) + auth.authenticate(creds, self, reauthenticate=reauthenticate) self.ready = True if self.enabled_for_cmap: self.listeners.publish_connection_ready(self.address, self.id) diff --git a/pymongo/server.py b/pymongo/server.py index f26f473c32..16c905abb7 100644 --- a/pymongo/server.py +++ b/pymongo/server.py @@ -18,7 +18,7 @@ from bson import _decode_all_selective from pymongo.errors import NotPrimaryError, OperationFailure -from pymongo.helpers import _check_command_response +from pymongo.helpers import _check_command_response, _handle_reauth from pymongo.message import _convert_exception, _OpMsg from pymongo.response import PinnedResponse, Response @@ -73,6 +73,7 @@ def request_check(self): """Check the server's state soon.""" self._monitor.request_check() + @_handle_reauth def run_operation(self, sock_info, operation, read_preference, listeners, unpack_res): """Run a _Query or _GetMore operation and return a Response object. diff --git a/test/auth/connection-string.json b/test/auth/legacy/connection-string.json similarity index 76% rename from test/auth/connection-string.json rename to test/auth/legacy/connection-string.json index 2a37ae8df4..ca979010af 100644 --- a/test/auth/connection-string.json +++ b/test/auth/legacy/connection-string.json @@ -444,6 +444,133 @@ "AWS_SESSION_TOKEN": "token!@#$%^&*()_+" } } + }, + { + "description": "should recognise the mechanism and request callback (MONGODB-OIDC)", + "uri": "mongodb://localhost/?authMechanism=MONGODB-OIDC", + "callback": ["oidcRequest"], + "valid": true, + "credential": { + "username": null, + "password": null, + "source": "$external", + "mechanism": "MONGODB-OIDC", + "mechanism_properties": { + "REQUEST_TOKEN_CALLBACK": true + } + } + }, + { + "description": "should recognise the mechanism when auth source is explicitly specified and with request callback (MONGODB-OIDC)", + "uri": "mongodb://localhost/?authMechanism=MONGODB-OIDC&authSource=$external", + "callback": ["oidcRequest"], + "valid": true, + "credential": { + "username": null, + "password": null, + "source": "$external", + "mechanism": "MONGODB-OIDC", + "mechanism_properties": { + "REQUEST_TOKEN_CALLBACK": true + } + } + }, + { + "description": "should recognise the mechanism with request and refresh callback (MONGODB-OIDC)", + "uri": "mongodb://localhost/?authMechanism=MONGODB-OIDC", + "callback": ["oidcRequest", "oidcRefresh"], + "valid": true, + "credential": { + "username": null, + "password": null, + "source": "$external", + "mechanism": "MONGODB-OIDC", + "mechanism_properties": { + "REQUEST_TOKEN_CALLBACK": true, + "REFRESH_TOKEN_CALLBACK": true + } + } + }, + { + "description": "should recognise the mechanism and username with request callback (MONGODB-OIDC)", + "uri": "mongodb://principalName@localhost/?authMechanism=MONGODB-OIDC", + "callback": ["oidcRequest"], + "valid": true, + "credential": { + "username": "principalName", + "password": null, + "source": "$external", + "mechanism": "MONGODB-OIDC", + "mechanism_properties": { + "REQUEST_TOKEN_CALLBACK": true + } + } + }, + { + "description": "should recognise the mechanism with aws device (MONGODB-OIDC)", + "uri": "mongodb://localhost/?authMechanism=MONGODB-OIDC&authMechanismProperties=PROVIDER_NAME:aws", + "valid": true, + "credential": { + "username": null, + "password": null, + "source": "$external", + "mechanism": "MONGODB-OIDC", + "mechanism_properties": { + "PROVIDER_NAME": 
"aws" + } + } + }, + { + "description": "should recognise the mechanism when auth source is explicitly specified and with aws device (MONGODB-OIDC)", + "uri": "mongodb://localhost/?authMechanism=MONGODB-OIDC&authSource=$external&authMechanismProperties=PROVIDER_NAME:aws", + "valid": true, + "credential": { + "username": null, + "password": null, + "source": "$external", + "mechanism": "MONGODB-OIDC", + "mechanism_properties": { + "PROVIDER_NAME": "aws" + } + } + }, + { + "description": "should throw an exception if username and password are specified (MONGODB-OIDC)", + "uri": "mongodb://user:pass@localhost/?authMechanism=MONGODB-OIDC", + "callback": ["oidcRequest"], + "valid": false, + "credential": null + }, + { + "description": "should throw an exception if username and deviceName are specified (MONGODB-OIDC)", + "uri": "mongodb://principalName@localhost/?authMechanism=MONGODB-OIDC&PROVIDER_NAME:gcp", + "valid": false, + "credential": null + }, + { + "description": "should throw an exception if specified deviceName is not supported (MONGODB-OIDC)", + "uri": "mongodb://localhost/?authMechanism=MONGODB-OIDC&authMechanismProperties=PROVIDER_NAME:unexisted", + "valid": false, + "credential": null + }, + { + "description": "should throw an exception if neither deviceName nor callbacks specified (MONGODB-OIDC)", + "uri": "mongodb://localhost/?authMechanism=MONGODB-OIDC", + "valid": false, + "credential": null + }, + { + "description": "should throw an exception when only refresh callback is specified (MONGODB-OIDC)", + "uri": "mongodb://localhost/?authMechanism=MONGODB-OIDC", + "callback": ["oidcRefresh"], + "valid": false, + "credential": null + }, + { + "description": "should throw an exception when unsupported auth property is specified (MONGODB-OIDC)", + "uri": "mongodb://localhost/?authMechanism=MONGODB-OIDC&authMechanismProperties=UnsupportedProperty:unexisted", + "valid": false, + "credential": null } ] -} +} \ No newline at end of file diff --git a/test/auth/unified/reauthenticate_with_retry.json b/test/auth/unified/reauthenticate_with_retry.json new file mode 100644 index 0000000000..ef110562ed --- /dev/null +++ b/test/auth/unified/reauthenticate_with_retry.json @@ -0,0 +1,191 @@ +{ + "description": "reauthenticate_with_retry", + "schemaVersion": "1.12", + "runOnRequirements": [ + { + "minServerVersion": "6.3", + "auth": true + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "uriOptions": { + "retryReads": true, + "retryWrites": true + }, + "observeEvents": [ + "commandStartedEvent", + "commandSucceededEvent", + "commandFailedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "db" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "collName" + } + } + ], + "initialData": [ + { + "collectionName": "collName", + "databaseName": "db", + "documents": [] + } + ], + "tests": [ + { + "description": "Read command should reauthenticate when receive ReauthenticationRequired error code and retryReads=true", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "errorCode": 391 + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": {} + }, + "object": "collection0", + "expectResult": [] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + 
"commandStartedEvent": { + "command": { + "find": "collName", + "filter": {} + } + } + }, + { + "commandFailedEvent": { + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "collName", + "filter": {} + } + } + }, + { + "commandSucceededEvent": { + "commandName": "find" + } + } + ] + } + ] + }, + { + "description": "Write command should reauthenticate when receive ReauthenticationRequired error code and retryWrites=true", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "errorCode": 391 + } + } + } + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "_id": 1, + "x": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "collName", + "documents": [ + { + "_id": 1, + "x": 1 + } + ] + } + } + }, + { + "commandFailedEvent": { + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "collName", + "documents": [ + { + "_id": 1, + "x": 1 + } + ] + } + } + }, + { + "commandSucceededEvent": { + "commandName": "insert" + } + } + ] + } + ] + } + ] +} diff --git a/test/auth/unified/reauthenticate_without_retry.json b/test/auth/unified/reauthenticate_without_retry.json new file mode 100644 index 0000000000..6fded47634 --- /dev/null +++ b/test/auth/unified/reauthenticate_without_retry.json @@ -0,0 +1,191 @@ +{ + "description": "reauthenticate_without_retry", + "schemaVersion": "1.12", + "runOnRequirements": [ + { + "minServerVersion": "6.3", + "auth": true + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "uriOptions": { + "retryReads": false, + "retryWrites": false + }, + "observeEvents": [ + "commandStartedEvent", + "commandSucceededEvent", + "commandFailedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "db" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "collName" + } + } + ], + "initialData": [ + { + "collectionName": "collName", + "databaseName": "db", + "documents": [] + } + ], + "tests": [ + { + "description": "Read command should reauthenticate when receive ReauthenticationRequired error code and retryReads=false", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "errorCode": 391 + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": {} + }, + "object": "collection0", + "expectResult": [] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "collName", + "filter": {} + } + } + }, + { + "commandFailedEvent": { + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "collName", + "filter": {} + } + } + }, + { + "commandSucceededEvent": { + "commandName": "find" + } + } + ] + } + ] + }, + { + "description": "Write command should reauthenticate when receive ReauthenticationRequired error code and retryWrites=false", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": 
"failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "errorCode": 391 + } + } + } + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "_id": 1, + "x": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "collName", + "documents": [ + { + "_id": 1, + "x": 1 + } + ] + } + } + }, + { + "commandFailedEvent": { + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "collName", + "documents": [ + { + "_id": 1, + "x": 1 + } + ] + } + } + }, + { + "commandSucceededEvent": { + "commandName": "insert" + } + } + ] + } + ] + } + ] +} diff --git a/test/auth_aws/test_auth_oidc.py b/test/auth_aws/test_auth_oidc.py new file mode 100644 index 0000000000..470e4581c2 --- /dev/null +++ b/test/auth_aws/test_auth_oidc.py @@ -0,0 +1,821 @@ +# Copyright 2023-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Test MONGODB-OIDC Authentication.""" + +import os +import sys +import threading +import time +import unittest +from contextlib import contextmanager +from typing import Dict + +sys.path[0:0] = [""] + +from test.utils import EventListener + +from bson import SON +from pymongo import MongoClient +from pymongo.auth_oidc import _CACHE as _oidc_cache +from pymongo.cursor import CursorType +from pymongo.errors import ConfigurationError, OperationFailure +from pymongo.hello import HelloCompat +from pymongo.operations import InsertOne + + +class TestAuthOIDC(unittest.TestCase): + uri: str + + @classmethod + def setUpClass(cls): + cls.uri_single = os.environ["MONGODB_URI_SINGLE"] + cls.uri_multiple = os.environ["MONGODB_URI_MULTIPLE"] + cls.uri_admin = os.environ["MONGODB_URI"] + cls.token_dir = os.environ["OIDC_TOKEN_DIR"] + + def setUp(self): + self.request_called = 0 + self.refresh_called = 0 + _oidc_cache.clear() + os.environ["AWS_WEB_IDENTITY_TOKEN_FILE"] = os.path.join(self.token_dir, "test_user1") + + def create_request_cb(self, username="test_user1", expires_in_seconds=None, sleep=0): + + token_file = os.path.join(self.token_dir, username) + + def request_token(server_info, context): + # Validate the info. + self.assertIn("issuer", server_info) + self.assertIn("clientId", server_info) + + # Validate the timeout. + timeout_seconds = context["timeout_seconds"] + self.assertEqual(timeout_seconds, 60 * 5) + with open(token_file) as fid: + token = fid.read() + resp = dict(access_token=token) + + time.sleep(sleep) + + if expires_in_seconds is not None: + resp["expires_in_seconds"] = expires_in_seconds + self.request_called += 1 + return resp + + return request_token + + def create_refresh_cb(self, username="test_user1", expires_in_seconds=None): + + token_file = os.path.join(self.token_dir, username) + + def refresh_token(server_info, context): + with open(token_file) as fid: + token = fid.read() + + # Validate the info. 
+ self.assertIn("issuer", server_info) + self.assertIn("clientId", server_info) + + # Validate the creds + self.assertIsNotNone(context["refresh_token"]) + + # Validate the timeout. + self.assertEqual(context["timeout_seconds"], 60 * 5) + + resp = dict(access_token=token) + if expires_in_seconds is not None: + resp["expires_in_seconds"] = expires_in_seconds + self.refresh_called += 1 + return resp + + return refresh_token + + @contextmanager + def fail_point(self, command_args): + cmd_on = SON([("configureFailPoint", "failCommand")]) + cmd_on.update(command_args) + client = MongoClient(self.uri_admin) + client.admin.command(cmd_on) + try: + yield + finally: + client.admin.command("configureFailPoint", cmd_on["configureFailPoint"], mode="off") + + def test_connect_callbacks_single_implicit_username(self): + request_token = self.create_request_cb() + props: Dict = dict(request_token_callback=request_token) + client = MongoClient(self.uri_single, authmechanismproperties=props) + client.test.test.find_one() + client.close() + + def test_connect_callbacks_single_explicit_username(self): + request_token = self.create_request_cb() + props: Dict = dict(request_token_callback=request_token) + client = MongoClient(self.uri_single, username="test_user1", authmechanismproperties=props) + client.test.test.find_one() + client.close() + + def test_connect_callbacks_multiple_principal_user1(self): + request_token = self.create_request_cb() + props: Dict = dict(request_token_callback=request_token) + client = MongoClient( + self.uri_multiple, username="test_user1", authmechanismproperties=props + ) + client.test.test.find_one() + client.close() + + def test_connect_callbacks_multiple_principal_user2(self): + request_token = self.create_request_cb("test_user2") + props: Dict = dict(request_token_callback=request_token) + client = MongoClient( + self.uri_multiple, username="test_user2", authmechanismproperties=props + ) + client.test.test.find_one() + client.close() + + def test_connect_callbacks_multiple_no_username(self): + request_token = self.create_request_cb() + props: Dict = dict(request_token_callback=request_token) + client = MongoClient(self.uri_multiple, authmechanismproperties=props) + with self.assertRaises(OperationFailure): + client.test.test.find_one() + client.close() + + def test_allowed_hosts_blocked(self): + request_token = self.create_request_cb() + props: Dict = dict(request_token_callback=request_token, allowed_hosts=[]) + client = MongoClient(self.uri_single, authmechanismproperties=props) + with self.assertRaises(ConfigurationError): + client.test.test.find_one() + client.close() + + props: Dict = dict(request_token_callback=request_token, allowed_hosts=["example.com"]) + client = MongoClient( + self.uri_single + "&ignored=example.com", authmechanismproperties=props, connect=False + ) + with self.assertRaises(ConfigurationError): + client.test.test.find_one() + client.close() + + def test_connect_aws_single_principal(self): + props = dict(PROVIDER_NAME="aws") + client = MongoClient(self.uri_single, authmechanismproperties=props) + client.test.test.find_one() + client.close() + + def test_connect_aws_multiple_principal_user1(self): + props = dict(PROVIDER_NAME="aws") + client = MongoClient(self.uri_multiple, authmechanismproperties=props) + client.test.test.find_one() + client.close() + + def test_connect_aws_multiple_principal_user2(self): + os.environ["AWS_WEB_IDENTITY_TOKEN_FILE"] = os.path.join(self.token_dir, "test_user2") + props = dict(PROVIDER_NAME="aws") + client = 
MongoClient(self.uri_multiple, authmechanismproperties=props) + client.test.test.find_one() + client.close() + + def test_connect_aws_allowed_hosts_ignored(self): + props = dict(PROVIDER_NAME="aws", allowed_hosts=[]) + client = MongoClient(self.uri_multiple, authmechanismproperties=props) + client.test.test.find_one() + client.close() + + def test_valid_callbacks(self): + request_cb = self.create_request_cb(expires_in_seconds=60) + refresh_cb = self.create_refresh_cb() + + props: Dict = dict( + request_token_callback=request_cb, + refresh_token_callback=refresh_cb, + ) + client = MongoClient(self.uri_single, authmechanismproperties=props) + client.test.test.find_one() + client.close() + + client = MongoClient(self.uri_single, authmechanismproperties=props) + client.test.test.find_one() + client.close() + + def test_lock_avoids_extra_callbacks(self): + request_cb = self.create_request_cb(sleep=0.5) + refresh_cb = self.create_refresh_cb() + + props: Dict = dict(request_token_callback=request_cb, refresh_token_callback=refresh_cb) + + def run_test(): + client = MongoClient(self.uri_single, authMechanismProperties=props) + client.test.test.find_one() + client.close() + + client = MongoClient(self.uri_single, authMechanismProperties=props) + client.test.test.find_one() + client.close() + + t1 = threading.Thread(target=run_test) + t2 = threading.Thread(target=run_test) + t1.start() + t2.start() + t1.join() + t2.join() + + self.assertEqual(self.request_called, 1) + self.assertEqual(self.refresh_called, 2) + + def test_request_callback_returns_null(self): + def request_token_null(a, b): + return None + + props: Dict = dict(request_token_callback=request_token_null) + client = MongoClient(self.uri_single, authMechanismProperties=props) + with self.assertRaises(ValueError): + client.test.test.find_one() + client.close() + + def test_refresh_callback_returns_null(self): + request_cb = self.create_request_cb(expires_in_seconds=60) + + def refresh_token_null(a, b): + return None + + props: Dict = dict( + request_token_callback=request_cb, refresh_token_callback=refresh_token_null + ) + client = MongoClient(self.uri_single, authMechanismProperties=props) + client.test.test.find_one() + client.close() + + client = MongoClient(self.uri_single, authMechanismProperties=props) + with self.assertRaises(ValueError): + client.test.test.find_one() + client.close() + + def test_request_callback_invalid_result(self): + def request_token_invalid(a, b): + return dict() + + props: Dict = dict(request_token_callback=request_token_invalid) + client = MongoClient(self.uri_single, authMechanismProperties=props) + with self.assertRaises(ValueError): + client.test.test.find_one() + client.close() + + def request_cb_extra_value(server_info, context): + result = self.create_request_cb()(server_info, context) + result["foo"] = "bar" + return result + + props: Dict = dict(request_token_callback=request_cb_extra_value) + client = MongoClient(self.uri_single, authMechanismProperties=props) + with self.assertRaises(ValueError): + client.test.test.find_one() + client.close() + + def test_refresh_callback_missing_data(self): + request_cb = self.create_request_cb(expires_in_seconds=60) + + def refresh_cb_no_token(a, b): + return dict() + + props: Dict = dict( + request_token_callback=request_cb, refresh_token_callback=refresh_cb_no_token + ) + client = MongoClient(self.uri_single, authMechanismProperties=props) + client.test.test.find_one() + client.close() + + client = MongoClient(self.uri_single, authMechanismProperties=props) + 
with self.assertRaises(ValueError):
+            client.test.test.find_one()
+        client.close()
+
+    def test_refresh_callback_extra_data(self):
+        request_cb = self.create_request_cb(expires_in_seconds=60)
+
+        def refresh_cb_extra_value(server_info, context):
+            result = self.create_refresh_cb()(server_info, context)
+            result["foo"] = "bar"
+            return result
+
+        props: Dict = dict(
+            request_token_callback=request_cb, refresh_token_callback=refresh_cb_extra_value
+        )
+        client = MongoClient(self.uri_single, authMechanismProperties=props)
+        client.test.test.find_one()
+        client.close()
+
+        client = MongoClient(self.uri_single, authMechanismProperties=props)
+        with self.assertRaises(ValueError):
+            client.test.test.find_one()
+        client.close()
+
+    def test_cache_with_refresh(self):
+        # Create a new client with a request callback and a refresh callback.
+        # Both callbacks will read the contents of the
+        # ``AWS_WEB_IDENTITY_TOKEN_FILE`` location to obtain a valid access
+        # token.
+
+        # Give a callback response with a valid accessToken and an
+        # expiresInSeconds that is within one minute.
+        request_cb = self.create_request_cb(expires_in_seconds=60)
+        refresh_cb = self.create_refresh_cb()
+
+        props: Dict = dict(request_token_callback=request_cb, refresh_token_callback=refresh_cb)
+
+        # Ensure that a ``find`` operation adds credentials to the cache.
+        client = MongoClient(self.uri_single, authMechanismProperties=props)
+        client.test.test.find_one()
+        client.close()
+
+        self.assertEqual(len(_oidc_cache), 1)
+
+        # Create a new client with the same request callback and a refresh callback.
+        # Ensure that a ``find`` operation results in a call to the refresh callback.
+        client = MongoClient(self.uri_single, authMechanismProperties=props)
+        client.test.test.find_one()
+        client.close()
+
+        self.assertEqual(self.refresh_called, 1)
+        self.assertEqual(len(_oidc_cache), 1)
+
+    def test_cache_with_no_refresh(self):
+        # Create a new client with a request callback.
+        # Give a callback response with a valid accessToken and an
+        # expiresInSeconds that is within one minute.
+        request_cb = self.create_request_cb()
+
+        props = dict(request_token_callback=request_cb)
+        client = MongoClient(self.uri_single, authMechanismProperties=props)
+
+        # Ensure that a ``find`` operation adds credentials to the cache.
+        self.request_called = 0
+        client.test.test.find_one()
+        client.close()
+        self.assertEqual(self.request_called, 1)
+        self.assertEqual(len(_oidc_cache), 1)
+
+        # Create a new client with the same request callback.
+        # Ensure that a ``find`` operation results in a call to the request callback.
+        client = MongoClient(self.uri_single, authMechanismProperties=props)
+        client.test.test.find_one()
+        client.close()
+        self.assertEqual(self.request_called, 2)
+        self.assertEqual(len(_oidc_cache), 1)
+
+    def test_cache_key_includes_callback(self):
+        request_cb = self.create_request_cb()
+
+        props: Dict = dict(request_token_callback=request_cb)
+
+        # Ensure that a ``find`` operation adds a new entry to the cache.
+        client = MongoClient(self.uri_single, authMechanismProperties=props)
+        client.test.test.find_one()
+        client.close()
+
+        # Create a new client with a different request callback.
+        def request_token_2(a, b):
+            return request_cb(a, b)
+
+        props["request_token_callback"] = request_token_2
+        client = MongoClient(self.uri_single, authMechanismProperties=props)
+
+        # Ensure that a ``find`` operation adds a new entry to the cache.
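+        # (request_token_2 wraps the same logic in a distinct function object,
+        # so it is expected to hash to a second cache entry.)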
+        client.test.test.find_one()
+        client.close()
+        self.assertEqual(len(_oidc_cache), 2)
+
+    def test_cache_clears_on_error(self):
+        request_cb = self.create_request_cb()
+
+        # Create a new client with a valid request callback that gives
+        # credentials that expire within 5 minutes and a refresh callback
+        # that gives invalid credentials.
+        def refresh_cb(a, b):
+            return dict(access_token="bad")
+
+        # Add a token to the cache that will expire soon.
+        props: Dict = dict(request_token_callback=request_cb, refresh_token_callback=refresh_cb)
+        client = MongoClient(self.uri_single, authMechanismProperties=props)
+        client.test.test.find_one()
+        client.close()
+
+        # Create a new client with the same callbacks.
+        client = MongoClient(self.uri_single, authMechanismProperties=props)
+
+        # Ensure that another ``find`` operation results in an error.
+        with self.assertRaises(OperationFailure):
+            client.test.test.find_one()
+
+        client.close()
+
+        # Ensure that the cache has been cleared.
+        authenticator = list(_oidc_cache.values())[0]
+        self.assertIsNone(authenticator.idp_info)
+
+    def test_cache_is_not_used_in_aws_automatic_workflow(self):
+        # Create a new client using the AWS device workflow.
+        # Ensure that a ``find`` operation does not add credentials to the cache.
+        props = dict(PROVIDER_NAME="aws")
+        client = MongoClient(self.uri_single, authmechanismproperties=props)
+        client.test.test.find_one()
+        client.close()
+
+        # Ensure that the cache was not used (no IdP info was stored).
+        authenticator = list(_oidc_cache.values())[0]
+        self.assertIsNone(authenticator.idp_info)
+
+    def test_speculative_auth_success(self):
+        # Clear the cache
+        _oidc_cache.clear()
+        token_file = os.path.join(self.token_dir, "test_user1")
+
+        def request_token(a, b):
+            with open(token_file) as fid:
+                token = fid.read()
+            return dict(access_token=token, expires_in_seconds=1000)
+
+        # Create a client with a request callback that returns a valid token
+        # that will not expire soon.
+        props: Dict = dict(request_token_callback=request_token)
+        client = MongoClient(self.uri_single, authmechanismproperties=props)
+
+        # Set a fail point for saslStart commands.
+        with self.fail_point(
+            {
+                "mode": {"times": 2},
+                "data": {"failCommands": ["saslStart"], "errorCode": 18},
+            }
+        ):
+            # Perform a find operation.
+            client.test.test.find_one()
+
+        # Close the client.
+        client.close()
+
+        # Create a new client.
+        client = MongoClient(self.uri_single, authmechanismproperties=props)
+
+        # Set a fail point for saslStart commands.
+        with self.fail_point(
+            {
+                "mode": {"times": 2},
+                "data": {"failCommands": ["saslStart"], "errorCode": 18},
+            }
+        ):
+            # Perform a find operation.
+            client.test.test.find_one()
+
+        # Close the client.
+        client.close()
+
+    def test_reauthenticate_succeeds(self):
+        listener = EventListener()
+
+        # Create request and refresh callbacks that return valid credentials
+        # that will not expire soon.
+        request_cb = self.create_request_cb()
+        refresh_cb = self.create_refresh_cb()
+
+        # Create a client with the callbacks.
+        props: Dict = dict(request_token_callback=request_cb, refresh_token_callback=refresh_cb)
+        client = MongoClient(
+            self.uri_single, event_listeners=[listener], authmechanismproperties=props
+        )
+
+        # Perform a find operation.
+        client.test.test.find_one()
+
+        # Assert that the refresh callback has not been called.
+        self.assertEqual(self.refresh_called, 0)
+
+        listener.reset()
+
+        with self.fail_point(
+            {
+                "mode": {"times": 1},
+                "data": {"failCommands": ["find"], "errorCode": 391},
+            }
+        ):
+            # Perform a find operation.
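+            # (Error code 391 is the server's ReauthenticationRequired error;
+            # the driver is expected to reauthenticate and retry, so this find
+            # still succeeds.)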
+ client.test.test.find_one() + + started_events = [ + i.command_name for i in listener.started_events if not i.command_name.startswith("sasl") + ] + succeeded_events = [ + i.command_name + for i in listener.succeeded_events + if not i.command_name.startswith("sasl") + ] + failed_events = [ + i.command_name for i in listener.failed_events if not i.command_name.startswith("sasl") + ] + + self.assertEqual( + started_events, + [ + "find", + "find", + ], + ) + self.assertEqual(succeeded_events, ["find"]) + self.assertEqual(failed_events, ["find"]) + + # Assert that the refresh callback has been called. + self.assertEqual(self.refresh_called, 1) + client.close() + + def test_reauthenticate_succeeds_bulk_write(self): + request_cb = self.create_request_cb() + refresh_cb = self.create_refresh_cb() + + # Create a client with the callbacks. + props: Dict = dict(request_token_callback=request_cb, refresh_token_callback=refresh_cb) + client = MongoClient(self.uri_single, authmechanismproperties=props) + + # Perform a find operation. + client.test.test.find_one() + + # Assert that the refresh callback has not been called. + self.assertEqual(self.refresh_called, 0) + + with self.fail_point( + { + "mode": {"times": 1}, + "data": {"failCommands": ["insert"], "errorCode": 391}, + } + ): + # Perform a bulk write operation. + client.test.test.bulk_write([InsertOne({})]) + + # Assert that the refresh callback has been called. + self.assertEqual(self.refresh_called, 1) + client.close() + + def test_reauthenticate_succeeds_bulk_read(self): + request_cb = self.create_request_cb() + refresh_cb = self.create_refresh_cb() + + # Create a client with the callbacks. + props: Dict = dict(request_token_callback=request_cb, refresh_token_callback=refresh_cb) + client = MongoClient(self.uri_single, authmechanismproperties=props) + + # Perform a find operation. + client.test.test.find_one() + + # Perform a bulk write operation. + client.test.test.bulk_write([InsertOne({})]) + + # Assert that the refresh callback has not been called. + self.assertEqual(self.refresh_called, 0) + + with self.fail_point( + { + "mode": {"times": 1}, + "data": {"failCommands": ["find"], "errorCode": 391}, + } + ): + # Perform a bulk read operation. + cursor = client.test.test.find_raw_batches({}) + list(cursor) + + # Assert that the refresh callback has been called. + self.assertEqual(self.refresh_called, 1) + client.close() + + def test_reauthenticate_succeeds_cursor(self): + request_cb = self.create_request_cb() + refresh_cb = self.create_refresh_cb() + + # Create a client with the callbacks. + props: Dict = dict(request_token_callback=request_cb, refresh_token_callback=refresh_cb) + client = MongoClient(self.uri_single, authmechanismproperties=props) + + # Perform an insert operation. + client.test.test.insert_one({"a": 1}) + + # Assert that the refresh callback has not been called. + self.assertEqual(self.refresh_called, 0) + + with self.fail_point( + { + "mode": {"times": 1}, + "data": {"failCommands": ["find"], "errorCode": 391}, + } + ): + # Perform a find operation. + cursor = client.test.test.find({"a": 1}) + self.assertGreaterEqual(len(list(cursor)), 1) + + # Assert that the refresh callback has been called. + self.assertEqual(self.refresh_called, 1) + client.close() + + def test_reauthenticate_succeeds_get_more(self): + request_cb = self.create_request_cb() + refresh_cb = self.create_refresh_cb() + + # Create a client with the callbacks. 
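+        # (batch_size=1 in the find below forces a getMore, which is the
+        # command the fail point targets.)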
+        props: Dict = dict(request_token_callback=request_cb, refresh_token_callback=refresh_cb)
+        client = MongoClient(self.uri_single, authmechanismproperties=props)
+
+        # Perform an insert operation.
+        client.test.test.insert_many([{"a": 1}, {"a": 1}])
+
+        # Assert that the refresh callback has not been called.
+        self.assertEqual(self.refresh_called, 0)
+
+        with self.fail_point(
+            {
+                "mode": {"times": 1},
+                "data": {"failCommands": ["getMore"], "errorCode": 391},
+            }
+        ):
+            # Perform a find operation.
+            cursor = client.test.test.find({"a": 1}, batch_size=1)
+            self.assertGreaterEqual(len(list(cursor)), 1)
+
+        # Assert that the refresh callback has been called.
+        self.assertEqual(self.refresh_called, 1)
+        client.close()
+
+    def test_reauthenticate_succeeds_get_more_exhaust(self):
+        # Ensure no mongos: exhaust cursors are not supported through mongos.
+        props = dict(PROVIDER_NAME="aws")
+        client = MongoClient(self.uri_single, authmechanismproperties=props)
+        hello = client.admin.command(HelloCompat.LEGACY_CMD)
+        if hello.get("msg") == "isdbgrid":
+            raise unittest.SkipTest("Must not be a mongos")
+
+        request_cb = self.create_request_cb()
+        refresh_cb = self.create_refresh_cb()
+
+        # Create a client with the callbacks.
+        props: Dict = dict(request_token_callback=request_cb, refresh_token_callback=refresh_cb)
+        client = MongoClient(self.uri_single, authmechanismproperties=props)
+
+        # Perform an insert operation.
+        client.test.test.insert_many([{"a": 1}, {"a": 1}])
+
+        # Assert that the refresh callback has not been called.
+        self.assertEqual(self.refresh_called, 0)
+
+        with self.fail_point(
+            {
+                "mode": {"times": 1},
+                "data": {"failCommands": ["getMore"], "errorCode": 391},
+            }
+        ):
+            # Perform a find operation.
+            cursor = client.test.test.find({"a": 1}, batch_size=1, cursor_type=CursorType.EXHAUST)
+            self.assertGreaterEqual(len(list(cursor)), 1)
+
+        # Assert that the refresh callback has been called.
+        self.assertEqual(self.refresh_called, 1)
+        client.close()
+
+    def test_reauthenticate_succeeds_command(self):
+        request_cb = self.create_request_cb()
+        refresh_cb = self.create_refresh_cb()
+
+        # Create a client with the callbacks.
+        props: Dict = dict(request_token_callback=request_cb, refresh_token_callback=refresh_cb)
+        client = MongoClient(self.uri_single, authmechanismproperties=props)
+
+        # Perform an insert operation.
+        client.test.test.insert_one({"a": 1})
+
+        # Assert that the refresh callback has not been called.
+        self.assertEqual(self.refresh_called, 0)
+
+        with self.fail_point(
+            {
+                "mode": {"times": 1},
+                "data": {"failCommands": ["count"], "errorCode": 391},
+            }
+        ):
+            # Perform a count operation.
+            cursor = client.test.command(dict(count="test"))
+
+        self.assertGreaterEqual(len(list(cursor)), 1)
+
+        # Assert that the refresh callback has been called.
+        self.assertEqual(self.refresh_called, 1)
+        client.close()
+
+    def test_reauthenticate_retries_and_succeeds_with_cache(self):
+        listener = EventListener()
+
+        # Create request and refresh callbacks that return valid credentials
+        # that will not expire soon.
+        request_cb = self.create_request_cb()
+        refresh_cb = self.create_refresh_cb()
+
+        # Create a client with the callbacks.
+        props: Dict = dict(request_token_callback=request_cb, refresh_token_callback=refresh_cb)
+        client = MongoClient(
+            self.uri_single, event_listeners=[listener], authmechanismproperties=props
+        )
+
+        # Perform a find operation.
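+        # (This initial find is expected to populate the OIDC token cache so
+        # that the reauthentication below can succeed even while saslStart is
+        # also failing.)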
+        client.test.test.find_one()
+
+        # Set a fail point that fails the ``find`` and ``saslStart`` commands.
+        with self.fail_point(
+            {
+                "mode": {"times": 2},
+                "data": {"failCommands": ["find", "saslStart"], "errorCode": 391},
+            }
+        ):
+            # Perform a find operation that succeeds.
+            client.test.test.find_one()
+
+        # Close the client.
+        client.close()
+
+    def test_reauthenticate_fails_with_no_cache(self):
+        listener = EventListener()
+
+        # Create request and refresh callbacks that return valid credentials
+        # that will not expire soon.
+        request_cb = self.create_request_cb()
+        refresh_cb = self.create_refresh_cb()
+
+        # Create a client with the callbacks.
+        props: Dict = dict(request_token_callback=request_cb, refresh_token_callback=refresh_cb)
+        client = MongoClient(
+            self.uri_single, event_listeners=[listener], authmechanismproperties=props
+        )
+
+        # Perform a find operation.
+        client.test.test.find_one()
+
+        # Clear the cache.
+        _oidc_cache.clear()
+
+        with self.fail_point(
+            {
+                "mode": {"times": 2},
+                "data": {"failCommands": ["find", "saslStart"], "errorCode": 391},
+            }
+        ):
+            # Perform a find operation that fails.
+            with self.assertRaises(OperationFailure):
+                client.test.test.find_one()
+
+        client.close()
+
+    def test_late_reauth_avoids_callback(self):
+        # Step 1: connect with both clients
+        request_cb = self.create_request_cb(expires_in_seconds=1e6)
+        refresh_cb = self.create_refresh_cb(expires_in_seconds=1e6)
+
+        props: Dict = dict(request_token_callback=request_cb, refresh_token_callback=refresh_cb)
+        client1 = MongoClient(self.uri_single, authMechanismProperties=props)
+        client1.test.test.find_one()
+        client2 = MongoClient(self.uri_single, authMechanismProperties=props)
+        client2.test.test.find_one()
+
+        self.assertEqual(self.refresh_called, 0)
+        self.assertEqual(self.request_called, 1)
+
+        # Step 2: cause a find 391 on the first client
+        with self.fail_point(
+            {
+                "mode": {"times": 1},
+                "data": {"failCommands": ["find"], "errorCode": 391},
+            }
+        ):
+            # Perform a find operation that succeeds.
+            client1.test.test.find_one()
+
+        self.assertEqual(self.refresh_called, 1)
+        self.assertEqual(self.request_called, 1)
+
+        # Step 3: cause a find 391 on the second client
+        with self.fail_point(
+            {
+                "mode": {"times": 1},
+                "data": {"failCommands": ["find"], "errorCode": 391},
+            }
+        ):
+            # Perform a find operation that succeeds.
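+            # (client2 is expected to reuse the token that client1 already
+            # refreshed, so neither callback fires again, as asserted below.)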
+ client2.test.test.find_one() + + self.assertEqual(self.refresh_called, 1) + self.assertEqual(self.request_called, 1) + + client1.close() + client2.close() + + +if __name__ == "__main__": + unittest.main() diff --git a/test/test_auth_spec.py b/test/test_auth_spec.py index 9f2fa374ac..78f4d21929 100644 --- a/test/test_auth_spec.py +++ b/test/test_auth_spec.py @@ -22,6 +22,7 @@ sys.path[0:0] = [""] from test import unittest +from test.unified_format import generate_test_classes from pymongo import MongoClient @@ -41,7 +42,16 @@ def run_test(self): if not valid: self.assertRaises(Exception, MongoClient, uri, connect=False) else: - client = MongoClient(uri, connect=False) + props = {} + if credential: + props = credential["mechanism_properties"] or {} + if props.get("REQUEST_TOKEN_CALLBACK"): + props["request_token_callback"] = lambda x, y: 1 + del props["REQUEST_TOKEN_CALLBACK"] + if props.get("REFRESH_TOKEN_CALLBACK"): + props["refresh_token_callback"] = lambda a, b: 1 + del props["REFRESH_TOKEN_CALLBACK"] + client = MongoClient(uri, connect=False, authmechanismproperties=props) credentials = client.options.pool_options._credentials if credential is None: self.assertIsNone(credentials) @@ -70,6 +80,16 @@ def run_test(self): self.assertEqual( actual.aws_session_token, expected["AWS_SESSION_TOKEN"] ) + elif "PROVIDER_NAME" in expected: + self.assertEqual(actual.provider_name, expected["PROVIDER_NAME"]) + elif "request_token_callback" in expected: + self.assertEqual( + actual.request_token_callback, expected["request_token_callback"] + ) + elif "refresh_token_callback" in expected: + self.assertEqual( + actual.refresh_token_callback, expected["refresh_token_callback"] + ) else: self.fail("Unhandled property: %s" % (key,)) else: @@ -82,7 +102,7 @@ def run_test(self): def create_tests(): - for filename in glob.glob(os.path.join(_TEST_PATH, "*.json")): + for filename in glob.glob(os.path.join(_TEST_PATH, "legacy", "*.json")): test_suffix, _ = os.path.splitext(os.path.basename(filename)) with open(filename) as auth_tests: test_cases = json.load(auth_tests)["tests"] @@ -97,5 +117,12 @@ def create_tests(): create_tests() +globals().update( + generate_test_classes( + os.path.join(_TEST_PATH, "unified"), + module=__name__, + ) +) + if __name__ == "__main__": unittest.main() From 0092b0af79378abf35b6db73a082ecb91af1d973 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Thu, 11 May 2023 15:27:17 -0700 Subject: [PATCH 0390/1588] PYTHON-2504 Run pyupgrade 3.4.0 and ruff 0.0.265 (#1196) pyupgrade --py37-plus bson/*.py pymongo/*.py gridfs/*.py test/*.py tools/*.py test/*/*.py ruff --fix-only --select ALL --fixable ALL --target-version py37 --line-length=100 --unfixable COM812,D400,D415,ERA001,RUF100,SIM108,D211,D212,SIM105,SIM,PT,ANN204,EM bson/*.py pymongo/*.py gridfs/*.py test/*.py test/*/*.py --- bson/__init__.py | 17 +- bson/_helpers.py | 4 +- bson/binary.py | 11 +- bson/code.py | 7 +- bson/codec_options.py | 24 +-- bson/dbref.py | 10 +- bson/decimal128.py | 6 +- bson/json_util.py | 71 ++++---- bson/max_key.py | 5 +- bson/min_key.py | 5 +- bson/objectid.py | 16 +- bson/raw_bson.py | 4 +- bson/regex.py | 5 +- bson/son.py | 16 +- bson/timestamp.py | 7 +- gridfs/__init__.py | 5 +- gridfs/grid_file.py | 18 +- pymongo/_csot.py | 2 +- pymongo/aggregation.py | 6 +- pymongo/auth.py | 16 +- pymongo/auth_aws.py | 6 +- pymongo/auth_oidc.py | 20 +-- pymongo/bulk.py | 5 +- pymongo/change_stream.py | 7 +- pymongo/client_options.py | 2 +- pymongo/client_session.py | 33 ++-- pymongo/collation.py | 14 +- 
pymongo/collection.py | 29 ++-- pymongo/command_cursor.py | 8 +- pymongo/common.py | 92 +++++----- pymongo/compression_support.py | 18 +- pymongo/cursor.py | 14 +- pymongo/database.py | 21 ++- pymongo/driver_info.py | 6 +- pymongo/encryption.py | 8 +- pymongo/encryption_options.py | 2 +- pymongo/errors.py | 24 +-- pymongo/helpers.py | 10 +- pymongo/message.py | 32 ++-- pymongo/mongo_client.py | 21 +-- pymongo/monitor.py | 8 +- pymongo/monitoring.py | 135 +++++++-------- pymongo/network.py | 14 +- pymongo/ocsp_cache.py | 2 +- pymongo/operations.py | 22 +-- pymongo/periodic_executor.py | 4 +- pymongo/pool.py | 44 +++-- pymongo/pyopenssl_context.py | 21 +-- pymongo/read_concern.py | 5 +- pymongo/read_preferences.py | 35 ++-- pymongo/response.py | 9 +- pymongo/results.py | 23 ++- pymongo/saslprep.py | 6 +- pymongo/server.py | 4 +- pymongo/server_api.py | 8 +- pymongo/server_description.py | 6 +- pymongo/server_selectors.py | 3 +- pymongo/settings.py | 4 +- pymongo/socket_checker.py | 4 +- pymongo/srv_resolver.py | 6 +- pymongo/ssl_support.py | 2 +- pymongo/topology.py | 18 +- pymongo/topology_description.py | 14 +- pymongo/typings.py | 3 +- pymongo/uri_parser.py | 19 +-- pymongo/write_concern.py | 6 +- test/__init__.py | 79 ++++----- test/atlas/test_connection.py | 2 +- test/auth_aws/test_auth_aws.py | 14 +- test/auth_aws/test_auth_oidc.py | 107 ++++++------ test/crud_v2_format.py | 4 +- test/mockupdb/operations.py | 2 +- test/mockupdb/test_handshake.py | 1 + test/mockupdb/test_mixed_version_sharded.py | 2 +- .../mockupdb/test_mongos_command_read_mode.py | 2 +- .../test_network_disconnect_primary.py | 2 +- test/mockupdb/test_op_msg.py | 2 +- test/mockupdb/test_op_msg_read_preference.py | 16 +- test/mockupdb/test_reset_and_request_check.py | 4 +- test/mockupdb/test_slave_okay_sharded.py | 6 +- test/mockupdb/test_slave_okay_single.py | 2 +- test/mod_wsgi_test/test_client.py | 14 +- test/ocsp/test_ocsp.py | 2 +- test/performance/perf_test.py | 39 ++--- test/pymongo_mocks.py | 8 +- test/qcheck.py | 7 +- test/sigstop_sigcont.py | 2 +- test/test_auth.py | 32 ++-- test/test_auth_spec.py | 6 +- test/test_binary.py | 14 +- test/test_bson.py | 11 +- test/test_bulk.py | 14 +- test/test_change_stream.py | 36 ++-- test/test_client.py | 44 +++-- test/test_client_context.py | 10 +- test/test_cmap.py | 16 +- test/test_code.py | 3 +- test/test_collation.py | 6 +- test/test_collection.py | 14 +- test/test_comment.py | 2 +- test/test_common.py | 8 +- ...nnections_survive_primary_stepdown_spec.py | 2 +- test/test_crud_v1.py | 2 +- test/test_cursor.py | 6 +- test/test_custom_types.py | 32 ++-- test/test_data_lake.py | 4 +- test/test_database.py | 13 +- test/test_dbref.py | 2 +- test/test_discovery_and_monitoring.py | 10 +- test/test_encryption.py | 62 +++---- test/test_examples.py | 32 ++-- test/test_grid_file.py | 12 +- test/test_gridfs.py | 14 +- test/test_gridfs_bucket.py | 12 +- test/test_load_balancer.py | 2 +- test/test_mongos_load_balancing.py | 14 +- test/test_monitor.py | 6 +- test/test_monitoring.py | 84 +++++----- test/test_on_demand_csfle.py | 8 +- test/test_pooling.py | 18 +- test/test_read_concern.py | 8 +- test/test_read_preferences.py | 35 ++-- test/test_read_write_concern_spec.py | 8 +- test/test_replica_set_reconfig.py | 12 +- test/test_retryable_reads.py | 10 +- test/test_retryable_writes.py | 46 ++--- test/test_sdam_monitoring_spec.py | 30 ++-- test/test_server_selection.py | 4 +- test/test_server_selection_in_window.py | 4 +- test/test_server_selection_rtt.py | 2 +- test/test_session.py | 55 
+++--- test/test_son.py | 28 ++-- test/test_srv_polling.py | 2 +- test/test_ssl.py | 4 +- test/test_threads.py | 2 +- test/test_topology.py | 8 +- test/test_transactions.py | 12 +- test/test_typing.py | 13 +- test/test_uri_parser.py | 2 +- test/test_uri_spec.py | 17 +- test/test_write_concern.py | 2 +- test/unified_format.py | 157 +++++++++--------- test/utils.py | 60 +++---- test/utils_selection_tests.py | 10 +- test/utils_spec_runner.py | 22 +-- test/version.py | 2 +- 146 files changed, 1234 insertions(+), 1241 deletions(-) diff --git a/bson/__init__.py b/bson/__init__.py index d95c511fc7..d0a8daa273 100644 --- a/bson/__init__.py +++ b/bson/__init__.py @@ -237,8 +237,8 @@ def get_data_and_view(data: Any) -> Tuple[Any, memoryview]: def _raise_unknown_type(element_type: int, element_name: str) -> NoReturn: """Unknown type helper.""" raise InvalidBSON( - "Detected unknown BSON type %r for fieldname '%s'. Are " - "you using the latest driver version?" % (chr(element_type).encode(), element_name) + "Detected unknown BSON type {!r} for fieldname '{}'. Are " + "you using the latest driver version?".format(chr(element_type).encode(), element_name) ) @@ -626,8 +626,7 @@ def gen_list_name() -> Generator[bytes, None, None]: The first 1000 keys are returned from a pre-built cache. All subsequent keys are generated on the fly. """ - for name in _LIST_NAMES: - yield name + yield from _LIST_NAMES counter = itertools.count(1000) while True: @@ -942,18 +941,18 @@ def _name_value_to_bson( name, fallback_encoder(value), check_keys, opts, in_fallback_call=True ) - raise InvalidDocument("cannot encode object: %r, of type: %r" % (value, type(value))) + raise InvalidDocument(f"cannot encode object: {value!r}, of type: {type(value)!r}") def _element_to_bson(key: Any, value: Any, check_keys: bool, opts: CodecOptions) -> bytes: """Encode a single key, value pair.""" if not isinstance(key, str): - raise InvalidDocument("documents must have only string keys, key was %r" % (key,)) + raise InvalidDocument(f"documents must have only string keys, key was {key!r}") if check_keys: if key.startswith("$"): - raise InvalidDocument("key %r must not start with '$'" % (key,)) + raise InvalidDocument(f"key {key!r} must not start with '$'") if "." in key: - raise InvalidDocument("key %r must not contain '.'" % (key,)) + raise InvalidDocument(f"key {key!r} must not contain '.'") name = _make_name(key) return _name_value_to_bson(name, value, check_keys, opts) @@ -971,7 +970,7 @@ def _dict_to_bson(doc: Any, check_keys: bool, opts: CodecOptions, top_level: boo if not top_level or key != "_id": elements.append(_element_to_bson(key, value, check_keys, opts)) except AttributeError: - raise TypeError("encoder expected a mapping type but got: %r" % (doc,)) + raise TypeError(f"encoder expected a mapping type but got: {doc!r}") encoded = b"".join(elements) return _PACK_INT(len(encoded) + 5) + encoded + b"\x00" diff --git a/bson/_helpers.py b/bson/_helpers.py index ee3b0f1099..5643d77c24 100644 --- a/bson/_helpers.py +++ b/bson/_helpers.py @@ -13,7 +13,7 @@ # limitations under the License. 
"""Setstate and getstate functions for objects with __slots__, allowing - compatibility with default pickling protocol +compatibility with default pickling protocol """ from typing import Any, Mapping @@ -33,7 +33,7 @@ def _mangle_name(name: str, prefix: str) -> str: def _getstate_slots(self: Any) -> Mapping[Any, Any]: prefix = self.__class__.__name__ - ret = dict() + ret = {} for name in self.__slots__: mangled_name = _mangle_name(name, prefix) if hasattr(self, mangled_name): diff --git a/bson/binary.py b/bson/binary.py index a270eae8d2..77e3a3d478 100644 --- a/bson/binary.py +++ b/bson/binary.py @@ -306,7 +306,7 @@ def as_uuid(self, uuid_representation: int = UuidRepresentation.STANDARD) -> UUI .. versionadded:: 3.11 """ if self.subtype not in ALL_UUID_SUBTYPES: - raise ValueError("cannot decode subtype %s as a uuid" % (self.subtype,)) + raise ValueError(f"cannot decode subtype {self.subtype} as a uuid") if uuid_representation not in ALL_UUID_REPRESENTATIONS: raise ValueError( @@ -330,8 +330,7 @@ def as_uuid(self, uuid_representation: int = UuidRepresentation.STANDARD) -> UUI return UUID(bytes=self) raise ValueError( - "cannot decode subtype %s to %s" - % (self.subtype, UUID_REPRESENTATION_NAMES[uuid_representation]) + f"cannot decode subtype {self.subtype} to {UUID_REPRESENTATION_NAMES[uuid_representation]}" ) @property @@ -341,7 +340,7 @@ def subtype(self) -> int: def __getnewargs__(self) -> Tuple[bytes, int]: # type: ignore[override] # Work around http://bugs.python.org/issue7382 - data = super(Binary, self).__getnewargs__()[0] + data = super().__getnewargs__()[0] if not isinstance(data, bytes): data = data.encode("latin-1") return data, self.__subtype @@ -355,10 +354,10 @@ def __eq__(self, other: Any) -> bool: return False def __hash__(self) -> int: - return super(Binary, self).__hash__() ^ hash(self.__subtype) + return super().__hash__() ^ hash(self.__subtype) def __ne__(self, other: Any) -> bool: return not self == other def __repr__(self): - return "Binary(%s, %s)" % (bytes.__repr__(self), self.__subtype) + return f"Binary({bytes.__repr__(self)}, {self.__subtype})" diff --git a/bson/code.py b/bson/code.py index b732e82469..27ec588fae 100644 --- a/bson/code.py +++ b/bson/code.py @@ -12,8 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -"""Tools for representing JavaScript code in BSON. 
-""" +"""Tools for representing JavaScript code in BSON.""" from collections.abc import Mapping as _Mapping from typing import Any, Mapping, Optional, Type, Union @@ -54,7 +53,7 @@ def __new__( cls: Type["Code"], code: Union[str, "Code"], scope: Optional[Mapping[str, Any]] = None, - **kwargs: Any + **kwargs: Any, ) -> "Code": if not isinstance(code, str): raise TypeError("code must be an instance of str") @@ -88,7 +87,7 @@ def scope(self) -> Optional[Mapping[str, Any]]: return self.__scope def __repr__(self): - return "Code(%s, %r)" % (str.__repr__(self), self.__scope) + return f"Code({str.__repr__(self)}, {self.__scope!r})" def __eq__(self, other: Any) -> bool: if isinstance(other, Code): diff --git a/bson/codec_options.py b/bson/codec_options.py index 096be85264..a0bdd0eeb9 100644 --- a/bson/codec_options.py +++ b/bson/codec_options.py @@ -63,12 +63,10 @@ class TypeEncoder(abc.ABC): @abc.abstractproperty def python_type(self) -> Any: """The Python type to be converted into something serializable.""" - pass @abc.abstractmethod def transform_python(self, value: Any) -> Any: """Convert the given Python object into something serializable.""" - pass class TypeDecoder(abc.ABC): @@ -84,12 +82,10 @@ class TypeDecoder(abc.ABC): @abc.abstractproperty def bson_type(self) -> Any: """The BSON type to be converted into our own type.""" - pass @abc.abstractmethod def transform_bson(self, value: Any) -> Any: """Convert the given BSON value into our own type.""" - pass class TypeCodec(TypeEncoder, TypeDecoder): @@ -105,14 +101,12 @@ class TypeCodec(TypeEncoder, TypeDecoder): See :ref:`custom-type-type-codec` documentation for an example. """ - pass - _Codec = Union[TypeEncoder, TypeDecoder, TypeCodec] _Fallback = Callable[[Any], Any] -class TypeRegistry(object): +class TypeRegistry: """Encapsulates type codecs used in encoding and / or decoding BSON, as well as the fallback encoder. Type registries cannot be modified after instantiation. 
@@ -164,8 +158,7 @@ def __init__( self._decoder_map[codec.bson_type] = codec.transform_bson if not is_valid_codec: raise TypeError( - "Expected an instance of %s, %s, or %s, got %r instead" - % (TypeEncoder.__name__, TypeDecoder.__name__, TypeCodec.__name__, codec) + f"Expected an instance of {TypeEncoder.__name__}, {TypeDecoder.__name__}, or {TypeCodec.__name__}, got {codec!r} instead" ) def _validate_type_encoder(self, codec: _Codec) -> None: @@ -175,12 +168,12 @@ def _validate_type_encoder(self, codec: _Codec) -> None: if issubclass(cast(TypeCodec, codec).python_type, pytype): err_msg = ( "TypeEncoders cannot change how built-in types are " - "encoded (encoder %s transforms type %s)" % (codec, pytype) + "encoded (encoder {} transforms type {})".format(codec, pytype) ) raise TypeError(err_msg) def __repr__(self): - return "%s(type_codecs=%r, fallback_encoder=%r)" % ( + return "{}(type_codecs={!r}, fallback_encoder={!r})".format( self.__class__.__name__, self.__type_codecs, self._fallback_encoder, @@ -446,10 +439,9 @@ def _arguments_repr(self) -> str: ) return ( - "document_class=%s, tz_aware=%r, uuid_representation=%s, " - "unicode_decode_error_handler=%r, tzinfo=%r, " - "type_registry=%r, datetime_conversion=%s" - % ( + "document_class={}, tz_aware={!r}, uuid_representation={}, " + "unicode_decode_error_handler={!r}, tzinfo={!r}, " + "type_registry={!r}, datetime_conversion={!s}".format( document_class_repr, self.tz_aware, uuid_rep_repr, @@ -474,7 +466,7 @@ def _options_dict(self) -> Dict[str, Any]: } def __repr__(self): - return "%s(%s)" % (self.__class__.__name__, self._arguments_repr()) + return f"{self.__class__.__name__}({self._arguments_repr()})" def with_options(self, **kwargs: Any) -> "CodecOptions": """Make a copy of this CodecOptions, overriding some options:: diff --git a/bson/dbref.py b/bson/dbref.py index 7849435f23..491278e6f4 100644 --- a/bson/dbref.py +++ b/bson/dbref.py @@ -21,7 +21,7 @@ from bson.son import SON -class DBRef(object): +class DBRef: """A reference to a document stored in MongoDB.""" __slots__ = "__collection", "__id", "__database", "__kwargs" @@ -36,7 +36,7 @@ def __init__( id: Any, database: Optional[str] = None, _extra: Optional[Mapping[str, Any]] = None, - **kwargs: Any + **kwargs: Any, ) -> None: """Initialize a new :class:`DBRef`. 
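The __repr__ rewrite in the following hunk preserves the output exactly; for example (hypothetical values, assuming the bson package that ships with pymongo):

    from bson.dbref import DBRef

    ref = DBRef("users", 42, "app", role="admin")
    # Both the old %-style form and the new f-string form render this as:
    # DBRef('users', 42, 'app', role='admin')
    print(repr(ref))
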
@@ -102,10 +102,10 @@ def as_doc(self) -> SON[str, Any]: return doc def __repr__(self): - extra = "".join([", %s=%r" % (k, v) for k, v in self.__kwargs.items()]) + extra = "".join([f", {k}={v!r}" for k, v in self.__kwargs.items()]) if self.database is None: - return "DBRef(%r, %r%s)" % (self.collection, self.id, extra) - return "DBRef(%r, %r, %r%s)" % (self.collection, self.id, self.database, extra) + return f"DBRef({self.collection!r}, {self.id!r}{extra})" + return f"DBRef({self.collection!r}, {self.id!r}, {self.database!r}{extra})" def __eq__(self, other: Any) -> bool: if isinstance(other, DBRef): diff --git a/bson/decimal128.py b/bson/decimal128.py index bce5b251e9..0e24b5bbae 100644 --- a/bson/decimal128.py +++ b/bson/decimal128.py @@ -115,7 +115,7 @@ def _decimal_to_128(value: _VALUE_OPTIONS) -> Tuple[int, int]: return high, low -class Decimal128(object): +class Decimal128: """BSON Decimal128 type:: >>> Decimal128(Decimal("0.0005")) @@ -226,7 +226,7 @@ def __init__(self, value: _VALUE_OPTIONS) -> None: ) self.__high, self.__low = value # type: ignore else: - raise TypeError("Cannot convert %r to Decimal128" % (value,)) + raise TypeError(f"Cannot convert {value!r} to Decimal128") def to_decimal(self) -> decimal.Decimal: """Returns an instance of :class:`decimal.Decimal` for this @@ -297,7 +297,7 @@ def __str__(self) -> str: return str(dec) def __repr__(self): - return "Decimal128('%s')" % (str(self),) + return f"Decimal128('{str(self)}')" def __setstate__(self, value: Tuple[int, int]) -> None: self.__high, self.__low = value diff --git a/bson/json_util.py b/bson/json_util.py index 8842d5c74d..bc566fa982 100644 --- a/bson/json_util.py +++ b/bson/json_util.py @@ -288,7 +288,7 @@ def __new__( strict_uuid: Optional[bool] = None, json_mode: int = JSONMode.RELAXED, *args: Any, - **kwargs: Any + **kwargs: Any, ) -> "JSONOptions": kwargs["tz_aware"] = kwargs.get("tz_aware", False) if kwargs["tz_aware"]: @@ -303,7 +303,7 @@ def __new__( "JSONOptions.datetime_representation must be one of LEGACY, " "NUMBERLONG, or ISO8601 from DatetimeRepresentation." 
) - self = cast(JSONOptions, super(JSONOptions, cls).__new__(cls, *args, **kwargs)) + self = cast(JSONOptions, super().__new__(cls, *args, **kwargs)) if json_mode not in (JSONMode.LEGACY, JSONMode.RELAXED, JSONMode.CANONICAL): raise ValueError( "JSONOptions.json_mode must be one of LEGACY, RELAXED, " @@ -350,21 +350,20 @@ def __new__( def _arguments_repr(self) -> str: return ( - "strict_number_long=%r, " - "datetime_representation=%r, " - "strict_uuid=%r, json_mode=%r, %s" - % ( + "strict_number_long={!r}, " + "datetime_representation={!r}, " + "strict_uuid={!r}, json_mode={!r}, {}".format( self.strict_number_long, self.datetime_representation, self.strict_uuid, self.json_mode, - super(JSONOptions, self)._arguments_repr(), + super()._arguments_repr(), ) ) def _options_dict(self) -> Dict[Any, Any]: # TODO: PYTHON-2442 use _asdict() instead - options_dict = super(JSONOptions, self)._options_dict() + options_dict = super()._options_dict() options_dict.update( { "strict_number_long": self.strict_number_long, @@ -492,7 +491,7 @@ def _json_convert(obj: Any, json_options: JSONOptions = DEFAULT_JSON_OPTIONS) -> if hasattr(obj, "items"): return SON(((k, _json_convert(v, json_options)) for k, v in obj.items())) elif hasattr(obj, "__iter__") and not isinstance(obj, (str, bytes)): - return list((_json_convert(v, json_options) for v in obj)) + return [_json_convert(v, json_options) for v in obj] try: return default(obj, json_options) except TypeError: @@ -568,9 +567,9 @@ def _parse_legacy_regex(doc: Any) -> Any: def _parse_legacy_uuid(doc: Any, json_options: JSONOptions) -> Union[Binary, uuid.UUID]: """Decode a JSON legacy $uuid to Python UUID.""" if len(doc) != 1: - raise TypeError("Bad $uuid, extra field(s): %s" % (doc,)) + raise TypeError(f"Bad $uuid, extra field(s): {doc}") if not isinstance(doc["$uuid"], str): - raise TypeError("$uuid must be a string: %s" % (doc,)) + raise TypeError(f"$uuid must be a string: {doc}") if json_options.uuid_representation == UuidRepresentation.UNSPECIFIED: return Binary.from_uuid(uuid.UUID(doc["$uuid"])) else: @@ -613,11 +612,11 @@ def _parse_canonical_binary(doc: Any, json_options: JSONOptions) -> Union[Binary b64 = binary["base64"] subtype = binary["subType"] if not isinstance(b64, str): - raise TypeError("$binary base64 must be a string: %s" % (doc,)) + raise TypeError(f"$binary base64 must be a string: {doc}") if not isinstance(subtype, str) or len(subtype) > 2: - raise TypeError("$binary subType must be a string at most 2 characters: %s" % (doc,)) + raise TypeError(f"$binary subType must be a string at most 2 characters: {doc}") if len(binary) != 2: - raise TypeError('$binary must include only "base64" and "subType" components: %s' % (doc,)) + raise TypeError(f'$binary must include only "base64" and "subType" components: {doc}') data = base64.b64decode(b64.encode()) return _binary_or_uuid(data, int(subtype, 16), json_options) @@ -629,7 +628,7 @@ def _parse_canonical_datetime( """Decode a JSON datetime to python datetime.datetime.""" dtm = doc["$date"] if len(doc) != 1: - raise TypeError("Bad $date, extra field(s): %s" % (doc,)) + raise TypeError(f"Bad $date, extra field(s): {doc}") # mongoexport 2.6 and newer if isinstance(dtm, str): # Parse offset @@ -692,7 +691,7 @@ def _parse_canonical_datetime( def _parse_canonical_oid(doc: Any) -> ObjectId: """Decode a JSON ObjectId to bson.objectid.ObjectId.""" if len(doc) != 1: - raise TypeError("Bad $oid, extra field(s): %s" % (doc,)) + raise TypeError(f"Bad $oid, extra field(s): {doc}") return ObjectId(doc["$oid"]) @@ 
-700,7 +699,7 @@ def _parse_canonical_symbol(doc: Any) -> str: """Decode a JSON symbol to Python string.""" symbol = doc["$symbol"] if len(doc) != 1: - raise TypeError("Bad $symbol, extra field(s): %s" % (doc,)) + raise TypeError(f"Bad $symbol, extra field(s): {doc}") return str(symbol) @@ -708,7 +707,7 @@ def _parse_canonical_code(doc: Any) -> Code: """Decode a JSON code to bson.code.Code.""" for key in doc: if key not in ("$code", "$scope"): - raise TypeError("Bad $code, extra field(s): %s" % (doc,)) + raise TypeError(f"Bad $code, extra field(s): {doc}") return Code(doc["$code"], scope=doc.get("$scope")) @@ -716,11 +715,11 @@ def _parse_canonical_regex(doc: Any) -> Regex: """Decode a JSON regex to bson.regex.Regex.""" regex = doc["$regularExpression"] if len(doc) != 1: - raise TypeError("Bad $regularExpression, extra field(s): %s" % (doc,)) + raise TypeError(f"Bad $regularExpression, extra field(s): {doc}") if len(regex) != 2: raise TypeError( 'Bad $regularExpression must include only "pattern"' - 'and "options" components: %s' % (doc,) + 'and "options" components: {}'.format(doc) ) opts = regex["options"] if not isinstance(opts, str): @@ -739,28 +738,28 @@ def _parse_canonical_dbpointer(doc: Any) -> Any: """Decode a JSON (deprecated) DBPointer to bson.dbref.DBRef.""" dbref = doc["$dbPointer"] if len(doc) != 1: - raise TypeError("Bad $dbPointer, extra field(s): %s" % (doc,)) + raise TypeError(f"Bad $dbPointer, extra field(s): {doc}") if isinstance(dbref, DBRef): dbref_doc = dbref.as_doc() # DBPointer must not contain $db in its value. if dbref.database is not None: - raise TypeError("Bad $dbPointer, extra field $db: %s" % (dbref_doc,)) + raise TypeError(f"Bad $dbPointer, extra field $db: {dbref_doc}") if not isinstance(dbref.id, ObjectId): - raise TypeError("Bad $dbPointer, $id must be an ObjectId: %s" % (dbref_doc,)) + raise TypeError(f"Bad $dbPointer, $id must be an ObjectId: {dbref_doc}") if len(dbref_doc) != 2: - raise TypeError("Bad $dbPointer, extra field(s) in DBRef: %s" % (dbref_doc,)) + raise TypeError(f"Bad $dbPointer, extra field(s) in DBRef: {dbref_doc}") return dbref else: - raise TypeError("Bad $dbPointer, expected a DBRef: %s" % (doc,)) + raise TypeError(f"Bad $dbPointer, expected a DBRef: {doc}") def _parse_canonical_int32(doc: Any) -> int: """Decode a JSON int32 to python int.""" i_str = doc["$numberInt"] if len(doc) != 1: - raise TypeError("Bad $numberInt, extra field(s): %s" % (doc,)) + raise TypeError(f"Bad $numberInt, extra field(s): {doc}") if not isinstance(i_str, str): - raise TypeError("$numberInt must be string: %s" % (doc,)) + raise TypeError(f"$numberInt must be string: {doc}") return int(i_str) @@ -768,7 +767,7 @@ def _parse_canonical_int64(doc: Any) -> Int64: """Decode a JSON int64 to bson.int64.Int64.""" l_str = doc["$numberLong"] if len(doc) != 1: - raise TypeError("Bad $numberLong, extra field(s): %s" % (doc,)) + raise TypeError(f"Bad $numberLong, extra field(s): {doc}") return Int64(l_str) @@ -776,9 +775,9 @@ def _parse_canonical_double(doc: Any) -> float: """Decode a JSON double to python float.""" d_str = doc["$numberDouble"] if len(doc) != 1: - raise TypeError("Bad $numberDouble, extra field(s): %s" % (doc,)) + raise TypeError(f"Bad $numberDouble, extra field(s): {doc}") if not isinstance(d_str, str): - raise TypeError("$numberDouble must be string: %s" % (doc,)) + raise TypeError(f"$numberDouble must be string: {doc}") return float(d_str) @@ -786,18 +785,18 @@ def _parse_canonical_decimal128(doc: Any) -> Decimal128: """Decode a JSON decimal128 to 
bson.decimal128.Decimal128.""" d_str = doc["$numberDecimal"] if len(doc) != 1: - raise TypeError("Bad $numberDecimal, extra field(s): %s" % (doc,)) + raise TypeError(f"Bad $numberDecimal, extra field(s): {doc}") if not isinstance(d_str, str): - raise TypeError("$numberDecimal must be string: %s" % (doc,)) + raise TypeError(f"$numberDecimal must be string: {doc}") return Decimal128(d_str) def _parse_canonical_minkey(doc: Any) -> MinKey: """Decode a JSON MinKey to bson.min_key.MinKey.""" if type(doc["$minKey"]) is not int or doc["$minKey"] != 1: - raise TypeError("$minKey value must be 1: %s" % (doc,)) + raise TypeError(f"$minKey value must be 1: {doc}") if len(doc) != 1: - raise TypeError("Bad $minKey, extra field(s): %s" % (doc,)) + raise TypeError(f"Bad $minKey, extra field(s): {doc}") return MinKey() @@ -806,7 +805,7 @@ def _parse_canonical_maxkey(doc: Any) -> MaxKey: if type(doc["$maxKey"]) is not int or doc["$maxKey"] != 1: raise TypeError("$maxKey value must be 1: %s", (doc,)) if len(doc) != 1: - raise TypeError("Bad $minKey, extra field(s): %s" % (doc,)) + raise TypeError(f"Bad $minKey, extra field(s): {doc}") return MaxKey() @@ -839,7 +838,7 @@ def default(obj: Any, json_options: JSONOptions = DEFAULT_JSON_OPTIONS) -> Any: millis = int(obj.microsecond / 1000) fracsecs = ".%03d" % (millis,) if millis else "" return { - "$date": "%s%s%s" % (obj.strftime("%Y-%m-%dT%H:%M:%S"), fracsecs, tz_string) + "$date": "{}{}{}".format(obj.strftime("%Y-%m-%dT%H:%M:%S"), fracsecs, tz_string) } millis = _datetime_to_millis(obj) diff --git a/bson/max_key.py b/bson/max_key.py index b4f38d072e..eb5705d378 100644 --- a/bson/max_key.py +++ b/bson/max_key.py @@ -12,12 +12,11 @@ # See the License for the specific language governing permissions and # limitations under the License. -"""Representation for the MongoDB internal MaxKey type. -""" +"""Representation for the MongoDB internal MaxKey type.""" from typing import Any -class MaxKey(object): +class MaxKey: """MongoDB internal MaxKey type.""" __slots__ = () diff --git a/bson/min_key.py b/bson/min_key.py index babc655e43..2c8f73d560 100644 --- a/bson/min_key.py +++ b/bson/min_key.py @@ -12,12 +12,11 @@ # See the License for the specific language governing permissions and # limitations under the License. -"""Representation for the MongoDB internal MinKey type. -""" +"""Representation for the MongoDB internal MinKey type.""" from typing import Any -class MinKey(object): +class MinKey: """MongoDB internal MinKey type.""" __slots__ = () diff --git a/bson/objectid.py b/bson/objectid.py index 1fab986b8b..b045e93d04 100644 --- a/bson/objectid.py +++ b/bson/objectid.py @@ -12,8 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -"""Tools for working with MongoDB ObjectIds. 
-""" +"""Tools for working with MongoDB ObjectIds.""" import binascii import calendar @@ -43,7 +42,7 @@ def _random_bytes() -> bytes: return os.urandom(5) -class ObjectId(object): +class ObjectId: """A MongoDB ObjectId.""" _pid = os.getpid() @@ -166,7 +165,6 @@ def _random(cls) -> bytes: def __generate(self) -> None: """Generate a new value for this ObjectId.""" - # 4 bytes current time oid = struct.pack(">I", int(time.time())) @@ -202,9 +200,7 @@ def __validate(self, oid: Any) -> None: else: _raise_invalid_id(oid) else: - raise TypeError( - "id must be an instance of (bytes, str, ObjectId), not %s" % (type(oid),) - ) + raise TypeError(f"id must be an instance of (bytes, str, ObjectId), not {type(oid)}") @property def binary(self) -> bytes: @@ -224,13 +220,13 @@ def generation_time(self) -> datetime.datetime: return datetime.datetime.fromtimestamp(timestamp, utc) def __getstate__(self) -> bytes: - """return value of object for pickling. + """Return value of object for pickling. needed explicitly because __slots__() defined. """ return self.__id def __setstate__(self, value: Any) -> None: - """explicit state set from pickling""" + """Explicit state set from pickling""" # Provide backwards compatibility with OIDs # pickled with pymongo-1.9 or older. if isinstance(value, dict): @@ -249,7 +245,7 @@ def __str__(self) -> str: return binascii.hexlify(self.__id).decode() def __repr__(self): - return "ObjectId('%s')" % (str(self),) + return f"ObjectId('{str(self)}')" def __eq__(self, other: Any) -> bool: if isinstance(other, ObjectId): diff --git a/bson/raw_bson.py b/bson/raw_bson.py index 2c2b3c97ca..bb1dbd22a5 100644 --- a/bson/raw_bson.py +++ b/bson/raw_bson.py @@ -131,7 +131,7 @@ class from the standard library so it can be used like a read-only elif not issubclass(codec_options.document_class, RawBSONDocument): raise TypeError( "RawBSONDocument cannot use CodecOptions with document " - "class %s" % (codec_options.document_class,) + "class {}".format(codec_options.document_class) ) self.__codec_options = codec_options # Validate the bson object size. @@ -174,7 +174,7 @@ def __eq__(self, other: Any) -> bool: return NotImplemented def __repr__(self): - return "%s(%r, codec_options=%r)" % ( + return "{}({!r}, codec_options={!r})".format( self.__class__.__name__, self.raw, self.__codec_options, diff --git a/bson/regex.py b/bson/regex.py index 3e98477198..c06e493f38 100644 --- a/bson/regex.py +++ b/bson/regex.py @@ -12,8 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -"""Tools for representing MongoDB regular expressions. -""" +"""Tools for representing MongoDB regular expressions.""" import re from typing import Any, Generic, Pattern, Type, TypeVar, Union @@ -117,7 +116,7 @@ def __ne__(self, other: Any) -> bool: return not self == other def __repr__(self): - return "Regex(%r, %r)" % (self.pattern, self.flags) + return f"Regex({self.pattern!r}, {self.flags!r})" def try_compile(self) -> "Pattern[_T]": """Compile this :class:`Regex` as a Python regular expression. diff --git a/bson/son.py b/bson/son.py index bba108aa80..482e8d2584 100644 --- a/bson/son.py +++ b/bson/son.py @@ -16,7 +16,8 @@ Regular dictionaries can be used instead of SON objects, but not when the order of keys is important. A SON object can be used just like a normal Python -dictionary.""" +dictionary. 
+""" import copy import re @@ -58,7 +59,7 @@ class SON(Dict[_Key, _Value]): def __init__( self, data: Optional[Union[Mapping[_Key, _Value], Iterable[Tuple[_Key, _Value]]]] = None, - **kwargs: Any + **kwargs: Any, ) -> None: self.__keys = [] dict.__init__(self) @@ -66,14 +67,14 @@ def __init__( self.update(kwargs) def __new__(cls: Type["SON[_Key, _Value]"], *args: Any, **kwargs: Any) -> "SON[_Key, _Value]": - instance = super(SON, cls).__new__(cls, *args, **kwargs) # type: ignore[type-var] + instance = super().__new__(cls, *args, **kwargs) # type: ignore[type-var] instance.__keys = [] return instance def __repr__(self): result = [] for key in self.__keys: - result.append("(%r, %r)" % (key, self[key])) + result.append(f"({key!r}, {self[key]!r})") return "SON([%s])" % ", ".join(result) def __setitem__(self, key: _Key, value: _Value) -> None: @@ -94,8 +95,7 @@ def copy(self) -> "SON[_Key, _Value]": # efficient. # second level definitions support higher levels def __iter__(self) -> Iterator[_Key]: - for k in self.__keys: - yield k + yield from self.__keys def has_key(self, key: _Key) -> bool: return key in self.__keys @@ -113,7 +113,7 @@ def values(self) -> List[_Value]: # type: ignore[override] def clear(self) -> None: self.__keys = [] - super(SON, self).clear() + super().clear() def setdefault(self, key: _Key, default: _Value) -> _Value: try: @@ -189,7 +189,7 @@ def transform_value(value: Any) -> Any: if isinstance(value, list): return [transform_value(v) for v in value] elif isinstance(value, _Mapping): - return dict([(k, transform_value(v)) for k, v in value.items()]) + return {k: transform_value(v) for k, v in value.items()} else: return value diff --git a/bson/timestamp.py b/bson/timestamp.py index a333b9fa3e..5591b60e41 100644 --- a/bson/timestamp.py +++ b/bson/timestamp.py @@ -12,8 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -"""Tools for representing MongoDB internal Timestamps. -""" +"""Tools for representing MongoDB internal Timestamps.""" import calendar import datetime @@ -25,7 +24,7 @@ UPPERBOUND = 4294967296 -class Timestamp(object): +class Timestamp: """MongoDB internal timestamps used in the opLog.""" __slots__ = ("__time", "__inc") @@ -113,7 +112,7 @@ def __ge__(self, other: Any) -> bool: return NotImplemented def __repr__(self): - return "Timestamp(%s, %s)" % (self.__time, self.__inc) + return f"Timestamp({self.__time}, {self.__inc})" def as_datetime(self) -> datetime.datetime: """Return a :class:`~datetime.datetime` instance corresponding diff --git a/gridfs/__init__.py b/gridfs/__init__.py index 692567b2de..9a4cda5527 100644 --- a/gridfs/__init__.py +++ b/gridfs/__init__.py @@ -53,7 +53,7 @@ ] -class GridFS(object): +class GridFS: """An instance of GridFS on top of a single Database.""" def __init__(self, database: Database, collection: str = "fs"): @@ -141,7 +141,6 @@ def put(self, data: Any, **kwargs: Any) -> Any: .. versionchanged:: 3.0 w=0 writes to GridFS are now prohibited. 
""" - with GridIn(self.__collection, **kwargs) as grid_file: grid_file.write(data) return grid_file._id @@ -449,7 +448,7 @@ def exists( return f is not None -class GridFSBucket(object): +class GridFSBucket: """An instance of GridFS on top of a single Database.""" def __init__( diff --git a/gridfs/grid_file.py b/gridfs/grid_file.py index 5ec6352684..fd260963d7 100644 --- a/gridfs/grid_file.py +++ b/gridfs/grid_file.py @@ -76,7 +76,7 @@ def setter(self: Any, value: Any) -> Any: if read_only: docstring += "\n\nThis attribute is read-only." elif closed_only: - docstring = "%s\n\n%s" % ( + docstring = "{}\n\n{}".format( docstring, "This attribute is read-only and " "can only be read after :meth:`close` " @@ -114,7 +114,7 @@ def _disallow_transactions(session: Optional[ClientSession]) -> None: raise InvalidOperation("GridFS does not support multi-document transactions") -class GridIn(object): +class GridIn: """Class to write data to GridFS.""" def __init__( @@ -497,7 +497,7 @@ def _ensure_file(self) -> None: self._file = self.__files.find_one({"_id": self.__file_id}, session=self._session) if not self._file: raise NoFile( - "no file in gridfs collection %r with _id %r" % (self.__files, self.__file_id) + f"no file in gridfs collection {self.__files!r} with _id {self.__file_id!r}" ) def __getattr__(self, name: str) -> Any: @@ -640,10 +640,10 @@ def seek(self, pos: int, whence: int = _SEEK_SET) -> int: elif whence == _SEEK_END: new_pos = int(self.length) + pos else: - raise IOError(22, "Invalid value for `whence`") + raise OSError(22, "Invalid value for `whence`") if new_pos < 0: - raise IOError(22, "Invalid value for `pos` - must be positive") + raise OSError(22, "Invalid value for `pos` - must be positive") # Optimization, continue using the same buffer and chunk iterator. if new_pos == self.__position: @@ -732,7 +732,7 @@ def __del__(self) -> None: pass -class _GridOutChunkIterator(object): +class _GridOutChunkIterator: """Iterates over a file's chunks using a single cursor. Raises CorruptGridFile when encountering any truncated, missing, or extra @@ -832,7 +832,7 @@ def close(self) -> None: self._cursor = None -class GridOutIterator(object): +class GridOutIterator: def __init__(self, grid_out: GridOut, chunks: Collection, session: ClientSession): self.__chunk_iter = _GridOutChunkIterator(grid_out, chunks, session, 0) @@ -878,7 +878,7 @@ def __init__( # Hold on to the base "fs" collection to create GridOut objects later. self.__root_collection = collection - super(GridOutCursor, self).__init__( + super().__init__( collection.files, filter, skip=skip, @@ -892,7 +892,7 @@ def __init__( def next(self) -> GridOut: """Get next GridOut object from cursor.""" _disallow_transactions(self.session) - next_file = super(GridOutCursor, self).next() + next_file = super().next() return GridOut(self.__root_collection, file_document=next_file, session=self.session) __next__ = next diff --git a/pymongo/_csot.py b/pymongo/_csot.py index 8a4617ecaf..7a5a8a7302 100644 --- a/pymongo/_csot.py +++ b/pymongo/_csot.py @@ -57,7 +57,7 @@ def clamp_remaining(max_timeout: float) -> float: return min(timeout, max_timeout) -class _TimeoutContext(object): +class _TimeoutContext: """Internal timeout context manager. 
Use :func:`pymongo.timeout` instead:: diff --git a/pymongo/aggregation.py b/pymongo/aggregation.py index a13f164f53..a97455cb29 100644 --- a/pymongo/aggregation.py +++ b/pymongo/aggregation.py @@ -21,7 +21,7 @@ from pymongo.read_preferences import ReadPreference, _AggWritePref -class _AggregationCommand(object): +class _AggregationCommand: """The internal abstract base class for aggregation cursors. Should not be called directly by application developers. Use @@ -202,7 +202,7 @@ def _database(self): class _CollectionRawAggregationCommand(_CollectionAggregationCommand): def __init__(self, *args, **kwargs): - super(_CollectionRawAggregationCommand, self).__init__(*args, **kwargs) + super().__init__(*args, **kwargs) # For raw-batches, we set the initial batchSize for the cursor to 0. if not self._performs_write: @@ -216,7 +216,7 @@ def _aggregation_target(self): @property def _cursor_namespace(self): - return "%s.$cmd.aggregate" % (self._target.name,) + return f"{self._target.name}.$cmd.aggregate" @property def _database(self): diff --git a/pymongo/auth.py b/pymongo/auth.py index 4bc31ee97b..ac7cb254e9 100644 --- a/pymongo/auth.py +++ b/pymongo/auth.py @@ -61,7 +61,7 @@ """The authentication mechanisms supported by PyMongo.""" -class _Cache(object): +class _Cache: __slots__ = ("data",) _hash_val = hash("_Cache") @@ -104,7 +104,7 @@ def __hash__(self): def _build_credentials_tuple(mech, source, user, passwd, extra, database): """Build and return a mechanism specific credentials tuple.""" if mech not in ("MONGODB-X509", "MONGODB-AWS", "MONGODB-OIDC") and user is None: - raise ConfigurationError("%s requires a username." % (mech,)) + raise ConfigurationError(f"{mech} requires a username.") if mech == "GSSAPI": if source is not None and source != "$external": raise ValueError("authentication source must be $external or None for GSSAPI") @@ -297,7 +297,7 @@ def _password_digest(username, password): raise TypeError("username must be an instance of str") md5hash = hashlib.md5() - data = "%s:mongo:%s" % (username, password) + data = f"{username}:mongo:{password}" md5hash.update(data.encode("utf-8")) return md5hash.hexdigest() @@ -306,7 +306,7 @@ def _auth_key(nonce, username, password): """Get an auth key to use for authentication.""" digest = _password_digest(username, password) md5hash = hashlib.md5() - data = "%s%s%s" % (nonce, username, digest) + data = f"{nonce}{username}{digest}" md5hash.update(data.encode("utf-8")) return md5hash.hexdigest() @@ -448,7 +448,7 @@ def _authenticate_plain(credentials, sock_info): source = credentials.source username = credentials.username password = credentials.password - payload = ("\x00%s\x00%s" % (username, password)).encode("utf-8") + payload = (f"\x00{username}\x00{password}").encode() cmd = SON( [ ("saslStart", 1), @@ -518,7 +518,7 @@ def _authenticate_default(credentials, sock_info): } -class _AuthContext(object): +class _AuthContext: def __init__(self, credentials, address): self.credentials = credentials self.speculative_authenticate = None @@ -543,7 +543,7 @@ def speculate_succeeded(self): class _ScramContext(_AuthContext): def __init__(self, credentials, address, mechanism): - super(_ScramContext, self).__init__(credentials, address) + super().__init__(credentials, address) self.scram_data = None self.mechanism = mechanism @@ -569,7 +569,7 @@ def speculate_command(self): authenticator = _get_authenticator(self.credentials, self.address) cmd = authenticator.auth_start_cmd(False) if cmd is None: - return + return None cmd["db"] = self.credentials.source 
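
Dropping the explicit codec from ``str.encode()``, as in the SASL PLAIN payload above, is safe because ``encode()`` defaults to UTF-8. A minimal sketch with illustrative credentials, not values from the patch::

    username, password = "user", "pencil"

    old = ("\x00%s\x00%s" % (username, password)).encode("utf-8")
    new = f"\x00{username}\x00{password}".encode()

    # str.encode() defaults to UTF-8, so the byte payloads match.
    assert old == new
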
return cmd diff --git a/pymongo/auth_aws.py b/pymongo/auth_aws.py index e84465ea66..bfa4c731d3 100644 --- a/pymongo/auth_aws.py +++ b/pymongo/auth_aws.py @@ -21,7 +21,7 @@ _HAVE_MONGODB_AWS = True except ImportError: - class AwsSaslContext(object): # type: ignore + class AwsSaslContext: # type: ignore def __init__(self, credentials): pass @@ -102,9 +102,7 @@ def _authenticate_aws(credentials, sock_info): # Clear the cached credentials if we hit a failure in auth. set_cached_credentials(None) # Convert to OperationFailure and include pymongo-auth-aws version. - raise OperationFailure( - "%s (pymongo-auth-aws version %s)" % (exc, pymongo_auth_aws.__version__) - ) + raise OperationFailure(f"{exc} (pymongo-auth-aws version {pymongo_auth_aws.__version__})") except Exception: # Clear the cached credentials if we hit a failure in auth. set_cached_credentials(None) diff --git a/pymongo/auth_oidc.py b/pymongo/auth_oidc.py index 530b1bb068..543dc0200d 100644 --- a/pymongo/auth_oidc.py +++ b/pymongo/auth_oidc.py @@ -131,11 +131,11 @@ def get_current_token(self, use_callbacks=True): refresh_token = self.idp_resp and self.idp_resp.get("refresh_token") refresh_token = refresh_token or "" - context = dict( - timeout_seconds=timeout, - version=CALLBACK_VERSION, - refresh_token=refresh_token, - ) + context = { + "timeout_seconds": timeout, + "version": CALLBACK_VERSION, + "refresh_token": refresh_token, + } if self.idp_resp is None or refresh_cb is None: self.idp_resp = request_cb(self.idp_info, context) @@ -181,7 +181,7 @@ def auth_start_cmd(self, use_callbacks=True): aws_identity_file = os.environ["AWS_WEB_IDENTITY_TOKEN_FILE"] with open(aws_identity_file) as fid: token = fid.read().strip() - payload = dict(jwt=token) + payload = {"jwt": token} cmd = SON( [ ("saslStart", 1), @@ -203,7 +203,7 @@ def auth_start_cmd(self, use_callbacks=True): if self.idp_info is None: # Send the SASL start with the optional principal name. 
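
The payload changes just below swap ``dict(...)`` calls for dict literals. Both build the same mapping; the literal avoids a name lookup and a call, and reads unambiguously. A small sketch with an illustrative token value::

    token = "example-token"  # illustrative only

    old = dict(jwt=token)
    new = {"jwt": token}

    assert old == new
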
- payload = dict() + payload = {} if principal_name: payload["n"] = principal_name @@ -221,7 +221,7 @@ def auth_start_cmd(self, use_callbacks=True): token = self.get_current_token(use_callbacks) if not token: return None - bin_payload = Binary(bson.encode(dict(jwt=token))) + bin_payload = Binary(bson.encode({"jwt": token})) return SON( [ ("saslStart", 1), @@ -268,7 +268,7 @@ def authenticate(self, sock_info, reauthenticate=False): if resp["done"]: sock_info.oidc_token_gen_id = self.token_gen_id - return + return None server_resp: Dict = bson.decode(resp["payload"]) if "issuer" in server_resp: @@ -278,7 +278,7 @@ def authenticate(self, sock_info, reauthenticate=False): conversation_id = resp["conversationId"] token = self.get_current_token() sock_info.oidc_token_gen_id = self.token_gen_id - bin_payload = Binary(bson.encode(dict(jwt=token))) + bin_payload = Binary(bson.encode({"jwt": token})) cmd = SON( [ ("saslContinue", 1), diff --git a/pymongo/bulk.py b/pymongo/bulk.py index b21b576aa5..b0f61b9f9f 100644 --- a/pymongo/bulk.py +++ b/pymongo/bulk.py @@ -60,7 +60,7 @@ _COMMANDS = ("insert", "update", "delete") -class _Run(object): +class _Run: """Represents a batch of write operations.""" def __init__(self, op_type): @@ -136,7 +136,7 @@ def _raise_bulk_write_error(full_result: Any) -> NoReturn: raise BulkWriteError(full_result) -class _Bulk(object): +class _Bulk: """The private guts of the bulk write API.""" def __init__(self, collection, ordered, bypass_document_validation, comment=None, let=None): @@ -509,5 +509,6 @@ def execute(self, write_concern, session): if not write_concern.acknowledged: with client._socket_for_writes(session) as sock_info: self.execute_no_results(sock_info, generator, write_concern) + return None else: return self.execute_command(generator, write_concern, session) diff --git a/pymongo/change_stream.py b/pymongo/change_stream.py index 775f93c79a..c53f981188 100644 --- a/pymongo/change_stream.py +++ b/pymongo/change_stream.py @@ -156,7 +156,8 @@ def _aggregation_command_class(self): @property def _client(self): """The client against which the aggregation commands for - this ChangeStream will be run.""" + this ChangeStream will be run. + """ raise NotImplementedError def _change_stream_options(self): @@ -221,7 +222,7 @@ def _process_result(self, result, sock_info): if self._start_at_operation_time is None: raise OperationFailure( "Expected field 'operationTime' missing from command " - "response : %r" % (result,) + "response : {!r}".format(result) ) def _run_aggregation_cmd(self, session, explicit_session): @@ -473,6 +474,6 @@ class ClusterChangeStream(DatabaseChangeStream, Generic[_DocumentType]): """ def _change_stream_options(self): - options = super(ClusterChangeStream, self)._change_stream_options() + options = super()._change_stream_options() options["allChangesForCluster"] = True return options diff --git a/pymongo/client_options.py b/pymongo/client_options.py index 882474e258..c9f63dc95a 100644 --- a/pymongo/client_options.py +++ b/pymongo/client_options.py @@ -167,7 +167,7 @@ def _parse_pool_options(username, password, database, options): ) -class ClientOptions(object): +class ClientOptions: """Read only configuration options for a MongoClient. Should not be instantiated directly by application developers. 
Access diff --git a/pymongo/client_session.py b/pymongo/client_session.py index 1ec0b16476..dbc5f3aa8d 100644 --- a/pymongo/client_session.py +++ b/pymongo/client_session.py @@ -169,7 +169,7 @@ from pymongo.write_concern import WriteConcern -class SessionOptions(object): +class SessionOptions: """Options for a new :class:`ClientSession`. :Parameters: @@ -203,8 +203,9 @@ def __init__( if not isinstance(default_transaction_options, TransactionOptions): raise TypeError( "default_transaction_options must be an instance of " - "pymongo.client_session.TransactionOptions, not: %r" - % (default_transaction_options,) + "pymongo.client_session.TransactionOptions, not: {!r}".format( + default_transaction_options + ) ) self._default_transaction_options = default_transaction_options self._snapshot = snapshot @@ -232,7 +233,7 @@ def snapshot(self) -> Optional[bool]: return self._snapshot -class TransactionOptions(object): +class TransactionOptions: """Options for :meth:`ClientSession.start_transaction`. :Parameters: @@ -275,25 +276,25 @@ def __init__( if not isinstance(read_concern, ReadConcern): raise TypeError( "read_concern must be an instance of " - "pymongo.read_concern.ReadConcern, not: %r" % (read_concern,) + "pymongo.read_concern.ReadConcern, not: {!r}".format(read_concern) ) if write_concern is not None: if not isinstance(write_concern, WriteConcern): raise TypeError( "write_concern must be an instance of " - "pymongo.write_concern.WriteConcern, not: %r" % (write_concern,) + "pymongo.write_concern.WriteConcern, not: {!r}".format(write_concern) ) if not write_concern.acknowledged: raise ConfigurationError( "transactions do not support unacknowledged write concern" - ": %r" % (write_concern,) + ": {!r}".format(write_concern) ) if read_preference is not None: if not isinstance(read_preference, _ServerMode): raise TypeError( - "%r is not valid for read_preference. See " + "{!r} is not valid for read_preference. See " "pymongo.read_preferences for valid " - "options." 
% (read_preference,) + "options.".format(read_preference) ) if max_commit_time_ms is not None: if not isinstance(max_commit_time_ms, int): @@ -340,12 +341,12 @@ def _validate_session_write_concern(session, write_concern): else: raise ConfigurationError( "Explicit sessions are incompatible with " - "unacknowledged write concern: %r" % (write_concern,) + "unacknowledged write concern: {!r}".format(write_concern) ) return session -class _TransactionContext(object): +class _TransactionContext: """Internal transaction context manager for start_transaction.""" def __init__(self, session): @@ -362,7 +363,7 @@ def __exit__(self, exc_type, exc_val, exc_tb): self.__session.abort_transaction() -class _TxnState(object): +class _TxnState: NONE = 1 STARTING = 2 IN_PROGRESS = 3 @@ -371,7 +372,7 @@ class _TxnState(object): ABORTED = 6 -class _Transaction(object): +class _Transaction: """Internal class to hold transaction information in a ClientSession.""" def __init__(self, opts, client): @@ -973,7 +974,7 @@ def _apply_to(self, command, is_retryable, read_preference, sock_info): if read_preference != ReadPreference.PRIMARY: raise InvalidOperation( "read preference in a transaction must be primary, not: " - "%r" % (read_preference,) + "{!r}".format(read_preference) ) if self._transaction.state == _TxnState.STARTING: @@ -1023,7 +1024,7 @@ def inc_transaction_id(self): self.started_retryable_write = True -class _ServerSession(object): +class _ServerSession: def __init__(self, generation): # Ensure id is type 4, regardless of CodecOptions.uuid_representation. self.session_id = {"id": Binary(uuid.uuid4().bytes, 4)} @@ -1062,7 +1063,7 @@ class _ServerSessionPool(collections.deque): """ def __init__(self, *args, **kwargs): - super(_ServerSessionPool, self).__init__(*args, **kwargs) + super().__init__(*args, **kwargs) self.generation = 0 def reset(self): diff --git a/pymongo/collation.py b/pymongo/collation.py index 3d8503f7d5..bdc996be1b 100644 --- a/pymongo/collation.py +++ b/pymongo/collation.py @@ -21,7 +21,7 @@ from pymongo import common -class CollationStrength(object): +class CollationStrength: """ An enum that defines values for `strength` on a :class:`~pymongo.collation.Collation`. @@ -43,7 +43,7 @@ class CollationStrength(object): """Differentiate unicode code point (characters are exactly identical).""" -class CollationAlternate(object): +class CollationAlternate: """ An enum that defines values for `alternate` on a :class:`~pymongo.collation.Collation`. @@ -62,7 +62,7 @@ class CollationAlternate(object): """ -class CollationMaxVariable(object): +class CollationMaxVariable: """ An enum that defines values for `max_variable` on a :class:`~pymongo.collation.Collation`. @@ -75,7 +75,7 @@ class CollationMaxVariable(object): """Spaces alone are ignored.""" -class CollationCaseFirst(object): +class CollationCaseFirst: """ An enum that defines values for `case_first` on a :class:`~pymongo.collation.Collation`. 
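
The ``class Foo(object)`` rewrites running through this patch are spelling-only on Python 3, where every class is new-style and implicitly derives from ``object``. A sketch of the equivalence, with hypothetical class names::

    class Legacy(object):  # Python 2/3 compatible spelling
        pass

    class Modern:  # Python 3-only spelling
        pass

    # Identical inheritance either way; only the source text differs.
    assert Legacy.__mro__[-1] is object
    assert Modern.__mro__[-1] is object
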
@@ -91,7 +91,7 @@ class CollationCaseFirst(object): """Default for locale or collation strength.""" -class Collation(object): +class Collation: """Collation :Parameters: @@ -163,7 +163,7 @@ def __init__( maxVariable: Optional[str] = None, normalization: Optional[bool] = None, backwards: Optional[bool] = None, - **kwargs: Any + **kwargs: Any, ) -> None: locale = common.validate_string("locale", locale) self.__document: Dict[str, Any] = {"locale": locale} @@ -201,7 +201,7 @@ def document(self) -> Dict[str, Any]: def __repr__(self): document = self.document - return "Collation(%s)" % (", ".join("%s=%r" % (key, document[key]) for key in document),) + return "Collation({})".format(", ".join(f"{key}={document[key]!r}" for key in document)) def __eq__(self, other: Any) -> bool: if isinstance(other, Collation): diff --git a/pymongo/collection.py b/pymongo/collection.py index 91b4013ee8..3b9001240e 100644 --- a/pymongo/collection.py +++ b/pymongo/collection.py @@ -88,7 +88,7 @@ ] -class ReturnDocument(object): +class ReturnDocument: """An enum used with :meth:`~pymongo.collection.Collection.find_one_and_replace` and :meth:`~pymongo.collection.Collection.find_one_and_update`. @@ -201,7 +201,7 @@ def __init__( .. seealso:: The MongoDB documentation on `collections `_. """ - super(Collection, self).__init__( + super().__init__( codec_options or database.codec_options, read_preference or database.read_preference, write_concern or database.write_concern, @@ -212,7 +212,7 @@ def __init__( if not name or ".." in name: raise InvalidName("collection names cannot be empty") - if "$" in name and not (name.startswith("oplog.$main") or name.startswith("$cmd")): + if "$" in name and not (name.startswith(("oplog.$main", "$cmd"))): raise InvalidName("collection names must not contain '$': %r" % name) if name[0] == "." or name[-1] == ".": raise InvalidName("collection names must not start or end with '.': %r" % name) @@ -222,7 +222,7 @@ def __init__( self.__database: Database[_DocumentType] = database self.__name = name - self.__full_name = "%s.%s" % (self.__database.name, self.__name) + self.__full_name = f"{self.__database.name}.{self.__name}" self.__write_response_codec_options = self.codec_options._replace( unicode_decode_error_handler="replace", document_class=dict ) @@ -344,17 +344,17 @@ def __getattr__(self, name: str) -> "Collection[_DocumentType]": - `name`: the name of the collection to get """ if name.startswith("_"): - full_name = "%s.%s" % (self.__name, name) + full_name = f"{self.__name}.{name}" raise AttributeError( - "Collection has no attribute %r. To access the %s" - " collection, use database['%s']." % (name, full_name, full_name) + "Collection has no attribute {!r}. 
To access the {}" + " collection, use database['{}'].".format(name, full_name, full_name) ) return self.__getitem__(name) def __getitem__(self, name: str) -> "Collection[_DocumentType]": return Collection( self.__database, - "%s.%s" % (self.__name, name), + f"{self.__name}.{name}", False, self.codec_options, self.read_preference, @@ -363,7 +363,7 @@ def __getitem__(self, name: str) -> "Collection[_DocumentType]": ) def __repr__(self): - return "Collection(%r, %r)" % (self.__database, self.__name) + return f"Collection({self.__database!r}, {self.__name!r})" def __eq__(self, other: Any) -> bool: if isinstance(other, Collection): @@ -541,7 +541,7 @@ def bulk_write( try: request._add_to_bulk(blk) except AttributeError: - raise TypeError("%r is not a valid request" % (request,)) + raise TypeError(f"{request!r} is not a valid request") write_concern = self._write_concern_for(session) bulk_api_result = blk.execute(write_concern, session) @@ -579,6 +579,7 @@ def _insert_command(session, sock_info, retryable_write): if not isinstance(doc, RawBSONDocument): return doc.get("_id") + return None def insert_one( self, @@ -719,7 +720,7 @@ def gen(): write_concern = self._write_concern_for(session) blk = _Bulk(self, ordered, bypass_document_validation, comment=comment) - blk.ops = [doc for doc in gen()] + blk.ops = list(gen()) blk.execute(write_concern, session=session) return InsertManyResult(inserted_ids, write_concern.acknowledged) @@ -1924,7 +1925,7 @@ def gen_indexes(): for index in indexes: if not isinstance(index, IndexModel): raise TypeError( - "%r is not an instance of pymongo.operations.IndexModel" % (index,) + f"{index!r} is not an instance of pymongo.operations.IndexModel" ) document = index.document names.append(document["name"]) @@ -2442,7 +2443,6 @@ def aggregate( .. _aggregate command: https://mongodb.com/docs/manual/reference/command/aggregate """ - with self.__database.client._tmp_session(session, close=False) as s: return self._aggregate( _CollectionAggregationCommand, @@ -2687,7 +2687,7 @@ def rename( if "$" in new_name and not new_name.startswith("oplog.$main"): raise InvalidName("collection names must not contain '$'") - new_name = "%s.%s" % (self.__database.name, new_name) + new_name = f"{self.__database.name}.{new_name}" cmd = SON([("renameCollection", self.__full_name), ("to", new_name)]) cmd.update(kwargs) if comment is not None: @@ -2794,7 +2794,6 @@ def __find_and_modify( **kwargs, ): """Internal findAndModify helper.""" - common.validate_is_mapping("filter", filter) if not isinstance(return_document, bool): raise ValueError( diff --git a/pymongo/command_cursor.py b/pymongo/command_cursor.py index 6f3f244419..d57b45154d 100644 --- a/pymongo/command_cursor.py +++ b/pymongo/command_cursor.py @@ -132,13 +132,15 @@ def batch_size(self, batch_size: int) -> "CommandCursor[_DocumentType]": def _has_next(self): """Returns `True` if the cursor has documents remaining from the - previous batch.""" + previous batch. + """ return len(self.__data) > 0 @property def _post_batch_resume_token(self): """Retrieve the postBatchResumeToken from the response to a - changeStream aggregate or getMore.""" + changeStream aggregate or getMore. + """ return self.__postbatchresumetoken def _maybe_pin_connection(self, sock_info): @@ -328,7 +330,7 @@ def __init__( .. seealso:: The MongoDB documentation on `cursors `_. 
""" assert not cursor_info.get("firstBatch") - super(RawBatchCommandCursor, self).__init__( + super().__init__( collection, cursor_info, address, diff --git a/pymongo/common.py b/pymongo/common.py index 4e39c8e514..82c773695a 100644 --- a/pymongo/common.py +++ b/pymongo/common.py @@ -157,7 +157,7 @@ def clean_node(node: str) -> Tuple[str, int]: def raise_config_error(key: str, dummy: Any) -> NoReturn: """Raise ConfigurationError with the given key name.""" - raise ConfigurationError("Unknown option %s" % (key,)) + raise ConfigurationError(f"Unknown option {key}") # Mapping of URI uuid representation options to valid subtypes. @@ -174,14 +174,14 @@ def validate_boolean(option: str, value: Any) -> bool: """Validates that 'value' is True or False.""" if isinstance(value, bool): return value - raise TypeError("%s must be True or False" % (option,)) + raise TypeError(f"{option} must be True or False") def validate_boolean_or_string(option: str, value: Any) -> bool: """Validates that value is True, False, 'true', or 'false'.""" if isinstance(value, str): if value not in ("true", "false"): - raise ValueError("The value of %s must be 'true' or 'false'" % (option,)) + raise ValueError(f"The value of {option} must be 'true' or 'false'") return value == "true" return validate_boolean(option, value) @@ -194,15 +194,15 @@ def validate_integer(option: str, value: Any) -> int: try: return int(value) except ValueError: - raise ValueError("The value of %s must be an integer" % (option,)) - raise TypeError("Wrong type for %s, value must be an integer" % (option,)) + raise ValueError(f"The value of {option} must be an integer") + raise TypeError(f"Wrong type for {option}, value must be an integer") def validate_positive_integer(option: str, value: Any) -> int: """Validate that 'value' is a positive integer, which does not include 0.""" val = validate_integer(option, value) if val <= 0: - raise ValueError("The value of %s must be a positive integer" % (option,)) + raise ValueError(f"The value of {option} must be a positive integer") return val @@ -210,7 +210,7 @@ def validate_non_negative_integer(option: str, value: Any) -> int: """Validate that 'value' is a positive integer or 0.""" val = validate_integer(option, value) if val < 0: - raise ValueError("The value of %s must be a non negative integer" % (option,)) + raise ValueError(f"The value of {option} must be a non negative integer") return val @@ -221,7 +221,7 @@ def validate_readable(option: str, value: Any) -> Optional[str]: # First make sure its a string py3.3 open(True, 'r') succeeds # Used in ssl cert checking due to poor ssl module error reporting value = validate_string(option, value) - open(value, "r").close() + open(value).close() return value @@ -243,7 +243,7 @@ def validate_string(option: str, value: Any) -> str: """Validates that 'value' is an instance of `str`.""" if isinstance(value, str): return value - raise TypeError("Wrong type for %s, value must be an instance of str" % (option,)) + raise TypeError(f"Wrong type for {option}, value must be an instance of str") def validate_string_or_none(option: str, value: Any) -> Optional[str]: @@ -262,7 +262,7 @@ def validate_int_or_basestring(option: str, value: Any) -> Union[int, str]: return int(value) except ValueError: return value - raise TypeError("Wrong type for %s, value must be an integer or a string" % (option,)) + raise TypeError(f"Wrong type for {option}, value must be an integer or a string") def validate_non_negative_int_or_basestring(option: Any, value: Any) -> Union[int, str]: @@ 
-275,16 +275,14 @@ def validate_non_negative_int_or_basestring(option: Any, value: Any) -> Union[in
         except ValueError:
             return value
         return validate_non_negative_integer(option, val)
-    raise TypeError(
-        "Wrong type for %s, value must be an non negative integer or a string" % (option,)
-    )
+    raise TypeError(f"Wrong type for {option}, value must be a non negative integer or a string")
 
 
 def validate_positive_float(option: str, value: Any) -> float:
     """Validates that 'value' is a float, or can be converted to one, and is
     positive.
     """
-    errmsg = "%s must be an integer or float" % (option,)
+    errmsg = f"{option} must be an integer or float"
     try:
         value = float(value)
     except ValueError:
@@ -295,7 +293,7 @@ def validate_positive_float(option: str, value: Any) -> float:
     # float('inf') doesn't work in 2.4 or 2.5 on Windows, so just cap floats at
     # one billion - this is a reasonable approximation for infinity
     if not 0 < value < 1e9:
-        raise ValueError("%s must be greater than 0 and less than one billion" % (option,))
+        raise ValueError(f"{option} must be greater than 0 and less than one billion")
     return value
@@ -324,7 +322,7 @@ def validate_timeout_or_zero(option: str, value: Any) -> float:
     config error.
     """
     if value is None:
-        raise ConfigurationError("%s cannot be None" % (option,))
+        raise ConfigurationError(f"{option} cannot be None")
     if value == 0 or value == "0":
         return 0
     return validate_positive_float(option, value) / 1000.0
@@ -360,7 +358,7 @@ def validate_max_staleness(option: str, value: Any) -> int:
 
 
 def validate_read_preference(dummy: Any, value: Any) -> _ServerMode:
     """Validate a read preference."""
     if not isinstance(value, _ServerMode):
-        raise TypeError("%r is not a read preference." % (value,))
+        raise TypeError(f"{value!r} is not a read preference.")
     return value
@@ -372,14 +370,14 @@ def validate_read_preference_mode(dummy: Any, value: Any) -> _ServerMode:
     mode.
     """
     if value not in _MONGOS_MODES:
-        raise ValueError("%s is not a valid read preference" % (value,))
+        raise ValueError(f"{value} is not a valid read preference")
     return value
 
 
 def validate_auth_mechanism(option: str, value: Any) -> str:
     """Validate the authMechanism URI option."""
     if value not in MECHANISMS:
-        raise ValueError("%s must be in %s" % (option, tuple(MECHANISMS)))
+        raise ValueError(f"{option} must be in {tuple(MECHANISMS)}")
     return value
@@ -389,9 +387,9 @@ def validate_uuid_representation(dummy: Any, value: Any) -> int:
         return _UUID_REPRESENTATIONS[value]
     except KeyError:
         raise ValueError(
-            "%s is an invalid UUID representation. "
+            "{} is an invalid UUID representation. "
             "Must be one of "
-            "%s" % (value, tuple(_UUID_REPRESENTATIONS))
+            "{}".format(value, tuple(_UUID_REPRESENTATIONS))
         )
@@ -412,7 +410,7 @@ def validate_read_preference_tags(name: str, value: Any) -> List[Dict[str, str]]
                 tags[unquote_plus(key)] = unquote_plus(val)
             tag_sets.append(tags)
         except Exception:
-            raise ValueError("%r not a valid value for %s" % (tag_set, name))
+            raise ValueError(f"{tag_set!r} not a valid value for {name}")
     return tag_sets
@@ -472,13 +470,13 @@ def validate_auth_mechanism_properties(option: str, value: Any) -> Dict[str, Uni
             raise ValueError(
                 "auth mechanism properties must be "
                 "key:value pairs like SERVICE_NAME:"
-                "mongodb, not %s." % (opt,)
+                "mongodb, not {}.".format(opt)
             )
         if key not in _MECHANISM_PROPS:
             raise ValueError(
-                "%s is not a supported auth "
+                "{} is not a supported auth "
                 "mechanism property. Must be one of "
-                "%s."
% (key, tuple(_MECHANISM_PROPS)) + "{}.".format(key, tuple(_MECHANISM_PROPS)) ) if key == "CANONICALIZE_HOST_NAME": props[key] = validate_boolean_or_string(key, val) @@ -502,9 +500,9 @@ def validate_document_class( is_mapping = issubclass(value.__origin__, abc.MutableMapping) if not is_mapping and not issubclass(value, RawBSONDocument): raise TypeError( - "%s must be dict, bson.son.SON, " + "{} must be dict, bson.son.SON, " "bson.raw_bson.RawBSONDocument, or a " - "subclass of collections.MutableMapping" % (option,) + "subclass of collections.MutableMapping".format(option) ) return value @@ -512,14 +510,14 @@ def validate_document_class( def validate_type_registry(option: Any, value: Any) -> Optional[TypeRegistry]: """Validate the type_registry option.""" if value is not None and not isinstance(value, TypeRegistry): - raise TypeError("%s must be an instance of %s" % (option, TypeRegistry)) + raise TypeError(f"{option} must be an instance of {TypeRegistry}") return value def validate_list(option: str, value: Any) -> List: """Validates that 'value' is a list.""" if not isinstance(value, list): - raise TypeError("%s must be a list" % (option,)) + raise TypeError(f"{option} must be a list") return value @@ -534,9 +532,9 @@ def validate_list_or_mapping(option: Any, value: Any) -> None: """Validates that 'value' is a list or a document.""" if not isinstance(value, (abc.Mapping, list)): raise TypeError( - "%s must either be a list or an instance of dict, " + "{} must either be a list or an instance of dict, " "bson.son.SON, or any other type that inherits from " - "collections.Mapping" % (option,) + "collections.Mapping".format(option) ) @@ -544,9 +542,9 @@ def validate_is_mapping(option: str, value: Any) -> None: """Validate the type of method arguments that expect a document.""" if not isinstance(value, abc.Mapping): raise TypeError( - "%s must be an instance of dict, bson.son.SON, or " + "{} must be an instance of dict, bson.son.SON, or " "any other type that inherits from " - "collections.Mapping" % (option,) + "collections.Mapping".format(option) ) @@ -554,10 +552,10 @@ def validate_is_document_type(option: str, value: Any) -> None: """Validate the type of method arguments that expect a MongoDB document.""" if not isinstance(value, (abc.MutableMapping, RawBSONDocument)): raise TypeError( - "%s must be an instance of dict, bson.son.SON, " + "{} must be an instance of dict, bson.son.SON, " "bson.raw_bson.RawBSONDocument, or " "a type that inherits from " - "collections.MutableMapping" % (option,) + "collections.MutableMapping".format(option) ) @@ -568,7 +566,7 @@ def validate_appname_or_none(option: str, value: Any) -> Optional[str]: validate_string(option, value) # We need length in bytes, so encode utf8 first. 
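
The appname check below compares encoded bytes rather than characters: ``len()`` on a str counts code points, while the server limit is on UTF-8 bytes. A small illustration, assuming a made-up appname::

    appname = "caf\u00e9" * 32  # 128 characters

    assert len(appname) == 128
    # "é" encodes to two bytes, so the UTF-8 length is larger.
    assert len(appname.encode("utf-8")) == 160
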
if len(value.encode("utf-8")) > 128: - raise ValueError("%s must be <= 128 bytes" % (option,)) + raise ValueError(f"{option} must be <= 128 bytes") return value @@ -577,7 +575,7 @@ def validate_driver_or_none(option: Any, value: Any) -> Optional[DriverInfo]: if value is None: return value if not isinstance(value, DriverInfo): - raise TypeError("%s must be an instance of DriverInfo" % (option,)) + raise TypeError(f"{option} must be an instance of DriverInfo") return value @@ -586,7 +584,7 @@ def validate_server_api_or_none(option: Any, value: Any) -> Optional[ServerApi]: if value is None: return value if not isinstance(value, ServerApi): - raise TypeError("%s must be an instance of ServerApi" % (option,)) + raise TypeError(f"{option} must be an instance of ServerApi") return value @@ -595,7 +593,7 @@ def validate_is_callable_or_none(option: Any, value: Any) -> Optional[Callable]: if value is None: return value if not callable(value): - raise ValueError("%s must be a callable" % (option,)) + raise ValueError(f"{option} must be a callable") return value @@ -629,9 +627,9 @@ def validate_unicode_decode_error_handler(dummy: Any, value: str) -> str: """Validate the Unicode decode error handler option of CodecOptions.""" if value not in _UNICODE_DECODE_ERROR_HANDLERS: raise ValueError( - "%s is an invalid Unicode decode error handler. " + "{} is an invalid Unicode decode error handler. " "Must be one of " - "%s" % (value, tuple(_UNICODE_DECODE_ERROR_HANDLERS)) + "{}".format(value, tuple(_UNICODE_DECODE_ERROR_HANDLERS)) ) return value @@ -650,7 +648,7 @@ def validate_auto_encryption_opts_or_none(option: Any, value: Any) -> Optional[A from pymongo.encryption_options import AutoEncryptionOpts if not isinstance(value, AutoEncryptionOpts): - raise TypeError("%s must be an instance of AutoEncryptionOpts" % (option,)) + raise TypeError(f"{option} must be an instance of AutoEncryptionOpts") return value @@ -667,7 +665,7 @@ def validate_datetime_conversion(option: Any, value: Any) -> Optional[DatetimeCo elif isinstance(value, int): return DatetimeConversion(value) - raise TypeError("%s must be a str or int representing DatetimeConversion" % (option,)) + raise TypeError(f"{option} must be a str or int representing DatetimeConversion") # Dictionary where keys are the names of public URI options, and values @@ -805,7 +803,7 @@ def validate_auth_option(option: str, value: Any) -> Tuple[str, Any]: """Validate optional authentication parameters.""" lower, value = validate(option, value) if lower not in _AUTH_OPTIONS: - raise ConfigurationError("Unknown authentication option: %s" % (option,)) + raise ConfigurationError(f"Unknown authentication option: {option}") return option, value @@ -866,7 +864,7 @@ def _ecoc_coll_name(encrypted_fields, name): WRITE_CONCERN_OPTIONS = frozenset(["w", "wtimeout", "wtimeoutms", "fsync", "j", "journal"]) -class BaseObject(object): +class BaseObject: """A base class that provides attributes and methods common to multiple pymongo classes. @@ -886,9 +884,9 @@ def __init__( if not isinstance(read_preference, _ServerMode): raise TypeError( - "%r is not valid for read_preference. See " + "{!r} is not valid for read_preference. See " "pymongo.read_preferences for valid " - "options." 
% (read_preference,) + "options.".format(read_preference) ) self.__read_preference = read_preference diff --git a/pymongo/compression_support.py b/pymongo/compression_support.py index c9632a43d3..40bad403f3 100644 --- a/pymongo/compression_support.py +++ b/pymongo/compression_support.py @@ -40,8 +40,8 @@ from pymongo.hello import HelloCompat from pymongo.monitoring import _SENSITIVE_COMMANDS -_SUPPORTED_COMPRESSORS = set(["snappy", "zlib", "zstd"]) -_NO_COMPRESSION = set([HelloCompat.CMD, HelloCompat.LEGACY_CMD]) +_SUPPORTED_COMPRESSORS = {"snappy", "zlib", "zstd"} +_NO_COMPRESSION = {HelloCompat.CMD, HelloCompat.LEGACY_CMD} _NO_COMPRESSION.update(_SENSITIVE_COMMANDS) @@ -56,7 +56,7 @@ def validate_compressors(dummy, value): for compressor in compressors[:]: if compressor not in _SUPPORTED_COMPRESSORS: compressors.remove(compressor) - warnings.warn("Unsupported compressor: %s" % (compressor,)) + warnings.warn(f"Unsupported compressor: {compressor}") elif compressor == "snappy" and not _HAVE_SNAPPY: compressors.remove(compressor) warnings.warn( @@ -82,13 +82,13 @@ def validate_zlib_compression_level(option, value): try: level = int(value) except Exception: - raise TypeError("%s must be an integer, not %r." % (option, value)) + raise TypeError(f"{option} must be an integer, not {value!r}.") if level < -1 or level > 9: raise ValueError("%s must be between -1 and 9, not %d." % (option, level)) return level -class CompressionSettings(object): +class CompressionSettings: def __init__(self, compressors, zlib_compression_level): self.compressors = compressors self.zlib_compression_level = zlib_compression_level @@ -102,9 +102,11 @@ def get_compression_context(self, compressors): return ZlibContext(self.zlib_compression_level) elif chosen == "zstd": return ZstdContext() + return None + return None -class SnappyContext(object): +class SnappyContext: compressor_id = 1 @staticmethod @@ -112,7 +114,7 @@ def compress(data): return snappy.compress(data) -class ZlibContext(object): +class ZlibContext: compressor_id = 2 def __init__(self, level): @@ -122,7 +124,7 @@ def compress(self, data: bytes) -> bytes: return zlib.compress(data, self.level) -class ZstdContext(object): +class ZstdContext: compressor_id = 3 @staticmethod diff --git a/pymongo/cursor.py b/pymongo/cursor.py index ccf0bfd71b..cc4e1a1146 100644 --- a/pymongo/cursor.py +++ b/pymongo/cursor.py @@ -97,7 +97,7 @@ } -class CursorType(object): +class CursorType: NON_TAILABLE = 0 """The standard cursor type.""" @@ -126,7 +126,7 @@ class CursorType(object): """ -class _SocketManager(object): +class _SocketManager: """Used with exhaust cursors to ensure the socket is returned.""" def __init__(self, sock, more_to_come): @@ -387,11 +387,11 @@ def _clone(self, deepcopy=True, base=None): "exhaust", "has_filter", ) - data = dict( - (k, v) + data = { + k: v for k, v in self.__dict__.items() if k.startswith("_Cursor__") and k[9:] in values_to_clone - ) + } if deepcopy: data = self._deepcopy(data) base.__dict__.update(data) @@ -412,7 +412,7 @@ def __die(self, synchronous=False): self.__killed = True if self.__id and not already_killed: cursor_id = self.__id - address = _CursorAddress(self.__address, "%s.%s" % (self.__dbname, self.__collname)) + address = _CursorAddress(self.__address, f"{self.__dbname}.{self.__collname}") else: # Skip killCursors. cursor_id = 0 @@ -1322,7 +1322,7 @@ def __init__(self, collection: "Collection[_DocumentType]", *args: Any, **kwargs .. seealso:: The MongoDB documentation on `cursors `_. 
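
The ``_SUPPORTED_COMPRESSORS`` and ``_NO_COMPRESSION`` rewrites above replace ``set([...])`` with set literals; the contents are identical, the literal just builds the set directly instead of going through a throwaway list::

    old = set(["snappy", "zlib", "zstd"])
    new = {"snappy", "zlib", "zstd"}

    assert old == new
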
""" - super(RawBatchCursor, self).__init__(collection, *args, **kwargs) + super().__init__(collection, *args, **kwargs) def _unpack_response( self, response, cursor_id, codec_options, user_fields=None, legacy_response=False diff --git a/pymongo/database.py b/pymongo/database.py index 1e19d860e3..66cfce2090 100644 --- a/pymongo/database.py +++ b/pymongo/database.py @@ -125,7 +125,7 @@ def __init__( db.__my_collection__ """ - super(Database, self).__init__( + super().__init__( codec_options or client.codec_options, read_preference or client.read_preference, write_concern or client.write_concern, @@ -211,7 +211,7 @@ def __hash__(self) -> int: return hash((self.__client, self.__name)) def __repr__(self): - return "Database(%r, %r)" % (self.__client, self.__name) + return f"Database({self.__client!r}, {self.__name!r})" def __getattr__(self, name: str) -> Collection[_DocumentType]: """Get a collection of this database by name. @@ -223,8 +223,8 @@ def __getattr__(self, name: str) -> Collection[_DocumentType]: """ if name.startswith("_"): raise AttributeError( - "Database has no attribute %r. To access the %s" - " collection, use database[%r]." % (name, name, name) + "Database has no attribute {!r}. To access the {}" + " collection, use database[{!r}].".format(name, name, name) ) return self.__getitem__(name) @@ -415,9 +415,9 @@ def create_collection( { // key pattern must be {_id: 1} key: , // required - unique: , // required, must be ‘true’ + unique: , // required, must be `true` name: , // optional, otherwise automatically generated - v: , // optional, must be ‘2’ if provided + v: , // optional, must be `2` if provided } - ``changeStreamPreAndPostImages`` (dict): a document with a boolean field ``enabled`` for enabling pre- and post-images. @@ -863,7 +863,6 @@ def _cmd(session, server, sock_info, read_preference): def _list_collections(self, sock_info, session, read_preference, **kwargs): """Internal listCollections helper.""" - coll = self.get_collection("$cmd", read_preference=read_preference) cmd = SON([("listCollections", 1), ("cursor", {})]) cmd.update(kwargs) @@ -1128,14 +1127,14 @@ def validate_collection( if "result" in result: info = result["result"] if info.find("exception") != -1 or info.find("corrupt") != -1: - raise CollectionInvalid("%s invalid: %s" % (name, info)) + raise CollectionInvalid(f"{name} invalid: {info}") # Sharded results elif "raw" in result: for _, res in result["raw"].items(): if "result" in res: info = res["result"] if info.find("exception") != -1 or info.find("corrupt") != -1: - raise CollectionInvalid("%s invalid: %s" % (name, info)) + raise CollectionInvalid(f"{name} invalid: {info}") elif not res.get("valid", False): valid = False break @@ -1144,7 +1143,7 @@ def validate_collection( valid = False if not valid: - raise CollectionInvalid("%s invalid: %r" % (name, result)) + raise CollectionInvalid(f"{name} invalid: {result!r}") return result @@ -1200,7 +1199,7 @@ def dereference( if dbref.database is not None and dbref.database != self.__name: raise ValueError( "trying to dereference a DBRef that points to " - "another database (%r not %r)" % (dbref.database, self.__name) + "another database ({!r} not {!r})".format(dbref.database, self.__name) ) return self[dbref.collection].find_one( {"_id": dbref.id}, session=session, comment=comment, **kwargs diff --git a/pymongo/driver_info.py b/pymongo/driver_info.py index 53fbfd3428..86ddfcfb3e 100644 --- a/pymongo/driver_info.py +++ b/pymongo/driver_info.py @@ -31,12 +31,12 @@ class DriverInfo(namedtuple("DriverInfo", 
["name", "version", "platform"])): def __new__( cls, name: str, version: Optional[str] = None, platform: Optional[str] = None ) -> "DriverInfo": - self = super(DriverInfo, cls).__new__(cls, name, version, platform) + self = super().__new__(cls, name, version, platform) for key, value in self._asdict().items(): if value is not None and not isinstance(value, str): raise TypeError( - "Wrong type for DriverInfo %s option, value " - "must be an instance of str" % (key,) + "Wrong type for DriverInfo {} option, value " + "must be an instance of str".format(key) ) return self diff --git a/pymongo/encryption.py b/pymongo/encryption.py index 4c46bf56ae..f2eb71ce71 100644 --- a/pymongo/encryption.py +++ b/pymongo/encryption.py @@ -177,6 +177,7 @@ def collection_info(self, database, filter): with self.client_ref()[database].list_collections(filter=RawBSONDocument(filter)) as cursor: for doc in cursor: return _dict_to_bson(doc, False, _DATA_KEY_OPTS) + return None def spawn(self): """Spawn mongocryptd. @@ -272,7 +273,7 @@ def close(self): self.mongocryptd_client = None -class RewrapManyDataKeyResult(object): +class RewrapManyDataKeyResult: """Result object returned by a :meth:`~ClientEncryption.rewrap_many_data_key` operation. .. versionadded:: 4.2 @@ -292,11 +293,12 @@ def bulk_write_result(self) -> Optional[BulkWriteResult]: return self._bulk_write_result -class _Encrypter(object): +class _Encrypter: """Encrypts and decrypts MongoDB commands. This class is used to support automatic encryption and decryption of - MongoDB commands.""" + MongoDB commands. + """ def __init__(self, client, opts): """Create a _Encrypter for a client. diff --git a/pymongo/encryption_options.py b/pymongo/encryption_options.py index 0cb96d7dad..e87d96b31a 100644 --- a/pymongo/encryption_options.py +++ b/pymongo/encryption_options.py @@ -31,7 +31,7 @@ from pymongo.mongo_client import MongoClient -class AutoEncryptionOpts(object): +class AutoEncryptionOpts: """Options to configure automatic client-side field level encryption.""" def __init__( diff --git a/pymongo/errors.py b/pymongo/errors.py index 192eec99d9..36f97f4b5a 100644 --- a/pymongo/errors.py +++ b/pymongo/errors.py @@ -33,7 +33,7 @@ class PyMongoError(Exception): """Base class for all PyMongo exceptions.""" def __init__(self, message: str = "", error_labels: Optional[Iterable[str]] = None) -> None: - super(PyMongoError, self).__init__(message) + super().__init__(message) self._message = message self._error_labels = set(error_labels or []) @@ -105,7 +105,7 @@ def __init__( if errors is not None: if isinstance(errors, dict): error_labels = errors.get("errorLabels") - super(AutoReconnect, self).__init__(message, error_labels) + super().__init__(message, error_labels) self.errors = self.details = errors or [] @@ -125,7 +125,7 @@ def timeout(self) -> bool: def _format_detailed_error(message, details): if details is not None: - message = "%s, full error: %s" % (message, details) + message = f"{message}, full error: {details}" return message @@ -148,9 +148,7 @@ class NotPrimaryError(AutoReconnect): def __init__( self, message: str = "", errors: Optional[Union[Mapping[str, Any], List]] = None ) -> None: - super(NotPrimaryError, self).__init__( - _format_detailed_error(message, errors), errors=errors - ) + super().__init__(_format_detailed_error(message, errors), errors=errors) class ServerSelectionTimeoutError(AutoReconnect): @@ -191,9 +189,7 @@ def __init__( error_labels = None if details is not None: error_labels = details.get("errorLabels") - super(OperationFailure, 
self).__init__( - _format_detailed_error(error, details), error_labels=error_labels - ) + super().__init__(_format_detailed_error(error, details), error_labels=error_labels) self.__code = code self.__details = details self.__max_wire_version = max_wire_version @@ -293,7 +289,7 @@ class BulkWriteError(OperationFailure): details: Mapping[str, Any] def __init__(self, results: Mapping[str, Any]) -> None: - super(BulkWriteError, self).__init__("batch op errors occurred", 65, results) + super().__init__("batch op errors occurred", 65, results) def __reduce__(self) -> Tuple[Any, Any]: return self.__class__, (self.details,) @@ -331,8 +327,6 @@ class InvalidURI(ConfigurationError): class DocumentTooLarge(InvalidDocument): """Raised when an encoded document is too large for the connected server.""" - pass - class EncryptionError(PyMongoError): """Raised when encryption or decryption fails. @@ -344,7 +338,7 @@ class EncryptionError(PyMongoError): """ def __init__(self, cause: Exception) -> None: - super(EncryptionError, self).__init__(str(cause)) + super().__init__(str(cause)) self.__cause = cause @property @@ -369,7 +363,7 @@ class EncryptedCollectionError(EncryptionError): """ def __init__(self, cause: Exception, encrypted_fields: Mapping[str, Any]) -> None: - super(EncryptedCollectionError, self).__init__(cause) + super().__init__(cause) self.__encrypted_fields = encrypted_fields @property @@ -386,5 +380,3 @@ def encrypted_fields(self) -> Mapping[str, Any]: class _OperationCancelled(AutoReconnect): """Internal error raised when a socket operation is cancelled.""" - - pass diff --git a/pymongo/helpers.py b/pymongo/helpers.py index 1a753c66f4..f4582854dc 100644 --- a/pymongo/helpers.py +++ b/pymongo/helpers.py @@ -74,7 +74,7 @@ def _gen_index_name(keys): """Generate an index name from the set of fields it is over.""" - return "_".join(["%s_%s" % item for item in keys]) + return "_".join(["{}_{}".format(*item) for item in keys]) def _index_list(key_or_list, direction=None): @@ -248,12 +248,10 @@ def _fields_list_to_dict(fields, option_name): if isinstance(fields, (abc.Sequence, abc.Set)): if not all(isinstance(field, str) for field in fields): - raise TypeError( - "%s must be a list of key names, each an instance of str" % (option_name,) - ) + raise TypeError(f"{option_name} must be a list of key names, each an instance of str") return dict.fromkeys(fields, 1) - raise TypeError("%s must be a mapping or list of key names" % (option_name,)) + raise TypeError(f"{option_name} must be a mapping or list of key names") def _handle_exception(): @@ -266,7 +264,7 @@ def _handle_exception(): einfo = sys.exc_info() try: traceback.print_exception(einfo[0], einfo[1], einfo[2], None, sys.stderr) - except IOError: + except OSError: pass finally: del einfo diff --git a/pymongo/message.py b/pymongo/message.py index 3510d210a5..34f6e6235d 100644 --- a/pymongo/message.py +++ b/pymongo/message.py @@ -115,7 +115,6 @@ def _convert_exception(exception): def _convert_write_result(operation, command, result): """Convert a legacy write result to write command format.""" - # Based on _merge_legacy from bulk.py affected = result.get("n", 0) res = {"ok": 1, "n": affected} @@ -240,7 +239,7 @@ def _gen_get_more_command(cursor_id, coll, batch_size, max_await_time_ms, commen return cmd -class _Query(object): +class _Query: """A query operation.""" __slots__ = ( @@ -310,7 +309,7 @@ def reset(self): self._as_command = None def namespace(self): - return "%s.%s" % (self.db, self.coll) + return f"{self.db}.{self.coll}" def 
use_command(self, sock_info): use_find_cmd = False @@ -421,7 +420,7 @@ def get_message(self, read_preference, sock_info, use_cmd=False): ) -class _GetMore(object): +class _GetMore: """A getmore operation.""" __slots__ = ( @@ -475,7 +474,7 @@ def reset(self): self._as_command = None def namespace(self): - return "%s.%s" % (self.db, self.coll) + return f"{self.db}.{self.coll}" def use_command(self, sock_info): use_cmd = False @@ -518,7 +517,6 @@ def as_command(self, sock_info, apply_timeout=False): def get_message(self, dummy0, sock_info, use_cmd=False): """Get a getmore message.""" - ns = self.namespace() ctx = sock_info.compression_context @@ -539,7 +537,7 @@ def get_message(self, dummy0, sock_info, use_cmd=False): class _RawBatchQuery(_Query): def use_command(self, sock_info): # Compatibility checks. - super(_RawBatchQuery, self).use_command(sock_info) + super().use_command(sock_info) if sock_info.max_wire_version >= 8: # MongoDB 4.2+ supports exhaust over OP_MSG return True @@ -551,7 +549,7 @@ def use_command(self, sock_info): class _RawBatchGetMore(_GetMore): def use_command(self, sock_info): # Compatibility checks. - super(_RawBatchGetMore, self).use_command(sock_info) + super().use_command(sock_info) if sock_info.max_wire_version >= 8: # MongoDB 4.2+ supports exhaust over OP_MSG return True @@ -578,7 +576,7 @@ def namespace(self): def __hash__(self): # Two _CursorAddress instances with different namespaces # must not hash the same. - return (self + (self.__namespace,)).__hash__() + return ((*self, self.__namespace)).__hash__() def __eq__(self, other): if isinstance(other, _CursorAddress): @@ -648,7 +646,7 @@ def _op_msg_no_header(flags, command, identifier, docs, opts): encoded_size = _pack_int(size) total_size += size max_doc_size = max(len(doc) for doc in encoded_docs) - data = [flags_type, encoded, type_one, encoded_size, cstring] + encoded_docs + data = [flags_type, encoded, type_one, encoded_size, cstring, *encoded_docs] else: data = [flags_type, encoded] return b"".join(data), total_size, max_doc_size @@ -795,7 +793,7 @@ def _get_more(collection_name, num_to_return, cursor_id, ctx=None): return _get_more_uncompressed(collection_name, num_to_return, cursor_id) -class _BulkWriteContext(object): +class _BulkWriteContext: """A wrapper around SocketInfo for use with write splitting functions.""" __slots__ = ( @@ -1033,7 +1031,7 @@ def _raise_document_too_large(operation: str, doc_size: int, max_size: int) -> N else: # There's nothing intelligent we can say # about size for update and delete - raise DocumentTooLarge("%r command document too large" % (operation,)) + raise DocumentTooLarge(f"{operation!r} command document too large") # OP_MSG ------------------------------------------------------------- @@ -1253,7 +1251,7 @@ def _batched_write_command_impl(namespace, operation, command, docs, opts, ctx, return to_send, length -class _OpReply(object): +class _OpReply: """A MongoDB OP_REPLY response message.""" __slots__ = ("flags", "cursor_id", "number_returned", "documents") @@ -1363,7 +1361,7 @@ def unpack(cls, msg): return cls(flags, cursor_id, number_returned, documents) -class _OpMsg(object): +class _OpMsg: """A MongoDB OP_MSG response message.""" __slots__ = ("flags", "cursor_id", "number_returned", "payload_document") @@ -1427,12 +1425,12 @@ def unpack(cls, msg): flags, first_payload_type, first_payload_size = cls.UNPACK_FROM(msg) if flags != 0: if flags & cls.CHECKSUM_PRESENT: - raise ProtocolError("Unsupported OP_MSG flag checksumPresent: 0x%x" % (flags,)) + raise 
ProtocolError(f"Unsupported OP_MSG flag checksumPresent: 0x{flags:x}") if flags ^ cls.MORE_TO_COME: - raise ProtocolError("Unsupported OP_MSG flags: 0x%x" % (flags,)) + raise ProtocolError(f"Unsupported OP_MSG flags: 0x{flags:x}") if first_payload_type != 0: - raise ProtocolError("Unsupported OP_MSG payload type: 0x%x" % (first_payload_type,)) + raise ProtocolError(f"Unsupported OP_MSG payload type: 0x{first_payload_type:x}") if len(msg) != first_payload_size + 5: raise ProtocolError("Unsupported OP_MSG reply: >1 section") diff --git a/pymongo/mongo_client.py b/pymongo/mongo_client.py index ca60affdf5..ccfaaa31c1 100644 --- a/pymongo/mongo_client.py +++ b/pymongo/mongo_client.py @@ -805,7 +805,7 @@ def __init__( self.__kill_cursors_queue: List = [] self._event_listeners = options.pool_options._event_listeners - super(MongoClient, self).__init__( + super().__init__( options.codec_options, options.read_preference, options.write_concern, @@ -1509,11 +1509,11 @@ def option_repr(option, value): if value is dict: return "document_class=dict" else: - return "document_class=%s.%s" % (value.__module__, value.__name__) + return f"document_class={value.__module__}.{value.__name__}" if option in common.TIMEOUT_OPTIONS and value is not None: - return "%s=%s" % (option, int(value * 1000)) + return f"{option}={int(value * 1000)}" - return "%s=%r" % (option, value) + return f"{option}={value!r}" # Host first... options = [ @@ -1536,7 +1536,7 @@ def option_repr(option, value): return ", ".join(options) def __repr__(self): - return "MongoClient(%s)" % (self._repr_helper(),) + return f"MongoClient({self._repr_helper()})" def __getattr__(self, name: str) -> database.Database[_DocumentType]: """Get a database by name. @@ -1549,8 +1549,8 @@ def __getattr__(self, name: str) -> database.Database[_DocumentType]: """ if name.startswith("_"): raise AttributeError( - "MongoClient has no attribute %r. To access the %s" - " database, use client[%r]." % (name, name, name) + "MongoClient has no attribute {!r}. To access the {}" + " database, use client[{!r}].".format(name, name, name) ) return self.__getitem__(name) @@ -1685,7 +1685,8 @@ def _process_kill_cursors(self): # This method is run periodically by a background thread. def _process_periodic_tasks(self): """Process any pending kill cursors requests and - maintain connection pool parameters.""" + maintain connection pool parameters. + """ try: self._process_kill_cursors() self._topology.update_pool() @@ -1742,7 +1743,7 @@ def _get_server_session(self): def _return_server_session(self, server_session, lock): """Internal: return a _ServerSession to the pool.""" if isinstance(server_session, _EmptyServerSession): - return + return None return self._topology.return_server_session(server_session, lock) def _ensure_session(self, session=None): @@ -2121,7 +2122,7 @@ def _add_retryable_write_error(exc, max_wire_version): exc._add_error_label("RetryableWriteError") -class _MongoClientErrorHandler(object): +class _MongoClientErrorHandler: """Handle errors raised when executing an operation.""" __slots__ = ( diff --git a/pymongo/monitor.py b/pymongo/monitor.py index 9031d4b785..2fc0bf8bab 100644 --- a/pymongo/monitor.py +++ b/pymongo/monitor.py @@ -37,7 +37,7 @@ def _sanitize(error): error.__cause__ = None -class MonitorBase(object): +class MonitorBase: def __init__(self, topology, name, interval, min_interval): """Base class to do periodic work on a background thread. 
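
The ``super(Class, self)`` calls converted in this and the surrounding hunks use the zero-argument form, which Python 3 resolves through the compiler-provided ``__class__`` cell. A minimal sketch with hypothetical classes::

    class Base:
        def __init__(self, name):
            self.name = name

    class Child(Base):
        def __init__(self):
            # Zero-argument form; same as super(Child, self).__init__(...)
            super().__init__("child")

    assert Child().name == "child"
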
@@ -108,7 +108,7 @@ def __init__(self, server_description, topology, pool, topology_settings): The Topology is weakly referenced. The Pool must be exclusive to this Monitor. """ - super(Monitor, self).__init__( + super().__init__( topology, "pymongo_server_monitor_thread", topology_settings.heartbeat_frequency, @@ -290,7 +290,7 @@ def __init__(self, topology, topology_settings): The Topology is weakly referenced. """ - super(SrvMonitor, self).__init__( + super().__init__( topology, "pymongo_srv_polling_thread", common.MIN_SRV_RESCAN_INTERVAL, @@ -343,7 +343,7 @@ def __init__(self, topology, topology_settings, pool): The Topology is weakly referenced. """ - super(_RttMonitor, self).__init__( + super().__init__( topology, "pymongo_server_rtt_thread", topology_settings.heartbeat_frequency, diff --git a/pymongo/monitoring.py b/pymongo/monitoring.py index 5b729652ad..391ca13540 100644 --- a/pymongo/monitoring.py +++ b/pymongo/monitoring.py @@ -211,7 +211,7 @@ def connection_checked_in(self, event): _LISTENERS = _Listeners([], [], [], [], []) -class _EventListener(object): +class _EventListener: """Abstract base class for all event listeners.""" @@ -486,14 +486,14 @@ def _to_micros(dur): def _validate_event_listeners(option, listeners): """Validate event listeners""" if not isinstance(listeners, abc.Sequence): - raise TypeError("%s must be a list or tuple" % (option,)) + raise TypeError(f"{option} must be a list or tuple") for listener in listeners: if not isinstance(listener, _EventListener): raise TypeError( - "Listeners for %s must be either a " + "Listeners for {} must be either a " "CommandListener, ServerHeartbeatListener, " "ServerListener, TopologyListener, or " - "ConnectionPoolListener." % (option,) + "ConnectionPoolListener.".format(option) ) return listeners @@ -508,10 +508,10 @@ def register(listener: _EventListener) -> None: """ if not isinstance(listener, _EventListener): raise TypeError( - "Listeners for %s must be either a " + "Listeners for {} must be either a " "CommandListener, ServerHeartbeatListener, " "ServerListener, TopologyListener, or " - "ConnectionPoolListener." % (listener,) + "ConnectionPoolListener.".format(listener) ) if isinstance(listener, CommandListener): _LISTENERS.command_listeners.append(listener) @@ -528,19 +528,17 @@ def register(listener: _EventListener) -> None: # Note - to avoid bugs from forgetting which if these is all lowercase and # which are camelCase, and at the same time avoid having to add a test for # every command, use all lowercase here and test against command_name.lower(). -_SENSITIVE_COMMANDS: set = set( - [ - "authenticate", - "saslstart", - "saslcontinue", - "getnonce", - "createuser", - "updateuser", - "copydbgetnonce", - "copydbsaslstart", - "copydb", - ] -) +_SENSITIVE_COMMANDS: set = { + "authenticate", + "saslstart", + "saslcontinue", + "getnonce", + "createuser", + "updateuser", + "copydbgetnonce", + "copydbsaslstart", + "copydb", +} # The "hello" command is also deemed sensitive when attempting speculative @@ -554,7 +552,7 @@ def _is_speculative_authenticate(command_name, doc): return False -class _CommandEvent(object): +class _CommandEvent: """Base class for command events.""" __slots__ = ("__cmd_name", "__rqst_id", "__conn_id", "__op_id", "__service_id") @@ -627,10 +625,10 @@ def __init__( service_id: Optional[ObjectId] = None, ) -> None: if not command: - raise ValueError("%r is not a valid command" % (command,)) + raise ValueError(f"{command!r} is not a valid command") # Command name must be first key. 
command_name = next(iter(command)) - super(CommandStartedEvent, self).__init__( + super().__init__( command_name, request_id, connection_id, operation_id, service_id=service_id ) cmd_name = command_name.lower() @@ -651,7 +649,7 @@ def database_name(self) -> str: return self.__db def __repr__(self): - return ("<%s %s db: %r, command: %r, operation_id: %s, service_id: %s>") % ( + return ("<{} {} db: {!r}, command: {!r}, operation_id: {}, service_id: {}>").format( self.__class__.__name__, self.connection_id, self.database_name, @@ -687,7 +685,7 @@ def __init__( operation_id: Optional[int], service_id: Optional[ObjectId] = None, ) -> None: - super(CommandSucceededEvent, self).__init__( + super().__init__( command_name, request_id, connection_id, operation_id, service_id=service_id ) self.__duration_micros = _to_micros(duration) @@ -708,7 +706,9 @@ def reply(self) -> _DocumentOut: return self.__reply def __repr__(self): - return ("<%s %s command: %r, operation_id: %s, duration_micros: %s, service_id: %s>") % ( + return ( + "<{} {} command: {!r}, operation_id: {}, duration_micros: {}, service_id: {}>" + ).format( self.__class__.__name__, self.connection_id, self.command_name, @@ -744,7 +744,7 @@ def __init__( operation_id: Optional[int], service_id: Optional[ObjectId] = None, ) -> None: - super(CommandFailedEvent, self).__init__( + super().__init__( command_name, request_id, connection_id, operation_id, service_id=service_id ) self.__duration_micros = _to_micros(duration) @@ -762,9 +762,9 @@ def failure(self) -> _DocumentOut: def __repr__(self): return ( - "<%s %s command: %r, operation_id: %s, duration_micros: %s, " - "failure: %r, service_id: %s>" - ) % ( + "<{} {} command: {!r}, operation_id: {}, duration_micros: {}, " + "failure: {!r}, service_id: {}>" + ).format( self.__class__.__name__, self.connection_id, self.command_name, @@ -775,7 +775,7 @@ def __repr__(self): ) -class _PoolEvent(object): +class _PoolEvent: """Base class for pool events.""" __slots__ = ("__address",) @@ -791,7 +791,7 @@ def address(self) -> _Address: return self.__address def __repr__(self): - return "%s(%r)" % (self.__class__.__name__, self.__address) + return f"{self.__class__.__name__}({self.__address!r})" class PoolCreatedEvent(_PoolEvent): @@ -807,7 +807,7 @@ class PoolCreatedEvent(_PoolEvent): __slots__ = ("__options",) def __init__(self, address: _Address, options: Dict[str, Any]) -> None: - super(PoolCreatedEvent, self).__init__(address) + super().__init__(address) self.__options = options @property @@ -816,7 +816,7 @@ def options(self) -> Dict[str, Any]: return self.__options def __repr__(self): - return "%s(%r, %r)" % (self.__class__.__name__, self.address, self.__options) + return f"{self.__class__.__name__}({self.address!r}, {self.__options!r})" class PoolReadyEvent(_PoolEvent): @@ -846,7 +846,7 @@ class PoolClearedEvent(_PoolEvent): __slots__ = ("__service_id",) def __init__(self, address: _Address, service_id: Optional[ObjectId] = None) -> None: - super(PoolClearedEvent, self).__init__(address) + super().__init__(address) self.__service_id = service_id @property @@ -860,7 +860,7 @@ def service_id(self) -> Optional[ObjectId]: return self.__service_id def __repr__(self): - return "%s(%r, %r)" % (self.__class__.__name__, self.address, self.__service_id) + return f"{self.__class__.__name__}({self.address!r}, {self.__service_id!r})" class PoolClosedEvent(_PoolEvent): @@ -876,7 +876,7 @@ class PoolClosedEvent(_PoolEvent): __slots__ = () -class ConnectionClosedReason(object): +class ConnectionClosedReason: """An 
enum that defines values for `reason` on a :class:`ConnectionClosedEvent`. @@ -897,7 +897,7 @@ class ConnectionClosedReason(object): """The pool was closed, making the connection no longer valid.""" -class ConnectionCheckOutFailedReason(object): +class ConnectionCheckOutFailedReason: """An enum that defines values for `reason` on a :class:`ConnectionCheckOutFailedEvent`. @@ -916,7 +916,7 @@ class ConnectionCheckOutFailedReason(object): """ -class _ConnectionEvent(object): +class _ConnectionEvent: """Private base class for connection events.""" __slots__ = ("__address",) @@ -932,7 +932,7 @@ def address(self) -> _Address: return self.__address def __repr__(self): - return "%s(%r)" % (self.__class__.__name__, self.__address) + return f"{self.__class__.__name__}({self.__address!r})" class _ConnectionIdEvent(_ConnectionEvent): @@ -950,7 +950,7 @@ def connection_id(self) -> int: return self.__connection_id def __repr__(self): - return "%s(%r, %r)" % (self.__class__.__name__, self.address, self.__connection_id) + return f"{self.__class__.__name__}({self.address!r}, {self.__connection_id!r})" class ConnectionCreatedEvent(_ConnectionIdEvent): @@ -999,7 +999,7 @@ class ConnectionClosedEvent(_ConnectionIdEvent): __slots__ = ("__reason",) def __init__(self, address, connection_id, reason): - super(ConnectionClosedEvent, self).__init__(address, connection_id) + super().__init__(address, connection_id) self.__reason = reason @property @@ -1012,7 +1012,7 @@ def reason(self): return self.__reason def __repr__(self): - return "%s(%r, %r, %r)" % ( + return "{}({!r}, {!r}, {!r})".format( self.__class__.__name__, self.address, self.connection_id, @@ -1060,7 +1060,7 @@ def reason(self) -> str: return self.__reason def __repr__(self): - return "%s(%r, %r)" % (self.__class__.__name__, self.address, self.__reason) + return f"{self.__class__.__name__}({self.address!r}, {self.__reason!r})" class ConnectionCheckedOutEvent(_ConnectionIdEvent): @@ -1091,7 +1091,7 @@ class ConnectionCheckedInEvent(_ConnectionIdEvent): __slots__ = () -class _ServerEvent(object): +class _ServerEvent: """Base class for server events.""" __slots__ = ("__server_address", "__topology_id") @@ -1111,7 +1111,7 @@ def topology_id(self) -> ObjectId: return self.__topology_id def __repr__(self): - return "<%s %s topology_id: %s>" % ( + return "<{} {} topology_id: {}>".format( self.__class__.__name__, self.server_address, self.topology_id, @@ -1130,26 +1130,28 @@ def __init__( self, previous_description: "ServerDescription", new_description: "ServerDescription", - *args: Any + *args: Any, ) -> None: - super(ServerDescriptionChangedEvent, self).__init__(*args) + super().__init__(*args) self.__previous_description = previous_description self.__new_description = new_description @property def previous_description(self) -> "ServerDescription": """The previous - :class:`~pymongo.server_description.ServerDescription`.""" + :class:`~pymongo.server_description.ServerDescription`. + """ return self.__previous_description @property def new_description(self) -> "ServerDescription": """The new - :class:`~pymongo.server_description.ServerDescription`.""" + :class:`~pymongo.server_description.ServerDescription`. 
+ """ return self.__new_description def __repr__(self): - return "<%s %s changed from: %s, to: %s>" % ( + return "<{} {} changed from: {}, to: {}>".format( self.__class__.__name__, self.server_address, self.previous_description, @@ -1175,7 +1177,7 @@ class ServerClosedEvent(_ServerEvent): __slots__ = () -class TopologyEvent(object): +class TopologyEvent: """Base class for topology description events.""" __slots__ = "__topology_id" @@ -1189,7 +1191,7 @@ def topology_id(self) -> ObjectId: return self.__topology_id def __repr__(self): - return "<%s topology_id: %s>" % (self.__class__.__name__, self.topology_id) + return f"<{self.__class__.__name__} topology_id: {self.topology_id}>" class TopologyDescriptionChangedEvent(TopologyEvent): @@ -1204,26 +1206,28 @@ def __init__( self, previous_description: "TopologyDescription", new_description: "TopologyDescription", - *args: Any + *args: Any, ) -> None: - super(TopologyDescriptionChangedEvent, self).__init__(*args) + super().__init__(*args) self.__previous_description = previous_description self.__new_description = new_description @property def previous_description(self) -> "TopologyDescription": """The previous - :class:`~pymongo.topology_description.TopologyDescription`.""" + :class:`~pymongo.topology_description.TopologyDescription`. + """ return self.__previous_description @property def new_description(self) -> "TopologyDescription": """The new - :class:`~pymongo.topology_description.TopologyDescription`.""" + :class:`~pymongo.topology_description.TopologyDescription`. + """ return self.__new_description def __repr__(self): - return "<%s topology_id: %s changed from: %s, to: %s>" % ( + return "<{} topology_id: {} changed from: {}, to: {}>".format( self.__class__.__name__, self.topology_id, self.previous_description, @@ -1249,7 +1253,7 @@ class TopologyClosedEvent(TopologyEvent): __slots__ = () -class _ServerHeartbeatEvent(object): +class _ServerHeartbeatEvent: """Base class for server heartbeat events.""" __slots__ = "__connection_id" @@ -1260,11 +1264,12 @@ def __init__(self, connection_id: _Address) -> None: @property def connection_id(self) -> _Address: """The address (host, port) of the server this heartbeat was sent - to.""" + to. 
+ """ return self.__connection_id def __repr__(self): - return "<%s %s>" % (self.__class__.__name__, self.connection_id) + return f"<{self.__class__.__name__} {self.connection_id}>" class ServerHeartbeatStartedEvent(_ServerHeartbeatEvent): @@ -1287,7 +1292,7 @@ class ServerHeartbeatSucceededEvent(_ServerHeartbeatEvent): def __init__( self, duration: float, reply: Hello, connection_id: _Address, awaited: bool = False ) -> None: - super(ServerHeartbeatSucceededEvent, self).__init__(connection_id) + super().__init__(connection_id) self.__duration = duration self.__reply = reply self.__awaited = awaited @@ -1313,7 +1318,7 @@ def awaited(self) -> bool: return self.__awaited def __repr__(self): - return "<%s %s duration: %s, awaited: %s, reply: %s>" % ( + return "<{} {} duration: {}, awaited: {}, reply: {}>".format( self.__class__.__name__, self.connection_id, self.duration, @@ -1334,7 +1339,7 @@ class ServerHeartbeatFailedEvent(_ServerHeartbeatEvent): def __init__( self, duration: float, reply: Exception, connection_id: _Address, awaited: bool = False ) -> None: - super(ServerHeartbeatFailedEvent, self).__init__(connection_id) + super().__init__(connection_id) self.__duration = duration self.__reply = reply self.__awaited = awaited @@ -1360,7 +1365,7 @@ def awaited(self) -> bool: return self.__awaited def __repr__(self): - return "<%s %s duration: %s, awaited: %s, reply: %r>" % ( + return "<{} {} duration: {}, awaited: {}, reply: {!r}>".format( self.__class__.__name__, self.connection_id, self.duration, @@ -1369,7 +1374,7 @@ def __repr__(self): ) -class _EventListeners(object): +class _EventListeners: """Configure event listeners for a client instance. Any event listeners registered globally are included by default. diff --git a/pymongo/network.py b/pymongo/network.py index a5c5459e14..d105c8b8b5 100644 --- a/pymongo/network.py +++ b/pymongo/network.py @@ -219,15 +219,15 @@ def receive_message(sock_info, request_id, max_message_size=MAX_MESSAGE_SIZE): # No request_id for exhaust cursor "getMore". if request_id is not None: if request_id != response_to: - raise ProtocolError("Got response id %r but expected %r" % (response_to, request_id)) + raise ProtocolError(f"Got response id {response_to!r} but expected {request_id!r}") if length <= 16: raise ProtocolError( - "Message length (%r) not longer than standard message header size (16)" % (length,) + f"Message length ({length!r}) not longer than standard message header size (16)" ) if length > max_message_size: raise ProtocolError( - "Message length (%r) is larger than server max " - "message size (%r)" % (length, max_message_size) + "Message length ({!r}) is larger than server max " + "message size ({!r})".format(length, max_message_size) ) if op_code == 2012: op_code, _, compressor_id = _UNPACK_COMPRESSION_HEADER( @@ -240,7 +240,7 @@ def receive_message(sock_info, request_id, max_message_size=MAX_MESSAGE_SIZE): try: unpack_reply = _UNPACK_REPLY[op_code] except KeyError: - raise ProtocolError("Got opcode %r but expected %r" % (op_code, _UNPACK_REPLY.keys())) + raise ProtocolError(f"Got opcode {op_code!r} but expected {_UNPACK_REPLY.keys()!r}") return unpack_reply(data) @@ -281,7 +281,7 @@ def wait_for_read(sock_info, deadline): # Errors raised by sockets (and TLS sockets) when in non-blocking mode. 
-BLOCKING_IO_ERRORS = (BlockingIOError,) + ssl_support.BLOCKING_IO_ERRORS +BLOCKING_IO_ERRORS = (BlockingIOError, *ssl_support.BLOCKING_IO_ERRORS) def _receive_data_on_socket(sock_info, length, deadline): @@ -299,7 +299,7 @@ def _receive_data_on_socket(sock_info, length, deadline): chunk_length = sock_info.sock.recv_into(mv[bytes_read:]) except BLOCKING_IO_ERRORS: raise socket.timeout("timed out") - except (IOError, OSError) as exc: # noqa: B014 + except OSError as exc: # noqa: B014 if _errno_from_exception(exc) == errno.EINTR: continue raise diff --git a/pymongo/ocsp_cache.py b/pymongo/ocsp_cache.py index 389ee09ce7..0c50902167 100644 --- a/pymongo/ocsp_cache.py +++ b/pymongo/ocsp_cache.py @@ -20,7 +20,7 @@ from pymongo.lock import _create_lock -class _OCSPCache(object): +class _OCSPCache: """A cache for OCSP responses.""" CACHE_KEY_TYPE = namedtuple( # type: ignore diff --git a/pymongo/operations.py b/pymongo/operations.py index ad119f2ecc..3ff4ed57a3 100644 --- a/pymongo/operations.py +++ b/pymongo/operations.py @@ -48,7 +48,7 @@ def _add_to_bulk(self, bulkobj): bulkobj.add_insert(self._doc) def __repr__(self): - return "InsertOne(%r)" % (self._doc,) + return f"InsertOne({self._doc!r})" def __eq__(self, other: Any) -> bool: if type(other) == type(self): @@ -59,7 +59,7 @@ def __ne__(self, other: Any) -> bool: return not self == other -class DeleteOne(object): +class DeleteOne: """Represents a delete_one operation.""" __slots__ = ("_filter", "_collation", "_hint") @@ -104,7 +104,7 @@ def _add_to_bulk(self, bulkobj): bulkobj.add_delete(self._filter, 1, collation=self._collation, hint=self._hint) def __repr__(self): - return "DeleteOne(%r, %r)" % (self._filter, self._collation) + return f"DeleteOne({self._filter!r}, {self._collation!r})" def __eq__(self, other: Any) -> bool: if type(other) == type(self): @@ -115,7 +115,7 @@ def __ne__(self, other: Any) -> bool: return not self == other -class DeleteMany(object): +class DeleteMany: """Represents a delete_many operation.""" __slots__ = ("_filter", "_collation", "_hint") @@ -160,7 +160,7 @@ def _add_to_bulk(self, bulkobj): bulkobj.add_delete(self._filter, 0, collation=self._collation, hint=self._hint) def __repr__(self): - return "DeleteMany(%r, %r)" % (self._filter, self._collation) + return f"DeleteMany({self._filter!r}, {self._collation!r})" def __eq__(self, other: Any) -> bool: if type(other) == type(self): @@ -242,7 +242,7 @@ def __ne__(self, other: Any) -> bool: return not self == other def __repr__(self): - return "%s(%r, %r, %r, %r, %r)" % ( + return "{}({!r}, {!r}, {!r}, {!r}, {!r})".format( self.__class__.__name__, self._filter, self._doc, @@ -252,7 +252,7 @@ def __repr__(self): ) -class _UpdateOp(object): +class _UpdateOp: """Private base class for update operations.""" __slots__ = ("_filter", "_doc", "_upsert", "_collation", "_array_filters", "_hint") @@ -298,7 +298,7 @@ def __ne__(self, other): return not self == other def __repr__(self): - return "%s(%r, %r, %r, %r, %r, %r)" % ( + return "{}({!r}, {!r}, {!r}, {!r}, {!r}, {!r})".format( self.__class__.__name__, self._filter, self._doc, @@ -352,7 +352,7 @@ def __init__( .. versionchanged:: 3.5 Added the `collation` option. """ - super(UpdateOne, self).__init__(filter, update, upsert, collation, array_filters, hint) + super().__init__(filter, update, upsert, collation, array_filters, hint) def _add_to_bulk(self, bulkobj): """Add this operation to the _Bulk instance `bulkobj`.""" @@ -410,7 +410,7 @@ def __init__( .. versionchanged:: 3.5 Added the `collation` option. 
""" - super(UpdateMany, self).__init__(filter, update, upsert, collation, array_filters, hint) + super().__init__(filter, update, upsert, collation, array_filters, hint) def _add_to_bulk(self, bulkobj): """Add this operation to the _Bulk instance `bulkobj`.""" @@ -425,7 +425,7 @@ def _add_to_bulk(self, bulkobj): ) -class IndexModel(object): +class IndexModel: """Represents an index to create.""" __slots__ = ("__document",) diff --git a/pymongo/periodic_executor.py b/pymongo/periodic_executor.py index 95e7830674..24090e0160 100644 --- a/pymongo/periodic_executor.py +++ b/pymongo/periodic_executor.py @@ -22,7 +22,7 @@ from pymongo.lock import _create_lock -class PeriodicExecutor(object): +class PeriodicExecutor: def __init__(self, interval, min_interval, target, name=None): """ "Run a target function periodically on a background thread. @@ -51,7 +51,7 @@ def __init__(self, interval, min_interval, target, name=None): self._lock = _create_lock() def __repr__(self): - return "<%s(name=%s) object at 0x%x>" % (self.__class__.__name__, self._name, id(self)) + return f"<{self.__class__.__name__}(name={self._name}) object at 0x{id(self):x}>" def open(self) -> None: """Start. Multiple calls have no effect. diff --git a/pymongo/pool.py b/pymongo/pool.py index 6ba1554231..5bae8ce878 100644 --- a/pymongo/pool.py +++ b/pymongo/pool.py @@ -81,7 +81,6 @@ def _set_non_inheritable_non_atomic(fd): # everything we need from fcntl, etc. def _set_non_inheritable_non_atomic(fd): """Dummy function for platforms that don't provide fcntl.""" - pass _MAX_TCP_KEEPIDLE = 120 @@ -134,7 +133,7 @@ def _set_tcp_option(sock, tcp_option, max_value): default = sock.getsockopt(socket.IPPROTO_TCP, sockopt) if default > max_value: sock.setsockopt(socket.IPPROTO_TCP, sockopt, max_value) - except socket.error: + except OSError: pass def _set_keepalive_times(sock): @@ -351,7 +350,7 @@ def _raise_connection_failure( if port is not None: msg = "%s:%d: %s" % (host, port, error) else: - msg = "%s: %s" % (host, error) + msg = f"{host}: {error}" if msg_prefix: msg = msg_prefix + msg if isinstance(error, socket.timeout): @@ -371,7 +370,7 @@ def _cond_wait(condition, deadline): return condition.wait(timeout) -class PoolOptions(object): +class PoolOptions: """Read only connection pool options for a MongoClient. Should not be instantiated directly by application developers. Access @@ -456,17 +455,17 @@ def __init__( # } if driver: if driver.name: - self.__metadata["driver"]["name"] = "%s|%s" % ( + self.__metadata["driver"]["name"] = "{}|{}".format( _METADATA["driver"]["name"], driver.name, ) if driver.version: - self.__metadata["driver"]["version"] = "%s|%s" % ( + self.__metadata["driver"]["version"] = "{}|{}".format( _METADATA["driver"]["version"], driver.version, ) if driver.platform: - self.__metadata["platform"] = "%s|%s" % (_METADATA["platform"], driver.platform) + self.__metadata["platform"] = "{}|{}".format(_METADATA["platform"], driver.platform) env = _metadata_env() if env: @@ -601,7 +600,7 @@ def load_balanced(self): return self.__load_balanced -class _CancellationContext(object): +class _CancellationContext: def __init__(self): self._cancelled = False @@ -615,7 +614,7 @@ def cancelled(self): return self._cancelled -class SocketInfo(object): +class SocketInfo: """Store a socket with some metadata. 
:Parameters: @@ -1080,7 +1079,7 @@ def __hash__(self): return hash(self.sock) def __repr__(self): - return "SocketInfo(%s)%s at %s" % ( + return "SocketInfo({}){} at {}".format( repr(self.sock), self.closed and " CLOSED" or "", id(self), @@ -1106,7 +1105,7 @@ def _create_connection(address, options): try: sock.connect(host) return sock - except socket.error: + except OSError: sock.close() raise @@ -1125,7 +1124,7 @@ def _create_connection(address, options): # all file descriptors are created non-inheritable. See PEP 446. try: sock = socket.socket(af, socktype | getattr(socket, "SOCK_CLOEXEC", 0), proto) - except socket.error: + except OSError: # Can SOCK_CLOEXEC be defined even if the kernel doesn't support # it? sock = socket.socket(af, socktype, proto) @@ -1144,7 +1143,7 @@ def _create_connection(address, options): _set_keepalive_times(sock) sock.connect(sa) return sock - except socket.error as e: + except OSError as e: err = e sock.close() @@ -1155,7 +1154,7 @@ def _create_connection(address, options): # host with an OS/kernel or Python interpreter that doesn't # support IPv6. The test case is Jython2.5.1 which doesn't # support IPv6 at all. - raise socket.error("getaddrinfo failed") + raise OSError("getaddrinfo failed") def _configured_socket(address, options): @@ -1182,7 +1181,7 @@ def _configured_socket(address, options): # Raise _CertificateError directly like we do after match_hostname # below. raise - except (IOError, OSError, SSLError) as exc: # noqa: B014 + except (OSError, SSLError) as exc: # noqa: B014 sock.close() # We raise AutoReconnect for transient and permanent SSL handshake # failures alike. Permanent handshake failures, like protocol @@ -1208,10 +1207,8 @@ class _PoolClosedError(PyMongoError): closed pool. """ - pass - -class _PoolGeneration(object): +class _PoolGeneration: def __init__(self): # Maps service_id to generation. self._generations = collections.defaultdict(int) @@ -1242,7 +1239,7 @@ def stale(self, gen, service_id): return gen != self.get(service_id) -class PoolState(object): +class PoolState: PAUSED = 1 READY = 2 CLOSED = 3 @@ -1753,10 +1750,9 @@ def _raise_wait_queue_timeout(self) -> NoReturn: other_ops = self.active_sockets - self.ncursors - self.ntxns raise WaitQueueTimeoutError( "Timeout waiting for connection from the connection pool. " - "maxPoolSize: %s, connections in use by cursors: %s, " - "connections in use by transactions: %s, connections in use " - "by other operations: %s, timeout: %s" - % ( + "maxPoolSize: {}, connections in use by cursors: {}, " + "connections in use by transactions: {}, connections in use " + "by other operations: {}, timeout: {}".format( self.opts.max_pool_size, self.ncursors, self.ntxns, @@ -1766,7 +1762,7 @@ def _raise_wait_queue_timeout(self) -> NoReturn: ) raise WaitQueueTimeoutError( "Timed out while checking out a connection from connection pool. " - "maxPoolSize: %s, timeout: %s" % (self.opts.max_pool_size, timeout) + "maxPoolSize: {}, timeout: {}".format(self.opts.max_pool_size, timeout) ) def __del__(self): diff --git a/pymongo/pyopenssl_context.py b/pymongo/pyopenssl_context.py index 2d9c904bb3..bfc52df671 100644 --- a/pymongo/pyopenssl_context.py +++ b/pymongo/pyopenssl_context.py @@ -67,7 +67,7 @@ _stdlibssl.CERT_REQUIRED: _SSL.VERIFY_PEER | _SSL.VERIFY_FAIL_IF_NO_PEER_CERT, } -_REVERSE_VERIFY_MAP = dict((value, key) for key, value in _VERIFY_MAP.items()) +_REVERSE_VERIFY_MAP = {value: key for key, value in _VERIFY_MAP.items()} # For SNI support. 
According to RFC6066, section 3, IPv4 and IPv6 literals are @@ -97,7 +97,7 @@ class _sslConn(_SSL.Connection): def __init__(self, ctx, sock, suppress_ragged_eofs): self.socket_checker = _SocketChecker() self.suppress_ragged_eofs = suppress_ragged_eofs - super(_sslConn, self).__init__(ctx, sock) + super().__init__(ctx, sock) def _call(self, call, *args, **kwargs): timeout = self.gettimeout() @@ -122,11 +122,11 @@ def _call(self, call, *args, **kwargs): continue def do_handshake(self, *args, **kwargs): - return self._call(super(_sslConn, self).do_handshake, *args, **kwargs) + return self._call(super().do_handshake, *args, **kwargs) def recv(self, *args, **kwargs): try: - return self._call(super(_sslConn, self).recv, *args, **kwargs) + return self._call(super().recv, *args, **kwargs) except _SSL.SysCallError as exc: # Suppress ragged EOFs to match the stdlib. if self.suppress_ragged_eofs and _ragged_eof(exc): @@ -135,7 +135,7 @@ def recv(self, *args, **kwargs): def recv_into(self, *args, **kwargs): try: - return self._call(super(_sslConn, self).recv_into, *args, **kwargs) + return self._call(super().recv_into, *args, **kwargs) except _SSL.SysCallError as exc: # Suppress ragged EOFs to match the stdlib. if self.suppress_ragged_eofs and _ragged_eof(exc): @@ -148,11 +148,11 @@ def sendall(self, buf, flags=0): total_sent = 0 while total_sent < total_length: try: - sent = self._call(super(_sslConn, self).send, view[total_sent:], flags) + sent = self._call(super().send, view[total_sent:], flags) # XXX: It's not clear if this can actually happen. PyOpenSSL # doesn't appear to have any interrupt handling, nor any interrupt # errors for OpenSSL connections. - except (IOError, OSError) as exc: # noqa: B014 + except OSError as exc: # noqa: B014 if _errno_from_exception(exc) == _EINTR: continue raise @@ -163,7 +163,7 @@ def sendall(self, buf, flags=0): total_sent += sent -class _CallbackData(object): +class _CallbackData: """Data class which is passed to the OCSP callback.""" def __init__(self): @@ -172,7 +172,7 @@ def __init__(self): self.ocsp_response_cache = _OCSPCache() -class SSLContext(object): +class SSLContext: """A CPython compatible SSLContext implementation wrapping PyOpenSSL's context. """ @@ -328,7 +328,8 @@ def load_default_certs(self): def set_default_verify_paths(self): """Specify that the platform provided CA certificates are to be used - for verification purposes.""" + for verification purposes. + """ # Note: See PyOpenSSL's docs for limitations, which are similar # but not that same as CPython's. self._ctx.set_default_verify_paths() diff --git a/pymongo/read_concern.py b/pymongo/read_concern.py index dfb3930ab0..c673c44780 100644 --- a/pymongo/read_concern.py +++ b/pymongo/read_concern.py @@ -17,7 +17,7 @@ from typing import Any, Dict, Optional -class ReadConcern(object): +class ReadConcern: """ReadConcern :Parameters: @@ -45,7 +45,8 @@ def level(self) -> Optional[str]: @property def ok_for_legacy(self) -> bool: """Return ``True`` if this read concern is compatible with - old wire protocol versions.""" + old wire protocol versions. 
+ """ return self.level is None or self.level == "local" @property diff --git a/pymongo/read_preferences.py b/pymongo/read_preferences.py index 46f029ed31..f3aa003a1c 100644 --- a/pymongo/read_preferences.py +++ b/pymongo/read_preferences.py @@ -46,18 +46,18 @@ def _validate_tag_sets(tag_sets): return tag_sets if not isinstance(tag_sets, (list, tuple)): - raise TypeError(("Tag sets %r invalid, must be a sequence") % (tag_sets,)) + raise TypeError(f"Tag sets {tag_sets!r} invalid, must be a sequence") if len(tag_sets) == 0: raise ValueError( - ("Tag sets %r invalid, must be None or contain at least one set of tags") % (tag_sets,) + f"Tag sets {tag_sets!r} invalid, must be None or contain at least one set of tags" ) for tags in tag_sets: if not isinstance(tags, abc.Mapping): raise TypeError( - "Tag set %r invalid, must be an instance of dict, " + "Tag set {!r} invalid, must be an instance of dict, " "bson.son.SON or other type that inherits from " - "collection.Mapping" % (tags,) + "collection.Mapping".format(tags) ) return list(tag_sets) @@ -88,7 +88,7 @@ def _validate_hedge(hedge): return None if not isinstance(hedge, dict): - raise TypeError("hedge must be a dictionary, not %r" % (hedge,)) + raise TypeError(f"hedge must be a dictionary, not {hedge!r}") return hedge @@ -97,7 +97,7 @@ def _validate_hedge(hedge): _TagSets = Sequence[Mapping[str, Any]] -class _ServerMode(object): +class _ServerMode: """Base class for all read preferences.""" __slots__ = ("__mongos_mode", "__mode", "__tag_sets", "__max_staleness", "__hedge") @@ -168,7 +168,8 @@ def tag_sets(self) -> _TagSets: def max_staleness(self) -> int: """The maximum estimated length of time (in seconds) a replica set secondary can fall behind the primary in replication before it will - no longer be selected for operations, or -1 for no maximum.""" + no longer be selected for operations, or -1 for no maximum. 
+ """ return self.__max_staleness @property @@ -209,7 +210,7 @@ def min_wire_version(self) -> int: return 0 if self.__max_staleness == -1 else 5 def __repr__(self): - return "%s(tag_sets=%r, max_staleness=%r, hedge=%r)" % ( + return "{}(tag_sets={!r}, max_staleness={!r}, hedge={!r})".format( self.name, self.__tag_sets, self.__max_staleness, @@ -263,7 +264,7 @@ class Primary(_ServerMode): __slots__ = () def __init__(self) -> None: - super(Primary, self).__init__(_PRIMARY) + super().__init__(_PRIMARY) def __call__(self, selection: Any) -> Any: """Apply this read preference to a Selection.""" @@ -314,7 +315,7 @@ def __init__( max_staleness: int = -1, hedge: Optional[_Hedge] = None, ) -> None: - super(PrimaryPreferred, self).__init__(_PRIMARY_PREFERRED, tag_sets, max_staleness, hedge) + super().__init__(_PRIMARY_PREFERRED, tag_sets, max_staleness, hedge) def __call__(self, selection: Any) -> Any: """Apply this read preference to Selection.""" @@ -357,7 +358,7 @@ def __init__( max_staleness: int = -1, hedge: Optional[_Hedge] = None, ) -> None: - super(Secondary, self).__init__(_SECONDARY, tag_sets, max_staleness, hedge) + super().__init__(_SECONDARY, tag_sets, max_staleness, hedge) def __call__(self, selection: Any) -> Any: """Apply this read preference to Selection.""" @@ -401,9 +402,7 @@ def __init__( max_staleness: int = -1, hedge: Optional[_Hedge] = None, ) -> None: - super(SecondaryPreferred, self).__init__( - _SECONDARY_PREFERRED, tag_sets, max_staleness, hedge - ) + super().__init__(_SECONDARY_PREFERRED, tag_sets, max_staleness, hedge) def __call__(self, selection: Any) -> Any: """Apply this read preference to Selection.""" @@ -448,7 +447,7 @@ def __init__( max_staleness: int = -1, hedge: Optional[_Hedge] = None, ) -> None: - super(Nearest, self).__init__(_NEAREST, tag_sets, max_staleness, hedge) + super().__init__(_NEAREST, tag_sets, max_staleness, hedge) def __call__(self, selection: Any) -> Any: """Apply this read preference to Selection.""" @@ -490,7 +489,7 @@ def __call__(self, selection): return self.effective_pref(selection) def __repr__(self): - return "_AggWritePref(pref=%r)" % (self.pref,) + return f"_AggWritePref(pref={self.pref!r})" # Proxy other calls to the effective_pref so that _AggWritePref can be # used in place of an actual read preference. @@ -524,7 +523,7 @@ def make_read_preference( ) -class ReadPreference(object): +class ReadPreference: """An enum that defines some commonly used read preference modes. Apps can also create a custom read preference, for example:: @@ -591,7 +590,7 @@ def read_pref_mode_from_name(name: str) -> int: return _MONGOS_MODES.index(name) -class MovingAverage(object): +class MovingAverage: """Tracks an exponentially-weighted moving average.""" average: Optional[float] diff --git a/pymongo/response.py b/pymongo/response.py index 1369eac4e0..fc01b0f1bf 100644 --- a/pymongo/response.py +++ b/pymongo/response.py @@ -15,7 +15,7 @@ """Represent a response from the server.""" -class Response(object): +class Response: __slots__ = ("_data", "_address", "_request_id", "_duration", "_from_command", "_docs") def __init__(self, data, address, request_id, duration, from_command, docs): @@ -86,9 +86,7 @@ def __init__( - `more_to_come`: Bool indicating whether cursor is ready to be exhausted. 
""" - super(PinnedResponse, self).__init__( - data, address, request_id, duration, from_command, docs - ) + super().__init__(data, address, request_id, duration, from_command, docs) self._socket_info = socket_info self._more_to_come = more_to_come @@ -105,5 +103,6 @@ def socket_info(self): @property def more_to_come(self): """If true, server is ready to send batches on the socket until the - result set is exhausted or there is an error.""" + result set is exhausted or there is an error. + """ return self._more_to_come diff --git a/pymongo/results.py b/pymongo/results.py index b072979499..3bd9e82069 100644 --- a/pymongo/results.py +++ b/pymongo/results.py @@ -18,7 +18,7 @@ from pymongo.errors import InvalidOperation -class _WriteResult(object): +class _WriteResult: """Base class for write result classes.""" __slots__ = ("__acknowledged",) @@ -30,10 +30,10 @@ def _raise_if_unacknowledged(self, property_name): """Raise an exception on property access if unacknowledged.""" if not self.__acknowledged: raise InvalidOperation( - "A value for %s is not available when " + "A value for {} is not available when " "the write is unacknowledged. Check the " "acknowledged attribute to avoid this " - "error." % (property_name,) + "error.".format(property_name) ) @property @@ -63,7 +63,7 @@ class InsertOneResult(_WriteResult): def __init__(self, inserted_id: Any, acknowledged: bool) -> None: self.__inserted_id = inserted_id - super(InsertOneResult, self).__init__(acknowledged) + super().__init__(acknowledged) @property def inserted_id(self) -> Any: @@ -78,7 +78,7 @@ class InsertManyResult(_WriteResult): def __init__(self, inserted_ids: List[Any], acknowledged: bool) -> None: self.__inserted_ids = inserted_ids - super(InsertManyResult, self).__init__(acknowledged) + super().__init__(acknowledged) @property def inserted_ids(self) -> List: @@ -102,7 +102,7 @@ class UpdateResult(_WriteResult): def __init__(self, raw_result: Dict[str, Any], acknowledged: bool) -> None: self.__raw_result = raw_result - super(UpdateResult, self).__init__(acknowledged) + super().__init__(acknowledged) @property def raw_result(self) -> Dict[str, Any]: @@ -134,13 +134,14 @@ def upserted_id(self) -> Any: class DeleteResult(_WriteResult): """The return type for :meth:`~pymongo.collection.Collection.delete_one` - and :meth:`~pymongo.collection.Collection.delete_many`""" + and :meth:`~pymongo.collection.Collection.delete_many` + """ __slots__ = ("__raw_result",) def __init__(self, raw_result: Dict[str, Any], acknowledged: bool) -> None: self.__raw_result = raw_result - super(DeleteResult, self).__init__(acknowledged) + super().__init__(acknowledged) @property def raw_result(self) -> Dict[str, Any]: @@ -169,7 +170,7 @@ def __init__(self, bulk_api_result: Dict[str, Any], acknowledged: bool) -> None: :exc:`~pymongo.errors.InvalidOperation`. 
""" self.__bulk_api_result = bulk_api_result - super(BulkWriteResult, self).__init__(acknowledged) + super().__init__(acknowledged) @property def bulk_api_result(self) -> Dict[str, Any]: @@ -211,7 +212,5 @@ def upserted_ids(self) -> Optional[Dict[int, Any]]: """A map of operation index to the _id of the upserted document.""" self._raise_if_unacknowledged("upserted_ids") if self.__bulk_api_result: - return dict( - (upsert["index"], upsert["_id"]) for upsert in self.bulk_api_result["upserted"] - ) + return {upsert["index"]: upsert["_id"] for upsert in self.bulk_api_result["upserted"]} return None diff --git a/pymongo/saslprep.py b/pymongo/saslprep.py index b96d6fcb56..34c0182a53 100644 --- a/pymongo/saslprep.py +++ b/pymongo/saslprep.py @@ -71,7 +71,7 @@ def saslprep(data: Any, prohibit_unassigned_code_points: Optional[bool] = True) return data if prohibit_unassigned_code_points: - prohibited = _PROHIBITED + (stringprep.in_table_a1,) + prohibited = (*_PROHIBITED, stringprep.in_table_a1) else: prohibited = _PROHIBITED @@ -98,12 +98,12 @@ def saslprep(data: Any, prohibit_unassigned_code_points: Optional[bool] = True) raise ValueError("SASLprep: failed bidirectional check") # RFC3454, Section 6, #2. If a string contains any RandALCat # character, it MUST NOT contain any LCat character. - prohibited = prohibited + (stringprep.in_table_d2,) + prohibited = (*prohibited, stringprep.in_table_d2) else: # RFC3454, Section 6, #3. Following the logic of #3, if # the first character is not a RandALCat, no other character # can be either. - prohibited = prohibited + (in_table_d1,) + prohibited = (*prohibited, in_table_d1) # RFC3454 section 2, step 3 and 4 - Prohibit and check bidi for char in data: diff --git a/pymongo/server.py b/pymongo/server.py index 16c905abb7..2eb91c5b5d 100644 --- a/pymongo/server.py +++ b/pymongo/server.py @@ -25,7 +25,7 @@ _CURSOR_DOC_FIELDS = {"cursor": {"firstBatch": 1, "nextBatch": 1}} -class Server(object): +class Server: def __init__( self, server_description, pool, monitor, topology_id=None, listeners=None, events=None ): @@ -245,4 +245,4 @@ def _split_message(self, message): return request_id, data, 0 def __repr__(self): - return "<%s %r>" % (self.__class__.__name__, self._description) + return f"<{self.__class__.__name__} {self._description!r}>" diff --git a/pymongo/server_api.py b/pymongo/server_api.py index e92d6e6179..2393615032 100644 --- a/pymongo/server_api.py +++ b/pymongo/server_api.py @@ -95,7 +95,7 @@ class ServerApiVersion: """Server API version "1".""" -class ServerApi(object): +class ServerApi: """MongoDB Stable API.""" def __init__(self, version, strict=None, deprecation_errors=None): @@ -113,16 +113,16 @@ def __init__(self, version, strict=None, deprecation_errors=None): .. 
versionadded:: 3.12 """ if version != ServerApiVersion.V1: - raise ValueError("Unknown ServerApi version: %s" % (version,)) + raise ValueError(f"Unknown ServerApi version: {version}") if strict is not None and not isinstance(strict, bool): raise TypeError( "Wrong type for ServerApi strict, value must be an instance " - "of bool, not %s" % (type(strict),) + "of bool, not {}".format(type(strict)) ) if deprecation_errors is not None and not isinstance(deprecation_errors, bool): raise TypeError( "Wrong type for ServerApi deprecation_errors, value must be " - "an instance of bool, not %s" % (type(deprecation_errors),) + "an instance of bool, not {}".format(type(deprecation_errors)) ) self._version = version self._strict = strict diff --git a/pymongo/server_description.py b/pymongo/server_description.py index 46517ee95e..4bca3390ae 100644 --- a/pymongo/server_description.py +++ b/pymongo/server_description.py @@ -25,7 +25,7 @@ from pymongo.typings import _Address -class ServerDescription(object): +class ServerDescription: """Immutable representation of one server. :Parameters: @@ -287,8 +287,8 @@ def __ne__(self, other: Any) -> bool: def __repr__(self): errmsg = "" if self.error: - errmsg = ", error=%r" % (self.error,) - return "<%s %s server_type: %s, rtt: %s%s>" % ( + errmsg = f", error={self.error!r}" + return "<{} {} server_type: {}, rtt: {}{}>".format( self.__class__.__name__, self.address, self.server_type_name, diff --git a/pymongo/server_selectors.py b/pymongo/server_selectors.py index 313566cb83..aa9d26b5fb 100644 --- a/pymongo/server_selectors.py +++ b/pymongo/server_selectors.py @@ -17,7 +17,7 @@ from pymongo.server_type import SERVER_TYPE -class Selection(object): +class Selection: """Input or output of a server selector function.""" @classmethod @@ -51,6 +51,7 @@ def secondary_with_max_last_write_date(self): secondaries = secondary_server_selector(self) if secondaries.server_descriptions: return max(secondaries.server_descriptions, key=lambda sd: sd.last_write_date) + return None @property def primary_selection(self): diff --git a/pymongo/settings.py b/pymongo/settings.py index 2bd2527cdf..5d6ddefd36 100644 --- a/pymongo/settings.py +++ b/pymongo/settings.py @@ -26,7 +26,7 @@ from pymongo.topology_description import TOPOLOGY_TYPE -class TopologySettings(object): +class TopologySettings: def __init__( self, seeds=None, @@ -156,4 +156,4 @@ def get_topology_type(self): def get_server_descriptions(self): """Initial dict of (address, ServerDescription) for all seeds.""" - return dict([(address, ServerDescription(address)) for address in self.seeds]) + return {address: ServerDescription(address) for address in self.seeds} diff --git a/pymongo/socket_checker.py b/pymongo/socket_checker.py index 420953db2e..a278898952 100644 --- a/pymongo/socket_checker.py +++ b/pymongo/socket_checker.py @@ -33,7 +33,7 @@ def _errno_from_exception(exc): return None -class SocketChecker(object): +class SocketChecker: def __init__(self) -> None: self._poller: Optional[select.poll] if _HAVE_POLL: @@ -78,7 +78,7 @@ def select( # ready: subsets of the first three arguments. Return # True if any of the lists are not empty. 
return any(res) - except (_SelectError, IOError) as exc: # type: ignore + except (_SelectError, OSError) as exc: # type: ignore if _errno_from_exception(exc) in (errno.EINTR, errno.EAGAIN): continue raise diff --git a/pymongo/srv_resolver.py b/pymongo/srv_resolver.py index fe2dd49aa0..583de818b0 100644 --- a/pymongo/srv_resolver.py +++ b/pymongo/srv_resolver.py @@ -51,7 +51,7 @@ def _resolve(*args, **kwargs): ) -class _SrvResolver(object): +class _SrvResolver: def __init__(self, fqdn, connect_timeout, srv_service_name, srv_max_hosts=0): self.__fqdn = fqdn self.__srv = srv_service_name @@ -110,9 +110,9 @@ def _get_srv_response_and_hosts(self, encapsulate_errors): try: nlist = node[0].split(".")[1:][-self.__slen :] except Exception: - raise ConfigurationError("Invalid SRV host: %s" % (node[0],)) + raise ConfigurationError(f"Invalid SRV host: {node[0]}") if self.__plist != nlist: - raise ConfigurationError("Invalid SRV host: %s" % (node[0],)) + raise ConfigurationError(f"Invalid SRV host: {node[0]}") if self.__srv_max_hosts: nodes = random.sample(nodes, min(self.__srv_max_hosts, len(nodes))) return results, nodes diff --git a/pymongo/ssl_support.py b/pymongo/ssl_support.py index 13c5315eee..3af535ee4b 100644 --- a/pymongo/ssl_support.py +++ b/pymongo/ssl_support.py @@ -71,7 +71,7 @@ def get_ssl_context( try: ctx.load_cert_chain(certfile, None, passphrase) except _ssl.SSLError as exc: - raise ConfigurationError("Private key doesn't match certificate: %s" % (exc,)) + raise ConfigurationError(f"Private key doesn't match certificate: {exc}") if crlfile is not None: if _ssl.IS_PYOPENSSL: raise ConfigurationError("tlsCRLFile cannot be used with PyOpenSSL") diff --git a/pymongo/topology.py b/pymongo/topology.py index 904f6b1836..9759b39f9f 100644 --- a/pymongo/topology.py +++ b/pymongo/topology.py @@ -75,7 +75,7 @@ def process_events_queue(queue_ref): return True # Continue PeriodicExecutor. -class Topology(object): +class Topology: """Monitor a topology of one or more servers.""" def __init__(self, topology_settings): @@ -236,8 +236,7 @@ def _select_servers_loop(self, selector, timeout, address): # No suitable servers. if timeout == 0 or now > end_time: raise ServerSelectionTimeoutError( - "%s, Timeout: %ss, Topology Description: %r" - % (self._error_message(selector), timeout, self.description) + f"{self._error_message(selector)}, Timeout: {timeout}s, Topology Description: {self.description!r}" ) self._ensure_opened() @@ -431,7 +430,7 @@ def _get_replica_set_members(self, selector): ): return set() - return set([sd.address for sd in selector(self._new_selection())]) + return {sd.address for sd in selector(self._new_selection())} def get_secondaries(self): """Return set of secondary addresses.""" @@ -499,7 +498,8 @@ def update_pool(self): def close(self): """Clear pools and terminate monitors. Topology does not reopen on demand. Any further operations will raise - :exc:`~.errors.InvalidOperation`.""" + :exc:`~.errors.InvalidOperation`. + """ with self._lock: for server in self._servers.values(): server.close() @@ -807,14 +807,14 @@ def _error_message(self, selector): else: return "No %s available for writes" % server_plural else: - return 'No %s match selector "%s"' % (server_plural, selector) + return f'No {server_plural} match selector "{selector}"' else: addresses = list(self._description.server_descriptions()) servers = list(self._description.server_descriptions().values()) if not servers: if is_replica_set: # We removed all servers because of the wrong setName? 
- return 'No %s available for replica set name "%s"' % ( + return 'No {} available for replica set name "{}"'.format( server_plural, self._settings.replica_set_name, ) @@ -844,7 +844,7 @@ def __repr__(self): msg = "" if not self._opened: msg = "CLOSED " - return "<%s %s%r>" % (self.__class__.__name__, msg, self._description) + return f"<{self.__class__.__name__} {msg}{self._description!r}>" def eq_props(self): """The properties to use for MongoClient/Topology equality checks.""" @@ -860,7 +860,7 @@ def __hash__(self): return hash(self.eq_props()) -class _ErrorContext(object): +class _ErrorContext: """An error with context for SDAM error handling.""" def __init__(self, error, max_wire_version, sock_generation, completed_handshake, service_id): diff --git a/pymongo/topology_description.py b/pymongo/topology_description.py index 7503a72704..7079b324b2 100644 --- a/pymongo/topology_description.py +++ b/pymongo/topology_description.py @@ -47,7 +47,7 @@ class _TopologyType(NamedTuple): _ServerSelector = Callable[[List[ServerDescription]], List[ServerDescription]] -class TopologyDescription(object): +class TopologyDescription: def __init__( self, topology_type: int, @@ -171,7 +171,7 @@ def reset(self) -> "TopologyDescription": topology_type = self._topology_type # The default ServerDescription's type is Unknown. - sds = dict((address, ServerDescription(address)) for address in self._server_descriptions) + sds = {address: ServerDescription(address) for address in self._server_descriptions} return TopologyDescription( topology_type, @@ -184,7 +184,8 @@ def reset(self) -> "TopologyDescription": def server_descriptions(self) -> Dict[_Address, ServerDescription]: """Dict of (address, - :class:`~pymongo.server_description.ServerDescription`).""" + :class:`~pymongo.server_description.ServerDescription`). + """ return self._server_descriptions.copy() @property @@ -346,7 +347,7 @@ def has_writable_server(self) -> bool: def __repr__(self): # Sort the servers by address. servers = sorted(self._server_descriptions.values(), key=lambda sd: sd.address) - return "<%s id: %s, topology_type: %s, servers: %r>" % ( + return "<{} id: {}, topology_type: {}, servers: {!r}>".format( self.__class__.__name__, self._topology_settings._topology_id, self.topology_type_name, @@ -400,8 +401,9 @@ def updated_topology_description( if set_name is not None and set_name != server_description.replica_set_name: error = ConfigurationError( "client is configured to connect to a replica set named " - "'%s' but this node belongs to a set named '%s'" - % (set_name, server_description.replica_set_name) + "'{}' but this node belongs to a set named '{}'".format( + set_name, server_description.replica_set_name + ) ) sds[address] = server_description.to_unknown(error=error) # Single type never changes. diff --git a/pymongo/typings.py b/pymongo/typings.py index 32cd980c97..ef82114f15 100644 --- a/pymongo/typings.py +++ b/pymongo/typings.py @@ -29,7 +29,8 @@ def strip_optional(elem): """This function is to allow us to cast all of the elements of an iterator from Optional[_T] to _T - while inside a list comprehension.""" + while inside a list comprehension. 
+ """ assert elem is not None return elem diff --git a/pymongo/uri_parser.py b/pymongo/uri_parser.py index e3aeee399e..0772b39c80 100644 --- a/pymongo/uri_parser.py +++ b/pymongo/uri_parser.py @@ -134,7 +134,7 @@ def parse_host(entity: str, default_port: Optional[int] = DEFAULT_PORT) -> _Addr host, port = host.split(":", 1) if isinstance(port, str): if not port.isdigit() or int(port) > 65535 or int(port) <= 0: - raise ValueError("Port must be an integer between 0 and 65535: %r" % (port,)) + raise ValueError(f"Port must be an integer between 0 and 65535: {port!r}") port = int(port) # Normalize hostname to lowercase, since DNS is case-insensitive: @@ -155,7 +155,8 @@ def parse_host(entity: str, default_port: Optional[int] = DEFAULT_PORT) -> _Addr def _parse_options(opts, delim): """Helper method for split_options which creates the options dict. Also handles the creation of a list for the URI tag_sets/ - readpreferencetags portion, and the use of a unicode options string.""" + readpreferencetags portion, and the use of a unicode options string. + """ options = _CaseInsensitiveDictionary() for uriopt in opts.split(delim): key, value = uriopt.split("=") @@ -163,7 +164,7 @@ def _parse_options(opts, delim): options.setdefault(key, []).append(value) else: if key in options: - warnings.warn("Duplicate URI option '%s'." % (key,)) + warnings.warn(f"Duplicate URI option '{key}'.") if key.lower() == "authmechanismproperties": val = value else: @@ -475,9 +476,7 @@ def parse_uri( is_srv = True scheme_free = uri[SRV_SCHEME_LEN:] else: - raise InvalidURI( - "Invalid URI scheme: URI must begin with '%s' or '%s'" % (SCHEME, SRV_SCHEME) - ) + raise InvalidURI(f"Invalid URI scheme: URI must begin with '{SCHEME}' or '{SRV_SCHEME}'") if not scheme_free: raise InvalidURI("Must provide at least one hostname or IP.") @@ -525,15 +524,13 @@ def parse_uri( srv_max_hosts = srv_max_hosts or options.get("srvMaxHosts") if is_srv: if options.get("directConnection"): - raise ConfigurationError( - "Cannot specify directConnection=true with %s URIs" % (SRV_SCHEME,) - ) + raise ConfigurationError(f"Cannot specify directConnection=true with {SRV_SCHEME} URIs") nodes = split_hosts(hosts, default_port=None) if len(nodes) != 1: - raise InvalidURI("%s URIs must include one, and only one, hostname" % (SRV_SCHEME,)) + raise InvalidURI(f"{SRV_SCHEME} URIs must include one, and only one, hostname") fqdn, port = nodes[0] if port is not None: - raise InvalidURI("%s URIs must not include a port number" % (SRV_SCHEME,)) + raise InvalidURI(f"{SRV_SCHEME} URIs must not include a port number") # Use the connection timeout. connectTimeoutMS passed as a keyword # argument overrides the same option passed in the connection string. 
diff --git a/pymongo/write_concern.py b/pymongo/write_concern.py index ced71d0488..25f87954b5 100644 --- a/pymongo/write_concern.py +++ b/pymongo/write_concern.py @@ -19,7 +19,7 @@ from pymongo.errors import ConfigurationError -class WriteConcern(object): +class WriteConcern: """WriteConcern :Parameters: @@ -113,7 +113,9 @@ def acknowledged(self) -> bool: return self.__acknowledged def __repr__(self): - return "WriteConcern(%s)" % (", ".join("%s=%s" % kvt for kvt in self.__document.items()),) + return "WriteConcern({})".format( + ", ".join("{}={}".format(*kvt) for kvt in self.__document.items()) + ) def __eq__(self, other: Any) -> bool: if isinstance(other, WriteConcern): diff --git a/test/__init__.py b/test/__init__.py index dc324c6911..c80b4e95c8 100644 --- a/test/__init__.py +++ b/test/__init__.py @@ -12,8 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -"""Test suite for pymongo, bson, and gridfs. -""" +"""Test suite for pymongo, bson, and gridfs.""" import base64 import gc @@ -92,7 +91,7 @@ CLIENT_PEM = os.environ.get("CLIENT_PEM", os.path.join(CERT_PATH, "client.pem")) CA_PEM = os.environ.get("CA_PEM", os.path.join(CERT_PATH, "ca.pem")) -TLS_OPTIONS: Dict = dict(tls=True) +TLS_OPTIONS: Dict = {"tls": True} if CLIENT_PEM: TLS_OPTIONS["tlsCertificateKeyFile"] = CLIENT_PEM if CA_PEM: @@ -149,7 +148,7 @@ def is_server_resolvable(): try: socket.gethostbyname("server") return True - except socket.error: + except OSError: return False finally: socket.setdefaulttimeout(socket_timeout) @@ -165,7 +164,7 @@ def _create_user(authdb, user, pwd=None, roles=None, **kwargs): return authdb.command(cmd) -class client_knobs(object): +class client_knobs: def __init__( self, heartbeat_frequency=None, @@ -234,10 +233,9 @@ def wrap(*args, **kwargs): def __del__(self): if self._enabled: msg = ( - "ERROR: client_knobs still enabled! HEARTBEAT_FREQUENCY=%s, " - "MIN_HEARTBEAT_INTERVAL=%s, KILL_CURSOR_FREQUENCY=%s, " - "EVENTS_QUEUE_FREQUENCY=%s, stack:\n%s" - % ( + "ERROR: client_knobs still enabled! 
HEARTBEAT_FREQUENCY={}, " + "MIN_HEARTBEAT_INTERVAL={}, KILL_CURSOR_FREQUENCY={}, " + "EVENTS_QUEUE_FREQUENCY={}, stack:\n{}".format( common.HEARTBEAT_FREQUENCY, common.MIN_HEARTBEAT_INTERVAL, common.KILL_CURSOR_FREQUENCY, @@ -250,10 +248,10 @@ def __del__(self): def _all_users(db): - return set(u["user"] for u in db.command("usersInfo").get("users", [])) + return {u["user"] for u in db.command("usersInfo").get("users", [])} -class ClientContext(object): +class ClientContext: client: MongoClient MULTI_MONGOS_LB_URI = MULTI_MONGOS_LB_URI @@ -339,14 +337,14 @@ def _connect(self, host, port, **kwargs): except pymongo.errors.OperationFailure as exc: # SERVER-32063 self.connection_attempts.append( - "connected client %r, but legacy hello failed: %s" % (client, exc) + f"connected client {client!r}, but legacy hello failed: {exc}" ) else: - self.connection_attempts.append("successfully connected client %r" % (client,)) + self.connection_attempts.append(f"successfully connected client {client!r}") # If connected, then return client with default timeout return pymongo.MongoClient(host, port, **kwargs) except pymongo.errors.ConnectionFailure as exc: - self.connection_attempts.append("failed to connect client %r: %s" % (client, exc)) + self.connection_attempts.append(f"failed to connect client {client!r}: {exc}") return None finally: client.close() @@ -447,7 +445,7 @@ def _init_client(self): nodes.extend([partition_node(node.lower()) for node in hello.get("arbiters", [])]) self.nodes = set(nodes) else: - self.nodes = set([(host, port)]) + self.nodes = {(host, port)} self.w = len(hello.get("hosts", [])) or 1 self.version = Version.from_client(self.client) @@ -587,7 +585,7 @@ def _server_started_with_ipv6(self): for info in socket.getaddrinfo(self.host, self.port): if info[0] == socket.AF_INET6: return True - except socket.error: + except OSError: pass return False @@ -599,7 +597,7 @@ def wrap(*args, **kwargs): self.init() # Always raise SkipTest if we can't connect to MongoDB if not self.connected: - raise SkipTest("Cannot connect to MongoDB on %s" % (self.pair,)) + raise SkipTest(f"Cannot connect to MongoDB on {self.pair}") if condition(): return f(*args, **kwargs) raise SkipTest(msg) @@ -625,7 +623,7 @@ def require_connection(self, func): """Run a test only if we can connect to MongoDB.""" return self._require( lambda: True, # _require checks if we're connected - "Cannot connect to MongoDB on %s" % (self.pair,), + f"Cannot connect to MongoDB on {self.pair}", func=func, ) @@ -633,14 +631,15 @@ def require_data_lake(self, func): """Run a test only if we are connected to Atlas Data Lake.""" return self._require( lambda: self.is_data_lake, - "Not connected to Atlas Data Lake on %s" % (self.pair,), + f"Not connected to Atlas Data Lake on {self.pair}", func=func, ) def require_no_mmap(self, func): """Run a test only if the server is not using the MMAPv1 storage engine. Only works for standalone and replica sets; tests are - run regardless of storage engine on sharded clusters.""" + run regardless of storage engine on sharded clusters. + """ def is_not_mmap(): if self.is_mongos: @@ -734,7 +733,8 @@ def require_mongos(self, func): def require_multiple_mongoses(self, func): """Run a test only if the client is connected to a sharded cluster - that has 2 mongos nodes.""" + that has 2 mongos nodes. 
+ """ return self._require( lambda: len(self.mongoses) > 1, "Must have multiple mongoses available", func=func ) @@ -786,7 +786,7 @@ def is_topology_type(self, topologies): "load-balanced", } if unknown: - raise AssertionError("Unknown topologies: %r" % (unknown,)) + raise AssertionError(f"Unknown topologies: {unknown!r}") if self.load_balancer: if "load-balanced" in topologies: return True @@ -812,7 +812,8 @@ def is_topology_type(self, topologies): def require_cluster_type(self, topologies=[]): # noqa """Run a test only if the client is connected to a cluster that conforms to one of the specified topologies. Acceptable topologies - are 'single', 'replicaset', and 'sharded'.""" + are 'single', 'replicaset', and 'sharded'. + """ def _is_valid_topology(): return self.is_topology_type(topologies) @@ -827,7 +828,8 @@ def require_test_commands(self, func): def require_failCommand_fail_point(self, func): """Run a test only if the server supports the failCommand fail - point.""" + point. + """ return self._require( lambda: self.supports_failCommand_fail_point, "failCommand fail point must be supported", @@ -930,7 +932,7 @@ def require_no_api_version(self, func): ) def mongos_seeds(self): - return ",".join("%s:%s" % address for address in self.mongoses) + return ",".join("{}:{}".format(*address) for address in self.mongoses) @property def supports_failCommand_fail_point(self): @@ -1139,7 +1141,7 @@ def setUpClass(cls): pass def setUp(self): - super(MockClientTest, self).setUp() + super().setUp() self.client_knobs = client_knobs(heartbeat_frequency=0.001, min_heartbeat_interval=0.001) @@ -1147,7 +1149,7 @@ def setUp(self): def tearDown(self): self.client_knobs.disable() - super(MockClientTest, self).tearDown() + super().tearDown() # Global knobs to speed up the test suite. 
@@ -1181,9 +1183,9 @@ def print_running_topology(topology): if running: print( "WARNING: found Topology with running threads:\n" - " Threads: %s\n" - " Topology: %s\n" - " Creation traceback:\n%s" % (running, topology, topology._settings._stack) + " Threads: {}\n" + " Topology: {}\n" + " Creation traceback:\n{}".format(running, topology, topology._settings._stack) ) @@ -1215,11 +1217,11 @@ def teardown(): global_knobs.disable() garbage = [] for g in gc.garbage: - garbage.append("GARBAGE: %r" % (g,)) - garbage.append(" gc.get_referents: %r" % (gc.get_referents(g),)) - garbage.append(" gc.get_referrers: %r" % (gc.get_referrers(g),)) + garbage.append(f"GARBAGE: {g!r}") + garbage.append(f" gc.get_referents: {gc.get_referents(g)!r}") + garbage.append(f" gc.get_referrers: {gc.get_referrers(g)!r}") if garbage: - assert False, "\n".join(garbage) + raise AssertionError("\n".join(garbage)) c = client_context.client if c: if not client_context.is_data_lake: @@ -1237,7 +1239,7 @@ def teardown(): class PymongoTestRunner(unittest.TextTestRunner): def run(self, test): setup() - result = super(PymongoTestRunner, self).run(test) + result = super().run(test) teardown() return result @@ -1247,7 +1249,7 @@ def run(self, test): class PymongoXMLTestRunner(XMLTestRunner): # type: ignore[misc] def run(self, test): setup() - result = super(PymongoXMLTestRunner, self).run(test) + result = super().run(test) teardown() return result @@ -1260,8 +1262,7 @@ def test_cases(suite): yield suite_or_case else: # unittest.TestSuite - for case in test_cases(suite_or_case): - yield case + yield from test_cases(suite_or_case) # Helper method to workaround https://bugs.python.org/issue21724 @@ -1272,7 +1273,7 @@ def clear_warning_registry(): setattr(module, "__warningregistry__", {}) # noqa -class SystemCertsPatcher(object): +class SystemCertsPatcher: def __init__(self, ca_certs): if ( ssl.OPENSSL_VERSION.lower().startswith("libressl") diff --git a/test/atlas/test_connection.py b/test/atlas/test_connection.py index 39d817140e..036e4772ff 100644 --- a/test/atlas/test_connection.py +++ b/test/atlas/test_connection.py @@ -102,7 +102,7 @@ def test_uniqueness(self): duplicates = [names for names in uri_to_names.values() if len(names) > 1] self.assertFalse( duplicates, - "Error: the following env variables have duplicate values: %s" % (duplicates,), + f"Error: the following env variables have duplicate values: {duplicates}", ) diff --git a/test/auth_aws/test_auth_aws.py b/test/auth_aws/test_auth_aws.py index e0329a783e..e180d8b064 100644 --- a/test/auth_aws/test_auth_aws.py +++ b/test/auth_aws/test_auth_aws.py @@ -39,7 +39,7 @@ def test_should_fail_without_credentials(self): if "@" not in self.uri: self.skipTest("MONGODB_URI already has no credentials") - hosts = ["%s:%s" % addr for addr in parse_uri(self.uri)["nodelist"]] + hosts = ["{}:{}".format(*addr) for addr in parse_uri(self.uri)["nodelist"]] self.assertTrue(hosts) with MongoClient(hosts) as client: with self.assertRaises(OperationFailure): @@ -115,7 +115,7 @@ def test_poisoned_cache(self): def test_environment_variables_ignored(self): creds = self.setup_cache() self.assertIsNotNone(creds) - prev = os.environ.copy() + os.environ.copy() client = MongoClient(self.uri) self.addCleanup(client.close) @@ -124,9 +124,11 @@ def test_environment_variables_ignored(self): self.assertIsNotNone(auth.get_cached_credentials()) - mock_env = dict( - AWS_ACCESS_KEY_ID="foo", AWS_SECRET_ACCESS_KEY="bar", AWS_SESSION_TOKEN="baz" - ) + mock_env = { + "AWS_ACCESS_KEY_ID": "foo", + 
"AWS_SECRET_ACCESS_KEY": "bar", + "AWS_SESSION_TOKEN": "baz", + } with patch.dict("os.environ", mock_env): self.assertEqual(os.environ["AWS_ACCESS_KEY_ID"], "foo") @@ -147,7 +149,7 @@ def test_no_cache_environment_variables(self): self.assertIsNotNone(creds) auth.set_cached_credentials(None) - mock_env = dict(AWS_ACCESS_KEY_ID=creds.username, AWS_SECRET_ACCESS_KEY=creds.password) + mock_env = {"AWS_ACCESS_KEY_ID": creds.username, "AWS_SECRET_ACCESS_KEY": creds.password} if creds.token: mock_env["AWS_SESSION_TOKEN"] = creds.token diff --git a/test/auth_aws/test_auth_oidc.py b/test/auth_aws/test_auth_oidc.py index 470e4581c2..26e71573d4 100644 --- a/test/auth_aws/test_auth_oidc.py +++ b/test/auth_aws/test_auth_oidc.py @@ -65,7 +65,7 @@ def request_token(server_info, context): self.assertEqual(timeout_seconds, 60 * 5) with open(token_file) as fid: token = fid.read() - resp = dict(access_token=token) + resp = {"access_token": token} time.sleep(sleep) @@ -94,7 +94,7 @@ def refresh_token(server_info, context): # Validate the timeout. self.assertEqual(context["timeout_seconds"], 60 * 5) - resp = dict(access_token=token) + resp = {"access_token": token} if expires_in_seconds is not None: resp["expires_in_seconds"] = expires_in_seconds self.refresh_called += 1 @@ -115,21 +115,21 @@ def fail_point(self, command_args): def test_connect_callbacks_single_implicit_username(self): request_token = self.create_request_cb() - props: Dict = dict(request_token_callback=request_token) + props: Dict = {"request_token_callback": request_token} client = MongoClient(self.uri_single, authmechanismproperties=props) client.test.test.find_one() client.close() def test_connect_callbacks_single_explicit_username(self): request_token = self.create_request_cb() - props: Dict = dict(request_token_callback=request_token) + props: Dict = {"request_token_callback": request_token} client = MongoClient(self.uri_single, username="test_user1", authmechanismproperties=props) client.test.test.find_one() client.close() def test_connect_callbacks_multiple_principal_user1(self): request_token = self.create_request_cb() - props: Dict = dict(request_token_callback=request_token) + props: Dict = {"request_token_callback": request_token} client = MongoClient( self.uri_multiple, username="test_user1", authmechanismproperties=props ) @@ -138,7 +138,7 @@ def test_connect_callbacks_multiple_principal_user1(self): def test_connect_callbacks_multiple_principal_user2(self): request_token = self.create_request_cb("test_user2") - props: Dict = dict(request_token_callback=request_token) + props: Dict = {"request_token_callback": request_token} client = MongoClient( self.uri_multiple, username="test_user2", authmechanismproperties=props ) @@ -147,7 +147,7 @@ def test_connect_callbacks_multiple_principal_user2(self): def test_connect_callbacks_multiple_no_username(self): request_token = self.create_request_cb() - props: Dict = dict(request_token_callback=request_token) + props: Dict = {"request_token_callback": request_token} client = MongoClient(self.uri_multiple, authmechanismproperties=props) with self.assertRaises(OperationFailure): client.test.test.find_one() @@ -155,13 +155,13 @@ def test_connect_callbacks_multiple_no_username(self): def test_allowed_hosts_blocked(self): request_token = self.create_request_cb() - props: Dict = dict(request_token_callback=request_token, allowed_hosts=[]) + props: Dict = {"request_token_callback": request_token, "allowed_hosts": []} client = MongoClient(self.uri_single, authmechanismproperties=props) with 
self.assertRaises(ConfigurationError): client.test.test.find_one() client.close() - props: Dict = dict(request_token_callback=request_token, allowed_hosts=["example.com"]) + props: Dict = {"request_token_callback": request_token, "allowed_hosts": ["example.com"]} client = MongoClient( self.uri_single + "&ignored=example.com", authmechanismproperties=props, connect=False ) @@ -170,26 +170,26 @@ def test_allowed_hosts_blocked(self): client.close() def test_connect_aws_single_principal(self): - props = dict(PROVIDER_NAME="aws") + props = {"PROVIDER_NAME": "aws"} client = MongoClient(self.uri_single, authmechanismproperties=props) client.test.test.find_one() client.close() def test_connect_aws_multiple_principal_user1(self): - props = dict(PROVIDER_NAME="aws") + props = {"PROVIDER_NAME": "aws"} client = MongoClient(self.uri_multiple, authmechanismproperties=props) client.test.test.find_one() client.close() def test_connect_aws_multiple_principal_user2(self): os.environ["AWS_WEB_IDENTITY_TOKEN_FILE"] = os.path.join(self.token_dir, "test_user2") - props = dict(PROVIDER_NAME="aws") + props = {"PROVIDER_NAME": "aws"} client = MongoClient(self.uri_multiple, authmechanismproperties=props) client.test.test.find_one() client.close() def test_connect_aws_allowed_hosts_ignored(self): - props = dict(PROVIDER_NAME="aws", allowed_hosts=[]) + props = {"PROVIDER_NAME": "aws", "allowed_hosts": []} client = MongoClient(self.uri_multiple, authmechanismproperties=props) client.test.test.find_one() client.close() @@ -198,10 +198,10 @@ def test_valid_callbacks(self): request_cb = self.create_request_cb(expires_in_seconds=60) refresh_cb = self.create_refresh_cb() - props: Dict = dict( - request_token_callback=request_cb, - refresh_token_callback=refresh_cb, - ) + props: Dict = { + "request_token_callback": request_cb, + "refresh_token_callback": refresh_cb, + } client = MongoClient(self.uri_single, authmechanismproperties=props) client.test.test.find_one() client.close() @@ -214,7 +214,7 @@ def test_lock_avoids_extra_callbacks(self): request_cb = self.create_request_cb(sleep=0.5) refresh_cb = self.create_refresh_cb() - props: Dict = dict(request_token_callback=request_cb, refresh_token_callback=refresh_cb) + props: Dict = {"request_token_callback": request_cb, "refresh_token_callback": refresh_cb} def run_test(): client = MongoClient(self.uri_single, authMechanismProperties=props) @@ -239,7 +239,7 @@ def test_request_callback_returns_null(self): def request_token_null(a, b): return None - props: Dict = dict(request_token_callback=request_token_null) + props: Dict = {"request_token_callback": request_token_null} client = MongoClient(self.uri_single, authMechanismProperties=props) with self.assertRaises(ValueError): client.test.test.find_one() @@ -251,9 +251,10 @@ def test_refresh_callback_returns_null(self): def refresh_token_null(a, b): return None - props: Dict = dict( - request_token_callback=request_cb, refresh_token_callback=refresh_token_null - ) + props: Dict = { + "request_token_callback": request_cb, + "refresh_token_callback": refresh_token_null, + } client = MongoClient(self.uri_single, authMechanismProperties=props) client.test.test.find_one() client.close() @@ -265,9 +266,9 @@ def refresh_token_null(a, b): def test_request_callback_invalid_result(self): def request_token_invalid(a, b): - return dict() + return {} - props: Dict = dict(request_token_callback=request_token_invalid) + props: Dict = {"request_token_callback": request_token_invalid} client = MongoClient(self.uri_single, 
authMechanismProperties=props) with self.assertRaises(ValueError): client.test.test.find_one() @@ -278,7 +279,7 @@ def request_cb_extra_value(server_info, context): result["foo"] = "bar" return result - props: Dict = dict(request_token_callback=request_cb_extra_value) + props: Dict = {"request_token_callback": request_cb_extra_value} client = MongoClient(self.uri_single, authMechanismProperties=props) with self.assertRaises(ValueError): client.test.test.find_one() @@ -288,11 +289,12 @@ def test_refresh_callback_missing_data(self): request_cb = self.create_request_cb(expires_in_seconds=60) def refresh_cb_no_token(a, b): - return dict() + return {} - props: Dict = dict( - request_token_callback=request_cb, refresh_token_callback=refresh_cb_no_token - ) + props: Dict = { + "request_token_callback": request_cb, + "refresh_token_callback": refresh_cb_no_token, + } client = MongoClient(self.uri_single, authMechanismProperties=props) client.test.test.find_one() client.close() @@ -310,9 +312,10 @@ def refresh_cb_extra_value(server_info, context): result["foo"] = "bar" return result - props: Dict = dict( - request_token_callback=request_cb, refresh_token_callback=refresh_cb_extra_value - ) + props: Dict = { + "request_token_callback": request_cb, + "refresh_token_callback": refresh_cb_extra_value, + } client = MongoClient(self.uri_single, authMechanismProperties=props) client.test.test.find_one() client.close() @@ -329,7 +332,7 @@ def test_cache_with_refresh(self): request_cb = self.create_request_cb(expires_in_seconds=60) refresh_cb = self.create_refresh_cb() - props: Dict = dict(request_token_callback=request_cb, refresh_token_callback=refresh_cb) + props: Dict = {"request_token_callback": request_cb, "refresh_token_callback": refresh_cb} # Ensure that a ``find`` operation adds credentials to the cache. client = MongoClient(self.uri_single, authMechanismProperties=props) @@ -352,7 +355,7 @@ def test_cache_with_no_refresh(self): # Give a callback response with a valid accessToken and an expiresInSeconds that is within one minute. request_cb = self.create_request_cb() - props = dict(request_token_callback=request_cb) + props = {"request_token_callback": request_cb} client = MongoClient(self.uri_single, authMechanismProperties=props) # Ensure that a ``find`` operation adds credentials to the cache. @@ -373,7 +376,7 @@ def test_cache_with_no_refresh(self): def test_cache_key_includes_callback(self): request_cb = self.create_request_cb() - props: Dict = dict(request_token_callback=request_cb) + props: Dict = {"request_token_callback": request_cb} # Ensure that a ``find`` operation adds a new entry to the cache. client = MongoClient(self.uri_single, authMechanismProperties=props) @@ -397,10 +400,10 @@ def test_cache_clears_on_error(self): # Create a new client with a valid request callback that gives credentials that expire within 5 minutes and a refresh callback that gives invalid credentials. def refresh_cb(a, b): - return dict(access_token="bad") + return {"access_token": "bad"} # Add a token to the cache that will expire soon. - props: Dict = dict(request_token_callback=request_cb, refresh_token_callback=refresh_cb) + props: Dict = {"request_token_callback": request_cb, "refresh_token_callback": refresh_cb} client = MongoClient(self.uri_single, authMechanismProperties=props) client.test.test.find_one() client.close() @@ -421,7 +424,7 @@ def refresh_cb(a, b): def test_cache_is_not_used_in_aws_automatic_workflow(self): # Create a new client using the AWS device workflow. 
# Ensure that a ``find`` operation does not add credentials to the cache. - props = dict(PROVIDER_NAME="aws") + props = {"PROVIDER_NAME": "aws"} client = MongoClient(self.uri_single, authmechanismproperties=props) client.test.test.find_one() client.close() @@ -438,11 +441,11 @@ def test_speculative_auth_success(self): def request_token(a, b): with open(token_file) as fid: token = fid.read() - return dict(access_token=token, expires_in_seconds=1000) + return {"access_token": token, "expires_in_seconds": 1000} # Create a client with a request callback that returns a valid token # that will not expire soon. - props: Dict = dict(request_token_callback=request_token) + props: Dict = {"request_token_callback": request_token} client = MongoClient(self.uri_single, authmechanismproperties=props) # Set a fail point for saslStart commands. @@ -483,7 +486,7 @@ def test_reauthenticate_succeeds(self): refresh_cb = self.create_refresh_cb() # Create a client with the callbacks. - props: Dict = dict(request_token_callback=request_cb, refresh_token_callback=refresh_cb) + props: Dict = {"request_token_callback": request_cb, "refresh_token_callback": refresh_cb} client = MongoClient( self.uri_single, event_listeners=[listener], authmechanismproperties=props ) @@ -536,7 +539,7 @@ def test_reauthenticate_succeeds_bulk_write(self): refresh_cb = self.create_refresh_cb() # Create a client with the callbacks. - props: Dict = dict(request_token_callback=request_cb, refresh_token_callback=refresh_cb) + props: Dict = {"request_token_callback": request_cb, "refresh_token_callback": refresh_cb} client = MongoClient(self.uri_single, authmechanismproperties=props) # Perform a find operation. @@ -563,7 +566,7 @@ def test_reauthenticate_succeeds_bulk_read(self): refresh_cb = self.create_refresh_cb() # Create a client with the callbacks. - props: Dict = dict(request_token_callback=request_cb, refresh_token_callback=refresh_cb) + props: Dict = {"request_token_callback": request_cb, "refresh_token_callback": refresh_cb} client = MongoClient(self.uri_single, authmechanismproperties=props) # Perform a find operation. @@ -594,7 +597,7 @@ def test_reauthenticate_succeeds_cursor(self): refresh_cb = self.create_refresh_cb() # Create a client with the callbacks. - props: Dict = dict(request_token_callback=request_cb, refresh_token_callback=refresh_cb) + props: Dict = {"request_token_callback": request_cb, "refresh_token_callback": refresh_cb} client = MongoClient(self.uri_single, authmechanismproperties=props) # Perform an insert operation. @@ -622,7 +625,7 @@ def test_reauthenticate_succeeds_get_more(self): refresh_cb = self.create_refresh_cb() # Create a client with the callbacks. - props: Dict = dict(request_token_callback=request_cb, refresh_token_callback=refresh_cb) + props: Dict = {"request_token_callback": request_cb, "refresh_token_callback": refresh_cb} client = MongoClient(self.uri_single, authmechanismproperties=props) # Perform an insert operation. @@ -647,7 +650,7 @@ def test_reauthenticate_succeeds_get_more(self): def test_reauthenticate_succeeds_get_more_exhaust(self): # Ensure no mongos - props = dict(PROVIDER_NAME="aws") + props = {"PROVIDER_NAME": "aws"} client = MongoClient(self.uri_single, authmechanismproperties=props) hello = client.admin.command(HelloCompat.LEGACY_CMD) if hello.get("msg") != "isdbgrid": @@ -657,7 +660,7 @@ def test_reauthenticate_succeeds_get_more_exhaust(self): refresh_cb = self.create_refresh_cb() # Create a client with the callbacks. 
- props: Dict = dict(request_token_callback=request_cb, refresh_token_callback=refresh_cb) + props: Dict = {"request_token_callback": request_cb, "refresh_token_callback": refresh_cb} client = MongoClient(self.uri_single, authmechanismproperties=props) # Perform an insert operation. @@ -685,7 +688,7 @@ def test_reauthenticate_succeeds_command(self): refresh_cb = self.create_refresh_cb() # Create a client with the callbacks. - props: Dict = dict(request_token_callback=request_cb, refresh_token_callback=refresh_cb) + props: Dict = {"request_token_callback": request_cb, "refresh_token_callback": refresh_cb} print("start of test") client = MongoClient(self.uri_single, authmechanismproperties=props) @@ -703,7 +706,7 @@ def test_reauthenticate_succeeds_command(self): } ): # Perform a count operation. - cursor = client.test.command(dict(count="test")) + cursor = client.test.command({"count": "test"}) self.assertGreaterEqual(len(list(cursor)), 1) @@ -720,7 +723,7 @@ def test_reauthenticate_retries_and_succeeds_with_cache(self): refresh_cb = self.create_refresh_cb() # Create a client with the callbacks. - props: Dict = dict(request_token_callback=request_cb, refresh_token_callback=refresh_cb) + props: Dict = {"request_token_callback": request_cb, "refresh_token_callback": refresh_cb} client = MongoClient( self.uri_single, event_listeners=[listener], authmechanismproperties=props ) @@ -750,7 +753,7 @@ def test_reauthenticate_fails_with_no_cache(self): refresh_cb = self.create_refresh_cb() # Create a client with the callbacks. - props: Dict = dict(request_token_callback=request_cb, refresh_token_callback=refresh_cb) + props: Dict = {"request_token_callback": request_cb, "refresh_token_callback": refresh_cb} client = MongoClient( self.uri_single, event_listeners=[listener], authmechanismproperties=props ) @@ -778,7 +781,7 @@ def test_late_reauth_avoids_callback(self): request_cb = self.create_request_cb(expires_in_seconds=1e6) refresh_cb = self.create_refresh_cb(expires_in_seconds=1e6) - props: Dict = dict(request_token_callback=request_cb, refresh_token_callback=refresh_cb) + props: Dict = {"request_token_callback": request_cb, "refresh_token_callback": refresh_cb} client1 = MongoClient(self.uri_single, authMechanismProperties=props) client1.test.test.find_one() client2 = MongoClient(self.uri_single, authMechanismProperties=props) diff --git a/test/crud_v2_format.py b/test/crud_v2_format.py index 4118dfef9f..f711a125c2 100644 --- a/test/crud_v2_format.py +++ b/test/crud_v2_format.py @@ -27,7 +27,7 @@ class TestCrudV2(SpecRunner): def allowable_errors(self, op): """Override expected error classes.""" - errors = super(TestCrudV2, self).allowable_errors(op) + errors = super().allowable_errors(op) errors += (ValueError,) return errors @@ -51,4 +51,4 @@ def setup_scenario(self, scenario_def): """Allow specs to override a test's setup.""" # PYTHON-1935 Only create the collection if there is data to insert. 
if scenario_def["data"]: - super(TestCrudV2, self).setup_scenario(scenario_def) + super().setup_scenario(scenario_def) diff --git a/test/mockupdb/operations.py b/test/mockupdb/operations.py index 90d7f27c39..692f9aef04 100644 --- a/test/mockupdb/operations.py +++ b/test/mockupdb/operations.py @@ -112,7 +112,7 @@ ] -_ops_by_name = dict([(op.name, op) for op in operations]) +_ops_by_name = {op.name: op for op in operations} Upgrade = namedtuple("Upgrade", ["name", "function", "old", "new", "wire_version"]) diff --git a/test/mockupdb/test_handshake.py b/test/mockupdb/test_handshake.py index 39188e8ad0..d3f8922c4c 100644 --- a/test/mockupdb/test_handshake.py +++ b/test/mockupdb/test_handshake.py @@ -247,6 +247,7 @@ def responder(request): } ) ) + return None else: return request.reply(**primary_response) diff --git a/test/mockupdb/test_mixed_version_sharded.py b/test/mockupdb/test_mixed_version_sharded.py index dc2cd57380..7813069c99 100644 --- a/test/mockupdb/test_mixed_version_sharded.py +++ b/test/mockupdb/test_mixed_version_sharded.py @@ -46,7 +46,7 @@ def setup_server(self, upgrade): "ismaster", ismaster=True, msg="isdbgrid", maxWireVersion=upgrade.wire_version ) - self.mongoses_uri = "mongodb://%s,%s" % ( + self.mongoses_uri = "mongodb://{},{}".format( self.mongos_old.address_string, self.mongos_new.address_string, ) diff --git a/test/mockupdb/test_mongos_command_read_mode.py b/test/mockupdb/test_mongos_command_read_mode.py index 997f5af118..62bd76cf0f 100644 --- a/test/mockupdb/test_mongos_command_read_mode.py +++ b/test/mockupdb/test_mongos_command_read_mode.py @@ -110,7 +110,7 @@ def generate_mongos_read_mode_tests(): # Skip something like command('foo', read_preference=SECONDARY). continue test = create_mongos_read_mode_test(mode, operation) - test_name = "test_%s_with_mode_%s" % (operation.name.replace(" ", "_"), mode) + test_name = "test_{}_with_mode_{}".format(operation.name.replace(" ", "_"), mode) test.__name__ = test_name setattr(TestMongosCommandReadMode, test_name, test) diff --git a/test/mockupdb/test_network_disconnect_primary.py b/test/mockupdb/test_network_disconnect_primary.py index ea13a3b042..dd14abf84f 100755 --- a/test/mockupdb/test_network_disconnect_primary.py +++ b/test/mockupdb/test_network_disconnect_primary.py @@ -26,7 +26,7 @@ def test_network_disconnect_primary(self): # Application operation fails against primary. Test that topology # type changes from ReplicaSetWithPrimary to ReplicaSetNoPrimary. 
         # http://bit.ly/1B5ttuL
-        primary, secondary = servers = [MockupDB() for _ in range(2)]
+        primary, secondary = servers = (MockupDB(), MockupDB())
         for server in servers:
             server.run()
             self.addCleanup(server.stop)
diff --git a/test/mockupdb/test_op_msg.py b/test/mockupdb/test_op_msg.py
index 22fe38fd02..e8542e2fe5 100755
--- a/test/mockupdb/test_op_msg.py
+++ b/test/mockupdb/test_op_msg.py
@@ -304,7 +304,7 @@ def test(self):

 def create_tests(ops):
     for op in ops:
-        test_name = "test_op_msg_%s" % (op.name,)
+        test_name = f"test_op_msg_{op.name}"
         setattr(TestOpMsg, test_name, operation_test(op))

diff --git a/test/mockupdb/test_op_msg_read_preference.py b/test/mockupdb/test_op_msg_read_preference.py
index b377f4cf69..a3aef1541e 100644
--- a/test/mockupdb/test_op_msg_read_preference.py
+++ b/test/mockupdb/test_op_msg_read_preference.py
@@ -35,7 +35,7 @@ class OpMsgReadPrefBase(unittest.TestCase):

     @classmethod
     def setUpClass(cls):
-        super(OpMsgReadPrefBase, cls).setUpClass()
+        super().setUpClass()

     @classmethod
     def add_test(cls, mode, test_name, test):
@@ -50,7 +50,7 @@ def setup_client(self, read_preference):
 class TestOpMsgMongos(OpMsgReadPrefBase):
     @classmethod
     def setUpClass(cls):
-        super(TestOpMsgMongos, cls).setUpClass()
+        super().setUpClass()
         auto_ismaster = {
             "ismaster": True,
             "msg": "isdbgrid",  # Mongos.
@@ -64,13 +64,13 @@ def setUpClass(cls):
     @classmethod
     def tearDownClass(cls):
         cls.primary.stop()
-        super(TestOpMsgMongos, cls).tearDownClass()
+        super().tearDownClass()


 class TestOpMsgReplicaSet(OpMsgReadPrefBase):
     @classmethod
     def setUpClass(cls):
-        super(TestOpMsgReplicaSet, cls).setUpClass()
+        super().setUpClass()
         cls.primary, cls.secondary = MockupDB(), MockupDB()
         for server in cls.primary, cls.secondary:
             server.run()
@@ -94,7 +94,7 @@ def setUpClass(cls):
     def tearDownClass(cls):
         for server in cls.primary, cls.secondary:
             server.stop()
-        super(TestOpMsgReplicaSet, cls).tearDownClass()
+        super().tearDownClass()

     @classmethod
     def add_test(cls, mode, test_name, test):
@@ -118,7 +118,7 @@ class TestOpMsgSingle(OpMsgReadPrefBase):

     @classmethod
     def setUpClass(cls):
-        super(TestOpMsgSingle, cls).setUpClass()
+        super().setUpClass()
         auto_ismaster = {
             "ismaster": True,
             "minWireVersion": 2,
@@ -131,7 +131,7 @@ def setUpClass(cls):
     @classmethod
     def tearDownClass(cls):
         cls.primary.stop()
-        super(TestOpMsgSingle, cls).tearDownClass()
+        super().tearDownClass()


 def create_op_msg_read_mode_test(mode, operation):
@@ -181,7 +181,7 @@ def generate_op_msg_read_mode_tests():
     for entry in matrix:
         mode, operation = entry
         test = create_op_msg_read_mode_test(mode, operation)
-        test_name = "test_%s_with_mode_%s" % (operation.name.replace(" ", "_"), mode)
+        test_name = "test_{}_with_mode_{}".format(operation.name.replace(" ", "_"), mode)
         test.__name__ = test_name
         for cls in TestOpMsgMongos, TestOpMsgReplicaSet, TestOpMsgSingle:
             cls.add_test(mode, test_name, test)
diff --git a/test/mockupdb/test_reset_and_request_check.py b/test/mockupdb/test_reset_and_request_check.py
index 841cd41846..c554499379 100755
--- a/test/mockupdb/test_reset_and_request_check.py
+++ b/test/mockupdb/test_reset_and_request_check.py
@@ -26,7 +26,7 @@ class TestResetAndRequestCheck(unittest.TestCase):

 class TestResetAndRequestCheck(unittest.TestCase):
     def __init__(self, *args, **kwargs):
-        super(TestResetAndRequestCheck, self).__init__(*args, **kwargs)
+        super().__init__(*args, **kwargs)
         self.ismaster_time = 0.0
         self.client = None
         self.server = None
@@ -143,7 +143,7 @@ def generate_reset_tests():
     for entry in matrix:
         operation, (test_method, name) = entry
         test = 
create_reset_test(operation, test_method) - test_name = "%s_%s" % (name, operation.name.replace(" ", "_")) + test_name = "{}_{}".format(name, operation.name.replace(" ", "_")) test.__name__ = test_name setattr(TestResetAndRequestCheck, test_name, test) diff --git a/test/mockupdb/test_slave_okay_sharded.py b/test/mockupdb/test_slave_okay_sharded.py index 18f2016126..5a590bcf15 100644 --- a/test/mockupdb/test_slave_okay_sharded.py +++ b/test/mockupdb/test_slave_okay_sharded.py @@ -43,7 +43,7 @@ def setup_server(self): "ismaster", minWireVersion=2, maxWireVersion=6, ismaster=True, msg="isdbgrid" ) - self.mongoses_uri = "mongodb://%s,%s" % ( + self.mongoses_uri = "mongodb://{},{}".format( self.mongos1.address_string, self.mongos2.address_string, ) @@ -59,7 +59,7 @@ def test(self): elif operation.op_type == "must-use-primary": slave_ok = False else: - assert False, "unrecognized op_type %r" % operation.op_type + raise AssertionError("unrecognized op_type %r" % operation.op_type) pref = make_read_preference(read_pref_mode_from_name(mode), tag_sets=None) @@ -84,7 +84,7 @@ def generate_slave_ok_sharded_tests(): for entry in matrix: mode, operation = entry test = create_slave_ok_sharded_test(mode, operation) - test_name = "test_%s_with_mode_%s" % (operation.name.replace(" ", "_"), mode) + test_name = "test_{}_with_mode_{}".format(operation.name.replace(" ", "_"), mode) test.__name__ = test_name setattr(TestSlaveOkaySharded, test_name, test) diff --git a/test/mockupdb/test_slave_okay_single.py b/test/mockupdb/test_slave_okay_single.py index 4b2846490f..90b99df496 100644 --- a/test/mockupdb/test_slave_okay_single.py +++ b/test/mockupdb/test_slave_okay_single.py @@ -78,7 +78,7 @@ def generate_slave_ok_single_tests(): mode, (server_type, ismaster), operation = entry test = create_slave_ok_single_test(mode, server_type, ismaster, operation) - test_name = "test_%s_%s_with_mode_%s" % ( + test_name = "test_{}_{}_with_mode_{}".format( operation.name.replace(" ", "_"), server_type, mode, diff --git a/test/mod_wsgi_test/test_client.py b/test/mod_wsgi_test/test_client.py index bfdae9e824..6d3b299700 100644 --- a/test/mod_wsgi_test/test_client.py +++ b/test/mod_wsgi_test/test_client.py @@ -12,8 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -"""Test client for mod_wsgi application, see bug PYTHON-353. 
-""" +"""Test client for mod_wsgi application, see bug PYTHON-353.""" import _thread as thread import sys @@ -91,14 +90,14 @@ class URLGetterThread(threading.Thread): counter = 0 def __init__(self, options, url, nrequests_per_thread): - super(URLGetterThread, self).__init__() + super().__init__() self.options = options self.url = url self.nrequests_per_thread = nrequests_per_thread self.errors = 0 def run(self): - for i in range(self.nrequests_per_thread): + for _i in range(self.nrequests_per_thread): try: get(url) except Exception as e: @@ -128,9 +127,8 @@ def main(options, mode, url): if options.verbose: print( - "Getting %s %s times total in %s threads, " - "%s times per thread" - % ( + "Getting {} {} times total in {} threads, " + "{} times per thread".format( url, nrequests_per_thread * options.nthreads, options.nthreads, @@ -154,7 +152,7 @@ def main(options, mode, url): else: assert mode == "serial" if options.verbose: - print("Getting %s %s times in one thread" % (url, options.nrequests)) + print(f"Getting {url} {options.nrequests} times in one thread") for i in range(1, options.nrequests + 1): try: diff --git a/test/ocsp/test_ocsp.py b/test/ocsp/test_ocsp.py index a0770afefa..dc2650499f 100644 --- a/test/ocsp/test_ocsp.py +++ b/test/ocsp/test_ocsp.py @@ -40,7 +40,7 @@ def _connect(options): - uri = ("mongodb://localhost:27017/?serverSelectionTimeoutMS=%s&tlsCAFile=%s&%s") % ( + uri = ("mongodb://localhost:27017/?serverSelectionTimeoutMS={}&tlsCAFile={}&{}").format( TIMEOUT_MS, CA_FILE, options, diff --git a/test/performance/perf_test.py b/test/performance/perf_test.py index 3cb4b5d5d1..062058e09d 100644 --- a/test/performance/perf_test.py +++ b/test/performance/perf_test.py @@ -58,7 +58,7 @@ def tearDownModule(): print(output) -class Timer(object): +class Timer: def __enter__(self): self.start = time.monotonic() return self @@ -68,7 +68,7 @@ def __exit__(self, *args): self.interval = self.end - self.start -class PerformanceTest(object): +class PerformanceTest: dataset: Any data_size: Any do_task: Any @@ -85,7 +85,7 @@ def tearDown(self): name = self.__class__.__name__ median = self.percentile(50) bytes_per_sec = self.data_size / median - print("Running %s. MEDIAN=%s" % (self.__class__.__name__, self.percentile(50))) + print(f"Running {self.__class__.__name__}. MEDIAN={self.percentile(50)}") result_data.append( { "info": { @@ -113,6 +113,7 @@ def percentile(self, percentile): return sorted_results[percentile_index] else: self.fail("Test execution failed") + return None def runTest(self): results = [] @@ -202,7 +203,7 @@ class TestDocument(PerformanceTest): def setUp(self): # Location of test data. 
with open( - os.path.join(TEST_PATH, os.path.join("single_and_multi_document", self.dataset)), "r" + os.path.join(TEST_PATH, os.path.join("single_and_multi_document", self.dataset)) ) as data: self.document = json.loads(data.read()) @@ -210,7 +211,7 @@ def setUp(self): self.client.drop_database("perftest") def tearDown(self): - super(TestDocument, self).tearDown() + super().tearDown() self.client.drop_database("perftest") def before(self): @@ -225,7 +226,7 @@ class TestFindOneByID(TestDocument, unittest.TestCase): def setUp(self): self.dataset = "tweet.json" - super(TestFindOneByID, self).setUp() + super().setUp() documents = [self.document.copy() for _ in range(NUM_DOCS)] self.corpus = self.client.perftest.corpus @@ -249,7 +250,7 @@ class TestSmallDocInsertOne(TestDocument, unittest.TestCase): def setUp(self): self.dataset = "small_doc.json" - super(TestSmallDocInsertOne, self).setUp() + super().setUp() self.documents = [self.document.copy() for _ in range(NUM_DOCS)] @@ -264,7 +265,7 @@ class TestLargeDocInsertOne(TestDocument, unittest.TestCase): def setUp(self): self.dataset = "large_doc.json" - super(TestLargeDocInsertOne, self).setUp() + super().setUp() self.documents = [self.document.copy() for _ in range(10)] @@ -280,7 +281,7 @@ class TestFindManyAndEmptyCursor(TestDocument, unittest.TestCase): def setUp(self): self.dataset = "tweet.json" - super(TestFindManyAndEmptyCursor, self).setUp() + super().setUp() for _ in range(10): self.client.perftest.command("insert", "corpus", documents=[self.document] * 1000) @@ -301,7 +302,7 @@ class TestSmallDocBulkInsert(TestDocument, unittest.TestCase): def setUp(self): self.dataset = "small_doc.json" - super(TestSmallDocBulkInsert, self).setUp() + super().setUp() self.documents = [self.document.copy() for _ in range(NUM_DOCS)] def before(self): @@ -316,7 +317,7 @@ class TestLargeDocBulkInsert(TestDocument, unittest.TestCase): def setUp(self): self.dataset = "large_doc.json" - super(TestLargeDocBulkInsert, self).setUp() + super().setUp() self.documents = [self.document.copy() for _ in range(10)] def before(self): @@ -342,7 +343,7 @@ def setUp(self): self.bucket = GridFSBucket(self.client.perftest) def tearDown(self): - super(TestGridFsUpload, self).tearDown() + super().tearDown() self.client.drop_database("perftest") def before(self): @@ -368,7 +369,7 @@ def setUp(self): self.uploaded_id = self.bucket.upload_from_stream("gridfstest", gfile) def tearDown(self): - super(TestGridFsDownload, self).tearDown() + super().tearDown() self.client.drop_database("perftest") def do_task(self): @@ -392,14 +393,14 @@ def mp_map(map_func, files): def insert_json_file(filename): assert proc_client is not None - with open(filename, "r") as data: + with open(filename) as data: coll = proc_client.perftest.corpus coll.insert_many([json.loads(line) for line in data]) def insert_json_file_with_file_id(filename): documents = [] - with open(filename, "r") as data: + with open(filename) as data: for line in data: doc = json.loads(line) doc["file"] = filename @@ -461,7 +462,7 @@ def after(self): self.client.perftest.drop_collection("corpus") def tearDown(self): - super(TestJsonMultiImport, self).tearDown() + super().tearDown() self.client.drop_database("perftest") @@ -482,7 +483,7 @@ def do_task(self): mp_map(read_json_file, self.files) def tearDown(self): - super(TestJsonMultiExport, self).tearDown() + super().tearDown() self.client.drop_database("perftest") @@ -505,7 +506,7 @@ def do_task(self): mp_map(insert_gridfs_file, self.files) def tearDown(self): - 
super(TestGridFsMultiFileUpload, self).tearDown() + super().tearDown() self.client.drop_database("perftest") @@ -529,7 +530,7 @@ def do_task(self): mp_map(read_gridfs_file, self.files) def tearDown(self): - super(TestGridFsMultiFileDownload, self).tearDown() + super().tearDown() self.client.drop_database("perftest") diff --git a/test/pymongo_mocks.py b/test/pymongo_mocks.py index 580c5da993..2e7fda21e0 100644 --- a/test/pymongo_mocks.py +++ b/test/pymongo_mocks.py @@ -40,7 +40,7 @@ def __init__(self, client, pair, *args, **kwargs): @contextlib.contextmanager def get_socket(self, handler=None): client = self.client - host_and_port = "%s:%s" % (self.mock_host, self.mock_port) + host_and_port = f"{self.mock_host}:{self.mock_port}" if host_and_port in client.mock_down_hosts: raise AutoReconnect("mock error") @@ -54,7 +54,7 @@ def get_socket(self, handler=None): yield sock_info -class DummyMonitor(object): +class DummyMonitor: def __init__(self, server_description, topology, pool, topology_settings): self._server_description = server_description self.opened = False @@ -99,7 +99,7 @@ def __init__( arbiters=None, down_hosts=None, *args, - **kwargs + **kwargs, ): """A MongoClient connected to the default server, with a mock topology. @@ -144,7 +144,7 @@ def __init__( client_options = client_context.default_client_options.copy() client_options.update(kwargs) - super(MockClient, self).__init__(*args, **client_options) + super().__init__(*args, **client_options) def kill_host(self, host): """Host is like 'a:1'.""" diff --git a/test/qcheck.py b/test/qcheck.py index 4cce7b5bc8..52e4c46b8b 100644 --- a/test/qcheck.py +++ b/test/qcheck.py @@ -116,7 +116,8 @@ def gen_regexp(gen_length): # TODO our patterns only consist of one letter. # this is because of a bug in CPython's regex equality testing, # which I haven't quite tracked down, so I'm just ignoring it... 
- pattern = lambda: "".join(gen_list(choose_lifted("a"), gen_length)()) + def pattern(): + return "".join(gen_list(choose_lifted("a"), gen_length)()) def gen_flags(): flags = 0 @@ -230,9 +231,9 @@ def check(predicate, generator): try: if not predicate(case): reduction = reduce(case, predicate) - counter_examples.append("after %s reductions: %r" % reduction) + counter_examples.append("after {} reductions: {!r}".format(*reduction)) except: - counter_examples.append("%r : %s" % (case, traceback.format_exc())) + counter_examples.append(f"{case!r} : {traceback.format_exc()}") return counter_examples diff --git a/test/sigstop_sigcont.py b/test/sigstop_sigcont.py index 87b4f62038..6f84b6a6a2 100644 --- a/test/sigstop_sigcont.py +++ b/test/sigstop_sigcont.py @@ -84,7 +84,7 @@ def main(uri: str) -> None: if len(sys.argv) != 2: print("unknown or missing options") print(f"usage: python3 {sys.argv[0]} 'mongodb://localhost'") - exit(1) + sys.exit(1) # Enable logs in this format: # 2022-03-30 12:40:55,582 INFO diff --git a/test/test_auth.py b/test/test_auth.py index 7db2247746..f9a9af4d5a 100644 --- a/test/test_auth.py +++ b/test/test_auth.py @@ -67,7 +67,7 @@ class AutoAuthenticateThread(threading.Thread): """ def __init__(self, collection): - super(AutoAuthenticateThread, self).__init__() + super().__init__() self.collection = collection self.success = False @@ -89,10 +89,10 @@ def setUpClass(cls): cls.service_realm_required = ( GSSAPI_SERVICE_REALM is not None and GSSAPI_SERVICE_REALM not in GSSAPI_PRINCIPAL ) - mech_properties = "SERVICE_NAME:%s" % (GSSAPI_SERVICE_NAME,) - mech_properties += ",CANONICALIZE_HOST_NAME:%s" % (GSSAPI_CANONICALIZE,) + mech_properties = f"SERVICE_NAME:{GSSAPI_SERVICE_NAME}" + mech_properties += f",CANONICALIZE_HOST_NAME:{GSSAPI_CANONICALIZE}" if GSSAPI_SERVICE_REALM is not None: - mech_properties += ",SERVICE_REALM:%s" % (GSSAPI_SERVICE_REALM,) + mech_properties += f",SERVICE_REALM:{GSSAPI_SERVICE_REALM}" cls.mech_properties = mech_properties def test_credentials_hashing(self): @@ -111,8 +111,8 @@ def test_credentials_hashing(self): "GSSAPI", None, "user", "pass", {"authmechanismproperties": {"SERVICE_NAME": "B"}}, None ) - self.assertEqual(1, len(set([creds1, creds2]))) - self.assertEqual(3, len(set([creds0, creds1, creds2, creds3]))) + self.assertEqual(1, len({creds1, creds2})) + self.assertEqual(3, len({creds0, creds1, creds2, creds3})) @ignore_deprecations def test_gssapi_simple(self): @@ -160,7 +160,7 @@ def test_gssapi_simple(self): client[GSSAPI_DB].collection.find_one() # Log in using URI, with authMechanismProperties. 
- mech_uri = uri + "&authMechanismProperties=%s" % (self.mech_properties,) + mech_uri = uri + f"&authMechanismProperties={self.mech_properties}" client = MongoClient(mech_uri) client[GSSAPI_DB].collection.find_one() @@ -179,7 +179,7 @@ def test_gssapi_simple(self): client[GSSAPI_DB].list_collection_names() - uri = uri + "&replicaSet=%s" % (str(set_name),) + uri = uri + f"&replicaSet={str(set_name)}" client = MongoClient(uri) client[GSSAPI_DB].list_collection_names() @@ -196,7 +196,7 @@ def test_gssapi_simple(self): client[GSSAPI_DB].list_collection_names() - mech_uri = mech_uri + "&replicaSet=%s" % (str(set_name),) + mech_uri = mech_uri + f"&replicaSet={str(set_name)}" client = MongoClient(mech_uri) client[GSSAPI_DB].list_collection_names() @@ -336,12 +336,12 @@ def auth_string(user, password): class TestSCRAMSHA1(IntegrationTest): @client_context.require_auth def setUp(self): - super(TestSCRAMSHA1, self).setUp() + super().setUp() client_context.create_user("pymongo_test", "user", "pass", roles=["userAdmin", "readWrite"]) def tearDown(self): client_context.drop_user("pymongo_test", "user") - super(TestSCRAMSHA1, self).tearDown() + super().tearDown() def test_scram_sha1(self): host, port = client_context.host, client_context.port @@ -368,16 +368,16 @@ class TestSCRAM(IntegrationTest): @client_context.require_auth @client_context.require_version_min(3, 7, 2) def setUp(self): - super(TestSCRAM, self).setUp() + super().setUp() self._SENSITIVE_COMMANDS = monitoring._SENSITIVE_COMMANDS - monitoring._SENSITIVE_COMMANDS = set([]) + monitoring._SENSITIVE_COMMANDS = set() self.listener = AllowListEventListener("saslStart") def tearDown(self): monitoring._SENSITIVE_COMMANDS = self._SENSITIVE_COMMANDS client_context.client.testscram.command("dropAllUsersFromDatabase") client_context.client.drop_database("testscram") - super(TestSCRAM, self).tearDown() + super().tearDown() def test_scram_skip_empty_exchange(self): listener = AllowListEventListener("saslStart", "saslContinue") @@ -597,14 +597,14 @@ def test_scram_threaded(self): class TestAuthURIOptions(IntegrationTest): @client_context.require_auth def setUp(self): - super(TestAuthURIOptions, self).setUp() + super().setUp() client_context.create_user("admin", "admin", "pass") client_context.create_user("pymongo_test", "user", "pass", ["userAdmin", "readWrite"]) def tearDown(self): client_context.drop_user("pymongo_test", "user") client_context.drop_user("admin", "admin") - super(TestAuthURIOptions, self).tearDown() + super().tearDown() def test_uri_options(self): # Test default to admin diff --git a/test/test_auth_spec.py b/test/test_auth_spec.py index 78f4d21929..ebcc4eeb7d 100644 --- a/test/test_auth_spec.py +++ b/test/test_auth_spec.py @@ -67,7 +67,7 @@ def run_test(self): expected = credential["mechanism_properties"] if expected is not None: actual = credentials.mechanism_properties - for key, val in expected.items(): + for key, _val in expected.items(): if "SERVICE_NAME" in expected: self.assertEqual(actual.service_name, expected["SERVICE_NAME"]) elif "CANONICALIZE_HOST_NAME" in expected: @@ -91,7 +91,7 @@ def run_test(self): actual.refresh_token_callback, expected["refresh_token_callback"] ) else: - self.fail("Unhandled property: %s" % (key,)) + self.fail(f"Unhandled property: {key}") else: if credential["mechanism"] == "MONGODB-AWS": self.assertIsNone(credentials.mechanism_properties.aws_session_token) @@ -111,7 +111,7 @@ def create_tests(): continue test_method = create_test(test_case) name = str(test_case["description"].lower().replace(" ", 
"_")) - setattr(TestAuthSpec, "test_%s_%s" % (test_suffix, name), test_method) + setattr(TestAuthSpec, f"test_{test_suffix}_{name}", test_method) create_tests() diff --git a/test/test_binary.py b/test/test_binary.py index 65abdca796..158a990290 100644 --- a/test/test_binary.py +++ b/test/test_binary.py @@ -122,15 +122,15 @@ def test_equality(self): def test_repr(self): one = Binary(b"hello world") - self.assertEqual(repr(one), "Binary(%s, 0)" % (repr(b"hello world"),)) + self.assertEqual(repr(one), "Binary({}, 0)".format(repr(b"hello world"))) two = Binary(b"hello world", 2) - self.assertEqual(repr(two), "Binary(%s, 2)" % (repr(b"hello world"),)) + self.assertEqual(repr(two), "Binary({}, 2)".format(repr(b"hello world"))) three = Binary(b"\x08\xFF") - self.assertEqual(repr(three), "Binary(%s, 0)" % (repr(b"\x08\xFF"),)) + self.assertEqual(repr(three), "Binary({}, 0)".format(repr(b"\x08\xFF"))) four = Binary(b"\x08\xFF", 2) - self.assertEqual(repr(four), "Binary(%s, 2)" % (repr(b"\x08\xFF"),)) + self.assertEqual(repr(four), "Binary({}, 2)".format(repr(b"\x08\xFF"))) five = Binary(b"test", 100) - self.assertEqual(repr(five), "Binary(%s, 100)" % (repr(b"test"),)) + self.assertEqual(repr(five), "Binary({}, 100)".format(repr(b"test"))) def test_hash(self): one = Binary(b"hello world") @@ -351,7 +351,7 @@ class TestUuidSpecExplicitCoding(unittest.TestCase): @classmethod def setUpClass(cls): - super(TestUuidSpecExplicitCoding, cls).setUpClass() + super().setUpClass() cls.uuid = uuid.UUID("00112233445566778899AABBCCDDEEFF") @staticmethod @@ -452,7 +452,7 @@ class TestUuidSpecImplicitCoding(IntegrationTest): @classmethod def setUpClass(cls): - super(TestUuidSpecImplicitCoding, cls).setUpClass() + super().setUpClass() cls.uuid = uuid.UUID("00112233445566778899AABBCCDDEEFF") @staticmethod diff --git a/test/test_bson.py b/test/test_bson.py index a8fd1fef45..a6e6352333 100644 --- a/test/test_bson.py +++ b/test/test_bson.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # # Copyright 2009-present MongoDB, Inc. 
# @@ -370,7 +369,7 @@ def test_invalid_decodes(self): ), ] for i, data in enumerate(bad_bsons): - msg = "bad_bson[{}]".format(i) + msg = f"bad_bson[{i}]" with self.assertRaises(InvalidBSON, msg=msg): decode_all(data) with self.assertRaises(InvalidBSON, msg=msg): @@ -491,7 +490,7 @@ def test_basic_encode(self): def test_unknown_type(self): # Repr value differs with major python version - part = "type %r for fieldname 'foo'" % (b"\x14",) + part = "type {!r} for fieldname 'foo'".format(b"\x14") docs = [ b"\x0e\x00\x00\x00\x14foo\x00\x01\x00\x00\x00\x00", (b"\x16\x00\x00\x00\x04foo\x00\x0c\x00\x00\x00\x140\x00\x01\x00\x00\x00\x00\x00"), @@ -648,7 +647,7 @@ def test_small_long_encode_decode(self): encoded1 = encode({"x": 256}) decoded1 = decode(encoded1)["x"] self.assertEqual(256, decoded1) - self.assertEqual(type(256), type(decoded1)) + self.assertEqual(int, type(decoded1)) encoded2 = encode({"x": Int64(256)}) decoded2 = decode(encoded2)["x"] @@ -925,7 +924,7 @@ def test_bad_id_keys(self): def test_bson_encode_thread_safe(self): def target(i): for j in range(1000): - my_int = type("MyInt_%s_%s" % (i, j), (int,), {}) + my_int = type(f"MyInt_{i}_{j}", (int,), {}) bson.encode({"my_int": my_int()}) threads = [ExceptionCatchingThread(target=target, args=(i,)) for i in range(3)] @@ -939,7 +938,7 @@ def target(i): self.assertIsNone(t.exc) def test_raise_invalid_document(self): - class Wrapper(object): + class Wrapper: def __init__(self, val): self.val = val diff --git a/test/test_bulk.py b/test/test_bulk.py index ac7073c0ef..6a2af3143c 100644 --- a/test/test_bulk.py +++ b/test/test_bulk.py @@ -50,12 +50,12 @@ class BulkTestBase(IntegrationTest): @classmethod def setUpClass(cls): - super(BulkTestBase, cls).setUpClass() + super().setUpClass() cls.coll = cls.db.test cls.coll_w0 = cls.coll.with_options(write_concern=WriteConcern(w=0)) def setUp(self): - super(BulkTestBase, self).setUp() + super().setUp() self.coll.drop() def assertEqualResponse(self, expected, actual): @@ -93,7 +93,7 @@ def assertEqualResponse(self, expected, actual): self.assertEqual( actual.get(key), value, - "%r value of %r does not match expected %r" % (key, actual.get(key), value), + f"{key!r} value of {actual.get(key)!r} does not match expected {value!r}", ) def assertEqualUpsert(self, expected, actual): @@ -793,10 +793,10 @@ class BulkAuthorizationTestBase(BulkTestBase): @client_context.require_auth @client_context.require_no_api_version def setUpClass(cls): - super(BulkAuthorizationTestBase, cls).setUpClass() + super().setUpClass() def setUp(self): - super(BulkAuthorizationTestBase, self).setUp() + super().setUp() client_context.create_user(self.db.name, "readonly", "pw", ["read"]) self.db.command( "createRole", @@ -902,7 +902,7 @@ def test_no_remove(self): InsertOne({"x": 3}), # Never attempted. 
] self.assertRaises(OperationFailure, coll.bulk_write, requests) - self.assertEqual(set([1, 2]), set(self.coll.distinct("x"))) + self.assertEqual({1, 2}, set(self.coll.distinct("x"))) class TestBulkWriteConcern(BulkTestBase): @@ -911,7 +911,7 @@ class TestBulkWriteConcern(BulkTestBase): @classmethod def setUpClass(cls): - super(TestBulkWriteConcern, cls).setUpClass() + super().setUpClass() cls.w = client_context.w cls.secondary = None if cls.w is not None and cls.w > 1: diff --git a/test/test_change_stream.py b/test/test_change_stream.py index 2388a6e1f4..c9ddfcd137 100644 --- a/test/test_change_stream.py +++ b/test/test_change_stream.py @@ -104,7 +104,8 @@ def get_resume_token(self, invalidate=False): def get_start_at_operation_time(self): """Get an operationTime. Advances the operation clock beyond the most - recently returned timestamp.""" + recently returned timestamp. + """ optime = self.client.admin.command("ping")["operationTime"] return Timestamp(optime.time, optime.inc + 1) @@ -120,7 +121,7 @@ def kill_change_stream_cursor(self, change_stream): client._close_cursor_now(cursor.cursor_id, address) -class APITestsMixin(object): +class APITestsMixin: @no_type_check def test_watch(self): with self.change_stream( @@ -208,7 +209,7 @@ def test_try_next_runs_one_getmore(self): # Stream still works after a resume. coll.insert_one({"_id": 3}) wait_until(lambda: stream.try_next() is not None, "get change from try_next") - self.assertEqual(set(listener.started_command_names()), set(["getMore"])) + self.assertEqual(set(listener.started_command_names()), {"getMore"}) self.assertIsNone(stream.try_next()) @no_type_check @@ -249,7 +250,7 @@ def test_start_at_operation_time(self): coll.insert_many([{"data": i} for i in range(ndocs)]) with self.change_stream(start_at_operation_time=optime) as cs: - for i in range(ndocs): + for _i in range(ndocs): cs.next() @no_type_check @@ -443,7 +444,7 @@ def test_start_after_resume_process_without_changes(self): self.assertEqual(change["fullDocument"], {"_id": 2}) -class ProseSpecTestsMixin(object): +class ProseSpecTestsMixin: @no_type_check def _client_with_listener(self, *commands): listener = AllowListEventListener(*commands) @@ -461,7 +462,8 @@ def _populate_and_exhaust_change_stream(self, change_stream, batch_size=3): def _get_expected_resume_token_legacy(self, stream, listener, previous_change=None): """Predicts what the resume token should currently be for server versions that don't support postBatchResumeToken. Assumes the stream - has never returned any changes if previous_change is None.""" + has never returned any changes if previous_change is None. + """ if previous_change is None: agg_cmd = listener.started_events[0] stage = agg_cmd.command["pipeline"][0]["$changeStream"] @@ -474,7 +476,8 @@ def _get_expected_resume_token(self, stream, listener, previous_change=None): versions that support postBatchResumeToken. Assumes the stream has never returned any changes if previous_change is None. Assumes listener is a AllowListEventListener that listens for aggregate and - getMore commands.""" + getMore commands. 
+ """ if previous_change is None or stream._cursor._has_next(): token = self._get_expected_resume_token_legacy(stream, listener, previous_change) if token is not None: @@ -767,14 +770,14 @@ class TestClusterChangeStream(TestChangeStreamBase, APITestsMixin): @client_context.require_version_min(4, 0, 0, -1) @client_context.require_change_streams def setUpClass(cls): - super(TestClusterChangeStream, cls).setUpClass() + super().setUpClass() cls.dbs = [cls.db, cls.client.pymongo_test_2] @classmethod def tearDownClass(cls): for db in cls.dbs: cls.client.drop_database(db) - super(TestClusterChangeStream, cls).tearDownClass() + super().tearDownClass() def change_stream_with_client(self, client, *args, **kwargs): return client.watch(*args, **kwargs) @@ -828,7 +831,7 @@ class TestDatabaseChangeStream(TestChangeStreamBase, APITestsMixin): @client_context.require_version_min(4, 0, 0, -1) @client_context.require_change_streams def setUpClass(cls): - super(TestDatabaseChangeStream, cls).setUpClass() + super().setUpClass() def change_stream_with_client(self, client, *args, **kwargs): return client[self.db.name].watch(*args, **kwargs) @@ -913,7 +916,7 @@ class TestCollectionChangeStream(TestChangeStreamBase, APITestsMixin, ProseSpecT @classmethod @client_context.require_change_streams def setUpClass(cls): - super(TestCollectionChangeStream, cls).setUpClass() + super().setUpClass() def setUp(self): # Use a new collection for each test. @@ -1044,17 +1047,17 @@ class TestAllLegacyScenarios(IntegrationTest): @classmethod @client_context.require_connection def setUpClass(cls): - super(TestAllLegacyScenarios, cls).setUpClass() + super().setUpClass() cls.listener = AllowListEventListener("aggregate", "getMore") cls.client = rs_or_single_client(event_listeners=[cls.listener]) @classmethod def tearDownClass(cls): cls.client.close() - super(TestAllLegacyScenarios, cls).tearDownClass() + super().tearDownClass() def setUp(self): - super(TestAllLegacyScenarios, self).setUp() + super().setUp() self.listener.reset() def setUpCluster(self, scenario_dict): @@ -1088,7 +1091,8 @@ def setFailPoint(self, scenario_dict): def assert_list_contents_are_subset(self, superlist, sublist): """Check that each element in sublist is a subset of the corresponding - element in superlist.""" + element in superlist. + """ self.assertEqual(len(superlist), len(sublist)) for sup, sub in zip(superlist, sublist): if isinstance(sub, dict): @@ -1104,7 +1108,7 @@ def assert_dict_is_subset(self, superdict, subdict): exempt_fields = ["documentKey", "_id", "getMore"] for key, value in subdict.items(): if key not in superdict: - self.fail("Key %s not found in %s" % (key, superdict)) + self.fail(f"Key {key} not found in {superdict}") if isinstance(value, dict): self.assert_dict_is_subset(superdict[key], value) continue diff --git a/test/test_client.py b/test/test_client.py index 624c460c08..ec2b4bac97 100644 --- a/test/test_client.py +++ b/test/test_client.py @@ -325,7 +325,7 @@ def test_metadata(self): self.assertRaises(TypeError, MongoClient, driver=("Foo", "1", "a")) # Test appending to driver info. 
metadata["driver"]["name"] = "PyMongo|FooDriver" - metadata["driver"]["version"] = "%s|1.2.3" % (_METADATA["driver"]["version"],) + metadata["driver"]["version"] = "{}|1.2.3".format(_METADATA["driver"]["version"]) client = MongoClient( "foo", 27017, @@ -335,7 +335,7 @@ def test_metadata(self): ) options = client._MongoClient__options self.assertEqual(options.pool_options.metadata, metadata) - metadata["platform"] = "%s|FooPlatform" % (_METADATA["platform"],) + metadata["platform"] = "{}|FooPlatform".format(_METADATA["platform"]) client = MongoClient( "foo", 27017, @@ -347,7 +347,7 @@ def test_metadata(self): self.assertEqual(options.pool_options.metadata, metadata) def test_kwargs_codec_options(self): - class MyFloatType(object): + class MyFloatType: def __init__(self, x): self.__x = x @@ -704,7 +704,7 @@ def test_init_disconnected_with_auth(self): self.assertRaises(ConnectionFailure, c.pymongo_test.test.find_one) def test_equality(self): - seed = "%s:%s" % list(self.client._topology_settings.seeds)[0] + seed = "{}:{}".format(*list(self.client._topology_settings.seeds)[0]) c = rs_or_single_client(seed, connect=False) self.addCleanup(c.close) self.assertEqual(client_context.client, c) @@ -723,7 +723,7 @@ def test_equality(self): ) def test_hashable(self): - seed = "%s:%s" % list(self.client._topology_settings.seeds)[0] + seed = "{}:{}".format(*list(self.client._topology_settings.seeds)[0]) c = rs_or_single_client(seed, connect=False) self.addCleanup(c.close) self.assertIn(c, {client_context.client}) @@ -735,7 +735,7 @@ def test_host_w_port(self): with self.assertRaises(ValueError): connected( MongoClient( - "%s:1234567" % (client_context.host,), + f"{client_context.host}:1234567", connectTimeoutMS=1, serverSelectionTimeoutMS=10, ) @@ -1002,7 +1002,7 @@ def test_username_and_password(self): @client_context.require_auth def test_lazy_auth_raises_operation_failure(self): lazy_client = rs_or_single_client_noauth( - "mongodb://user:wrong@%s/pymongo_test" % (client_context.host,), connect=False + f"mongodb://user:wrong@{client_context.host}/pymongo_test", connect=False ) assertRaisesExactly(OperationFailure, lazy_client.test.collection.find_one) @@ -1160,7 +1160,7 @@ def test_ipv6(self): raise SkipTest("Need the ipaddress module to test with SSL") if client_context.auth_enabled: - auth_str = "%s:%s@" % (db_user, db_pwd) + auth_str = f"{db_user}:{db_pwd}@" else: auth_str = "" @@ -1533,7 +1533,7 @@ def test_reset_during_update_pool(self): # Continuously reset the pool. 
class ResetPoolThread(threading.Thread): def __init__(self, pool): - super(ResetPoolThread, self).__init__() + super().__init__() self.running = True self.pool = pool @@ -1657,7 +1657,7 @@ def test_network_error_message(self): {"mode": {"times": 1}, "data": {"closeConnection": True, "failCommands": ["find"]}} ): assert client.address is not None - expected = "%s:%s: " % client.address + expected = "{}:{}: ".format(*client.address) with self.assertRaisesRegex(AutoReconnect, expected): client.pymongo_test.test.find_one({}) @@ -1836,7 +1836,7 @@ class TestExhaustCursor(IntegrationTest): """Test that clients properly handle errors from exhaust cursors.""" def setUp(self): - super(TestExhaustCursor, self).setUp() + super().setUp() if client_context.is_mongos: raise SkipTest("mongos doesn't support exhaust, SERVER-2627") @@ -2188,23 +2188,33 @@ def _test_network_error(self, operation_callback): self.assertEqual(7, sd_b.max_wire_version) def test_network_error_on_query(self): - callback = lambda client: client.db.collection.find_one() + def callback(client): + return client.db.collection.find_one() + self._test_network_error(callback) def test_network_error_on_insert(self): - callback = lambda client: client.db.collection.insert_one({}) + def callback(client): + return client.db.collection.insert_one({}) + self._test_network_error(callback) def test_network_error_on_update(self): - callback = lambda client: client.db.collection.update_one({}, {"$unset": "x"}) + def callback(client): + return client.db.collection.update_one({}, {"$unset": "x"}) + self._test_network_error(callback) def test_network_error_on_replace(self): - callback = lambda client: client.db.collection.replace_one({}, {}) + def callback(client): + return client.db.collection.replace_one({}, {}) + self._test_network_error(callback) def test_network_error_on_delete(self): - callback = lambda client: client.db.collection.delete_many({}) + def callback(client): + return client.db.collection.delete_many({}) + self._test_network_error(callback) @@ -2227,7 +2237,7 @@ def test_rs_client_does_not_maintain_pool_to_arbiters(self): wait_until(lambda: len(c.nodes) == 3, "connect") self.assertEqual(c.address, ("a", 1)) - self.assertEqual(c.arbiters, set([("c", 3)])) + self.assertEqual(c.arbiters, {("c", 3)}) # Assert that we create 2 and only 2 pooled connections. listener.wait_for_event(monitoring.ConnectionReadyEvent, 2) self.assertEqual(listener.event_count(monitoring.ConnectionCreatedEvent), 2) diff --git a/test/test_client_context.py b/test/test_client_context.py index 9ee5b96d61..72da8dbc34 100644 --- a/test/test_client_context.py +++ b/test/test_client_context.py @@ -28,8 +28,9 @@ def test_must_connect(self): self.assertTrue( client_context.connected, "client context must be connected when " - "PYMONGO_MUST_CONNECT is set. Failed attempts:\n%s" - % (client_context.connection_attempt_info(),), + "PYMONGO_MUST_CONNECT is set. Failed attempts:\n{}".format( + client_context.connection_attempt_info() + ), ) def test_serverless(self): @@ -39,8 +40,9 @@ def test_serverless(self): self.assertTrue( client_context.connected and client_context.serverless, "client context must be connected to serverless when " - "TEST_SERVERLESS is set. Failed attempts:\n%s" - % (client_context.connection_attempt_info(),), + "TEST_SERVERLESS is set. 
Failed attempts:\n{}".format( + client_context.connection_attempt_info() + ), ) def test_enableTestCommands_is_disabled(self): diff --git a/test/test_cmap.py b/test/test_cmap.py index 360edef0e8..3b84524f44 100644 --- a/test/test_cmap.py +++ b/test/test_cmap.py @@ -116,7 +116,7 @@ def wait_for_event(self, op): timeout = op.get("timeout", 10000) / 1000.0 wait_until( lambda: self.listener.event_count(event) >= count, - "find %s %s event(s)" % (count, event), + f"find {count} {event} event(s)", timeout=timeout, ) @@ -191,11 +191,11 @@ def check_events(self, events, ignore): """Check the events of a test.""" actual_events = self.actual_events(ignore) for actual, expected in zip(actual_events, events): - self.logs.append("Checking event actual: %r vs expected: %r" % (actual, expected)) + self.logs.append(f"Checking event actual: {actual!r} vs expected: {expected!r}") self.check_event(actual, expected) if len(events) > len(actual_events): - self.fail("missing events: %r" % (events[len(actual_events) :],)) + self.fail(f"missing events: {events[len(actual_events) :]!r}") def check_error(self, actual, expected): message = expected.pop("message") @@ -260,9 +260,9 @@ def run_scenario(self, scenario_def, test): self.pool = list(client._topology._servers.values())[0].pool # Map of target names to Thread objects. - self.targets: dict = dict() + self.targets: dict = {} # Map of label names to Connection objects - self.labels: dict = dict() + self.labels: dict = {} def cleanup(): for t in self.targets.values(): @@ -285,7 +285,7 @@ def cleanup(): self.check_events(test["events"], test["ignore"]) except Exception: # Print the events after a test failure. - print("\nFailed test: %r" % (test["description"],)) + print("\nFailed test: {!r}".format(test["description"])) print("Operations:") for op in self._ops: print(op) @@ -332,8 +332,8 @@ def test_2_all_client_pools_have_same_options(self): self.assertEqual(pool.opts, pool_opts) def test_3_uri_connection_pool_options(self): - opts = "&".join(["%s=%s" % (k, v) for k, v in self.POOL_OPTIONS.items()]) - uri = "mongodb://%s/?%s" % (client_context.pair, opts) + opts = "&".join([f"{k}={v}" for k, v in self.POOL_OPTIONS.items()]) + uri = f"mongodb://{client_context.pair}/?{opts}" client = rs_or_single_client(uri) self.addCleanup(client.close) pool_opts = get_pool(client).opts diff --git a/test/test_code.py b/test/test_code.py index 9ff305e39a..9e44ca4962 100644 --- a/test/test_code.py +++ b/test/test_code.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # # Copyright 2009-present MongoDB, Inc. 
# @@ -67,7 +66,7 @@ def test_repr(self): c = Code("hello world", {"blah": 3}) self.assertEqual(repr(c), "Code('hello world', {'blah': 3})") c = Code("\x08\xFF") - self.assertEqual(repr(c), "Code(%s, None)" % (repr("\x08\xFF"),)) + self.assertEqual(repr(c), "Code({}, None)".format(repr("\x08\xFF"))) def test_equality(self): b = Code("hello") diff --git a/test/test_collation.py b/test/test_collation.py index 18f8bc78ac..7f4bbf4750 100644 --- a/test/test_collation.py +++ b/test/test_collation.py @@ -96,7 +96,7 @@ class TestCollation(IntegrationTest): @classmethod @client_context.require_connection def setUpClass(cls): - super(TestCollation, cls).setUpClass() + super().setUpClass() cls.listener = EventListener() cls.client = rs_or_single_client(event_listeners=[cls.listener]) cls.db = cls.client.pymongo_test @@ -110,11 +110,11 @@ def tearDownClass(cls): cls.warn_context.__exit__() cls.warn_context = None cls.client.close() - super(TestCollation, cls).tearDownClass() + super().tearDownClass() def tearDown(self): self.listener.reset() - super(TestCollation, self).tearDown() + super().tearDown() def last_command_started(self): return self.listener.started_events[-1].command diff --git a/test/test_collection.py b/test/test_collection.py index e36d6663f0..ca657f0099 100644 --- a/test/test_collection.py +++ b/test/test_collection.py @@ -1,5 +1,3 @@ -# -*- coding: utf-8 -*- - # Copyright 2009-present MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -151,7 +149,7 @@ class TestCollection(IntegrationTest): @classmethod def setUpClass(cls): - super(TestCollection, cls).setUpClass() + super().setUpClass() cls.w = client_context.w # type: ignore @classmethod @@ -373,7 +371,7 @@ def test_list_indexes(self): db.test.insert_one({}) # create collection def map_indexes(indexes): - return dict([(index["name"], index) for index in indexes]) + return {index["name"]: index for index in indexes} indexes = list(db.test.list_indexes()) self.assertEqual(len(indexes), 1) @@ -485,7 +483,7 @@ def test_index_2dsphere(self): db.test.drop_indexes() self.assertEqual("geo_2dsphere", db.test.create_index([("geo", GEOSPHERE)])) - for dummy, info in db.test.index_information().items(): + for _dummy, info in db.test.index_information().items(): field, idx_type = info["key"][0] if field == "geo" and idx_type == "2dsphere": break @@ -504,7 +502,7 @@ def test_index_hashed(self): db.test.drop_indexes() self.assertEqual("a_hashed", db.test.create_index([("a", HASHED)])) - for dummy, info in db.test.index_information().items(): + for _dummy, info in db.test.index_information().items(): field, idx_type = info["key"][0] if field == "a" and idx_type == "hashed": break @@ -1638,8 +1636,8 @@ def test_find_one(self): self.assertTrue("hello" in db.test.find_one(projection=("hello",))) self.assertTrue("hello" not in db.test.find_one(projection=("foo",))) - self.assertTrue("hello" in db.test.find_one(projection=set(["hello"]))) - self.assertTrue("hello" not in db.test.find_one(projection=set(["foo"]))) + self.assertTrue("hello" in db.test.find_one(projection={"hello"})) + self.assertTrue("hello" not in db.test.find_one(projection={"foo"})) self.assertTrue("hello" in db.test.find_one(projection=frozenset(["hello"]))) self.assertTrue("hello" not in db.test.find_one(projection=frozenset(["foo"]))) diff --git a/test/test_comment.py b/test/test_comment.py index 85e5470d74..ea44c74257 100644 --- a/test/test_comment.py +++ b/test/test_comment.py @@ -28,7 +28,7 @@ from pymongo.operations import IndexModel -class 
Empty(object): +class Empty: def __getattr__(self, item): try: self.__dict__[item] diff --git a/test/test_common.py b/test/test_common.py index ff50878ea1..76367ffa0c 100644 --- a/test/test_common.py +++ b/test/test_common.py @@ -148,14 +148,12 @@ def test_mongo_client(self): self.assertTrue(new_coll.insert_one(doc)) self.assertRaises(OperationFailure, coll.insert_one, doc) - m = rs_or_single_client( - "mongodb://%s/" % (pair,), replicaSet=client_context.replica_set_name - ) + m = rs_or_single_client(f"mongodb://{pair}/", replicaSet=client_context.replica_set_name) coll = m.pymongo_test.write_concern_test self.assertRaises(OperationFailure, coll.insert_one, doc) m = rs_or_single_client( - "mongodb://%s/?w=0" % (pair,), replicaSet=client_context.replica_set_name + f"mongodb://{pair}/?w=0", replicaSet=client_context.replica_set_name ) coll = m.pymongo_test.write_concern_test @@ -163,7 +161,7 @@ def test_mongo_client(self): # Equality tests direct = connected(single_client(w=0)) - direct2 = connected(single_client("mongodb://%s/?w=0" % (pair,), **self.credentials)) + direct2 = connected(single_client(f"mongodb://{pair}/?w=0", **self.credentials)) self.assertEqual(direct, direct2) self.assertFalse(direct != direct2) diff --git a/test/test_connections_survive_primary_stepdown_spec.py b/test/test_connections_survive_primary_stepdown_spec.py index fd9f126551..e09ba72a5c 100644 --- a/test/test_connections_survive_primary_stepdown_spec.py +++ b/test/test_connections_survive_primary_stepdown_spec.py @@ -40,7 +40,7 @@ class TestConnectionsSurvivePrimaryStepDown(IntegrationTest): @classmethod @client_context.require_replica_set def setUpClass(cls): - super(TestConnectionsSurvivePrimaryStepDown, cls).setUpClass() + super().setUpClass() cls.listener = CMAPListener() cls.client = rs_or_single_client( event_listeners=[cls.listener], retryWrites=False, heartbeatFrequencyMS=500 diff --git a/test/test_crud_v1.py b/test/test_crud_v1.py index ca4b84c26d..589da0a7d7 100644 --- a/test/test_crud_v1.py +++ b/test/test_crud_v1.py @@ -55,7 +55,7 @@ def check_result(self, expected_result, result): if isinstance(result, _WriteResult): for res in expected_result: prop = camel_to_snake(res) - msg = "%s : %r != %r" % (prop, expected_result, result) + msg = f"{prop} : {expected_result!r} != {result!r}" # SPEC-869: Only BulkWriteResult has upserted_count. 
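# check_result maps the spec files' camelCase result fields onto
# snake_case attributes of pymongo result objects; an illustrative
# stand-in for the suite's camel_to_snake helper (not the real
# implementation) is a single regex pass:
import re

def camel_to_snake_sketch(camel):
    # Insert "_" before every interior uppercase letter, then lowercase.
    return re.sub(r"(?<!^)(?=[A-Z])", "_", camel).lower()

assert camel_to_snake_sketch("upsertedCount") == "upserted_count"
assert camel_to_snake_sketch("matchedCount") == "matched_count"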
if prop == "upserted_count" and not isinstance(result, BulkWriteResult): if result.upserted_id is not None: # type: ignore diff --git a/test/test_cursor.py b/test/test_cursor.py index e96efb92b0..f8820f8aa2 100644 --- a/test/test_cursor.py +++ b/test/test_cursor.py @@ -945,7 +945,7 @@ def test_getitem_slice_index(self): for a, b in zip(count(99), self.db.test.find()[99:]): self.assertEqual(a, b["i"]) - for i in self.db.test.find()[1000:]: + for _i in self.db.test.find()[1000:]: self.fail() self.assertEqual(5, len(list(self.db.test.find()[20:25]))) @@ -1079,7 +1079,7 @@ def test_concurrent_close(self): def iterate_cursor(): while cursor.alive: - for doc in cursor: + for _doc in cursor: pass t = threading.Thread(target=iterate_cursor) @@ -1430,7 +1430,7 @@ def test_monitoring(self): class TestRawBatchCommandCursor(IntegrationTest): @classmethod def setUpClass(cls): - super(TestRawBatchCommandCursor, cls).setUpClass() + super().setUpClass() def test_aggregate_raw(self): c = self.db.test diff --git a/test/test_custom_types.py b/test/test_custom_types.py index 676b3b6af0..14d7b4b05d 100644 --- a/test/test_custom_types.py +++ b/test/test_custom_types.py @@ -81,7 +81,7 @@ class DecimalCodec(DecimalDecoder, DecimalEncoder): DECIMAL_CODECOPTS = CodecOptions(type_registry=TypeRegistry([DecimalCodec()])) -class UndecipherableInt64Type(object): +class UndecipherableInt64Type: def __init__(self, value): self.value = value @@ -146,7 +146,7 @@ def transform_bson(self, value): return ResumeTokenToNanDecoder -class CustomBSONTypeTests(object): +class CustomBSONTypeTests: @no_type_check def roundtrip(self, doc): bsonbytes = encode(doc, codec_options=self.codecopts) @@ -164,9 +164,9 @@ def test_encode_decode_roundtrip(self): def test_decode_all(self): documents = [] for dec in range(3): - documents.append({"average": Decimal("56.4%s" % (dec,))}) + documents.append({"average": Decimal(f"56.4{dec}")}) - bsonstream = bytes() + bsonstream = b"" for doc in documents: bsonstream += encode(doc, codec_options=self.codecopts) @@ -287,7 +287,7 @@ def run_test(base, attrs, fail): else: codec() - class MyType(object): + class MyType: pass run_test( @@ -350,11 +350,11 @@ class TestBSONCustomTypeEncoderAndFallbackEncoderTandem(unittest.TestCase): @classmethod def setUpClass(cls): - class TypeA(object): + class TypeA: def __init__(self, x): self.value = x - class TypeB(object): + class TypeB: def __init__(self, x): self.value = x @@ -442,12 +442,12 @@ class TestTypeRegistry(unittest.TestCase): @classmethod def setUpClass(cls): - class MyIntType(object): + class MyIntType: def __init__(self, x): assert isinstance(x, int) self.x = x - class MyStrType(object): + class MyStrType: def __init__(self, x): assert isinstance(x, str) self.x = x @@ -553,18 +553,18 @@ def test_initialize_fail(self): with self.assertRaisesRegex(TypeError, err_msg): TypeRegistry([type("AnyType", (object,), {})()]) - err_msg = "fallback_encoder %r is not a callable" % (True,) + err_msg = f"fallback_encoder {True!r} is not a callable" with self.assertRaisesRegex(TypeError, err_msg): TypeRegistry([], True) # type: ignore[arg-type] - err_msg = "fallback_encoder %r is not a callable" % ("hello",) + err_msg = "fallback_encoder {!r} is not a callable".format("hello") with self.assertRaisesRegex(TypeError, err_msg): TypeRegistry(fallback_encoder="hello") # type: ignore[arg-type] def test_type_registry_repr(self): codec_instances = [codec() for codec in self.codecs] type_registry = TypeRegistry(codec_instances) - r = "TypeRegistry(type_codecs=%r, 
fallback_encoder=%r)" % (codec_instances, None) + r = f"TypeRegistry(type_codecs={codec_instances!r}, fallback_encoder={None!r})" self.assertEqual(r, repr(type_registry)) def test_type_registry_eq(self): @@ -777,7 +777,7 @@ def test_grid_out_custom_opts(self): self.assertRaises(AttributeError, setattr, two, attr, 5) -class ChangeStreamsWCustomTypesTestMixin(object): +class ChangeStreamsWCustomTypesTestMixin: @no_type_check def change_stream(self, *args, **kwargs): return self.watched_target.watch(*args, **kwargs) @@ -899,7 +899,7 @@ class TestCollectionChangeStreamsWCustomTypes(IntegrationTest, ChangeStreamsWCus @classmethod @client_context.require_change_streams def setUpClass(cls): - super(TestCollectionChangeStreamsWCustomTypes, cls).setUpClass() + super().setUpClass() cls.db.test.delete_many({}) def tearDown(self): @@ -918,7 +918,7 @@ class TestDatabaseChangeStreamsWCustomTypes(IntegrationTest, ChangeStreamsWCusto @client_context.require_version_min(4, 0, 0) @client_context.require_change_streams def setUpClass(cls): - super(TestDatabaseChangeStreamsWCustomTypes, cls).setUpClass() + super().setUpClass() cls.db.test.delete_many({}) def tearDown(self): @@ -937,7 +937,7 @@ class TestClusterChangeStreamsWCustomTypes(IntegrationTest, ChangeStreamsWCustom @client_context.require_version_min(4, 0, 0) @client_context.require_change_streams def setUpClass(cls): - super(TestClusterChangeStreamsWCustomTypes, cls).setUpClass() + super().setUpClass() cls.db.test.delete_many({}) def tearDown(self): diff --git a/test/test_data_lake.py b/test/test_data_lake.py index 4fa38435a3..ce210010bd 100644 --- a/test/test_data_lake.py +++ b/test/test_data_lake.py @@ -52,7 +52,7 @@ class TestDataLakeProse(IntegrationTest): @classmethod @client_context.require_data_lake def setUpClass(cls): - super(TestDataLakeProse, cls).setUpClass() + super().setUpClass() # Test killCursors def test_1(self): @@ -100,7 +100,7 @@ class DataLakeTestSpec(TestCrudV2): @classmethod @client_context.require_data_lake def setUpClass(cls): - super(DataLakeTestSpec, cls).setUpClass() + super().setUpClass() def setup_scenario(self, scenario_def): # Spec tests MUST NOT insert data/drop collection for diff --git a/test/test_database.py b/test/test_database.py index b6be380aab..140d169db3 100644 --- a/test/test_database.py +++ b/test/test_database.py @@ -137,7 +137,7 @@ def test_get_coll(self): def test_repr(self): self.assertEqual( repr(Database(self.client, "pymongo_test")), - "Database(%r, %s)" % (self.client, repr("pymongo_test")), + "Database({!r}, {})".format(self.client, repr("pymongo_test")), ) def test_create_collection(self): @@ -262,8 +262,8 @@ def test_list_collections(self): # Checking if is there any collection which don't exists. if ( - len(set(colls) - set(["test", "test.mike"])) == 0 - or len(set(colls) - set(["test", "test.mike", "system.indexes"])) == 0 + len(set(colls) - {"test", "test.mike"}) == 0 + or len(set(colls) - {"test", "test.mike", "system.indexes"}) == 0 ): self.assertTrue(True) else: @@ -301,10 +301,7 @@ def test_list_collections(self): coll_cnt = {} # Checking if is there any collection which don't exists. 
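# The set-literal rewrites in the hunks below rest on a plain
# equivalence: a literal builds the same set as set() over a list,
# without the intermediate list or the builtin name lookup.
assert {"test", "test.mike"} == set(["test", "test.mike"])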
- if ( - len(set(colls) - set(["test"])) == 0 - or len(set(colls) - set(["test", "system.indexes"])) == 0 - ): + if len(set(colls) - {"test"}) == 0 or len(set(colls) - {"test", "system.indexes"}) == 0: self.assertTrue(True) else: self.assertTrue(False) @@ -439,7 +436,7 @@ def test_id_ordering(self): ) cursor = db.test.find() for x in cursor: - for (k, v) in x.items(): + for (k, _v) in x.items(): self.assertEqual(k, "_id") break diff --git a/test/test_dbref.py b/test/test_dbref.py index 281aef473f..107d95d230 100644 --- a/test/test_dbref.py +++ b/test/test_dbref.py @@ -64,7 +64,7 @@ def test_repr(self): ) self.assertEqual( repr(DBRef("coll", ObjectId("1234567890abcdef12345678"))), - "DBRef(%s, ObjectId('1234567890abcdef12345678'))" % (repr("coll"),), + "DBRef({}, ObjectId('1234567890abcdef12345678'))".format(repr("coll")), ) self.assertEqual(repr(DBRef("coll", 5, foo="bar")), "DBRef('coll', 5, foo='bar')") self.assertEqual( diff --git a/test/test_discovery_and_monitoring.py b/test/test_discovery_and_monitoring.py index 9af8185ab5..8a14ecfb2a 100644 --- a/test/test_discovery_and_monitoring.py +++ b/test/test_discovery_and_monitoring.py @@ -104,15 +104,15 @@ def got_app_error(topology, app_error): elif error_type == "timeout": raise NetworkTimeout("mock network timeout error") else: - raise AssertionError("unknown error type: %s" % (error_type,)) - assert False + raise AssertionError(f"unknown error type: {error_type}") + raise AssertionError except (AutoReconnect, NotPrimaryError, OperationFailure) as e: if when == "beforeHandshakeCompletes": completed_handshake = False elif when == "afterHandshakeCompletes": completed_handshake = True else: - assert False, "Unknown when field %s" % (when,) + raise AssertionError(f"Unknown when field {when}") topology.handle_error( server_address, @@ -201,7 +201,7 @@ def run_scenario(self): for i, phase in enumerate(scenario_def["phases"]): # Including the phase description makes failures easier to debug. description = phase.get("description", str(i)) - with assertion_context("phase: %s" % (description,)): + with assertion_context(f"phase: {description}"): for response in phase.get("responses", []): got_hello(c, common.partition_node(response[0]), response[1]) @@ -228,7 +228,7 @@ def create_tests(): # Construct test from scenario. new_test = create_test(scenario_def) - test_name = "test_%s_%s" % (dirname, os.path.splitext(filename)[0]) + test_name = f"test_{dirname}_{os.path.splitext(filename)[0]}" new_test.__name__ = test_name setattr(TestAllScenarios, new_test.__name__, new_test) diff --git a/test/test_encryption.py b/test/test_encryption.py index af8f54cd07..95f18eb307 100644 --- a/test/test_encryption.py +++ b/test/test_encryption.py @@ -207,7 +207,7 @@ class EncryptionIntegrationTest(IntegrationTest): @unittest.skipUnless(_HAVE_PYMONGOCRYPT, "pymongocrypt is not installed") @client_context.require_version_min(4, 2, -1) def setUpClass(cls): - super(EncryptionIntegrationTest, cls).setUpClass() + super().setUpClass() def assertEncrypted(self, val): self.assertIsInstance(val, Binary) @@ -295,7 +295,7 @@ def _test_auto_encrypt(self, opts): # Collection.distinct auto decrypts. decrypted_ssns = encrypted_coll.distinct("ssn") - self.assertEqual(set(decrypted_ssns), set(d["ssn"] for d in docs)) + self.assertEqual(set(decrypted_ssns), {d["ssn"] for d in docs}) # Make sure the field is actually encrypted. 
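# "Actually encrypted" below means the stored value is BSON Binary with
# subtype 6, the subtype reserved for client-side field level
# encryption ciphertext; a standalone check (assuming `val` came back
# from the server) is just:
from bson.binary import Binary

def looks_encrypted(val):
    # Subtype 6 marks FLE ciphertext; plaintext round-trips as its
    # native BSON type instead.
    return isinstance(val, Binary) and val.subtype == 6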
for encrypted_doc in self.db.test.find(): @@ -391,7 +391,7 @@ class TestClientMaxWireVersion(IntegrationTest): @classmethod @unittest.skipUnless(_HAVE_PYMONGOCRYPT, "pymongocrypt is not installed") def setUpClass(cls): - super(TestClientMaxWireVersion, cls).setUpClass() + super().setUpClass() @client_context.require_version_max(4, 0, 99) def test_raise_max_wire_version_error(self): @@ -585,7 +585,7 @@ class TestSpec(SpecRunner): @classmethod @unittest.skipUnless(_HAVE_PYMONGOCRYPT, "pymongocrypt is not installed") def setUpClass(cls): - super(TestSpec, cls).setUpClass() + super().setUpClass() def parse_auto_encrypt_opts(self, opts): """Parse clientOptions.autoEncryptOpts.""" @@ -630,14 +630,14 @@ def parse_client_options(self, opts): if encrypt_opts: opts["auto_encryption_opts"] = self.parse_auto_encrypt_opts(encrypt_opts) - return super(TestSpec, self).parse_client_options(opts) + return super().parse_client_options(opts) def get_object_name(self, op): """Default object is collection.""" return op.get("object", "collection") def maybe_skip_scenario(self, test): - super(TestSpec, self).maybe_skip_scenario(test) + super().maybe_skip_scenario(test) desc = test["description"].lower() if "type=symbol" in desc: self.skipTest("PyMongo does not support the symbol type") @@ -674,7 +674,7 @@ def setup_scenario(self, scenario_def): def allowable_errors(self, op): """Override expected error classes.""" - errors = super(TestSpec, self).allowable_errors(op) + errors = super().allowable_errors(op) # An updateOne test expects encryption to error when no $ operator # appears but pymongo raises a client side ValueError in this case. if op["name"] == "updateOne": @@ -773,7 +773,7 @@ class TestDataKeyDoubleEncryption(EncryptionIntegrationTest): "No environment credentials are set", ) def setUpClass(cls): - super(TestDataKeyDoubleEncryption, cls).setUpClass() + super().setUpClass() cls.listener = OvertCommandListener() cls.client = rs_or_single_client(event_listeners=[cls.listener]) cls.client.db.coll.drop() @@ -818,7 +818,7 @@ def run_test(self, provider_name): # Create data key. master_key: Any = self.MASTER_KEYS[provider_name] datakey_id = self.client_encryption.create_data_key( - provider_name, master_key=master_key, key_alt_names=["%s_altname" % (provider_name,)] + provider_name, master_key=master_key, key_alt_names=[f"{provider_name}_altname"] ) self.assertBinaryUUID(datakey_id) cmd = self.listener.started_events[-1] @@ -830,20 +830,20 @@ def run_test(self, provider_name): # Encrypt by key_id. encrypted = self.client_encryption.encrypt( - "hello %s" % (provider_name,), + f"hello {provider_name}", Algorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic, key_id=datakey_id, ) self.assertEncrypted(encrypted) self.client_encrypted.db.coll.insert_one({"_id": provider_name, "value": encrypted}) doc_decrypted = self.client_encrypted.db.coll.find_one({"_id": provider_name}) - self.assertEqual(doc_decrypted["value"], "hello %s" % (provider_name,)) # type: ignore + self.assertEqual(doc_decrypted["value"], f"hello {provider_name}") # type: ignore # Encrypt by key_alt_name. 
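# ClientEncryption.encrypt() accepts exactly one of key_id or
# key_alt_name; both resolve to the data key created above, so the
# deterministic algorithm yields identical ciphertext either way.
# Usage sketch with hypothetical values (not runnable without a live
# client_encryption):
#
#   ct_by_id = client_encryption.encrypt(
#       "hello aws", Algorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic,
#       key_id=datakey_id)
#   ct_by_name = client_encryption.encrypt(
#       "hello aws", Algorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic,
#       key_alt_name="aws_altname")
#   assert ct_by_id == ct_by_name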
encrypted_altname = self.client_encryption.encrypt( - "hello %s" % (provider_name,), + f"hello {provider_name}", Algorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic, - key_alt_name="%s_altname" % (provider_name,), + key_alt_name=f"{provider_name}_altname", ) self.assertEqual(encrypted_altname, encrypted) @@ -965,7 +965,7 @@ class TestCorpus(EncryptionIntegrationTest): @classmethod @unittest.skipUnless(any(AWS_CREDS.values()), "AWS environment credentials are not set") def setUpClass(cls): - super(TestCorpus, cls).setUpClass() + super().setUpClass() @staticmethod def kms_providers(): @@ -1046,17 +1046,17 @@ def _test_corpus(self, opts): self.assertIn(kms, ("local", "aws", "azure", "gcp", "kmip")) if identifier == "id": if kms == "local": - kwargs = dict(key_id=LOCAL_KEY_ID) + kwargs = {"key_id": LOCAL_KEY_ID} elif kms == "aws": - kwargs = dict(key_id=AWS_KEY_ID) + kwargs = {"key_id": AWS_KEY_ID} elif kms == "azure": - kwargs = dict(key_id=AZURE_KEY_ID) + kwargs = {"key_id": AZURE_KEY_ID} elif kms == "gcp": - kwargs = dict(key_id=GCP_KEY_ID) + kwargs = {"key_id": GCP_KEY_ID} else: - kwargs = dict(key_id=KMIP_KEY_ID) + kwargs = {"key_id": KMIP_KEY_ID} else: - kwargs = dict(key_alt_name=kms) + kwargs = {"key_alt_name": kms} self.assertIn(value["algo"], ("det", "rand")) if value["algo"] == "det": @@ -1069,12 +1069,12 @@ def _test_corpus(self, opts): value["value"], algo, **kwargs # type: ignore[arg-type] ) if not value["allowed"]: - self.fail("encrypt should have failed: %r: %r" % (key, value)) + self.fail(f"encrypt should have failed: {key!r}: {value!r}") corpus_copied[key]["value"] = encrypted_val except Exception: if value["allowed"]: tb = traceback.format_exc() - self.fail("encrypt failed: %r: %r, traceback: %s" % (key, value, tb)) + self.fail(f"encrypt failed: {key!r}: {value!r}, traceback: {tb}") client_encrypted.db.coll.insert_one(corpus_copied) corpus_decrypted = client_encrypted.db.coll.find_one() @@ -1141,7 +1141,7 @@ class TestBsonSizeBatches(EncryptionIntegrationTest): @classmethod def setUpClass(cls): - super(TestBsonSizeBatches, cls).setUpClass() + super().setUpClass() db = client_context.client.db cls.coll = db.coll cls.coll.drop() @@ -1172,7 +1172,7 @@ def setUpClass(cls): def tearDownClass(cls): cls.coll_encrypted.drop() cls.client_encrypted.close() - super(TestBsonSizeBatches, cls).tearDownClass() + super().tearDownClass() def test_01_insert_succeeds_under_2MiB(self): doc = {"_id": "over_2mib_under_16mib", "unencrypted": "a" * _2_MiB} @@ -1242,7 +1242,7 @@ class TestCustomEndpoint(EncryptionIntegrationTest): "No environment credentials are set", ) def setUpClass(cls): - super(TestCustomEndpoint, cls).setUpClass() + super().setUpClass() def setUp(self): kms_providers = { @@ -1442,7 +1442,7 @@ def test_12_kmip_master_key_invalid_endpoint(self): self.client_encryption.create_data_key("kmip", key) -class AzureGCPEncryptionTestMixin(object): +class AzureGCPEncryptionTestMixin: DEK = None KMS_PROVIDER_MAP = None KEYVAULT_DB = "keyvault" @@ -1514,7 +1514,7 @@ def setUpClass(cls): cls.KMS_PROVIDER_MAP = {"azure": AZURE_CREDS} cls.DEK = json_data(BASE, "custom", "azure-dek.json") cls.SCHEMA_MAP = json_data(BASE, "custom", "azure-gcp-schema.json") - super(TestAzureEncryption, cls).setUpClass() + super().setUpClass() def test_explicit(self): return self._test_explicit( @@ -1540,7 +1540,7 @@ def setUpClass(cls): cls.KMS_PROVIDER_MAP = {"gcp": GCP_CREDS} cls.DEK = json_data(BASE, "custom", "gcp-dek.json") cls.SCHEMA_MAP = json_data(BASE, "custom", "azure-gcp-schema.json") - 
super(TestGCPEncryption, cls).setUpClass() + super().setUpClass() def test_explicit(self): return self._test_explicit( @@ -1985,7 +1985,7 @@ def listener(): class TestKmsTLSProse(EncryptionIntegrationTest): @unittest.skipUnless(any(AWS_CREDS.values()), "AWS environment credentials are not set") def setUp(self): - super(TestKmsTLSProse, self).setUp() + super().setUp() self.patch_system_certs(CA_PEM) self.client_encrypted = ClientEncryption( {"aws": AWS_CREDS}, "keyvault.datakeys", self.client, OPTS @@ -2023,7 +2023,7 @@ def test_invalid_hostname_in_kms_certificate(self): class TestKmsTLSOptions(EncryptionIntegrationTest): @unittest.skipUnless(any(AWS_CREDS.values()), "AWS environment credentials are not set") def setUp(self): - super(TestKmsTLSOptions, self).setUp() + super().setUp() # 1, create client with only tlsCAFile. providers: dict = copy.deepcopy(ALL_KMS_PROVIDERS) providers["azure"]["identityPlatformEndpoint"] = "127.0.0.1:8002" @@ -2391,7 +2391,7 @@ def run_test(self, src_provider, dst_provider): # https://github.com/mongodb/specifications/blob/5cf3ed/source/client-side-encryption/tests/README.rst#on-demand-aws-credentials class TestOnDemandAWSCredentials(EncryptionIntegrationTest): def setUp(self): - super(TestOnDemandAWSCredentials, self).setUp() + super().setUp() self.master_key = { "region": "us-east-1", "key": ("arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0"), diff --git a/test/test_examples.py b/test/test_examples.py index c08cb17e20..b9508d4f1e 100644 --- a/test/test_examples.py +++ b/test/test_examples.py @@ -34,7 +34,7 @@ class TestSampleShellCommands(IntegrationTest): @classmethod def setUpClass(cls): - super(TestSampleShellCommands, cls).setUpClass() + super().setUpClass() # Run once before any tests run. cls.db.inventory.drop() @@ -757,18 +757,18 @@ def insert_docs(): # 1. The database for reactive, real-time applications # Start Changestream Example 1 cursor = db.inventory.watch() - document = next(cursor) + next(cursor) # End Changestream Example 1 # Start Changestream Example 2 cursor = db.inventory.watch(full_document="updateLookup") - document = next(cursor) + next(cursor) # End Changestream Example 2 # Start Changestream Example 3 resume_token = cursor.resume_token cursor = db.inventory.watch(resume_after=resume_token) - document = next(cursor) + next(cursor) # End Changestream Example 3 # Start Changestream Example 4 @@ -777,7 +777,7 @@ def insert_docs(): {"$addFields": {"newField": "this is an added field!"}}, ] cursor = db.inventory.watch(pipeline=pipeline) - document = next(cursor) + next(cursor) # End Changestream Example 4 finally: done = True @@ -898,7 +898,7 @@ def test_misc(self): with client.start_session() as session: collection.insert_one({"_id": 1}, session=session) collection.update_one({"_id": 1}, {"$set": {"a": 1}}, session=session) - for doc in collection.find({}, session=session): + for _doc in collection.find({}, session=session): pass # 3. Exploiting the power of arrays @@ -1078,7 +1078,7 @@ def update_employee_info(session): with client.start_session() as session: try: run_transaction_with_retry(update_employee_info, session) - except Exception as exc: + except Exception: # Do something with error. 
raise @@ -1089,7 +1089,9 @@ def update_employee_info(session): self.assertIsNotNone(employee) self.assertEqual(employee["status"], "Inactive") - MongoClient = lambda _: rs_client() + def MongoClient(_): + return rs_client() + uriString = None # Start Transactions withTxn API Example 1 @@ -1179,25 +1181,27 @@ class TestVersionedApiExamples(IntegrationTest): @client_context.require_version_min(4, 7) def test_versioned_api(self): # Versioned API examples - MongoClient = lambda _, server_api: rs_client(server_api=server_api, connect=False) + def MongoClient(_, server_api): + return rs_client(server_api=server_api, connect=False) + uri = None # Start Versioned API Example 1 from pymongo.server_api import ServerApi - client = MongoClient(uri, server_api=ServerApi("1")) + MongoClient(uri, server_api=ServerApi("1")) # End Versioned API Example 1 # Start Versioned API Example 2 - client = MongoClient(uri, server_api=ServerApi("1", strict=True)) + MongoClient(uri, server_api=ServerApi("1", strict=True)) # End Versioned API Example 2 # Start Versioned API Example 3 - client = MongoClient(uri, server_api=ServerApi("1", strict=False)) + MongoClient(uri, server_api=ServerApi("1", strict=False)) # End Versioned API Example 3 # Start Versioned API Example 4 - client = MongoClient(uri, server_api=ServerApi("1", deprecation_errors=True)) + MongoClient(uri, server_api=ServerApi("1", deprecation_errors=True)) # End Versioned API Example 4 @unittest.skip("PYTHON-3167 count has been added to API version 1") @@ -1339,7 +1343,7 @@ def test_snapshot_query(self): # Start Snapshot Query Example 2 db = client.retail with client.start_session(snapshot=True) as s: - total = db.sales.aggregate( + db.sales.aggregate( [ { "$match": { diff --git a/test/test_grid_file.py b/test/test_grid_file.py index 8b46133a60..04003289e6 100644 --- a/test/test_grid_file.py +++ b/test/test_grid_file.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # # Copyright 2009-present MongoDB, Inc. # @@ -14,8 +13,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -"""Tests for the grid_file module. -""" +"""Tests for the grid_file module.""" import datetime import io @@ -462,12 +460,10 @@ def test_multiple_reads(self): def test_readline(self): f = GridIn(self.db.fs, chunkSize=5) f.write( - ( - b"""Hello world, + b"""Hello world, How are you? Hope all is well. Bye""" - ) ) f.close() @@ -498,12 +494,10 @@ def test_readline(self): def test_readlines(self): f = GridIn(self.db.fs, chunkSize=5) f.write( - ( - b"""Hello world, + b"""Hello world, How are you? Hope all is well. Bye""" - ) ) f.close() diff --git a/test/test_gridfs.py b/test/test_gridfs.py index cfa6e43e85..4ba8467d22 100644 --- a/test/test_gridfs.py +++ b/test/test_gridfs.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # # Copyright 2009-present MongoDB, Inc. # @@ -14,8 +13,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -"""Tests for the gridfs package. 
-""" +"""Tests for the gridfs package.""" import datetime import sys @@ -90,7 +88,7 @@ class TestGridfs(IntegrationTest): @classmethod def setUpClass(cls): - super(TestGridfs, cls).setUpClass() + super().setUpClass() cls.fs = gridfs.GridFS(cls.db) cls.alt = gridfs.GridFS(cls.db, "alt") @@ -141,7 +139,7 @@ def test_list(self): self.fs.put(b"foo", filename="test") self.fs.put(b"", filename="hello world") - self.assertEqual(set(["mike", "test", "hello world"]), set(self.fs.list())) + self.assertEqual({"mike", "test", "hello world"}, set(self.fs.list())) def test_empty_file(self): oid = self.fs.put(b"") @@ -210,7 +208,7 @@ def test_alt_collection(self): self.alt.put(b"foo", filename="test") self.alt.put(b"", filename="hello world") - self.assertEqual(set(["mike", "test", "hello world"]), set(self.alt.list())) + self.assertEqual({"mike", "test", "hello world"}, set(self.alt.list())) def test_threaded_reads(self): self.fs.put(b"hello", _id="test") @@ -394,7 +392,7 @@ def test_missing_length_iter(self): f = self.fs.get_last_version(filename="empty") def iterate_file(grid_file): - for chunk in grid_file: + for _chunk in grid_file: pass return True @@ -496,7 +494,7 @@ class TestGridfsReplicaSet(IntegrationTest): @classmethod @client_context.require_secondaries_count(1) def setUpClass(cls): - super(TestGridfsReplicaSet, cls).setUpClass() + super().setUpClass() @classmethod def tearDownClass(cls): diff --git a/test/test_gridfs_bucket.py b/test/test_gridfs_bucket.py index b6a33b4ecc..e5695f2c38 100644 --- a/test/test_gridfs_bucket.py +++ b/test/test_gridfs_bucket.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # # Copyright 2015-present MongoDB, Inc. # @@ -14,8 +13,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -"""Tests for the gridfs package. 
-""" +"""Tests for the gridfs package.""" import datetime import itertools import threading @@ -75,7 +73,7 @@ class TestGridfs(IntegrationTest): @classmethod def setUpClass(cls): - super(TestGridfs, cls).setUpClass() + super().setUpClass() cls.fs = gridfs.GridFSBucket(cls.db) cls.alt = gridfs.GridFSBucket(cls.db, bucket_name="alt") @@ -196,8 +194,8 @@ def test_alt_collection(self): self.alt.upload_from_stream("hello world", b"") self.assertEqual( - set(["mike", "test", "hello world", "foo"]), - set(k["filename"] for k in list(self.db.alt.files.find())), + {"mike", "test", "hello world", "foo"}, + {k["filename"] for k in list(self.db.alt.files.find())}, ) def test_threaded_reads(self): @@ -442,7 +440,7 @@ class TestGridfsBucketReplicaSet(IntegrationTest): @classmethod @client_context.require_secondaries_count(1) def setUpClass(cls): - super(TestGridfsBucketReplicaSet, cls).setUpClass() + super().setUpClass() @classmethod def tearDownClass(cls): diff --git a/test/test_load_balancer.py b/test/test_load_balancer.py index d4de8debf5..df68b3e626 100644 --- a/test/test_load_balancer.py +++ b/test/test_load_balancer.py @@ -150,7 +150,7 @@ def test_session_gc(self): class PoolLocker(ExceptionCatchingThread): def __init__(self, pool): - super(PoolLocker, self).__init__(target=self.lock_pool) + super().__init__(target=self.lock_pool) self.pool = pool self.daemon = True self.locked = threading.Event() diff --git a/test/test_mongos_load_balancing.py b/test/test_mongos_load_balancing.py index e39940f56b..9e83e879a5 100644 --- a/test/test_mongos_load_balancing.py +++ b/test/test_mongos_load_balancing.py @@ -36,7 +36,7 @@ def setUpModule(): class SimpleOp(threading.Thread): def __init__(self, client): - super(SimpleOp, self).__init__() + super().__init__() self.client = client self.passed = False @@ -58,9 +58,9 @@ def do_simple_op(client, nthreads): def writable_addresses(topology): - return set( + return { server.description.address for server in topology.select_servers(writable_server_selector) - ) + } class TestMongosLoadBalancing(MockClientTest): @@ -133,7 +133,7 @@ def test_local_threshold(self): topology = client._topology # All are within a 30-ms latency window, see self.mock_client(). - self.assertEqual(set([("a", 1), ("b", 2), ("c", 3)]), writable_addresses(topology)) + self.assertEqual({("a", 1), ("b", 2), ("c", 3)}, writable_addresses(topology)) # No error client.admin.command("ping") @@ -143,7 +143,7 @@ def test_local_threshold(self): # No error client.db.command("ping") # Our chosen mongos goes down. - client.kill_host("%s:%s" % next(iter(client.nodes))) + client.kill_host("{}:{}".format(*next(iter(client.nodes)))) try: client.db.command("ping") except: @@ -174,13 +174,13 @@ def test_load_balancing(self): self.assertEqual(TOPOLOGY_TYPE.Sharded, topology.description.topology_type) # a and b are within the 15-ms latency window, see self.mock_client(). - self.assertEqual(set([("a", 1), ("b", 2)]), writable_addresses(topology)) + self.assertEqual({("a", 1), ("b", 2)}, writable_addresses(topology)) client.mock_rtts["a:1"] = 0.045 # Discover only b is within latency window. 
wait_until( - lambda: set([("b", 2)]) == writable_addresses(topology), + lambda: {("b", 2)} == writable_addresses(topology), 'discover server "a" is too far', ) diff --git a/test/test_monitor.py b/test/test_monitor.py index 85cfb0bc40..9ee3c52ff5 100644 --- a/test/test_monitor.py +++ b/test/test_monitor.py @@ -66,7 +66,7 @@ def test_cleanup_executors_on_client_del(self): del client for ref, name in executor_refs: - wait_until(partial(unregistered, ref), "unregister executor: %s" % (name,), timeout=5) + wait_until(partial(unregistered, ref), f"unregister executor: {name}", timeout=5) def test_cleanup_executors_on_client_close(self): client = create_client() @@ -76,9 +76,7 @@ def test_cleanup_executors_on_client_close(self): client.close() for executor in executors: - wait_until( - lambda: executor._stopped, "closed executor: %s" % (executor._name,), timeout=5 - ) + wait_until(lambda: executor._stopped, f"closed executor: {executor._name}", timeout=5) if __name__ == "__main__": diff --git a/test/test_monitoring.py b/test/test_monitoring.py index 39b3d2f896..c7c793b382 100644 --- a/test/test_monitoring.py +++ b/test/test_monitoring.py @@ -39,18 +39,18 @@ class TestCommandMonitoring(IntegrationTest): @classmethod @client_context.require_connection def setUpClass(cls): - super(TestCommandMonitoring, cls).setUpClass() + super().setUpClass() cls.listener = EventListener() cls.client = rs_or_single_client(event_listeners=[cls.listener], retryWrites=False) @classmethod def tearDownClass(cls): cls.client.close() - super(TestCommandMonitoring, cls).tearDownClass() + super().tearDownClass() def tearDown(self): self.listener.reset() - super(TestCommandMonitoring, self).tearDown() + super().tearDown() def test_started_simple(self): self.client.pymongo_test.command("ping") @@ -232,40 +232,40 @@ def _test_find_options(self, query, expected_cmd): tuple(cursor) def test_find_options(self): - query = dict( - filter={}, - hint=[("x", 1)], - max_time_ms=10000, - max={"x": 10}, - min={"x": -10}, - return_key=True, - show_record_id=True, - projection={"x": False}, - skip=1, - no_cursor_timeout=True, - sort=[("_id", 1)], - allow_partial_results=True, - comment="this is a test", - batch_size=2, - ) + query = { + "filter": {}, + "hint": [("x", 1)], + "max_time_ms": 10000, + "max": {"x": 10}, + "min": {"x": -10}, + "return_key": True, + "show_record_id": True, + "projection": {"x": False}, + "skip": 1, + "no_cursor_timeout": True, + "sort": [("_id", 1)], + "allow_partial_results": True, + "comment": "this is a test", + "batch_size": 2, + } - cmd = dict( - find="test", - filter={}, - hint=SON([("x", 1)]), - comment="this is a test", - maxTimeMS=10000, - max={"x": 10}, - min={"x": -10}, - returnKey=True, - showRecordId=True, - sort=SON([("_id", 1)]), - projection={"x": False}, - skip=1, - batchSize=2, - noCursorTimeout=True, - allowPartialResults=True, - ) + cmd = { + "find": "test", + "filter": {}, + "hint": SON([("x", 1)]), + "comment": "this is a test", + "maxTimeMS": 10000, + "max": {"x": 10}, + "min": {"x": -10}, + "returnKey": True, + "showRecordId": True, + "sort": SON([("_id", 1)]), + "projection": {"x": False}, + "skip": 1, + "batchSize": 2, + "noCursorTimeout": True, + "allowPartialResults": True, + } if client_context.version < (4, 1, 0, -1): query["max_scan"] = 10 @@ -276,9 +276,9 @@ def test_find_options(self): @client_context.require_version_max(3, 7, 2) def test_find_snapshot(self): # Test "snapshot" parameter separately, can't combine with "sort". 
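# The dict() -> literal rewrites here and in the next hunk change only
# spelling: keyword-argument dict() calls and dict literals build equal
# mappings, and the literal skips a global name lookup.
assert dict(find="test", filter={}) == {"find": "test", "filter": {}}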
- query = dict(filter={}, snapshot=True) + query = {"filter": {}, "snapshot": True} - cmd = dict(find="test", filter={}, snapshot=True) + cmd = {"find": "test", "filter": {}, "snapshot": True} self._test_find_options(query, cmd) @@ -1049,7 +1049,7 @@ def test_write_errors(self): errors.extend(succeed.reply["writeErrors"]) self.assertEqual(2, len(errors)) - fields = set(["index", "code", "errmsg"]) + fields = {"index", "code", "errmsg"} for error in errors: self.assertTrue(fields.issubset(set(error))) @@ -1113,7 +1113,7 @@ class TestGlobalListener(IntegrationTest): @classmethod @client_context.require_connection def setUpClass(cls): - super(TestGlobalListener, cls).setUpClass() + super().setUpClass() cls.listener = EventListener() # We plan to call register(), which internally modifies _LISTENERS. cls.saved_listeners = copy.deepcopy(monitoring._LISTENERS) @@ -1126,10 +1126,10 @@ def setUpClass(cls): def tearDownClass(cls): monitoring._LISTENERS = cls.saved_listeners cls.client.close() - super(TestGlobalListener, cls).tearDownClass() + super().tearDownClass() def setUp(self): - super(TestGlobalListener, self).setUp() + super().setUp() self.listener.reset() def test_simple(self): diff --git a/test/test_on_demand_csfle.py b/test/test_on_demand_csfle.py index d5668199a3..499dc64b3b 100644 --- a/test/test_on_demand_csfle.py +++ b/test/test_on_demand_csfle.py @@ -30,10 +30,10 @@ class TestonDemandGCPCredentials(IntegrationTest): @unittest.skipUnless(_HAVE_PYMONGOCRYPT, "pymongocrypt is not installed") @client_context.require_version_min(4, 2, -1) def setUpClass(cls): - super(TestonDemandGCPCredentials, cls).setUpClass() + super().setUpClass() def setUp(self): - super(TestonDemandGCPCredentials, self).setUp() + super().setUp() self.master_key = { "projectId": "devprod-drivers", "location": "global", @@ -72,10 +72,10 @@ class TestonDemandAzureCredentials(IntegrationTest): @unittest.skipUnless(_HAVE_PYMONGOCRYPT, "pymongocrypt is not installed") @client_context.require_version_min(4, 2, -1) def setUpClass(cls): - super(TestonDemandAzureCredentials, cls).setUpClass() + super().setUpClass() def setUp(self): - super(TestonDemandAzureCredentials, self).setUp() + super().setUp() self.master_key = { "keyVaultEndpoint": "https://keyvault-drivers-2411.vault.azure.net/keys/", "keyName": "KEY-NAME", diff --git a/test/test_pooling.py b/test/test_pooling.py index 923c89d83b..57c9b807a6 100644 --- a/test/test_pooling.py +++ b/test/test_pooling.py @@ -60,7 +60,7 @@ class MongoThread(threading.Thread): """A thread that uses a MongoClient.""" def __init__(self, client): - super(MongoThread, self).__init__() + super().__init__() self.daemon = True # Don't hang whole test if thread hangs. 
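# Why daemon matters for the comment above: the interpreter does not
# wait for daemon threads at shutdown, so a wedged MongoThread cannot
# hang the whole test run. The attribute form used in this class and
# the constructor argument are equivalent:
import threading

t = threading.Thread(target=lambda: None)
t.daemon = True  # same effect as threading.Thread(..., daemon=True); set before start()
t.start()
t.join()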
self.client = client self.db = self.client[DB] @@ -107,7 +107,7 @@ class SocketGetter(MongoThread): """ def __init__(self, client, pool): - super(SocketGetter, self).__init__(client) + super().__init__(client) self.state = "init" self.pool = pool self.sock = None @@ -132,7 +132,7 @@ def run_cases(client, cases): n_runs = 5 for case in cases: - for i in range(n_runs): + for _i in range(n_runs): t = case(client) t.start() threads.append(t) @@ -148,7 +148,7 @@ class _TestPoolingBase(IntegrationTest): """Base class for all connection-pool tests.""" def setUp(self): - super(_TestPoolingBase, self).setUp() + super().setUp() self.c = rs_or_single_client() db = self.c[DB] db.unique.drop() @@ -158,7 +158,7 @@ def setUp(self): def tearDown(self): self.c.close() - super(_TestPoolingBase, self).tearDown() + super().tearDown() def create_pool(self, pair=(client_context.host, client_context.port), *args, **kwargs): # Start the pool with the correct ssl options. @@ -329,7 +329,7 @@ def test_wait_queue_timeout(self): duration = time.time() - start self.assertTrue( abs(wait_queue_timeout - duration) < 1, - "Waited %.2f seconds for a socket, expected %f" % (duration, wait_queue_timeout), + f"Waited {duration:.2f} seconds for a socket, expected {wait_queue_timeout:f}", ) def test_no_wait_queue_timeout(self): @@ -440,7 +440,7 @@ def f(): with lock: self.n_passed += 1 - for i in range(nthreads): + for _i in range(nthreads): t = threading.Thread(target=f) threads.append(t) t.start() @@ -472,7 +472,7 @@ def f(): with lock: self.n_passed += 1 - for i in range(nthreads): + for _i in range(nthreads): t = threading.Thread(target=f) threads.append(t) t.start() @@ -500,7 +500,7 @@ def test_max_pool_size_with_connection_failure(self): # First call to get_socket fails; if pool doesn't release its semaphore # then the second call raises "ConnectionFailure: Timed out waiting for # socket from pool" instead of AutoReconnect. 
- for i in range(2): + for _i in range(2): with self.assertRaises(AutoReconnect) as context: with test_pool.get_socket(): pass diff --git a/test/test_read_concern.py b/test/test_read_concern.py index 2230f2bef2..682fe03e72 100644 --- a/test/test_read_concern.py +++ b/test/test_read_concern.py @@ -33,7 +33,7 @@ class TestReadConcern(IntegrationTest): @classmethod @client_context.require_connection def setUpClass(cls): - super(TestReadConcern, cls).setUpClass() + super().setUpClass() cls.listener = OvertCommandListener() cls.client = rs_or_single_client(event_listeners=[cls.listener]) cls.db = cls.client.pymongo_test @@ -43,11 +43,11 @@ def setUpClass(cls): def tearDownClass(cls): cls.client.close() client_context.client.pymongo_test.drop_collection("coll") - super(TestReadConcern, cls).tearDownClass() + super().tearDownClass() def tearDown(self): self.listener.reset() - super(TestReadConcern, self).tearDown() + super().tearDown() def test_read_concern(self): rc = ReadConcern() @@ -65,7 +65,7 @@ def test_read_concern(self): self.assertRaises(TypeError, ReadConcern, 42) def test_read_concern_uri(self): - uri = "mongodb://%s/?readConcernLevel=majority" % (client_context.pair,) + uri = f"mongodb://{client_context.pair}/?readConcernLevel=majority" client = rs_or_single_client(uri, connect=False) self.assertEqual(ReadConcern("majority"), client.read_concern) diff --git a/test/test_read_preferences.py b/test/test_read_preferences.py index 1362623dff..6156b6b3fc 100644 --- a/test/test_read_preferences.py +++ b/test/test_read_preferences.py @@ -90,10 +90,10 @@ class TestReadPreferencesBase(IntegrationTest): @classmethod @client_context.require_secondaries_count(1) def setUpClass(cls): - super(TestReadPreferencesBase, cls).setUpClass() + super().setUpClass() def setUp(self): - super(TestReadPreferencesBase, self).setUp() + super().setUp() # Insert some data so we can use cursors in read_from_which_host self.client.pymongo_test.test.drop() self.client.get_database( @@ -119,16 +119,17 @@ def read_from_which_kind(self, client): return "secondary" else: self.fail( - "Cursor used address %s, expected either primary " - "%s or secondaries %s" % (address, client.primary, client.secondaries) + "Cursor used address {}, expected either primary " + "{} or secondaries {}".format(address, client.primary, client.secondaries) ) + return None def assertReadsFrom(self, expected, **kwargs): c = rs_client(**kwargs) wait_until(lambda: len(c.nodes - c.arbiters) == client_context.w, "discovered all nodes") used = self.read_from_which_kind(c) - self.assertEqual(expected, used, "Cursor used %s, expected %s" % (used, expected)) + self.assertEqual(expected, used, f"Cursor used {used}, expected {expected}") class TestSingleSecondaryOk(TestReadPreferencesBase): @@ -271,7 +272,7 @@ def test_nearest(self): self.assertFalse( not_used, "Expected to use primary and all secondaries for mode NEAREST," - " but didn't use %s\nlatencies: %s" % (not_used, latencies), + " but didn't use {}\nlatencies: {}".format(not_used, latencies), ) @@ -280,18 +281,18 @@ def __init__(self, *args, **kwargs): self.has_read_from = set() client_options = client_context.client_options client_options.update(kwargs) - super(ReadPrefTester, self).__init__(*args, **client_options) + super().__init__(*args, **client_options) @contextlib.contextmanager def _socket_for_reads(self, read_preference, session): - context = super(ReadPrefTester, self)._socket_for_reads(read_preference, session) + context = super()._socket_for_reads(read_preference, session) with context 
as (sock_info, read_preference): self.record_a_read(sock_info.address) yield sock_info, read_preference @contextlib.contextmanager def _socket_from_server(self, read_preference, server, session): - context = super(ReadPrefTester, self)._socket_from_server(read_preference, server, session) + context = super()._socket_from_server(read_preference, server, session) with context as (sock_info, read_preference): self.record_a_read(sock_info.address) yield sock_info, read_preference @@ -317,7 +318,7 @@ class TestCommandAndReadPreference(IntegrationTest): @classmethod @client_context.require_secondaries_count(1) def setUpClass(cls): - super(TestCommandAndReadPreference, cls).setUpClass() + super().setUpClass() cls.c = ReadPrefTester( client_context.pair, # Ignore round trip times, to test ReadPreference modes only. @@ -360,7 +361,7 @@ def _test_fn(self, server_type, fn): break assert self.c.primary is not None - unused = self.c.secondaries.union(set([self.c.primary])).difference(used) + unused = self.c.secondaries.union({self.c.primary}).difference(used) if unused: self.fail("Some members not used for NEAREST: %s" % (unused)) else: @@ -373,7 +374,10 @@ def _test_primary_helper(self, func): def _test_coll_helper(self, secondary_ok, coll, meth, *args, **kwargs): for mode, server_type in _PREF_MAP: new_coll = coll.with_options(read_preference=mode()) - func = lambda: getattr(new_coll, meth)(*args, **kwargs) + + def func(): + return getattr(new_coll, meth)(*args, **kwargs) + if secondary_ok: self._test_fn(server_type, func) else: @@ -383,7 +387,10 @@ def test_command(self): # Test that the generic command helper obeys the read preference # passed to it. for mode, server_type in _PREF_MAP: - func = lambda: self.c.pymongo_test.command("dbStats", read_preference=mode()) + + def func(): + return self.c.pymongo_test.command("dbStats", read_preference=mode()) + self._test_fn(server_type, func) def test_create_collection(self): @@ -536,7 +543,7 @@ def test_send_hedge(self): client = rs_client(event_listeners=[listener]) self.addCleanup(client.close) client.admin.command("ping") - for mode, cls in cases.items(): + for _mode, cls in cases.items(): pref = cls(hedge={"enabled": True}) coll = client.test.get_collection("test", read_preference=pref) listener.reset() diff --git a/test/test_read_write_concern_spec.py b/test/test_read_write_concern_spec.py index 26bc111f00..2b39f7d04e 100644 --- a/test/test_read_write_concern_spec.py +++ b/test/test_read_write_concern_spec.py @@ -89,16 +89,16 @@ def insert_command_default_write_concern(): f() self.assertGreaterEqual(len(listener.started_events), 1) - for i, event in enumerate(listener.started_events): + for _i, event in enumerate(listener.started_events): self.assertNotIn( "readConcern", event.command, - "%s sent default readConcern with %s" % (name, event.command_name), + f"{name} sent default readConcern with {event.command_name}", ) self.assertNotIn( "writeConcern", event.command, - "%s sent default writeConcern with %s" % (name, event.command_name), + f"{name} sent default writeConcern with {event.command_name}", ) def assertWriteOpsRaise(self, write_concern, expected_exception): @@ -307,7 +307,7 @@ def create_tests(): fname = os.path.splitext(filename)[0] for test_case in test_cases: new_test = create_test(test_case) - test_name = "test_%s_%s_%s" % ( + test_name = "test_{}_{}_{}".format( dirname.replace("-", "_"), fname.replace("-", "_"), str(test_case["description"].lower().replace(" ", "_")), diff --git a/test/test_replica_set_reconfig.py 
b/test/test_replica_set_reconfig.py index 898be99d4d..bdeaeb06a3 100644 --- a/test/test_replica_set_reconfig.py +++ b/test/test_replica_set_reconfig.py @@ -83,7 +83,7 @@ def test_replica_set_client(self): c.mock_members.remove("c:3") c.mock_standalones.append("c:3") - wait_until(lambda: set([("b", 2)]) == c.secondaries, "update the list of secondaries") + wait_until(lambda: {("b", 2)} == c.secondaries, "update the list of secondaries") self.assertEqual(("a", 1), c.primary) @@ -106,7 +106,7 @@ def test_replica_set_client(self): # C is removed. c.mock_hello_hosts.remove("c:3") - wait_until(lambda: set([("b", 2)]) == c.secondaries, "update list of secondaries") + wait_until(lambda: {("b", 2)} == c.secondaries, "update list of secondaries") self.assertEqual(("a", 1), c.primary) @@ -148,7 +148,7 @@ def test_client(self): # MongoClient connects to primary by default. self.assertEqual(c.address, ("a", 1)) - self.assertEqual(set([("a", 1), ("b", 2)]), c.nodes) + self.assertEqual({("a", 1), ("b", 2)}, c.nodes) # C is added. c.mock_members.append("c:3") @@ -159,7 +159,7 @@ def test_client(self): self.assertEqual(c.address, ("a", 1)) wait_until( - lambda: set([("a", 1), ("b", 2), ("c", 3)]) == c.nodes, "reconnect to both secondaries" + lambda: {("a", 1), ("b", 2), ("c", 3)} == c.nodes, "reconnect to both secondaries" ) def test_replica_set_client(self): @@ -169,13 +169,13 @@ def test_replica_set_client(self): self.addCleanup(c.close) wait_until(lambda: ("a", 1) == c.primary, "discover the primary") - wait_until(lambda: set([("b", 2)]) == c.secondaries, "discover the secondary") + wait_until(lambda: {("b", 2)} == c.secondaries, "discover the secondary") # C is added. c.mock_members.append("c:3") c.mock_hello_hosts.append("c:3") - wait_until(lambda: set([("b", 2), ("c", 3)]) == c.secondaries, "discover the new secondary") + wait_until(lambda: {("b", 2), ("c", 3)} == c.secondaries, "discover the new secondary") self.assertEqual(("a", 1), c.primary) diff --git a/test/test_retryable_reads.py b/test/test_retryable_reads.py index 517e1122b0..ee12c524c9 100644 --- a/test/test_retryable_reads.py +++ b/test/test_retryable_reads.py @@ -76,14 +76,14 @@ class TestSpec(SpecRunner): # TODO: remove this once PYTHON-1948 is done. @client_context.require_no_mmap def setUpClass(cls): - super(TestSpec, cls).setUpClass() + super().setUpClass() def maybe_skip_scenario(self, test): - super(TestSpec, self).maybe_skip_scenario(test) + super().maybe_skip_scenario(test) skip_names = ["listCollectionObjects", "listIndexNames", "listDatabaseObjects"] for name in skip_names: if name.lower() in test["description"].lower(): - self.skipTest("PyMongo does not support %s" % (name,)) + self.skipTest(f"PyMongo does not support {name}") # Serverless does not support $out and collation. 
if client_context.serverless: @@ -107,7 +107,7 @@ def get_scenario_coll_name(self, scenario_def): """Override a test's collection name to support GridFS tests.""" if "bucket_name" in scenario_def: return scenario_def["bucket_name"] - return super(TestSpec, self).get_scenario_coll_name(scenario_def) + return super().get_scenario_coll_name(scenario_def) def setup_scenario(self, scenario_def): """Override a test's setup to support GridFS tests.""" @@ -127,7 +127,7 @@ def setup_scenario(self, scenario_def): db.get_collection("fs.chunks").drop() db.get_collection("fs.files", write_concern=wc).drop() else: - super(TestSpec, self).setup_scenario(scenario_def) + super().setup_scenario(scenario_def) def create_test(scenario_def, test, name): diff --git a/test/test_retryable_writes.py b/test/test_retryable_writes.py index 1e978f21be..32841a8227 100644 --- a/test/test_retryable_writes.py +++ b/test/test_retryable_writes.py @@ -68,7 +68,7 @@ class InsertEventListener(EventListener): def succeeded(self, event: CommandSucceededEvent) -> None: - super(InsertEventListener, self).succeeded(event) + super().succeeded(event) if ( event.command_name == "insert" and event.reply.get("writeConcernError", {}).get("code", None) == 91 @@ -108,7 +108,7 @@ def run_test_ops(self, sessions, collection, test): if "result" in outcome: operation["result"] = outcome["result"] test["operations"] = [operation] - super(TestAllScenarios, self).run_test_ops(sessions, collection, test) + super().run_test_ops(sessions, collection, test) def create_test(scenario_def, test, name): @@ -168,13 +168,13 @@ class IgnoreDeprecationsTest(IntegrationTest): @classmethod def setUpClass(cls): - super(IgnoreDeprecationsTest, cls).setUpClass() + super().setUpClass() cls.deprecation_filter = DeprecationFilter() @classmethod def tearDownClass(cls): cls.deprecation_filter.stop() - super(IgnoreDeprecationsTest, cls).tearDownClass() + super().tearDownClass() class TestRetryableWritesMMAPv1(IgnoreDeprecationsTest): @@ -182,7 +182,7 @@ class TestRetryableWritesMMAPv1(IgnoreDeprecationsTest): @classmethod def setUpClass(cls): - super(TestRetryableWritesMMAPv1, cls).setUpClass() + super().setUpClass() # Speed up the tests by decreasing the heartbeat frequency. cls.knobs = client_knobs(heartbeat_frequency=0.1, min_heartbeat_interval=0.1) cls.knobs.enable() @@ -193,7 +193,7 @@ def setUpClass(cls): def tearDownClass(cls): cls.knobs.disable() cls.client.close() - super(TestRetryableWritesMMAPv1, cls).tearDownClass() + super().tearDownClass() @client_context.require_no_standalone def test_actionable_error_message(self): @@ -217,7 +217,7 @@ class TestRetryableWrites(IgnoreDeprecationsTest): @classmethod @client_context.require_no_mmap def setUpClass(cls): - super(TestRetryableWrites, cls).setUpClass() + super().setUpClass() # Speed up the tests by decreasing the heartbeat frequency. 
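        # (client_knobs patches the driver's internal timing constants for the
        # duration of the test; tearDownClass below calls knobs.disable() to
        # restore them.)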
cls.knobs = client_knobs(heartbeat_frequency=0.1, min_heartbeat_interval=0.1) cls.knobs.enable() @@ -229,7 +229,7 @@ def setUpClass(cls): def tearDownClass(cls): cls.knobs.disable() cls.client.close() - super(TestRetryableWrites, cls).tearDownClass() + super().tearDownClass() def setUp(self): if client_context.is_rs and client_context.test_commands_enabled: @@ -248,20 +248,20 @@ def test_supported_single_statement_no_retry(self): client = rs_or_single_client(retryWrites=False, event_listeners=[listener]) self.addCleanup(client.close) for method, args, kwargs in retryable_single_statement_ops(client.db.retryable_write_test): - msg = "%s(*%r, **%r)" % (method.__name__, args, kwargs) + msg = f"{method.__name__}(*{args!r}, **{kwargs!r})" listener.reset() method(*args, **kwargs) for event in listener.started_events: self.assertNotIn( "txnNumber", event.command, - "%s sent txnNumber with %s" % (msg, event.command_name), + f"{msg} sent txnNumber with {event.command_name}", ) @client_context.require_no_standalone def test_supported_single_statement_supported_cluster(self): for method, args, kwargs in retryable_single_statement_ops(self.db.retryable_write_test): - msg = "%s(*%r, **%r)" % (method.__name__, args, kwargs) + msg = f"{method.__name__}(*{args!r}, **{kwargs!r})" self.listener.reset() method(*args, **kwargs) commands_started = self.listener.started_events @@ -270,13 +270,13 @@ def test_supported_single_statement_supported_cluster(self): self.assertIn( "lsid", first_attempt.command, - "%s sent no lsid with %s" % (msg, first_attempt.command_name), + f"{msg} sent no lsid with {first_attempt.command_name}", ) initial_session_id = first_attempt.command["lsid"] self.assertIn( "txnNumber", first_attempt.command, - "%s sent no txnNumber with %s" % (msg, first_attempt.command_name), + f"{msg} sent no txnNumber with {first_attempt.command_name}", ) # There should be no retry when the failpoint is not active. 
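The hunks above and below pin down the retryable-writes wire contract: the first
attempt and the retry must carry the same ``lsid`` and ``txnNumber``. A minimal
sketch of observing those fields with a command listener, assuming a replica set
that supports retryable writes (the listener class here is illustrative, not
part of this patch)::

    from pymongo import MongoClient, monitoring

    class TxnNumberListener(monitoring.CommandListener):
        """Record the session id and transaction number of each insert."""

        def __init__(self):
            self.attempts = []

        def started(self, event):
            if event.command_name == "insert":
                # Both fields are attached by the driver, not the caller.
                self.attempts.append(
                    (event.command.get("lsid"), event.command.get("txnNumber"))
                )

        def succeeded(self, event):
            pass

        def failed(self, event):
            pass

    listener = TxnNumberListener()
    client = MongoClient(retryWrites=True, event_listeners=[listener])
    client.db.retryable_write_test.insert_one({"x": 1})
    # When a retry happens, consecutive entries in listener.attempts hold
    # identical lsid and txnNumber values.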
@@ -289,13 +289,13 @@ def test_supported_single_statement_supported_cluster(self): self.assertIn( "lsid", retry_attempt.command, - "%s sent no lsid with %s" % (msg, first_attempt.command_name), + f"{msg} sent no lsid with {first_attempt.command_name}", ) self.assertEqual(retry_attempt.command["lsid"], initial_session_id, msg) self.assertIn( "txnNumber", retry_attempt.command, - "%s sent no txnNumber with %s" % (msg, first_attempt.command_name), + f"{msg} sent no txnNumber with {first_attempt.command_name}", ) self.assertEqual(retry_attempt.command["txnNumber"], initial_transaction_id, msg) @@ -304,7 +304,7 @@ def test_supported_single_statement_unsupported_cluster(self): raise SkipTest("This cluster supports retryable writes") for method, args, kwargs in retryable_single_statement_ops(self.db.retryable_write_test): - msg = "%s(*%r, **%r)" % (method.__name__, args, kwargs) + msg = f"{method.__name__}(*{args!r}, **{kwargs!r})" self.listener.reset() method(*args, **kwargs) @@ -312,7 +312,7 @@ def test_supported_single_statement_unsupported_cluster(self): self.assertNotIn( "txnNumber", event.command, - "%s sent txnNumber with %s" % (msg, event.command_name), + f"{msg} sent txnNumber with {event.command_name}", ) def test_unsupported_single_statement(self): @@ -322,7 +322,7 @@ def test_unsupported_single_statement(self): for method, args, kwargs in non_retryable_single_statement_ops( coll ) + retryable_single_statement_ops(coll_w0): - msg = "%s(*%r, **%r)" % (method.__name__, args, kwargs) + msg = f"{method.__name__}(*{args!r}, **{kwargs!r})" self.listener.reset() method(*args, **kwargs) started_events = self.listener.started_events @@ -332,7 +332,7 @@ def test_unsupported_single_statement(self): self.assertNotIn( "txnNumber", event.command, - "%s sent txnNumber with %s" % (msg, event.command_name), + f"{msg} sent txnNumber with {event.command_name}", ) def test_server_selection_timeout_not_retried(self): @@ -345,7 +345,7 @@ def test_server_selection_timeout_not_retried(self): event_listeners=[listener], ) for method, args, kwargs in retryable_single_statement_ops(client.db.retryable_write_test): - msg = "%s(*%r, **%r)" % (method.__name__, args, kwargs) + msg = f"{method.__name__}(*{args!r}, **{kwargs!r})" listener.reset() with self.assertRaises(ServerSelectionTimeoutError, msg=msg): method(*args, **kwargs) @@ -374,7 +374,7 @@ def raise_error(*args, **kwargs): return server for method, args, kwargs in retryable_single_statement_ops(client.db.retryable_write_test): - msg = "%s(*%r, **%r)" % (method.__name__, args, kwargs) + msg = f"{method.__name__}(*{args!r}, **{kwargs!r})" listener.reset() topology.select_server = mock_select_server with self.assertRaises(ConnectionFailure, msg=msg): @@ -479,7 +479,7 @@ class TestWriteConcernError(IntegrationTest): @client_context.require_no_mmap @client_context.require_failCommand_fail_point def setUpClass(cls): - super(TestWriteConcernError, cls).setUpClass() + super().setUpClass() cls.fail_insert = { "configureFailPoint": "failCommand", "mode": {"times": 2}, @@ -668,7 +668,7 @@ def raise_connection_err_select_server(*args, **kwargs): with client.start_session() as session: kwargs = copy.deepcopy(kwargs) kwargs["session"] = session - msg = "%s(*%r, **%r)" % (method.__name__, args, kwargs) + msg = f"{method.__name__}(*{args!r}, **{kwargs!r})" initial_txn_id = session._server_session.transaction_id # Each operation should fail on the first attempt and succeed diff --git a/test/test_sdam_monitoring_spec.py b/test/test_sdam_monitoring_spec.py index 
d7b3744399..2587ae7965 100644 --- a/test/test_sdam_monitoring_spec.py +++ b/test/test_sdam_monitoring_spec.py @@ -44,12 +44,12 @@ def compare_server_descriptions(expected, actual): - if (not expected["address"] == "%s:%s" % actual.address) or ( + if (not expected["address"] == "{}:{}".format(*actual.address)) or ( not server_name_to_type(expected["type"]) == actual.server_type ): return False expected_hosts = set(expected["arbiters"] + expected["passives"] + expected["hosts"]) - return expected_hosts == set("%s:%s" % s for s in actual.all_hosts) + return expected_hosts == {"{}:{}".format(*s) for s in actual.all_hosts} def compare_topology_descriptions(expected, actual): @@ -60,7 +60,7 @@ def compare_topology_descriptions(expected, actual): if len(expected) != len(actual): return False for exp_server in expected: - for address, actual_server in actual.items(): + for _address, actual_server in actual.items(): if compare_server_descriptions(exp_server, actual_server): break else: @@ -79,22 +79,22 @@ def compare_events(expected_dict, actual): if expected_type == "server_opening_event": if not isinstance(actual, monitoring.ServerOpeningEvent): return False, "Expected ServerOpeningEvent, got %s" % (actual.__class__) - if not expected["address"] == "%s:%s" % actual.server_address: + if not expected["address"] == "{}:{}".format(*actual.server_address): return ( False, "ServerOpeningEvent published with wrong address (expected" - " %s, got %s" % (expected["address"], actual.server_address), + " {}, got {}".format(expected["address"], actual.server_address), ) elif expected_type == "server_description_changed_event": if not isinstance(actual, monitoring.ServerDescriptionChangedEvent): return (False, "Expected ServerDescriptionChangedEvent, got %s" % (actual.__class__)) - if not expected["address"] == "%s:%s" % actual.server_address: + if not expected["address"] == "{}:{}".format(*actual.server_address): return ( False, "ServerDescriptionChangedEvent has wrong address" - " (expected %s, got %s" % (expected["address"], actual.server_address), + " (expected {}, got {}".format(expected["address"], actual.server_address), ) if not compare_server_descriptions(expected["newDescription"], actual.new_description): @@ -110,11 +110,11 @@ def compare_events(expected_dict, actual): elif expected_type == "server_closed_event": if not isinstance(actual, monitoring.ServerClosedEvent): return False, "Expected ServerClosedEvent, got %s" % (actual.__class__) - if not expected["address"] == "%s:%s" % actual.server_address: + if not expected["address"] == "{}:{}".format(*actual.server_address): return ( False, "ServerClosedEvent published with wrong address" - " (expected %s, got %s" % (expected["address"], actual.server_address), + " (expected {}, got {}".format(expected["address"], actual.server_address), ) elif expected_type == "topology_opening_event": @@ -145,7 +145,7 @@ def compare_events(expected_dict, actual): return False, "Expected TopologyClosedEvent, got %s" % (actual.__class__) else: - return False, "Incorrect event: expected %s, actual %s" % (expected_type, actual) + return False, f"Incorrect event: expected {expected_type}, actual {actual}" return True, "" @@ -170,7 +170,7 @@ def compare_multiple_events(i, expected_results, actual_results): class TestAllScenarios(IntegrationTest): def setUp(self): - super(TestAllScenarios, self).setUp() + super().setUp() self.all_listener = ServerAndTopologyEventListener() @@ -235,7 +235,7 @@ def _run(self): # Assert no extra events. 
extra_events = self.all_listener.results[expected_len:] if extra_events: - self.fail("Extra events %r" % (extra_events,)) + self.fail(f"Extra events {extra_events!r}") self.all_listener.reset() finally: @@ -251,7 +251,7 @@ def create_tests(): scenario_def = json.load(scenario_stream, object_hook=object_hook) # Construct test from scenario. new_test = create_test(scenario_def) - test_name = "test_%s" % (os.path.splitext(filename)[0],) + test_name = f"test_{os.path.splitext(filename)[0]}" new_test.__name__ = test_name setattr(TestAllScenarios, new_test.__name__, new_test) @@ -268,7 +268,7 @@ class TestSdamMonitoring(IntegrationTest): @classmethod @client_context.require_failCommand_fail_point def setUpClass(cls): - super(TestSdamMonitoring, cls).setUpClass() + super().setUpClass() # Speed up the tests by decreasing the event publish frequency. cls.knobs = client_knobs(events_queue_frequency=0.1) cls.knobs.enable() @@ -284,7 +284,7 @@ def setUpClass(cls): def tearDownClass(cls): cls.test_client.close() cls.knobs.disable() - super(TestSdamMonitoring, cls).tearDownClass() + super().tearDownClass() def setUp(self): self.listener.reset() diff --git a/test/test_server_selection.py b/test/test_server_selection.py index 8d4ffe5e9b..30b82769bc 100644 --- a/test/test_server_selection.py +++ b/test/test_server_selection.py @@ -48,7 +48,7 @@ ) -class SelectionStoreSelector(object): +class SelectionStoreSelector: """No-op selector that keeps track of what was passed to it.""" def __init__(self): @@ -103,7 +103,7 @@ def all_hosts_started(): def test_invalid_server_selector(self): # Client initialization must fail if server_selector is not callable. - for selector_candidate in [list(), 10, "string", {}]: + for selector_candidate in [[], 10, "string", {}]: with self.assertRaisesRegex(ValueError, "must be a callable"): MongoClient(connect=False, server_selector=selector_candidate) diff --git a/test/test_server_selection_in_window.py b/test/test_server_selection_in_window.py index d076ae77b3..63769a6457 100644 --- a/test/test_server_selection_in_window.py +++ b/test/test_server_selection_in_window.py @@ -46,7 +46,7 @@ def run_scenario(self, scenario_def): server.pool.operation_count = mock["operation_count"] pref = ReadPreference.NEAREST - counts = dict((address, 0) for address in topology._description.server_descriptions()) + counts = {address: 0 for address in topology._description.server_descriptions()} # Number of times to repeat server selection iterations = scenario_def["iterations"] @@ -91,7 +91,7 @@ def tests(self, scenario_def): class FinderThread(threading.Thread): def __init__(self, collection, iterations): - super(FinderThread, self).__init__() + super().__init__() self.daemon = True self.collection = collection self.iterations = iterations diff --git a/test/test_server_selection_rtt.py b/test/test_server_selection_rtt.py index d2d8768809..5c2a8a6fba 100644 --- a/test/test_server_selection_rtt.py +++ b/test/test_server_selection_rtt.py @@ -57,7 +57,7 @@ def create_tests(): # Construct test from scenario. 
new_test = create_test(scenario_def) - test_name = "test_%s_%s" % (dirname, os.path.splitext(filename)[0]) + test_name = f"test_{dirname}_{os.path.splitext(filename)[0]}" new_test.__name__ = test_name setattr(TestAllScenarios, new_test.__name__, new_test) diff --git a/test/test_session.py b/test/test_session.py index 25d209ebaf..18d0122dae 100644 --- a/test/test_session.py +++ b/test/test_session.py @@ -47,15 +47,15 @@ class SessionTestListener(EventListener): def started(self, event): if not event.command_name.startswith("sasl"): - super(SessionTestListener, self).started(event) + super().started(event) def succeeded(self, event): if not event.command_name.startswith("sasl"): - super(SessionTestListener, self).succeeded(event) + super().succeeded(event) def failed(self, event): if not event.command_name.startswith("sasl"): - super(SessionTestListener, self).failed(event) + super().failed(event) def first_command_started(self): assert len(self.started_events) >= 1, "No command-started events" @@ -74,7 +74,7 @@ class TestSession(IntegrationTest): @classmethod @client_context.require_sessions def setUpClass(cls): - super(TestSession, cls).setUpClass() + super().setUpClass() # Create a second client so we can make sure clients cannot share # sessions. cls.client2 = rs_or_single_client() @@ -87,7 +87,7 @@ def setUpClass(cls): def tearDownClass(cls): monitoring._SENSITIVE_COMMANDS.update(cls.sensitive_commands) cls.client2.close() - super(TestSession, cls).tearDownClass() + super().tearDownClass() def setUp(self): self.listener = SessionTestListener() @@ -97,7 +97,7 @@ def setUp(self): ) self.addCleanup(self.client.close) self.db = self.client.pymongo_test - self.initial_lsids = set(s["id"] for s in session_ids(self.client)) + self.initial_lsids = {s["id"] for s in session_ids(self.client)} def tearDown(self): """All sessions used in the test must be returned to the pool.""" @@ -107,7 +107,7 @@ def tearDown(self): if "lsid" in event.command: used_lsids.add(event.command["lsid"]["id"]) - current_lsids = set(s["id"] for s in session_ids(self.client)) + current_lsids = {s["id"] for s in session_ids(self.client)} self.assertLessEqual(used_lsids, current_lsids) def _test_ops(self, client, *ops): @@ -129,13 +129,13 @@ def _test_ops(self, client, *ops): for event in listener.started_events: self.assertTrue( "lsid" in event.command, - "%s sent no lsid with %s" % (f.__name__, event.command_name), + f"{f.__name__} sent no lsid with {event.command_name}", ) self.assertEqual( s.session_id, event.command["lsid"], - "%s sent wrong lsid with %s" % (f.__name__, event.command_name), + f"{f.__name__} sent wrong lsid with {event.command_name}", ) self.assertFalse(s.has_ended) @@ -164,7 +164,7 @@ def _test_ops(self, client, *ops): for event in listener.started_events: self.assertTrue( "lsid" in event.command, - "%s sent no lsid with %s" % (f.__name__, event.command_name), + f"{f.__name__} sent no lsid with {event.command_name}", ) lsids.append(event.command["lsid"]) @@ -176,7 +176,7 @@ def _test_ops(self, client, *ops): self.assertIn( lsid, session_ids(client), - "%s did not return implicit session to pool" % (f.__name__,), + f"{f.__name__} did not return implicit session to pool", ) def test_implicit_sessions_checkout(self): @@ -405,13 +405,13 @@ def test_cursor(self): for event in listener.started_events: self.assertTrue( "lsid" in event.command, - "%s sent no lsid with %s" % (name, event.command_name), + f"{name} sent no lsid with {event.command_name}", ) self.assertEqual( s.session_id, event.command["lsid"], - 
"%s sent wrong lsid with %s" % (name, event.command_name), + f"{name} sent wrong lsid with {event.command_name}", ) with self.assertRaisesRegex(InvalidOperation, "ended session"): @@ -423,20 +423,20 @@ def test_cursor(self): f(session=None) event0 = listener.first_command_started() self.assertTrue( - "lsid" in event0.command, "%s sent no lsid with %s" % (name, event0.command_name) + "lsid" in event0.command, f"{name} sent no lsid with {event0.command_name}" ) lsid = event0.command["lsid"] for event in listener.started_events[1:]: self.assertTrue( - "lsid" in event.command, "%s sent no lsid with %s" % (name, event.command_name) + "lsid" in event.command, f"{name} sent no lsid with {event.command_name}" ) self.assertEqual( lsid, event.command["lsid"], - "%s sent wrong lsid with %s" % (name, event.command_name), + f"{name} sent wrong lsid with {event.command_name}", ) def test_gridfs(self): @@ -693,7 +693,7 @@ def _test_unacknowledged_ops(self, client, *ops): kw = copy.copy(kw) kw["session"] = s with self.assertRaises( - ConfigurationError, msg="%s did not raise ConfigurationError" % (f.__name__,) + ConfigurationError, msg=f"{f.__name__} did not raise ConfigurationError" ): f(*args, **kw) if f.__name__ == "create_collection": @@ -703,11 +703,11 @@ def _test_unacknowledged_ops(self, client, *ops): self.assertIn( "lsid", event.command, - "%s sent no lsid with %s" % (f.__name__, event.command_name), + f"{f.__name__} sent no lsid with {event.command_name}", ) # Should not run any command before raising an error. - self.assertFalse(listener.started_events, "%s sent command" % (f.__name__,)) + self.assertFalse(listener.started_events, f"{f.__name__} sent command") self.assertTrue(s.has_ended) @@ -724,12 +724,12 @@ def _test_unacknowledged_ops(self, client, *ops): self.assertIn( "lsid", event.command, - "%s sent no lsid with %s" % (f.__name__, event.command_name), + f"{f.__name__} sent no lsid with {event.command_name}", ) for event in listener.started_events: self.assertNotIn( - "lsid", event.command, "%s sent lsid with %s" % (f.__name__, event.command_name) + "lsid", event.command, f"{f.__name__} sent lsid with {event.command_name}" ) def test_unacknowledged_writes(self): @@ -792,7 +792,7 @@ def tearDownClass(cls): @client_context.require_sessions def setUp(self): - super(TestCausalConsistency, self).setUp() + super().setUp() @client_context.require_no_standalone def test_core(self): @@ -1072,7 +1072,7 @@ def test_cluster_time_no_server_support(self): class TestClusterTime(IntegrationTest): def setUp(self): - super(TestClusterTime, self).setUp() + super().setUp() if "$clusterTime" not in client_context.hello: raise SkipTest("$clusterTime not supported") @@ -1128,7 +1128,7 @@ def insert_and_aggregate(): ("rename_and_drop", rename_and_drop), ] - for name, f in ops: + for _name, f in ops: listener.reset() # Call f() twice, insert to advance clusterTime, call f() again. 
f() @@ -1140,21 +1140,20 @@ def insert_and_aggregate(): for i, event in enumerate(listener.started_events): self.assertTrue( "$clusterTime" in event.command, - "%s sent no $clusterTime with %s" % (f.__name__, event.command_name), + f"{f.__name__} sent no $clusterTime with {event.command_name}", ) if i > 0: succeeded = listener.succeeded_events[i - 1] self.assertTrue( "$clusterTime" in succeeded.reply, - "%s received no $clusterTime with %s" - % (f.__name__, succeeded.command_name), + f"{f.__name__} received no $clusterTime with {succeeded.command_name}", ) self.assertTrue( event.command["$clusterTime"]["clusterTime"] >= succeeded.reply["$clusterTime"]["clusterTime"], - "%s sent wrong $clusterTime with %s" % (f.__name__, event.command_name), + f"{f.__name__} sent wrong $clusterTime with {event.command_name}", ) diff --git a/test/test_son.py b/test/test_son.py index 5c1f43594d..5e62ffb176 100644 --- a/test/test_son.py +++ b/test/test_son.py @@ -47,7 +47,7 @@ def test_equality(self): self.assertEqual(a1, SON({"hello": "world"})) self.assertEqual(b2, SON((("hello", "world"), ("mike", "awesome"), ("hello_", "mike")))) - self.assertEqual(b2, dict((("hello_", "mike"), ("mike", "awesome"), ("hello", "world")))) + self.assertEqual(b2, {"hello_": "mike", "mike": "awesome", "hello": "world"}) self.assertNotEqual(a1, b2) self.assertNotEqual(b2, SON((("hello_", "mike"), ("mike", "awesome"), ("hello", "world")))) @@ -55,7 +55,7 @@ def test_equality(self): # Explicitly test inequality self.assertFalse(a1 != SON({"hello": "world"})) self.assertFalse(b2 != SON((("hello", "world"), ("mike", "awesome"), ("hello_", "mike")))) - self.assertFalse(b2 != dict((("hello_", "mike"), ("mike", "awesome"), ("hello", "world")))) + self.assertFalse(b2 != {"hello_": "mike", "mike": "awesome", "hello": "world"}) # Embedded SON. d4 = SON([("blah", {"foo": SON()})]) @@ -97,10 +97,10 @@ def test_pickle_backwards_compatability(self): # This string was generated by pickling a SON object in pymongo # version 2.1.1 pickled_with_2_1_1 = ( - "ccopy_reg\n_reconstructor\np0\n(cbson.son\nSON\np1\n" - "c__builtin__\ndict\np2\n(dp3\ntp4\nRp5\n(dp6\n" - "S'_SON__keys'\np7\n(lp8\nsb." - ).encode("utf8") + b"ccopy_reg\n_reconstructor\np0\n(cbson.son\nSON\np1\n" + b"c__builtin__\ndict\np2\n(dp3\ntp4\nRp5\n(dp6\n" + b"S'_SON__keys'\np7\n(lp8\nsb." 
+ ) son_2_1_1 = pickle.loads(pickled_with_2_1_1) self.assertEqual(son_2_1_1, SON([])) @@ -138,18 +138,14 @@ def test_copying(self): self.assertEqual(id(reflexive_son1), id(reflexive_son1["reflexive"])) def test_iteration(self): - """ - Test __iter__ - """ + """Test __iter__""" # test success case test_son = SON([(1, 100), (2, 200), (3, 300)]) for ele in test_son: self.assertEqual(ele * 100, test_son[ele]) def test_contains_has(self): - """ - has_key and __contains__ - """ + """has_key and __contains__""" test_son = SON([(1, 100), (2, 200), (3, 300)]) self.assertIn(1, test_son) self.assertTrue(2 in test_son, "in failed") @@ -158,9 +154,7 @@ def test_contains_has(self): self.assertFalse(test_son.has_key(22), "has_key succeeded when it shouldn't") # noqa def test_clears(self): - """ - Test clear() - """ + """Test clear()""" test_son = SON([(1, 100), (2, 200), (3, 300)]) test_son.clear() self.assertNotIn(1, test_son) @@ -169,9 +163,7 @@ def test_clears(self): self.assertEqual({}, test_son.to_dict()) def test_len(self): - """ - Test len - """ + """Test len""" test_son = SON() self.assertEqual(0, len(test_son)) test_son = SON([(1, 100), (2, 200), (3, 300)]) diff --git a/test/test_srv_polling.py b/test/test_srv_polling.py index 7a6c61ad21..8bf81f4de9 100644 --- a/test/test_srv_polling.py +++ b/test/test_srv_polling.py @@ -32,7 +32,7 @@ WAIT_TIME = 0.1 -class SrvPollingKnobs(object): +class SrvPollingKnobs: def __init__( self, ttl_time=None, diff --git a/test/test_ssl.py b/test/test_ssl.py index bf151578cb..e6df2a1c24 100644 --- a/test/test_ssl.py +++ b/test/test_ssl.py @@ -142,7 +142,7 @@ def assertClientWorks(self, client): @classmethod @unittest.skipUnless(HAVE_SSL, "The ssl module is not available.") def setUpClass(cls): - super(TestSSL, cls).setUpClass() + super().setUpClass() # MongoClient should connect to the primary by default. 
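        # (Pointing the class-level MongoClient.PORT at the test deployment
        # lets bare MongoClient() calls in these TLS tests connect without an
        # explicit port; tearDownClass below restores the saved value.)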
cls.saved_port = MongoClient.PORT MongoClient.PORT = client_context.port @@ -150,7 +150,7 @@ def setUpClass(cls): @classmethod def tearDownClass(cls): MongoClient.PORT = cls.saved_port - super(TestSSL, cls).tearDownClass() + super().tearDownClass() @client_context.require_tls def test_simple_ssl(self): diff --git a/test/test_threads.py b/test/test_threads.py index 899392e1a0..b948bf9249 100644 --- a/test/test_threads.py +++ b/test/test_threads.py @@ -111,7 +111,7 @@ def test_threading(self): self.db.test.insert_many([{"x": i} for i in range(1000)]) threads = [] - for i in range(10): + for _i in range(10): t = SaveAndFind(self.db.test) t.start() threads.append(t) diff --git a/test/test_topology.py b/test/test_topology.py index e09d7c3691..adbf19f571 100644 --- a/test/test_topology.py +++ b/test/test_topology.py @@ -89,7 +89,7 @@ class TopologyTest(unittest.TestCase): """Disables periodic monitoring, to make tests deterministic.""" def setUp(self): - super(TopologyTest, self).setUp() + super().setUp() self.client_knobs = client_knobs(heartbeat_frequency=999999) self.client_knobs.enable() self.addCleanup(self.client_knobs.disable) @@ -647,13 +647,13 @@ def test_topology_repr(self): ) self.assertEqual( repr(t.description), - ", " ", " "]>" % (t._topology_id,), + " rtt: None>]>".format(t._topology_id), ) def test_unexpected_load_balancer(self): @@ -734,7 +734,7 @@ def _check_with_socket(self, *args, **kwargs): if hello_count[0] in (1, 3): return Hello({"ok": 1, "maxWireVersion": 6}), 0 else: - raise AutoReconnect("mock monitor error #%s" % (hello_count[0],)) + raise AutoReconnect(f"mock monitor error #{hello_count[0]}") t = create_mock_topology(monitor_class=TestMonitor) self.addCleanup(t.close) diff --git a/test/test_transactions.py b/test/test_transactions.py index dc58beb930..9b51927d67 100644 --- a/test/test_transactions.py +++ b/test/test_transactions.py @@ -63,19 +63,19 @@ class TransactionsBase(SpecRunner): @classmethod def setUpClass(cls): - super(TransactionsBase, cls).setUpClass() + super().setUpClass() if client_context.supports_transactions(): for address in client_context.mongoses: - cls.mongos_clients.append(single_client("%s:%s" % address)) + cls.mongos_clients.append(single_client("{}:{}".format(*address))) @classmethod def tearDownClass(cls): for client in cls.mongos_clients: client.close() - super(TransactionsBase, cls).tearDownClass() + super().tearDownClass() def maybe_skip_scenario(self, test): - super(TransactionsBase, self).maybe_skip_scenario(test) + super().maybe_skip_scenario(test) if ( "secondary" in self.id() and not client_context.is_mongos @@ -390,7 +390,7 @@ def test_transaction_direct_connection(self): list(res) -class PatchSessionTimeout(object): +class PatchSessionTimeout: """Patches the client_session's with_transaction timeout for testing.""" def __init__(self, mock_timeout): @@ -416,7 +416,7 @@ class _MyException(Exception): pass def raise_error(_): - raise _MyException() + raise _MyException with self.client.start_session() as s: with self.assertRaises(_MyException): diff --git a/test/test_typing.py b/test/test_typing.py index 0aebc707cd..27597bb2c8 100644 --- a/test/test_typing.py +++ b/test/test_typing.py @@ -13,7 +13,8 @@ # limitations under the License. """Test that each file in mypy_fails/ actually fails mypy, and test some -sample client code that uses PyMongo typings.""" +sample client code that uses PyMongo typings. 
+""" import os import sys import tempfile @@ -39,7 +40,7 @@ class ImplicitMovie(TypedDict): name: str year: int -except ImportError as exc: +except ImportError: Movie = dict # type:ignore[misc,assignment] ImplicitMovie = dict # type: ignore[assignment,misc] MovieWithId = dict # type: ignore[assignment,misc] @@ -164,12 +165,12 @@ def test_bulk_write_heterogeneous(self): def test_command(self) -> None: result: Dict = self.client.admin.command("ping") - items = result.items() + result.items() def test_list_collections(self) -> None: cursor = self.client.test.list_collections() value = cursor.next() - items = value.items() + value.items() def test_list_databases(self) -> None: cursor = self.client.list_databases() @@ -237,7 +238,7 @@ def foo(self): assert rt_document2.foo() == "bar" codec_options2 = CodecOptions(document_class=RawBSONDocument) - bsonbytes3 = encode(doc, codec_options=codec_options2) + encode(doc, codec_options=codec_options2) rt_document3 = decode(bsonbytes2, codec_options=codec_options2) assert rt_document3.raw @@ -463,7 +464,7 @@ def test_son_document_type(self) -> None: retrieved["a"] = 1 def test_son_document_type_runtime(self) -> None: - client = MongoClient(document_class=SON[str, Any], connect=False) + MongoClient(document_class=SON[str, Any], connect=False) @only_type_check def test_create_index(self) -> None: diff --git a/test/test_uri_parser.py b/test/test_uri_parser.py index 2f81e3b512..e2dd17ec26 100644 --- a/test/test_uri_parser.py +++ b/test/test_uri_parser.py @@ -508,7 +508,7 @@ def test_redact_AWS_SESSION_TOKEN(self): def test_special_chars(self): user = "user@ /9+:?~!$&'()*+,;=" pwd = "pwd@ /9+:?~!$&'()*+,;=" - uri = "mongodb://%s:%s@localhost" % (quote_plus(user), quote_plus(pwd)) + uri = f"mongodb://{quote_plus(user)}:{quote_plus(pwd)}@localhost" res = parse_uri(uri) self.assertEqual(user, res["username"]) self.assertEqual(pwd, res["password"]) diff --git a/test/test_uri_spec.py b/test/test_uri_spec.py index d12abf3b91..5b68c80401 100644 --- a/test/test_uri_spec.py +++ b/test/test_uri_spec.py @@ -13,7 +13,8 @@ # limitations under the License. """Test that the pymongo.uri_parser module is compliant with the connection -string and uri options specifications.""" +string and uri options specifications. +""" import json import os @@ -73,7 +74,7 @@ def setUp(self): def get_error_message_template(expected, artefact): - return "%s %s for test '%s'" % ("Expected" if expected else "Unexpected", artefact, "%s") + return "{} {} for test '{}'".format("Expected" if expected else "Unexpected", artefact, "%s") def run_scenario_in_dir(target_workdir): @@ -133,13 +134,15 @@ def run_scenario(self): for exp, actual in zip(test["hosts"], options["nodelist"]): self.assertEqual( - exp["host"], actual[0], "Expected host %s but got %s" % (exp["host"], actual[0]) + exp["host"], + actual[0], + "Expected host {} but got {}".format(exp["host"], actual[0]), ) if exp["port"] is not None: self.assertEqual( exp["port"], actual[1], - "Expected port %s but got %s" % (exp["port"], actual), + "Expected port {} but got {}".format(exp["port"], actual), ) # Compare auth options. @@ -157,7 +160,7 @@ def run_scenario(self): self.assertEqual( auth[elm], options[elm], - "Expected %s but got %s" % (auth[elm], options[elm]), + f"Expected {auth[elm]} but got {options[elm]}", ) # Compare URI options. 
@@ -183,7 +186,7 @@ def run_scenario(self): ), ) else: - self.fail("Missing expected option %s" % (opt,)) + self.fail(f"Missing expected option {opt}") return run_scenario_in_dir(test_workdir)(run_scenario) @@ -209,7 +212,7 @@ def create_tests(test_path): continue testmethod = create_test(testcase, dirpath) - testname = "test_%s_%s_%s" % ( + testname = "test_{}_{}_{}".format( dirname, os.path.splitext(filename)[0], str(dsc).replace(" ", "_"), diff --git a/test/test_write_concern.py b/test/test_write_concern.py index 02c562a348..822f3a4d1d 100644 --- a/test/test_write_concern.py +++ b/test/test_write_concern.py @@ -40,7 +40,7 @@ def test_equality_to_none(self): self.assertTrue(concern != None) # noqa def test_equality_compatible_type(self): - class _FakeWriteConcern(object): + class _FakeWriteConcern: def __init__(self, **document): self.document = document diff --git a/test/unified_format.py b/test/unified_format.py index 584ee04ddd..90cb442b28 100644 --- a/test/unified_format.py +++ b/test/unified_format.py @@ -131,7 +131,7 @@ # Build up a placeholder map. -PLACEHOLDER_MAP = dict() +PLACEHOLDER_MAP = {} for (provider_name, provider_data) in [ ("local", {"key": LOCAL_MASTER_KEY}), ("aws", AWS_CREDS), @@ -257,7 +257,7 @@ def parse_bulk_write_error_result(error): return parse_bulk_write_result(write_result) -class NonLazyCursor(object): +class NonLazyCursor: """A find cursor proxy that creates the remote cursor when initialized.""" def __init__(self, find_cursor, client): @@ -289,7 +289,7 @@ class EventListenerUtil(CMAPListener, CommandListener, ServerListener): def __init__( self, observe_events, ignore_commands, observe_sensitive_commands, store_events, entity_map ): - self._event_types = set(name.lower() for name in observe_events) + self._event_types = {name.lower() for name in observe_events} if observe_sensitive_commands: self._observe_sensitive_commands = True self._ignore_commands = set(ignore_commands) @@ -306,7 +306,7 @@ def __init__( for i in events: self._event_mapping[i].append(id) self.entity_map[id] = [] - super(EventListenerUtil, self).__init__() + super().__init__() def get_events(self, event_type): assert event_type in ("command", "cmap", "sdam", "all"), event_type @@ -321,7 +321,7 @@ def get_events(self, event_type): def add_event(self, event): event_name = type(event).__name__.lower() if event_name in self._event_types: - super(EventListenerUtil, self).add_event(event) + super().add_event(event) for id in self._event_mapping[event_name]: self.entity_map[id].append( { @@ -332,7 +332,7 @@ def add_event(self, event): ) def _command_event(self, event): - if not event.command_name.lower() in self._ignore_commands: + if event.command_name.lower() not in self._ignore_commands: self.add_event(event) def started(self, event): @@ -364,9 +364,10 @@ def closed(self, event: ServerClosedEvent) -> None: self.add_event(event) -class EntityMapUtil(object): +class EntityMapUtil: """Utility class that implements an entity map as per the unified - test format specification.""" + test format specification. 
+ """ def __init__(self, test_class): self._entities: Dict[str, Any] = {} @@ -384,14 +385,14 @@ def __getitem__(self, item): try: return self._entities[item] except KeyError: - self.test.fail("Could not find entity named %s in map" % (item,)) + self.test.fail(f"Could not find entity named {item} in map") def __setitem__(self, key, value): if not isinstance(key, str): self.test.fail("Expected entity name of type str, got %s" % (type(key))) if key in self._entities: - self.test.fail("Entity named %s already in map" % (key,)) + self.test.fail(f"Entity named {key} already in map") self._entities[key] = value @@ -410,9 +411,7 @@ def _handle_placeholders(self, spec: dict, current: dict, path: str) -> Any: def _create_entity(self, entity_spec, uri=None): if len(entity_spec) != 1: - self.test.fail( - "Entity spec %s did not contain exactly one top-level key" % (entity_spec,) - ) + self.test.fail(f"Entity spec {entity_spec} did not contain exactly one top-level key") entity_type, spec = next(iter(entity_spec.items())) spec = self._handle_placeholders(spec, spec, "") @@ -454,8 +453,9 @@ def _create_entity(self, entity_spec, uri=None): client = self[spec["client"]] if not isinstance(client, MongoClient): self.test.fail( - "Expected entity %s to be of type MongoClient, got %s" - % (spec["client"], type(client)) + "Expected entity {} to be of type MongoClient, got {}".format( + spec["client"], type(client) + ) ) options = parse_collection_or_database_options(spec.get("databaseOptions", {})) self[spec["id"]] = client.get_database(spec["databaseName"], **options) @@ -464,8 +464,9 @@ def _create_entity(self, entity_spec, uri=None): database = self[spec["database"]] if not isinstance(database, Database): self.test.fail( - "Expected entity %s to be of type Database, got %s" - % (spec["database"], type(database)) + "Expected entity {} to be of type Database, got {}".format( + spec["database"], type(database) + ) ) options = parse_collection_or_database_options(spec.get("collectionOptions", {})) self[spec["id"]] = database.get_collection(spec["collectionName"], **options) @@ -474,8 +475,9 @@ def _create_entity(self, entity_spec, uri=None): client = self[spec["client"]] if not isinstance(client, MongoClient): self.test.fail( - "Expected entity %s to be of type MongoClient, got %s" - % (spec["client"], type(client)) + "Expected entity {} to be of type MongoClient, got {}".format( + spec["client"], type(client) + ) ) opts = camel_to_snake_args(spec.get("sessionOptions", {})) if "default_transaction_options" in opts: @@ -522,7 +524,7 @@ def drop(self: GridFSBucket, *args: Any, **kwargs: Any) -> None: self[name] = thread return - self.test.fail("Unable to create entity of unknown type %s" % (entity_type,)) + self.test.fail(f"Unable to create entity of unknown type {entity_type}") def create_entities_from_spec(self, entity_spec, uri=None): for spec in entity_spec: @@ -532,12 +534,12 @@ def get_listener_for_client(self, client_name: str) -> EventListenerUtil: client = self[client_name] if not isinstance(client, MongoClient): self.test.fail( - "Expected entity %s to be of type MongoClient, got %s" % (client_name, type(client)) + f"Expected entity {client_name} to be of type MongoClient, got {type(client)}" ) listener = self._listeners.get(client_name) if not listener: - self.test.fail("No listeners configured for client %s" % (client_name,)) + self.test.fail(f"No listeners configured for client {client_name}") return listener @@ -545,8 +547,7 @@ def get_lsid_for_session(self, session_name): session = 
self[session_name] if not isinstance(session, ClientSession): self.test.fail( - "Expected entity %s to be of type ClientSession, got %s" - % (session_name, type(session)) + f"Expected entity {session_name} to be of type ClientSession, got {type(session)}" ) try: @@ -587,9 +588,10 @@ def get_lsid_for_session(self, session_name): } -class MatchEvaluatorUtil(object): +class MatchEvaluatorUtil: """Utility class that implements methods for evaluating matches as per - the unified test format specification.""" + the unified test format specification. + """ def __init__(self, test_class): self.test = test_class @@ -606,11 +608,11 @@ def _operation_exists(self, spec, actual, key_to_compare): else: self.test.assertNotIn(key_to_compare, actual) else: - self.test.fail("Expected boolean value for $$exists operator, got %s" % (spec,)) + self.test.fail(f"Expected boolean value for $$exists operator, got {spec}") def __type_alias_to_type(self, alias): if alias not in BSON_TYPE_ALIAS_MAP: - self.test.fail("Unrecognized BSON type alias %s" % (alias,)) + self.test.fail(f"Unrecognized BSON type alias {alias}") return BSON_TYPE_ALIAS_MAP[alias] def _operation_type(self, spec, actual, key_to_compare): @@ -653,11 +655,11 @@ def _operation_lte(self, spec, actual, key_to_compare): self.test.assertLessEqual(actual[key_to_compare], spec) def _evaluate_special_operation(self, opname, spec, actual, key_to_compare): - method_name = "_operation_%s" % (opname.strip("$"),) + method_name = "_operation_{}".format(opname.strip("$")) try: method = getattr(self, method_name) except AttributeError: - self.test.fail("Unsupported special matching operator %s" % (opname,)) + self.test.fail(f"Unsupported special matching operator {opname}") else: method(spec, actual, key_to_compare) @@ -668,7 +670,8 @@ def _evaluate_if_special_operation(self, expectation, actual, key_to_compare=Non If given, ``key_to_compare`` is assumed to be the key in ``expectation`` whose corresponding value needs to be evaluated for a possible special operation. ``key_to_compare`` - is ignored when ``expectation`` has only one key.""" + is ignored when ``expectation`` has only one key. 
+ """ if not isinstance(expectation, abc.Mapping): return False @@ -730,14 +733,16 @@ def match_result(self, expectation, actual, in_recursive_call=False): self._match_document(e, a, is_root=not in_recursive_call) else: self.match_result(e, a, in_recursive_call=True) - return + return None # account for flexible numerics in element-wise comparison if isinstance(expectation, int) or isinstance(expectation, float): self.test.assertEqual(expectation, actual) + return None else: self.test.assertIsInstance(actual, type(expectation)) self.test.assertEqual(expectation, actual) + return None def assertHasServiceId(self, spec, actual): if "hasServiceId" in spec: @@ -828,7 +833,7 @@ def match_event(self, event_type, expectation, actual): if "newDescription" in spec: self.match_server_description(actual.new_description, spec["newDescription"]) else: - raise Exception("Unsupported event type %s" % (name,)) + raise Exception(f"Unsupported event type {name}") def coerce_result(opname, result): @@ -840,7 +845,7 @@ def coerce_result(opname, result): if opname == "insertOne": return {"insertedId": result.inserted_id} if opname == "insertMany": - return {idx: _id for idx, _id in enumerate(result.inserted_ids)} + return dict(enumerate(result.inserted_ids)) if opname in ("deleteOne", "deleteMany"): return {"deletedCount": result.deleted_count} if opname in ("updateOne", "updateMany", "replaceOne"): @@ -904,11 +909,11 @@ def insert_initial_data(self, initial_data): @classmethod def setUpClass(cls): # super call creates internal client cls.client - super(UnifiedSpecTestMixinV1, cls).setUpClass() + super().setUpClass() # process file-level runOnRequirements run_on_spec = cls.TEST_SPEC.get("runOnRequirements", []) if not cls.should_run_on(run_on_spec): - raise unittest.SkipTest("%s runOnRequirements not satisfied" % (cls.__name__,)) + raise unittest.SkipTest(f"{cls.__name__} runOnRequirements not satisfied") # add any special-casing for skipping tests here if client_context.storage_engine == "mmapv1": @@ -916,7 +921,7 @@ def setUpClass(cls): raise unittest.SkipTest("MMAPv1 does not support retryWrites=True") def setUp(self): - super(UnifiedSpecTestMixinV1, self).setUp() + super().setUp() # process schemaVersion # note: we check major schema version during class generation # note: we do this here because we cannot run assertions in setUpClass @@ -924,7 +929,7 @@ def setUp(self): self.assertLessEqual( version, self.SCHEMA_VERSION, - "expected schema version %s or lower, got %s" % (self.SCHEMA_VERSION, version), + f"expected schema version {self.SCHEMA_VERSION} or lower, got {version}", ) # initialize internals @@ -1044,20 +1049,18 @@ def process_error(self, exception, spec): if error_labels_omit: for err_label in error_labels_omit: if exception.has_error_label(err_label): - self.fail("Exception '%s' unexpectedly had label '%s'" % (exception, err_label)) + self.fail(f"Exception '{exception}' unexpectedly had label '{err_label}'") if expect_result: if isinstance(exception, BulkWriteError): result = parse_bulk_write_error_result(exception) self.match_evaluator.match_result(expect_result, result) else: - self.fail( - "expectResult can only be specified with %s exceptions" % (BulkWriteError,) - ) + self.fail(f"expectResult can only be specified with {BulkWriteError} exceptions") def __raise_if_unsupported(self, opname, target, *target_types): if not isinstance(target, target_types): - self.fail("Operation %s not supported for entity of type %s" % (opname, type(target))) + self.fail(f"Operation {opname} not supported for 
entity of type {type(target)}") def __entityOperation_createChangeStream(self, target, *args, **kwargs): if client_context.storage_engine == "mmapv1": @@ -1153,6 +1156,7 @@ def _cursor_iterateUntilDocumentOrError(self, target, *args, **kwargs): return next(target) except StopIteration: pass + return None def _cursor_close(self, target, *args, **kwargs): self.__raise_if_unsupported("close", target, NonLazyCursor) @@ -1182,8 +1186,8 @@ def _clientEncryptionOperation_rewrapManyDataKey(self, target, *args, **kwargs): kwargs["master_key"] = opts.get("masterKey") data = target.rewrap_many_data_key(*args, **kwargs) if data.bulk_write_result: - return dict(bulkWriteResult=parse_bulk_write_result(data.bulk_write_result)) - return dict() + return {"bulkWriteResult": parse_bulk_write_result(data.bulk_write_result)} + return {} def _bucketOperation_download(self, target: GridFSBucket, *args: Any, **kwargs: Any) -> bytes: with target.open_download_stream(*args, **kwargs) as gout: @@ -1234,30 +1238,30 @@ def run_entity_operation(self, spec): arguments = {} if isinstance(target, MongoClient): - method_name = "_clientOperation_%s" % (opname,) + method_name = f"_clientOperation_{opname}" elif isinstance(target, Database): - method_name = "_databaseOperation_%s" % (opname,) + method_name = f"_databaseOperation_{opname}" elif isinstance(target, Collection): - method_name = "_collectionOperation_%s" % (opname,) + method_name = f"_collectionOperation_{opname}" # contentType is always stored in metadata in pymongo. if target.name.endswith(".files") and opname == "find": for doc in spec.get("expectResult", []): if "contentType" in doc: doc.setdefault("metadata", {})["contentType"] = doc.pop("contentType") elif isinstance(target, ChangeStream): - method_name = "_changeStreamOperation_%s" % (opname,) + method_name = f"_changeStreamOperation_{opname}" elif isinstance(target, NonLazyCursor): - method_name = "_cursor_%s" % (opname,) + method_name = f"_cursor_{opname}" elif isinstance(target, ClientSession): - method_name = "_sessionOperation_%s" % (opname,) + method_name = f"_sessionOperation_{opname}" elif isinstance(target, GridFSBucket): - method_name = "_bucketOperation_%s" % (opname,) + method_name = f"_bucketOperation_{opname}" if "id" in arguments: arguments["file_id"] = arguments.pop("id") # MD5 is always disabled in pymongo. arguments.pop("disable_md5", None) elif isinstance(target, ClientEncryption): - method_name = "_clientEncryptionOperation_%s" % (opname,) + method_name = f"_clientEncryptionOperation_{opname}" else: method_name = "doesNotExist" @@ -1270,7 +1274,7 @@ def run_entity_operation(self, spec): try: cmd = getattr(target, target_opname) except AttributeError: - self.fail("Unsupported operation %s on entity %s" % (opname, target)) + self.fail(f"Unsupported operation {opname} on entity {target}") else: cmd = functools.partial(method, target) @@ -1286,15 +1290,13 @@ def run_entity_operation(self, spec): # Ignore all operation errors but to avoid masking bugs don't # ignore things like TypeError and ValueError. 
if ignore and isinstance(exc, (PyMongoError,)): - return + return None if expect_error: return self.process_error(exc, expect_error) raise else: if expect_error: - self.fail( - 'Excepted error %s but "%s" succeeded: %s' % (expect_error, opname, result) - ) + self.fail(f'Excepted error {expect_error} but "{opname}" succeeded: {result}') if expect_result: actual = coerce_result(opname, result) @@ -1302,6 +1304,8 @@ def run_entity_operation(self, spec): if save_as_entity: self.entity_map[save_as_entity] = result + return None + return None def __set_fail_point(self, client, command_args): if not client_context.test_commands_enabled: @@ -1324,10 +1328,10 @@ def _testOperation_targetedFailPoint(self, spec): if not session._pinned_address: self.fail( "Cannot use targetedFailPoint operation with unpinned " - "session %s" % (spec["session"],) + "session {}".format(spec["session"]) ) - client = single_client("%s:%s" % session._pinned_address) + client = single_client("{}:{}".format(*session._pinned_address)) self.addCleanup(client.close) self.__set_fail_point(client=client, command_args=spec["failPoint"]) @@ -1422,9 +1426,7 @@ def _testOperation_assertEventCount(self, spec): Assert the given event was published exactly `count` times. """ client, event, count = spec["client"], spec["event"], spec["count"] - self.assertEqual( - self._event_count(client, event), count, "expected %s not %r" % (count, event) - ) + self.assertEqual(self._event_count(client, event), count, f"expected {count} not {event!r}") def _testOperation_waitForEvent(self, spec): """Run the waitForEvent test operation. @@ -1434,7 +1436,7 @@ def _testOperation_waitForEvent(self, spec): client, event, count = spec["client"], spec["event"], spec["count"] wait_until( lambda: self._event_count(client, event) >= count, - "find %s %s event(s)" % (count, event), + f"find {count} {event} event(s)", ) def _testOperation_wait(self, spec): @@ -1485,7 +1487,7 @@ def _testOperation_waitForThread(self, spec): thread.join(10) if thread.exc: raise thread.exc - self.assertFalse(thread.is_alive(), "Thread %s is still running" % (spec["thread"],)) + self.assertFalse(thread.is_alive(), "Thread {} is still running".format(spec["thread"])) def _testOperation_loop(self, spec): failure_key = spec.get("storeFailuresAsEntity") @@ -1527,11 +1529,11 @@ def _testOperation_loop(self, spec): def run_special_operation(self, spec): opname = spec["name"] - method_name = "_testOperation_%s" % (opname,) + method_name = f"_testOperation_{opname}" try: method = getattr(self, method_name) except AttributeError: - self.fail("Unsupported special test operation %s" % (opname,)) + self.fail(f"Unsupported special test operation {opname}") else: method(spec["arguments"]) @@ -1604,8 +1606,10 @@ def run_scenario(self, spec, uri=None): self.setUp() continue raise + return None else: self._run_scenario(spec, uri) + return None def _run_scenario(self, spec, uri=None): # maybe skip test manually @@ -1619,7 +1623,7 @@ def _run_scenario(self, spec, uri=None): # process skipReason skip_reason = spec.get("skipReason", None) if skip_reason is not None: - raise unittest.SkipTest("%s" % (skip_reason,)) + raise unittest.SkipTest(f"{skip_reason}") # process createEntities self._uri = uri @@ -1648,7 +1652,7 @@ class UnifiedSpecTestMeta(type): EXPECTED_FAILURES: Any def __init__(cls, *args, **kwargs): - super(UnifiedSpecTestMeta, cls).__init__(*args, **kwargs) + super().__init__(*args, **kwargs) def create_test(spec): def test_case(self): @@ -1658,7 +1662,9 @@ def test_case(self): for test_spec 
in cls.TEST_SPEC["tests"]: description = test_spec["description"] - test_name = "test_%s" % (description.strip(". ").replace(" ", "_").replace(".", "_"),) + test_name = "test_{}".format( + description.strip(". ").replace(" ", "_").replace(".", "_") + ) test_method = create_test(copy.deepcopy(test_spec)) test_method.__name__ = str(test_name) @@ -1690,13 +1696,15 @@ def generate_test_classes( **kwargs, ): """Method for generating test classes. Returns a dictionary where keys are - the names of test classes and values are the test class objects.""" + the names of test classes and values are the test class objects. + """ test_klasses = {} def test_base_class_factory(test_spec): """Utility that creates the base class to use for test generation. This is needed to ensure that cls.TEST_SPEC is appropriately set when - the metaclass __init__ is invoked.""" + the metaclass __init__ is invoked. + """ class SpecTestBase(with_metaclass(UnifiedSpecTestMeta)): # type: ignore TEST_SPEC = test_spec @@ -1716,7 +1724,7 @@ class SpecTestBase(with_metaclass(UnifiedSpecTestMeta)): # type: ignore scenario_def = json_util.loads(scenario_stream.read(), json_options=opts) test_type = os.path.splitext(filename)[0] - snake_class_name = "Test%s_%s_%s" % ( + snake_class_name = "Test{}_{}_{}".format( class_name_prefix, dirname.replace("-", "_"), test_type.replace("-", "_").replace(".", "_"), @@ -1728,8 +1736,7 @@ class SpecTestBase(with_metaclass(UnifiedSpecTestMeta)): # type: ignore mixin_class = _SCHEMA_VERSION_MAJOR_TO_MIXIN_CLASS.get(schema_version[0]) if mixin_class is None: raise ValueError( - "test file '%s' has unsupported schemaVersion '%s'" - % (fpath, schema_version) + f"test file '{fpath}' has unsupported schemaVersion '{schema_version}'" ) module_dict = {"__module__": module} module_dict.update(kwargs) diff --git a/test/utils.py b/test/utils.py index b39375925c..810a02b872 100644 --- a/test/utils.py +++ b/test/utils.py @@ -12,8 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-"""Utilities for testing pymongo -""" +"""Utilities for testing pymongo""" import contextlib import copy @@ -65,7 +64,7 @@ IMPOSSIBLE_WRITE_CONCERN = WriteConcern(w=50) -class BaseListener(object): +class BaseListener: def __init__(self): self.events = [] @@ -91,7 +90,7 @@ def matching(self, matcher): def wait_for_event(self, event, count): """Wait for a number of events to be published, or fail.""" - wait_until(lambda: self.event_count(event) >= count, "find %s %s event(s)" % (count, event)) + wait_until(lambda: self.event_count(event) >= count, f"find {count} {event} event(s)") class CMAPListener(BaseListener, monitoring.ConnectionPoolListener): @@ -142,7 +141,7 @@ def pool_closed(self, event): class EventListener(BaseListener, monitoring.CommandListener): def __init__(self): - super(EventListener, self).__init__() + super().__init__() self.results = defaultdict(list) @property @@ -176,7 +175,7 @@ def started_command_names(self) -> List[str]: def reset(self) -> None: """Reset the state of this listener.""" self.results.clear() - super(EventListener, self).reset() + super().reset() class TopologyEventListener(monitoring.TopologyListener): @@ -200,19 +199,19 @@ def reset(self): class AllowListEventListener(EventListener): def __init__(self, *commands): self.commands = set(commands) - super(AllowListEventListener, self).__init__() + super().__init__() def started(self, event): if event.command_name in self.commands: - super(AllowListEventListener, self).started(event) + super().started(event) def succeeded(self, event): if event.command_name in self.commands: - super(AllowListEventListener, self).succeeded(event) + super().succeeded(event) def failed(self, event): if event.command_name in self.commands: - super(AllowListEventListener, self).failed(event) + super().failed(event) class OvertCommandListener(EventListener): @@ -222,18 +221,18 @@ class OvertCommandListener(EventListener): def started(self, event): if event.command_name.lower() not in _SENSITIVE_COMMANDS: - super(OvertCommandListener, self).started(event) + super().started(event) def succeeded(self, event): if event.command_name.lower() not in _SENSITIVE_COMMANDS: - super(OvertCommandListener, self).succeeded(event) + super().succeeded(event) def failed(self, event): if event.command_name.lower() not in _SENSITIVE_COMMANDS: - super(OvertCommandListener, self).failed(event) + super().failed(event) -class _ServerEventListener(object): +class _ServerEventListener: """Listens to all events.""" def __init__(self): @@ -280,7 +279,7 @@ def failed(self, event): self.add_event(event) -class MockSocketInfo(object): +class MockSocketInfo: def __init__(self): self.cancel_context = _CancellationContext() self.more_to_come = False @@ -295,7 +294,7 @@ def __exit__(self, exc_type, exc_val, exc_tb): pass -class MockPool(object): +class MockPool: def __init__(self, address, options, handshake=True): self.gen = _PoolGeneration() self._lock = _create_lock() @@ -357,7 +356,7 @@ def __getitem__(self, item): return ScenarioDict({}) -class CompareType(object): +class CompareType: """Class that compares equal to any object of the given type(s).""" def __init__(self, types): @@ -367,7 +366,7 @@ def __eq__(self, other): return isinstance(other, self.types) -class FunctionCallRecorder(object): +class FunctionCallRecorder: """Utility class to wrap a callable and record its invocations.""" def __init__(self, function): @@ -392,7 +391,7 @@ def call_count(self): return len(self._call_list) -class TestCreator(object): +class TestCreator: """Class to create test 
cases from specifications.""" def __init__(self, create_test, test_class, test_path): @@ -415,7 +414,8 @@ def __init__(self, create_test, test_class, test_path): def _ensure_min_max_server_version(self, scenario_def, method): """Test modifier that enforces a version range for the server on a - test case.""" + test case. + """ if "minServerVersion" in scenario_def: min_ver = tuple(int(elt) for elt in scenario_def["minServerVersion"].split(".")) if min_ver is not None: @@ -524,7 +524,7 @@ def create_tests(self): # Construct test from scenario. for test_def in self.tests(scenario_def): - test_name = "test_%s_%s_%s" % ( + test_name = "test_{}_{}_{}".format( dirname, test_type.replace("-", "_").replace(".", "_"), str(test_def["description"].replace(" ", "_").replace(".", "_")), @@ -539,9 +539,9 @@ def create_tests(self): def _connection_string(h): - if h.startswith("mongodb://") or h.startswith("mongodb+srv://"): + if h.startswith(("mongodb://", "mongodb+srv://")): return h - return "mongodb://%s" % (str(h),) + return f"mongodb://{str(h)}" def _mongo_client(host, port, authenticate=True, directConnection=None, **kwargs): @@ -620,7 +620,7 @@ def ensure_all_connected(client: MongoClient) -> None: raise ConfigurationError("cluster is not a replica set") target_host_list = set(hello["hosts"] + hello.get("passives", [])) - connected_host_list = set([hello["me"]]) + connected_host_list = {hello["me"]} # Run hello until we have connected to each host at least once. def discover(): @@ -821,7 +821,7 @@ def assertRaisesExactly(cls, fn, *args, **kwargs): try: fn(*args, **kwargs) except Exception as e: - assert e.__class__ == cls, "got %s, expected %s" % (e.__class__.__name__, cls.__name__) + assert e.__class__ == cls, f"got {e.__class__.__name__}, expected {cls.__name__}" else: raise AssertionError("%s not raised" % cls) @@ -848,7 +848,7 @@ def wrapper(*args, **kwargs): return _ignore_deprecations() -class DeprecationFilter(object): +class DeprecationFilter: def __init__(self, action="ignore"): """Start filtering deprecations.""" self.warn_context = warnings.catch_warnings() @@ -922,7 +922,7 @@ def lazy_client_trial(reset, target, test, get_client): collection = client_context.client.pymongo_test.test with frequent_thread_switches(): - for i in range(NTRIALS): + for _i in range(NTRIALS): reset(collection) lazy_client = get_client() lazy_collection = lazy_client.pymongo_test.test @@ -972,11 +972,11 @@ class ExceptionCatchingThread(threading.Thread): def __init__(self, *args, **kwargs): self.exc = None - super(ExceptionCatchingThread, self).__init__(*args, **kwargs) + super().__init__(*args, **kwargs) def run(self): try: - super(ExceptionCatchingThread, self).run() + super().run() except BaseException as exc: self.exc = exc raise @@ -1147,6 +1147,6 @@ def prepare_spec_arguments(spec, arguments, opname, entity_map, with_txn_callbac elif cursor_type == "tailableAwait": arguments["cursor_type"] = CursorType.TAILABLE else: - assert False, f"Unsupported cursorType: {cursor_type}" + raise AssertionError(f"Unsupported cursorType: {cursor_type}") else: arguments[c2s] = arguments.pop(arg_name) diff --git a/test/utils_selection_tests.py b/test/utils_selection_tests.py index e693fc25f0..ccb3897966 100644 --- a/test/utils_selection_tests.py +++ b/test/utils_selection_tests.py @@ -109,9 +109,11 @@ def get_topology_type_name(scenario_def): def get_topology_settings_dict(**kwargs): - settings = dict( - monitor_class=DummyMonitor, heartbeat_frequency=HEARTBEAT_FREQUENCY, pool_class=MockPool - ) + settings = { + 
"monitor_class": DummyMonitor, + "heartbeat_frequency": HEARTBEAT_FREQUENCY, + "pool_class": MockPool, + } settings.update(kwargs) return settings @@ -255,7 +257,7 @@ class TestAllScenarios(unittest.TestCase): # Construct test from scenario. new_test = create_test(scenario_def) - test_name = "test_%s_%s" % (dirname, os.path.splitext(filename)[0]) + test_name = f"test_{dirname}_{os.path.splitext(filename)[0]}" new_test.__name__ = test_name setattr(TestAllScenarios, new_test.__name__, new_test) diff --git a/test/utils_spec_runner.py b/test/utils_spec_runner.py index 6530f39da6..4ca6f1cc58 100644 --- a/test/utils_spec_runner.py +++ b/test/utils_spec_runner.py @@ -49,7 +49,7 @@ class SpecRunnerThread(threading.Thread): def __init__(self, name): - super(SpecRunnerThread, self).__init__() + super().__init__() self.name = name self.exc = None self.daemon = True @@ -88,7 +88,7 @@ class SpecRunner(IntegrationTest): @classmethod def setUpClass(cls): - super(SpecRunner, cls).setUpClass() + super().setUpClass() cls.mongos_clients = [] # Speed up the tests by decreasing the heartbeat frequency. @@ -98,10 +98,10 @@ def setUpClass(cls): @classmethod def tearDownClass(cls): cls.knobs.disable() - super(SpecRunner, cls).tearDownClass() + super().tearDownClass() def setUp(self): - super(SpecRunner, self).setUp() + super().setUp() self.targets = {} self.listener = None # type: ignore self.pool_listener = None @@ -170,7 +170,7 @@ def assertErrorLabelsContain(self, exc, expected_labels): def assertErrorLabelsOmit(self, exc, omit_labels): for label in omit_labels: self.assertFalse( - exc.has_error_label(label), msg="error labels should not contain %s" % (label,) + exc.has_error_label(label), msg=f"error labels should not contain {label}" ) def kill_all_sessions(self): @@ -242,6 +242,7 @@ def _helper(expected_result, result): self.assertEqual(expected_result, result) _helper(expected_result, result) + return None def get_object_name(self, op): """Allow subclasses to override handling of 'object' @@ -335,7 +336,7 @@ def _run_op(self, sessions, collection, op, in_with_transaction): expected_result = op.get("result") if expect_error(op): with self.assertRaises(self.allowable_errors(op), msg=op["name"]) as context: - out = self.run_operation(sessions, collection, op.copy()) + self.run_operation(sessions, collection, op.copy()) exc = context.exception if expect_error_message(expected_result): if isinstance(exc, BulkWriteError): @@ -425,9 +426,9 @@ def check_events(self, test, listener, session_ids): for key, val in expected.items(): if val is None: if key in actual: - self.fail("Unexpected key [%s] in %r" % (key, actual)) + self.fail(f"Unexpected key [{key}] in {actual!r}") elif key not in actual: - self.fail("Expected key [%s] in %r" % (key, actual)) + self.fail(f"Expected key [{key}] in {actual!r}") else: # Workaround an incorrect command started event in fle2v2-CreateCollection.yml # added in DRIVERS-2524. @@ -436,7 +437,7 @@ def check_events(self, test, listener, session_ids): if val.get(n) is None: val.pop(n, None) self.assertEqual( - val, decode_raw(actual[key]), "Key [%s] in %s" % (key, actual) + val, decode_raw(actual[key]), f"Key [{key}] in {actual}" ) else: self.assertEqual(actual, expected) @@ -459,7 +460,8 @@ def get_outcome_coll_name(self, outcome, collection): def run_test_ops(self, sessions, collection, test): """Added to allow retryable writes spec to override a test's - operation.""" + operation. 
+ """ self.run_operations(sessions, collection, test["operations"]) def parse_client_options(self, opts): diff --git a/test/version.py b/test/version.py index e102db7111..1dd1bec5f9 100644 --- a/test/version.py +++ b/test/version.py @@ -18,7 +18,7 @@ class Version(tuple): def __new__(cls, *version): padded_version = cls._padded(version, 4) - return super(Version, cls).__new__(cls, tuple(padded_version)) + return super().__new__(cls, tuple(padded_version)) @classmethod def _padded(cls, iter, length, padding=0): From bc1a513d1041fceabc8cb1e8372bcb0807343813 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Thu, 11 May 2023 15:29:43 -0700 Subject: [PATCH 0391/1588] PYTHON-2504 Add pyupgrade/ruff commit to git-blame ignore --- .git-blame-ignore-revs | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.git-blame-ignore-revs b/.git-blame-ignore-revs index 8f02673e41..67ad992c75 100644 --- a/.git-blame-ignore-revs +++ b/.git-blame-ignore-revs @@ -1,2 +1,4 @@ # Initial pre-commit reformat 5578999a90e439fbca06fc0ffc98f4d04e96f7b4 +# pyupgrade and ruff +0092b0af79378abf35b6db73a082ecb91af1d973 From 0123d32a20ced822c711c1312785e1419ace365f Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Fri, 12 May 2023 12:17:40 -0700 Subject: [PATCH 0392/1588] PYTHON-3709 Remove "beta" from Queryable Encryption Equality API (#1210) --- doc/examples/encryption.rst | 17 ++++++++-------- pymongo/encryption.py | 37 ++++++++++++----------------------- pymongo/encryption_options.py | 8 ++------ pymongo/errors.py | 3 --- 4 files changed, 22 insertions(+), 43 deletions(-) diff --git a/doc/examples/encryption.rst b/doc/examples/encryption.rst index 2823d3f9bc..52fc548285 100644 --- a/doc/examples/encryption.rst +++ b/doc/examples/encryption.rst @@ -605,18 +605,17 @@ Queryable Encryption .. _automatic-queryable-client-side-encryption: -Automatic Queryable Encryption (Beta) -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Automatic Queryable Encryption +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -PyMongo 4.4 brings beta support for Queryable Encryption with MongoDB >=7.0. +Automatic Queryable Encryption requires MongoDB 7.0+ Enterprise or a MongoDB 7.0+ Atlas cluster. Queryable Encryption is the second version of Client-Side Field Level Encryption. Data is encrypted client-side. Queryable Encryption supports indexed encrypted fields, which are further processed server-side. -You must have MongoDB 7.0 Enterprise to preview the capability. - -Automatic encryption in Queryable Encryption is configured with an ``encrypted_fields`` mapping, as demonstrated by the following example:: +Automatic encryption in Queryable Encryption is configured with an ``encrypted_fields`` mapping, +as demonstrated by the following example:: import os from bson.codec_options import CodecOptions @@ -669,10 +668,10 @@ Automatic encryption in Queryable Encryption is configured with an ``encrypted_f In the above example, the ``firstName`` and ``lastName`` fields are automatically encrypted and decrypted. -Explicit Queryable Encryption (Beta) -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Explicit Queryable Encryption +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -PyMongo 4.4 brings beta support for Queryable Encryption with MongoDB >=7.0. +Explicit Queryable Encryption requires MongoDB 7.0+. Queryable Encryption is the second version of Client-Side Field Level Encryption. Data is encrypted client-side. 
Queryable Encryption supports indexed encrypted fields, diff --git a/pymongo/encryption.py b/pymongo/encryption.py index f2eb71ce71..3e6163f80f 100644 --- a/pymongo/encryption.py +++ b/pymongo/encryption.py @@ -417,17 +417,11 @@ class Algorithm(str, enum.Enum): INDEXED = "Indexed" """Indexed. - .. note:: Support for Queryable Encryption is in beta. - Backwards-breaking changes may be made before the final release. - .. versionadded:: 4.2 """ UNINDEXED = "Unindexed" """Unindexed. - .. note:: Support for Queryable Encryption is in beta. - Backwards-breaking changes may be made before the final release. - .. versionadded:: 4.2 """ RANGEPREVIEW = "RangePreview" @@ -441,10 +435,7 @@ class Algorithm(str, enum.Enum): class QueryType(str, enum.Enum): - """**(BETA)** An enum that defines the supported values for explicit encryption query_type. - - .. note:: Support for Queryable Encryption is in beta. - Backwards-breaking changes may be made before the final release. + """An enum that defines the supported values for explicit encryption query_type. .. versionadded:: 4.2 """ @@ -453,7 +444,11 @@ class QueryType(str, enum.Enum): """Used to encrypt a value for an equality query.""" RANGEPREVIEW = "rangePreview" - """Used to encrypt a value for a range query.""" + """Used to encrypt a value for a range query. + + .. note:: Support for Range queries is in beta. + Backwards-breaking changes may be made before the final release. +""" class ClientEncryption(Generic[_DocumentType]): @@ -577,9 +572,6 @@ def create_encrypted_collection( ) -> Tuple[Collection[_DocumentType], Mapping[str, Any]]: """Create a collection with encryptedFields. - .. note:: Support for Queryable Encryption is in beta. - Backwards-breaking changes may be made before the final release. - .. warning:: This function does not update the encryptedFieldsMap in the client's AutoEncryptionOpts, thus the user must create a new client after calling this function with @@ -592,7 +584,7 @@ def create_encrypted_collection( :Parameters: - `name`: the name of the collection to create - - `encrypted_fields` (dict): **(BETA)** Document that describes the encrypted fields for + - `encrypted_fields` (dict): Document that describes the encrypted fields for Queryable Encryption. For example:: { @@ -801,23 +793,18 @@ def encrypt( :class:`~bson.binary.Binary` with subtype 4 ( :attr:`~bson.binary.UUID_SUBTYPE`). - `key_alt_name`: Identifies a key vault document by 'keyAltName'. - - `query_type` (str): **(BETA)** The query type to execute. See - :class:`QueryType` for valid options. - - `contention_factor` (int): **(BETA)** The contention factor to use + - `query_type` (str): The query type to execute. See :class:`QueryType` for valid options. + - `contention_factor` (int): The contention factor to use when the algorithm is :attr:`Algorithm.INDEXED`. An integer value *must* be given when the :attr:`Algorithm.INDEXED` algorithm is used. - `range_opts`: Experimental only, not intended for public use. - .. note:: `query_type`, and `contention_factor` are part of the Queryable Encryption beta. - Backwards-breaking changes may be made before the final release. - :Returns: The encrypted value, a :class:`~bson.binary.Binary` with subtype 6. .. versionchanged:: 4.2 Added the `query_type` and `contention_factor` parameters. - """ return self._encrypt_helper( value=value, @@ -846,16 +833,16 @@ def encrypt_expression( provided. :Parameters: - - `expression`: **(BETA)** The BSON aggregate or match expression to encrypt. 
+ - `expression`: The BSON aggregate or match expression to encrypt. - `algorithm` (string): The encryption algorithm to use. See :class:`Algorithm` for some valid options. - `key_id`: Identifies a data key by ``_id`` which must be a :class:`~bson.binary.Binary` with subtype 4 ( :attr:`~bson.binary.UUID_SUBTYPE`). - `key_alt_name`: Identifies a key vault document by 'keyAltName'. - - `query_type` (str): **(BETA)** The query type to execute. See + - `query_type` (str): The query type to execute. See :class:`QueryType` for valid options. - - `contention_factor` (int): **(BETA)** The contention factor to use + - `contention_factor` (int): The contention factor to use when the algorithm is :attr:`Algorithm.INDEXED`. An integer value *must* be given when the :attr:`Algorithm.INDEXED` algorithm is used. diff --git a/pymongo/encryption_options.py b/pymongo/encryption_options.py index e87d96b31a..285b082a7d 100644 --- a/pymongo/encryption_options.py +++ b/pymongo/encryption_options.py @@ -148,11 +148,11 @@ def __init__( - `crypt_shared_lib_path` (optional): Override the path to load the crypt_shared library. - `crypt_shared_lib_required` (optional): If True, raise an error if libmongocrypt is unable to load the crypt_shared library. - - `bypass_query_analysis` (optional): **(BETA)** If ``True``, disable automatic analysis + - `bypass_query_analysis` (optional): If ``True``, disable automatic analysis of outgoing commands. Set `bypass_query_analysis` to use explicit encryption on indexed fields without the MongoDB Enterprise Advanced licensed crypt_shared library. - - `encrypted_fields_map`: **(BETA)** Map of collection namespace ("db.coll") to documents + - `encrypted_fields_map`: Map of collection namespace ("db.coll") to documents that described the encrypted fields for Queryable Encryption. For example:: { @@ -175,10 +175,6 @@ def __init__( } } - .. note:: `bypass_query_analysis` and `encrypted_fields_map` are part of the - Queryable Encryption beta. Backwards-breaking changes may be made before the - final release. - .. versionchanged:: 4.2 Added `encrypted_fields_map` `crypt_shared_lib_path`, `crypt_shared_lib_required`, and `bypass_query_analysis` parameters. diff --git a/pymongo/errors.py b/pymongo/errors.py index 36f97f4b5a..e7aef90552 100644 --- a/pymongo/errors.py +++ b/pymongo/errors.py @@ -356,9 +356,6 @@ def timeout(self) -> bool: class EncryptedCollectionError(EncryptionError): """Raised when creating a collection with encrypted_fields fails. - .. note:: EncryptedCollectionError and `create_encrypted_collection` are both part of the - Queryable Encryption beta. Backwards-breaking changes may be made before the final release. - .. versionadded:: 4.4 """ From 622df873aecfe87e61b35867bc4a22ba5be52733 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Fri, 12 May 2023 16:05:23 -0500 Subject: [PATCH 0393/1588] PYTHON-3696 Bump minimum pymongocrypt version req to >=1.6 for QEv2 (#1211) --- doc/changelog.rst | 2 +- setup.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/doc/changelog.rst b/doc/changelog.rst index a0d73eb4de..3d03a6f386 100644 --- a/doc/changelog.rst +++ b/doc/changelog.rst @@ -7,7 +7,7 @@ Changes in Version 4.4 - Added support for MongoDB 7.0. - Added support for passing a list containing (key, direction) pairs or keys to :meth:`~pymongo.collection.Collection.create_index`. -- pymongocrypt 1.5.0 or later is now required for client side field level +- pymongocrypt 1.6.0 or later is now required for client side field level encryption support. 
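Putting ``encrypted_fields_map`` and automatic Queryable Encryption together, a minimal sketch (``kms_providers`` and the data key id ``key_id`` are assumed to exist already)::

    from pymongo import MongoClient
    from pymongo.encryption_options import AutoEncryptionOpts

    encrypted_fields_map = {
        "db.encryptedCollection": {
            "fields": [
                {
                    "path": "firstName",
                    "bsonType": "string",
                    "keyId": key_id,
                    "queries": [{"queryType": "equality"}],
                }
            ]
        }
    }
    opts = AutoEncryptionOpts(
        kms_providers,
        "keyvault.datakeys",
        encrypted_fields_map=encrypted_fields_map,
    )
    client = MongoClient(auto_encryption_opts=opts)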
- Improved support for Pyright to improve typing support for IDEs like Visual Studio Code or Visual Studio. diff --git a/setup.py b/setup.py index 4fa51fa314..9e8cf4b291 100755 --- a/setup.py +++ b/setup.py @@ -281,7 +281,7 @@ def build_extension(self, ext): aws_reqs = ["pymongo-auth-aws<2.0.0"] extras_require = { - "encryption": ["pymongocrypt>=1.5.0,<2.0.0"] + aws_reqs, + "encryption": ["pymongocrypt>=1.6.0,<2.0.0"] + aws_reqs, "ocsp": pyopenssl_reqs, "snappy": ["python-snappy"], "zstd": ["zstandard"], From 738048bf4e62db9b9ae8733ecd0459aeecb7895a Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Fri, 12 May 2023 16:29:24 -0500 Subject: [PATCH 0394/1588] PYTHON-3646 Update readme for PyMongo driver (#1212) --- README.rst | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/README.rst b/README.rst index cc2b79d842..71d47bdc0b 100644 --- a/README.rst +++ b/README.rst @@ -24,8 +24,8 @@ Support / Feedback For issues with, questions about, or feedback for PyMongo, please look into our `support channels `_. Please do not email any of the PyMongo developers directly with issues or -questions - you're more likely to get an answer on the `MongoDB Community -Forums `_. +questions - you're more likely to get an answer on `StackOverflow `_ +(using a "mongodb" tag). Bugs / Feature Requests ======================= @@ -192,6 +192,12 @@ Documentation can be generated by running **python setup.py doc**. Generated documentation can be found in the *doc/build/html/* directory. +Learning Resources +================== + +MongoDB Learn - `Python courses `_. +`Python Articles on Developer Center `_. + Testing ======= From 2a869b56ca42118820f047e5481debb723c13836 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Fri, 12 May 2023 16:58:10 -0500 Subject: [PATCH 0395/1588] PYTHON-3613 Improving Time-Series Scalability (#1213) --- .../timeseries-collection.json | 65 +++++++++++++++++++ 1 file changed, 65 insertions(+) diff --git a/test/collection_management/timeseries-collection.json b/test/collection_management/timeseries-collection.json index b5638fd36e..8525056fd1 100644 --- a/test/collection_management/timeseries-collection.json +++ b/test/collection_management/timeseries-collection.json @@ -250,6 +250,71 @@ ] } ] + }, + { + "description": "createCollection with bucketing options", + "runOnRequirements": [ + { + "minServerVersion": "7.0" + } + ], + "operations": [ + { + "name": "dropCollection", + "object": "database0", + "arguments": { + "collection": "test" + } + }, + { + "name": "createCollection", + "object": "database0", + "arguments": { + "collection": "test", + "timeseries": { + "timeField": "time", + "bucketMaxSpanSeconds": 3600, + "bucketRoundingSeconds": 3600 + } + } + }, + { + "name": "assertCollectionExists", + "object": "testRunner", + "arguments": { + "databaseName": "ts-tests", + "collectionName": "test" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "drop": "test" + }, + "databaseName": "ts-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "create": "test", + "timeseries": { + "timeField": "time", + "bucketMaxSpanSeconds": 3600, + "bucketRoundingSeconds": 3600 + } + }, + "databaseName": "ts-tests" + } + } + ] + } + ] } ] } From bda9e3a0bb533b6d0c6c5141feeecb77c4d31709 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Mon, 15 May 2023 06:36:36 -0500 Subject: [PATCH 0396/1588] PYTHON-3469 Error if RewrapManyDataKey is called with masterKey and without provider (#1214) --- 
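The bucketing options exercised by the time-series spec test above map directly onto ``create_collection``. A sketch, assuming a MongoDB 7.0+ deployment on the default port::

    from pymongo import MongoClient

    db = MongoClient()["ts-tests"]
    db.drop_collection("test")
    db.create_collection(
        "test",
        timeseries={
            "timeField": "time",
            "bucketMaxSpanSeconds": 3600,
            "bucketRoundingSeconds": 3600,
        },
    )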
pymongo/encryption.py | 2 ++ test/test_encryption.py | 6 ++++++ 2 files changed, 8 insertions(+) diff --git a/pymongo/encryption.py b/pymongo/encryption.py index 3e6163f80f..1d407fae88 100644 --- a/pymongo/encryption.py +++ b/pymongo/encryption.py @@ -1033,6 +1033,8 @@ def rewrap_many_data_key( .. versionadded:: 4.2 """ + if master_key is not None and provider is None: + raise ConfigurationError("A provider must be given if a master_key is given") self._check_closed() with _wrap_encryption_errors(): raw_result = self._encryption.rewrap_many_data_key(filter, provider, master_key) diff --git a/test/test_encryption.py b/test/test_encryption.py index 95f18eb307..314b8dfbbe 100644 --- a/test/test_encryption.py +++ b/test/test_encryption.py @@ -2387,6 +2387,12 @@ def run_test(self, src_provider, dst_provider): decrypt_result2 = client_encryption2.decrypt(cipher_text) self.assertEqual(decrypt_result2, "test") + # 8. Case 2. Provider is not optional when master_key is given. + with self.assertRaises(ConfigurationError): + rewrap_many_data_key_result = client_encryption2.rewrap_many_data_key( + {}, master_key=self.MASTER_KEYS[dst_provider] + ) + # https://github.com/mongodb/specifications/blob/5cf3ed/source/client-side-encryption/tests/README.rst#on-demand-aws-credentials class TestOnDemandAWSCredentials(EncryptionIntegrationTest): From 4c0196d3409286bad13d654126a6f5a226ce1430 Mon Sep 17 00:00:00 2001 From: thalassemia Date: Fri, 26 May 2023 07:40:32 -0700 Subject: [PATCH 0397/1588] PYTHON-3717 Speed up _type_marker check in BSON (#1219) --- bson/_cbsonmodule.c | 52 +++++++++++++++++++-------------------- bson/_cbsonmodule.h | 2 +- pymongo/_cmessagemodule.c | 33 +++++++++++++++---------- 3 files changed, 47 insertions(+), 40 deletions(-) diff --git a/bson/_cbsonmodule.c b/bson/_cbsonmodule.c index e45a11be32..8e5e8b6c0c 100644 --- a/bson/_cbsonmodule.c +++ b/bson/_cbsonmodule.c @@ -55,6 +55,7 @@ struct module_state { PyObject* DatetimeMS; PyObject* _min_datetime_ms; PyObject* _max_datetime_ms; + PyObject* _type_marker_str; }; #define GETSTATE(m) ((struct module_state*)PyModule_GetState(m)) @@ -378,6 +379,9 @@ static int _load_python_objects(PyObject* module) { PyObject* compiled = NULL; struct module_state *state = GETSTATE(module); + /* Python str for faster _type_marker check */ + state->_type_marker_str = PyUnicode_FromString("_type_marker"); + if (_load_object(&state->Binary, "bson.binary", "Binary") || _load_object(&state->Code, "bson.code", "Code") || _load_object(&state->ObjectId, "bson.objectid", "ObjectId") || @@ -428,12 +432,12 @@ static int _load_python_objects(PyObject* module) { * * Return the type marker, 0 if there is no marker, or -1 on failure. */ -static long _type_marker(PyObject* object) { +static long _type_marker(PyObject* object, PyObject* _type_marker_str) { PyObject* type_marker = NULL; long type = 0; - if (PyObject_HasAttrString(object, "_type_marker")) { - type_marker = PyObject_GetAttrString(object, "_type_marker"); + if (PyObject_HasAttr(object, _type_marker_str)) { + type_marker = PyObject_GetAttr(object, _type_marker_str); if (type_marker == NULL) { return -1; } @@ -450,13 +454,6 @@ static long _type_marker(PyObject* object) { if (type_marker && PyLong_CheckExact(type_marker)) { type = PyLong_AsLong(type_marker); Py_DECREF(type_marker); - /* - * Py(Long|Int)_AsLong returns -1 for error but -1 is a valid value - * so we call PyErr_Occurred to differentiate. 
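The guard added to ``rewrap_many_data_key`` above fails fast on the client instead of surfacing a server error. In use, assuming a configured ``client_encryption`` (the AWS master key shown is a placeholder)::

    from pymongo.errors import ConfigurationError

    master_key = {"region": "us-east-1", "key": "arn:aws:kms:..."}

    try:
        # A master_key without a provider is rejected before any round trip.
        client_encryption.rewrap_many_data_key({}, master_key=master_key)
    except ConfigurationError:
        pass

    # Valid: name the provider whenever a master_key is supplied.
    client_encryption.rewrap_many_data_key({}, provider="aws", master_key=master_key)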
- */ - if (type == -1 && PyErr_Occurred()) { - return -1; - } } else { Py_XDECREF(type_marker); } @@ -504,13 +501,12 @@ int cbson_convert_type_registry(PyObject* registry_obj, type_registry_t* registr return 0; } -/* Fill out a codec_options_t* from a CodecOptions object. Use with the "O&" - * format spec in PyArg_ParseTuple. +/* Fill out a codec_options_t* from a CodecOptions object. * * Return 1 on success. options->document_class is a new reference. * Return 0 on failure. */ -int convert_codec_options(PyObject* options_obj, void* p) { +int convert_codec_options(PyObject* self, PyObject* options_obj, void* p) { codec_options_t* options = (codec_options_t*)p; PyObject* type_registry_obj = NULL; long type_marker; @@ -527,7 +523,8 @@ int convert_codec_options(PyObject* options_obj, void* p) { &options->datetime_conversion)) return 0; - type_marker = _type_marker(options->document_class); + type_marker = _type_marker(options->document_class, + GETSTATE(self)->_type_marker_str); if (type_marker < 0) { return 0; } @@ -730,7 +727,7 @@ static int _write_element_to_buffer(PyObject* self, buffer_t buffer, * problems with python sub interpreters. Our custom types should * have a _type_marker attribute, which we can switch on instead. */ - long type = _type_marker(value); + long type = _type_marker(value, state->_type_marker_str); if (type < 0) { return 0; } @@ -1382,7 +1379,7 @@ int write_dict(PyObject* self, buffer_t buffer, long type_marker; /* check for RawBSONDocument */ - type_marker = _type_marker(dict); + type_marker = _type_marker(dict, state->_type_marker_str); if (type_marker < 0) { return 0; } @@ -1504,18 +1501,20 @@ static PyObject* _cbson_dict_to_bson(PyObject* self, PyObject* args) { PyObject* result; unsigned char check_keys; unsigned char top_level = 1; + PyObject* options_obj; codec_options_t options; buffer_t buffer; PyObject* raw_bson_document_bytes_obj; long type_marker; - if (!PyArg_ParseTuple(args, "ObO&|b", &dict, &check_keys, - convert_codec_options, &options, &top_level)) { + if (!(PyArg_ParseTuple(args, "ObO|b", &dict, &check_keys, + &options_obj, &top_level) && + convert_codec_options(self, options_obj, &options))) { return NULL; } /* check for RawBSONDocument */ - type_marker = _type_marker(dict); + type_marker = _type_marker(dict, GETSTATE(self)->_type_marker_str); if (type_marker < 0) { destroy_codec_options(&options); return NULL; @@ -2526,6 +2525,7 @@ static PyObject* _cbson_element_to_dict(PyObject* self, PyObject* args) { /* TODO: Support buffer protocol */ char* string; PyObject* bson; + PyObject* options_obj; codec_options_t options; unsigned position; unsigned max; @@ -2535,8 +2535,9 @@ static PyObject* _cbson_element_to_dict(PyObject* self, PyObject* args) { PyObject* value; PyObject* result_tuple; - if (!PyArg_ParseTuple(args, "OIIO&p", &bson, &position, &max, - convert_codec_options, &options, &raw_array)) { + if (!(PyArg_ParseTuple(args, "OIIOp", &bson, &position, &max, + &options_obj, &raw_array) && + convert_codec_options(self, options_obj, &options))) { return NULL; } @@ -2638,7 +2639,7 @@ static PyObject* _cbson_bson_to_dict(PyObject* self, PyObject* args) { Py_buffer view = {0}; if (! 
(PyArg_ParseTuple(args, "OO", &bson, &options_obj) && - convert_codec_options(options_obj, &options))) { + convert_codec_options(self, options_obj, &options))) { return result; } @@ -2715,10 +2716,8 @@ static PyObject* _cbson_decode_all(PyObject* self, PyObject* args) { PyObject* options_obj = NULL; Py_buffer view = {0}; - if (!PyArg_ParseTuple(args, "OO", &bson, &options_obj)) { - return NULL; - } - if (!convert_codec_options(options_obj, &options)) { + if (!(PyArg_ParseTuple(args, "OO", &bson, &options_obj) && + convert_codec_options(self, options_obj, &options))) { return NULL; } @@ -2966,6 +2965,7 @@ static int _cbson_clear(PyObject *m) { Py_CLEAR(GETSTATE(m)->MaxKey); Py_CLEAR(GETSTATE(m)->UTC); Py_CLEAR(GETSTATE(m)->REType); + Py_CLEAR(GETSTATE(m)->_type_marker_str); return 0; } diff --git a/bson/_cbsonmodule.h b/bson/_cbsonmodule.h index 6ff453b8ff..682205bd84 100644 --- a/bson/_cbsonmodule.h +++ b/bson/_cbsonmodule.h @@ -86,7 +86,7 @@ typedef struct codec_options_t { #define _cbson_convert_codec_options_INDEX 4 #define _cbson_convert_codec_options_RETURN int -#define _cbson_convert_codec_options_PROTO (PyObject* options_obj, void* p) +#define _cbson_convert_codec_options_PROTO (PyObject* self, PyObject* options_obj, void* p) #define _cbson_destroy_codec_options_INDEX 5 #define _cbson_destroy_codec_options_RETURN void diff --git a/pymongo/_cmessagemodule.c b/pymongo/_cmessagemodule.c index 2f03ce73e0..7d5e2db3cc 100644 --- a/pymongo/_cmessagemodule.c +++ b/pymongo/_cmessagemodule.c @@ -75,19 +75,21 @@ static PyObject* _cbson_query_message(PyObject* self, PyObject* args) { int num_to_return; PyObject* query; PyObject* field_selector; + PyObject* options_obj; codec_options_t options; buffer_t buffer = NULL; int length_location, message_length; PyObject* result = NULL; - if (!PyArg_ParseTuple(args, "Iet#iiOOO&", + if (!(PyArg_ParseTuple(args, "Iet#iiOOO", &flags, "utf-8", &collection_name, &collection_name_length, &num_to_skip, &num_to_return, &query, &field_selector, - convert_codec_options, &options)) { + &options_obj) && + convert_codec_options(state->_cbson, options_obj, &options))) { return NULL; } buffer = pymongo_buffer_new(); @@ -220,6 +222,7 @@ static PyObject* _cbson_op_msg(PyObject* self, PyObject* args) { Py_ssize_t identifier_length = 0; PyObject* docs; PyObject* doc; + PyObject* options_obj; codec_options_t options; buffer_t buffer = NULL; int length_location, message_length; @@ -229,14 +232,15 @@ static PyObject* _cbson_op_msg(PyObject* self, PyObject* args) { PyObject* iterator = NULL; /*flags, command, identifier, docs, opts*/ - if (!PyArg_ParseTuple(args, "IOet#OO&", + if (!(PyArg_ParseTuple(args, "IOet#OO", &flags, &command, "utf-8", &identifier, &identifier_length, &docs, - convert_codec_options, &options)) { + &options_obj) && + convert_codec_options(state->_cbson, options_obj, &options))) { return NULL; } buffer = pymongo_buffer_new(); @@ -528,14 +532,15 @@ _cbson_encode_batched_op_msg(PyObject* self, PyObject* args) { PyObject* ctx = NULL; PyObject* to_publish = NULL; PyObject* result = NULL; + PyObject* options_obj; codec_options_t options; buffer_t buffer; struct module_state *state = GETSTATE(self); - if (!PyArg_ParseTuple(args, "bOObO&O", + if (!(PyArg_ParseTuple(args, "bOObOO", &op, &command, &docs, &ack, - convert_codec_options, &options, - &ctx)) { + &options_obj, &ctx) && + convert_codec_options(state->_cbson, options_obj, &options))) { return NULL; } if (!(buffer = pymongo_buffer_new())) { @@ -581,14 +586,15 @@ _cbson_batched_op_msg(PyObject* self, 
PyObject* args) { PyObject* ctx = NULL; PyObject* to_publish = NULL; PyObject* result = NULL; + PyObject* options_obj; codec_options_t options; buffer_t buffer; struct module_state *state = GETSTATE(self); - if (!PyArg_ParseTuple(args, "bOObO&O", + if (!(PyArg_ParseTuple(args, "bOObOO", &op, &command, &docs, &ack, - convert_codec_options, &options, - &ctx)) { + &options_obj, &ctx) && + convert_codec_options(state->_cbson, options_obj, &options))) { return NULL; } if (!(buffer = pymongo_buffer_new())) { @@ -850,14 +856,15 @@ _cbson_encode_batched_write_command(PyObject* self, PyObject* args) { PyObject* ctx = NULL; PyObject* to_publish = NULL; PyObject* result = NULL; + PyObject* options_obj; codec_options_t options; buffer_t buffer; struct module_state *state = GETSTATE(self); - if (!PyArg_ParseTuple(args, "et#bOOO&O", "utf-8", + if (!(PyArg_ParseTuple(args, "et#bOOOO", "utf-8", &ns, &ns_len, &op, &command, &docs, - convert_codec_options, &options, - &ctx)) { + &options_obj, &ctx) && + convert_codec_options(state->_cbson, options_obj, &options))) { return NULL; } if (!(buffer = pymongo_buffer_new())) { From 3bc853a6206e833e01cb00985ef4897e044b1417 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Wed, 31 May 2023 18:48:05 -0500 Subject: [PATCH 0398/1588] PYTHON-3692 [Build Failure] Container Test failed MONGODB-AWS on MongoDB 5.0 (#1220) --- .evergreen/config.yml | 45 ++++++++++++++++++++++++++++--------------- 1 file changed, 29 insertions(+), 16 deletions(-) diff --git a/.evergreen/config.yml b/.evergreen/config.yml index 3f06fc1a03..cdc7178ba2 100644 --- a/.evergreen/config.yml +++ b/.evergreen/config.yml @@ -598,12 +598,14 @@ functions: - command: shell.exec type: test params: + shell: "bash" working_dir: "src" script: | ${PREPARE_SHELL} + set -ex cd ${DRIVERS_TOOLS}/.evergreen/auth_aws - . ./activate_venv.sh - mongo aws_e2e_regular_aws.js + . ./activate-authawsvenv.sh + python aws_tester.py regular - command: shell.exec type: test params: @@ -628,12 +630,14 @@ functions: - command: shell.exec type: test params: + shell: "bash" working_dir: "src" script: | ${PREPARE_SHELL} + set -ex cd ${DRIVERS_TOOLS}/.evergreen/auth_aws - . ./activate_venv.sh - mongo aws_e2e_assume_role.js + . ./activate-authawsvenv.sh + python aws_tester.py assume-role - command: shell.exec type: test params: @@ -665,15 +669,17 @@ functions: type: test params: working_dir: "src" + shell: "bash" script: | ${PREPARE_SHELL} if [ "${skip_EC2_auth_test}" = "true" ]; then echo "This platform does not support the EC2 auth test, skipping..." exit 0 fi + set -ex cd ${DRIVERS_TOOLS}/.evergreen/auth_aws - . ./activate_venv.sh - mongo aws_e2e_ec2.js + . ./activate-authawsvenv.sh + python aws_tester.py ec2 - command: shell.exec type: test params: @@ -694,15 +700,17 @@ functions: type: test params: working_dir: "src" + shell: "bash" script: | ${PREPARE_SHELL} if [ "${skip_EC2_auth_test}" = "true" ]; then echo "This platform does not support the web identity auth test, skipping..." exit 0 fi + set -ex cd ${DRIVERS_TOOLS}/.evergreen/auth_aws - . ./activate_venv.sh - mongo aws_e2e_web_identity.js + . ./activate-authawsvenv.sh + python aws_tester.py web-identity - command: shell.exec type: test params: @@ -857,6 +865,7 @@ functions: - command: shell.exec type: test params: + shell: "bash" working_dir: "src" script: | ${PREPARE_SHELL} @@ -864,14 +873,12 @@ functions: echo "This platform does not support the ECS auth test, skipping..." exit 0 fi + set -ex cd ${DRIVERS_TOOLS}/.evergreen/auth_aws - . 
./activate_venv.sh - cat < setup.js - const mongo_binaries = "$MONGODB_BINARIES"; - const project_dir = "$PROJECT_DIRECTORY"; - EOF - - mongo --nodb setup.js aws_e2e_ecs.js + . ./activate-authawsvenv.sh + export MONGODB_BINARIES="${MONGODB_BINARIES}"; + export PROJECT_DIRECTORY="${PROJECT_DIRECTORY}"; + python aws_tester.py ecs cd - "cleanup": @@ -2328,6 +2335,12 @@ axes: batchtime: 10080 # 7 days variables: python3_binary: python3 + - id: ubuntu-18.04 + display_name: "Ubuntu 18.04" + run_on: ubuntu1804-small + batchtime: 10080 # 7 days + variables: + python3_binary: python3 - id: rhel83-zseries display_name: "RHEL 8.3 (zSeries)" run_on: rhel83-zseries-small @@ -3188,7 +3201,7 @@ buildvariants: - matrix_name: "aws-auth-test" matrix_spec: - platform: [ubuntu-20.04] + platform: [ubuntu-18.04] python-version: ["3.7"] display_name: "MONGODB-AWS Auth ${platform} ${python-version}" tasks: From 2fe01929e93929cb904aafac1038fadd3d324395 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Wed, 31 May 2023 18:48:34 -0500 Subject: [PATCH 0399/1588] BUILD-17302 AWS EC2 credential retrieval 404s in Drivers CI (#1218) --- .evergreen/config.yml | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/.evergreen/config.yml b/.evergreen/config.yml index cdc7178ba2..ac2ac11513 100644 --- a/.evergreen/config.yml +++ b/.evergreen/config.yml @@ -1049,6 +1049,18 @@ functions: # Remove all Docker images docker rmi -f $(docker images -a -q) &> /dev/null || true + "teardown_aws": + - command: shell.exec + params: + shell: "bash" + script: | + ${PREPARE_SHELL} + cd "${DRIVERS_TOOLS}/.evergreen/auth_aws" + if [ -f "./aws_e2e_setup.json" ]; then + . ./activate-authawsvenv.sh + python ./lib/aws_assign_instance_profile.py + fi + "build release": - command: shell.exec type: test @@ -1144,6 +1156,7 @@ post: - func: "upload mo artifacts" - func: "upload test results" - func: "stop mongo-orchestration" + - func: "teardown_aws" - func: "cleanup" - func: "teardown_docker" From 5831934b379e2a3c634778e92feed41837d4c9b2 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Mon, 5 Jun 2023 12:03:51 -0500 Subject: [PATCH 0400/1588] PYTHON-3691 [Build Failure] test_client.TestClient.test_exhaust_network_error (#1216) --- pymongo/pyopenssl_context.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/pymongo/pyopenssl_context.py b/pymongo/pyopenssl_context.py index bfc52df671..83d8f853ef 100644 --- a/pymongo/pyopenssl_context.py +++ b/pymongo/pyopenssl_context.py @@ -107,6 +107,11 @@ def _call(self, call, *args, **kwargs): try: return call(*args, **kwargs) except BLOCKING_IO_ERRORS as exc: + # Check for closed socket. 
+ if self.fileno() == -1: + if timeout and _time.monotonic() - start > timeout: + raise _socket.timeout("timed out") + raise SSLError("Underlying socket has been closed") if isinstance(exc, _SSL.WantReadError): want_read = True want_write = False From 1ba4c0bcbdde870ca3c857069a038677eef74c29 Mon Sep 17 00:00:00 2001 From: thalassemia Date: Mon, 5 Jun 2023 16:35:39 -0700 Subject: [PATCH 0401/1588] PYTHON-3718 Faster INT2STRING (#1221) --- bson/_cbsonmodule.c | 101 +++++++++++++++++++++++++++++++++++++- bson/_cbsonmodule.h | 19 ++++--- doc/contributors.rst | 1 + pymongo/_cmessagemodule.c | 7 ++- setup.py | 7 ++- test/test_bson.py | 10 ++++ 6 files changed, 134 insertions(+), 11 deletions(-) diff --git a/bson/_cbsonmodule.c b/bson/_cbsonmodule.c index 8e5e8b6c0c..a5bc66f0c5 100644 --- a/bson/_cbsonmodule.c +++ b/bson/_cbsonmodule.c @@ -82,6 +82,99 @@ struct module_state { #define DATETIME_MS 3 #define DATETIME_AUTO 4 +/* Converts integer to its string representation in decimal notation. */ +extern int cbson_long_long_to_str(long long num, char* str, size_t size) { + // Buffer should fit 64-bit signed integer + if (size < 21) { + PyErr_Format( + PyExc_RuntimeError, + "Buffer too small to hold long long: %d < 21", size); + return -1; + } + int index = 0; + int sign = 1; + // Convert to unsigned to handle -LLONG_MIN overflow + unsigned long long absNum; + // Handle the case of 0 + if (num == 0) { + str[index++] = '0'; + str[index] = '\0'; + return 0; + } + // Handle negative numbers + if (num < 0) { + sign = -1; + absNum = 0ULL - (unsigned long long)num; + } else { + absNum = (unsigned long long)num; + } + // Convert the number to string + unsigned long long digit; + while (absNum > 0) { + digit = absNum % 10ULL; + str[index++] = (char)digit + '0'; // Convert digit to character + absNum /= 10; + } + // Add minus sign if negative + if (sign == -1) { + str[index++] = '-'; + } + str[index] = '\0'; // Null terminator + // Reverse the string + int start = 0; + int end = index - 1; + while (start < end) { + char temp = str[start]; + str[start++] = str[end]; + str[end--] = temp; + } + return 0; +} + +static PyObject* _test_long_long_to_str(PyObject* self, PyObject* args) { + // Test extreme values + Py_ssize_t maxNum = PY_SSIZE_T_MAX; + Py_ssize_t minNum = PY_SSIZE_T_MIN; + Py_ssize_t num; + char str_1[BUF_SIZE]; + char str_2[BUF_SIZE]; + int res = LL2STR(str_1, (long long)minNum); + if (res == -1) { + return NULL; + } + INT2STRING(str_2, (long long)minNum); + if (strcmp(str_1, str_2) != 0) { + PyErr_Format( + PyExc_RuntimeError, + "LL2STR != INT2STRING: %s != %s", str_1, str_2); + return NULL; + } + LL2STR(str_1, (long long)maxNum); + INT2STRING(str_2, (long long)maxNum); + if (strcmp(str_1, str_2) != 0) { + PyErr_Format( + PyExc_RuntimeError, + "LL2STR != INT2STRING: %s != %s", str_1, str_2); + return NULL; + } + + // Test common values + for (num = 0; num < 10000; num++) { + char str_1[BUF_SIZE]; + char str_2[BUF_SIZE]; + LL2STR(str_1, (long long)num); + INT2STRING(str_2, (long long)num); + if (strcmp(str_1, str_2) != 0) { + PyErr_Format( + PyExc_RuntimeError, + "LL2STR != INT2STRING: %s != %s", str_1, str_2); + return NULL; + } + } + + return args; +} + /* Get an error class from the bson.errors module. 
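The C helper above is easier to audit next to a pure-Python rendering of the same algorithm; the key detail in both is working on the unsigned magnitude so that negating ``LLONG_MIN`` cannot overflow. A sketch only, not the shipped code path::

    def long_long_to_str(num: int) -> str:
        # Mirror cbson_long_long_to_str: handle 0, then build digits from the
        # absolute value (least-significant first) and prepend the sign.
        if num == 0:
            return "0"
        sign = "-" if num < 0 else ""
        abs_num = -num if num < 0 else num
        digits = []
        while abs_num > 0:
            abs_num, digit = divmod(abs_num, 10)
            digits.append(chr(ord("0") + digit))
        return sign + "".join(reversed(digits))

    assert long_long_to_str(-2**63) == str(-2**63)
    assert long_long_to_str(0) == "0"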
* * Returns a new ref */ @@ -1027,13 +1120,16 @@ static int _write_element_to_buffer(PyObject* self, buffer_t buffer, } for(i = 0; i < items; i++) { int list_type_byte = pymongo_buffer_save_space(buffer, 1); - char name[16]; + char name[BUF_SIZE]; PyObject* item_value; if (list_type_byte == -1) { return 0; } - INT2STRING(name, (int)i); + int res = LL2STR(name, (long long)i); + if (res == -1) { + return 0; + } if (!buffer_write_bytes(buffer, name, (int)strlen(name) + 1)) { return 0; } @@ -2934,6 +3030,7 @@ static PyMethodDef _CBSONMethods[] = { {"_element_to_dict", _cbson_element_to_dict, METH_VARARGS, "Decode a single key, value pair."}, {"_array_of_documents_to_buffer", _cbson_array_of_documents_to_buffer, METH_VARARGS, "Convert raw array of documents to a stream of BSON documents"}, + {"_test_long_long_to_str", _test_long_long_to_str, METH_VARARGS, "Test conversion of extreme and common Py_ssize_t values to str."}, {NULL, NULL, 0, NULL} }; diff --git a/bson/_cbsonmodule.h b/bson/_cbsonmodule.h index 682205bd84..b7b92538e4 100644 --- a/bson/_cbsonmodule.h +++ b/bson/_cbsonmodule.h @@ -23,28 +23,35 @@ /* * This macro is basically an implementation of asprintf for win32 * We print to the provided buffer to get the string value as an int. + * USE LL2STR. This is kept only to test LL2STR. */ #if defined(_MSC_VER) && (_MSC_VER >= 1400) #define INT2STRING(buffer, i) \ _snprintf_s((buffer), \ - _scprintf("%d", (i)) + 1, \ - _scprintf("%d", (i)) + 1, \ - "%d", \ + _scprintf("%lld", (i)) + 1, \ + _scprintf("%lld", (i)) + 1, \ + "%lld", \ (i)) #define STRCAT(dest, n, src) strcat_s((dest), (n), (src)) #else #define INT2STRING(buffer, i) \ _snprintf((buffer), \ - _scprintf("%d", (i)) + 1, \ - "%d", \ + _scprintf("%lld", (i)) + 1, \ + "%lld", \ (i)) #define STRCAT(dest, n, src) strcat((dest), (src)) #endif #else -#define INT2STRING(buffer, i) snprintf((buffer), sizeof((buffer)), "%d", (i)) +#define INT2STRING(buffer, i) snprintf((buffer), sizeof((buffer)), "%lld", (i)) #define STRCAT(dest, n, src) strcat((dest), (src)) #endif +/* Just enough space in char array to hold LLONG_MIN and null terminator */ +#define BUF_SIZE 21 +/* Converts integer to its string representation in decimal notation. 
*/ +extern int cbson_long_long_to_str(long long int num, char* str, size_t size); +#define LL2STR(buffer, i) cbson_long_long_to_str((i), (buffer), sizeof(buffer)) + typedef struct type_registry_t { PyObject* encoder_map; PyObject* decoder_map; diff --git a/doc/contributors.rst b/doc/contributors.rst index 7efda5b20d..0bea46466e 100644 --- a/doc/contributors.rst +++ b/doc/contributors.rst @@ -94,3 +94,4 @@ The following is a list of people who have contributed to - Arie Bovenberg (ariebovenberg) - Ben Warner (bcwarner) - Jean-Christophe Fillion-Robin (jcfr) +- Sean Cheah (thalassemia) diff --git a/pymongo/_cmessagemodule.c b/pymongo/_cmessagemodule.c index 7d5e2db3cc..ee7623d832 100644 --- a/pymongo/_cmessagemodule.c +++ b/pymongo/_cmessagemodule.c @@ -767,8 +767,11 @@ _batched_write_command( int cur_doc_begin; int cur_size; int enough_data = 0; - char key[16]; - INT2STRING(key, idx); + char key[BUF_SIZE]; + int res = LL2STR(key, (long long)idx); + if (res == -1) { + return 0; + } if (!buffer_write_bytes(buffer, "\x03", 1) || !buffer_write_bytes(buffer, key, (int)strlen(key) + 1)) { goto fail; diff --git a/setup.py b/setup.py index 9e8cf4b291..e570d04c5a 100755 --- a/setup.py +++ b/setup.py @@ -263,7 +263,12 @@ def build_extension(self, ext): Extension( "pymongo._cmessage", include_dirs=["bson"], - sources=["pymongo/_cmessagemodule.c", "bson/buffer.c"], + sources=[ + "pymongo/_cmessagemodule.c", + "bson/_cbsonmodule.c", + "bson/time64.c", + "bson/buffer.c", + ], ), ] diff --git a/test/test_bson.py b/test/test_bson.py index a6e6352333..e38fe970f2 100644 --- a/test/test_bson.py +++ b/test/test_bson.py @@ -1310,5 +1310,15 @@ def __int__(self): encode({"x": float_ms}) +class TestLongLongToString(unittest.TestCase): + def test_long_long_to_string(self): + try: + from bson import _cbson + + _cbson._test_long_long_to_str() + except ImportError: + print("_cbson was not imported. Check compilation logs.") + + if __name__ == "__main__": unittest.main() From c7e06e6fc17d829ade89916129e6264100b53b2f Mon Sep 17 00:00:00 2001 From: Dainis Gorbunovs Date: Tue, 6 Jun 2023 00:38:28 +0100 Subject: [PATCH 0402/1588] PYTHON-3725 Fix Test Failure - MockupDB test_network_disconnect_primary (#1222) --- doc/contributors.rst | 1 + test/mockupdb/test_network_disconnect_primary.py | 6 +++--- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/doc/contributors.rst b/doc/contributors.rst index 0bea46466e..17ae4784e2 100644 --- a/doc/contributors.rst +++ b/doc/contributors.rst @@ -95,3 +95,4 @@ The following is a list of people who have contributed to - Ben Warner (bcwarner) - Jean-Christophe Fillion-Robin (jcfr) - Sean Cheah (thalassemia) +- Dainis Gorbunovs (DainisGorbunovs) diff --git a/test/mockupdb/test_network_disconnect_primary.py b/test/mockupdb/test_network_disconnect_primary.py index dd14abf84f..936130484a 100755 --- a/test/mockupdb/test_network_disconnect_primary.py +++ b/test/mockupdb/test_network_disconnect_primary.py @@ -26,12 +26,12 @@ def test_network_disconnect_primary(self): # Application operation fails against primary. Test that topology # type changes from ReplicaSetWithPrimary to ReplicaSetNoPrimary. 
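The ``BUF_SIZE`` of 21 used above is exactly the space the widest 64-bit value needs. A quick check of the arithmetic::

    # LLONG_MIN is -9223372036854775808: 19 digits plus the sign is 20
    # characters, plus one byte for the C NUL terminator makes 21.
    assert len(str(-2**63)) == 20
    assert len(str(-2**63)) + 1 == 21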
# http://bit.ly/1B5ttuL - primary, secondary = servers = (MockupDB() for _ in range(2)) - for server in servers: + primary, secondary = MockupDB(), MockupDB() + for server in primary, secondary: server.run() self.addCleanup(server.stop) - hosts = [server.address_string for server in servers] + hosts = [server.address_string for server in (primary, secondary)] primary_response = OpReply( ismaster=True, setName="rs", hosts=hosts, minWireVersion=2, maxWireVersion=6 ) From 1ad0df085841cdb052057bd1981e8d9991da55d9 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Tue, 6 Jun 2023 12:06:08 -0700 Subject: [PATCH 0403/1588] PYTHON-3724 Remove null values from `command_started_event` in fle2v2-CreateCollection.yml (#1223) --- .../spec/legacy/fle2v2-CreateCollection.json | 18 ------------------ test/utils_spec_runner.py | 6 ------ 2 files changed, 24 deletions(-) diff --git a/test/client-side-encryption/spec/legacy/fle2v2-CreateCollection.json b/test/client-side-encryption/spec/legacy/fle2v2-CreateCollection.json index 819d2eec3c..cc8bd17145 100644 --- a/test/client-side-encryption/spec/legacy/fle2v2-CreateCollection.json +++ b/test/client-side-encryption/spec/legacy/fle2v2-CreateCollection.json @@ -158,9 +158,6 @@ "command": { "create": "encryptedCollection", "encryptedFields": { - "escCollection": null, - "ecocCollection": null, - "eccCollection": null, "fields": [ { "path": "firstName", @@ -343,9 +340,6 @@ "command": { "create": "encryptedCollection", "encryptedFields": { - "escCollection": null, - "ecocCollection": null, - "eccCollection": null, "fields": [ { "path": "firstName", @@ -851,9 +845,6 @@ "command": { "create": "encryptedCollection", "encryptedFields": { - "escCollection": null, - "ecocCollection": null, - "eccCollection": null, "fields": [ { "path": "firstName", @@ -1048,9 +1039,6 @@ "command": { "create": "encryptedCollection", "encryptedFields": { - "escCollection": null, - "ecocCollection": null, - "eccCollection": null, "fields": [ { "path": "firstName", @@ -1367,9 +1355,6 @@ "command": { "create": "encryptedCollection", "encryptedFields": { - "escCollection": null, - "ecocCollection": null, - "eccCollection": null, "fields": [ { "path": "firstName", @@ -1635,9 +1620,6 @@ "command": { "create": "encryptedCollection", "encryptedFields": { - "escCollection": null, - "ecocCollection": null, - "eccCollection": null, "fields": [ { "path": "firstName", diff --git a/test/utils_spec_runner.py b/test/utils_spec_runner.py index 4ca6f1cc58..21cc3e6d81 100644 --- a/test/utils_spec_runner.py +++ b/test/utils_spec_runner.py @@ -430,12 +430,6 @@ def check_events(self, test, listener, session_ids): elif key not in actual: self.fail(f"Expected key [{key}] in {actual!r}") else: - # Workaround an incorrect command started event in fle2v2-CreateCollection.yml - # added in DRIVERS-2524. 
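The MockupDB fix above replaces a classic pitfall: tuple-unpacking a generator expression consumes it, so the old ``for server in servers`` loop iterated an already-exhausted generator and never started the servers. In miniature::

    servers = (object() for _ in range(2))
    primary, secondary = servers  # unpacking consumes both items
    assert list(servers) == []    # nothing left for a second iteration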
- if key == "encryptedFields": - for n in ("eccCollection", "ecocCollection", "escCollection"): - if val.get(n) is None: - val.pop(n, None) self.assertEqual( val, decode_raw(actual[key]), f"Key [{key}] in {actual}" ) From 7146be01aef4dd34fdd22c9af942f350ad8e581f Mon Sep 17 00:00:00 2001 From: Noah Stapp Date: Fri, 9 Jun 2023 12:00:14 -0700 Subject: [PATCH 0404/1588] PYTHON-3721 Stop Testing on AWS Linux 2018 (#1226) --- .evergreen/config.yml | 16 +++++++--------- 1 file changed, 7 insertions(+), 9 deletions(-) diff --git a/.evergreen/config.yml b/.evergreen/config.yml index ac2ac11513..f62cb0d0c0 100644 --- a/.evergreen/config.yml +++ b/.evergreen/config.yml @@ -2284,14 +2284,6 @@ axes: - id: platform display_name: OS values: - - id: awslinux - display_name: "Amazon Linux 2018 (Enterprise)" - run_on: amazon1-2018-test - batchtime: 10080 # 7 days - variables: - skip_crypt_shared: true - python3_binary: "/opt/python/3.8/bin/python3" - libmongocrypt_url: https://s3.amazonaws.com/mciuploads/libmongocrypt/linux-64-amazon-ami/master/latest/libmongocrypt.tar.gz - id: archlinux-test display_name: "Archlinux" run_on: archlinux-test @@ -2330,6 +2322,12 @@ axes: skip_web_identity_auth_test: true python3_binary: /Library/Frameworks/Python.framework/Versions/3.8/bin/python3 libmongocrypt_url: https://s3.amazonaws.com/mciuploads/libmongocrypt/macos/master/latest/libmongocrypt.tar.gz + - id: rhel76 + display_name: "RHEL 7.6" + run_on: rhel76-small + batchtime: 10080 # 7 days + variables: + libmongocrypt_url: https://s3.amazonaws.com/mciuploads/libmongocrypt/rhel-70-64-bit/master/latest/libmongocrypt.tar.gz - id: rhel84 display_name: "RHEL 8.4" run_on: rhel84-small @@ -2990,7 +2988,7 @@ buildvariants: - matrix_name: "tests-python-version-supports-openssl-102-test-ssl" matrix_spec: - platform: awslinux + platform: rhel76 # Python 3.10+ requires OpenSSL 1.1.1+ python-version: ["3.7", "3.8", "3.9", "pypy3.7", "pypy3.8"] auth-ssl: "*" From 0bce579b819561021e2abda9681670ff4e925437 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Fri, 9 Jun 2023 13:08:56 -0700 Subject: [PATCH 0405/1588] PYTHON-3728 Simplify convert_codec_options signature (#1225) --- bson/_cbsonmodule.c | 6 +++--- bson/_cbsonmodule.h | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/bson/_cbsonmodule.c b/bson/_cbsonmodule.c index a5bc66f0c5..2632e2f339 100644 --- a/bson/_cbsonmodule.c +++ b/bson/_cbsonmodule.c @@ -599,8 +599,7 @@ int cbson_convert_type_registry(PyObject* registry_obj, type_registry_t* registr * Return 1 on success. options->document_class is a new reference. * Return 0 on failure. 
*/ -int convert_codec_options(PyObject* self, PyObject* options_obj, void* p) { - codec_options_t* options = (codec_options_t*)p; +int convert_codec_options(PyObject* self, PyObject* options_obj, codec_options_t* options) { PyObject* type_registry_obj = NULL; long type_marker; @@ -613,8 +612,9 @@ int convert_codec_options(PyObject* self, PyObject* options_obj, void* p) { &options->unicode_decode_error_handler, &options->tzinfo, &type_registry_obj, - &options->datetime_conversion)) + &options->datetime_conversion)) { return 0; + } type_marker = _type_marker(options->document_class, GETSTATE(self)->_type_marker_str); diff --git a/bson/_cbsonmodule.h b/bson/_cbsonmodule.h index b7b92538e4..3be2b74427 100644 --- a/bson/_cbsonmodule.h +++ b/bson/_cbsonmodule.h @@ -93,7 +93,7 @@ typedef struct codec_options_t { #define _cbson_convert_codec_options_INDEX 4 #define _cbson_convert_codec_options_RETURN int -#define _cbson_convert_codec_options_PROTO (PyObject* self, PyObject* options_obj, void* p) +#define _cbson_convert_codec_options_PROTO (PyObject* self, PyObject* options_obj, codec_options_t* options) #define _cbson_destroy_codec_options_INDEX 5 #define _cbson_destroy_codec_options_RETURN void From 3f687f71fb1f6c6e7fbf742cc7731213f3521627 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Mon, 12 Jun 2023 12:41:59 -0700 Subject: [PATCH 0406/1588] PYTHON-3443 Remove redundant code to avoid Coverity warnings (#1228) --- bson/_cbsonmodule.c | 20 ++++++++------------ 1 file changed, 8 insertions(+), 12 deletions(-) diff --git a/bson/_cbsonmodule.c b/bson/_cbsonmodule.c index 2632e2f339..5918a678c6 100644 --- a/bson/_cbsonmodule.c +++ b/bson/_cbsonmodule.c @@ -304,19 +304,15 @@ static int millis_from_datetime_ms(PyObject* dt, long long* out){ long long millis; if (!(ll_millis = PyNumber_Long(dt))){ - if (PyErr_Occurred()) { // TypeError - return 0; - } - } - - if ((millis = PyLong_AsLongLong(ll_millis)) == -1){ - if (PyErr_Occurred()) { /* Overflow */ - PyErr_SetString(PyExc_OverflowError, - "MongoDB datetimes can only handle up to 8-byte ints"); - return 0; - } + return 0; } + millis = PyLong_AsLongLong(ll_millis); Py_DECREF(ll_millis); + if (millis == -1 && PyErr_Occurred()) { /* Overflow */ + PyErr_SetString(PyExc_OverflowError, + "MongoDB datetimes can only handle up to 8-byte ints"); + return 0; + } *out = millis; return 1; } @@ -2081,7 +2077,7 @@ static PyObject* get_value(PyObject* self, PyObject* name, const char* buffer, millis = max_millis; } // Continues from here to return a datetime. - } else if (dt_auto) { + } else { // dt_auto if (millis < min_millis || millis > max_millis){ value = datetime_ms_from_millis(self, millis); break; // Out-of-range so done. 
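The ``dt_auto`` branch above backs ``DatetimeConversion.DATETIME_AUTO``: in-range values decode to plain :class:`datetime.datetime`, while out-of-range values come back wrapped instead of raising. A sketch::

    from datetime import datetime

    from bson import decode, encode
    from bson.codec_options import CodecOptions, DatetimeConversion
    from bson.datetime_ms import DatetimeMS

    opts = CodecOptions(datetime_conversion=DatetimeConversion.DATETIME_AUTO)
    raw = encode({"in_range": DatetimeMS(0), "out_of_range": DatetimeMS(2**55)})
    doc = decode(raw, opts)
    assert isinstance(doc["in_range"], datetime)        # representable
    assert isinstance(doc["out_of_range"], DatetimeMS)  # overflow, wrapped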
From ec3437849e4cf4186f117d85be173494f1bb9b75 Mon Sep 17 00:00:00 2001 From: Noah Stapp Date: Mon, 12 Jun 2023 15:43:30 -0700 Subject: [PATCH 0407/1588] PYTHON-3702 Stop using utcnow and utcfromtimestamp (#1229) --- bson/datetime_ms.py | 2 +- doc/examples/datetimes.rst | 8 +++++--- doc/tutorial.rst | 2 +- gridfs/grid_file.py | 2 +- pymongo/ocsp_cache.py | 5 +++-- pymongo/ocsp_support.py | 3 ++- test/test_bson.py | 4 ++-- test/test_client.py | 2 +- test/test_objectid.py | 4 ++-- test/test_ocsp_cache.py | 4 ++-- test/utils_selection_tests.py | 2 +- 11 files changed, 21 insertions(+), 17 deletions(-) diff --git a/bson/datetime_ms.py b/bson/datetime_ms.py index c64a0cce87..5fc8b70328 100644 --- a/bson/datetime_ms.py +++ b/bson/datetime_ms.py @@ -26,7 +26,7 @@ from bson.tz_util import utc EPOCH_AWARE = datetime.datetime.fromtimestamp(0, utc) -EPOCH_NAIVE = datetime.datetime.utcfromtimestamp(0) +EPOCH_NAIVE = datetime.datetime.fromtimestamp(0, tz=datetime.timezone.utc).replace(tzinfo=None) class DatetimeMS: diff --git a/doc/examples/datetimes.rst b/doc/examples/datetimes.rst index 562c9480a6..2dc9c003eb 100644 --- a/doc/examples/datetimes.rst +++ b/doc/examples/datetimes.rst @@ -25,10 +25,12 @@ time into MongoDB: .. doctest:: - >>> result = db.objects.insert_one({"last_modified": datetime.datetime.utcnow()}) + >>> result = db.objects.insert_one( + ... {"last_modified": datetime.datetime.now(tz=timezone.utc)} + ... ) -Always use :meth:`datetime.datetime.utcnow`, which returns the current time in -UTC, instead of :meth:`datetime.datetime.now`, which returns the current local +Always use :meth:`datetime.datetime.now(tz=timezone.utc)`, which explicitly returns the current time in +UTC, instead of :meth:`datetime.datetime.now`, with no arguments, which returns the current local time. Avoid doing this: .. doctest:: diff --git a/doc/tutorial.rst b/doc/tutorial.rst index d7854c885a..768b535fe3 100644 --- a/doc/tutorial.rst +++ b/doc/tutorial.rst @@ -109,7 +109,7 @@ post: ... "author": "Mike", ... "text": "My first blog post!", ... "tags": ["mongodb", "python", "pymongo"], - ... "date": datetime.datetime.utcnow(), + ... "date": datetime.datetime.now(tz=timezone.utc), ... } Note that documents can contain native Python types (like diff --git a/gridfs/grid_file.py b/gridfs/grid_file.py index fd260963d7..fe3b56cdde 100644 --- a/gridfs/grid_file.py +++ b/gridfs/grid_file.py @@ -292,7 +292,7 @@ def __flush(self) -> Any: self.__flush_buffer() # The GridFS spec says length SHOULD be an Int64. self._file["length"] = Int64(self._position) - self._file["uploadDate"] = datetime.datetime.utcnow() + self._file["uploadDate"] = datetime.datetime.now(tz=datetime.timezone.utc) return self._coll.files.insert_one(self._file, session=self._session) except DuplicateKeyError: diff --git a/pymongo/ocsp_cache.py b/pymongo/ocsp_cache.py index 0c50902167..b60a24b027 100644 --- a/pymongo/ocsp_cache.py +++ b/pymongo/ocsp_cache.py @@ -16,6 +16,7 @@ from collections import namedtuple from datetime import datetime as _datetime +from datetime import timezone from pymongo.lock import _create_lock @@ -60,7 +61,7 @@ def __setitem__(self, key, value): return # Do nothing if the response is invalid. 
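``datetime.utcnow()`` and ``datetime.utcfromtimestamp()`` are deprecated as of Python 3.12, which is what motivates the substitutions above. The replacement pattern in one place::

    from datetime import datetime, timezone

    aware_now = datetime.now(tz=timezone.utc)   # timezone-aware current UTC time
    naive_now = aware_now.replace(tzinfo=None)  # drop tzinfo where a naive
                                                # utcnow()-style value is expected
    epoch_naive = datetime.fromtimestamp(0, tz=timezone.utc).replace(tzinfo=None)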
- if not (value.this_update <= _datetime.utcnow() < value.next_update): + if not (value.this_update <= _datetime.now(tz=timezone.utc) < value.next_update): return # Cache new response OR update cached response if new response @@ -81,7 +82,7 @@ def __getitem__(self, item): value = self._data[cache_key] # Return cached response if it is still valid. - if value.this_update <= _datetime.utcnow() < value.next_update: + if value.this_update <= _datetime.now(tz=timezone.utc) < value.next_update: return value self._data.pop(cache_key, None) diff --git a/pymongo/ocsp_support.py b/pymongo/ocsp_support.py index e7f4a15d84..dda92d0d3b 100644 --- a/pymongo/ocsp_support.py +++ b/pymongo/ocsp_support.py @@ -17,6 +17,7 @@ import logging as _logging import re as _re from datetime import datetime as _datetime +from datetime import timezone from cryptography.exceptions import InvalidSignature as _InvalidSignature from cryptography.hazmat.backends import default_backend as _default_backend @@ -219,7 +220,7 @@ def _verify_response(issuer, response): # Note that we are not using a "tolerance period" as discussed in # https://tools.ietf.org/rfc/rfc5019.txt? - now = _datetime.utcnow() + now = _datetime.now(tz=timezone.utc) # RFC6960, Section 3.2, Number 5 if response.this_update > now: _LOGGER.debug("thisUpdate is in the future") diff --git a/test/test_bson.py b/test/test_bson.py index e38fe970f2..12fbea92fa 100644 --- a/test/test_bson.py +++ b/test/test_bson.py @@ -986,7 +986,7 @@ def test_codec_options_repr(self): def test_decode_all_defaults(self): # Test decode_all()'s default document_class is dict and tz_aware is # False. - doc = {"sub_document": {}, "dt": datetime.datetime.utcnow()} + doc = {"sub_document": {}, "dt": datetime.datetime.now(tz=datetime.timezone.utc)} decoded = bson.decode_all(bson.encode(doc))[0] self.assertIsInstance(decoded["sub_document"], dict) @@ -998,7 +998,7 @@ def test_decode_all_defaults(self): def test_decode_all_no_options(self): # Test decode_all()'s default document_class is dict and tz_aware is # False. 
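The ``decode_all`` defaults being asserted here are easy to check in isolation: with no codec options the result is a list of plain dicts holding naive datetimes, and ``tz_aware=True`` opts back into aware ones. A sketch::

    from datetime import datetime, timezone

    import bson
    from bson.codec_options import CodecOptions

    raw = bson.encode({"dt": datetime.now(tz=timezone.utc)})
    [doc] = bson.decode_all(raw)  # defaults: document_class=dict, tz_aware=False
    assert isinstance(doc, dict) and doc["dt"].tzinfo is None
    [doc] = bson.decode_all(raw, CodecOptions(tz_aware=True))
    assert doc["dt"].tzinfo is not None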
- doc = {"sub_document": {}, "dt": datetime.datetime.utcnow()} + doc = {"sub_document": {}, "dt": datetime.datetime.now(tz=datetime.timezone.utc)} decoded = bson.decode_all(bson.encode(doc), None)[0] self.assertIsInstance(decoded["sub_document"], dict) diff --git a/test/test_client.py b/test/test_client.py index ec2b4bac97..bba6b37287 100644 --- a/test/test_client.py +++ b/test/test_client.py @@ -1143,7 +1143,7 @@ def test_tz_aware(self): naive = self.client aware.pymongo_test.drop_collection("test") - now = datetime.datetime.utcnow() + now = datetime.datetime.now(tz=datetime.timezone.utc) aware.pymongo_test.test.insert_one({"x": now}) self.assertEqual(None, naive.pymongo_test.test.find_one()["x"].tzinfo) diff --git a/test/test_objectid.py b/test/test_objectid.py index bb1af865c0..cb96feaf34 100644 --- a/test/test_objectid.py +++ b/test/test_objectid.py @@ -86,7 +86,7 @@ def test_binary_str_equivalence(self): self.assertEqual(a, ObjectId(str(a))) def test_generation_time(self): - d1 = datetime.datetime.utcnow() + d1 = datetime.datetime.now(tz=datetime.timezone.utc).replace(tzinfo=None) d2 = ObjectId().generation_time self.assertEqual(utc, d2.tzinfo) @@ -97,7 +97,7 @@ def test_from_datetime(self): if "PyPy 1.8.0" in sys.version: # See https://bugs.pypy.org/issue1092 raise SkipTest("datetime.timedelta is broken in pypy 1.8.0") - d = datetime.datetime.utcnow() + d = datetime.datetime.now(tz=datetime.timezone.utc).replace(tzinfo=None) d = d - datetime.timedelta(microseconds=d.microsecond) oid = ObjectId.from_datetime(d) self.assertEqual(d, oid.generation_time.replace(tzinfo=None)) diff --git a/test/test_ocsp_cache.py b/test/test_ocsp_cache.py index 0e6777a9f9..3740b6b28a 100644 --- a/test/test_ocsp_cache.py +++ b/test/test_ocsp_cache.py @@ -17,7 +17,7 @@ import random import sys from collections import namedtuple -from datetime import datetime, timedelta +from datetime import datetime, timedelta, timezone from os import urandom from time import sleep from typing import Any @@ -61,7 +61,7 @@ def _create_mock_request(self): ) def _create_mock_response(self, this_update_delta_seconds, next_update_delta_seconds): - now = datetime.utcnow() + now = datetime.now(tz=timezone.utc) this_update = now + timedelta(seconds=this_update_delta_seconds) if next_update_delta_seconds is not None: next_update = now + timedelta(seconds=next_update_delta_seconds) diff --git a/test/utils_selection_tests.py b/test/utils_selection_tests.py index ccb3897966..6967544f09 100644 --- a/test/utils_selection_tests.py +++ b/test/utils_selection_tests.py @@ -44,7 +44,7 @@ def get_addresses(server_list): def make_last_write_date(server): - epoch = datetime.datetime.utcfromtimestamp(0) + epoch = datetime.datetime.fromtimestamp(0, tz=datetime.timezone.utc).replace(tzinfo=None) millis = server.get("lastWrite", {}).get("lastWriteDate") if millis: diff = ((millis % 1000) + 1000) % 1000 From eed9d02a2e2c3e0b7fcde6ad4d07d09832254eaa Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Tue, 13 Jun 2023 11:30:50 -0500 Subject: [PATCH 0408/1588] PYTHON-3731 Disable MONGODB-OIDC Auth for 4.4 (#1230) --- .evergreen/config.yml | 2 +- pymongo/auth.py | 1 - test/auth_aws/test_auth_oidc.py | 4 ++++ 3 files changed, 5 insertions(+), 2 deletions(-) diff --git a/.evergreen/config.yml b/.evergreen/config.yml index f62cb0d0c0..c3e8a3d1f3 100644 --- a/.evergreen/config.yml +++ b/.evergreen/config.yml @@ -3204,7 +3204,7 @@ buildvariants: - matrix_name: "oidc-auth-test" matrix_spec: - platform: [ ubuntu-20.04 ] + platform: [ rhel84 ] python-version: 
["3.9"] display_name: "MONGODB-OIDC Auth ${platform} ${python-version}" tasks: diff --git a/pymongo/auth.py b/pymongo/auth.py index ac7cb254e9..00b6faa6fd 100644 --- a/pymongo/auth.py +++ b/pymongo/auth.py @@ -510,7 +510,6 @@ def _authenticate_default(credentials, sock_info): "MONGODB-CR": _authenticate_mongo_cr, "MONGODB-X509": _authenticate_x509, "MONGODB-AWS": _authenticate_aws, - "MONGODB-OIDC": _authenticate_oidc, "PLAIN": _authenticate_plain, "SCRAM-SHA-1": functools.partial(_authenticate_scram, mechanism="SCRAM-SHA-1"), "SCRAM-SHA-256": functools.partial(_authenticate_scram, mechanism="SCRAM-SHA-256"), diff --git a/test/auth_aws/test_auth_oidc.py b/test/auth_aws/test_auth_oidc.py index 26e71573d4..7b42f98a1c 100644 --- a/test/auth_aws/test_auth_oidc.py +++ b/test/auth_aws/test_auth_oidc.py @@ -28,12 +28,16 @@ from bson import SON from pymongo import MongoClient +from pymongo.auth import _AUTH_MAP, _authenticate_oidc from pymongo.auth_oidc import _CACHE as _oidc_cache from pymongo.cursor import CursorType from pymongo.errors import ConfigurationError, OperationFailure from pymongo.hello import HelloCompat from pymongo.operations import InsertOne +# Force MONGODB-OIDC to be enabled. +_AUTH_MAP["MONGODB-OIDC"] = _authenticate_oidc # type:ignore + class TestAuthOIDC(unittest.TestCase): uri: str From ece45b1edf451f606c1efebfc944d13f80fbdd1e Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Wed, 14 Jun 2023 10:00:52 -0700 Subject: [PATCH 0409/1588] PYTHON-3699 Add prose test for change stream splitting (#1232) --- test/test_change_stream.py | 20 ++++++++++++++++++++ 1 file changed, 20 insertions(+) diff --git a/test/test_change_stream.py b/test/test_change_stream.py index c9ddfcd137..dae8b1f5a1 100644 --- a/test/test_change_stream.py +++ b/test/test_change_stream.py @@ -762,6 +762,26 @@ def test_startafter_resume_uses_resumeafter_after_nonempty_getMore(self): self.assertIsNotNone(response.command["pipeline"][0]["$changeStream"].get("resumeAfter")) self.assertIsNone(response.command["pipeline"][0]["$changeStream"].get("startAfter")) + # Prose test no. 19 + @no_type_check + @client_context.require_version_min(7, 0, -1) + def test_split_large_change(self): + self.db.drop_collection("test_split_large_change") + coll = self.db.create_collection( + "test_split_large_change", changeStreamPreAndPostImages={"enabled": True} + ) + coll.insert_one({"_id": 1, "value": "q" * 10 * 1024 * 1024}) + with coll.watch( + [{"$changeStreamSplitLargeEvent": {}}], full_document_before_change="required" + ) as change_stream: + coll.update_one({"_id": 1}, {"$set": {"value": "z" * 10 * 1024 * 1024}}) + doc_1 = change_stream.next() + self.assertIn("splitEvent", doc_1) + self.assertEqual(doc_1["splitEvent"], {"fragment": 1, "of": 2}) + doc_2 = change_stream.next() + self.assertIn("splitEvent", doc_2) + self.assertEqual(doc_2["splitEvent"], {"fragment": 2, "of": 2}) + class TestClusterChangeStream(TestChangeStreamBase, APITestsMixin): dbs: list From 1269c006da2ad9c35d812ff309ded7beebc50e81 Mon Sep 17 00:00:00 2001 From: Noah Stapp Date: Wed, 14 Jun 2023 11:27:58 -0700 Subject: [PATCH 0410/1588] PYTHON-3735 Add types to PyMongo auth module (#1231) --- pymongo/auth.py | 87 +++++++++++++++++++++++++++++++------------------ pymongo/pool.py | 4 ++- 2 files changed, 59 insertions(+), 32 deletions(-) diff --git a/pymongo/auth.py b/pymongo/auth.py index 00b6faa6fd..b4d04f8d14 100644 --- a/pymongo/auth.py +++ b/pymongo/auth.py @@ -13,15 +13,17 @@ # limitations under the License. 
"""Authentication helpers.""" +from __future__ import annotations import functools import hashlib import hmac import os import socket +import typing from base64 import standard_b64decode, standard_b64encode from collections import namedtuple -from typing import Callable, Mapping +from typing import TYPE_CHECKING, Any, Callable, Mapping, MutableMapping, Optional from urllib.parse import quote from bson.binary import Binary @@ -31,6 +33,10 @@ from pymongo.errors import ConfigurationError, OperationFailure from pymongo.saslprep import saslprep +if TYPE_CHECKING: + from pymongo.hello import Hello + from pymongo.pool import SocketInfo + HAVE_KERBEROS = True _USE_PRINCIPAL = False try: @@ -66,21 +72,21 @@ class _Cache: _hash_val = hash("_Cache") - def __init__(self): + def __init__(self) -> None: self.data = None - def __eq__(self, other): + def __eq__(self, other: object) -> bool: # Two instances must always compare equal. if isinstance(other, _Cache): return True return NotImplemented - def __ne__(self, other): + def __ne__(self, other: object) -> bool: if isinstance(other, _Cache): return False return NotImplemented - def __hash__(self): + def __hash__(self) -> int: return self._hash_val @@ -101,7 +107,14 @@ def __hash__(self): """Mechanism properties for MONGODB-AWS authentication.""" -def _build_credentials_tuple(mech, source, user, passwd, extra, database): +def _build_credentials_tuple( + mech: str, + source: Optional[str], + user: str, + passwd: str, + extra: Mapping[str, Any], + database: Optional[str], +) -> MongoCredential: """Build and return a mechanism specific credentials tuple.""" if mech not in ("MONGODB-X509", "MONGODB-AWS", "MONGODB-OIDC") and user is None: raise ConfigurationError(f"{mech} requires a username.") @@ -175,17 +188,21 @@ def _build_credentials_tuple(mech, source, user, passwd, extra, database): return MongoCredential(mech, source_database, user, passwd, None, _Cache()) -def _xor(fir, sec): +def _xor(fir: bytes, sec: bytes) -> bytes: """XOR two byte strings together (python 3.x).""" return b"".join([bytes([x ^ y]) for x, y in zip(fir, sec)]) -def _parse_scram_response(response): +def _parse_scram_response(response: bytes) -> dict: """Split a scram response into key, value pairs.""" - return dict(item.split(b"=", 1) for item in response.split(b",")) + return dict( + typing.cast(typing.Tuple[str, str], item.split(b"=", 1)) for item in response.split(b",") + ) -def _authenticate_scram_start(credentials, mechanism): +def _authenticate_scram_start( + credentials: MongoCredential, mechanism: str +) -> tuple[bytes, bytes, MutableMapping[str, Any]]: username = credentials.username user = username.encode("utf-8").replace(b"=", b"=3D").replace(b",", b"=2C") nonce = standard_b64encode(os.urandom(32)) @@ -203,7 +220,9 @@ def _authenticate_scram_start(credentials, mechanism): return nonce, first_bare, cmd -def _authenticate_scram(credentials, sock_info, mechanism): +def _authenticate_scram( + credentials: MongoCredential, sock_info: SocketInfo, mechanism: str +) -> None: """Authenticate using SCRAM.""" username = credentials.username if mechanism == "SCRAM-SHA-256": @@ -287,7 +306,7 @@ def _authenticate_scram(credentials, sock_info, mechanism): raise OperationFailure("SASL conversation failed to complete.") -def _password_digest(username, password): +def _password_digest(username: str, password: str) -> str: """Get a password digest to use for authentication.""" if not isinstance(password, str): raise TypeError("password must be an instance of str") @@ -302,7 +321,7 @@ def 
_password_digest(username, password): return md5hash.hexdigest() -def _auth_key(nonce, username, password): +def _auth_key(nonce: str, username: str, password: str) -> str: """Get an auth key to use for authentication.""" digest = _password_digest(username, password) md5hash = hashlib.md5() @@ -311,7 +330,7 @@ def _auth_key(nonce, username, password): return md5hash.hexdigest() -def _canonicalize_hostname(hostname): +def _canonicalize_hostname(hostname: str) -> str: """Canonicalize hostname following MIT-krb5 behavior.""" # https://github.com/krb5/krb5/blob/d406afa363554097ac48646a29249c04f498c88e/src/util/k5test.py#L505-L520 af, socktype, proto, canonname, sockaddr = socket.getaddrinfo( @@ -326,7 +345,7 @@ def _canonicalize_hostname(hostname): return name[0].lower() -def _authenticate_gssapi(credentials, sock_info): +def _authenticate_gssapi(credentials: MongoCredential, sock_info: SocketInfo) -> None: """Authenticate using GSSAPI.""" if not HAVE_KERBEROS: raise ConfigurationError( @@ -443,7 +462,7 @@ def _authenticate_gssapi(credentials, sock_info): raise OperationFailure(str(exc)) -def _authenticate_plain(credentials, sock_info): +def _authenticate_plain(credentials: MongoCredential, sock_info: SocketInfo) -> None: """Authenticate using SASL PLAIN (RFC 4616)""" source = credentials.source username = credentials.username @@ -460,7 +479,7 @@ def _authenticate_plain(credentials, sock_info): sock_info.command(source, cmd) -def _authenticate_x509(credentials, sock_info): +def _authenticate_x509(credentials: MongoCredential, sock_info: SocketInfo) -> None: """Authenticate using MONGODB-X509.""" ctx = sock_info.auth_ctx if ctx and ctx.speculate_succeeded(): @@ -471,7 +490,7 @@ def _authenticate_x509(credentials, sock_info): sock_info.command("$external", cmd) -def _authenticate_mongo_cr(credentials, sock_info): +def _authenticate_mongo_cr(credentials: MongoCredential, sock_info: SocketInfo) -> None: """Authenticate using MONGODB-CR.""" source = credentials.source username = credentials.username @@ -486,7 +505,7 @@ def _authenticate_mongo_cr(credentials, sock_info): sock_info.command(source, query) -def _authenticate_default(credentials, sock_info): +def _authenticate_default(credentials: MongoCredential, sock_info: SocketInfo) -> None: if sock_info.max_wire_version >= 7: if sock_info.negotiated_mechs: mechs = sock_info.negotiated_mechs @@ -518,35 +537,39 @@ def _authenticate_default(credentials, sock_info): class _AuthContext: - def __init__(self, credentials, address): + def __init__(self, credentials: MongoCredential, address: tuple[str, int]) -> None: self.credentials = credentials - self.speculative_authenticate = None + self.speculative_authenticate: Optional[Mapping[str, Any]] = None self.address = address @staticmethod - def from_credentials(creds, address): + def from_credentials( + creds: MongoCredential, address: tuple[str, int] + ) -> Optional[_AuthContext]: spec_cls = _SPECULATIVE_AUTH_MAP.get(creds.mechanism) if spec_cls: return spec_cls(creds, address) return None - def speculate_command(self): + def speculate_command(self) -> Optional[MutableMapping[str, Any]]: raise NotImplementedError - def parse_response(self, hello): + def parse_response(self, hello: Hello) -> None: self.speculative_authenticate = hello.speculative_authenticate - def speculate_succeeded(self): + def speculate_succeeded(self) -> bool: return bool(self.speculative_authenticate) class _ScramContext(_AuthContext): - def __init__(self, credentials, address, mechanism): + def __init__( + self, 
credentials: MongoCredential, address: tuple[str, int], mechanism: str
+    ) -> None:
         super().__init__(credentials, address)
-        self.scram_data = None
+        self.scram_data: Optional[tuple[bytes, bytes]] = None
         self.mechanism = mechanism

-    def speculate_command(self):
+    def speculate_command(self) -> Optional[MutableMapping[str, Any]]:
         nonce, first_bare, cmd = _authenticate_scram_start(self.credentials, self.mechanism)
         # The 'db' field is included only on the speculative command.
         cmd["db"] = self.credentials.source
@@ -556,7 +579,7 @@ def speculate_command(self):


 class _X509Context(_AuthContext):
-    def speculate_command(self):
+    def speculate_command(self) -> Optional[MutableMapping[str, Any]]:
         cmd = SON([("authenticate", 1), ("mechanism", "MONGODB-X509")])
         if self.credentials.username is not None:
             cmd["user"] = self.credentials.username
@@ -564,7 +587,7 @@ def speculate_command(self):


 class _OIDCContext(_AuthContext):
-    def speculate_command(self):
+    def speculate_command(self) -> Optional[MutableMapping[str, Any]]:
         authenticator = _get_authenticator(self.credentials, self.address)
         cmd = authenticator.auth_start_cmd(False)
         if cmd is None:
@@ -582,7 +605,9 @@ def speculate_command(self):
 }


-def authenticate(credentials, sock_info, reauthenticate=False):
+def authenticate(
+    credentials: MongoCredential, sock_info: SocketInfo, reauthenticate: bool = False
+) -> None:
     """Authenticate sock_info."""
     mechanism = credentials.mechanism
     auth_func = _AUTH_MAP[mechanism]
diff --git a/pymongo/pool.py b/pymongo/pool.py
index 5bae8ce878..2b498078c2 100644
--- a/pymongo/pool.py
+++ b/pymongo/pool.py
@@ -758,7 +758,9 @@ def _hello(self, cluster_time, topology_version, heartbeat_frequency):
                 cmd["saslSupportedMechs"] = creds.source + "." + creds.username
             auth_ctx = auth._AuthContext.from_credentials(creds, self.address)
             if auth_ctx:
-                cmd["speculativeAuthenticate"] = auth_ctx.speculate_command()
+                speculative_authenticate = auth_ctx.speculate_command()
+                if speculative_authenticate is not None:
+                    cmd["speculativeAuthenticate"] = speculative_authenticate
         else:
             auth_ctx = None

From 6a04fe2c91efe4251b924a5f32a3b40e80547adc Mon Sep 17 00:00:00 2001
From: Noah Stapp
Date: Wed, 14 Jun 2023 16:11:26 -0700
Subject: [PATCH 0411/1588] PYTHON-3702 Stop using utcnow and utcfromtimestamp changelog update (#1235)

---
 doc/changelog.rst | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/doc/changelog.rst b/doc/changelog.rst
index 3d03a6f386..e0e316e5b6 100644
--- a/doc/changelog.rst
+++ b/doc/changelog.rst
@@ -16,6 +16,9 @@ Changes in Version 4.4
 - pymongocrypt 1.6.0 or later is now required for :ref:`In-Use Encryption` support.
   MongoDB Server 7.0 introduced a backwards breaking change to the QE protocol.
   Users taking advantage of the Queryable Encryption beta must now upgrade to
   MongoDB 7.0+ and PyMongo 4.4+.
+- Previously, PyMongo's docs recommended using :meth:`datetime.datetime.utcnow` and :meth:`datetime.datetime.utcfromtimestamp`. utcnow and utcfromtimestamp are deprecated in Python 3.12, for reasons explained `in this GitHub issue`_. Users should instead use :meth:`datetime.datetime.now(tz=timezone.utc)` and :meth:`datetime.datetime.fromtimestamp(tz=timezone.utc)`.
+
+.. _in this GitHub issue: https://github.com/python/cpython/issues/103857

 Issues Resolved
 ...............
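For illustration, a minimal sketch of the migration this changelog entry describes; the snippet is not taken from any commit above, and the variable names are arbitrary:

    import datetime

    # Deprecated in Python 3.12; both returned naive datetimes:
    #   datetime.datetime.utcnow()
    #   datetime.datetime.utcfromtimestamp(0)

    # Timezone-aware replacements:
    now_aware = datetime.datetime.now(tz=datetime.timezone.utc)
    epoch_aware = datetime.datetime.fromtimestamp(0, tz=datetime.timezone.utc)

    # Where a naive UTC datetime is still required (as in several patches above),
    # compute the aware value first and then strip the tzinfo:
    now_naive = now_aware.replace(tzinfo=None)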
From e27e710184d91c34eefad80cc3b9e412bf926ae8 Mon Sep 17 00:00:00 2001 From: Noah Stapp Date: Thu, 15 Jun 2023 08:57:18 -0700 Subject: [PATCH 0412/1588] PYTHON-3736 Add Noah to code owners for PyMongo, Motor, and PyMongoArrow (#1237) --- .github/CODEOWNERS | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index 15a41b6ce6..3be0c9b0d1 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -1,2 +1,2 @@ # Global owner for repo -* @blink1073 @juliusgeo @ShaneHarvey +* @blink1073 @NoahStapp @ShaneHarvey From bcfdd200c3987ce5606bf7cd4408062e9d741c0d Mon Sep 17 00:00:00 2001 From: Noah Stapp Date: Thu, 15 Jun 2023 08:57:50 -0700 Subject: [PATCH 0413/1588] PYTHON-3702 bson datetime utc import cleanup (#1233) --- bson/datetime_ms.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bson/datetime_ms.py b/bson/datetime_ms.py index 5fc8b70328..c422d6e379 100644 --- a/bson/datetime_ms.py +++ b/bson/datetime_ms.py @@ -26,7 +26,7 @@ from bson.tz_util import utc EPOCH_AWARE = datetime.datetime.fromtimestamp(0, utc) -EPOCH_NAIVE = datetime.datetime.fromtimestamp(0, tz=datetime.timezone.utc).replace(tzinfo=None) +EPOCH_NAIVE = EPOCH_AWARE.replace(tzinfo=None) class DatetimeMS: From f7874fb110851b16a70ac611a5a016467988becd Mon Sep 17 00:00:00 2001 From: Iris <58442094+sleepyStick@users.noreply.github.com> Date: Thu, 15 Jun 2023 11:54:20 -0700 Subject: [PATCH 0414/1588] PYTHON-2287 Improve error message for invalid boolean option (#1236) --- bson/codec_options.py | 2 +- doc/contributors.rst | 1 + pymongo/common.py | 9 +-------- pymongo/pyopenssl_context.py | 7 +++---- pymongo/write_concern.py | 14 ++++++++++---- test/test_common.py | 7 +++++++ 6 files changed, 23 insertions(+), 17 deletions(-) diff --git a/bson/codec_options.py b/bson/codec_options.py index a0bdd0eeb9..45860fa705 100644 --- a/bson/codec_options.py +++ b/bson/codec_options.py @@ -397,7 +397,7 @@ def __new__( "subclass of collections.abc.MutableMapping" ) if not isinstance(tz_aware, bool): - raise TypeError("tz_aware must be True or False") + raise TypeError(f"tz_aware must be True or False, was: tz_aware={tz_aware}") if uuid_representation not in ALL_UUID_REPRESENTATIONS: raise ValueError( "uuid_representation must be a value from bson.binary.UuidRepresentation" diff --git a/doc/contributors.rst b/doc/contributors.rst index 17ae4784e2..e6d5e5310d 100644 --- a/doc/contributors.rst +++ b/doc/contributors.rst @@ -96,3 +96,4 @@ The following is a list of people who have contributed to - Jean-Christophe Fillion-Robin (jcfr) - Sean Cheah (thalassemia) - Dainis Gorbunovs (DainisGorbunovs) +- Iris Ho (sleepyStick) diff --git a/pymongo/common.py b/pymongo/common.py index 82c773695a..15a4c6f227 100644 --- a/pymongo/common.py +++ b/pymongo/common.py @@ -50,7 +50,7 @@ from pymongo.read_concern import ReadConcern from pymongo.read_preferences import _MONGOS_MODES, _ServerMode from pymongo.server_api import ServerApi -from pymongo.write_concern import DEFAULT_WRITE_CONCERN, WriteConcern +from pymongo.write_concern import DEFAULT_WRITE_CONCERN, WriteConcern, validate_boolean ORDERED_TYPES: Sequence[Type] = (SON, OrderedDict) @@ -170,13 +170,6 @@ def raise_config_error(key: str, dummy: Any) -> NoReturn: } -def validate_boolean(option: str, value: Any) -> bool: - """Validates that 'value' is True or False.""" - if isinstance(value, bool): - return value - raise TypeError(f"{option} must be True or False") - - def validate_boolean_or_string(option: str, value: Any) -> 
bool: """Validates that value is True, False, 'true', or 'false'.""" if isinstance(value, str): diff --git a/pymongo/pyopenssl_context.py b/pymongo/pyopenssl_context.py index 83d8f853ef..d6762bcaa2 100644 --- a/pymongo/pyopenssl_context.py +++ b/pymongo/pyopenssl_context.py @@ -37,6 +37,7 @@ from pymongo.ocsp_support import _load_trusted_ca_certs, _ocsp_callback from pymongo.socket_checker import SocketChecker as _SocketChecker from pymongo.socket_checker import _errno_from_exception +from pymongo.write_concern import validate_boolean try: import certifi @@ -228,8 +229,7 @@ def __get_check_hostname(self): return self._check_hostname def __set_check_hostname(self, value): - if not isinstance(value, bool): - raise TypeError("check_hostname must be True or False") + validate_boolean("check_hostname", value) self._check_hostname = value check_hostname = property(__get_check_hostname, __set_check_hostname) @@ -238,8 +238,7 @@ def __get_check_ocsp_endpoint(self): return self._callback_data.check_ocsp_endpoint def __set_check_ocsp_endpoint(self, value): - if not isinstance(value, bool): - raise TypeError("check_ocsp must be True or False") + validate_boolean("check_ocsp", value) self._callback_data.check_ocsp_endpoint = value check_ocsp_endpoint = property(__get_check_ocsp_endpoint, __set_check_ocsp_endpoint) diff --git a/pymongo/write_concern.py b/pymongo/write_concern.py index 25f87954b5..d62c3c3117 100644 --- a/pymongo/write_concern.py +++ b/pymongo/write_concern.py @@ -19,6 +19,14 @@ from pymongo.errors import ConfigurationError +# Moved here to avoid a circular import. +def validate_boolean(option: str, value: Any) -> bool: + """Validates that 'value' is True or False.""" + if isinstance(value, bool): + return value + raise TypeError(f"{option} must be True or False, was: {option}={value}") + + class WriteConcern: """WriteConcern @@ -65,13 +73,11 @@ def __init__( self.__document["wtimeout"] = wtimeout if j is not None: - if not isinstance(j, bool): - raise TypeError("j must be True or False") + validate_boolean("j", j) self.__document["j"] = j if fsync is not None: - if not isinstance(fsync, bool): - raise TypeError("fsync must be True or False") + validate_boolean("fsync", fsync) if j and fsync: raise ConfigurationError("Can't set both j and fsync at the same time") self.__document["fsync"] = fsync diff --git a/test/test_common.py b/test/test_common.py index 76367ffa0c..f1769cb214 100644 --- a/test/test_common.py +++ b/test/test_common.py @@ -165,6 +165,13 @@ def test_mongo_client(self): self.assertEqual(direct, direct2) self.assertFalse(direct != direct2) + def test_validate_boolean(self): + self.db.test.update_one({}, {"$set": {"total": 1}}, upsert=True) + with self.assertRaisesRegex( + TypeError, "upsert must be True or False, was: upsert={'upsert': True}" + ): + self.db.test.update_one({}, {"$set": {"total": 1}}, {"upsert": True}) # type: ignore + if __name__ == "__main__": unittest.main() From d86fb9496a6418d42c13d9caa0946dfb4e42df5f Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Thu, 15 Jun 2023 14:07:48 -0500 Subject: [PATCH 0415/1588] PYTHON-3519 Skip test_pool_paused_error_is_retryable on PyPy for now (#1238) --- test/test_retryable_reads.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/test/test_retryable_reads.py b/test/test_retryable_reads.py index ee12c524c9..97c51cd44f 100644 --- a/test/test_retryable_reads.py +++ b/test/test_retryable_reads.py @@ -162,6 +162,9 @@ class TestPoolPausedError(IntegrationTest): @client_context.require_failCommand_blockConnection 
@client_knobs(heartbeat_frequency=0.05, min_heartbeat_interval=0.05) def test_pool_paused_error_is_retryable(self): + if "PyPy" in sys.version: + # Tracked in PYTHON-3519 + self.skipTest("Test is flakey on PyPy") cmap_listener = CMAPListener() cmd_listener = OvertCommandListener() client = rs_or_single_client(maxPoolSize=1, event_listeners=[cmap_listener, cmd_listener]) From ada1280ad30ad5cd771b9a3c537ec3b4f83e3ba8 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Thu, 15 Jun 2023 14:08:13 -0500 Subject: [PATCH 0416/1588] PYTHON-3011 Skip test_connections_are_only_returned_once on PyPy for now (#1239) --- test/test_load_balancer.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/test/test_load_balancer.py b/test/test_load_balancer.py index df68b3e626..9a824bbaf4 100644 --- a/test/test_load_balancer.py +++ b/test/test_load_balancer.py @@ -37,6 +37,9 @@ class TestLB(IntegrationTest): RUN_ON_SERVERLESS = True def test_connections_are_only_returned_once(self): + if "PyPy" in sys.version: + # Tracked in PYTHON-3011 + self.skipTest("Test is flaky on PyPy") pool = get_pool(self.client) nconns = len(pool.sockets) self.db.test.find_one({}) From 2cfebf52cdd7d35cc2502aef90d262c28b684817 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Thu, 15 Jun 2023 12:34:02 -0700 Subject: [PATCH 0417/1588] PYTHON-3706 Skip flaky test on Windows/macOS (#1241) --- test/test_encryption.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/test/test_encryption.py b/test/test_encryption.py index 314b8dfbbe..0b9087359e 100644 --- a/test/test_encryption.py +++ b/test/test_encryption.py @@ -639,6 +639,11 @@ def get_object_name(self, op): def maybe_skip_scenario(self, test): super().maybe_skip_scenario(test) desc = test["description"].lower() + if ( + "timeoutms applied to listcollections to get collection schema" in desc + and sys.platform in ("win32", "darwin") + ): + self.skipTest("PYTHON-3706 flaky test on Windows/macOS") if "type=symbol" in desc: self.skipTest("PyMongo does not support the symbol type") From 601d1ec3a13434761bfd9997cc8ee6633c3d9f93 Mon Sep 17 00:00:00 2001 From: Noah Stapp Date: Thu, 15 Jun 2023 12:54:29 -0700 Subject: [PATCH 0418/1588] PYTHON-3737 Use __future__ annotations for forward reference type hints (#1234) --- pymongo/change_stream.py | 5 ++- pymongo/client_session.py | 6 ++- pymongo/collection.py | 71 ++++++++++++++++++----------------- pymongo/command_cursor.py | 11 +++--- pymongo/cursor.py | 14 ++++--- pymongo/database.py | 44 +++++++++++----------- pymongo/encryption_options.py | 3 +- pymongo/mongo_client.py | 11 +++--- pymongo/monitoring.py | 18 +++++---- test/test_comment.py | 5 ++- 10 files changed, 101 insertions(+), 87 deletions(-) diff --git a/pymongo/change_stream.py b/pymongo/change_stream.py index c53f981188..3a4d968c18 100644 --- a/pymongo/change_stream.py +++ b/pymongo/change_stream.py @@ -13,6 +13,7 @@ # permissions and limitations under the License. 
"""Watch changes on a collection, a database, or the entire cluster.""" +from __future__ import annotations import copy from typing import TYPE_CHECKING, Any, Dict, Generic, Mapping, Optional, Union @@ -96,7 +97,7 @@ class ChangeStream(Generic[_DocumentType]): def __init__( self, target: Union[ - "MongoClient[_DocumentType]", "Database[_DocumentType]", "Collection[_DocumentType]" + MongoClient[_DocumentType], Database[_DocumentType], Collection[_DocumentType] ], pipeline: Optional[_Pipeline], full_document: Optional[str], @@ -105,7 +106,7 @@ def __init__( batch_size: Optional[int], collation: Optional[_CollationIn], start_at_operation_time: Optional[Timestamp], - session: Optional["ClientSession"], + session: Optional[ClientSession], start_after: Optional[Mapping[str, Any]], comment: Optional[Any] = None, full_document_before_change: Optional[str] = None, diff --git a/pymongo/client_session.py b/pymongo/client_session.py index dbc5f3aa8d..08d9f03bb5 100644 --- a/pymongo/client_session.py +++ b/pymongo/client_session.py @@ -133,6 +133,8 @@ ======= """ +from __future__ import annotations + import collections import time import uuid @@ -478,7 +480,7 @@ class ClientSession: def __init__( self, - client: "MongoClient", + client: MongoClient, server_session: Any, options: SessionOptions, implicit: bool, @@ -524,7 +526,7 @@ def __exit__(self, exc_type: Any, exc_val: Any, exc_tb: Any) -> None: self._end_session(lock=True) @property - def client(self) -> "MongoClient": + def client(self) -> MongoClient: """The :class:`~pymongo.mongo_client.MongoClient` this session was created from. """ diff --git a/pymongo/collection.py b/pymongo/collection.py index 3b9001240e..428b1b0931 100644 --- a/pymongo/collection.py +++ b/pymongo/collection.py @@ -13,6 +13,7 @@ # limitations under the License. """Collection level utilities for Mongo.""" +from __future__ import annotations from collections import abc from typing import ( @@ -114,14 +115,14 @@ class Collection(common.BaseObject, Generic[_DocumentType]): def __init__( self, - database: "Database[_DocumentType]", + database: Database[_DocumentType], name: str, create: Optional[bool] = False, - codec_options: Optional["CodecOptions[_DocumentTypeArg]"] = None, + codec_options: Optional[CodecOptions[_DocumentTypeArg]] = None, read_preference: Optional[_ServerMode] = None, write_concern: Optional[WriteConcern] = None, - read_concern: Optional["ReadConcern"] = None, - session: Optional["ClientSession"] = None, + read_concern: Optional[ReadConcern] = None, + session: Optional[ClientSession] = None, **kwargs: Any, ) -> None: """Get / create a Mongo collection. @@ -335,7 +336,7 @@ def __create( session=session, ) - def __getattr__(self, name: str) -> "Collection[_DocumentType]": + def __getattr__(self, name: str) -> Collection[_DocumentType]: """Get a sub-collection of this collection by name. Raises InvalidName if an invalid collection name is used. @@ -351,7 +352,7 @@ def __getattr__(self, name: str) -> "Collection[_DocumentType]": ) return self.__getitem__(name) - def __getitem__(self, name: str) -> "Collection[_DocumentType]": + def __getitem__(self, name: str) -> Collection[_DocumentType]: return Collection( self.__database, f"{self.__name}.{name}", @@ -397,7 +398,7 @@ def name(self) -> str: return self.__name @property - def database(self) -> "Database[_DocumentType]": + def database(self) -> Database[_DocumentType]: """The :class:`~pymongo.database.Database` that this :class:`Collection` is a part of. 
""" @@ -405,11 +406,11 @@ def database(self) -> "Database[_DocumentType]": def with_options( self, - codec_options: Optional["bson.CodecOptions[_DocumentTypeArg]"] = None, + codec_options: Optional[bson.CodecOptions[_DocumentTypeArg]] = None, read_preference: Optional[_ServerMode] = None, write_concern: Optional[WriteConcern] = None, - read_concern: Optional["ReadConcern"] = None, - ) -> "Collection[_DocumentType]": + read_concern: Optional[ReadConcern] = None, + ) -> Collection[_DocumentType]: """Get a clone of this collection changing the specified settings. >>> coll1.read_preference @@ -455,7 +456,7 @@ def bulk_write( requests: Sequence[_WriteOp[_DocumentType]], ordered: bool = True, bypass_document_validation: bool = False, - session: Optional["ClientSession"] = None, + session: Optional[ClientSession] = None, comment: Optional[Any] = None, let: Optional[Mapping] = None, ) -> BulkWriteResult: @@ -585,7 +586,7 @@ def insert_one( self, document: Union[_DocumentType, RawBSONDocument], bypass_document_validation: bool = False, - session: Optional["ClientSession"] = None, + session: Optional[ClientSession] = None, comment: Optional[Any] = None, ) -> InsertOneResult: """Insert a single document. @@ -653,7 +654,7 @@ def insert_many( documents: Iterable[Union[_DocumentType, RawBSONDocument]], ordered: bool = True, bypass_document_validation: bool = False, - session: Optional["ClientSession"] = None, + session: Optional[ClientSession] = None, comment: Optional[Any] = None, ) -> InsertManyResult: """Insert an iterable of documents. @@ -855,7 +856,7 @@ def replace_one( bypass_document_validation: bool = False, collation: Optional[_CollationIn] = None, hint: Optional[_IndexKeyHint] = None, - session: Optional["ClientSession"] = None, + session: Optional[ClientSession] = None, let: Optional[Mapping[str, Any]] = None, comment: Optional[Any] = None, ) -> UpdateResult: @@ -959,7 +960,7 @@ def update_one( collation: Optional[_CollationIn] = None, array_filters: Optional[Sequence[Mapping[str, Any]]] = None, hint: Optional[_IndexKeyHint] = None, - session: Optional["ClientSession"] = None, + session: Optional[ClientSession] = None, let: Optional[Mapping[str, Any]] = None, comment: Optional[Any] = None, ) -> UpdateResult: @@ -1073,7 +1074,7 @@ def update_many( bypass_document_validation: Optional[bool] = None, collation: Optional[_CollationIn] = None, hint: Optional[_IndexKeyHint] = None, - session: Optional["ClientSession"] = None, + session: Optional[ClientSession] = None, let: Optional[Mapping[str, Any]] = None, comment: Optional[Any] = None, ) -> UpdateResult: @@ -1168,7 +1169,7 @@ def update_many( def drop( self, - session: Optional["ClientSession"] = None, + session: Optional[ClientSession] = None, comment: Optional[Any] = None, encrypted_fields: Optional[Mapping[str, Any]] = None, ) -> None: @@ -1306,7 +1307,7 @@ def delete_one( filter: Mapping[str, Any], collation: Optional[_CollationIn] = None, hint: Optional[_IndexKeyHint] = None, - session: Optional["ClientSession"] = None, + session: Optional[ClientSession] = None, let: Optional[Mapping[str, Any]] = None, comment: Optional[Any] = None, ) -> DeleteResult: @@ -1373,7 +1374,7 @@ def delete_many( filter: Mapping[str, Any], collation: Optional[_CollationIn] = None, hint: Optional[_IndexKeyHint] = None, - session: Optional["ClientSession"] = None, + session: Optional[ClientSession] = None, let: Optional[Mapping[str, Any]] = None, comment: Optional[Any] = None, ) -> DeleteResult: @@ -1769,7 +1770,7 @@ def _cmd(session, server, sock_info, 
read_preference): def count_documents( self, filter: Mapping[str, Any], - session: Optional["ClientSession"] = None, + session: Optional[ClientSession] = None, comment: Optional[Any] = None, **kwargs: Any, ) -> int: @@ -1860,7 +1861,7 @@ def _retryable_non_cursor_read(self, func, session): def create_indexes( self, indexes: Sequence[IndexModel], - session: Optional["ClientSession"] = None, + session: Optional[ClientSession] = None, comment: Optional[Any] = None, **kwargs: Any, ) -> List[str]: @@ -1952,7 +1953,7 @@ def gen_indexes(): def create_index( self, keys: _IndexKeyHint, - session: Optional["ClientSession"] = None, + session: Optional[ClientSession] = None, comment: Optional[Any] = None, **kwargs: Any, ) -> str: @@ -2071,7 +2072,7 @@ def create_index( def drop_indexes( self, - session: Optional["ClientSession"] = None, + session: Optional[ClientSession] = None, comment: Optional[Any] = None, **kwargs: Any, ) -> None: @@ -2107,7 +2108,7 @@ def drop_indexes( def drop_index( self, index_or_name: _IndexKeyHint, - session: Optional["ClientSession"] = None, + session: Optional[ClientSession] = None, comment: Optional[Any] = None, **kwargs: Any, ) -> None: @@ -2174,7 +2175,7 @@ def drop_index( def list_indexes( self, - session: Optional["ClientSession"] = None, + session: Optional[ClientSession] = None, comment: Optional[Any] = None, ) -> CommandCursor[MutableMapping[str, Any]]: """Get a cursor over the index documents for this collection. @@ -2239,7 +2240,7 @@ def _cmd(session, server, sock_info, read_preference): def index_information( self, - session: Optional["ClientSession"] = None, + session: Optional[ClientSession] = None, comment: Optional[Any] = None, ) -> MutableMapping[str, Any]: """Get information on this collection's indexes. @@ -2282,7 +2283,7 @@ def index_information( def options( self, - session: Optional["ClientSession"] = None, + session: Optional[ClientSession] = None, comment: Optional[Any] = None, ) -> MutableMapping[str, Any]: """Get the options set on this collection. 
@@ -2361,7 +2362,7 @@ def _aggregate( def aggregate( self, pipeline: _Pipeline, - session: Optional["ClientSession"] = None, + session: Optional[ClientSession] = None, let: Optional[Mapping[str, Any]] = None, comment: Optional[Any] = None, **kwargs: Any, @@ -2458,7 +2459,7 @@ def aggregate( def aggregate_raw_batches( self, pipeline: _Pipeline, - session: Optional["ClientSession"] = None, + session: Optional[ClientSession] = None, comment: Optional[Any] = None, **kwargs: Any, ) -> RawBatchCursor[_DocumentType]: @@ -2509,7 +2510,7 @@ def watch( batch_size: Optional[int] = None, collation: Optional[_CollationIn] = None, start_at_operation_time: Optional[Timestamp] = None, - session: Optional["ClientSession"] = None, + session: Optional[ClientSession] = None, start_after: Optional[Mapping[str, Any]] = None, comment: Optional[Any] = None, full_document_before_change: Optional[str] = None, @@ -2644,7 +2645,7 @@ def watch( def rename( self, new_name: str, - session: Optional["ClientSession"] = None, + session: Optional[ClientSession] = None, comment: Optional[Any] = None, **kwargs: Any, ) -> MutableMapping[str, Any]: @@ -2709,7 +2710,7 @@ def distinct( self, key: str, filter: Optional[Mapping[str, Any]] = None, - session: Optional["ClientSession"] = None, + session: Optional[ClientSession] = None, comment: Optional[Any] = None, **kwargs: Any, ) -> List: @@ -2860,7 +2861,7 @@ def find_one_and_delete( projection: Optional[Union[Mapping[str, Any], Iterable[str]]] = None, sort: Optional[_IndexList] = None, hint: Optional[_IndexKeyHint] = None, - session: Optional["ClientSession"] = None, + session: Optional[ClientSession] = None, let: Optional[Mapping[str, Any]] = None, comment: Optional[Any] = None, **kwargs: Any, @@ -2953,7 +2954,7 @@ def find_one_and_replace( upsert: bool = False, return_document: bool = ReturnDocument.BEFORE, hint: Optional[_IndexKeyHint] = None, - session: Optional["ClientSession"] = None, + session: Optional[ClientSession] = None, let: Optional[Mapping[str, Any]] = None, comment: Optional[Any] = None, **kwargs: Any, @@ -3062,7 +3063,7 @@ def find_one_and_update( return_document: bool = ReturnDocument.BEFORE, array_filters: Optional[Sequence[Mapping[str, Any]]] = None, hint: Optional[_IndexKeyHint] = None, - session: Optional["ClientSession"] = None, + session: Optional[ClientSession] = None, let: Optional[Mapping[str, Any]] = None, comment: Optional[Any] = None, **kwargs: Any, diff --git a/pymongo/command_cursor.py b/pymongo/command_cursor.py index d57b45154d..c831dfb49b 100644 --- a/pymongo/command_cursor.py +++ b/pymongo/command_cursor.py @@ -13,6 +13,7 @@ # limitations under the License. 
"""CommandCursor class to iterate over command results.""" +from __future__ import annotations from collections import deque from typing import TYPE_CHECKING, Any, Generic, Iterator, Mapping, NoReturn, Optional @@ -36,12 +37,12 @@ class CommandCursor(Generic[_DocumentType]): def __init__( self, - collection: "Collection[_DocumentType]", + collection: Collection[_DocumentType], cursor_info: Mapping[str, Any], address: Optional[_Address], batch_size: int = 0, max_await_time_ms: Optional[int] = None, - session: Optional["ClientSession"] = None, + session: Optional[ClientSession] = None, explicit_session: bool = False, comment: Any = None, ) -> None: @@ -267,7 +268,7 @@ def address(self) -> Optional[_Address]: return self.__address @property - def session(self) -> Optional["ClientSession"]: + def session(self) -> Optional[ClientSession]: """The cursor's :class:`~pymongo.client_session.ClientSession`, or None. .. versionadded:: 3.6 @@ -312,12 +313,12 @@ class RawBatchCommandCursor(CommandCursor, Generic[_DocumentType]): def __init__( self, - collection: "Collection[_DocumentType]", + collection: Collection[_DocumentType], cursor_info: Mapping[str, Any], address: Optional[_Address], batch_size: int = 0, max_await_time_ms: Optional[int] = None, - session: Optional["ClientSession"] = None, + session: Optional[ClientSession] = None, explicit_session: bool = False, comment: Any = None, ) -> None: diff --git a/pymongo/cursor.py b/pymongo/cursor.py index cc4e1a1146..8d131a711e 100644 --- a/pymongo/cursor.py +++ b/pymongo/cursor.py @@ -13,6 +13,8 @@ # limitations under the License. """Cursor class to iterate over Mongo query results.""" +from __future__ import annotations + import copy import warnings from collections import deque @@ -163,7 +165,7 @@ class Cursor(Generic[_DocumentType]): def __init__( self, - collection: "Collection[_DocumentType]", + collection: Collection[_DocumentType], filter: Optional[Mapping[str, Any]] = None, projection: Optional[Union[Mapping[str, Any], Iterable[str]]] = None, skip: int = 0, @@ -184,7 +186,7 @@ def __init__( show_record_id: Optional[bool] = None, snapshot: Optional[bool] = None, comment: Optional[Any] = None, - session: Optional["ClientSession"] = None, + session: Optional[ClientSession] = None, allow_disk_use: Optional[bool] = None, let: Optional[bool] = None, ) -> None: @@ -202,7 +204,7 @@ def __init__( self.__exhaust = False self.__sock_mgr: Any = None self.__killed = False - self.__session: Optional["ClientSession"] + self.__session: Optional[ClientSession] if session: self.__session = session @@ -312,7 +314,7 @@ def __init__( self.__collname = collection.name @property - def collection(self) -> "Collection[_DocumentType]": + def collection(self) -> Collection[_DocumentType]: """The :class:`~pymongo.collection.Collection` that this :class:`Cursor` is iterating. """ @@ -1230,7 +1232,7 @@ def address(self) -> Optional[Tuple[str, Any]]: return self.__address @property - def session(self) -> Optional["ClientSession"]: + def session(self) -> Optional[ClientSession]: """The cursor's :class:`~pymongo.client_session.ClientSession`, or None. .. versionadded:: 3.6 @@ -1313,7 +1315,7 @@ class RawBatchCursor(Cursor, Generic[_DocumentType]): _query_class = _RawBatchQuery _getmore_class = _RawBatchGetMore - def __init__(self, collection: "Collection[_DocumentType]", *args: Any, **kwargs: Any) -> None: + def __init__(self, collection: Collection[_DocumentType], *args: Any, **kwargs: Any) -> None: """Create a new cursor / iterator over raw batches of BSON data. 
Should not be called directly by application developers - diff --git a/pymongo/database.py b/pymongo/database.py index 66cfce2090..1fa9913c60 100644 --- a/pymongo/database.py +++ b/pymongo/database.py @@ -13,6 +13,8 @@ # limitations under the License. """Database level operations.""" +from __future__ import annotations + from copy import deepcopy from typing import ( TYPE_CHECKING, @@ -74,10 +76,10 @@ def __init__( self, client: "MongoClient[_DocumentType]", name: str, - codec_options: Optional["bson.CodecOptions[_DocumentTypeArg]"] = None, + codec_options: Optional[bson.CodecOptions[_DocumentTypeArg]] = None, read_preference: Optional[_ServerMode] = None, - write_concern: Optional["WriteConcern"] = None, - read_concern: Optional["ReadConcern"] = None, + write_concern: Optional[WriteConcern] = None, + read_concern: Optional[ReadConcern] = None, ) -> None: """Get a database by client and name. @@ -154,10 +156,10 @@ def name(self) -> str: def with_options( self, - codec_options: Optional["bson.CodecOptions[_DocumentTypeArg]"] = None, + codec_options: Optional[bson.CodecOptions[_DocumentTypeArg]] = None, read_preference: Optional[_ServerMode] = None, - write_concern: Optional["WriteConcern"] = None, - read_concern: Optional["ReadConcern"] = None, + write_concern: Optional[WriteConcern] = None, + read_concern: Optional[ReadConcern] = None, ) -> "Database[_DocumentType]": """Get a clone of this database changing the specified settings. @@ -241,10 +243,10 @@ def __getitem__(self, name: str) -> "Collection[_DocumentType]": def get_collection( self, name: str, - codec_options: Optional["bson.CodecOptions[_DocumentTypeArg]"] = None, + codec_options: Optional[bson.CodecOptions[_DocumentTypeArg]] = None, read_preference: Optional[_ServerMode] = None, - write_concern: Optional["WriteConcern"] = None, - read_concern: Optional["ReadConcern"] = None, + write_concern: Optional[WriteConcern] = None, + read_concern: Optional[ReadConcern] = None, ) -> Collection[_DocumentType]: """Get a :class:`~pymongo.collection.Collection` with the given name and options. @@ -319,11 +321,11 @@ def _get_encrypted_fields(self, kwargs, coll_name, ask_db): def create_collection( self, name: str, - codec_options: Optional["bson.CodecOptions[_DocumentTypeArg]"] = None, + codec_options: Optional[bson.CodecOptions[_DocumentTypeArg]] = None, read_preference: Optional[_ServerMode] = None, - write_concern: Optional["WriteConcern"] = None, - read_concern: Optional["ReadConcern"] = None, - session: Optional["ClientSession"] = None, + write_concern: Optional[WriteConcern] = None, + read_concern: Optional[ReadConcern] = None, + session: Optional[ClientSession] = None, check_exists: Optional[bool] = True, **kwargs: Any, ) -> Collection[_DocumentType]: @@ -472,7 +474,7 @@ def create_collection( ) def aggregate( - self, pipeline: _Pipeline, session: Optional["ClientSession"] = None, **kwargs: Any + self, pipeline: _Pipeline, session: Optional[ClientSession] = None, **kwargs: Any ) -> CommandCursor[_DocumentType]: """Perform a database-level aggregation. 
@@ -557,7 +559,7 @@ def watch( batch_size: Optional[int] = None, collation: Optional[_CollationIn] = None, start_at_operation_time: Optional[Timestamp] = None, - session: Optional["ClientSession"] = None, + session: Optional[ClientSession] = None, start_after: Optional[Mapping[str, Any]] = None, comment: Optional[Any] = None, full_document_before_change: Optional[str] = None, @@ -720,7 +722,7 @@ def command( allowable_errors: Optional[Sequence[Union[str, int]]] = None, read_preference: Optional[_ServerMode] = None, codec_options: "Optional[bson.codec_options.CodecOptions[_CodecDocumentType]]" = None, - session: Optional["ClientSession"] = None, + session: Optional[ClientSession] = None, comment: Optional[Any] = None, **kwargs: Any, ) -> _CodecDocumentType: @@ -883,7 +885,7 @@ def _list_collections(self, sock_info, session, read_preference, **kwargs): def list_collections( self, - session: Optional["ClientSession"] = None, + session: Optional[ClientSession] = None, filter: Optional[Mapping[str, Any]] = None, comment: Optional[Any] = None, **kwargs: Any, @@ -924,7 +926,7 @@ def _cmd(session, server, sock_info, read_preference): def list_collection_names( self, - session: Optional["ClientSession"] = None, + session: Optional[ClientSession] = None, filter: Optional[Mapping[str, Any]] = None, comment: Optional[Any] = None, **kwargs: Any, @@ -989,7 +991,7 @@ def _drop_helper(self, name, session=None, comment=None): def drop_collection( self, name_or_collection: Union[str, Collection[_DocumentTypeArg]], - session: Optional["ClientSession"] = None, + session: Optional[ClientSession] = None, comment: Optional[Any] = None, encrypted_fields: Optional[Mapping[str, Any]] = None, ) -> Dict[str, Any]: @@ -1069,7 +1071,7 @@ def validate_collection( name_or_collection: Union[str, Collection[_DocumentTypeArg]], scandata: bool = False, full: bool = False, - session: Optional["ClientSession"] = None, + session: Optional[ClientSession] = None, background: Optional[bool] = None, comment: Optional[Any] = None, ) -> Dict[str, Any]: @@ -1165,7 +1167,7 @@ def __bool__(self) -> NoReturn: def dereference( self, dbref: DBRef, - session: Optional["ClientSession"] = None, + session: Optional[ClientSession] = None, comment: Optional[Any] = None, **kwargs: Any, ) -> Optional[_DocumentType]: diff --git a/pymongo/encryption_options.py b/pymongo/encryption_options.py index 285b082a7d..d6f3ca6835 100644 --- a/pymongo/encryption_options.py +++ b/pymongo/encryption_options.py @@ -13,6 +13,7 @@ # limitations under the License. 
"""Support for automatic client-side field level encryption.""" +from __future__ import annotations from typing import TYPE_CHECKING, Any, List, Mapping, Optional @@ -38,7 +39,7 @@ def __init__( self, kms_providers: Mapping[str, Any], key_vault_namespace: str, - key_vault_client: Optional["MongoClient"] = None, + key_vault_client: Optional[MongoClient] = None, schema_map: Optional[Mapping[str, Any]] = None, bypass_auto_encryption: bool = False, mongocryptd_uri: str = "mongodb://localhost:27020", diff --git a/pymongo/mongo_client.py b/pymongo/mongo_client.py index ccfaaa31c1..871c4545e5 100644 --- a/pymongo/mongo_client.py +++ b/pymongo/mongo_client.py @@ -30,6 +30,7 @@ >>> c["test-database"] Database(MongoClient(host=['localhost:27017'], document_class=dict, tz_aware=False, connect=True), 'test-database') """ +from __future__ import annotations import contextlib import os @@ -1762,7 +1763,7 @@ def _ensure_session(self, session=None): @contextlib.contextmanager def _tmp_session( self, session: Optional[client_session.ClientSession], close: bool = True - ) -> "Generator[Optional[client_session.ClientSession], None, None]": + ) -> Generator[Optional[client_session.ClientSession], None, None]: """If provided session is None, lend a temporary session.""" if session is not None: if not isinstance(session, client_session.ClientSession): @@ -1939,10 +1940,10 @@ def drop_database( def get_default_database( self, default: Optional[str] = None, - codec_options: Optional["bson.CodecOptions[_DocumentTypeArg]"] = None, + codec_options: Optional[bson.CodecOptions[_DocumentTypeArg]] = None, read_preference: Optional[_ServerMode] = None, write_concern: Optional[WriteConcern] = None, - read_concern: Optional["ReadConcern"] = None, + read_concern: Optional[ReadConcern] = None, ) -> database.Database[_DocumentType]: """Get the database named in the MongoDB connection URI. @@ -2000,10 +2001,10 @@ def get_default_database( def get_database( self, name: Optional[str] = None, - codec_options: Optional["bson.CodecOptions[_DocumentTypeArg]"] = None, + codec_options: Optional[bson.CodecOptions[_DocumentTypeArg]] = None, read_preference: Optional[_ServerMode] = None, write_concern: Optional[WriteConcern] = None, - read_concern: Optional["ReadConcern"] = None, + read_concern: Optional[ReadConcern] = None, ) -> database.Database[_DocumentType]: """Get a :class:`~pymongo.database.Database` with the given name and options. diff --git a/pymongo/monitoring.py b/pymongo/monitoring.py index 391ca13540..24ac7f06bc 100644 --- a/pymongo/monitoring.py +++ b/pymongo/monitoring.py @@ -183,6 +183,8 @@ def connection_checked_in(self, event): handler first. """ +from __future__ import annotations + import datetime from collections import abc, namedtuple from typing import TYPE_CHECKING, Any, Dict, Optional @@ -1128,8 +1130,8 @@ class ServerDescriptionChangedEvent(_ServerEvent): def __init__( self, - previous_description: "ServerDescription", - new_description: "ServerDescription", + previous_description: ServerDescription, + new_description: ServerDescription, *args: Any, ) -> None: super().__init__(*args) @@ -1137,14 +1139,14 @@ def __init__( self.__new_description = new_description @property - def previous_description(self) -> "ServerDescription": + def previous_description(self) -> ServerDescription: """The previous :class:`~pymongo.server_description.ServerDescription`. 
""" return self.__previous_description @property - def new_description(self) -> "ServerDescription": + def new_description(self) -> ServerDescription: """The new :class:`~pymongo.server_description.ServerDescription`. """ @@ -1204,8 +1206,8 @@ class TopologyDescriptionChangedEvent(TopologyEvent): def __init__( self, - previous_description: "TopologyDescription", - new_description: "TopologyDescription", + previous_description: TopologyDescription, + new_description: TopologyDescription, *args: Any, ) -> None: super().__init__(*args) @@ -1213,14 +1215,14 @@ def __init__( self.__new_description = new_description @property - def previous_description(self) -> "TopologyDescription": + def previous_description(self) -> TopologyDescription: """The previous :class:`~pymongo.topology_description.TopologyDescription`. """ return self.__previous_description @property - def new_description(self) -> "TopologyDescription": + def new_description(self) -> TopologyDescription: """The new :class:`~pymongo.topology_description.TopologyDescription`. """ diff --git a/test/test_comment.py b/test/test_comment.py index ea44c74257..baac68be58 100644 --- a/test/test_comment.py +++ b/test/test_comment.py @@ -14,9 +14,10 @@ """Test the keyword argument 'comment' in various helpers.""" +from __future__ import annotations + import inspect import sys -from typing import Any, Union sys.path[0:0] = [""] @@ -69,7 +70,7 @@ def _test_ops( "signature of function %s" % (h.__name__), ) self.assertEqual( - inspect.signature(h).parameters["comment"].annotation, Union[Any, None] + inspect.signature(h).parameters["comment"].annotation, "Optional[Any]" ) if isinstance(maybe_cursor, CommandCursor): maybe_cursor.close() From 37202c0db1e8c282b62852f10f63e0c127fd88f7 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Fri, 16 Jun 2023 14:31:26 -0500 Subject: [PATCH 0419/1588] PYTHON-3726 Migrate off of Ubuntu in EG Builds (#1227) --- .evergreen/config.yml | 106 ++++++++++++++++++------------------------ 1 file changed, 45 insertions(+), 61 deletions(-) diff --git a/.evergreen/config.yml b/.evergreen/config.yml index c3e8a3d1f3..df126f57a1 100644 --- a/.evergreen/config.yml +++ b/.evergreen/config.yml @@ -1372,7 +1372,7 @@ tasks: - name: "release-combine" tags: ["release_tag"] - run_on: ubuntu2004-small + run_on: rhel84-small depends_on: - name: "*" variant: ".release_tag" @@ -2218,7 +2218,8 @@ tasks: shell: "bash" script: | ${PREPARE_SHELL} - export LIBMONGOCRYPT_URL=https://s3.amazonaws.com/mciuploads/libmongocrypt/ubuntu2004-64/master/latest/libmongocrypt.tar.gz + export PYTHON_BINARY=/opt/mongodbtoolchain/v4/bin/python3 + export LIBMONGOCRYPT_URL=https://s3.amazonaws.com/mciuploads/libmongocrypt/debian10/master/latest/libmongocrypt.tar.gz SUCCESS=false TEST_FLE_GCP_AUTO=1 ./.evergreen/run-tests.sh - name: testazurekms-task @@ -2272,10 +2273,10 @@ tasks: set -o errexit ${PREPARE_SHELL} cd src - PYTHON_BINARY= + PYTHON_BINARY=/opt/mongodbtoolchain/v4/bin/python3 \ KEY_NAME='${testazurekms_keyname}' \ KEY_VAULT_ENDPOINT='${testazurekms_keyvaultendpoint}' \ - LIBMONGOCRYPT_URL=https://s3.amazonaws.com/mciuploads/libmongocrypt/ubuntu2004-64/master/latest/libmongocrypt.tar.gz \ + LIBMONGOCRYPT_URL=https://s3.amazonaws.com/mciuploads/libmongocrypt/debian10/master/latest/libmongocrypt.tar.gz \ SUCCESS=false TEST_FLE_AZURE_AUTO=1 \ ./.evergreen/run-tests.sh @@ -2322,17 +2323,19 @@ axes: skip_web_identity_auth_test: true python3_binary: /Library/Frameworks/Python.framework/Versions/3.8/bin/python3 libmongocrypt_url: 
https://s3.amazonaws.com/mciuploads/libmongocrypt/macos/master/latest/libmongocrypt.tar.gz - - id: rhel76 - display_name: "RHEL 7.6" + - id: rhel7 + display_name: "RHEL 7.x" run_on: rhel76-small batchtime: 10080 # 7 days variables: + python3_binary: "/opt/python/3.8/bin/python3" libmongocrypt_url: https://s3.amazonaws.com/mciuploads/libmongocrypt/rhel-70-64-bit/master/latest/libmongocrypt.tar.gz - - id: rhel84 - display_name: "RHEL 8.4" + - id: rhel8 + display_name: "RHEL 8.x" run_on: rhel84-small batchtime: 10080 # 7 days variables: + python3_binary: "/opt/python/3.8/bin/python3" libmongocrypt_url: https://s3.amazonaws.com/mciuploads/libmongocrypt/rhel-80-64-bit/master/latest/libmongocrypt.tar.gz - id: rhel80-fips display_name: "RHEL 8.0 FIPS" @@ -2815,9 +2818,9 @@ buildvariants: tasks: - ".6.0" -- matrix_name: "tests-python-version-rhel8.4-test-ssl" +- matrix_name: "tests-python-version-rhel8-test-ssl" matrix_spec: - platform: rhel84 + platform: rhel8 python-version: "*" auth-ssl: "*" coverage: "*" @@ -2835,14 +2838,14 @@ buildvariants: - matrix_name: "tests-pyopenssl" matrix_spec: - platform: rhel84 + platform: rhel8 python-version: "*" auth: "*" ssl: "ssl" pyopenssl: "*" # Only test "noauth" with Python 3.7. exclude_spec: - platform: rhel84 + platform: rhel8 python-version: ["3.8", "3.9", "3.10", "pypy3.7", "pypy3.8"] auth: "noauth" ssl: "ssl" @@ -2874,9 +2877,9 @@ buildvariants: tasks: - '.replica_set' -- matrix_name: "tests-python-version-rhel84-test-encryption" +- matrix_name: "tests-python-version-rhel8-test-encryption" matrix_spec: - platform: rhel84 + platform: rhel8 python-version: "*" auth-ssl: noauth-nossl # TODO: dependency error for 'coverage-report' task: @@ -2888,22 +2891,22 @@ buildvariants: rules: - if: encryption: ["encryption", "encryption_crypt_shared"] - platform: rhel84 + platform: rhel8 auth-ssl: noauth-nossl python-version: "*" then: add_tasks: *encryption-server-versions -- matrix_name: "tests-python-version-rhel84-without-c-extensions" +- matrix_name: "tests-python-version-rhel8-without-c-extensions" matrix_spec: - platform: rhel84 + platform: rhel8 python-version: "*" c-extensions: without-c-extensions auth-ssl: noauth-nossl coverage: "*" exclude_spec: # These interpreters are always tested without extensions. - - platform: rhel84 + - platform: rhel8 python-version: ["pypy3.7", "pypy3.8"] c-extensions: "*" auth-ssl: "*" @@ -2911,15 +2914,15 @@ buildvariants: display_name: "${c-extensions} ${python-version} ${platform} ${auth} ${ssl} ${coverage}" tasks: *all-server-versions -- matrix_name: "tests-python-version-rhel84-compression" +- matrix_name: "tests-python-version-rhel8-compression" matrix_spec: - platform: rhel84 + platform: rhel8 python-version: "*" c-extensions: "*" compression: "*" exclude_spec: # These interpreters are always tested without extensions. - - platform: rhel84 + - platform: rhel8 python-version: ["pypy3.7", "pypy3.8"] c-extensions: "with-c-extensions" compression: "*" @@ -2940,36 +2943,21 @@ buildvariants: - "test-4.0-standalone" - "test-3.6-standalone" -- matrix_name: "tests-python-version-green-framework-rhel84" +- matrix_name: "tests-python-version-green-framework-rhel8" matrix_spec: - platform: rhel84 + platform: rhel8 python-version: "*" green-framework: "*" auth-ssl: "*" exclude_spec: # Don't test green frameworks on these Python versions. 
- - platform: rhel84 - python-version: ["pypy3.7", "pypy3.8", "3.11"] + - platform: rhel8 + python-version: ["pypy3.7", "pypy3.8"] green-framework: "*" auth-ssl: "*" display_name: "${green-framework} ${python-version} ${platform} ${auth-ssl}" tasks: *all-server-versions -- matrix_name: "tests-python-version-green-framework-ubuntu20" - matrix_spec: - platform: ubuntu-20.04 - python-version: ["3.11"] - green-framework: "*" - auth-ssl: "*" - display_name: "${green-framework} ${python-version} ${platform} ${auth-ssl}" - tasks: - - ".rapid" - - ".latest" - - ".7.0" - - ".6.0" - - ".5.0" - - ".4.4" - - matrix_name: "tests-windows-python-version" matrix_spec: platform: windows-64-vsMulti-small @@ -2988,7 +2976,7 @@ buildvariants: - matrix_name: "tests-python-version-supports-openssl-102-test-ssl" matrix_spec: - platform: rhel76 + platform: rhel7 # Python 3.10+ requires OpenSSL 1.1.1+ python-version: ["3.7", "3.8", "3.9", "pypy3.7", "pypy3.8"] auth-ssl: "*" @@ -3016,13 +3004,13 @@ buildvariants: # Storage engine tests on RHEL 8.4 (x86_64) with Python 3.7. - matrix_name: "tests-storage-engines" matrix_spec: - platform: rhel84 + platform: rhel8 storage-engine: "*" python-version: 3.7 display_name: "Storage ${storage-engine} ${python-version} ${platform}" rules: - if: - platform: rhel84 + platform: rhel8 storage-engine: ["inmemory"] python-version: "*" then: @@ -3037,7 +3025,7 @@ buildvariants: - "test-3.6-standalone" - if: # MongoDB 4.2 drops support for MMAPv1 - platform: rhel84 + platform: rhel8 storage-engine: ["mmapv1"] python-version: "*" then: @@ -3050,7 +3038,7 @@ buildvariants: # enableTestCommands=0 tests on RHEL 8.4 (x86_64) with Python 3.7. - matrix_name: "test-disableTestCommands" matrix_spec: - platform: rhel84 + platform: rhel8 disableTestCommands: "*" python-version: "3.7" display_name: "Disable test commands ${python-version} ${platform}" @@ -3059,7 +3047,7 @@ buildvariants: - matrix_name: "test-linux-enterprise-auth" matrix_spec: - platform: rhel84 + platform: rhel8 python-version: "*" auth: "auth" display_name: "Enterprise ${auth} ${platform} ${python-version}" @@ -3092,7 +3080,7 @@ buildvariants: - matrix_name: "mockupdb-tests" matrix_spec: - platform: rhel84 + platform: rhel8 python-version: 3.7 display_name: "MockupDB Tests" tasks: @@ -3100,7 +3088,7 @@ buildvariants: - matrix_name: "tests-doctests" matrix_spec: - platform: rhel84 + platform: rhel8 python-version: ["3.8"] display_name: "Doctests ${python-version} ${platform}" tasks: @@ -3126,7 +3114,7 @@ buildvariants: - matrix_name: "atlas-connect" matrix_spec: - platform: rhel84 + platform: rhel8 python-version: "*" display_name: "Atlas connect ${python-version} ${platform}" tasks: @@ -3134,7 +3122,7 @@ buildvariants: - matrix_name: "serverless" matrix_spec: - platform: rhel84 + platform: rhel8 python-version: "*" auth-ssl: auth-ssl serverless: "*" @@ -3144,7 +3132,7 @@ buildvariants: - matrix_name: "data-lake-spec-tests" matrix_spec: - platform: rhel84 + platform: rhel8 python-version: ["3.7", "3.10"] auth: "auth" c-extensions: "*" @@ -3154,7 +3142,7 @@ buildvariants: - matrix_name: "stable-api-tests" matrix_spec: - platform: rhel84 + platform: rhel8 python-version: ["3.7", "3.10"] auth: "auth" versionedApi: "*" @@ -3167,7 +3155,7 @@ buildvariants: - matrix_name: "ocsp-test" matrix_spec: - platform: ubuntu-20.04 + platform: rhel8 python-version: ["3.7", "3.10", "pypy3.7", "pypy3.8"] mongodb-version: ["4.4", "5.0", "6.0", "7.0", "latest"] auth: "noauth" @@ -3204,7 +3192,7 @@ buildvariants: - matrix_name: "oidc-auth-test" 
matrix_spec: - platform: [ rhel84 ] + platform: [ rhel8 ] python-version: ["3.9"] display_name: "MONGODB-OIDC Auth ${platform} ${python-version}" tasks: @@ -3251,7 +3239,7 @@ buildvariants: - matrix_name: "load-balancer" matrix_spec: - platform: rhel84 + platform: rhel8 mongodb-version: ["6.0", "7.0", "rapid", "latest"] auth-ssl: "*" python-version: "*" @@ -3263,7 +3251,7 @@ buildvariants: - name: testgcpkms-variant display_name: "GCP KMS" run_on: - - ubuntu2004-small + - debian10-small tasks: - name: testgcpkms_task_group batchtime: 20160 # Use a batchtime of 14 days as suggested by the CSFLE test README @@ -3271,7 +3259,7 @@ buildvariants: - name: testazurekms-variant display_name: "Azure KMS" - run_on: ubuntu2004-small + run_on: debian10-small tasks: - name: testazurekms_task_group batchtime: 20160 # Use a batchtime of 14 days as suggested by the CSFLE test README @@ -3286,10 +3274,6 @@ buildvariants: # Platform notes # i386 builds of OpenSSL or Cyrus SASL are not available - # Ubuntu16.04 ppc64le is only supported by MongoDB 3.4+ - # Ubuntu16.04 aarch64 is only supported by MongoDB 3.4+ - # Ubuntu16.04 s390x is only supported by MongoDB 3.4+ - # Ubuntu16.04 (x86) only supports MongoDB 3.2+ # Debian 8.1 only supports MongoDB 3.4+ # SUSE12 s390x is only supported by MongoDB 3.4+ # No enterprise build for Archlinux, SSL not available From 374250d5494ad8221a28f9b2c62f83b1ef16f451 Mon Sep 17 00:00:00 2001 From: Noah Stapp Date: Fri, 16 Jun 2023 13:05:18 -0700 Subject: [PATCH 0420/1588] PYTHON-2963 Add tox config in preparation for migration from setup.py (#1240) --- .github/workflows/test-python.yml | 19 ++++---- tox.ini | 77 +++++++++++++++++++++++++++++++ 2 files changed, 85 insertions(+), 11 deletions(-) create mode 100644 tox.ini diff --git a/.github/workflows/test-python.yml b/.github/workflows/test-python.yml index b4a8177fda..83def93f57 100644 --- a/.github/workflows/test-python.yml +++ b/.github/workflows/test-python.yml @@ -33,13 +33,16 @@ jobs: python-version: ${{ matrix.python-version }} cache: 'pip' cache-dependency-path: 'setup.py' + - name: Install dependencies + run: | + pip install tox - name: Start MongoDB uses: supercharge/mongodb-github-action@1.7.0 with: mongodb-version: 4.4 - name: Run tests run: | - python setup.py test + tox -e test mypytest: name: Run mypy @@ -58,22 +61,16 @@ jobs: cache-dependency-path: 'setup.py' - name: Install dependencies run: | - python -m pip install -U pip mypy==1.2 - pip install -e ".[zstd, encryption, ocsp]" + pip install tox - name: Run mypy run: | - mypy --install-types --non-interactive bson gridfs tools pymongo - mypy --install-types --non-interactive --disable-error-code var-annotated --disable-error-code attr-defined --disable-error-code union-attr --disable-error-code assignment --disable-error-code no-redef --disable-error-code index --allow-redefinition --allow-untyped-globals --exclude "test/mypy_fails/*.*" test - python -m pip install -U typing_extensions - mypy --install-types --non-interactive test/test_typing.py test/test_typing_strict.py + tox -e typecheck-mypy - name: Run pyright run: | - python -m pip install -U pip pyright==1.1.290 - pyright test/test_typing.py test/test_typing_strict.py + tox -e typecheck-pyright - name: Run pyright strict run: | - echo '{"strict": ["tests/test_typing_strict.py"]}' >> pyrightconfig.json - pyright test/test_typing_strict.py + tox -e typecheck-pyright-strict linkcheck: name: Check Links diff --git a/tox.ini b/tox.ini new file mode 100644 index 0000000000..e199914cb5 --- /dev/null +++ 
b/tox.ini @@ -0,0 +1,77 @@ +[tox] +requires = + tox>=4 +envlist = + # Test using the system Python. + test, + # Run pre-commit on all files. + lint, + # Run pre-commit on all files, including stages that require manual fixes. + lint-manual, + # Typecheck all files. + typecheck + +[testenv:test] +description = run unit tests +commands = + python --version + python setup.py test {posargs} + +[testenv:lint] +description = run pre-commit +deps = + pre-commit +commands = + pre-commit run --all-files + +[testenv:lint-manual] +description = run all pre-commit stages, including those that require manual fixes +deps = + pre-commit +commands = + pre-commit run --all-files --hook-stage manual + +[testenv:typecheck-mypy] +description = run mypy and pyright to typecheck +deps = + mypy + zstandard + certifi; platform_system == "win32" or platform_system == "Darwin" + typing_extensions + pyopenssl>=17.2.0 + requests<3.0.0 + service_identity>=18.1.0 + pymongocrypt>=1.6.0,<2.0.0 + pymongo-auth-aws<2.0.0 +commands = + mypy --install-types --non-interactive bson gridfs tools pymongo + mypy --install-types --non-interactive --disable-error-code var-annotated --disable-error-code attr-defined --disable-error-code union-attr --disable-error-code assignment --disable-error-code no-redef --disable-error-code index --allow-redefinition --allow-untyped-globals --exclude "test/mypy_fails/*.*" test + mypy --install-types --non-interactive test/test_typing.py test/test_typing_strict.py + +[testenv:typecheck-pyright] +description = run pyright to typecheck +deps = + mypy + pyright==1.1.290 +commands = + pyright test/test_typing.py test/test_typing_strict.py + +[testenv:typecheck-pyright-strict] +description = run pyright with strict mode to typecheck +deps = + {[testenv:typecheck-pyright]deps} +allowlist_externals=echo +commands = + echo '{"strict": ["tests/test_typing_strict.py"]}' > pyrightconfig.json + pyright test/test_typing_strict.py + +[testenv:typecheck] +description = run mypy and pyright to typecheck +deps = + {[testenv:typecheck-mypy]deps} + {[testenv:typecheck-pyright]deps} +allowlist_externals=echo +commands = + {[testenv:typecheck-mypy]commands} + {[testenv:typecheck-pyright]commands} + {[testenv:typecheck-pyright-strict]commands} From 82d87dc173d0095faa7aef8622387debaca52b2c Mon Sep 17 00:00:00 2001 From: Noah Stapp Date: Fri, 16 Jun 2023 13:30:54 -0700 Subject: [PATCH 0421/1588] PYTHON-3744 Fix utcnow deprecation build regressions (#1244) --- doc/examples/datetimes.rst | 4 ++-- doc/tutorial.rst | 2 +- pymongo/ocsp_cache.py | 12 ++++++++++-- pymongo/ocsp_support.py | 2 +- test/test_ocsp_cache.py | 2 +- 5 files changed, 15 insertions(+), 7 deletions(-) diff --git a/doc/examples/datetimes.rst b/doc/examples/datetimes.rst index 2dc9c003eb..f9c9fa7a31 100644 --- a/doc/examples/datetimes.rst +++ b/doc/examples/datetimes.rst @@ -26,10 +26,10 @@ time into MongoDB: .. doctest:: >>> result = db.objects.insert_one( - ... {"last_modified": datetime.datetime.now(tz=timezone.utc)} + ... {"last_modified": datetime.datetime.now(tz=datetime.timezone.utc)} ... ) -Always use :meth:`datetime.datetime.now(tz=timezone.utc)`, which explicitly returns the current time in +Always use :meth:`datetime.datetime.now(tz=datetime.timezone.utc)`, which explicitly returns the current time in UTC, instead of :meth:`datetime.datetime.now`, with no arguments, which returns the current local time. 
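A quick aside on why this hunk fully qualifies the name: the patch rewrites ``timezone.utc`` as ``datetime.timezone.utc``, presumably because the doctest namespace imports ``datetime`` as a module rather than ``from datetime import timezone``, so the bare spelling would raise ``NameError``. A minimal sketch of the aware-versus-naive distinction the surrounding prose describes, using nothing beyond the standard library::

    >>> import datetime
    >>> aware = datetime.datetime.now(tz=datetime.timezone.utc)
    >>> aware.tzinfo is not None
    True
    >>> naive = datetime.datetime.now()  # local wall-clock time, tzinfo is None
    >>> naive.tzinfo is None
    True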
Avoid doing this: diff --git a/doc/tutorial.rst b/doc/tutorial.rst index 768b535fe3..e33936363d 100644 --- a/doc/tutorial.rst +++ b/doc/tutorial.rst @@ -109,7 +109,7 @@ post: ... "author": "Mike", ... "text": "My first blog post!", ... "tags": ["mongodb", "python", "pymongo"], - ... "date": datetime.datetime.now(tz=timezone.utc), + ... "date": datetime.datetime.now(tz=datetime.timezone.utc), ... } Note that documents can contain native Python types (like diff --git a/pymongo/ocsp_cache.py b/pymongo/ocsp_cache.py index b60a24b027..f6ac4bb08c 100644 --- a/pymongo/ocsp_cache.py +++ b/pymongo/ocsp_cache.py @@ -61,7 +61,11 @@ def __setitem__(self, key, value): return # Do nothing if the response is invalid. - if not (value.this_update <= _datetime.now(tz=timezone.utc) < value.next_update): + if not ( + value.this_update + <= _datetime.now(tz=timezone.utc).replace(tzinfo=None) + < value.next_update + ): return # Cache new response OR update cached response if new response @@ -82,7 +86,11 @@ def __getitem__(self, item): value = self._data[cache_key] # Return cached response if it is still valid. - if value.this_update <= _datetime.now(tz=timezone.utc) < value.next_update: + if ( + value.this_update + <= _datetime.now(tz=timezone.utc).replace(tzinfo=None) + < value.next_update + ): return value self._data.pop(cache_key, None) diff --git a/pymongo/ocsp_support.py b/pymongo/ocsp_support.py index dda92d0d3b..dd070748a4 100644 --- a/pymongo/ocsp_support.py +++ b/pymongo/ocsp_support.py @@ -220,7 +220,7 @@ def _verify_response(issuer, response): # Note that we are not using a "tolerance period" as discussed in # https://tools.ietf.org/rfc/rfc5019.txt? - now = _datetime.now(tz=timezone.utc) + now = _datetime.now(tz=timezone.utc).replace(tzinfo=None) # RFC6960, Section 3.2, Number 5 if response.this_update > now: _LOGGER.debug("thisUpdate is in the future") diff --git a/test/test_ocsp_cache.py b/test/test_ocsp_cache.py index 3740b6b28a..7fff4fd902 100644 --- a/test/test_ocsp_cache.py +++ b/test/test_ocsp_cache.py @@ -61,7 +61,7 @@ def _create_mock_request(self): ) def _create_mock_response(self, this_update_delta_seconds, next_update_delta_seconds): - now = datetime.now(tz=timezone.utc) + now = datetime.now(tz=timezone.utc).replace(tzinfo=None) this_update = now + timedelta(seconds=this_update_delta_seconds) if next_update_delta_seconds is not None: next_update = now + timedelta(seconds=next_update_delta_seconds) From bc66d83efc5e57c5685cdd621aa3bf74ba44f2a7 Mon Sep 17 00:00:00 2001 From: Noah Stapp Date: Tue, 20 Jun 2023 09:13:23 -0700 Subject: [PATCH 0422/1588] PYTHON-3738 Use tox for sphinx doc instead of setup.py (#1245) --- .github/workflows/test-python.yml | 6 ++---- tox.ini | 35 ++++++++++++++++++++++++++++++- 2 files changed, 36 insertions(+), 5 deletions(-) diff --git a/.github/workflows/test-python.yml b/.github/workflows/test-python.yml index 83def93f57..d7c442cc49 100644 --- a/.github/workflows/test-python.yml +++ b/.github/workflows/test-python.yml @@ -83,9 +83,7 @@ jobs: cache-dependency-path: 'setup.py' - name: Install dependencies run: | - python -m pip install -U pip - python -m pip install sphinx + pip install tox - name: Check links run: | - cd doc - make linkcheck + tox -e linkcheck diff --git a/tox.ini b/tox.ini index e199914cb5..46c1a697de 100644 --- a/tox.ini +++ b/tox.ini @@ -8,8 +8,20 @@ envlist = lint, # Run pre-commit on all files, including stages that require manual fixes. lint-manual, + # Typecheck using mypy. + typecheck-mypy, + # Typecheck using pyright. 
+ typecheck-pyright, + # Typecheck using pyright strict. + typecheck-pyright-strict, # Typecheck all files. - typecheck + typecheck, + # Build sphinx docs + doc, + # Test sphinx docs + doc-test, + # Linkcheck sphinx docs + linkcheck [testenv:test] description = run unit tests @@ -75,3 +87,24 @@ commands = {[testenv:typecheck-mypy]commands} {[testenv:typecheck-pyright]commands} {[testenv:typecheck-pyright-strict]commands} + +[testenv:doc] +description = build sphinx docs +deps = + sphinx +commands = + sphinx-build -E -b html doc ./doc/_build/html + +[testenv:doc-test] +description = run sphinx doc tests +deps = + {[testenv:doc]deps} +commands = + sphinx-build -E -b doctest doc ./doc/_build/doctest + +[testenv:linkcheck] +description = check links of sphinx docs +deps = + {[testenv:doc]deps} +commands = + sphinx-build -E -b linkcheck doc ./doc/_build/linkcheck From 55a9bee81012057e90c47d08f396377c112e47bb Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Tue, 20 Jun 2023 23:20:39 -0700 Subject: [PATCH 0423/1588] BUMP 4.4 (#1251) --- doc/api/pymongo/encryption_options.rst | 4 +--- doc/changelog.rst | 27 +++++++++++++++++++------- pymongo/_version.py | 2 +- pymongo/client_options.py | 2 +- 4 files changed, 23 insertions(+), 12 deletions(-) diff --git a/doc/api/pymongo/encryption_options.rst b/doc/api/pymongo/encryption_options.rst index 08bfc157a9..b8a886ea68 100644 --- a/doc/api/pymongo/encryption_options.rst +++ b/doc/api/pymongo/encryption_options.rst @@ -3,6 +3,4 @@ .. automodule:: pymongo.encryption_options :synopsis: Support for automatic client-side field level encryption - - .. autoclass:: pymongo.encryption_options.AutoEncryptionOpts - :members: + :members: diff --git a/doc/changelog.rst b/doc/changelog.rst index e0e316e5b6..eae105b617 100644 --- a/doc/changelog.rst +++ b/doc/changelog.rst @@ -5,18 +5,28 @@ Changes in Version 4.4 ----------------------- - Added support for MongoDB 7.0. +- Added support for Python 3.11. - Added support for passing a list containing (key, direction) pairs or keys to :meth:`~pymongo.collection.Collection.create_index`. -- pymongocrypt 1.6.0 or later is now required for client side field level - encryption support. +- Improved bson encoding performance (`PYTHON-3717`_ and `PYTHON-3718`_). - Improved support for Pyright to improve typing support for IDEs like Visual Studio Code or Visual Studio. - Improved support for type-checking with MyPy "strict" mode (`--strict`). -- Added support for Python 3.11. -- pymongocrypt 1.6.0 or later is now required for :ref:`In-Use Encryption` support. MongoDB Server 7.0 introduced a backwards breaking - change to the QE protocol. Users taking advantage of the Queryable Encryption beta must now upgrade to - MongoDB 7.0+ and PyMongo 4.4+. -- Previously, PyMongo's docs recommended using :meth:`datetime.datetime.utcnow` and :meth:`datetime.datetime.utcfromtimestamp`. utcnow and utcfromtimestamp are deprecated in Python 3.12, for reasons explained `in this Github issue`_. Instead, users should use :meth:`datetime.datetime.now(tz=timezone.utc)` and :meth:`datetime.datetime.fromtimestamp(tz=timezone.utc)` instead. +- Added :meth:`~pymongo.encryption.ClientEncryption.create_encrypted_collection`, + :class:`~pymongo.errors.EncryptedCollectionError`, + :meth:`~pymongo.encryption.ClientEncryption.encrypt_expression`, + :class:`~pymongo.encryption_options.RangeOpts`, + and :attr:`~pymongo.encryption.Algorithm.RANGEPREVIEW` as part of the experimental + Queryable Encryption beta. 
+- pymongocrypt 1.6.0 or later is now required for :ref:`In-Use Encryption` support. MongoDB + Server 7.0 introduced a backwards breaking change to the QE protocol. Users taking + advantage of the Queryable Encryption beta must now upgrade to MongoDB 7.0+ and + PyMongo 4.4+. +- Previously, PyMongo's docs recommended using :meth:`datetime.datetime.utcnow` and + :meth:`datetime.datetime.utcfromtimestamp`. utcnow and utcfromtimestamp are deprecated + in Python 3.12, for reasons explained `in this Github issue`_. Instead, users should + use :meth:`datetime.datetime.now(tz=timezone.utc)` and + :meth:`datetime.datetime.fromtimestamp(tz=timezone.utc)` instead. .. _in this Github issue: https://github.com/python/cpython/issues/103857 @@ -28,6 +38,9 @@ in this release. .. _PyMongo 4.4 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=34354 +.. _PYTHON-3717: https://jira.mongodb.org/browse/PYTHON-3717 +.. _PYTHON-3718: https://jira.mongodb.org/browse/PYTHON-3718 + Changes in Version 4.3.3 ------------------------ diff --git a/pymongo/_version.py b/pymongo/_version.py index a5885d8cc5..14ba007944 100644 --- a/pymongo/_version.py +++ b/pymongo/_version.py @@ -15,7 +15,7 @@ """Current version of PyMongo.""" from typing import Tuple, Union -version_tuple: Tuple[Union[int, str], ...] = (4, 4, 0, ".dev1") +version_tuple: Tuple[Union[int, str], ...] = (4, 4, 0) def get_version_string() -> str: diff --git a/pymongo/client_options.py b/pymongo/client_options.py index c9f63dc95a..2e39b843ec 100644 --- a/pymongo/client_options.py +++ b/pymongo/client_options.py @@ -267,7 +267,7 @@ def read_concern(self): def timeout(self) -> Optional[float]: """The configured timeoutMS converted to seconds, or None. - ..versionadded: 4.2 + .. versionadded: 4.2 """ return self.__timeout From bafb73cb8c92c07b647c30fdca19032db379f337 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Wed, 21 Jun 2023 11:16:35 -0700 Subject: [PATCH 0424/1588] BUMP 4.5.0.dev0 --- pymongo/_version.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pymongo/_version.py b/pymongo/_version.py index 14ba007944..db32b1ddb2 100644 --- a/pymongo/_version.py +++ b/pymongo/_version.py @@ -15,7 +15,7 @@ """Current version of PyMongo.""" from typing import Tuple, Union -version_tuple: Tuple[Union[int, str], ...] = (4, 4, 0) +version_tuple: Tuple[Union[int, str], ...] 
= (4, 5, 0, ".dev0") def get_version_string() -> str: From b16e06acfda8cf009da74c7706df833627533a74 Mon Sep 17 00:00:00 2001 From: Noah Stapp Date: Wed, 21 Jun 2023 15:07:14 -0700 Subject: [PATCH 0425/1588] PYTHON-3727 Use tox for unit tests and switch to pytest (#1249) --- pytest.ini | 4 +++ test/conftest.py | 10 ++++++++ test/test_cmap.py | 6 ++--- test/test_crud_v1.py | 4 +-- test/test_data_lake.py | 4 +-- test/test_encryption.py | 4 +-- test/test_read_write_concern_spec.py | 4 +-- test/test_retryable_reads.py | 4 +-- test/test_retryable_writes.py | 4 +-- test/test_server_selection_in_window.py | 6 ++--- test/test_transactions.py | 6 ++--- test/utils.py | 2 +- tox.ini | 34 +++++++++++++++++-------- 13 files changed, 59 insertions(+), 33 deletions(-) create mode 100644 pytest.ini create mode 100644 test/conftest.py diff --git a/pytest.ini b/pytest.ini new file mode 100644 index 0000000000..daf6168964 --- /dev/null +++ b/pytest.ini @@ -0,0 +1,4 @@ +[pytest] +testpaths = + test +norecursedirs = test/* diff --git a/test/conftest.py b/test/conftest.py new file mode 100644 index 0000000000..400fd9ed75 --- /dev/null +++ b/test/conftest.py @@ -0,0 +1,10 @@ +from test import setup, teardown + +import pytest + + +@pytest.fixture(scope="session", autouse=True) +def test_setup_and_teardown(): + setup() + yield + teardown() diff --git a/test/test_cmap.py b/test/test_cmap.py index 3b84524f44..1676ed66da 100644 --- a/test/test_cmap.py +++ b/test/test_cmap.py @@ -24,7 +24,7 @@ from test.pymongo_mocks import DummyMonitor from test.utils import ( CMAPListener, - TestCreator, + SpecTestCreator, camel_to_snake, client_context, get_pool, @@ -455,7 +455,7 @@ def run_scenario(self): return run_scenario -class CMAPTestCreator(TestCreator): +class CMAPSpecTestCreator(SpecTestCreator): def tests(self, scenario_def): """Extract the tests from a spec file. 
@@ -465,7 +465,7 @@ def tests(self, scenario_def): return [scenario_def] -test_creator = CMAPTestCreator(create_test, TestCMAP, TestCMAP.TEST_PATH) +test_creator = CMAPSpecTestCreator(create_test, TestCMAP, TestCMAP.TEST_PATH) test_creator.create_tests() diff --git a/test/test_crud_v1.py b/test/test_crud_v1.py index 589da0a7d7..46aab2fba1 100644 --- a/test/test_crud_v1.py +++ b/test/test_crud_v1.py @@ -21,7 +21,7 @@ from test import IntegrationTest, unittest from test.utils import ( - TestCreator, + SpecTestCreator, camel_to_snake, camel_to_snake_args, camel_to_upper_camel, @@ -171,7 +171,7 @@ def run_scenario(self): return run_scenario -test_creator = TestCreator(create_test, TestAllScenarios, _TEST_PATH) +test_creator = SpecTestCreator(create_test, TestAllScenarios, _TEST_PATH) test_creator.create_tests() diff --git a/test/test_data_lake.py b/test/test_data_lake.py index ce210010bd..868cbe836b 100644 --- a/test/test_data_lake.py +++ b/test/test_data_lake.py @@ -23,7 +23,7 @@ from test.crud_v2_format import TestCrudV2 from test.utils import ( OvertCommandListener, - TestCreator, + SpecTestCreator, rs_client_noauth, rs_or_single_client, ) @@ -115,7 +115,7 @@ def run_scenario(self): return run_scenario -TestCreator(create_test, DataLakeTestSpec, _TEST_PATH).create_tests() +SpecTestCreator(create_test, DataLakeTestSpec, _TEST_PATH).create_tests() if __name__ == "__main__": diff --git a/test/test_encryption.py b/test/test_encryption.py index 0b9087359e..2f61b52ffb 100644 --- a/test/test_encryption.py +++ b/test/test_encryption.py @@ -50,7 +50,7 @@ from test.utils import ( AllowListEventListener, OvertCommandListener, - TestCreator, + SpecTestCreator, TopologyEventListener, camel_to_snake_args, is_greenthread_patched, @@ -695,7 +695,7 @@ def run_scenario(self): return run_scenario -test_creator = TestCreator(create_test, TestSpec, os.path.join(SPEC_PATH, "legacy")) +test_creator = SpecTestCreator(create_test, TestSpec, os.path.join(SPEC_PATH, "legacy")) test_creator.create_tests() diff --git a/test/test_read_write_concern_spec.py b/test/test_read_write_concern_spec.py index 2b39f7d04e..b27e9fa033 100644 --- a/test/test_read_write_concern_spec.py +++ b/test/test_read_write_concern_spec.py @@ -24,7 +24,7 @@ from test import IntegrationTest, client_context, unittest from test.utils import ( EventListener, - TestCreator, + SpecTestCreator, disable_replication, enable_replication, rs_or_single_client, @@ -337,7 +337,7 @@ def run_scenario(self): return run_scenario -test_creator = TestCreator(create_operation_test, TestOperation, TestOperation.TEST_PATH) +test_creator = SpecTestCreator(create_operation_test, TestOperation, TestOperation.TEST_PATH) test_creator.create_tests() diff --git a/test/test_retryable_reads.py b/test/test_retryable_reads.py index 97c51cd44f..df173ac27b 100644 --- a/test/test_retryable_reads.py +++ b/test/test_retryable_reads.py @@ -31,7 +31,7 @@ from test.utils import ( CMAPListener, OvertCommandListener, - TestCreator, + SpecTestCreator, rs_or_single_client, ) from test.utils_spec_runner import SpecRunner @@ -138,7 +138,7 @@ def run_scenario(self): return run_scenario -test_creator = TestCreator(create_test, TestSpec, _TEST_PATH) +test_creator = SpecTestCreator(create_test, TestSpec, _TEST_PATH) test_creator.create_tests() diff --git a/test/test_retryable_writes.py b/test/test_retryable_writes.py index 32841a8227..89507b33c3 100644 --- a/test/test_retryable_writes.py +++ b/test/test_retryable_writes.py @@ -28,7 +28,7 @@ DeprecationFilter, EventListener, OvertCommandListener, 
- TestCreator, + SpecTestCreator, rs_or_single_client, ) from test.utils_spec_runner import SpecRunner @@ -120,7 +120,7 @@ def run_scenario(self): return run_scenario -test_creator = TestCreator(create_test, TestAllScenarios, _TEST_PATH) +test_creator = SpecTestCreator(create_test, TestAllScenarios, _TEST_PATH) test_creator.create_tests() diff --git a/test/test_server_selection_in_window.py b/test/test_server_selection_in_window.py index 63769a6457..6c015e0ed2 100644 --- a/test/test_server_selection_in_window.py +++ b/test/test_server_selection_in_window.py @@ -19,7 +19,7 @@ from test import IntegrationTest, client_context, unittest from test.utils import ( OvertCommandListener, - TestCreator, + SpecTestCreator, get_pool, rs_client, wait_until, @@ -76,7 +76,7 @@ def run_scenario(self): return run_scenario -class CustomTestCreator(TestCreator): +class CustomSpecTestCreator(SpecTestCreator): def tests(self, scenario_def): """Extract the tests from a spec file. @@ -86,7 +86,7 @@ def tests(self, scenario_def): return [scenario_def] -CustomTestCreator(create_test, TestAllScenarios, TEST_PATH).create_tests() +CustomSpecTestCreator(create_test, TestAllScenarios, TEST_PATH).create_tests() class FinderThread(threading.Thread): diff --git a/test/test_transactions.py b/test/test_transactions.py index 9b51927d67..57495b0ab7 100644 --- a/test/test_transactions.py +++ b/test/test_transactions.py @@ -23,7 +23,7 @@ from test import client_context, unittest from test.utils import ( OvertCommandListener, - TestCreator, + SpecTestCreator, rs_client, single_client, wait_until, @@ -581,11 +581,11 @@ def run_scenario(self): return run_scenario -test_creator = TestCreator(create_test, TestTransactions, TEST_PATH) +test_creator = SpecTestCreator(create_test, TestTransactions, TEST_PATH) test_creator.create_tests() -TestCreator( +SpecTestCreator( create_test, TestTransactionsConvenientAPI, TestTransactionsConvenientAPI.TEST_PATH ).create_tests() diff --git a/test/utils.py b/test/utils.py index 810a02b872..86edae8808 100644 --- a/test/utils.py +++ b/test/utils.py @@ -391,7 +391,7 @@ def call_count(self): return len(self._call_list) -class TestCreator: +class SpecTestCreator: """Class to create test cases from specifications.""" def __init__(self, create_test, test_class, test_path): diff --git a/tox.ini b/tox.ini index 46c1a697de..f6e2f1f755 100644 --- a/tox.ini +++ b/tox.ini @@ -4,6 +4,8 @@ requires = envlist = # Test using the system Python. test, + # Test the extra encryption functionality. + test-encryption, # Run pre-commit on all files. lint, # Run pre-commit on all files, including stages that require manual fixes. 
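An aside for anyone reading along: once the ``test-encryption`` environment above is in ``envlist``, the encryption-enabled suite can be invoked on its own with ``tox -e test-encryption`` (or together with everything else via a bare ``tox``); its definition in the hunk below reuses the base ``test`` dependencies and simply enables the ``encryption`` extra.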
@@ -24,10 +26,21 @@ envlist = linkcheck [testenv:test] -description = run unit tests +description = run base set of unit tests with no extra functionality +deps = + pytest>=7 commands = python --version - python setup.py test {posargs} + pytest -v -rs {posargs} + +[testenv:test-encryption] +description = run base unit tests with encryption enabled +deps = {[testenv:test]deps} +extras = encryption +commands = + python --version + pytest {posargs} + [testenv:lint] description = run pre-commit @@ -45,25 +58,24 @@ commands = [testenv:typecheck-mypy] description = run mypy and pyright to typecheck +extras = + encryption + ocsp + zstd + aws deps = - mypy - zstandard + mypy==1.2.0 certifi; platform_system == "win32" or platform_system == "Darwin" typing_extensions - pyopenssl>=17.2.0 - requests<3.0.0 - service_identity>=18.1.0 - pymongocrypt>=1.6.0,<2.0.0 - pymongo-auth-aws<2.0.0 commands = mypy --install-types --non-interactive bson gridfs tools pymongo - mypy --install-types --non-interactive --disable-error-code var-annotated --disable-error-code attr-defined --disable-error-code union-attr --disable-error-code assignment --disable-error-code no-redef --disable-error-code index --allow-redefinition --allow-untyped-globals --exclude "test/mypy_fails/*.*" test + mypy --install-types --non-interactive --disable-error-code var-annotated --disable-error-code attr-defined --disable-error-code union-attr --disable-error-code assignment --disable-error-code no-redef --disable-error-code index --allow-redefinition --allow-untyped-globals --exclude "test/mypy_fails/*.*" --exclude "test/conftest.py" test mypy --install-types --non-interactive test/test_typing.py test/test_typing_strict.py [testenv:typecheck-pyright] description = run pyright to typecheck deps = - mypy + mypy==1.2.0 pyright==1.1.290 commands = pyright test/test_typing.py test/test_typing_strict.py From df07641687506054264b065e672f46492b825b41 Mon Sep 17 00:00:00 2001 From: Noah Stapp Date: Wed, 21 Jun 2023 16:25:58 -0700 Subject: [PATCH 0426/1588] PYTHON-2523 Remove unneeded bson-stdint-win32.h (#1253) --- bson/bson-endian.h | 1 - bson/bson-stdint-win32.h | 259 --------------------------------------- 2 files changed, 260 deletions(-) delete mode 100644 bson/bson-stdint-win32.h diff --git a/bson/bson-endian.h b/bson/bson-endian.h index c34a58dde1..e906b0776f 100644 --- a/bson/bson-endian.h +++ b/bson/bson-endian.h @@ -25,7 +25,6 @@ #ifdef _MSC_VER -# include "bson-stdint-win32.h" # define BSON_INLINE __inline #else # include diff --git a/bson/bson-stdint-win32.h b/bson/bson-stdint-win32.h deleted file mode 100644 index cb2acd9384..0000000000 --- a/bson/bson-stdint-win32.h +++ /dev/null @@ -1,259 +0,0 @@ -// ISO C9x compliant stdint.h for Microsoft Visual Studio -// Based on ISO/IEC 9899:TC2 Committee draft (May 6, 2005) WG14/N1124 -// -// Copyright (c) 2006-2013 Alexander Chemeris -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are met: -// -// 1. Redistributions of source code must retain the above copyright notice, -// this list of conditions and the following disclaimer. -// -// 2. Redistributions in binary form must reproduce the above copyright -// notice, this list of conditions and the following disclaimer in the -// documentation and/or other materials provided with the distribution. -// -// 3. 
Neither the name of the product nor the names of its contributors may -// be used to endorse or promote products derived from this software -// without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED -// WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF -// MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO -// EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, -// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; -// OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, -// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR -// OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF -// ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -// -/////////////////////////////////////////////////////////////////////////////// - -#ifndef _MSC_VER // [ -#error "Use this header only with Microsoft Visual C++ compilers!" -#endif // _MSC_VER ] - -#ifndef _MSC_STDINT_H_ // [ -#define _MSC_STDINT_H_ - -#if _MSC_VER > 1000 -#pragma once -#endif - -#if _MSC_VER >= 1600 // [ -#include -#else // ] _MSC_VER >= 1600 [ - -#include - -// For Visual Studio 6 in C++ mode and for many Visual Studio versions when -// compiling for ARM we should wrap include with 'extern "C++" {}' -// or compiler give many errors like this: -// error C2733: second C linkage of overloaded function 'wmemchr' not allowed -#ifdef __cplusplus -extern "C" { -#endif -# include -#ifdef __cplusplus -} -#endif - -// Define _W64 macros to mark types changing their size, like intptr_t. -#ifndef _W64 -# if !defined(__midl) && (defined(_X86_) || defined(_M_IX86)) && _MSC_VER >= 1300 -# define _W64 __w64 -# else -# define _W64 -# endif -#endif - - -// 7.18.1 Integer types - -// 7.18.1.1 Exact-width integer types - -// Visual Studio 6 and Embedded Visual C++ 4 doesn't -// realize that, e.g. char has the same size as __int8 -// so we give up on __intX for them. 
-#if (_MSC_VER < 1300) - typedef signed char int8_t; - typedef signed short int16_t; - typedef signed int int32_t; - typedef unsigned char uint8_t; - typedef unsigned short uint16_t; - typedef unsigned int uint32_t; -#else - typedef signed __int8 int8_t; - typedef signed __int16 int16_t; - typedef signed __int32 int32_t; - typedef unsigned __int8 uint8_t; - typedef unsigned __int16 uint16_t; - typedef unsigned __int32 uint32_t; -#endif -typedef signed __int64 int64_t; -typedef unsigned __int64 uint64_t; - - -// 7.18.1.2 Minimum-width integer types -typedef int8_t int_least8_t; -typedef int16_t int_least16_t; -typedef int32_t int_least32_t; -typedef int64_t int_least64_t; -typedef uint8_t uint_least8_t; -typedef uint16_t uint_least16_t; -typedef uint32_t uint_least32_t; -typedef uint64_t uint_least64_t; - -// 7.18.1.3 Fastest minimum-width integer types -typedef int8_t int_fast8_t; -typedef int16_t int_fast16_t; -typedef int32_t int_fast32_t; -typedef int64_t int_fast64_t; -typedef uint8_t uint_fast8_t; -typedef uint16_t uint_fast16_t; -typedef uint32_t uint_fast32_t; -typedef uint64_t uint_fast64_t; - -// 7.18.1.4 Integer types capable of holding object pointers -#ifdef _WIN64 // [ - typedef signed __int64 intptr_t; - typedef unsigned __int64 uintptr_t; -#else // _WIN64 ][ - typedef _W64 signed int intptr_t; - typedef _W64 unsigned int uintptr_t; -#endif // _WIN64 ] - -// 7.18.1.5 Greatest-width integer types -typedef int64_t intmax_t; -typedef uint64_t uintmax_t; - - -// 7.18.2 Limits of specified-width integer types - -#if !defined(__cplusplus) || defined(__STDC_LIMIT_MACROS) // [ See footnote 220 at page 257 and footnote 221 at page 259 - -// 7.18.2.1 Limits of exact-width integer types -#define INT8_MIN ((int8_t)_I8_MIN) -#define INT8_MAX _I8_MAX -#define INT16_MIN ((int16_t)_I16_MIN) -#define INT16_MAX _I16_MAX -#define INT32_MIN ((int32_t)_I32_MIN) -#define INT32_MAX _I32_MAX -#define INT64_MIN ((int64_t)_I64_MIN) -#define INT64_MAX _I64_MAX -#define UINT8_MAX _UI8_MAX -#define UINT16_MAX _UI16_MAX -#define UINT32_MAX _UI32_MAX -#define UINT64_MAX _UI64_MAX - -// 7.18.2.2 Limits of minimum-width integer types -#define INT_LEAST8_MIN INT8_MIN -#define INT_LEAST8_MAX INT8_MAX -#define INT_LEAST16_MIN INT16_MIN -#define INT_LEAST16_MAX INT16_MAX -#define INT_LEAST32_MIN INT32_MIN -#define INT_LEAST32_MAX INT32_MAX -#define INT_LEAST64_MIN INT64_MIN -#define INT_LEAST64_MAX INT64_MAX -#define UINT_LEAST8_MAX UINT8_MAX -#define UINT_LEAST16_MAX UINT16_MAX -#define UINT_LEAST32_MAX UINT32_MAX -#define UINT_LEAST64_MAX UINT64_MAX - -// 7.18.2.3 Limits of fastest minimum-width integer types -#define INT_FAST8_MIN INT8_MIN -#define INT_FAST8_MAX INT8_MAX -#define INT_FAST16_MIN INT16_MIN -#define INT_FAST16_MAX INT16_MAX -#define INT_FAST32_MIN INT32_MIN -#define INT_FAST32_MAX INT32_MAX -#define INT_FAST64_MIN INT64_MIN -#define INT_FAST64_MAX INT64_MAX -#define UINT_FAST8_MAX UINT8_MAX -#define UINT_FAST16_MAX UINT16_MAX -#define UINT_FAST32_MAX UINT32_MAX -#define UINT_FAST64_MAX UINT64_MAX - -// 7.18.2.4 Limits of integer types capable of holding object pointers -#ifdef _WIN64 // [ -# define INTPTR_MIN INT64_MIN -# define INTPTR_MAX INT64_MAX -# define UINTPTR_MAX UINT64_MAX -#else // _WIN64 ][ -# define INTPTR_MIN INT32_MIN -# define INTPTR_MAX INT32_MAX -# define UINTPTR_MAX UINT32_MAX -#endif // _WIN64 ] - -// 7.18.2.5 Limits of greatest-width integer types -#define INTMAX_MIN INT64_MIN -#define INTMAX_MAX INT64_MAX -#define UINTMAX_MAX UINT64_MAX - -// 7.18.3 Limits of other integer 
types - -#ifdef _WIN64 // [ -# define PTRDIFF_MIN _I64_MIN -# define PTRDIFF_MAX _I64_MAX -#else // _WIN64 ][ -# define PTRDIFF_MIN _I32_MIN -# define PTRDIFF_MAX _I32_MAX -#endif // _WIN64 ] - -#define SIG_ATOMIC_MIN INT_MIN -#define SIG_ATOMIC_MAX INT_MAX - -#ifndef SIZE_MAX // [ -# ifdef _WIN64 // [ -# define SIZE_MAX _UI64_MAX -# else // _WIN64 ][ -# define SIZE_MAX _UI32_MAX -# endif // _WIN64 ] -#endif // SIZE_MAX ] - -// WCHAR_MIN and WCHAR_MAX are also defined in -#ifndef WCHAR_MIN // [ -# define WCHAR_MIN 0 -#endif // WCHAR_MIN ] -#ifndef WCHAR_MAX // [ -# define WCHAR_MAX _UI16_MAX -#endif // WCHAR_MAX ] - -#define WINT_MIN 0 -#define WINT_MAX _UI16_MAX - -#endif // __STDC_LIMIT_MACROS ] - - -// 7.18.4 Limits of other integer types - -#if !defined(__cplusplus) || defined(__STDC_CONSTANT_MACROS) // [ See footnote 224 at page 260 - -// 7.18.4.1 Macros for minimum-width integer constants - -#define INT8_C(val) val##i8 -#define INT16_C(val) val##i16 -#define INT32_C(val) val##i32 -#define INT64_C(val) val##i64 - -#define UINT8_C(val) val##ui8 -#define UINT16_C(val) val##ui16 -#define UINT32_C(val) val##ui32 -#define UINT64_C(val) val##ui64 - -// 7.18.4.2 Macros for greatest-width integer constants -// These #ifndef's are needed to prevent collisions with . -// Check out Issue 9 for the details. -#ifndef INTMAX_C // [ -# define INTMAX_C INT64_C -#endif // INTMAX_C ] -#ifndef UINTMAX_C // [ -# define UINTMAX_C UINT64_C -#endif // UINTMAX_C ] - -#endif // __STDC_CONSTANT_MACROS ] - -#endif // _MSC_VER >= 1600 ] - -#endif // _MSC_STDINT_H_ ] From e78a91ef2858b666f1bfd8fb4de420074d940af3 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Thu, 22 Jun 2023 13:00:55 -0500 Subject: [PATCH 0427/1588] PYTHON-2965 Migrate to a PEP517 compliant build system (#1252) --- .evergreen/build-mac.sh | 5 +- .evergreen/build-manylinux-internal.sh | 3 +- .evergreen/build-windows.sh | 3 +- .evergreen/config.yml | 3 +- .evergreen/run-doctests.sh | 4 +- README.rst | 2 +- RELEASE.rst | 2 +- doc/index.rst | 3 +- doc/installation.rst | 4 +- pyproject.toml | 93 ++++++++++++ setup.py | 187 ++----------------------- tox.ini | 3 +- 12 files changed, 120 insertions(+), 192 deletions(-) create mode 100644 pyproject.toml diff --git a/.evergreen/build-mac.sh b/.evergreen/build-mac.sh index 60846ae92a..4e8be8cf58 100755 --- a/.evergreen/build-mac.sh +++ b/.evergreen/build-mac.sh @@ -14,9 +14,8 @@ PYTHON=/Library/Frameworks/Python.framework/Versions/$VERSION/bin/python3 rm -rf build createvirtualenv $PYTHON releasevenv -python -m pip install --upgrade wheel -python -m pip install setuptools==63.2.0 -python setup.py bdist_wheel +python -m pip install build +python -m build --wheel . deactivate || true rm -rf releasevenv diff --git a/.evergreen/build-manylinux-internal.sh b/.evergreen/build-manylinux-internal.sh index 7c3747f4e2..6f1c58fd86 100755 --- a/.evergreen/build-manylinux-internal.sh +++ b/.evergreen/build-manylinux-internal.sh @@ -16,7 +16,8 @@ for PYTHON in /opt/python/*/bin/python; do fi # https://github.com/pypa/manylinux/issues/49 rm -rf build - $PYTHON setup.py bdist_wheel + $PYTHON -m pip install build + $PYTHON -m build --wheel . 
rm -rf build # Audit wheels and write manylinux tag diff --git a/.evergreen/build-windows.sh b/.evergreen/build-windows.sh index aeb16892b1..8748e5c18f 100755 --- a/.evergreen/build-windows.sh +++ b/.evergreen/build-windows.sh @@ -13,7 +13,8 @@ for VERSION in 37 38 39 310 311; do "C:/Python/32/Python${VERSION}/python.exe") for PYTHON in "${_pythons[@]}"; do rm -rf build - $PYTHON setup.py bdist_wheel + $PYTHON -m pip install build + $PYTHON -m build --wheel . # Test that each wheel is installable. for release in dist/*; do diff --git a/.evergreen/config.yml b/.evergreen/config.yml index df126f57a1..f3c159a1df 100644 --- a/.evergreen/config.yml +++ b/.evergreen/config.yml @@ -1121,7 +1121,8 @@ functions: done # Build source distribution. cd src/ - /opt/python/3.7/bin/python3 setup.py sdist + /opt/python/3.7/bin/python3 -m pip install build + /opt/python/3.7/bin/python3 -m build --sdist . cp dist/* ../releases - command: archive.targz_pack params: diff --git a/.evergreen/run-doctests.sh b/.evergreen/run-doctests.sh index eebb0f784c..39e5102b6a 100644 --- a/.evergreen/run-doctests.sh +++ b/.evergreen/run-doctests.sh @@ -3,5 +3,5 @@ set -o xtrace set -o errexit -${PYTHON_BINARY} setup.py clean -${PYTHON_BINARY} setup.py doc -t +${PYTHON_BINARY} -m pip install tox +${PYTHON_BINARY} -m tox -e doc-test diff --git a/README.rst b/README.rst index 71d47bdc0b..6274e2c9dd 100644 --- a/README.rst +++ b/README.rst @@ -79,7 +79,7 @@ Or ``easy_install`` from You can also download the project source and do:: - $ python setup.py install + $ pip install . Do **not** install the "bson" package from pypi. PyMongo comes with its own bson package; doing "easy_install bson" installs a third-party package that diff --git a/RELEASE.rst b/RELEASE.rst index 4150126f22..caa67d3819 100644 --- a/RELEASE.rst +++ b/RELEASE.rst @@ -36,7 +36,7 @@ Doing a Release To test locally, ``python3 setup.py test`` will build the C extensions and test. ``python3 tools/clean.py`` will remove the extensions, and then ``python3 setup.py --no_ext test`` will run the tests without - them. You can also run the doctests: ``python3 setup.py doc -t``. + them. You can also run the doctests: ``tox -e doc-test``. 2. Check Jira to ensure all the tickets in this version have been completed. diff --git a/doc/index.rst b/doc/index.rst index e474d27d8f..7e357c2a4b 100644 --- a/doc/index.rst +++ b/doc/index.rst @@ -102,7 +102,8 @@ following command from the root directory of the **PyMongo** source: .. code-block:: bash - $ python setup.py doc + $ pip install tox + $ tox -e docs Indices and tables ------------------ diff --git a/doc/installation.rst b/doc/installation.rst index c4cbc78d93..8ba21b0f9d 100644 --- a/doc/installation.rst +++ b/doc/installation.rst @@ -89,7 +89,7 @@ latest source from GitHub and install the driver from the resulting tree:: $ git clone https://github.com/mongodb/mongo-python-driver.git pymongo $ cd pymongo/ - $ python3 setup.py install + $ pip install . Installing from source on Unix .............................. @@ -186,7 +186,7 @@ If you wish to install PyMongo without the C extensions, even if the extensions build properly, it can be done using a command line option to *setup.py*:: - $ python3 setup.py --no_ext install + $ NO_EXT=1 python -m pip install . 
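One way to check what a given install actually produced — a minimal sketch relying on the long-standing :func:`pymongo.has_c` and :func:`bson.has_c` helpers, which report whether the C extensions are in use::

    >>> import bson, pymongo
    >>> pymongo.has_c()   # expect False after a NO_EXT=1 install
    False
    >>> bson.has_c()
    False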
Installing a beta or release candidate -------------------------------------- diff --git a/pyproject.toml b/pyproject.toml new file mode 100644 index 0000000000..3ad35c0b43 --- /dev/null +++ b/pyproject.toml @@ -0,0 +1,93 @@ +[build-system] +requires = ["setuptools>=63.0"] +build-backend = "setuptools.build_meta" + +[project] +name = "pymongo" +dynamic = ["version"] +description = "Python driver for MongoDB " +readme = "README.rst" +license = {file="LICENSE"} +requires-python = ">=3.7" +authors = [ + { name = "The MongoDB Python Team" }, +] +keywords = [ + "bson", + "gridfs", + "mongo", + "mongodb", + "pymongo", +] +classifiers = [ + "Development Status :: 5 - Production/Stable", + "Intended Audience :: Developers", + "License :: OSI Approved :: Apache Software License", + "Operating System :: MacOS :: MacOS X", + "Operating System :: Microsoft :: Windows", + "Operating System :: POSIX", + "Programming Language :: Python :: Implementation :: CPython", + "Programming Language :: Python :: Implementation :: PyPy", + "Programming Language :: Python :: 3", + "Programming Language :: Python :: 3 :: Only", + "Programming Language :: Python :: 3.7", + "Programming Language :: Python :: 3.8", + "Programming Language :: Python :: 3.9", + "Programming Language :: Python :: 3.10", + "Programming Language :: Python :: 3.11", + "Topic :: Database", + "Typing :: Typed", +] +dependencies = [ + "dnspython>=1.16.0,<3.0.0", +] + +[project.optional-dependencies] +aws = [ + "pymongo-auth-aws<2.0.0", +] +encryption = [ + "pymongo[aws]", + "pymongocrypt>=1.6.0,<2.0.0", +] +gssapi = [ + "pykerberos;os.name!='nt'", + "winkerberos>=0.5.0;os.name=='nt'" +] +# PyOpenSSL 17.0.0 introduced support for OCSP. 17.1.0 introduced +# a related feature we need. 17.2.0 fixes a bug +# in set_default_verify_paths we should really avoid. +# service_identity 18.1.0 introduced support for IP addr matching. +# Fallback to certifi on Windows if we can't load CA certs from the system +# store and just use certifi on macOS. +# https://www.pyopenssl.org/en/stable/api/ssl.html#OpenSSL.SSL.Context.set_default_verify_paths +ocsp = [ + "certifi;os.name=='nt' or sys_platform=='darwin'", + "pyopenssl>=17.2.0", + "requests<3.0.0", + "service_identity>=18.1.0", +] +snappy = [ + "python-snappy", +] +# PYTHON-3423 Removed in 4.3 but kept here to avoid pip warnings. +srv = [] +tls = [] +# PYTHON-2133 Removed in 4.0 but kept here to avoid pip warnings. 
+zstd = [ + "zstandard", +] + +[project.urls] +Homepage = "http://github.com/mongodb/mongo-python-driver" + +[tool.setuptools.dynamic] +version = {attr = "pymongo._version.__version__"} + +[tool.setuptools.packages.find] +include = ["bson","gridfs", "pymongo"] + +[tool.setuptools.package-data] +bson=["py.typed", "*.pyi"] +pymongo=["py.typed", "*.pyi"] +gridfs=["py.typed", "*.pyi"] diff --git a/setup.py b/setup.py index e570d04c5a..de8f1b4c18 100755 --- a/setup.py +++ b/setup.py @@ -1,69 +1,16 @@ import os -import platform -import re import sys import warnings -if sys.version_info[:3] < (3, 7): - raise RuntimeError("Python version >= 3.7 required.") - - # Hack to silence atexit traceback in some Python versions try: import multiprocessing # noqa: F401 except ImportError: pass -from setuptools import setup - -if sys.version_info[:2] < (3, 10): - from distutils.cmd import Command - from distutils.command.build_ext import build_ext - from distutils.core import Extension -else: - from setuptools import Command - from setuptools.command.build_ext import build_ext - from setuptools.extension import Extension - -_HAVE_SPHINX = True -try: - from sphinx.cmd import build as sphinx -except ImportError: - try: - import sphinx - except ImportError: - _HAVE_SPHINX = False - -version_ns = {} -with open("pymongo/_version.py") as fp: - exec(fp.read(), version_ns) -version = version_ns["__version__"] - -f = open("README.rst") -try: - try: - readme_content = f.read() - except BaseException: - readme_content = "" -finally: - f.close() - -# PYTHON-654 - Clang doesn't support -mno-fused-madd but the pythons Apple -# ships are built with it. This is a problem starting with Xcode 5.1 -# since clang 3.4 errors out when it encounters unrecognized compiler -# flags. This hack removes -mno-fused-madd from the CFLAGS automatically -# generated by distutils for Apple provided pythons, allowing C extension -# builds to complete without error. The inspiration comes from older -# versions of distutils.sysconfig.get_config_vars. -if sys.platform == "darwin" and "clang" in platform.python_compiler().lower(): - from distutils.sysconfig import get_config_vars - - res = get_config_vars() - for key in ("CFLAGS", "PY_CFLAGS"): - if key in res: - flags = res[key] - flags = re.sub("-mno-fused-madd", "", flags) - res[key] = flags +from setuptools import Command, setup +from setuptools.command.build_ext import build_ext +from setuptools.extension import Extension class test(Command): @@ -126,55 +73,6 @@ def run(self): sys.exit(not result.wasSuccessful()) -class doc(Command): - - description = "generate or test documentation" - - user_options = [("test", "t", "run doctests instead of generating documentation")] - - boolean_options = ["test"] - - def initialize_options(self): - self.test = False - - def finalize_options(self): - pass - - def run(self): - - if not _HAVE_SPHINX: - raise RuntimeError("You must install Sphinx to build or test the documentation.") - - if self.test: - path = os.path.join(os.path.abspath("."), "doc", "_build", "doctest") - mode = "doctest" - else: - path = os.path.join(os.path.abspath("."), "doc", "_build", version) - mode = "html" - - try: - os.makedirs(path) - except BaseException: - pass - - sphinx_args = ["-E", "-b", mode, "doc", path] - - # sphinx.main calls sys.exit when sphinx.build_main exists. - # Call build_main directly so we can check status and print - # the full path to the built docs. 
- if hasattr(sphinx, "build_main"): - status = sphinx.build_main(sphinx_args) - else: - status = sphinx.main(sphinx_args) - - if status: - raise RuntimeError("documentation step '%s' failed" % (mode,)) - - sys.stdout.write( - "\nDocumentation step '%s' performed, results here:\n %s/\n" % (mode, path) - ) - - class custom_build_ext(build_ext): """Allow C extension building to fail. @@ -272,39 +170,10 @@ def build_extension(self, ext): ), ] -# PyOpenSSL 17.0.0 introduced support for OCSP. 17.1.0 introduced -# a related feature we need. 17.2.0 fixes a bug -# in set_default_verify_paths we should really avoid. -# service_identity 18.1.0 introduced support for IP addr matching. -pyopenssl_reqs = ["pyopenssl>=17.2.0", "requests<3.0.0", "service_identity>=18.1.0"] -if sys.platform in ("win32", "darwin"): - # Fallback to certifi on Windows if we can't load CA certs from the system - # store and just use certifi on macOS. - # https://www.pyopenssl.org/en/stable/api/ssl.html#OpenSSL.SSL.Context.set_default_verify_paths - pyopenssl_reqs.append("certifi") - -aws_reqs = ["pymongo-auth-aws<2.0.0"] - -extras_require = { - "encryption": ["pymongocrypt>=1.6.0,<2.0.0"] + aws_reqs, - "ocsp": pyopenssl_reqs, - "snappy": ["python-snappy"], - "zstd": ["zstandard"], - "aws": aws_reqs, - "srv": [], # PYTHON-3423 Removed in 4.3 but kept here to avoid pip warnings. - "tls": [], # PYTHON-2133 Removed in 4.0 but kept here to avoid pip warnings. -} - -# GSSAPI extras -if sys.platform == "win32": - extras_require["gssapi"] = ["winkerberos>=0.5.0"] -else: - extras_require["gssapi"] = ["pykerberos"] - -extra_opts = {} - -if "--no_ext" in sys.argv: + +if "--no_ext" in sys.argv or "NO_EXT" in os.environ: sys.argv.remove("--no_ext") + ext_modules = [] elif sys.platform.startswith("java") or sys.platform == "cli" or "PyPy" in sys.version: sys.stdout.write( """ @@ -314,46 +183,8 @@ def build_extension(self, ext): *****************************************************\n """ ) -else: - extra_opts["ext_modules"] = ext_modules + ext_modules = [] setup( - name="pymongo", - version=version, - description="Python driver for MongoDB ", - long_description=readme_content, - author="The MongoDB Python Team", - url="http://github.com/mongodb/mongo-python-driver", - keywords=["mongo", "mongodb", "pymongo", "gridfs", "bson"], - install_requires=["dnspython>=1.16.0,<3.0.0"], - license="Apache License, Version 2.0", - python_requires=">=3.7", - classifiers=[ - "Development Status :: 5 - Production/Stable", - "Intended Audience :: Developers", - "License :: OSI Approved :: Apache Software License", - "Operating System :: MacOS :: MacOS X", - "Operating System :: Microsoft :: Windows", - "Operating System :: POSIX", - "Programming Language :: Python :: 3", - "Programming Language :: Python :: 3 :: Only", - "Programming Language :: Python :: 3.7", - "Programming Language :: Python :: 3.8", - "Programming Language :: Python :: 3.9", - "Programming Language :: Python :: 3.10", - "Programming Language :: Python :: 3.11", - "Programming Language :: Python :: Implementation :: CPython", - "Programming Language :: Python :: Implementation :: PyPy", - "Topic :: Database", - "Typing :: Typed", - ], - cmdclass={"build_ext": custom_build_ext, "doc": doc, "test": test}, - extras_require=extras_require, - packages=["bson", "pymongo", "gridfs"], - package_data={ - "bson": ["py.typed", "*.pyi"], - "pymongo": ["py.typed", "*.pyi"], - "gridfs": ["py.typed", "*.pyi"], - }, - **extra_opts -) + cmdclass={"build_ext": custom_build_ext, "test": test}, 
ext_modules=ext_modules +) # type:ignore diff --git a/tox.ini b/tox.ini index f6e2f1f755..ba53a2011e 100644 --- a/tox.ini +++ b/tox.ini @@ -103,7 +103,7 @@ commands = [testenv:doc] description = build sphinx docs deps = - sphinx + -rdoc/docs-requirements.txt commands = sphinx-build -E -b html doc ./doc/_build/html @@ -111,6 +111,7 @@ commands = description = run sphinx doc tests deps = {[testenv:doc]deps} + gevent commands = sphinx-build -E -b doctest doc ./doc/_build/doctest From 424e6c46fa8ea39904aa1083059b48371af47da5 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Thu, 22 Jun 2023 14:10:27 -0700 Subject: [PATCH 0428/1588] PYTHON-3762 Remove global code owners (#1256) --- .github/CODEOWNERS | 2 -- 1 file changed, 2 deletions(-) delete mode 100644 .github/CODEOWNERS diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS deleted file mode 100644 index 3be0c9b0d1..0000000000 --- a/.github/CODEOWNERS +++ /dev/null @@ -1,2 +0,0 @@ -# Global owner for repo -* @blink1073 @NoahStapp @ShaneHarvey From 2a4dc9cb0c0e3ed93ff2002f696690dab6e64dfa Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Thu, 22 Jun 2023 16:18:55 -0500 Subject: [PATCH 0429/1588] PYTHON-3760 Add C extension building as part of tox test environment (#1255) --- setup.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/setup.py b/setup.py index de8f1b4c18..2c1a52ecaf 100755 --- a/setup.py +++ b/setup.py @@ -126,6 +126,8 @@ def run(self): try: build_ext.run(self) except Exception: + if "TOX_ENV_NAME" in os.environ: + raise e = sys.exc_info()[1] sys.stdout.write("%s\n" % str(e)) warnings.warn( @@ -141,6 +143,8 @@ def build_extension(self, ext): try: build_ext.build_extension(self, ext) except Exception: + if "TOX_ENV_NAME" in os.environ: + raise e = sys.exc_info()[1] sys.stdout.write("%s\n" % str(e)) warnings.warn( From a750098057ffff0d9c9e5c32e14ac6e41946c4d1 Mon Sep 17 00:00:00 2001 From: Iris <58442094+sleepyStick@users.noreply.github.com> Date: Thu, 22 Jun 2023 14:20:33 -0700 Subject: [PATCH 0430/1588] PYTHON-3750 add types to server.py (#1248) --- pymongo/server.py | 67 +++++++++++++++++++++++++++++++++------------ pymongo/topology.py | 2 +- test/test_server.py | 2 +- 3 files changed, 51 insertions(+), 20 deletions(-) diff --git a/pymongo/server.py b/pymongo/server.py index 2eb91c5b5d..840d8b7cb8 100644 --- a/pymongo/server.py +++ b/pymongo/server.py @@ -13,22 +13,42 @@ # permissions and limitations under the License. 
"""Communicate with one MongoDB server in a topology.""" +from __future__ import annotations from datetime import datetime +from typing import TYPE_CHECKING, Any, Callable, List, Mapping, Optional, Tuple, Union from bson import _decode_all_selective from pymongo.errors import NotPrimaryError, OperationFailure from pymongo.helpers import _check_command_response, _handle_reauth -from pymongo.message import _convert_exception, _OpMsg +from pymongo.message import _convert_exception, _GetMore, _OpMsg, _Query from pymongo.response import PinnedResponse, Response +if TYPE_CHECKING: + from contextlib import _GeneratorContextManager + from queue import Queue + from weakref import ReferenceType + + from bson.objectid import ObjectId + from pymongo.mongo_client import _MongoClientErrorHandler + from pymongo.monitor import Monitor + from pymongo.monitoring import _EventListeners + from pymongo.pool import Pool, SocketInfo + from pymongo.server_description import ServerDescription + _CURSOR_DOC_FIELDS = {"cursor": {"firstBatch": 1, "nextBatch": 1}} class Server: def __init__( - self, server_description, pool, monitor, topology_id=None, listeners=None, events=None - ): + self, + server_description: ServerDescription, + pool: Pool, + monitor: Monitor, + topology_id: Optional[ObjectId] = None, + listeners: Optional[_EventListeners] = None, + events: Optional[ReferenceType[Queue]] = None, + ) -> None: """Represent one MongoDB server.""" self._description = server_description self._pool = pool @@ -38,9 +58,9 @@ def __init__( self._listener = listeners self._events = None if self._publish: - self._events = events() + self._events = events() # type: ignore[misc] - def open(self): + def open(self) -> None: """Start monitoring, or restart after a fork. Multiple calls have no effect. @@ -48,11 +68,11 @@ def open(self): if not self._pool.opts.load_balanced: self._monitor.open() - def reset(self, service_id=None): + def reset(self, service_id: Optional[ObjectId] = None) -> None: """Clear the connection pool.""" self.pool.reset(service_id) - def close(self): + def close(self) -> None: """Clear the connection pool and stop the monitor. Reconnect with open(). @@ -69,12 +89,19 @@ def close(self): self._monitor.close() self._pool.reset_without_pause() - def request_check(self): + def request_check(self) -> None: """Check the server's state soon.""" self._monitor.request_check() @_handle_reauth - def run_operation(self, sock_info, operation, read_preference, listeners, unpack_res): + def run_operation( + self, + sock_info: SocketInfo, + operation: Union[_Query, _GetMore], + read_preference: bool, + listeners: _EventListeners, + unpack_res: Callable[..., List[Mapping[str, Any]]], + ) -> Response: """Run a _Query or _GetMore operation and return a Response object. This method is used only to run _Query/_GetMore operations from @@ -84,7 +111,7 @@ def run_operation(self, sock_info, operation, read_preference, listeners, unpack :Parameters: - `sock_info`: A SocketInfo instance. - `operation`: A _Query or _GetMore object. - - `set_secondary_okay`: Pass to operation.get_message. + - `read_preference`: The read preference to use. - `listeners`: Instance of _EventListeners or None. - `unpack_res`: A callable that decodes the wire protocol response. 
""" @@ -215,34 +242,38 @@ def run_operation(self, sock_info, operation, read_preference, listeners, unpack return response - def get_socket(self, handler=None): + def get_socket( + self, handler: Optional[_MongoClientErrorHandler] = None + ) -> _GeneratorContextManager[SocketInfo]: return self.pool.get_socket(handler) @property - def description(self): + def description(self) -> ServerDescription: return self._description @description.setter - def description(self, server_description): + def description(self, server_description: ServerDescription) -> None: assert server_description.address == self._description.address self._description = server_description @property - def pool(self): + def pool(self) -> Pool: return self._pool - def _split_message(self, message): + def _split_message( + self, message: Union[Tuple[int, Any], Tuple[int, Any, int]] + ) -> Tuple[int, Any, int]: """Return request_id, data, max_doc_size. :Parameters: - `message`: (request_id, data, max_doc_size) or (request_id, data) """ if len(message) == 3: - return message + return message # type: ignore[return-value] else: # get_more and kill_cursors messages don't include BSON documents. - request_id, data = message + request_id, data = message # type: ignore[misc] return request_id, data, 0 - def __repr__(self): + def __repr__(self) -> str: return f"<{self.__class__.__name__} {self._description!r}>" diff --git a/pymongo/topology.py b/pymongo/topology.py index 9759b39f9f..0a2eaf9420 100644 --- a/pymongo/topology.py +++ b/pymongo/topology.py @@ -732,7 +732,7 @@ def _update_servers(self): ) weak = None - if self._publish_server: + if self._publish_server and self._events is not None: weak = weakref.ref(self._events) server = Server( server_description=sd, diff --git a/test/test_server.py b/test/test_server.py index 064d77d024..58e39edd7f 100644 --- a/test/test_server.py +++ b/test/test_server.py @@ -29,7 +29,7 @@ class TestServer(unittest.TestCase): def test_repr(self): hello = Hello({"ok": 1}) sd = ServerDescription(("localhost", 27017), hello) - server = Server(sd, pool=object(), monitor=object()) + server = Server(sd, pool=object(), monitor=object()) # type: ignore[arg-type] self.assertTrue("Standalone" in str(server)) From eb8013ce026a639339603361e45364c3498a66cb Mon Sep 17 00:00:00 2001 From: Iris <58442094+sleepyStick@users.noreply.github.com> Date: Thu, 22 Jun 2023 14:21:04 -0700 Subject: [PATCH 0431/1588] PYTHON-3740 add types to helpers.py (#1246) --- pymongo/cursor.py | 3 +- pymongo/helpers.py | 67 ++++++++++++++++++++++++++++++++----------- pymongo/operations.py | 6 ++-- 3 files changed, 55 insertions(+), 21 deletions(-) diff --git a/pymongo/cursor.py b/pymongo/cursor.py index 8d131a711e..a5722c8d08 100644 --- a/pymongo/cursor.py +++ b/pymongo/cursor.py @@ -274,6 +274,7 @@ def __init__( self.__show_record_id = show_record_id self.__allow_disk_use = allow_disk_use self.__snapshot = snapshot + self.__hint: Union[str, SON[str, Any], None] self.__set_hint(hint) # Exhaust cursor support @@ -437,7 +438,7 @@ def close(self) -> None: def __query_spec(self): """Get the spec to use for a query.""" - operators = {} + operators: Dict[str, Any] = {} if self.__ordering: operators["$orderby"] = self.__ordering if self.__explain: diff --git a/pymongo/helpers.py b/pymongo/helpers.py index f4582854dc..4b26c36cff 100644 --- a/pymongo/helpers.py +++ b/pymongo/helpers.py @@ -13,11 +13,26 @@ # limitations under the License. 
"""Bits and pieces used by the driver that don't really fit elsewhere.""" +from __future__ import annotations import sys import traceback from collections import abc -from typing import Any, List, NoReturn +from typing import ( + TYPE_CHECKING, + Any, + Callable, + Iterable, + List, + Mapping, + NoReturn, + Optional, + Sequence, + Tuple, + TypeVar, + Union, + cast, +) from bson.son import SON from pymongo import ASCENDING @@ -34,8 +49,12 @@ ) from pymongo.hello import HelloCompat +if TYPE_CHECKING: + from pymongo.cursor import _Hint + from pymongo.operations import _IndexList + # From the SDAM spec, the "node is shutting down" codes. -_SHUTDOWN_CODES = frozenset( +_SHUTDOWN_CODES: frozenset = frozenset( [ 11600, # InterruptedAtShutdown 91, # ShutdownInProgress @@ -69,15 +88,17 @@ ) # Server code raised when re-authentication is required -_REAUTHENTICATION_REQUIRED_CODE = 391 +_REAUTHENTICATION_REQUIRED_CODE: int = 391 -def _gen_index_name(keys): +def _gen_index_name(keys: _IndexList) -> str: """Generate an index name from the set of fields it is over.""" return "_".join(["{}_{}".format(*item) for item in keys]) -def _index_list(key_or_list, direction=None): +def _index_list( + key_or_list: _Hint, direction: Optional[Union[int, str]] = None +) -> Sequence[Tuple[str, Union[int, str, Mapping[str, Any]]]]: """Helper to generate a list of (key, direction) pairs. Takes such a list, or a single key, or a single key and direction. @@ -93,7 +114,7 @@ def _index_list(key_or_list, direction=None): return list(key_or_list) elif not isinstance(key_or_list, (list, tuple)): raise TypeError("if no direction is specified, key_or_list must be an instance of list") - values = [] + values: List[Tuple[str, int]] = [] for item in key_or_list: if isinstance(item, str): item = (item, ASCENDING) @@ -101,7 +122,7 @@ def _index_list(key_or_list, direction=None): return values -def _index_document(index_list): +def _index_document(index_list: _IndexList) -> SON[str, Any]: """Helper to generate an index specifying document. Takes a list of (key, direction) pairs. @@ -134,13 +155,19 @@ def _index_document(index_list): def _check_command_response( - response, max_wire_version, allowable_errors=None, parse_write_concern_error=False -): + response: Mapping[str, Any], + max_wire_version: Optional[int], + allowable_errors: Optional[List[int]] = None, + parse_write_concern_error: bool = False, +) -> None: """Check the response to a command for errors.""" if "ok" not in response: # Server didn't recognize our message as a command. 
raise OperationFailure( - response.get("$err"), response.get("code"), response, max_wire_version + response.get("$err"), # type: ignore[arg-type] + response.get("code"), + response, + max_wire_version, ) if parse_write_concern_error and "writeConcernError" in response: @@ -210,7 +237,7 @@ def _raise_write_concern_error(error: Any) -> NoReturn: raise WriteConcernError(error.get("errmsg"), error.get("code"), error) -def _get_wce_doc(result): +def _get_wce_doc(result: Mapping[str, Any]) -> Optional[Mapping[str, Any]]: """Return the writeConcernError or None.""" wce = result.get("writeConcernError") if wce: @@ -222,7 +249,7 @@ def _get_wce_doc(result): return wce -def _check_write_command_response(result): +def _check_write_command_response(result: Mapping[str, Any]) -> None: """Backward compatibility helper for write command error handling.""" # Prefer write errors over write concern errors write_errors = result.get("writeErrors") @@ -234,7 +261,9 @@ def _check_write_command_response(result): _raise_write_concern_error(wce) -def _fields_list_to_dict(fields, option_name): +def _fields_list_to_dict( + fields: Union[Mapping[str, Any], Iterable[str]], option_name: str +) -> Mapping[str, Any]: """Takes a sequence of field names and returns a matching dictionary. ["a", "b"] becomes {"a": 1, "b": 1} @@ -254,7 +283,7 @@ def _fields_list_to_dict(fields, option_name): raise TypeError(f"{option_name} must be a mapping or list of key names") -def _handle_exception(): +def _handle_exception() -> None: """Print exceptions raised by subscribers to stderr.""" # Heavily influenced by logging.Handler.handleError. @@ -270,8 +299,12 @@ def _handle_exception(): del einfo -def _handle_reauth(func): - def inner(*args, **kwargs): +# See https://mypy.readthedocs.io/en/stable/generics.html?#decorator-factories +F = TypeVar("F", bound=Callable[..., Any]) + + +def _handle_reauth(func: F) -> F: + def inner(*args: Any, **kwargs: Any) -> Any: no_reauth = kwargs.pop("no_reauth", False) from pymongo.pool import SocketInfo @@ -299,4 +332,4 @@ def inner(*args, **kwargs): return func(*args, **kwargs) raise - return inner + return cast(F, inner) diff --git a/pymongo/operations.py b/pymongo/operations.py index 3ff4ed57a3..ed270c1ca6 100644 --- a/pymongo/operations.py +++ b/pymongo/operations.py @@ -94,7 +94,7 @@ def __init__( validate_is_mapping("filter", filter) if hint is not None: if not isinstance(hint, str): - hint = helpers._index_document(hint) + hint = helpers._index_document(hint) # type: ignore[assignment] self._filter = filter self._collation = collation self._hint = hint @@ -150,7 +150,7 @@ def __init__( validate_is_mapping("filter", filter) if hint is not None: if not isinstance(hint, str): - hint = helpers._index_document(hint) + hint = helpers._index_document(hint) # type: ignore[assignment] self._filter = filter self._collation = collation self._hint = hint @@ -213,7 +213,7 @@ def __init__( validate_boolean("upsert", upsert) if hint is not None: if not isinstance(hint, str): - hint = helpers._index_document(hint) + hint = helpers._index_document(hint) # type: ignore[assignment] self._filter = filter self._doc = replacement From 8b2320440863ccd87b50713a2dafdf0033016e4f Mon Sep 17 00:00:00 2001 From: stephan-hof Date: Mon, 26 Jun 2023 21:20:01 +0200 Subject: [PATCH 0432/1588] PYTHON-3758 Support overflow integers in fallback_encoder. 
 (#1243)

BSON only supports 64-bit integers within the range:
[-9_223_372_036_854_775_808, +9_223_372_036_854_775_807]

This change calls the fallback_encoder before raising OverflowError on
integers outside of this range.
---
 bson/__init__.py          | 13 ++++++++++++-
 bson/_cbsonmodule.c       | 24 ++++++++++++++++--------
 doc/contributors.rst      |  1 +
 test/test_custom_types.py | 25 +++++++++++++++++++++++++
 4 files changed, 54 insertions(+), 9 deletions(-)

diff --git a/bson/__init__.py b/bson/__init__.py
index d0a8daa273..fd11c9952b 100644
--- a/bson/__init__.py
+++ b/bson/__init__.py
@@ -896,12 +896,21 @@ def _name_value_to_bson(
     in_fallback_call: bool = False,
 ) -> bytes:
     """Encode a single name, value pair."""
+
+    was_integer_overflow = False
+
     # First see if the type is already cached. KeyError will only ever
     # happen once per subtype.
     try:
         return _ENCODERS[type(value)](name, value, check_keys, opts)  # type: ignore
     except KeyError:
         pass
+    except OverflowError:
+        if not isinstance(value, int):
+            raise
+
+        # Give the fallback_encoder a chance
+        was_integer_overflow = True
 
     # Second, fall back to trying _type_marker. This has to be done
     # before the loop below since users could subclass one of our
@@ -927,7 +936,7 @@ def _name_value_to_bson(
     # is done after trying the custom type encoder because checking for each
     # subtype is expensive.
     for base in _BUILT_IN_TYPES:
-        if isinstance(value, base):
+        if not was_integer_overflow and isinstance(value, base):
             func = _ENCODERS[base]
             # Cache this type for faster subsequent lookup.
             _ENCODERS[type(value)] = func
@@ -941,6 +950,8 @@ def _name_value_to_bson(
             name, fallback_encoder(value), check_keys, opts, in_fallback_call=True
         )
 
+    if was_integer_overflow:
+        raise OverflowError("BSON can only handle up to 8-byte ints")
     raise InvalidDocument(f"cannot encode object: {value!r}, of type: {type(value)!r}")
 
 
diff --git a/bson/_cbsonmodule.c b/bson/_cbsonmodule.c
index 5918a678c6..68ea6b63c4 100644
--- a/bson/_cbsonmodule.c
+++ b/bson/_cbsonmodule.c
@@ -662,6 +662,13 @@ static int write_element_to_buffer(PyObject* self, buffer_t buffer,
 
 static void
 _set_cannot_encode(PyObject* value) {
+    if (PyLong_Check(value)) {
+        if ((PyLong_AsLongLong(value) == -1) && PyErr_Occurred()) {
+            return PyErr_SetString(PyExc_OverflowError,
+                                   "MongoDB can only handle up to 8-byte ints");
+        }
+    }
+
     PyObject* type = NULL;
     PyObject* InvalidDocument = _error("InvalidDocument");
     if (InvalidDocument == NULL) {
@@ -1069,16 +1076,17 @@ static int _write_element_to_buffer(PyObject* self, buffer_t buffer,
             long long long_long_value;
             PyErr_Clear();
             long_long_value = PyLong_AsLongLong(value);
-            if (PyErr_Occurred()) { /* Overflow AGAIN */
-                PyErr_SetString(PyExc_OverflowError,
-                                "MongoDB can only handle up to 8-byte ints");
-                return 0;
+            if (PyErr_Occurred()) {
+                /* Ignore error and give the fallback_encoder a chance.
*/ + PyErr_Clear(); + } else { + *(pymongo_buffer_get_buffer(buffer) + type_byte) = 0x12; + return buffer_write_int64(buffer, (int64_t)long_long_value); } - *(pymongo_buffer_get_buffer(buffer) + type_byte) = 0x12; - return buffer_write_int64(buffer, (int64_t)long_long_value); + } else { + *(pymongo_buffer_get_buffer(buffer) + type_byte) = 0x10; + return buffer_write_int32(buffer, (int32_t)int_value); } - *(pymongo_buffer_get_buffer(buffer) + type_byte) = 0x10; - return buffer_write_int32(buffer, (int32_t)int_value); } else if (PyFloat_Check(value)) { const double d = PyFloat_AsDouble(value); *(pymongo_buffer_get_buffer(buffer) + type_byte) = 0x01; diff --git a/doc/contributors.rst b/doc/contributors.rst index e6d5e5310d..2a4ca1ea47 100644 --- a/doc/contributors.rst +++ b/doc/contributors.rst @@ -97,3 +97,4 @@ The following is a list of people who have contributed to - Sean Cheah (thalassemia) - Dainis Gorbunovs (DainisGorbunovs) - Iris Ho (sleepyStick) +- Stephan Hof (stephan-hof) diff --git a/test/test_custom_types.py b/test/test_custom_types.py index 14d7b4b05d..7e190483a3 100644 --- a/test/test_custom_types.py +++ b/test/test_custom_types.py @@ -274,6 +274,22 @@ def fallback_encoder(value): with self.assertRaises(TypeError): encode(document, codec_options=codecopts) + def test_call_only_once_for_not_handled_big_integers(self): + called_with = [] + + def fallback_encoder(value): + called_with.append(value) + return value + + codecopts = self._get_codec_options(fallback_encoder) + document = {"a": {"b": {"c": 2 << 65}}} + + msg = "MongoDB can only handle up to 8-byte ints" + with self.assertRaises(OverflowError, msg=msg): + encode(document, codec_options=codecopts) + + self.assertEqual(called_with, [2 << 65]) + class TestBSONTypeEnDeCodecs(unittest.TestCase): def test_instantiation(self): @@ -623,6 +639,15 @@ def setUp(self): def tearDown(self): self.db.test.drop() + def test_overflow_int_w_custom_decoder(self): + type_registry = TypeRegistry(fallback_encoder=lambda val: str(val)) + codec_options = CodecOptions(type_registry=type_registry) + collection = self.db.get_collection("test", codec_options=codec_options) + + collection.insert_one({"_id": 1, "data": 2**520}) + ret = collection.find_one() + self.assertEqual(ret["data"], str(2**520)) + def test_command_errors_w_custom_type_decoder(self): db = self.db test_doc = {"_id": 1, "data": "a"} From 3d2a650cbeaf990b7ca1493ba80c5c9a5fe8e56a Mon Sep 17 00:00:00 2001 From: Iris <58442094+sleepyStick@users.noreply.github.com> Date: Tue, 27 Jun 2023 12:32:05 -0700 Subject: [PATCH 0433/1588] PYTHON-3755 add types to aggregation.py (#1254) --- pymongo/aggregation.py | 80 ++++++++++++++++++++++++++-------------- pymongo/change_stream.py | 1 - 2 files changed, 53 insertions(+), 28 deletions(-) diff --git a/pymongo/aggregation.py b/pymongo/aggregation.py index a97455cb29..feac81c7c7 100644 --- a/pymongo/aggregation.py +++ b/pymongo/aggregation.py @@ -13,6 +13,10 @@ # permissions and limitations under the License. 
"""Perform aggregation operations on a collection or database.""" +from __future__ import annotations + +from collections.abc import Callable, Mapping, MutableMapping +from typing import TYPE_CHECKING, Any, Optional, Union from bson.son import SON from pymongo import common @@ -20,6 +24,16 @@ from pymongo.errors import ConfigurationError from pymongo.read_preferences import ReadPreference, _AggWritePref +if TYPE_CHECKING: + from pymongo.client_session import ClientSession + from pymongo.collection import Collection + from pymongo.command_cursor import CommandCursor + from pymongo.database import Database + from pymongo.pool import SocketInfo + from pymongo.read_preferences import _ServerMode + from pymongo.server import Server + from pymongo.typings import _Pipeline + class _AggregationCommand: """The internal abstract base class for aggregation cursors. @@ -31,17 +45,16 @@ class _AggregationCommand: def __init__( self, - target, - cursor_class, - pipeline, - options, - explicit_session, - let=None, - user_fields=None, - result_processor=None, - comment=None, - show_expanded_events=None, - ): + target: Union[Database, Collection], + cursor_class: type[CommandCursor], + pipeline: _Pipeline, + options: MutableMapping[str, Any], + explicit_session: bool, + let: Optional[Mapping[str, Any]] = None, + user_fields: Optional[MutableMapping[str, Any]] = None, + result_processor: Optional[Callable[[Mapping[str, Any], SocketInfo], None]] = None, + comment: Any = None, + ) -> None: if "explain" in options: raise ConfigurationError( "The explain option is not supported. Use Database.command instead." @@ -85,28 +98,31 @@ def __init__( self._collation = validate_collation_or_none(options.pop("collation", None)) self._max_await_time_ms = options.pop("maxAwaitTimeMS", None) - self._write_preference = None + self._write_preference: Optional[_AggWritePref] = None @property - def _aggregation_target(self): + def _aggregation_target(self) -> Union[str, int]: """The argument to pass to the aggregate command.""" raise NotImplementedError @property - def _cursor_namespace(self): + def _cursor_namespace(self) -> str: """The namespace in which the aggregate command is run.""" raise NotImplementedError - def _cursor_collection(self, cursor_doc): + def _cursor_collection(self, cursor_doc: Mapping[str, Any]) -> Collection: """The Collection used for the aggregate command cursor.""" raise NotImplementedError @property - def _database(self): + def _database(self) -> Database: """The database against which the aggregation command is run.""" raise NotImplementedError - def get_read_preference(self, session): + def get_read_preference( + self, session: Optional[ClientSession] + ) -> Union[_AggWritePref, _ServerMode]: + if self._write_preference: return self._write_preference pref = self._target._read_preference_for(session) @@ -114,7 +130,13 @@ def get_read_preference(self, session): self._write_preference = pref = _AggWritePref(pref) return pref - def get_cursor(self, session, server, sock_info, read_preference): + def get_cursor( + self, + session: ClientSession, + server: Server, + sock_info: SocketInfo, + read_preference: _ServerMode, + ) -> CommandCursor: # Serialize command. 
cmd = SON([("aggregate", self._aggregation_target), ("pipeline", self._pipeline)]) cmd.update(self._options) @@ -183,25 +205,27 @@ def get_cursor(self, session, server, sock_info, read_preference): class _CollectionAggregationCommand(_AggregationCommand): + _target: Collection + @property - def _aggregation_target(self): + def _aggregation_target(self) -> str: return self._target.name @property - def _cursor_namespace(self): + def _cursor_namespace(self) -> str: return self._target.full_name - def _cursor_collection(self, cursor): + def _cursor_collection(self, cursor: Mapping[str, Any]) -> Collection: """The Collection used for the aggregate command cursor.""" return self._target @property - def _database(self): + def _database(self) -> Database: return self._target.database class _CollectionRawAggregationCommand(_CollectionAggregationCommand): - def __init__(self, *args, **kwargs): + def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) # For raw-batches, we set the initial batchSize for the cursor to 0. @@ -210,19 +234,21 @@ def __init__(self, *args, **kwargs): class _DatabaseAggregationCommand(_AggregationCommand): + _target: Database + @property - def _aggregation_target(self): + def _aggregation_target(self) -> int: return 1 @property - def _cursor_namespace(self): + def _cursor_namespace(self) -> str: return f"{self._target.name}.$cmd.aggregate" @property - def _database(self): + def _database(self) -> Database: return self._target - def _cursor_collection(self, cursor): + def _cursor_collection(self, cursor: Mapping[str, Any]) -> Collection: """The Collection used for the aggregate command cursor.""" # Collection level aggregate may not always return the "ns" field # according to our MockupDB tests. Let's handle that case for db level diff --git a/pymongo/change_stream.py b/pymongo/change_stream.py index 3a4d968c18..1e2be563b7 100644 --- a/pymongo/change_stream.py +++ b/pymongo/change_stream.py @@ -238,7 +238,6 @@ def _run_aggregation_cmd(self, session, explicit_session): explicit_session, result_processor=self._process_result, comment=self._comment, - show_expanded_events=self._show_expanded_events, ) return self._client._retryable_read( cmd.get_cursor, self._target._read_preference_for(session), session From 940404ad3fec11ab8d86345d7542b86acc7df4a2 Mon Sep 17 00:00:00 2001 From: Iris <58442094+sleepyStick@users.noreply.github.com> Date: Tue, 27 Jun 2023 13:08:42 -0700 Subject: [PATCH 0434/1588] PYTHON-3771 add types to client_options.py (#1266) --- pymongo/client_options.py | 73 ++++++++++++++++++++++++--------------- 1 file changed, 45 insertions(+), 28 deletions(-) diff --git a/pymongo/client_options.py b/pymongo/client_options.py index 2e39b843ec..7e5be69283 100644 --- a/pymongo/client_options.py +++ b/pymongo/client_options.py @@ -13,25 +13,38 @@ # permissions and limitations under the License. 
"""Tools to parse mongo client options.""" +from __future__ import annotations -from typing import Optional +from typing import TYPE_CHECKING, Any, Callable, Mapping, Optional, Tuple from bson.codec_options import _parse_codec_options from pymongo import common -from pymongo.auth import _build_credentials_tuple +from pymongo.auth import MongoCredential, _build_credentials_tuple from pymongo.common import validate_boolean from pymongo.compression_support import CompressionSettings from pymongo.errors import ConfigurationError from pymongo.monitoring import _EventListeners from pymongo.pool import PoolOptions from pymongo.read_concern import ReadConcern -from pymongo.read_preferences import make_read_preference, read_pref_mode_from_name +from pymongo.read_preferences import ( + _ServerMode, + make_read_preference, + read_pref_mode_from_name, +) from pymongo.server_selectors import any_server_selector from pymongo.ssl_support import get_ssl_context from pymongo.write_concern import WriteConcern +if TYPE_CHECKING: + from bson.codec_options import CodecOptions + from pymongo.encryption import AutoEncryptionOpts + from pymongo.pyopenssl_context import SSLContext + from pymongo.server_selectors import Selection -def _parse_credentials(username, password, database, options): + +def _parse_credentials( + username: str, password: str, database: Optional[str], options: Mapping[str, Any] +) -> Optional[MongoCredential]: """Parse authentication credentials.""" mechanism = options.get("authmechanism", "DEFAULT" if username else None) source = options.get("authsource") @@ -40,7 +53,7 @@ def _parse_credentials(username, password, database, options): return None -def _parse_read_preference(options): +def _parse_read_preference(options: Mapping[str, Any]) -> _ServerMode: """Parse read preference options.""" if "read_preference" in options: return options["read_preference"] @@ -52,7 +65,7 @@ def _parse_read_preference(options): return make_read_preference(mode, tags, max_staleness) -def _parse_write_concern(options): +def _parse_write_concern(options: Mapping[str, Any]) -> WriteConcern: """Parse write concern options.""" concern = options.get("w") wtimeout = options.get("wtimeoutms") @@ -61,13 +74,13 @@ def _parse_write_concern(options): return WriteConcern(concern, wtimeout, j, fsync) -def _parse_read_concern(options): +def _parse_read_concern(options: Mapping[str, Any]) -> ReadConcern: """Parse read concern options.""" concern = options.get("readconcernlevel") return ReadConcern(concern) -def _parse_ssl_options(options): +def _parse_ssl_options(options: Mapping[str, Any]) -> Tuple[Optional[SSLContext], bool]: """Parse ssl options.""" use_tls = options.get("tls") if use_tls is not None: @@ -126,7 +139,9 @@ def _parse_ssl_options(options): return None, allow_invalid_hostnames -def _parse_pool_options(username, password, database, options): +def _parse_pool_options( + username: str, password: str, database: Optional[str], options: Mapping[str, Any] +) -> PoolOptions: """Parse connection pool options.""" credentials = _parse_credentials(username, password, database, options) max_pool_size = options.get("maxpoolsize", common.MAX_POOL_SIZE) @@ -175,7 +190,9 @@ class ClientOptions: instead. 
""" - def __init__(self, username, password, database, options): + def __init__( + self, username: str, password: str, database: Optional[str], options: Mapping[str, Any] + ): self.__options = options self.__codec_options = _parse_codec_options(options) self.__direct_connection = options.get("directconnection") @@ -200,66 +217,66 @@ def __init__(self, username, password, database, options): self.__timeout = options.get("timeoutms") @property - def _options(self): + def _options(self) -> Mapping[str, Any]: """The original options used to create this ClientOptions.""" return self.__options @property - def connect(self): + def connect(self) -> Optional[bool]: """Whether to begin discovering a MongoDB topology automatically.""" return self.__connect @property - def codec_options(self): + def codec_options(self) -> CodecOptions: """A :class:`~bson.codec_options.CodecOptions` instance.""" return self.__codec_options @property - def direct_connection(self): + def direct_connection(self) -> Optional[bool]: """Whether to connect to the deployment in 'Single' topology.""" return self.__direct_connection @property - def local_threshold_ms(self): + def local_threshold_ms(self) -> int: """The local threshold for this instance.""" return self.__local_threshold_ms @property - def server_selection_timeout(self): + def server_selection_timeout(self) -> int: """The server selection timeout for this instance in seconds.""" return self.__server_selection_timeout @property - def server_selector(self): + def server_selector(self) -> Callable[[Selection], Selection]: return self.__server_selector @property - def heartbeat_frequency(self): + def heartbeat_frequency(self) -> int: """The monitoring frequency in seconds.""" return self.__heartbeat_frequency @property - def pool_options(self): + def pool_options(self) -> PoolOptions: """A :class:`~pymongo.pool.PoolOptions` instance.""" return self.__pool_options @property - def read_preference(self): + def read_preference(self) -> _ServerMode: """A read preference instance.""" return self.__read_preference @property - def replica_set_name(self): + def replica_set_name(self) -> Optional[str]: """Replica set name or None.""" return self.__replica_set_name @property - def write_concern(self): + def write_concern(self) -> WriteConcern: """A :class:`~pymongo.write_concern.WriteConcern` instance.""" return self.__write_concern @property - def read_concern(self): + def read_concern(self) -> ReadConcern: """A :class:`~pymongo.read_concern.ReadConcern` instance.""" return self.__read_concern @@ -272,27 +289,27 @@ def timeout(self) -> Optional[float]: return self.__timeout @property - def retry_writes(self): + def retry_writes(self) -> bool: """If this instance should retry supported write operations.""" return self.__retry_writes @property - def retry_reads(self): + def retry_reads(self) -> bool: """If this instance should retry supported read operations.""" return self.__retry_reads @property - def auto_encryption_opts(self): + def auto_encryption_opts(self) -> Optional[AutoEncryptionOpts]: """A :class:`~pymongo.encryption.AutoEncryptionOpts` or None.""" return self.__auto_encryption_opts @property - def load_balanced(self): + def load_balanced(self) -> Optional[bool]: """True if the client was configured to connect to a load balancer.""" return self.__load_balanced @property - def event_listeners(self): + def event_listeners(self) -> _EventListeners: """The event listeners registered for this client. See :mod:`~pymongo.monitoring` for details. 
From 1e14e89d0e7f6f09b841adcacef65cd1e5675c44 Mon Sep 17 00:00:00 2001 From: Iris <58442094+sleepyStick@users.noreply.github.com> Date: Tue, 27 Jun 2023 13:13:07 -0700 Subject: [PATCH 0435/1588] PYTHON-3769 add types to auth_aws.py (#1264) --- pymongo/auth_aws.py | 14 +++++++++++--- 1 file changed, 11 insertions(+), 3 deletions(-) diff --git a/pymongo/auth_aws.py b/pymongo/auth_aws.py index bfa4c731d3..62aab6a219 100644 --- a/pymongo/auth_aws.py +++ b/pymongo/auth_aws.py @@ -13,6 +13,7 @@ # limitations under the License. """MONGODB-AWS Authentication helpers.""" +from __future__ import annotations try: import pymongo_auth_aws @@ -38,11 +39,18 @@ def set_cached_credentials(creds): pass +from typing import TYPE_CHECKING, Any, Mapping + import bson from bson.binary import Binary from bson.son import SON from pymongo.errors import ConfigurationError, OperationFailure +if TYPE_CHECKING: + from bson.typings import _DocumentIn, _ReadableBuffer + from pymongo.auth import MongoCredential + from pymongo.pool import SocketInfo + class _AwsSaslContext(AwsSaslContext): # type: ignore # Dependency injection: @@ -50,16 +58,16 @@ def binary_type(self): """Return the bson.binary.Binary type.""" return Binary - def bson_encode(self, doc): + def bson_encode(self, doc: _DocumentIn) -> bytes: """Encode a dictionary to BSON.""" return bson.encode(doc) - def bson_decode(self, data): + def bson_decode(self, data: _ReadableBuffer) -> Mapping[str, Any]: """Decode BSON to a dictionary.""" return bson.decode(data) -def _authenticate_aws(credentials, sock_info): +def _authenticate_aws(credentials: MongoCredential, sock_info: SocketInfo) -> None: """Authenticate using MONGODB-AWS.""" if not _HAVE_MONGODB_AWS: raise ConfigurationError( From 5397d74668d50dc450af3b4c108bdedaee952d85 Mon Sep 17 00:00:00 2001 From: Iris <58442094+sleepyStick@users.noreply.github.com> Date: Tue, 27 Jun 2023 13:13:25 -0700 Subject: [PATCH 0436/1588] PYTHON-3767 add types to ocsp_support.py (#1262) --- pymongo/ocsp_support.py | 129 ++++++++++++++++++++++++++-------------- 1 file changed, 84 insertions(+), 45 deletions(-) diff --git a/pymongo/ocsp_support.py b/pymongo/ocsp_support.py index dd070748a4..fa9bd1b7e6 100644 --- a/pymongo/ocsp_support.py +++ b/pymongo/ocsp_support.py @@ -13,11 +13,13 @@ # permissions and limitations under the License. 
"""Support for requesting and verifying OCSP responses.""" +from __future__ import annotations import logging as _logging import re as _re from datetime import datetime as _datetime from datetime import timezone +from typing import TYPE_CHECKING, Iterable, List, Optional, Type, Union, cast from cryptography.exceptions import InvalidSignature as _InvalidSignature from cryptography.hazmat.backends import default_backend as _default_backend @@ -51,6 +53,26 @@ from pymongo import _csot +if TYPE_CHECKING: + from cryptography.hazmat.primitives.asymmetric import dsa, ec, ed448, ed25519, rsa + from cryptography.hazmat.primitives.asymmetric.utils import Prehashed + from cryptography.hazmat.primitives.hashes import HashAlgorithm + from cryptography.x509 import Certificate, Name + from cryptography.x509.extensions import Extension, ExtensionTypeVar + from cryptography.x509.ocsp import OCSPRequest, OCSPResponse + from OpenSSL.SSL import Connection + + from pymongo.ocsp_cache import _OCSPCache + from pymongo.pyopenssl_context import _CallbackData + + CertificateIssuerPublicKeyTypes = Union[ + dsa.DSAPublicKey, + rsa.RSAPublicKey, + ec.EllipticCurvePublicKey, + ed25519.Ed25519PublicKey, + ed448.Ed448PublicKey, + ] + # Note: the functions in this module generally return 1 or 0. The reason # is simple. The entry point, ocsp_callback, is registered as a callback # with OpenSSL through PyOpenSSL. The callback must return 1 (success) or @@ -63,7 +85,7 @@ ) -def _load_trusted_ca_certs(cafile): +def _load_trusted_ca_certs(cafile: str) -> List[Certificate]: """Parse the tlsCAFile into a list of certificates.""" with open(cafile, "rb") as f: data = f.read() @@ -76,7 +98,9 @@ def _load_trusted_ca_certs(cafile): return trusted_ca_certs -def _get_issuer_cert(cert, chain, trusted_ca_certs): +def _get_issuer_cert( + cert: Certificate, chain: Iterable[Certificate], trusted_ca_certs: Optional[List[Certificate]] +) -> Optional[Certificate]: issuer_name = cert.issuer for candidate in chain: if candidate.subject == issuer_name: @@ -93,16 +117,21 @@ def _get_issuer_cert(cert, chain, trusted_ca_certs): return None -def _verify_signature(key, signature, algorithm, data): +def _verify_signature( + key: CertificateIssuerPublicKeyTypes, + signature: bytes, + algorithm: Union[Prehashed, HashAlgorithm, None], + data: bytes, +) -> int: # See cryptography.x509.Certificate.public_key # for the public key types. 
try: if isinstance(key, _RSAPublicKey): - key.verify(signature, data, _PKCS1v15(), algorithm) + key.verify(signature, data, _PKCS1v15(), algorithm) # type: ignore[arg-type] elif isinstance(key, _DSAPublicKey): - key.verify(signature, data, algorithm) + key.verify(signature, data, algorithm) # type: ignore[arg-type] elif isinstance(key, _EllipticCurvePublicKey): - key.verify(signature, data, _ECDSA(algorithm)) + key.verify(signature, data, _ECDSA(algorithm)) # type: ignore[arg-type] else: key.verify(signature, data) except _InvalidSignature: @@ -110,14 +139,16 @@ def _verify_signature(key, signature, algorithm, data): return 1 -def _get_extension(cert, klass): +def _get_extension( + cert: Certificate, klass: Type[ExtensionTypeVar] +) -> Optional[Extension[ExtensionTypeVar]]: try: return cert.extensions.get_extension_for_class(klass) except _ExtensionNotFound: return None -def _public_key_hash(cert): +def _public_key_hash(cert: Certificate) -> bytes: public_key = cert.public_key() # https://tools.ietf.org/html/rfc2560#section-4.2.1 # "KeyHash ::= OCTET STRING -- SHA-1 hash of responder's public key @@ -134,7 +165,9 @@ def _public_key_hash(cert): return digest.finalize() -def _get_certs_by_key_hash(certificates, issuer, responder_key_hash): +def _get_certs_by_key_hash( + certificates: Iterable[Certificate], issuer: Certificate, responder_key_hash: Optional[bytes] +) -> List[Certificate]: return [ cert for cert in certificates @@ -142,7 +175,9 @@ def _get_certs_by_key_hash(certificates, issuer, responder_key_hash): ] -def _get_certs_by_name(certificates, issuer, responder_name): +def _get_certs_by_name( + certificates: Iterable[Certificate], issuer: Certificate, responder_name: Optional[Name] +) -> List[Certificate]: return [ cert for cert in certificates @@ -150,7 +185,7 @@ def _get_certs_by_name(certificates, issuer, responder_name): ] -def _verify_response_signature(issuer, response): +def _verify_response_signature(issuer: Certificate, response: OCSPResponse) -> int: # Response object will have a responder_name or responder_key_hash # not both. name = response.responder_name @@ -185,7 +220,7 @@ def _verify_response_signature(issuer, response): _LOGGER.debug("Delegate not authorized for OCSP signing") return 0 if not _verify_signature( - issuer.public_key(), + cast(CertificateIssuerPublicKeyTypes, issuer.public_key()), responder_cert.signature, responder_cert.signature_hash_algorithm, responder_cert.tbs_certificate_bytes, @@ -194,7 +229,7 @@ def _verify_response_signature(issuer, response): return 0 # RFC6960, Section 3.2, Number 2 ret = _verify_signature( - responder_cert.public_key(), + cast(CertificateIssuerPublicKeyTypes, responder_cert.public_key()), response.signature, response.signature_hash_algorithm, response.tbs_response_bytes, @@ -204,14 +239,14 @@ def _verify_response_signature(issuer, response): return ret -def _build_ocsp_request(cert, issuer): +def _build_ocsp_request(cert: Certificate, issuer: Certificate) -> OCSPRequest: # https://cryptography.io/en/latest/x509/ocsp/#creating-requests builder = _OCSPRequestBuilder() builder = builder.add_certificate(cert, issuer, _SHA1()) return builder.build() -def _verify_response(issuer, response): +def _verify_response(issuer: Certificate, response: OCSPResponse) -> int: _LOGGER.debug("Verifying response") # RFC6960, Section 3.2, Number 2, 3 and 4 happen here. 
res = _verify_response_signature(issuer, response) @@ -232,7 +267,9 @@ def _verify_response(issuer, response): return 1 -def _get_ocsp_response(cert, issuer, uri, ocsp_response_cache): +def _get_ocsp_response( + cert: Certificate, issuer: Certificate, uri: Union[str, bytes], ocsp_response_cache: _OCSPCache +) -> Optional[OCSPResponse]: ocsp_request = _build_ocsp_request(cert, issuer) try: ocsp_response = ocsp_response_cache[ocsp_request] @@ -275,30 +312,32 @@ def _get_ocsp_response(cert, issuer, uri, ocsp_response_cache): return ocsp_response -def _ocsp_callback(conn, ocsp_bytes, user_data): +def _ocsp_callback(conn: Connection, ocsp_bytes: bytes, user_data: Optional[_CallbackData]) -> bool: """Callback for use with OpenSSL.SSL.Context.set_ocsp_client_callback.""" - cert = conn.get_peer_certificate() - if cert is None: + # always pass in user_data but OpenSSL requires it be optional + assert user_data + pycert = conn.get_peer_certificate() + if pycert is None: _LOGGER.debug("No peer cert?") - return 0 - cert = cert.to_cryptography() + return False + cert = pycert.to_cryptography() # Use the verified chain when available (pyopenssl>=20.0). if hasattr(conn, "get_verified_chain"): - chain = conn.get_verified_chain() + pychain = conn.get_verified_chain() trusted_ca_certs = None else: - chain = conn.get_peer_cert_chain() + pychain = conn.get_peer_cert_chain() trusted_ca_certs = user_data.trusted_ca_certs - if not chain: + if not pychain: _LOGGER.debug("No peer cert chain?") - return 0 - chain = [cer.to_cryptography() for cer in chain] + return False + chain = [cer.to_cryptography() for cer in pychain] issuer = _get_issuer_cert(cert, chain, trusted_ca_certs) must_staple = False # https://tools.ietf.org/html/rfc7633#section-4.2.3.1 - ext = _get_extension(cert, _TLSFeature) - if ext is not None: - for feature in ext.value: + ext_tls = _get_extension(cert, _TLSFeature) + if ext_tls is not None: + for feature in ext_tls.value: if feature == _TLSFeatureType.status_request: _LOGGER.debug("Peer presented a must-staple cert") must_staple = True @@ -310,29 +349,29 @@ def _ocsp_callback(conn, ocsp_bytes, user_data): _LOGGER.debug("Peer did not staple an OCSP response") if must_staple: _LOGGER.debug("Must-staple cert with no stapled response, hard fail.") - return 0 + return False if not user_data.check_ocsp_endpoint: _LOGGER.debug("OCSP endpoint checking is disabled, soft fail.") # No stapled OCSP response, checking responder URI disabled, soft fail. - return 1 + return True # https://tools.ietf.org/html/rfc6960#section-3.1 - ext = _get_extension(cert, _AuthorityInformationAccess) - if ext is None: + ext_aia = _get_extension(cert, _AuthorityInformationAccess) + if ext_aia is None: _LOGGER.debug("No authority access information, soft fail") # No stapled OCSP response, no responder URI, soft fail. - return 1 + return True uris = [ desc.access_location.value - for desc in ext.value + for desc in ext_aia.value if desc.access_method == _AuthorityInformationAccessOID.OCSP ] if not uris: _LOGGER.debug("No OCSP URI, soft fail") # No responder URI, soft fail. - return 1 + return True if issuer is None: _LOGGER.debug("No issuer cert?") - return 0 + return False _LOGGER.debug("Requesting OCSP data") # When requesting data from an OCSP endpoint we only fail on # successful, valid responses with a certificate status of REVOKED. 
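
One detail worth noting about the switch from ``1``/``0`` to ``True``/``False``
in the callback returns above: the module comment's contract (PyOpenSSL expects
1 for success and 0 for failure) still holds, because Python's ``bool`` is a
subclass of ``int``::

    # True/False still satisfy an int-returning callback contract.
    assert isinstance(True, int)
    assert int(True) == 1 and int(False) == 0
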
@@ -346,28 +385,28 @@ def _ocsp_callback(conn, ocsp_bytes, user_data): continue _LOGGER.debug("OCSP cert status: %r", response.certificate_status) if response.certificate_status == _OCSPCertStatus.GOOD: - return 1 + return True if response.certificate_status == _OCSPCertStatus.REVOKED: - return 0 + return False # Soft fail if we couldn't get a definitive status. _LOGGER.debug("No definitive OCSP cert status, soft fail") - return 1 + return True _LOGGER.debug("Peer stapled an OCSP response") if issuer is None: _LOGGER.debug("No issuer cert?") - return 0 + return False response = _load_der_ocsp_response(ocsp_bytes) _LOGGER.debug("OCSP response status: %r", response.response_status) # This happens in _request_ocsp when there is no stapled response so # we know if we can compare serial numbers for the request and response. if response.response_status != _OCSPResponseStatus.SUCCESSFUL: - return 0 + return False if not _verify_response(issuer, response): - return 0 + return False # Cache the verified, stapled response. ocsp_response_cache[_build_ocsp_request(cert, issuer)] = response _LOGGER.debug("OCSP cert status: %r", response.certificate_status) if response.certificate_status == _OCSPCertStatus.REVOKED: - return 0 - return 1 + return False + return True From 1f7cf0941d061db15661f1e77c92859fc2f7fd28 Mon Sep 17 00:00:00 2001 From: Iris <58442094+sleepyStick@users.noreply.github.com> Date: Tue, 27 Jun 2023 13:13:58 -0700 Subject: [PATCH 0437/1588] PYTHON-3772 add types to change_stream.py (#1267) --- pymongo/change_stream.py | 46 ++++++++++++++++++++++++++++------------ 1 file changed, 32 insertions(+), 14 deletions(-) diff --git a/pymongo/change_stream.py b/pymongo/change_stream.py index 1e2be563b7..10bfd36236 100644 --- a/pymongo/change_stream.py +++ b/pymongo/change_stream.py @@ -16,13 +16,24 @@ from __future__ import annotations import copy -from typing import TYPE_CHECKING, Any, Dict, Generic, Mapping, Optional, Union +from typing import ( + TYPE_CHECKING, + Any, + Dict, + Generic, + List, + Mapping, + Optional, + Type, + Union, +) from bson import _bson_to_dict from bson.raw_bson import RawBSONDocument from bson.timestamp import Timestamp from pymongo import _csot, common from pymongo.aggregation import ( + _AggregationCommand, _CollectionAggregationCommand, _DatabaseAggregationCommand, ) @@ -67,6 +78,7 @@ from pymongo.collection import Collection from pymongo.database import Database from pymongo.mongo_client import MongoClient + from pymongo.pool import SocketInfo def _resumable(exc: PyMongoError) -> bool: @@ -150,18 +162,18 @@ def __init__( self._cursor = self._create_cursor() @property - def _aggregation_command_class(self): + def _aggregation_command_class(self) -> Type[_AggregationCommand]: """The aggregation command class to be used.""" raise NotImplementedError @property - def _client(self): + def _client(self) -> MongoClient: """The client against which the aggregation commands for this ChangeStream will be run. 
""" raise NotImplementedError - def _change_stream_options(self): + def _change_stream_options(self) -> Dict[str, Any]: """Return the options dict for the $changeStream pipeline stage.""" options: Dict[str, Any] = {} if self._full_document is not None: @@ -185,7 +197,7 @@ def _change_stream_options(self): return options - def _command_options(self): + def _command_options(self) -> Dict[str, Any]: """Return the options dict for the aggregation command.""" options = {} if self._max_await_time_ms is not None: @@ -194,14 +206,14 @@ def _command_options(self): options["batchSize"] = self._batch_size return options - def _aggregation_pipeline(self): + def _aggregation_pipeline(self) -> List[Dict[str, Any]]: """Return the full aggregation pipeline for this ChangeStream.""" options = self._change_stream_options() full_pipeline: list = [{"$changeStream": options}] full_pipeline.extend(self._pipeline) return full_pipeline - def _process_result(self, result, sock_info): + def _process_result(self, result: Mapping[str, Any], sock_info: SocketInfo) -> None: """Callback that caches the postBatchResumeToken or startAtOperationTime from a changeStream aggregate command response containing an empty batch of change documents. @@ -226,7 +238,9 @@ def _process_result(self, result, sock_info): "response : {!r}".format(result) ) - def _run_aggregation_cmd(self, session, explicit_session): + def _run_aggregation_cmd( + self, session: Optional[ClientSession], explicit_session: bool + ) -> CommandCursor: """Run the full aggregation pipeline for this ChangeStream and return the corresponding CommandCursor. """ @@ -247,7 +261,7 @@ def _create_cursor(self): with self._client._tmp_session(self._session, close=False) as s: return self._run_aggregation_cmd(session=s, explicit_session=self._session is not None) - def _resume(self): + def _resume(self) -> None: """Reestablish this change stream after a resumable error.""" try: self._cursor.close() @@ -437,12 +451,14 @@ class CollectionChangeStream(ChangeStream, Generic[_DocumentType]): .. versionadded:: 3.7 """ + _target: Collection[_DocumentType] + @property - def _aggregation_command_class(self): + def _aggregation_command_class(self) -> Type[_CollectionAggregationCommand]: return _CollectionAggregationCommand @property - def _client(self): + def _client(self) -> MongoClient: return self._target.database.client @@ -455,12 +471,14 @@ class DatabaseChangeStream(ChangeStream, Generic[_DocumentType]): .. versionadded:: 3.7 """ + _target: Database[_DocumentType] + @property - def _aggregation_command_class(self): + def _aggregation_command_class(self) -> Type[_DatabaseAggregationCommand]: return _DatabaseAggregationCommand @property - def _client(self): + def _client(self) -> MongoClient: return self._target.client @@ -473,7 +491,7 @@ class ClusterChangeStream(DatabaseChangeStream, Generic[_DocumentType]): .. 
versionadded:: 3.7 """ - def _change_stream_options(self): + def _change_stream_options(self) -> Dict[str, Any]: options = super()._change_stream_options() options["allChangesForCluster"] = True return options From 70666a65cc5ccf4c0f707fd5e6c121a6c0866582 Mon Sep 17 00:00:00 2001 From: Iris <58442094+sleepyStick@users.noreply.github.com> Date: Tue, 27 Jun 2023 13:15:38 -0700 Subject: [PATCH 0438/1588] PYTHON-3766 add types to ocsp_cache.py (#1261) --- pymongo/ocsp_cache.py | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/pymongo/ocsp_cache.py b/pymongo/ocsp_cache.py index f6ac4bb08c..b0ac4d654f 100644 --- a/pymongo/ocsp_cache.py +++ b/pymongo/ocsp_cache.py @@ -14,12 +14,18 @@ """Utilities for caching OCSP responses.""" +from __future__ import annotations + from collections import namedtuple from datetime import datetime as _datetime from datetime import timezone +from typing import TYPE_CHECKING from pymongo.lock import _create_lock +if TYPE_CHECKING: + from cryptography.x509.ocsp import OCSPRequest, OCSPResponse + class _OCSPCache: """A cache for OCSP responses.""" @@ -34,7 +40,7 @@ def __init__(self): # Hold this lock when accessing _data. self._lock = _create_lock() - def _get_cache_key(self, ocsp_request): + def _get_cache_key(self, ocsp_request: OCSPRequest) -> CACHE_KEY_TYPE: return self.CACHE_KEY_TYPE( hash_algorithm=ocsp_request.hash_algorithm.name.lower(), issuer_name_hash=ocsp_request.issuer_name_hash, @@ -42,7 +48,7 @@ def _get_cache_key(self, ocsp_request): serial_number=ocsp_request.serial_number, ) - def __setitem__(self, key, value): + def __setitem__(self, key: OCSPRequest, value: OCSPResponse) -> None: """Add/update a cache entry. 'key' is of type cryptography.x509.ocsp.OCSPRequest @@ -74,7 +80,7 @@ def __setitem__(self, key, value): if cached_value is None or cached_value.next_update < value.next_update: self._data[cache_key] = value - def __getitem__(self, item): + def __getitem__(self, item: OCSPRequest) -> OCSPResponse: """Get a cache entry if it exists. 'item' is of type cryptography.x509.ocsp.OCSPRequest From 386f6d8b7f6979d1360ef4dad7964ec18c8a4fea Mon Sep 17 00:00:00 2001 From: Iris <58442094+sleepyStick@users.noreply.github.com> Date: Tue, 27 Jun 2023 13:15:58 -0700 Subject: [PATCH 0439/1588] PYTHON-3765 add types to server_api.py (#1260) --- pymongo/server_api.py | 15 ++++++++++----- test/test_versioned_api.py | 4 ++-- 2 files changed, 12 insertions(+), 7 deletions(-) diff --git a/pymongo/server_api.py b/pymongo/server_api.py index 2393615032..47812818de 100644 --- a/pymongo/server_api.py +++ b/pymongo/server_api.py @@ -83,6 +83,9 @@ Classes ======= """ +from __future__ import annotations + +from typing import Any, MutableMapping, Optional class ServerApiVersion: @@ -98,7 +101,9 @@ class ServerApiVersion: class ServerApi: """MongoDB Stable API.""" - def __init__(self, version, strict=None, deprecation_errors=None): + def __init__( + self, version: str, strict: Optional[bool] = None, deprecation_errors: Optional[bool] = None + ): """Options to configure MongoDB Stable API. :Parameters: @@ -129,7 +134,7 @@ def __init__(self, version, strict=None, deprecation_errors=None): self._deprecation_errors = deprecation_errors @property - def version(self): + def version(self) -> str: """The API version setting. This value is sent to the server in the "apiVersion" field. 
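
The properties typed here are read-only views of the constructor arguments. A
small usage sketch of the Stable API configuration they describe
(``ServerApiVersion.V1`` is the string ``"1"``)::

    from pymongo import MongoClient
    from pymongo.server_api import ServerApi, ServerApiVersion

    api = ServerApi(ServerApiVersion.V1, strict=True)
    assert api.version == "1"
    assert api.strict is True and api.deprecation_errors is None

    # Commands sent by this client carry apiVersion="1" and apiStrict=True.
    client = MongoClient(server_api=api)
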
@@ -137,7 +142,7 @@ def version(self): return self._version @property - def strict(self): + def strict(self) -> Optional[bool]: """The API strict mode setting. When set, this value is sent to the server in the "apiStrict" field. @@ -145,7 +150,7 @@ def strict(self): return self._strict @property - def deprecation_errors(self): + def deprecation_errors(self) -> Optional[bool]: """The API deprecation errors setting. When set, this value is sent to the server in the @@ -154,7 +159,7 @@ def deprecation_errors(self): return self._deprecation_errors -def _add_to_command(cmd, server_api): +def _add_to_command(cmd: MutableMapping[str, Any], server_api: Optional[ServerApi]) -> None: """Internal helper which adds API versioning options to a command. :Parameters: diff --git a/test/test_versioned_api.py b/test/test_versioned_api.py index 7dbf2c867d..3372c1a919 100644 --- a/test/test_versioned_api.py +++ b/test/test_versioned_api.py @@ -56,9 +56,9 @@ def test_server_api_validation(self): with self.assertRaises(ValueError): ServerApi("2") with self.assertRaises(TypeError): - ServerApi("1", strict="not-a-bool") + ServerApi("1", strict="not-a-bool") # type: ignore[arg-type] with self.assertRaises(TypeError): - ServerApi("1", deprecation_errors="not-a-bool") + ServerApi("1", deprecation_errors="not-a-bool") # type: ignore[arg-type] with self.assertRaises(TypeError): MongoClient(server_api="not-a-ServerApi") From 5c3cfa784830d5a06e129c5d6e074c1799a44381 Mon Sep 17 00:00:00 2001 From: Iris <58442094+sleepyStick@users.noreply.github.com> Date: Tue, 27 Jun 2023 13:16:20 -0700 Subject: [PATCH 0440/1588] PYTHON-3764 add types to settings.py (#1259) --- pymongo/settings.py | 79 ++++++++++++++++++++++--------------------- test/test_topology.py | 6 ++-- 2 files changed, 43 insertions(+), 42 deletions(-) diff --git a/pymongo/settings.py b/pymongo/settings.py index 5d6ddefd36..3436fcad6b 100644 --- a/pymongo/settings.py +++ b/pymongo/settings.py @@ -16,34 +16,35 @@ import threading import traceback +from typing import Any, Collection, Dict, Optional, Tuple, Type, Union from bson.objectid import ObjectId from pymongo import common, monitor, pool from pymongo.common import LOCAL_THRESHOLD_MS, SERVER_SELECTION_TIMEOUT from pymongo.errors import ConfigurationError -from pymongo.pool import PoolOptions +from pymongo.pool import Pool, PoolOptions from pymongo.server_description import ServerDescription -from pymongo.topology_description import TOPOLOGY_TYPE +from pymongo.topology_description import TOPOLOGY_TYPE, _ServerSelector class TopologySettings: def __init__( self, - seeds=None, - replica_set_name=None, - pool_class=None, - pool_options=None, - monitor_class=None, - condition_class=None, - local_threshold_ms=LOCAL_THRESHOLD_MS, - server_selection_timeout=SERVER_SELECTION_TIMEOUT, - heartbeat_frequency=common.HEARTBEAT_FREQUENCY, - server_selector=None, - fqdn=None, - direct_connection=False, - load_balanced=None, - srv_service_name=common.SRV_SERVICE_NAME, - srv_max_hosts=0, + seeds: Optional[Collection[Tuple[str, int]]] = None, + replica_set_name: Optional[str] = None, + pool_class: Optional[Type[Pool]] = None, + pool_options: Optional[PoolOptions] = None, + monitor_class: Optional[Type[monitor.Monitor]] = None, + condition_class: Optional[Type[threading.Condition]] = None, + local_threshold_ms: int = LOCAL_THRESHOLD_MS, + server_selection_timeout: int = SERVER_SELECTION_TIMEOUT, + heartbeat_frequency: int = common.HEARTBEAT_FREQUENCY, + server_selector: Optional[_ServerSelector] = None, + fqdn: 
Optional[str] = None, + direct_connection: Optional[bool] = False, + load_balanced: Optional[bool] = None, + srv_service_name: str = common.SRV_SERVICE_NAME, + srv_max_hosts: int = 0, ): """Represent MongoClient's configuration. @@ -55,12 +56,12 @@ def __init__( % (common.MIN_HEARTBEAT_INTERVAL * 1000,) ) - self._seeds = seeds or [("localhost", 27017)] + self._seeds: Collection[Tuple[str, int]] = seeds or [("localhost", 27017)] self._replica_set_name = replica_set_name - self._pool_class = pool_class or pool.Pool - self._pool_options = pool_options or PoolOptions() - self._monitor_class = monitor_class or monitor.Monitor - self._condition_class = condition_class or threading.Condition + self._pool_class: Type[Pool] = pool_class or pool.Pool + self._pool_options: PoolOptions = pool_options or PoolOptions() + self._monitor_class: Type[monitor.Monitor] = monitor_class or monitor.Monitor + self._condition_class: Type[threading.Condition] = condition_class or threading.Condition self._local_threshold_ms = local_threshold_ms self._server_selection_timeout = server_selection_timeout self._server_selector = server_selector @@ -77,52 +78,52 @@ def __init__( self._stack = "".join(traceback.format_stack()) @property - def seeds(self): + def seeds(self) -> Collection[Tuple[str, int]]: """List of server addresses.""" return self._seeds @property - def replica_set_name(self): + def replica_set_name(self) -> Optional[str]: return self._replica_set_name @property - def pool_class(self): + def pool_class(self) -> Type[Pool]: return self._pool_class @property - def pool_options(self): + def pool_options(self) -> PoolOptions: return self._pool_options @property - def monitor_class(self): + def monitor_class(self) -> Optional[Type[monitor.Monitor]]: return self._monitor_class @property - def condition_class(self): + def condition_class(self) -> Optional[Type[threading.Condition]]: return self._condition_class @property - def local_threshold_ms(self): + def local_threshold_ms(self) -> int: return self._local_threshold_ms @property - def server_selection_timeout(self): + def server_selection_timeout(self) -> int: return self._server_selection_timeout @property - def server_selector(self): + def server_selector(self) -> Optional[_ServerSelector]: return self._server_selector @property - def heartbeat_frequency(self): + def heartbeat_frequency(self) -> int: return self._heartbeat_frequency @property - def fqdn(self): + def fqdn(self) -> Optional[str]: return self._fqdn @property - def direct(self): + def direct(self) -> Optional[bool]: """Connect directly to a single server, or use a set of servers? True if there is one seed and no replica_set_name. 
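
The constructor above rejects heartbeat frequencies below the driver minimum. A
sketch of that validation, assuming the usual ``common.MIN_HEARTBEAT_INTERVAL``
of 0.5 seconds::

    from pymongo.errors import ConfigurationError
    from pymongo.settings import TopologySettings

    try:
        TopologySettings(heartbeat_frequency=0.1)  # below the 0.5s minimum
    except ConfigurationError as exc:
        print(exc)  # e.g. "heartbeatFrequencyMS cannot be less than 500"
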
@@ -130,21 +131,21 @@ def direct(self): return self._direct @property - def load_balanced(self): + def load_balanced(self) -> Optional[bool]: """True if the client was configured to connect to a load balancer.""" return self._load_balanced @property - def srv_service_name(self): + def srv_service_name(self) -> str: """The srvServiceName.""" return self._srv_service_name @property - def srv_max_hosts(self): + def srv_max_hosts(self) -> int: """The srvMaxHosts.""" return self._srv_max_hosts - def get_topology_type(self): + def get_topology_type(self) -> int: if self.load_balanced: return TOPOLOGY_TYPE.LoadBalanced elif self.direct: @@ -154,6 +155,6 @@ def get_topology_type(self): else: return TOPOLOGY_TYPE.Unknown - def get_server_descriptions(self): + def get_server_descriptions(self) -> Dict[Union[Tuple[str, int], Any], ServerDescription]: """Initial dict of (address, ServerDescription) for all seeds.""" return {address: ServerDescription(address) for address in self.seeds} diff --git a/test/test_topology.py b/test/test_topology.py index adbf19f571..a7bfeb766e 100644 --- a/test/test_topology.py +++ b/test/test_topology.py @@ -52,7 +52,7 @@ def create_mock_topology( topology_settings = TopologySettings( partitioned_seeds, replica_set_name=replica_set_name, - pool_class=MockPool, + pool_class=MockPool, # type: ignore[arg-type] monitor_class=monitor_class, direct_connection=direct_connection, ) @@ -451,7 +451,7 @@ def test_discover_set_name_from_primary(self): # Discovering a replica set without the setName supplied by the user # is not yet supported by MongoClient, but Topology can do it. topology_settings = SetNameDiscoverySettings( - seeds=[address], pool_class=MockPool, monitor_class=DummyMonitor + seeds=[address], pool_class=MockPool, monitor_class=DummyMonitor # type: ignore[arg-type] ) t = Topology(topology_settings) @@ -479,7 +479,7 @@ def test_discover_set_name_from_secondary(self): # Discovering a replica set without the setName supplied by the user # is not yet supported by MongoClient, but Topology can do it. topology_settings = SetNameDiscoverySettings( - seeds=[address], pool_class=MockPool, monitor_class=DummyMonitor + seeds=[address], pool_class=MockPool, monitor_class=DummyMonitor # type: ignore[arg-type] ) t = Topology(topology_settings) From de61d1ac8f6a812d42778b6c5b5745bfda6d0e69 Mon Sep 17 00:00:00 2001 From: Iris <58442094+sleepyStick@users.noreply.github.com> Date: Tue, 27 Jun 2023 13:17:47 -0700 Subject: [PATCH 0441/1588] PYTHON-3752 add types to bulk.py (#1250) --- pymongo/bulk.py | 180 +++++++++++++++++++++++++++++------------- pymongo/operations.py | 26 ++++-- 2 files changed, 145 insertions(+), 61 deletions(-) diff --git a/pymongo/bulk.py b/pymongo/bulk.py index b0f61b9f9f..49c355e34f 100644 --- a/pymongo/bulk.py +++ b/pymongo/bulk.py @@ -16,16 +16,30 @@ .. 
versionadded:: 2.7 """ +from __future__ import annotations + import copy +from collections.abc import MutableMapping from itertools import islice -from typing import Any, NoReturn +from typing import ( + TYPE_CHECKING, + Any, + Dict, + Iterator, + List, + Mapping, + NoReturn, + Optional, + Tuple, + Type, + Union, +) from bson.objectid import ObjectId from bson.raw_bson import RawBSONDocument from bson.son import SON from pymongo import _csot, common -from pymongo.client_session import _validate_session_write_concern -from pymongo.collation import validate_collation_or_none +from pymongo.client_session import ClientSession, _validate_session_write_concern from pymongo.common import ( validate_is_document_type, validate_ok_for_replace, @@ -49,28 +63,34 @@ from pymongo.read_preferences import ReadPreference from pymongo.write_concern import WriteConcern -_DELETE_ALL = 0 -_DELETE_ONE = 1 +if TYPE_CHECKING: + from pymongo.collection import Collection + from pymongo.operations import _IndexKeyHint + from pymongo.pool import SocketInfo + from pymongo.typings import _DocumentType + +_DELETE_ALL: int = 0 +_DELETE_ONE: int = 1 # For backwards compatibility. See MongoDB src/mongo/base/error_codes.err -_BAD_VALUE = 2 -_UNKNOWN_ERROR = 8 -_WRITE_CONCERN_ERROR = 64 +_BAD_VALUE: int = 2 +_UNKNOWN_ERROR: int = 8 +_WRITE_CONCERN_ERROR: int = 64 -_COMMANDS = ("insert", "update", "delete") +_COMMANDS: Tuple[str, str, str] = ("insert", "update", "delete") class _Run: """Represents a batch of write operations.""" - def __init__(self, op_type): + def __init__(self, op_type: int) -> None: """Initialize a new Run object.""" - self.op_type = op_type - self.index_map = [] - self.ops = [] - self.idx_offset = 0 + self.op_type: int = op_type + self.index_map: List[int] = [] + self.ops: List[Any] = [] + self.idx_offset: int = 0 - def index(self, idx): + def index(self, idx: int) -> int: """Get the original index of an operation in this run. :Parameters: @@ -78,7 +98,7 @@ def index(self, idx): """ return self.index_map[idx] - def add(self, original_index, operation): + def add(self, original_index: int, operation: Any) -> None: """Add an operation to this Run instance. 
:Parameters: @@ -90,7 +110,12 @@ def add(self, original_index, operation): self.ops.append(operation) -def _merge_command(run, full_result, offset, result): +def _merge_command( + run: _Run, + full_result: MutableMapping[str, Any], + offset: int, + result: Mapping[str, Any], +) -> None: """Merge a write command result into the full bulk result.""" affected = result.get("n", 0) @@ -129,7 +154,7 @@ def _merge_command(run, full_result, offset, result): full_result["writeConcernErrors"].append(wce) -def _raise_bulk_write_error(full_result: Any) -> NoReturn: +def _raise_bulk_write_error(full_result: Mapping[str, Any]) -> NoReturn: """Raise a BulkWriteError from the full bulk api result.""" if full_result["writeErrors"]: full_result["writeErrors"].sort(key=lambda error: error["index"]) @@ -139,7 +164,14 @@ def _raise_bulk_write_error(full_result: Any) -> NoReturn: class _Bulk: """The private guts of the bulk write API.""" - def __init__(self, collection, ordered, bypass_document_validation, comment=None, let=None): + def __init__( + self, + collection: Collection[_DocumentType], + ordered: bool, + bypass_document_validation: bool, + comment: Optional[str] = None, + let: Optional[Any] = None, + ) -> None: """Initialize a _Bulk instance.""" self.collection = collection.with_options( codec_options=collection.codec_options._replace( @@ -149,9 +181,9 @@ def __init__(self, collection, ordered, bypass_document_validation, comment=None self.let = let if self.let is not None: common.validate_is_document_type("let", self.let) - self.comment = comment + self.comment: Optional[str] = comment self.ordered = ordered - self.ops = [] + self.ops: List[Tuple[int, Mapping[str, Any]]] = [] self.executed = False self.bypass_doc_val = bypass_document_validation self.uses_collation = False @@ -166,14 +198,14 @@ def __init__(self, collection, ordered, bypass_document_validation, comment=None self.next_run = None @property - def bulk_ctx_class(self): + def bulk_ctx_class(self) -> Type[_BulkWriteContext]: encrypter = self.collection.database.client._encrypter if encrypter and not encrypter._bypass_auto_encryption: return _EncryptedBulkWriteContext else: return _BulkWriteContext - def add_insert(self, document): + def add_insert(self, document: MutableMapping[str, Any]) -> None: """Add an insert document to the list of ops.""" validate_is_document_type("document", document) # Generate ObjectId client side. 
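
The ``index_map`` bookkeeping typed above lets write errors be mapped back to
the caller's original operation order after batching; ordered execution (see
``gen_ordered`` just below) folds only consecutive operations of the same type
into one run. A standalone sketch of that grouping idea, not the driver's
code::

    from itertools import groupby

    _INSERT, _UPDATE, _DELETE = 0, 1, 2
    ops = [(_INSERT, "d1"), (_INSERT, "d2"), (_UPDATE, "u1"), (_INSERT, "d3")]

    # Consecutive operations of one type share a run; order is preserved.
    runs = [
        (op_type, [doc for _, doc in group])
        for op_type, group in groupby(ops, key=lambda op: op[0])
    ]
    print(runs)  # [(0, ['d1', 'd2']), (1, ['u1']), (0, ['d3'])]
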
@@ -183,18 +215,22 @@ def add_insert(self, document): def add_update( self, - selector, - update, - multi=False, - upsert=False, - collation=None, - array_filters=None, - hint=None, - ): + selector: Mapping[str, Any], + update: Union[ + Mapping[str, Any], + List[Mapping[str, Any]], + ], + multi: bool = False, + upsert: bool = False, + collation: Optional[Mapping[str, Any]] = None, + array_filters: Optional[List[Mapping[str, Any]]] = None, + hint: Optional[_IndexKeyHint] = None, + ) -> None: """Create an update document and add it to the list of ops.""" validate_ok_for_update(update) - cmd = SON([("q", selector), ("u", update), ("multi", multi), ("upsert", upsert)]) - collation = validate_collation_or_none(collation) + cmd: Dict[str, Any] = dict( + [("q", selector), ("u", update), ("multi", multi), ("upsert", upsert)] + ) if collation is not None: self.uses_collation = True cmd["collation"] = collation @@ -209,11 +245,17 @@ def add_update( self.is_retryable = False self.ops.append((_UPDATE, cmd)) - def add_replace(self, selector, replacement, upsert=False, collation=None, hint=None): + def add_replace( + self, + selector: Mapping[str, Any], + replacement: Mapping[str, Any], + upsert: bool = False, + collation: Optional[Mapping[str, Any]] = None, + hint: Optional[_IndexKeyHint] = None, + ) -> None: """Create a replace document and add it to the list of ops.""" validate_ok_for_replace(replacement) cmd = SON([("q", selector), ("u", replacement), ("multi", False), ("upsert", upsert)]) - collation = validate_collation_or_none(collation) if collation is not None: self.uses_collation = True cmd["collation"] = collation @@ -222,10 +264,15 @@ def add_replace(self, selector, replacement, upsert=False, collation=None, hint= cmd["hint"] = hint self.ops.append((_UPDATE, cmd)) - def add_delete(self, selector, limit, collation=None, hint=None): + def add_delete( + self, + selector: Mapping[str, Any], + limit: int, + collation: Optional[Mapping[str, Any]] = None, + hint: Optional[_IndexKeyHint] = None, + ) -> None: """Create a delete document and add it to the list of ops.""" cmd = SON([("q", selector), ("limit", limit)]) - collation = validate_collation_or_none(collation) if collation is not None: self.uses_collation = True cmd["collation"] = collation @@ -237,7 +284,7 @@ def add_delete(self, selector, limit, collation=None, hint=None): self.is_retryable = False self.ops.append((_DELETE, cmd)) - def gen_ordered(self): + def gen_ordered(self) -> Iterator[Optional[_Run]]: """Generate batches of operations, batched by type of operation, in the order **provided**. """ @@ -251,7 +298,7 @@ def gen_ordered(self): run.add(idx, operation) yield run - def gen_unordered(self): + def gen_unordered(self) -> Iterator[_Run]: """Generate batches of operations, batched by type of operation, in arbitrary order. 
""" @@ -265,15 +312,15 @@ def gen_unordered(self): def _execute_command( self, - generator, - write_concern, - session, - sock_info, - op_id, - retryable, - full_result, - final_write_concern=None, - ): + generator: Iterator[Any], + write_concern: WriteConcern, + session: Optional[ClientSession], + sock_info: SocketInfo, + op_id: int, + retryable: bool, + full_result: MutableMapping[str, Any], + final_write_concern: Optional[WriteConcern] = None, + ) -> None: db_name = self.collection.database.name client = self.collection.database.client listeners = client._event_listeners @@ -366,7 +413,12 @@ def _execute_command( # Reset our state self.current_run = run = self.next_run - def execute_command(self, generator, write_concern, session): + def execute_command( + self, + generator: Iterator[Any], + write_concern: WriteConcern, + session: Optional[ClientSession], + ) -> Dict[str, Any]: """Execute using write commands.""" # nModified is only reported for write commands, not legacy ops. full_result = { @@ -381,9 +433,17 @@ def execute_command(self, generator, write_concern, session): } op_id = _randint() - def retryable_bulk(session, sock_info, retryable): + def retryable_bulk( + session: Optional[ClientSession], sock_info: SocketInfo, retryable: bool + ) -> None: self._execute_command( - generator, write_concern, session, sock_info, op_id, retryable, full_result + generator, + write_concern, + session, + sock_info, + op_id, + retryable, + full_result, ) client = self.collection.database.client @@ -394,7 +454,7 @@ def retryable_bulk(session, sock_info, retryable): _raise_bulk_write_error(full_result) return full_result - def execute_op_msg_no_results(self, sock_info, generator): + def execute_op_msg_no_results(self, sock_info: SocketInfo, generator: Iterator[Any]) -> None: """Execute write commands with OP_MSG and w=0 writeConcern, unordered.""" db_name = self.collection.database.name client = self.collection.database.client @@ -433,7 +493,12 @@ def execute_op_msg_no_results(self, sock_info, generator): run.idx_offset += len(to_send) self.current_run = run = next(generator, None) - def execute_command_no_results(self, sock_info, generator, write_concern): + def execute_command_no_results( + self, + sock_info: SocketInfo, + generator: Iterator[Any], + write_concern: WriteConcern, + ) -> None: """Execute write commands with OP_MSG and w=0 WriteConcern, ordered.""" full_result = { "writeErrors": [], @@ -464,7 +529,12 @@ def execute_command_no_results(self, sock_info, generator, write_concern): except OperationFailure: pass - def execute_no_results(self, sock_info, generator, write_concern): + def execute_no_results( + self, + sock_info: SocketInfo, + generator: Iterator[Any], + write_concern: WriteConcern, + ) -> None: """Execute all operations, returning no results (w=0).""" if self.uses_collation: raise ConfigurationError("Collation is unsupported for unacknowledged writes.") @@ -490,7 +560,7 @@ def execute_no_results(self, sock_info, generator, write_concern): return self.execute_command_no_results(sock_info, generator, write_concern) return self.execute_op_msg_no_results(sock_info, generator) - def execute(self, write_concern, session): + def execute(self, write_concern: WriteConcern, session: Optional[ClientSession]) -> Any: """Execute operations.""" if not self.ops: raise InvalidOperation("No operations to execute") diff --git a/pymongo/operations.py b/pymongo/operations.py index ed270c1ca6..fc9dac0fe5 100644 --- a/pymongo/operations.py +++ b/pymongo/operations.py @@ -101,7 +101,12 @@ def 
__init__( def _add_to_bulk(self, bulkobj): """Add this operation to the _Bulk instance `bulkobj`.""" - bulkobj.add_delete(self._filter, 1, collation=self._collation, hint=self._hint) + bulkobj.add_delete( + self._filter, + 1, + collation=validate_collation_or_none(self._collation), + hint=self._hint, + ) def __repr__(self): return f"DeleteOne({self._filter!r}, {self._collation!r})" @@ -157,7 +162,12 @@ def __init__( def _add_to_bulk(self, bulkobj): """Add this operation to the _Bulk instance `bulkobj`.""" - bulkobj.add_delete(self._filter, 0, collation=self._collation, hint=self._hint) + bulkobj.add_delete( + self._filter, + 0, + collation=validate_collation_or_none(self._collation), + hint=self._hint, + ) def __repr__(self): return f"DeleteMany({self._filter!r}, {self._collation!r})" @@ -224,12 +234,16 @@ def __init__( def _add_to_bulk(self, bulkobj): """Add this operation to the _Bulk instance `bulkobj`.""" bulkobj.add_replace( - self._filter, self._doc, self._upsert, collation=self._collation, hint=self._hint + self._filter, + self._doc, + self._upsert, + collation=validate_collation_or_none(self._collation), + hint=self._hint, ) def __eq__(self, other: Any) -> bool: if type(other) == type(self): - return (other._filter, other._doc, other._upsert, other._collation, other._hint) == ( + return (other._filter, other._doc, other._upsert, other._collation, other._hint,) == ( self._filter, self._doc, self._upsert, @@ -361,7 +375,7 @@ def _add_to_bulk(self, bulkobj): self._doc, False, self._upsert, - collation=self._collation, + collation=validate_collation_or_none(self._collation), array_filters=self._array_filters, hint=self._hint, ) @@ -419,7 +433,7 @@ def _add_to_bulk(self, bulkobj): self._doc, True, self._upsert, - collation=self._collation, + collation=validate_collation_or_none(self._collation), array_filters=self._array_filters, hint=self._hint, ) From ba7be3c1bb71cd13c483ac7c5fa6e12faa0a276d Mon Sep 17 00:00:00 2001 From: Iris <58442094+sleepyStick@users.noreply.github.com> Date: Wed, 28 Jun 2023 09:24:17 -0700 Subject: [PATCH 0442/1588] PYTHON-3781 fix type for server_selector (#1269) --- pymongo/client_options.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/pymongo/client_options.py b/pymongo/client_options.py index 7e5be69283..91ef51a526 100644 --- a/pymongo/client_options.py +++ b/pymongo/client_options.py @@ -15,7 +15,7 @@ """Tools to parse mongo client options.""" from __future__ import annotations -from typing import TYPE_CHECKING, Any, Callable, Mapping, Optional, Tuple +from typing import TYPE_CHECKING, Any, Mapping, Optional, Tuple from bson.codec_options import _parse_codec_options from pymongo import common @@ -39,7 +39,7 @@ from bson.codec_options import CodecOptions from pymongo.encryption import AutoEncryptionOpts from pymongo.pyopenssl_context import SSLContext - from pymongo.server_selectors import Selection + from pymongo.topology_description import _ServerSelector def _parse_credentials( @@ -247,7 +247,7 @@ def server_selection_timeout(self) -> int: return self.__server_selection_timeout @property - def server_selector(self) -> Callable[[Selection], Selection]: + def server_selector(self) -> _ServerSelector: return self.__server_selector @property From 12cbeb86ec6f9398e762b7f5645f2cf977489af1 Mon Sep 17 00:00:00 2001 From: Iris <58442094+sleepyStick@users.noreply.github.com> Date: Wed, 28 Jun 2023 09:27:57 -0700 Subject: [PATCH 0443/1588] PYTHON-3784 add types to daemon.py (#1273) --- pymongo/daemon.py | 14 
++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/pymongo/daemon.py b/pymongo/daemon.py index 4fdf147a59..643eb58b6e 100644 --- a/pymongo/daemon.py +++ b/pymongo/daemon.py @@ -23,13 +23,14 @@ import subprocess import sys import warnings +from typing import Optional, Sequence # The maximum amount of time to wait for the intermediate subprocess. _WAIT_TIMEOUT = 10 _THIS_FILE = os.path.realpath(__file__) -def _popen_wait(popen, timeout): +def _popen_wait(popen: subprocess.Popen, timeout: Optional[float]) -> Optional[int]: """Implement wait timeout support for Python 3.""" try: return popen.wait(timeout=timeout) @@ -38,7 +39,7 @@ def _popen_wait(popen, timeout): return None -def _silence_resource_warning(popen): +def _silence_resource_warning(popen: Optional[subprocess.Popen]) -> None: """Silence Popen's ResourceWarning. Note this should only be used if the process was created as a daemon. @@ -56,7 +57,7 @@ def _silence_resource_warning(popen): # On Windows we spawn the daemon process simply by using DETACHED_PROCESS. _DETACHED_PROCESS = getattr(subprocess, "DETACHED_PROCESS", 0x00000008) - def _spawn_daemon(args): + def _spawn_daemon(args: Sequence[str]) -> None: """Spawn a daemon process (Windows).""" try: with open(os.devnull, "r+b") as devnull: @@ -87,7 +88,7 @@ def _spawn_daemon(args): # to be safe to call from any thread. Using Popen instead of fork also # avoids triggering the application's os.register_at_fork() callbacks when # we spawn the mongocryptd daemon process. - def _spawn(args): + def _spawn(args: Sequence[str]) -> Optional[subprocess.Popen]: """Spawn the process and silence stdout/stderr.""" try: with open(os.devnull, "r+b") as devnull: @@ -100,8 +101,9 @@ def _spawn(args): RuntimeWarning, stacklevel=2, ) + return None - def _spawn_daemon_double_popen(args): + def _spawn_daemon_double_popen(args: Sequence[str]) -> None: """Spawn a daemon process using a double subprocess.Popen.""" spawner_args = [sys.executable, _THIS_FILE] spawner_args.extend(args) @@ -110,7 +112,7 @@ def _spawn_daemon_double_popen(args): # processes. _popen_wait(temp_proc, _WAIT_TIMEOUT) - def _spawn_daemon(args): + def _spawn_daemon(args: Sequence[str]) -> None: """Spawn a daemon process (Unix).""" # "If Python is unable to retrieve the real path to its executable, # sys.executable will be an empty string or None". From 91711ee366b268d59dc517ca94f67bcb7e948bb8 Mon Sep 17 00:00:00 2001 From: Iris <58442094+sleepyStick@users.noreply.github.com> Date: Wed, 28 Jun 2023 09:28:46 -0700 Subject: [PATCH 0444/1588] PYTHON-3783 add types to compression_support.py (#1272) --- pymongo/compression_support.py | 20 ++++++++++++-------- 1 file changed, 12 insertions(+), 8 deletions(-) diff --git a/pymongo/compression_support.py b/pymongo/compression_support.py index 40bad403f3..030376fbd1 100644 --- a/pymongo/compression_support.py +++ b/pymongo/compression_support.py @@ -11,8 +11,10 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. +from __future__ import annotations import warnings +from typing import Any, Iterable, List, Union try: import snappy @@ -45,10 +47,10 @@ _NO_COMPRESSION.update(_SENSITIVE_COMMANDS) -def validate_compressors(dummy, value): +def validate_compressors(dummy: Any, value: Union[str, Iterable[str]]) -> List[str]: try: # `value` is string. 
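# A hedged, Unix-only sketch of the "double Popen" technique used by
# _spawn_daemon_double_popen above; the names here are illustrative, not
# pymongo's API. A short-lived intermediate interpreter spawns the real child
# and exits, so the grandchild is reparented to init and the caller reaps no
# zombie; using Popen instead of os.fork() also avoids firing any
# os.register_at_fork() callbacks registered by the application.
import os
import subprocess
import sys

def spawn_daemon_sketch(args):
    """Spawn `args` detached from this process (Unix)."""
    relay = ("import subprocess, sys; "
             "subprocess.Popen(sys.argv[1:], close_fds=True)")
    with open(os.devnull, "r+b") as devnull:
        intermediate = subprocess.Popen(
            [sys.executable, "-c", relay] + list(args),
            stdin=devnull, stdout=devnull, stderr=devnull,
        )
    # Only the intermediate is waited on; it exits almost immediately.
    intermediate.wait(timeout=10)

spawn_daemon_sketch(["sleep", "60"])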
- compressors = value.split(",") + compressors = value.split(",") # type: ignore[union-attr] except AttributeError: # `value` is an iterable. compressors = list(value) @@ -78,7 +80,7 @@ def validate_compressors(dummy, value): return compressors -def validate_zlib_compression_level(option, value): +def validate_zlib_compression_level(option: str, value: Any) -> int: try: level = int(value) except Exception: @@ -89,11 +91,13 @@ def validate_zlib_compression_level(option, value): class CompressionSettings: - def __init__(self, compressors, zlib_compression_level): + def __init__(self, compressors: List[str], zlib_compression_level: int): self.compressors = compressors self.zlib_compression_level = zlib_compression_level - def get_compression_context(self, compressors): + def get_compression_context( + self, compressors: List[str] + ) -> Union[SnappyContext, ZlibContext, ZstdContext, None]: if compressors: chosen = compressors[0] if chosen == "snappy": @@ -110,7 +114,7 @@ class SnappyContext: compressor_id = 1 @staticmethod - def compress(data): + def compress(data: bytes) -> bytes: return snappy.compress(data) @@ -128,13 +132,13 @@ class ZstdContext: compressor_id = 3 @staticmethod - def compress(data): + def compress(data: bytes) -> bytes: # ZstdCompressor is not thread safe. # TODO: Use a pool? return ZstdCompressor().compress(data) -def decompress(data, compressor_id): +def decompress(data: bytes, compressor_id: int) -> bytes: if compressor_id == SnappyContext.compressor_id: # python-snappy doesn't support the buffer interface. # https://github.com/andrix/python-snappy/issues/65 From 7d19205540792519fb86dc19674306219c8ebbf0 Mon Sep 17 00:00:00 2001 From: Iris <58442094+sleepyStick@users.noreply.github.com> Date: Wed, 28 Jun 2023 09:29:32 -0700 Subject: [PATCH 0445/1588] PYTHON-3782 add types to lock.py (#1271) --- pymongo/lock.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pymongo/lock.py b/pymongo/lock.py index b7c01f56b7..741876afcb 100644 --- a/pymongo/lock.py +++ b/pymongo/lock.py @@ -22,7 +22,7 @@ _forkable_locks: weakref.WeakSet = weakref.WeakSet() -def _create_lock(): +def _create_lock() -> threading.Lock: """Represents a lock that is tracked upon instantiation using a WeakSet and reset by pymongo upon forking. """ From ebba342aaa396f530056c25445f49af1319ad047 Mon Sep 17 00:00:00 2001 From: Iris <58442094+sleepyStick@users.noreply.github.com> Date: Wed, 28 Jun 2023 09:48:57 -0700 Subject: [PATCH 0446/1588] PYTHON-3787 add types to max_staleness_selectors.py (#1276) --- pymongo/max_staleness_selectors.py | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/pymongo/max_staleness_selectors.py b/pymongo/max_staleness_selectors.py index 28b0bb615e..2b7a7cc3c8 100644 --- a/pymongo/max_staleness_selectors.py +++ b/pymongo/max_staleness_selectors.py @@ -26,17 +26,22 @@ where "SMax" is the secondary with the greatest lastWriteDate. """ +from __future__ import annotations + +from typing import TYPE_CHECKING from pymongo.errors import ConfigurationError from pymongo.server_type import SERVER_TYPE +if TYPE_CHECKING: + from pymongo.server_selectors import Selection # Constant defined in Max Staleness Spec: An idle primary writes a no-op every # 10 seconds to refresh secondaries' lastWriteDate values. 
IDLE_WRITE_PERIOD = 10 SMALLEST_MAX_STALENESS = 90 -def _validate_max_staleness(max_staleness, heartbeat_frequency): +def _validate_max_staleness(max_staleness: int, heartbeat_frequency: int) -> None: # We checked for max staleness -1 before this, it must be positive here. if max_staleness < heartbeat_frequency + IDLE_WRITE_PERIOD: raise ConfigurationError( @@ -53,7 +58,7 @@ def _validate_max_staleness(max_staleness, heartbeat_frequency): ) -def _with_primary(max_staleness, selection): +def _with_primary(max_staleness: int, selection: Selection) -> Selection: """Apply max_staleness, in seconds, to a Selection with a known primary.""" primary = selection.primary sds = [] @@ -75,7 +80,7 @@ def _with_primary(max_staleness, selection): return selection.with_server_descriptions(sds) -def _no_primary(max_staleness, selection): +def _no_primary(max_staleness: int, selection: Selection) -> Selection: """Apply max_staleness, in seconds, to a Selection with no known primary.""" # Secondary that's replicated the most recent writes. smax = selection.secondary_with_max_last_write_date() @@ -98,7 +103,7 @@ def _no_primary(max_staleness, selection): return selection.with_server_descriptions(sds) -def select(max_staleness, selection): +def select(max_staleness: int, selection: Selection) -> Selection: """Apply max_staleness, in seconds, to a Selection.""" if max_staleness == -1: return selection From 2c563f128d71d612136ed50e3d91c15cdf6c25af Mon Sep 17 00:00:00 2001 From: Iris <58442094+sleepyStick@users.noreply.github.com> Date: Wed, 28 Jun 2023 10:47:17 -0700 Subject: [PATCH 0447/1588] PYTHON-3785 add types to response.py (#1274) --- pymongo/response.py | 47 ++++++++++++++++++++++++++++++++++----------- 1 file changed, 36 insertions(+), 11 deletions(-) diff --git a/pymongo/response.py b/pymongo/response.py index fc01b0f1bf..bd4795bfb0 100644 --- a/pymongo/response.py +++ b/pymongo/response.py @@ -13,12 +13,30 @@ # limitations under the License. """Represent a response from the server.""" +from __future__ import annotations + +from typing import TYPE_CHECKING, Any, List, Mapping, Optional, Union + +if TYPE_CHECKING: + from datetime import timedelta + + from pymongo.message import _OpMsg, _OpReply + from pymongo.pool import SocketInfo + from pymongo.typings import _Address class Response: __slots__ = ("_data", "_address", "_request_id", "_duration", "_from_command", "_docs") - def __init__(self, data, address, request_id, duration, from_command, docs): + def __init__( + self, + data: Union[_OpMsg, _OpReply], + address: _Address, + request_id: int, + duration: Optional[timedelta], + from_command: bool, + docs: List[Mapping[str, Any]], + ): """Represent a response from the server. 
:Parameters: @@ -36,32 +54,32 @@ def __init__(self, data, address, request_id, duration, from_command, docs): self._docs = docs @property - def data(self): + def data(self) -> Union[_OpMsg, _OpReply]: """Server response's raw BSON bytes.""" return self._data @property - def address(self): + def address(self) -> _Address: """(host, port) of the source server.""" return self._address @property - def request_id(self): + def request_id(self) -> int: """The request id of this operation.""" return self._request_id @property - def duration(self): + def duration(self) -> Optional[timedelta]: """The duration of the operation.""" return self._duration @property - def from_command(self): + def from_command(self) -> bool: """If the response is a result from a db command.""" return self._from_command @property - def docs(self): + def docs(self) -> List[Mapping[str, Any]]: """The decoded document(s).""" return self._docs @@ -70,7 +88,15 @@ class PinnedResponse(Response): __slots__ = ("_socket_info", "_more_to_come") def __init__( - self, data, address, socket_info, request_id, duration, from_command, docs, more_to_come + self, + data: Union[_OpMsg, _OpReply], + address: _Address, + socket_info: SocketInfo, + request_id: int, + duration: Optional[timedelta], + from_command: bool, + docs: List[Mapping[str, Any]], + more_to_come: bool, ): """Represent a response to an exhaust cursor's initial query. @@ -78,7 +104,6 @@ def __init__( - `data`: A network response message. - `address`: (host, port) of the source server. - `socket_info`: The SocketInfo used for the initial query. - - `pool`: The Pool from which the SocketInfo came. - `request_id`: The request id of this operation. - `duration`: The duration of the operation. - `from_command`: If the response is the result of a db command. @@ -91,7 +116,7 @@ def __init__( self._more_to_come = more_to_come @property - def socket_info(self): + def socket_info(self) -> SocketInfo: """The SocketInfo used for the initial query. The server will send batches on this socket, without waiting for @@ -101,7 +126,7 @@ def socket_info(self): return self._socket_info @property - def more_to_come(self): + def more_to_come(self) -> bool: """If true, server is ready to send batches on the socket until the result set is exhausted or there is an error. """ From 2a75a181987e5083e2643b14fc7417c0d4d9bbe5 Mon Sep 17 00:00:00 2001 From: Iris <58442094+sleepyStick@users.noreply.github.com> Date: Wed, 28 Jun 2023 10:48:04 -0700 Subject: [PATCH 0448/1588] PYTHON-3788 add types to server_selectors.py (#1278) --- pymongo/server_selectors.py | 40 ++++++++++++++++++++++++++----------- 1 file changed, 28 insertions(+), 12 deletions(-) diff --git a/pymongo/server_selectors.py b/pymongo/server_selectors.py index aa9d26b5fb..9a67015575 100644 --- a/pymongo/server_selectors.py +++ b/pymongo/server_selectors.py @@ -13,15 +13,25 @@ # permissions and limitations under the License. 
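# The selectors defined in server_selectors.py below are plain callables from
# Selection to Selection, so custom ones compose by simple chaining. A hedged,
# self-contained sketch with stand-in classes (not pymongo's Selection or
# ServerDescription):
from dataclasses import dataclass, field
from typing import Dict, List

@dataclass
class FakeServer:
    address: str
    tags: Dict[str, str] = field(default_factory=dict)

@dataclass
class FakeSelection:
    server_descriptions: List[FakeServer]

    def with_server_descriptions(self, sds: List[FakeServer]) -> "FakeSelection":
        return FakeSelection(sds)

def analytics_selector(selection: FakeSelection) -> FakeSelection:
    """Keep only servers tagged {'nodeType': 'ANALYTICS'}."""
    return selection.with_server_descriptions(
        [s for s in selection.server_descriptions
         if s.tags.get("nodeType") == "ANALYTICS"])

sel = FakeSelection([FakeServer("a:27017", {"nodeType": "ANALYTICS"}),
                     FakeServer("b:27017")])
print([s.address for s in analytics_selector(sel).server_descriptions])
# ['a:27017']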
"""Criteria to select some ServerDescriptions from a TopologyDescription.""" +from __future__ import annotations + +from typing import TYPE_CHECKING, List, Optional, TypeVar from pymongo.server_type import SERVER_TYPE +if TYPE_CHECKING: + from pymongo.server_description import ServerDescription + from pymongo.topology_description import TopologyDescription + + +T = TypeVar("T") + class Selection: """Input or output of a server selector function.""" @classmethod - def from_topology_description(cls, topology_description): + def from_topology_description(cls, topology_description: TopologyDescription) -> Selection: known_servers = topology_description.known_servers primary = None for sd in known_servers: @@ -36,54 +46,60 @@ def from_topology_description(cls, topology_description): primary, ) - def __init__(self, topology_description, server_descriptions, common_wire_version, primary): + def __init__( + self, + topology_description: TopologyDescription, + server_descriptions: List[ServerDescription], + common_wire_version: Optional[int], + primary: Optional[ServerDescription], + ): self.topology_description = topology_description self.server_descriptions = server_descriptions self.primary = primary self.common_wire_version = common_wire_version - def with_server_descriptions(self, server_descriptions): + def with_server_descriptions(self, server_descriptions: List[ServerDescription]) -> Selection: return Selection( self.topology_description, server_descriptions, self.common_wire_version, self.primary ) - def secondary_with_max_last_write_date(self): + def secondary_with_max_last_write_date(self) -> Optional[ServerDescription]: secondaries = secondary_server_selector(self) if secondaries.server_descriptions: return max(secondaries.server_descriptions, key=lambda sd: sd.last_write_date) return None @property - def primary_selection(self): + def primary_selection(self) -> Selection: primaries = [self.primary] if self.primary else [] return self.with_server_descriptions(primaries) @property - def heartbeat_frequency(self): + def heartbeat_frequency(self) -> int: return self.topology_description.heartbeat_frequency @property - def topology_type(self): + def topology_type(self) -> int: return self.topology_description.topology_type - def __bool__(self): + def __bool__(self) -> bool: return bool(self.server_descriptions) - def __getitem__(self, item): + def __getitem__(self, item: int) -> ServerDescription: return self.server_descriptions[item] -def any_server_selector(selection): +def any_server_selector(selection: T) -> T: return selection -def readable_server_selector(selection): +def readable_server_selector(selection: Selection) -> Selection: return selection.with_server_descriptions( [s for s in selection.server_descriptions if s.is_readable] ) -def writable_server_selector(selection): +def writable_server_selector(selection: Selection) -> Selection: return selection.with_server_descriptions( [s for s in selection.server_descriptions if s.is_writable] ) From 01dd2f8ce091bd33bc917d977bc2f2cf9da788c7 Mon Sep 17 00:00:00 2001 From: Iris <58442094+sleepyStick@users.noreply.github.com> Date: Wed, 28 Jun 2023 10:49:04 -0700 Subject: [PATCH 0449/1588] PYTHON-3786 add types to srv_resolver.py (#1275) --- pymongo/srv_resolver.py | 30 +++++++++++++++++++++--------- 1 file changed, 21 insertions(+), 9 deletions(-) diff --git a/pymongo/srv_resolver.py b/pymongo/srv_resolver.py index 583de818b0..57c48f1e13 100644 --- a/pymongo/srv_resolver.py +++ b/pymongo/srv_resolver.py @@ -13,9 +13,11 @@ # 
permissions and limitations under the License. """Support for resolving hosts and options from mongodb+srv:// URIs.""" +from __future__ import annotations import ipaddress import random +from typing import Any, List, Optional, Tuple, Union try: from dns import resolver @@ -30,14 +32,14 @@ # dnspython can return bytes or str from various parts # of its API depending on version. We always want str. -def maybe_decode(text): +def maybe_decode(text: Union[str, bytes]) -> str: if isinstance(text, bytes): return text.decode() return text # PYTHON-2667 Lazily call dns.resolver methods for compatibility with eventlet. -def _resolve(*args, **kwargs): +def _resolve(*args: Any, **kwargs: Any) -> resolver.Answer: if hasattr(resolver, "resolve"): # dnspython >= 2 return resolver.resolve(*args, **kwargs) @@ -52,7 +54,13 @@ def _resolve(*args, **kwargs): class _SrvResolver: - def __init__(self, fqdn, connect_timeout, srv_service_name, srv_max_hosts=0): + def __init__( + self, + fqdn: str, + connect_timeout: Optional[float], + srv_service_name: str, + srv_max_hosts: int = 0, + ): self.__fqdn = fqdn self.__srv = srv_service_name self.__connect_timeout = connect_timeout or CONNECT_TIMEOUT @@ -72,7 +80,7 @@ def __init__(self, fqdn, connect_timeout, srv_service_name, srv_max_hosts=0): if self.__slen < 2: raise ConfigurationError(_INVALID_HOST_MSG % (fqdn,)) - def get_options(self): + def get_options(self) -> Optional[str]: try: results = _resolve(self.__fqdn, "TXT", lifetime=self.__connect_timeout) except (resolver.NoAnswer, resolver.NXDOMAIN): @@ -84,7 +92,7 @@ def get_options(self): raise ConfigurationError("Only one TXT record is supported") return (b"&".join([b"".join(res.strings) for res in results])).decode("utf-8") - def _resolve_uri(self, encapsulate_errors): + def _resolve_uri(self, encapsulate_errors: bool) -> resolver.Answer: try: results = _resolve( "_" + self.__srv + "._tcp." 
+ self.__fqdn, "SRV", lifetime=self.__connect_timeout @@ -97,7 +105,9 @@ def _resolve_uri(self, encapsulate_errors): raise ConfigurationError(str(exc)) return results - def _get_srv_response_and_hosts(self, encapsulate_errors): + def _get_srv_response_and_hosts( + self, encapsulate_errors: bool + ) -> Tuple[resolver.Answer, List[Tuple[str, Any]]]: results = self._resolve_uri(encapsulate_errors) # Construct address tuples @@ -117,10 +127,12 @@ def _get_srv_response_and_hosts(self, encapsulate_errors): nodes = random.sample(nodes, min(self.__srv_max_hosts, len(nodes))) return results, nodes - def get_hosts(self): + def get_hosts(self) -> List[Tuple[str, Any]]: _, nodes = self._get_srv_response_and_hosts(True) return nodes - def get_hosts_and_min_ttl(self): + def get_hosts_and_min_ttl(self) -> Tuple[List[Tuple[str, Any]], int]: results, nodes = self._get_srv_response_and_hosts(False) - return nodes, results.rrset.ttl + rrset = results.rrset + ttl = rrset.ttl if rrset else 0 + return nodes, ttl From d5882075d63fc9209640ebd7e92d60fd3684d6aa Mon Sep 17 00:00:00 2001 From: Noah Stapp Date: Wed, 28 Jun 2023 10:57:23 -0700 Subject: [PATCH 0450/1588] PYTHON-3789 Use tox for Evergreen mockupdb tests (#1277) --- .evergreen/config.yml | 4 +- .evergreen/run-mockupdb-tests.sh | 18 ------ test/mockupdb/test_handshake.py | 97 ++++++++++++++++---------------- tox.ini | 9 +++ 4 files changed, 60 insertions(+), 68 deletions(-) delete mode 100755 .evergreen/run-mockupdb-tests.sh diff --git a/.evergreen/config.yml b/.evergreen/config.yml index f3c159a1df..8fa2df2415 100644 --- a/.evergreen/config.yml +++ b/.evergreen/config.yml @@ -357,7 +357,9 @@ functions: script: | set -o xtrace ${PREPARE_SHELL} - PYTHON_BINARY=${PYTHON_BINARY} bash ${PROJECT_DIRECTORY}/.evergreen/run-mockupdb-tests.sh + + alias python=${PYTHON_BINARY} + python -m tox -e test-mockupdb "run doctests": - command: shell.exec diff --git a/.evergreen/run-mockupdb-tests.sh b/.evergreen/run-mockupdb-tests.sh deleted file mode 100755 index a76ed6316f..0000000000 --- a/.evergreen/run-mockupdb-tests.sh +++ /dev/null @@ -1,18 +0,0 @@ -#!/bin/bash -# Must be run from pymongo repo root -set -o xtrace -set -o errexit - -. .evergreen/utils.sh - -${PYTHON_BINARY} setup.py clean - -createvirtualenv ${PYTHON_BINARY} mockuptests -trap "deactivate; rm -rf mockuptests" EXIT HUP - -# Install PyMongo from git clone so mockup-tests don't -# download it from pypi. -python -m pip install . -python -m pip install --upgrade 'https://github.com/ajdavis/mongo-mockup-db/archive/master.zip' -cd ./test/mockupdb -python -m unittest discover -v diff --git a/test/mockupdb/test_handshake.py b/test/mockupdb/test_handshake.py index d3f8922c4c..883d518f5b 100644 --- a/test/mockupdb/test_handshake.py +++ b/test/mockupdb/test_handshake.py @@ -23,49 +23,6 @@ from pymongo.server_api import ServerApi, ServerApiVersion -def test_hello_with_option(self, protocol, **kwargs): - hello = "ismaster" if isinstance(protocol(), OpQuery) else "hello" - # `db.command("hello"|"ismaster")` commands are the same for primaries and - # secondaries, so we only need one server. - primary = MockupDB() - # Set up a custom handler to save the first request from the driver. - self.handshake_req = None - - def respond(r): - # Only save the very first request from the driver. 
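# The core of what _SrvResolver (in the srv_resolver.py patch above) performs,
# reduced to a few lines of dnspython. A hedged sketch: it requires the
# `dnspython` package and a resolvable SRV record, and the hostname in the
# commented example is a placeholder.
from dns import resolver

def lookup_srv_hosts(fqdn, srv_service_name="mongodb", timeout=20.0):
    """Return (host, port) pairs from the _<service>._tcp.<fqdn> SRV record."""
    answer = resolver.resolve(
        "_" + srv_service_name + "._tcp." + fqdn, "SRV", lifetime=timeout)
    return [(str(rr.target).rstrip("."), rr.port) for rr in answer]

# Example (placeholder hostname):
# print(lookup_srv_hosts("cluster0.example.mongodb.net"))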
- if self.handshake_req is None: - self.handshake_req = r - load_balanced_kwargs = {"serviceId": ObjectId()} if kwargs.get("loadBalanced") else {} - return r.reply( - OpMsgReply(minWireVersion=0, maxWireVersion=13, **kwargs, **load_balanced_kwargs) - ) - - primary.autoresponds(respond) - primary.run() - self.addCleanup(primary.stop) - - # We need a special dict because MongoClient uses "server_api" and all - # of the commands use "apiVersion". - k_map = {("apiVersion", "1"): ("server_api", ServerApi(ServerApiVersion.V1))} - client = MongoClient( - "mongodb://" + primary.address_string, - appname="my app", # For _check_handshake_data() - **dict([k_map.get((k, v), (k, v)) for k, v in kwargs.items()]) # type: ignore[arg-type] - ) - - self.addCleanup(client.close) - - # We have an autoresponder luckily, so no need for `go()`. - assert client.db.command(hello) - - # We do this checking here rather than in the autoresponder `respond()` - # because it runs in another Python thread so there are some funky things - # with error handling within that thread, and we want to be able to use - # self.assertRaises(). - self.handshake_req.assert_matches(protocol(hello, **kwargs)) - _check_handshake_data(self.handshake_req) - - def _check_handshake_data(request): assert "client" in request data = request["client"] @@ -79,6 +36,48 @@ def _check_handshake_data(request): class TestHandshake(unittest.TestCase): + def hello_with_option_helper(self, protocol, **kwargs): + hello = "ismaster" if isinstance(protocol(), OpQuery) else "hello" + # `db.command("hello"|"ismaster")` commands are the same for primaries and + # secondaries, so we only need one server. + primary = MockupDB() + # Set up a custom handler to save the first request from the driver. + self.handshake_req = None + + def respond(r): + # Only save the very first request from the driver. + if self.handshake_req is None: + self.handshake_req = r + load_balanced_kwargs = {"serviceId": ObjectId()} if kwargs.get("loadBalanced") else {} + return r.reply( + OpMsgReply(minWireVersion=0, maxWireVersion=13, **kwargs, **load_balanced_kwargs) + ) + + primary.autoresponds(respond) + primary.run() + self.addCleanup(primary.stop) + + # We need a special dict because MongoClient uses "server_api" and all + # of the commands use "apiVersion". + k_map = {("apiVersion", "1"): ("server_api", ServerApi(ServerApiVersion.V1))} + client = MongoClient( + "mongodb://" + primary.address_string, + appname="my app", # For _check_handshake_data() + **dict([k_map.get((k, v), (k, v)) for k, v in kwargs.items()]) # type: ignore[arg-type] + ) + + self.addCleanup(client.close) + + # We have an autoresponder luckily, so no need for `go()`. + assert client.db.command(hello) + + # We do this checking here rather than in the autoresponder `respond()` + # because it runs in another Python thread so there are some funky things + # with error handling within that thread, and we want to be able to use + # self.assertRaises(). 
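# For readers unfamiliar with MockupDB, a minimal request/reply round trip in
# the style of the helper above. A hedged sketch: it assumes the `mockupdb`
# package is installed, and the wire-version numbers are illustrative.
from mockupdb import MockupDB, OpMsg, go
from pymongo import MongoClient

server = MockupDB(auto_ismaster={"minWireVersion": 0, "maxWireVersion": 6})
server.run()
client = MongoClient(server.uri)

future = go(client.admin.command, "ping")  # drive the command from a thread
request = server.receives(OpMsg("ping"))   # intercept the wire request
request.ok()                               # reply {"ok": 1}
assert future()["ok"] == 1

client.close()
server.stop()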
+ self.handshake_req.assert_matches(protocol(hello, **kwargs)) + _check_handshake_data(self.handshake_req) + def test_client_handshake_data(self): primary, secondary = MockupDB(), MockupDB() for server in primary, secondary: @@ -208,21 +207,21 @@ def test_client_handshake_saslSupportedMechs(self): return def test_handshake_load_balanced(self): - test_hello_with_option(self, OpMsg, loadBalanced=True) + self.hello_with_option_helper(OpMsg, loadBalanced=True) with self.assertRaisesRegex(AssertionError, "does not match"): - test_hello_with_option(self, Command, loadBalanced=True) + self.hello_with_option_helper(Command, loadBalanced=True) def test_handshake_versioned_api(self): - test_hello_with_option(self, OpMsg, apiVersion="1") + self.hello_with_option_helper(OpMsg, apiVersion="1") with self.assertRaisesRegex(AssertionError, "does not match"): - test_hello_with_option(self, Command, apiVersion="1") + self.hello_with_option_helper(Command, apiVersion="1") def test_handshake_not_either(self): # If we don't specify either option then it should be using # OP_QUERY for the initial step of the handshake. - test_hello_with_option(self, Command) + self.hello_with_option_helper(Command) with self.assertRaisesRegex(AssertionError, "does not match"): - test_hello_with_option(self, OpMsg) + self.hello_with_option_helper(OpMsg) def test_handshake_max_wire(self): server = MockupDB() diff --git a/tox.ini b/tox.ini index ba53a2011e..bdabf17700 100644 --- a/tox.ini +++ b/tox.ini @@ -121,3 +121,12 @@ deps = {[testenv:doc]deps} commands = sphinx-build -E -b linkcheck doc ./doc/_build/linkcheck + +[testenv:test-mockupdb] +description = run mockupdb tests +deps = + {[testenv:test]deps} + https://github.com/ajdavis/mongo-mockup-db/archive/master.zip +passenv = * +commands = + python -m pytest -v ./test/mockupdb From 0c727bba856cb1cd44e4609f60605254c4ee86bf Mon Sep 17 00:00:00 2001 From: Iris <58442094+sleepyStick@users.noreply.github.com> Date: Wed, 28 Jun 2023 11:53:43 -0700 Subject: [PATCH 0451/1588] PYTHON-3791 Fix access to last_write_date (#1279) --- pymongo/max_staleness_selectors.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/pymongo/max_staleness_selectors.py b/pymongo/max_staleness_selectors.py index 2b7a7cc3c8..10c136a43e 100644 --- a/pymongo/max_staleness_selectors.py +++ b/pymongo/max_staleness_selectors.py @@ -61,11 +61,13 @@ def _validate_max_staleness(max_staleness: int, heartbeat_frequency: int) -> Non def _with_primary(max_staleness: int, selection: Selection) -> Selection: """Apply max_staleness, in seconds, to a Selection with a known primary.""" primary = selection.primary + assert primary sds = [] for s in selection.server_descriptions: if s.server_type == SERVER_TYPE.RSSecondary: # See max-staleness.rst for explanation of this formula. + assert s.last_write_date and primary.last_write_date staleness = ( (s.last_update_time - s.last_write_date) - (primary.last_update_time - primary.last_write_date) @@ -93,6 +95,7 @@ def _no_primary(max_staleness: int, selection: Selection) -> Selection: for s in selection.server_descriptions: if s.server_type == SERVER_TYPE.RSSecondary: # See max-staleness.rst for explanation of this formula. 
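# Worked numbers for the staleness formula referenced in the comment above
# (the no-primary case of max-staleness.rst). All values are made up, in
# seconds; smax is the secondary with the greatest lastWriteDate.
heartbeat_frequency = 10
smax_last_write = 100.0   # most up-to-date secondary
s_last_write = 70.0       # candidate secondary

staleness = smax_last_write - s_last_write + heartbeat_frequency
print(staleness)  # 40.0 -> selectable only if maxStalenessSeconds >= 40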
+ assert smax.last_write_date and s.last_write_date staleness = smax.last_write_date - s.last_write_date + selection.heartbeat_frequency if staleness <= max_staleness: From 820823891da58d46f889fc332b0da6b55f11f39a Mon Sep 17 00:00:00 2001 From: Iris <58442094+sleepyStick@users.noreply.github.com> Date: Wed, 28 Jun 2023 11:54:23 -0700 Subject: [PATCH 0452/1588] PYTHON-3773 add types to client_session.py (#1268) --- pymongo/client_session.py | 137 +++++++++++++++++++++++--------------- 1 file changed, 83 insertions(+), 54 deletions(-) diff --git a/pymongo/client_session.py b/pymongo/client_session.py index 08d9f03bb5..d196318664 100644 --- a/pymongo/client_session.py +++ b/pymongo/client_session.py @@ -144,9 +144,13 @@ Any, Callable, ContextManager, + List, Mapping, + MutableMapping, NoReturn, Optional, + Tuple, + Type, TypeVar, ) @@ -170,6 +174,12 @@ from pymongo.server_type import SERVER_TYPE from pymongo.write_concern import WriteConcern +if TYPE_CHECKING: + from types import TracebackType + + from pymongo.pool import SocketInfo + from pymongo.server import Server + class SessionOptions: """Options for a new :class:`ClientSession`. @@ -326,7 +336,9 @@ def max_commit_time_ms(self) -> Optional[int]: return self._max_commit_time_ms -def _validate_session_write_concern(session, write_concern): +def _validate_session_write_concern( + session: Optional[ClientSession], write_concern: Optional[WriteConcern] +) -> Optional[ClientSession]: """Validate that an explicit session is not used with an unack'ed write. Returns the session to use for the next operation. @@ -351,13 +363,18 @@ def _validate_session_write_concern(session, write_concern): class _TransactionContext: """Internal transaction context manager for start_transaction.""" - def __init__(self, session): + def __init__(self, session: ClientSession): self.__session = session - def __enter__(self): + def __enter__(self) -> _TransactionContext: return self - def __exit__(self, exc_type, exc_val, exc_tb): + def __exit__( + self, + exc_type: Optional[Type[BaseException]], + exc_val: Optional[BaseException], + exc_tb: Optional[TracebackType], + ) -> None: if self.__session.in_transaction: if exc_val is None: self.__session.commit_transaction() @@ -377,49 +394,49 @@ class _TxnState: class _Transaction: """Internal class to hold transaction information in a ClientSession.""" - def __init__(self, opts, client): + def __init__(self, opts: Optional[TransactionOptions], client: MongoClient): self.opts = opts self.state = _TxnState.NONE self.sharded = False - self.pinned_address = None - self.sock_mgr = None + self.pinned_address: Optional[Tuple[str, Optional[int]]] = None + self.sock_mgr: Optional[_SocketManager] = None self.recovery_token = None self.attempt = 0 self.client = client - def active(self): + def active(self) -> bool: return self.state in (_TxnState.STARTING, _TxnState.IN_PROGRESS) - def starting(self): + def starting(self) -> bool: return self.state == _TxnState.STARTING @property - def pinned_conn(self): + def pinned_conn(self) -> Optional[SocketInfo]: if self.active() and self.sock_mgr: return self.sock_mgr.sock return None - def pin(self, server, sock_info): + def pin(self, server: Server, sock_info: SocketInfo) -> None: self.sharded = True self.pinned_address = server.description.address if server.description.server_type == SERVER_TYPE.LoadBalancer: sock_info.pin_txn() self.sock_mgr = _SocketManager(sock_info, False) - def unpin(self): + def unpin(self) -> None: self.pinned_address = None if self.sock_mgr: 
self.sock_mgr.close() self.sock_mgr = None - def reset(self): + def reset(self) -> None: self.unpin() self.state = _TxnState.NONE self.sharded = False self.recovery_token = None self.attempt = 0 - def __del__(self): + def __del__(self) -> None: if self.sock_mgr: # Reuse the cursor closing machinery to return the socket to the # pool soon. @@ -433,7 +450,7 @@ def _reraise_with_unknown_commit(exc: Any) -> NoReturn: raise -def _max_time_expired_error(exc): +def _max_time_expired_error(exc: PyMongoError) -> bool: """Return true if exc is a MaxTimeMSExpired error.""" return isinstance(exc, OperationFailure) and exc.code == 50 @@ -454,7 +471,7 @@ def _max_time_expired_error(exc): _WITH_TRANSACTION_RETRY_TIME_LIMIT = 120 -def _within_time_limit(start_time): +def _within_time_limit(start_time: float) -> bool: """Are we within the with_transaction retry limit?""" return time.monotonic() - start_time < _WITH_TRANSACTION_RETRY_TIME_LIMIT @@ -489,8 +506,8 @@ def __init__( self._client: MongoClient = client self._server_session = server_session self._options = options - self._cluster_time = None - self._operation_time = None + self._cluster_time: Optional[Mapping[str, Any]] = None + self._operation_time: Optional[Timestamp] = None self._snapshot_time = None # Is this an implicitly created session? self._implicit = implicit @@ -503,7 +520,7 @@ def end_session(self) -> None: """ self._end_session(lock=True) - def _end_session(self, lock): + def _end_session(self, lock: bool) -> None: if self._server_session is not None: try: if self.in_transaction: @@ -515,7 +532,7 @@ def _end_session(self, lock): self._client._return_server_session(self._server_session, lock) self._server_session = None - def _check_ended(self): + def _check_ended(self) -> None: if self._server_session is None: raise InvalidOperation("Cannot use ended session") @@ -557,14 +574,14 @@ def operation_time(self) -> Optional[Timestamp]: """ return self._operation_time - def _inherit_option(self, name, val): + def _inherit_option(self, name: str, val: _T) -> _T: """Return the inherited TransactionOption value.""" if val: return val txn_opts = self.options.default_transaction_options - val = txn_opts and getattr(txn_opts, name) - if val: - return val + parent_val = txn_opts and getattr(txn_opts, name) + if parent_val: + return parent_val return getattr(self.client, name) def with_transaction( @@ -814,21 +831,22 @@ def abort_transaction(self) -> None: self._transaction.state = _TxnState.ABORTED self._unpin() - def _finish_transaction_with_retry(self, command_name): + def _finish_transaction_with_retry(self, command_name: str) -> List[Any]: """Run commit or abort with one retry after any retryable error. :Parameters: - `command_name`: Either "commitTransaction" or "abortTransaction". """ - def func(session, sock_info, retryable): + def func(session: ClientSession, sock_info: SocketInfo, retryable: bool) -> List[Any]: return self._finish_transaction(sock_info, command_name) return self._client._retry_internal(True, func, self, None) - def _finish_transaction(self, sock_info, command_name): + def _finish_transaction(self, sock_info: SocketInfo, command_name: str) -> List[Any]: self._transaction.attempt += 1 opts = self._transaction.opts + assert opts wc = opts.write_concern cmd = SON([(command_name, 1)]) if command_name == "commitTransaction": @@ -839,6 +857,7 @@ def _finish_transaction(self, sock_info, command_name): # subsequent commitTransaction commands should be upgraded to use # w:"majority" and set a default value of 10 seconds for wtimeout. 
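# The write-concern upgrade described in the comment above, shown standalone:
# on a retried commitTransaction the effective write concern is forced to
# w:"majority" with a 10-second wtimeout default. A minimal sketch using
# pymongo's public WriteConcern type.
from pymongo.write_concern import WriteConcern

wc = WriteConcern(w=1)          # whatever the transaction started with
wc_doc = wc.document
wc_doc["w"] = "majority"
wc_doc.setdefault("wtimeout", 10000)
print(wc_doc)                   # {'w': 'majority', 'wtimeout': 10000}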
if self._transaction.attempt > 1: + assert wc wc_doc = wc.document wc_doc["w"] = "majority" wc_doc.setdefault("wtimeout", 10000) @@ -851,7 +870,7 @@ def _finish_transaction(self, sock_info, command_name): sock_info, cmd, session=self, write_concern=wc, parse_write_concern_error=True ) - def _advance_cluster_time(self, cluster_time): + def _advance_cluster_time(self, cluster_time: Optional[Mapping[str, Any]]) -> None: """Internal cluster time helper.""" if self._cluster_time is None: self._cluster_time = cluster_time @@ -873,7 +892,7 @@ def advance_cluster_time(self, cluster_time: Mapping[str, Any]) -> None: raise ValueError("Invalid cluster_time") self._advance_cluster_time(cluster_time) - def _advance_operation_time(self, operation_time): + def _advance_operation_time(self, operation_time: Optional[Timestamp]) -> None: """Internal operation time helper.""" if self._operation_time is None: self._operation_time = operation_time @@ -893,7 +912,7 @@ def advance_operation_time(self, operation_time: Timestamp) -> None: raise TypeError("operation_time must be an instance of bson.timestamp.Timestamp") self._advance_operation_time(operation_time) - def _process_response(self, reply): + def _process_response(self, reply: Mapping[str, Any]) -> None: """Process a response to a command that was run with this session.""" self._advance_cluster_time(reply.get("$clusterTime")) self._advance_operation_time(reply.get("operationTime")) @@ -922,44 +941,51 @@ def in_transaction(self) -> bool: return self._transaction.active() @property - def _starting_transaction(self): + def _starting_transaction(self) -> bool: """True if this session is starting a multi-statement transaction.""" return self._transaction.starting() @property - def _pinned_address(self): + def _pinned_address(self) -> Optional[Tuple[str, Optional[int]]]: """The mongos address this transaction was created on.""" if self._transaction.active(): return self._transaction.pinned_address return None @property - def _pinned_connection(self): + def _pinned_connection(self) -> Optional[SocketInfo]: """The connection this transaction was started on.""" return self._transaction.pinned_conn - def _pin(self, server, sock_info): + def _pin(self, server: Server, sock_info: SocketInfo) -> None: """Pin this session to the given Server or to the given connection.""" self._transaction.pin(server, sock_info) - def _unpin(self): + def _unpin(self) -> None: """Unpin this session from any pinned Server.""" self._transaction.unpin() - def _txn_read_preference(self): + def _txn_read_preference(self) -> Optional[_ServerMode]: """Return read preference of this transaction or None.""" if self.in_transaction: + assert self._transaction.opts return self._transaction.opts.read_preference return None - def _materialize(self): + def _materialize(self) -> None: if isinstance(self._server_session, _EmptyServerSession): old = self._server_session self._server_session = self._client._topology.get_server_session() if old.started_retryable_write: self._server_session.inc_transaction_id() - def _apply_to(self, command, is_retryable, read_preference, sock_info): + def _apply_to( + self, + command: MutableMapping[str, Any], + is_retryable: bool, + read_preference: ReadPreference, + sock_info: SocketInfo, + ) -> None: self._check_ended() self._materialize() if self.options.snapshot: @@ -984,6 +1010,7 @@ def _apply_to(self, command, is_retryable, read_preference, sock_info): self._transaction.state = _TxnState.IN_PROGRESS command["startTransaction"] = True + assert self._transaction.opts 
if self._transaction.opts.read_concern: rc = self._transaction.opts.read_concern.document if rc: @@ -993,11 +1020,11 @@ def _apply_to(self, command, is_retryable, read_preference, sock_info): command["txnNumber"] = self._server_session.transaction_id command["autocommit"] = False - def _start_retryable_write(self): + def _start_retryable_write(self) -> None: self._check_ended() self._server_session.inc_transaction_id() - def _update_read_concern(self, cmd, sock_info): + def _update_read_concern(self, cmd: MutableMapping[str, Any], sock_info: SocketInfo) -> None: if self.options.causal_consistency and self.operation_time is not None: cmd.setdefault("readConcern", {})["afterClusterTime"] = self.operation_time if self.options.snapshot: @@ -1019,15 +1046,15 @@ def __init__(self): self.dirty = False self.started_retryable_write = False - def mark_dirty(self): + def mark_dirty(self) -> None: self.dirty = True - def inc_transaction_id(self): + def inc_transaction_id(self) -> None: self.started_retryable_write = True class _ServerSession: - def __init__(self, generation): + def __init__(self, generation: int): # Ensure id is type 4, regardless of CodecOptions.uuid_representation. self.session_id = {"id": Binary(uuid.uuid4().bytes, 4)} self.last_use = time.monotonic() @@ -1035,7 +1062,7 @@ def __init__(self, generation): self.dirty = False self.generation = generation - def mark_dirty(self): + def mark_dirty(self) -> None: """Mark this session as dirty. A server session is marked dirty when a command fails with a network @@ -1043,18 +1070,18 @@ def mark_dirty(self): """ self.dirty = True - def timed_out(self, session_timeout_minutes): + def timed_out(self, session_timeout_minutes: float) -> bool: idle_seconds = time.monotonic() - self.last_use # Timed out if we have less than a minute to live. return idle_seconds > (session_timeout_minutes - 1) * 60 @property - def transaction_id(self): + def transaction_id(self) -> Int64: """Positive 64-bit integer.""" return Int64(self._transaction_id) - def inc_transaction_id(self): + def inc_transaction_id(self) -> None: self._transaction_id += 1 @@ -1064,21 +1091,21 @@ class _ServerSessionPool(collections.deque): This class is not thread-safe, access it while holding the Topology lock. 
""" - def __init__(self, *args, **kwargs): + def __init__(self, *args: Any, **kwargs: Any): super().__init__(*args, **kwargs) self.generation = 0 - def reset(self): + def reset(self) -> None: self.generation += 1 self.clear() - def pop_all(self): + def pop_all(self) -> List[_ServerSession]: ids = [] while self: ids.append(self.pop().session_id) return ids - def get_server_session(self, session_timeout_minutes): + def get_server_session(self, session_timeout_minutes: float) -> _ServerSession: # Although the Driver Sessions Spec says we only clear stale sessions # in return_server_session, PyMongo can't take a lock when returning # sessions from a __del__ method (like in Cursor.__die), so it can't @@ -1094,20 +1121,22 @@ def get_server_session(self, session_timeout_minutes): return _ServerSession(self.generation) - def return_server_session(self, server_session, session_timeout_minutes): + def return_server_session( + self, server_session: _ServerSession, session_timeout_minutes: Optional[float] + ) -> None: if session_timeout_minutes is not None: self._clear_stale(session_timeout_minutes) if server_session.timed_out(session_timeout_minutes): return self.return_server_session_no_lock(server_session) - def return_server_session_no_lock(self, server_session): + def return_server_session_no_lock(self, server_session: _ServerSession) -> None: # Discard sessions from an old pool to avoid duplicate sessions in the # child process after a fork. if server_session.generation == self.generation and not server_session.dirty: self.appendleft(server_session) - def _clear_stale(self, session_timeout_minutes): + def _clear_stale(self, session_timeout_minutes: float) -> None: # Clear stale sessions. The least recently used are on the right. while self: if self[-1].timed_out(session_timeout_minutes): From cae124c32c6d1d9e1e32b5731fe23b511e9e45eb Mon Sep 17 00:00:00 2001 From: Noah Stapp Date: Wed, 28 Jun 2023 14:19:04 -0700 Subject: [PATCH 0453/1588] PYTHON-3588 Expose an API to create a cursor from a command response (#1263) --- .evergreen/resync-specs.sh | 5 +- CONTRIBUTING.rst | 3 +- doc/changelog.rst | 6 + pymongo/command_cursor.py | 21 +- pymongo/database.py | 109 ++- test/run_command/unified/runCommand.json | 635 +++++++++++++ .../run_command/unified/runCursorCommand.json | 877 ++++++++++++++++++ test/test_database.py | 20 + test/test_run_command.py | 17 + test/unified_format.py | 36 +- tox.ini | 2 + 11 files changed, 1723 insertions(+), 8 deletions(-) create mode 100644 test/run_command/unified/runCommand.json create mode 100644 test/run_command/unified/runCursorCommand.json create mode 100644 test/test_run_command.py diff --git a/.evergreen/resync-specs.sh b/.evergreen/resync-specs.sh index 817a2d96bc..a74a0125e6 100755 --- a/.evergreen/resync-specs.sh +++ b/.evergreen/resync-specs.sh @@ -56,7 +56,7 @@ cpjson () { cd "$SPECS"/source/$1 find . -name '*.json' | grep -Ev "${BLOCKLIST}" | cpio -pdm \ $PYMONGO/test/$2 - printf "\nIgnored files for ${PWD}\n" + printf "\nIgnored files for ${PWD}:\n" IGNORED_FILES="$(printf "\n%s\n" "$(diff <(find . -name '*.json' | sort) \ <(find . 
-name '*.json' | grep -Ev "${BLOCKLIST}" | sort))" | \ sed -e '/^[0-9]/d' | sed -e 's|< ./||g' )" @@ -126,6 +126,9 @@ do retryable-writes|retryable_writes) cpjson retryable-writes/tests/ retryable_writes ;; + run-command|run_command) + cpjson run-command/tests/ run_command + ;; sdam|SDAM|server-discovery-and-monitoring|discovery_and_monitoring) cpjson server-discovery-and-monitoring/tests/errors \ discovery_and_monitoring/errors diff --git a/CONTRIBUTING.rst b/CONTRIBUTING.rst index a457b3e4c3..a897d0e067 100644 --- a/CONTRIBUTING.rst +++ b/CONTRIBUTING.rst @@ -98,7 +98,8 @@ use the script provided in ``.evergreen/resync-specs.sh``.:: git clone git@github.com:mongodb/specifications.git export MDB_SPECS=~/specifications cd ~/mongo-python-driver/.evergreen - ./resync-specs.sh -b "connection-string*" crud bson-corpus + ./resync-specs.sh -b "" spec1 spec2 ... + ./resync-specs.sh -b "connection-string*" crud bson-corpus # Updates crud and bson-corpus specs while ignoring all files with the regex "connection-string*" cd .. The ``-b`` flag adds as a regex pattern to block files you do not wish to diff --git a/doc/changelog.rst b/doc/changelog.rst index eae105b617..b112d3bc08 100644 --- a/doc/changelog.rst +++ b/doc/changelog.rst @@ -1,6 +1,12 @@ Changelog ========= +Changes in Version 4.5 +----------------------- + +- Added :meth:`~pymongo.database.Database.cursor_command` + and :meth:`~pymongo.command_cursor.CommandCursor.try_next` to support executing an arbitrary command that returns a cursor. + Changes in Version 4.4 ----------------------- diff --git a/pymongo/command_cursor.py b/pymongo/command_cursor.py index c831dfb49b..7a2e528680 100644 --- a/pymongo/command_cursor.py +++ b/pymongo/command_cursor.py @@ -292,7 +292,7 @@ def next(self) -> _DocumentType: __next__ = next - def _try_next(self, get_more_allowed): + def _try_next(self, get_more_allowed: bool) -> Optional[_DocumentType]: """Advance the cursor blocking for at most one getMore command.""" if not len(self.__data) and not self.__killed and get_more_allowed: self._refresh() @@ -301,6 +301,25 @@ def _try_next(self, get_more_allowed): else: return None + def try_next(self) -> Optional[_DocumentType]: + """Advance the cursor without blocking indefinitely. + + This method returns the next document without waiting + indefinitely for data. + + If no document is cached locally then this method runs a single + getMore command. If the getMore yields any documents, the next + document is returned, otherwise, if the getMore returns no documents + (because there is no additional data) then ``None`` is returned. + + :Returns: + The next document or ``None`` when no document is available + after running a single getMore or when the cursor is closed. + + .. 
versionadded:: 4.5 + """ + return self._try_next(get_more_allowed=True) + def __enter__(self) -> "CommandCursor[_DocumentType]": return self diff --git a/pymongo/database.py b/pymongo/database.py index 1fa9913c60..7829c28fe2 100644 --- a/pymongo/database.py +++ b/pymongo/database.py @@ -42,7 +42,7 @@ from pymongo.collection import Collection from pymongo.command_cursor import CommandCursor from pymongo.common import _ecoc_coll_name, _esc_coll_name -from pymongo.errors import CollectionInvalid, InvalidName +from pymongo.errors import CollectionInvalid, InvalidName, InvalidOperation from pymongo.read_preferences import ReadPreference, _ServerMode from pymongo.typings import _CollationIn, _DocumentType, _DocumentTypeArg, _Pipeline @@ -833,6 +833,113 @@ def command( **kwargs, ) + @_csot.apply + def cursor_command( + self, + command: Union[str, MutableMapping[str, Any]], + value: Any = 1, + check: bool = True, + allowable_errors: Optional[Sequence[Union[str, int]]] = None, + read_preference: Optional[_ServerMode] = None, + codec_options: Optional[bson.codec_options.CodecOptions[_CodecDocumentType]] = None, + session: Optional[ClientSession] = None, + comment: Optional[Any] = None, + batch_size: Optional[int] = None, + max_time_ms: Optional[int] = None, + **kwargs: Any, + ) -> CommandCursor: + """Issue a MongoDB command and parse the response as a cursor. + + If the response from the server does not include a cursor field, an error will be thrown. + + Otherwise, behaves identically to issuing a normal MongoDB command. + + :Parameters: + - `command`: document representing the command to be issued, + or the name of the command (for simple commands only). + + .. note:: the order of keys in the `command` document is + significant (the "verb" must come first), so commands + which require multiple keys (e.g. `findandmodify`) + should use an instance of :class:`~bson.son.SON` or + a string and kwargs instead of a Python `dict`. + + - `value` (optional): value to use for the command verb when + `command` is passed as a string + - `check` (optional): check the response for errors, raising + :class:`~pymongo.errors.OperationFailure` if there are any + - `allowable_errors`: if `check` is ``True``, error messages + in this list will be ignored by error-checking + - `read_preference` (optional): The read preference for this + operation. See :mod:`~pymongo.read_preferences` for options. + If the provided `session` is in a transaction, defaults to the + read preference configured for the transaction. + Otherwise, defaults to + :attr:`~pymongo.read_preferences.ReadPreference.PRIMARY`. + - `codec_options`: A :class:`~bson.codec_options.CodecOptions` + instance. + - `session` (optional): A + :class:`~pymongo.client_session.ClientSession`. + - `comment` (optional): A user-provided comment to attach to this + command. + - `**kwargs` (optional): additional keyword arguments will + be added to the command document before it is sent + + + .. note:: :meth:`command` does **not** obey this Database's + :attr:`read_preference` or :attr:`codec_options`. You must use the + ``read_preference`` and ``codec_options`` parameters instead. + + .. note:: :meth:`command` does **not** apply any custom TypeDecoders + when decoding the command response. + + .. note:: If this client has been configured to use MongoDB Stable + API (see :ref:`versioned-api-ref`), then :meth:`command` will + automatically add API versioning options to the given command. 
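# A hedged usage sketch for the cursor_command API added in this patch: it
# runs a `find` command directly and drains the resulting CommandCursor with
# try_next(). The connection details and collection name are placeholders.
from pymongo import MongoClient

client = MongoClient()  # assumes a local mongod
db = client.db

cursor = db.cursor_command({"find": "collection", "filter": {}}, batch_size=10)
while True:
    doc = cursor.try_next()  # at most one getMore; None when exhausted
    if doc is None:
        break
    print(doc)

client.close()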
+ Explicitly adding API versioning options in the command and + declaring an API version on the client is not supported. + + .. seealso:: The MongoDB documentation on `commands `_. + """ + with self.__client._tmp_session(session, close=False) as tmp_session: + opts = codec_options or DEFAULT_CODEC_OPTIONS + + if read_preference is None: + read_preference = ( + tmp_session and tmp_session._txn_read_preference() + ) or ReadPreference.PRIMARY + with self.__client._socket_for_reads(read_preference, tmp_session) as ( + sock_info, + read_preference, + ): + response = self._command( + sock_info, + command, + value, + check, + allowable_errors, + read_preference, + opts, + session=tmp_session, + **kwargs, + ) + coll = self.get_collection("$cmd", read_preference=read_preference) + if response.get("cursor"): + cmd_cursor = CommandCursor( + coll, + response["cursor"], + sock_info.address, + batch_size=batch_size or 0, + max_await_time_ms=max_time_ms, + session=tmp_session, + explicit_session=session is not None, + comment=comment, + ) + cmd_cursor._maybe_pin_connection(sock_info) + return cmd_cursor + else: + raise InvalidOperation("Command does not return a cursor.") + def _retryable_read_command( self, command, diff --git a/test/run_command/unified/runCommand.json b/test/run_command/unified/runCommand.json new file mode 100644 index 0000000000..007e514bd7 --- /dev/null +++ b/test/run_command/unified/runCommand.json @@ -0,0 +1,635 @@ +{ + "description": "runCommand", + "schemaVersion": "1.3", + "createEntities": [ + { + "client": { + "id": "client", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client", + "databaseName": "db" + } + }, + { + "collection": { + "id": "collection", + "database": "db", + "collectionName": "collection" + } + }, + { + "database": { + "id": "dbWithRC", + "client": "client", + "databaseName": "dbWithRC", + "databaseOptions": { + "readConcern": { + "level": "local" + } + } + } + }, + { + "database": { + "id": "dbWithWC", + "client": "client", + "databaseName": "dbWithWC", + "databaseOptions": { + "writeConcern": { + "w": 0 + } + } + } + }, + { + "session": { + "id": "session", + "client": "client" + } + }, + { + "client": { + "id": "clientWithStableApi", + "observeEvents": [ + "commandStartedEvent" + ], + "serverApi": { + "version": "1", + "strict": true + } + } + }, + { + "database": { + "id": "dbWithStableApi", + "client": "clientWithStableApi", + "databaseName": "dbWithStableApi" + } + } + ], + "initialData": [ + { + "collectionName": "collection", + "databaseName": "db", + "documents": [] + } + ], + "tests": [ + { + "description": "always attaches $db and implicit lsid to given command and omits default readPreference", + "operations": [ + { + "name": "runCommand", + "object": "db", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectResult": { + "ok": 1 + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1, + "$db": "db", + "lsid": { + "$$exists": true + }, + "$readPreference": { + "$$exists": false + } + }, + "commandName": "ping" + } + } + ] + } + ] + }, + { + "description": "always gossips the $clusterTime on the sent command", + "runOnRequirements": [ + { + "topologies": [ + "replicaset", + "sharded" + ] + } + ], + "operations": [ + { + "name": "runCommand", + "object": "db", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + 
"expectResult": { + "ok": 1 + } + }, + { + "name": "runCommand", + "object": "db", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectResult": { + "ok": 1 + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "ping" + } + }, + { + "commandStartedEvent": { + "command": { + "ping": 1, + "$clusterTime": { + "$$exists": true + } + }, + "commandName": "ping" + } + } + ] + } + ] + }, + { + "description": "attaches the provided session lsid to given command", + "operations": [ + { + "name": "runCommand", + "object": "db", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + }, + "session": "session" + }, + "expectResult": { + "ok": 1 + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1, + "lsid": { + "$$sessionLsid": "session" + }, + "$db": "db" + }, + "commandName": "ping" + } + } + ] + } + ] + }, + { + "description": "attaches the provided $readPreference to given command", + "runOnRequirements": [ + { + "topologies": [ + "replicaset", + "sharded-replicaset", + "load-balanced", + "sharded" + ] + } + ], + "operations": [ + { + "name": "runCommand", + "object": "db", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + }, + "readPreference": { + "mode": "nearest" + } + }, + "expectResult": { + "ok": 1 + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1, + "$readPreference": { + "mode": "nearest" + }, + "$db": "db" + }, + "commandName": "ping" + } + } + ] + } + ] + }, + { + "description": "does not attach $readPreference to given command on standalone", + "runOnRequirements": [ + { + "topologies": [ + "single" + ] + } + ], + "operations": [ + { + "name": "runCommand", + "object": "db", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + }, + "readPreference": { + "mode": "nearest" + } + }, + "expectResult": { + "ok": 1 + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1, + "$readPreference": { + "$$exists": false + }, + "$db": "db" + }, + "commandName": "ping" + } + } + ] + } + ] + }, + { + "description": "does not attach primary $readPreference to given command", + "operations": [ + { + "name": "runCommand", + "object": "db", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + }, + "readPreference": { + "mode": "primary" + } + }, + "expectResult": { + "ok": 1 + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1, + "$readPreference": { + "$$exists": false + }, + "$db": "db" + }, + "commandName": "ping" + } + } + ] + } + ] + }, + { + "description": "does not inherit readConcern specified at the db level", + "operations": [ + { + "name": "runCommand", + "object": "dbWithRC", + "arguments": { + "commandName": "aggregate", + "command": { + "aggregate": "collection", + "pipeline": [], + "cursor": {} + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "collection", + "readConcern": { + "$$exists": false + }, + "$db": "dbWithRC" + }, + "commandName": "aggregate" + } + } + ] + } + ] + }, + { + "description": "does not inherit writeConcern specified at the db level", + "operations": [ + { + "name": "runCommand", + "object": "dbWithWC", + 
"arguments": { + "commandName": "insert", + "command": { + "insert": "collection", + "documents": [ + { + "foo": "bar" + } + ], + "ordered": true + } + }, + "expectResult": { + "ok": 1 + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "collection", + "writeConcern": { + "$$exists": false + }, + "$db": "dbWithWC" + }, + "commandName": "insert" + } + } + ] + } + ] + }, + { + "description": "does not retry retryable errors on given command", + "runOnRequirements": [ + { + "minServerVersion": "4.2" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "ping" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "db", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectError": { + "isClientError": true + } + } + ] + }, + { + "description": "attaches transaction fields to given command", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "replicaset" + ] + }, + { + "minServerVersion": "4.2", + "topologies": [ + "sharded-replicaset", + "load-balanced" + ] + } + ], + "operations": [ + { + "name": "withTransaction", + "object": "session", + "arguments": { + "callback": [ + { + "name": "runCommand", + "object": "db", + "arguments": { + "session": "session", + "commandName": "insert", + "command": { + "insert": "collection", + "documents": [ + { + "foo": "transaction" + } + ], + "ordered": true + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "collection", + "documents": [ + { + "foo": "transaction" + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session" + }, + "txnNumber": 1, + "startTransaction": true, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "db" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session" + }, + "txnNumber": 1, + "autocommit": false, + "writeConcern": { + "$$exists": false + }, + "readConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ] + }, + { + "description": "attaches apiVersion fields to given command when stableApi is configured on the client", + "runOnRequirements": [ + { + "minServerVersion": "5.0" + } + ], + "operations": [ + { + "name": "runCommand", + "object": "dbWithStableApi", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectResult": { + "ok": 1 + } + } + ], + "expectEvents": [ + { + "client": "clientWithStableApi", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1, + "$db": "dbWithStableApi", + "apiVersion": "1", + "apiStrict": true, + "apiDeprecationErrors": { + "$$unsetOrMatches": false + } + }, + "commandName": "ping" + } + } + ] + } + ] + } + ] +} diff --git a/test/run_command/unified/runCursorCommand.json b/test/run_command/unified/runCursorCommand.json new file mode 100644 index 0000000000..4f1ec8a01a --- /dev/null +++ b/test/run_command/unified/runCursorCommand.json @@ -0,0 +1,877 @@ +{ + 
"description": "runCursorCommand", + "schemaVersion": "1.9", + "createEntities": [ + { + "client": { + "id": "client", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent", + "connectionReadyEvent", + "connectionCheckedOutEvent", + "connectionCheckedInEvent" + ] + } + }, + { + "session": { + "id": "session", + "client": "client" + } + }, + { + "database": { + "id": "db", + "client": "client", + "databaseName": "db" + } + }, + { + "collection": { + "id": "collection", + "database": "db", + "collectionName": "collection" + } + } + ], + "initialData": [ + { + "collectionName": "collection", + "databaseName": "db", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + }, + { + "_id": 5, + "x": 55 + } + ] + } + ], + "tests": [ + { + "description": "successfully executes checkMetadataConsistency cursor creating command", + "runOnRequirements": [ + { + "minServerVersion": "7.0", + "topologies": [ + "sharded" + ] + } + ], + "operations": [ + { + "name": "runCursorCommand", + "object": "db", + "arguments": { + "commandName": "checkMetadataConsistency", + "command": { + "checkMetadataConsistency": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "command", + "events": [ + { + "commandStartedEvent": { + "command": { + "checkMetadataConsistency": 1, + "$db": "db", + "lsid": { + "$$exists": true + } + }, + "commandName": "checkMetadataConsistency" + } + } + ] + } + ] + }, + { + "description": "errors if the command response is not a cursor", + "operations": [ + { + "name": "createCommandCursor", + "object": "db", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectError": { + "isClientError": true + } + } + ] + }, + { + "description": "creates an implicit session that is reused across getMores", + "operations": [ + { + "name": "runCursorCommand", + "object": "db", + "arguments": { + "commandName": "find", + "command": { + "find": "collection", + "batchSize": 2 + } + }, + "expectResult": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + }, + { + "_id": 5, + "x": 55 + } + ] + }, + { + "name": "assertSameLsidOnLastTwoCommands", + "object": "testRunner", + "arguments": { + "client": "client" + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "command", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "collection", + "batchSize": 2, + "$db": "db", + "lsid": { + "$$exists": true + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "collection": "collection", + "$db": "db", + "lsid": { + "$$exists": true + } + }, + "commandName": "getMore" + } + } + ] + } + ] + }, + { + "description": "accepts an explicit session that is reused across getMores", + "operations": [ + { + "name": "runCursorCommand", + "object": "db", + "arguments": { + "commandName": "find", + "session": "session", + "command": { + "find": "collection", + "batchSize": 2 + } + }, + "expectResult": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + }, + { + "_id": 5, + "x": 55 + } + ] + }, + { + "name": "assertSameLsidOnLastTwoCommands", + "object": "testRunner", + "arguments": { + "client": "client" + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "command", + "events": [ + 
{ + "commandStartedEvent": { + "command": { + "find": "collection", + "batchSize": 2, + "$db": "db", + "lsid": { + "$$sessionLsid": "session" + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "collection": "collection", + "$db": "db", + "lsid": { + "$$sessionLsid": "session" + } + }, + "commandName": "getMore" + } + } + ] + } + ] + }, + { + "description": "returns pinned connections to the pool when the cursor is exhausted", + "runOnRequirements": [ + { + "topologies": [ + "load-balanced" + ] + } + ], + "operations": [ + { + "name": "createCommandCursor", + "object": "db", + "arguments": { + "commandName": "find", + "batchSize": 2, + "session": "session", + "command": { + "find": "collection", + "batchSize": 2 + } + }, + "saveResultAsEntity": "cursor" + }, + { + "name": "assertNumberConnectionsCheckedOut", + "object": "testRunner", + "arguments": { + "client": "client", + "connections": 1 + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "cursor", + "expectResult": { + "_id": 1, + "x": 11 + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "cursor", + "expectResult": { + "_id": 2, + "x": 22 + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "cursor", + "expectResult": { + "_id": 3, + "x": 33 + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "cursor", + "expectResult": { + "_id": 4, + "x": 44 + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "cursor", + "expectResult": { + "_id": 5, + "x": 55 + } + }, + { + "name": "assertNumberConnectionsCheckedOut", + "object": "testRunner", + "arguments": { + "client": "client", + "connections": 0 + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "command", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "collection", + "batchSize": 2, + "$db": "db", + "lsid": { + "$$sessionLsid": "session" + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "collection": "collection", + "$db": "db", + "lsid": { + "$$sessionLsid": "session" + } + }, + "commandName": "getMore" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "collection": "collection", + "$db": "db", + "lsid": { + "$$sessionLsid": "session" + } + }, + "commandName": "getMore" + } + } + ] + }, + { + "client": "client", + "eventType": "cmap", + "events": [ + { + "connectionReadyEvent": {} + }, + { + "connectionCheckedOutEvent": {} + }, + { + "connectionCheckedInEvent": {} + } + ] + } + ] + }, + { + "description": "returns pinned connections to the pool when the cursor is closed", + "runOnRequirements": [ + { + "topologies": [ + "load-balanced" + ] + } + ], + "operations": [ + { + "name": "createCommandCursor", + "object": "db", + "arguments": { + "commandName": "find", + "command": { + "find": "collection", + "batchSize": 2 + } + }, + "saveResultAsEntity": "cursor" + }, + { + "name": "assertNumberConnectionsCheckedOut", + "object": "testRunner", + "arguments": { + "client": "client", + "connections": 1 + } + }, + { + "name": "close", + "object": "cursor" + }, + { + "name": "assertNumberConnectionsCheckedOut", + "object": "testRunner", + "arguments": { + "client": "client", + "connections": 0 + } + } + ] + }, + { + "description": "supports configuring getMore batchSize", + "operations": [ + { + "name": "runCursorCommand", + "object": 
"db", + "arguments": { + "commandName": "find", + "batchSize": 5, + "command": { + "find": "collection", + "batchSize": 1 + } + }, + "expectResult": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + }, + { + "_id": 5, + "x": 55 + } + ] + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "command", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "collection", + "batchSize": 1, + "$db": "db", + "lsid": { + "$$exists": true + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "collection": "collection", + "batchSize": 5, + "$db": "db", + "lsid": { + "$$exists": true + } + }, + "commandName": "getMore" + } + } + ] + } + ] + }, + { + "description": "supports configuring getMore maxTimeMS", + "operations": [ + { + "name": "runCursorCommand", + "object": "db", + "arguments": { + "commandName": "find", + "maxTimeMS": 300, + "command": { + "find": "collection", + "maxTimeMS": 200, + "batchSize": 1 + } + }, + "ignoreResultAndError": true + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "command", + "ignoreExtraEvents": true, + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "collection", + "maxTimeMS": 200, + "batchSize": 1, + "$db": "db", + "lsid": { + "$$exists": true + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "collection": "collection", + "$db": "db", + "maxTimeMS": 300, + "lsid": { + "$$exists": true + } + }, + "commandName": "getMore" + } + } + ] + } + ] + }, + { + "description": "supports configuring getMore comment", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "operations": [ + { + "name": "runCursorCommand", + "object": "db", + "arguments": { + "commandName": "find", + "comment": { + "hello": "getMore" + }, + "command": { + "find": "collection", + "batchSize": 1, + "comment": { + "hello": "find" + } + } + }, + "expectResult": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + }, + { + "_id": 5, + "x": 55 + } + ] + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "command", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "collection", + "batchSize": 1, + "comment": { + "hello": "find" + }, + "$db": "db", + "lsid": { + "$$exists": true + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "collection": "collection", + "comment": { + "hello": "getMore" + }, + "$db": "db", + "lsid": { + "$$exists": true + } + }, + "commandName": "getMore" + } + } + ] + } + ] + }, + { + "description": "does not close the cursor when receiving an empty batch", + "runOnRequirements": [ + { + "serverless": "forbid" + } + ], + "operations": [ + { + "name": "dropCollection", + "object": "db", + "arguments": { + "collection": "cappedCollection" + } + }, + { + "name": "createCollection", + "object": "db", + "arguments": { + "collection": "cappedCollection", + "capped": true, + "size": 4096, + "max": 3 + }, + "saveResultAsEntity": "cappedCollection" + }, + { + "name": "insertMany", + "object": "cappedCollection", + "arguments": { + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + }, + { + "name": 
"createCommandCursor", + "object": "db", + "arguments": { + "cursorType": "tailable", + "commandName": "find", + "batchSize": 2, + "command": { + "find": "cappedCollection", + "tailable": true + } + }, + "saveResultAsEntity": "cursor" + }, + { + "name": "iterateOnce", + "object": "cursor" + }, + { + "name": "iterateOnce", + "object": "cursor" + }, + { + "name": "iterateOnce", + "object": "cursor" + }, + { + "name": "close", + "object": "cursor" + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "command", + "events": [ + { + "commandStartedEvent": { + "command": { + "drop": "cappedCollection" + }, + "commandName": "drop" + } + }, + { + "commandStartedEvent": { + "command": { + "create": "cappedCollection" + }, + "commandName": "create" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "cappedCollection" + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "cappedCollection", + "$db": "db", + "lsid": { + "$$exists": true + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "collection": "cappedCollection", + "$db": "db", + "lsid": { + "$$exists": true + } + }, + "commandName": "getMore" + } + }, + { + "commandStartedEvent": { + "command": { + "killCursors": "cappedCollection", + "cursors": { + "$$type": "array" + } + }, + "commandName": "killCursors" + } + } + ] + } + ] + } + ] +} diff --git a/test/test_database.py b/test/test_database.py index 140d169db3..041b339e6a 100644 --- a/test/test_database.py +++ b/test/test_database.py @@ -18,6 +18,8 @@ import sys from typing import Any, Iterable, List, Mapping, Union +from pymongo.command_cursor import CommandCursor + sys.path[0:0] = [""] from test import IntegrationTest, client_context, unittest @@ -42,6 +44,7 @@ CollectionInvalid, ExecutionTimeout, InvalidName, + InvalidOperation, OperationFailure, WriteConcernError, ) @@ -407,6 +410,23 @@ def test_command_with_regex(self): for doc in result["cursor"]["firstBatch"]: self.assertTrue(isinstance(doc["r"], Regex)) + def test_cursor_command(self): + db = self.client.pymongo_test + db.test.drop() + + docs = [{"_id": i, "doc": i} for i in range(3)] + db.test.insert_many(docs) + + cursor = db.cursor_command("find", "test") + + self.assertIsInstance(cursor, CommandCursor) + + result_docs = list(cursor) + self.assertEqual(docs, result_docs) + + def test_cursor_command_invalid(self): + self.assertRaises(InvalidOperation, self.db.cursor_command, "usersInfo", "test") + def test_password_digest(self): self.assertRaises(TypeError, auth._password_digest, 5) self.assertRaises(TypeError, auth._password_digest, True) diff --git a/test/test_run_command.py b/test/test_run_command.py new file mode 100644 index 0000000000..848fd2cb92 --- /dev/null +++ b/test/test_run_command.py @@ -0,0 +1,17 @@ +import os +import unittest +from test.unified_format import generate_test_classes + +_TEST_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "run_command") + + +globals().update( + generate_test_classes( + os.path.join(_TEST_PATH, "unified"), + module=__name__, + ) +) + + +if __name__ == "__main__": + unittest.main() diff --git a/test/unified_format.py b/test/unified_format.py index 90cb442b28..72db9e7d47 100644 --- a/test/unified_format.py +++ b/test/unified_format.py @@ -64,10 +64,11 @@ from bson.objectid import ObjectId from bson.regex import RE_TYPE, Regex from gridfs import GridFSBucket, GridOut -from pymongo import ASCENDING, 
MongoClient, _csot +from pymongo import ASCENDING, CursorType, MongoClient, _csot from pymongo.change_stream import ChangeStream from pymongo.client_session import ClientSession, TransactionOptions, _TxnState from pymongo.collection import Collection +from pymongo.command_cursor import CommandCursor from pymongo.database import Database from pymongo.encryption import ClientEncryption from pymongo.encryption_options import _HAVE_PYMONGOCRYPT @@ -1087,6 +1088,31 @@ def _databaseOperation_runCommand(self, target, **kwargs): kwargs["command"] = ordered_command return target.command(**kwargs) + def _databaseOperation_runCursorCommand(self, target, **kwargs): + return list(self._databaseOperation_createCommandCursor(target, **kwargs)) + + def _databaseOperation_createCommandCursor(self, target, **kwargs): + self.__raise_if_unsupported("createCommandCursor", target, Database) + # Ensure the first key is the command name. + ordered_command = SON([(kwargs.pop("command_name"), 1)]) + ordered_command.update(kwargs["command"]) + kwargs["command"] = ordered_command + + cursor_type = kwargs.pop("cursor_type", "nonTailable") + if cursor_type == CursorType.TAILABLE: + ordered_command["tailable"] = True + elif cursor_type == CursorType.TAILABLE_AWAIT: + ordered_command["tailable"] = True + ordered_command["awaitData"] = True + elif cursor_type != "nonTailable": + self.fail(f"unknown cursorType: {cursor_type}") + + if "maxTimeMS" in kwargs: + kwargs["max_time_ms"] = kwargs["maxTimeMS"] + del kwargs["maxTimeMS"] + + return target.cursor_command(**kwargs) + def _databaseOperation_listCollections(self, target, *args, **kwargs): if "batch_size" in kwargs: kwargs["cursor"] = {"batchSize": kwargs.pop("batch_size")} @@ -1150,7 +1176,9 @@ def _changeStreamOperation_iterateUntilDocumentOrError(self, target, *args, **kw return next(target) def _cursor_iterateUntilDocumentOrError(self, target, *args, **kwargs): - self.__raise_if_unsupported("iterateUntilDocumentOrError", target, NonLazyCursor) + self.__raise_if_unsupported( + "iterateUntilDocumentOrError", target, NonLazyCursor, CommandCursor + ) while target.alive: try: return next(target) @@ -1159,7 +1187,7 @@ def _cursor_iterateUntilDocumentOrError(self, target, *args, **kwargs): return None def _cursor_close(self, target, *args, **kwargs): - self.__raise_if_unsupported("close", target, NonLazyCursor) + self.__raise_if_unsupported("close", target, NonLazyCursor, CommandCursor) return target.close() def _clientEncryptionOperation_createDataKey(self, target, *args, **kwargs): @@ -1250,7 +1278,7 @@ def run_entity_operation(self, spec): doc.setdefault("metadata", {})["contentType"] = doc.pop("contentType") elif isinstance(target, ChangeStream): method_name = f"_changeStreamOperation_{opname}" - elif isinstance(target, NonLazyCursor): + elif isinstance(target, (NonLazyCursor, CommandCursor)): method_name = f"_cursor_{opname}" elif isinstance(target, ClientSession): method_name = f"_sessionOperation_{opname}" diff --git a/tox.ini b/tox.ini index bdabf17700..240126f8a5 100644 --- a/tox.ini +++ b/tox.ini @@ -91,6 +91,8 @@ commands = [testenv:typecheck] description = run mypy and pyright to typecheck +extras = + {[testenv:typecheck-mypy]extras} deps = {[testenv:typecheck-mypy]deps} {[testenv:typecheck-pyright]deps} From e56c08afccde4387d3bb24043098ff1626703a78 Mon Sep 17 00:00:00 2001 From: Iris <58442094+sleepyStick@users.noreply.github.com> Date: Wed, 28 Jun 2023 15:25:46 -0700 Subject: [PATCH 0454/1588] PYTHON-3770 add types to auth_oidc.py (#1265) --- 
pymongo/auth_oidc.py | 42 ++++++++++++++++++++++++++++-------------- pymongo/pool.py | 1 + 2 files changed, 29 insertions(+), 14 deletions(-) diff --git a/pymongo/auth_oidc.py b/pymongo/auth_oidc.py index 543dc0200d..a3afbdb3fe 100644 --- a/pymongo/auth_oidc.py +++ b/pymongo/auth_oidc.py @@ -13,11 +13,13 @@ # limitations under the License. """MONGODB-OIDC Authentication helpers.""" +from __future__ import annotations + import os import threading from dataclasses import dataclass, field from datetime import datetime, timedelta, timezone -from typing import Callable, Dict, List, Optional +from typing import TYPE_CHECKING, Any, Callable, Dict, List, Mapping, Optional, Tuple import bson from bson.binary import Binary @@ -25,6 +27,10 @@ from pymongo.errors import ConfigurationError, OperationFailure from pymongo.helpers import _REAUTHENTICATION_REQUIRED_CODE +if TYPE_CHECKING: + from pymongo.auth import MongoCredential + from pymongo.pool import SocketInfo + @dataclass class _OIDCProperties: @@ -44,7 +50,9 @@ class _OIDCProperties: _CACHE: Dict[str, "_OIDCAuthenticator"] = {} -def _get_authenticator(credentials, address): +def _get_authenticator( + credentials: MongoCredential, address: Tuple[str, int] +) -> _OIDCAuthenticator: # Clear out old items in the cache. now_utc = datetime.now(timezone.utc) to_remove = [] @@ -81,7 +89,7 @@ def _get_authenticator(credentials, address): return _CACHE[cache_key] -def _get_cache_exp(): +def _get_cache_exp() -> datetime: return datetime.now(timezone.utc) + timedelta(minutes=CACHE_TIMEOUT_MINUTES) @@ -98,7 +106,7 @@ class _OIDCAuthenticator: cache_exp_utc: datetime = field(default_factory=_get_cache_exp) lock: threading.Lock = field(default_factory=threading.Lock) - def get_current_token(self, use_callbacks=True): + def get_current_token(self, use_callbacks: bool = True) -> Optional[str]: properties = self.properties request_cb = properties.request_token_callback @@ -116,16 +124,15 @@ def get_current_token(self, use_callbacks=True): current_valid_token = True timeout = CALLBACK_TIMEOUT_SECONDS - if not use_callbacks and not current_valid_token: return None if not current_valid_token and request_cb is not None: - prev_token = self.idp_resp and self.idp_resp["access_token"] + prev_token = self.idp_resp["access_token"] if self.idp_resp else None with self.lock: # See if the token was changed while we were waiting for the # lock. - new_token = self.idp_resp and self.idp_resp["access_token"] + new_token = self.idp_resp["access_token"] if self.idp_resp else None if new_token != prev_token: return new_token @@ -173,14 +180,14 @@ def get_current_token(self, use_callbacks=True): return token - def auth_start_cmd(self, use_callbacks=True): + def auth_start_cmd(self, use_callbacks: bool = True) -> Optional[SON[str, Any]]: properties = self.properties # Handle aws provider credentials. 
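         # When the provider is AWS, no user callback is invoked: the signed
         # JWT is read from the file named by the AWS_WEB_IDENTITY_TOKEN_FILE
         # environment variable and sent as the authentication payload below.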
if properties.provider_name == "aws": aws_identity_file = os.environ["AWS_WEB_IDENTITY_TOKEN_FILE"] with open(aws_identity_file) as fid: - token = fid.read().strip() + token: Optional[str] = fid.read().strip() payload = {"jwt": token} cmd = SON( [ @@ -230,14 +237,16 @@ def auth_start_cmd(self, use_callbacks=True): ] ) - def clear(self): + def clear(self) -> None: self.idp_info = None self.idp_resp = None self.token_exp_utc = None - def run_command(self, sock_info, cmd): + def run_command( + self, sock_info: SocketInfo, cmd: Mapping[str, Any] + ) -> Optional[Mapping[str, Any]]: try: - return sock_info.command("$external", cmd, no_reauth=True) + return sock_info.command("$external", cmd, no_reauth=True) # type: ignore[call-arg] except OperationFailure as exc: self.clear() if exc.code == _REAUTHENTICATION_REQUIRED_CODE: @@ -247,7 +256,9 @@ def run_command(self, sock_info, cmd): return self.authenticate(sock_info, reauthenticate=True) raise - def authenticate(self, sock_info, reauthenticate=False): + def authenticate( + self, sock_info: SocketInfo, reauthenticate: bool = False + ) -> Optional[Mapping[str, Any]]: if reauthenticate: prev_id = getattr(sock_info, "oidc_token_gen_id", None) # Check if we've already changed tokens. @@ -264,6 +275,7 @@ def authenticate(self, sock_info, reauthenticate=False): resp = ctx.speculative_authenticate else: cmd = self.auth_start_cmd() + assert cmd is not None resp = self.run_command(sock_info, cmd) if resp["done"]: @@ -293,7 +305,9 @@ def authenticate(self, sock_info, reauthenticate=False): return resp -def _authenticate_oidc(credentials, sock_info, reauthenticate): +def _authenticate_oidc( + credentials: MongoCredential, sock_info: SocketInfo, reauthenticate: bool +) -> Optional[Mapping[str, Any]]: """Authenticate using MONGODB-OIDC.""" authenticator = _get_authenticator(credentials, sock_info.address) return authenticator.authenticate(sock_info, reauthenticate=reauthenticate) diff --git a/pymongo/pool.py b/pymongo/pool.py index 2b498078c2..a827d10f9c 100644 --- a/pymongo/pool.py +++ b/pymongo/pool.py @@ -647,6 +647,7 @@ def __init__(self, sock, pool, address, id): self.compression_settings = pool.opts._compression_settings self.compression_context = None self.socket_checker = SocketChecker() + self.oidc_token_gen_id = None # Support for mechanism negotiation on the initial handshake. self.negotiated_mechs = None self.auth_ctx = None From 46276439ac702d5654a74a4ac5b039a2fffd4632 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Fri, 30 Jun 2023 17:02:36 -0400 Subject: [PATCH 0455/1588] PYTHON-3796 Fix typo in docs and reformat using blacken-docs (#1284) --- doc/examples/encryption.rst | 307 ++++++++++++++++++++---------------- 1 file changed, 174 insertions(+), 133 deletions(-) diff --git a/doc/examples/encryption.rst b/doc/examples/encryption.rst index 52fc548285..ecee03180c 100644 --- a/doc/examples/encryption.rst +++ b/doc/examples/encryption.rst @@ -146,21 +146,19 @@ the client into sending unencrypted data that should be encrypted. JSON Schemas supplied in the ``schema_map`` only apply to configuring automatic client-side field level encryption. Other validation rules in the JSON schema will not be enforced by the driver and -will result in an error.:: +will result in an error. - import os +.. 
code-block:: python + import os from bson.codec_options import CodecOptions from bson import json_util - from pymongo import MongoClient - from pymongo.encryption import (Algorithm, - ClientEncryption) + from pymongo.encryption import Algorithm, ClientEncryption from pymongo.encryption_options import AutoEncryptionOpts - def create_json_schema_file(kms_providers, key_vault_namespace, - key_vault_client): + def create_json_schema_file(kms_providers, key_vault_namespace, key_vault_client): client_encryption = ClientEncryption( kms_providers, key_vault_namespace, @@ -170,31 +168,33 @@ will result in an error.:: # on MongoClient, Database, or Collection. We will not be calling # encrypt() or decrypt() in this example so we can use any # CodecOptions. - CodecOptions()) + CodecOptions(), + ) # Create a new data key and json schema for the encryptedField. # https://dochub.mongodb.org/core/client-side-field-level-encryption-automatic-encryption-rules data_key_id = client_encryption.create_data_key( - 'local', key_alt_names=['pymongo_encryption_example_1']) + "local", key_alt_names=["pymongo_encryption_example_1"] + ) schema = { "properties": { "encryptedField": { "encrypt": { "keyId": [data_key_id], "bsonType": "string", - "algorithm": - Algorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic + "algorithm": Algorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic, } } }, - "bsonType": "object" + "bsonType": "object", } # Use CANONICAL_JSON_OPTIONS so that other drivers and tools will be # able to parse the MongoDB extended JSON file. json_schema_string = json_util.dumps( - schema, json_options=json_util.CANONICAL_JSON_OPTIONS) + schema, json_options=json_util.CANONICAL_JSON_OPTIONS + ) - with open('jsonSchema.json', 'w') as file: + with open("jsonSchema.json", "w") as file: file.write(json_schema_string) @@ -221,19 +221,20 @@ will result in an error.:: key_vault.create_index( "keyAltNames", unique=True, - partialFilterExpression={"keyAltNames": {"$exists": True}}) + partialFilterExpression={"keyAltNames": {"$exists": True}}, + ) - create_json_schema_file( - kms_providers, key_vault_namespace, key_vault_client) + create_json_schema_file(kms_providers, key_vault_namespace, key_vault_client) # Load the JSON Schema and construct the local schema_map option. - with open('jsonSchema.json', 'r') as file: + with open("jsonSchema.json", "r") as file: json_schema_string = file.read() json_schema = json_util.loads(json_schema_string) schema_map = {encrypted_namespace: json_schema} auto_encryption_opts = AutoEncryptionOpts( - kms_providers, key_vault_namespace, schema_map=schema_map) + kms_providers, key_vault_namespace, schema_map=schema_map + ) client = MongoClient(auto_encryption_opts=auto_encryption_opts) db_name, coll_name = encrypted_namespace.split(".", 1) @@ -242,14 +243,15 @@ will result in an error.:: coll.drop() coll.insert_one({"encryptedField": "123456789"}) - print('Decrypted document: %s' % (coll.find_one(),)) + print("Decrypted document: %s" % (coll.find_one(),)) unencrypted_coll = MongoClient()[db_name][coll_name] - print('Encrypted document: %s' % (unencrypted_coll.find_one(),)) + print("Encrypted document: %s" % (unencrypted_coll.find_one(),)) if __name__ == "__main__": main() + Server-Side Field Level Encryption Enforcement `````````````````````````````````````````````` @@ -263,7 +265,9 @@ encryption using :class:`~pymongo.encryption.ClientEncryption` to create a new encryption data key and create a collection with the `Automatic Encryption JSON Schema Syntax -`_:: +`_: + +.. 
code-block:: python import os @@ -271,8 +275,7 @@ data key and create a collection with the from bson.binary import STANDARD from pymongo import MongoClient - from pymongo.encryption import (Algorithm, - ClientEncryption) + from pymongo.encryption import Algorithm, ClientEncryption from pymongo.encryption_options import AutoEncryptionOpts from pymongo.errors import OperationFailure from pymongo.write_concern import WriteConcern @@ -301,7 +304,8 @@ data key and create a collection with the key_vault.create_index( "keyAltNames", unique=True, - partialFilterExpression={"keyAltNames": {"$exists": True}}) + partialFilterExpression={"keyAltNames": {"$exists": True}}, + ) client_encryption = ClientEncryption( kms_providers, @@ -312,27 +316,27 @@ data key and create a collection with the # on MongoClient, Database, or Collection. We will not be calling # encrypt() or decrypt() in this example so we can use any # CodecOptions. - CodecOptions()) + CodecOptions(), + ) # Create a new data key and json schema for the encryptedField. data_key_id = client_encryption.create_data_key( - 'local', key_alt_names=['pymongo_encryption_example_2']) + "local", key_alt_names=["pymongo_encryption_example_2"] + ) json_schema = { "properties": { "encryptedField": { "encrypt": { "keyId": [data_key_id], "bsonType": "string", - "algorithm": - Algorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic + "algorithm": Algorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic, } } }, - "bsonType": "object" + "bsonType": "object", } - auto_encryption_opts = AutoEncryptionOpts( - kms_providers, key_vault_namespace) + auto_encryption_opts = AutoEncryptionOpts(kms_providers, key_vault_namespace) client = MongoClient(auto_encryption_opts=auto_encryption_opts) db_name, coll_name = encrypted_namespace.split(".", 1) db = client[db_name] @@ -348,17 +352,18 @@ data key and create a collection with the # JSON Schema. codec_options=CodecOptions(uuid_representation=STANDARD), write_concern=WriteConcern(w="majority"), - validator={"$jsonSchema": json_schema}) + validator={"$jsonSchema": json_schema}, + ) coll = client[db_name][coll_name] coll.insert_one({"encryptedField": "123456789"}) - print('Decrypted document: %s' % (coll.find_one(),)) + print("Decrypted document: %s" % (coll.find_one(),)) unencrypted_coll = MongoClient()[db_name][coll_name] - print('Encrypted document: %s' % (unencrypted_coll.find_one(),)) + print("Encrypted document: %s" % (unencrypted_coll.find_one(),)) try: unencrypted_coll.insert_one({"encryptedField": "123456789"}) except OperationFailure as exc: - print('Unencrypted insert failed: %s' % (exc.details,)) + print("Unencrypted insert failed: %s" % (exc.details,)) if __name__ == "__main__": @@ -372,13 +377,14 @@ Explicit Encryption Explicit encryption is a MongoDB community feature and does not use the ``mongocryptd`` process. Explicit encryption is provided by the -:class:`~pymongo.encryption.ClientEncryption` class, for example:: +:class:`~pymongo.encryption.ClientEncryption` class, for example: + +.. 
code-block:: python import os from pymongo import MongoClient - from pymongo.encryption import (Algorithm, - ClientEncryption) + from pymongo.encryption import Algorithm, ClientEncryption def main(): @@ -405,7 +411,8 @@ Explicit encryption is a MongoDB community feature and does not use the key_vault.create_index( "keyAltNames", unique=True, - partialFilterExpression={"keyAltNames": {"$exists": True}}) + partialFilterExpression={"keyAltNames": {"$exists": True}}, + ) client_encryption = ClientEncryption( kms_providers, @@ -416,24 +423,27 @@ Explicit encryption is a MongoDB community feature and does not use the # The CodecOptions class used for encrypting and decrypting. # This should be the same CodecOptions instance you have configured # on MongoClient, Database, or Collection. - coll.codec_options) + coll.codec_options, + ) # Create a new data key for the encryptedField. data_key_id = client_encryption.create_data_key( - 'local', key_alt_names=['pymongo_encryption_example_3']) + "local", key_alt_names=["pymongo_encryption_example_3"] + ) # Explicitly encrypt a field: encrypted_field = client_encryption.encrypt( "123456789", Algorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic, - key_id=data_key_id) + key_id=data_key_id, + ) coll.insert_one({"encryptedField": encrypted_field}) doc = coll.find_one() - print('Encrypted document: %s' % (doc,)) + print("Encrypted document: %s" % (doc,)) # Explicitly decrypt the field: doc["encryptedField"] = client_encryption.decrypt(doc["encryptedField"]) - print('Decrypted document: %s' % (doc,)) + print("Decrypted document: %s" % (doc,)) # Cleanup resources. client_encryption.close() @@ -451,13 +461,14 @@ Although automatic encryption requires MongoDB 4.2 enterprise or a MongoDB 4.2 Atlas cluster, automatic *decryption* is supported for all users. To configure automatic *decryption* without automatic *encryption* set ``bypass_auto_encryption=True`` in -:class:`~pymongo.encryption_options.AutoEncryptionOpts`:: +:class:`~pymongo.encryption_options.AutoEncryptionOpts`: + +.. code-block:: python import os from pymongo import MongoClient - from pymongo.encryption import (Algorithm, - ClientEncryption) + from pymongo.encryption import Algorithm, ClientEncryption from pymongo.encryption_options import AutoEncryptionOpts @@ -476,7 +487,8 @@ To configure automatic *decryption* without automatic *encryption* set # the automatic _decryption_ behavior. bypass_auto_encryption will # also disable spawning mongocryptd. auto_encryption_opts = AutoEncryptionOpts( - kms_providers, key_vault_namespace, bypass_auto_encryption=True) + kms_providers, key_vault_namespace, bypass_auto_encryption=True + ) client = MongoClient(auto_encryption_opts=auto_encryption_opts) coll = client.test.coll @@ -490,7 +502,8 @@ To configure automatic *decryption* without automatic *encryption* set key_vault.create_index( "keyAltNames", unique=True, - partialFilterExpression={"keyAltNames": {"$exists": True}}) + partialFilterExpression={"keyAltNames": {"$exists": True}}, + ) client_encryption = ClientEncryption( kms_providers, @@ -501,28 +514,32 @@ To configure automatic *decryption* without automatic *encryption* set # The CodecOptions class used for encrypting and decrypting. # This should be the same CodecOptions instance you have configured # on MongoClient, Database, or Collection. - coll.codec_options) + coll.codec_options, + ) # Create a new data key for the encryptedField. 
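         # key_alt_names makes this key addressable by name; the encrypt()
         # call below references it via key_alt_name instead of its _id.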
data_key_id = client_encryption.create_data_key( - 'local', key_alt_names=['pymongo_encryption_example_4']) + "local", key_alt_names=["pymongo_encryption_example_4"] + ) # Explicitly encrypt a field: encrypted_field = client_encryption.encrypt( "123456789", Algorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic, - key_alt_name='pymongo_encryption_example_4') + key_alt_name="pymongo_encryption_example_4", + ) coll.insert_one({"encryptedField": encrypted_field}) # Automatically decrypts any encrypted fields. doc = coll.find_one() - print('Decrypted document: %s' % (doc,)) + print("Decrypted document: %s" % (doc,)) unencrypted_coll = MongoClient().test.coll - print('Encrypted document: %s' % (unencrypted_coll.find_one(),)) + print("Encrypted document: %s" % (unencrypted_coll.find_one(),)) # Cleanup resources. client_encryption.close() client.close() + if __name__ == "__main__": main() @@ -539,38 +556,44 @@ AWS, GCP, and Azure cloud environments. To enable the driver's behavior to obtain credentials from the environment, add the appropriate key ("aws", "gcp", or "azure") with an empty map to "kms_providers" in either :class:`~pymongo.encryption_options.AutoEncryptionOpts` or :class:`~pymongo.encryption.ClientEncryption` options. -An application using AWS credentials would look like:: +An application using AWS credentials would look like: + +.. code-block:: python from pymongo import MongoClient from pymongo.encryption import ClientEncryption + client = MongoClient() client_encryption = ClientEncryption( - # The empty dictionary enables on-demand credentials. - kms_providers={"aws": {}}, - key_vault_namespace="keyvault.datakeys", - key_vault_client=client, - codec_options=client.codec_options, + # The empty dictionary enables on-demand credentials. + kms_providers={"aws": {}}, + key_vault_namespace="keyvault.datakeys", + key_vault_client=client, + codec_options=client.codec_options, ) master_key = { - "region": "us-east-1", - "key": ("arn:aws:kms:us-east-1:123456789:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0"), + "region": "us-east-1", + "key": ("arn:aws:kms:us-east-1:123456789:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0"), } client_encryption.create_data_key("aws", master_key) The above will enable the same behavior of obtaining AWS credentials from the environment as is used for :ref:`MONGODB-AWS` authentication, including the caching to avoid rate limiting. -An application using GCP credentials would look like:: +An application using GCP credentials would look like: + +.. code-block:: python from pymongo import MongoClient from pymongo.encryption import ClientEncryption + client = MongoClient() client_encryption = ClientEncryption( - # The empty dictionary enables on-demand credentials. - kms_providers={"gcp": {}}, - key_vault_namespace="keyvault.datakeys", - key_vault_client=client, - codec_options=client.codec_options, + # The empty dictionary enables on-demand credentials. + kms_providers={"gcp": {}}, + key_vault_namespace="keyvault.datakeys", + key_vault_client=client, + codec_options=client.codec_options, ) master_key = { "projectId": "my-project", @@ -583,15 +606,17 @@ An application using GCP credentials would look like:: The driver will query the `VM instance metadata `_ to obtain credentials. An application using Azure credentials would look like, this time using -:class:`~pymongo.encryption_options.AutoEncryptionOpts`:: +:class:`~pymongo.encryption_options.AutoEncryptionOpts`: + +.. 
code-block:: python

     from pymongo import MongoClient
     from pymongo.encryption_options import AutoEncryptionOpts
+
     # The empty dictionary enables on-demand credentials.
-    kms_providers={"azure": {}},
-    key_vault_namespace="keyvault.datakeys"
-    auto_encryption_opts = AutoEncryptionOpts(
-        kms_providers, key_vault_namespace)
+    kms_providers = {"azure": {}}
+    key_vault_namespace = "keyvault.datakeys"
+    auto_encryption_opts = AutoEncryptionOpts(kms_providers, key_vault_namespace)
     client = MongoClient(auto_encryption_opts=auto_encryption_opts)
     coll = client.test.coll
     coll.insert_one({"encryptedField": "123456789"})
@@ -615,7 +640,9 @@ Data is encrypted client-side. Queryable Encryption supports indexed encrypted f
 which are further processed server-side.

 Automatic encryption in Queryable Encryption is configured with an ``encrypted_fields`` mapping,
-as demonstrated by the following example::
+as demonstrated by the following example:
+
+.. code-block:: python

     import os
     from bson.codec_options import CodecOptions
     from pymongo import MongoClient
     from pymongo.encryption import Algorithm, ClientEncryption, QueryType
     from pymongo.encryption_options import AutoEncryptionOpts
-
     local_master_key = os.urandom(96)
     kms_providers = {"local": {"key": local_master_key}}
     key_vault_namespace = "keyvault.datakeys"

     encrypted_fields_map = {
         "default.encryptedCollection": {
-        "escCollection": "encryptedCollection.esc",
-        "ecocCollection": "encryptedCollection.ecoc",
-        "fields": [
-            {
-                "path": "firstName",
-                "bsonType": "string",
-                "keyId": key1_id,
-                "queries": [{"queryType": "equality"}],
-            },
-            {
-                "path": "lastName",
-                "bsonType": "string",
-                "keyId": key2_id,
-            }
-        ]
+            "escCollection": "encryptedCollection.esc",
+            "ecocCollection": "encryptedCollection.ecoc",
+            "fields": [
+                {
+                    "path": "firstName",
+                    "bsonType": "string",
+                    "keyId": key1_id,
+                    "queries": [{"queryType": "equality"}],
+                },
+                {
+                    "path": "lastName",
+                    "bsonType": "string",
+                    "keyId": key2_id,
+                },
+            ],
         }
     }

     auto_encryption_opts = AutoEncryptionOpts(
-        kms_providers, key_vault_namespace, encrypted_fields_map=encrypted_fields_map)
+        kms_providers,
+        key_vault_namespace,
+        encrypted_fields_map=encrypted_fields_map,
+    )
     client = MongoClient(auto_encryption_opts=auto_encryption_opts)
-    client.default.drop_collection('encryptedCollection')
-    coll = client.default.create_collection('encryptedCollection')
-    coll.insert_one({ "_id": 1, "firstName": "Jane", "lastName": "Doe" })
+    client.default.drop_collection("encryptedCollection")
+    coll = client.default.create_collection("encryptedCollection")
+    coll.insert_one({"_id": 1, "firstName": "Jane", "lastName": "Doe"})
     docs = list(coll.find({"firstName": "Jane"}))
     print(docs)
@@ -679,13 +708,18 @@ which are further processed server-side.
 Explicit encryption in Queryable Encryption is performed using the ``encrypt``
 and ``decrypt`` methods. Automatic encryption (to allow the ``find_one`` to
 automatically decrypt) is configured
-using an ``encrypted_fields`` mapping, as demonstrated by the following example::
+using an ``encrypted_fields`` mapping, as demonstrated by the following example:

-    import os
+.. 
code-block:: python + import os from pymongo import MongoClient - from pymongo.encryption import (Algorithm, AutoEncryptionOpts, - ClientEncryption, QueryType) + from pymongo.encryption import ( + Algorithm, + AutoEncryptionOpts, + ClientEncryption, + QueryType, + ) def main(): @@ -708,7 +742,8 @@ using an ``encrypted_fields`` mapping, as demonstrated by the following example: key_vault.create_index( "keyAltNames", unique=True, - partialFilterExpression={"keyAltNames": {"$exists": True}}) + partialFilterExpression={"keyAltNames": {"$exists": True}}, + ) client_encryption = ClientEncryption( kms_providers, @@ -719,32 +754,29 @@ using an ``encrypted_fields`` mapping, as demonstrated by the following example: # The CodecOptions class used for encrypting and decrypting. # This should be the same CodecOptions instance you have configured # on MongoClient, Database, or Collection. - client.codec_options) + client.codec_options, + ) # Create a new data key for the encryptedField. - indexed_key_id = client_encryption.create_data_key( - 'local') - unindexed_key_id = client_encryption.create_data_key( - 'local') + indexed_key_id = client_encryption.create_data_key("local") + unindexed_key_id = client_encryption.create_data_key("local") encrypted_fields = { - "escCollection": "enxcol_.default.esc", - "ecocCollection": "enxcol_.default.ecoc", - "fields": [ - { - "keyId": indexed_key_id, - "path": "encryptedIndexed", - "bsonType": "string", - "queries": { - "queryType": "equality" - } - }, - { - "keyId": unindexed_key_id, - "path": "encryptedUnindexed", - "bsonType": "string", - } - ] + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": indexed_key_id, + "path": "encryptedIndexed", + "bsonType": "string", + "queries": {"queryType": "equality"}, + }, + { + "keyId": unindexed_key_id, + "path": "encryptedUnindexed", + "bsonType": "string", + }, + ], } opts = AutoEncryptionOpts( @@ -765,27 +797,36 @@ using an ``encrypted_fields`` mapping, as demonstrated by the following example: # Create and encrypt an indexed and unindexed value. val = "encrypted indexed value" unindexed_val = "encrypted unindexed value" - insert_payload_indexed = client_encryption.encrypt(val, Algorithm.INDEXED, indexed_key_id, contention_factor=1) - insert_payload_unindexed = client_encryption.encrypt(unindexed_val, Algorithm.UNINDEXED, - unindexed_key_id) + insert_payload_indexed = client_encryption.encrypt( + val, Algorithm.INDEXED, indexed_key_id, contention_factor=1 + ) + insert_payload_unindexed = client_encryption.encrypt( + unindexed_val, Algorithm.UNINDEXED, unindexed_key_id + ) # Insert the payloads. - coll.insert_one({ - "encryptedIndexed": insert_payload_indexed, - "encryptedUnindexed": insert_payload_unindexed - }) + coll.insert_one( + { + "encryptedIndexed": insert_payload_indexed, + "encryptedUnindexed": insert_payload_unindexed, + } + ) # Encrypt our find payload using QueryType.EQUALITY. - # The value of "data_key_id" must be the same as used to encrypt the values - # above. + # The value of "indexed_key_id" must be the same as used to encrypt + # the values above. find_payload = client_encryption.encrypt( - val, Algorithm.INDEXED, indexed_key_id, query_type=QueryType.EQUALITY, contention_factor=1 + val, + Algorithm.INDEXED, + indexed_key_id, + query_type=QueryType.EQUALITY, + contention_factor=1, ) # Find the document we inserted using the encrypted payload. # The returned document is automatically decrypted. 
doc = coll.find_one({"encryptedIndexed": find_payload})
-        print('Returned document: %s' % (doc,))
+        print("Returned document: %s" % (doc,))

         # Cleanup resources.
         client_encryption.close()

From a3940ac278e9a23fc2deaa52b304fb5d5d607bd1 Mon Sep 17 00:00:00 2001
From: Steven Silvester
Date: Mon, 3 Jul 2023 05:24:25 -0500
Subject: [PATCH 0456/1588] PYTHON-3759 Update to Newer Build Hosts (#1257)

---
 .evergreen/build-manylinux.sh |  5 +++++
 .evergreen/config.yml         | 15 ++++-----------
 2 files changed, 9 insertions(+), 11 deletions(-)

diff --git a/.evergreen/build-manylinux.sh b/.evergreen/build-manylinux.sh
index 871151a5f3..38490c3142 100755
--- a/.evergreen/build-manylinux.sh
+++ b/.evergreen/build-manylinux.sh
@@ -1,6 +1,11 @@
 #!/bin/bash -ex

 docker version
+
+# Set up qemu support using the method used in docker/setup-qemu-action
+# https://github.com/docker/setup-qemu-action/blob/2b82ce82d56a2a04d2637cd93a637ae1b359c0a7/README.md?plain=1#L46
+docker run --rm --privileged tonistiigi/binfmt:latest --install all
+
 # manylinux1 2021-05-05-b64d921 and manylinux2014 2021-05-05-1ac6ef3 were
 # the last releases to generate pip < 20.3 compatible wheels. After that
 # auditwheel was upgraded to v4 which produces PEP 600 manylinux_x_y wheels
diff --git a/.evergreen/config.yml b/.evergreen/config.yml
index 8fa2df2415..928a56df1a 100644
--- a/.evergreen/config.yml
+++ b/.evergreen/config.yml
@@ -1352,7 +1352,7 @@ tasks:

   - name: "release-manylinux"
     tags: ["release_tag"]
-    run_on: ubuntu2004-large
+    run_on: ubuntu2204-large
     exec_timeout_secs: 216000  # 60 hours (manylinux task is slow).
     commands:
       - func: "build release"

   - name: "release-old-manylinux"
     tags: ["release_tag"]
-    run_on: ubuntu2004-large
+    run_on: ubuntu2204-large
     exec_timeout_secs: 216000  # 60 hours (manylinux task is slow).
commands: - command: shell.exec @@ -2292,13 +2292,6 @@ axes: display_name: "Archlinux" run_on: archlinux-test batchtime: 10080 # 7 days - - id: debian92 - display_name: "Debian 9.2" - run_on: debian92-test - batchtime: 10080 # 7 days - variables: - python3_binary: "/opt/python/3.8/bin/python3" - libmongocrypt_url: https://s3.amazonaws.com/mciuploads/libmongocrypt/debian92/master/latest/libmongocrypt.tar.gz - id: macos-1014 display_name: "macOS 10.14" run_on: macos-1014 @@ -2328,14 +2321,14 @@ axes: libmongocrypt_url: https://s3.amazonaws.com/mciuploads/libmongocrypt/macos/master/latest/libmongocrypt.tar.gz - id: rhel7 display_name: "RHEL 7.x" - run_on: rhel76-small + run_on: rhel79-small batchtime: 10080 # 7 days variables: python3_binary: "/opt/python/3.8/bin/python3" libmongocrypt_url: https://s3.amazonaws.com/mciuploads/libmongocrypt/rhel-70-64-bit/master/latest/libmongocrypt.tar.gz - id: rhel8 display_name: "RHEL 8.x" - run_on: rhel84-small + run_on: rhel87-small batchtime: 10080 # 7 days variables: python3_binary: "/opt/python/3.8/bin/python3" From 0b5bdccf3ae1233a032c525bc809c84eed3520d1 Mon Sep 17 00:00:00 2001 From: Iris <58442094+sleepyStick@users.noreply.github.com> Date: Mon, 3 Jul 2023 09:15:04 -0700 Subject: [PATCH 0457/1588] PYTHON-3729 use PyObject_GetAddr instead of PyObject_GetAddrString (#1281) --- bson/_cbsonmodule.c | 117 +++++++++++++++++++++++++++++--------- pymongo/_cmessagemodule.c | 34 ++++++++--- 2 files changed, 116 insertions(+), 35 deletions(-) diff --git a/bson/_cbsonmodule.c b/bson/_cbsonmodule.c index 68ea6b63c4..c26ad252cc 100644 --- a/bson/_cbsonmodule.c +++ b/bson/_cbsonmodule.c @@ -56,6 +56,20 @@ struct module_state { PyObject* _min_datetime_ms; PyObject* _max_datetime_ms; PyObject* _type_marker_str; + PyObject* _flags_str; + PyObject* _pattern_str; + PyObject* _encoder_map_str; + PyObject* _decoder_map_str; + PyObject* _fallback_encoder_str; + PyObject* _raw_str; + PyObject* _subtype_str; + PyObject* _binary_str; + PyObject* _scope_str; + PyObject* _inc_str; + PyObject* _time_str; + PyObject* _bid_str; + PyObject* _replace_str; + PyObject* _astimezone_str; }; #define GETSTATE(m) ((struct module_state*)PyModule_GetState(m)) @@ -219,7 +233,7 @@ static int _write_element_to_buffer(PyObject* self, buffer_t buffer, /* Write a RawBSONDocument to the buffer. * Returns the number of bytes written or 0 on failure. */ -static int write_raw_doc(buffer_t buffer, PyObject* raw); +static int write_raw_doc(buffer_t buffer, PyObject* raw, PyObject* _raw); /* Date stuff */ static PyObject* datetime_from_millis(long long millis) { @@ -468,8 +482,24 @@ static int _load_python_objects(PyObject* module) { PyObject* compiled = NULL; struct module_state *state = GETSTATE(module); - /* Python str for faster _type_marker check */ - state->_type_marker_str = PyUnicode_FromString("_type_marker"); + /* Cache commonly used attribute names to improve performance. 
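+       Looking these up with PyObject_GetAttr avoids re-creating and
+       re-hashing the attribute name string on every access, which is what
+       PyObject_GetAttrString must do with its char* argument.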
*/ + if (!((state->_type_marker_str = PyUnicode_FromString("_type_marker")) && + (state->_flags_str = PyUnicode_FromString("flags")) && + (state->_pattern_str = PyUnicode_FromString("pattern")) && + (state->_encoder_map_str = PyUnicode_FromString("_encoder_map")) && + (state->_decoder_map_str = PyUnicode_FromString("_decoder_map")) && + (state->_fallback_encoder_str = PyUnicode_FromString("_fallback_encoder")) && + (state->_raw_str = PyUnicode_FromString("raw")) && + (state->_subtype_str = PyUnicode_FromString("subtype")) && + (state->_binary_str = PyUnicode_FromString("binary")) && + (state->_scope_str = PyUnicode_FromString("scope")) && + (state->_inc_str = PyUnicode_FromString("inc")) && + (state->_time_str = PyUnicode_FromString("time")) && + (state->_bid_str = PyUnicode_FromString("bid")) && + (state->_replace_str = PyUnicode_FromString("replace")) && + (state->_astimezone_str = PyUnicode_FromString("astimezone")))) { + return 1; + } if (_load_object(&state->Binary, "bson.binary", "Binary") || _load_object(&state->Code, "bson.code", "Code") || @@ -555,25 +585,25 @@ static long _type_marker(PyObject* object, PyObject* _type_marker_str) { * Return 1 on success. options->document_class is a new reference. * Return 0 on failure. */ -int cbson_convert_type_registry(PyObject* registry_obj, type_registry_t* registry) { +int cbson_convert_type_registry(PyObject* registry_obj, type_registry_t* registry, PyObject* _encoder_map_str, PyObject* _decoder_map_str, PyObject* _fallback_encoder_str) { registry->encoder_map = NULL; registry->decoder_map = NULL; registry->fallback_encoder = NULL; registry->registry_obj = NULL; - registry->encoder_map = PyObject_GetAttrString(registry_obj, "_encoder_map"); + registry->encoder_map = PyObject_GetAttr(registry_obj, _encoder_map_str); if (registry->encoder_map == NULL) { goto fail; } registry->is_encoder_empty = (PyDict_Size(registry->encoder_map) == 0); - registry->decoder_map = PyObject_GetAttrString(registry_obj, "_decoder_map"); + registry->decoder_map = PyObject_GetAttr(registry_obj, _decoder_map_str); if (registry->decoder_map == NULL) { goto fail; } registry->is_decoder_empty = (PyDict_Size(registry->decoder_map) == 0); - registry->fallback_encoder = PyObject_GetAttrString(registry_obj, "_fallback_encoder"); + registry->fallback_encoder = PyObject_GetAttr(registry_obj, _fallback_encoder_str); if (registry->fallback_encoder == NULL) { goto fail; } @@ -597,6 +627,7 @@ int cbson_convert_type_registry(PyObject* registry_obj, type_registry_t* registr */ int convert_codec_options(PyObject* self, PyObject* options_obj, codec_options_t* options) { PyObject* type_registry_obj = NULL; + struct module_state *state = GETSTATE(self); long type_marker; options->unicode_decode_error_handler = NULL; @@ -613,13 +644,13 @@ int convert_codec_options(PyObject* self, PyObject* options_obj, codec_options_t } type_marker = _type_marker(options->document_class, - GETSTATE(self)->_type_marker_str); + state->_type_marker_str); if (type_marker < 0) { return 0; } if (!cbson_convert_type_registry(type_registry_obj, - &options->type_registry)) { + &options->type_registry, state->_encoder_map_str, state->_decoder_map_str, state->_fallback_encoder_str)) { return 0; } @@ -692,7 +723,7 @@ _set_cannot_encode(PyObject* value) { * Sets exception and returns 0 on failure. 
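A second pattern runs through these hunks: helpers such as cbson_convert_type_registry now receive the cached name objects as extra parameters instead of calling GETSTATE themselves, so the caller performs one state lookup and shares it with every helper it invokes. A sketch of that calling convention, with illustrative names that are not from the patch:

    #include <Python.h>

    /* Illustrative helper: the caller passes cached name objects down, so
     * this function needs no module state of its own.  Returns 1 on success
     * (caller owns *pattern) and 0 on failure with an exception set. */
    static int read_pattern_and_flags(PyObject* regex, PyObject* pattern_str,
                                      PyObject* flags_str,
                                      PyObject** pattern, long* flags) {
        PyObject* py_flags;

        *pattern = PyObject_GetAttr(regex, pattern_str);
        if (*pattern == NULL) {
            return 0;
        }
        py_flags = PyObject_GetAttr(regex, flags_str);
        if (py_flags == NULL) {
            Py_CLEAR(*pattern);
            return 0;
        }
        *flags = PyLong_AsLong(py_flags);
        Py_DECREF(py_flags);
        if (*flags == -1 && PyErr_Occurred()) {
            Py_CLEAR(*pattern);
            return 0;
        }
        return 1;
    }

On failure the helper releases everything it acquired, so callers only need to check the return value, which is the convention the surrounding code follows as well.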
*/ static int _write_regex_to_buffer( - buffer_t buffer, int type_byte, PyObject* value) { + buffer_t buffer, int type_byte, PyObject* value, PyObject* _flags_str, PyObject* _pattern_str) { PyObject* py_flags; PyObject* py_pattern; @@ -708,7 +739,7 @@ static int _write_regex_to_buffer( * Both the builtin re type and our Regex class have attributes * "flags" and "pattern". */ - py_flags = PyObject_GetAttrString(value, "flags"); + py_flags = PyObject_GetAttr(value, _flags_str); if (!py_flags) { return 0; } @@ -717,7 +748,7 @@ static int _write_regex_to_buffer( if (int_flags == -1 && PyErr_Occurred()) { return 0; } - py_pattern = PyObject_GetAttrString(value, "pattern"); + py_pattern = PyObject_GetAttr(value, _pattern_str); if (!py_pattern) { return 0; } @@ -838,7 +869,7 @@ static int _write_element_to_buffer(PyObject* self, buffer_t buffer, int size; *(pymongo_buffer_get_buffer(buffer) + type_byte) = 0x05; - subtype_object = PyObject_GetAttrString(value, "subtype"); + subtype_object = PyObject_GetAttr(value, state->_subtype_str); if (!subtype_object) { return 0; } @@ -886,7 +917,7 @@ static int _write_element_to_buffer(PyObject* self, buffer_t buffer, { /* ObjectId */ const char* data; - PyObject* pystring = PyObject_GetAttrString(value, "binary"); + PyObject* pystring = PyObject_GetAttr(value, state->_binary_str); if (!pystring) { return 0; } @@ -906,7 +937,7 @@ static int _write_element_to_buffer(PyObject* self, buffer_t buffer, case 11: { /* Regex */ - return _write_regex_to_buffer(buffer, type_byte, value); + return _write_regex_to_buffer(buffer, type_byte, value, state->_flags_str, state->_pattern_str); } case 13: { @@ -915,7 +946,7 @@ static int _write_element_to_buffer(PyObject* self, buffer_t buffer, length_location, length; - PyObject* scope = PyObject_GetAttrString(value, "scope"); + PyObject* scope = PyObject_GetAttr(value, state->_scope_str); if (!scope) { return 0; } @@ -958,7 +989,7 @@ static int _write_element_to_buffer(PyObject* self, buffer_t buffer, PyObject* obj; unsigned long i; - obj = PyObject_GetAttrString(value, "inc"); + obj = PyObject_GetAttr(value, state->_inc_str); if (!obj) { return 0; } @@ -971,7 +1002,7 @@ static int _write_element_to_buffer(PyObject* self, buffer_t buffer, return 0; } - obj = PyObject_GetAttrString(value, "time"); + obj = PyObject_GetAttr(value, state->_time_str); if (!obj) { return 0; } @@ -1006,7 +1037,7 @@ static int _write_element_to_buffer(PyObject* self, buffer_t buffer, { /* Decimal128 */ const char* data; - PyObject* pystring = PyObject_GetAttrString(value, "bid"); + PyObject* pystring = PyObject_GetAttr(value, state->_bid_str); if (!pystring) { return 0; } @@ -1041,7 +1072,7 @@ static int _write_element_to_buffer(PyObject* self, buffer_t buffer, case 101: { /* RawBSONDocument */ - if (!write_raw_doc(buffer, value)) { + if (!write_raw_doc(buffer, value, state->_raw_str)) { return 0; } *(pymongo_buffer_get_buffer(buffer) + type_byte) = 0x03; @@ -1206,7 +1237,7 @@ static int _write_element_to_buffer(PyObject* self, buffer_t buffer, *(pymongo_buffer_get_buffer(buffer) + type_byte) = 0x09; return buffer_write_int64(buffer, (int64_t)millis); } else if (PyObject_TypeCheck(value, state->REType)) { - return _write_regex_to_buffer(buffer, type_byte, value); + return _write_regex_to_buffer(buffer, type_byte, value, state->_flags_str, state->_pattern_str); } /* @@ -1437,14 +1468,14 @@ int decode_and_write_pair(PyObject* self, buffer_t buffer, /* Write a RawBSONDocument to the buffer. * Returns the number of bytes written or 0 on failure. 
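write_raw_doc, whose updated body appears next, applies the same cached-name lookup to a bytes-valued attribute and then borrows the buffer of the returned object. A simplified, self-contained sketch of that step (the fixed-size output buffer is an assumption for illustration; the patch writes into its own growable buffer):

    #include <Python.h>
    #include <string.h>

    /* Illustrative only: copy the bytes behind a document's "raw" attribute
     * into out.  raw_str is assumed to be the cached name object for "raw".
     * Returns the number of bytes copied, or 0 on failure with an exception
     * set (a valid BSON document is never 0 bytes). */
    static Py_ssize_t copy_raw_bytes(PyObject* doc, PyObject* raw_str,
                                     char* out, Py_ssize_t out_len) {
        PyObject* bytes_obj;
        char* data;
        Py_ssize_t len;

        bytes_obj = PyObject_GetAttr(doc, raw_str);
        if (bytes_obj == NULL) {
            return 0;
        }
        if (PyBytes_AsStringAndSize(bytes_obj, &data, &len) == -1) {
            Py_DECREF(bytes_obj);
            return 0;
        }
        if (len > out_len) {
            PyErr_SetString(PyExc_ValueError, "raw document too large");
            Py_DECREF(bytes_obj);
            return 0;
        }
        memcpy(out, data, (size_t)len);
        Py_DECREF(bytes_obj);
        return len;  /* data is only valid while bytes_obj is alive */
    }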
*/ -static int write_raw_doc(buffer_t buffer, PyObject* raw) { +static int write_raw_doc(buffer_t buffer, PyObject* raw, PyObject* _raw_str) { char* bytes; Py_ssize_t len; int len_int; int bytes_written = 0; PyObject* bytes_obj = NULL; - bytes_obj = PyObject_GetAttrString(raw, "raw"); + bytes_obj = PyObject_GetAttr(raw, _raw_str); if (!bytes_obj) { goto fail; } @@ -1485,7 +1516,7 @@ int write_dict(PyObject* self, buffer_t buffer, } if (101 == type_marker) { - return write_raw_doc(buffer, dict); + return write_raw_doc(buffer, dict, state->_raw_str); } mapping_type = _get_object(state->Mapping, "collections.abc", "Mapping"); @@ -1606,6 +1637,7 @@ static PyObject* _cbson_dict_to_bson(PyObject* self, PyObject* args) { buffer_t buffer; PyObject* raw_bson_document_bytes_obj; long type_marker; + struct module_state *state = GETSTATE(self); if (!(PyArg_ParseTuple(args, "ObO|b", &dict, &check_keys, &options_obj, &top_level) && @@ -1614,13 +1646,13 @@ static PyObject* _cbson_dict_to_bson(PyObject* self, PyObject* args) { } /* check for RawBSONDocument */ - type_marker = _type_marker(dict, GETSTATE(self)->_type_marker_str); + type_marker = _type_marker(dict, state->_type_marker_str); if (type_marker < 0) { destroy_codec_options(&options); return NULL; } else if (101 == type_marker) { destroy_codec_options(&options); - raw_bson_document_bytes_obj = PyObject_GetAttrString(dict, "raw"); + raw_bson_document_bytes_obj = PyObject_GetAttr(dict, state->_raw_str); if (NULL == raw_bson_document_bytes_obj) { return NULL; } @@ -2102,7 +2134,7 @@ static PyObject* get_value(PyObject* self, PyObject* name, const char* buffer, if (!naive) { goto invalid; } - replace = PyObject_GetAttrString(naive, "replace"); + replace = PyObject_GetAttr(naive, state->_replace_str); Py_DECREF(naive); if (!replace) { goto invalid; @@ -2137,7 +2169,7 @@ static PyObject* get_value(PyObject* self, PyObject* name, const char* buffer, /* convert to local time */ if (options->tzinfo != Py_None) { - astimezone = PyObject_GetAttrString(value, "astimezone"); + astimezone = PyObject_GetAttr(value, state->_astimezone_str); Py_DECREF(value); if (!astimezone) { Py_DECREF(replace); @@ -3051,6 +3083,21 @@ static int _cbson_traverse(PyObject *m, visitproc visit, void *arg) { Py_VISIT(GETSTATE(m)->MaxKey); Py_VISIT(GETSTATE(m)->UTC); Py_VISIT(GETSTATE(m)->REType); + Py_VISIT(GETSTATE(m)->_type_marker_str); + Py_VISIT(GETSTATE(m)->_flags_str); + Py_VISIT(GETSTATE(m)->_pattern_str); + Py_VISIT(GETSTATE(m)->_encoder_map_str); + Py_VISIT(GETSTATE(m)->_decoder_map_str); + Py_VISIT(GETSTATE(m)->_fallback_encoder_str); + Py_VISIT(GETSTATE(m)->_raw_str); + Py_VISIT(GETSTATE(m)->_subtype_str); + Py_VISIT(GETSTATE(m)->_binary_str); + Py_VISIT(GETSTATE(m)->_scope_str); + Py_VISIT(GETSTATE(m)->_inc_str); + Py_VISIT(GETSTATE(m)->_time_str); + Py_VISIT(GETSTATE(m)->_bid_str); + Py_VISIT(GETSTATE(m)->_replace_str); + Py_VISIT(GETSTATE(m)->_astimezone_str); return 0; } @@ -3067,6 +3114,20 @@ static int _cbson_clear(PyObject *m) { Py_CLEAR(GETSTATE(m)->UTC); Py_CLEAR(GETSTATE(m)->REType); Py_CLEAR(GETSTATE(m)->_type_marker_str); + Py_CLEAR(GETSTATE(m)->_flags_str); + Py_CLEAR(GETSTATE(m)->_pattern_str); + Py_CLEAR(GETSTATE(m)->_encoder_map_str); + Py_CLEAR(GETSTATE(m)->_decoder_map_str); + Py_CLEAR(GETSTATE(m)->_fallback_encoder_str); + Py_CLEAR(GETSTATE(m)->_raw_str); + Py_CLEAR(GETSTATE(m)->_subtype_str); + Py_CLEAR(GETSTATE(m)->_binary_str); + Py_CLEAR(GETSTATE(m)->_scope_str); + Py_CLEAR(GETSTATE(m)->_inc_str); + Py_CLEAR(GETSTATE(m)->_time_str); + 
Py_CLEAR(GETSTATE(m)->_bid_str);
+    Py_CLEAR(GETSTATE(m)->_replace_str);
+    Py_CLEAR(GETSTATE(m)->_astimezone_str);
     return 0;
 }
 
diff --git a/pymongo/_cmessagemodule.c b/pymongo/_cmessagemodule.c
index ee7623d832..7ac66a1e4b 100644
--- a/pymongo/_cmessagemodule.c
+++ b/pymongo/_cmessagemodule.c
@@ -28,6 +28,10 @@
 
 struct module_state {
     PyObject* _cbson;
+    PyObject* _max_bson_size_str;
+    PyObject* _max_message_size_str;
+    PyObject* _max_write_batch_size_str;
+    PyObject* _max_split_size_str;
 };
 
 /* See comments about module initialization in _cbsonmodule.c */
@@ -366,21 +370,21 @@ _batched_op_msg(
     PyObject* iterator = NULL;
     char* flags = ack ? "\x00\x00\x00\x00" : "\x02\x00\x00\x00";
 
-    max_bson_size_obj = PyObject_GetAttrString(ctx, "max_bson_size");
+    max_bson_size_obj = PyObject_GetAttr(ctx, state->_max_bson_size_str);
     max_bson_size = PyLong_AsLong(max_bson_size_obj);
     Py_XDECREF(max_bson_size_obj);
     if (max_bson_size == -1) {
         return 0;
     }
 
-    max_write_batch_size_obj = PyObject_GetAttrString(ctx, "max_write_batch_size");
+    max_write_batch_size_obj = PyObject_GetAttr(ctx, state->_max_write_batch_size_str);
     max_write_batch_size = PyLong_AsLong(max_write_batch_size_obj);
     Py_XDECREF(max_write_batch_size_obj);
     if (max_write_batch_size == -1) {
         return 0;
     }
 
-    max_message_size_obj = PyObject_GetAttrString(ctx, "max_message_size");
+    max_message_size_obj = PyObject_GetAttr(ctx, state->_max_message_size_str);
     max_message_size = PyLong_AsLong(max_message_size_obj);
     Py_XDECREF(max_message_size_obj);
     if (max_message_size == -1) {
@@ -667,7 +671,7 @@ _batched_write_command(
     PyObject* doc = NULL;
     PyObject* iterator = NULL;
 
-    max_bson_size_obj = PyObject_GetAttrString(ctx, "max_bson_size");
+    max_bson_size_obj = PyObject_GetAttr(ctx, state->_max_bson_size_str);
     max_bson_size = PyLong_AsLong(max_bson_size_obj);
     Py_XDECREF(max_bson_size_obj);
     if (max_bson_size == -1) {
@@ -679,7 +683,7 @@
      */
     max_cmd_size = max_bson_size + 16382;
 
-    max_write_batch_size_obj = PyObject_GetAttrString(ctx, "max_write_batch_size");
+    max_write_batch_size_obj = PyObject_GetAttr(ctx, state->_max_write_batch_size_str);
     max_write_batch_size = PyLong_AsLong(max_write_batch_size_obj);
     Py_XDECREF(max_write_batch_size_obj);
     if (max_write_batch_size == -1) {
@@ -689,7 +693,7 @@
     // max_split_size is the size at which to perform a batch split.
     // Normally this value is equal to max_bson_size (16MiB). However,
     // when auto encryption is enabled max_split_size is reduced to 2MiB.
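One caveat about the fetch-and-convert sequence above: PyLong_AsLong returns -1 both for a genuine value of -1 and on error. These batching limits are always positive, so checking for -1 alone is safe here, but a general-purpose helper needs PyErr_Occurred() to distinguish the two cases, as in this illustrative sketch (the names are assumptions, not from the patch):

    #include <Python.h>

    /* Fetch an integer attribute through a cached name and distinguish a
     * real -1 from a conversion error.  Returns 0 on success, -1 on failure
     * with an exception set. */
    static int get_long_attr(PyObject* obj, PyObject* name_str, long* out) {
        PyObject* value = PyObject_GetAttr(obj, name_str);
        if (value == NULL) {
            return -1;
        }
        *out = PyLong_AsLong(value);
        Py_DECREF(value);
        if (*out == -1 && PyErr_Occurred()) {
            return -1;  /* conversion failed, not a genuine -1 */
        }
        return 0;
    }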
- max_split_size_obj = PyObject_GetAttrString(ctx, "max_split_size"); + max_split_size_obj = PyObject_GetAttr(ctx, state->_max_split_size_str); max_split_size = PyLong_AsLong(max_split_size_obj); Py_XDECREF(max_split_size_obj); if (max_split_size == -1) { @@ -924,11 +928,19 @@ static PyMethodDef _CMessageMethods[] = { #define INITERROR return NULL static int _cmessage_traverse(PyObject *m, visitproc visit, void *arg) { Py_VISIT(GETSTATE(m)->_cbson); + Py_VISIT(GETSTATE(m)->_max_bson_size_str); + Py_VISIT(GETSTATE(m)->_max_message_size_str); + Py_VISIT(GETSTATE(m)->_max_split_size_str); + Py_VISIT(GETSTATE(m)->_max_write_batch_size_str); return 0; } static int _cmessage_clear(PyObject *m) { Py_CLEAR(GETSTATE(m)->_cbson); + Py_CLEAR(GETSTATE(m)->_max_bson_size_str); + Py_CLEAR(GETSTATE(m)->_max_message_size_str); + Py_CLEAR(GETSTATE(m)->_max_split_size_str); + Py_CLEAR(GETSTATE(m)->_max_write_batch_size_str); return 0; } @@ -950,6 +962,7 @@ PyInit__cmessage(void) PyObject *_cbson = NULL; PyObject *c_api_object = NULL; PyObject *m = NULL; + struct module_state* state = NULL; /* Store a reference to the _cbson module since it's needed to call some * of its functions @@ -977,7 +990,14 @@ PyInit__cmessage(void) goto fail; } - GETSTATE(m)->_cbson = _cbson; + state = GETSTATE(m); + state->_cbson = _cbson; + if (!((state->_max_bson_size_str = PyUnicode_FromString("max_bson_size")) && + (state->_max_message_size_str = PyUnicode_FromString("max_message_size")) && + (state->_max_write_batch_size_str = PyUnicode_FromString("max_write_batch_size")) && + (state->_max_split_size_str = PyUnicode_FromString("max_split_size")))) { + goto fail; + } Py_DECREF(c_api_object); From 94fabf5e988b02041693ff7f0921e4eee32ee9a0 Mon Sep 17 00:00:00 2001 From: Noah Stapp Date: Mon, 3 Jul 2023 09:17:11 -0700 Subject: [PATCH 0458/1588] PYTHON-3793 Make tox fail with invalid environment (#1286) --- .evergreen/config.yml | 2 +- .evergreen/run-doctests.sh | 2 +- .github/workflows/test-python.yml | 10 +++++----- RELEASE.rst | 2 +- doc/index.rst | 2 +- tox.ini | 14 +++++++++++++- 6 files changed, 22 insertions(+), 10 deletions(-) diff --git a/.evergreen/config.yml b/.evergreen/config.yml index 928a56df1a..8062d35652 100644 --- a/.evergreen/config.yml +++ b/.evergreen/config.yml @@ -359,7 +359,7 @@ functions: ${PREPARE_SHELL} alias python=${PYTHON_BINARY} - python -m tox -e test-mockupdb + python -m tox -m test-mockupdb "run doctests": - command: shell.exec diff --git a/.evergreen/run-doctests.sh b/.evergreen/run-doctests.sh index 39e5102b6a..be71f1789a 100644 --- a/.evergreen/run-doctests.sh +++ b/.evergreen/run-doctests.sh @@ -4,4 +4,4 @@ set -o xtrace set -o errexit ${PYTHON_BINARY} -m pip install tox -${PYTHON_BINARY} -m tox -e doc-test +${PYTHON_BINARY} -m tox -m doc-test diff --git a/.github/workflows/test-python.yml b/.github/workflows/test-python.yml index d7c442cc49..93e10ac562 100644 --- a/.github/workflows/test-python.yml +++ b/.github/workflows/test-python.yml @@ -42,7 +42,7 @@ jobs: mongodb-version: 4.4 - name: Run tests run: | - tox -e test + tox -m test mypytest: name: Run mypy @@ -64,13 +64,13 @@ jobs: pip install tox - name: Run mypy run: | - tox -e typecheck-mypy + tox -m typecheck-mypy - name: Run pyright run: | - tox -e typecheck-pyright + tox -m typecheck-pyright - name: Run pyright strict run: | - tox -e typecheck-pyright-strict + tox -m typecheck-pyright-strict linkcheck: name: Check Links @@ -86,4 +86,4 @@ jobs: pip install tox - name: Check links run: | - tox -e linkcheck + tox -m linkcheck diff --git 
a/RELEASE.rst b/RELEASE.rst index caa67d3819..74a45e829a 100644 --- a/RELEASE.rst +++ b/RELEASE.rst @@ -36,7 +36,7 @@ Doing a Release To test locally, ``python3 setup.py test`` will build the C extensions and test. ``python3 tools/clean.py`` will remove the extensions, and then ``python3 setup.py --no_ext test`` will run the tests without - them. You can also run the doctests: ``tox -e doc-test``. + them. You can also run the doctests: ``tox -m doc-test``. 2. Check Jira to ensure all the tickets in this version have been completed. diff --git a/doc/index.rst b/doc/index.rst index 7e357c2a4b..2f0ba1d36a 100644 --- a/doc/index.rst +++ b/doc/index.rst @@ -103,7 +103,7 @@ following command from the root directory of the **PyMongo** source: .. code-block:: bash $ pip install tox - $ tox -e docs + $ tox -m doc Indices and tables ------------------ diff --git a/tox.ini b/tox.ini index 240126f8a5..ac65ed1ad9 100644 --- a/tox.ini +++ b/tox.ini @@ -24,6 +24,19 @@ envlist = doc-test, # Linkcheck sphinx docs linkcheck +labels = # Use labels and -m instead of -e so that tox -m